diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 0000000..0c0d773
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,9 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+title: "OpenMMLab's Image Classification Toolbox and Benchmark"
+authors:
+  - name: "MMClassification Contributors"
+version: 0.15.0
+date-released: 2020-07-09
+repository-code: "https://github.com/open-mmlab/mmclassification"
+license: Apache-2.0
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..8a0c632
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,61 @@
+# Contributing to OpenMMLab
+
+All kinds of contributions are welcome, including but not limited to the following.
+
+- Fix typos or bugs
+- Add documentation or translate the documentation into other languages
+- Add new features and components
+
+## Workflow
+
+1. fork and pull the latest OpenMMLab repository (MMClassification)
+2. checkout a new branch (do not use the master branch for PRs)
+3. commit your changes
+4. create a PR
+
+```{note}
+If you plan to add new features that involve large changes, it is encouraged to open an issue for discussion first.
+```
+
+## Code style
+
+### Python
+
+We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
+
+We use the following tools for linting and formatting:
+
+- [flake8](https://github.com/PyCQA/flake8): A wrapper around some linter tools.
+- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports.
+- [yapf](https://github.com/google/yapf): A formatter for Python files.
+- [codespell](https://github.com/codespell-project/codespell): A Python utility to fix common misspellings in text files.
+- [mdformat](https://github.com/executablebooks/mdformat): An opinionated Markdown formatter that can be used to enforce a consistent style in Markdown files.
+- [docformatter](https://github.com/myint/docformatter): A formatter for docstrings.
+
+Style configurations can be found in [setup.cfg](./setup.cfg).
+
+We use a [pre-commit hook](https://pre-commit.com/) that checks and formats `flake8`, `yapf`, `isort`, `trailing whitespaces` and `markdown files`,
+fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma` and `mixed-line-ending`, and sorts `requirements.txt` automatically on every commit.
+The config for the pre-commit hook is stored in [.pre-commit-config](https://github.com/open-mmlab/mmclassification/blob/master/.pre-commit-config.yaml).
+
+After you clone the repository, you will need to install and initialize the pre-commit hook.
+
+```shell
+pip install -U pre-commit
+```
+
+Then, from the repository folder, run
+
+```shell
+pre-commit install
+```
+
+After this, the code linters and formatter will be enforced on every commit.
+
+```{important}
+Before you create a PR, make sure that your code lints and is formatted by yapf.
+```
+
+### C++ and CUDA
+
+We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..f731325
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,203 @@
+Copyright (c) OpenMMLab. All rights reserved
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 MMClassification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
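The appendix above asks that this boilerplate be enclosed in the comment syntax appropriate to each file format. As a minimal sketch of that step (the per-file header actually used by this repository is not shown in this patch and may be a shorter notice), the text could sit at the top of a Python source file as:

```python
# Copyright 2020 MMClassification Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```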
diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..17ddc8c --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +include requirements/*.txt +include mmcls/.mim/model-index.yml +recursive-include mmcls/.mim/configs *.py *.yml +recursive-include mmcls/.mim/tools *.py *.sh diff --git a/README.md b/README.md new file mode 100644 index 0000000..75c9cf5 --- /dev/null +++ b/README.md @@ -0,0 +1,197 @@ +# GPViT: A High Resolution Non-Hierarchical Vision Transformer with Group Propagation +

+ +

+
+GPViT is a high-resolution non-hierarchical vision transformer architecture designed for high-performing visual recognition. This repository contains the official PyTorch implementation of our paper:
+
+[GPViT: A High Resolution Non-Hierarchical Vision Transformer with Group Propagation, *Chenhongyi Yang**, *Jiarui Xu**, *Shalini De Mello*, *Elliot J. Crowley*, *Xiaolong Wang*.](TBD)
+
+## Usage
+
+### Environment Setup
+Our codebase is built upon the MM-series toolkits. Specifically, classification is based on [MMClassification](); object detection is based on [MMDetection](); and semantic segmentation is based on [MMSegmentation](). Users can follow the official sites of those toolkits to set up their environments. We also provide a sample setup script below:
+
+```shell
+conda create -n gpvit python=3.7 -y
+source activate gpvit
+pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 -f https://download.pytorch.org/whl/torch_stable.html
+pip install -U openmim
+mim install mmcv-full==1.4.8
+pip install timm
+pip install lmdb  # for ImageNet experiments
+pip install -v -e .
+cd downstream/mmdetection  # set up object detection and instance segmentation
+pip install -v -e .
+cd ../mmsegmentation  # set up semantic segmentation
+pip install -v -e .
+```
+
+### Data Preparation
+Please follow [MMClassification](), [MMDetection]() and [MMSegmentation]() to set up the ImageNet, COCO and ADE20K datasets. For the ImageNet experiments, we convert the dataset to LMDB format to accelerate training and testing. For example, you can convert your own dataset by running:
+```shell
+python tools/dataset_tools/create_lmdb_dataset.py \
+    --train-img-dir data/imagenet/train \
+    --train-out data/imagenet/imagenet_lmdb/train \
+    --val_img_dir data/imagenet/val \
+    --val-out data/imagenet/imagenet_lmdb/val
+```
+After setting up, the dataset file structure should be as follows:
+```
+GPViT
+|-- data
+| |-- imagenet
+| | |-- imagenet_lmdb
+| | | |-- train
+| | | | |-- data.mdb
+| | | | |-- lock.mdb
+| | | |-- val
+| | | | |-- data.mdb
+| | | | |-- lock.mdb
+| | |-- meta
+| | | |-- ...
+|-- downstream
+| |-- mmsegmentation
+| | |-- data
+| | | |-- ade
+| | | | |-- ADEChallengeData2016
+| | | | | |-- annotations
+| | | | | | |-- ...
+| | | | | |-- images
+| | | | | | |-- ...
+| | | | | |-- objectInfo150.txt
+| | | | | |-- sceneCategories.txt
+| | |-- ...
+| |-- mmdetection
+| | |-- data
+| | | |-- coco
+| | | | |-- train2017
+| | | | | |-- ...
+| | | | |-- val2017
+| | | | | |-- ...
+| | | | |-- annotations
+| | | | | |-- instances_train2017.json
+| | | | | |-- instances_val2017.json
+| | | | | |-- ...
+| | |-- ...
+|-- ...
+``` + +### ImageNet classification +#### Training GPViT +```shell +# Example: Training GPViT-L1 model +zsh tool/dist_train.sh configs/gpvit/gpvit_l1.py 16 +``` +#### Testing GPViT +```shell +# Example: Testing GPViT-L1 model +zsh tool/dist_test.sh configs/gpvit/gpvit_l1.py work_dirs/gpvit_l1/epoch_300.pth 16 --metrics accuracy +``` +### COCO Object Detection and Instance Segmentation + +#### Training GPViT based Mask R-CNN +```shell +# Example: Training GPViT-L1 models with 1x and 3x+MS schedules +zsh tools/dist_train.sh configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_1x.py 16 +zsh tools/dist_train.sh configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_3x.py 16 +``` + +#### Training GPViT based RetinaNet +```shell +# Example: Training GPViT-L1 models with 1x and 3x+MS schedules +zsh tools/dist_train.sh configs/gpvit/retinanet/gpvit_l1_retinanet_1x.py 16 +zsh tools/dist_train.sh configs/gpvit/retinanet/gpvit_l4_retinanet_3x.py 16 +``` + +#### Testing GPViT based Mask R-CNN +```shell +# Example: Testing GPViT-L1 Mask R-CNN 1x model +zsh tools/dist_test.sh configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_1x.py work_dirs/gpvit_l1_maskrcnn_1x/epoch_12.pth 16 --eval bbox segm +``` + +#### Testing GPViT based RetinaNet +```shell +# Example: Testing GPViT-L1 RetinaNet 1x model +zsh tools/dist_test.sh configs/gpvit/retinanet/gpvit_l1_retinanet_1x.py work_dirs/gpvit_l1_retinanet_1x/epoch_12.pth 16 --eval bbox +``` + +### ADE20K semantic segmentation +#### Training GPViT based semantic segmentation models +```shell +# Example: Training GPViT-L1 based SegFormer and UperNet models +zsh tools/dist_train.sh configs/gpvit/gpvit_l1_segformer.py 16 +zsh tools/dist_train.sh configs/gpvit/gpvit_l1_upernet.py 16 +``` +#### Testing GPViT based semantic segmentation models +```shell +# Example: Testing GPViT-L1 based SegFormer and UperNet models +zsh tools/dist_test.sh configs/gpvit/gpvit_l1_segformer.py work_dirs/gpvit_l1_segformer/iter_160000.pth 16 --eval mIoU +zsh tools/dist_test.sh configs/gpvit/gpvit_l1_upernet.py work_dirs/gpvit_l1_upernet/iter_160000.pth 16 --eval mIoU +``` + +## Benchmark results + +### ImageNet-1k classification +| Model | #Params (M) | Top-1 Acc | Top-5 Acc | Config | Model | +|:--------:|:-----------:|:---------:|:---------:|:----------:|:---------:| +| GPViT-L1 | 9.3 | 80.5 | 95.4 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/configs/gpvit/gpvit_l1.py) | [model]() | +| GPViT-L2 | 23.8 | 83.4 | 96.6 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/configs/gpvit/gpvit_l2.py) | [model]() | +| GPViT-L3 | 36.2 | 84.1 | 96.9 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/configs/gpvit/gpvit_l3.py) | [model]() | +| GPViT-L4 | 75.4 | 84.3 | 96.9 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/configs/gpvit/gpvit_l4.py) | [model]() | + +### COCO Mask R-CNN 1x Schedule +| Model | #Params (M) | AP Box | AP Mask | Config | Model | +|:--------:|:-----------:|:------:|:-------:|:----------:|:---------:| +| GPViT-L1 | 33 | 48.1 | 42.7 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_1x.py) | [model]() | +| GPViT-L2 | 50 | 49.9 | 43.9 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l2_maskrcnn_1x.py) | [model]() | +| GPViT-L3 | 64 | 50.4 | 44.4 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l3_maskrcnn_1x.py) | [model]() | +| GPViT-L4 | 109 | 51.0 | 45.0 | 
[config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l4_maskrcnn_1x.py) | [model]() | + +### COCO Mask R-CNN 3x+MS Schedule +| Model | #Params (M) | AP Box | AP Mask | Config | Model | +|:--------:|:-----------:|:------:|:-------:|:----------:|:---------:| +| GPViT-L1 | 33 | 50.2 | 44.3 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_3x.py) | [model]() | +| GPViT-L2 | 50 | 51.4 | 45.1 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l2_maskrcnn_3x.py) | [model]() | +| GPViT-L3 | 64 | 51.6 | 45.2 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l3_maskrcnn_3x.py) | [model]() | +| GPViT-L4 | 109 | 52.1 | 45.7 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l4_maskrcnn_3x.py) | [model]() | + +### COCO RetinaNet 1x Schedule +| Model | #Params (M) | AP Box | Config | Model | +|:--------:|:-----------:|:------:|:----------:|:---------:| +| GPViT-L1 | 21 | 45.8 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l1_retinanet_1x.py) | [model]() | +| GPViT-L2 | 37 | 48.0 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l2_retinanet_1x.py) | [model]() | +| GPViT-L3 | 52 | 48.3 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l3_retinanet_1x.py) | [model]() | +| GPViT-L4 | 96 | 48.7 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l4_retinanet_1x.py) | [model]() | + +### COCO RetinaNet 3x+MS Schedule +| Model | #Params (M) | AP Box | Config | Model | +|:--------:|:-----------:|:------:|:----------:|:---------:| +| GPViT-L1 | 21 | 48.1 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l1_retinanet_3x.py) | [model]() | +| GPViT-L2 | 37 | 49.0 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l2_retinanet_3x.py) | [model]() | +| GPViT-L3 | 52 | 49.4 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l3_retinanet_3x.py) | [model]() | +| GPViT-L4 | 96 | 49.8 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l4_retinanet_3x.py) | [model]() | + +### ADE20K UperNet +| Model | #Params (M) | mIoU | Config | Model | +|:--------:|:-----------:|:----:|:----------:|:---------:| +| GPViT-L1 | 37 | 49.1 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmsegmentation/configs/gpvit/gpvit_l1_upernet.py) | [model]() | +| GPViT-L2 | 53 | 50.2 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmsegmentation/configs/gpvit/gpvit_l2_upernet.py) | [model]() | +| GPViT-L3 | 66 | 51.7 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmsegmentation/configs/gpvit/gpvit_l3_upernet.py) | [model]() | +| GPViT-L4 | 107 | 52.5 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmsegmentation/configs/gpvit/gpvit_l14_upernet.py) | [model]() | + +### ADE20K SegFormer +| Model | #Params (M) | mIoU | Config | Model | 
+|:--------:|:-----------:|:----:|:----------:|:---------:| +| GPViT-L1 | 9 | 46.9 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmsegmentation/configs/gpvit/gpvit_l1_segformer.py) | [model]() | +| GPViT-L2 | 24 | 49.2 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmsegmentation/configs/gpvit/gpvit_l2_segformer.py) | [model]() | +| GPViT-L3 | 36 | 50.8 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmsegmentation/configs/gpvit/gpvit_l3_segformer.py) | [model]() | +| GPViT-L4 | 76 | 51.3 | [config](https://github.com/ChenhongyiYang/GPViT/blob/main/downstream/mmsegmentation/configs/gpvit/gpvit_l14_segformer.py) | [model]() | + + + +## Citation +``` +TBD +``` + diff --git a/configs/_base_/datasets/cifar100_bs16.py b/configs/_base_/datasets/cifar100_bs16.py new file mode 100644 index 0000000..d4f8db7 --- /dev/null +++ b/configs/_base_/datasets/cifar100_bs16.py @@ -0,0 +1,36 @@ +# dataset settings +dataset_type = 'CIFAR100' +img_norm_cfg = dict( + mean=[129.304, 124.070, 112.434], + std=[68.170, 65.392, 70.418], + to_rgb=False) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=16, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/cifar100', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/cifar100', + pipeline=test_pipeline, + test_mode=True), + test=dict( + type=dataset_type, + data_prefix='data/cifar100', + pipeline=test_pipeline, + test_mode=True)) diff --git a/configs/_base_/datasets/cifar10_bs16.py b/configs/_base_/datasets/cifar10_bs16.py new file mode 100644 index 0000000..0d28adf --- /dev/null +++ b/configs/_base_/datasets/cifar10_bs16.py @@ -0,0 +1,35 @@ +# dataset settings +dataset_type = 'CIFAR10' +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=16, + workers_per_gpu=2, + train=dict( + type=dataset_type, data_prefix='data/cifar10', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/cifar10', + pipeline=test_pipeline, + test_mode=True), + test=dict( + type=dataset_type, + data_prefix='data/cifar10', + pipeline=test_pipeline, + test_mode=True)) diff --git a/configs/_base_/datasets/cub_bs8_384.py b/configs/_base_/datasets/cub_bs8_384.py new file mode 100644 index 0000000..4acad24 --- /dev/null +++ b/configs/_base_/datasets/cub_bs8_384.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'CUB' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
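+    # CUB-200-2011 fine-grained training recipe used by this config: resize
+    # with size=510, take a random 384x384 crop, apply a random horizontal
+    # flip, then normalize with the ImageNet statistics defined above.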
dict(type='Resize', size=510), + dict(type='RandomCrop', size=384), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=510), + dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data_root = 'data/CUB_200_2011/' +data = dict( + samples_per_gpu=8, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'images.txt', + image_class_labels_file=data_root + 'image_class_labels.txt', + train_test_split_file=data_root + 'train_test_split.txt', + data_prefix=data_root + 'images', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'images.txt', + image_class_labels_file=data_root + 'image_class_labels.txt', + train_test_split_file=data_root + 'train_test_split.txt', + data_prefix=data_root + 'images', + test_mode=True, + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'images.txt', + image_class_labels_file=data_root + 'image_class_labels.txt', + train_test_split_file=data_root + 'train_test_split.txt', + data_prefix=data_root + 'images', + test_mode=True, + pipeline=test_pipeline)) + +evaluation = dict( + interval=1, metric='accuracy', + save_best='auto') # save the checkpoint with highest accuracy diff --git a/configs/_base_/datasets/cub_bs8_448.py b/configs/_base_/datasets/cub_bs8_448.py new file mode 100644 index 0000000..9e909a1 --- /dev/null +++ b/configs/_base_/datasets/cub_bs8_448.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'CUB' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=600), + dict(type='RandomCrop', size=448), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=600), + dict(type='CenterCrop', crop_size=448), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data_root = 'data/CUB_200_2011/' +data = dict( + samples_per_gpu=8, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'images.txt', + image_class_labels_file=data_root + 'image_class_labels.txt', + train_test_split_file=data_root + 'train_test_split.txt', + data_prefix=data_root + 'images', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'images.txt', + image_class_labels_file=data_root + 'image_class_labels.txt', + train_test_split_file=data_root + 'train_test_split.txt', + data_prefix=data_root + 'images', + test_mode=True, + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'images.txt', + image_class_labels_file=data_root + 'image_class_labels.txt', + train_test_split_file=data_root + 'train_test_split.txt', + data_prefix=data_root + 'images', + test_mode=True, + pipeline=test_pipeline)) + +evaluation = dict( + interval=1, metric='accuracy', + save_best='auto') # 
save the checkpoint with highest accuracy diff --git a/configs/_base_/datasets/imagenet21k_bs128.py b/configs/_base_/datasets/imagenet21k_bs128.py new file mode 100644 index 0000000..b81a746 --- /dev/null +++ b/configs/_base_/datasets/imagenet21k_bs128.py @@ -0,0 +1,43 @@ +# dataset settings +dataset_type = 'ImageNet21k' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=128, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet21k/train', + pipeline=train_pipeline, + recursion_subdir=True), + val=dict( + type=dataset_type, + data_prefix='data/imagenet21k/val', + ann_file='data/imagenet21k/meta/val.txt', + pipeline=test_pipeline, + recursion_subdir=True), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet21k/val', + ann_file='data/imagenet21k/meta/val.txt', + pipeline=test_pipeline, + recursion_subdir=True)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py b/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py new file mode 100644 index 0000000..667e58a --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py @@ -0,0 +1,71 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(236, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=128, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + 
ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py b/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py new file mode 100644 index 0000000..76aee7e --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py @@ -0,0 +1,71 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=128, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs256_rsb_a12.py b/configs/_base_/datasets/imagenet_bs256_rsb_a12.py new file mode 100644 index 0000000..7596855 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_rsb_a12.py @@ -0,0 +1,53 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=7, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + 
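+    # Evaluation pipeline: resize the short side to 236, center-crop to
+    # 224x224, then normalize with the same ImageNet statistics as training.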
dict(type='LoadImageFromFile'), + dict(type='Resize', size=(236, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=256, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs256_rsb_a3.py b/configs/_base_/datasets/imagenet_bs256_rsb_a3.py new file mode 100644 index 0000000..aee640d --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_rsb_a3.py @@ -0,0 +1,53 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=160), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=6, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(236, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=256, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs32.py b/configs/_base_/datasets/imagenet_bs32.py new file mode 100644 index 0000000..8a54659 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32.py @@ -0,0 +1,40 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + 
dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py b/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py new file mode 100644 index 0000000..d66c1bd --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py @@ -0,0 +1,48 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(256, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs32_pil_resize.py b/configs/_base_/datasets/imagenet_bs32_pil_resize.py new file mode 100644 index 0000000..22b74f7 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32_pil_resize.py @@ -0,0 +1,40 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + 
pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64.py b/configs/_base_/datasets/imagenet_bs64.py new file mode 100644 index 0000000..b9f866a --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64.py @@ -0,0 +1,40 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_autoaug.py b/configs/_base_/datasets/imagenet_bs64_autoaug.py new file mode 100644 index 0000000..a1092a3 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_autoaug.py @@ -0,0 +1,43 @@ +_base_ = ['./pipelines/auto_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies={{_base_.auto_increasing_policies}}), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_convmixer_224.py b/configs/_base_/datasets/imagenet_bs64_convmixer_224.py new 
file mode 100644 index 0000000..afd7113 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_convmixer_224.py @@ -0,0 +1,71 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(233, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_mixer_224.py b/configs/_base_/datasets/imagenet_bs64_mixer_224.py new file mode 100644 index 0000000..a005436 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_mixer_224.py @@ -0,0 +1,48 @@ +# dataset settings +dataset_type = 'ImageNet' + +# change according to https://github.com/rwightman/pytorch-image-models/blob +# /master/timm/models/mlp_mixer.py +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +# training is not supported for now +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='cv2'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', size=(256, -1), backend='cv2', interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace 
`data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_pil_resize.py b/configs/_base_/datasets/imagenet_bs64_pil_resize.py new file mode 100644 index 0000000..95d0e1f --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_pil_resize.py @@ -0,0 +1,40 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py b/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py new file mode 100644 index 0000000..2a9a4de --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py @@ -0,0 +1,53 @@ +_base_ = [ + 'pipelines/auto_aug.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies={{_base_.policy_imagenet}}), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(256, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = 
dict(interval=1, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_swin_224.py b/configs/_base_/datasets/imagenet_bs64_swin_224.py new file mode 100644 index 0000000..4a059a3 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_swin_224.py @@ -0,0 +1,71 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(256, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_swin_224_lmdb.py b/configs/_base_/datasets/imagenet_bs64_swin_224_lmdb.py new file mode 100644 index 0000000..7d79449 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_swin_224_lmdb.py @@ -0,0 +1,80 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFileLMDB', + file_client_args={ + 'backend': 'lmdb', + 'db_path': 'data/imagenet/imagenet_lmdb/train' + }), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', 
keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFileLMDB', + file_client_args={ + 'backend': 'lmdb', + 'db_path': 'data/imagenet/imagenet_lmdb/val' + }), + dict( + type='Resize', + size=(256, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='', + ann_file='data/imagenet/meta/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_swin_256.py b/configs/_base_/datasets/imagenet_bs64_swin_256.py new file mode 100644 index 0000000..1f73683 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_swin_256.py @@ -0,0 +1,71 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=256, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(292, -1), # ( 256 / 224 * 256 ) + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_swin_384.py b/configs/_base_/datasets/imagenet_bs64_swin_384.py new file mode 100644 index 0000000..d263939 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_swin_384.py @@ -0,0 +1,43 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=384, backend='pillow', interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_t2t_224.py b/configs/_base_/datasets/imagenet_bs64_t2t_224.py new file mode 100644 index 0000000..1190d6f --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_t2t_224.py @@ -0,0 +1,71 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=1, metric='accuracy', save_best='auto') diff --git a/configs/_base_/datasets/pipelines/auto_aug.py b/configs/_base_/datasets/pipelines/auto_aug.py new file mode 100644 index 0000000..5a10f7e --- /dev/null +++ b/configs/_base_/datasets/pipelines/auto_aug.py @@ -0,0 +1,96 
@@ +# Policy for ImageNet, refers to +# https://github.com/DeepVoltaire/AutoAugment/blame/master/autoaugment.py +policy_imagenet = [ + [ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], + [ + dict(type='Posterize', bits=5, prob=0.6), + dict(type='Posterize', bits=5, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8) + ], + [ + dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Posterize', bits=6, prob=0.8), + dict(type='Equalize', prob=1.)], + [ + dict(type='Rotate', angle=10., prob=0.2), + dict(type='Solarize', thr=256 / 9, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.6), + dict(type='Posterize', bits=5, prob=0.4) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0., prob=0.4) + ], + [ + dict(type='Rotate', angle=30., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Equalize', prob=0.0), + dict(type='Equalize', prob=0.8)], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0.2, prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0.8, prob=0.8), + dict(type='Solarize', thr=256 / 9 * 2, prob=0.8) + ], + [ + dict(type='Sharpness', magnitude=0.7, prob=0.4), + dict(type='Invert', prob=0.6) + ], + [ + dict( + type='Shear', + magnitude=0.3 / 9 * 5, + prob=0.6, + direction='horizontal'), + dict(type='Equalize', prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) 
+ ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], +] diff --git a/configs/_base_/datasets/pipelines/rand_aug.py b/configs/_base_/datasets/pipelines/rand_aug.py new file mode 100644 index 0000000..f2bab3c --- /dev/null +++ b/configs/_base_/datasets/pipelines/rand_aug.py @@ -0,0 +1,43 @@ +# Refers to `_RAND_INCREASING_TRANSFORMS` in pytorch-image-models +rand_increasing_policies = [ + dict(type='AutoContrast'), + dict(type='Equalize'), + dict(type='Invert'), + dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)), + dict(type='Posterize', magnitude_key='bits', magnitude_range=(4, 0)), + dict(type='Solarize', magnitude_key='thr', magnitude_range=(256, 0)), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110)), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(0, 0.9)), + dict(type='Contrast', magnitude_key='magnitude', magnitude_range=(0, 0.9)), + dict( + type='Brightness', magnitude_key='magnitude', + magnitude_range=(0, 0.9)), + dict( + type='Sharpness', magnitude_key='magnitude', magnitude_range=(0, 0.9)), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='horizontal'), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='vertical'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.45), + direction='horizontal'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.45), + direction='vertical') +] diff --git a/configs/_base_/datasets/stanford_cars_bs8_448.py b/configs/_base_/datasets/stanford_cars_bs8_448.py new file mode 100644 index 0000000..636b2e1 --- /dev/null +++ b/configs/_base_/datasets/stanford_cars_bs8_448.py @@ -0,0 +1,46 @@ +# dataset settings +dataset_type = 'StanfordCars' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=512), + dict(type='RandomCrop', size=448), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=512), + dict(type='CenterCrop', crop_size=448), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data_root = 'data/stanfordcars' +data = dict( + samples_per_gpu=8, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix=data_root, + test_mode=False, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix=data_root, + test_mode=True, + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_prefix=data_root, + test_mode=True, + pipeline=test_pipeline)) + +evaluation = dict( + interval=1, metric='accuracy', + save_best='auto') # save the checkpoint with highest accuracy diff --git a/configs/_base_/datasets/voc_bs16.py b/configs/_base_/datasets/voc_bs16.py new file mode 100644 index 0000000..73fa0bc --- /dev/null +++ b/configs/_base_/datasets/voc_bs16.py @@ -0,0 +1,41 @@ +# dataset settings +dataset_type = 'VOC' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + 
dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=16, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/VOCdevkit/VOC2007/', + ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/VOCdevkit/VOC2007/', + ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_prefix='data/VOCdevkit/VOC2007/', + ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt', + pipeline=test_pipeline)) +evaluation = dict( + interval=1, metric=['mAP', 'CP', 'OP', 'CR', 'OR', 'CF1', 'OF1']) diff --git a/configs/_base_/default_runtime.py b/configs/_base_/default_runtime.py new file mode 100644 index 0000000..ba965a4 --- /dev/null +++ b/configs/_base_/default_runtime.py @@ -0,0 +1,16 @@ +# checkpoint saving +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=100, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable + +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/_base_/models/conformer/base-p16.py b/configs/_base_/models/conformer/base-p16.py new file mode 100644 index 0000000..157dcc9 --- /dev/null +++ b/configs/_base_/models/conformer/base-p16.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='Conformer', arch='base', drop_path_rate=0.1, init_cfg=None), + neck=None, + head=dict( + type='ConformerHead', + num_classes=1000, + in_channels=[1536, 576], + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/conformer/small-p16.py b/configs/_base_/models/conformer/small-p16.py new file mode 100644 index 0000000..1729808 --- /dev/null +++ b/configs/_base_/models/conformer/small-p16.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='Conformer', arch='small', drop_path_rate=0.1, init_cfg=None), + neck=None, + head=dict( + type='ConformerHead', + num_classes=1000, + in_channels=[1024, 384], + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/conformer/small-p32.py b/configs/_base_/models/conformer/small-p32.py new file mode 100644 index 0000000..593aba1 --- /dev/null +++ b/configs/_base_/models/conformer/small-p32.py @@ -0,0 +1,26 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='Conformer', + arch='small', + patch_size=32, + drop_path_rate=0.1, + init_cfg=None), + neck=None, + head=dict( + type='ConformerHead', + num_classes=1000, + in_channels=[1024, 384], + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/conformer/tiny-p16.py b/configs/_base_/models/conformer/tiny-p16.py new file mode 100644 index 0000000..dad8eca --- /dev/null +++ b/configs/_base_/models/conformer/tiny-p16.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='Conformer', arch='tiny', drop_path_rate=0.1, init_cfg=None), + neck=None, + head=dict( + type='ConformerHead', + num_classes=1000, + in_channels=[256, 384], + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/convmixer/convmixer-1024-20.py b/configs/_base_/models/convmixer/convmixer-1024-20.py new file mode 100644 index 0000000..a8f4d51 --- /dev/null +++ b/configs/_base_/models/convmixer/convmixer-1024-20.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvMixer', arch='1024/20'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convmixer/convmixer-1536-20.py b/configs/_base_/models/convmixer/convmixer-1536-20.py new file mode 100644 index 0000000..9ad8209 --- /dev/null +++ b/configs/_base_/models/convmixer/convmixer-1536-20.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvMixer', arch='1536/20'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convmixer/convmixer-768-32.py b/configs/_base_/models/convmixer/convmixer-768-32.py new file mode 100644 index 0000000..1cba528 --- /dev/null +++ b/configs/_base_/models/convmixer/convmixer-768-32.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvMixer', arch='768/32', act_cfg=dict(type='ReLU')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convnext/convnext-base.py b/configs/_base_/models/convnext/convnext-base.py new file mode 100644 index 0000000..7fc5ce7 --- /dev/null +++ b/configs/_base_/models/convnext/convnext-base.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='base', + out_indices=(3, ), + drop_path_rate=0.5, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convnext/convnext-large.py b/configs/_base_/models/convnext/convnext-large.py new file mode 100644 index 0000000..4d9e37c --- /dev/null +++ b/configs/_base_/models/convnext/convnext-large.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='large', + out_indices=(3, ), + drop_path_rate=0.5, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convnext/convnext-small.py b/configs/_base_/models/convnext/convnext-small.py new file mode 100644 index 0000000..989ad1d --- /dev/null +++ b/configs/_base_/models/convnext/convnext-small.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( 
+ type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='small', + out_indices=(3, ), + drop_path_rate=0.4, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convnext/convnext-tiny.py b/configs/_base_/models/convnext/convnext-tiny.py new file mode 100644 index 0000000..0b692ab --- /dev/null +++ b/configs/_base_/models/convnext/convnext-tiny.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='tiny', + out_indices=(3, ), + drop_path_rate=0.1, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convnext/convnext-xlarge.py b/configs/_base_/models/convnext/convnext-xlarge.py new file mode 100644 index 0000000..0c75e32 --- /dev/null +++ b/configs/_base_/models/convnext/convnext-xlarge.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='xlarge', + out_indices=(3, ), + drop_path_rate=0.5, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/densenet/densenet121.py b/configs/_base_/models/densenet/densenet121.py new file mode 100644 index 0000000..0a14d30 --- /dev/null +++ b/configs/_base_/models/densenet/densenet121.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='DenseNet', arch='121'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/densenet/densenet161.py b/configs/_base_/models/densenet/densenet161.py new file mode 100644 index 0000000..61a0d83 --- /dev/null +++ b/configs/_base_/models/densenet/densenet161.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='DenseNet', arch='161'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2208, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/densenet/densenet169.py b/configs/_base_/models/densenet/densenet169.py new file mode 100644 index 0000000..779ea17 --- /dev/null +++ b/configs/_base_/models/densenet/densenet169.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='DenseNet', arch='169'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1664, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/densenet/densenet201.py 
b/configs/_base_/models/densenet/densenet201.py new file mode 100644 index 0000000..2909af0 --- /dev/null +++ b/configs/_base_/models/densenet/densenet201.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='DenseNet', arch='201'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1920, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/efficientnet_b0.py b/configs/_base_/models/efficientnet_b0.py new file mode 100644 index 0000000..d9ba685 --- /dev/null +++ b/configs/_base_/models/efficientnet_b0.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b0'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b1.py b/configs/_base_/models/efficientnet_b1.py new file mode 100644 index 0000000..63e15c8 --- /dev/null +++ b/configs/_base_/models/efficientnet_b1.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b1'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b2.py b/configs/_base_/models/efficientnet_b2.py new file mode 100644 index 0000000..5edcfa5 --- /dev/null +++ b/configs/_base_/models/efficientnet_b2.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b2'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1408, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b3.py b/configs/_base_/models/efficientnet_b3.py new file mode 100644 index 0000000..c7c6d6d --- /dev/null +++ b/configs/_base_/models/efficientnet_b3.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b3'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b4.py b/configs/_base_/models/efficientnet_b4.py new file mode 100644 index 0000000..06840ed --- /dev/null +++ b/configs/_base_/models/efficientnet_b4.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b4'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1792, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b5.py b/configs/_base_/models/efficientnet_b5.py new file mode 100644 index 0000000..a86eebd --- /dev/null +++ b/configs/_base_/models/efficientnet_b5.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b5'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', 
loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b6.py b/configs/_base_/models/efficientnet_b6.py new file mode 100644 index 0000000..4eada1d --- /dev/null +++ b/configs/_base_/models/efficientnet_b6.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b6'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2304, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b7.py b/configs/_base_/models/efficientnet_b7.py new file mode 100644 index 0000000..1d84ba4 --- /dev/null +++ b/configs/_base_/models/efficientnet_b7.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b7'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2560, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b8.py b/configs/_base_/models/efficientnet_b8.py new file mode 100644 index 0000000..c950064 --- /dev/null +++ b/configs/_base_/models/efficientnet_b8.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b8'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2816, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_em.py b/configs/_base_/models/efficientnet_em.py new file mode 100644 index 0000000..abecdbe --- /dev/null +++ b/configs/_base_/models/efficientnet_em.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + # `em` means EfficientNet-EdgeTPU-M arch + backbone=dict(type='EfficientNet', arch='em', act_cfg=dict(type='ReLU')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_es.py b/configs/_base_/models/efficientnet_es.py new file mode 100644 index 0000000..911ba4a --- /dev/null +++ b/configs/_base_/models/efficientnet_es.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + # `es` means EfficientNet-EdgeTPU-S arch + backbone=dict(type='EfficientNet', arch='es', act_cfg=dict(type='ReLU')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hornet/hornet-base-gf.py b/configs/_base_/models/hornet/hornet-base-gf.py new file mode 100644 index 0000000..7544970 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-base-gf.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='base-gf', drop_path_rate=0.5), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/hornet/hornet-base.py b/configs/_base_/models/hornet/hornet-base.py new file mode 100644 index 0000000..8276414 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-base.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='base', drop_path_rate=0.5), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/hornet/hornet-large-gf.py b/configs/_base_/models/hornet/hornet-large-gf.py new file mode 100644 index 0000000..a5b5511 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-large-gf.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='large-gf', drop_path_rate=0.2), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/hornet/hornet-large-gf384.py b/configs/_base_/models/hornet/hornet-large-gf384.py new file mode 100644 index 0000000..fbb5478 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-large-gf384.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='large-gf384', drop_path_rate=0.4), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ]) diff --git a/configs/_base_/models/hornet/hornet-large.py b/configs/_base_/models/hornet/hornet-large.py new file mode 100644 index 0000000..26d99e1 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-large.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='large', drop_path_rate=0.2), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/hornet/hornet-small-gf.py b/configs/_base_/models/hornet/hornet-small-gf.py new file mode 100644 index 0000000..42d9d11 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-small-gf.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='small-gf', drop_path_rate=0.4), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/hornet/hornet-small.py b/configs/_base_/models/hornet/hornet-small.py new file mode 100644 index 0000000..e803976 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-small.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='small', drop_path_rate=0.4), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/hornet/hornet-tiny-gf.py b/configs/_base_/models/hornet/hornet-tiny-gf.py new file mode 100644 index 0000000..0e417d0 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-tiny-gf.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='tiny-gf', drop_path_rate=0.2), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/hornet/hornet-tiny.py b/configs/_base_/models/hornet/hornet-tiny.py new file mode 100644 index 0000000..068d7d6 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-tiny.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='tiny', drop_path_rate=0.2), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/hrnet/hrnet-w18.py b/configs/_base_/models/hrnet/hrnet-w18.py new file mode 100644 index 0000000..f7fbf29 --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w18.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w18'), + neck=[ + dict(type='HRFuseScales', in_channels=(18, 36, 72, 144)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w30.py b/configs/_base_/models/hrnet/hrnet-w30.py new file mode 100644 index 0000000..babcaca --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w30.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w30'), + neck=[ + dict(type='HRFuseScales', in_channels=(30, 60, 120, 240)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w32.py b/configs/_base_/models/hrnet/hrnet-w32.py new file mode 100644 index 0000000..2c1e980 --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w32.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w32'), + neck=[ + dict(type='HRFuseScales', in_channels=(32, 64, 128, 256)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w40.py b/configs/_base_/models/hrnet/hrnet-w40.py new file mode 100644 index 0000000..83f65d8 --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w40.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w40'), + neck=[ + dict(type='HRFuseScales', in_channels=(40, 80, 160, 320)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w44.py b/configs/_base_/models/hrnet/hrnet-w44.py new file mode 100644 index 0000000..e75dc0f --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w44.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w44'), + neck=[ + dict(type='HRFuseScales', in_channels=(44, 88, 176, 352)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w48.py b/configs/_base_/models/hrnet/hrnet-w48.py new file mode 100644 index 0000000..f060495 --- /dev/null +++ 
b/configs/_base_/models/hrnet/hrnet-w48.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w48'), + neck=[ + dict(type='HRFuseScales', in_channels=(48, 96, 192, 384)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w64.py b/configs/_base_/models/hrnet/hrnet-w64.py new file mode 100644 index 0000000..844c3fe --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w64.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w64'), + neck=[ + dict(type='HRFuseScales', in_channels=(64, 128, 256, 512)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mlp_mixer_base_patch16.py b/configs/_base_/models/mlp_mixer_base_patch16.py new file mode 100644 index 0000000..5ebd17f --- /dev/null +++ b/configs/_base_/models/mlp_mixer_base_patch16.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='MlpMixer', + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=dict(type='GlobalAveragePooling', dim=1), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), +) diff --git a/configs/_base_/models/mlp_mixer_large_patch16.py b/configs/_base_/models/mlp_mixer_large_patch16.py new file mode 100644 index 0000000..ff10713 --- /dev/null +++ b/configs/_base_/models/mlp_mixer_large_patch16.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='MlpMixer', + arch='l', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=dict(type='GlobalAveragePooling', dim=1), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), +) diff --git a/configs/_base_/models/mobilenet_v2_1x.py b/configs/_base_/models/mobilenet_v2_1x.py new file mode 100644 index 0000000..6ebff1e --- /dev/null +++ b/configs/_base_/models/mobilenet_v2_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV2', widen_factor=1.0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobilenet_v3_large_imagenet.py b/configs/_base_/models/mobilenet_v3_large_imagenet.py new file mode 100644 index 0000000..5318f50 --- /dev/null +++ b/configs/_base_/models/mobilenet_v3_large_imagenet.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='large'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=1000, + in_channels=960, + mid_channels=[1280], + dropout_rate=0.2, + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', 
loss_weight=1.0), + init_cfg=dict( + type='Normal', layer='Linear', mean=0., std=0.01, bias=0.), + topk=(1, 5))) diff --git a/configs/_base_/models/mobilenet_v3_small_cifar.py b/configs/_base_/models/mobilenet_v3_small_cifar.py new file mode 100644 index 0000000..5dbe980 --- /dev/null +++ b/configs/_base_/models/mobilenet_v3_small_cifar.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='small'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=10, + in_channels=576, + mid_channels=[1280], + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/mobilenet_v3_small_imagenet.py b/configs/_base_/models/mobilenet_v3_small_imagenet.py new file mode 100644 index 0000000..af6cc1b --- /dev/null +++ b/configs/_base_/models/mobilenet_v3_small_imagenet.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='small'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=1000, + in_channels=576, + mid_channels=[1024], + dropout_rate=0.2, + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=dict( + type='Normal', layer='Linear', mean=0., std=0.01, bias=0.), + topk=(1, 5))) diff --git a/configs/_base_/models/mvit/mvitv2-base.py b/configs/_base_/models/mvit/mvitv2-base.py new file mode 100644 index 0000000..c75e78e --- /dev/null +++ b/configs/_base_/models/mvit/mvitv2-base.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict(type='MViT', arch='base', drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + in_channels=768, + num_classes=1000, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/mvit/mvitv2-large.py b/configs/_base_/models/mvit/mvitv2-large.py new file mode 100644 index 0000000..aa4a325 --- /dev/null +++ b/configs/_base_/models/mvit/mvitv2-large.py @@ -0,0 +1,23 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='MViT', + arch='large', + drop_path_rate=0.5, + dim_mul_in_attention=False), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + in_channels=1152, + num_classes=1000, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/mvit/mvitv2-small.py b/configs/_base_/models/mvit/mvitv2-small.py new file mode 100644 index 0000000..bb9329d --- /dev/null +++ b/configs/_base_/models/mvit/mvitv2-small.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict(type='MViT', arch='small', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + in_channels=768, + num_classes=1000, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/mvit/mvitv2-tiny.py b/configs/_base_/models/mvit/mvitv2-tiny.py new file mode 100644 index 0000000..7ca85dc --- /dev/null +++ b/configs/_base_/models/mvit/mvitv2-tiny.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict(type='MViT', arch='tiny', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + in_channels=768, + num_classes=1000, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/poolformer/poolformer_m36.py b/configs/_base_/models/poolformer/poolformer_m36.py new file mode 100644 index 0000000..276a721 --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_m36.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='m36', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/poolformer/poolformer_m48.py b/configs/_base_/models/poolformer/poolformer_m48.py new file mode 100644 index 0000000..8c006ac --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_m48.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='m48', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/poolformer/poolformer_s12.py b/configs/_base_/models/poolformer/poolformer_s12.py new file mode 100644 index 0000000..b7b3600 --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_s12.py @@ -0,0 +1,22 @@ +# Model 
settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='s12', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/poolformer/poolformer_s24.py b/configs/_base_/models/poolformer/poolformer_s24.py new file mode 100644 index 0000000..822ab5b --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_s24.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='s24', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/poolformer/poolformer_s36.py b/configs/_base_/models/poolformer/poolformer_s36.py new file mode 100644 index 0000000..489f222 --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_s36.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='s36', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/regnet/regnetx_1.6gf.py b/configs/_base_/models/regnet/regnetx_1.6gf.py new file mode 100644 index 0000000..b81f0ad --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_1.6gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_1.6gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=912, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_12gf.py b/configs/_base_/models/regnet/regnetx_12gf.py new file mode 100644 index 0000000..383d4f8 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_12gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_12gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2240, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_3.2gf.py b/configs/_base_/models/regnet/regnetx_3.2gf.py new file mode 100644 index 0000000..67d4541 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_3.2gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_3.2gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1008, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git 
a/configs/_base_/models/regnet/regnetx_4.0gf.py b/configs/_base_/models/regnet/regnetx_4.0gf.py new file mode 100644 index 0000000..01419c6 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_4.0gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_4.0gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1360, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_400mf.py b/configs/_base_/models/regnet/regnetx_400mf.py new file mode 100644 index 0000000..ef518b9 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_400mf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_400mf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_6.4gf.py b/configs/_base_/models/regnet/regnetx_6.4gf.py new file mode 100644 index 0000000..44e6222 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_6.4gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_6.4gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1624, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_8.0gf.py b/configs/_base_/models/regnet/regnetx_8.0gf.py new file mode 100644 index 0000000..2929826 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_8.0gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_8.0gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1920, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_800mf.py b/configs/_base_/models/regnet/regnetx_800mf.py new file mode 100644 index 0000000..210f760 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_800mf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_800mf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=672, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/repmlp-base_224.py b/configs/_base_/models/repmlp-base_224.py new file mode 100644 index 0000000..7db0077 --- /dev/null +++ b/configs/_base_/models/repmlp-base_224.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepMLPNet', + arch='B', + img_size=224, + out_indices=(3, ), + reparam_conv_kernels=(1, 3), + deploy=False), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/repvgg-A0_in1k.py b/configs/_base_/models/repvgg-A0_in1k.py new file mode 100644 index 0000000..093ffb7 --- /dev/null +++ b/configs/_base_/models/repvgg-A0_in1k.py @@ -0,0 +1,15 @@ +model = dict( + type='ImageClassifier', 
+ backbone=dict( + type='RepVGG', + arch='A0', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py b/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py new file mode 100644 index 0000000..5bb07db --- /dev/null +++ b/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py @@ -0,0 +1,23 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepVGG', + arch='B3', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2560, + loss=dict( + type='LabelSmoothLoss', + loss_weight=1.0, + label_smooth_val=0.1, + mode='classy_vision', + num_classes=1000), + topk=(1, 5), + ), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/configs/_base_/models/res2net101-w26-s4.py b/configs/_base_/models/res2net101-w26-s4.py new file mode 100644 index 0000000..3bf64c5 --- /dev/null +++ b/configs/_base_/models/res2net101-w26-s4.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w14-s8.py b/configs/_base_/models/res2net50-w14-s8.py new file mode 100644 index 0000000..5875142 --- /dev/null +++ b/configs/_base_/models/res2net50-w14-s8.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=8, + base_width=14, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w26-s4.py b/configs/_base_/models/res2net50-w26-s4.py new file mode 100644 index 0000000..be8fdb5 --- /dev/null +++ b/configs/_base_/models/res2net50-w26-s4.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=4, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w26-s6.py b/configs/_base_/models/res2net50-w26-s6.py new file mode 100644 index 0000000..281b136 --- /dev/null +++ b/configs/_base_/models/res2net50-w26-s6.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=6, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w26-s8.py b/configs/_base_/models/res2net50-w26-s8.py new file mode 100644 index 0000000..b4f62f3 --- /dev/null +++ b/configs/_base_/models/res2net50-w26-s8.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', 
+ backbone=dict( + type='Res2Net', + depth=50, + scales=8, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w48-s2.py b/configs/_base_/models/res2net50-w48-s2.py new file mode 100644 index 0000000..8675c91 --- /dev/null +++ b/configs/_base_/models/res2net50-w48-s2.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=2, + base_width=48, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnest101.py b/configs/_base_/models/resnest101.py new file mode 100644 index 0000000..97f7749 --- /dev/null +++ b/configs/_base_/models/resnest101.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=101, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False)) +train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000)) diff --git a/configs/_base_/models/resnest200.py b/configs/_base_/models/resnest200.py new file mode 100644 index 0000000..4610017 --- /dev/null +++ b/configs/_base_/models/resnest200.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=200, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False)) +train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000)) diff --git a/configs/_base_/models/resnest269.py b/configs/_base_/models/resnest269.py new file mode 100644 index 0000000..ad365d0 --- /dev/null +++ b/configs/_base_/models/resnest269.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=269, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False)) +train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000)) diff --git a/configs/_base_/models/resnest50.py b/configs/_base_/models/resnest50.py new file mode 100644 index 0000000..15269d4 --- /dev/null +++ b/configs/_base_/models/resnest50.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + 
type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False)) +train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000)) diff --git a/configs/_base_/models/resnet101.py b/configs/_base_/models/resnet101.py new file mode 100644 index 0000000..1147cd4 --- /dev/null +++ b/configs/_base_/models/resnet101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet101_cifar.py b/configs/_base_/models/resnet101_cifar.py new file mode 100644 index 0000000..a84d470 --- /dev/null +++ b/configs/_base_/models/resnet101_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet152.py b/configs/_base_/models/resnet152.py new file mode 100644 index 0000000..94a718c --- /dev/null +++ b/configs/_base_/models/resnet152.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet152_cifar.py b/configs/_base_/models/resnet152_cifar.py new file mode 100644 index 0000000..55c0cc6 --- /dev/null +++ b/configs/_base_/models/resnet152_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet18.py b/configs/_base_/models/resnet18.py new file mode 100644 index 0000000..7c66758 --- /dev/null +++ b/configs/_base_/models/resnet18.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet18_cifar.py b/configs/_base_/models/resnet18_cifar.py new file mode 100644 index 0000000..7b9cf1e --- /dev/null +++ b/configs/_base_/models/resnet18_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), 
+ )) diff --git a/configs/_base_/models/resnet34.py b/configs/_base_/models/resnet34.py new file mode 100644 index 0000000..100ee28 --- /dev/null +++ b/configs/_base_/models/resnet34.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet34_cifar.py b/configs/_base_/models/resnet34_cifar.py new file mode 100644 index 0000000..55d033b --- /dev/null +++ b/configs/_base_/models/resnet34_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet34_gem.py b/configs/_base_/models/resnet34_gem.py new file mode 100644 index 0000000..5c0e0d3 --- /dev/null +++ b/configs/_base_/models/resnet34_gem.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GeneralizedMeanPooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet50.py b/configs/_base_/models/resnet50.py new file mode 100644 index 0000000..129a2bb --- /dev/null +++ b/configs/_base_/models/resnet50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet50_cifar.py b/configs/_base_/models/resnet50_cifar.py new file mode 100644 index 0000000..33b66d5 --- /dev/null +++ b/configs/_base_/models/resnet50_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet50_cifar_cutmix.py b/configs/_base_/models/resnet50_cifar_cutmix.py new file mode 100644 index 0000000..73c38be --- /dev/null +++ b/configs/_base_/models/resnet50_cifar_cutmix.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict(type='BatchCutMix', alpha=1.0, num_classes=10, + prob=1.0))) diff --git 
a/configs/_base_/models/resnet50_cifar_mixup.py b/configs/_base_/models/resnet50_cifar_mixup.py new file mode 100644 index 0000000..3de14f3 --- /dev/null +++ b/configs/_base_/models/resnet50_cifar_mixup.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=1., num_classes=10, prob=1.))) diff --git a/configs/_base_/models/resnet50_cutmix.py b/configs/_base_/models/resnet50_cutmix.py new file mode 100644 index 0000000..fb79088 --- /dev/null +++ b/configs/_base_/models/resnet50_cutmix.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict( + type='BatchCutMix', alpha=1.0, num_classes=1000, prob=1.0))) diff --git a/configs/_base_/models/resnet50_label_smooth.py b/configs/_base_/models/resnet50_label_smooth.py new file mode 100644 index 0000000..b6f7937 --- /dev/null +++ b/configs/_base_/models/resnet50_label_smooth.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet50_mixup.py b/configs/_base_/models/resnet50_mixup.py new file mode 100644 index 0000000..8ff9522 --- /dev/null +++ b/configs/_base_/models/resnet50_mixup.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/configs/_base_/models/resnetv1c50.py b/configs/_base_/models/resnetv1c50.py new file mode 100644 index 0000000..3b973e2 --- /dev/null +++ b/configs/_base_/models/resnetv1c50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnetv1d101.py b/configs/_base_/models/resnetv1d101.py new file mode 100644 index 0000000..1e56223 --- /dev/null +++ b/configs/_base_/models/resnetv1d101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1d', + depth=101, + num_stages=4, + 
out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnetv1d152.py b/configs/_base_/models/resnetv1d152.py new file mode 100644 index 0000000..58cc73b --- /dev/null +++ b/configs/_base_/models/resnetv1d152.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1d', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnetv1d50.py b/configs/_base_/models/resnetv1d50.py new file mode 100644 index 0000000..015aaa3 --- /dev/null +++ b/configs/_base_/models/resnetv1d50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1d', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnext101_32x4d.py b/configs/_base_/models/resnext101_32x4d.py new file mode 100644 index 0000000..1c89fb6 --- /dev/null +++ b/configs/_base_/models/resnext101_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnext101_32x8d.py b/configs/_base_/models/resnext101_32x8d.py new file mode 100644 index 0000000..2bb63f3 --- /dev/null +++ b/configs/_base_/models/resnext101_32x8d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=8, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnext152_32x4d.py b/configs/_base_/models/resnext152_32x4d.py new file mode 100644 index 0000000..d392eff --- /dev/null +++ b/configs/_base_/models/resnext152_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=152, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnext50_32x4d.py b/configs/_base_/models/resnext50_32x4d.py new file mode 100644 index 0000000..0604262 --- /dev/null +++ b/configs/_base_/models/resnext50_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=50, + num_stages=4, + 
out_indices=(3, ), + groups=32, + width_per_group=4, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/seresnet101.py b/configs/_base_/models/seresnet101.py new file mode 100644 index 0000000..137a6f9 --- /dev/null +++ b/configs/_base_/models/seresnet101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNet', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/seresnet50.py b/configs/_base_/models/seresnet50.py new file mode 100644 index 0000000..e5f6bfc --- /dev/null +++ b/configs/_base_/models/seresnet50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/seresnext101_32x4d.py b/configs/_base_/models/seresnext101_32x4d.py new file mode 100644 index 0000000..cc8a62c --- /dev/null +++ b/configs/_base_/models/seresnext101_32x4d.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + se_ratio=16, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/seresnext50_32x4d.py b/configs/_base_/models/seresnext50_32x4d.py new file mode 100644 index 0000000..0cdf7cb --- /dev/null +++ b/configs/_base_/models/seresnext50_32x4d.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNeXt', + depth=50, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + se_ratio=16, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/shufflenet_v1_1x.py b/configs/_base_/models/shufflenet_v1_1x.py new file mode 100644 index 0000000..f0f9d1f --- /dev/null +++ b/configs/_base_/models/shufflenet_v1_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ShuffleNetV1', groups=3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=960, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/shufflenet_v2_1x.py b/configs/_base_/models/shufflenet_v2_1x.py new file mode 100644 index 0000000..190800e --- /dev/null +++ b/configs/_base_/models/shufflenet_v2_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ShuffleNetV2', widen_factor=1.0), + 
neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/swin_transformer/base_224.py b/configs/_base_/models/swin_transformer/base_224.py new file mode 100644 index 0000000..e16b4e6 --- /dev/null +++ b/configs/_base_/models/swin_transformer/base_224.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='base', img_size=224, drop_path_rate=0.5), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/swin_transformer/base_384.py b/configs/_base_/models/swin_transformer/base_384.py new file mode 100644 index 0000000..ce78981 --- /dev/null +++ b/configs/_base_/models/swin_transformer/base_384.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', + arch='base', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer/large_224.py b/configs/_base_/models/swin_transformer/large_224.py new file mode 100644 index 0000000..747d00e --- /dev/null +++ b/configs/_base_/models/swin_transformer/large_224.py @@ -0,0 +1,12 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict(type='SwinTransformer', arch='large', img_size=224), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer/large_384.py b/configs/_base_/models/swin_transformer/large_384.py new file mode 100644 index 0000000..7026f81 --- /dev/null +++ b/configs/_base_/models/swin_transformer/large_384.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', + arch='large', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer/small_224.py b/configs/_base_/models/swin_transformer/small_224.py new file mode 100644 index 0000000..7873986 --- /dev/null +++ b/configs/_base_/models/swin_transformer/small_224.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='small', img_size=224, + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + 
type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/swin_transformer/tiny_224.py b/configs/_base_/models/swin_transformer/tiny_224.py new file mode 100644 index 0000000..2d68d66 --- /dev/null +++ b/configs/_base_/models/swin_transformer/tiny_224.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='tiny', img_size=224, drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/swin_transformer_v2/base_256.py b/configs/_base_/models/swin_transformer_v2/base_256.py new file mode 100644 index 0000000..f711a9c --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/base_256.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='base', + img_size=256, + drop_path_rate=0.5), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/swin_transformer_v2/base_384.py b/configs/_base_/models/swin_transformer_v2/base_384.py new file mode 100644 index 0000000..5fb9aea --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/base_384.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='base', + img_size=384, + drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/swin_transformer_v2/large_256.py b/configs/_base_/models/swin_transformer_v2/large_256.py new file mode 100644 index 0000000..fe557c3 --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/large_256.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='large', + img_size=256, + drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer_v2/large_384.py b/configs/_base_/models/swin_transformer_v2/large_384.py new file mode 100644 index 0000000..a626c40 --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/large_384.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='large', + img_size=384, + drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer_v2/small_256.py b/configs/_base_/models/swin_transformer_v2/small_256.py new file mode 100644 index 0000000..8808f09 --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/small_256.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='small', + img_size=256, + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/swin_transformer_v2/tiny_256.py b/configs/_base_/models/swin_transformer_v2/tiny_256.py new file mode 100644 index 0000000..d40e394 --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/tiny_256.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='tiny', + img_size=256, + drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/t2t-vit-t-14.py b/configs/_base_/models/t2t-vit-t-14.py new file mode 100644 index 0000000..91dbb67 --- /dev/null +++ b/configs/_base_/models/t2t-vit-t-14.py @@ -0,0 +1,41 @@ +# model settings +embed_dims = 384 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=14, + layer_cfgs=dict( + num_heads=6, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes), + dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes), + ])) diff --git a/configs/_base_/models/t2t-vit-t-19.py b/configs/_base_/models/t2t-vit-t-19.py new file mode 100644 index 0000000..8ab139d --- /dev/null +++ b/configs/_base_/models/t2t-vit-t-19.py @@ -0,0 +1,41 @@ +# model settings +embed_dims = 448 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=19, + layer_cfgs=dict( + num_heads=7, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes), + dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes), + ])) diff --git a/configs/_base_/models/t2t-vit-t-24.py b/configs/_base_/models/t2t-vit-t-24.py new file mode 100644 index 0000000..5990960 --- /dev/null +++ b/configs/_base_/models/t2t-vit-t-24.py @@ -0,0 +1,41 @@ +# model settings +embed_dims = 512 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=24, + layer_cfgs=dict( + num_heads=8, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', 
std=.02)), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes), + dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes), + ])) diff --git a/configs/_base_/models/tnt_s_patch16_224.py b/configs/_base_/models/tnt_s_patch16_224.py new file mode 100644 index 0000000..5e13d07 --- /dev/null +++ b/configs/_base_/models/tnt_s_patch16_224.py @@ -0,0 +1,29 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='TNT', + arch='s', + img_size=224, + patch_size=16, + in_channels=3, + ffn_ratio=4, + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + first_stride=4, + num_fcs=2, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ]), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02))) diff --git a/configs/_base_/models/twins_pcpvt_base.py b/configs/_base_/models/twins_pcpvt_base.py new file mode 100644 index 0000000..473d7ee --- /dev/null +++ b/configs/_base_/models/twins_pcpvt_base.py @@ -0,0 +1,30 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PCPVT', + arch='base', + in_channels=3, + out_indices=(3, ), + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-06), + norm_after_stage=[False, False, False, True], + drop_rate=0.0, + attn_drop_rate=0., + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/twins_svt_base.py b/configs/_base_/models/twins_svt_base.py new file mode 100644 index 0000000..cabd373 --- /dev/null +++ b/configs/_base_/models/twins_svt_base.py @@ -0,0 +1,30 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SVT', + arch='base', + in_channels=3, + out_indices=(3, ), + qkv_bias=True, + norm_cfg=dict(type='LN'), + norm_after_stage=[False, False, False, True], + drop_rate=0.0, + attn_drop_rate=0., + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/van/van_b0.py b/configs/_base_/models/van/van_b0.py new file mode 100644 index 0000000..5fa977e --- /dev/null +++ b/configs/_base_/models/van/van_b0.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='b0', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=256, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/van/van_b1.py b/configs/_base_/models/van/van_b1.py new file mode 100644 index 0000000..a27a50b --- /dev/null +++ b/configs/_base_/models/van/van_b1.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='b1', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/configs/_base_/models/van/van_b2.py b/configs/_base_/models/van/van_b2.py new file mode 100644 index 0000000..41b0484 --- /dev/null +++ b/configs/_base_/models/van/van_b2.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='b2', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/van/van_b3.py b/configs/_base_/models/van/van_b3.py new file mode 100644 index 0000000..d32b12c --- /dev/null +++ b/configs/_base_/models/van/van_b3.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='b3', drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/van/van_b4.py b/configs/_base_/models/van/van_b4.py new file mode 100644 index 0000000..417835c --- /dev/null +++ b/configs/_base_/models/van/van_b4.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='b4', drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/van/van_b5.py b/configs/_base_/models/van/van_b5.py new file mode 100644 index 0000000..fe8b923 --- /dev/null +++ b/configs/_base_/models/van/van_b5.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='b5', drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/van/van_b6.py b/configs/_base_/models/van/van_b6.py new file mode 100644 index 0000000..a0dfb3c --- /dev/null +++ b/configs/_base_/models/van/van_b6.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='b6', drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/van/van_base.py b/configs/_base_/models/van/van_base.py new file mode 100644 index 0000000..5c2bcf0 --- /dev/null +++ b/configs/_base_/models/van/van_base.py @@ -0,0 +1 @@ +_base_ = ['./van-b2.py'] diff --git a/configs/_base_/models/van/van_large.py b/configs/_base_/models/van/van_large.py new file mode 100644 index 0000000..bc9536c --- /dev/null +++ b/configs/_base_/models/van/van_large.py @@ -0,0 +1 @@ +_base_ = ['./van-b3.py'] diff --git a/configs/_base_/models/van/van_small.py b/configs/_base_/models/van/van_small.py new file mode 100644 index 0000000..3973c22 --- /dev/null +++ b/configs/_base_/models/van/van_small.py @@ -0,0 +1 @@ +_base_ = ['./van-b1.py'] diff --git a/configs/_base_/models/van/van_tiny.py b/configs/_base_/models/van/van_tiny.py new file mode 100644 index 0000000..ace9ebb --- /dev/null +++ b/configs/_base_/models/van/van_tiny.py @@ -0,0 +1 @@ +_base_ = ['./van-b0.py'] diff --git a/configs/_base_/models/vgg11.py b/configs/_base_/models/vgg11.py new file mode 100644 index 0000000..2b6ee14 --- /dev/null +++ b/configs/_base_/models/vgg11.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=11, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg11bn.py b/configs/_base_/models/vgg11bn.py new file mode 100644 index 0000000..cb4c64e --- /dev/null +++ b/configs/_base_/models/vgg11bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=11, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg13.py b/configs/_base_/models/vgg13.py new file mode 100644 index 0000000..a938910 --- /dev/null +++ b/configs/_base_/models/vgg13.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=13, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg13bn.py b/configs/_base_/models/vgg13bn.py new file mode 100644 index 0000000..b12173b --- /dev/null +++ b/configs/_base_/models/vgg13bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=13, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg16.py b/configs/_base_/models/vgg16.py new file mode 100644 index 0000000..93ce864 --- /dev/null +++ b/configs/_base_/models/vgg16.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=16, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg16bn.py b/configs/_base_/models/vgg16bn.py new file mode 100644 index 0000000..765e34f --- /dev/null +++ b/configs/_base_/models/vgg16bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=16, norm_cfg=dict(type='BN'), 
num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg19.py b/configs/_base_/models/vgg19.py new file mode 100644 index 0000000..6f4ab06 --- /dev/null +++ b/configs/_base_/models/vgg19.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=19, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg19bn.py b/configs/_base_/models/vgg19bn.py new file mode 100644 index 0000000..c468b5d --- /dev/null +++ b/configs/_base_/models/vgg19bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=19, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vit-base-p16.py b/configs/_base_/models/vit-base-p16.py new file mode 100644 index 0000000..bb42bed --- /dev/null +++ b/configs/_base_/models/vit-base-p16.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, + mode='classy_vision'), + )) diff --git a/configs/_base_/models/vit-base-p32.py b/configs/_base_/models/vit-base-p32.py new file mode 100644 index 0000000..ad550ef --- /dev/null +++ b/configs/_base_/models/vit-base-p32.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='b', + img_size=224, + patch_size=32, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vit-large-p16.py b/configs/_base_/models/vit-large-p16.py new file mode 100644 index 0000000..9716230 --- /dev/null +++ b/configs/_base_/models/vit-large-p16.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='l', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vit-large-p32.py b/configs/_base_/models/vit-large-p32.py new file mode 100644 index 0000000..f9491bb --- /dev/null +++ b/configs/_base_/models/vit-large-p32.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='l', + img_size=224, + patch_size=32, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', 
+ num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/wide-resnet50.py b/configs/_base_/models/wide-resnet50.py new file mode 100644 index 0000000..a2913b9 --- /dev/null +++ b/configs/_base_/models/wide-resnet50.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + stem_channels=64, + base_channels=128, + expansion=2, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/schedules/cifar10_bs128.py b/configs/_base_/schedules/cifar10_bs128.py new file mode 100644 index 0000000..f134dbc --- /dev/null +++ b/configs/_base_/schedules/cifar10_bs128.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[100, 150]) +runner = dict(type='EpochBasedRunner', max_epochs=200) diff --git a/configs/_base_/schedules/cub_bs64.py b/configs/_base_/schedules/cub_bs64.py new file mode 100644 index 0000000..93cce6a --- /dev/null +++ b/configs/_base_/schedules/cub_bs64.py @@ -0,0 +1,13 @@ +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=0.01, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py b/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py new file mode 100644 index 0000000..92f1801 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py @@ -0,0 +1,29 @@ +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + }) + +# for batch in each gpu is 128, 8 gpu +# lr = 5e-4 * 128 * 8 / 512 = 0.001 +optimizer = dict( + type='AdamW', + lr=5e-4 * 128 * 8 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=paramwise_cfg) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + by_epoch=False, + min_lr_ratio=1e-2, + warmup='linear', + warmup_ratio=1e-3, + warmup_iters=5 * 1252, + warmup_by_epoch=False) + +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py b/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py new file mode 100644 index 0000000..2ad035c --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py @@ -0,0 +1,30 @@ +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }) + +# for batch in each gpu is 128, 8 gpu +# lr = 5e-4 * 128 * 8 / 512 = 0.001 +optimizer = dict( + type='AdamW', + lr=5e-4 * 1024 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=paramwise_cfg) +optimizer_config = dict(grad_clip=dict(max_norm=5.0)) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + by_epoch=False, + min_lr_ratio=1e-2, + 
warmup='linear', + warmup_ratio=1e-3, + warmup_iters=20, + warmup_by_epoch=True) + +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/configs/_base_/schedules/imagenet_bs1024_coslr.py b/configs/_base_/schedules/imagenet_bs1024_coslr.py new file mode 100644 index 0000000..ee84e7a --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_coslr.py @@ -0,0 +1,12 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=5e-5) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=0.1, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py b/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py new file mode 100644 index 0000000..99fbdda --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py @@ -0,0 +1,17 @@ +# optimizer +optimizer = dict( + type='SGD', + lr=0.5, + momentum=0.9, + weight_decay=0.00004, + paramwise_cfg=dict(norm_decay_mult=0)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='poly', + min_lr=0, + by_epoch=False, + warmup='constant', + warmup_iters=5000, +) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/configs/_base_/schedules/imagenet_bs2048.py b/configs/_base_/schedules/imagenet_bs2048.py new file mode 100644 index 0000000..93fdebf --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048.py @@ -0,0 +1,12 @@ +# optimizer +optimizer = dict( + type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=2500, + warmup_ratio=0.25, + step=[30, 60, 90]) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/configs/_base_/schedules/imagenet_bs2048_AdamW.py b/configs/_base_/schedules/imagenet_bs2048_AdamW.py new file mode 100644 index 0000000..6d4f208 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048_AdamW.py @@ -0,0 +1,20 @@ +# optimizer +# In ClassyVision, the lr is set to 0.003 for bs4096. 
+# In this implementation(bs2048), lr = 0.003 / 4096 * (32bs * 64gpus) = 0.0015 +optimizer = dict(type='AdamW', lr=0.0015, weight_decay=0.3) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# specific to vit pretrain +paramwise_cfg = dict( + custom_keys={ + '.backbone.cls_token': dict(decay_mult=0.0), + '.backbone.pos_embed': dict(decay_mult=0.0) + }) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=10000, + warmup_ratio=1e-4) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/configs/_base_/schedules/imagenet_bs2048_coslr.py b/configs/_base_/schedules/imagenet_bs2048_coslr.py new file mode 100644 index 0000000..b9e77f2 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048_coslr.py @@ -0,0 +1,12 @@ +# optimizer +optimizer = dict( + type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=2500, + warmup_ratio=0.25) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/configs/_base_/schedules/imagenet_bs2048_rsb.py b/configs/_base_/schedules/imagenet_bs2048_rsb.py new file mode 100644 index 0000000..e021cb0 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048_rsb.py @@ -0,0 +1,12 @@ +# optimizer +optimizer = dict(type='Lamb', lr=0.005, weight_decay=0.02) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=1.0e-6, + warmup='linear', + # For ImageNet-1k, 626 iters per epoch, warmup 5 epochs. + warmup_iters=5 * 626, + warmup_ratio=0.0001) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/configs/_base_/schedules/imagenet_bs256.py b/configs/_base_/schedules/imagenet_bs256.py new file mode 100644 index 0000000..3b5d198 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[30, 60, 90]) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/configs/_base_/schedules/imagenet_bs256_140e.py b/configs/_base_/schedules/imagenet_bs256_140e.py new file mode 100644 index 0000000..caba157 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256_140e.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[40, 80, 120]) +runner = dict(type='EpochBasedRunner', max_epochs=140) diff --git a/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py b/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py new file mode 100644 index 0000000..49456b2 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=25025, + warmup_ratio=0.25) +runner = dict(type='EpochBasedRunner', max_epochs=200) diff --git a/configs/_base_/schedules/imagenet_bs256_coslr.py b/configs/_base_/schedules/imagenet_bs256_coslr.py new file mode 100644 index 0000000..779b479 --- /dev/null +++ 
b/configs/_base_/schedules/imagenet_bs256_coslr.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/configs/_base_/schedules/imagenet_bs256_epochstep.py b/configs/_base_/schedules/imagenet_bs256_epochstep.py new file mode 100644 index 0000000..2347a04 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256_epochstep.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=0.00004) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', gamma=0.98, step=1) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/configs/_base_/schedules/imagenet_bs4096_AdamW.py b/configs/_base_/schedules/imagenet_bs4096_AdamW.py new file mode 100644 index 0000000..75b00d8 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs4096_AdamW.py @@ -0,0 +1,24 @@ +# specific to vit pretrain +paramwise_cfg = dict(custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) +}) + +# optimizer +optimizer = dict( + type='AdamW', + lr=0.003, + weight_decay=0.3, + paramwise_cfg=paramwise_cfg, +) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=10000, + warmup_ratio=1e-4, +) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/configs/_base_/schedules/stanford_cars_bs8.py b/configs/_base_/schedules/stanford_cars_bs8.py new file mode 100644 index 0000000..dee252e --- /dev/null +++ b/configs/_base_/schedules/stanford_cars_bs8.py @@ -0,0 +1,7 @@ +# optimizer +optimizer = dict( + type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005, nesterov=True) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[40, 70, 90]) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/configs/conformer/README.md b/configs/conformer/README.md new file mode 100644 index 0000000..5b7d96b --- /dev/null +++ b/configs/conformer/README.md @@ -0,0 +1,37 @@ +# Conformer + +> [Conformer: Local Features Coupling Global Representations for Visual Recognition](https://arxiv.org/abs/2105.03889) + + + +## Abstract + +Within Convolutional Neural Network (CNN), the convolution operations are good at extracting local features but experience difficulty to capture global representations. Within visual transformer, the cascaded self-attention modules can capture long-distance feature dependencies but unfortunately deteriorate local feature details. In this paper, we propose a hybrid network structure, termed Conformer, to take advantage of convolutional operations and self-attention mechanisms for enhanced representation learning. Conformer roots in the Feature Coupling Unit (FCU), which fuses local features and global representations under different resolutions in an interactive fashion. Conformer adopts a concurrent structure so that local features and global representations are retained to the maximum extent. Experiments show that Conformer, under the comparable parameter complexity, outperforms the visual transformer (DeiT-B) by 2.3% on ImageNet. 
On MSCOCO, it outperforms ResNet-101 by 3.7% and 3.6% mAPs for object detection and instance segmentation, respectively, demonstrating the great potential to be a general backbone network. + +
+ +
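The Feature Coupling Unit mentioned in the abstract is the part that most benefits from a concrete illustration. The toy module below is a minimal PyTorch-style sketch of the idea only, assuming a CNN feature map and a grid of patch tokens at a lower resolution; the class name, channel sizes and pooling scheme are illustrative and do not mirror the actual `Conformer` backbone in `mmcls/models/backbones/conformer.py`.

```python
# Illustrative sketch of two-way CNN <-> transformer feature coupling.
# Not the MMClassification implementation; all sizes are example values.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyFeatureCoupling(nn.Module):
    """Fuse a CNN feature map (local) with patch tokens (global)."""

    def __init__(self, cnn_channels=64, embed_dims=96, down_stride=4):
        super().__init__()
        # CNN -> tokens: match the width with a 1x1 conv, then pool to the token grid.
        self.cnn_to_tok = nn.Conv2d(cnn_channels, embed_dims, kernel_size=1)
        self.pool = nn.AvgPool2d(kernel_size=down_stride, stride=down_stride)
        # tokens -> CNN: project back and upsample to the feature-map resolution.
        self.tok_to_cnn = nn.Conv2d(embed_dims, cnn_channels, kernel_size=1)

    def forward(self, feat, tokens, token_hw):
        # feat: (B, C, H, W) local features; tokens: (B, N, D) global representations.
        h, w = token_hw
        b, n, d = tokens.shape
        pooled = self.pool(self.cnn_to_tok(feat))             # (B, D, h, w)
        tokens = tokens + pooled.flatten(2).transpose(1, 2)   # local -> global
        token_map = tokens.transpose(1, 2).reshape(b, d, h, w)
        feat = feat + F.interpolate(                          # global -> local
            self.tok_to_cnn(token_map), size=feat.shape[2:],
            mode='bilinear', align_corners=False)
        return feat, tokens


feat, tokens = ToyFeatureCoupling()(
    torch.randn(2, 64, 56, 56), torch.randn(2, 14 * 14, 96), token_hw=(14, 14))
print(feat.shape, tokens.shape)  # torch.Size([2, 64, 56, 56]) torch.Size([2, 196, 96])
```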
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------: | :-----------------------------------------------------------------------: | +| Conformer-tiny-p16\* | 23.52 | 4.90 | 81.31 | 95.60 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-tiny-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth) | +| Conformer-small-p32\* | 38.85 | 7.09 | 81.96 | 96.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-small-p32_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p32_8xb128_in1k_20211206-947a0816.pth) | +| Conformer-small-p16\* | 37.67 | 10.31 | 83.32 | 96.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-small-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p16_3rdparty_8xb128_in1k_20211206-3065dcf5.pth) | +| Conformer-base-p16\* | 83.29 | 22.89 | 83.82 | 96.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-base-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth) | + +*Models with * are converted from the [official repo](https://github.com/pengzhiliang/Conformer). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +``` +@article{peng2021conformer, + title={Conformer: Local Features Coupling Global Representations for Visual Recognition}, + author={Zhiliang Peng and Wei Huang and Shanzhi Gu and Lingxi Xie and Yaowei Wang and Jianbin Jiao and Qixiang Ye}, + journal={arXiv preprint arXiv:2105.03889}, + year={2021}, +} +``` diff --git a/configs/conformer/conformer-base-p16_8xb128_in1k.py b/configs/conformer/conformer-base-p16_8xb128_in1k.py new file mode 100644 index 0000000..29ed58b --- /dev/null +++ b/configs/conformer/conformer-base-p16_8xb128_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/conformer/base-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_conformer.py', + '../_base_/default_runtime.py' +] + +data = dict(samples_per_gpu=128) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/conformer/conformer-small-p16_8xb128_in1k.py b/configs/conformer/conformer-small-p16_8xb128_in1k.py new file mode 100644 index 0000000..c40ed04 --- /dev/null +++ b/configs/conformer/conformer-small-p16_8xb128_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/conformer/small-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_conformer.py', + '../_base_/default_runtime.py' +] + +data = dict(samples_per_gpu=128) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/conformer/conformer-small-p32_8xb128_in1k.py b/configs/conformer/conformer-small-p32_8xb128_in1k.py new file mode 100644 index 0000000..aaa1189 --- /dev/null +++ b/configs/conformer/conformer-small-p32_8xb128_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + 
'../_base_/models/conformer/small-p32.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_conformer.py', + '../_base_/default_runtime.py' +] + +data = dict(samples_per_gpu=128) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/conformer/conformer-tiny-p16_8xb128_in1k.py b/configs/conformer/conformer-tiny-p16_8xb128_in1k.py new file mode 100644 index 0000000..76a264c --- /dev/null +++ b/configs/conformer/conformer-tiny-p16_8xb128_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/conformer/tiny-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_conformer.py', + '../_base_/default_runtime.py' +] + +data = dict(samples_per_gpu=128) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/conformer/metafile.yml b/configs/conformer/metafile.yml new file mode 100644 index 0000000..4efe05f --- /dev/null +++ b/configs/conformer/metafile.yml @@ -0,0 +1,78 @@ +Collections: + - Name: Conformer + Metadata: + Training Data: ImageNet-1k + Architecture: + - Layer Normalization + - Scaled Dot-Product Attention + - Dropout + Paper: + URL: https://arxiv.org/abs/2105.03889 + Title: "Conformer: Local Features Coupling Global Representations for Visual Recognition" + README: configs/conformer/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.19.0/mmcls/models/backbones/conformer.py + Version: v0.19.0 + +Models: + - Name: conformer-tiny-p16_3rdparty_8xb128_in1k + In Collection: Conformer + Config: configs/conformer/conformer-tiny-p16_8xb128_in1k.py + Metadata: + FLOPs: 4899611328 + Parameters: 23524704 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.31 + Top 5 Accuracy: 95.60 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth + Converted From: + Weights: https://drive.google.com/file/d/19SxGhKcWOR5oQSxNUWUM2MGYiaWMrF1z/view?usp=sharing + Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L65 + - Name: conformer-small-p16_3rdparty_8xb128_in1k + In Collection: Conformer + Config: configs/conformer/conformer-small-p16_8xb128_in1k.py + Metadata: + FLOPs: 10311309312 + Parameters: 37673424 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.32 + Top 5 Accuracy: 96.46 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p16_3rdparty_8xb128_in1k_20211206-3065dcf5.pth + Converted From: + Weights: https://drive.google.com/file/d/1mpOlbLaVxOfEwV4-ha78j_1Ebqzj2B83/view?usp=sharing + Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L73 + - Name: conformer-small-p32_8xb128_in1k + In Collection: Conformer + Config: configs/conformer/conformer-small-p32_8xb128_in1k.py + Metadata: + FLOPs: 7087281792 + Parameters: 38853072 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.96 + Top 5 Accuracy: 96.02 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p32_8xb128_in1k_20211206-947a0816.pth + - Name: conformer-base-p16_3rdparty_8xb128_in1k + In Collection: Conformer + Config: configs/conformer/conformer-base-p16_8xb128_in1k.py + Metadata: + FLOPs: 22892078080 + Parameters: 83289136 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.82 + Top 5 Accuracy: 96.59 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth + Converted From: + Weights: https://drive.google.com/file/d/1oeQ9LSOGKEUaYGu7WTlUGl3KDsQIi0MA/view?usp=sharing + Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L89 diff --git a/configs/convmixer/README.md b/configs/convmixer/README.md new file mode 100644 index 0000000..763bad3 --- /dev/null +++ b/configs/convmixer/README.md @@ -0,0 +1,42 @@ +# ConvMixer + +> [Patches Are All You Need?](https://arxiv.org/abs/2201.09792) + + + +## Abstract + + + +Although convolutional networks have been the dominant architecture for vision tasks for many years, recent experiments have shown that Transformer-based models, most notably the Vision Transformer (ViT), may exceed their performance in some settings. However, due to the quadratic runtime of the self-attention layers in Transformers, ViTs require the use of patch embeddings, which group together small regions of the image into single input features, in order to be applied to larger image sizes. This raises a question: Is the performance of ViTs due to the inherently-more-powerful Transformer architecture, or is it at least partly due to using patches as the input representation? In this paper, we present some evidence for the latter: specifically, we propose the ConvMixer, an extremely simple model that is similar in spirit to the ViT and the even-more-basic MLP-Mixer in that it operates directly on patches as input, separates the mixing of spatial and channel dimensions, and maintains equal size and resolution throughout the network. In contrast, however, the ConvMixer uses only standard convolutions to achieve the mixing steps. Despite its simplicity, we show that the ConvMixer outperforms the ViT, MLP-Mixer, and some of their variants for similar parameter counts and data set sizes, in addition to outperforming classical vision models such as the ResNet. + + + +
+ +
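Since the abstract fully describes the architecture, a compact sketch is possible: a strided patch-embedding convolution followed by repeated blocks that mix spatially with a depthwise convolution and across channels with a pointwise convolution, all at constant resolution. The hyper-parameters below are illustrative defaults; the checkpoints in this folder use the dimension/depth/kernel/patch settings encoded in their file names, and the configs assemble the real model from the `_base_/models/convmixer/*.py` files.

```python
# Minimal ConvMixer-style sketch (illustrative hyper-parameters).
# The 'same' padding on Conv2d requires a reasonably recent PyTorch (>= 1.9).
import torch
import torch.nn as nn


class Residual(nn.Module):
    """y = f(x) + x, used around the spatial-mixing (depthwise) step."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x) + x


def convmixer(dim=768, depth=32, kernel_size=7, patch_size=7, num_classes=1000):
    return nn.Sequential(
        # Patch embedding: one strided convolution.
        nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size),
        nn.GELU(),
        nn.BatchNorm2d(dim),
        *[nn.Sequential(
            # Spatial mixing: depthwise conv, resolution preserved by 'same' padding.
            Residual(nn.Sequential(
                nn.Conv2d(dim, dim, kernel_size, groups=dim, padding='same'),
                nn.GELU(),
                nn.BatchNorm2d(dim))),
            # Channel mixing: pointwise (1x1) convolution.
            nn.Conv2d(dim, dim, kernel_size=1),
            nn.GELU(),
            nn.BatchNorm2d(dim)) for _ in range(depth)],
        nn.AdaptiveAvgPool2d(1),
        nn.Flatten(),
        nn.Linear(dim, num_classes))


print(convmixer(dim=256, depth=4)(torch.randn(1, 3, 224, 224)).shape)  # (1, 1000)
```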
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------------: | :------------------------------------------------------------------------: | +| ConvMixer-768/32\* | 21.11 | 19.62 | 80.16 | 95.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convmixer/convmixer-768-32_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-768-32_3rdparty_10xb64_in1k_20220323-bca1f7b8.pth) | +| ConvMixer-1024/20\* | 24.38 | 5.55 | 76.94 | 93.36 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convmixer/convmixer-1024-20_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1024-20_3rdparty_10xb64_in1k_20220323-48f8aeba.pth) | +| ConvMixer-1536/20\* | 51.63 | 48.71 | 81.37 | 95.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convmixer/convmixer-1536-20_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1536_20_3rdparty_10xb64_in1k_20220323-ea5786f3.pth) | + +*Models with * are converted from the [official repo](https://github.com/locuslab/convmixer). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +```bibtex +@misc{trockman2022patches, + title={Patches Are All You Need?}, + author={Asher Trockman and J. Zico Kolter}, + year={2022}, + eprint={2201.09792}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/convmixer/convmixer-1024-20_10xb64_in1k.py b/configs/convmixer/convmixer-1024-20_10xb64_in1k.py new file mode 100644 index 0000000..58694d6 --- /dev/null +++ b/configs/convmixer/convmixer-1024-20_10xb64_in1k.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/convmixer/convmixer-1024-20.py', + '../_base_/datasets/imagenet_bs64_convmixer_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +optimizer = dict(lr=0.01) + +runner = dict(type='EpochBasedRunner', max_epochs=150) diff --git a/configs/convmixer/convmixer-1536-20_10xb64_in1k.py b/configs/convmixer/convmixer-1536-20_10xb64_in1k.py new file mode 100644 index 0000000..17a7559 --- /dev/null +++ b/configs/convmixer/convmixer-1536-20_10xb64_in1k.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/convmixer/convmixer-1536-20.py', + '../_base_/datasets/imagenet_bs64_convmixer_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +optimizer = dict(lr=0.01) + +runner = dict(type='EpochBasedRunner', max_epochs=150) diff --git a/configs/convmixer/convmixer-768-32_10xb64_in1k.py b/configs/convmixer/convmixer-768-32_10xb64_in1k.py new file mode 100644 index 0000000..fa4c060 --- /dev/null +++ b/configs/convmixer/convmixer-768-32_10xb64_in1k.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/convmixer/convmixer-768-32.py', + '../_base_/datasets/imagenet_bs64_convmixer_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +optimizer = dict(lr=0.01) + +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/configs/convmixer/metafile.yml b/configs/convmixer/metafile.yml new file mode 100644 index 0000000..7831d74 --- /dev/null 
+++ b/configs/convmixer/metafile.yml @@ -0,0 +1,61 @@ +Collections: + - Name: ConvMixer + Metadata: + Training Data: ImageNet-1k + Architecture: + - 1x1 Convolution + - LayerScale + Paper: + URL: https://arxiv.org/abs/2201.09792 + Title: Patches Are All You Need? + README: configs/convmixer/README.md + +Models: + - Name: convmixer-768-32_10xb64_in1k + Metadata: + FLOPs: 19623051264 + Parameters: 21110248 + In Collections: ConvMixer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.16 + Top 5 Accuracy: 95.08 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-768-32_3rdparty_10xb64_in1k_20220323-bca1f7b8.pth + Config: configs/convmixer/convmixer-768-32_10xb64_in1k.py + Converted From: + Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_768_32_ks7_p7_relu.pth.tar + Code: https://github.com/locuslab/convmixer + - Name: convmixer-1024-20_10xb64_in1k + Metadata: + FLOPs: 5550112768 + Parameters: 24383464 + In Collections: ConvMixer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.94 + Top 5 Accuracy: 93.36 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1024-20_3rdparty_10xb64_in1k_20220323-48f8aeba.pth + Config: configs/convmixer/convmixer-1024-20_10xb64_in1k.py + Converted From: + Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_1024_20_ks9_p14.pth.tar + Code: https://github.com/locuslab/convmixer + - Name: convmixer-1536-20_10xb64_in1k + Metadata: + FLOPs: 48713170944 + Parameters: 51625960 + In Collections: ConvMixer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.37 + Top 5 Accuracy: 95.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1536_20_3rdparty_10xb64_in1k_20220323-ea5786f3.pth + Config: configs/convmixer/convmixer-1536-20_10xb64_in1k.py + Converted From: + Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_1536_20_ks9_p7.pth.tar + Code: https://github.com/locuslab/convmixer diff --git a/configs/convnext/README.md b/configs/convnext/README.md new file mode 100644 index 0000000..7db8136 --- /dev/null +++ b/configs/convnext/README.md @@ -0,0 +1,59 @@ +# ConvNeXt + +> [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545v1) + + + +## Abstract + + + +The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. 
The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets. + + + +
+ +
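For readers who want to see where that "modernization" lands, the block below is a minimal sketch of the resulting design: a 7x7 depthwise convolution, LayerNorm, an inverted bottleneck built from pointwise (linear) layers with GELU, and a learnable per-channel scale (LayerScale) on the residual branch. It is illustrative only (stochastic depth and the stem/stage layout are omitted); the backbone actually used by these configs is `mmcls/models/backbones/convnext.py`.

```python
# Sketch of a single ConvNeXt-style block; not the MMClassification implementation.
import torch
import torch.nn as nn


class ConvNeXtBlock(nn.Module):
    def __init__(self, dim, layer_scale_init=1e-6):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.norm = nn.LayerNorm(dim)            # normalizes over the channel dim
        self.pwconv1 = nn.Linear(dim, 4 * dim)   # inverted bottleneck: expand 4x
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.gamma = nn.Parameter(layer_scale_init * torch.ones(dim))  # LayerScale

    def forward(self, x):                        # x: (B, C, H, W)
        shortcut = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)                # (B, H, W, C) for LayerNorm/Linear
        x = self.pwconv2(self.act(self.pwconv1(self.norm(x))))
        x = self.gamma * x
        return shortcut + x.permute(0, 3, 1, 2)


out = ConvNeXtBlock(96)(torch.randn(2, 96, 56, 56))
print(out.shape)  # torch.Size([2, 96, 56, 56])
```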
+ +## Results and models + +### ImageNet-1k + +| Model | Pretrain | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------: | :----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------: | :---------------------------------------------------------------------: | +| ConvNeXt-T\* | From scratch | 28.59 | 4.46 | 82.05 | 95.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-tiny_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128_in1k_20220124-18abde00.pth) | +| ConvNeXt-S\* | From scratch | 50.22 | 8.69 | 83.13 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-small_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128_in1k_20220124-d39b5192.pth) | +| ConvNeXt-B\* | From scratch | 88.59 | 15.36 | 83.85 | 96.74 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128_in1k_20220124-d0915162.pth) | +| ConvNeXt-B\* | ImageNet-21k | 88.59 | 15.36 | 85.81 | 97.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth) | +| ConvNeXt-L\* | From scratch | 197.77 | 34.37 | 84.30 | 96.89 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-large_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_64xb64_in1k_20220124-f8a0ded0.pth) | +| ConvNeXt-L\* | ImageNet-21k | 197.77 | 34.37 | 86.61 | 98.04 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-large_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_64xb64_in1k_20220124-2412403d.pth) | +| ConvNeXt-XL\* | ImageNet-21k | 350.20 | 60.93 | 86.97 | 98.20 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-xlarge_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +### Pre-trained Models + +The pre-trained models on ImageNet-1k or ImageNet-21k are used to fine-tune on the downstream tasks. 
+ +| Model | Training Data | Params(M) | Flops(G) | Download | +| :-----------: | :-----------: | :-------: | :------: | :-----------------------------------------------------------------------------------------------------------------------------------: | +| ConvNeXt-T\* | ImageNet-1k | 28.59 | 4.46 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128-noema_in1k_20220222-2908964a.pth) | +| ConvNeXt-S\* | ImageNet-1k | 50.22 | 8.69 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128-noema_in1k_20220222-fa001ca5.pth) | +| ConvNeXt-B\* | ImageNet-1k | 88.59 | 15.36 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128-noema_in1k_20220222-dba4f95f.pth) | +| ConvNeXt-B\* | ImageNet-21k | 88.59 | 15.36 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in21k_20220124-13b83eec.pth) | +| ConvNeXt-L\* | ImageNet-21k | 197.77 | 34.37 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in21k_20220124-41b5a79f.pth) | +| ConvNeXt-XL\* | ImageNet-21k | 350.20 | 60.93 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_3rdparty_in21k_20220124-f909bad7.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt).* + +## Citation + +```bibtex +@Article{liu2022convnet, + author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, + title = {A ConvNet for the 2020s}, + journal = {arXiv preprint arXiv:2201.03545}, + year = {2022}, +} +``` diff --git a/configs/convnext/convnext-base_32xb128_in1k.py b/configs/convnext/convnext-base_32xb128_in1k.py new file mode 100644 index 0000000..6c0450a --- /dev/null +++ b/configs/convnext/convnext-base_32xb128_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/convnext/convnext-base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=128) + +optimizer = dict(lr=4e-3) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/convnext/convnext-large_64xb64_in1k.py b/configs/convnext/convnext-large_64xb64_in1k.py new file mode 100644 index 0000000..1faae25 --- /dev/null +++ b/configs/convnext/convnext-large_64xb64_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/convnext/convnext-large.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optimizer = dict(lr=4e-3) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/convnext/convnext-small_32xb128_in1k.py b/configs/convnext/convnext-small_32xb128_in1k.py new file mode 100644 index 0000000..d820fc6 --- /dev/null +++ b/configs/convnext/convnext-small_32xb128_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/convnext/convnext-small.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=128) + +optimizer = dict(lr=4e-3) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/convnext/convnext-tiny_32xb128_in1k.py 
b/configs/convnext/convnext-tiny_32xb128_in1k.py new file mode 100644 index 0000000..46d0185 --- /dev/null +++ b/configs/convnext/convnext-tiny_32xb128_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/convnext/convnext-tiny.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=128) + +optimizer = dict(lr=4e-3) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/convnext/convnext-xlarge_64xb64_in1k.py b/configs/convnext/convnext-xlarge_64xb64_in1k.py new file mode 100644 index 0000000..7284901 --- /dev/null +++ b/configs/convnext/convnext-xlarge_64xb64_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/convnext/convnext-xlarge.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optimizer = dict(lr=4e-3) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/convnext/metafile.yml b/configs/convnext/metafile.yml new file mode 100644 index 0000000..823f332 --- /dev/null +++ b/configs/convnext/metafile.yml @@ -0,0 +1,221 @@ +Collections: + - Name: ConvNeXt + Metadata: + Training Data: ImageNet-1k + Architecture: + - 1x1 Convolution + - LayerScale + Paper: + URL: https://arxiv.org/abs/2201.03545v1 + Title: A ConvNet for the 2020s + README: configs/convnext/README.md + Code: + Version: v0.20.1 + URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/convnext.py + +Models: + - Name: convnext-tiny_3rdparty_32xb128_in1k + Metadata: + FLOPs: 4457472768 + Parameters: 28589128 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.05 + Top 5 Accuracy: 95.86 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128_in1k_20220124-18abde00.pth + Config: configs/convnext/convnext-tiny_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-tiny_3rdparty_32xb128-noema_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 4457472768 + Parameters: 28589128 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.81 + Top 5 Accuracy: 95.67 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128-noema_in1k_20220222-2908964a.pth + Config: configs/convnext/convnext-tiny_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-small_3rdparty_32xb128_in1k + Metadata: + FLOPs: 8687008512 + Parameters: 50223688 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.13 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128_in1k_20220124-d39b5192.pth + Config: configs/convnext/convnext-small_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-small_3rdparty_32xb128-noema_in1k + 
Metadata: + Training Data: ImageNet-1k + FLOPs: 8687008512 + Parameters: 50223688 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.11 + Top 5 Accuracy: 96.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128-noema_in1k_20220222-fa001ca5.pth + Config: configs/convnext/convnext-small_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_3rdparty_32xb128_in1k + Metadata: + FLOPs: 15359124480 + Parameters: 88591464 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.85 + Top 5 Accuracy: 96.74 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128_in1k_20220124-d0915162.pth + Config: configs/convnext/convnext-base_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_3rdparty_32xb128-noema_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 15359124480 + Parameters: 88591464 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.71 + Top 5 Accuracy: 96.60 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128-noema_in1k_20220222-dba4f95f.pth + Config: configs/convnext/convnext-base_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_3rdparty_in21k + Metadata: + Training Data: ImageNet-21k + FLOPs: 15359124480 + Parameters: 88591464 + In Collections: ConvNeXt + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in21k_20220124-13b83eec.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_in21k-pre-3rdparty_32xb128_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 15359124480 + Parameters: 88591464 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.81 + Top 5 Accuracy: 97.86 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth + Config: configs/convnext/convnext-base_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-large_3rdparty_64xb64_in1k + Metadata: + FLOPs: 34368026112 + Parameters: 197767336 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.30 + Top 5 Accuracy: 96.89 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_64xb64_in1k_20220124-f8a0ded0.pth + Config: configs/convnext/convnext-large_64xb64_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-large_3rdparty_in21k + Metadata: + Training Data: 
ImageNet-21k + FLOPs: 34368026112 + Parameters: 197767336 + In Collections: ConvNeXt + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in21k_20220124-41b5a79f.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-large_in21k-pre-3rdparty_64xb64_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 34368026112 + Parameters: 197767336 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.61 + Top 5 Accuracy: 98.04 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_64xb64_in1k_20220124-2412403d.pth + Config: configs/convnext/convnext-large_64xb64_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-xlarge_3rdparty_in21k + Metadata: + Training Data: ImageNet-21k + FLOPs: 60929820672 + Parameters: 350196968 + In Collections: ConvNeXt + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_3rdparty_in21k_20220124-f909bad7.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 60929820672 + Parameters: 350196968 + In Collections: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.97 + Top 5 Accuracy: 98.20 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth + Config: configs/convnext/convnext-xlarge_64xb64_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt diff --git a/configs/cspnet/README.md b/configs/cspnet/README.md new file mode 100644 index 0000000..10eb9d0 --- /dev/null +++ b/configs/cspnet/README.md @@ -0,0 +1,41 @@ +# CSPNet + +> [CSPNet: A New Backbone that can Enhance Learning Capability of CNN](https://arxiv.org/abs/1911.11929) + + + +## Abstract + + + +Neural networks have enabled state-of-the-art approaches to achieve incredible results on computer vision tasks such as object detection. However, such success greatly relies on costly computation resources, which hinders people with cheap devices from appreciating the advanced technology. In this paper, we propose Cross Stage Partial Network (CSPNet) to mitigate the problem that previous works require heavy inference computations from the network architecture perspective. We attribute the problem to the duplicate gradient information within network optimization. The proposed networks respect the variability of the gradients by integrating feature maps from the beginning and the end of a network stage, which, in our experiments, reduces computations by 20% with equivalent or even superior accuracy on the ImageNet dataset, and significantly outperforms state-of-the-art approaches in terms of AP50 on the MS COCO object detection dataset. The CSPNet is easy to implement and general enough to cope with architectures based on ResNet, ResNeXt, and DenseNet. 
Source code is available in the authors' repository. + + +
+ +
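The cross-stage partial idea can be sketched in a few lines: split the stage input along the channel dimension, send only one part through the stage's blocks, then concatenate the untouched part back and apply a transition layer, so the gradient reaches the stage input along two different paths. The block design below is a placeholder for illustration; the real CSPDarkNet/CSPResNet/CSPResNeXt stages live in `mmcls/models/backbones/cspnet.py`.

```python
# Illustrative cross-stage partial stage; block internals are simplified.
import torch
import torch.nn as nn


class CSPStage(nn.Module):
    def __init__(self, channels, num_blocks):
        super().__init__()
        assert channels % 2 == 0
        half = channels // 2
        # Only this branch runs through the stage's (simplified) conv blocks.
        self.blocks = nn.Sequential(*[
            nn.Sequential(
                nn.Conv2d(half, half, 3, padding=1, bias=False),
                nn.BatchNorm2d(half),
                nn.ReLU(inplace=True)) for _ in range(num_blocks)])
        # Transition applied after the two paths are re-joined.
        self.transition = nn.Sequential(
            nn.Conv2d(channels, channels, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True))

    def forward(self, x):
        part1, part2 = torch.chunk(x, 2, dim=1)   # split the feature map
        part2 = self.blocks(part2)                # process only one part
        return self.transition(torch.cat([part1, part2], dim=1))


out = CSPStage(channels=64, num_blocks=2)(torch.randn(1, 64, 32, 32))
print(out.shape)  # torch.Size([1, 64, 32, 32])
```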
+ +## Results and models + +### ImageNet-1k + +| Model | Pretrain | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------: | :----------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------: | :---------------------------------------------------------------------: | +| CSPDarkNet50\* | From scratch | 27.64 | 5.04 | 80.05 | 95.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspdarknet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-bd275287.pth) | +| CSPResNet50\* | From scratch | 21.62 | 3.48 | 79.55 | 94.68 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-dd6dddfb.pth) | +| CSPResNeXt50\* | From scratch | 20.57 | 3.11 | 79.96 | 94.96 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspresnext50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth) | + +*Models with * are converted from the [timm repo](https://github.com/rwightman/pytorch-image-models). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +```bibtex +@inproceedings{wang2020cspnet, + title={CSPNet: A new backbone that can enhance learning capability of CNN}, + author={Wang, Chien-Yao and Liao, Hong-Yuan Mark and Wu, Yueh-Hua and Chen, Ping-Yang and Hsieh, Jun-Wei and Yeh, I-Hau}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops}, + pages={390--391}, + year={2020} +} +``` diff --git a/configs/cspnet/cspdarknet50_8xb32_in1k.py b/configs/cspnet/cspdarknet50_8xb32_in1k.py new file mode 100644 index 0000000..cf2ce73 --- /dev/null +++ b/configs/cspnet/cspdarknet50_8xb32_in1k.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='CSPDarkNet', depth=53), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(288, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + 
pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/cspnet/cspresnet50_8xb32_in1k.py b/configs/cspnet/cspresnet50_8xb32_in1k.py new file mode 100644 index 0000000..f4cfbf8 --- /dev/null +++ b/configs/cspnet/cspresnet50_8xb32_in1k.py @@ -0,0 +1,66 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='CSPResNet', depth=50), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(288, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/cspnet/cspresnext50_8xb32_in1k.py b/configs/cspnet/cspresnext50_8xb32_in1k.py new file mode 100644 index 0000000..a82ab75 --- /dev/null +++ b/configs/cspnet/cspresnext50_8xb32_in1k.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='CSPResNeXt', depth=50), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', 
keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(256, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/cspnet/metafile.yml b/configs/cspnet/metafile.yml new file mode 100644 index 0000000..8c4a78e --- /dev/null +++ b/configs/cspnet/metafile.yml @@ -0,0 +1,64 @@ +Collections: + - Name: CSPNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - Cross Stage Partia Stage + Paper: + URL: https://arxiv.org/abs/1911.11929 + Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN' + README: configs/cspnet/README.md + Code: + Version: v0.22.0 + URL: https://github.com/open-mmlab/mmclassification/blob/v0.22.0/mmcls/models/backbones/cspnet.py + +Models: + - Name: cspdarknet50_3rdparty_8xb32_in1k + Metadata: + FLOPs: 5040000000 + Parameters: 27640000 + In Collections: CSPNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.05 + Top 5 Accuracy: 95.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-bd275287.pth + Config: configs/cspnet/cspdarknet50_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth + Code: https://github.com/rwightman/pytorch-image-models + - Name: cspresnet50_3rdparty_8xb32_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3480000000 + Parameters: 21620000 + In Collections: CSPNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.55 + Top 5 Accuracy: 94.68 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-dd6dddfb.pth + Config: configs/cspnet/cspresnet50_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth + Code: https://github.com/rwightman/pytorch-image-models + - Name: cspresnext50_3rdparty_8xb32_in1k + Metadata: + FLOPs: 3110000000 + Parameters: 20570000 + In Collections: CSPNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.96 + Top 5 Accuracy: 94.96 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth + Config: configs/cspnet/cspresnext50_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth + Code: https://github.com/rwightman/pytorch-image-models diff --git a/configs/csra/README.md b/configs/csra/README.md new file mode 100644 index 0000000..fa677cf --- /dev/null +++ 
b/configs/csra/README.md @@ -0,0 +1,36 @@ +# CSRA + +> [Residual Attention: A Simple but Effective Method for Multi-Label Recognition](https://arxiv.org/abs/2108.02456) + + + +## Abstract + +Multi-label image recognition is a challenging computer vision task of practical use. Progress in this area, however, is often characterized by complicated methods, heavy computations, and lack of intuitive explanations. To effectively capture different spatial regions occupied by objects from different categories, we propose an embarrassingly simple module, named class-specific residual attention (CSRA). CSRA generates class-specific features for every category by proposing a simple spatial attention score, and then combines it with the class-agnostic average pooling feature. CSRA achieves state-of-the-art results on multi-label recognition, and at the same time is much simpler than existing methods. Furthermore, with only 4 lines of code, CSRA also leads to consistent improvement across many diverse pretrained models and datasets without any extra training. CSRA is both easy to implement and computationally light, and it also enjoys intuitive explanations and visualizations. + +
+ +
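The few lines of code the abstract alludes to can be sketched as follows: per-class score maps from a 1x1 convolution are average-pooled into a class-agnostic base logit and softmax-pooled over spatial positions into a class-specific attention logit, and the two are combined with a small weight `lam`. This single-head sketch only illustrates the idea behind `CSRAClsHead` (used with `num_heads=1` and `lam=0.1` in the config in this folder); it is not a drop-in replacement for the MMClassification head.

```python
# Rough single-head sketch of class-specific residual attention.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyCSRAHead(nn.Module):
    def __init__(self, in_channels=2048, num_classes=20, lam=0.1, temperature=1.0):
        super().__init__()
        # 1x1 conv produces a per-class score map over spatial positions.
        self.classifier = nn.Conv2d(in_channels, num_classes, kernel_size=1, bias=False)
        self.lam = lam
        self.t = temperature

    def forward(self, feat):                       # feat: (B, C, H, W)
        score = self.classifier(feat).flatten(2)   # (B, num_classes, H*W)
        base_logit = score.mean(dim=2)             # class-agnostic average pooling
        attn = F.softmax(score * self.t, dim=2)    # spatial attention per class
        att_logit = (score * attn).sum(dim=2)      # class-specific residual term
        return base_logit + self.lam * att_logit


logits = ToyCSRAHead()(torch.randn(2, 2048, 14, 14))
print(logits.shape)  # torch.Size([2, 20])
```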
+ +## Results and models + +### VOC2007 + +| Model | Pretrain | Params(M) | Flops(G) | mAP | OF1 (%) | CF1 (%) | Config | Download | +| :------------: | :------------------------------------------------: | :-------: | :------: | :---: | :-----: | :-----: | :-----------------------------------------------: | :-------------------------------------------------: | +| Resnet101-CSRA | [ImageNet-1k](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth) | 23.55 | 4.12 | 94.98 | 90.80 | 89.16 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/csra/resnet101-csra_1xb16_voc07-448px.py) | [model](https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.log.json) | + +## Citation + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2108.02456, + doi = {10.48550/ARXIV.2108.02456}, + url = {https://arxiv.org/abs/2108.02456}, + author = {Zhu, Ke and Wu, Jianxin}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Residual Attention: A Simple but Effective Method for Multi-Label Recognition}, + publisher = {arXiv}, + year = {2021}, + copyright = {arXiv.org perpetual, non-exclusive license} +} +``` diff --git a/configs/csra/metafile.yml b/configs/csra/metafile.yml new file mode 100644 index 0000000..f1fa622 --- /dev/null +++ b/configs/csra/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: CSRA + Metadata: + Training Data: PASCAL VOC 2007 + Architecture: + - Class-specific Residual Attention + Paper: + URL: https://arxiv.org/abs/1911.11929 + Title: 'Residual Attention: A Simple but Effective Method for Multi-Label Recognition' + README: configs/csra/README.md + Code: + Version: v0.24.0 + URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/heads/multi_label_csra_head.py + +Models: + - Name: resnet101-csra_1xb16_voc07-448px + Metadata: + FLOPs: 4120000000 + Parameters: 23550000 + In Collections: CSRA + Results: + - Dataset: PASCAL VOC 2007 + Metrics: + mAP: 94.98 + OF1: 90.80 + CF1: 89.16 + Task: Multi-Label Classification + Weights: https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.pth + Config: configs/csra/resnet101-csra_1xb16_voc07-448px.py diff --git a/configs/csra/resnet101-csra_1xb16_voc07-448px.py b/configs/csra/resnet101-csra_1xb16_voc07-448px.py new file mode 100644 index 0000000..5dc5dd6 --- /dev/null +++ b/configs/csra/resnet101-csra_1xb16_voc07-448px.py @@ -0,0 +1,75 @@ +_base_ = ['../_base_/datasets/voc_bs16.py', '../_base_/default_runtime.py'] + +# Pre-trained Checkpoint Path +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth' # noqa +# If you want to use the pre-trained weight of ResNet101-CutMix from +# the originary repo(https://github.com/Kevinz-code/CSRA). Script of +# 'tools/convert_models/torchvision_to_mmcls.py' can help you convert weight +# into mmcls format. The mAP result would hit 95.5 by using the weight. 
+# checkpoint = 'PATH/TO/PRE-TRAINED_WEIGHT' + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone')), + neck=None, + head=dict( + type='CSRAClsHead', + num_classes=20, + in_channels=2048, + num_heads=1, + lam=0.1, + loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))) + +# dataset setting +img_norm_cfg = dict(mean=[0, 0, 0], std=[255, 255, 255], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=448, scale=(0.7, 1.0)), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=448), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + # map the difficult examples as negative ones(0) + train=dict(pipeline=train_pipeline, difficult_as_postive=False), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# optimizer +# the lr of classifier.head is 10 * base_lr, which help convergence. +optimizer = dict( + type='SGD', + lr=0.0002, + momentum=0.9, + weight_decay=0.0001, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10)})) + +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='step', + step=6, + gamma=0.1, + warmup='linear', + warmup_iters=1, + warmup_ratio=1e-7, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/configs/deit/README.md b/configs/deit/README.md new file mode 100644 index 0000000..e310365 --- /dev/null +++ b/configs/deit/README.md @@ -0,0 +1,52 @@ +# DeiT + +> [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) + + + +## Abstract + +Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models. + +
+ +
+ +## Results and models + +### ImageNet-1k + +The teacher of the distilled version DeiT is RegNetY-16GF. + +| Model | Pretrain | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------: | :----------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------: | :--------------------------------------------------------------: | +| DeiT-tiny | From scratch | 5.72 | 1.08 | 74.50 | 92.24 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-tiny_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.log.json) | +| DeiT-tiny distilled\* | From scratch | 5.72 | 1.08 | 74.51 | 91.90 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny-distilled_3rdparty_pt-4xb256_in1k_20211216-c429839a.pth) | +| DeiT-small | From scratch | 22.05 | 4.24 | 80.69 | 95.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-small_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.log.json) | +| DeiT-small distilled\* | From scratch | 22.05 | 4.24 | 81.17 | 95.40 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-small-distilled_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small-distilled_3rdparty_pt-4xb256_in1k_20211216-4de1d725.pth) | +| DeiT-base | From scratch | 86.57 | 16.86 | 81.76 | 95.81 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-base_pt-16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.log.json) | +| DeiT-base\* | From scratch | 86.57 | 16.86 | 81.79 | 95.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-base_pt-16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_pt-16xb64_in1k_20211124-6f40c188.pth) | +| DeiT-base distilled\* | From scratch | 86.57 | 16.86 | 83.33 | 96.49 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-base-distilled_pt-16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_pt-16xb64_in1k_20211216-42891296.pth) | +| DeiT-base 384px\* | ImageNet-1k | 86.86 | 49.37 | 83.04 | 96.31 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-base_ft-16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth) | +| DeiT-base distilled 384px\* | ImageNet-1k | 86.86 | 49.37 | 85.55 | 97.35 | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +```{warning} +MMClassification doesn't support training the distilled version DeiT. +And we provide distilled version checkpoints for inference only. +``` + +## Citation + +``` +@InProceedings{pmlr-v139-touvron21a, + title = {Training data-efficient image transformers & distillation through attention}, + author = {Touvron, Hugo and Cord, Matthieu and Douze, Matthijs and Massa, Francisco and Sablayrolles, Alexandre and Jegou, Herve}, + booktitle = {International Conference on Machine Learning}, + pages = {10347--10357}, + year = {2021}, + volume = {139}, + month = {July} +} +``` diff --git a/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py b/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py new file mode 100644 index 0000000..c8bdfb5 --- /dev/null +++ b/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py @@ -0,0 +1,9 @@ +_base_ = './deit-base_ft-16xb32_in1k-384px.py' + +# model settings +model = dict( + backbone=dict(type='DistilledVisionTransformer'), + head=dict(type='DeiTClsHead'), + # Change to the path of the pretrained model + # init_cfg=dict(type='Pretrained', checkpoint=''), +) diff --git a/configs/deit/deit-base-distilled_pt-16xb64_in1k.py b/configs/deit/deit-base-distilled_pt-16xb64_in1k.py new file mode 100644 index 0000000..6716583 --- /dev/null +++ b/configs/deit/deit-base-distilled_pt-16xb64_in1k.py @@ -0,0 +1,10 @@ +_base_ = './deit-small_pt-4xb256_in1k.py' + +# model settings +model = dict( + backbone=dict(type='DistilledVisionTransformer', arch='deit-base'), + head=dict(type='DeiTClsHead', in_channels=768), +) + +# data settings +data = dict(samples_per_gpu=64, workers_per_gpu=5) diff --git a/configs/deit/deit-base_ft-16xb32_in1k-384px.py b/configs/deit/deit-base_ft-16xb32_in1k-384px.py new file mode 100644 index 0000000..db44416 --- /dev/null +++ b/configs/deit/deit-base_ft-16xb32_in1k-384px.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='deit-base', + img_size=384, + patch_size=16, + ), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + # Change to the path of the pretrained model + # init_cfg=dict(type='Pretrained', checkpoint=''), +) + +# data settings +data = dict(samples_per_gpu=32, workers_per_gpu=5) diff --git a/configs/deit/deit-base_pt-16xb64_in1k.py b/configs/deit/deit-base_pt-16xb64_in1k.py new file mode 100644 index 0000000..24c13dc --- /dev/null +++ b/configs/deit/deit-base_pt-16xb64_in1k.py @@ -0,0 +1,13 @@ +_base_ = './deit-small_pt-4xb256_in1k.py' + +# model settings +model = dict( + backbone=dict( + type='VisionTransformer', arch='deit-base', drop_path_rate=0.1), + head=dict(type='VisionTransformerClsHead', in_channels=768), +) + +# data settings +data = dict(samples_per_gpu=64, workers_per_gpu=5) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, 
priority='ABOVE_NORMAL')] diff --git a/configs/deit/deit-small-distilled_pt-4xb256_in1k.py b/configs/deit/deit-small-distilled_pt-4xb256_in1k.py new file mode 100644 index 0000000..3b1fac2 --- /dev/null +++ b/configs/deit/deit-small-distilled_pt-4xb256_in1k.py @@ -0,0 +1,7 @@ +_base_ = './deit-small_pt-4xb256_in1k.py' + +# model settings +model = dict( + backbone=dict(type='DistilledVisionTransformer', arch='deit-small'), + head=dict(type='DeiTClsHead', in_channels=384), +) diff --git a/configs/deit/deit-small_pt-4xb256_in1k.py b/configs/deit/deit-small_pt-4xb256_in1k.py new file mode 100644 index 0000000..550f080 --- /dev/null +++ b/configs/deit/deit-small_pt-4xb256_in1k.py @@ -0,0 +1,44 @@ +# In small and tiny arch, remove drop path and EMA hook comparing with the +# original config +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='deit-small', + img_size=224, + patch_size=16), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) + +# data settings +data = dict(samples_per_gpu=256, workers_per_gpu=5) + +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }) +optimizer = dict(paramwise_cfg=paramwise_cfg) diff --git a/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py b/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py new file mode 100644 index 0000000..175f980 --- /dev/null +++ b/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py @@ -0,0 +1,7 @@ +_base_ = './deit-small_pt-4xb256_in1k.py' + +# model settings +model = dict( + backbone=dict(type='DistilledVisionTransformer', arch='deit-tiny'), + head=dict(type='DeiTClsHead', in_channels=192), +) diff --git a/configs/deit/deit-tiny_pt-4xb256_in1k.py b/configs/deit/deit-tiny_pt-4xb256_in1k.py new file mode 100644 index 0000000..43df6e1 --- /dev/null +++ b/configs/deit/deit-tiny_pt-4xb256_in1k.py @@ -0,0 +1,7 @@ +_base_ = './deit-small_pt-4xb256_in1k.py' + +# model settings +model = dict( + backbone=dict(type='VisionTransformer', arch='deit-tiny'), + head=dict(type='VisionTransformerClsHead', in_channels=192), +) diff --git a/configs/deit/metafile.yml b/configs/deit/metafile.yml new file mode 100644 index 0000000..ddd4c67 --- /dev/null +++ b/configs/deit/metafile.yml @@ -0,0 +1,153 @@ +Collections: + - Name: DeiT + Metadata: + Training Data: ImageNet-1k + Architecture: + - Layer Normalization + - Scaled Dot-Product Attention + - Attention Dropout + - Multi-Head Attention + Paper: + URL: https://arxiv.org/abs/2012.12877 + Title: "Training data-efficient image transformers & distillation through attention" + README: configs/deit/README.md + Code: + URL: v0.19.0 + Version: https://github.com/open-mmlab/mmclassification/blob/v0.19.0/mmcls/models/backbones/deit.py + +Models: + - Name: deit-tiny_pt-4xb256_in1k + Metadata: + FLOPs: 1080000000 + Parameters: 5720000 + In Collection: DeiT + Results: + - Dataset: 
ImageNet-1k + Metrics: + Top 1 Accuracy: 74.50 + Top 5 Accuracy: 92.24 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.pth + Config: configs/deit/deit-tiny_pt-4xb256_in1k.py + - Name: deit-tiny-distilled_3rdparty_pt-4xb256_in1k + Metadata: + FLOPs: 1080000000 + Parameters: 5720000 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.51 + Top 5 Accuracy: 91.90 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny-distilled_3rdparty_pt-4xb256_in1k_20211216-c429839a.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L108 + Config: configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py + - Name: deit-small_pt-4xb256_in1k + Metadata: + FLOPs: 4240000000 + Parameters: 22050000 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.69 + Top 5 Accuracy: 95.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.pth + Config: configs/deit/deit-small_pt-4xb256_in1k.py + - Name: deit-small-distilled_3rdparty_pt-4xb256_in1k + Metadata: + FLOPs: 4240000000 + Parameters: 22050000 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.17 + Top 5 Accuracy: 95.40 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-small-distilled_3rdparty_pt-4xb256_in1k_20211216-4de1d725.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L123 + Config: configs/deit/deit-small-distilled_pt-4xb256_in1k.py + - Name: deit-base_pt-16xb64_in1k + Metadata: + FLOPs: 16860000000 + Parameters: 86570000 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.76 + Top 5 Accuracy: 95.81 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.pth + Config: configs/deit/deit-base_pt-16xb64_in1k.py + - Name: deit-base_3rdparty_pt-16xb64_in1k + Metadata: + FLOPs: 16860000000 + Parameters: 86570000 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.79 + Top 5 Accuracy: 95.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_pt-16xb64_in1k_20211124-6f40c188.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L93 + Config: configs/deit/deit-base_pt-16xb64_in1k.py + - Name: deit-base-distilled_3rdparty_pt-16xb64_in1k + Metadata: + FLOPs: 16860000000 + Parameters: 86570000 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.33 + Top 5 Accuracy: 96.49 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_pt-16xb64_in1k_20211216-42891296.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth + Code: 
https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L138 + Config: configs/deit/deit-base-distilled_pt-16xb64_in1k.py + - Name: deit-base_3rdparty_ft-16xb32_in1k-384px + Metadata: + FLOPs: 49370000000 + Parameters: 86860000 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.04 + Top 5 Accuracy: 96.31 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L153 + Config: configs/deit/deit-base_ft-16xb32_in1k-384px.py + - Name: deit-base-distilled_3rdparty_ft-16xb32_in1k-384px + Metadata: + FLOPs: 49370000000 + Parameters: 86860000 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.55 + Top 5 Accuracy: 97.35 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L168 + Config: configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py diff --git a/configs/densenet/README.md b/configs/densenet/README.md new file mode 100644 index 0000000..f07f25c --- /dev/null +++ b/configs/densenet/README.md @@ -0,0 +1,41 @@ +# DenseNet + +> [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993) + + + +## Abstract + +Recent work has shown that convolutional networks can be substantially deeper, more accurate, and efficient to train if they contain shorter connections between layers close to the input and those close to the output. In this paper, we embrace this observation and introduce the Dense Convolutional Network (DenseNet), which connects each layer to every other layer in a feed-forward fashion. Whereas traditional convolutional networks with L layers have L connections - one between each layer and its subsequent layer - our network has L(L+1)/2 direct connections. For each layer, the feature-maps of all preceding layers are used as inputs, and its own feature-maps are used as inputs into all subsequent layers. DenseNets have several compelling advantages: they alleviate the vanishing-gradient problem, strengthen feature propagation, encourage feature reuse, and substantially reduce the number of parameters. We evaluate our proposed architecture on four highly competitive object recognition benchmark tasks (CIFAR-10, CIFAR-100, SVHN, and ImageNet). DenseNets obtain significant improvements over the state-of-the-art on most of them, whilst requiring less computation to achieve high performance. + +
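To make the connectivity pattern described above concrete: inside a dense block, layer i takes the channel-wise concatenation of the block input and all i-1 earlier feature maps, which is where the L(L+1)/2 direct connections come from. The snippet below is only an illustrative PyTorch sketch of that idea, not the `DenseNet` backbone these configs use; the channel sizes are arbitrary.

```python
import torch
import torch.nn as nn


class TinyDenseBlock(nn.Module):
    """Illustrative dense block: each layer sees all earlier feature maps."""

    def __init__(self, in_channels, growth_rate, num_layers):
        super().__init__()
        self.layers = nn.ModuleList()
        for i in range(num_layers):
            # Layer i receives in_channels + i * growth_rate channels.
            self.layers.append(
                nn.Sequential(
                    nn.BatchNorm2d(in_channels + i * growth_rate),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(in_channels + i * growth_rate, growth_rate,
                              kernel_size=3, padding=1, bias=False),
                ))

    def forward(self, x):
        features = [x]
        for layer in self.layers:
            # Concatenate every preceding output before applying the next layer.
            new_feat = layer(torch.cat(features, dim=1))
            features.append(new_feat)
        return torch.cat(features, dim=1)


block = TinyDenseBlock(in_channels=64, growth_rate=32, num_layers=4)
out = block(torch.randn(1, 64, 56, 56))
print(out.shape)  # (1, 64 + 4 * 32, 56, 56)
```

Because each layer only adds `growth_rate` new channels while reusing all earlier ones, the parameter count stays low even though the effective input to later layers is wide.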
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: | +| DenseNet121\* | 7.98 | 2.88 | 74.96 | 92.21 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet121_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet121_4xb256_in1k_20220426-07450f99.pth) | +| DenseNet169\* | 14.15 | 3.42 | 76.08 | 93.11 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet169_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet169_4xb256_in1k_20220426-a2889902.pth) | +| DenseNet201\* | 20.01 | 4.37 | 77.32 | 93.64 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet201_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet201_4xb256_in1k_20220426-05cae4ef.pth) | +| DenseNet161\* | 28.68 | 7.82 | 77.61 | 93.83 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet161_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-ee6a80a9.pth) | + +*Models with * are converted from [pytorch](https://pytorch.org/vision/stable/models.html), guided by [original repo](https://github.com/liuzhuang13/DenseNet). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +```bibtex +@misc{https://doi.org/10.48550/arxiv.1608.06993, + doi = {10.48550/ARXIV.1608.06993}, + url = {https://arxiv.org/abs/1608.06993}, + author = {Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q.}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Densely Connected Convolutional Networks}, + publisher = {arXiv}, + year = {2016}, + copyright = {arXiv.org perpetual, non-exclusive license} +} +``` diff --git a/configs/densenet/densenet121_4xb256_in1k.py b/configs/densenet/densenet121_4xb256_in1k.py new file mode 100644 index 0000000..08d65ae --- /dev/null +++ b/configs/densenet/densenet121_4xb256_in1k.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/densenet/densenet121.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=256) + +runner = dict(type='EpochBasedRunner', max_epochs=90) diff --git a/configs/densenet/densenet161_4xb256_in1k.py b/configs/densenet/densenet161_4xb256_in1k.py new file mode 100644 index 0000000..4581d1d --- /dev/null +++ b/configs/densenet/densenet161_4xb256_in1k.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/densenet/densenet161.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=256) + +runner = dict(type='EpochBasedRunner', max_epochs=90) diff --git a/configs/densenet/densenet169_4xb256_in1k.py b/configs/densenet/densenet169_4xb256_in1k.py new file mode 100644 index 0000000..6179293 --- /dev/null 
+++ b/configs/densenet/densenet169_4xb256_in1k.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/densenet/densenet169.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=256) + +runner = dict(type='EpochBasedRunner', max_epochs=90) diff --git a/configs/densenet/densenet201_4xb256_in1k.py b/configs/densenet/densenet201_4xb256_in1k.py new file mode 100644 index 0000000..897a141 --- /dev/null +++ b/configs/densenet/densenet201_4xb256_in1k.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/densenet/densenet201.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=256) + +runner = dict(type='EpochBasedRunner', max_epochs=90) diff --git a/configs/densenet/metafile.yml b/configs/densenet/metafile.yml new file mode 100644 index 0000000..84366b2 --- /dev/null +++ b/configs/densenet/metafile.yml @@ -0,0 +1,76 @@ +Collections: + - Name: DenseNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - DenseBlock + Paper: + URL: https://arxiv.org/abs/1608.06993 + Title: Densely Connected Convolutional Networks + README: configs/densenet/README.md + +Models: + - Name: densenet121_4xb256_in1k + Metadata: + FLOPs: 2881695488 + Parameters: 7978856 + In Collections: DenseNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.96 + Top 5 Accuracy: 92.21 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet121_4xb256_in1k_20220426-07450f99.pth + Config: configs/densenet/densenet121_4xb256_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/densenet121-a639ec97.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py + - Name: densenet169_4xb256_in1k + Metadata: + FLOPs: 3416860160 + Parameters: 14149480 + In Collections: DenseNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.08 + Top 5 Accuracy: 93.11 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet169_4xb256_in1k_20220426-a2889902.pth + Config: configs/densenet/densenet169_4xb256_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/densenet169-b2777c0a.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py + - Name: densenet201_4xb256_in1k + Metadata: + FLOPs: 4365236736 + Parameters: 20013928 + In Collections: DenseNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.32 + Top 5 Accuracy: 93.64 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet201_4xb256_in1k_20220426-05cae4ef.pth + Config: configs/densenet/densenet201_4xb256_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/densenet201-c1103571.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py + - Name: densenet161_4xb256_in1k + Metadata: + FLOPs: 7816363968 + Parameters: 28681000 + In Collections: DenseNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.61 + Top 5 Accuracy: 93.83 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-ee6a80a9.pth + Config: configs/densenet/densenet161_4xb256_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/densenet161-8d451a50.pth + Code: 
https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py diff --git a/configs/efficientformer/README.md b/configs/efficientformer/README.md new file mode 100644 index 0000000..ecd6b49 --- /dev/null +++ b/configs/efficientformer/README.md @@ -0,0 +1,47 @@ +# EfficientFormer + +> [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191) + + + +## Abstract + +Vision Transformers (ViT) have shown rapid progress in computer vision tasks, achieving promising results on various benchmarks. However, due to the massive number of parameters and model design, e.g., attention mechanism, ViT-based models are generally times slower than lightweight convolutional networks. Therefore, the deployment of ViT for real-time applications is particularly challenging, especially on resource-constrained hardware such as mobile devices. Recent efforts try to reduce the computation complexity of ViT through network architecture search or hybrid design with MobileNet block, yet the inference speed is still unsatisfactory. This leads to an important question: can transformers run as fast as MobileNet while obtaining high performance? To answer this, we first revisit the network architecture and operators used in ViT-based models and identify inefficient designs. Then we introduce a dimension-consistent pure transformer (without MobileNet blocks) as a design paradigm. Finally, we perform latency-driven slimming to get a series of final models dubbed EfficientFormer. Extensive experiments show the superiority of EfficientFormer in performance and speed on mobile devices. Our fastest model, EfficientFormer-L1, achieves 79.2% top-1 accuracy on ImageNet-1K with only 1.6 ms inference latency on iPhone 12 (compiled with CoreML), which runs as fast as MobileNetV2×1.4 (1.6 ms, 74.7% top-1), and our largest model, EfficientFormer-L7, obtains 83.3% accuracy with only 7.0 ms latency. Our work proves that properly designed transformers can reach extremely low latency on mobile devices while maintaining high performance. + +
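One easy cross-check of the parameter counts in the table below is to instantiate the classifier directly from one of the configs added in this diff. This is a sketch that assumes an MMClassification version which already ships the `EfficientFormer` backbone (v0.24.0 according to the metafile) together with `mmcv.Config` and `mmcls.models.build_classifier`; run it from the repository root so the relative `_base_` config paths resolve.

```python
from mmcv import Config
from mmcls.models import build_classifier

# Any of the EfficientFormer configs introduced in this diff works here.
cfg = Config.fromfile('configs/efficientformer/efficientformer-l1_8xb128_in1k.py')

# Randomly initialized model; no checkpoint is needed just to count parameters.
model = build_classifier(cfg.model)
model.init_weights()

num_params = sum(p.numel() for p in model.parameters())
print(f'EfficientFormer-L1 parameters: {num_params / 1e6:.2f} M')  # roughly 12 M
```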
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------: | :------------------------------------------------------------------------: | +| EfficientFormer-l1\* | 12.19 | 1.30 | 80.46 | 94.99 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l1_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l1_3rdparty_in1k_20220803-d66e61df.pth) | +| EfficientFormer-l3\* | 31.41 | 3.93 | 82.45 | 96.18 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l3_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l3_3rdparty_in1k_20220803-dde1c8c5.pth) | +| EfficientFormer-l7\* | 82.23 | 10.16 | 83.40 | 96.60 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l7_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l7_3rdparty_in1k_20220803-41a552bb.pth) | + +*Models with * are converted from the [official repo](https://github.com/snap-research/EfficientFormer). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2206.01191, + doi = {10.48550/ARXIV.2206.01191}, + + url = {https://arxiv.org/abs/2206.01191}, + + author = {Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov, Sergey and Wang, Yanzhi and Ren, Jian}, + + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + + title = {EfficientFormer: Vision Transformers at MobileNet Speed}, + + publisher = {arXiv}, + + year = {2022}, + + copyright = {Creative Commons Attribution 4.0 International} +} +``` diff --git a/configs/efficientformer/efficientformer-l1_8xb128_in1k.py b/configs/efficientformer/efficientformer-l1_8xb128_in1k.py new file mode 100644 index 0000000..f5db2bf --- /dev/null +++ b/configs/efficientformer/efficientformer-l1_8xb128_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='EfficientFormer', + arch='l1', + drop_path_rate=0, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-5) + ]), + neck=dict(type='GlobalAveragePooling', dim=1), + head=dict( + type='EfficientFormerClsHead', in_channels=448, num_classes=1000)) diff --git a/configs/efficientformer/efficientformer-l3_8xb128_in1k.py b/configs/efficientformer/efficientformer-l3_8xb128_in1k.py new file mode 100644 index 0000000..e920f78 --- /dev/null +++ b/configs/efficientformer/efficientformer-l3_8xb128_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + 
'../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='EfficientFormer', + arch='l3', + drop_path_rate=0, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-5) + ]), + neck=dict(type='GlobalAveragePooling', dim=1), + head=dict( + type='EfficientFormerClsHead', in_channels=512, num_classes=1000)) diff --git a/configs/efficientformer/efficientformer-l7_8xb128_in1k.py b/configs/efficientformer/efficientformer-l7_8xb128_in1k.py new file mode 100644 index 0000000..a59e3a7 --- /dev/null +++ b/configs/efficientformer/efficientformer-l7_8xb128_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='EfficientFormer', + arch='l7', + drop_path_rate=0, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-5) + ]), + neck=dict(type='GlobalAveragePooling', dim=1), + head=dict( + type='EfficientFormerClsHead', in_channels=768, num_classes=1000)) diff --git a/configs/efficientformer/metafile.yml b/configs/efficientformer/metafile.yml new file mode 100644 index 0000000..33c4786 --- /dev/null +++ b/configs/efficientformer/metafile.yml @@ -0,0 +1,67 @@ +Collections: + - Name: EfficientFormer + Metadata: + Training Data: ImageNet-1k + Architecture: + - Pooling + - 1x1 Convolution + - LayerScale + - MetaFormer + Paper: + URL: https://arxiv.org/pdf/2206.01191.pdf + Title: "EfficientFormer: Vision Transformers at MobileNet Speed" + README: configs/efficientformer/README.md + Code: + Version: v0.24.0 + URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/backbones/efficientformer.py + +Models: + - Name: efficientformer-l1_3rdparty_8xb128_in1k + Metadata: + FLOPs: 1304601088 # 1.3G + Parameters: 12278696 # 12M + In Collections: EfficientFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.46 + Top 5 Accuracy: 94.99 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l1_3rdparty_in1k_20220803-d66e61df.pth + Config: configs/efficientformer/efficientformer-l1_8xb128_in1k.py + Converted From: + Weights: https://drive.google.com/file/d/11SbX-3cfqTOc247xKYubrAjBiUmr818y/view?usp=sharing + Code: https://github.com/snap-research/EfficientFormer + - Name: efficientformer-l3_3rdparty_8xb128_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3737045760 # 3.7G + Parameters: 31406000 # 31M + In Collections: EfficientFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.45 + Top 5 Accuracy: 96.18 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l3_3rdparty_in1k_20220803-dde1c8c5.pth + Config: configs/efficientformer/efficientformer-l3_8xb128_in1k.py + Converted From: + Weights: https://drive.google.com/file/d/1OyyjKKxDyMj-BcfInp4GlDdwLu3hc30m/view?usp=sharing + Code: https://github.com/snap-research/EfficientFormer + - Name: efficientformer-l7_3rdparty_8xb128_in1k + Metadata: + FLOPs: 10163951616 # 10.2G + Parameters: 82229328 # 82M + In Collections: 
EfficientFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.40 + Top 5 Accuracy: 96.60 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l7_3rdparty_in1k_20220803-41a552bb.pth + Config: configs/efficientformer/efficientformer-l7_8xb128_in1k.py + Converted From: + Weights: https://drive.google.com/file/d/1cVw-pctJwgvGafeouynqWWCwgkcoFMM5/view?usp=sharing + Code: https://github.com/snap-research/EfficientFormer diff --git a/configs/efficientnet/README.md b/configs/efficientnet/README.md new file mode 100644 index 0000000..832f5c6 --- /dev/null +++ b/configs/efficientnet/README.md @@ -0,0 +1,62 @@ +# EfficientNet + +> [Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946v5) + + + +## Abstract + +Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters. + +
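The compound scaling rule in the abstract can be written down in a few lines: depth, width and resolution are scaled together by a single coefficient φ as d = α^φ, w = β^φ, r = γ^φ, with α·β²·γ² ≈ 2 so that FLOPs grow roughly by 2^φ. The sketch below uses the constants reported in the paper (α = 1.2, β = 1.1, γ = 1.15) and is only meant to show why the larger configs below use bigger inputs (240 px for B1 up to 600 px for B7 and 672 px for B8); the released models round and hand-tune these values, so the printed numbers are approximations.

```python
# Compound scaling sketch: constants are the grid-searched values reported in
# the EfficientNet paper; depth/width/resolution all scale with a single phi.
ALPHA, BETA, GAMMA = 1.2, 1.1, 1.15   # alpha * beta**2 * gamma**2 ~= 2
BASE_RESOLUTION = 224                 # EfficientNet-B0 input size (see the B0 configs)


def compound_scale(phi):
    """Return (depth_mult, width_mult, approx_resolution) for coefficient phi."""
    depth_mult = ALPHA ** phi
    width_mult = BETA ** phi
    resolution = round(BASE_RESOLUTION * GAMMA ** phi)
    return depth_mult, width_mult, resolution


for phi in range(5):
    d, w, r = compound_scale(phi)
    print(f'phi={phi}: depth x{d:.2f}, width x{w:.2f}, ~{r} px')
# The official B1-B7 models hand-pick 240/260/300/380/456/528/600 px, so these
# printed resolutions only approximate the settings used in the configs below.
```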
+ +
+ +## Results and models + +### ImageNet-1k + +In the result table, AA means trained with AutoAugment pre-processing, more details can be found in the [paper](https://arxiv.org/abs/1805.09501), and AdvProp is a method to train with adversarial examples, more details can be found in the [paper](https://arxiv.org/abs/1911.09665). + +Note: In MMClassification, we support training with AutoAugment, don't support AdvProp by now. + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------: | :------------------------------------------------------------------: | +| EfficientNet-B0\* | 5.29 | 0.02 | 76.74 | 93.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth) | +| EfficientNet-B0 (AA)\* | 5.29 | 0.02 | 77.26 | 93.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa_in1k_20220119-8d939117.pth) | +| EfficientNet-B0 (AA + AdvProp)\* | 5.29 | 0.02 | 77.53 | 93.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth) | +| EfficientNet-B1\* | 7.79 | 0.03 | 78.68 | 94.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32_in1k_20220119-002556d9.pth) | +| EfficientNet-B1 (AA)\* | 7.79 | 0.03 | 79.20 | 94.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa_in1k_20220119-619d8ae3.pth) | +| EfficientNet-B1 (AA + AdvProp)\* | 7.79 | 0.03 | 79.52 | 94.43 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k_20220119-5715267d.pth) | +| EfficientNet-B2\* | 9.11 | 0.03 | 79.64 | 94.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32_in1k_20220119-ea374a30.pth) | +| EfficientNet-B2 (AA)\* | 9.11 | 0.03 | 80.21 | 94.96 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa_in1k_20220119-dd61e80b.pth) | +| EfficientNet-B2 (AA + AdvProp)\* | 9.11 | 0.03 | 80.45 | 95.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k_20220119-1655338a.pth) | +| EfficientNet-B3\* | 12.23 | 0.06 | 81.01 | 95.34 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32_in1k_20220119-4b4d7487.pth) | +| EfficientNet-B3 (AA)\* | 12.23 | 0.06 | 81.58 | 95.67 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth) | +| EfficientNet-B3 (AA + AdvProp)\* | 12.23 | 0.06 | 81.81 | 95.69 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth) | +| EfficientNet-B4\* | 19.34 | 0.12 | 82.57 | 96.09 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32_in1k_20220119-81fd4077.pth) | +| EfficientNet-B4 (AA)\* | 19.34 | 0.12 | 82.95 | 96.26 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa_in1k_20220119-45b8bd2b.pth) | +| EfficientNet-B4 (AA + AdvProp)\* | 19.34 | 0.12 | 83.25 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k_20220119-38c2238c.pth) | +| EfficientNet-B5\* | 30.39 | 0.24 | 83.18 | 96.47 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32_in1k_20220119-e9814430.pth) | +| EfficientNet-B5 (AA)\* | 30.39 | 0.24 | 83.82 | 96.76 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa_in1k_20220119-2cab8b78.pth) | +| EfficientNet-B5 (AA + AdvProp)\* | 30.39 | 0.24 | 84.21 | 96.98 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k_20220119-f57a895a.pth) | +| EfficientNet-B6 (AA)\* | 43.04 | 0.41 | 84.05 | 96.82 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b6_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa_in1k_20220119-45b03310.pth) | +| EfficientNet-B6 (AA + AdvProp)\* | 43.04 | 0.41 | 84.74 | 97.14 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k_20220119-bfe3485e.pth) | +| EfficientNet-B7 (AA)\* | 66.35 | 0.72 | 84.38 | 96.88 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b7_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa_in1k_20220119-bf03951c.pth) | +| EfficientNet-B7 (AA + AdvProp)\* | 66.35 | 0.72 | 85.14 | 97.23 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k_20220119-c6dbff10.pth) | +| EfficientNet-B8 (AA + AdvProp)\* | 87.41 | 1.09 | 85.38 | 97.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k_20220119-297ce1b7.pth) | + +*Models with * are converted from the [official repo](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +``` +@inproceedings{tan2019efficientnet, + title={Efficientnet: Rethinking model scaling for convolutional neural networks}, + author={Tan, Mingxing and Le, Quoc}, + booktitle={International Conference on Machine Learning}, + pages={6105--6114}, + year={2019}, + organization={PMLR} +} +``` diff --git a/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py new file mode 100644 index 0000000..fbb490d --- /dev/null +++ b/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b0.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b0_8xb32_in1k.py b/configs/efficientnet/efficientnet-b0_8xb32_in1k.py new file mode 100644 index 0000000..33931e5 --- /dev/null +++ b/configs/efficientnet/efficientnet-b0_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + 
'../_base_/models/efficientnet_b0.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py new file mode 100644 index 0000000..6b66395 --- /dev/null +++ b/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b1.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=240, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=240, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b1_8xb32_in1k.py b/configs/efficientnet/efficientnet-b1_8xb32_in1k.py new file mode 100644 index 0000000..d702a15 --- /dev/null +++ b/configs/efficientnet/efficientnet-b1_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b1.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=240, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + 
type='CenterCrop', + crop_size=240, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py new file mode 100644 index 0000000..ae8cda8 --- /dev/null +++ b/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b2.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=260, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=260, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b2_8xb32_in1k.py b/configs/efficientnet/efficientnet-b2_8xb32_in1k.py new file mode 100644 index 0000000..53f7c84 --- /dev/null +++ b/configs/efficientnet/efficientnet-b2_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b2.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=260, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=260, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py new file mode 100644 index 0000000..dfd3f92 --- /dev/null +++ b/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b3.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + 
'../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=300, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=300, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b3_8xb32_in1k.py b/configs/efficientnet/efficientnet-b3_8xb32_in1k.py new file mode 100644 index 0000000..2838713 --- /dev/null +++ b/configs/efficientnet/efficientnet-b3_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b3.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=300, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=300, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py new file mode 100644 index 0000000..333a19a --- /dev/null +++ b/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b4.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=380, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=380, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), 
+ dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b4_8xb32_in1k.py b/configs/efficientnet/efficientnet-b4_8xb32_in1k.py new file mode 100644 index 0000000..82f06cd --- /dev/null +++ b/configs/efficientnet/efficientnet-b4_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b4.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=380, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=380, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py new file mode 100644 index 0000000..f66855c --- /dev/null +++ b/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b5.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=456, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=456, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b5_8xb32_in1k.py b/configs/efficientnet/efficientnet-b5_8xb32_in1k.py new file mode 100644 index 0000000..9b0eaab --- /dev/null +++ b/configs/efficientnet/efficientnet-b5_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b5.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], 
to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=456, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=456, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py new file mode 100644 index 0000000..da64e0e --- /dev/null +++ b/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b6.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=528, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=528, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b6_8xb32_in1k.py b/configs/efficientnet/efficientnet-b6_8xb32_in1k.py new file mode 100644 index 0000000..6e03bb4 --- /dev/null +++ b/configs/efficientnet/efficientnet-b6_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b6.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=528, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=528, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + 
val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py new file mode 100644 index 0000000..27c19fc --- /dev/null +++ b/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b7.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=600, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=600, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b7_8xb32_in1k.py b/configs/efficientnet/efficientnet-b7_8xb32_in1k.py new file mode 100644 index 0000000..5146383 --- /dev/null +++ b/configs/efficientnet/efficientnet-b7_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b7.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=600, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=600, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py new file mode 100644 index 0000000..25540a1 --- /dev/null +++ b/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b8.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=672, + 
efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=672, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b8_8xb32_in1k.py b/configs/efficientnet/efficientnet-b8_8xb32_in1k.py new file mode 100644 index 0000000..4ff28c0 --- /dev/null +++ b/configs/efficientnet/efficientnet-b8_8xb32_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_b8.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=672, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=672, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py new file mode 100644 index 0000000..faa5386 --- /dev/null +++ b/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_em.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=240, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=240, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py 
b/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py new file mode 100644 index 0000000..5f11746 --- /dev/null +++ b/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/efficientnet_es.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/metafile.yml b/configs/efficientnet/metafile.yml new file mode 100644 index 0000000..c8bbf0d --- /dev/null +++ b/configs/efficientnet/metafile.yml @@ -0,0 +1,391 @@ +Collections: + - Name: EfficientNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - 1x1 Convolution + - Average Pooling + - Convolution + - Dense Connections + - Dropout + - Inverted Residual Block + - RMSProp + - Squeeze-and-Excitation Block + - Swish + Paper: + URL: https://arxiv.org/abs/1905.11946v5 + Title: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" + README: configs/efficientnet/README.md + Code: + Version: v0.20.1 + URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/efficientnet.py + +Models: + - Name: efficientnet-b0_3rdparty_8xb32_in1k + Metadata: + FLOPs: 16481180 + Parameters: 5288548 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.74 + Top 5 Accuracy: 93.17 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth + Config: configs/efficientnet/efficientnet-b0_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b0.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b0_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 16481180 + Parameters: 5288548 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.26 + Top 5 Accuracy: 93.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa_in1k_20220119-8d939117.pth + Config: configs/efficientnet/efficientnet-b0_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b0.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 16481180 + Parameters: 5288548 + In Collections: 
EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.53 + Top 5 Accuracy: 93.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth + Config: configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b0.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b1_3rdparty_8xb32_in1k + Metadata: + FLOPs: 27052224 + Parameters: 7794184 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.68 + Top 5 Accuracy: 94.28 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32_in1k_20220119-002556d9.pth + Config: configs/efficientnet/efficientnet-b1_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b1.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b1_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 27052224 + Parameters: 7794184 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.20 + Top 5 Accuracy: 94.42 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa_in1k_20220119-619d8ae3.pth + Config: configs/efficientnet/efficientnet-b1_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b1.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 27052224 + Parameters: 7794184 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.52 + Top 5 Accuracy: 94.43 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k_20220119-5715267d.pth + Config: configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b1.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b2_3rdparty_8xb32_in1k + Metadata: + FLOPs: 34346386 + Parameters: 9109994 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.64 + Top 5 Accuracy: 94.80 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32_in1k_20220119-ea374a30.pth + Config: configs/efficientnet/efficientnet-b2_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b2.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b2_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 34346386 + Parameters: 9109994 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.21 + Top 5 Accuracy: 94.96 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa_in1k_20220119-dd61e80b.pth + Config: configs/efficientnet/efficientnet-b2_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b2.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 34346386 + Parameters: 9109994 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.45 + Top 5 Accuracy: 95.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k_20220119-1655338a.pth + Config: configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b2.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b3_3rdparty_8xb32_in1k + Metadata: + FLOPs: 58641904 + Parameters: 12233232 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.01 + Top 5 Accuracy: 95.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32_in1k_20220119-4b4d7487.pth + Config: configs/efficientnet/efficientnet-b3_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b3.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b3_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 58641904 + Parameters: 12233232 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.58 + Top 5 Accuracy: 95.67 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth + Config: configs/efficientnet/efficientnet-b3_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b3.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 58641904 + Parameters: 12233232 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.81 + Top 5 Accuracy: 95.69 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth + Config: configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b3.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b4_3rdparty_8xb32_in1k + Metadata: + FLOPs: 121870624 + Parameters: 19341616 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.57 + Top 5 Accuracy: 96.09 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32_in1k_20220119-81fd4077.pth + Config: configs/efficientnet/efficientnet-b4_8xb32_in1k.py + Converted From: + 
Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b4.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b4_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 121870624 + Parameters: 19341616 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.95 + Top 5 Accuracy: 96.26 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa_in1k_20220119-45b8bd2b.pth + Config: configs/efficientnet/efficientnet-b4_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b4.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 121870624 + Parameters: 19341616 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.25 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k_20220119-38c2238c.pth + Config: configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b4.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b5_3rdparty_8xb32_in1k + Metadata: + FLOPs: 243879440 + Parameters: 30389784 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.18 + Top 5 Accuracy: 96.47 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32_in1k_20220119-e9814430.pth + Config: configs/efficientnet/efficientnet-b5_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b5.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b5_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 243879440 + Parameters: 30389784 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.82 + Top 5 Accuracy: 96.76 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa_in1k_20220119-2cab8b78.pth + Config: configs/efficientnet/efficientnet-b5_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b5.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 243879440 + Parameters: 30389784 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.21 + Top 5 Accuracy: 96.98 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k_20220119-f57a895a.pth + Config: configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b5.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - 
Name: efficientnet-b6_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 412002408 + Parameters: 43040704 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.05 + Top 5 Accuracy: 96.82 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa_in1k_20220119-45b03310.pth + Config: configs/efficientnet/efficientnet-b6_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b6.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 412002408 + Parameters: 43040704 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.74 + Top 5 Accuracy: 97.14 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k_20220119-bfe3485e.pth + Config: configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b6.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b7_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 715526512 + Parameters: 66347960 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.38 + Top 5 Accuracy: 96.88 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa_in1k_20220119-bf03951c.pth + Config: configs/efficientnet/efficientnet-b7_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b7.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 715526512 + Parameters: 66347960 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.14 + Top 5 Accuracy: 97.23 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k_20220119-c6dbff10.pth + Config: configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b7.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 1092755326 + Parameters: 87413142 + In Collections: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.38 + Top 5 Accuracy: 97.28 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k_20220119-297ce1b7.pth + Config: configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b8.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet diff --git a/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py b/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py new file mode 100644 index 0000000..9075a89 
--- /dev/null +++ b/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py @@ -0,0 +1,6 @@ +_base_ = '../resnet/resnet50_8xb32-fp16-dynamic_in1k.py' + +_deprecation_ = dict( + expected='../resnet/resnet50_8xb32-fp16-dynamic_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/fp16/resnet50_b32x8_fp16_imagenet.py b/configs/fp16/resnet50_b32x8_fp16_imagenet.py new file mode 100644 index 0000000..a73a409 --- /dev/null +++ b/configs/fp16/resnet50_b32x8_fp16_imagenet.py @@ -0,0 +1,6 @@ +_base_ = '../resnet/resnet50_8xb32-fp16_in1k.py' + +_deprecation_ = dict( + expected='../resnet/resnet50_8xb32-fp16_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/gpvit/gpvit_l1.py b/configs/gpvit/gpvit_l1.py new file mode 100644 index 0000000..56a4874 --- /dev/null +++ b/configs/gpvit/gpvit_l1.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224_lmdb.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='GPViT', + arch='L1', + img_size=224, + drop_path_rate=-1, # dpr is in arch config + att_with_cp=False, + group_with_cp=False), + neck=dict(type='GroupNeck', embed_dims=216), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=216, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + topk=(1, 5)), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) + +# data settings +samples_per_gpu=128 +data = dict(samples_per_gpu=samples_per_gpu, workers_per_gpu=4) + +# opt settings +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + }) +world_size = 16 +optimizer = dict( + lr=5e-4 * samples_per_gpu * world_size / 512, + paramwise_cfg=paramwise_cfg) +lr_config = dict(warmup_iters=15) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# other running settings +checkpoint_config = dict(interval=5, max_keep_ckpts=5) +evaluation = dict(interval=5, metric='accuracy') +fp16 = None # make sure fp16 (mm version) is None when using AMP optimizer +runner = dict(type='AmpEpochBasedRunner') +work_dir = 'work_dirs/gpvit_l1' diff --git a/configs/gpvit/gpvit_l2.py b/configs/gpvit/gpvit_l2.py new file mode 100644 index 0000000..5261395 --- /dev/null +++ b/configs/gpvit/gpvit_l2.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224_lmdb.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='GPViT', + arch='L2', + img_size=224, + drop_path_rate=-1, # dpr is in arch config + att_with_cp=False, + group_with_cp=False), + neck=dict(type='GroupNeck', embed_dims=348), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=348, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + topk=(1, 5)), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', 
std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) + +# data settings +samples_per_gpu=128 +data = dict(samples_per_gpu=samples_per_gpu, workers_per_gpu=4) + +# opt settings +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + }) +world_size = 16 +optimizer = dict( + lr=5e-4 * samples_per_gpu * world_size / 512, + paramwise_cfg=paramwise_cfg) +lr_config = dict(warmup_iters=15) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# other running settings +checkpoint_config = dict(interval=5, max_keep_ckpts=5) +evaluation = dict(interval=5, metric='accuracy') +fp16 = None # make sure fp16 (mm version) is None when using AMP optimizer +runner = dict(type='AmpEpochBasedRunner') +work_dir = 'work_dirs/gpvit_l2' diff --git a/configs/gpvit/gpvit_l3.py b/configs/gpvit/gpvit_l3.py new file mode 100644 index 0000000..0fb1868 --- /dev/null +++ b/configs/gpvit/gpvit_l3.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224_lmdb.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='GPViT', + arch='L3', + img_size=224, + drop_path_rate=-1, # dpr is in arch config + att_with_cp=False, + group_with_cp=False), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=432, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + topk=(1, 5)), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) + +# data settings +samples_per_gpu=128 +data = dict(samples_per_gpu=samples_per_gpu, workers_per_gpu=4) + +# opt settings +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + }) +world_size = 16 +optimizer = dict( + lr=5e-4 * samples_per_gpu * world_size / 512, + paramwise_cfg=paramwise_cfg) +lr_config = dict(warmup_iters=15) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# other running settings +checkpoint_config = dict(interval=5, max_keep_ckpts=5) +evaluation = dict(interval=5, metric='accuracy') +fp16 = None # make sure fp16 (mm version) is None when using AMP optimizer +runner = dict(type='AmpEpochBasedRunner') +work_dir = 'work_dirs/gpvit_l3' diff --git a/configs/gpvit/gpvit_l4.py b/configs/gpvit/gpvit_l4.py new file mode 100644 index 0000000..9bddb42 --- /dev/null +++ b/configs/gpvit/gpvit_l4.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224_lmdb.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='GPViT', + arch='L4', + img_size=224, + drop_path_rate=-1, # dpr is in arch config + att_with_cp=False, + group_with_cp=False), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=624, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + topk=(1, 5)), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) + +# data settings +samples_per_gpu=128 +data = dict(samples_per_gpu=samples_per_gpu, workers_per_gpu=4) + +# opt settings +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + }) +world_size = 16 +optimizer = dict( + lr=5e-4 * samples_per_gpu * world_size / 512, + paramwise_cfg=paramwise_cfg) +lr_config = dict(warmup_iters=15) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# other running settings +checkpoint_config = dict(interval=5, max_keep_ckpts=5) +evaluation = dict(interval=5, metric='accuracy') +fp16 = None # make sure fp16 (mm version) is None when using AMP optimizer +runner = dict(type='AmpEpochBasedRunner') +work_dir = 'work_dirs/gpvit_l4' diff --git a/configs/hornet/README.md b/configs/hornet/README.md new file mode 100644 index 0000000..7c1b9a9 --- /dev/null +++ b/configs/hornet/README.md @@ -0,0 +1,51 @@ +# HorNet + +> [HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions](https://arxiv.org/pdf/2207.14284v2.pdf) + + + +## Abstract + +Recent progress in vision Transformers exhibits great success in various tasks driven by the new spatial modeling mechanism based on dot-product self-attention. In this paper, we show that the key ingredients behind the vision Transformers, namely input-adaptive, long-range and high-order spatial interactions, can also be efficiently implemented with a convolution-based framework. We present the Recursive Gated Convolution (g nConv) that performs high-order spatial interactions with gated convolutions and recursive designs. The new operation is highly flexible and customizable, which is compatible with various variants of convolution and extends the two-order interactions in self-attention to arbitrary orders without introducing significant extra computation. g nConv can serve as a plug-and-play module to improve various vision Transformers and convolution-based models. Based on the operation, we construct a new family of generic vision backbones named HorNet. Extensive experiments on ImageNet classification, COCO object detection and ADE20K semantic segmentation show HorNet outperform Swin Transformers and ConvNeXt by a significant margin with similar overall architecture and training configurations. HorNet also shows favorable scalability to more training data and a larger model size. Apart from the effectiveness in visual encoders, we also show g nConv can be applied to task-specific decoders and consistently improve dense prediction performance with less computation. Our results demonstrate that g nConv can be a new basic module for visual modeling that effectively combines the merits of both vision Transformers and CNNs. Code is available at https://github.com/raoyongming/HorNet. + +
+
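+Below is a minimal PyTorch-style sketch of the recursive gated convolution (gnConv)
+idea described in the abstract above, included for orientation only: the class name
+`GnConvSketch`, the recursion order and the 7x7 depthwise kernel are illustrative
+assumptions, and details such as the scaling factor are omitted. The reference
+implementation lives in the [official repo](https://github.com/raoyongming/HorNet) and
+in `mmcls/models/backbones/hornet.py`.
+
+```python
+import torch
+import torch.nn as nn
+
+
+class GnConvSketch(nn.Module):
+    """Simplified recursive gated convolution (hypothetical sketch).
+
+    `dim` must be divisible by 2 ** (order - 1).
+    """
+
+    def __init__(self, dim, order=3, kernel_size=7):
+        super().__init__()
+        # channel widths double at each recursion step, ending at `dim`
+        self.dims = [dim // 2 ** i for i in range(order)][::-1]
+        self.proj_in = nn.Conv2d(dim, self.dims[0] + sum(self.dims), 1)
+        # a depthwise convolution provides the spatial mixing for every order
+        self.dwconv = nn.Conv2d(
+            sum(self.dims), sum(self.dims), kernel_size,
+            padding=kernel_size // 2, groups=sum(self.dims))
+        # 1x1 convolutions widen the gated features between recursion steps
+        self.pws = nn.ModuleList([
+            nn.Conv2d(self.dims[i], self.dims[i + 1], 1)
+            for i in range(order - 1)
+        ])
+        self.proj_out = nn.Conv2d(dim, dim, 1)
+
+    def forward(self, x):
+        p0, q = torch.split(
+            self.proj_in(x), [self.dims[0], sum(self.dims)], dim=1)
+        q = torch.split(self.dwconv(q), self.dims, dim=1)
+        out = p0 * q[0]                      # first-order gating
+        for i, pw in enumerate(self.pws):
+            out = pw(out) * q[i + 1]         # higher orders, applied recursively
+        return self.proj_out(out)
+
+
+x = torch.randn(1, 64, 56, 56)
+print(GnConvSketch(64)(x).shape)             # torch.Size([1, 64, 56, 56])
+```
+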
+ +## Results and models + +### ImageNet-1k + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------: | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :--------------------------------------------------------------: | :----------------------------------------------------------------: | +| HorNet-T\* | From scratch | 224x224 | 22.41 | 3.98 | 82.84 | 96.24 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny_3rdparty_in1k_20220915-0e8eedff.pth) | +| HorNet-T-GF\* | From scratch | 224x224 | 22.99 | 3.9 | 82.98 | 96.38 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-tiny-gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny-gf_3rdparty_in1k_20220915-4c35a66b.pth) | +| HorNet-S\* | From scratch | 224x224 | 49.53 | 8.83 | 83.79 | 96.75 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-small_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small_3rdparty_in1k_20220915-5935f60f.pth) | +| HorNet-S-GF\* | From scratch | 224x224 | 50.4 | 8.71 | 83.98 | 96.77 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-small-gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small-gf_3rdparty_in1k_20220915-649ca492.pth) | +| HorNet-B\* | From scratch | 224x224 | 87.26 | 15.59 | 84.24 | 96.94 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-base_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base_3rdparty_in1k_20220915-a06176bb.pth) | +| HorNet-B-GF\* | From scratch | 224x224 | 88.42 | 15.42 | 84.32 | 96.95 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-base-gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base-gf_3rdparty_in1k_20220915-82c06fa7.pth) | + +\*Models with * are converted from [the official repo](https://github.com/raoyongming/HorNet). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results. + +### Pre-trained Models + +The pre-trained models on ImageNet-21k are used to fine-tune on the downstream tasks. + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Download | +| :--------------: | :----------: | :--------: | :-------: | :------: | :------------------------------------------------------------------------------------------------------------------------: | +| HorNet-L\* | ImageNet-21k | 224x224 | 194.54 | 34.83 | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large_3rdparty_in21k_20220909-9ccef421.pth) | +| HorNet-L-GF\* | ImageNet-21k | 224x224 | 196.29 | 34.58 | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large-gf_3rdparty_in21k_20220909-3aea3b61.pth) | +| HorNet-L-GF384\* | ImageNet-21k | 384x384 | 201.23 | 101.63 | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large-gf384_3rdparty_in21k_20220909-80894290.pth) | + +\*Models with * are converted from [the official repo](https://github.com/raoyongming/HorNet). 
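+As a sketch of how one of the ImageNet-21k checkpoints above could be used for
+fine-tuning, a downstream config might point the backbone's `init_cfg` at the released
+weights, following the usual MMClassification `Pretrained` initialization convention.
+The checkpoint choice, the `prefix` key and the 100-class head below are assumptions
+for illustration, not an official config.
+
+```python
+# hypothetical fragment of a downstream fine-tuning config
+model = dict(
+    backbone=dict(
+        init_cfg=dict(
+            type='Pretrained',
+            checkpoint='https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large_3rdparty_in21k_20220909-9ccef421.pth',
+            prefix='backbone')),
+    head=dict(num_classes=100))  # adapt the classifier head to the target dataset
+```
+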
+ +## Citation + +``` +@article{rao2022hornet, + title={HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions}, + author={Rao, Yongming and Zhao, Wenliang and Tang, Yansong and Zhou, Jie and Lim, Ser-Lam and Lu, Jiwen}, + journal={arXiv preprint arXiv:2207.14284}, + year={2022} +} +``` diff --git a/configs/hornet/hornet-base-gf_8xb64_in1k.py b/configs/hornet/hornet-base-gf_8xb64_in1k.py new file mode 100644 index 0000000..6c29de6 --- /dev/null +++ b/configs/hornet/hornet-base-gf_8xb64_in1k.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/hornet/hornet-base-gf.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optimizer = dict(lr=4e-3) +optimizer_config = dict(grad_clip=dict(max_norm=1.0), _delete_=True) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-base_8xb64_in1k.py b/configs/hornet/hornet-base_8xb64_in1k.py new file mode 100644 index 0000000..969d8b9 --- /dev/null +++ b/configs/hornet/hornet-base_8xb64_in1k.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/hornet/hornet-base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optimizer = dict(lr=4e-3) +optimizer_config = dict(grad_clip=dict(max_norm=5.0), _delete_=True) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-small-gf_8xb64_in1k.py b/configs/hornet/hornet-small-gf_8xb64_in1k.py new file mode 100644 index 0000000..deb570e --- /dev/null +++ b/configs/hornet/hornet-small-gf_8xb64_in1k.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/hornet/hornet-small-gf.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optimizer = dict(lr=4e-3) +optimizer_config = dict(grad_clip=dict(max_norm=1.0), _delete_=True) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-small_8xb64_in1k.py b/configs/hornet/hornet-small_8xb64_in1k.py new file mode 100644 index 0000000..c07fa60 --- /dev/null +++ b/configs/hornet/hornet-small_8xb64_in1k.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/hornet/hornet-small.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optimizer = dict(lr=4e-3) +optimizer_config = dict(grad_clip=dict(max_norm=5.0), _delete_=True) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-tiny-gf_8xb128_in1k.py b/configs/hornet/hornet-tiny-gf_8xb128_in1k.py new file mode 100644 index 0000000..3a1d1a7 --- /dev/null +++ b/configs/hornet/hornet-tiny-gf_8xb128_in1k.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/hornet/hornet-tiny-gf.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=128) + +optimizer = dict(lr=4e-3) +optimizer_config = dict(grad_clip=dict(max_norm=1.0), _delete_=True) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git 
a/configs/hornet/hornet-tiny_8xb128_in1k.py b/configs/hornet/hornet-tiny_8xb128_in1k.py new file mode 100644 index 0000000..69a7cdf --- /dev/null +++ b/configs/hornet/hornet-tiny_8xb128_in1k.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/hornet/hornet-tiny.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=128) + +optimizer = dict(lr=4e-3) +optimizer_config = dict(grad_clip=dict(max_norm=100.0), _delete_=True) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/metafile.yml b/configs/hornet/metafile.yml new file mode 100644 index 0000000..7120772 --- /dev/null +++ b/configs/hornet/metafile.yml @@ -0,0 +1,97 @@ +Collections: + - Name: HorNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Architecture: + - HorNet + - gnConv + Paper: + URL: https://arxiv.org/pdf/2207.14284v2.pdf + Title: "HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions" + README: configs/hornet/README.md + Code: + Version: v0.24.0 + URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/backbones/hornet.py + +Models: + - Name: hornet-tiny_3rdparty_in1k + Metadata: + FLOPs: 3980000000 # 3.98G + Parameters: 22410000 # 22.41M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.84 + Top 5 Accuracy: 96.24 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny_3rdparty_in1k_20220915-0e8eedff.pth + Config: configs/hornet/hornet-tiny_8xb128_in1k.py + - Name: hornet-tiny-gf_3rdparty_in1k + Metadata: + FLOPs: 3900000000 # 3.9G + Parameters: 22990000 # 22.99M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.98 + Top 5 Accuracy: 96.38 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny-gf_3rdparty_in1k_20220915-4c35a66b.pth + Config: configs/hornet/hornet-tiny-gf_8xb128_in1k.py + - Name: hornet-small_3rdparty_in1k + Metadata: + FLOPs: 8830000000 # 8.83G + Parameters: 49530000 # 49.53M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.79 + Top 5 Accuracy: 96.75 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small_3rdparty_in1k_20220915-5935f60f.pth + Config: configs/hornet/hornet-small_8xb64_in1k.py + - Name: hornet-small-gf_3rdparty_in1k + Metadata: + FLOPs: 8710000000 # 8.71G + Parameters: 50400000 # 50.4M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.98 + Top 5 Accuracy: 96.77 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small-gf_3rdparty_in1k_20220915-649ca492.pth + Config: configs/hornet/hornet-small-gf_8xb64_in1k.py + - Name: hornet-base_3rdparty_in1k + Metadata: + FLOPs: 15590000000 # 15.59G + Parameters: 87260000 # 87.26M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.24 + Top 5 Accuracy: 96.94 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base_3rdparty_in1k_20220915-a06176bb.pth + Config: configs/hornet/hornet-base_8xb64_in1k.py + - Name: hornet-base-gf_3rdparty_in1k + Metadata: + FLOPs: 15420000000 # 15.42G + Parameters: 88420000 # 88.42M + In 
Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.32 + Top 5 Accuracy: 96.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base-gf_3rdparty_in1k_20220915-82c06fa7.pth + Config: configs/hornet/hornet-base-gf_8xb64_in1k.py diff --git a/configs/hrnet/README.md b/configs/hrnet/README.md new file mode 100644 index 0000000..0a30ccd --- /dev/null +++ b/configs/hrnet/README.md @@ -0,0 +1,44 @@ +# HRNet + +> [Deep High-Resolution Representation Learning for Visual Recognition](https://arxiv.org/abs/1908.07919v2) + + + +## Abstract + +High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions *in series* (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams *in parallel*; (ii) Repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. + +
+
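+As a toy illustration of the "parallel streams plus repeated fusion" idea above, the
+sketch below keeps a high-resolution and a low-resolution stream and exchanges
+information between them once. This is a hedged simplification, not the actual HRNet
+module: the real network keeps up to four streams built from residual blocks, and the
+names and channel widths here are made up for illustration.
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class TwoStreamExchange(nn.Module):
+    """Hypothetical two-resolution exchange unit in the spirit of HRNet."""
+
+    def __init__(self, c_high=32, c_low=64):
+        super().__init__()
+        self.high = nn.Conv2d(c_high, c_high, 3, padding=1)  # high-res stream
+        self.low = nn.Conv2d(c_low, c_low, 3, padding=1)     # low-res stream
+        self.low2high = nn.Conv2d(c_low, c_high, 1)          # fuse low -> high
+        self.high2low = nn.Conv2d(
+            c_high, c_low, 3, stride=2, padding=1)           # fuse high -> low
+
+    def forward(self, x_high, x_low):
+        h, l = self.high(x_high), self.low(x_low)
+        # each stream receives the (resampled) features of the other stream
+        h_out = h + F.interpolate(
+            self.low2high(l), size=h.shape[2:], mode='nearest')
+        l_out = l + self.high2low(h)
+        return h_out, l_out
+
+
+h, l = torch.randn(1, 32, 56, 56), torch.randn(1, 64, 28, 28)
+h_out, l_out = TwoStreamExchange()(h, l)
+print(h_out.shape, l_out.shape)  # the high-resolution path stays at 56x56 throughout
+```
+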
+ +## Results and models + +## ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------------: | :-------------------------------------------------------------------------: | +| HRNet-W18\* | 21.30 | 4.33 | 76.75 | 93.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth) | +| HRNet-W30\* | 37.71 | 8.17 | 78.19 | 94.22 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w30_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w30_3rdparty_8xb32_in1k_20220120-8aa3832f.pth) | +| HRNet-W32\* | 41.23 | 8.99 | 78.44 | 94.19 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w32_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w32_3rdparty_8xb32_in1k_20220120-c394f1ab.pth) | +| HRNet-W40\* | 57.55 | 12.77 | 78.94 | 94.47 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w40_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w40_3rdparty_8xb32_in1k_20220120-9a2dbfc5.pth) | +| HRNet-W44\* | 67.06 | 14.96 | 78.88 | 94.37 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w44_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w44_3rdparty_8xb32_in1k_20220120-35d07f73.pth) | +| HRNet-W48\* | 77.47 | 17.36 | 79.32 | 94.52 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32_in1k_20220120-e555ef50.pth) | +| HRNet-W64\* | 128.06 | 29.00 | 79.46 | 94.65 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w64_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w64_3rdparty_8xb32_in1k_20220120-19126642.pth) | +| HRNet-W18 (ssld)\* | 21.30 | 4.33 | 81.06 | 95.70 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-ssld_in1k_20220120-455f69ea.pth) | +| HRNet-W48 (ssld)\* | 77.47 | 17.36 | 83.63 | 96.79 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-ssld_in1k_20220120-d0459c38.pth) | + +*Models with * are converted from the [official repo](https://github.com/HRNet/HRNet-Image-Classification). The config files of these models are only for inference. 
We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +``` +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal = {TPAMI} + year={2019} +} +``` diff --git a/configs/hrnet/hrnet-w18_4xb32_in1k.py b/configs/hrnet/hrnet-w18_4xb32_in1k.py new file mode 100644 index 0000000..a84fe67 --- /dev/null +++ b/configs/hrnet/hrnet-w18_4xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w18.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/hrnet/hrnet-w30_4xb32_in1k.py b/configs/hrnet/hrnet-w30_4xb32_in1k.py new file mode 100644 index 0000000..d2a9c0d --- /dev/null +++ b/configs/hrnet/hrnet-w30_4xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w30.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/hrnet/hrnet-w32_4xb32_in1k.py b/configs/hrnet/hrnet-w32_4xb32_in1k.py new file mode 100644 index 0000000..91380a9 --- /dev/null +++ b/configs/hrnet/hrnet-w32_4xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w32.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/hrnet/hrnet-w40_4xb32_in1k.py b/configs/hrnet/hrnet-w40_4xb32_in1k.py new file mode 100644 index 0000000..5d35cec --- /dev/null +++ b/configs/hrnet/hrnet-w40_4xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w40.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/hrnet/hrnet-w44_4xb32_in1k.py b/configs/hrnet/hrnet-w44_4xb32_in1k.py new file mode 100644 index 0000000..ce6bb41 --- /dev/null +++ b/configs/hrnet/hrnet-w44_4xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w44.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/hrnet/hrnet-w48_4xb32_in1k.py b/configs/hrnet/hrnet-w48_4xb32_in1k.py new file mode 100644 index 0000000..6943892 --- /dev/null +++ b/configs/hrnet/hrnet-w48_4xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w48.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/hrnet/hrnet-w64_4xb32_in1k.py b/configs/hrnet/hrnet-w64_4xb32_in1k.py new file mode 100644 index 0000000..0009bc6 --- /dev/null +++ b/configs/hrnet/hrnet-w64_4xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w64.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/hrnet/metafile.yml b/configs/hrnet/metafile.yml new file mode 100644 index 0000000..64fe142 --- /dev/null +++ b/configs/hrnet/metafile.yml @@ -0,0 +1,162 @@ +Collections: + - Name: HRNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - Batch 
Normalization + - Convolution + - ReLU + - Residual Connection + Paper: + URL: https://arxiv.org/abs/1908.07919v2 + Title: "Deep High-Resolution Representation Learning for Visual Recognition" + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/hrnet.py + Version: v0.20.1 + +Models: + - Name: hrnet-w18_3rdparty_8xb32_in1k + Metadata: + FLOPs: 4330397932 + Parameters: 21295164 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.75 + Top 5 Accuracy: 93.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth + Config: configs/hrnet/hrnet-w18_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33cMkPimlmClRvmpw + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w30_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8168305684 + Parameters: 37708380 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.19 + Top 5 Accuracy: 94.22 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w30_3rdparty_8xb32_in1k_20220120-8aa3832f.pth + Config: configs/hrnet/hrnet-w30_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33cQoACCEfrzcSaVI + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w32_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8986267584 + Parameters: 41228840 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.44 + Top 5 Accuracy: 94.19 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w32_3rdparty_8xb32_in1k_20220120-c394f1ab.pth + Config: configs/hrnet/hrnet-w32_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33dYBMemi9xOUFR0w + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w40_3rdparty_8xb32_in1k + Metadata: + FLOPs: 12767574064 + Parameters: 57553320 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.94 + Top 5 Accuracy: 94.47 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w40_3rdparty_8xb32_in1k_20220120-9a2dbfc5.pth + Config: configs/hrnet/hrnet-w40_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33ck0gvo5jfoWBOPo + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w44_3rdparty_8xb32_in1k + Metadata: + FLOPs: 14963902632 + Parameters: 67061144 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.88 + Top 5 Accuracy: 94.37 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w44_3rdparty_8xb32_in1k_20220120-35d07f73.pth + Config: configs/hrnet/hrnet-w44_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33czZQ0woUb980gRs + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w48_3rdparty_8xb32_in1k + Metadata: + FLOPs: 17364014752 + Parameters: 77466024 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.32 + Top 5 Accuracy: 94.52 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32_in1k_20220120-e555ef50.pth + Config: configs/hrnet/hrnet-w48_4xb32_in1k.py + Converted From: + Weights: 
https://1drv.ms/u/s!Aus8VCZ_C_33dKvqI6pBZlifgJk + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w64_3rdparty_8xb32_in1k + Metadata: + FLOPs: 29002298752 + Parameters: 128056104 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.46 + Top 5 Accuracy: 94.65 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w64_3rdparty_8xb32_in1k_20220120-19126642.pth + Config: configs/hrnet/hrnet-w64_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33gQbJsUPTIj3rQu99 + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w18_3rdparty_8xb32-ssld_in1k + Metadata: + FLOPs: 4330397932 + Parameters: 21295164 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.06 + Top 5 Accuracy: 95.7 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-ssld_in1k_20220120-455f69ea.pth + Config: configs/hrnet/hrnet-w18_4xb32_in1k.py + Converted From: + Weights: https://github.com/HRNet/HRNet-Image-Classification/releases/download/PretrainedWeights/HRNet_W18_C_ssld_pretrained.pth + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w48_3rdparty_8xb32-ssld_in1k + Metadata: + FLOPs: 17364014752 + Parameters: 77466024 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.63 + Top 5 Accuracy: 96.79 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-ssld_in1k_20220120-d0459c38.pth + Config: configs/hrnet/hrnet-w48_4xb32_in1k.py + Converted From: + Weights: https://github.com/HRNet/HRNet-Image-Classification/releases/download/PretrainedWeights/HRNet_W48_C_ssld_pretrained.pth + Code: https://github.com/HRNet/HRNet-Image-Classification diff --git a/configs/lenet/README.md b/configs/lenet/README.md new file mode 100644 index 0000000..2cd68ea --- /dev/null +++ b/configs/lenet/README.md @@ -0,0 +1,28 @@ +# LeNet + +> [Backpropagation Applied to Handwritten Zip Code Recognition](https://ieeexplore.ieee.org/document/6795724) + + + +## Abstract + +The ability of learning networks to generalize can be greatly enhanced by providing constraints from the task domain. This paper demonstrates how such constraints can be integrated into a backpropagation network through the architecture of the network. This approach has been successfully applied to the recognition of handwritten zip code digits provided by the U.S. Postal Service. A single network learns the entire recognition operation, going from the normalized image of the character to the final classification. + +
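The network sketched below illustrates the kind of constrained, weight-sharing architecture the abstract describes, sized for the 32x32 inputs used by the `lenet5_mnist.py` config added later in this diff. It is a minimal PyTorch sketch of a classic LeNet-5-style model, not a copy of the `LeNet5` backbone registered in this repository; the layer sizes follow the original paper's C1-S2-C3-S4-C5-F6 layout.

```python
import torch
import torch.nn as nn


class LeNet5Sketch(nn.Module):
    """Illustrative LeNet-5-style network for 32x32 grayscale digits.

    A sketch of the classic architecture, not the `LeNet5` backbone
    used by the configs in this diff.
    """

    def __init__(self, num_classes: int = 10):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=5),     # 32x32 -> 28x28
            nn.Tanh(),
            nn.AvgPool2d(2),                    # 28x28 -> 14x14
            nn.Conv2d(6, 16, kernel_size=5),    # 14x14 -> 10x10
            nn.Tanh(),
            nn.AvgPool2d(2),                    # 10x10 -> 5x5
            nn.Conv2d(16, 120, kernel_size=5),  # 5x5 -> 1x1
            nn.Tanh(),
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(120, 84),
            nn.Tanh(),
            nn.Linear(84, num_classes),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.classifier(self.features(x))


if __name__ == '__main__':
    logits = LeNet5Sketch()(torch.randn(2, 1, 32, 32))
    print(logits.shape)  # torch.Size([2, 10])
```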
+ +## Citation + +``` +@ARTICLE{6795724, + author={Y. {LeCun} and B. {Boser} and J. S. {Denker} and D. {Henderson} and R. E. {Howard} and W. {Hubbard} and L. D. {Jackel}}, + journal={Neural Computation}, + title={Backpropagation Applied to Handwritten Zip Code Recognition}, + year={1989}, + volume={1}, + number={4}, + pages={541-551}, + doi={10.1162/neco.1989.1.4.541}} +} +``` diff --git a/configs/lenet/lenet5_mnist.py b/configs/lenet/lenet5_mnist.py new file mode 100644 index 0000000..7286b79 --- /dev/null +++ b/configs/lenet/lenet5_mnist.py @@ -0,0 +1,59 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='LeNet5', num_classes=10), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) +# dataset settings +dataset_type = 'MNIST' +img_norm_cfg = dict(mean=[33.46], std=[78.87], to_rgb=True) +train_pipeline = [ + dict(type='Resize', size=32), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=32), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + samples_per_gpu=128, + workers_per_gpu=2, + train=dict( + type=dataset_type, data_prefix='data/mnist', pipeline=train_pipeline), + val=dict( + type=dataset_type, data_prefix='data/mnist', pipeline=test_pipeline), + test=dict( + type=dataset_type, data_prefix='data/mnist', pipeline=test_pipeline)) +evaluation = dict( + interval=5, metric='accuracy', metric_options={'topk': (1, )}) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[15]) +# checkpoint saving +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=150, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=5) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mnist/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/configs/mlp_mixer/README.md b/configs/mlp_mixer/README.md new file mode 100644 index 0000000..5ec9887 --- /dev/null +++ b/configs/mlp_mixer/README.md @@ -0,0 +1,37 @@ +# Mlp-Mixer + +> [MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601) + + + +## Abstract + +Convolutional Neural Networks (CNNs) are the go-to model for computer vision. Recently, attention-based networks, such as the Vision Transformer, have also become popular. In this paper we show that while convolutions and attention are both sufficient for good performance, neither of them are necessary. We present MLP-Mixer, an architecture based exclusively on multi-layer perceptrons (MLPs). MLP-Mixer contains two types of layers: one with MLPs applied independently to image patches (i.e. "mixing" the per-location features), and one with MLPs applied across patches (i.e. "mixing" spatial information). When trained on large datasets, or with modern regularization schemes, MLP-Mixer attains competitive scores on image classification benchmarks, with pre-training and inference cost comparable to state-of-the-art models. 
We hope that these results spark further research beyond the realms of well established CNNs and Transformers. + +
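As a rough illustration of the two layer types the abstract describes, the sketch below implements one Mixer block in plain PyTorch: a token-mixing MLP applied across patches, then a channel-mixing MLP applied per patch, each wrapped in LayerNorm and a residual connection. This is a simplified sketch, not the MLP-Mixer backbone in this repository; the Mixer-B/16-like sizes used in the example (196 patches, width 768) are only for illustration.

```python
import torch
import torch.nn as nn


class MlpBlock(nn.Module):
    def __init__(self, dim: int, hidden_dim: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim))

    def forward(self, x):
        return self.net(x)


class MixerBlockSketch(nn.Module):
    """One Mixer layer: token-mixing MLP across patches, then a
    channel-mixing MLP per patch, each with LayerNorm and a residual."""

    def __init__(self, num_patches: int, dim: int,
                 tokens_hidden: int = 384, channels_hidden: int = 3072):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.token_mlp = MlpBlock(num_patches, tokens_hidden)
        self.norm2 = nn.LayerNorm(dim)
        self.channel_mlp = MlpBlock(dim, channels_hidden)

    def forward(self, x):  # x: (batch, num_patches, dim)
        # mix information across patches (operate along the patch axis)
        y = self.norm1(x).transpose(1, 2)            # (B, dim, patches)
        x = x + self.token_mlp(y).transpose(1, 2)    # back to (B, patches, dim)
        # mix information across channels, independently per patch
        x = x + self.channel_mlp(self.norm2(x))
        return x


if __name__ == '__main__':
    block = MixerBlockSketch(num_patches=196, dim=768)  # Mixer-B/16-like sizes
    print(block(torch.randn(2, 196, 768)).shape)  # torch.Size([2, 196, 768])
```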
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :----------------------------------------------------------------------------: | +| Mixer-B/16\* | 59.88 | 12.61 | 76.68 | 92.25 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth) | +| Mixer-L/16\* | 208.2 | 44.57 | 72.34 | 88.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth) | + +*Models with * are converted from [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +``` +@misc{tolstikhin2021mlpmixer, + title={MLP-Mixer: An all-MLP Architecture for Vision}, + author={Ilya Tolstikhin and Neil Houlsby and Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Thomas Unterthiner and Jessica Yung and Andreas Steiner and Daniel Keysers and Jakob Uszkoreit and Mario Lucic and Alexey Dosovitskiy}, + year={2021}, + eprint={2105.01601}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/mlp_mixer/metafile.yml b/configs/mlp_mixer/metafile.yml new file mode 100644 index 0000000..e8efa08 --- /dev/null +++ b/configs/mlp_mixer/metafile.yml @@ -0,0 +1,50 @@ +Collections: + - Name: MLP-Mixer + Metadata: + Training Data: ImageNet-1k + Architecture: + - MLP + - Layer Normalization + - Dropout + Paper: + URL: https://arxiv.org/abs/2105.01601 + Title: "MLP-Mixer: An all-MLP Architecture for Vision" + README: configs/mlp_mixer/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.18.0/mmcls/models/backbones/mlp_mixer.py + Version: v0.18.0 + +Models: + - Name: mlp-mixer-base-p16_3rdparty_64xb64_in1k + In Collection: MLP-Mixer + Config: configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py + Metadata: + FLOPs: 12610000000 # 12.61 G + Parameters: 59880000 # 59.88 M + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.68 + Top 5 Accuracy: 92.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py#L70 + + - Name: mlp-mixer-large-p16_3rdparty_64xb64_in1k + In Collection: MLP-Mixer + Config: configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py + Metadata: + FLOPs: 44570000000 # 44.57 G + Parameters: 208200000 # 208.2 M + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.34 + Top 5 Accuracy: 88.02 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth + Converted From: + Weights: 
https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py#L73 diff --git a/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py b/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py new file mode 100644 index 0000000..e35dae5 --- /dev/null +++ b/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/mlp_mixer_base_patch16.py', + '../_base_/datasets/imagenet_bs64_mixer_224.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py', +] diff --git a/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py b/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py new file mode 100644 index 0000000..459563c --- /dev/null +++ b/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/mlp_mixer_large_patch16.py', + '../_base_/datasets/imagenet_bs64_mixer_224.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py', +] diff --git a/configs/mobilenet_v2/README.md b/configs/mobilenet_v2/README.md new file mode 100644 index 0000000..675c8dd --- /dev/null +++ b/configs/mobilenet_v2/README.md @@ -0,0 +1,38 @@ +# MobileNet V2 + +> [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) + + + +## Abstract + +In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3 which we call Mobile DeepLabv3. + +The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers opposite to traditional residual models which use expanded representations in the input an MobileNetV2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on Imagenet classification, COCO object detection, VOC image segmentation. We evaluate the trade-offs between accuracy, and number of operations measured by multiply-adds (MAdd), as well as the number of parameters + +
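The inverted residual block described above can be sketched in a few lines of PyTorch: a 1x1 expansion, a 3x3 depthwise convolution, and a linear 1x1 projection back to a thin bottleneck, with an identity shortcut when the stride is 1 and the channel counts match. This is an illustrative sketch, not the MobileNetV2 backbone implementation used by the configs below.

```python
import torch
import torch.nn as nn


class InvertedResidualSketch(nn.Module):
    """Sketch of the MobileNetV2 building block: 1x1 expansion,
    3x3 depthwise convolution, then a linear (no ReLU) 1x1 projection
    back to a thin bottleneck, with a residual when shapes allow."""

    def __init__(self, in_ch: int, out_ch: int, stride: int = 1, expand: int = 6):
        super().__init__()
        hidden = in_ch * expand
        self.use_residual = stride == 1 and in_ch == out_ch
        self.block = nn.Sequential(
            # 1x1 pointwise expansion
            nn.Conv2d(in_ch, hidden, 1, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # 3x3 depthwise convolution
            nn.Conv2d(hidden, hidden, 3, stride=stride, padding=1,
                      groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # linear 1x1 projection (no non-linearity in the narrow layer)
            nn.Conv2d(hidden, out_ch, 1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_residual else out


if __name__ == '__main__':
    y = InvertedResidualSketch(32, 32)(torch.randn(2, 32, 56, 56))
    print(y.shape)  # torch.Size([2, 32, 56, 56])
```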
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :----------------------------------------------------------------------------: | +| MobileNet V2 | 3.5 | 0.319 | 71.86 | 90.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.log.json) | + +## Citation + +``` +@INPROCEEDINGS{8578572, + author={M. {Sandler} and A. {Howard} and M. {Zhu} and A. {Zhmoginov} and L. {Chen}}, + booktitle={2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + title={MobileNetV2: Inverted Residuals and Linear Bottlenecks}, + year={2018}, + volume={}, + number={}, + pages={4510-4520}, + doi={10.1109/CVPR.2018.00474}} +} +``` diff --git a/configs/mobilenet_v2/metafile.yml b/configs/mobilenet_v2/metafile.yml new file mode 100644 index 0000000..e16557f --- /dev/null +++ b/configs/mobilenet_v2/metafile.yml @@ -0,0 +1,34 @@ +Collections: + - Name: MobileNet V2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 300 + Batch Size: 256 + Architecture: + - MobileNet V2 + Paper: + URL: https://arxiv.org/abs/1801.04381 + Title: "MobileNetV2: Inverted Residuals and Linear Bottlenecks" + README: configs/mobilenet_v2/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/mobilenet_v2.py#L101 + Version: v0.15.0 + +Models: + - Name: mobilenet-v2_8xb32_in1k + Metadata: + FLOPs: 319000000 + Parameters: 3500000 + In Collection: MobileNet V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.86 + Top 5 Accuracy: 90.42 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth + Config: configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py diff --git a/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py b/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py new file mode 100644 index 0000000..afd2d97 --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/mobilenet_v2_1x.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_epochstep.py', + '../_base_/default_runtime.py' +] diff --git a/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py b/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py new file mode 100644 index 0000000..26c2b6d --- /dev/null +++ b/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'mobilenet-v2_8xb32_in1k.py' + +_deprecation_ = dict( + expected='mobilenet-v2_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/mobilenet_v3/README.md b/configs/mobilenet_v3/README.md new file mode 100644 index 0000000..737c4d3 --- /dev/null +++ b/configs/mobilenet_v3/README.md @@ -0,0 +1,36 @@ +# MobileNet V3 + +> [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244) + + + +## Abstract + +We present the next generation of MobileNets based on a 
combination of complementary search techniques as well as a novel architecture design. MobileNetV3 is tuned to mobile phone CPUs through a combination of hardware-aware network architecture search (NAS) complemented by the NetAdapt algorithm and then subsequently improved through novel architecture advances. This paper starts the exploration of how automated search algorithms and network design can work together to harness complementary approaches improving the overall state of the art. Through this process we create two new MobileNet models for release: MobileNetV3-Large and MobileNetV3-Small which are targeted for high and low resource use cases. These models are then adapted and applied to the tasks of object detection and semantic segmentation. For the task of semantic segmentation (or any dense pixel prediction), we propose a new efficient segmentation decoder Lite Reduced Atrous Spatial Pyramid Pooling (LR-ASPP). We achieve new state of the art results for mobile classification, detection and segmentation. MobileNetV3-Large is 3.2% more accurate on ImageNet classification while reducing latency by 15% compared to MobileNetV2. MobileNetV3-Small is 4.6% more accurate while reducing latency by 5% compared to MobileNetV2. MobileNetV3-Large detection is 25% faster at roughly the same accuracy as MobileNetV2 on COCO detection. MobileNetV3-Large LR-ASPP is 30% faster than MobileNetV2 R-ASPP at similar accuracy for Cityscapes segmentation. + +
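Besides the searched macro-architecture, MobileNetV3 blocks rely on cheap hand-designed components such as the hard-swish non-linearity and a squeeze-and-excite unit. The snippet below is a hedged sketch of those two pieces in plain PyTorch; it is not the MobileNetV3 backbone in this repository, and the reduction ratio of 4 is only an illustrative default.

```python
import torch
import torch.nn as nn


def hard_sigmoid(x: torch.Tensor) -> torch.Tensor:
    # piecewise-linear approximation of the sigmoid: ReLU6(x + 3) / 6
    return torch.clamp(x + 3, 0, 6) / 6


class HardSwish(nn.Module):
    # h-swish(x) = x * ReLU6(x + 3) / 6
    def forward(self, x):
        return x * hard_sigmoid(x)


class SqueezeExciteSketch(nn.Module):
    """Sketch of a squeeze-and-excite unit as used inside MobileNetV3 blocks:
    global average pooling, a small bottleneck, then channel re-weighting."""

    def __init__(self, channels: int, reduction: int = 4):
        super().__init__()
        squeezed = max(channels // reduction, 8)
        self.fc1 = nn.Conv2d(channels, squeezed, 1)
        self.fc2 = nn.Conv2d(squeezed, channels, 1)

    def forward(self, x):
        scale = x.mean(dim=(2, 3), keepdim=True)   # squeeze
        scale = torch.relu(self.fc1(scale))
        scale = hard_sigmoid(self.fc2(scale))      # excite
        return x * scale                           # re-weight channels


if __name__ == '__main__':
    x = torch.randn(2, 40, 28, 28)
    print(SqueezeExciteSketch(40)(x).shape)  # torch.Size([2, 40, 28, 28])
    print(HardSwish()(x).shape)
```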
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------------: | :------------------------------------------------------------------------: | +| MobileNetV3-Small\* | 2.54 | 0.06 | 67.66 | 87.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth) | +| MobileNetV3-Large\* | 5.48 | 0.23 | 74.04 | 91.34 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth) | + +*Models with * are converted from [torchvision](https://pytorch.org/vision/stable/_modules/torchvision/models/mobilenetv3.html). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +``` +@inproceedings{Howard_2019_ICCV, + author = {Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. and Adam, Hartwig}, + title = {Searching for MobileNetV3}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` diff --git a/configs/mobilenet_v3/metafile.yml b/configs/mobilenet_v3/metafile.yml new file mode 100644 index 0000000..09c4732 --- /dev/null +++ b/configs/mobilenet_v3/metafile.yml @@ -0,0 +1,47 @@ +Collections: + - Name: MobileNet V3 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - RMSprop with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 600 + Batch Size: 1024 + Architecture: + - MobileNet V3 + Paper: + URL: https://arxiv.org/abs/1905.02244 + Title: Searching for MobileNetV3 + README: configs/mobilenet_v3/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/mobilenet_v3.py + Version: v0.15.0 + +Models: + - Name: mobilenet_v3_small_imagenet + Metadata: + FLOPs: 60000000 + Parameters: 2540000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 67.66 + Top 5 Accuracy: 87.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth + Config: configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py + - Name: mobilenet_v3_large_imagenet + Metadata: + FLOPs: 230000000 + Parameters: 5480000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.04 + Top 5 Accuracy: 91.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth + Config: configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py diff --git a/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py b/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py new file mode 100644 index 0000000..985ef52 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py @@ -0,0 +1,158 @@ +# Refer to 
https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification +# ---------------------------- +# -[x] auto_augment='imagenet' +# -[x] batch_size=128 (per gpu) +# -[x] epochs=600 +# -[x] opt='rmsprop' +# -[x] lr=0.064 +# -[x] eps=0.0316 +# -[x] alpha=0.9 +# -[x] weight_decay=1e-05 +# -[x] momentum=0.9 +# -[x] lr_gamma=0.973 +# -[x] lr_step_size=2 +# -[x] nproc_per_node=8 +# -[x] random_erase=0.2 +# -[x] workers=16 (workers_per_gpu) +# - modify: RandomErasing use RE-M instead of RE-0 + +_base_ = [ + '../_base_/models/mobilenet_v3_large_imagenet.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/default_runtime.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +policies = [ + [ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], + [ + dict(type='Posterize', bits=5, prob=0.6), + dict(type='Posterize', bits=5, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8) + ], + [ + dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Posterize', bits=6, prob=0.8), + dict(type='Equalize', prob=1.)], + [ + dict(type='Rotate', angle=10., prob=0.2), + dict(type='Solarize', thr=256 / 9, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.6), + dict(type='Posterize', bits=5, prob=0.4) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0., prob=0.4) + ], + [ + dict(type='Rotate', angle=30., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Equalize', prob=0.0), + dict(type='Equalize', prob=0.8)], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0.2, prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0.8, prob=0.8), + dict(type='Solarize', thr=256 / 9 * 2, prob=0.8) + ], + [ + dict(type='Sharpness', magnitude=0.7, prob=0.4), + dict(type='Invert', prob=0.6) + ], + [ + dict( + type='Shear', + magnitude=0.3 / 9 * 5, + prob=0.6, + direction='horizontal'), + dict(type='Equalize', prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) 
+ ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies=policies), + dict( + type='RandomErasing', + erase_prob=0.2, + mode='const', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean']), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +data = dict( + samples_per_gpu=128, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline)) +evaluation = dict(interval=10, metric='accuracy') + +# optimizer +optimizer = dict( + type='RMSprop', + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=2, gamma=0.973, by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=600) diff --git a/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py b/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py new file mode 100644 index 0000000..06e63da --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/mobilenet-v3-small_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] + +lr_config = dict(policy='step', step=[120, 170]) +runner = dict(type='EpochBasedRunner', max_epochs=200) diff --git a/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py b/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py new file mode 100644 index 0000000..2612166 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py @@ -0,0 +1,158 @@ +# Refer to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification +# ---------------------------- +# -[x] auto_augment='imagenet' +# -[x] batch_size=128 (per gpu) +# -[x] epochs=600 +# -[x] opt='rmsprop' +# -[x] lr=0.064 +# -[x] eps=0.0316 +# -[x] alpha=0.9 +# -[x] weight_decay=1e-05 +# -[x] momentum=0.9 +# -[x] lr_gamma=0.973 +# -[x] lr_step_size=2 +# -[x] nproc_per_node=8 +# -[x] random_erase=0.2 +# -[x] workers=16 (workers_per_gpu) +# - modify: RandomErasing use RE-M instead of RE-0 + +_base_ = [ + '../_base_/models/mobilenet_v3_small_imagenet.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/default_runtime.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +policies = [ + [ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], + [ + dict(type='Posterize', bits=5, prob=0.6), + dict(type='Posterize', bits=5, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8) + ], + [ + dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Posterize', bits=6, prob=0.8), + dict(type='Equalize', prob=1.)], + [ + dict(type='Rotate', angle=10., prob=0.2), + dict(type='Solarize', thr=256 / 9, prob=0.6) + ], + [ + dict(type='Equalize', 
prob=0.6), + dict(type='Posterize', bits=5, prob=0.4) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0., prob=0.4) + ], + [ + dict(type='Rotate', angle=30., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Equalize', prob=0.0), + dict(type='Equalize', prob=0.8)], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0.2, prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0.8, prob=0.8), + dict(type='Solarize', thr=256 / 9 * 2, prob=0.8) + ], + [ + dict(type='Sharpness', magnitude=0.7, prob=0.4), + dict(type='Invert', prob=0.6) + ], + [ + dict( + type='Shear', + magnitude=0.3 / 9 * 5, + prob=0.6, + direction='horizontal'), + dict(type='Equalize', prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies=policies), + dict( + type='RandomErasing', + erase_prob=0.2, + mode='const', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean']), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +data = dict( + samples_per_gpu=128, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline)) +evaluation = dict(interval=10, metric='accuracy') + +# optimizer +optimizer = dict( + type='RMSprop', + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=2, gamma=0.973, by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=600) diff --git a/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py b/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py new file mode 100644 index 0000000..93e89a4 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'mobilenet-v3-large_8xb32_in1k.py' + +_deprecation_ = dict( + expected='mobilenet-v3-large_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/mobilenet_v3/mobilenet_v3_small_cifar.py b/configs/mobilenet_v3/mobilenet_v3_small_cifar.py new file mode 100644 index 0000000..c09bd1c --- /dev/null +++ b/configs/mobilenet_v3/mobilenet_v3_small_cifar.py @@ -0,0 +1,6 @@ +_base_ = 'mobilenet-v3-small_8xb16_cifar10.py' + +_deprecation_ = dict( + expected='mobilenet-v3-small_8xb16_cifar10.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py b/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py new file mode 
100644 index 0000000..15debd0 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'mobilenet-v3-small_8xb32_in1k.py' + +_deprecation_ = dict( + expected='mobilenet-v3-small_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/mvit/README.md b/configs/mvit/README.md new file mode 100644 index 0000000..6f5c560 --- /dev/null +++ b/configs/mvit/README.md @@ -0,0 +1,44 @@ +# MViT V2 + +> [MViTv2: Improved Multiscale Vision Transformers for Classification and Detection](http://openaccess.thecvf.com//content/CVPR2022/papers/Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf) + + + +## Abstract + +In this paper, we study Multiscale Vision Transformers (MViTv2) as a unified architecture for image and video +classification, as well as object detection. We present an improved version of MViT that incorporates +decomposed relative positional embeddings and residual pooling connections. We instantiate this architecture +in five sizes and evaluate it for ImageNet classification, COCO detection and Kinetics video recognition where +it outperforms prior work. We further compare MViTv2s' pooling attention to window attention mechanisms where +it outperforms the latter in accuracy/compute. Without bells-and-whistles, MViTv2 has state-of-the-art +performance in 3 domains: 88.8% accuracy on ImageNet classification, 58.7 boxAP on COCO object detection as +well as 86.1% on Kinetics-400 video classification. + +
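To make the pooling-attention comparison above concrete, the snippet below sketches a single-head attention layer in which keys and values are spatially downsampled by pooling before the attention product, and the (unpooled) query is added back to the output as a stand-in for the paper's residual pooling connection. It is a deliberately simplified sketch: the actual MViTv2 blocks use strided convolutional pooling, multiple heads, and decomposed relative position embeddings, none of which are reproduced here.

```python
import torch
import torch.nn as nn


class PoolingAttentionSketch(nn.Module):
    """Greatly simplified, single-head sketch of pooling attention:
    keys and values are spatially downsampled before attention, and the
    query is added back to the output (a residual pooling connection)."""

    def __init__(self, dim: int, kv_stride: int = 2):
        super().__init__()
        self.qkv = nn.Linear(dim, dim * 3)
        self.proj = nn.Linear(dim, dim)
        self.pool = nn.AvgPool2d(kv_stride, kv_stride)
        self.scale = dim ** -0.5

    def _pool_tokens(self, t, hw):
        h, w = hw
        b, n, c = t.shape
        t = t.transpose(1, 2).reshape(b, c, h, w)
        t = self.pool(t)                          # reduce spatial resolution
        return t.flatten(2).transpose(1, 2)       # (B, n', C)

    def forward(self, x, hw):
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        k = self._pool_tokens(k, hw)
        v = self._pool_tokens(v, hw)
        attn = torch.softmax(q @ k.transpose(-2, -1) * self.scale, dim=-1)
        out = attn @ v + q    # residual pooling connection (q left unpooled here)
        return self.proj(out)


if __name__ == '__main__':
    x = torch.randn(2, 14 * 14, 96)
    print(PoolingAttentionSketch(96)(x, (14, 14)).shape)  # torch.Size([2, 196, 96])
```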
+ +## Results and models + +### ImageNet-1k + +| Model | Pretrain | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------: | :----------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------: | :---------------------------------------------------------------------: | +| MViTv2-tiny\* | From scratch | 24.17 | 4.70 | 82.33 | 96.15 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-tiny_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef.pth) | +| MViTv2-small\* | From scratch | 34.87 | 7.00 | 83.63 | 96.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-small_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-small_3rdparty_in1k_20220722-986bd741.pth) | +| MViTv2-base\* | From scratch | 51.47 | 10.20 | 84.34 | 96.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17.pth) | +| MViTv2-large\* | From scratch | 217.99 | 42.10 | 85.25 | 97.14 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-large_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-large_3rdparty_in1k_20220722-2b57b983.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/mvit). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +```bibtex +@inproceedings{li2021improved, + title={MViTv2: Improved multiscale vision transformers for classification and detection}, + author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph}, + booktitle={CVPR}, + year={2022} +} +``` diff --git a/configs/mvit/metafile.yml b/configs/mvit/metafile.yml new file mode 100644 index 0000000..8d46a0c --- /dev/null +++ b/configs/mvit/metafile.yml @@ -0,0 +1,95 @@ +Collections: + - Name: MViT V2 + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - GELU + - Layer Normalization + - Scaled Dot-Product Attention + - Attention Pooling + Paper: + URL: http://openaccess.thecvf.com//content/CVPR2022/papers/Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf + Title: 'MViTv2: Improved Multiscale Vision Transformers for Classification and Detection' + README: configs/mvit/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.24.0/mmcls/models/backbones/mvit.py + Version: v0.24.0 + +Models: + - Name: mvitv2-tiny_3rdparty_in1k + In Collection: MViT V2 + Metadata: + FLOPs: 4700000000 + Parameters: 24173320 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 82.33 + Top 5 Accuracy: 96.15 + Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth + Code: https://github.com/facebookresearch/mvit + Config: configs/mvit/mvitv2-tiny_8xb256_in1k.py + + - 
Name: mvitv2-small_3rdparty_in1k + In Collection: MViT V2 + Metadata: + FLOPs: 7000000000 + Parameters: 34870216 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 83.63 + Top 5 Accuracy: 96.51 + Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-small_3rdparty_in1k_20220722-986bd741.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth + Code: https://github.com/facebookresearch/mvit + Config: configs/mvit/mvitv2-small_8xb256_in1k.py + + - Name: mvitv2-base_3rdparty_in1k + In Collection: MViT V2 + Metadata: + FLOPs: 10200000000 + Parameters: 51472744 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.34 + Top 5 Accuracy: 96.86 + Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth + Code: https://github.com/facebookresearch/mvit + Config: configs/mvit/mvitv2-base_8xb256_in1k.py + + - Name: mvitv2-large_3rdparty_in1k + In Collection: MViT V2 + Metadata: + FLOPs: 42100000000 + Parameters: 217992952 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.25 + Top 5 Accuracy: 97.14 + Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-large_3rdparty_in1k_20220722-2b57b983.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth + Code: https://github.com/facebookresearch/mvit + Config: configs/mvit/mvitv2-large_8xb256_in1k.py diff --git a/configs/mvit/mvitv2-base_8xb256_in1k.py b/configs/mvit/mvitv2-base_8xb256_in1k.py new file mode 100644 index 0000000..ea92cf4 --- /dev/null +++ b/configs/mvit/mvitv2-base_8xb256_in1k.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/mvit/mvitv2-base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# dataset settings +data = dict(samples_per_gpu=256) + +# schedule settings +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.rel_pos_h': dict(decay_mult=0.0), + '.rel_pos_w': dict(decay_mult=0.0) + }) + +optimizer = dict(lr=0.00025, paramwise_cfg=paramwise_cfg) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=70, + warmup_by_epoch=True) diff --git a/configs/mvit/mvitv2-large_8xb256_in1k.py b/configs/mvit/mvitv2-large_8xb256_in1k.py new file mode 100644 index 0000000..fbb81d6 --- /dev/null +++ b/configs/mvit/mvitv2-large_8xb256_in1k.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/mvit/mvitv2-large.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset settings +data = dict(samples_per_gpu=256) + +# schedule settings +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.rel_pos_h': dict(decay_mult=0.0), + '.rel_pos_w': dict(decay_mult=0.0) + }) + +optimizer = dict(lr=0.00025, paramwise_cfg=paramwise_cfg) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# learning policy +lr_config = dict( + 
policy='CosineAnnealing', + warmup='linear', + warmup_iters=70, + warmup_by_epoch=True) diff --git a/configs/mvit/mvitv2-small_8xb256_in1k.py b/configs/mvit/mvitv2-small_8xb256_in1k.py new file mode 100644 index 0000000..1803859 --- /dev/null +++ b/configs/mvit/mvitv2-small_8xb256_in1k.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/mvit/mvitv2-small.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset settings +data = dict(samples_per_gpu=256) + +# schedule settings +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.rel_pos_h': dict(decay_mult=0.0), + '.rel_pos_w': dict(decay_mult=0.0) + }) + +optimizer = dict(lr=0.00025, paramwise_cfg=paramwise_cfg) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=70, + warmup_by_epoch=True) diff --git a/configs/mvit/mvitv2-tiny_8xb256_in1k.py b/configs/mvit/mvitv2-tiny_8xb256_in1k.py new file mode 100644 index 0000000..f4b9bc4 --- /dev/null +++ b/configs/mvit/mvitv2-tiny_8xb256_in1k.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/mvit/mvitv2-tiny.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset settings +data = dict(samples_per_gpu=256) + +# schedule settings +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.rel_pos_h': dict(decay_mult=0.0), + '.rel_pos_w': dict(decay_mult=0.0) + }) + +optimizer = dict(lr=0.00025, paramwise_cfg=paramwise_cfg) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=70, + warmup_by_epoch=True) diff --git a/configs/poolformer/README.md b/configs/poolformer/README.md new file mode 100644 index 0000000..cc557e1 --- /dev/null +++ b/configs/poolformer/README.md @@ -0,0 +1,38 @@ +# PoolFormer + +> [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) + + + +## Abstract + +Transformers have shown great potential in computer vision tasks. A common belief is their attention-based token mixer module contributes most to their competence. However, recent works show the attention-based module in transformers can be replaced by spatial MLPs and the resulted models still perform quite well. Based on this observation, we hypothesize that the general architecture of the transformers, instead of the specific token mixer module, is more essential to the model's performance. To verify this, we deliberately replace the attention module in transformers with an embarrassingly simple spatial pooling operator to conduct only basic token mixing. Surprisingly, we observe that the derived model, termed as PoolFormer, achieves competitive performance on multiple computer vision tasks. For example, on ImageNet-1K, PoolFormer achieves 82.1% top-1 accuracy, surpassing well-tuned vision transformer/MLP-like baselines DeiT-B/ResMLP-B24 by 0.3%/1.1% accuracy with 35%/52% fewer parameters and 49%/61% fewer MACs. The effectiveness of PoolFormer verifies our hypothesis and urges us to initiate the concept of "MetaFormer", a general architecture abstracted from transformers without specifying the token mixer. 
Based on the extensive experiments, we argue that MetaFormer is the key player in achieving superior results for recent transformer and MLP-like models on vision tasks. This work calls for more future research dedicated to improving MetaFormer instead of focusing on the token mixer modules. Additionally, our proposed PoolFormer could serve as a starting baseline for future MetaFormer architecture design. + +
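The token mixer the abstract calls "embarrassingly simple" is just average pooling with the identity subtracted; a MetaFormer block then wraps it, together with a channel MLP, in pre-norm residual branches. The sketch below shows that structure in plain PyTorch; it omits LayerScale and other details of the PoolFormer backbone in this repository and is only meant to illustrate the idea.

```python
import torch
import torch.nn as nn


class PoolingTokenMixer(nn.Module):
    """Average pooling with the identity subtracted, so the module only
    exchanges information between neighbouring tokens."""

    def __init__(self, pool_size: int = 3):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1,
                                 padding=pool_size // 2, count_include_pad=False)

    def forward(self, x):  # x: (B, C, H, W)
        return self.pool(x) - x


class PoolFormerBlockSketch(nn.Module):
    """Sketch of a MetaFormer block with pooling as the token mixer:
    norm -> token mixer -> residual, then norm -> channel MLP -> residual."""

    def __init__(self, dim: int, mlp_ratio: int = 4):
        super().__init__()
        self.norm1 = nn.GroupNorm(1, dim)   # channel-wise norm over (H, W)
        self.mixer = PoolingTokenMixer()
        self.norm2 = nn.GroupNorm(1, dim)
        self.mlp = nn.Sequential(
            nn.Conv2d(dim, dim * mlp_ratio, 1), nn.GELU(),
            nn.Conv2d(dim * mlp_ratio, dim, 1))

    def forward(self, x):
        x = x + self.mixer(self.norm1(x))
        x = x + self.mlp(self.norm2(x))
        return x


if __name__ == '__main__':
    print(PoolFormerBlockSketch(64)(torch.randn(2, 64, 56, 56)).shape)
    # torch.Size([2, 64, 56, 56])
```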
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------------: | :-------: | :------: | :-------: | :-------: | :-----------------------------------------------------------------------: | :--------------------------------------------------------------------------: | +| PoolFormer-S12\* | 11.92 | 1.87 | 77.24 | 93.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-s12_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth) | +| PoolFormer-S24\* | 21.39 | 3.51 | 80.33 | 95.05 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-s24_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s24_3rdparty_32xb128_in1k_20220414-d7055904.pth) | +| PoolFormer-S36\* | 30.86 | 5.15 | 81.43 | 95.45 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-s36_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s36_3rdparty_32xb128_in1k_20220414-d78ff3e8.pth) | +| PoolFormer-M36\* | 56.17 | 8.96 | 82.14 | 95.71 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-m36_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth) | +| PoolFormer-M48\* | 73.47 | 11.80 | 82.51 | 95.95 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/poolformer/poolformer-m48_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth) | + +*Models with * are converted from the [official repo](https://github.com/sail-sg/poolformer). The config files of these models are only for inference. 
We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +```bibtex +@article{yu2021metaformer, + title={MetaFormer is Actually What You Need for Vision}, + author={Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng}, + journal={arXiv preprint arXiv:2111.11418}, + year={2021} +} +``` diff --git a/configs/poolformer/metafile.yml b/configs/poolformer/metafile.yml new file mode 100644 index 0000000..d94219d --- /dev/null +++ b/configs/poolformer/metafile.yml @@ -0,0 +1,99 @@ +Collections: + - Name: PoolFormer + Metadata: + Training Data: ImageNet-1k + Architecture: + - Pooling + - 1x1 Convolution + - LayerScale + Paper: + URL: https://arxiv.org/abs/2111.11418 + Title: MetaFormer is Actually What You Need for Vision + README: configs/poolformer/README.md + Code: + Version: v0.22.1 + URL: https://github.com/open-mmlab/mmclassification/blob/v0.22.1/mmcls/models/backbones/poolformer.py + +Models: + - Name: poolformer-s12_3rdparty_32xb128_in1k + Metadata: + FLOPs: 1871399424 + Parameters: 11915176 + In Collections: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.24 + Top 5 Accuracy: 93.51 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth + Config: configs/poolformer/poolformer-s12_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s12.pth.tar + Code: https://github.com/sail-sg/poolformer + - Name: poolformer-s24_3rdparty_32xb128_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3510411008 + Parameters: 21388968 + In Collections: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.33 + Top 5 Accuracy: 95.05 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s24_3rdparty_32xb128_in1k_20220414-d7055904.pth + Config: configs/poolformer/poolformer-s24_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s24.pth.tar + Code: https://github.com/sail-sg/poolformer + - Name: poolformer-s36_3rdparty_32xb128_in1k + Metadata: + FLOPs: 5149422592 + Parameters: 30862760 + In Collections: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.43 + Top 5 Accuracy: 95.45 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s36_3rdparty_32xb128_in1k_20220414-d78ff3e8.pth + Config: configs/poolformer/poolformer-s36_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s36.pth.tar + Code: https://github.com/sail-sg/poolformer + - Name: poolformer-m36_3rdparty_32xb128_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 8960175744 + Parameters: 56172520 + In Collections: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.14 + Top 5 Accuracy: 95.71 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth + Config: configs/poolformer/poolformer-m36_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_m36.pth.tar + Code: https://github.com/sail-sg/poolformer + - Name: 
poolformer-m48_3rdparty_32xb128_in1k + Metadata: + FLOPs: 11801805696 + Parameters: 73473448 + In Collections: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.51 + Top 5 Accuracy: 95.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth + Config: configs/poolformer/poolformer-m48_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_m48.pth.tar + Code: https://github.com/sail-sg/poolformer diff --git a/configs/poolformer/poolformer-m36_32xb128_in1k.py b/configs/poolformer/poolformer-m36_32xb128_in1k.py new file mode 100644 index 0000000..1937a78 --- /dev/null +++ b/configs/poolformer/poolformer-m36_32xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_m36.py', + '../_base_/datasets/imagenet_bs128_poolformer_medium_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +optimizer = dict(lr=4e-3) diff --git a/configs/poolformer/poolformer-m48_32xb128_in1k.py b/configs/poolformer/poolformer-m48_32xb128_in1k.py new file mode 100644 index 0000000..a65b76a --- /dev/null +++ b/configs/poolformer/poolformer-m48_32xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_m48.py', + '../_base_/datasets/imagenet_bs128_poolformer_medium_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +optimizer = dict(lr=4e-3) diff --git a/configs/poolformer/poolformer-s12_32xb128_in1k.py b/configs/poolformer/poolformer-s12_32xb128_in1k.py new file mode 100644 index 0000000..98027c0 --- /dev/null +++ b/configs/poolformer/poolformer-s12_32xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_s12.py', + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +optimizer = dict(lr=4e-3) diff --git a/configs/poolformer/poolformer-s24_32xb128_in1k.py b/configs/poolformer/poolformer-s24_32xb128_in1k.py new file mode 100644 index 0000000..9774259 --- /dev/null +++ b/configs/poolformer/poolformer-s24_32xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_s24.py', + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +optimizer = dict(lr=4e-3) diff --git a/configs/poolformer/poolformer-s36_32xb128_in1k.py b/configs/poolformer/poolformer-s36_32xb128_in1k.py new file mode 100644 index 0000000..4d742d3 --- /dev/null +++ b/configs/poolformer/poolformer-s36_32xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_s36.py', + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +optimizer = dict(lr=4e-3) diff --git a/configs/regnet/README.md b/configs/regnet/README.md new file mode 100644 index 0000000..1ae074d --- /dev/null +++ b/configs/regnet/README.md @@ -0,0 +1,51 @@ +# RegNet + +> [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) + + + +## Abstract + +In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. 
Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs. + +
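The "quantized linear function" mentioned above can be made concrete in a few lines of Python: a linear rule proposes a width for every block, and each proposal is snapped to a power of w_m times w_0 and rounded to a multiple of 8, which naturally groups blocks into stages of constant width. The helper below is only a sketch of that rule; the parameter values in the usage example are illustrative and are not taken from the configs in this diff.

```python
import math


def regnet_widths(depth: int, w0: float, wa: float, wm: float,
                  divisor: int = 8) -> list:
    """Sketch of the quantized linear width rule.

    A linear function u_j = w0 + wa * j gives a target width per block;
    each target is snapped to the nearest power of wm times w0 and rounded
    to a multiple of `divisor`, so consecutive blocks share widths and
    form a small number of stages.
    """
    widths = []
    for j in range(depth):
        u_j = w0 + wa * j                      # continuous linear widths
        s_j = round(math.log(u_j / w0, wm))    # quantize the exponent
        w_j = w0 * (wm ** s_j)
        widths.append(int(round(w_j / divisor) * divisor))
    return widths


if __name__ == '__main__':
    # Illustrative parameters roughly in the regime of a small RegNetX model.
    print(regnet_widths(depth=22, w0=24, wa=24.5, wm=2.5))
```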
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------------: | :--------------------------------------------------------------------------: | +| RegNetX-400MF | 5.16 | 0.41 | 72.56 | 90.78 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-400mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211208_143316.log.json) | +| RegNetX-800MF | 7.26 | 0.81 | 74.76 | 92.32 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-800mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211213-222b0f11.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211207_143037.log.json) | +| RegNetX-1.6GF | 9.19 | 1.63 | 76.84 | 93.31 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-1.6gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211213-d1b89758.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211208_143018.log.json) | +| RegNetX-3.2GF | 15.3 | 3.21 | 78.09 | 94.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-3.2gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211213-1fdd82ae.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211208_142720.log.json) | +| RegNetX-4.0GF | 22.12 | 4.0 | 78.60 | 94.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-4.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211213-efed675c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211207_150431.log.json) | +| RegNetX-6.4GF | 26.21 | 6.51 | 79.38 | 94.65 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-6.4gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211215-5c6089da.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211213_172748.log.json) | +| RegNetX-8.0GF | 39.57 | 8.03 | 79.12 | 94.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-8.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211213-9a9fcc76.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211208_103250.log.json) | +| RegNetX-12GF | 46.11 | 12.15 | 79.67 | 95.03 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-12gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211213-5df8c2f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211208_143713.log.json) | +| RegNetX-400MF\* | 
5.16 | 0.41 | 72.55 | 90.91 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-400mf_8xb128_in1k) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-400MF-0db9f35c.pth) | +| RegNetX-800MF\* | 7.26 | 0.81 | 75.21 | 92.37 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-800mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-800MF-4f9d1e8a.pth) | +| RegNetX-1.6GF\* | 9.19 | 1.63 | 77.04 | 93.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-1.6gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-1.6GF-cfb32375.pth) | +| RegNetX-3.2GF\* | 15.3 | 3.21 | 78.26 | 94.20 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-3.2gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-3.2GF-82c43fd5.pth) | +| RegNetX-4.0GF\* | 22.12 | 4.0 | 78.72 | 94.22 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-4.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-4.0GF-ef8bb32c.pth) | +| RegNetX-6.4GF\* | 26.21 | 6.51 | 79.22 | 94.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-6.4gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-6.4GF-6888c0ea.pth) | +| RegNetX-8.0GF\* | 39.57 | 8.03 | 79.31 | 94.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-8.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-8.0GF-cb4c77ec.pth) | +| RegNetX-12GF\* | 46.11 | 12.15 | 79.91 | 94.78 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-12gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-12GF-0574538f.pth) | + +*Models with * are converted from [pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). 
The config files of these models are only for validation.* + +## Citation + +``` +@article{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/regnet/metafile.yml b/configs/regnet/metafile.yml new file mode 100644 index 0000000..6b301ab --- /dev/null +++ b/configs/regnet/metafile.yml @@ -0,0 +1,122 @@ +Collections: + - Name: RegNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - Neural Architecture Search + - Design Space Design + - Precise BN + - SGD with nesterov + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: Designing Network Design Spaces + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.18.0/mmcls/models/backbones/regnet.py + Version: v0.18.0 + +Models: + - Name: regnetx-400mf_8xb128_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-400mf_8xb128_in1k.py + Metadata: + FLOPs: 410000000 # 0.41G + Parameters: 5160000 # 5.16M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 72.56 + Top 5 Accuracy: 90.78 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth + - Name: regnetx-800mf_8xb128_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-800mf_8xb128_in1k.py + Metadata: + FLOPs: 810000000 # 0.81G + Parameters: 7260000 # 7.26M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 74.76 + Top 5 Accuracy: 92.32 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211213-222b0f11.pth + - Name: regnetx-1.6gf_8xb128_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-1.6gf_8xb128_in1k.py + Metadata: + FLOPs: 1630000000 # 1.63G + Parameters: 9190000 # 9.19M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 76.84 + Top 5 Accuracy: 93.31 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211213-d1b89758.pth + - Name: regnetx-3.2gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-3.2gf_8xb64_in1k.py + Metadata: + FLOPs: 1530000000 # 1.53G + Parameters: 3210000 # 32.1M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.09 + Top 5 Accuracy: 94.08 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211213-1fdd82ae.pth + - Name: regnetx-4.0gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-4.0gf_8xb64_in1k.py + Metadata: + FLOPs: 4000000000 # 4G + Parameters: 22120000 # 22.12M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.60 + Top 5 Accuracy: 94.17 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211213-efed675c.pth + - Name: regnetx-6.4gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-6.4gf_8xb64_in1k.py + Metadata: + FLOPs: 6510000000 # 6.51G + Parameters: 26210000 # 26.21M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.38 + Top 5 Accuracy: 94.65 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211215-5c6089da.pth + - Name: 
regnetx-8.0gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-8.0gf_8xb64_in1k.py + Metadata: + FLOPs: 8030000000 # 8.03G + Parameters: 39570000 # 39.57M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.12 + Top 5 Accuracy: 94.51 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211213-9a9fcc76.pth + - Name: regnetx-12gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-12gf_8xb64_in1k.py + Metadata: + FLOPs: 12150000000 # 12.15G + Parameters: 46110000 # 46.11M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.67 + Top 5 Accuracy: 95.03 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211213-5df8c2f8.pth diff --git a/configs/regnet/regnetx-1.6gf_8xb128_in1k.py b/configs/regnet/regnetx-1.6gf_8xb128_in1k.py new file mode 100644 index 0000000..d3e9e93 --- /dev/null +++ b/configs/regnet/regnetx-1.6gf_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_1.6gf'), + head=dict(in_channels=912, )) diff --git a/configs/regnet/regnetx-12gf_8xb64_in1k.py b/configs/regnet/regnetx-12gf_8xb64_in1k.py new file mode 100644 index 0000000..5da0ebe --- /dev/null +++ b/configs/regnet/regnetx-12gf_8xb64_in1k.py @@ -0,0 +1,11 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_12gf'), + head=dict(in_channels=2240, )) + +# for batch_size 512, use lr = 0.4 +optimizer = dict(lr=0.4) + +data = dict(samples_per_gpu=64, ) diff --git a/configs/regnet/regnetx-3.2gf_8xb64_in1k.py b/configs/regnet/regnetx-3.2gf_8xb64_in1k.py new file mode 100644 index 0000000..98c4a0b --- /dev/null +++ b/configs/regnet/regnetx-3.2gf_8xb64_in1k.py @@ -0,0 +1,11 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_3.2gf'), + head=dict(in_channels=1008, )) + +# for batch_size 512, use lr = 0.4 +optimizer = dict(lr=0.4) + +data = dict(samples_per_gpu=64, ) diff --git a/configs/regnet/regnetx-4.0gf_8xb64_in1k.py b/configs/regnet/regnetx-4.0gf_8xb64_in1k.py new file mode 100644 index 0000000..87bc847 --- /dev/null +++ b/configs/regnet/regnetx-4.0gf_8xb64_in1k.py @@ -0,0 +1,11 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_4.0gf'), + head=dict(in_channels=1360, )) + +# for batch_size 512, use lr = 0.4 +optimizer = dict(lr=0.4) + +data = dict(samples_per_gpu=64, ) diff --git a/configs/regnet/regnetx-400mf_8xb128_in1k.py b/configs/regnet/regnetx-400mf_8xb128_in1k.py new file mode 100644 index 0000000..86fee90 --- /dev/null +++ b/configs/regnet/regnetx-400mf_8xb128_in1k.py @@ -0,0 +1,77 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_400mf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs1024_coslr.py', + '../_base_/default_runtime.py' +] + +# Precise BN hook will update the bn stats, so this hook should be executed +# before CheckpointHook, which has priority of 'NORMAL'. So set the +# priority of PreciseBNHook to 'ABOVE_NORMAL' here. 
+custom_hooks = [ + dict( + type='PreciseBNHook', + num_samples=8192, + interval=1, + priority='ABOVE_NORMAL') +] + +# sgd with nesterov, base ls is 0.8 for batch_size 1024, +# 0.4 for batch_size 512 and 0.2 for batch_size 256 when training ImageNet1k +optimizer = dict(lr=0.8, nesterov=True) + +# dataset settings +dataset_type = 'ImageNet' + +# normalization params, in order of BGR +NORM_MEAN = [103.53, 116.28, 123.675] +NORM_STD = [57.375, 57.12, 58.395] + +# lighting params, in order of RGB, from repo. pycls +EIGVAL = [0.2175, 0.0188, 0.0045] +EIGVEC = [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.814], + [-0.5836, -0.6948, 0.4203]] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=25.5, # because the value range of images is [0,255] + to_rgb=True + ), # BGR image from cv2 in LoadImageFromFile, convert to RGB here + dict(type='Normalize', mean=NORM_MEAN, std=NORM_STD, + to_rgb=True), # RGB2BGR + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', mean=NORM_MEAN, std=NORM_STD, to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=128, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) diff --git a/configs/regnet/regnetx-6.4gf_8xb64_in1k.py b/configs/regnet/regnetx-6.4gf_8xb64_in1k.py new file mode 100644 index 0000000..02ee424 --- /dev/null +++ b/configs/regnet/regnetx-6.4gf_8xb64_in1k.py @@ -0,0 +1,11 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_6.4gf'), + head=dict(in_channels=1624, )) + +# for batch_size 512, use lr = 0.4 +optimizer = dict(lr=0.4) + +data = dict(samples_per_gpu=64, ) diff --git a/configs/regnet/regnetx-8.0gf_8xb64_in1k.py b/configs/regnet/regnetx-8.0gf_8xb64_in1k.py new file mode 100644 index 0000000..84ab811 --- /dev/null +++ b/configs/regnet/regnetx-8.0gf_8xb64_in1k.py @@ -0,0 +1,11 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_8.0gf'), + head=dict(in_channels=1920, )) + +# for batch_size 512, use lr = 0.4 +optimizer = dict(lr=0.4) + +data = dict(samples_per_gpu=64, ) diff --git a/configs/regnet/regnetx-800mf_8xb128_in1k.py b/configs/regnet/regnetx-800mf_8xb128_in1k.py new file mode 100644 index 0000000..9cd7137 --- /dev/null +++ b/configs/regnet/regnetx-800mf_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_800mf'), + head=dict(in_channels=672, )) diff --git a/configs/repmlp/README.md b/configs/repmlp/README.md new file mode 100644 index 0000000..4533463 --- /dev/null +++ b/configs/repmlp/README.md @@ 
-0,0 +1,93 @@ +# RepMLP + +> [RepMLP: Re-parameterizing Convolutions into Fully-connected Layers for Image Recognition](https://arxiv.org/abs/2105.01883) + + + +## Abstract + +We propose RepMLP, a multi-layer-perceptron-style neural network building block for image recognition, which is composed of a series of fully-connected (FC) layers. Compared to convolutional layers, FC layers are more efficient, better at modeling the long-range dependencies and positional patterns, but worse at capturing the local structures, hence usually less favored for image recognition. We propose a structural re-parameterization technique that adds local prior into an FC to make it powerful for image recognition. Specifically, we construct convolutional layers inside a RepMLP during training and merge them into the FC for inference. On CIFAR, a simple pure-MLP model shows performance very close to CNN. By inserting RepMLP in traditional CNN, we improve ResNets by 1.8% accuracy on ImageNet, 2.9% for face recognition, and 2.3% mIoU on Cityscapes with lower FLOPs. Our intriguing findings highlight that combining the global representational capacity and positional perception of FC with the local prior of convolution can improve the performance of neural network with faster speed on both the tasks with translation invariance (e.g., semantic segmentation) and those with aligned images and positional patterns (e.g., face recognition). + +
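The merging step described above can be illustrated with plain PyTorch: because convolution is linear, pushing an identity matrix through a conv yields an equivalent fully-connected weight, which can then be absorbed into the FC branch. This is a minimal sketch of the general trick on a toy size, assuming stride 1 and 'same' padding; it is not the implementation added in this diff:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def conv_to_fc(conv: nn.Conv2d, c: int, h: int, w: int) -> nn.Linear:
    """Build an FC layer equivalent to `conv` on flattened (c, h, w) inputs
    by pushing the identity matrix through the convolution."""
    eye = torch.eye(c * h * w).reshape(c * h * w, c, h, w)
    with torch.no_grad():
        out = F.conv2d(eye, conv.weight, padding=conv.padding)
        o = conv.out_channels
        fc = nn.Linear(c * h * w, o * h * w, bias=conv.bias is not None)
        fc.weight.copy_(out.reshape(c * h * w, o * h * w).t())
        if conv.bias is not None:
            # the conv bias is broadcast over spatial positions
            fc.bias.copy_(conv.bias.reshape(o, 1, 1).expand(o, h, w).reshape(-1))
    return fc

# sanity check on a toy size
conv = nn.Conv2d(2, 3, kernel_size=3, padding=1)
x = torch.randn(4, 2, 8, 8)
fc = conv_to_fc(conv, 2, 8, 8)
assert torch.allclose(conv(x).flatten(1), fc(x.flatten(1)), atol=1e-5)
```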
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: | +| RepMLP-B224\* | 68.24 | 6.71 | 80.41 | 95.12 | [train_cfg](https://github.com/open-mmlab/mmclassification/blob/master/configs/repmlp/repmlp-base_8xb64_in1k.py) \| [deploy_cfg](https://github.com/open-mmlab/mmclassification/blob/master/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth) | +| RepMLP-B256\* | 96.45 | 9.69 | 81.11 | 95.5 | [train_cfg](https://github.com/open-mmlab/mmclassification/blob/master/configs/repmlp/repmlp-base_8xb64_in1k-256px.py) \| [deploy_cfg](https://github.com/open-mmlab/mmclassification/blob/master/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth) | + +*Models with * are converted from [the official repo](https://github.com/DingXiaoH/RepMLP). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## How to use + +The checkpoints provided are all `training-time` models. Use the reparameterize tool to switch them to the more efficient `inference-time` architecture, which not only has fewer parameters but also requires less computation. + +### Use tool + +Use the provided tool to reparameterize the given model and save the checkpoint: + +```bash +python tools/convert_models/reparameterize_model.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH} +``` + +`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source checkpoint file, `${TARGET_CKPT_PATH}` is the target deploy weight file path. + +To use reparameterized weights, the config file must be switched to the corresponding deploy config file. + +```bash +python tools/test.py ${Deploy_CFG} ${Deploy_Checkpoint} --metrics accuracy +``` + +### In the code + +Use `backbone.switch_to_deploy()` or `classifier.backbone.switch_to_deploy()` to switch to the deploy mode.
For example: + +```python +from mmcls.models import build_backbone + +backbone_cfg=dict(type='RepMLPNet', arch='B', img_size=224, reparam_conv_kernels=(1, 3), deploy=False) +backbone = build_backbone(backbone_cfg) +backbone.switch_to_deploy() +``` + +or + +```python +from mmcls.models import build_classifier + +cfg = dict( + type='ImageClassifier', + backbone=dict( + type='RepMLPNet', + arch='B', + img_size=224, + reparam_conv_kernels=(1, 3), + deploy=False), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +classifier = build_classifier(cfg) +classifier.backbone.switch_to_deploy() +``` + +## Citation + +``` +@article{ding2021repmlp, + title={Repmlp: Re-parameterizing convolutions into fully-connected layers for image recognition}, + author={Ding, Xiaohan and Xia, Chunlong and Zhang, Xiangyu and Chu, Xiaojie and Han, Jungong and Ding, Guiguang}, + journal={arXiv preprint arXiv:2105.01883}, + year={2021} +} +``` diff --git a/configs/repmlp/metafile.yml b/configs/repmlp/metafile.yml new file mode 100644 index 0000000..19caecb --- /dev/null +++ b/configs/repmlp/metafile.yml @@ -0,0 +1,48 @@ +Collections: + - Name: RepMLP + Metadata: + Training Data: ImageNet-1k + Architecture: + - Multi-layer Perceptron + - Re-parameterization Convolution + Paper: + URL: https://arxiv.org/abs/2105.01883 + Title: 'RepMLP: Re-parameterizing Convolutions into Fully-connected Layers for Image Recognition' + README: configs/repmlp/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.21.0/mmcls/models/backbones/repmlp.py + Version: v0.21.0 + +Models: + - Name: repmlp-base_3rdparty_8xb64_in1k + In Collection: RepMLP + Config: configs/repmlp/repmlp-base_8xb64_in1k.py + Metadata: + FLOPs: 6710000000 # 6.71 G + Parameters: 68240000 # 68.24 M + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.41 + Top 5 Accuracy: 95.14 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth + Converted From: + Weights: https://github.com/DingXiaoH/RepMLP + Code: https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L274 + - Name: repmlp-base_3rdparty_8xb64_in1k-256px.py + In Collection: RepMLP + Config: configs/repmlp/repmlp-base_8xb64_in1k-256px.py + Metadata: + FLOPs: 9690000000 # 9.69 G + Parameters: 96450000 # 96.45M + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.11 + Top 5 Accuracy: 95.50 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth + Converted From: + Weights: https://github.com/DingXiaoH/RepMLP + Code: https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L278 diff --git a/configs/repmlp/repmlp-base_8xb64_in1k-256px.py b/configs/repmlp/repmlp-base_8xb64_in1k-256px.py new file mode 100644 index 0000000..ff03c6f --- /dev/null +++ b/configs/repmlp/repmlp-base_8xb64_in1k-256px.py @@ -0,0 +1,21 @@ +_base_ = [ + '../_base_/models/repmlp-base_224.py', + '../_base_/datasets/imagenet_bs64_mixer_224.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=256)) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = 
[ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256 * 256 // 224, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) diff --git a/configs/repmlp/repmlp-base_8xb64_in1k.py b/configs/repmlp/repmlp-base_8xb64_in1k.py new file mode 100644 index 0000000..430cdc0 --- /dev/null +++ b/configs/repmlp/repmlp-base_8xb64_in1k.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/repmlp-base_224.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + # resizing to (256, 256) here, different with resizing shorter edge to 256 + dict(type='Resize', size=(256, 256), backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) diff --git a/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py b/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py new file mode 100644 index 0000000..b5b2c88 --- /dev/null +++ b/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['./repmlp-base_8xb64_in1k.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py b/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py new file mode 100644 index 0000000..27ff50a --- /dev/null +++ b/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py @@ -0,0 +1,3 @@ +_base_ = ['./repmlp-base_8xb64_in1k-256px.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/README.md b/configs/repvgg/README.md new file mode 100644 index 0000000..b934132 --- /dev/null +++ b/configs/repvgg/README.md @@ -0,0 +1,101 @@ +# RepVGG + +> [Repvgg: Making vgg-style convnets great again](https://arxiv.org/abs/2101.03697) + + + +## Abstract + +We present a simple but powerful architecture of convolutional neural network, which has a VGG-like inference-time body composed of nothing but a stack of 3x3 convolution and ReLU, while the training-time model has a multi-branch topology. Such decoupling of the training-time and inference-time architecture is realized by a structural re-parameterization technique so that the model is named RepVGG. On ImageNet, RepVGG reaches over 80% top-1 accuracy, which is the first time for a plain model, to the best of our knowledge. On NVIDIA 1080Ti GPU, RepVGG models run 83% faster than ResNet-50 or 101% faster than ResNet-101 with higher accuracy and show favorable accuracy-speed trade-off compared to the state-of-the-art models like EfficientNet and RegNet. + +
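The decoupling of training-time and inference-time architecture rests on folding each branch's batch norm into its convolution and then adding the resulting kernels into a single 3x3 conv. A minimal sketch of the conv-BN folding step is shown below, assuming eval-mode statistics; it is illustrative only and not the backbone code added in this diff:

```python
import torch
import torch.nn as nn

def fuse_conv_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    """Fold a BatchNorm that follows `conv` into the conv's weight and bias,
    so the fused conv alone reproduces conv+bn in eval mode."""
    std = torch.sqrt(bn.running_var + bn.eps)
    scale = bn.weight / std  # per-output-channel scale
    fused = nn.Conv2d(conv.in_channels, conv.out_channels,
                      conv.kernel_size, conv.stride, conv.padding,
                      groups=conv.groups, bias=True)
    with torch.no_grad():
        fused.weight.copy_(conv.weight * scale.reshape(-1, 1, 1, 1))
        bias = torch.zeros_like(bn.running_mean) if conv.bias is None else conv.bias
        fused.bias.copy_(bn.bias + (bias - bn.running_mean) * scale)
    return fused

# sanity check: populate BN running stats, then compare in eval mode
conv, bn = nn.Conv2d(8, 16, 3, padding=1, bias=False), nn.BatchNorm2d(16)
bn.train(); bn(conv(torch.randn(2, 8, 32, 32)))
bn.eval()
x = torch.randn(2, 8, 32, 32)
assert torch.allclose(bn(conv(x)), fuse_conv_bn(conv, bn)(x), atol=1e-5)
```

The 1x1 and identity branches can be written as 3x3 kernels (zero-padded and one-hot, respectively), so after per-branch folding the three kernels and biases simply add up.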
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Epochs | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------: | :----: | :-------------------------------: | :-----------------------------: | :-------: | :-------: | :----------------------------------------------: | :-------------------------------------------------: | +| RepVGG-A0\* | 120 | 9.11(train) \| 8.31 (deploy) | 1.52 (train) \| 1.36 (deploy) | 72.41 | 90.50 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth) | +| RepVGG-A1\* | 120 | 14.09 (train) \| 12.79 (deploy) | 2.64 (train) \| 2.37 (deploy) | 74.47 | 91.85 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth) | +| RepVGG-A2\* | 120 | 28.21 (train) \| 25.5 (deploy) | 5.7 (train) \| 5.12 (deploy) | 76.48 | 93.01 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth) | +| RepVGG-B0\* | 120 | 15.82 (train) \| 14.34 (deploy) | 3.42 (train) \| 3.06 (deploy) | 75.14 | 92.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth) | +| RepVGG-B1\* | 120 | 57.42 (train) \| 51.83 (deploy) | 13.16 (train) \| 11.82 (deploy) | 78.37 | 94.11 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth) | +| RepVGG-B1g2\* | 120 | 45.78 (train) \| 41.36 (deploy) | 9.82 (train) \| 8.82 (deploy) | 77.79 | 93.88 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth) | +| RepVGG-B1g4\* | 120 | 39.97 (train) \| 36.13 (deploy) | 8.15 (train) \| 7.32 (deploy) | 77.58 | 93.84 | [config 
(train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth) | +| RepVGG-B2\* | 120 | 89.02 (train) \| 80.32 (deploy) | 20.46 (train) \| 18.39 (deploy) | 78.78 | 94.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth) | +| RepVGG-B2g4\* | 200 | 61.76 (train) \| 55.78 (deploy) | 12.63 (train) \| 11.34 (deploy) | 79.38 | 94.68 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth) | +| RepVGG-B3\* | 200 | 123.09 (train) \| 110.96 (deploy) | 29.17 (train) \| 26.22 (deploy) | 80.52 | 95.26 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth) | +| RepVGG-B3g4\* | 200 | 83.83 (train) \| 75.63 (deploy) | 17.9 (train) \| 16.08 (deploy) | 80.22 | 95.10 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth) | +| RepVGG-D2se\* | 200 | 133.33 (train) \| 120.39 (deploy) | 36.56 (train) \| 32.85 (deploy) | 81.81 | 95.94 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth) | + +*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepVGG). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## How to use + +The checkpoints provided are all `training-time` models. 
Use the reparameterize tool to switch them to more efficient `inference-time` architecture, which not only has fewer parameters but also less calculations. + +### Use tool + +Use provided tool to reparameterize the given model and save the checkpoint: + +```bash +python tools/convert_models/reparameterize_model.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH} +``` + +`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source chenpoint file, `${TARGET_CKPT_PATH}` is the target deploy weight file path. + +To use reparameterized weights, the config file must switch to the deploy config files. + +```bash +python tools/test.py ${Deploy_CFG} ${Deploy_Checkpoint} --metrics accuracy +``` + +### In the code + +Use `backbone.switch_to_deploy()` or `classificer.backbone.switch_to_deploy()` to switch to the deploy mode. For example: + +```python +from mmcls.models import build_backbone + +backbone_cfg=dict(type='RepVGG',arch='A0'), +backbone = build_backbone(backbone_cfg) +backbone.switch_to_deploy() +``` + +or + +```python +from mmcls.models import build_classifier + +cfg = dict( + type='ImageClassifier', + backbone=dict( + type='RepVGG', + arch='A0'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +classifier = build_classifier(cfg) +classifier.backbone.switch_to_deploy() +``` + +## Citation + +``` +@inproceedings{ding2021repvgg, + title={Repvgg: Making vgg-style convnets great again}, + author={Ding, Xiaohan and Zhang, Xiangyu and Ma, Ningning and Han, Jungong and Ding, Guiguang and Sun, Jian}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={13733--13742}, + year={2021} +} +``` diff --git a/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py b/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..20787f2 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py b/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..eea0da9 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-A1_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py b/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..7b0cea7 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-A2_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py b/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..23a2898 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py b/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..24355ed --- /dev/null +++ 
b/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B1_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py b/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..579fcc4 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B1g2_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py b/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..eab5d44 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B1g4_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py b/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..0681f14 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B2_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000..8f18401 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000..e60b067 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000..46f1877 --- /dev/null +++ b/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000..66dff3b --- /dev/null +++ b/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/metafile.yml b/configs/repvgg/metafile.yml new file mode 100644 index 0000000..84fee59 --- /dev/null +++ b/configs/repvgg/metafile.yml @@ -0,0 +1,208 @@ +Collections: + - Name: RepVGG + Metadata: + Training Data: ImageNet-1k + Architecture: + - re-parameterization Convolution + - VGG-style Neural Network + Paper: + URL: https://arxiv.org/abs/2101.03697 
+ Title: 'RepVGG: Making VGG-style ConvNets Great Again' + README: configs/repvgg/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.16.0/mmcls/models/backbones/repvgg.py#L257 + Version: v0.16.0 + +Models: + - Name: repvgg-A0_3rdparty_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 1520000000 + Parameters: 9110000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 72.41 + Top 5 Accuracy: 90.50 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L196 + - Name: repvgg-A1_3rdparty_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 2640000000 + Parameters: 14090000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 74.47 + Top 5 Accuracy: 91.85 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L200 + - Name: repvgg-A2_3rdparty_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 28210000000 + Parameters: 5700000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 76.48 + Top 5 Accuracy: 93.01 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L204 + - Name: repvgg-B0_3rdparty_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 15820000000 + Parameters: 3420000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 75.14 + Top 5 Accuracy: 92.42 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L208 + - Name: repvgg-B1_3rdparty_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 57420000000 + Parameters: 13160000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.37 + Top 5 Accuracy: 94.11 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L212 + - Name: repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k + 
In Collection: RepVGG + Config: configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 45780000000 + Parameters: 9820000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 77.79 + Top 5 Accuracy: 93.88 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L216 + - Name: repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 39970000000 + Parameters: 8150000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 77.58 + Top 5 Accuracy: 93.84 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L220 + - Name: repvgg-B2_3rdparty_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 89020000000 + Parameters: 20420000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.78 + Top 5 Accuracy: 94.42 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L225 + - Name: repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py + Metadata: + FLOPs: 61760000000 + Parameters: 12630000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.38 + Top 5 Accuracy: 94.68 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L229 + - Name: repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py + Metadata: + FLOPs: 123090000000 + Parameters: 29170000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 80.52 + Top 5 Accuracy: 95.26 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L238 + - Name: repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py + Metadata: + FLOPs: 83830000000 + 
Parameters: 17900000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 80.22 + Top 5 Accuracy: 95.10 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L238 + - Name: repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py + Metadata: + FLOPs: 133330000000 + Parameters: 36560000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 81.81 + Top 5 Accuracy: 95.94 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L250 diff --git a/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py b/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..a7fd3bb --- /dev/null +++ b/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/repvgg-A0_in1k.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +runner = dict(max_epochs=120) diff --git a/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py b/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..649020f --- /dev/null +++ b/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='A1')) diff --git a/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py b/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..eedaf2d --- /dev/null +++ b/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='A2'), head=dict(in_channels=1408)) diff --git a/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py b/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..b3ce7ea --- /dev/null +++ b/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B0'), head=dict(in_channels=1280)) diff --git a/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py b/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..30adea3 --- /dev/null +++ b/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B1'), head=dict(in_channels=2048)) diff --git a/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py b/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..2749db8 --- /dev/null +++ b/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B1g2'), head=dict(in_channels=2048)) diff --git a/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py 
b/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..2647690 --- /dev/null +++ b/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B1g4'), head=dict(in_channels=2048)) diff --git a/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py b/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000..4d21556 --- /dev/null +++ b/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B2'), head=dict(in_channels=2560)) diff --git a/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000..11331cf --- /dev/null +++ b/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(arch='B2g4')) diff --git a/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000..7b6dc50 --- /dev/null +++ b/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/repvgg-B3_lbs-mixup_in1k.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs256_200e_coslr_warmup.py', + '../_base_/default_runtime.py' +] diff --git a/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000..67e3688 --- /dev/null +++ b/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(arch='B3g4')) diff --git a/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000..d235610 --- /dev/null +++ b/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(arch='D2se')) diff --git a/configs/res2net/README.md b/configs/res2net/README.md new file mode 100644 index 0000000..6119009 --- /dev/null +++ b/configs/res2net/README.md @@ -0,0 +1,37 @@ +# Res2Net + +> [Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/pdf/1904.01169.pdf) + + + +## Abstract + +Representing features at multiple scales is of great importance for numerous vision tasks. Recent advances in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale representation ability, leading to consistent performance gains on a wide range of applications. However, most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. 
We evaluate the Res2Net block on all these models and demonstrate consistent performance gains over baseline models on widely-used datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, further verify the superiority of the Res2Net over the state-of-the-art baseline methods. + +
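The hierarchical residual-like connections within one block can be sketched as follows. This toy module only illustrates the splitting scheme (scale s = 4, no 1x1 projections, norms or downsampling) and is not the `Res2Net` backbone referenced by these configs:

```python
import torch
import torch.nn as nn

class Res2NetSplitConv(nn.Module):
    """Toy illustration of the Res2Net idea: split channels into `scales`
    groups and let each 3x3 conv also see the previous group's output,
    so later groups obtain increasingly large receptive fields."""

    def __init__(self, channels: int, scales: int = 4):
        super().__init__()
        assert channels % scales == 0
        self.scales = scales
        width = channels // scales
        # one 3x3 conv per group except the first, which is passed through
        self.convs = nn.ModuleList(
            nn.Conv2d(width, width, 3, padding=1) for _ in range(scales - 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        xs = torch.chunk(x, self.scales, dim=1)
        ys = [xs[0]]          # y1 = x1 (identity)
        prev = None
        for i, conv in enumerate(self.convs):
            inp = xs[i + 1] if prev is None else xs[i + 1] + prev
            prev = conv(inp)  # y_i = K_i(x_i + y_{i-1})
            ys.append(prev)
        return torch.cat(ys, dim=1)

out = Res2NetSplitConv(64, scales=4)(torch.randn(1, 64, 56, 56))
print(out.shape)  # torch.Size([1, 64, 56, 56])
```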
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------: | :--------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------: | :-------------------------------------------------------------------: | +| Res2Net-50-14w-8s\* | 224x224 | 25.06 | 4.22 | 78.14 | 93.85 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net50-w14-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth) \| [log](<>) | +| Res2Net-50-26w-8s\* | 224x224 | 48.40 | 8.39 | 79.20 | 94.36 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net50-w26-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth) \| [log](<>) | +| Res2Net-101-26w-4s\* | 224x224 | 45.21 | 8.12 | 79.19 | 94.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net101-w26-s4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth) \| [log](<>) | + +*Models with * are converted from the [official repo](https://github.com/Res2Net/Res2Net-PretrainedModels). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +``` +@article{gao2019res2net, + title={Res2Net: A New Multi-scale Backbone Architecture}, + author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, + journal={IEEE TPAMI}, + year={2021}, + doi={10.1109/TPAMI.2019.2938758}, +} +``` diff --git a/configs/res2net/metafile.yml b/configs/res2net/metafile.yml new file mode 100644 index 0000000..d76f898 --- /dev/null +++ b/configs/res2net/metafile.yml @@ -0,0 +1,70 @@ +Collections: + - Name: Res2Net + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - Batch Normalization + - Convolution + - Global Average Pooling + - ReLU + - Res2Net Block + Paper: + Title: 'Res2Net: A New Multi-scale Backbone Architecture' + URL: https://arxiv.org/pdf/1904.01169.pdf + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/res2net.py + Version: v0.17.0 + +Models: + - Name: res2net50-w14-s8_3rdparty_8xb32_in1k + Metadata: + FLOPs: 4220000000 + Parameters: 25060000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.14 + Top 5 Accuracy: 93.85 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPdOTqhF8ne_aakDI?e=EVb8Ri + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L221 + Config: configs/res2net/res2net50-w14-s8_8xb32_in1k.py + - Name: res2net50-w26-s8_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8390000000 + Parameters: 48400000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.20 + Top 5 Accuracy: 94.36 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPdTrAd_Afzc26Z7Q?e=slYqsR + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L201 + Config: configs/res2net/res2net50-w26-s8_8xb32_in1k.py + - Name: res2net101-w26-s4_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8120000000 + Parameters: 45210000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.19 + Top 5 Accuracy: 94.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPcJRgTLkahL0cFYw?e=nwbnic + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L181 + Config: configs/res2net/res2net101-w26-s4_8xb32_in1k.py diff --git a/configs/res2net/res2net101-w26-s4_8xb32_in1k.py b/configs/res2net/res2net101-w26-s4_8xb32_in1k.py new file mode 100644 index 0000000..7ebe9e9 --- /dev/null +++ b/configs/res2net/res2net101-w26-s4_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/res2net101-w26-s4.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/res2net/res2net50-w14-s8_8xb32_in1k.py b/configs/res2net/res2net50-w14-s8_8xb32_in1k.py new file mode 100644 index 0000000..56cc02e --- /dev/null +++ b/configs/res2net/res2net50-w14-s8_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/res2net50-w14-s8.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/res2net/res2net50-w26-s8_8xb32_in1k.py b/configs/res2net/res2net50-w26-s8_8xb32_in1k.py new file mode 100644 index 0000000..d7dcbeb --- /dev/null +++ b/configs/res2net/res2net50-w26-s8_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/res2net50-w26-s8.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnest/README.md b/configs/resnest/README.md new file mode 100644 index 0000000..eb6c5fd --- /dev/null +++ b/configs/resnest/README.md @@ -0,0 +1,26 @@ +# ResNeSt + +> [ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955) + + + +## Abstract + +It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. The source code for complete system and pretrained models are publicly available. + +
+ +
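The ResNeSt configs added below are self-contained training pipelines; rather than editing them in place, a local variant is usually derived through `_base_` inheritance, overriding only the keys that change. The snippet is a hypothetical override built on the `resnest50_32xb64_in1k.py` config from this folder; the file name, reduced batch size, and shorter schedule are illustrative only.

```python
# Hypothetical local override (e.g. my_resnest50_in1k.py) built on the
# ResNeSt-50 config in this folder. Only the keys listed here are changed;
# everything else is inherited from the base config.
_base_ = ['./resnest50_32xb64_in1k.py']

# Illustrative changes: smaller per-GPU batch and a shorter schedule.
data = dict(samples_per_gpu=32)
runner = dict(type='EpochBasedRunner', max_epochs=100)
```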
+ +## Citation + +``` +@misc{zhang2020resnest, + title={ResNeSt: Split-Attention Networks}, + author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola}, + year={2020}, + eprint={2004.08955}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/resnest/resnest101_32xb64_in1k.py b/configs/resnest/resnest101_32xb64_in1k.py new file mode 100644 index 0000000..27b1882 --- /dev/null +++ b/configs/resnest/resnest101_32xb64_in1k.py @@ -0,0 +1,181 @@ +_base_ = ['../_base_/models/resnest101.py', '../_base_/default_runtime.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_lighting_cfg = dict( + eigval=[55.4625, 4.7940, 1.1475], + eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + alphastd=0.1, + to_rgb=True) +policies = [ + dict(type='AutoContrast', prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12), + dict( + type='RandomResizedCrop', + size=256, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict(type='Lighting', **img_lighting_cfg), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + 
type='CenterCrop', + crop_size=256, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') + +# optimizer +optimizer = dict( + type='SGD', + lr=0.8, + momentum=0.9, + weight_decay=1e-4, + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-6, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=270) diff --git a/configs/resnest/resnest101_b64x32_imagenet.py b/configs/resnest/resnest101_b64x32_imagenet.py new file mode 100644 index 0000000..31c3647 --- /dev/null +++ b/configs/resnest/resnest101_b64x32_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnest101_32xb64_in1k.py' + +_deprecation_ = dict( + expected='resnest101_32xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnest/resnest200_64xb32_in1k.py b/configs/resnest/resnest200_64xb32_in1k.py new file mode 100644 index 0000000..3b166a2 --- /dev/null +++ b/configs/resnest/resnest200_64xb32_in1k.py @@ -0,0 +1,181 @@ +_base_ = ['../_base_/models/resnest200.py', '../_base_/default_runtime.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_lighting_cfg = dict( + eigval=[55.4625, 4.7940, 1.1475], + eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + alphastd=0.1, + to_rgb=True) +policies = [ + dict(type='AutoContrast', prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + 
direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12), + dict( + type='RandomResizedCrop', + size=320, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict(type='Lighting', **img_lighting_cfg), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=320, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') + +# optimizer +optimizer = dict( + type='SGD', + lr=0.8, + momentum=0.9, + weight_decay=1e-4, + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-6, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=270) diff --git a/configs/resnest/resnest200_b32x64_imagenet.py b/configs/resnest/resnest200_b32x64_imagenet.py new file mode 100644 index 0000000..8e62865 --- /dev/null +++ b/configs/resnest/resnest200_b32x64_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnest200_64xb32_in1k.py' + +_deprecation_ = dict( + expected='resnest200_64xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnest/resnest269_64xb32_in1k.py b/configs/resnest/resnest269_64xb32_in1k.py new file mode 100644 index 0000000..7a4db09 --- /dev/null +++ b/configs/resnest/resnest269_64xb32_in1k.py @@ -0,0 +1,181 @@ +_base_ = ['../_base_/models/resnest269.py', '../_base_/default_runtime.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_lighting_cfg = dict( + eigval=[55.4625, 4.7940, 1.1475], + eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + alphastd=0.1, + to_rgb=True) +policies = [ + dict(type='AutoContrast', 
prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12), + dict( + type='RandomResizedCrop', + size=416, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict(type='Lighting', **img_lighting_cfg), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=416, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') + +# optimizer +optimizer = dict( + type='SGD', + lr=0.8, + momentum=0.9, + weight_decay=1e-4, + 
paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-6, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=270) diff --git a/configs/resnest/resnest269_b32x64_imagenet.py b/configs/resnest/resnest269_b32x64_imagenet.py new file mode 100644 index 0000000..0f8b76c --- /dev/null +++ b/configs/resnest/resnest269_b32x64_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnest269_64xb32_in1k.py' + +_deprecation_ = dict( + expected='resnest269_64xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnest/resnest50_32xb64_in1k.py b/configs/resnest/resnest50_32xb64_in1k.py new file mode 100644 index 0000000..812a3be --- /dev/null +++ b/configs/resnest/resnest50_32xb64_in1k.py @@ -0,0 +1,181 @@ +_base_ = ['../_base_/models/resnest50.py', '../_base_/default_runtime.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_lighting_cfg = dict( + eigval=[55.4625, 4.7940, 1.1475], + eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + alphastd=0.1, + to_rgb=True) +policies = [ + dict(type='AutoContrast', prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12), + dict( + type='RandomResizedCrop', + size=224, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + 
dict(type='Lighting', **img_lighting_cfg), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') + +# optimizer +optimizer = dict( + type='SGD', + lr=0.8, + momentum=0.9, + weight_decay=1e-4, + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-6, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=270) diff --git a/configs/resnest/resnest50_b64x32_imagenet.py b/configs/resnest/resnest50_b64x32_imagenet.py new file mode 100644 index 0000000..c0da422 --- /dev/null +++ b/configs/resnest/resnest50_b64x32_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnest50_32xb64_in1k.py' + +_deprecation_ = dict( + expected='resnest50_32xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/README.md b/configs/resnet/README.md new file mode 100644 index 0000000..d32fcd6 --- /dev/null +++ b/configs/resnet/README.md @@ -0,0 +1,91 @@ +# ResNet + +> [Deep Residual Learning for Image Recognition](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html) + + + +## Abstract + +Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. + +The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. 
Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation. + +
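The abstract's central idea, reformulating a stack of layers to learn a residual function F(x) that is added back to its input, is easy to state in code. The block below is an illustrative PyTorch-style sketch of the identity-shortcut mapping y = F(x) + x, not the exact `BasicBlock`/`Bottleneck` modules used by the mmcls backbone.

```python
import torch.nn as nn


class ResidualBlock(nn.Module):
    """Illustrative residual block: y = F(x) + x with an identity shortcut."""

    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))  # first half of F(x)
        out = self.bn2(self.conv2(out))           # second half of F(x)
        return self.relu(x + out)                 # add the identity shortcut
```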
+ +
+ +## Results and models + +The pre-trained models on ImageNet-21k are used to fine-tune, and therefore don't have evaluation results. + +| Model | resolution | Params(M) | Flops(G) | Download | +| :------------: | :--------: | :-------: | :------: | :-------------------------------------------------------------------------------------------------------------------: | +| ResNet-50-mill | 224x224 | 86.74 | 15.14 | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth) | + +*The "mill" means using the mutil-label pretrain weight from [ImageNet-21K Pretraining for the Masses](https://github.com/Alibaba-MIIL/ImageNet21K).* + +### Cifar10 + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------: | :-------: | :------: | :-------: | :-------: | :--------------------------------------------------------------------------: | :-----------------------------------------------------------------------------: | +| ResNet-18 | 11.17 | 0.56 | 94.82 | 99.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.log.json) | +| ResNet-34 | 21.28 | 1.16 | 95.34 | 99.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.log.json) | +| ResNet-50 | 23.52 | 1.31 | 95.55 | 99.91 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.log.json) | +| ResNet-101 | 42.51 | 2.52 | 95.58 | 99.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.log.json) | +| ResNet-152 | 58.16 | 3.74 | 95.76 | 99.89 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.log.json) | + +### Cifar100 + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------: | :-----------------------------------------------------------------------------: | +| ResNet-50 | 23.71 | 1.31 | 79.90 | 95.19 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb16_cifar100.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.log.json) | + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------------: | :-------------------------------------------------------------------------: | +| ResNet-18 | 11.69 | 1.82 | 69.90 | 89.43 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.log.json) | +| ResNet-34 | 21.8 | 3.68 | 73.62 | 91.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.log.json) | +| ResNet-50 | 25.56 | 4.12 | 76.55 | 93.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.log.json) | +| ResNet-101 | 44.55 | 7.85 | 77.97 | 94.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.log.json) | +| ResNet-152 | 60.19 | 11.58 | 78.48 | 94.13 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.log.json) | +| ResNetV1C-50 | 25.58 | 4.36 | 77.01 | 93.58 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1c50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.log.json) | +| ResNetV1C-101 | 44.57 | 8.09 | 78.30 | 94.27 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1c101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.log.json) | +| ResNetV1C-152 | 60.21 | 11.82 | 78.76 | 94.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1c152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.pth) \| 
[log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.log.json) | +| ResNetV1D-50 | 25.58 | 4.36 | 77.54 | 93.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.log.json) | +| ResNetV1D-101 | 44.57 | 8.09 | 78.93 | 94.48 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.log.json) | +| ResNetV1D-152 | 60.21 | 11.82 | 79.41 | 94.70 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.log.json) | +| ResNet-50 (fp16) | 25.56 | 4.12 | 76.30 | 93.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb32-fp16_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.log.json) | +| Wide-ResNet-50\* | 68.88 | 11.44 | 78.48 | 94.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/wide-resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth) | +| Wide-ResNet-101\* | 126.89 | 22.81 | 78.84 | 94.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth) | +| ResNet-50 (rsb-a1) | 25.56 | 4.12 | 80.12 | 94.78 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.log.json) | +| ResNet-50 (rsb-a2) | 25.56 | 4.12 | 79.55 | 94.37 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.log.json) | +| ResNet-50 (rsb-a3) | 25.56 | 4.12 | 78.30 | 93.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.pth) \| 
[log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.log.json) | + +*The "rsb" means using the training settings from [ResNet strikes back: An improved training procedure in timm](https://arxiv.org/abs/2110.00476).* + +*Models with * are converted from the [official repo](https://github.com/pytorch/vision). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +### CUB-200-2011 + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Config | Download | +| :-------: | :--------------------------------------------------: | :--------: | :-------: | :------: | :-------: | :------------------------------------------------: | :---------------------------------------------------: | +| ResNet-50 | [ImageNet-21k-mill](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth) | 448x448 | 23.92 | 16.48 | 88.45 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb8_cub.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.log.json) | + +### Stanford-Cars + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Config | Download | +| :-------: | :--------------------------------------------------: | :--------: | :-------: | :------: | :-------: | :------------------------------------------------: | :---------------------------------------------------: | +| ResNet-50 | [ImageNet-21k-mill](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth) | 448x448 | 23.92 | 16.48 | 92.82 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb8_cars.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cars_20220812-9d85901a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cars_20220812-9d85901a.log.json) | + +## Citation + +``` +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` diff --git a/configs/resnet/metafile.yml b/configs/resnet/metafile.yml new file mode 100644 index 0000000..4be4bf9 --- /dev/null +++ b/configs/resnet/metafile.yml @@ -0,0 +1,365 @@ +Collections: + - Name: ResNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - ResNet + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html + Title: "Deep Residual Learning for Image Recognition" + README: configs/resnet/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnet.py#L383 + Version: v0.15.0 + +Models: + - Name: resnet18_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 560000000 + Parameters: 11170000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 94.82 
+ Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth + Config: configs/resnet/resnet18_8xb16_cifar10.py + - Name: resnet34_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 1160000000 + Parameters: 21280000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth + Config: configs/resnet/resnet34_8xb16_cifar10.py + - Name: resnet50_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 1310000000 + Parameters: 23520000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.55 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth + Config: configs/resnet/resnet50_8xb16_cifar10.py + - Name: resnet101_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 2520000000 + Parameters: 42510000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth + Config: configs/resnet/resnet101_8xb16_cifar10.py + - Name: resnet152_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 3740000000 + Parameters: 58160000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.76 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth + Config: configs/resnet/resnet152_8xb16_cifar10.py + - Name: resnet50_8xb16_cifar100 + Metadata: + Training Data: CIFAR-100 + Epochs: 200 + Batch Size: 128 + FLOPs: 1310000000 + Parameters: 23710000 + In Collection: ResNet + Results: + - Dataset: CIFAR-100 + Metrics: + Top 1 Accuracy: 79.90 + Top 5 Accuracy: 95.19 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth + Config: configs/resnet/resnet50_8xb16_cifar100.py + - Name: resnet18_8xb32_in1k + Metadata: + FLOPs: 1820000000 + Parameters: 11690000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.90 + Top 5 Accuracy: 89.43 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth + Config: configs/resnet/resnet18_8xb32_in1k.py + - Name: resnet34_8xb32_in1k + Metadata: + FLOPs: 3680000000 + Parameters: 2180000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.62 + Top 5 Accuracy: 91.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth + Config: configs/resnet/resnet34_8xb32_in1k.py + - Name: resnet50_8xb32_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.55 + Top 5 Accuracy: 93.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth + Config: 
configs/resnet/resnet50_8xb32_in1k.py + - Name: resnet101_8xb32_in1k + Metadata: + FLOPs: 7850000000 + Parameters: 44550000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.97 + Top 5 Accuracy: 94.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth + Config: configs/resnet/resnet101_8xb32_in1k.py + - Name: resnet152_8xb32_in1k + Metadata: + FLOPs: 11580000000 + Parameters: 60190000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.48 + Top 5 Accuracy: 94.13 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth + Config: configs/resnet/resnet152_8xb32_in1k.py + - Name: resnetv1d50_8xb32_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 25580000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.54 + Top 5 Accuracy: 93.57 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth + Config: configs/resnet/resnetv1d50_8xb32_in1k.py + - Name: resnetv1d101_8xb32_in1k + Metadata: + FLOPs: 8090000000 + Parameters: 44570000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.93 + Top 5 Accuracy: 94.48 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth + Config: configs/resnet/resnetv1d101_8xb32_in1k.py + - Name: resnetv1d152_8xb32_in1k + Metadata: + FLOPs: 11820000000 + Parameters: 60210000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.41 + Top 5 Accuracy: 94.70 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth + Config: configs/resnet/resnetv1d152_8xb32_in1k.py + - Name: resnet50_8xb32-fp16_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + In Collection: ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.30 + Top 5 Accuracy: 93.07 + Weights: https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth + Config: configs/resnet/resnet50_8xb32-fp16_in1k.py + - Name: resnet50_8xb256-rsb-a1-600e_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Training Techniques: + - LAMB + - Weight Decay + - Cosine Annealing + - Mixup + - CutMix + - RepeatAugSampler + - RandAugment + Epochs: 600 + Batch Size: 2048 + In Collection: ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.12 + Top 5 Accuracy: 94.78 + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth + Config: configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py + - Name: resnet50_8xb256-rsb-a2-300e_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Training Techniques: + - LAMB + - Weight Decay + - Cosine Annealing + - Mixup + - CutMix + - RepeatAugSampler + - RandAugment + Epochs: 300 + Batch Size: 2048 + In Collection: ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.55 + Top 5 Accuracy: 94.37 + 
Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.pth + Config: configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py + - Name: resnet50_8xb256-rsb-a3-100e_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Training Techniques: + - LAMB + - Weight Decay + - Cosine Annealing + - Mixup + - CutMix + - RandAugment + Batch Size: 2048 + In Collection: ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.30 + Top 5 Accuracy: 93.80 + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.pth + Config: configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py + - Name: resnetv1c50_8xb32_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 25580000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.01 + Top 5 Accuracy: 93.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.pth + Config: configs/resnet/resnetv1c50_8xb32_in1k.py + - Name: resnetv1c101_8xb32_in1k + Metadata: + FLOPs: 8090000000 + Parameters: 44570000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.30 + Top 5 Accuracy: 94.27 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.pth + Config: configs/resnet/resnetv1c101_8xb32_in1k.py + - Name: resnetv1c152_8xb32_in1k + Metadata: + FLOPs: 11820000000 + Parameters: 60210000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.76 + Top 5 Accuracy: 94.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.pth + Config: configs/resnet/resnetv1c152_8xb32_in1k.py + - Name: resnet50_8xb8_cub + Metadata: + FLOPs: 16480000000 + Parameters: 23920000 + In Collection: ResNet + Results: + - Dataset: CUB-200-2011 + Metrics: + Top 1 Accuracy: 88.45 + Task: Image Classification + Pretrain: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.pth + Config: configs/resnet/resnet50_8xb8_cub.py + - Name: resnet50_8xb8_cars + Metadata: + FLOPs: 16480000000 + Parameters: 23920000 + In Collection: ResNet + Results: + - Dataset: StanfordCars + Metrics: + Top 1 Accuracy: 92.82 + Task: Image Classification + Pretrain: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cars_20220812-9d85901a.pth + Config: configs/resnet/resnet50_8xb8_cars.py diff --git a/configs/resnet/resnet101_8xb16_cifar10.py b/configs/resnet/resnet101_8xb16_cifar10.py new file mode 100644 index 0000000..166a174 --- /dev/null +++ b/configs/resnet/resnet101_8xb16_cifar10.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet101_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet101_8xb32_in1k.py b/configs/resnet/resnet101_8xb32_in1k.py new file mode 100644 index 0000000..388d2cd --- /dev/null +++ b/configs/resnet/resnet101_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + 
'../_base_/models/resnet101.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet101_b16x8_cifar10.py b/configs/resnet/resnet101_b16x8_cifar10.py new file mode 100644 index 0000000..57758f2 --- /dev/null +++ b/configs/resnet/resnet101_b16x8_cifar10.py @@ -0,0 +1,6 @@ +_base_ = 'resnet101_8xb16_cifar10.py' + +_deprecation_ = dict( + expected='resnet101_8xb16_cifar10.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet101_b32x8_imagenet.py b/configs/resnet/resnet101_b32x8_imagenet.py new file mode 100644 index 0000000..8d45adc --- /dev/null +++ b/configs/resnet/resnet101_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet101_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnet101_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet152_8xb16_cifar10.py b/configs/resnet/resnet152_8xb16_cifar10.py new file mode 100644 index 0000000..3f307b6 --- /dev/null +++ b/configs/resnet/resnet152_8xb16_cifar10.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet152_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet152_8xb32_in1k.py b/configs/resnet/resnet152_8xb32_in1k.py new file mode 100644 index 0000000..cc9dc2c --- /dev/null +++ b/configs/resnet/resnet152_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet152.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet152_b16x8_cifar10.py b/configs/resnet/resnet152_b16x8_cifar10.py new file mode 100644 index 0000000..5c76cac --- /dev/null +++ b/configs/resnet/resnet152_b16x8_cifar10.py @@ -0,0 +1,6 @@ +_base_ = 'resnet152_8xb16_cifar10.py' + +_deprecation_ = dict( + expected='resnet152_8xb16_cifar10.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet152_b32x8_imagenet.py b/configs/resnet/resnet152_b32x8_imagenet.py new file mode 100644 index 0000000..133638a --- /dev/null +++ b/configs/resnet/resnet152_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet152_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnet152_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet18_8xb16_cifar10.py b/configs/resnet/resnet18_8xb16_cifar10.py new file mode 100644 index 0000000..c7afa39 --- /dev/null +++ b/configs/resnet/resnet18_8xb16_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet18_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet18_8xb32_in1k.py b/configs/resnet/resnet18_8xb32_in1k.py new file mode 100644 index 0000000..ac452ff --- /dev/null +++ b/configs/resnet/resnet18_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet18.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet18_b16x8_cifar10.py b/configs/resnet/resnet18_b16x8_cifar10.py new file mode 100644 index 0000000..5a25a0e --- /dev/null +++ b/configs/resnet/resnet18_b16x8_cifar10.py @@ -0,0 +1,6 @@ +_base_ = 'resnet18_8xb16_cifar10.py' + +_deprecation_ = dict( + 
expected='resnet18_8xb16_cifar10.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet18_b32x8_imagenet.py b/configs/resnet/resnet18_b32x8_imagenet.py new file mode 100644 index 0000000..e6d08f6 --- /dev/null +++ b/configs/resnet/resnet18_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet18_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnet18_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet34_8xb16_cifar10.py b/configs/resnet/resnet34_8xb16_cifar10.py new file mode 100644 index 0000000..7f5cd51 --- /dev/null +++ b/configs/resnet/resnet34_8xb16_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet34_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet34_8xb32_in1k.py b/configs/resnet/resnet34_8xb32_in1k.py new file mode 100644 index 0000000..7749261 --- /dev/null +++ b/configs/resnet/resnet34_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet34.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet34_b16x8_cifar10.py b/configs/resnet/resnet34_b16x8_cifar10.py new file mode 100644 index 0000000..eec98b2 --- /dev/null +++ b/configs/resnet/resnet34_b16x8_cifar10.py @@ -0,0 +1,6 @@ +_base_ = 'resnet34_8xb16_cifar10.py' + +_deprecation_ = dict( + expected='resnet34_8xb16_cifar10.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet34_b32x8_imagenet.py b/configs/resnet/resnet34_b32x8_imagenet.py new file mode 100644 index 0000000..144613a --- /dev/null +++ b/configs/resnet/resnet34_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet34_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnet34_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py b/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py new file mode 100644 index 0000000..c26245e --- /dev/null +++ b/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs2048_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py b/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py new file mode 100644 index 0000000..2f24f9a --- /dev/null +++ b/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py @@ -0,0 +1,12 @@ +_base_ = ['./resnet50_32xb64-warmup_in1k.py'] +model = dict( + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + loss_weight=1.0, + label_smooth_val=0.1, + num_classes=1000), + )) diff --git a/configs/resnet/resnet50_32xb64-warmup_in1k.py b/configs/resnet/resnet50_32xb64-warmup_in1k.py new file mode 100644 index 0000000..34d5288 --- /dev/null +++ b/configs/resnet/resnet50_32xb64-warmup_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs2048.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py b/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py new file mode 100644 index 0000000..8cc7921 --- /dev/null +++ 
b/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet21k_bs128.py', + '../_base_/schedules/imagenet_bs1024_coslr.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict(head=dict(num_classes=21843)) + +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=90) diff --git a/configs/resnet/resnet50_8xb16-mixup_cifar10.py b/configs/resnet/resnet50_8xb16-mixup_cifar10.py new file mode 100644 index 0000000..2420ebf --- /dev/null +++ b/configs/resnet/resnet50_8xb16-mixup_cifar10.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_cifar_mixup.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb16_cifar10.py b/configs/resnet/resnet50_8xb16_cifar10.py new file mode 100644 index 0000000..669e5de --- /dev/null +++ b/configs/resnet/resnet50_8xb16_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb16_cifar100.py b/configs/resnet/resnet50_8xb16_cifar100.py new file mode 100644 index 0000000..39bd90f --- /dev/null +++ b/configs/resnet/resnet50_8xb16_cifar100.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/resnet50_cifar.py', + '../_base_/datasets/cifar100_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] + +model = dict(head=dict(num_classes=100)) + +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005) +lr_config = dict(policy='step', step=[60, 120, 160], gamma=0.2) diff --git a/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py b/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000..192776f --- /dev/null +++ b/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/imagenet_bs256_rsb_a12.py', + '../_base_/schedules/imagenet_bs2048_rsb.py', + '../_base_/default_runtime.py' +] + +# Model settings +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + drop_path_rate=0.05, + ), + head=dict( + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + )), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.2, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) + +# Dataset settings +sampler = dict(type='RepeatAugSampler') + +# Schedule settings +runner = dict(max_epochs=600) +optimizer = dict( + weight_decay=0.01, + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) diff --git a/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py b/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py new file mode 100644 index 0000000..fcdc880 --- /dev/null +++ b/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/imagenet_bs256_rsb_a12.py', + '../_base_/schedules/imagenet_bs2048_rsb.py', + '../_base_/default_runtime.py' +] + +# Model settings +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + drop_path_rate=0.05, + ), + head=dict(loss=dict(use_sigmoid=True)), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.1, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, 
num_classes=1000, prob=0.5) + ])) + +# Dataset settings +sampler = dict(type='RepeatAugSampler') + +# Schedule settings +runner = dict(max_epochs=300) +optimizer = dict(paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) diff --git a/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py b/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py new file mode 100644 index 0000000..4ff52ac --- /dev/null +++ b/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/imagenet_bs256_rsb_a3.py', + '../_base_/schedules/imagenet_bs2048_rsb.py', + '../_base_/default_runtime.py' +] + +# Model settings +model = dict( + backbone=dict(norm_cfg=dict(type='SyncBN', requires_grad=True)), + head=dict(loss=dict(use_sigmoid=True)), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.1, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) + +# Schedule settings +optimizer = dict( + lr=0.008, paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) diff --git a/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py b/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py new file mode 100644 index 0000000..dab82c6 --- /dev/null +++ b/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py @@ -0,0 +1,12 @@ +_base_ = 'resnet50_8xb32-coslr_in1k.py' + +# Precise BN hook will update the bn stats, so this hook should be executed +# before CheckpointHook, which has priority of 'NORMAL'. So set the +# priority of PreciseBNHook to 'ABOVE_NORMAL' here. +custom_hooks = [ + dict( + type='PreciseBNHook', + num_samples=8192, + interval=1, + priority='ABOVE_NORMAL') +] diff --git a/configs/resnet/resnet50_8xb32-coslr_in1k.py b/configs/resnet/resnet50_8xb32-coslr_in1k.py new file mode 100644 index 0000000..938a114 --- /dev/null +++ b/configs/resnet/resnet50_8xb32-coslr_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb32-cutmix_in1k.py b/configs/resnet/resnet50_8xb32-cutmix_in1k.py new file mode 100644 index 0000000..2f8d0ca --- /dev/null +++ b/configs/resnet/resnet50_8xb32-cutmix_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_cutmix.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py b/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py new file mode 100644 index 0000000..7a6c93c --- /dev/null +++ b/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py @@ -0,0 +1,4 @@ +_base_ = ['./resnet50_8xb32_in1k.py'] + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/configs/resnet/resnet50_8xb32-fp16_in1k.py b/configs/resnet/resnet50_8xb32-fp16_in1k.py new file mode 100644 index 0000000..4245d19 --- /dev/null +++ b/configs/resnet/resnet50_8xb32-fp16_in1k.py @@ -0,0 +1,4 @@ +_base_ = ['./resnet50_8xb32_in1k.py'] + +# fp16 settings +fp16 = dict(loss_scale=512.) 
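The `PreciseBNHook` comment above relies on hook priority ordering: within each stage, hooks with a smaller numeric priority run earlier, which is why `ABOVE_NORMAL` places the hook ahead of `CheckpointHook`'s `NORMAL`. The toy sketch below only illustrates that ordering principle; it is not MMClassification or MMCV code, and the numeric levels (40 and 50) are assumptions mirroring MMCV's convention.

```python
# Illustration of priority-ordered hook execution; the values are assumed,
# not taken from this repository.
PRIORITY = {'ABOVE_NORMAL': 40, 'NORMAL': 50}

hooks = [
    {'name': 'CheckpointHook', 'priority': PRIORITY['NORMAL']},
    {'name': 'PreciseBNHook', 'priority': PRIORITY['ABOVE_NORMAL']},
]

# Sorting by priority puts PreciseBNHook first, so the recomputed BN statistics
# are already in place when CheckpointHook saves the weights.
for hook in sorted(hooks, key=lambda h: h['priority']):
    print(hook['name'])
# PreciseBNHook
# CheckpointHook
```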
diff --git a/configs/resnet/resnet50_8xb32-lbs_in1k.py b/configs/resnet/resnet50_8xb32-lbs_in1k.py new file mode 100644 index 0000000..1c1aa5a --- /dev/null +++ b/configs/resnet/resnet50_8xb32-lbs_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_label_smooth.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb32-mixup_in1k.py b/configs/resnet/resnet50_8xb32-mixup_in1k.py new file mode 100644 index 0000000..2a153d0 --- /dev/null +++ b/configs/resnet/resnet50_8xb32-mixup_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_mixup.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb32_in1k.py b/configs/resnet/resnet50_8xb32_in1k.py new file mode 100644 index 0000000..c32f333 --- /dev/null +++ b/configs/resnet/resnet50_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb8_cars.py b/configs/resnet/resnet50_8xb8_cars.py new file mode 100644 index 0000000..2d2db45 --- /dev/null +++ b/configs/resnet/resnet50_8xb8_cars.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/stanford_cars_bs8_448.py', + '../_base_/schedules/stanford_cars_bs8.py', '../_base_/default_runtime.py' +] + +# use pre-train weight converted from https://github.com/Alibaba-MIIL/ImageNet21K # noqa +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth' # noqa + +model = dict( + type='ImageClassifier', + backbone=dict( + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone')), + head=dict(num_classes=196, )) + +log_config = dict(interval=50) +checkpoint_config = dict( + interval=1, max_keep_ckpts=3) # save last three checkpoints diff --git a/configs/resnet/resnet50_8xb8_cub.py b/configs/resnet/resnet50_8xb8_cub.py new file mode 100644 index 0000000..dffb076 --- /dev/null +++ b/configs/resnet/resnet50_8xb8_cub.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/cub_bs8_448.py', + '../_base_/schedules/cub_bs64.py', '../_base_/default_runtime.py' +] + +# use pre-train weight converted from https://github.com/Alibaba-MIIL/ImageNet21K # noqa +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth' # noqa + +model = dict( + type='ImageClassifier', + backbone=dict( + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone')), + head=dict(num_classes=200, )) + +log_config = dict(interval=20) # log every 20 intervals + +checkpoint_config = dict( + interval=1, max_keep_ckpts=3) # save last three checkpoints diff --git a/configs/resnet/resnet50_b16x8_cifar10.py b/configs/resnet/resnet50_b16x8_cifar10.py new file mode 100644 index 0000000..e40d1ee --- /dev/null +++ b/configs/resnet/resnet50_b16x8_cifar10.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_8xb16_cifar10.py' + +_deprecation_ = dict( + expected='resnet50_8xb16_cifar10.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b16x8_cifar100.py b/configs/resnet/resnet50_b16x8_cifar100.py new file mode 100644 index 0000000..b49b6f4 --- /dev/null +++ 
b/configs/resnet/resnet50_b16x8_cifar100.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_8xb16_cifar100.py' + +_deprecation_ = dict( + expected='resnet50_8xb16_cifar100.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b16x8_cifar10_mixup.py b/configs/resnet/resnet50_b16x8_cifar10_mixup.py new file mode 100644 index 0000000..409a40e --- /dev/null +++ b/configs/resnet/resnet50_b16x8_cifar10_mixup.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_8xb16-mixup_cifar10.py' + +_deprecation_ = dict( + expected='resnet50_8xb16-mixup_cifar10.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b32x8_coslr_imagenet.py b/configs/resnet/resnet50_b32x8_coslr_imagenet.py new file mode 100644 index 0000000..647153b --- /dev/null +++ b/configs/resnet/resnet50_b32x8_coslr_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_8xb32-coslr_in1k.py' + +_deprecation_ = dict( + expected='resnet50_8xb32-coslr_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b32x8_cutmix_imagenet.py b/configs/resnet/resnet50_b32x8_cutmix_imagenet.py new file mode 100644 index 0000000..87b27d5 --- /dev/null +++ b/configs/resnet/resnet50_b32x8_cutmix_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_8xb32-cutmix_in1k.py' + +_deprecation_ = dict( + expected='resnet50_8xb32-cutmix_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b32x8_imagenet.py b/configs/resnet/resnet50_b32x8_imagenet.py new file mode 100644 index 0000000..7d7f69e --- /dev/null +++ b/configs/resnet/resnet50_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnet50_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py new file mode 100644 index 0000000..6e87415 --- /dev/null +++ b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_8xb32-lbs_in1k.py' + +_deprecation_ = dict( + expected='resnet50_8xb32-lbs_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b32x8_mixup_imagenet.py b/configs/resnet/resnet50_b32x8_mixup_imagenet.py new file mode 100644 index 0000000..3405319 --- /dev/null +++ b/configs/resnet/resnet50_b32x8_mixup_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_8xb32-mixup_in1k.py' + +_deprecation_ = dict( + expected='resnet50_8xb32-mixup_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py b/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py new file mode 100644 index 0000000..4724616 --- /dev/null +++ b/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_32xb64-warmup-coslr_in1k.py' + +_deprecation_ = dict( + expected='resnet50_32xb64-warmup-coslr_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b64x32_warmup_imagenet.py b/configs/resnet/resnet50_b64x32_warmup_imagenet.py new file mode 100644 index 0000000..3e35054 --- /dev/null +++ b/configs/resnet/resnet50_b64x32_warmup_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_32xb64-warmup_in1k.py' + +_deprecation_ = dict( + 
expected='resnet50_32xb64-warmup_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py new file mode 100644 index 0000000..2544e33 --- /dev/null +++ b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnet50_32xb64-warmup-lbs_in1k.py' + +_deprecation_ = dict( + expected='resnet50_32xb64-warmup-lbs_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnetv1c101_8xb32_in1k.py b/configs/resnet/resnetv1c101_8xb32_in1k.py new file mode 100644 index 0000000..441aff5 --- /dev/null +++ b/configs/resnet/resnetv1c101_8xb32_in1k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/resnetv1c50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(depth=101)) diff --git a/configs/resnet/resnetv1c152_8xb32_in1k.py b/configs/resnet/resnetv1c152_8xb32_in1k.py new file mode 100644 index 0000000..b9f466f --- /dev/null +++ b/configs/resnet/resnetv1c152_8xb32_in1k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/resnetv1c50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(depth=152)) diff --git a/configs/resnet/resnetv1c50_8xb32_in1k.py b/configs/resnet/resnetv1c50_8xb32_in1k.py new file mode 100644 index 0000000..aa1c8b6 --- /dev/null +++ b/configs/resnet/resnetv1c50_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1c50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnetv1d101_8xb32_in1k.py b/configs/resnet/resnetv1d101_8xb32_in1k.py new file mode 100644 index 0000000..b16ca86 --- /dev/null +++ b/configs/resnet/resnetv1d101_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d101.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnetv1d101_b32x8_imagenet.py b/configs/resnet/resnetv1d101_b32x8_imagenet.py new file mode 100644 index 0000000..e736937 --- /dev/null +++ b/configs/resnet/resnetv1d101_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnetv1d101_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnetv1d101_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnetv1d152_8xb32_in1k.py b/configs/resnet/resnetv1d152_8xb32_in1k.py new file mode 100644 index 0000000..76926dd --- /dev/null +++ b/configs/resnet/resnetv1d152_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d152.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnetv1d152_b32x8_imagenet.py b/configs/resnet/resnetv1d152_b32x8_imagenet.py new file mode 100644 index 0000000..88e5b9f --- /dev/null +++ b/configs/resnet/resnetv1d152_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnetv1d152_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnetv1d152_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnet/resnetv1d50_8xb32_in1k.py 
b/configs/resnet/resnetv1d50_8xb32_in1k.py new file mode 100644 index 0000000..208bde4 --- /dev/null +++ b/configs/resnet/resnetv1d50_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnetv1d50_b32x8_imagenet.py b/configs/resnet/resnetv1d50_b32x8_imagenet.py new file mode 100644 index 0000000..5455e05 --- /dev/null +++ b/configs/resnet/resnetv1d50_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnetv1d50_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnetv1d50_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnext/README.md b/configs/resnext/README.md new file mode 100644 index 0000000..56df277 --- /dev/null +++ b/configs/resnext/README.md @@ -0,0 +1,36 @@ +# ResNeXt + +> [Aggregated Residual Transformations for Deep Neural Networks](https://openaccess.thecvf.com/content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html) + + + +## Abstract + +We present a simple, highly modularized network architecture for image classification. Our network is constructed by repeating a building block that aggregates a set of transformations with the same topology. Our simple design results in a homogeneous, multi-branch architecture that has only a few hyper-parameters to set. This strategy exposes a new dimension, which we call "cardinality" (the size of the set of transformations), as an essential factor in addition to the dimensions of depth and width. On the ImageNet-1K dataset, we empirically show that even under the restricted condition of maintaining complexity, increasing cardinality is able to improve classification accuracy. Moreover, increasing cardinality is more effective than going deeper or wider when we increase the capacity. Our models, named ResNeXt, are the foundations of our entry to the ILSVRC 2016 classification task in which we secured 2nd place. We further investigate ResNeXt on an ImageNet-5K set and the COCO detection set, also showing better results than its ResNet counterpart. The code and models are publicly available online. + +
+ +
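+The aggregated transformations described above are typically realized as a grouped convolution whose number of groups equals the cardinality. The sketch below is a rough PyTorch-style bottleneck for illustration only; it is not the `ResNeXt` backbone shipped in this repository, and the channel widths (256/128) and cardinality (32) are assumptions matching the paper's common setting.
+
+```python
+import torch
+import torch.nn as nn
+
+class ResNeXtBottleneck(nn.Module):
+    """Toy ResNeXt-style bottleneck; `groups` plays the role of cardinality."""
+
+    def __init__(self, channels=256, bottleneck=128, groups=32):
+        super().__init__()
+        self.block = nn.Sequential(
+            nn.Conv2d(channels, bottleneck, 1, bias=False),
+            nn.BatchNorm2d(bottleneck), nn.ReLU(inplace=True),
+            # one grouped 3x3 conv realizes the 32 parallel transformations
+            nn.Conv2d(bottleneck, bottleneck, 3, padding=1, groups=groups, bias=False),
+            nn.BatchNorm2d(bottleneck), nn.ReLU(inplace=True),
+            nn.Conv2d(bottleneck, channels, 1, bias=False),
+            nn.BatchNorm2d(channels))
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        return self.relu(x + self.block(x))  # residual connection
+
+x = torch.randn(1, 256, 56, 56)
+print(ResNeXtBottleneck()(x).shape)  # torch.Size([1, 256, 56, 56])
+```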
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------: | :-------: | :------: | :-------: | :-------: | :-----------------------------------------------------------------------: | :-------------------------------------------------------------------------: | +| ResNeXt-32x4d-50 | 25.03 | 4.27 | 77.90 | 93.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext50-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.log.json) | +| ResNeXt-32x4d-101 | 44.18 | 8.03 | 78.61 | 94.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.log.json) | +| ResNeXt-32x8d-101 | 88.79 | 16.5 | 79.27 | 94.58 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101-32x8d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.log.json) | +| ResNeXt-32x4d-152 | 59.95 | 11.8 | 78.88 | 94.33 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext152-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.log.json) | + +## Citation + +``` +@inproceedings{xie2017aggregated, + title={Aggregated residual transformations for deep neural networks}, + author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1492--1500}, + year={2017} +} +``` diff --git a/configs/resnext/metafile.yml b/configs/resnext/metafile.yml new file mode 100644 index 0000000..c68e7f9 --- /dev/null +++ b/configs/resnext/metafile.yml @@ -0,0 +1,73 @@ +Collections: + - Name: ResNeXt + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - ResNeXt + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html + Title: "Aggregated Residual Transformations for Deep Neural Networks" + README: configs/resnext/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnext.py#L90 + Version: v0.15.0 + +Models: + - Name: resnext50-32x4d_8xb32_in1k + Metadata: + FLOPs: 4270000000 + Parameters: 25030000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.90 + Top 5 Accuracy: 93.66 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth + Config: configs/resnext/resnext50-32x4d_8xb32_in1k.py + - Name: resnext101-32x4d_8xb32_in1k + Metadata: + FLOPs: 8030000000 + Parameters: 44180000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.61 + Top 5 Accuracy: 94.17 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth + Config: configs/resnext/resnext101-32x4d_8xb32_in1k.py + - Name: resnext101-32x8d_8xb32_in1k + Metadata: + FLOPs: 16500000000 + Parameters: 88790000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.27 + Top 5 Accuracy: 94.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth + Config: configs/resnext/resnext101-32x8d_8xb32_in1k.py + - Name: resnext152-32x4d_8xb32_in1k + Metadata: + FLOPs: 11800000000 + Parameters: 59950000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.88 + Top 5 Accuracy: 94.33 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth + Config: configs/resnext/resnext152-32x4d_8xb32_in1k.py diff --git a/configs/resnext/resnext101-32x4d_8xb32_in1k.py b/configs/resnext/resnext101-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..970aa60 --- /dev/null +++ b/configs/resnext/resnext101-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext101_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnext/resnext101-32x8d_8xb32_in1k.py b/configs/resnext/resnext101-32x8d_8xb32_in1k.py new file mode 100644 index 0000000..315d05f --- /dev/null +++ b/configs/resnext/resnext101-32x8d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext101_32x8d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnext/resnext101_32x4d_b32x8_imagenet.py b/configs/resnext/resnext101_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000..07d66c3 --- /dev/null +++ b/configs/resnext/resnext101_32x4d_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnext101-32x4d_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnext101-32x4d_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnext/resnext101_32x8d_b32x8_imagenet.py b/configs/resnext/resnext101_32x8d_b32x8_imagenet.py new file mode 100644 index 0000000..071ca60 --- /dev/null +++ b/configs/resnext/resnext101_32x8d_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnext101-32x8d_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnext101-32x8d_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnext/resnext152-32x4d_8xb32_in1k.py b/configs/resnext/resnext152-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..9c13731 --- /dev/null +++ b/configs/resnext/resnext152-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext152_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', 
'../_base_/default_runtime.py' +] diff --git a/configs/resnext/resnext152_32x4d_b32x8_imagenet.py b/configs/resnext/resnext152_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000..6d05c8b --- /dev/null +++ b/configs/resnext/resnext152_32x4d_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnext152-32x4d_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnext152-32x4d_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/resnext/resnext50-32x4d_8xb32_in1k.py b/configs/resnext/resnext50-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..bd9c9fc --- /dev/null +++ b/configs/resnext/resnext50-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext50_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnext/resnext50_32x4d_b32x8_imagenet.py b/configs/resnext/resnext50_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000..92ae063 --- /dev/null +++ b/configs/resnext/resnext50_32x4d_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'resnext50-32x4d_8xb32_in1k.py' + +_deprecation_ = dict( + expected='resnext50-32x4d_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/seresnet/README.md b/configs/seresnet/README.md new file mode 100644 index 0000000..ccfd1d1 --- /dev/null +++ b/configs/seresnet/README.md @@ -0,0 +1,34 @@ +# SE-ResNet + +> [Squeeze-and-Excitation Networks](https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper.html) + + + +## Abstract + +The central building block of convolutional neural networks (CNNs) is the convolution operator, which enables networks to construct informative features by fusing both spatial and channel-wise information within local receptive fields at each layer. A broad range of prior research has investigated the spatial component of this relationship, seeking to strengthen the representational power of a CNN by enhancing the quality of spatial encodings throughout its feature hierarchy. In this work, we focus instead on the channel relationship and propose a novel architectural unit, which we term the "Squeeze-and-Excitation" (SE) block, that adaptively recalibrates channel-wise feature responses by explicitly modelling interdependencies between channels. We show that these blocks can be stacked together to form SENet architectures that generalise extremely effectively across different datasets. We further demonstrate that SE blocks bring significant improvements in performance for existing state-of-the-art CNNs at slight additional computational cost. Squeeze-and-Excitation Networks formed the foundation of our ILSVRC 2017 classification submission which won first place and reduced the top-5 error to 2.251%, surpassing the winning entry of 2016 by a relative improvement of ~25%. + +
+ +
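+As a quick illustration of the squeeze-and-excitation idea above, the sketch below recalibrates channel responses with a global average pool followed by two fully connected layers. It is for illustration only and is not this repository's `SEResNet` implementation; the reduction ratio of 16 follows the paper's default, and the class name is made up for the example.
+
+```python
+import torch
+import torch.nn as nn
+
+class SEBlock(nn.Module):
+    """Toy SE block: squeeze (global pool), excite (two FCs), then rescale."""
+
+    def __init__(self, channels, ratio=16):
+        super().__init__()
+        self.pool = nn.AdaptiveAvgPool2d(1)
+        self.fc = nn.Sequential(
+            nn.Linear(channels, channels // ratio), nn.ReLU(inplace=True),
+            nn.Linear(channels // ratio, channels), nn.Sigmoid())
+
+    def forward(self, x):
+        n, c, _, _ = x.shape
+        weights = self.fc(self.pool(x).view(n, c)).view(n, c, 1, 1)
+        return x * weights  # channel-wise recalibration
+
+x = torch.randn(2, 64, 32, 32)
+print(SEBlock(64)(x).shape)  # torch.Size([2, 64, 32, 32])
+```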
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: | +| SE-ResNet-50 | 28.09 | 4.13 | 77.74 | 93.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200708-657b3c36.log.json) | +| SE-ResNet-101 | 49.33 | 7.86 | 78.26 | 94.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200708-038a4d04.log.json) | + +## Citation + +``` +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` diff --git a/configs/seresnet/metafile.yml b/configs/seresnet/metafile.yml new file mode 100644 index 0000000..7d2a381 --- /dev/null +++ b/configs/seresnet/metafile.yml @@ -0,0 +1,47 @@ +Collections: + - Name: SEResNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 140 + Batch Size: 256 + Architecture: + - ResNet + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper.html + Title: "Squeeze-and-Excitation Networks" + README: configs/seresnet/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/seresnet.py#L58 + Version: v0.15.0 + +Models: + - Name: seresnet50_8xb32_in1k + Metadata: + FLOPs: 4130000000 + Parameters: 28090000 + In Collection: SEResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.74 + Top 5 Accuracy: 93.84 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth + Config: configs/seresnet/seresnet50_8xb32_in1k.py + - Name: seresnet101_8xb32_in1k + Metadata: + FLOPs: 7860000000 + Parameters: 49330000 + In Collection: SEResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.26 + Top 5 Accuracy: 94.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth + Config: configs/seresnet/seresnet101_8xb32_in1k.py diff --git a/configs/seresnet/seresnet101_8xb32_in1k.py b/configs/seresnet/seresnet101_8xb32_in1k.py new file mode 100644 index 0000000..8be39e7 --- /dev/null +++ b/configs/seresnet/seresnet101_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnet101.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/seresnet/seresnet101_b32x8_imagenet.py 
b/configs/seresnet/seresnet101_b32x8_imagenet.py new file mode 100644 index 0000000..46daa09 --- /dev/null +++ b/configs/seresnet/seresnet101_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'seresnet101_8xb32_in1k.py' + +_deprecation_ = dict( + expected='seresnet101_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/seresnet/seresnet50_8xb32_in1k.py b/configs/seresnet/seresnet50_8xb32_in1k.py new file mode 100644 index 0000000..19082bd --- /dev/null +++ b/configs/seresnet/seresnet50_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/seresnet50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_140e.py', + '../_base_/default_runtime.py' +] diff --git a/configs/seresnet/seresnet50_b32x8_imagenet.py b/configs/seresnet/seresnet50_b32x8_imagenet.py new file mode 100644 index 0000000..0fb9df3 --- /dev/null +++ b/configs/seresnet/seresnet50_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'seresnet50_8xb32_in1k.py' + +_deprecation_ = dict( + expected='seresnet50_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py b/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..0177830 --- /dev/null +++ b/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnext101_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/seresnet/seresnext101_32x4d_b32x8_imagenet.py b/configs/seresnet/seresnext101_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000..cb99ec6 --- /dev/null +++ b/configs/seresnet/seresnext101_32x4d_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'seresnext101-32x4d_8xb32_in1k.py' + +_deprecation_ = dict( + expected='seresnext101-32x4d_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py b/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..4d593e4 --- /dev/null +++ b/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnext50_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/seresnet/seresnext50_32x4d_b32x8_imagenet.py b/configs/seresnet/seresnext50_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000..4922960 --- /dev/null +++ b/configs/seresnet/seresnext50_32x4d_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'seresnext50-32x4d_8xb32_in1k.py' + +_deprecation_ = dict( + expected='seresnext50-32x4d_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/shufflenet_v1/README.md b/configs/shufflenet_v1/README.md new file mode 100644 index 0000000..fd13127 --- /dev/null +++ b/configs/shufflenet_v1/README.md @@ -0,0 +1,33 @@ +# ShuffleNet V1 + +> [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices](https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html) + + + +## Abstract + +We introduce an extremely computation-efficient CNN architecture named ShuffleNet, which is designed specially for mobile devices with very limited computing power (e.g., 10-150 MFLOPs). 
The new architecture utilizes two new operations, pointwise group convolution and channel shuffle, to greatly reduce computation cost while maintaining accuracy. Experiments on ImageNet classification and MS COCO object detection demonstrate the superior performance of ShuffleNet over other structures, e.g. lower top-1 error (absolute 7.8%) than recent MobileNet on ImageNet classification task, under the computation budget of 40 MFLOPs. On an ARM-based mobile device, ShuffleNet achieves ~13x actual speedup over AlexNet while maintaining comparable accuracy. + +
+ +
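+The channel shuffle operation mentioned above comes down to a reshape and transpose over the channel dimension, so that subsequent grouped convolutions can exchange information across groups. A rough sketch for illustration only (not the backbone code in this repository):
+
+```python
+import torch
+
+def channel_shuffle(x, groups):
+    """Interleave channels across groups: (N, C, H, W) -> (N, C, H, W)."""
+    n, c, h, w = x.shape
+    assert c % groups == 0
+    x = x.view(n, groups, c // groups, h, w).transpose(1, 2).contiguous()
+    return x.view(n, c, h, w)
+
+x = torch.arange(6.).view(1, 6, 1, 1)
+print(channel_shuffle(x, groups=3).flatten().tolist())
+# [0.0, 2.0, 4.0, 1.0, 3.0, 5.0]  (channels from different groups interleaved)
+```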
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------: | :--------------------------------------------------------------------: | +| ShuffleNetV1 1.0x (group=3) | 1.87 | 0.146 | 68.13 | 87.81 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.log.json) | + +## Citation + +``` +@inproceedings{zhang2018shufflenet, + title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, + author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={6848--6856}, + year={2018} +} +``` diff --git a/configs/shufflenet_v1/metafile.yml b/configs/shufflenet_v1/metafile.yml new file mode 100644 index 0000000..2cfffa1 --- /dev/null +++ b/configs/shufflenet_v1/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: Shufflenet V1 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + - No BN decay + Training Resources: 8x 1080 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shufflenet V1 + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html + Title: "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" + README: configs/shufflenet_v1/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v1.py#L152 + Version: v0.15.0 + +Models: + - Name: shufflenet-v1-1x_16xb64_in1k + Metadata: + FLOPs: 146000000 + Parameters: 1870000 + In Collection: Shufflenet V1 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 68.13 + Top 5 Accuracy: 87.81 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth + Config: configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py diff --git a/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py b/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py new file mode 100644 index 0000000..58e45f1 --- /dev/null +++ b/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/shufflenet_v1_1x.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py', + '../_base_/default_runtime.py' +] diff --git a/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py b/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py new file mode 100644 index 0000000..0312197 --- /dev/null +++ b/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'shufflenet-v1-1x_16xb64_in1k.py' + +_deprecation_ = dict( + expected='shufflenet-v1-1x_16xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/shufflenet_v2/README.md b/configs/shufflenet_v2/README.md new file mode 100644 index 
0000000..7827154 --- /dev/null +++ b/configs/shufflenet_v2/README.md @@ -0,0 +1,33 @@ +# ShuffleNet V2 + +> [Shufflenet v2: Practical guidelines for efficient cnn architecture design](https://openaccess.thecvf.com/content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf) + + + +## Abstract + +Currently, the neural network architecture design is mostly guided by the *indirect* metric of computation complexity, i.e., FLOPs. However, the *direct* metric, e.g., speed, also depends on the other factors such as memory access cost and platform characterics. Thus, this work proposes to evaluate the direct metric on the target platform, beyond only considering FLOPs. Based on a series of controlled experiments, this work derives several practical *guidelines* for efficient network design. Accordingly, a new architecture is presented, called *ShuffleNet V2*. Comprehensive ablation experiments verify that our model is the state-of-the-art in terms of speed and accuracy tradeoff. + +
+ +
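+Following the direct-metric argument above, speed is best measured on the target platform instead of being inferred from FLOPs. The sketch below times CPU inference using torchvision's `shufflenet_v2_x1_0` as a stand-in for this repository's backbone; the warm-up and iteration counts are arbitrary choices made for the example.
+
+```python
+import time
+
+import torch
+import torchvision
+
+model = torchvision.models.shufflenet_v2_x1_0().eval()
+x = torch.randn(1, 3, 224, 224)
+
+with torch.no_grad():
+    for _ in range(5):  # warm-up
+        model(x)
+    start = time.perf_counter()
+    for _ in range(20):
+        model(x)
+latency = (time.perf_counter() - start) / 20 * 1000
+print(f'{latency:.1f} ms per image on this machine')
+```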
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------: | :-------: | :------: | :-------: | :-------: | :-----------------------------------------------------------------------: | :-------------------------------------------------------------------------: | +| ShuffleNetV2 1.0x | 2.28 | 0.149 | 69.55 | 88.92 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200804-8860eec9.log.json) | + +## Citation + +``` +@inproceedings{ma2018shufflenet, + title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, + author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={116--131}, + year={2018} +} +``` diff --git a/configs/shufflenet_v2/metafile.yml b/configs/shufflenet_v2/metafile.yml new file mode 100644 index 0000000..a06322d --- /dev/null +++ b/configs/shufflenet_v2/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: Shufflenet V2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + - No BN decay + Training Resources: 8x 1080 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shufflenet V2 + Paper: + URL: https://openaccess.thecvf.com/content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf + Title: "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + README: configs/shufflenet_v2/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v2.py#L134 + Version: v0.15.0 + +Models: + - Name: shufflenet-v2-1x_16xb64_in1k + Metadata: + FLOPs: 149000000 + Parameters: 2280000 + In Collection: Shufflenet V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.55 + Top 5 Accuracy: 88.92 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth + Config: configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py diff --git a/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py b/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py new file mode 100644 index 0000000..a106ab8 --- /dev/null +++ b/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/shufflenet_v2_1x.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py', + '../_base_/default_runtime.py' +] diff --git a/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py b/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py new file mode 100644 index 0000000..c0938b0 --- /dev/null +++ b/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'shufflenet-v2-1x_16xb64_in1k.py' + +_deprecation_ = dict( + expected='shufflenet-v2-1x_16xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/swin_transformer/README.md b/configs/swin_transformer/README.md new file mode 100644 index 0000000..86975ec --- /dev/null 
+++ b/configs/swin_transformer/README.md @@ -0,0 +1,60 @@ +# Swin Transformer + +> [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/pdf/2103.14030.pdf) + + + +## Abstract + +This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with **S**hifted **win**dows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. + +
+ +
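+The shifted-window scheme above limits self-attention to non-overlapping local windows. The sketch below shows only the window-partition step and is for illustration; it is not this repository's `SwinTransformer` code, and the 7x7 window, 56x56 resolution and 96 channels are assumptions matching the paper's Swin-T defaults.
+
+```python
+import torch
+
+def window_partition(x, window_size=7):
+    """Split (N, H, W, C) features into (num_windows * N, ws, ws, C) tiles."""
+    n, h, w, c = x.shape
+    x = x.view(n, h // window_size, window_size, w // window_size, window_size, c)
+    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, c)
+
+feat = torch.randn(1, 56, 56, 96)   # stage-1 feature map of a Swin-T-like model
+windows = window_partition(feat)    # self-attention would run inside each tile
+print(windows.shape)                # torch.Size([64, 7, 7, 96])
+```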
+ +## Results and models + +### ImageNet-21k + +The pre-trained models on ImageNet-21k are used to fine-tune, and therefore don't have evaluation results. + +| Model | resolution | Params(M) | Flops(G) | Download | +| :----: | :--------: | :-------: | :------: | :---------------------------------------------------------------------------------------------------------------------: | +| Swin-B | 224x224 | 86.74 | 15.14 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-base_3rdparty_in21k.pth) | +| Swin-B | 384x384 | 86.88 | 44.49 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-base_3rdparty_in21k-384px.pth) | +| Swin-L | 224x224 | 195.00 | 34.04 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k.pth) | +| Swin-L | 384x384 | 195.20 | 100.04 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-base_3rdparty_in21k-384px.pth) | + +### ImageNet-1k + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------: | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------: | :-------------------------------------------------------------------: | +| Swin-T | From scratch | 224x224 | 28.29 | 4.36 | 81.18 | 95.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-tiny_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925.log.json) | +| Swin-S | From scratch | 224x224 | 49.61 | 8.52 | 83.02 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-small_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219.log.json) | +| Swin-B | From scratch | 224x224 | 87.77 | 15.14 | 83.36 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742.log.json) | +| Swin-S\* | From scratch | 224x224 | 49.61 | 8.52 | 83.21 | 96.25 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-small_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth) | +| Swin-B\* | From scratch | 224x224 | 87.77 | 15.14 | 83.42 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth) | +| Swin-B\* | From scratch | 384x384 | 87.90 | 44.49 | 84.49 | 96.95 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-base_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth) | +| Swin-B\* | ImageNet-21k | 224x224 | 87.77 | 15.14 | 85.16 | 97.50 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth) | +| Swin-B\* | ImageNet-21k | 384x384 | 87.90 | 44.49 | 86.44 | 98.05 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-base_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth) | +| Swin-L\* | ImageNet-21k | 224x224 | 196.53 | 34.04 | 86.24 | 97.88 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth) | +| Swin-L\* | ImageNet-21k | 384x384 | 196.74 | 100.04 | 87.25 | 98.25 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-large_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth) | + +*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer#main-results-on-imagenet-with-pretrained-models). The config files of these models are only for validation. 
We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +### CUB-200-2011 + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Config | Download | +| :----: | :---------------------------------------------------: | :--------: | :-------: | :------: | :-------: | :-------------------------------------------------: | :----------------------------------------------------: | +| Swin-L | [ImageNet-21k](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-base_3rdparty_in21k-384px.pth) | 384x384 | 195.51 | 100.04 | 91.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-large_8xb8_cub_384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.log.json) | + +## Citation + +``` +@article{liu2021Swin, + title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + journal={arXiv preprint arXiv:2103.14030}, + year={2021} +} +``` diff --git a/configs/swin_transformer/metafile.yml b/configs/swin_transformer/metafile.yml new file mode 100644 index 0000000..b44c1ba --- /dev/null +++ b/configs/swin_transformer/metafile.yml @@ -0,0 +1,201 @@ +Collections: + - Name: Swin-Transformer + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 16x V100 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shift Window Multihead Self Attention + Paper: + URL: https://arxiv.org/pdf/2103.14030.pdf + Title: "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows" + README: configs/swin_transformer/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/swin_transformer.py#L176 + Version: v0.15.0 + +Models: + - Name: swin-tiny_16xb64_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 28290000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.18 + Top 5 Accuracy: 95.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth + Config: configs/swin_transformer/swin-tiny_16xb64_in1k.py + - Name: swin-small_16xb64_in1k + Metadata: + FLOPs: 8520000000 + Parameters: 49610000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.02 + Top 5 Accuracy: 96.29 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth + Config: configs/swin_transformer/swin-small_16xb64_in1k.py + - Name: swin-base_16xb64_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.36 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth + Config: configs/swin_transformer/swin-base_16xb64_in1k.py + - Name: swin-tiny_3rdparty_in1k + Metadata: + FLOPs: 4360000000 + 
Parameters: 28290000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.18 + Top 5 Accuracy: 95.52 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_tiny_patch4_window7_224-160bb0a5.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-tiny_16xb64_in1k.py + - Name: swin-small_3rdparty_in1k + Metadata: + FLOPs: 8520000000 + Parameters: 49610000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.21 + Top 5 Accuracy: 96.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-small_16xb64_in1k.py + - Name: swin-base_3rdparty_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.42 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-base_16xb64_in1k.py + - Name: swin-base_3rdparty_in1k-384 + Metadata: + FLOPs: 44490000000 + Parameters: 87900000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.49 + Top 5 Accuracy: 96.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-base_16xb64_in1k-384px.py + - Name: swin-base_in21k-pre-3rdparty_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.16 + Top 5 Accuracy: 97.50 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-base_16xb64_in1k.py + - Name: swin-base_in21k-pre-3rdparty_in1k-384 + Metadata: + FLOPs: 44490000000 + 
Parameters: 87900000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.44 + Top 5 Accuracy: 98.05 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-base_16xb64_in1k-384px.py + - Name: swin-large_in21k-pre-3rdparty_in1k + Metadata: + FLOPs: 34040000000 + Parameters: 196530000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.24 + Top 5 Accuracy: 97.88 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-large_16xb64_in1k.py + - Name: swin-large_in21k-pre-3rdparty_in1k-384 + Metadata: + FLOPs: 100040000000 + Parameters: 196740000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.25 + Top 5 Accuracy: 98.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-large_16xb64_in1k-384px.py + - Name: swin-large_8xb8_cub_384px + Metadata: + FLOPs: 100040000000 + Parameters: 195510000 + In Collection: Swin-Transformer + Results: + - Dataset: CUB-200-2011 + Metrics: + Top 1 Accuracy: 91.87 + Task: Image Classification + Pretrain: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.pth + Config: configs/swin_transformer/swin-large_8xb8_cub_384px.py diff --git a/configs/swin_transformer/swin-base_16xb64_in1k-384px.py b/configs/swin_transformer/swin-base_16xb64_in1k-384px.py new file mode 100644 index 0000000..711a0d6 --- /dev/null +++ b/configs/swin_transformer/swin-base_16xb64_in1k-384px.py @@ -0,0 +1,7 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer/base_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer/swin-base_16xb64_in1k.py b/configs/swin_transformer/swin-base_16xb64_in1k.py new file mode 100644 index 0000000..2a4548a --- /dev/null +++ b/configs/swin_transformer/swin-base_16xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer/base_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + 
'../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer/swin-large_16xb64_in1k-384px.py b/configs/swin_transformer/swin-large_16xb64_in1k-384px.py new file mode 100644 index 0000000..a7f0ad2 --- /dev/null +++ b/configs/swin_transformer/swin-large_16xb64_in1k-384px.py @@ -0,0 +1,7 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer/large_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer/swin-large_16xb64_in1k.py b/configs/swin_transformer/swin-large_16xb64_in1k.py new file mode 100644 index 0000000..4e875c5 --- /dev/null +++ b/configs/swin_transformer/swin-large_16xb64_in1k.py @@ -0,0 +1,7 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer/large_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer/swin-large_8xb8_cub_384px.py b/configs/swin_transformer/swin-large_8xb8_cub_384px.py new file mode 100644 index 0000000..d113716 --- /dev/null +++ b/configs/swin_transformer/swin-large_8xb8_cub_384px.py @@ -0,0 +1,37 @@ +_base_ = [ + '../_base_/models/swin_transformer/large_384.py', + '../_base_/datasets/cub_bs8_384.py', '../_base_/schedules/cub_bs64.py', + '../_base_/default_runtime.py' +] + +# model settings +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa +model = dict( + type='ImageClassifier', + backbone=dict( + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone')), + head=dict(num_classes=200, )) + +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=5e-6, + weight_decay=0.0005, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=paramwise_cfg) +optimizer_config = dict(grad_clip=dict(max_norm=5.0), _delete_=True) + +log_config = dict(interval=20) # log every 20 intervals + +checkpoint_config = dict( + interval=1, max_keep_ckpts=3) # save last three checkpoints diff --git a/configs/swin_transformer/swin-small_16xb64_in1k.py b/configs/swin_transformer/swin-small_16xb64_in1k.py new file mode 100644 index 0000000..aa1fa21 --- /dev/null +++ b/configs/swin_transformer/swin-small_16xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer/small_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer/swin-tiny_16xb64_in1k.py b/configs/swin_transformer/swin-tiny_16xb64_in1k.py new file mode 100644 index 0000000..e1ed022 --- /dev/null +++ b/configs/swin_transformer/swin-tiny_16xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer/tiny_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py b/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py new file mode 100644 index 0000000..912c379 --- /dev/null +++ 
b/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'swin-base_16xb64_in1k.py' + +_deprecation_ = dict( + expected='swin-base_16xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/swin_transformer/swin_base_384_evalonly_imagenet.py b/configs/swin_transformer/swin_base_384_evalonly_imagenet.py new file mode 100644 index 0000000..9ed5888 --- /dev/null +++ b/configs/swin_transformer/swin_base_384_evalonly_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'swin-base_16xb64_in1k-384px.py' + +_deprecation_ = dict( + expected='swin-base_16xb64_in1k-384px.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/swin_transformer/swin_large_224_evalonly_imagenet.py b/configs/swin_transformer/swin_large_224_evalonly_imagenet.py new file mode 100644 index 0000000..5ebb54a --- /dev/null +++ b/configs/swin_transformer/swin_large_224_evalonly_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'swin-large_16xb64_in1k.py' + +_deprecation_ = dict( + expected='swin-large_16xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/swin_transformer/swin_large_384_evalonly_imagenet.py b/configs/swin_transformer/swin_large_384_evalonly_imagenet.py new file mode 100644 index 0000000..9a59f5b --- /dev/null +++ b/configs/swin_transformer/swin_large_384_evalonly_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'swin-large_16xb64_in1k-384px.py' + +_deprecation_ = dict( + expected='swin-large_16xb64_in1k-384px.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py b/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py new file mode 100644 index 0000000..a747aa4 --- /dev/null +++ b/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'swin-small_16xb64_in1k.py' + +_deprecation_ = dict( + expected='swin-small_16xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py b/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py new file mode 100644 index 0000000..2160eb9 --- /dev/null +++ b/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'swin-tiny_16xb64_in1k.py' + +_deprecation_ = dict( + expected='swin-tiny_16xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/swin_transformer_v2/README.md b/configs/swin_transformer_v2/README.md new file mode 100644 index 0000000..31d1aff --- /dev/null +++ b/configs/swin_transformer_v2/README.md @@ -0,0 +1,58 @@ +# Swin Transformer V2 + +> [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883.pdf) + + + +## Abstract + +Large-scale NLP models have been shown to significantly improve the performance on language tasks with no signs of saturation. They also demonstrate amazing few-shot capabilities like that of human beings. This paper aims to explore large-scale models in computer vision. We tackle three major issues in training and application of large vision models, including training instability, resolution gaps between pre-training and fine-tuning, and hunger on labelled data. 
Three main techniques are proposed: 1) a residual-post-norm method combined with cosine attention to improve training stability; 2) A log-spaced continuous position bias method to effectively transfer models pre-trained using low-resolution images to downstream tasks with high-resolution inputs; 3) A self-supervised pre-training method, SimMIM, to reduce the needs of vast labeled images. Through these techniques, this paper successfully trained a 3 billion-parameter Swin Transformer V2 model, which is the largest dense vision model to date, and makes it capable of training with images of up to 1,536×1,536 resolution. It set new performance records on 4 representative vision tasks, including ImageNet-V2 image classification, COCO object detection, ADE20K semantic segmentation, and Kinetics-400 video action classification. Also note our training is much more efficient than that in Google's billion-level visual models, which consumes 40 times less labelled data and 40 times less training time. + +
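As a rough, non-authoritative illustration of the scaled cosine attention mentioned in the abstract, the sketch below computes attention from the cosine similarity between queries and keys with a learnable, clamped temperature. It is a single-head toy module with placeholder names, not the MMClassification Swin Transformer V2 implementation, and it omits windowing and the log-spaced position bias.

```python
# Toy scaled cosine attention (single head, no windowing, no position bias).
# Illustration only -- not the MMClassification Swin Transformer V2 code.
import math

import torch
import torch.nn as nn
import torch.nn.functional as F


class CosineAttention(nn.Module):

    def __init__(self, dim):
        super().__init__()
        self.qkv = nn.Linear(dim, dim * 3)
        self.proj = nn.Linear(dim, dim)
        # Learnable temperature, clamped so the effective scale stays bounded.
        self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(1)))

    def forward(self, x):  # x: (batch, num_tokens, dim)
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        # Cosine similarity replaces the usual dot product / sqrt(d).
        attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
        scale = torch.clamp(self.logit_scale, max=math.log(100.0)).exp()
        return self.proj((attn * scale).softmax(dim=-1) @ v)


out = CosineAttention(96)(torch.randn(2, 49, 96))  # -> (2, 49, 96)
```

Bounding the similarity to [-1, 1] and learning the temperature is what keeps the attention logits stable as the model and resolution grow, which is the training-stability point made above.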
+ +
+ +## Results and models + +### ImageNet-21k + +The pre-trained models on ImageNet-21k are used to fine-tune, and therefore don't have evaluation results. + +| Model | resolution | Params(M) | Flops(G) | Download | +| :------: | :--------: | :-------: | :------: | :--------------------------------------------------------------------------------------------------------------------------------------: | +| Swin-B\* | 192x192 | 87.92 | 8.51 | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-w12_3rdparty_in21k-192px_20220803-f7dc9763.pth) | +| Swin-L\* | 192x192 | 196.74 | 19.04 | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-large-w12_3rdparty_in21k-192px_20220803-d9073fee.pth) | + +### ImageNet-1k + +| Model | Pretrain | resolution | window | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------: | :----------: | :--------: | :----: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------: | :----------------------------------------------------------------: | +| Swin-T\* | From scratch | 256x256 | 8x8 | 28.35 | 4.35 | 81.76 | 95.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w8_3rdparty_in1k-256px_20220803-e318968f.pth) | +| Swin-T\* | From scratch | 256x256 | 16x16 | 28.35 | 4.4 | 82.81 | 96.23 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w16_3rdparty_in1k-256px_20220803-9651cdd7.pth) | +| Swin-S\* | From scratch | 256x256 | 8x8 | 49.73 | 8.45 | 83.74 | 96.6 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w8_3rdparty_in1k-256px_20220803-b01a4332.pth) | +| Swin-S\* | From scratch | 256x256 | 16x16 | 49.73 | 8.57 | 84.13 | 96.83 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w16_3rdparty_in1k-256px_20220803-b707d206.pth) | +| Swin-B\* | From scratch | 256x256 | 8x8 | 87.92 | 14.99 | 84.2 | 96.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w8_3rdparty_in1k-256px_20220803-8ff28f2b.pth) | +| Swin-B\* | From scratch | 256x256 | 16x16 | 87.92 | 15.14 | 84.6 | 97.05 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_3rdparty_in1k-256px_20220803-5a1886b7.pth) | +| Swin-B\* | ImageNet-21k | 256x256 | 16x16 | 87.92 | 15.14 | 86.17 | 97.88 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_in21k-pre_3rdparty_in1k-256px_20220803-8d7aa8ad.pth) | +| Swin-B\* | 
ImageNet-21k | 384x384 | 24x24 | 87.92 | 34.07 | 87.14 | 98.23 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w24_in21k-pre_3rdparty_in1k-384px_20220803-44eb70f8.pth) | +| Swin-L\* | ImageNet-21k | 256X256 | 16x16 | 196.75 | 33.86 | 86.93 | 98.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w16_in21k-pre_3rdparty_in1k-256px_20220803-c40cbed7.pth) | +| Swin-L\* | ImageNet-21k | 384x384 | 24x24 | 196.75 | 76.2 | 87.59 | 98.27 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w24_in21k-pre_3rdparty_in1k-384px_20220803-3b36c165.pth) | + +*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer#main-results-on-imagenet-with-pretrained-models). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +*ImageNet-21k pretrained models with input resolution of 256x256 and 384x384 both fine-tuned from the same pre-training model using a smaller input resolution of 192x192.* + +## Citation + +``` +@article{https://doi.org/10.48550/arxiv.2111.09883, + doi = {10.48550/ARXIV.2111.09883}, + url = {https://arxiv.org/abs/2111.09883}, + author = {Liu, Ze and Hu, Han and Lin, Yutong and Yao, Zhuliang and Xie, Zhenda and Wei, Yixuan and Ning, Jia and Cao, Yue and Zhang, Zheng and Dong, Li and Wei, Furu and Guo, Baining}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Swin Transformer V2: Scaling Up Capacity and Resolution}, + publisher = {arXiv}, + year = {2021}, + copyright = {Creative Commons Attribution 4.0 International} +} +``` diff --git a/configs/swin_transformer_v2/metafile.yml b/configs/swin_transformer_v2/metafile.yml new file mode 100644 index 0000000..cef8392 --- /dev/null +++ b/configs/swin_transformer_v2/metafile.yml @@ -0,0 +1,204 @@ +Collections: + - Name: Swin-Transformer-V2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 16x V100 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shift Window Multihead Self Attention + Paper: + URL: https://arxiv.org/abs/2111.09883.pdf + Title: "Swin Transformer V2: Scaling Up Capacity and Resolution" + README: configs/swin_transformer_v2/README.md + +Models: + - Name: swinv2-tiny-w8_3rdparty_in1k-256px + Metadata: + FLOPs: 4350000000 + Parameters: 28350000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.76 + Top 5 Accuracy: 95.87 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w8_3rdparty_in1k-256px_20220803-e318968f.pth + Config: configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth + Code: https://github.com/microsoft/Swin-Transformer 
+ - Name: swinv2-tiny-w16_3rdparty_in1k-256px + Metadata: + FLOPs: 4400000000 + Parameters: 28350000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.81 + Top 5 Accuracy: 96.23 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w16_3rdparty_in1k-256px_20220803-9651cdd7.pth + Config: configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-small-w8_3rdparty_in1k-256px + Metadata: + FLOPs: 8450000000 + Parameters: 49730000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.74 + Top 5 Accuracy: 96.6 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w8_3rdparty_in1k-256px_20220803-b01a4332.pth + Config: configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-small-w16_3rdparty_in1k-256px + Metadata: + FLOPs: 8570000000 + Parameters: 49730000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.13 + Top 5 Accuracy: 96.83 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w16_3rdparty_in1k-256px_20220803-b707d206.pth + Config: configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w8_3rdparty_in1k-256px + Metadata: + FLOPs: 14990000000 + Parameters: 87920000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.2 + Top 5 Accuracy: 96.86 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w8_3rdparty_in1k-256px_20220803-8ff28f2b.pth + Config: configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w16_3rdparty_in1k-256px + Metadata: + FLOPs: 15140000000 + Parameters: 87920000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.6 + Top 5 Accuracy: 97.05 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_3rdparty_in1k-256px_20220803-5a1886b7.pth + Config: configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w16_in21k-pre_3rdparty_in1k-256px + Metadata: + Training Data: ImageNet-21k + FLOPs: 15140000000 + Parameters: 87920000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.17 + Top 5 Accuracy: 97.88 + Task: Image 
Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_in21k-pre_3rdparty_in1k-256px_20220803-8d7aa8ad.pth + Config: configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w24_in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: ImageNet-21k + FLOPs: 34070000000 + Parameters: 87920000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.14 + Top 5 Accuracy: 98.23 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w24_in21k-pre_3rdparty_in1k-384px_20220803-44eb70f8.pth + Config: configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-large-w16_in21k-pre_3rdparty_in1k-256px + Metadata: + Training Data: ImageNet-21k + FLOPs: 33860000000 + Parameters: 196750000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.93 + Top 5 Accuracy: 98.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w16_in21k-pre_3rdparty_in1k-256px_20220803-c40cbed7.pth + Config: configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-large-w24_in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: ImageNet-21k + FLOPs: 76200000000 + Parameters: 196750000 + In Collection: Swin-Transformer-V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.59 + Top 5 Accuracy: 98.27 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w24_in21k-pre_3rdparty_in1k-384px_20220803-3b36c165.pth + Config: configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w12_3rdparty_in21k-192px + Metadata: + Training Data: ImageNet-21k + FLOPs: 8510000000 + Parameters: 87920000 + In Collections: Swin-Transformer-V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-w12_3rdparty_in21k-192px_20220803-f7dc9763.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-large-w12_3rdparty_in21k-192px + Metadata: + Training Data: ImageNet-21k + FLOPs: 19040000000 + Parameters: 196740000 + In Collections: Swin-Transformer-V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-large-w12_3rdparty_in21k-192px_20220803-d9073fee.pth + Converted From: + Weights: 
https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth + Code: https://github.com/microsoft/Swin-Transformer diff --git a/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py new file mode 100644 index 0000000..5f375ee --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py new file mode 100644 index 0000000..0725f9e --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + window_size=[16, 16, 16, 8], + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6])) diff --git a/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py b/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py new file mode 100644 index 0000000..3dd4e5f --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + img_size=384, + window_size=[24, 24, 24, 12], + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6])) diff --git a/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py new file mode 100644 index 0000000..23fc407 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py new file mode 100644 index 0000000..62a2a29 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py @@ -0,0 +1,13 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer_v2/large_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + window_size=[16, 16, 16, 8], pretrained_window_sizes=[12, 12, 12, 6]), +) diff --git a/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py b/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py new file mode 100644 index 0000000..d97d9b2 --- /dev/null +++ 
b/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py @@ -0,0 +1,15 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer_v2/large_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + img_size=384, + window_size=[24, 24, 24, 12], + pretrained_window_sizes=[12, 12, 12, 6]), +) diff --git a/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py new file mode 100644 index 0000000..f87265d --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/small_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py new file mode 100644 index 0000000..f1001f1 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/small_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py new file mode 100644 index 0000000..7e1f290 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/tiny_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py new file mode 100644 index 0000000..2cdc9a2 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/tiny_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/t2t_vit/README.md b/configs/t2t_vit/README.md new file mode 100644 index 0000000..1f80c25 --- /dev/null +++ b/configs/t2t_vit/README.md @@ -0,0 +1,36 @@ +# Tokens-to-Token ViT + +> [Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet](https://arxiv.org/abs/2101.11986) + + + +## Abstract + +Transformers, which are popular for language modeling, have been explored for solving vision tasks recently, e.g., the Vision Transformer (ViT) for image classification. The ViT model splits each image into a sequence of tokens with fixed length and then applies multiple Transformer layers to model their global relation for classification. However, ViT achieves inferior performance to CNNs when trained from scratch on a midsize dataset like ImageNet. 
We find it is because: 1) the simple tokenization of input images fails to model the important local structure such as edges and lines among neighboring pixels, leading to low training sample efficiency; 2) the redundant attention backbone design of ViT leads to limited feature richness for fixed computation budgets and limited training samples. To overcome such limitations, we propose a new Tokens-To-Token Vision Transformer (T2T-ViT), which incorporates 1) a layer-wise Tokens-to-Token (T2T) transformation to progressively structurize the image to tokens by recursively aggregating neighboring Tokens into one Token (Tokens-to-Token), such that local structure represented by surrounding tokens can be modeled and tokens length can be reduced; 2) an efficient backbone with a deep-narrow structure for vision transformer motivated by CNN architecture design after empirical study. Notably, T2T-ViT reduces the parameter count and MACs of vanilla ViT by half, while achieving more than 3.0% improvement when trained from scratch on ImageNet. It also outperforms ResNets and achieves comparable performance with MobileNets by directly training on ImageNet. For example, T2T-ViT with comparable size to ResNet50 (21.5M parameters) can achieve 83.3% top1 accuracy in image resolution 384×384 on ImageNet. + +
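To make the Tokens-to-Token step above concrete, here is a minimal sketch (not the MMClassification T2T module) of one "soft split": the token sequence is re-folded into a 2D map and each 3x3 neighbourhood is concatenated into a single longer token, so the sequence shrinks by roughly 4x per step. The helper name and shapes are illustrative only.

```python
# Simplified Tokens-to-Token "soft split": aggregate neighbouring tokens into
# one longer token with nn.Unfold. Illustration only, not the mmcls T2T module.
import torch
import torch.nn as nn


def soft_split(tokens, h, w, kernel=3, stride=2, padding=1):
    """(batch, h*w, c) -> (batch, new_len, c * kernel**2) with new_len < h*w."""
    b, n, c = tokens.shape
    assert n == h * w, 'token length must match the spatial size'
    feat = tokens.transpose(1, 2).reshape(b, c, h, w)      # back to a 2D map
    patches = nn.Unfold(kernel, stride=stride, padding=padding)(feat)
    return patches.transpose(1, 2)                         # neighbours concatenated


x = torch.randn(1, 56 * 56, 64)
print(soft_split(x, 56, 56).shape)  # torch.Size([1, 784, 576]): 4x fewer, longer tokens
```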
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :----------------------------------------------------------------------------: | +| T2T-ViT_t-14 | 21.47 | 4.34 | 81.83 | 95.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.log.json) | +| T2T-ViT_t-19 | 39.08 | 7.80 | 82.63 | 96.18 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.log.json) | +| T2T-ViT_t-24 | 64.00 | 12.69 | 82.71 | 96.09 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.log.json) | + +*In consistent with the [official repo](https://github.com/yitu-opensource/T2T-ViT), we adopt the best checkpoints during training.* + +## Citation + +``` +@article{yuan2021tokens, + title={Tokens-to-token vit: Training vision transformers from scratch on imagenet}, + author={Yuan, Li and Chen, Yunpeng and Wang, Tao and Yu, Weihao and Shi, Yujun and Tay, Francis EH and Feng, Jiashi and Yan, Shuicheng}, + journal={arXiv preprint arXiv:2101.11986}, + year={2021} +} +``` diff --git a/configs/t2t_vit/metafile.yml b/configs/t2t_vit/metafile.yml new file mode 100644 index 0000000..f212542 --- /dev/null +++ b/configs/t2t_vit/metafile.yml @@ -0,0 +1,58 @@ +Collections: + - Name: Tokens-to-Token ViT + Metadata: + Training Data: ImageNet-1k + Architecture: + - Layer Normalization + - Scaled Dot-Product Attention + - Attention Dropout + - Dropout + - Tokens to Token + Paper: + URL: https://arxiv.org/abs/2101.11986 + Title: "Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet" + README: configs/t2t_vit/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/t2t_vit.py + Version: v0.17.0 + +Models: + - Name: t2t-vit-t-14_8xb64_in1k + Metadata: + FLOPs: 4340000000 + Parameters: 21470000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.83 + Top 5 Accuracy: 95.84 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.pth + Config: configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py + - Name: t2t-vit-t-19_8xb64_in1k + Metadata: + FLOPs: 7800000000 + Parameters: 39080000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.63 + Top 5 Accuracy: 96.18 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.pth + Config: 
configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py + - Name: t2t-vit-t-24_8xb64_in1k + Metadata: + FLOPs: 12690000000 + Parameters: 64000000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.71 + Top 5 Accuracy: 96.09 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.pth + Config: configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py diff --git a/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py b/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py new file mode 100644 index 0000000..a391df4 --- /dev/null +++ b/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-14.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# optimizer +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={'cls_token': dict(decay_mult=0.0)}, +) +optimizer = dict( + type='AdamW', + lr=5e-4, + weight_decay=0.05, + paramwise_cfg=paramwise_cfg, +) +optimizer_config = dict(grad_clip=None) + +# learning policy +# FIXME: lr in the first 300 epochs conforms to the CosineAnnealing and +# the lr in the last 10 epoch equals to min_lr +lr_config = dict( + policy='CosineAnnealingCooldown', + min_lr=1e-5, + cool_down_time=10, + cool_down_ratio=0.1, + by_epoch=True, + warmup_by_epoch=True, + warmup='linear', + warmup_iters=10, + warmup_ratio=1e-6) +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] +runner = dict(type='EpochBasedRunner', max_epochs=310) diff --git a/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py b/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py new file mode 100644 index 0000000..e1157f8 --- /dev/null +++ b/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-19.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# optimizer +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={'cls_token': dict(decay_mult=0.0)}, +) +optimizer = dict( + type='AdamW', + lr=5e-4, + weight_decay=0.065, + paramwise_cfg=paramwise_cfg, +) +optimizer_config = dict(grad_clip=None) + +# learning policy +# FIXME: lr in the first 300 epochs conforms to the CosineAnnealing and +# the lr in the last 10 epoch equals to min_lr +lr_config = dict( + policy='CosineAnnealingCooldown', + min_lr=1e-5, + cool_down_time=10, + cool_down_ratio=0.1, + by_epoch=True, + warmup_by_epoch=True, + warmup='linear', + warmup_iters=10, + warmup_ratio=1e-6) +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] +runner = dict(type='EpochBasedRunner', max_epochs=310) diff --git a/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py b/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py new file mode 100644 index 0000000..815f2f1 --- /dev/null +++ b/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-24.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# optimizer +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={'cls_token': dict(decay_mult=0.0)}, +) +optimizer = dict( + type='AdamW', + lr=5e-4, + weight_decay=0.065, + paramwise_cfg=paramwise_cfg, +) +optimizer_config = dict(grad_clip=None) + +# learning policy +# FIXME: lr in the first 300 epochs conforms to the CosineAnnealing and +# the lr in the last 10 epoch equals to min_lr +lr_config = dict( + 
policy='CosineAnnealingCooldown', + min_lr=1e-5, + cool_down_time=10, + cool_down_ratio=0.1, + by_epoch=True, + warmup_by_epoch=True, + warmup='linear', + warmup_iters=10, + warmup_ratio=1e-6) +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] +runner = dict(type='EpochBasedRunner', max_epochs=310) diff --git a/configs/tnt/README.md b/configs/tnt/README.md new file mode 100644 index 0000000..948eef7 --- /dev/null +++ b/configs/tnt/README.md @@ -0,0 +1,36 @@ +# TNT + +> [Transformer in Transformer](https://arxiv.org/abs/2103.00112) + + + +## Abstract + +Transformer is a new kind of neural architecture which encodes the input data as powerful features via the attention mechanism. Basically, the visual transformers first divide the input images into several local patches and then calculate both representations and their relationship. Since natural images are of high complexity with abundant detail and color information, the granularity of the patch dividing is not fine enough for excavating features of objects in different scales and locations. In this paper, we point out that the attention inside these local patches are also essential for building visual transformers with high performance and we explore a new architecture, namely, Transformer iN Transformer (TNT). Specifically, we regard the local patches (e.g., 16×16) as "visual sentences" and present to further divide them into smaller patches (e.g., 4×4) as "visual words". The attention of each word will be calculated with other words in the given visual sentence with negligible computational costs. Features of both words and sentences will be aggregated to enhance the representation ability. Experiments on several benchmarks demonstrate the effectiveness of the proposed TNT architecture, e.g., we achieve an 81.5% top-1 accuracy on the ImageNet, which is about 1.7% higher than that of the state-of-the-art visual transformer with similar computational cost. + +
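The toy block below sketches the sentence/word idea from the abstract: attention first runs over the "visual words" inside each patch, and the refined word features are folded back into the corresponding "visual sentence" token before the outer attention. It is an illustration with made-up sizes, not the MMClassification TNT backbone.

```python
# Toy TNT-style block: inner attention over "visual words", whose features are
# then folded back into the matching "visual sentence" token. Illustration
# only, not the mmcls TNT backbone; all sizes are example values.
import torch
import torch.nn as nn


class ToyTNTBlock(nn.Module):

    def __init__(self, sentence_dim=384, word_dim=24, words_per_sentence=16):
        super().__init__()
        self.inner_attn = nn.MultiheadAttention(word_dim, 4, batch_first=True)
        self.outer_attn = nn.MultiheadAttention(sentence_dim, 6, batch_first=True)
        self.word2sentence = nn.Linear(word_dim * words_per_sentence, sentence_dim)

    def forward(self, sentences, words):
        # words: (batch * num_sentences, words_per_sentence, word_dim)
        words = words + self.inner_attn(words, words, words, need_weights=False)[0]
        b, n, _ = sentences.shape
        # Fold the refined word features back into their sentence token.
        sentences = sentences + self.word2sentence(words.reshape(b, n, -1))
        sentences = sentences + self.outer_attn(
            sentences, sentences, sentences, need_weights=False)[0]
        return sentences, words


s, w = ToyTNTBlock()(torch.randn(2, 196, 384), torch.randn(2 * 196, 16, 24))
```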
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------: | :-------: | :------: | :-------: | :-------: | :--------------------------------------------------------------------------: | :----------------------------------------------------------------------------: | +| TNT-small\* | 23.76 | 3.36 | 81.52 | 95.73 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/tnt/tnt-s-p16_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth) | + +*Models with * are converted from [timm](https://github.com/rwightman/pytorch-image-models/). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +``` +@misc{han2021transformer, + title={Transformer in Transformer}, + author={Kai Han and An Xiao and Enhua Wu and Jianyuan Guo and Chunjing Xu and Yunhe Wang}, + year={2021}, + eprint={2103.00112}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/tnt/metafile.yml b/configs/tnt/metafile.yml new file mode 100644 index 0000000..67f3c78 --- /dev/null +++ b/configs/tnt/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: Transformer in Transformer + Metadata: + Training Data: ImageNet-1k + Paper: + URL: https://arxiv.org/abs/2103.00112 + Title: "Transformer in Transformer" + README: configs/tnt/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/tnt.py#L203 + Version: v0.15.0 + +Models: + - Name: tnt-small-p16_3rdparty_in1k + Metadata: + FLOPs: 3360000000 + Parameters: 23760000 + In Collection: Transformer in Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.52 + Top 5 Accuracy: 95.73 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth + Config: configs/tnt/tnt-s-p16_16xb64_in1k.py + Converted From: + Weights: https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar + Code: https://github.com/contrastive/pytorch-image-models/blob/809271b0f3e5d9be4e11c0c5cec1dbba8b5e2c60/timm/models/tnt.py#L144 diff --git a/configs/tnt/tnt-s-p16_16xb64_in1k.py b/configs/tnt/tnt-s-p16_16xb64_in1k.py new file mode 100644 index 0000000..3669368 --- /dev/null +++ b/configs/tnt/tnt-s-p16_16xb64_in1k.py @@ -0,0 +1,39 @@ +# accuracy_top-1 : 81.52 accuracy_top-5 : 95.73 +_base_ = [ + '../_base_/models/tnt_s_patch16_224.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/default_runtime.py' +] + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + interpolation='bicubic', + backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +dataset_type = 'ImageNet' +data = dict( + samples_per_gpu=64, workers_per_gpu=4, test=dict(pipeline=test_pipeline)) + +# optimizer +optimizer = dict(type='AdamW', lr=1e-3, weight_decay=0.05) +optimizer_config = dict(grad_clip=None) + +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup_by_epoch=True, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-3) 
+runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py b/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py new file mode 100644 index 0000000..3c054d4 --- /dev/null +++ b/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'tnt-s-p16_16xb64_in1k.py' + +_deprecation_ = dict( + expected='tnt-s-p16_16xb64_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/twins/README.md b/configs/twins/README.md new file mode 100644 index 0000000..87e7294 --- /dev/null +++ b/configs/twins/README.md @@ -0,0 +1,39 @@ +# Twins + +> [Twins: Revisiting the Design of Spatial Attention in Vision Transformers](http://arxiv-export-lb.library.cornell.edu/abs/2104.13840) + + + +## Abstract + +Very recently, a variety of vision transformer architectures for dense prediction tasks have been proposed and they show that the design of spatial attention is critical to their success in these tasks. In this work, we revisit the design of the spatial attention and demonstrate that a carefully-devised yet simple spatial attention mechanism performs favourably against the state-of-the-art schemes. As a result, we propose two vision transformer architectures, namely, Twins-PCPVT and Twins-SVT. Our proposed architectures are highly-efficient and easy to implement, only involving matrix multiplications that are highly optimized in modern deep learning frameworks. More importantly, the proposed architectures achieve excellent performance on a wide range of visual tasks, including image level classification as well as dense detection and segmentation. The simplicity and strong performance suggest that our proposed architectures may serve as stronger backbones for many vision tasks. Our code is released at [this https URL](https://github.com/Meituan-AutoML/Twins). + +
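As one concrete example of the spatial attention designs discussed above, the sketch below implements the global sub-sampled attention idea: queries attend to keys and values taken from a feature map reduced by a strided convolution, which cuts the attention cost by roughly the square of the reduction ratio. It is a toy module with placeholder sizes, not the MMClassification Twins backbone, which additionally interleaves it with locally-grouped attention.

```python
# Toy "global sub-sampled attention": keys/values come from a spatially
# reduced feature map. Illustration only, not the mmcls Twins backbone.
import torch
import torch.nn as nn


class GlobalSubsampledAttention(nn.Module):

    def __init__(self, dim=256, num_heads=4, sr_ratio=4):
        super().__init__()
        self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)

    def forward(self, x, h, w):  # x: (batch, h * w, dim)
        b, n, c = x.shape
        kv = self.sr(x.transpose(1, 2).reshape(b, c, h, w))  # (b, c, h/r, w/r)
        kv = kv.flatten(2).transpose(1, 2)                   # much shorter sequence
        return self.attn(x, kv, kv, need_weights=False)[0]


out = GlobalSubsampledAttention()(torch.randn(2, 56 * 56, 256), 56, 56)
```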
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------------------------------------: | :---------------------------------------------------------------------------: | +| PCPVT-small\* | 24.11 | 3.67 | 81.14 | 95.69 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth) | +| PCPVT-base\* | 43.83 | 6.45 | 82.66 | 96.26 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-base_3rdparty_8xb128_in1k_20220126-f8c4b0d5.pth) | +| PCPVT-large\* | 60.99 | 9.51 | 83.09 | 96.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-large_3rdparty_16xb64_in1k_20220126-c1ef8d80.pth) | +| SVT-small\* | 24.06 | 2.82 | 81.77 | 95.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-small_3rdparty_8xb128_in1k_20220126-8fe5205b.pth) | +| SVT-base\* | 56.07 | 8.35 | 83.13 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth) | +| SVT-large\* | 99.27 | 14.82 | 83.60 | 96.50 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-large_3rdparty_16xb64_in1k_20220126-4817645f.pth) | + +*Models with * are converted from [the official repo](https://github.com/Meituan-AutoML/Twins). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results. The validation accuracy is a little different from the official paper because of the PyTorch version. 
This result was obtained with PyTorch 1.9, while the official result was obtained with PyTorch 1.7.* + + ## Citation + + ``` +@article{chu2021twins, + title={Twins: Revisiting spatial attention design in vision transformers}, + author={Chu, Xiangxiang and Tian, Zhi and Wang, Yuqing and Zhang, Bo and Ren, Haibing and Wei, Xiaolin and Xia, Huaxia and Shen, Chunhua}, + journal={arXiv preprint arXiv:2104.13840}, + year={2021} +} +``` diff --git a/configs/twins/metafile.yml b/configs/twins/metafile.yml new file mode 100644 index 0000000..f8a7d81 --- /dev/null +++ b/configs/twins/metafile.yml @@ -0,0 +1,114 @@ +Collections: + - Name: Twins + Metadata: + Training Data: ImageNet-1k + Architecture: + - Global Subsampled Attention + - Locally Grouped SelfAttention + - Conditional Position Encoding + - Pyramid Vision Transformer + Paper: + URL: http://arxiv-export-lb.library.cornell.edu/abs/2104.13840 + Title: "Twins: Revisiting the Design of Spatial Attention in Vision Transformers" + README: configs/twins/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/twins.py + Version: v0.20.1 + +Models: + - Name: twins-pcpvt-small_3rdparty_8xb128_in1k + Metadata: + FLOPs: 3670000000 # 3.67G + Parameters: 24110000 # 24.11M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.14 + Top 5 Accuracy: 95.69 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth + Config: configs/twins/twins-pcpvt-small_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-pcpvt-base_3rdparty_8xb128_in1k + Metadata: + FLOPs: 6450000000 # 6.45G + Parameters: 43830000 # 43.83M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.66 + Top 5 Accuracy: 96.26 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-base_3rdparty_8xb128_in1k_20220126-f8c4b0d5.pth + Config: configs/twins/twins-pcpvt-base_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-pcpvt-large_3rdparty_16xb64_in1k + Metadata: + FLOPs: 9510000000 # 9.51G + Parameters: 60990000 # 60.99M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.09 + Top 5 Accuracy: 96.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-large_3rdparty_16xb64_in1k_20220126-c1ef8d80.pth + Config: configs/twins/twins-pcpvt-large_16xb64_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-svt-small_3rdparty_8xb128_in1k + Metadata: + FLOPs: 2820000000 # 2.82G + Parameters: 24060000 # 24.06M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.77 + Top 5 Accuracy: 95.57 + Task: Image Classification + Weights:
https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-small_3rdparty_8xb128_in1k_20220126-8fe5205b.pth + Config: configs/twins/twins-svt-small_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-svt-base_8xb128_3rdparty_in1k + Metadata: + FLOPs: 8350000000 # 8.35G + Parameters: 56070000 # 56.07M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.13 + Top 5 Accuracy: 96.29 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth + Config: configs/twins/twins-svt-base_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-svt-large_3rdparty_16xb64_in1k + Metadata: + FLOPs: 14820000000 # 14.82G + Parameters: 99270000 # 99.27M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.60 + Top 5 Accuracy: 96.50 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-large_3rdparty_16xb64_in1k_20220126-4817645f.pth + Config: configs/twins/twins-svt-large_16xb64_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py diff --git a/configs/twins/twins-pcpvt-base_8xb128_in1k.py b/configs/twins/twins-pcpvt-base_8xb128_in1k.py new file mode 100644 index 0000000..8ea9adc --- /dev/null +++ b/configs/twins/twins-pcpvt-base_8xb128_in1k.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/models/twins_pcpvt_base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +data = dict(samples_per_gpu=128) + +paramwise_cfg = dict(_delete=True, norm_decay_mult=0.0, bias_decay_mult=0.0) + +# for batch in each gpu is 128, 8 gpu +# lr = 5e-4 * 128 * 8 / 512 = 0.001 +optimizer = dict( + type='AdamW', + lr=5e-4 * 128 * 8 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=paramwise_cfg) +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=5.0)) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + by_epoch=True, + min_lr_ratio=1e-2, + warmup='linear', + warmup_ratio=1e-3, + warmup_iters=5, + warmup_by_epoch=True) + +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/twins/twins-pcpvt-large_16xb64_in1k.py b/configs/twins/twins-pcpvt-large_16xb64_in1k.py new file mode 100644 index 0000000..e9c9a35 --- /dev/null +++ b/configs/twins/twins-pcpvt-large_16xb64_in1k.py @@ -0,0 +1,5 @@ +_base_ = ['twins-pcpvt-base_8xb128_in1k.py'] + +model = dict(backbone=dict(arch='large'), head=dict(in_channels=512)) + +data = dict(samples_per_gpu=64) diff --git a/configs/twins/twins-pcpvt-small_8xb128_in1k.py b/configs/twins/twins-pcpvt-small_8xb128_in1k.py new file mode 100644 index 0000000..cb8bdc3 --- /dev/null +++ b/configs/twins/twins-pcpvt-small_8xb128_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['twins-pcpvt-base_8xb128_in1k.py'] + +model = 
dict(backbone=dict(arch='small'), head=dict(in_channels=512)) diff --git a/configs/twins/twins-svt-base_8xb128_in1k.py b/configs/twins/twins-svt-base_8xb128_in1k.py new file mode 100644 index 0000000..e2db230 --- /dev/null +++ b/configs/twins/twins-svt-base_8xb128_in1k.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/models/twins_svt_base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +data = dict(samples_per_gpu=128) + +paramwise_cfg = dict(_delete=True, norm_decay_mult=0.0, bias_decay_mult=0.0) + +# for batch in each gpu is 128, 8 gpu +# lr = 5e-4 * 128 * 8 / 512 = 0.001 +optimizer = dict( + type='AdamW', + lr=5e-4 * 128 * 8 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=paramwise_cfg) +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=5.0)) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + by_epoch=True, + min_lr_ratio=1e-2, + warmup='linear', + warmup_ratio=1e-3, + warmup_iters=5, + warmup_by_epoch=True) + +evaluation = dict(interval=1, metric='accuracy') diff --git a/configs/twins/twins-svt-large_16xb64_in1k.py b/configs/twins/twins-svt-large_16xb64_in1k.py new file mode 100644 index 0000000..9288a70 --- /dev/null +++ b/configs/twins/twins-svt-large_16xb64_in1k.py @@ -0,0 +1,5 @@ +_base_ = ['twins-svt-base_8xb128_in1k.py'] + +data = dict(samples_per_gpu=64) + +model = dict(backbone=dict(arch='large'), head=dict(in_channels=1024)) diff --git a/configs/twins/twins-svt-small_8xb128_in1k.py b/configs/twins/twins-svt-small_8xb128_in1k.py new file mode 100644 index 0000000..b92f1d3 --- /dev/null +++ b/configs/twins/twins-svt-small_8xb128_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['twins-svt-base_8xb128_in1k.py'] + +model = dict(backbone=dict(arch='small'), head=dict(in_channels=512)) diff --git a/configs/van/README.md b/configs/van/README.md new file mode 100644 index 0000000..a84cf32 --- /dev/null +++ b/configs/van/README.md @@ -0,0 +1,50 @@ +# Visual Attention Network + +> [Visual Attention Network](https://arxiv.org/pdf/2202.09741v2.pdf) + + + +## Abstract + +While originally designed for natural language processing (NLP) tasks, the self-attention mechanism has recently taken various computer vision areas by storm. However, the 2D nature of images brings three challenges for applying self-attention in computer vision. (1) Treating images as 1D sequences neglects their 2D structures. (2) The quadratic complexity is too expensive for high-resolution images. (3) It only captures spatial adaptability but ignores channel adaptability. In this paper, we propose a novel large kernel attention (LKA) module to enable self-adaptive and long-range correlations in self-attention while avoiding the above issues. We further introduce a novel neural network based on LKA, namely Visual Attention Network (VAN). While extremely simple and efficient, VAN outperforms the state-of-the-art vision transformers and convolutional neural networks with a large margin in extensive experiments, including image classification, object detection, semantic segmentation, instance segmentation, etc. + +
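A minimal sketch of the large kernel attention (LKA) idea follows, assuming the paper's usual decomposition (a 5x5 depth-wise convolution, a 7x7 depth-wise convolution with dilation 3, and a 1x1 convolution) whose output gates the input element-wise. It is illustrative only, not the MMClassification VAN backbone.

```python
# Large kernel attention (LKA) sketch: depth-wise conv + dilated depth-wise
# conv + 1x1 conv approximate a large receptive field, and the result gates
# the input. Illustration only, not the mmcls VAN backbone.
import torch
import torch.nn as nn


class LargeKernelAttention(nn.Module):

    def __init__(self, dim=64):
        super().__init__()
        self.dw_conv = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        self.dw_dilated = nn.Conv2d(dim, dim, 7, padding=9, groups=dim, dilation=3)
        self.pw_conv = nn.Conv2d(dim, dim, 1)

    def forward(self, x):  # x: (batch, dim, h, w)
        attn = self.pw_conv(self.dw_dilated(self.dw_conv(x)))
        return x * attn  # element-wise gating instead of a softmax attention map


y = LargeKernelAttention()(torch.randn(2, 64, 56, 56))  # same shape as the input
```

The gating is purely convolutional, which is why it keeps 2D structure, stays linear in the number of pixels, and adapts per channel, the three issues raised in the abstract.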
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------: | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :----------------------------------------------------------------: | :-------------------------------------------------------------------: | +| VAN-B0\* | From scratch | 224x224 | 4.11 | 0.88 | 75.41 | 93.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b0_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth) | +| VAN-B1\* | From scratch | 224x224 | 13.86 | 2.52 | 81.01 | 95.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b1_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-small_8xb128_in1k_20220501-17bc91aa.pth) | +| VAN-B2\* | From scratch | 224x224 | 26.58 | 5.03 | 82.80 | 96.21 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b2_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth) | +| VAN-B3\* | From scratch | 224x224 | 44.77 | 8.99 | 83.86 | 96.73 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b3_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth) | +| VAN-B4\* | From scratch | 224x224 | 60.28 | 12.22 | 84.13 | 96.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-b4_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-b4_3rdparty_in1k_20220909-f4665b92.pth) | + +\*Models with * are converted from [the official repo](https://github.com/Visual-Attention-Network/VAN-Classification). The config files of these models are only for validation. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results. + +### Pre-trained Models + +The pre-trained models on ImageNet-21k are used to fine-tune on the downstream tasks. + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Download | +| :------: | :----------: | :--------: | :-------: | :------: | :---------------------------------------------------------------------------------------------------------: | +| VAN-B4\* | ImageNet-21k | 224x224 | 60.28 | 12.22 | [model](https://download.openmmlab.com/mmclassification/v0/van/van-b4_3rdparty_in21k_20220909-db926b18.pth) | +| VAN-B5\* | ImageNet-21k | 224x224 | 89.97 | 17.21 | [model](https://download.openmmlab.com/mmclassification/v0/van/van-b5_3rdparty_in21k_20220909-18e904e3.pth) | +| VAN-B6\* | ImageNet-21k | 224x224 | 283.9 | 55.28 | [model](https://download.openmmlab.com/mmclassification/v0/van/van-b6_3rdparty_in21k_20220909-96c2cb3a.pth) | + +\*Models with * are converted from [the official repo](https://github.com/Visual-Attention-Network/VAN-Classification). 
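The configs and checkpoints listed above can be used directly for inference through the high-level `mmcls.apis` helpers (documented under `docs/en/api/apis.rst` later in this diff). Below is a minimal sketch: the VAN-B2 config path and checkpoint URL come from the table above, while the image path is a placeholder to replace with your own file.

```python
from mmcls.apis import inference_model, init_model, show_result_pyplot

config = 'configs/van/van-b2_8xb128_in1k.py'
# Checkpoint URL from the VAN-B2 row above; it is downloaded and cached on first use.
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth'  # noqa

# Build the classifier from the config and load the converted weights.
model = init_model(config, checkpoint, device='cpu')

# Run single-image inference; replace the path with an image of your own.
result = inference_model(model, 'demo/demo.JPEG')
print(result['pred_class'], result['pred_score'])

# Optionally draw the prediction on the image.
show_result_pyplot(model, 'demo/demo.JPEG', result)
```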
+ +## Citation + +``` +@article{guo2022visual, + title={Visual Attention Network}, + author={Guo, Meng-Hao and Lu, Cheng-Ze and Liu, Zheng-Ning and Cheng, Ming-Ming and Hu, Shi-Min}, + journal={arXiv preprint arXiv:2202.09741}, + year={2022} +} +``` diff --git a/configs/van/metafile.yml b/configs/van/metafile.yml new file mode 100644 index 0000000..c32df84 --- /dev/null +++ b/configs/van/metafile.yml @@ -0,0 +1,84 @@ +Collections: + - Name: Visual-Attention-Network + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Architecture: + - Visual Attention Network + - LKA + Paper: + URL: https://arxiv.org/pdf/2202.09741v2.pdf + Title: "Visual Attention Network" + README: configs/van/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.23.0/mmcls/models/backbones/van.py + Version: v0.23.0 + +Models: + - Name: van-b0_3rdparty_in1k + Metadata: + FLOPs: 880000000 # 0.88G + Parameters: 4110000 # 4.11M + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 75.41 + Top 5 Accuracy: 93.02 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth + Config: configs/van/van-b0_8xb128_in1k.py + - Name: van-b1_3rdparty_in1k + Metadata: + FLOPs: 2520000000 # 2.52G + Parameters: 13860000 # 13.86M + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.01 + Top 5 Accuracy: 95.63 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-small_8xb128_in1k_20220501-17bc91aa.pth + Config: configs/van/van-b1_8xb128_in1k.py + - Name: van-b2_3rdparty_in1k + Metadata: + FLOPs: 5030000000 # 5.03G + Parameters: 26580000 # 26.58M + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.80 + Top 5 Accuracy: 96.21 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth + Config: configs/van/van-b2_8xb128_in1k.py + - Name: van-b3_3rdparty_in1k + Metadata: + FLOPs: 8990000000 # 8.99G + Parameters: 44770000 # 44.77M + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.86 + Top 5 Accuracy: 96.73 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth + Config: configs/van/van-b3_8xb128_in1k.py + - Name: van-b4_3rdparty_in1k + Metadata: + FLOPs: 12220000000 # 12.22G + Parameters: 60280000 # 60.28M + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.13 + Top 5 Accuracy: 96.86 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-b4_3rdparty_in1k_20220909-f4665b92.pth + Config: configs/van/van-b4_8xb128_in1k.py diff --git a/configs/van/van-b0_8xb128_in1k.py b/configs/van/van-b0_8xb128_in1k.py new file mode 100644 index 0000000..1acb7af --- /dev/null +++ b/configs/van/van-b0_8xb128_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/van/van_b0.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# Note that the mean and variance used here are different from other configs +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) 
+train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/van/van-b1_8xb128_in1k.py b/configs/van/van-b1_8xb128_in1k.py new file mode 100644 index 0000000..64483db --- /dev/null +++ b/configs/van/van-b1_8xb128_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/van/van_b1.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# Note that the mean and variance used here are different from other configs +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/van/van-b2_8xb128_in1k.py b/configs/van/van-b2_8xb128_in1k.py new file mode 100644 index 0000000..88493dc --- /dev/null +++ b/configs/van/van-b2_8xb128_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + 
'../_base_/models/van/van_b2.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# Note that the mean and variance used here are different from other configs +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/van/van-b3_8xb128_in1k.py b/configs/van/van-b3_8xb128_in1k.py new file mode 100644 index 0000000..6b415f6 --- /dev/null +++ b/configs/van/van-b3_8xb128_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/van/van_b3.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# Note that the mean and variance used here are different from other configs +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + 
samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/van/van-b4_8xb128_in1k.py b/configs/van/van-b4_8xb128_in1k.py new file mode 100644 index 0000000..ba8914f --- /dev/null +++ b/configs/van/van-b4_8xb128_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/van/van_b4.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# Note that the mean and variance used here are different from other configs +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/van/van-base_8xb128_in1k.py b/configs/van/van-base_8xb128_in1k.py new file mode 100644 index 0000000..e331980 --- /dev/null +++ b/configs/van/van-base_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = ['./van-b2_8xb128_in1k.py'] + +_deprecation_ = dict( + expected='van-b2_8xb128_in1k.p', + reference='https://github.com/open-mmlab/mmclassification/pull/1017', +) diff --git a/configs/van/van-large_8xb128_in1k.py b/configs/van/van-large_8xb128_in1k.py new file mode 100644 index 0000000..84f8c7e --- /dev/null +++ b/configs/van/van-large_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = ['./van-b3_8xb128_in1k.py'] + +_deprecation_ = dict( + expected='van-b3_8xb128_in1k.p', + reference='https://github.com/open-mmlab/mmclassification/pull/1017', +) diff --git a/configs/van/van-small_8xb128_in1k.py b/configs/van/van-small_8xb128_in1k.py new file mode 100644 index 0000000..75d3220 --- /dev/null +++ b/configs/van/van-small_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = ['./van-b1_8xb128_in1k.py'] + +_deprecation_ = dict( + expected='van-b1_8xb128_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/1017', +) diff --git a/configs/van/van-tiny_8xb128_in1k.py b/configs/van/van-tiny_8xb128_in1k.py new file mode 100644 index 0000000..9f83e77 --- /dev/null +++ b/configs/van/van-tiny_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = ['./van-b0_8xb128_in1k.py'] + +_deprecation_ = dict( + expected='van-b0_8xb128_in1k.py', + 
reference='https://github.com/open-mmlab/mmclassification/pull/1017', +) diff --git a/configs/vgg/README.md b/configs/vgg/README.md new file mode 100644 index 0000000..454489f --- /dev/null +++ b/configs/vgg/README.md @@ -0,0 +1,39 @@ +# VGG + +> [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556) + + + +## Abstract + +In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision. + +
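The key design choice in the abstract, stacking very small 3x3 filters instead of using larger ones, can be illustrated with a quick parameter count. The sketch below is for illustration only (the channel count is arbitrary): two stacked 3x3 convolutions cover the same 5x5 receptive field as a single 5x5 convolution while using fewer parameters and adding an extra non-linearity.

```python
import torch.nn as nn


def n_params(module):
    return sum(p.numel() for p in module.parameters())


channels = 256  # arbitrary channel count for illustration

# Two stacked 3x3 convolutions: 5x5 receptive field, two non-linearities.
stacked = nn.Sequential(
    nn.Conv2d(channels, channels, 3, padding=1), nn.ReLU(inplace=True),
    nn.Conv2d(channels, channels, 3, padding=1))

# A single 5x5 convolution: the same receptive field, but more parameters.
single = nn.Conv2d(channels, channels, 5, padding=2)

print(n_params(stacked))  # 2 * (9 * 256 * 256 + 256) = 1,180,160
print(n_params(single))   # 25 * 256 * 256 + 256     = 1,638,656
```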
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------: | :-----------------------------------------------------------------------------: | +| VGG-11 | 132.86 | 7.63 | 68.75 | 88.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.log.json) | +| VGG-13 | 133.05 | 11.34 | 70.02 | 89.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.log.json) | +| VGG-16 | 138.36 | 15.5 | 71.62 | 90.49 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.log.json) | +| VGG-19 | 143.67 | 19.67 | 72.41 | 90.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.log.json) | +| VGG-11-BN | 132.87 | 7.64 | 70.67 | 90.16 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.log.json) | +| VGG-13-BN | 133.05 | 11.36 | 72.12 | 90.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.log.json) | +| VGG-16-BN | 138.37 | 15.53 | 73.74 | 91.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.log.json) | +| VGG-19-BN | 143.68 | 19.7 | 74.68 | 92.27 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.log.json) | + +## Citation + +``` +@article{simonyan2014very, + title={Very deep convolutional networks for large-scale image recognition}, + author={Simonyan, Karen and 
Zisserman, Andrew}, + journal={arXiv preprint arXiv:1409.1556}, + year={2014} +} +``` diff --git a/configs/vgg/metafile.yml b/configs/vgg/metafile.yml new file mode 100644 index 0000000..4410c95 --- /dev/null +++ b/configs/vgg/metafile.yml @@ -0,0 +1,125 @@ +Collections: + - Name: VGG + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x Xp GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - VGG + Paper: + URL: https://arxiv.org/abs/1409.1556 + Title: "Very Deep Convolutional Networks for Large-Scale Image" + README: configs/vgg/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/vgg.py#L39 + Version: v0.15.0 + +Models: + - Name: vgg11_8xb32_in1k + Metadata: + FLOPs: 7630000000 + Parameters: 132860000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 68.75 + Top 5 Accuracy: 88.87 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth + Config: configs/vgg/vgg11_8xb32_in1k.py + - Name: vgg13_8xb32_in1k + Metadata: + FLOPs: 11340000000 + Parameters: 133050000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.02 + Top 5 Accuracy: 89.46 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth + Config: configs/vgg/vgg13_8xb32_in1k.py + - Name: vgg16_8xb32_in1k + Metadata: + FLOPs: 15500000000 + Parameters: 138360000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.62 + Top 5 Accuracy: 90.49 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth + Config: configs/vgg/vgg16_8xb32_in1k.py + - Name: vgg19_8xb32_in1k + Metadata: + FLOPs: 19670000000 + Parameters: 143670000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.41 + Top 5 Accuracy: 90.8 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth + Config: configs/vgg/vgg19_8xb32_in1k.py + - Name: vgg11bn_8xb32_in1k + Metadata: + FLOPs: 7640000000 + Parameters: 132870000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.67 + Top 5 Accuracy: 90.16 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth + Config: configs/vgg/vgg11bn_8xb32_in1k.py + - Name: vgg13bn_8xb32_in1k + Metadata: + FLOPs: 11360000000 + Parameters: 133050000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.12 + Top 5 Accuracy: 90.66 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth + Config: configs/vgg/vgg13bn_8xb32_in1k.py + - Name: vgg16bn_8xb32_in1k + Metadata: + FLOPs: 15530000000 + Parameters: 138370000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.74 + Top 5 Accuracy: 91.66 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth + Config: configs/vgg/vgg16bn_8xb32_in1k.py + - Name: vgg19bn_8xb32_in1k + Metadata: + FLOPs: 19700000000 + Parameters: 143680000 + In 
Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.68 + Top 5 Accuracy: 92.27 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth + Config: configs/vgg/vgg19bn_8xb32_in1k.py diff --git a/configs/vgg/vgg11_8xb32_in1k.py b/configs/vgg/vgg11_8xb32_in1k.py new file mode 100644 index 0000000..c5742bc --- /dev/null +++ b/configs/vgg/vgg11_8xb32_in1k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/vgg11.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] +optimizer = dict(lr=0.01) diff --git a/configs/vgg/vgg11_b32x8_imagenet.py b/configs/vgg/vgg11_b32x8_imagenet.py new file mode 100644 index 0000000..b15396b --- /dev/null +++ b/configs/vgg/vgg11_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'vgg11_8xb32_in1k.py' + +_deprecation_ = dict( + expected='vgg11_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vgg/vgg11bn_8xb32_in1k.py b/configs/vgg/vgg11bn_8xb32_in1k.py new file mode 100644 index 0000000..4ead074 --- /dev/null +++ b/configs/vgg/vgg11bn_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/vgg11bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/vgg/vgg11bn_b32x8_imagenet.py b/configs/vgg/vgg11bn_b32x8_imagenet.py new file mode 100644 index 0000000..350c9be --- /dev/null +++ b/configs/vgg/vgg11bn_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'vgg11bn_8xb32_in1k.py' + +_deprecation_ = dict( + expected='vgg11bn_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vgg/vgg13_8xb32_in1k.py b/configs/vgg/vgg13_8xb32_in1k.py new file mode 100644 index 0000000..50d26f3 --- /dev/null +++ b/configs/vgg/vgg13_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg13.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] +optimizer = dict(lr=0.01) diff --git a/configs/vgg/vgg13_b32x8_imagenet.py b/configs/vgg/vgg13_b32x8_imagenet.py new file mode 100644 index 0000000..6198ca2 --- /dev/null +++ b/configs/vgg/vgg13_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'vgg13_8xb32_in1k.py' + +_deprecation_ = dict( + expected='vgg13_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vgg/vgg13bn_8xb32_in1k.py b/configs/vgg/vgg13bn_8xb32_in1k.py new file mode 100644 index 0000000..8d22a81 --- /dev/null +++ b/configs/vgg/vgg13bn_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/vgg13bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/vgg/vgg13bn_b32x8_imagenet.py b/configs/vgg/vgg13bn_b32x8_imagenet.py new file mode 100644 index 0000000..0a715d7 --- /dev/null +++ b/configs/vgg/vgg13bn_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'vgg13bn_8xb32_in1k.py' + +_deprecation_ = dict( + expected='vgg13bn_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vgg/vgg16_8xb16_voc.py b/configs/vgg/vgg16_8xb16_voc.py new file mode 100644 index 0000000..d096959 --- /dev/null +++ b/configs/vgg/vgg16_8xb16_voc.py @@ -0,0 +1,25 @@ +_base_ = ['../_base_/datasets/voc_bs16.py', 
'../_base_/default_runtime.py'] + +# use different head for multilabel task +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=16, num_classes=20), + neck=None, + head=dict( + type='MultiLabelClsHead', + loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))) + +# load model pretrained on imagenet +load_from = 'https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth' # noqa + +# optimizer +optimizer = dict( + type='SGD', + lr=0.001, + momentum=0.9, + weight_decay=0, + paramwise_cfg=dict(custom_keys={'.backbone.classifier': dict(lr_mult=10)})) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=20, gamma=0.1) +runner = dict(type='EpochBasedRunner', max_epochs=40) diff --git a/configs/vgg/vgg16_8xb32_in1k.py b/configs/vgg/vgg16_8xb32_in1k.py new file mode 100644 index 0000000..55cd9fc --- /dev/null +++ b/configs/vgg/vgg16_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg16.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] +optimizer = dict(lr=0.01) diff --git a/configs/vgg/vgg16_b16x8_voc.py b/configs/vgg/vgg16_b16x8_voc.py new file mode 100644 index 0000000..06225e7 --- /dev/null +++ b/configs/vgg/vgg16_b16x8_voc.py @@ -0,0 +1,6 @@ +_base_ = 'vgg16_8xb16_voc.py' + +_deprecation_ = dict( + expected='vgg16_8xb16_voc.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vgg/vgg16_b32x8_imagenet.py b/configs/vgg/vgg16_b32x8_imagenet.py new file mode 100644 index 0000000..2fefb94 --- /dev/null +++ b/configs/vgg/vgg16_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'vgg16_8xb32_in1k.py' + +_deprecation_ = dict( + expected='vgg16_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vgg/vgg16bn_8xb32_in1k.py b/configs/vgg/vgg16bn_8xb32_in1k.py new file mode 100644 index 0000000..60674c7 --- /dev/null +++ b/configs/vgg/vgg16bn_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/vgg16bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/vgg/vgg16bn_b32x8_imagenet.py b/configs/vgg/vgg16bn_b32x8_imagenet.py new file mode 100644 index 0000000..cb21917 --- /dev/null +++ b/configs/vgg/vgg16bn_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'vgg16bn_8xb32_in1k.py' + +_deprecation_ = dict( + expected='vgg16bn_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vgg/vgg19_8xb32_in1k.py b/configs/vgg/vgg19_8xb32_in1k.py new file mode 100644 index 0000000..6b033c9 --- /dev/null +++ b/configs/vgg/vgg19_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg19.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] +optimizer = dict(lr=0.01) diff --git a/configs/vgg/vgg19_b32x8_imagenet.py b/configs/vgg/vgg19_b32x8_imagenet.py new file mode 100644 index 0000000..e8b8b25 --- /dev/null +++ b/configs/vgg/vgg19_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'vgg19_8xb32_in1k.py' + +_deprecation_ = dict( + expected='vgg19_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vgg/vgg19bn_8xb32_in1k.py b/configs/vgg/vgg19bn_8xb32_in1k.py new file mode 100644 index 0000000..18a1897 --- 
/dev/null +++ b/configs/vgg/vgg19bn_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/vgg19bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/vgg/vgg19bn_b32x8_imagenet.py b/configs/vgg/vgg19bn_b32x8_imagenet.py new file mode 100644 index 0000000..f615496 --- /dev/null +++ b/configs/vgg/vgg19bn_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = 'vgg19bn_8xb32_in1k.py' + +_deprecation_ = dict( + expected='vgg19bn_8xb32_in1k.py', + reference='https://github.com/open-mmlab/mmclassification/pull/508', +) diff --git a/configs/vision_transformer/README.md b/configs/vision_transformer/README.md new file mode 100644 index 0000000..c35c242 --- /dev/null +++ b/configs/vision_transformer/README.md @@ -0,0 +1,57 @@ +# Vision Transformer + +> [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/pdf/2010.11929.pdf) + + + +## Abstract + +While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train. + +
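Treating an image as a sequence of fixed-size patches is what the `p16`/`p32` suffixes in the config names below refer to. The short sketch that follows is plain arithmetic (no framework required): it shows how the token sequence length grows with resolution and shrinks with patch size, which is why the patch-32 models in the tables below are listed with far fewer FLOPs than their patch-16 counterparts; self-attention cost grows quadratically with the number of tokens.

```python
def num_tokens(img_size, patch_size):
    """Patches per image plus the extra class token."""
    return (img_size // patch_size) ** 2 + 1


for img_size, patch_size in [(224, 16), (384, 16), (224, 32), (384, 32)]:
    print(f'{img_size}x{img_size}, patch {patch_size}: '
          f'{num_tokens(img_size, patch_size)} tokens')

# 224x224, patch 16: 197 tokens
# 384x384, patch 16: 577 tokens
# 224x224, patch 32: 50 tokens
# 384x384, patch 32: 145 tokens
```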
+ +
+ +## Results and models + +The training of Vision Transformers is divided into two steps. The first step is to pre-train the model on a large dataset, such as ImageNet-21k, to obtain a pre-trained model. The second step is to fine-tune that model on the target dataset, such as ImageNet-1k, to obtain the fine-tuned model. Here, we provide both the pre-trained models and the fine-tuned models. + +### ImageNet-21k + +The models pre-trained on ImageNet-21k are provided for fine-tuning only, and therefore have no evaluation results. + +| Model | resolution | Params(M) | Flops(G) | Download | +| :-------: | :--------: | :-------: | :------: | :--------------------------------------------------------------------------------------------------------------------------------------: | +| ViT-B16\* | 224x224 | 86.86 | 33.03 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth) | +| ViT-B32\* | 224x224 | 88.30 | 8.56 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p32_3rdparty_pt-64xb64_in1k-224_20210928-eee25dd4.pth) | +| ViT-L16\* | 224x224 | 304.72 | 116.68 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-large-p16_3rdparty_pt-64xb64_in1k-224_20210928-0001f9a1.pth) | + +*Models with * are converted from the [official repo](https://github.com/google-research/vision_transformer#available-vit-models).* + +### ImageNet-1k + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-----------: | :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :--------------------------------------------------------------: | :----------------------------------------------------------------: | +| ViT-B16\* | ImageNet-21k | 384x384 | 86.86 | 33.03 | 85.43 | 97.77 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth) | +| ViT-B32\* | ImageNet-21k | 384x384 | 88.30 | 8.56 | 84.01 | 97.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth) | +| ViT-L16\* | ImageNet-21k | 384x384 | 304.72 | 116.68 | 85.63 | 97.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth) | +| ViT-B16 (IPU) | ImageNet-21k | 224x224 | 86.86 | 33.03 | 81.22 | 95.56 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_ft-4xb544-ipu_in1k_20220603-c215811a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_ft-4xb544-ipu_in1k.log) | + +*Models with * are converted from the [official repo](https://github.com/google-research/vision_transformer#available-vit-models). The config files of these models are only for validation. 
We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +``` +@inproceedings{ + dosovitskiy2021an, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, + booktitle={International Conference on Learning Representations}, + year={2021}, + url={https://openreview.net/forum?id=YicbFdNTTy} +} +``` diff --git a/configs/vision_transformer/metafile.yml b/configs/vision_transformer/metafile.yml new file mode 100644 index 0000000..9ac8046 --- /dev/null +++ b/configs/vision_transformer/metafile.yml @@ -0,0 +1,79 @@ +Collections: + - Name: Vision Transformer + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + URL: https://arxiv.org/pdf/2010.11929.pdf + Title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale' + README: configs/vision_transformer/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.17.0/mmcls/models/backbones/vision_transformer.py + Version: v0.17.0 + +Models: + - Name: vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384 + In Collection: Vision Transformer + Metadata: + FLOPs: 33030000000 + Parameters: 86860000 + Training Data: + - ImageNet-21k + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.43 + Top 5 Accuracy: 97.77 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth + Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz + Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 + Config: configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py + - Name: vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384 + In Collection: Vision Transformer + Metadata: + FLOPs: 8560000000 + Parameters: 88300000 + Training Data: + - ImageNet-21k + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.01 + Top 5 Accuracy: 97.08 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth + Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz + Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 + Config: configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py + - Name: vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384 + In Collection: Vision Transformer + Metadata: + FLOPs: 116680000000 + Parameters: 304720000 + Training Data: + - ImageNet-21k + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.63 + Top 5 Accuracy: 97.63 + 
Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth + Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_strong1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz + Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 + Config: configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py diff --git a/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py b/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py new file mode 100644 index 0000000..097d8d6 --- /dev/null +++ b/configs/vision_transformer/vit-base-p16_ft-4xb544-ipu_in1k.py @@ -0,0 +1,115 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/default_runtime.py' +] + +# specific to vit pretrain +paramwise_cfg = dict(custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) +}) + +pretrained = 'https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth' # noqa + +model = dict( + head=dict( + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, _delete_=True), ), + backbone=dict( + img_size=224, + init_cfg=dict( + type='Pretrained', + checkpoint=pretrained, + _delete_=True, + prefix='backbone'))) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='ToHalf', keys=['img']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(224, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToHalf', keys=['img']), + dict(type='Collect', keys=['img']) +] + +# change batch size +data = dict( + samples_per_gpu=17, + workers_per_gpu=16, + drop_last=True, + train=dict(pipeline=train_pipeline), + train_dataloader=dict(mode='async'), + val=dict(pipeline=test_pipeline, ), + val_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1), + test=dict(pipeline=test_pipeline), + test_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1)) + +# remove clip-norm +optimizer_config = dict() + +# optimizer +optimizer = dict( + type='SGD', + lr=0.08, + weight_decay=1e-5, + momentum=0.9, + paramwise_cfg=paramwise_cfg, +) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=800, + warmup_ratio=0.02, +) + +# ipu cfg +# model partition config +ipu_model_cfg = dict( + train_split_edges=[ + dict(layer_to_call='backbone.patch_embed', ipu_id=0), + dict(layer_to_call='backbone.layers.3', ipu_id=1), + dict(layer_to_call='backbone.layers.6', ipu_id=2), + dict(layer_to_call='backbone.layers.9', ipu_id=3) + ], + train_ckpt_nodes=['backbone.layers.{}'.format(i) for i in range(12)]) + +# device config +options_cfg = dict( + randomSeed=42, + partialsType='half', + train_cfg=dict( + executionStrategy='SameAsIpu', + 
Training=dict(gradientAccumulation=32), + availableMemoryProportion=[0.3, 0.3, 0.3, 0.3], + ), + eval_cfg=dict(deviceIterations=1, ), +) + +# add model partition config and device config to runner +runner = dict( + type='IterBasedRunner', + ipu_model_cfg=ipu_model_cfg, + options_cfg=options_cfg, + max_iters=5000) + +checkpoint_config = dict(interval=1000) + +fp16 = dict(loss_scale=256.0, velocity_accum_type='half', accum_type='half') diff --git a/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py b/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py new file mode 100644 index 0000000..cb42d0d --- /dev/null +++ b/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=384)) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(384, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) diff --git a/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py b/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py new file mode 100644 index 0000000..79c323b --- /dev/null +++ b/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py b/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py new file mode 100644 index 0000000..0386fef --- /dev/null +++ b/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=384)) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(384, -1), backend='pillow'), + 
dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) diff --git a/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py b/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py new file mode 100644 index 0000000..a477e21 --- /dev/null +++ b/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py b/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py new file mode 100644 index 0000000..5be9918 --- /dev/null +++ b/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/vit-large-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=384)) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(384, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) diff --git a/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py b/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py new file mode 100644 index 0000000..5cf7a7d --- /dev/null +++ b/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/vit-large-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py b/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py new file mode 100644 index 0000000..60506b0 --- /dev/null +++ b/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py @@ -0,0 +1,37 @@ +# Refer to pytorch-image-models +_base_ = [ + '../_base_/models/vit-large-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=384)) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], 
std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(384, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) diff --git a/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py b/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py new file mode 100644 index 0000000..773ade8 --- /dev/null +++ b/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/vit-large-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/configs/wrn/README.md b/configs/wrn/README.md new file mode 100644 index 0000000..b036caa --- /dev/null +++ b/configs/wrn/README.md @@ -0,0 +1,35 @@ +# Wide-ResNet + +> [Wide Residual Networks](https://arxiv.org/abs/1605.07146) + + + +## Abstract + +Deep residual networks were shown to be able to scale up to thousands of layers and still have improving performance. However, each fraction of a percent of improved accuracy costs nearly doubling the number of layers, and so training very deep residual networks has a problem of diminishing feature reuse, which makes these networks very slow to train. To tackle these problems, in this paper we conduct a detailed experimental study on the architecture of ResNet blocks, based on which we propose a novel architecture where we decrease depth and increase width of residual networks. We call the resulting network structures wide residual networks (WRNs) and show that these are far superior over their commonly used thin and very deep counterparts. For example, we demonstrate that even a simple 16-layer-deep wide residual network outperforms in accuracy and efficiency all previous deep residual networks, including thousand-layer-deep networks, achieving new state-of-the-art results on CIFAR, SVHN, COCO, and significant improvements on ImageNet. + +
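Since the checkpoints in the table below were converted from TorchVision, the "decrease depth, increase width" trade-off described in the abstract can be seen directly by comparing TorchVision's standard and wide ResNet-50 definitions. A small sketch, assuming `torchvision` is installed:

```python
from torchvision.models import resnet50, wide_resnet50_2


def n_params_m(model):
    return sum(p.numel() for p in model.parameters()) / 1e6


# Both networks have 50 layers; the wide variant doubles the bottleneck width.
print(f'ResNet-50:        {n_params_m(resnet50()):.2f} M parameters')
print(f'Wide-ResNet-50-2: {n_params_m(wide_resnet50_2()):.2f} M parameters')
# ResNet-50:        ~25.56 M parameters
# Wide-ResNet-50-2: ~68.88 M parameters (matches the Params column below)
```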
+ +
+ +## Results and models + +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------: | :-------: | :------: | :-------: | :-------: | :------------------------------------------------------------------------: | :--------------------------------------------------------------------------: | +| WRN-50\* | 68.88 | 11.44 | 78.48 | 94.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/wrn/wide-resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth) | +| WRN-101\* | 126.89 | 22.81 | 78.84 | 94.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/wrn/wide-resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth) | +| WRN-50 (timm)\* | 68.88 | 11.44 | 81.45 | 95.53 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/wrn/wide-resnet50_timm_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-timm_8xb32_in1k_20220304-83ae4399.pth) | + +*Models with * are converted from the [TorchVision](https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py) and [TIMM](https://github.com/rwightman/pytorch-image-models/blob/master). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + +```bibtex +@INPROCEEDINGS{Zagoruyko2016WRN, + author = {Sergey Zagoruyko and Nikos Komodakis}, + title = {Wide Residual Networks}, + booktitle = {BMVC}, + year = {2016}} +``` diff --git a/configs/wrn/metafile.yml b/configs/wrn/metafile.yml new file mode 100644 index 0000000..cc37eef --- /dev/null +++ b/configs/wrn/metafile.yml @@ -0,0 +1,77 @@ +Collections: + - Name: Wide-ResNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - 1x1 Convolution + - Batch Normalization + - Convolution + - Global Average Pooling + - Max Pooling + - ReLU + - Residual Connection + - Softmax + - Wide Residual Block + Paper: + URL: https://arxiv.org/abs/1605.07146 + Title: "Wide Residual Networks" + README: configs/wrn/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.20.1/mmcls/models/backbones/resnet.py#L383 + Version: v0.20.1 + +Models: + - Name: wide-resnet50_3rdparty_8xb32_in1k + Metadata: + FLOPs: 11440000000 # 11.44G + Parameters: 68880000 # 68.88M + In Collection: Wide-ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.48 + Top 5 Accuracy: 94.08 + Weights: https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth + Config: configs/wrn/wide-resnet50_8xb32_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py + - Name: wide-resnet101_3rdparty_8xb32_in1k + Metadata: + FLOPs: 22810000000 # 22.81G + Parameters: 126890000 # 126.89M + In Collection: Wide-ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.84 + Top 5 Accuracy: 94.28 + Weights: 
https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth + Config: configs/wrn/wide-resnet101_8xb32_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py + - Name: wide-resnet50_3rdparty-timm_8xb32_in1k + Metadata: + FLOPs: 11440000000 # 11.44G + Parameters: 68880000 # 68.88M + In Collection: Wide-ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.45 + Top 5 Accuracy: 95.53 + Weights: https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-timm_8xb32_in1k_20220304-83ae4399.pth + Config: configs/wrn/wide-resnet50_timm_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py diff --git a/configs/wrn/wide-resnet101_8xb32_in1k.py b/configs/wrn/wide-resnet101_8xb32_in1k.py new file mode 100644 index 0000000..d1bf5e5 --- /dev/null +++ b/configs/wrn/wide-resnet101_8xb32_in1k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/wide-resnet50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(depth=101)) diff --git a/configs/wrn/wide-resnet50_8xb32_in1k.py b/configs/wrn/wide-resnet50_8xb32_in1k.py new file mode 100644 index 0000000..edf6a05 --- /dev/null +++ b/configs/wrn/wide-resnet50_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/wide-resnet50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/wrn/wide-resnet50_timm_8xb32_in1k.py b/configs/wrn/wide-resnet50_timm_8xb32_in1k.py new file mode 100644 index 0000000..8dca8f3 --- /dev/null +++ b/configs/wrn/wide-resnet50_timm_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/wide-resnet50.py', + '../_base_/datasets/imagenet_bs32_pil_bicubic.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/docs/en/Makefile b/docs/en/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/docs/en/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/en/_static/css/readthedocs.css b/docs/en/_static/css/readthedocs.css new file mode 100644 index 0000000..577a67a --- /dev/null +++ b/docs/en/_static/css/readthedocs.css @@ -0,0 +1,27 @@ +.header-logo { + background-image: url("../image/mmcls-logo.png"); + background-size: 204px 40px; + height: 40px; + width: 204px; +} + +pre { + white-space: pre; +} + +article.pytorch-article section code { + padding: .2em .4em; + background-color: #f3f4f7; + border-radius: 5px; +} + +/* Disable the change in tables */ +article.pytorch-article section table code { + padding: unset; + background-color: unset; + border-radius: unset; +} + +table.autosummary td { + width: 50% +} diff --git a/docs/en/_static/image/mmcls-logo.png b/docs/en/_static/image/mmcls-logo.png new file mode 100644 index 0000000..6e65420 Binary files /dev/null and b/docs/en/_static/image/mmcls-logo.png differ diff --git a/docs/en/_static/image/tools/analysis/analyze_log.jpg b/docs/en/_static/image/tools/analysis/analyze_log.jpg new file mode 100644 index 0000000..8eb1a27 Binary files /dev/null and b/docs/en/_static/image/tools/analysis/analyze_log.jpg differ diff --git a/docs/en/_static/image/tools/visualization/lr_schedule1.png b/docs/en/_static/image/tools/visualization/lr_schedule1.png new file mode 100644 index 0000000..31fca35 Binary files /dev/null and b/docs/en/_static/image/tools/visualization/lr_schedule1.png differ diff --git a/docs/en/_static/image/tools/visualization/lr_schedule2.png b/docs/en/_static/image/tools/visualization/lr_schedule2.png new file mode 100644 index 0000000..8c6231d Binary files /dev/null and b/docs/en/_static/image/tools/visualization/lr_schedule2.png differ diff --git a/docs/en/_static/js/custom.js b/docs/en/_static/js/custom.js new file mode 100644 index 0000000..44a4057 --- /dev/null +++ b/docs/en/_static/js/custom.js @@ -0,0 +1 @@ +var collapsedSections = ['Model zoo']; diff --git a/docs/en/_templates/classtemplate.rst b/docs/en/_templates/classtemplate.rst new file mode 100644 index 0000000..4f74842 --- /dev/null +++ b/docs/en/_templates/classtemplate.rst @@ -0,0 +1,14 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + + +.. + autogenerated from source/_templates/classtemplate.rst + note it does not have :inherited-members: diff --git a/docs/en/api/apis.rst b/docs/en/api/apis.rst new file mode 100644 index 0000000..67e05b9 --- /dev/null +++ b/docs/en/api/apis.rst @@ -0,0 +1,45 @@ +.. role:: hidden + :class: hidden-section + +mmcls.apis +=================================== + +These are some high-level APIs for classification tasks. + +.. contents:: mmcls.apis + :depth: 2 + :local: + :backlinks: top + +.. currentmodule:: mmcls.apis + +Train +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + init_random_seed + set_random_seed + train_model + +Test +------------------ +.. autosummary:: + :toctree: generated + :nosignatures: + + single_gpu_test + multi_gpu_test + +Inference +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + init_model + inference_model + show_result_pyplot diff --git a/docs/en/api/core.rst b/docs/en/api/core.rst new file mode 100644 index 0000000..83e1dbf --- /dev/null +++ b/docs/en/api/core.rst @@ -0,0 +1,62 @@ +.. 
role:: hidden
+    :class: hidden-section
+
+mmcls.core
+===================================
+
+This package includes some runtime components. These components are useful in
+classification tasks but not supported by MMCV yet.
+
+.. note::
+
+   Some components may be moved to MMCV in the future.
+
+.. contents:: mmcls.core
+   :depth: 2
+   :local:
+   :backlinks: top
+
+.. currentmodule:: mmcls.core
+
+Evaluation
+------------------
+
+Evaluation metrics calculation functions
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+
+   precision
+   recall
+   f1_score
+   precision_recall_f1
+   average_precision
+   mAP
+   support
+   average_performance
+   calculate_confusion_matrix
+
+Hook
+------------------
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   ClassNumCheckHook
+   PreciseBNHook
+   CosineAnnealingCooldownLrUpdaterHook
+   MMClsWandbHook
+
+
+Optimizers
+------------------
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   Lamb
diff --git a/docs/en/api/datasets.rst b/docs/en/api/datasets.rst
new file mode 100644
index 0000000..640ce1a
--- /dev/null
+++ b/docs/en/api/datasets.rst
@@ -0,0 +1,61 @@
+.. role:: hidden
+    :class: hidden-section
+
+mmcls.datasets
+===================================
+
+The ``datasets`` package contains several commonly used datasets for image classification tasks and some dataset wrappers.
+
+.. currentmodule:: mmcls.datasets
+
+Custom Dataset
+--------------
+
+.. autoclass:: CustomDataset
+
+ImageNet
+--------
+
+.. autoclass:: ImageNet
+
+.. autoclass:: ImageNet21k
+
+CIFAR
+-----
+
+.. autoclass:: CIFAR10
+
+.. autoclass:: CIFAR100
+
+MNIST
+-----
+
+.. autoclass:: MNIST
+
+.. autoclass:: FashionMNIST
+
+VOC
+---
+
+.. autoclass:: VOC
+
+Stanford Cars
+-------------
+
+.. autoclass:: StanfordCars
+
+Base classes
+------------
+
+.. autoclass:: BaseDataset
+
+.. autoclass:: MultiLabelDataset
+
+Dataset Wrappers
+----------------
+
+.. autoclass:: ConcatDataset
+
+.. autoclass:: RepeatDataset
+
+.. autoclass:: ClassBalancedDataset
diff --git a/docs/en/api/models.rst b/docs/en/api/models.rst
new file mode 100644
index 0000000..0c31791
--- /dev/null
+++ b/docs/en/api/models.rst
@@ -0,0 +1,141 @@
+.. role:: hidden
+    :class: hidden-section
+
+mmcls.models
+===================================
+
+The ``models`` package contains several sub-packages for addressing the different components of a model.
+
+- :ref:`classifiers`: The top-level module which defines the whole process of a classification model.
+- :ref:`backbones`: Usually a feature extraction network, e.g., ResNet, MobileNet.
+- :ref:`necks`: The component between backbones and heads, e.g., GlobalAveragePooling.
+- :ref:`heads`: The component for specific tasks. In MMClassification, we provide heads for classification.
+- :ref:`losses`: Loss functions.
+
+.. currentmodule:: mmcls.models
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+
+   build_classifier
+   build_backbone
+   build_neck
+   build_head
+   build_loss
+
+.. _classifiers:
+
+Classifier
+------------------
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   BaseClassifier
+   ImageClassifier
+
+.. _backbones:
+
+Backbones
+------------------
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   AlexNet
+   CSPDarkNet
+   CSPNet
+   CSPResNeXt
+   CSPResNet
+   Conformer
+   ConvMixer
+   ConvNeXt
+   DenseNet
+   DistilledVisionTransformer
+   EfficientNet
+   HRNet
+   LeNet5
+   MlpMixer
+   MobileNetV2
+   MobileNetV3
+   PCPVT
+   PoolFormer
+   RegNet
+   RepMLPNet
+   RepVGG
+   Res2Net
+   ResNeSt
+   ResNeXt
+   ResNet
+   ResNetV1c
+   ResNetV1d
+   ResNet_CIFAR
+   SEResNeXt
+   SEResNet
+   SVT
+   ShuffleNetV1
+   ShuffleNetV2
+   SwinTransformer
+   T2T_ViT
+   TIMMBackbone
+   TNT
+   VAN
+   VGG
+   VisionTransformer
+   EfficientFormer
+   HorNet
+
+.. _necks:
+
+Necks
+------------------
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   GlobalAveragePooling
+   GeneralizedMeanPooling
+   HRFuseScales
+
+.. _heads:
+
+Heads
+------------------
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   ClsHead
+   LinearClsHead
+   StackedLinearClsHead
+   MultiLabelClsHead
+   MultiLabelLinearClsHead
+   VisionTransformerClsHead
+   DeiTClsHead
+   ConformerHead
+
+.. _losses:
+
+Losses
+------------------
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   Accuracy
+   AsymmetricLoss
+   CrossEntropyLoss
+   LabelSmoothLoss
+   FocalLoss
+   SeesawLoss
diff --git a/docs/en/api/models.utils.augment.rst b/docs/en/api/models.utils.augment.rst
new file mode 100644
index 0000000..54442f7
--- /dev/null
+++ b/docs/en/api/models.utils.augment.rst
@@ -0,0 +1,35 @@
+.. role:: hidden
+    :class: hidden-section
+
+Batch Augmentation
+===================================
+
+Batch augmentation is a kind of augmentation that involves multiple samples, such as Mixup and CutMix.
+
+In MMClassification, these batch augmentations are used as a part of :ref:`classifiers`. A typical usage is as below:
+
+.. code-block:: python
+
+   model = dict(
+       backbone=...,
+       neck=...,
+       head=...,
+       train_cfg=dict(augments=[
+           dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes),
+           dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes),
+       ]))
+
+.. currentmodule:: mmcls.models.utils.augment
+
+Mixup
+-----
+.. autoclass:: BatchMixupLayer
+
+CutMix
+------
+.. autoclass:: BatchCutMixLayer
+
+ResizeMix
+---------
+.. autoclass:: BatchResizeMixLayer
diff --git a/docs/en/api/models.utils.rst b/docs/en/api/models.utils.rst
new file mode 100644
index 0000000..c9687a7
--- /dev/null
+++ b/docs/en/api/models.utils.rst
@@ -0,0 +1,50 @@
+.. role:: hidden
+    :class: hidden-section
+
+mmcls.models.utils
+===================================
+
+This package includes some helper functions and common components used in various networks.
+
+.. contents:: mmcls.models.utils
+   :depth: 2
+   :local:
+   :backlinks: top
+
+.. currentmodule:: mmcls.models.utils
+
+Common Components
+------------------
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   InvertedResidual
+   SELayer
+   ShiftWindowMSA
+   MultiheadAttention
+   ConditionalPositionEncoding
+
+Helper Functions
+------------------
+
+channel_shuffle
+^^^^^^^^^^^^^^^
+.. autofunction:: channel_shuffle
+
+make_divisible
+^^^^^^^^^^^^^^
+.. autofunction:: make_divisible
+
+to_ntuple
+^^^^^^^^^^^^^^
+.. autofunction:: to_ntuple
+.. autofunction:: to_2tuple
+.. autofunction:: to_3tuple
+.. autofunction:: to_4tuple
+
+is_tracing
+^^^^^^^^^^^^^^
+.. autofunction:: is_tracing
diff --git a/docs/en/api/transforms.rst b/docs/en/api/transforms.rst
new file mode 100644
index 0000000..4a39f08
--- /dev/null
+++ b/docs/en/api/transforms.rst
@@ -0,0 +1,171 @@
+.. role:: hidden
+    :class: hidden-section
+
+Data Transformations
+***********************************
+
+In MMClassification, the data preparation and the dataset are decoupled. The
+datasets only define how to get samples' basic information from the file
+system. This basic information includes the ground-truth label and the raw
+image data / the paths of images.
+
+To prepare the input data, we need to apply some transformations to this basic
+information. These transformations include loading, preprocessing and
+formatting, and a series of such data transformations makes up a data pipeline.
+Therefore, you can find a ``pipeline`` argument in the dataset configs,
+for example:
+
+.. code:: python
+
+    img_norm_cfg = dict(
+        mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+    train_pipeline = [
+        dict(type='LoadImageFromFile'),
+        dict(type='RandomResizedCrop', size=224),
+        dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+        dict(type='Normalize', **img_norm_cfg),
+        dict(type='ImageToTensor', keys=['img']),
+        dict(type='ToTensor', keys=['gt_label']),
+        dict(type='Collect', keys=['img', 'gt_label'])
+    ]
+    test_pipeline = [
+        dict(type='LoadImageFromFile'),
+        dict(type='Resize', size=256),
+        dict(type='CenterCrop', crop_size=224),
+        dict(type='Normalize', **img_norm_cfg),
+        dict(type='ImageToTensor', keys=['img']),
+        dict(type='Collect', keys=['img'])
+    ]
+
+    data = dict(
+        train=dict(..., pipeline=train_pipeline),
+        val=dict(..., pipeline=test_pipeline),
+        test=dict(..., pipeline=test_pipeline),
+    )
+
+Every item of a pipeline list is one of the following data transformation classes. If you want to add a custom data transformation class, the tutorial :doc:`Custom Data Pipelines ` will help you.
+
+.. contents:: mmcls.datasets.pipelines
+   :depth: 2
+   :local:
+   :backlinks: top
+
+.. currentmodule:: mmcls.datasets.pipelines
+
+Loading
+=======
+
+LoadImageFromFile
+---------------------
+.. autoclass:: LoadImageFromFile
+
+Preprocessing and Augmentation
+==============================
+
+CenterCrop
+---------------------
+.. autoclass:: CenterCrop
+
+Lighting
+---------------------
+.. autoclass:: Lighting
+
+Normalize
+---------------------
+.. autoclass:: Normalize
+
+Pad
+---------------------
+.. autoclass:: Pad
+
+Resize
+---------------------
+.. autoclass:: Resize
+
+RandomCrop
+---------------------
+.. autoclass:: RandomCrop
+
+RandomErasing
+---------------------
+.. autoclass:: RandomErasing
+
+RandomFlip
+---------------------
+.. autoclass:: RandomFlip
+
+RandomGrayscale
+---------------------
+.. autoclass:: RandomGrayscale
+
+RandomResizedCrop
+---------------------
+.. autoclass:: RandomResizedCrop
+
+ColorJitter
+---------------------
+.. autoclass:: ColorJitter
+
+
+Composed Augmentation
+---------------------
+Composed augmentation is a kind of method that composes a series of data
+augmentation transformations, such as ``AutoAugment`` and ``RandAugment``.
+
+.. autoclass:: AutoAugment
+
+.. autoclass:: RandAugment
+
+In composed augmentation, we need to specify several data transformations or
+several groups of data transformations (the ``policies`` argument) as the
+random sampling space. These data transformations are chosen from the table
+below. In addition, we provide some preset policies in `this folder`_.
+
+.. _this folder: https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/datasets/pipelines
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+   :template: classtemplate.rst
+
+   AutoContrast
+   Brightness
+   ColorTransform
+   Contrast
+   Cutout
+   Equalize
+   Invert
+   Posterize
+   Rotate
+   Sharpness
+   Shear
+   Solarize
+   SolarizeAdd
+   Translate
+
+Formatting
+==========
+
+Collect
+---------------------
+.. autoclass:: Collect
+
+ImageToTensor
+---------------------
+.. autoclass:: ImageToTensor
+
+ToNumpy
+---------------------
+.. autoclass:: ToNumpy
+
+ToPIL
+---------------------
+.. autoclass:: ToPIL
+
+ToTensor
+---------------------
+.. autoclass:: ToTensor
+
+Transpose
+---------------------
+.. autoclass:: Transpose
diff --git a/docs/en/api/utils.rst b/docs/en/api/utils.rst
new file mode 100644
index 0000000..206fc82
--- /dev/null
+++ b/docs/en/api/utils.rst
@@ -0,0 +1,23 @@
+.. role:: hidden
+    :class: hidden-section
+
+mmcls.utils
+===================================
+
+These are some useful helper functions in the ``utils`` package.
+
+.. contents:: mmcls.utils
+   :depth: 1
+   :local:
+   :backlinks: top
+
+.. currentmodule:: mmcls.utils
+
+.. autosummary::
+   :toctree: generated
+   :nosignatures:
+
+   collect_env
+   get_root_logger
+   load_json_log
+   setup_multi_processes
diff --git a/docs/en/changelog.md b/docs/en/changelog.md
new file mode 100644
index 0000000..7928a13
--- /dev/null
+++ b/docs/en/changelog.md
@@ -0,0 +1,746 @@
+# Changelog
+
+## v0.25.0(06/12/2022)
+
+### Highlights
+
+- Support MLU backend.
+
+### New Features
+
+- Support MLU backend. ([#1159](https://github.com/open-mmlab/mmclassification/pull/1159))
+- Support Activation Checkpointing for ConvNeXt. ([#1152](https://github.com/open-mmlab/mmclassification/pull/1152))
+
+### Improvements
+
+- Add `dist_train_arm.sh` for ARM device and update NPU results. ([#1218](https://github.com/open-mmlab/mmclassification/pull/1218))
+
+### Bug Fixes
+
+- Fix a bug that caused `MMClsWandbHook` to get stuck. ([#1242](https://github.com/open-mmlab/mmclassification/pull/1242))
+- Fix the redundant `device_ids` in `tools/test.py`. ([#1215](https://github.com/open-mmlab/mmclassification/pull/1215))
+
+### Docs Update
+
+- Add version banner and version warning in master docs. ([#1216](https://github.com/open-mmlab/mmclassification/pull/1216))
+- Update NPU support doc. ([#1198](https://github.com/open-mmlab/mmclassification/pull/1198))
+- Fixed typo in `pytorch2torchscript.md`. ([#1173](https://github.com/open-mmlab/mmclassification/pull/1173))
+- Fix typo in `miscellaneous.md`. ([#1137](https://github.com/open-mmlab/mmclassification/pull/1137))
+- Add further detail to the doc for `ClassBalancedDataset`. ([#901](https://github.com/open-mmlab/mmclassification/pull/901))
+
+## v0.24.1(31/10/2022)
+
+### New Features
+
+- Support mmcls with NPU backend. ([#1072](https://github.com/open-mmlab/mmclassification/pull/1072))
+
+### Bug Fixes
+
+- Fix performance issue in convnext DDP train. ([#1098](https://github.com/open-mmlab/mmclassification/pull/1098))
+
+## v0.24.0(30/9/2022)
+
+### Highlights
+
+- Support HorNet, EfficientFormer, SwinTransformer V2 and MViT backbones.
+- Support Stanford Cars dataset.
+
+### New Features
+
+- Support HorNet Backbone. ([#1013](https://github.com/open-mmlab/mmclassification/pull/1013))
+- Support EfficientFormer. ([#954](https://github.com/open-mmlab/mmclassification/pull/954))
+- Support Stanford Cars dataset. ([#893](https://github.com/open-mmlab/mmclassification/pull/893))
+- Support CSRA head. ([#881](https://github.com/open-mmlab/mmclassification/pull/881))
+- Support Swin Transformer V2. ([#799](https://github.com/open-mmlab/mmclassification/pull/799))
+- Support MViT and add checkpoints. ([#924](https://github.com/open-mmlab/mmclassification/pull/924))
+
+### Improvements
+
+- [Improve] Replace loop of progressbar in api/test. ([#878](https://github.com/open-mmlab/mmclassification/pull/878))
+- [Enhance] RepVGG for YOLOX-PAI. ([#1025](https://github.com/open-mmlab/mmclassification/pull/1025))
+- [Enhancement] Update VAN. ([#1017](https://github.com/open-mmlab/mmclassification/pull/1017))
+- [Refactor] Re-write `get_sinusoid_encoding` from third-party implementation. ([#965](https://github.com/open-mmlab/mmclassification/pull/965))
+- [Improve] Upgrade onnxsim to v0.4.0. ([#915](https://github.com/open-mmlab/mmclassification/pull/915))
+- [Improve] Fixed typo in `RepVGG`. ([#985](https://github.com/open-mmlab/mmclassification/pull/985))
+- [Improve] Using `train_step` instead of `forward` in PreciseBNHook ([#964](https://github.com/open-mmlab/mmclassification/pull/964))
+- [Improve] Use `forward_dummy` to calculate FLOPS. ([#953](https://github.com/open-mmlab/mmclassification/pull/953))
+
+### Bug Fixes
+
+- Fix warning with `torch.meshgrid`. ([#860](https://github.com/open-mmlab/mmclassification/pull/860))
+- Add matplotlib minimum version requirements. ([#909](https://github.com/open-mmlab/mmclassification/pull/909))
+- Val loader should not drop last by default. ([#857](https://github.com/open-mmlab/mmclassification/pull/857))
+- Fix config.device bug in tutorial. ([#1059](https://github.com/open-mmlab/mmclassification/pull/1059))
+- Fix attention clamp max params ([#1034](https://github.com/open-mmlab/mmclassification/pull/1034))
+- Fix device mismatch in Swin-v2. ([#976](https://github.com/open-mmlab/mmclassification/pull/976))
+- Fix the output position of Swin-Transformer. ([#947](https://github.com/open-mmlab/mmclassification/pull/947))
+
+### Docs Update
+
+- Fix typo in config.md. ([#827](https://github.com/open-mmlab/mmclassification/pull/827))
+- Add version for torchvision to avoid error. ([#903](https://github.com/open-mmlab/mmclassification/pull/903))
+- Fixed typo for `--out-dir` option of analyze_results.py. ([#898](https://github.com/open-mmlab/mmclassification/pull/898))
+- Refine the docstring of RegNet ([#935](https://github.com/open-mmlab/mmclassification/pull/935))
+
+## v0.23.2(28/7/2022)
+
+### New Features
+
+- Support MPS device. ([#894](https://github.com/open-mmlab/mmclassification/pull/894))
+
+### Bug Fixes
+
+- Fix a bug in Albu which caused crashing. ([#918](https://github.com/open-mmlab/mmclassification/pull/918))
+
+## v0.23.1(2/6/2022)
+
+### New Features
+
+- Dedicated MMClsWandbHook for MMClassification (Weights and Biases Integration) ([#764](https://github.com/open-mmlab/mmclassification/pull/764))
+
+### Improvements
+
+- Use mdformat instead of markdownlint to format markdown. ([#844](https://github.com/open-mmlab/mmclassification/pull/844))
+
+### Bug Fixes
+
+- Fix wrong `--local_rank`.
+
+### Docs Update
+
+- Update install tutorials. ([#854](https://github.com/open-mmlab/mmclassification/pull/854))
+- Fix wrong link in README. ([#835](https://github.com/open-mmlab/mmclassification/pull/835))
+
+## v0.23.0(1/5/2022)
+
+### New Features
+
+- Support DenseNet. ([#750](https://github.com/open-mmlab/mmclassification/pull/750))
+- Support VAN.
([#739](https://github.com/open-mmlab/mmclassification/pull/739)) + +### Improvements + +- Support training on IPU and add fine-tuning configs of ViT. ([#723](https://github.com/open-mmlab/mmclassification/pull/723)) + +### Docs Update + +- New style API reference, and easier to use! Welcome [view it](https://mmclassification.readthedocs.io/en/master/api/models.html). ([#774](https://github.com/open-mmlab/mmclassification/pull/774)) + +## v0.22.1(15/4/2022) + +### New Features + +- [Feature] Support resize relative position embedding in `SwinTransformer`. ([#749](https://github.com/open-mmlab/mmclassification/pull/749)) +- [Feature] Add PoolFormer backbone and checkpoints. ([#746](https://github.com/open-mmlab/mmclassification/pull/746)) + +### Improvements + +- [Enhance] Improve CPE performance by reduce memory copy. ([#762](https://github.com/open-mmlab/mmclassification/pull/762)) +- [Enhance] Add extra dataloader settings in configs. ([#752](https://github.com/open-mmlab/mmclassification/pull/752)) + +## v0.22.0(30/3/2022) + +### Highlights + +- Support a series of CSP Network, such as CSP-ResNet, CSP-ResNeXt and CSP-DarkNet. +- A new `CustomDataset` class to help you build dataset of yourself! +- Support ConvMixer, RepMLP and new dataset - CUB dataset. + +### New Features + +- [Feature] Add CSPNet and backbone and checkpoints ([#735](https://github.com/open-mmlab/mmclassification/pull/735)) +- [Feature] Add `CustomDataset`. ([#738](https://github.com/open-mmlab/mmclassification/pull/738)) +- [Feature] Add diff seeds to diff ranks. ([#744](https://github.com/open-mmlab/mmclassification/pull/744)) +- [Feature] Support ConvMixer. ([#716](https://github.com/open-mmlab/mmclassification/pull/716)) +- [Feature] Our `dist_train` & `dist_test` tools support distributed training on multiple machines. ([#734](https://github.com/open-mmlab/mmclassification/pull/734)) +- [Feature] Add RepMLP backbone and checkpoints. ([#709](https://github.com/open-mmlab/mmclassification/pull/709)) +- [Feature] Support CUB dataset. ([#703](https://github.com/open-mmlab/mmclassification/pull/703)) +- [Feature] Support ResizeMix. ([#676](https://github.com/open-mmlab/mmclassification/pull/676)) + +### Improvements + +- [Enhance] Use `--a-b` instead of `--a_b` in arguments. ([#754](https://github.com/open-mmlab/mmclassification/pull/754)) +- [Enhance] Add `get_cat_ids` and `get_gt_labels` to KFoldDataset. ([#721](https://github.com/open-mmlab/mmclassification/pull/721)) +- [Enhance] Set torch seed in `worker_init_fn`. ([#733](https://github.com/open-mmlab/mmclassification/pull/733)) + +### Bug Fixes + +- [Fix] Fix the discontiguous output feature map of ConvNeXt. ([#743](https://github.com/open-mmlab/mmclassification/pull/743)) + +### Docs Update + +- [Docs] Add brief installation steps in README for copy&paste. ([#755](https://github.com/open-mmlab/mmclassification/pull/755)) +- [Docs] fix logo url link from mmocr to mmcls. ([#732](https://github.com/open-mmlab/mmclassification/pull/732)) + +## v0.21.0(04/03/2022) + +### Highlights + +- Support ResNetV1c and Wide-ResNet, and provide pre-trained models. +- Support dynamic input shape for ViT-based algorithms. Now our ViT, DeiT, Swin-Transformer and T2T-ViT support forwarding with any input shape. +- Reproduce training results of DeiT. And our DeiT-T and DeiT-S have higher accuracy comparing with the official weights. + +### New Features + +- Add ResNetV1c. ([#692](https://github.com/open-mmlab/mmclassification/pull/692)) +- Support Wide-ResNet. 
([#715](https://github.com/open-mmlab/mmclassification/pull/715)) +- Support gem pooling ([#677](https://github.com/open-mmlab/mmclassification/pull/677)) + +### Improvements + +- Reproduce training results of DeiT. ([#711](https://github.com/open-mmlab/mmclassification/pull/711)) +- Add ConvNeXt pretrain models on ImageNet-1k. ([#707](https://github.com/open-mmlab/mmclassification/pull/707)) +- Support dynamic input shape for ViT-based algorithms. ([#706](https://github.com/open-mmlab/mmclassification/pull/706)) +- Add `evaluate` function for ConcatDataset. ([#650](https://github.com/open-mmlab/mmclassification/pull/650)) +- Enhance vis-pipeline tool. ([#604](https://github.com/open-mmlab/mmclassification/pull/604)) +- Return code 1 if scripts runs failed. ([#694](https://github.com/open-mmlab/mmclassification/pull/694)) +- Use PyTorch official `one_hot` to implement `convert_to_one_hot`. ([#696](https://github.com/open-mmlab/mmclassification/pull/696)) +- Add a new pre-commit-hook to automatically add a copyright. ([#710](https://github.com/open-mmlab/mmclassification/pull/710)) +- Add deprecation message for deploy tools. ([#697](https://github.com/open-mmlab/mmclassification/pull/697)) +- Upgrade isort pre-commit hooks. ([#687](https://github.com/open-mmlab/mmclassification/pull/687)) +- Use `--gpu-id` instead of `--gpu-ids` in non-distributed multi-gpu training/testing. ([#688](https://github.com/open-mmlab/mmclassification/pull/688)) +- Remove deprecation. ([#633](https://github.com/open-mmlab/mmclassification/pull/633)) + +### Bug Fixes + +- Fix Conformer forward with irregular input size. ([#686](https://github.com/open-mmlab/mmclassification/pull/686)) +- Add `dist.barrier` to fix a bug in directory checking. ([#666](https://github.com/open-mmlab/mmclassification/pull/666)) + +## v0.20.1(07/02/2022) + +### Bug Fixes + +- Fix the MMCV dependency version. + +## v0.20.0(30/01/2022) + +### Highlights + +- Support K-fold cross-validation. The tutorial will be released later. +- Support HRNet, ConvNeXt, Twins and EfficientNet. +- Support model conversion from PyTorch to Core-ML by a tool. + +### New Features + +- Support K-fold cross-validation. ([#563](https://github.com/open-mmlab/mmclassification/pull/563)) +- Support HRNet and add pre-trained models. ([#660](https://github.com/open-mmlab/mmclassification/pull/660)) +- Support ConvNeXt and add pre-trained models. ([#670](https://github.com/open-mmlab/mmclassification/pull/670)) +- Support Twins and add pre-trained models. ([#642](https://github.com/open-mmlab/mmclassification/pull/642)) +- Support EfficientNet and add pre-trained models.([#649](https://github.com/open-mmlab/mmclassification/pull/649)) +- Support `features_only` option in `TIMMBackbone`. ([#668](https://github.com/open-mmlab/mmclassification/pull/668)) +- Add conversion script from pytorch to Core-ML model. ([#597](https://github.com/open-mmlab/mmclassification/pull/597)) + +### Improvements + +- New-style CPU training and inference. ([#674](https://github.com/open-mmlab/mmclassification/pull/674)) +- Add setup multi-processing both in train and test. ([#671](https://github.com/open-mmlab/mmclassification/pull/671)) +- Rewrite channel split operation in ShufflenetV2. ([#632](https://github.com/open-mmlab/mmclassification/pull/632)) +- Deprecate the support for "python setup.py test". ([#646](https://github.com/open-mmlab/mmclassification/pull/646)) +- Support single-label, softmax, custom eps by asymmetric loss. 
([#609](https://github.com/open-mmlab/mmclassification/pull/609)) +- Save class names in best checkpoint created by evaluation hook. ([#641](https://github.com/open-mmlab/mmclassification/pull/641)) + +### Bug Fixes + +- Fix potential unexcepted behaviors if `metric_options` is not specified in multi-label evaluation. ([#647](https://github.com/open-mmlab/mmclassification/pull/647)) +- Fix API changes in `pytorch-grad-cam>=1.3.7`. ([#656](https://github.com/open-mmlab/mmclassification/pull/656)) +- Fix bug which breaks `cal_train_time` in `analyze_logs.py`. ([#662](https://github.com/open-mmlab/mmclassification/pull/662)) + +### Docs Update + +- Update README in configs according to OpenMMLab standard. ([#672](https://github.com/open-mmlab/mmclassification/pull/672)) +- Update installation guide and README. ([#624](https://github.com/open-mmlab/mmclassification/pull/624)) + +## v0.19.0(31/12/2021) + +### Highlights + +- The feature extraction function has been enhanced. See [#593](https://github.com/open-mmlab/mmclassification/pull/593) for more details. +- Provide the high-acc ResNet-50 training settings from [*ResNet strikes back*](https://arxiv.org/abs/2110.00476). +- Reproduce the training accuracy of T2T-ViT & RegNetX, and provide self-training checkpoints. +- Support DeiT & Conformer backbone and checkpoints. +- Provide a CAM visualization tool based on [pytorch-grad-cam](https://github.com/jacobgil/pytorch-grad-cam), and detailed [user guide](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#class-activation-map-visualization)! + +### New Features + +- Support Precise BN. ([#401](https://github.com/open-mmlab/mmclassification/pull/401)) +- Add CAM visualization tool. ([#577](https://github.com/open-mmlab/mmclassification/pull/577)) +- Repeated Aug and Sampler Registry. ([#588](https://github.com/open-mmlab/mmclassification/pull/588)) +- Add DeiT backbone and checkpoints. ([#576](https://github.com/open-mmlab/mmclassification/pull/576)) +- Support LAMB optimizer. ([#591](https://github.com/open-mmlab/mmclassification/pull/591)) +- Implement the conformer backbone. ([#494](https://github.com/open-mmlab/mmclassification/pull/494)) +- Add the frozen function for Swin Transformer model. ([#574](https://github.com/open-mmlab/mmclassification/pull/574)) +- Support using checkpoint in Swin Transformer to save memory. ([#557](https://github.com/open-mmlab/mmclassification/pull/557)) + +### Improvements + +- [Reproduction] Reproduce RegNetX training accuracy. ([#587](https://github.com/open-mmlab/mmclassification/pull/587)) +- [Reproduction] Reproduce training results of T2T-ViT. ([#610](https://github.com/open-mmlab/mmclassification/pull/610)) +- [Enhance] Provide high-acc training settings of ResNet. ([#572](https://github.com/open-mmlab/mmclassification/pull/572)) +- [Enhance] Set a random seed when the user does not set a seed. ([#554](https://github.com/open-mmlab/mmclassification/pull/554)) +- [Enhance] Added `NumClassCheckHook` and unit tests. ([#559](https://github.com/open-mmlab/mmclassification/pull/559)) +- [Enhance] Enhance feature extraction function. ([#593](https://github.com/open-mmlab/mmclassification/pull/593)) +- [Enhance] Improve efficiency of precision, recall, f1_score and support. ([#595](https://github.com/open-mmlab/mmclassification/pull/595)) +- [Enhance] Improve accuracy calculation performance. ([#592](https://github.com/open-mmlab/mmclassification/pull/592)) +- [Refactor] Refactor `analysis_log.py`. 
([#529](https://github.com/open-mmlab/mmclassification/pull/529)) +- [Refactor] Use new API of matplotlib to handle blocking input in visualization. ([#568](https://github.com/open-mmlab/mmclassification/pull/568)) +- [CI] Cancel previous runs that are not completed. ([#583](https://github.com/open-mmlab/mmclassification/pull/583)) +- [CI] Skip build CI if only configs or docs modification. ([#575](https://github.com/open-mmlab/mmclassification/pull/575)) + +### Bug Fixes + +- Fix test sampler bug. ([#611](https://github.com/open-mmlab/mmclassification/pull/611)) +- Try to create a symbolic link, otherwise copy. ([#580](https://github.com/open-mmlab/mmclassification/pull/580)) +- Fix a bug for multiple output in swin transformer. ([#571](https://github.com/open-mmlab/mmclassification/pull/571)) + +### Docs Update + +- Update mmcv, torch, cuda version in Dockerfile and docs. ([#594](https://github.com/open-mmlab/mmclassification/pull/594)) +- Add analysis&misc docs. ([#525](https://github.com/open-mmlab/mmclassification/pull/525)) +- Fix docs build dependency. ([#584](https://github.com/open-mmlab/mmclassification/pull/584)) + +## v0.18.0(30/11/2021) + +### Highlights + +- Support MLP-Mixer backbone and provide pre-trained checkpoints. +- Add a tool to visualize the learning rate curve of the training phase. Welcome to use with the [tutorial](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#learning-rate-schedule-visualization)! + +### New Features + +- Add MLP Mixer Backbone. ([#528](https://github.com/open-mmlab/mmclassification/pull/528), [#539](https://github.com/open-mmlab/mmclassification/pull/539)) +- Support positive weights in BCE. ([#516](https://github.com/open-mmlab/mmclassification/pull/516)) +- Add a tool to visualize learning rate in each iterations. ([#498](https://github.com/open-mmlab/mmclassification/pull/498)) + +### Improvements + +- Use CircleCI to do unit tests. ([#567](https://github.com/open-mmlab/mmclassification/pull/567)) +- Focal loss for single label tasks. ([#548](https://github.com/open-mmlab/mmclassification/pull/548)) +- Remove useless `import_modules_from_string`. ([#544](https://github.com/open-mmlab/mmclassification/pull/544)) +- Rename config files according to the config name standard. ([#508](https://github.com/open-mmlab/mmclassification/pull/508)) +- Use `reset_classifier` to remove head of timm backbones. ([#534](https://github.com/open-mmlab/mmclassification/pull/534)) +- Support passing arguments to loss from head. ([#523](https://github.com/open-mmlab/mmclassification/pull/523)) +- Refactor `Resize` transform and add `Pad` transform. ([#506](https://github.com/open-mmlab/mmclassification/pull/506)) +- Update mmcv dependency version. ([#509](https://github.com/open-mmlab/mmclassification/pull/509)) + +### Bug Fixes + +- Fix bug when using `ClassBalancedDataset`. ([#555](https://github.com/open-mmlab/mmclassification/pull/555)) +- Fix a bug when using iter-based runner with 'val' workflow. ([#542](https://github.com/open-mmlab/mmclassification/pull/542)) +- Fix interpolation method checking in `Resize`. ([#547](https://github.com/open-mmlab/mmclassification/pull/547)) +- Fix a bug when load checkpoints in mulit-GPUs environment. ([#527](https://github.com/open-mmlab/mmclassification/pull/527)) +- Fix an error on indexing scalar metrics in `analyze_result.py`. ([#518](https://github.com/open-mmlab/mmclassification/pull/518)) +- Fix wrong condition judgment in `analyze_logs.py` and prevent empty curve. 
([#510](https://github.com/open-mmlab/mmclassification/pull/510)) + +### Docs Update + +- Fix vit config and model broken links. ([#564](https://github.com/open-mmlab/mmclassification/pull/564)) +- Add abstract and image for every paper. ([#546](https://github.com/open-mmlab/mmclassification/pull/546)) +- Add mmflow and mim in banner and readme. ([#543](https://github.com/open-mmlab/mmclassification/pull/543)) +- Add schedule and runtime tutorial docs. ([#499](https://github.com/open-mmlab/mmclassification/pull/499)) +- Add the top-5 acc in ResNet-CIFAR README. ([#531](https://github.com/open-mmlab/mmclassification/pull/531)) +- Fix TOC of `visualization.md` and add example images. ([#513](https://github.com/open-mmlab/mmclassification/pull/513)) +- Use docs link of other projects and add MMCV docs. ([#511](https://github.com/open-mmlab/mmclassification/pull/511)) + +## v0.17.0(29/10/2021) + +### Highlights + +- Support Tokens-to-Token ViT backbone and Res2Net backbone. Welcome to use! +- Support ImageNet21k dataset. +- Add a pipeline visualization tool. Try it with the [tutorials](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#pipeline-visualization)! + +### New Features + +- Add Tokens-to-Token ViT backbone and converted checkpoints. ([#467](https://github.com/open-mmlab/mmclassification/pull/467)) +- Add Res2Net backbone and converted weights. ([#465](https://github.com/open-mmlab/mmclassification/pull/465)) +- Support ImageNet21k dataset. ([#461](https://github.com/open-mmlab/mmclassification/pull/461)) +- Support seesaw loss. ([#500](https://github.com/open-mmlab/mmclassification/pull/500)) +- Add a pipeline visualization tool. ([#406](https://github.com/open-mmlab/mmclassification/pull/406)) +- Add a tool to find broken files. ([#482](https://github.com/open-mmlab/mmclassification/pull/482)) +- Add a tool to test TorchServe. ([#468](https://github.com/open-mmlab/mmclassification/pull/468)) + +### Improvements + +- Refator Vision Transformer. ([#395](https://github.com/open-mmlab/mmclassification/pull/395)) +- Use context manager to reuse matplotlib figures. ([#432](https://github.com/open-mmlab/mmclassification/pull/432)) + +### Bug Fixes + +- Remove `DistSamplerSeedHook` if use `IterBasedRunner`. ([#501](https://github.com/open-mmlab/mmclassification/pull/501)) +- Set the priority of `EvalHook` to "LOW" to avoid a bug when using `IterBasedRunner`. ([#488](https://github.com/open-mmlab/mmclassification/pull/488)) +- Fix a wrong parameter of `get_root_logger` in `apis/train.py`. ([#486](https://github.com/open-mmlab/mmclassification/pull/486)) +- Fix version check in dataset builder. ([#474](https://github.com/open-mmlab/mmclassification/pull/474)) + +### Docs Update + +- Add English Colab tutorials and update Chinese Colab tutorials. ([#483](https://github.com/open-mmlab/mmclassification/pull/483), [#497](https://github.com/open-mmlab/mmclassification/pull/497)) +- Add tutuorial for config files. ([#487](https://github.com/open-mmlab/mmclassification/pull/487)) +- Add model-pages in Model Zoo. ([#480](https://github.com/open-mmlab/mmclassification/pull/480)) +- Add code-spell pre-commit hook and fix a large mount of typos. ([#470](https://github.com/open-mmlab/mmclassification/pull/470)) + +## v0.16.0(30/9/2021) + +### Highlights + +- We have improved compatibility with downstream repositories like MMDetection and MMSegmentation. We will add some examples about how to use our backbones in MMDetection. +- Add RepVGG backbone and checkpoints. 
Welcome to use it! +- Add timm backbones wrapper, now you can simply use backbones of pytorch-image-models in MMClassification! + +### New Features + +- Add RepVGG backbone and checkpoints. ([#414](https://github.com/open-mmlab/mmclassification/pull/414)) +- Add timm backbones wrapper. ([#427](https://github.com/open-mmlab/mmclassification/pull/427)) + +### Improvements + +- Fix TnT compatibility and verbose warning. ([#436](https://github.com/open-mmlab/mmclassification/pull/436)) +- Support setting `--out-items` in `tools/test.py`. ([#437](https://github.com/open-mmlab/mmclassification/pull/437)) +- Add datetime info and saving model using torch\<1.6 format. ([#439](https://github.com/open-mmlab/mmclassification/pull/439)) +- Improve downstream repositories compatibility. ([#421](https://github.com/open-mmlab/mmclassification/pull/421)) +- Rename the option `--options` to `--cfg-options` in some tools. ([#425](https://github.com/open-mmlab/mmclassification/pull/425)) +- Add PyTorch 1.9 and Python 3.9 build workflow, and remove some CI. ([#422](https://github.com/open-mmlab/mmclassification/pull/422)) + +### Bug Fixes + +- Fix format error in `test.py` when metric returns `np.ndarray`. ([#441](https://github.com/open-mmlab/mmclassification/pull/441)) +- Fix `publish_model` bug if no parent of `out_file`. ([#463](https://github.com/open-mmlab/mmclassification/pull/463)) +- Fix num_classes bug in pytorch2onnx.py. ([#458](https://github.com/open-mmlab/mmclassification/pull/458)) +- Fix missing runtime requirement `packaging`. ([#459](https://github.com/open-mmlab/mmclassification/pull/459)) +- Fix saving simplified model bug in ONNX export tool. ([#438](https://github.com/open-mmlab/mmclassification/pull/438)) + +### Docs Update + +- Update `getting_started.md` and `install.md`. And rewrite `finetune.md`. ([#466](https://github.com/open-mmlab/mmclassification/pull/466)) +- Use PyTorch style docs theme. ([#457](https://github.com/open-mmlab/mmclassification/pull/457)) +- Update metafile and Readme. ([#435](https://github.com/open-mmlab/mmclassification/pull/435)) +- Add `CITATION.cff`. ([#428](https://github.com/open-mmlab/mmclassification/pull/428)) + +## v0.15.0(31/8/2021) + +### Highlights + +- Support `hparams` argument in `AutoAugment` and `RandAugment` to provide hyperparameters for sub-policies. +- Support custom squeeze channels in `SELayer`. +- Support classwise weight in losses. + +### New Features + +- Add `hparams` argument in `AutoAugment` and `RandAugment` and some other improvement. ([#398](https://github.com/open-mmlab/mmclassification/pull/398)) +- Support classwise weight in losses. ([#388](https://github.com/open-mmlab/mmclassification/pull/388)) +- Enhance `SELayer` to support custom squeeze channels. ([#417](https://github.com/open-mmlab/mmclassification/pull/417)) + +### Code Refactor + +- Better result visualization. ([#419](https://github.com/open-mmlab/mmclassification/pull/419)) +- Use `post_process` function to handle pred result processing. ([#390](https://github.com/open-mmlab/mmclassification/pull/390)) +- Update `digit_version` function. ([#402](https://github.com/open-mmlab/mmclassification/pull/402)) +- Avoid albumentations to install both opencv and opencv-headless. ([#397](https://github.com/open-mmlab/mmclassification/pull/397)) +- Avoid unnecessary listdir when building ImageNet. ([#396](https://github.com/open-mmlab/mmclassification/pull/396)) +- Use dynamic mmcv download link in TorchServe dockerfile. 
([#387](https://github.com/open-mmlab/mmclassification/pull/387)) + +### Docs Improvement + +- Add readme of some algorithms and update meta yml. ([#418](https://github.com/open-mmlab/mmclassification/pull/418)) +- Add Copyright information. ([#413](https://github.com/open-mmlab/mmclassification/pull/413)) +- Fix typo 'metirc'. ([#411](https://github.com/open-mmlab/mmclassification/pull/411)) +- Update QQ group QR code. ([#393](https://github.com/open-mmlab/mmclassification/pull/393)) +- Add PR template and modify issue template. ([#380](https://github.com/open-mmlab/mmclassification/pull/380)) + +## v0.14.0(4/8/2021) + +### Highlights + +- Add transformer-in-transformer backbone and pretrain checkpoints, refers to [the paper](https://arxiv.org/abs/2103.00112). +- Add Chinese colab tutorial. +- Provide dockerfile to build mmcls dev docker image. + +### New Features + +- Add transformer in transformer backbone and pretrain checkpoints. ([#339](https://github.com/open-mmlab/mmclassification/pull/339)) +- Support mim, welcome to use mim to manage your mmcls project. ([#376](https://github.com/open-mmlab/mmclassification/pull/376)) +- Add Dockerfile. ([#365](https://github.com/open-mmlab/mmclassification/pull/365)) +- Add ResNeSt configs. ([#332](https://github.com/open-mmlab/mmclassification/pull/332)) + +### Improvements + +- Use the `presistent_works` option if available, to accelerate training. ([#349](https://github.com/open-mmlab/mmclassification/pull/349)) +- Add Chinese ipynb tutorial. ([#306](https://github.com/open-mmlab/mmclassification/pull/306)) +- Refactor unit tests. ([#321](https://github.com/open-mmlab/mmclassification/pull/321)) +- Support to test mmdet inference with mmcls backbone. ([#343](https://github.com/open-mmlab/mmclassification/pull/343)) +- Use zero as default value of `thrs` in metrics. ([#341](https://github.com/open-mmlab/mmclassification/pull/341)) + +### Bug Fixes + +- Fix ImageNet dataset annotation file parse bug. ([#370](https://github.com/open-mmlab/mmclassification/pull/370)) +- Fix docstring typo and init bug in ShuffleNetV1. ([#374](https://github.com/open-mmlab/mmclassification/pull/374)) +- Use local ATTENTION registry to avoid conflict with other repositories. ([#376](https://github.com/open-mmlab/mmclassification/pull/375)) +- Fix swin transformer config bug. ([#355](https://github.com/open-mmlab/mmclassification/pull/355)) +- Fix `patch_cfg` argument bug in SwinTransformer. ([#368](https://github.com/open-mmlab/mmclassification/pull/368)) +- Fix duplicate `init_weights` call in ViT init function. ([#373](https://github.com/open-mmlab/mmclassification/pull/373)) +- Fix broken `_base_` link in a resnet config. ([#361](https://github.com/open-mmlab/mmclassification/pull/361)) +- Fix vgg-19 model link missing. ([#363](https://github.com/open-mmlab/mmclassification/pull/363)) + +## v0.13.0(3/7/2021) + +- Support Swin-Transformer backbone and add training configs for Swin-Transformer on ImageNet. + +### New Features + +- Support Swin-Transformer backbone and add training configs for Swin-Transformer on ImageNet. (#271) +- Add pretained model of RegNetX. (#269) +- Support adding custom hooks in config file. (#305) +- Improve and add Chinese translation of `CONTRIBUTING.md` and all tools tutorials. (#320) +- Dump config before training. (#282) +- Add torchscript and torchserve deployment tools. (#279, #284) + +### Improvements + +- Improve test tools and add some new tools. (#322) +- Correct MobilenetV3 backbone structure and add pretained models. 
(#291) +- Refactor `PatchEmbed` and `HybridEmbed` as independent components. (#330) +- Refactor mixup and cutmix as `Augments` to support more functions. (#278) +- Refactor weights initialization method. (#270, #318, #319) +- Refactor `LabelSmoothLoss` to support multiple calculation formulas. (#285) + +### Bug Fixes + +- Fix bug for CPU training. (#286) +- Fix missing test data when `num_imgs` can not be evenly divided by `num_gpus`. (#299) +- Fix build compatible with pytorch v1.3-1.5. (#301) +- Fix `magnitude_std` bug in `RandAugment`. (#309) +- Fix bug when `samples_per_gpu` is 1. (#311) + +## v0.12.0(3/6/2021) + +- Finish adding Chinese tutorials and build Chinese documentation on readthedocs. +- Update ResNeXt checkpoints and ResNet checkpoints on CIFAR. + +### New Features + +- Improve and add Chinese translation of `data_pipeline.md` and `new_modules.md`. (#265) +- Build Chinese translation on readthedocs. (#267) +- Add an argument efficientnet_style to `RandomResizedCrop` and `CenterCrop`. (#268) + +### Improvements + +- Only allow directory operation when rank==0 when testing. (#258) +- Fix typo in `base_head`. (#274) +- Update ResNeXt checkpoints. (#283) + +### Bug Fixes + +- Add attribute `data.test` in MNIST configs. (#264) +- Download CIFAR/MNIST dataset only on rank 0. (#273) +- Fix MMCV version compatibility. (#276) +- Fix CIFAR color channels bug and update checkpoints in model zoo. (#280) + +## v0.11.1(21/5/2021) + +- Refine `new_dataset.md` and add Chinese translation of `finture.md`, `new_dataset.md`. + +### New Features + +- Add `dim` argument for `GlobalAveragePooling`. (#236) +- Add random noise to `RandAugment` magnitude. (#240) +- Refine `new_dataset.md` and add Chinese translation of `finture.md`, `new_dataset.md`. (#243) + +### Improvements + +- Refactor arguments passing for Heads. (#239) +- Allow more flexible `magnitude_range` in `RandAugment`. (#249) +- Inherits MMCV registry so that in the future OpenMMLab repos like MMDet and MMSeg could directly use the backbones supported in MMCls. (#252) + +### Bug Fixes + +- Fix typo in `analyze_results.py`. (#237) +- Fix typo in unittests. (#238) +- Check if specified tmpdir exists when testing to avoid deleting existing data. (#242 & #258) +- Add missing config files in `MANIFEST.in`. (#250 & #255) +- Use temporary directory under shared directory to collect results to avoid unavailability of temporary directory for multi-node testing. (#251) + +## v0.11.0(1/5/2021) + +- Support cutmix trick. +- Support random augmentation. +- Add `tools/deployment/test.py` as a ONNX runtime test tool. +- Support ViT backbone and add training configs for ViT on ImageNet. +- Add Chinese `README.md` and some Chinese tutorials. + +### New Features + +- Support cutmix trick. (#198) +- Add `simplify` option in `pytorch2onnx.py`. (#200) +- Support random augmentation. (#201) +- Add config and checkpoint for training ResNet on CIFAR-100. (#208) +- Add `tools/deployment/test.py` as a ONNX runtime test tool. (#212) +- Support ViT backbone and add training configs for ViT on ImageNet. (#214) +- Add finetuning configs for ViT on ImageNet. (#217) +- Add `device` option to support training on CPU. (#219) +- Add Chinese `README.md` and some Chinese tutorials. (#221) +- Add `metafile.yml` in configs to support interaction with paper with code(PWC) and MMCLI. (#225) +- Upload configs and converted checkpoints for ViT fintuning on ImageNet. 
(#230) + +### Improvements + +- Fix `LabelSmoothLoss` so that label smoothing and mixup could be enabled at the same time. (#203) +- Add `cal_acc` option in `ClsHead`. (#206) +- Check `CLASSES` in checkpoint to avoid unexpected key error. (#207) +- Check mmcv version when importing mmcls to ensure compatibility. (#209) +- Update `CONTRIBUTING.md` to align with that in MMCV. (#210) +- Change tags to html comments in configs README.md. (#226) +- Clean codes in ViT backbone. (#227) +- Reformat `pytorch2onnx.md` tutorial. (#229) +- Update `setup.py` to support MMCLI. (#232) + +### Bug Fixes + +- Fix missing `cutmix_prob` in ViT configs. (#220) +- Fix backend for resize in ResNeXt configs. (#222) + +## v0.10.0(1/4/2021) + +- Support AutoAugmentation +- Add tutorials for installation and usage. + +### New Features + +- Add `Rotate` pipeline for data augmentation. (#167) +- Add `Invert` pipeline for data augmentation. (#168) +- Add `Color` pipeline for data augmentation. (#171) +- Add `Solarize` and `Posterize` pipeline for data augmentation. (#172) +- Support fp16 training. (#178) +- Add tutorials for installation and basic usage of MMClassification.(#176) +- Support `AutoAugmentation`, `AutoContrast`, `Equalize`, `Contrast`, `Brightness` and `Sharpness` pipelines for data augmentation. (#179) + +### Improvements + +- Support dynamic shape export to onnx. (#175) +- Release training configs and update model zoo for fp16 (#184) +- Use MMCV's EvalHook in MMClassification (#182) + +### Bug Fixes + +- Fix wrong naming in vgg config (#181) + +## v0.9.0(1/3/2021) + +- Implement mixup trick. +- Add a new tool to create TensorRT engine from ONNX, run inference and verify outputs in Python. + +### New Features + +- Implement mixup and provide configs of training ResNet50 using mixup. (#160) +- Add `Shear` pipeline for data augmentation. (#163) +- Add `Translate` pipeline for data augmentation. (#165) +- Add `tools/onnx2tensorrt.py` as a tool to create TensorRT engine from ONNX, run inference and verify outputs in Python. (#153) + +### Improvements + +- Add `--eval-options` in `tools/test.py` to support eval options override, matching the behavior of other open-mmlab projects. (#158) +- Support showing and saving painted results in `mmcls.apis.test` and `tools/test.py`, matching the behavior of other open-mmlab projects. (#162) + +### Bug Fixes + +- Fix configs for VGG, replace checkpoints converted from other repos with the ones trained by ourselves and upload the missing logs in the model zoo. (#161) + +## v0.8.0(31/1/2021) + +- Support multi-label task. +- Support more flexible metrics settings. +- Fix bugs. + +### New Features + +- Add evaluation metrics: mAP, CP, CR, CF1, OP, OR, OF1 for multi-label task. (#123) +- Add BCE loss for multi-label task. (#130) +- Add focal loss for multi-label task. (#131) +- Support PASCAL VOC 2007 dataset for multi-label task. (#134) +- Add asymmetric loss for multi-label task. (#132) +- Add analyze_results.py to select images for success/fail demonstration. (#142) +- Support new metric that calculates the total number of occurrences of each label. (#143) +- Support class-wise evaluation results. (#143) +- Add thresholds in eval_metrics. (#146) +- Add heads and a baseline config for multilabel task. (#145) + +### Improvements + +- Remove the models with 0 checkpoint and ignore the repeated papers when counting papers to gain more accurate model statistics. (#135) +- Add tags in README.md. (#137) +- Fix optional issues in docstring. 
(#138)
+- Update stat.py to classify papers. (#139)
+- Fix mismatched columns in README.md. (#150)
+- Fix test.py to support more evaluation metrics. (#155)
+
+### Bug Fixes
+
+- Fix bug in VGG weight_init. (#140)
+- Fix bug in 2 ResNet configs in which outdated heads were used. (#147)
+- Fix bug of misordered height and width in `RandomCrop` and `RandomResizedCrop`. (#151)
+- Fix missing `meta_keys` in `Collect`. (#149 & #152)
+
+## v0.7.0(31/12/2020)
+
+- Add more evaluation metrics.
+- Fix bugs.
+
+### New Features
+
+- Remove installation of MMCV from requirements. (#90)
+- Add 3 evaluation metrics: precision, recall and F-1 score. (#93)
+- Allow config override during testing and inference with `--options`. (#91 & #96)
+
+### Improvements
+
+- Use `build_runner` to make runners more flexible. (#54)
+- Support to get category ids in `BaseDataset`. (#72)
+- Allow `CLASSES` override during `BaseDataset` initialization. (#85)
+- Allow input image as ndarray during inference. (#87)
+- Optimize MNIST config. (#98)
+- Add config links in model zoo documentation. (#99)
+- Use functions from MMCV to collect environment. (#103)
+- Refactor config files so that they are now categorized by methods. (#116)
+- Add README in config directory. (#117)
+- Add model statistics. (#119)
+- Refactor documentation in consistency with other MM repositories. (#126)
+
+### Bug Fixes
+
+- Add missing `CLASSES` argument to dataset wrappers. (#66)
+- Fix slurm evaluation error during training. (#69)
+- Resolve error caused by shape in `Accuracy`. (#104)
+- Fix bug caused by extremely insufficient data in distributed sampler. (#108)
+- Fix bug in `gpu_ids` in distributed training. (#107)
+- Fix bug caused by extremely insufficient data in collect results during testing. (#114)
+
+## v0.6.0(11/10/2020)
+
+- Support new methods: ResNeSt and VGG.
+- Support new dataset: CIFAR10.
+- Provide new tools for model inference and model conversion from PyTorch to ONNX.
+
+### New Features
+
+- Add model inference. (#16)
+- Add pytorch2onnx. (#20)
+- Add PIL backend for transform `Resize`. (#21)
+- Add ResNeSt. (#25)
+- Add VGG and its pretrained models. (#27)
+- Add CIFAR10 configs and models. (#38)
+- Add albumentations transforms. (#45)
+- Visualize results on image demo. (#58)
+
+### Improvements
+
+- Replace urlretrieve with urlopen in dataset.utils. (#13)
+- Resize image according to its short edge. (#22)
+- Update ShuffleNet config. (#31)
+- Update pre-trained models for shufflenet_v2, shufflenet_v1, se-resnet50, se-resnet101. (#33)
+
+### Bug Fixes
+
+- Fix init_weights in `shufflenet_v2.py`. (#29)
+- Fix the parameter `size` in test_pipeline. (#30)
+- Fix the parameter in cosine lr schedule. (#32)
+- Fix the convert tools for mobilenet_v2. (#34)
+- Fix crash in CenterCrop transform when image is greyscale. (#40)
+- Fix outdated configs. (#53)
diff --git a/docs/en/community/CONTRIBUTING.md b/docs/en/community/CONTRIBUTING.md
new file mode 120000
index 0000000..c97564d
--- /dev/null
+++ b/docs/en/community/CONTRIBUTING.md
@@ -0,0 +1 @@
+../../../CONTRIBUTING.md
\ No newline at end of file
diff --git a/docs/en/compatibility.md b/docs/en/compatibility.md
new file mode 100644
index 0000000..1affb8e
--- /dev/null
+++ b/docs/en/compatibility.md
@@ -0,0 +1,8 @@
+# Compatibility of MMClassification 0.x
+
+## MMClassification 0.20.1
+
+### MMCV compatibility
+
+In the Twins backbone, we use the `PatchEmbed` module of MMCV, which is not available in MMCV versions earlier than 1.4.2.
+Therefore, you need to upgrade MMCV to version 1.4.2 or above.
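+
+A minimal check of the installed version is sketched below; it assumes `digit_version` is importable from `mmcv.utils`, which is the case for recent MMCV 1.x releases.
+
+```python
+import mmcv
+from mmcv.utils import digit_version
+
+# Compare the installed MMCV version against the minimum required by the Twins backbone.
+if digit_version(mmcv.__version__) < digit_version('1.4.2'):
+    raise RuntimeError(
+        f'Twins requires MMCV >= 1.4.2, but {mmcv.__version__} is installed. '
+        'Please upgrade MMCV following the official installation guide.')
+```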
diff --git a/docs/en/conf.py b/docs/en/conf.py new file mode 100644 index 0000000..aa1d9f4 --- /dev/null +++ b/docs/en/conf.py @@ -0,0 +1,256 @@ +# flake8: noqa +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'MMClassification' +copyright = '2020, OpenMMLab' +author = 'MMClassification Authors' + +# The full version, including alpha/beta/rc tags +version_file = '../../mmcls/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_copybutton', +] + +autodoc_mock_imports = ['mmcv._ext', 'matplotlib'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +language = 'en' + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+# +# yapf: disable +html_theme_options = { + 'logo_url': 'https://mmclassification.readthedocs.io/en/latest/', + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmclassification' + }, + { + 'name': 'Colab Tutorials', + 'children': [ + { + 'name': 'Train and inference with shell commands', + 'url': 'https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/en/tutorials/MMClassification_tools.ipynb', + }, + { + 'name': 'Train and inference with Python APIs', + 'url': 'https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/en/tutorials/MMClassification_python.ipynb', + }, + ] + }, + { + 'name': 'Version', + 'children': [ + { + 'name': 'MMClassification 0.x', + 'url': 'https://mmclassification.readthedocs.io/en/latest/', + 'description': 'master branch' + }, + { + 'name': 'MMClassification 1.x', + 'url': 'https://mmclassification.readthedocs.io/en/dev-1.x/', + 'description': '1.x branch' + }, + ], + } + ], + # Specify the language of shared menu + 'menu_lang': 'en', + 'header_note': { + 'content': + 'You are reading the documentation for MMClassification 0.x, which ' + 'will soon be deprecated at the end of 2022. We recommend you upgrade ' + 'to MMClassification 1.0 to enjoy fruitful new features and better ' + 'performance brought by OpenMMLab 2.0. Check the ' + 'installation tutorial, ' + 'migration tutorial ' + 'and changelog ' + 'for more details.', + } +} +# yapf: enable + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] +html_js_files = ['js/custom.js'] + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'mmclsdoc' + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + 'preamble': + r''' +\hypersetup{unicode=true} +\usepackage{CJKutf8} +\DeclareUnicodeCharacter{00A0}{\nobreakspace} +\DeclareUnicodeCharacter{2203}{\ensuremath{\exists}} +\DeclareUnicodeCharacter{2200}{\ensuremath{\forall}} +\DeclareUnicodeCharacter{2286}{\ensuremath{\subseteq}} +\DeclareUnicodeCharacter{2713}{x} +\DeclareUnicodeCharacter{27FA}{\ensuremath{\Longleftrightarrow}} +\DeclareUnicodeCharacter{221A}{\ensuremath{\sqrt{}}} +\DeclareUnicodeCharacter{221B}{\ensuremath{\sqrt[3]{}}} +\DeclareUnicodeCharacter{2295}{\ensuremath{\oplus}} +\DeclareUnicodeCharacter{2297}{\ensuremath{\otimes}} +\begin{CJK}{UTF8}{gbsn} +\AtEndDocument{\end{CJK}} +''', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'mmcls.tex', 'MMClassification Documentation', author, + 'manual'), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [(master_doc, 'mmcls', 'MMClassification Documentation', [author], + 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'mmcls', 'MMClassification Documentation', author, 'mmcls', + 'OpenMMLab image classification toolbox and benchmark.', 'Miscellaneous'), +] + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True +# Auto-generated header anchors +myst_heading_anchors = 3 +# Configuration for intersphinx +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable', None), + 'torch': ('https://pytorch.org/docs/stable/', None), + 'mmcv': ('https://mmcv.readthedocs.io/en/master/', None), +} + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/en/device/npu.md b/docs/en/device/npu.md new file mode 100644 index 0000000..857a3a4 --- /dev/null +++ b/docs/en/device/npu.md @@ -0,0 +1,64 @@ +# NPU (HUAWEI Ascend) + +## Usage + +### General Usage + +Please install MMCV with NPU device support according to {external+mmcv:doc}`the tutorial `. 
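+
+Before launching training, it can be useful to confirm that PyTorch can actually see the NPU devices. Below is a quick sanity check, assuming the Ascend `torch_npu` plugin is installed; the snippet is only an illustration and not part of MMClassification:
+
+```python
+import torch
+import torch_npu  # Ascend adapter for PyTorch; registers the 'npu' device type
+
+# Both calls should succeed if the NPU environment is set up correctly.
+print(torch.npu.is_available())   # expected: True
+print(torch.npu.device_count())   # expected: number of visible NPUs
+```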
+ +Here we use 8 NPUs on your computer to train the model with the following command: + +```shell +bash ./tools/dist_train.sh configs/resnet/resnet50_8xb32_in1k.py 8 --device npu +``` + +Also, you can use only one NPU to train the model with the following command: + +```shell +python ./tools/train.py configs/resnet/resnet50_8xb32_in1k.py --device npu +``` + +### High-performance Usage on ARM server + +Since the scheduling ability of ARM CPUs when processing resource preemption is not as good as that of X86 CPUs during multi-card training, we provide a high-performance startup script to accelerate training with the following command: + +```shell +# The script under the 8 cards of a single machine is shown here +bash tools/dist_train_arm.sh configs/resnet/resnet50_8xb32_in1k.py 8 --device npu --cfg-options data.workers_per_gpu=$(($(nproc)/8)) +``` + +For resnet50 8 NPUs training with batch_size(data.samples_per_gpu)=512, the performance data is shown below: + +| CPU | Start Script | IterTime(s) | +| :------------------ | :------------------------ | :--------------: | +| ARM(Kunpeng920 \*4) | ./tools/dist_train.sh | ~0.9(0.85-1.0) | +| ARM(Kunpeng920 \*4) | ./tools/dist_train_arm.sh | ~0.8(0.78s-0.85) | + +## Models Results + +| Model | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------------------------------: | :-------: | :-------: | :----------------------------------------------------------: | :-------------------------------------------------------------: | +| [ResNet-50](../papers/resnet.md) | 76.38 | 93.22 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/resnet50_8xb32_in1k.log) | +| [ResNetXt-32x4d-50](../papers/resnext.md) | 77.55 | 93.75 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext50-32x4d_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/resnext50-32x4d_8xb32_in1k.log.json) | +| [HRNet-W18](../papers/hrnet.md) | 77.01 | 93.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/hrnet-w18_4xb32_in1k.log.json) | +| [ResNetV1D-152](../papers/resnet.md) | 79.11 | 94.54 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d152_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/resnetv1d152_8xb32_in1k.log.json) | +| [SE-ResNet-50](../papers/seresnet.md) | 77.64 | 93.76 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet50_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/seresnet50_8xb32_in1k.log.json) | +| [VGG-11](../papers/vgg.md) | 68.92 | 88.83 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/vgg11_8xb32_in1k.log.json) | +| [ShuffleNetV2 1.0x](../papers/shufflenet_v2.md) | 69.53 | 88.82 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/shufflenet-v2-1x_16xb64_in1k.json) | +| 
[MobileNetV2](../papers/mobilenet_v2.md) | 71.758 | 90.394 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/mobilenet-v2_8xb32_in1k.json) |
+| [MobileNetV3-Small](../papers/mobilenet_v3.md) | 67.522 | 87.316 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v3/mobilenet-v3-small_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/mobilenet-v3-small_8xb32_in1k.json) |
+| [\*CSPResNeXt50](../papers/cspnet.md) | 77.10 | 93.55 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspresnext50_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/cspresnext50_8xb32_in1k.log.json) |
+| [\*EfficientNet-B4(AA + AdvProp)](../papers/efficientnet.md) | 75.55 | 92.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/efficientnet-b4_8xb32-01norm_in1k.log.json) |
+| [\*\*DenseNet121](../papers/densenet.md) | 72.62 | 91.04 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet121_4xb256_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/densenet121_4xb256_in1k.log.json) |
+
+**Notes:**
+
+- Unless otherwise marked, the results on the NPU are almost the same as the results on the GPU with FP32.
+- (\*) The results of these models are lower than those reported in the READMEs of the corresponding models, mainly
+  because the README numbers are obtained by directly evaluating weights converted from timm, while the numbers here
+  come from retraining the config with mmcls. Training the same config on the GPU gives results consistent with the
+  NPU results.
+- (\*\*) The accuracy of this model is slightly lower because the config is a 4-card config while we ran it with 8
+  cards; users can adjust the hyperparameters to obtain the best accuracy.
+
+**All the above models are provided by the Huawei Ascend group.**
diff --git a/docs/en/docutils.conf b/docs/en/docutils.conf
new file mode 100644
index 0000000..0c00c84
--- /dev/null
+++ b/docs/en/docutils.conf
@@ -0,0 +1,2 @@
+[html writers]
+table_style: colwidths-auto
diff --git a/docs/en/faq.md b/docs/en/faq.md
new file mode 100644
index 0000000..6bd9822
--- /dev/null
+++ b/docs/en/faq.md
@@ -0,0 +1,84 @@
+# Frequently Asked Questions
+
+We list some common problems encountered by many users and their corresponding
+solutions here. Feel free to enrich the list if you find any frequent issues
+and have ways to help others solve them. If the contents here do not cover
+your issue, please create an issue using the
+[provided templates](https://github.com/open-mmlab/mmclassification/issues/new/choose)
+and make sure you fill in all required information in the template.
+
+## Installation
+
+- Compatibility issue between MMCV and MMClassification; "AssertionError:
+  MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx."
+
+  Compatible MMClassification and MMCV versions are shown below. Please
+  choose the correct version of MMCV to avoid installation issues.
+
+  | MMClassification version | MMCV version |
+  | :----------------------: | :--------------------: |
+  | dev | mmcv>=1.7.0, \<1.9.0 |
+  | 0.25.0 (master) | mmcv>=1.4.2, \<1.9.0 |
+  | 0.24.1 | mmcv>=1.4.2, \<1.9.0 |
+  | 0.23.2 | mmcv>=1.4.2, \<1.7.0 |
+  | 0.22.1 | mmcv>=1.4.2, \<1.6.0 |
+  | 0.21.0 | mmcv>=1.4.2, \<=1.5.0 |
+  | 0.20.1 | mmcv>=1.4.2, \<=1.5.0 |
+  | 0.19.0 | mmcv>=1.3.16, \<=1.5.0 |
+  | 0.18.0 | mmcv>=1.3.16, \<=1.5.0 |
+  | 0.17.0 | mmcv>=1.3.8, \<=1.5.0 |
+  | 0.16.0 | mmcv>=1.3.8, \<=1.5.0 |
+  | 0.15.0 | mmcv>=1.3.8, \<=1.5.0 |
+  | 0.14.0 | mmcv>=1.3.8, \<=1.5.0 |
+  | 0.13.0 | mmcv>=1.3.8, \<=1.5.0 |
+  | 0.12.0 | mmcv>=1.3.1, \<=1.5.0 |
+  | 0.11.1 | mmcv>=1.3.1, \<=1.5.0 |
+  | 0.11.0 | mmcv>=1.3.0 |
+  | 0.10.0 | mmcv>=1.3.0 |
+  | 0.9.0 | mmcv>=1.1.4 |
+  | 0.8.0 | mmcv>=1.1.4 |
+  | 0.7.0 | mmcv>=1.1.4 |
+  | 0.6.0 | mmcv>=1.1.4 |
+
+  ```{note}
+  Since the `dev` branch is under frequent development, the MMCV
+  version dependency may be inaccurate. If you encounter problems when using
+  the `dev` branch, please try to update MMCV to the latest version.
+  ```
+
+- Using Albumentations
+
+  If you would like to use `albumentations`, we suggest using `pip install -r requirements/albu.txt` or
+  `pip install -U albumentations --no-binary qudida,albumentations`.
+
+  If you simply use `pip install albumentations>=0.3.2`, it will install `opencv-python-headless` simultaneously
+  (even though you have already installed `opencv-python`). Please refer to the
+  [official documentation](https://albumentations.ai/docs/getting_started/installation/#note-on-opencv-dependencies)
+  for details.
+
+## Coding
+
+- Do I need to reinstall mmcls after some code modifications?
+
+  If you follow [the best practice](install.md) and install mmcls from source,
+  any local modifications made to the code will take effect without
+  reinstallation.
+
+- How to develop with multiple MMClassification versions?
+
+  Generally speaking, we recommend using different virtual environments to
+  manage MMClassification in different working directories. However, you
+  can also use the same environment to develop MMClassification in different
+  folders, like mmcls-0.21, mmcls-0.23. When you run the train or test shell scripts,
+  they will adopt the mmcls package in the current folder. And when you run other Python
+  scripts, you can add `` PYTHONPATH=`pwd` `` at the beginning of your command
+  to use the package in the current folder.
+
+  Conversely, to use the default MMClassification installed in the environment
+  rather than the one you are working with, you can remove the following line
+  in those shell scripts:
+
+  ```shell
+  PYTHONPATH="$(dirname $0)/..":$PYTHONPATH
+  ```
diff --git a/docs/en/getting_started.md b/docs/en/getting_started.md
new file mode 100644
index 0000000..4e8a9fc
--- /dev/null
+++ b/docs/en/getting_started.md
@@ -0,0 +1,275 @@
+# Getting Started
+
+This page provides basic tutorials about the usage of MMClassification.
+
+## Prepare datasets
+
+It is recommended to symlink the dataset root to `$MMCLASSIFICATION/data`.
+If your folder structure is different, you may need to change the corresponding paths in config files.
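+
+For example, assuming your ImageNet copy lives at `/path/to/imagenet` (an illustrative path), the symlink can be created like this:
+
+```shell
+mkdir -p data
+ln -s /path/to/imagenet data/imagenet
+```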
+ +``` +mmclassification +├── mmcls +├── tools +├── configs +├── docs +├── data +│ ├── imagenet +│ │ ├── meta +│ │ ├── train +│ │ ├── val +│ ├── cifar +│ │ ├── cifar-10-batches-py +│ ├── mnist +│ │ ├── train-images-idx3-ubyte +│ │ ├── train-labels-idx1-ubyte +│ │ ├── t10k-images-idx3-ubyte +│ │ ├── t10k-labels-idx1-ubyte + +``` + +For ImageNet, it has multiple versions, but the most commonly used one is [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/). It can be accessed with the following steps. + +1. Register an account and login to the [download page](http://www.image-net.org/download-images). +2. Find download links for ILSVRC2012 and download the following two files + - ILSVRC2012_img_train.tar (~138GB) + - ILSVRC2012_img_val.tar (~6.3GB) +3. Untar the downloaded files +4. Download meta data using this [script](https://github.com/BVLC/caffe/blob/master/data/ilsvrc12/get_ilsvrc_aux.sh) + +For MNIST, CIFAR10 and CIFAR100, the datasets will be downloaded and unzipped automatically if they are not found. + +For using custom datasets, please refer to [Tutorial 3: Customize Dataset](tutorials/new_dataset.md). + +## Inference with pretrained models + +We provide scripts to inference a single image, inference a dataset and test a dataset (e.g., ImageNet). + +### Inference a single image + +```shell +python demo/image_demo.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} + +# Example +python demo/image_demo.py demo/demo.JPEG configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth +``` + +### Inference and test a dataset + +- single GPU +- CPU +- single node multiple GPU +- multiple node + +You can use the following commands to infer a dataset. + +```shell +# single-gpu +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] + +# CPU: disable GPUs and run single-gpu testing script +export CUDA_VISIBLE_DEVICES=-1 +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] + +# multi-gpu +./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--metrics ${METRICS}] [--out ${RESULT_FILE}] + +# multi-node in slurm environment +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] --launcher slurm +``` + +Optional arguments: + +- `RESULT_FILE`: Filename of the output results. If not specified, the results will not be saved to a file. Support formats include json, yaml and pickle. +- `METRICS`:Items to be evaluated on the results, like accuracy, precision, recall, etc. + +Examples: + +Infer ResNet-50 on ImageNet validation set to get predicted labels and their corresponding predicted scores. + +```shell +python tools/test.py configs/resnet/resnet50_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth \ + --out result.pkl +``` + +## Train a model + +MMClassification implements distributed training and non-distributed training, +which uses `MMDistributedDataParallel` and `MMDataParallel` respectively. + +All outputs (log files and checkpoints) will be saved to the working directory, +which is specified by `work_dir` in the config file. + +By default we evaluate the model on the validation set after each epoch, you can change the evaluation interval by adding the interval argument in the training config. + +```python +evaluation = dict(interval=12) # Evaluate the model per 12 epochs. 
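+# The evaluation hook accepts more options than just the interval. The line
+# below is an illustrative, commented-out variant (check your mmcls/MMCV
+# version for the exact keys it supports): evaluate every epoch with the
+# accuracy metric and keep the best checkpoint.
+# evaluation = dict(interval=1, metric='accuracy', save_best='auto')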
+```
+
+### Train with a single GPU
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+If you want to specify the working directory in the command, you can add an argument `--work_dir ${YOUR_WORK_DIR}`.
+
+### Train with CPU
+
+The process of training on the CPU is consistent with single GPU training. We just need to disable GPUs before the training process.
+
+```shell
+export CUDA_VISIBLE_DEVICES=-1
+```
+
+And then run the script [above](#train-with-a-single-gpu).
+
+### Train with multiple GPUs on a single machine
+
+```shell
+./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments]
+```
+
+Optional arguments are:
+
+- `--no-validate` (**not suggested**): By default, the codebase will perform evaluation at every k (default value is 1) epochs during the training. To disable this behavior, use `--no-validate`.
+- `--work-dir ${WORK_DIR}`: Override the working directory specified in the config file.
+- `--resume-from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+
+Difference between `resume-from` and `load-from`:
+`resume-from` loads both the model weights and optimizer status, and the epoch is also inherited from the specified checkpoint. It is usually used for resuming a training process that was interrupted accidentally.
+`load-from` only loads the model weights and the training epoch starts from 0. It is usually used for finetuning.
+
+### Train with multiple machines
+
+If you launch with multiple machines simply connected with ethernet, you can run the following commands:
+
+On the first machine:
+
+```shell
+NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS
+```
+
+On the second machine:
+
+```shell
+NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS
+```
+
+Usually it is slow if you do not have high speed networking like InfiniBand.
+
+If you run MMClassification on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. (This script also supports single machine training.)
+
+```shell
+[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR}
+```
+
+You can check [slurm_train.sh](https://github.com/open-mmlab/mmclassification/blob/master/tools/slurm_train.sh) for full arguments and environment variables.
+
+If you just have multiple machines connected with ethernet, you can refer to the
+PyTorch [launch utility](https://pytorch.org/docs/stable/distributed_deprecated.html#launch-utility).
+Usually it is slow if you do not have high speed networking like InfiniBand.
+
+### Launch multiple jobs on a single machine
+
+If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs,
+you need to specify different ports (29500 by default) for each job to avoid communication conflicts.
+
+If you use `dist_train.sh` to launch training jobs, you can set the port in the commands.
+
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4
+CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4
+```
+
+If you launch training jobs with Slurm, you need to modify the config files (usually the 6th line from the bottom in config files) to set different communication ports.
+
+In `config1.py`,
+
+```python
+dist_params = dict(backend='nccl', port=29500)
+```
+
+In `config2.py`,
+
+```python
+dist_params = dict(backend='nccl', port=29501)
+```
+
+Then you can launch two jobs with `config1.py` and `config2.py`.
+
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR}
+CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR}
+```
+
+### Train with IPU
+
+The process of training on the IPU is consistent with single GPU training. We just need to have an IPU machine and environment,
+and add an extra argument `--ipu-replicas ${IPU_NUM}`.
+
+## Useful tools
+
+We provide lots of useful tools under the `tools/` directory.
+
+### Get the FLOPs and params (experimental)
+
+We provide a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model.
+
+```shell
+python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}]
+```
+
+You will get a result like this.
+
+```
+==============================
+Input shape: (3, 224, 224)
+Flops: 4.12 GFLOPs
+Params: 25.56 M
+==============================
+```
+
+```{warning}
+This tool is still experimental and we do not guarantee that the number is correct. You may use the result for simple comparisons, but double-check it before you adopt it in technical reports or papers.
+- FLOPs are related to the input shape while parameters are not. The default input shape is (1, 3, 224, 224).
+- Some operators are not counted into FLOPs like GN and custom operators. Refer to [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) for details.
+```
+
+### Publish a model
+
+Before you publish a model, you may want to
+
+1. Convert model weights to CPU tensors.
+2. Delete the optimizer states.
+3. Compute the hash of the checkpoint file and append the hash id to the filename.
+
+```shell
+python tools/convert_models/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME}
+```
+
+E.g.,
+
+```shell
+python tools/convert_models/publish_model.py work_dirs/resnet50/latest.pth imagenet_resnet50.pth
+```
+
+The final output filename will be `imagenet_resnet50_{date}-{hash id}.pth`.
+
+## Tutorials
+
+Currently, we provide the following tutorials for users.
+
+- [learn about config](tutorials/config.md)
+- [finetune models](tutorials/finetune.md)
+- [add new dataset](tutorials/new_dataset.md)
+- [design data pipeline](tutorials/data_pipeline.md)
+- [add new modules](tutorials/new_modules.md)
+- [customize schedule](tutorials/schedule.md)
+- [customize runtime settings](tutorials/runtime.md)
diff --git a/docs/en/index.rst b/docs/en/index.rst
new file mode 100644
index 0000000..d0a15b1
--- /dev/null
+++ b/docs/en/index.rst
@@ -0,0 +1,99 @@
+Welcome to MMClassification's documentation!
+============================================
+
+You can switch between Chinese and English documentation in the lower-left corner of the layout.
+
+您可以在页面左下角切换中英文文档。
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Get Started
+
+   install.md
+   getting_started.md
+
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Tutorials
+
+   tutorials/config.md
+   tutorials/finetune.md
+   tutorials/new_dataset.md
+   tutorials/data_pipeline.md
+   tutorials/new_modules.md
+   tutorials/schedule.md
+   tutorials/runtime.md
+
+
+.. 
toctree:: + :maxdepth: 1 + :caption: Model zoo + :glob: + + modelzoo_statistics.md + model_zoo.md + papers/* + + +.. toctree:: + :maxdepth: 1 + :caption: Useful Tools and Scripts + + tools/pytorch2onnx.md + tools/onnx2tensorrt.md + tools/pytorch2torchscript.md + tools/model_serving.md + tools/visualization.md + tools/analysis.md + tools/miscellaneous.md + + +.. toctree:: + :maxdepth: 1 + :caption: Community + + community/CONTRIBUTING.md + + +.. toctree:: + :maxdepth: 1 + :caption: API Reference + + mmcls.apis + mmcls.core + mmcls.models + mmcls.models.utils + mmcls.datasets + Data Transformations + Batch Augmentation + mmcls.utils + + +.. toctree:: + :maxdepth: 1 + :caption: Notes + + changelog.md + compatibility.md + faq.md + + +.. toctree:: + :maxdepth: 1 + :caption: Device Support + + device/npu.md + +.. toctree:: + :caption: Language Switch + + English + 简体中文 + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/en/install.md b/docs/en/install.md new file mode 100644 index 0000000..bde1a81 --- /dev/null +++ b/docs/en/install.md @@ -0,0 +1,219 @@ +# Prerequisites + +In this section we demonstrate how to prepare an environment with PyTorch. + +MMClassification works on Linux, Windows and macOS. It requires Python 3.6+, CUDA 9.2+ and PyTorch 1.5+. + +```{note} +If you are experienced with PyTorch and have already installed it, just skip this part and jump to the [next section](#installation). Otherwise, you can follow these steps for the preparation. +``` + +**Step 1.** Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html). + +**Step 2.** Create a conda environment and activate it. + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**Step 3.** Install PyTorch following [official instructions](https://pytorch.org/get-started/locally/), e.g. + +On GPU platforms: + +```shell +conda install pytorch torchvision -c pytorch +``` + +```{warning} +This command will automatically install the latest version PyTorch and cudatoolkit, please check whether they matches your environment. +``` + +On CPU platforms: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +# Installation + +We recommend that users follow our best practices to install MMClassification. However, the whole process is highly customizable. See [Customize Installation](#customize-installation) section for more information. + +## Best Practices + +**Step 0.** Install [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim). + +```shell +pip install -U openmim +mim install mmcv-full +``` + +**Step 1.** Install MMClassification. + +According to your needs, we support two install modes: + +- [Install from source (Recommended)](#install-from-source): You want to develop your own image classification task or new features based on MMClassification framework. For example, you want to add new dataset or new models. And you can use all tools we provided. +- [Install as a Python package](#install-as-a-python-package): You just want to call MMClassification's APIs or import MMClassification's modules in your project. + +### Install from source + +In this case, install mmcls from source: + +```shell +git clone https://github.com/open-mmlab/mmclassification.git +cd mmclassification +pip install -v -e . 
+# "-v" means verbose, or more output
+# "-e" means installing a project in editable mode,
+# thus any local modifications made to the code will take effect without reinstallation.
+```
+
+Optionally, if you want to contribute to MMClassification or experience experimental functions, please check out the dev branch:
+
+```shell
+git checkout dev
+```
+
+### Install as a Python package
+
+Just install with pip.
+
+```shell
+pip install mmcls
+```
+
+## Verify the installation
+
+To verify whether MMClassification is installed correctly, we provide some sample code to run an inference demo.
+
+**Step 1.** We need to download config and checkpoint files.
+
+```shell
+mim download mmcls --config resnet50_8xb32_in1k --dest .
+```
+
+**Step 2.** Verify the inference demo.
+
+Option (a). If you installed mmcls from source, just run the following command:
+
+```shell
+python demo/image_demo.py demo/demo.JPEG resnet50_8xb32_in1k.py resnet50_8xb32_in1k_20210831-ea4938fc.pth --device cpu
+```
+
+You will see the output result dict including `pred_label`, `pred_score` and `pred_class` in your terminal.
+And if you have a graphical interface (instead of a remote terminal, etc.), you can enable the `--show` option to show
+the demo image with these predictions in a window.
+
+Option (b). If you installed mmcls as a Python package, open your Python interpreter and copy & paste the following code.
+
+```python
+from mmcls.apis import init_model, inference_model
+
+config_file = 'resnet50_8xb32_in1k.py'
+checkpoint_file = 'resnet50_8xb32_in1k_20210831-ea4938fc.pth'
+model = init_model(config_file, checkpoint_file, device='cpu')  # or device='cuda:0'
+inference_model(model, 'demo/demo.JPEG')
+```
+
+You will see a dict printed, including the predicted label, score and category name.
+
+## Customize Installation
+
+### CUDA versions
+
+When installing PyTorch, you need to specify the version of CUDA. If you are
+not clear on which to choose, follow our recommendations:
+
+- For Ampere-based NVIDIA GPUs, such as GeForce 30 series and NVIDIA A100, CUDA 11 is a must.
+- For older NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 offers better compatibility and is more lightweight.
+
+Please make sure the GPU driver satisfies the minimum version requirements. See [this table](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) for more information.
+
+```{note}
+Installing CUDA runtime libraries is enough if you follow our best practices,
+because no CUDA code will be compiled locally. However, if you hope to compile
+MMCV from source or develop other CUDA operators, you need to install the
+complete CUDA toolkit from NVIDIA's [website](https://developer.nvidia.com/cuda-downloads),
+and its version should match the CUDA version of PyTorch, i.e., the specified
+version of cudatoolkit in the `conda install` command.
+```
+
+### Install MMCV without MIM
+
+MMCV contains C++ and CUDA extensions, thus depending on PyTorch in a complex
+way. MIM solves such dependencies automatically and makes the installation
+easier. However, it is not a must.
+
+To install MMCV with pip instead of MIM, please follow the
+[MMCV installation guides](https://mmcv.readthedocs.io/en/latest/get_started/installation.html).
+This requires manually specifying a find-url based on the PyTorch version and its CUDA version.
+
+For example, the following command installs mmcv-full built for PyTorch 1.10.x and CUDA 11.3.
+
+```shell
+pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html
+```
+
+### Install on CPU-only platforms
+
+MMClassification can be built for a CPU-only environment. In CPU mode you can
+train (requires MMCV version >= 1.4.4), test, or run inference with a model.
+
+Some functionalities are unavailable in this mode, usually GPU-compiled ops. But don't
+worry, almost all models in MMClassification don't depend on these ops.
+
+### Install on Google Colab
+
+[Google Colab](https://research.google.com/) usually has PyTorch installed,
+thus we only need to install MMCV and MMClassification with the following
+commands.
+
+**Step 1.** Install [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim).
+
+```shell
+!pip3 install openmim
+!mim install mmcv-full
+```
+
+**Step 2.** Install MMClassification from the source.
+
+```shell
+!git clone https://github.com/open-mmlab/mmclassification.git
+%cd mmclassification
+!pip install -e .
+```
+
+**Step 3.** Verification.
+
+```python
+import mmcls
+print(mmcls.__version__)
+# Example output: 0.23.0 or newer
+```
+
+```{note}
+Within Jupyter, the exclamation mark `!` is used to call external executables and `%cd` is a [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd) to change the current working directory of Python.
+```
+
+### Using MMClassification with Docker
+
+We provide a [Dockerfile](https://github.com/open-mmlab/mmclassification/blob/master/docker/Dockerfile)
+to build an image. Ensure that your [docker version](https://docs.docker.com/engine/install/) is >= 19.03.
+
+```shell
+# build an image with PyTorch 1.8.1, CUDA 10.2
+# If you prefer other versions, just modify the Dockerfile
+docker build -t mmclassification docker/
+```
+
+Run it with
+
+```shell
+docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmclassification/data mmclassification
+```
+
+## Troubleshooting
+
+If you have any issues during the installation, please first view the [FAQ](faq.md) page.
+You may [open an issue](https://github.com/open-mmlab/mmclassification/issues/new/choose)
+on GitHub if no solution is found.
diff --git a/docs/en/model_zoo.md b/docs/en/model_zoo.md
new file mode 100644
index 0000000..46b42a9
--- /dev/null
+++ b/docs/en/model_zoo.md
@@ -0,0 +1,162 @@
+# Model Zoo
+
+## ImageNet
+
+ImageNet has multiple versions, but the most commonly used one is [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/).
+The ResNet family models below are trained with standard data augmentations, i.e., RandomResizedCrop, RandomHorizontalFlip and Normalize.
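+
+As a rough illustration, the "standard data augmentations" mentioned above correspond to a training pipeline of the following shape in the mmcls 0.x config style (values are indicative; see each config for the exact settings):
+
+```python
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='ImageToTensor', keys=['img']),
+    dict(type='ToTensor', keys=['gt_label']),
+    dict(type='Collect', keys=['img', 'gt_label']),
+]
+```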
+ +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------------------------------: | :-------------------------------: | :-----------------------------: | :-------: | :-------: | :---------------------------------------: | :-----------------------------------------: | +| VGG-11 | 132.86 | 7.63 | 68.75 | 88.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.log.json) | +| VGG-13 | 133.05 | 11.34 | 70.02 | 89.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.log.json) | +| VGG-16 | 138.36 | 15.5 | 71.62 | 90.49 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.log.json) | +| VGG-19 | 143.67 | 19.67 | 72.41 | 90.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.log.json) | +| VGG-11-BN | 132.87 | 7.64 | 70.75 | 90.12 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.log.json) | +| VGG-13-BN | 133.05 | 11.36 | 72.15 | 90.71 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.log.json) | +| VGG-16-BN | 138.37 | 15.53 | 73.72 | 91.68 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.log.json) | +| VGG-19-BN | 143.68 | 19.7 | 74.70 | 92.24 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.log.json) | +| RepVGG-A0\* | 9.11(train) \| 8.31 (deploy) | 1.52 (train) \| 1.36 (deploy) | 72.41 | 90.50 | [config 
(train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth) | +| RepVGG-A1\* | 14.09 (train) \| 12.79 (deploy) | 2.64 (train) \| 2.37 (deploy) | 74.47 | 91.85 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth) | +| RepVGG-A2\* | 28.21 (train) \| 25.5 (deploy) | 5.7 (train) \| 5.12 (deploy) | 76.48 | 93.01 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth) | +| RepVGG-B0\* | 15.82 (train) \| 14.34 (deploy) | 3.42 (train) \| 3.06 (deploy) | 75.14 | 92.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth) | +| RepVGG-B1\* | 57.42 (train) \| 51.83 (deploy) | 13.16 (train) \| 11.82 (deploy) | 78.37 | 94.11 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth) | +| RepVGG-B1g2\* | 45.78 (train) \| 41.36 (deploy) | 9.82 (train) \| 8.82 (deploy) | 77.79 | 93.88 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth) | +| RepVGG-B1g4\* | 39.97 (train) \| 36.13 (deploy) | 8.15 (train) \| 7.32 (deploy) | 77.58 | 93.84 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth) | +| RepVGG-B2\* | 89.02 (train) \| 80.32 (deploy) | 20.46 (train) \| 18.39 (deploy) | 78.78 | 94.42 | [config 
(train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth) | +| RepVGG-B2g4\* | 61.76 (train) \| 55.78 (deploy) | 12.63 (train) \| 11.34 (deploy) | 79.38 | 94.68 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth) | +| RepVGG-B3\* | 123.09 (train) \| 110.96 (deploy) | 29.17 (train) \| 26.22 (deploy) | 80.52 | 95.26 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth) | +| RepVGG-B3g4\* | 83.83 (train) \| 75.63 (deploy) | 17.9 (train) \| 16.08 (deploy) | 80.22 | 95.10 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth) | +| RepVGG-D2se\* | 133.33 (train) \| 120.39 (deploy) | 36.56 (train) \| 32.85 (deploy) | 81.81 | 95.94 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth) | +| ResNet-18 | 11.69 | 1.82 | 70.07 | 89.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_batch256_imagenet_20200708-34ab8f90.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_batch256_imagenet_20200708-34ab8f90.log.json) | +| ResNet-34 | 21.8 | 3.68 | 73.85 | 91.53 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_batch256_imagenet_20200708-32ffb4f7.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_batch256_imagenet_20200708-32ffb4f7.log.json) | +| ResNet-50 (rsb-a1) | 25.56 | 4.12 | 80.12 | 94.78 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.log.json) | +| ResNet-101 | 44.55 | 7.85 | 78.18 | 94.03 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_batch256_imagenet_20200708-753f3608.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_batch256_imagenet_20200708-753f3608.log.json) | +| ResNet-152 | 60.19 | 11.58 | 78.63 | 94.16 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_batch256_imagenet_20200708-ec25b1f9.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_batch256_imagenet_20200708-ec25b1f9.log.json) | +| Res2Net-50-14w-8s\* | 25.06 | 4.22 | 78.14 | 93.85 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net50-w14-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth) | +| Res2Net-50-26w-8s\* | 48.40 | 8.39 | 79.20 | 94.36 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net50-w26-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth) | +| Res2Net-101-26w-4s\* | 45.21 | 8.12 | 79.19 | 94.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net101-w26-s4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth) | +| ResNeSt-50\* | 27.48 | 5.41 | 81.13 | 95.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnest/resnest50_32xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnest/resnest50_imagenet_converted-1ebf0afe.pth) | +| ResNeSt-101\* | 48.28 | 10.27 | 82.32 | 96.24 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnest/resnest101_32xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnest/resnest101_imagenet_converted-032caa52.pth) | +| ResNeSt-200\* | 70.2 | 17.53 | 82.41 | 96.22 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnest/resnest200_64xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnest/resnest200_imagenet_converted-581a60f2.pth) | +| ResNeSt-269\* | 110.93 | 22.58 | 82.70 | 96.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnest/resnest269_64xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnest/resnest269_imagenet_converted-59930960.pth) | +| ResNetV1D-50 | 25.58 | 4.36 | 77.54 | 93.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth) \| 
[log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.log.json) | +| ResNetV1D-101 | 44.57 | 8.09 | 78.93 | 94.48 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.log.json) | +| ResNetV1D-152 | 60.21 | 11.82 | 79.41 | 94.7 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.log.json) | +| ResNeXt-32x4d-50 | 25.03 | 4.27 | 77.90 | 93.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext50-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.log.json) | +| ResNeXt-32x4d-101 | 44.18 | 8.03 | 78.71 | 94.12 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.log.json) | +| ResNeXt-32x8d-101 | 88.79 | 16.5 | 79.23 | 94.58 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101-32x8d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.log.json) | +| ResNeXt-32x4d-152 | 59.95 | 11.8 | 78.93 | 94.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext152-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.log.json) | +| SE-ResNet-50 | 28.09 | 4.13 | 77.74 | 93.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200708-657b3c36.log.json) | +| SE-ResNet-101 | 49.33 | 7.86 | 78.26 | 94.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200708-038a4d04.log.json) | +| RegNetX-400MF | 5.16 | 0.41 | 72.56 | 90.78 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-400mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211208_143316.log.json) | +| RegNetX-800MF | 7.26 | 0.81 | 74.76 | 92.32 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-800mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211213-222b0f11.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211207_143037.log.json) | +| RegNetX-1.6GF | 9.19 | 1.63 | 76.84 | 93.31 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-1.6gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211213-d1b89758.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211208_143018.log.json) | +| RegNetX-3.2GF | 15.3 | 3.21 | 78.09 | 94.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-3.2gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211213-1fdd82ae.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211208_142720.log.json) | +| RegNetX-4.0GF | 22.12 | 4.0 | 78.60 | 94.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-4.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211213-efed675c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211207_150431.log.json) | +| RegNetX-6.4GF | 26.21 | 6.51 | 79.38 | 94.65 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-6.4gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211215-5c6089da.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211213_172748.log.json) | +| RegNetX-8.0GF | 39.57 | 8.03 | 79.12 | 94.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-8.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211213-9a9fcc76.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211208_103250.log.json) | +| RegNetX-12GF | 46.11 | 12.15 | 79.67 | 95.03 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/regnet/regnetx-12gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211213-5df8c2f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211208_143713.log.json) | +| ShuffleNetV1 1.0x (group=3) | 1.87 | 0.146 | 68.13 | 87.81 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth) \| 
[log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.log.json) | +| ShuffleNetV2 1.0x | 2.28 | 0.149 | 69.55 | 88.92 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200804-8860eec9.log.json) | +| MobileNet V2 | 3.5 | 0.319 | 71.86 | 90.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.log.json) | +| ViT-B/16\* | 86.86 | 33.03 | 85.43 | 97.77 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth) | +| ViT-B/32\* | 88.3 | 8.56 | 84.01 | 97.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth) | +| ViT-L/16\* | 304.72 | 116.68 | 85.63 | 97.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth) | +| Swin-Transformer tiny | 28.29 | 4.36 | 81.18 | 95.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-tiny_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925.log.json) | +| Swin-Transformer small | 49.61 | 8.52 | 83.02 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin-small_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219.log.json) | +| Swin-Transformer base | 87.77 | 15.14 | 83.36 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742.log.json) | +| Transformer in Transformer small\* | 23.76 | 3.36 | 81.52 | 95.73 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/tnt/tnt-s-p16_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth) | +| T2T-ViT_t-14 | 21.47 | 4.34 | 81.83 | 95.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.log.json) | +| T2T-ViT_t-19 | 39.08 | 7.80 | 82.63 | 96.18 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.log.json) | +| T2T-ViT_t-24 | 64.00 | 12.69 | 82.71 | 96.09 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.log.json) | +| Mixer-B/16\* | 59.88 | 12.61 | 76.68 | 92.25 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth) | +| Mixer-L/16\* | 208.2 | 44.57 | 72.34 | 88.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth) | +| DeiT-tiny | 5.72 | 1.08 | 74.50 | 92.24 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-tiny_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.log.json) | +| DeiT-tiny distilled\* | 5.72 | 1.08 | 74.51 | 91.90 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-tiny-distilled_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny-distilled_3rdparty_pt-4xb256_in1k_20211216-c429839a.pth) | +| DeiT-small | 22.05 | 4.24 | 80.69 | 95.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-small_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.log.json) | +| DeiT-small distilled\* | 22.05 | 4.24 | 81.17 | 95.40 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-small-distilled_pt-4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small-distilled_3rdparty_pt-4xb256_in1k_20211216-4de1d725.pth) | +| DeiT-base | 86.57 | 16.86 | 81.76 | 95.81 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-base_pt-16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.log.json) | +| DeiT-base distilled\* | 86.57 | 16.86 | 83.33 | 96.49 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-base-distilled_pt-16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_pt-16xb64_in1k_20211216-42891296.pth) | +| DeiT-base 384px\* | 86.86 | 49.37 | 83.04 | 96.31 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-base_ft-16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth) | +| DeiT-base distilled 384px\* | 86.86 | 49.37 | 85.55 | 97.35 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/deit/deit-base-distilled_ft-16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth) | +| Conformer-tiny-p16\* | 23.52 | 4.90 | 81.31 | 95.60 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-tiny-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth) | +| Conformer-small-p32\* | 38.85 | 7.09 | 81.96 | 96.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-small-p32_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p32_8xb128_in1k_20211206-947a0816.pth) | +| Conformer-small-p16\* | 37.67 | 10.31 | 83.32 | 96.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-small-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p16_3rdparty_8xb128_in1k_20211206-3065dcf5.pth) | +| Conformer-base-p16\* | 83.29 | 22.89 | 83.82 | 96.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/conformer/conformer-base-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth) | +| PCPVT-small\* | 24.11 | 3.67 | 81.14 | 95.69 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth) | +| PCPVT-base\* | 43.83 | 6.45 | 82.66 | 96.26 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-base_3rdparty_8xb128_in1k_20220126-f8c4b0d5.pth) | +| PCPVT-large\* | 60.99 | 9.51 | 83.09 | 96.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-pcpvt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-large_3rdparty_16xb64_in1k_20220126-c1ef8d80.pth) | +| SVT-small\* | 24.06 | 2.82 | 81.77 | 95.57 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-small_3rdparty_8xb128_in1k_20220126-8fe5205b.pth) | +| SVT-base\* | 56.07 | 8.35 | 83.13 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth) | +| SVT-large\* | 99.27 | 14.82 | 83.60 | 96.50 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/twins/twins-svt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-large_3rdparty_16xb64_in1k_20220126-4817645f.pth) | +| EfficientNet-B0\* | 5.29 | 0.02 | 76.74 | 93.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth) | +| EfficientNet-B0 (AA)\* | 5.29 | 0.02 | 77.26 | 93.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa_in1k_20220119-8d939117.pth) | +| EfficientNet-B0 (AA + AdvProp)\* | 5.29 | 0.02 | 77.53 | 93.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth) | +| EfficientNet-B1\* | 7.79 | 0.03 | 78.68 | 94.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32_in1k_20220119-002556d9.pth) | +| EfficientNet-B1 (AA)\* | 7.79 | 0.03 | 79.20 | 94.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa_in1k_20220119-619d8ae3.pth) | +| EfficientNet-B1 (AA + AdvProp)\* | 7.79 | 0.03 | 79.52 | 94.43 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k_20220119-5715267d.pth) | +| EfficientNet-B2\* | 9.11 | 0.03 | 79.64 | 94.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32_in1k_20220119-ea374a30.pth) | +| EfficientNet-B2 (AA)\* | 9.11 | 0.03 | 80.21 | 94.96 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa_in1k_20220119-dd61e80b.pth) | +| EfficientNet-B2 (AA + AdvProp)\* | 9.11 | 0.03 | 80.45 | 95.07 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k_20220119-1655338a.pth) | +| EfficientNet-B3\* | 12.23 | 0.06 | 81.01 | 95.34 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32_in1k_20220119-4b4d7487.pth) | +| EfficientNet-B3 (AA)\* | 12.23 | 0.06 | 81.58 | 95.67 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth) | +| EfficientNet-B3 (AA + AdvProp)\* | 12.23 | 0.06 | 81.81 | 95.69 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth) | +| EfficientNet-B4\* | 19.34 | 0.12 | 82.57 | 96.09 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32_in1k_20220119-81fd4077.pth) | +| EfficientNet-B4 (AA)\* | 19.34 | 0.12 | 82.95 | 96.26 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa_in1k_20220119-45b8bd2b.pth) | +| EfficientNet-B4 (AA + AdvProp)\* | 19.34 | 0.12 | 83.25 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k_20220119-38c2238c.pth) | +| EfficientNet-B5\* | 30.39 | 0.24 | 83.18 | 96.47 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32_in1k_20220119-e9814430.pth) | +| EfficientNet-B5 (AA)\* | 30.39 | 0.24 | 83.82 | 96.76 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa_in1k_20220119-2cab8b78.pth) | +| EfficientNet-B5 (AA + AdvProp)\* | 30.39 | 0.24 | 84.21 | 96.98 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k_20220119-f57a895a.pth) | +| EfficientNet-B6 (AA)\* | 43.04 | 0.41 | 84.05 | 96.82 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b6_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa_in1k_20220119-45b03310.pth) | +| EfficientNet-B6 (AA + AdvProp)\* | 43.04 | 0.41 | 84.74 | 
97.14 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k_20220119-bfe3485e.pth) | +| EfficientNet-B7 (AA)\* | 66.35 | 0.72 | 84.38 | 96.88 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b7_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa_in1k_20220119-bf03951c.pth) | +| EfficientNet-B7 (AA + AdvProp)\* | 66.35 | 0.72 | 85.14 | 97.23 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k_20220119-c6dbff10.pth) | +| EfficientNet-B8 (AA + AdvProp)\* | 87.41 | 1.09 | 85.38 | 97.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k_20220119-297ce1b7.pth) | +| ConvNeXt-T\* | 28.59 | 4.46 | 82.05 | 95.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-tiny_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128_in1k_20220124-18abde00.pth) | +| ConvNeXt-S\* | 50.22 | 8.69 | 83.13 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-small_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128_in1k_20220124-d39b5192.pth) | +| ConvNeXt-B\* | 88.59 | 15.36 | 83.85 | 96.74 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128_in1k_20220124-d0915162.pth) | +| ConvNeXt-B\* | 88.59 | 15.36 | 85.81 | 97.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth) | +| ConvNeXt-L\* | 197.77 | 34.37 | 84.30 | 96.89 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-large_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_64xb64_in1k_20220124-f8a0ded0.pth) | +| ConvNeXt-L\* | 197.77 | 34.37 | 86.61 | 98.04 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-large_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_64xb64_in1k_20220124-2412403d.pth) | +| ConvNeXt-XL\* | 350.20 | 60.93 | 86.97 | 98.20 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/convnext/convnext-xlarge_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth) | +| HRNet-W18\* | 21.30 | 4.33 | 76.75 | 93.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w18_4xb32_in1k.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth) | +| HRNet-W30\* | 37.71 | 8.17 | 78.19 | 94.22 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w30_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w30_3rdparty_8xb32_in1k_20220120-8aa3832f.pth) | +| HRNet-W32\* | 41.23 | 8.99 | 78.44 | 94.19 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w32_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w32_3rdparty_8xb32_in1k_20220120-c394f1ab.pth) | +| HRNet-W40\* | 57.55 | 12.77 | 78.94 | 94.47 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w40_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w40_3rdparty_8xb32_in1k_20220120-9a2dbfc5.pth) | +| HRNet-W44\* | 67.06 | 14.96 | 78.88 | 94.37 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w44_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w44_3rdparty_8xb32_in1k_20220120-35d07f73.pth) | +| HRNet-W48\* | 77.47 | 17.36 | 79.32 | 94.52 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32_in1k_20220120-e555ef50.pth) | +| HRNet-W64\* | 128.06 | 29.00 | 79.46 | 94.65 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w64_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w64_3rdparty_8xb32_in1k_20220120-19126642.pth) | +| HRNet-W18 (ssld)\* | 21.30 | 4.33 | 81.06 | 95.70 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-ssld_in1k_20220120-455f69ea.pth) | +| HRNet-W48 (ssld)\* | 77.47 | 17.36 | 83.63 | 96.79 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-ssld_in1k_20220120-d0459c38.pth) | +| WRN-50\* | 68.88 | 11.44 | 81.45 | 95.53 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/wrn/wide-resnet50_timm_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-timm_8xb32_in1k_20220304-83ae4399.pth) | +| WRN-101\* | 126.89 | 22.81 | 78.84 | 94.28 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/wrn/wide-resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth) | +| CSPDarkNet50\* | 27.64 | 5.04 | 80.05 | 95.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspdarknet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-bd275287.pth) | +| CSPResNet50\* | 21.62 | 3.48 | 79.55 | 94.68 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-dd6dddfb.pth) | +| CSPResNeXt50\* | 20.57 | 3.11 | 79.96 | 
94.96 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspresnext50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth) | +| DenseNet121\* | 7.98 | 2.88 | 74.96 | 92.21 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet121_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet121_4xb256_in1k_20220426-07450f99.pth) | +| DenseNet169\* | 14.15 | 3.42 | 76.08 | 93.11 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet169_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet169_4xb256_in1k_20220426-a2889902.pth) | +| DenseNet201\* | 20.01 | 4.37 | 77.32 | 93.64 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet201_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet201_4xb256_in1k_20220426-05cae4ef.pth) | +| DenseNet161\* | 28.68 | 7.82 | 77.61 | 93.83 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet161_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-ee6a80a9.pth) | +| VAN-T\* | 4.11 | 0.88 | 75.41 | 93.02 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth) | +| VAN-S\* | 13.86 | 2.52 | 81.01 | 95.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-small_8xb128_in1k_20220501-17bc91aa.pth) | +| VAN-B\* | 26.58 | 5.03 | 82.80 | 96.21 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth) | +| VAN-L\* | 44.77 | 8.99 | 83.86 | 96.73 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/van/van-large_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth) | +| MViTv2-tiny\* | 24.17 | 4.70 | 82.33 | 96.15 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-tiny_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef.pth) | +| MViTv2-small\* | 34.87 | 7.00 | 83.63 | 96.51 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-small_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-small_3rdparty_in1k_20220722-986bd741.pth) | +| MViTv2-base\* | 51.47 | 10.20 | 84.34 | 96.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17.pth) | +| MViTv2-large\* | 217.99 | 42.10 | 85.25 | 97.14 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mvit/mvitv2-large_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-large_3rdparty_in1k_20220722-2b57b983.pth) | +| EfficientFormer-l1\* | 
12.19 | 1.30 | 80.46 | 94.99 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l1_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l1_3rdparty_in1k_20220803-d66e61df.pth) | +| EfficientFormer-l3\* | 31.41 | 3.93 | 82.45 | 96.18 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l3_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l3_3rdparty_in1k_20220803-dde1c8c5.pth) | +| EfficientFormer-l7\* | 82.23 | 10.16 | 83.40 | 96.60 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientformer/efficientformer-l7_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l7_3rdparty_in1k_20220803-41a552bb.pth) | + +*Models with * are converted from other repos, others are trained by ourselves.* + +## CIFAR10 + +| Model | Params(M) | Flops(G) | Top-1 (%) | Config | Download | +| :--------------: | :-------: | :------: | :-------: | :----: | :------------------------------------------------------------------------------------------------------------: | +| ResNet-18-b16x8 | 11.17 | 0.56 | 94.82 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_8xb16_cifar10.py) | +| ResNet-34-b16x8 | 21.28 | 1.16 | 95.34 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_8xb16_cifar10.py) | +| ResNet-50-b16x8 | 23.52 | 1.31 | 95.55 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb16_cifar10.py) | +| ResNet-101-b16x8 | 42.51 | 2.52 | 95.58 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_8xb16_cifar10.py) | +| ResNet-152-b16x8 | 58.16 | 3.74 | 95.76 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_8xb16_cifar10.py) | diff --git a/docs/en/stat.py b/docs/en/stat.py new file mode 100755 index 0000000..8f1e5b2 --- /dev/null +++ b/docs/en/stat.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +import functools as func +import glob +import os +import re +from pathlib import Path + +import numpy as np + +MMCLS_ROOT = Path(__file__).absolute().parents[1] +url_prefix = 'https://github.com/open-mmlab/mmclassification/blob/master/' + +papers_root = Path('papers') +papers_root.mkdir(exist_ok=True) +files = [Path(f) for f in sorted(glob.glob('../../configs/*/README.md'))] + +stats = [] +titles = [] +num_ckpts = 0 +num_configs = 0 + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # Extract checkpoints + ckpts = set(x.lower().strip() + for x in re.findall(r'\[model\]\((https?.*)\)', content)) + if len(ckpts) == 0: + continue + num_ckpts += len(ckpts) + + # Extract paper title + match_res = list(re.finditer(r'> \[(.*)\]\((.*)\)', content)) + if len(match_res) > 0: + title, paperlink = match_res[0].groups() + else: + title = content.split('\n')[0].replace('# ', '').strip() + paperlink = None + titles.append(title) + + # Replace paper link to a button + if paperlink is not None: + start = match_res[0].start() + end = match_res[0].end() + # link_button = f'{title}' + link_button = f'[{title}]({paperlink})' + content = content[:start] + link_button + content[end:] + + # Extract paper type + _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] + 
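    # Each config README is expected to contain at least one bracketed paper-type tag (e.g. [ALGORITHM]); the first match is used. +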
assert len(_papertype) > 0 + papertype = _papertype[0] + paper = set([(papertype, title)]) + + # Write a copy of README + copy = papers_root / (f.parent.name + '.md') + if copy.exists(): + os.remove(copy) + + def replace_link(matchobj): + # Replace relative link to GitHub link. + name = matchobj.group(1) + link = matchobj.group(2) + if not link.startswith('http') and (f.parent / link).exists(): + rel_link = (f.parent / link).absolute().relative_to(MMCLS_ROOT) + link = url_prefix + str(rel_link) + return f'[{name}]({link})' + + content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_link, content) + + with open(copy, 'w') as copy_file: + copy_file.write(content) + + statsmsg = f""" +\t* [{papertype}] [{title}]({copy}) ({len(ckpts)} ckpts) +""" + stats.append(dict(paper=paper, ckpts=ckpts, statsmsg=statsmsg, copy=copy)) + +allpapers = func.reduce(lambda a, b: a.union(b), + [stat['paper'] for stat in stats]) +msglist = '\n'.join(stat['statsmsg'] for stat in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# Model Zoo Summary + +* Number of papers: {len(set(titles))} +{countstr} + +* Number of checkpoints: {num_ckpts} +{msglist} +""" + +with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) diff --git a/docs/en/tools/analysis.md b/docs/en/tools/analysis.md new file mode 100644 index 0000000..0e583b0 --- /dev/null +++ b/docs/en/tools/analysis.md @@ -0,0 +1,211 @@ +# Analysis + + + +- [Log Analysis](#log-analysis) + - [Plot Curves](#plot-curves) + - [Calculate Training Time](#calculate-training-time) +- [Result Analysis](#result-analysis) + - [Evaluate Results](#evaluate-results) + - [View Typical Results](#view-typical-results) +- [Model Complexity](#model-complexity) +- [FAQs](#faqs) + + + +## Log Analysis + +### Plot Curves + +`tools/analysis_tools/analyze_logs.py` plots curves of given keys according to the log files. + +
+ +```shell +python tools/analysis_tools/analyze_logs.py plot_curve \ + ${JSON_LOGS} \ + [--keys ${KEYS}] \ + [--title ${TITLE}] \ + [--legend ${LEGEND}] \ + [--backend ${BACKEND}] \ + [--style ${STYLE}] \ + [--out ${OUT_FILE}] \ + [--window-size ${WINDOW_SIZE}] +``` + +**Description of all arguments**: + +- `json_logs` : The paths of the log files, separate multiple files by spaces. +- `--keys` : The fields of the logs to analyze, separate multiple keys by spaces. Defaults to 'loss'. +- `--title` : The title of the figure. Defaults to use the filename. +- `--legend` : The names of legend, the number of which must be equal to `len(${JSON_LOGS}) * len(${KEYS})`. Defaults to use `"${JSON_LOG}-${KEYS}"`. +- `--backend` : The backend of matplotlib. Defaults to auto selected by matplotlib. +- `--style` : The style of the figure. Default to `whitegrid`. +- `--out` : The path of the output picture. If not set, the figure won't be saved. +- `--window-size`: The shape of the display window. The format should be `'W*H'`. Defaults to `'12*7'`. + +```{note} +The `--style` option depends on `seaborn` package, please install it before setting it. +``` + +Examples: + +- Plot the loss curve in training. + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve your_log_json --keys loss --legend loss + ``` + +- Plot the top-1 accuracy and top-5 accuracy curves, and save the figure to results.jpg. + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve your_log_json --keys accuracy_top-1 accuracy_top-5 --legend top1 top5 --out results.jpg + ``` + +- Compare the top-1 accuracy of two log files in the same figure. + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys accuracy_top-1 --legend exp1 exp2 + ``` + +```{note} +The tool will automatically select to find keys in training logs or validation logs according to the keys. +Therefore, if you add a custom evaluation metric, please also add the key to `TEST_METRICS` in this tool. +``` + +### Calculate Training Time + +`tools/analysis_tools/analyze_logs.py` can also calculate the training time according to the log files. + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time \ + ${JSON_LOGS} + [--include-outliers] +``` + +**Description of all arguments**: + +- `json_logs` : The paths of the log files, separate multiple files by spaces. +- `--include-outliers` : If set, include the first iteration in each epoch (Sometimes the time of first iterations is longer). + +Example: + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time work_dirs/some_exp/20200422_153324.log.json +``` + +The output is expected to be like the below. + +```text +-----Analyze train time of work_dirs/some_exp/20200422_153324.log.json----- +slowest epoch 68, average time is 0.3818 +fastest epoch 1, average time is 0.3694 +time std over epochs is 0.0020 +average iter time: 0.3777 s/iter +``` + +## Result Analysis + +With the `--out` argument in `tools/test.py`, we can save the inference results of all samples as a file. +And with this result file, we can do further analysis. + +### Evaluate Results + +`tools/analysis_tools/eval_metric.py` can evaluate metrics again. + +```shell +python tools/analysis_tools/eval_metric.py \ + ${CONFIG} \ + ${RESULT} \ + [--metrics ${METRICS}] \ + [--cfg-options ${CFG_OPTIONS}] \ + [--metric-options ${METRIC_OPTIONS}] +``` + +Description of all arguments: + +- `config` : The path of the model config file. 
+- `result`: The Output result file in json/pickle format from `tools/test.py`. +- `--metrics` : Evaluation metrics, the acceptable values depend on the dataset. +- `--cfg-options`: If specified, the key-value pair config will be merged into the config file, for more details please refer to [Tutorial 1: Learn about Configs](../tutorials/config.md) +- `--metric-options`: If specified, the key-value pair arguments will be passed to the `metric_options` argument of dataset's `evaluate` function. + +```{note} +In `tools/test.py`, we support using `--out-items` option to select which kind of results will be saved. Please ensure the result file includes "class_scores" to use this tool. +``` + +**Examples**: + +```shell +python tools/analysis_tools/eval_metric.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py your_result.pkl --metrics accuracy --metric-options "topk=(1,5)" +``` + +### View Typical Results + +`tools/analysis_tools/analyze_results.py` can save the images with the highest scores in successful or failed prediction. + +```shell +python tools/analysis_tools/analyze_results.py \ + ${CONFIG} \ + ${RESULT} \ + [--out-dir ${OUT_DIR}] \ + [--topk ${TOPK}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +**Description of all arguments**: + +- `config` : The path of the model config file. +- `result`: Output result file in json/pickle format from `tools/test.py`. +- `--out-dir`: Directory to store output files. +- `--topk`: The number of images in successful or failed prediction with the highest `topk` scores to save. If not specified, it will be set to 20. +- `--cfg-options`: If specified, the key-value pair config will be merged into the config file, for more details please refer to [Tutorial 1: Learn about Configs](../tutorials/config.md) + +```{note} +In `tools/test.py`, we support using `--out-items` option to select which kind of results will be saved. Please ensure the result file includes "pred_score", "pred_label" and "pred_class" to use this tool. +``` + +**Examples**: + +```shell +python tools/analysis_tools/analyze_results.py \ + configs/resnet/resnet50_b32x8_imagenet.py \ + result.pkl \ + --out-dir results \ + --topk 50 +``` + +## Model Complexity + +### Get the FLOPs and params (experimental) + +We provide a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model. + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +Description of all arguments: + +- `config` : The path of the model config file. +- `--shape`: Input size, support single value or double value parameter, such as `--shape 256` or `--shape 224 256`. If not set, default to be `224 224`. + +You will get a result like this. + +```text +============================== +Input shape: (3, 224, 224) +Flops: 4.12 GFLOPs +Params: 25.56 M +============================== +``` + +```{warning} +This tool is still experimental and we do not guarantee that the number is correct. You may well use the result for simple comparisons, but double-check it before you adopt it in technical reports or papers. +- FLOPs are related to the input shape while parameters are not. The default input shape is (1, 3, 224, 224). +- Some operators are not counted into FLOPs like GN and custom operators. Refer to [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) for details. 
+``` + +## FAQs + +- None diff --git a/docs/en/tools/miscellaneous.md b/docs/en/tools/miscellaneous.md new file mode 100644 index 0000000..4e2d5d6 --- /dev/null +++ b/docs/en/tools/miscellaneous.md @@ -0,0 +1,59 @@ +# Miscellaneous + + + +- [Print the entire config](#print-the-entire-config) +- [Verify Dataset](#verify-dataset) +- [FAQs](#faqs) + + + +## Print the entire config + +`tools/misc/print_config.py` prints the whole config verbatim, expanding all its imports. + +```shell +python tools/misc/print_config.py ${CONFIG} [--cfg-options ${CFG_OPTIONS}] +``` + +Description of all arguments: + +- `config` : The path of the model config file. +- `--cfg-options`: If specified, the key-value pair config will be merged into the config file, for more details please refer to [Tutorial 1: Learn about Configs](../tutorials/config.md) + +**Examples**: + +```shell +python tools/misc/print_config.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py +``` + +## Verify Dataset + +`tools/misc/verify_dataset.py` can verify a dataset, checking whether there are broken images in the given dataset. + +```shell +python tools/misc/verify_dataset.py \ + ${CONFIG} \ + [--out-path ${OUT-PATH}] \ + [--phase ${PHASE}] \ + [--num-process ${NUM-PROCESS}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +**Description of all arguments**: + +- `config` : The path of the model config file. +- `--out-path` : The path to save the verification result; if not set, defaults to 'brokenfiles.log'. +- `--phase` : The phase of the dataset to verify, which accepts "train", "test" and "val"; if not set, defaults to "train". +- `--num-process` : The number of processes to use; if not set, defaults to 1. +- `--cfg-options`: If specified, the key-value pair config will be merged into the config file, for more details please refer to [Tutorial 1: Learn about Configs](../tutorials/config.md) + +**Examples**: + +```shell +python tools/misc/verify_dataset.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py --out-path broken_imgs.log --phase val --num-process 8 +``` + +## FAQs + +- None diff --git a/docs/en/tools/model_serving.md b/docs/en/tools/model_serving.md new file mode 100644 index 0000000..d633a0f --- /dev/null +++ b/docs/en/tools/model_serving.md @@ -0,0 +1,87 @@ +# Model Serving + +In order to serve an `MMClassification` model with [`TorchServe`](https://pytorch.org/serve/), you can follow these steps: + +## 1. Convert model from MMClassification to TorchServe + +```shell +python tools/deployment/mmcls2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +```{note} +${MODEL_STORE} needs to be an absolute path to a folder. +``` + +Example: + +```shell +python tools/deployment/mmcls2torchserve.py \ + configs/resnet/resnet18_8xb32_in1k.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + --output-folder ./checkpoints \ + --model-name resnet18_in1k +``` + +## 2. Build `mmcls-serve` docker image + +```shell +docker build -t mmcls-serve:latest docker/serve/ +``` + +## 3. Run `mmcls-serve` + +Check the official docs for [running TorchServe with docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +In order to run on GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). You can omit the `--gpus` argument in order to run on CPU.
+ +Example: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=`realpath ./checkpoints`,target=/home/model-server/model-store \ +mmcls-serve:latest +``` + +```{note} +`realpath ./checkpoints` points to the absolute path of "./checkpoints", and you can replace it with the absolute path where you store torchserve models. +``` + +[Read the docs](https://github.com/pytorch/serve/blob/master/docs/rest_api.md) about the Inference (8080), Management (8081) and Metrics (8082) APIs. + +## 4. Test deployment + +```shell +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T demo/demo.JPEG +``` + +You should obtain a response similar to: + +```json +{ + "pred_label": 58, + "pred_score": 0.38102269172668457, + "pred_class": "water snake" +} +``` + +And you can use `test_torchserver.py` to compare the results of TorchServe and PyTorch, and visualize them. + +```shell +python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] +``` + +Example: + +```shell +python tools/deployment/test_torchserver.py \ + demo/demo.JPEG \ + configs/resnet/resnet18_8xb32_in1k.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + resnet18_in1k +``` diff --git a/docs/en/tools/onnx2tensorrt.md b/docs/en/tools/onnx2tensorrt.md new file mode 100644 index 0000000..ea0f148 --- /dev/null +++ b/docs/en/tools/onnx2tensorrt.md @@ -0,0 +1,80 @@ +# ONNX to TensorRT (Experimental) + + + +- [ONNX to TensorRT (Experimental)](#onnx-to-tensorrt-experimental) + - [How to convert models from ONNX to TensorRT](#how-to-convert-models-from-onnx-to-tensorrt) + - [Prerequisite](#prerequisite) + - [Usage](#usage) + - [List of supported models convertible to TensorRT](#list-of-supported-models-convertible-to-tensorrt) + - [Reminders](#reminders) + - [FAQs](#faqs) + + + +## How to convert models from ONNX to TensorRT + +### Prerequisite + +1. Please refer to [install.md](https://mmclassification.readthedocs.io/en/latest/install.html#install-mmclassification) for installation of MMClassification from source. +2. Use our tool [pytorch2onnx.md](./pytorch2onnx.md) to convert the model from PyTorch to ONNX. + +### Usage + +```bash +python tools/deployment/onnx2tensorrt.py \ + ${MODEL} \ + --trt-file ${TRT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --max-batch-size ${MAX_BATCH_SIZE} \ + --workspace-size ${WORKSPACE_SIZE} \ + --fp16 \ + --show \ + --verify \ +``` + +Description of all arguments: + +- `model` : The path of an ONNX model file. +- `--trt-file`: The path of the output TensorRT engine file. If not specified, it will be set to `tmp.trt`. +- `--shape`: The height and width of the model input. If not specified, it will be set to `224 224`. +- `--max-batch-size`: The max batch size of the TensorRT model, which should not be less than 1. +- `--fp16`: Enable fp16 mode. +- `--workspace-size` : The required GPU workspace size in GiB to build the TensorRT engine. If not specified, it will be set to `1` GiB. +- `--show`: Determines whether to show the outputs of the model. If not specified, it will be set to `False`. +- `--verify`: Determines whether to verify the correctness of models between ONNXRuntime and TensorRT. If not specified, it will be set to `False`.
+ +Example: + +```bash +python tools/deployment/onnx2tensorrt.py \ + checkpoints/resnet/resnet18_b16x8_cifar10.onnx \ + --trt-file checkpoints/resnet/resnet18_b16x8_cifar10.trt \ + --shape 224 224 \ + --show \ + --verify \ +``` + +## List of supported models convertible to TensorRT + +The table below lists the models that are guaranteed to be convertible to TensorRT. + +| Model | Config | Status | +| :----------: | :-----------------------------------------------------: | :----: | +| MobileNetV2 | `configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py` | Y | +| ResNet | `configs/resnet/resnet18_8xb16_cifar10.py` | Y | +| ResNeXt | `configs/resnext/resnext50-32x4d_8xb32_in1k.py` | Y | +| ShuffleNetV1 | `configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py` | Y | +| ShuffleNetV2 | `configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py` | Y | + +Notes: + +- *All models above are tested with Pytorch==1.6.0 and TensorRT-7.2.1.6.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0* + +## Reminders + +- If you meet any problem with the listed models above, please create an issue and it would be taken care of soon. For models not included in the list, we may not provide much help here due to the limited resources. Please try to dig a little deeper and debug by yourself. + +## FAQs + +- None diff --git a/docs/en/tools/pytorch2onnx.md b/docs/en/tools/pytorch2onnx.md new file mode 100644 index 0000000..7352d45 --- /dev/null +++ b/docs/en/tools/pytorch2onnx.md @@ -0,0 +1,204 @@ +# Pytorch to ONNX (Experimental) + + + +- [Pytorch to ONNX (Experimental)](#pytorch-to-onnx-experimental) + - [How to convert models from Pytorch to ONNX](#how-to-convert-models-from-pytorch-to-onnx) + - [Prerequisite](#prerequisite) + - [Usage](#usage) + - [Description of all arguments:](#description-of-all-arguments) + - [How to evaluate ONNX models with ONNX Runtime](#how-to-evaluate-onnx-models-with-onnx-runtime) + - [Prerequisite](#prerequisite-1) + - [Usage](#usage-1) + - [Description of all arguments](#description-of-all-arguments-1) + - [Results and Models](#results-and-models) + - [List of supported models exportable to ONNX](#list-of-supported-models-exportable-to-onnx) + - [Reminders](#reminders) + - [FAQs](#faqs) + + + +## How to convert models from Pytorch to ONNX + +### Prerequisite + +1. Please refer to [install](https://mmclassification.readthedocs.io/en/latest/install.html#install-mmclassification) for installation of MMClassification. +2. Install onnx and onnxruntime + +```shell +pip install onnx onnxruntime==1.5.1 +``` + +### Usage + +```bash +python tools/deployment/pytorch2onnx.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --opset-version ${OPSET_VERSION} \ + --dynamic-export \ + --show \ + --simplify \ + --verify \ +``` + +### Description of all arguments: + +- `config` : The path of a model config file. +- `--checkpoint` : The path of a model checkpoint file. +- `--output-file`: The path of output ONNX model. If not specified, it will be set to `tmp.onnx`. +- `--shape`: The height and width of input tensor to the model. If not specified, it will be set to `224 224`. +- `--opset-version` : The opset version of ONNX. If not specified, it will be set to `11`. +- `--dynamic-export` : Determines whether to export ONNX with dynamic input shape and output shapes. If not specified, it will be set to `False`. +- `--show`: Determines whether to print the architecture of the exported model. If not specified, it will be set to `False`. 
+- `--simplify`: Determines whether to simplify the exported ONNX model. If not specified, it will be set to `False`. +- `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`. + +Example: + +```bash +python tools/deployment/pytorch2onnx.py \ + configs/resnet/resnet18_8xb16_cifar10.py \ + --checkpoint checkpoints/resnet/resnet18_8xb16_cifar10.pth \ + --output-file checkpoints/resnet/resnet18_8xb16_cifar10.onnx \ + --dynamic-export \ + --show \ + --simplify \ + --verify \ +``` + +## How to evaluate ONNX models with ONNX Runtime + +We prepare a tool `tools/deployment/test.py` to evaluate ONNX models with ONNXRuntime or TensorRT. + +### Prerequisite + +- Install onnx and onnxruntime-gpu + + ```shell + pip install onnx onnxruntime-gpu + ``` + +### Usage + +```bash +python tools/deployment/test.py \ + ${CONFIG_FILE} \ + ${ONNX_FILE} \ + --backend ${BACKEND} \ + --out ${OUTPUT_FILE} \ + --metrics ${EVALUATION_METRICS} \ + --metric-options ${EVALUATION_OPTIONS} \ + --show + --show-dir ${SHOW_DIRECTORY} \ + --cfg-options ${CFG_OPTIONS} \ +``` + +### Description of all arguments + +- `config`: The path of a model config file. +- `model`: The path of a ONNX model file. +- `--backend`: Backend for input model to run and should be `onnxruntime` or `tensorrt`. +- `--out`: The path of output result file in pickle format. +- `--metrics`: Evaluation metrics, which depends on the dataset, e.g., "accuracy", "precision", "recall", "f1_score", "support" for single label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for multi-label dataset. +- `--show`: Determines whether to show classifier outputs. If not specified, it will be set to `False`. +- `--show-dir`: Directory where painted images will be saved +- `--metrics-options`: Custom options for evaluation, the key-value pair in `xxx=yyy` format will be kwargs for `dataset.evaluate()` function +- `--cfg-options`: Override some settings in the used config file, the key-value pair in `xxx=yyy` format will be merged into config file. + +### Results and Models + +This part selects ImageNet for onnxruntime verification. ImageNet has multiple versions, but the most commonly used one is [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ModelConfigMetricPyTorchONNXRuntimeTensorRT-fp32TensorRT-fp16
ResNetresnet50_8xb32_in1k.pyTop 1 / 576.55 / 93.1576.49 / 93.2276.49 / 93.2276.50 / 93.20
ResNeXtresnext50-32x4d_8xb32_in1k.pyTop 1 / 577.90 / 93.6677.90 / 93.6677.90 / 93.6677.89 / 93.65
SE-ResNetseresnet50_8xb32_in1k.pyTop 1 / 577.74 / 93.8477.74 / 93.8477.74 / 93.8477.74 / 93.85
ShuffleNetV1shufflenet-v1-1x_16xb64_in1k.pyTop 1 / 568.13 / 87.8168.13 / 87.8168.13 / 87.8168.10 / 87.80
ShuffleNetV2shufflenet-v2-1x_16xb64_in1k.pyTop 1 / 569.55 / 88.9269.55 / 88.9269.55 / 88.9269.55 / 88.92
MobileNetV2mobilenet-v2_8xb32_in1k.pyTop 1 / 571.86 / 90.4271.86 / 90.4271.86 / 90.4271.88 / 90.40
+ +## List of supported models exportable to ONNX + +The table below lists the models that are guaranteed to be exportable to ONNX and runnable in ONNX Runtime. + +| Model | Config | Batch Inference | Dynamic Shape | Note | +| :----------: | :-------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------: | :-----------: | ---- | +| MobileNetV2 | [mobilenet-v2_8xb32_in1k.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py) | Y | Y | | +| ResNet | [resnet18_8xb16_cifar10.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet/resnet18_8xb16_cifar10.py) | Y | Y | | +| ResNeXt | [resnext50-32x4d_8xb32_in1k.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext/resnext50-32x4d_8xb32_in1k.py) | Y | Y | | +| SE-ResNet | [seresnet50_8xb32_in1k.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet/seresnet50_8xb32_in1k.py) | Y | Y | | +| ShuffleNetV1 | [shufflenet-v1-1x_16xb64_in1k.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py) | Y | Y | | +| ShuffleNetV2 | [shufflenet-v2-1x_16xb64_in1k.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | Y | Y | | + +Notes: + +- *All models above are tested with Pytorch==1.6.0* + +## Reminders + +- If you meet any problem with the listed models above, please create an issue and it would be taken care of soon. For models not included in the list, please try to dig a little deeper and debug a little bit more and hopefully solve them by yourself. + +## FAQs + +- None diff --git a/docs/en/tools/pytorch2torchscript.md b/docs/en/tools/pytorch2torchscript.md new file mode 100644 index 0000000..fca0856 --- /dev/null +++ b/docs/en/tools/pytorch2torchscript.md @@ -0,0 +1,56 @@ +# Pytorch to TorchScript (Experimental) + + + +- [Pytorch to TorchScript (Experimental)](#pytorch-to-torchscript-experimental) + - [How to convert models from Pytorch to TorchScript](#how-to-convert-models-from-pytorch-to-torchscript) + - [Usage](#usage) + - [Description of all arguments](#description-of-all-arguments) + - [Reminders](#reminders) + - [FAQs](#faqs) + + + +## How to convert models from Pytorch to TorchScript + +### Usage + +```bash +python tools/deployment/pytorch2torchscript.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --verify \ +``` + +### Description of all arguments + +- `config` : The path of a model config file. +- `--checkpoint` : The path of a model checkpoint file. +- `--output-file`: The path of output TorchScript model. If not specified, it will be set to `tmp.pt`. +- `--shape`: The height and width of input tensor to the model. If not specified, it will be set to `224 224`. +- `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`. + +Example: + +```bash +python tools/deployment/pytorch2torchscript.py \ + configs/resnet/resnet18_8xb16_cifar10.py \ + --checkpoint checkpoints/resnet/resnet18_8xb16_cifar10.pth \ + --output-file checkpoints/resnet/resnet18_8xb16_cifar10.pt \ + --verify \ +``` + +Notes: + +- *All models are tested with Pytorch==1.8.1* + +## Reminders + +- For torch.jit.is_tracing() is only supported after v1.6. 
For users with pytorch v1.3-v1.5, we suggest early returning tensors manually. +- If you meet any problem with the models in this repo, please create an issue and it would be taken care of soon. + +## FAQs + +- None diff --git a/docs/en/tools/visualization.md b/docs/en/tools/visualization.md new file mode 100644 index 0000000..0128245 --- /dev/null +++ b/docs/en/tools/visualization.md @@ -0,0 +1,302 @@ +# Visualization + + + +- [Pipeline Visualization](#pipeline-visualization) +- [Learning Rate Schedule Visualization](#learning-rate-schedule-visualization) +- [Class Activation Map Visualization](#class-activation-map-visualization) +- [FAQs](#faqs) + + + +## Pipeline Visualization + +```bash +python tools/visualizations/vis_pipeline.py \ + ${CONFIG_FILE} \ + [--output-dir ${OUTPUT_DIR}] \ + [--phase ${DATASET_PHASE}] \ + [--number ${BUNBER_IMAGES_DISPLAY}] \ + [--skip-type ${SKIP_TRANSFORM_TYPE}] \ + [--mode ${DISPLAY_MODE}] \ + [--show] \ + [--adaptive] \ + [--min-edge-length ${MIN_EDGE_LENGTH}] \ + [--max-edge-length ${MAX_EDGE_LENGTH}] \ + [--bgr2rgb] \ + [--window-size ${WINDOW_SIZE}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +**Description of all arguments**: + +- `config` : The path of a model config file. +- `--output-dir`: The output path for visualized images. If not specified, it will be set to `''`, which means not to save. +- `--phase`: Phase of visualizing dataset,must be one of `[train, val, test]`. If not specified, it will be set to `train`. +- `--number`: The number of samples to visualized. If not specified, display all images in the dataset. +- `--skip-type`: The pipelines to be skipped. If not specified, it will be set to `['ToTensor', 'Normalize', 'ImageToTensor', 'Collect']`. +- `--mode`: The display mode, can be one of `[original, pipeline, concat]`. If not specified, it will be set to `concat`. +- `--show`: If set, display pictures in pop-up windows. +- `--adaptive`: If set, adaptively resize images for better visualization. +- `--min-edge-length`: The minimum edge length, used when `--adaptive` is set. When any side of the picture is smaller than `${MIN_EDGE_LENGTH}`, the picture will be enlarged while keeping the aspect ratio unchanged, and the short side will be aligned to `${MIN_EDGE_LENGTH}`. If not specified, it will be set to 200. +- `--max-edge-length`: The maximum edge length, used when `--adaptive` is set. When any side of the picture is larger than `${MAX_EDGE_LENGTH}`, the picture will be reduced while keeping the aspect ratio unchanged, and the long side will be aligned to `${MAX_EDGE_LENGTH}`. If not specified, it will be set to 1000. +- `--bgr2rgb`: If set, flip the color channel order of images. +- `--window-size`: The shape of the display window. If not specified, it will be set to `12*7`. If used, it must be in the format `'W*H'`. +- `--cfg-options` : Modifications to the configuration file, refer to [Tutorial 1: Learn about Configs](https://mmclassification.readthedocs.io/en/latest/tutorials/config.html). + +```{note} + +1. If the `--mode` is not specified, it will be set to `concat` as default, get the pictures stitched together by original pictures and transformed pictures; if the `--mode` is set to `original`, get the original pictures; if the `--mode` is set to `transformed`, get the transformed pictures; if the `--mode` is set to `pipeline`, get all the intermediate images through the pipeline. + +2. 
When the `--adaptive` option is set, images that are too large or too small are automatically resized; use `--min-edge-length` and `--max-edge-length` to control the target size.
+```
+
+**Examples**:
+
+1. In **'original'** mode, visualize 100 original pictures in the `CIFAR100` validation set, then display and save them in the `./tmp` folder:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py configs/resnet/resnet50_8xb16_cifar100.py --phase val --output-dir tmp --mode original --number 100 --show --adaptive --bgr2rgb
+```
+
+2. In **'transformed'** mode, visualize all the transformed pictures of the `ImageNet` training set and display them in pop-up windows:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py ./configs/resnet/resnet50_8xb32_in1k.py --show --mode transformed
+```
+
+3. In **'concat'** mode, visualize 10 pairs of original and transformed images for comparison in the `ImageNet` training set and save them in the `./tmp` folder:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py --phase train --output-dir tmp --number 10 --adaptive
+```
+
+4. In **'pipeline'** mode, visualize all the intermediate pictures in the `ImageNet` training set through the pipeline:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py --phase train --adaptive --mode pipeline --show
+```
+
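+The examples above drive `vis_pipeline.py` from the command line. For quick interactive checks, a transformed sample can also be pulled out of the dataset directly; the following is only an illustrative sketch (it assumes the dataset referenced by the config is available under the usual `data/` layout, and the exact keys depend on the pipeline in the config):
+
+```python
+# Illustrative sketch: build the training set from a config and inspect one
+# sample after the data pipeline has been applied.
+from mmcv import Config
+from mmcls.datasets import build_dataset
+
+cfg = Config.fromfile('configs/resnet/resnet50_8xb16_cifar100.py')
+dataset = build_dataset(cfg.data.train)
+
+sample = dataset[0]            # dict produced by the train pipeline
+print(sample.keys())           # typically contains 'img' and 'gt_label'
+print(sample['img'].shape)     # shape of the transformed image tensor
+```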
+
+## Learning Rate Schedule Visualization
+
+```bash
+python tools/visualizations/vis_lr.py \
+    ${CONFIG_FILE} \
+    --dataset-size ${DATASET_SIZE} \
+    --ngpus ${NUM_GPUs} \
+    --save-path ${SAVE_PATH} \
+    --title ${TITLE} \
+    --style ${STYLE} \
+    --window-size ${WINDOW_SIZE} \
+    --cfg-options
+```
+
+**Description of all arguments**:
+
+- `config` : The path of a model config file.
+- `dataset-size` : The size of the dataset. If set, `build_dataset` will be skipped and `${DATASET_SIZE}` will be used as the size. By default, the size is obtained through `build_dataset`.
+- `ngpus` : The number of GPUs used in training. Defaults to 1.
+- `save-path` : The path to save the learning-rate curve plot. By default, the plot is not saved.
+- `title` : The title of the figure. If not set, it defaults to the config file name.
+- `style` : The style of the plot. If not set, it defaults to `whitegrid`.
+- `window-size`: The shape of the display window. If not specified, it will be set to `12*7`. If used, it must be in the format `'W*H'`.
+- `cfg-options` : Modifications to the configuration file, refer to [Tutorial 1: Learn about Configs](https://mmclassification.readthedocs.io/en/latest/tutorials/config.html).
+
+```{note}
+Loading annotations may take a long time; you can specify the dataset size directly with `dataset-size` to save time.
+```
+
+**Examples**:
+
+```bash
+python tools/visualizations/vis_lr.py configs/resnet/resnet50_b16x8_cifar100.py
+```
+
+When using ImageNet, specify the size of ImageNet directly, as below:
+
+```bash
+python tools/visualizations/vis_lr.py configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py --dataset-size 1281167 --ngpus 4 --save-path ./repvgg-B3g4_4xb64-lr.jpg
+```
+
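+For intuition on why `--dataset-size` and `--ngpus` matter, the length of the plotted schedule is roughly `dataset_size / (ngpus * samples_per_gpu)` iterations per epoch. The arithmetic below is only a sketch; the per-GPU batch size of 64 is an assumption suggested by the `4xb64` part of the config name:
+
+```python
+# Rough schedule-length arithmetic for the ImageNet example above (sketch only).
+dataset_size = 1281167      # --dataset-size, the ImageNet-1k training set
+ngpus = 4                   # --ngpus
+samples_per_gpu = 64        # assumed per-GPU batch size ("4xb64" in the config name)
+epochs = 200                # "200e" in the config name
+
+iters_per_epoch = dataset_size // (ngpus * samples_per_gpu)
+print(iters_per_epoch, iters_per_epoch * epochs)   # 5004 per epoch, ~1,000,800 total
+```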
+
+## Class Activation Map Visualization
+
+MMClassification provides the `tools/visualizations/vis_cam.py` tool to visualize class activation maps. Please run `pip install "grad-cam>=1.3.6"` to install [pytorch-grad-cam](https://github.com/jacobgil/pytorch-grad-cam).
+
+The supported methods are as follows:
+
+| Method       | What it does                                                                                                                   |
+| ------------ | ------------------------------------------------------------------------------------------------------------------------------ |
+| GradCAM      | Weights the 2D activations by the average gradient                                                                              |
+| GradCAM++    | Like GradCAM, but uses second-order gradients                                                                                   |
+| XGradCAM     | Like GradCAM, but scales the gradients by the normalized activations                                                            |
+| EigenCAM     | Takes the first principal component of the 2D activations (no class discrimination, but seems to give great results)            |
+| EigenGradCAM | Like EigenCAM, but with class discrimination: first principal component of Activations\*Grad. Looks like GradCAM, but cleaner   |
+| LayerCAM     | Spatially weights the activations by positive gradients. Works better, especially in lower layers                               |
+
+**Command**:
+
+```bash
+python tools/visualizations/vis_cam.py \
+    ${IMG} \
+    ${CONFIG_FILE} \
+    ${CHECKPOINT} \
+    [--target-layers ${TARGET-LAYERS}] \
+    [--preview-model] \
+    [--method ${METHOD}] \
+    [--target-category ${TARGET-CATEGORY}] \
+    [--save-path ${SAVE_PATH}] \
+    [--vit-like] \
+    [--num-extra-tokens ${NUM-EXTRA-TOKENS}] \
+    [--aug_smooth] \
+    [--eigen_smooth] \
+    [--device ${DEVICE}] \
+    [--cfg-options ${CFG-OPTIONS}]
+```
+
+**Description of all arguments**:
+
+- `img` : The target picture path.
+- `config` : The path of the model config file.
+- `checkpoint` : The path of the checkpoint.
+- `--target-layers` : The target layers to get activation maps from; one or more network layers can be specified. If not set, the norm layer of the last block is used.
+- `--preview-model` : Whether to print all network layer names in the model.
+- `--method` : The visualization method, one of `GradCAM`, `GradCAM++`, `XGradCAM`, `EigenCAM`, `EigenGradCAM`, `LayerCAM` (case insensitive). Defaults to `GradCAM`.
+- `--target-category` : The target category. If not set, the category predicted by the given model is used.
+- `--save-path` : The path to save the CAM visualization image. If not set, the CAM image will not be saved.
+- `--vit-like` : Whether the network is a ViT-like network.
+- `--num-extra-tokens` : The number of extra tokens in ViT-like backbones. If not set, the `num_extra_tokens` attribute of the backbone is used.
+- `--aug_smooth` : Whether to use TTA (test-time augmentation) to get the CAM.
+- `--eigen_smooth` : Whether to use the principal component to reduce noise.
+- `--device` : The computing device to use. Defaults to `'cpu'`.
+- `--cfg-options` : Modifications to the configuration file, refer to [Tutorial 1: Learn about Configs](https://mmclassification.readthedocs.io/en/latest/tutorials/config.html).
+
+```{note}
+The argument `--preview-model` prints all network layer names in the given model, which is helpful if you know nothing about the model layers when setting `--target-layers`.
+```
+
+**Examples(CNN)**:
+
+Here are some examples of `target-layers` in ResNet-50, which can be any module or layer (a programmatic way to list candidate names is sketched after this list):
+
+- `'backbone.layer4'` means the output of the fourth ResLayer.
+- `'backbone.layer4.2'` means the output of the third BottleNeck block in the fourth ResLayer.
+- `'backbone.layer4.2.conv1'` means the output of the `conv1` layer in the above BottleNeck block.
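+As a complement to `--preview-model`, candidate layer names can also be listed from Python. The following is only an illustrative sketch; it assumes the ResNet-50 config used in the examples below and relies on the standard `mmcls.apis.init_model` helper (no checkpoint is needed just to inspect names):
+
+```python
+# Illustrative sketch: list module names that can be passed to --target-layers.
+from mmcls.apis import init_model
+
+model = init_model('configs/resnet/resnet50_8xb32_in1k.py', device='cpu')
+
+for name, module in model.backbone.named_modules():
+    if name.startswith('layer4'):          # e.g. layer4, layer4.2, layer4.2.conv1
+        print(f'backbone.{name}', type(module).__name__)
+```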
+ +```{note} +For `ModuleList` or `Sequential`, you can also use the index to specify which sub-module is the target layer. + +For example, the `backbone.layer4[-1]` is the same as `backbone.layer4.2` since `layer4` is a `Sequential` with three sub-modules. +``` + +1. Use different methods to visualize CAM for `ResNet50`, the `target-category` is the predicted result by the given checkpoint, using the default `target-layers`. + + ```shell + python tools/visualizations/vis_cam.py \ + demo/bird.JPEG \ + configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth \ + --method GradCAM + # GradCAM++, XGradCAM, EigenCAM, EigenGradCAM, LayerCAM + ``` + + | Image | GradCAM | GradCAM++ | EigenGradCAM | LayerCAM | + | ------------------------------------ | --------------------------------------- | ----------------------------------------- | -------------------------------------------- | ---------------------------------------- | + |
| + +2. Use different `target-category` to get CAM from the same picture. In `ImageNet` dataset, the category 238 is 'Greater Swiss Mountain dog', the category 281 is 'tabby, tabby cat'. + + ```shell + python tools/visualizations/vis_cam.py \ + demo/cat-dog.png configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth \ + --target-layers 'backbone.layer4.2' \ + --method GradCAM \ + --target-category 238 + # --target-category 281 + ``` + + | Category | Image | GradCAM | XGradCAM | LayerCAM | + | -------- | ---------------------------------------------- | ------------------------------------------------ | ------------------------------------------------- | ------------------------------------------------- | + | Dog |
| + | Cat |
| + +3. Use `--eigen-smooth` and `--aug-smooth` to improve visual effects. + + ```shell + python tools/visualizations/vis_cam.py \ + demo/dog.jpg \ + configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth \ + --target-layers 'backbone.layer16' \ + --method LayerCAM \ + --eigen-smooth --aug-smooth + ``` + + | Image | LayerCAM | eigen-smooth | aug-smooth | eigen&aug | + | ------------------------------------ | --------------------------------------- | ------------------------------------------- | ----------------------------------------- | ----------------------------------------- | + |
| + +**Examples(Transformer)**: + +Here are some examples: + +- `'backbone.norm3'` for Swin-Transformer; +- `'backbone.layers[-1].ln1'` for ViT; + +For ViT-like networks, such as ViT, T2T-ViT and Swin-Transformer, the features are flattened. And for drawing the CAM, we need to specify the `--vit-like` argument to reshape the features into square feature maps. + +Besides the flattened features, some ViT-like networks also add extra tokens like the class token in ViT and T2T-ViT, and the distillation token in DeiT. In these networks, the final classification is done on the tokens computed in the last attention block, and therefore, the classification score will not be affected by other features and the gradient of the classification score with respect to them, will be zero. Therefore, you shouldn't use the output of the last attention block as the target layer in these networks. + +To exclude these extra tokens, we need know the number of extra tokens. Almost all transformer-based backbones in MMClassification have the `num_extra_tokens` attribute. If you want to use this tool in a new or third-party network that don't have the `num_extra_tokens` attribute, please specify it the `--num-extra-tokens` argument. + +1. Visualize CAM for `Swin Transformer`, using default `target-layers`: + + ```shell + python tools/visualizations/vis_cam.py \ + demo/bird.JPEG \ + configs/swin_transformer/swin-tiny_16xb64_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth \ + --vit-like + ``` + +2. Visualize CAM for `Vision Transformer(ViT)`: + + ```shell + python tools/visualizations/vis_cam.py \ + demo/bird.JPEG \ + configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py \ + https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth \ + --vit-like \ + --target-layers 'backbone.layers[-1].ln1' + ``` + +3. Visualize CAM for `T2T-ViT`: + + ```shell + python tools/visualizations/vis_cam.py \ + demo/bird.JPEG \ + configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-b7c09b62.pth \ + --vit-like \ + --target-layers 'backbone.encoder[-1].ln1' + ``` + +| Image | ResNet50 | ViT | Swin | T2T-ViT | +| --------------------------------------- | ------------------------------------------ | -------------------------------------- | --------------------------------------- | ------------------------------------------ | +|
| + +## FAQs + +- None diff --git a/docs/en/tutorials/MMClassification_python.ipynb b/docs/en/tutorials/MMClassification_python.ipynb new file mode 100755 index 0000000..e046666 --- /dev/null +++ b/docs/en/tutorials/MMClassification_python.ipynb @@ -0,0 +1,2040 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "XjQxmm04iTx4" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UdMfIsMpiODD" + }, + "source": [ + "# MMClassification Python API tutorial on Colab\n", + "\n", + "In this tutorial, we will introduce the following content:\n", + "\n", + "* How to install MMCls\n", + "* Inference a model with Python API\n", + "* Fine-tune a model with Python API" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iOl0X9UEiRvE" + }, + "source": [ + "## Install MMClassification\n", + "\n", + "Before using MMClassification, we need to prepare the environment with the following steps:\n", + "\n", + "1. Install Python, CUDA, C/C++ compiler and git\n", + "2. Install PyTorch (CUDA version)\n", + "3. Install mmcv\n", + "4. Clone mmcls source code from GitHub and install it\n", + "\n", + "Because this tutorial is on Google Colab, and the basic environment has been completed, we can skip the first two steps." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_i7cjqS_LtoP" + }, + "source": [ + "### Check environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "c6MbAw10iUJI", + "outputId": "dd37cdf5-7bcf-4a03-f5b5-4b17c3ca16de" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content\n" + ] + } + ], + "source": [ + "%cd /content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4IyFL3MaiYRu", + "outputId": "5008efdf-0356-4d93-ba9d-e51787036213" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content\n" + ] + } + ], + "source": [ + "!pwd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "DMw7QwvpiiUO", + "outputId": "33fa5eb8-d083-4a1f-d094-ab0f59e2818e" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2020 NVIDIA Corporation\n", + "Built on Mon_Oct_12_20:09:46_PDT_2020\n", + "Cuda compilation tools, release 11.1, V11.1.105\n", + "Build cuda_11.1.TC455_06.29190527_0\n" + ] + } + ], + "source": [ + "# Check nvcc version\n", + "!nvcc -V" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4VIBU7Fain4D", + "outputId": "ec20652d-ca24-4b82-b407-e90354d728f8" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0\n", + "Copyright (C) 2017 Free Software Foundation, Inc.\n", + "This is free software; see the source for copying conditions. 
There is NO\n", + "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", + "\n" + ] + } + ], + "source": [ + "# Check GCC version\n", + "!gcc --version" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "24lDLCqFisZ9", + "outputId": "30ec9a1c-cdb3-436c-cdc8-f2a22afe254f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1.9.0+cu111\n", + "True\n" + ] + } + ], + "source": [ + "# Check PyTorch installation\n", + "import torch, torchvision\n", + "print(torch.__version__)\n", + "print(torch.cuda.is_available())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R2aZNLUwizBs" + }, + "source": [ + "### Install MMCV\n", + "\n", + "MMCV is the basic package of all OpenMMLab packages. We have pre-built wheels on Linux, so we can download and install them directly.\n", + "\n", + "Please pay attention to PyTorch and CUDA versions to match the wheel.\n", + "\n", + "In the above steps, we have checked the version of PyTorch and CUDA, and they are 1.9.0 and 11.1 respectively, so we need to choose the corresponding wheel.\n", + "\n", + "In addition, we can also install the full version of mmcv (mmcv-full). It includes full features and various CUDA ops out of the box, but needs a longer time to build." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nla40LrLi7oo", + "outputId": "162bf14d-0d3e-4540-e85e-a46084a786b1" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html\n", + "Collecting mmcv\n", + " Downloading mmcv-1.3.15.tar.gz (352 kB)\n", + "\u001b[K |████████████████████████████████| 352 kB 5.2 MB/s \n", + "\u001b[?25hCollecting addict\n", + " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from mmcv) (1.19.5)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from mmcv) (21.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from mmcv) (7.1.2)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from mmcv) (3.13)\n", + "Collecting yapf\n", + " Downloading yapf-0.31.0-py2.py3-none-any.whl (185 kB)\n", + "\u001b[K |████████████████████████████████| 185 kB 49.9 MB/s \n", + "\u001b[?25hRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->mmcv) (2.4.7)\n", + "Building wheels for collected packages: mmcv\n", + " Building wheel for mmcv (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for mmcv: filename=mmcv-1.3.15-py2.py3-none-any.whl size=509835 sha256=793fe3796421336ca7a7740a1397a54016ba71ce95fd80cb80a116644adb4070\n", + " Stored in directory: /root/.cache/pip/wheels/b2/f4/4e/8f6d2dd2bef6b7eb8c89aa0e5d61acd7bff60aaf3d4d4b29b0\n", + "Successfully built mmcv\n", + "Installing collected packages: yapf, addict, mmcv\n", + "Successfully installed addict-2.4.0 mmcv-1.3.15 yapf-0.31.0\n" + ] + } + ], + "source": [ + "# Install mmcv\n", + "!pip install mmcv -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html\n", + "# !pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu110/torch1.9.0/index.html" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GDTUrYvXjlRb" + }, + "source": [ + "### Clone and install MMClassification\n", + "\n", + "Next, we clone the latest mmcls repository from GitHub and install it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Bwme6tWHjl5s", + "outputId": "eae20624-4695-4cd9-c3e5-9c59596d150a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cloning into 'mmclassification'...\n", + "remote: Enumerating objects: 4152, done.\u001b[K\n", + "remote: Counting objects: 100% (994/994), done.\u001b[K\n", + "remote: Compressing objects: 100% (576/576), done.\u001b[K\n", + "remote: Total 4152 (delta 476), reused 765 (delta 401), pack-reused 3158\u001b[K\n", + "Receiving objects: 100% (4152/4152), 8.20 MiB | 21.00 MiB/s, done.\n", + "Resolving deltas: 100% (2524/2524), done.\n" + ] + } + ], + "source": [ + "# Clone mmcls repository\n", + "!git clone https://github.com/open-mmlab/mmclassification.git\n", + "%cd mmclassification/\n", + "\n", + "# Install MMClassification from source\n", + "!pip install -e . " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "hFg_oSG4j3zB", + "outputId": "05a91f9b-d41c-4ae7-d4fe-c30a30d3f639" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.16.0\n" + ] + } + ], + "source": [ + "# Check MMClassification installation\n", + "import mmcls\n", + "print(mmcls.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4Mi3g6yzj96L" + }, + "source": [ + "## Inference a model with Python API\n", + "\n", + "MMClassification provides many pre-trained models, and you can check them by the link of [model zoo](https://mmclassification.readthedocs.io/en/latest/model_zoo.html). Almost all models can reproduce the results in original papers or reach higher metrics. And we can use these models directly.\n", + "\n", + "To use the pre-trained model, we need to do the following steps:\n", + "\n", + "- Prepare the model\n", + " - Prepare the config file\n", + " - Prepare the checkpoint file\n", + "- Build the model\n", + "- Inference with the model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nDQchz8CkJaT", + "outputId": "9805bd7d-cc2a-4269-b43d-257412f1df93" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2021-10-21 03:52:36-- https://www.dropbox.com/s/k5fsqi6qha09l1v/banana.png?dl=0\n", + "Resolving www.dropbox.com (www.dropbox.com)... 
162.125.3.18, 2620:100:601b:18::a27d:812\n", + "Connecting to www.dropbox.com (www.dropbox.com)|162.125.3.18|:443... connected.\n", + "HTTP request sent, awaiting response... 301 Moved Permanently\n", + "Location: /s/raw/k5fsqi6qha09l1v/banana.png [following]\n", + "--2021-10-21 03:52:36-- https://www.dropbox.com/s/raw/k5fsqi6qha09l1v/banana.png\n", + "Reusing existing connection to www.dropbox.com:443.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: https://uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com/cd/0/inline/BYYklQk6LNPXNm7o5xE_fxE2GA9reePyNajQgoe9roPlSrtsJd4WN6RVww7zrtNZWFq8iZv349MNQJlm7vVaqRBxTcd0ufxkqbcJYJvOrORpxOPV7mHmhMjKYUncez8YNqELGwDd-aeZqLGKBC8spSnx/file# [following]\n", + "--2021-10-21 03:52:36-- https://uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com/cd/0/inline/BYYklQk6LNPXNm7o5xE_fxE2GA9reePyNajQgoe9roPlSrtsJd4WN6RVww7zrtNZWFq8iZv349MNQJlm7vVaqRBxTcd0ufxkqbcJYJvOrORpxOPV7mHmhMjKYUncez8YNqELGwDd-aeZqLGKBC8spSnx/file\n", + "Resolving uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com (uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com)... 162.125.3.15, 2620:100:601b:15::a27d:80f\n", + "Connecting to uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com (uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com)|162.125.3.15|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 297299 (290K) [image/png]\n", + "Saving to: ‘demo/banana.png’\n", + "\n", + "demo/banana.png 100%[===================>] 290.33K --.-KB/s in 0.08s \n", + "\n", + "2021-10-21 03:52:36 (3.47 MB/s) - ‘demo/banana.png’ saved [297299/297299]\n", + "\n" + ] + } + ], + "source": [ + "# Get the demo image\n", + "!wget https://www.dropbox.com/s/k5fsqi6qha09l1v/banana.png?dl=0 -O demo/banana.png" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 420 + }, + "id": "o2eiitWnkQq_", + "outputId": "192b3ebb-202b-4d6e-e178-561223024318" + }, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYkAAAGTCAYAAADdkO5AAAABd2lDQ1BJQ0MgUHJvZmlsZQAAeJx1kc0rRFEYxn8zaDA0CxaTqLsYsqAmSpaMhc0kDcpgM3PdmVF3Zm733kmTrbKxUBZi42vhP2CrbCmlSEkW/gJfG+l6j6tG4tzOfX895zxv5zwHgklTLzr1cSiWXDs1ntBm03Na6JEwUZroojmjO9bo5GSSf8fbNQFVr/pVr//3/TnCi4ajQ6BReEi3bFd4RDi57FqK14Xb9UJmUXhPuM+WAwqfKz3r84PivM8viu3p1BgEVU8t/4OzP1gv2EXhXuFY0azo3+dRN2kxSjNTUjtkduKQYpwEGlkqLGHi0i+1JJn97Yt/+SYoi0eXv0UVWxx5CuLtE7UiXQ2pOdEN+UyqKvffeTq5wQG/e0sCGu4977kbQpvwseF57/ue93EAdXdwWqr5y5LT8KvoGzUttguRVTg+q2nZLThZg+itlbEzX1KdzGAuB09H0JqGtktonvez+l7n8AamV+SJLmB7B3pkf2ThE7z6Z+tvc+SlAAEAAElEQVR4nLz9S6ws25aeh31jPiIiM9dae+/zuHWqWHy4YViCDD1sqCfZXUvuGLDbBqG2AUN9wbYoWDbctiULBGSxRcA2BJMQIIsPyZRokiWbpkULIGmKZJGsB+s+zjl7r7UyI2I+hhtjzMi1q27de9lxAvfuc85eKzMyYs4xx/jH//9D/jf/xr+hqkpKiQCoKACqQMf+WUDU/mOIgS7Cy7YSEHJKQEPVfi+lhKrSWiOmBEDvnVYrMQZEAhICqoqqMsWJLopqp/d+/PecM/teuF2fmeYTKUYQEASAnDO9d2rvpBgRkeMaemt24SLUUvFvgYggQYgx0fZKRzlfztRaj2vf953q12r3QUHv/zzPma56/HsIgVorH58/Mk0zT49P3K43WqucTjMxZlQ727bbJSHEmBEBbY2YJ0IM7PuKANu2Mc9nUk60WgkxECSw7zv7euPp/XtEOkqA1thrI4To96OiQK2V1hopJfseChICQkCCECTY8xUopYAqIoHeO3my+xpCIIRIErvq0hoShPM8232UYD8TE7V3tHVOy4k4TYjA+Xzi6emJx8cHPnz5FX/gV/8AMUZ+kdfLyws//OEP+fjtt3z/7Sdu2yu9d24vK6VvtNYQVVTEn2xn3wt9b4QYEHv09CC0pkgMBIRt3UDU/q53YhRyTrTW6N3eqew7L88vnE4nnh4utNrRCEFhb50kgZgSVZSgQkqzr/8GCAElxsS+7dz2wnle2PaNT8/P/PKv/AohCrVX1utKq0rKiXmaeb4+Q+vMy4k8Z6RDKZWQo312Kcwpk1Km9krviiSh10qtnXma7P6KEHx9AlRRtm1HBKaY6QLSlZQzYl8aiYkQ7vtnmiZCsDXfWiP6/sohoCH4v0NtHREhpYiqEkNkmjJVO1o7RHvPJJE8TYgIKASUbz9+ZF1XHh8fOJ8eUNrxmfu2IyIsp4uvNaH3SmudEKB3ECBPEzEkgtj6jjEiQWi+n0O0ddpRtFZASFMmIKgIOWdCgADUrrTWOU0TIWcC2PX4GhP/c7yCx7DeGhJsL4x7FWOk+71trR2/O2KUqvp36mhrNBS6stdCKYWy7fz4xz+m0ZnzBAp9L7R+fxbX2xVBOJ1OlFKP+LauKwpHTOwiKIB2QIgx0Hujq1LKTlCIOUFXeu+klECh9QaqpJhI+HpSVZq/mQQBxA4HEQsi/iWbKoIwe5DOObGu5biZtd0ftmCbcUSk1pQYleSBtTelR+zvVQkiEGyT11oRgdP5kRiE1hVRqN0OgJSz/emHx7j5IxD11izYRVs8tTZUO9SOhk6jMeXpeHghhCM4igRyTpTS6K2Rc0aCHThiEcYCRa120E0TX3/1tR8oSi0bwb9vSoLfPcS/ateGNqXWjTkKMU7HwpvnxT67FnpT2/QRTqcz0zSjKvSuxASSM4lAFLGg6QtYxD6rtUYIgiBI78QY0K4Q/XF1u167Z4Jqp1U7DLR3tCuaMzEEcgwExTdDp/dCnjKIIBIJMYAnByEEAtC126NHWdeVy+Xy+x4Mb1/zPJOmTMoTec6UNrOuK1vbLUV4s05UlUAgp0xtSggRkW5rTUFipGoHDwqt24bKORGzBTchEIOfmjGxLDMxRvbWqa0yS7aNJ4GYImoLGySi2u3AR2z9KpRW+fj8id6V05Rp2ii90nslhUQvtra72MZWIBDpMTDlcU8tNwlqz3PKmTBltlJopZCiHSSEyDRFJAWkC006SMBCXyeMRdGgS7cgE4M9X18nQe4JzzRN94TPA5+qkv0AEt/bdj+TPWsRamsQLQaklOihUWtjSokQoq89SzgrcDmfuTxejmRDgiAKEoR0udi+ShEloCgpzah2SinkHEgpEmMiSLDEIIgnNoERzlOKdJSMIPNClICI0rvSaYjfj94VEWXOCUmRI3r5ATViytsgf+yzEMgeiwA6SgxCwOLJ+J0RY1qzmDIS6AYEAk06np8TUuR0PlNKsaghEKbE9z/8no+fPpJy5osPX7CcFkiJFAPURmsdBcq2sXVlOp8s6LdGo5FiomsjihDBf14JmmiiMBKDbrEzxUjrjYRXCdKVLrahA4Hu2SVd7QEKiAqtFWKyLxzxgBmD3TAFDQLaUcQybhFbaKidbiHQ+ng4FvBH4vP2xA3BFnKIdi0528kYWgDtnu1Hu/G9WUDzhzEWPP6nVUqRsnfLpFq3xRWCZR1iQbG2RoqWDanagdE9EARJLMtC1+pZOV5FQfPDJYZIa42cZ6aUaKgdhl1JMdimUyXEiPaOyIygKN2Cut8XO1ACKVmWFrBDwLI9UKzyWm83luVCTMk2/V4s4IRAmCZSjHRt9m4x2e+OHeQxMQW7v6qdrkqyiGEb5E01NaVkvwBe8XkV0hp5ORE1oNIptxshRup5prXOuq2s68qnlxeWZfmFq4kUoh1wggcEYZkXRJR9345n7Q8A1YaqBdXeGnutlgSIoq2xdft3W/gV6bboguLr3tZvj5GH0wUJQhdlSQmNYs/AK5KO0iVTS6XrTkqJ2pU5+d/XwjTPjFiTc+KyLERVmidMp2lBZCdmC3Q5JcSfd0OprSAdJNoKERG0Nsq2U1qzA4lAium4B10gqPgeVjqdqp0lT4gq120jq11Po5M12GGrSsz5ntyJJSIShRCjrXM/mHNKxx7r3RLKFCIpRCQmJDia0DM59iOIxxgtadRKa/D4/j2N7vtlonvFP00JkWjLUwK9dVpvVi1IYmTMy3Lye2CVW0qRkKPVcqlZYA2Rpt0qJrH9CZ3elYb4+hFLgPLs+0g9CbOKQN/EkLfVxPjvIwapWsWaQ7DqXSCKUN/8zqiKYkpH8hxHzOuWbEwp00OEp0e2dePj80dondO0MJ9O9O+/Y982QxhipvRqh4uv52VeALhdb0zR9tBeO1EC6/XK7bbyxRcfrIqaJiJehXUhJqErdI9HXazCSSEE1m0jiHA6LfSuHoTUHogHHXonADkm9t6JKYJ2WnW4B8sCqnZqV6IHectqhR
ADeZqOzAQRew8gBL84r1haa3S1Babw5jSGoMpWu5W4Djd1L6VSjJ+Vd+PU772RUmaaJ3qzDMc+0wLv2yqiqxIk0NWuK6ZALQZvpCURiKgotTXaXphO9lD2fSPniSSB0zwRQ2TvhVobMQRSytAbI08JDsuV0uhajmxERCyb9w1gSaEvQvW9KoHrdeO7n3zLV19HnvIDm9o15ZT84LRyN4gQgm1yBVopiESmnGxdxUjbi8cz8ViQoFtFYRvbNlaM90xKVdn2SoiRNC8QFNFITEKKgmL3rU/Ktq7k65Wy7cTz6ecfENEO5NNy4iW9UEolxkzOndY2C8p1VHXNn2MgCKz7yt52kMgpBnqzZCZKQAKUplxfrrxcX/ilH3xFSonmGzQS7CBuDUKi1U5R5ZwSEgy2sIDQvXy3QJZiIkhHgx0mOcBTzlZpdlub87IQUoLxXEJgipMd3L2ybiuP+QGRCLWgfr/lgFjF4JYUueRpFO50UQIGdFntYBUC2gkhEVs9Mt7JD4LeOzkYbCxA6Z0k932WQiAkgRjIqszz7BVTcCgqojT228Y0zUdWPIqMECIxRxhZt1dGy3IyeLYUJEVLMkWskol2AASHSSwNC/TQkCqAVfQp2T45LzNIIHgShUNDqkqYAoFgCQbYwdybVRGj4vN92HtDYyREO5xitHuasiVrY0+mYIlwcYRCRJgGtK5qVbMIIcgR0wiRqYfjZ94msaOCxO9rCImYLeluTTiFGVFo7cEOhRD54sN7Hh8fqMWSwarVYCLtBI+zMUYWvGoKgmo7Kr5aNl6uL1wuJ4ttMu4FhBTo1XC8HDK1FyIBjUqqpXB7feZ0efAtahh2SpFe+5F59m6ZpgSDNyzbhBgiXaycF/vO9lch3P9lHDQWuUE7pTRiTEiw95Sux8ZDBC0dmQdWagF/3yu1Fuq+kx8eCDFSym6YoNh1xxD8vxdbbCKU2lEt1neJkQSWTbRGKUpKhukqoP493/ZYUGUrBfZ43PDWGqU3Umsotve1dwqdFCKlVvZSqLXy8PBAQFj3yrTM1FZR7xncq4fR/7j3OUTswEzcD1Ar960KujycOV1Odpt7I4dAjpEeggW63tFwPyzVv3OwB+yboDNny2xUOLJ39fK/velR9W7XMOVx2Nv9CdrpPdJD47ScyTmRg22Y0ivrujHNmwfMn/8SEYecJuurpPuGHuV/792zsXHf/FnZfzqCYaVbIiCZ3gybrnWH3undAsi+WzWQpkSojYKQuvccciTESG/NqrUkaG+ELqTJqgXxZyb4vvB6IwTL1BOQzmfL0GNkFktGOh2RRMyZuVcQ2/giQpwSSaJlc63TRd/8ncNkDjeNwwECMQTW9cbr7cZ5WZjydGDNOUaaZ68qQn8DDZatcDp5gPZqQSSg2bLf6bSQQj76ir110uN8oAjBM2RBUa9WrHq1NT0OqZQiJRoMFFOyICVCcJhOBKKMiiYgkln3nRwNhj2uy9GG5FV0V4XoyV7TI1hLsN5d9aAcJdBohI4fUJEo8cjyQ7T7KcESrO4Zuvj3zsEODLVsjd4s9sUQ7ZD0RK4riCd5QkBKPeDgEVvsn20PjiSlRTF4h0iblQdgS4lSiiVd3SClrRSHwTHYs7Tjs3NO3NYb+213WDVRe4UY+eabXyKExF4NEdGuqIBWq9iSGJQnRCREunZSE+V0ufD4cLZyneCnnEDwikCtCdV7t8Uv0BS7SdipiogfIkJyPK73Tm92KuecraEshtWXfQd2YkxcLraBRsas3TZFUGVbCylF8pStwijVm1QDk7asI4aAvb3QarfDIliTPE/5gK9G1imOByPdMwE9fgaBnO4N3JgseIjo0f/IKaHz7LCckFOE3imt0aIHMAnMy3LAQXursFvVlLycXbwR3Honhng08EeFE0SovVP2nSlntFuJPuXMw/lrw8CxzZUGxKFq2LZ1rO0o8GavOJ7uy4A9FLrafUr+swNuatI9g4yePacDP48pEJPjrr2z6RXJsx2Yas3SGDNkC7ABKLXy8+sIQGBZFqYpH/cp5EjYE1vZkdoc/rSAFIJQHV+tQQkhkaM1/hWFZLj2vu2U3nn37on3H54QojWmu9Kb0ppt5Bytgg05MCcjKoyDR1WptbHVxqNDDejI5i3wiDXziIED1+5YFaytoTFRW/UDG1QS8zyh1Rq+PUAekK9nobU1ylb9uYI2g/xEO9agAggksYrj9dOLPed3iRDtugnBoJdgcIgkIXRQPzRQmOYJkUBrlek8M+XZ+ndxZnpDWlDttGaJWM4TMSeDN5sdfskDfQiGIKQUDb4Odi/avntVInZ4q5L0nkQ0VWKAlCPnMFt/NFhAD8GCuTWMxdZvs30bnEgzDqaRnOY3GTxdaMGIBuIxIvh7iUPuMSXL8H2/xRAJUVANSPakqXU0KAQhBEtMrbKLoA26oSv9aOPIcT2DpDPy7fFKiCV5qkhXgxa979F6h97Z6+5wl1dER2/rXuXv206I0Ei2T7q1FdI0HfA6CLtWJzp4TxKgG4yGKq+vL6Tzsjie75miw09TyqQ82EnqG8C+YK2NEOWAMMRPRxll7ujuK2grhDkflUjv/cgoxyYZmfvYZCklW+i3m512tdN6Z8qZIvbz67ozLzPRM4au40oM20/JN1mrRM8qx7MYgX2aJgZdorZGrZXr9Xo/uEI4mFIhWqAZ2fxokneHz3rvlHWltEYKgdPDA4I12NU/+3w+27X4qhjQWGuNIJ6hOcsghHAE3BQj5My+74QYWW9XNoX3X35pC0siGo1ooKqErvQQrEfUoQdjNIgKOWYIRlwTUTITt+2Vqta3STl7o8oyIBzuKL15tmx9lQFPgFBrNShyiUiEvTb2fSVOiVgTeyho71yvV87nM2m6N/p+v9c4QFXxewOosq+7B4FA2VZv5nfLsrRBM2bc2AQpeFZGp7ZCr5VdApMkuozGojXrWhdySBbMwx2y7KgnDxZo9loP7HoElYHj23LqR0CwnlQ3OHFg/r7egx8y3dlhju1ZhRBtu2mz75+SIJPdl15sjWkQrtuN3BtTnq0n1IVlnvnqq68OzH8wYoxZZNskSDDoNIxr9XWIJTzx4cyyzICtncvpRIjZiQGW/cKCKsTRcI+2fgMe/Lp6cA2U3okoSayiCctyMGmInVYLrfXj2ccoft3RkiyxIDd6BaMCGgE2J2PZAajHn+N7w8GEExGoFbodNsch7s8sJgFJR9VSq/UpoyTvZVvFBtZ8ziGhYveoNIgSPR4o2Q/FgKDxDr+9va7m6xSPTm2UA77WXl9fOJ/O9ll+sMx9ggyESFlXQJlSZrCtFEvMY57IS0Zro2k70AEVS4p7qwbqxUjtBk927HAKCtu+8Xp9JUkXz+67n07GXqqt+QkZjv7EnUEUrEkWBh9N0FZBHL8PnZwiGgOtW+9iPEztdhOW0+loZo1sedwYgFo2vv/4iV/5lV9h34stxpSQnHn59luWU2VK8WBbaL8fEjFYf2LbN1pTLueTMXK0H80o8QfUS/HKwk7SEK0RPlhd6lnkwZDqG9M0ee/CKGaMrCAlYu+G83sGU
EuxDN4bgOpZ54C2DJYzVljO4aDdNe+bWHZj5fy+7zy9e0fZi7W7/cDtfv2JADFQuxJ6h2gHbCCQstEy991KyhDjEQiv2856faH2ztPTOx4fLsySkSWyroVGJ2C9qEkM52zqkFspTFNmztkxVHvOp9NC8sDZamPdC9O2Ukr5hQ4J8MZjN4qgJSGdecrUHdZ15SfffcuHp3cs59OxjohWJY5g07VRtdG7sdB6MvaTiNgB6usq+j2049zgq7H2W7dGaIwBrY2H08myYg+gloU1S448WZFgScVtL2zryod374n+LFElh4jFRLEGtdzfK/halJGCeg9hrEERIeVMqZVeO2tpxJyIGqiqRAmcl5l13djWnSTWI1BsjQ4aOaiTFcKxBzpKfrhwmpcjgZnyhOTMnCeSRAjWYLe9Zz3IFAzKQpxG6dBbBLoI0aGyEISUJnIU77VaDzCmmaBWURs19V4xjAR0HLx3XN8+f3K4tjkaEKJwu66Gu8dIK8bcHL/bWnN6rByBNcVIygEJ3gQPd+ZaFEFptK7kmAle+YPBTN0roCkZYrHvzkpylmhXX1VvvsfBeDoSTe9BqvehRFguF77wJKX37msUtnUj5MgkVs0LQrNsmxpsLYVkkHDcxp6we7W3SiJ5cqJoq0iMiOrRqFbtNE/Wf/Dl16Te+9H86ePi5plt349D4bOXY8KibzImEfamBGm2kVTZSyWnRFwWyr6TspeGAqgcVLWDAuhle/XsOqWJy/lMb40ppwPmuTg9rtXCVgrLshzZgPaxscWDMNSyU9vENE9HVtdaG2Ga1jzjCIFt3YkhkdP9sAohUJs1n3volFqZ59ngBV+0XZV5Muy3O8e8FGsGl1rZ1pV5njmdTgwNScrJMnU17UnzhTAyDCslFQnxWGDzYht3nqfjgFBVaim2gFIituAHTiZJoGfL/lWNyQSQnQEh3eCkOWWYTmytEETY9sLz9sqjPqF0gkRELDmwzDCDVmO0CAf7pamibafWxum0sLeGlpU8XSjF9Cf3eu7nv86XMz/4+mt++KMfcXu5Yedvgmjsi4fLA8vZmnRMmX3fXN8ix6EiDGamYckldIPBmmVUg5F2u61c5gUm78WM3kKwzwxq+pIafKPja+xI/GyzjnWjXhFOMZNOVhmLWI8miUMlop4Kd3vWg6EXgwfMdlA7x1pUGjHYZ+eUmXLj+fkTAKflRNXGHCIyZZbzibbCx0+fmOeZd09PR4Ay2Mx7HDG6TkoorRJximmwClZCIIVkxIxpclgaVDqBTKIanBYE7XcqKs2g3JwS3fU+A+I9EiThoA4nJrLg72UQy1tdwcFaxAI3QRAiMdgVJOmEEE1vMs/WjxGrynPXo9JI3nMZdNSUM6dlAcFYiDFbfw5jbVlSKUg3VEJRZ0p55S4mtsgOK9r6sGerYJXhQFr8d8ZeH6QCg4Q7te5GdMmZqMpyPtFr47atlFqppR19GFolJaMsN20QAlOI7NvOtq2GVJzPzDmxbSvX1yspBx4eHi0O5IgmO6SaVxVlK4jCcj6hpaLRemr02ggpMomw+uGQp+xwhfUgxpdPIjQ1HDckR2JFSE5VHSCbdmXbNvI0gQu1Suks80QYpSAOE+nbKiUeYrand+8wFs2GSOR0shPwcrmwrptlXmPRd6PCqZ1jni0Epnk+WE+j5xBDoLbOXqzzL+EerI2UYAsgiVDeLMxpmphyfpONRFKMVP8eBhPZ7+Zk13J7vfLy8on3794fGzJP2Wh/rTn9NRCSCelatyw2RhOzjYXWdbBMPt80YFTU+WRof3WmA34fomON9I6kRNZOb9U2cgoEDSwxsTw8oGrsqOfnZ7btxlJm5sWYELV6U5/mWYlhotO82Gks4nwRWxLaoJfO8+1q8OFyYlvX4/D8RV4pJR6f3vHy8sp2MTHhuhd7rsvM6bwYfu4Mp3XbySkdTDH1rL43tT5a6CRr23vGbYya9Xbj+nplXhZOMdoGb+pVZ0ebNUWrtuNUsObtvdIMTku26+kEsQqk1so8T9zWjVqtitEpWnbq7LHmiENMyTZscWjADx7rBdoznb0CVGc6PZxOBvO4gCzGaHRWsQx3niek8xnEEWJgva3c1psJIF0AZnCT9fCmPFnPIEVyiKiKJTbWibXv6mywpoGgltBEF/UZKc8q6PEsjqRTHcdPFtAtWFq7P6T4Rq8kx4HQWjMYOQTSlI/+BmAMx95Jeba9L8I0KOfVnrs2+13UhITVEyx1yrut4SEQdU0I3lju3SG/uzgY8X15kCccTcD6pYqxGa2qhKRQBmzvz+ltZSEi9NCptaAeb4aGQoISEGot7G1HW2M6zRgl2H4yKGg3mnbOE2cnSsQpOXtRWE4z8zSBCoqJVox9FpinhQCkkIwB2RqfXl9poiRr90FSW5Qp2Mly0NByQkUJtZJECJL4eH1lW2+keeHx8cHhGWPUhGQshxA6ezEsep5MPW2BzzZpc673wPfhzoA6egjjxku0Q6j3Qx19WuZDuNdb8yDtWKjfuiF0k+OBW8C3nkX0stFKveQPbS+VcnvmdDpzOp9s8cdotDM//Uutd7YW0JupQYfCeQBfXTuXhwvzshyNu2mayDEdfZge7KCVbtTTKNYDUh3NdHGWhanWU84HldJUx7DMM92DtMFTTs0zigWgaARp9juW+tp/dzkNGUGjZVg5RE7z6WjeG0XPmsPSBaEZ+613wLJdeqchJFWSZFSMLrhtm6nF952pTNxuK09Pd/bJz3x51RS9gd0dRurNnh/q2h6HLc7LfMB122bVYvR1k3NCW6NqJYdIEoHYCQ3XbwhTtqDVdTQxbQMN2irN2HptHAZOh7R7lEkxIZ6NWifH1PAxJlKsCMbvb310t0w1Li7kGwdBVSVLPKDqwSZSuolInYnW/Nmc55mWEnurTkiwbLdVO5DneTLlrXIkflGE87zc9RweuB4v1jdDcbZbRIN9L7hDJUP7MJ5TlEBwcaKqa08cZRh6hjvIhWlP5K7HCk4NuvdqDKox6DiTUCbrBJOT9Sq6r15Vu3dW2QaidG//GPSkVdGUTHPS7JDKclw6IP4cXITYlZD9kFJxkTF3jdHoMyguPDb6t8USO4wqto4ihhIoSqIZPA2fwU4jVokI58vFCT+mbWimpuR8vgBCfn3huq5GdZehLjdtTQmmF2o067dGMV1NqcwxI8tCadY/RMOxb5PH5V6rISI5U0rh4eFCB1IX8SxAqHVHxaTee6tUGqma/F9iQNShpH3nuu08Omd5NL5NJNbog5oGBxsHlOTNleDQRGumyLQMFdv0R6bm9DKE8/l0NPgAWi3sLocfi7v3Qq3FP8MWyaCqjswf7BpHXDHoZ/DRrSIq643btrEsJ0zU04xO+qZUHBDbeI9SGj/+4e/w7t07vvj6K/Ztp/ZG2SuXy5lpcgl8tABWHXsEq+KsErKDM/s9jTGiLvyRiDfz2wEzjewvSqB2u0Z1YRZYD6iPDSSjYdUNTxZTeAaNILaxREwfsO2VmCdO2Q6yXgoi04GTjo3cu0Wcrp2g4TgUt2pK1rZ3cg6cTiagK90Oitv1
xsvLK4+PlyNT/FmvkBLZA9mgJI6mZNN24OiqJggbOKMx8gJrKfRSmNOD0V23QovK6WQwZcqRBxGuYVQD4sHQsshRHaC4PqPYgS0W9HI2PUWQCCi16rH2UkqkbIlIzumAOUTfUncHpCsW2ERIKRw9kNHjCDG4a4BDGKpo8P6ZCrd19UNopgBl36BXCCYYnWL05qmvfg8MyXU11luakClxva3kaWIZ1UdvdlhGjr1pZ/EbaIk7gQXFdAdeNGj0JFCC63Zci0OnihgJRo3wkFNAgt3L2A2zTxIsyfHMWSR6I9v2Tc6JpsGtKAKlWkUfvacVAv58oIXqNjIQ/dCptVp1HxKtKQRlfpNtDoErzjwcFGL8u+poY417i0ODokaFbS5Ow6BPO7DvgsSRvL6Fmu2WG11/9MgeHw0WnJL1D32loq2zq0ItTtUFiUJp3Xtxhgr12hwxMaaWBzQTT7fG3it0Yz2KAMEqxBRQWu+UVlm33U5ub0IO7UFpDdFOCFbmPT09kWcT1Rz6CcExf4MsJFuDB1cX1tYIrRGHyMiDwKDFjhvU3UrCmjVKSv4Q/X1arcZv7405JVo1Gl5MCWn3UtCqDznsOVo3LUQ7msWjGeQHkkNdT+/f8+AVw1CED0hslKCj6R0cg0w5QYzWE3C8uWw7ilJcGV5qJ3TzScqj8R2CWY40y/innDzQFLoTCkIQL78BDY5NeC9HhKLOypgmK9cB1UCnOZyFfzcjFaBKyAYBNjWBpKQI3ZgVrVVSNuWp+IJtqkR1+wO3BRHpIEaPRewzgt/vFIIlFhKNlWMniOHg+8627VzOp0NM+bNeMUamZOK6h8dHWmus/sxTjHQR2yDjUAlC78a2ad2sM0oyb7LelCUbfCYdO+TENynBoRGBNlDT7n5BgwJrf59SRD37jCESpjutkUEh90NAvboVZwVZPNFDW9G9ATqe53AQGLipqB5iv+R7YFfbF/RK26tpILI134m24bfban2sKXGeZiQ6scChQenG4HoLedRa2dcNFhOgRocVtavpmQb99Ahod7uJOxowMmUPkq4zSP79hyeYCJZpx8qw7ZBQ3QlB/L6oPw49LCuis/MEYV2NjDJlE8oO5lVInRCTZfyCOQc4bASBFHBRXUCk8nq7mrNDVmrrTMt0PM9xb1D1/pZXHaoHaxEdVFYL5hKcPBEEulfo2ghqgky5L9fPNFL3pCkgoY+8FLPisUQ7pIjEaKiNkxkanb7t7Fs59DUaTHsyLROhKbUY5dqQm8ZeICVT229lNdYYgdt6QwSW5UwPHaypn9y3BqYYicnsMlqtEBw6UgCh1B1VIeTM+XyhNbOcQO/4bfdDBjgyfWtEJwbVbKgwYZTMQ9lrN2rfh+GeZd4jyLdm/iQpJ7JYL2HdCyJwWqaDWithYOZuctWs/Eo507ftoKAOM0Lt/a5qZjCZ6iGyesuqsOZRsZ6DQ1Q5Jb755htSSiYIwhlV3bxmeu+cTjOofcfmwqwwWZ4QHQJTZ0FIMN3HYJf15sEqRqJCCkJVUzKP37+cT8QQ2PZq/lZvKp1g8ZAIB6vJgRtah9Bc05EyYL4uRbo3s4Q5W4amOhgiVqr6bXOIobPX5o3lyOTfbfydJVlCce8h86/5+YfEkamGQAfm8wJYZUbrB7tO/L2MtttJ04TUikgjqrGOijYTG3arIA1VsUagBN6sH6PHgjUdjdpqmPABE3gCoypmfTIybPewuh8w3r/onWEaKYzKNfj9e/OFFdatkGM31XIIiFuOIEZzzt7r7r0dnkpJTK8wBJPLMtFbZ5qNoqm9oMEqTzOFDKQ38NH1euX55Zlv5Bu+uXwD6uZ+IVnS4N8tffbMPLHrw+jPVLtjzVVHO1N4myWHo2+n2tm7kV1SMlM9BltR7dn0bnoJkWTBPtwx/GmaqK2608Ew+bSGvrNi/LT3prqOaxh2FTuqgdP5bCZ3yLG+7VFYPKi1gTq7yKucUdGOmGUJRqfUdpiAhuA+WbXTesUdi4x0EINX+xwoB9xhqJzzm/9uqUWHu20OFqeSBF72ld46p/PZDgNnKdHufk4jflU6dKsuhWQtBDdCbNL49OkTl/OZdAn0DpoCScXk2d0z7AP/9KZnaY2qhRTspOmifjgE8Cw2RNi21Rq7KVH2nbLvRwMxiDfpPONPwah6dqH329CcRTRPRu9rHaOF+kKOIRAmyxCtWciRdXQdknJTQ7ZeEW8ugwXS9fXV4JY8Hb2NUe4bf/x+cA2LC3XMmRCOhvDwoOq9OVVOj7+rZWfdyhG0d1X22w2dZubTcmQkVs3YtRs+bu9T/Z6IQwDDX2pdNx7OZz+ALUMam7O1xlaqPehaTE2MElMmwiGWCtkM04wLXa3k96wOx4jLeuN53zhfFub5RGvVLByaspZCnObD20oEGqZNkBgprRPj6Cnd3VZVgabsZaPWhb3sZtQ2T/y818DpBzRSigm+eq1srbCN/pA646Yb5/4tKy96/6LsGyEnpmk+nDdBPMOygNIV0OYWKHEYANuzrY15nmlqFWKKE3FAgjJgAqNTuiuZVVpYQLT3dgxa5CAQjKBjMCacnR7etVP24nRToQVlkmgnF9AlEByGlBCdBu4sNMXWlAREO7s2skZ6UKD5Ur+7la7bZvonTwZqa+z7zilGmijZ7//vxtHHP1uPL6G5uX+UkLyhKw7FWbKVjn6DVSbxSNbE+wO2rg3iCiJOXXcIW0e0EFLKpjr2w2GcC4wK6c2elhAY/kYAGuz3RSqkYQUSSFM4MvgYouuLumXv3Hub4769XaNWKXoPT5s3oMVdC6yyGvRrxLJ4OSqwNwuNO5T3lgkVVGkxcn54YNo3R2g80csjSRBK2Swj9MR6ULynmKFUggpxnhEXLKPWc5pS4t3jI8vpfFwj2knqZnxmn2vl6J0903l+fuG2vjJNiS8/fE3o4g2cfhzU23XlN//Rb/P+3RNffvX13UobjgqhNSVicFGeJmLvhy3AaNIeeL9nj4PlgB8o0T10BpVzKJNh/GwkpOyB3YUqYhTG2hpbKfSuXILZB3SHjMYXCXBULDGaSrz6aa7d6HxBzLZAe6dshZD1MPqLIfB6Wym3K7iKfCiqa61M3CGDsYDtAP1danDupevwKaplp/aZJU4YOmXOoAMSaK1Ry8YyZTQlo7vVxnlZzOy1N+ND+3cJGkiSiY5NajMLj9fbjdv1hXmeeHqY2DdxkzX8nlsTTkIwAVgzLcB8Dq6fubPBRlN33906JVgw2NfV7Y1//kuCsCwLy3JiSq9cR5DwhCY6ESKKJSOldfPLivcAeByowZqQSjfKt3ajtwbr/xzJR0heNYzGt5u+RWvo1WAeZritRkp4v8Az4GQpoyn+xWE7UykfVUQ3NwJDQrxJDhaI3rRqilPJh3jMOyeWbIQ4mmsH2WPg1yIGPRlcBBPJ4VJjv4gaE048AL1/9453T08s00SpO/un1RKYKSO9kePJIEO5wyIHDdX3hCEsxtyyPpdl7HpssaHW1oO0kvPbvexsMt8Hg020joo5xuNg0qM6GIFfxoI57uXbnpeqkRxyikcGj9jYgN7
NKl6lERWrxkYC7BXsuHYjjODN63AII0Wt5xrjOMjupqcpBJpYVyISPPFQaGb9EiV8Rg0fse+uyrbKvzfbw+fTiT0It9tKDEbikd2ucZqNEvvd9z8xn69puR/kIdGCU+nEKNgRT678c/JpYSi3gyhNIY3SGb/xEu+Zey0bIQUuD49kLwd7GA/J4IWHy5mb2BfI0XyOQgj2PqOx7MFwXY27+/D4YP0OXyja3Lvcm93Da+ZtpjJKptEb0G6aBdTpon6oaTf77pgS+7ZRN6OHBomY5b99t20rzHMmpvwZS2WoYwcL63CaHeK4Wg8hXdVuKkhfcJ+en83XZ5qopRyZ1+QeRLUafjyqjre6iO6w1/gsVT2smkWwHlA0P396BQ3EaDbenY7um0EBfhDMKdrhEwJhNN7FNoCtE2OMDZiltIo05f3lzMNp5jSdqdWOeUUIQVnidLCKrDRPhAAvtyu9mmCr1srL6wsPl0dUI63tlNLJWam1U+qOzQaoDP45b4LiT3udlxMvp/l+H5sdPD/57lsupxNxMjaVdv1dDcuI6IA34ewN8FEZer/RAvFkvl5DTGdJktkxvO1PIdGqM7+2VgvP643H09lddzsegj6DUYepnoEYuH2Nd7jcoNKSpX7oCXrtJkrMbtDYB+zgFYMfEEE5RHYWUBRUSBJpA7axzINkvIbPhFN0x9lFKL0ztcanTx9BhafHR1977rfEyHLNiiKo4ftDaZyC+H50ybjI+MP38mAxec/zqBRHxSXAbu/tENLpdEbld1O/FY4nIWZmJyOr/9yxdTyLGBMaut8zeybt+FmDtmgN0YbEZP2CAY8N2NQhqEBkEA7Qu2kpYkLY6Kyn1ivGfLM4V4PZ+wz4UIKptMUFvHpHk45E5/gu3ZKbkfClFNn3zrQsnxkPBpTb9cbj5eJWPnqs1+waEOvz2Loe6xTt5JAodQfJtiyNBCTHDRzVRHDP/Jwnnh4feXx4YJ6nQ5UN1ogadM+Hy4U/8kf+CE/v3zMG4LRmlYJVf0OBaq9t3U3t7Jleyu4v0+0BjoPi7WMeGfZdj3AfMjJ6HbjNx3CSlBAozXoL4pAXYtjqaZ7Yi1E0g2+QON7zuGm2MILcPeWt12GrPud8WHMI+GClzOV8Pr7DEOwwtoPfPwl39ae6YtM+365hyvmg5dbayWnifL54Kf/5S4djqx8qAGlZyCnRaqNLpEs0TxmJaDDTulJtY3RVmliDNKfMw/nC1iu3bT3gEIMHBl48AqFtmvP5RMwTEGml8Pr6erBAkpMdLFhXtnXzwSob6/X6cw8IsAPyvJyIOXFeTtYo7x1pelSHIURKN4+rQRsWlMHcMlVvQoP1vLT1z+YAHPeyOVQXA9ftxvcvr3c81+dAxBgPv6fX9canb7/zZ/pGE9BtENJ126ilGq5tmNRxL0O04F+HHcVhGHfvXR2Dug7G1b36VWcs7XvldrNs25qs9/0jfg51+pGEWLC39UuHzQknI1krpTDNp4OOmd2OvLXGertZEheNrdXFBKPm0Wb+TbZLBr36noVbLyC4u7Me1/r2ZT/rzL141y2MhPNzurz/sv8hP2UxfQ7b3IejHdYeb3okIMYN8YBrFbzdmz7k+eP5eqUxTLvGAS2MpNWeWQjpfmDoPeaNOS+OzB39Fnv23ezLw10JP2LCMDyNEsnJ4k8Q4bQsLLNRwEPOnM6n+/Axj68jFoMcz0SQI+aWarDeNA2PLqtI0zHFLYA4z9myZtCmXkKPrENtETuLImo4MuZlWZy3bw9zv5qP0bwsXC4XUjC1dG/NKoDxUFQIIRN8ENDb13GKjrLvzQIZi+/tQJDb7cpt301IguGk58uFXipl23m4PFjDUyHOMyNEjMqAEKyp2g2DrLWaUV0Qszmv1RXaBjcJo9pX6J3lZLoKO818etxYPGLNrQGtJQZeb/CA9g5T9qV+b8C3spNiOIR0XTuo6VmqmjJYCQwHXSurrTJs3YDE7llmdby598GcEMRjVxJBQ7Ln6/e51YLMJ0KwASWtVVAnN7gNi80qSY7hC2FeOJ+sz1FaQcTZbR2iY/n7vrPVwl4rp4FJ/oxXCIHT6cTjwyPbtrqISnl4erSg5FqIXq1CGRtrrB2Oj1AznnNbkFoL6H2qYWcwWIxhdLuuxBTY5xPDUTZhsIUG6O6k+fT+/ZEEiZimJsZIija9rWsnRfE+1qgWbNaB1mrJxZThjbpXtTPl5FToASF5pRACrm50KKy9mfPQjwCXUmQthdfrjfk0MTucM0RlUQIrjb0WgswmFrzdUDWK8LatlGVhr+UQhzbvHYRoVFaD4yBPni178FQxrt1Igu+Zvficis8x+Lcv6xVgJASEOKqGn7ZO1CtkfXvafP7ew2frSPC4wytTSJTuDEknEoQQjTlYdjrlOEhiSt6Q78d3064H5DauIHjVLghRldG+yDEda1O1IjGYCnzcM1W02vPpXr2O7zVi3YjFIhBy4iTCtm9ugChHXHy4PLBtK/Hkc2TQu6NztypOusUOo1X7vBr/ffUYEWMgfcbVjSY+6b3ZhZhdKREbgzjFSO1yt5DwG15KYV6sKdubYYuSswVcP81idGGNy/zBWT210Utxap59gxQdAnKsdcBfnz141btK1KuGIJHX12ezmfZsL0jkddv4+P1H/uCv/gFrWrZC6Hd78EENa9Vk94gcn9n2jZ4SKWeen5+ptfA+fnGMBxwqbsPF7aCKbh98TjZlrLdm1h5+6h/F8vj9ENBaKet6jI7U1pEY2F5fqb0flhwGVQSU4eRowc+CiWGmrQ67lLsGwnx+zEojyBuGCYaJ1ybUspvjLsKcJ/ohprOs4uV6ZZpnLunE67qSfe7DgGJyPhHELUcEa8ppgW7jJ1UT+7by3XfK48MD23KiP3aHHX72y4SKkeg9pvP5jGY74YrajJDz6UzLlb14Y1ctkA6IEXG7h2A00bUUQoA5mlI3BVPPW4YoXB4fjEXnh0LsgSDJNr0b752mBLNl6qNh3rsa4SJGpNqzz9Nky9uhxDFiU+AQM4UQmLyZH5BjUJQBRhBVII4ZLIFOIEjj9HA5MkywYBFyoqrS1SrV02S6kN7vNiUiwsmHU6maSZyImPK/VVShXCqtFMo0k1PkspyPiY6W4EWWZXK3aKzvFWzt3Xfsz8gEftd/dhTGq4z7wfL7JhJy/8WjOhn/yQ8CCeLqeg8xR2VsYOrofdDvIs/Wqjtiu9jP9yp9wErBzVQ8mVOsvzOMIYe2xG/CgXAgrlcwi5VB3hn0asn3PhPcD9fxXQaEDSCtUZMwOSxfFTRawvv49I5TPSGoTb3zQ0u8r2SMQ6OR79uOBDMSteVmIt3WzG0jHQwZv5Dof+KOkL014jSR3ZpYtVkJExKEwLZtn52iBrF0zqcTp+WNMbSfTkNAFDxDqq2bEChEa+KhTNPMvu+WhR8P3FgSNjSo3oOpv19rjdP5ZJvM8UFbaGYwV0vh0+urqVO1kYKg2Tx/h6IZv2m12XecvXwbjbaBiQt3C4WjucSYB5w5X85HRjX6HH7zjsMIQHxxamt8/Pgd19vGF+8/8Pj0aJ
ll60hOpMF8gbvoMNxZD1FAQ6CsPtrSKxJVZZlPEIzt9baROgJnFwtIij2TKWXPDo3MIGKKcgmYG6VY8xqBlAUT7ok3Sa35vddiVD+1DZRdiDmw7xSSqUZvt+NZ/rzXAaNNM1PK7CGhk83+Hsyg6PRT2Xa6RCMWRJ/21k1rMkRIDU96qtKjwS8hBCZvGkYJJij0BZgkQA506ezFqOBTtD5Gq83pm0Nopk5/DQdk0sWamBLNftoEnxFCtKYvdm1BoTZT1h9VqtuqaDIPqS4myMKpxeL/PkbYHu/dO/O8IG1U3G8SLXENjHuX1ZFETNNBU75cHhAxeG6eiqn6nbppzWXTiViGOw6IN5+hnzfhf9HX7+4n/EK/83MU/G/f0yAW/MTwJnlMIM3kFIcbrbP0kM/6EtEPiAEfNhTtzdaQWCDueLNZjUQjMTgNXCEEo5nv1SpJQMRcBBJmnmoH1pgrc4fMxj+PVwJKSkR/5iL3GeEpRY8194PGWBxqBxWgIdO0Y6XA/XAbQmYEUsAcX5fTyW5CLcZOSJnn11euz5/4pW++sWagG+SJCDEbR3lMXTN/nGEeZlhWjOL8f3U0JNwhGr8JoSvLMqOtU8ob2qxXCQPDK7UyTa7b8Aen7nM0mFGDJltKobnGYZpmHh4vpBzpZecmhv0+X68s82yb0QN4HA1qMT7/qFb2fSPmiXfv3lE9ExTu2HMIkXWzkZrn08k2B5Ha7DAYwjsJY/azKZnRTsoTn56f+fjpmSSBeTbcMapgjis2FrS3doxCVITkgjnEqJBaG4g6nh69f4TBQTg9Dr1bUqv5GZmIrpo9dExIV2tqHRi4UrQzkciuW5EonHQmYYs4iAmXSimmkg12bVEs87df62x1NUV8gm3bKdvGz68h8A1rMNeUffCNO7IO6ERFDvU6QWitcisruSVncClFgWYGf0G7DzNqQEUY80PkWK+1qvnuzHeNyOvrldt24/L44H0oa5AP0eBdHBfQEBkFci/lGAcro1JlVN73QwTPoC1PK9RSWZaZHs1l9dPthSlmmy+OBYScbU8Mq/iX68rTwwWi0dan8+xBwg5SVfOwMqjZKPASI7d9J4s5LqQ8kdLE86fno7vwxYcvvMeu1G772/odhgrAnWgywvE/7uuzvsI/5q9bEB3NcQ4I56ceOnL/h1EpIzjMMhiGzq4Uh9n6EBbeg676/iIYSiCqdB14vyVeY360rUKbyCkKeTItSduMzhrdXsPih73eUvMPVtmbhnaI0SxGeuJabrbncyR1E+Q2DfSyoU0d7nRrHd8/te521okxMFUVCd5DFSilEQ6RW2uUbeX1dkO9BCr7xo+//55ahrgtWgYfo1Uab3FSx0HHZuq9GY2wNzzBO/5OFW43C6opBfKUELFDZgTqGKNTffsRbEdjaPQnRkY7HCPBsxnfhDYEpSJBOJ1PxHmykxIxocmAUsBP/XvP48AOvTIa2f84GFTNFyomL9XzzGU50zsHPj7e235uKGLt59dt47bufPftt0xT5pe/+WV+6ZtvOC1nRM0tVJs9xNv1lb0U+85Oo2zO3QaO6kr8gBg7wA4vY5QljLcu48vC4S0fsIwE70GNHxARiHiW7eVwNFbXddtY992yJDoxmhf/2xgh2JCqVquJ3mq1TF9Hc7Ad9/XnvaZp4vHx0WGnxLzYcBxxexJR87/fy34cUjISkZhAhlo6I64EVjXNS8zZ8eXOtu1v1oCPq/Rgoao8v77wel3NWwlzGbjTuA12NHFeul/8myrnCKLhLhTd29ALOeSbIvu20ltjnm1ITG2V2srBfe/g68MzRbHnLSnZ6E2vGofP1Pg++HPea6F4UDiuS01UZwe/MxxrY7+tZrfje8HM5zG68dAeCEf1PijDg5Dxj//yiP2P8TpEm3r/91LKZ0K1n/5RdiiPflEYcCwcDCtjwjlINRIlGcLIIc4zZGQkyPfDx50KBBCDv8WhsRDHWl54fHgixGhjEVqj++jatEwu2vzcDPBtjGq9+z62dRaDmRaaj5XceyxroayFkcQMT7acvO+FJRI2VsE+7/r6QmhqtroRm0OACDfvI7x7euJXvvnGpsp1cxGVGA9f85CTwS6uLl3XzWAWF8sohqHlNNF7hQONV2KyByFhNFP1jVgHg1QYugIX6+h9cHgtxRlUd1weOMQpt9uVj999x77vx5S6lAzLPp9PPL5/dzf/e1POdYvyxwjU55dXwObZggWFOnyjfPPXZhTKmBOqJr65enYwHm58ExSmPPP48EDZN/7hb/yG8djnCQmBve4UF8S1WpjnE+/evSeEwMePH7m9XI9Gewzymd3zuu303lmmfASEmJOJv9RYO12VHJPNFwaHsJStNYcxbKEc1Ny9U3UInZS2V+pebAzoXvj+00c+vbxAU1JMNqkLjE8urkjulonGtxbNpVJrPVyHf5HXsizkPLPMC+ZAagnFd58+8un6SgObrRECUzjxcHokxUSrBjk2VdcB3ZXGrTWnhBqvPwabtzCo0K1Wnl9fjwpjnmfev3t36CvAFfRvglXplabGoDJqZT0q4l48MHtPKYZo8yRCOMZ5fvr4Pb/xD3+D19dXUvTRst10OkvO1jRW0ya8f/+O2SmqIua7czlfPEEJ3h8YQdT7jzh89ybYDGjjtq62pty3KqbI+fGJoW8YvcLarfJvpdn3fPMIjaF3p8D//+Ol3exs7GCyGDPcj+9xZ/ww7jfnh6Pj9TbeOHwWjEX0s3vnoL2jFtHtve/C3PEyW5J4mDmKN/LH2Teo39OUjbHnB0yMiZDMWiP5kKforLDo1xQdeh6fJmoswuVsc9RDNE3S+A45ZlPmYz5O3pRCUmA4KRxITJL7PfJEOIhjaK/rFVDyPPnsVGFOmS/efzh4+2OiTArBmjLOBrETy77wwJfVqwwNwm29sa4bY/xnzpHTaTEzqjceQTAYP/3QJQzfIVXl+Xrl0+sVcFMv7x20vRwZANjBcrutfHx5sQaxawdO02QNRBGurzf2fTv6CoMyaKeyY7giSLdsTsbAoCCHXXhvDdR6FWZbrnZYTJllyb7xyqGtGOVcFKMcL6cTv/LNN+NWAjb2c9t3wy3nmdNp4XQ6Wd+k2VxtE95x2B37ZA6WeeI0zzaop5lHvDo2GmPwGQmjMWhQ3fCuak6DtCzDDrbaGhULFHaAWIYYVDjNM3memOeFZV5Q7dz2jfW60cVmIgzRl6mg62FBUNabkx8seP+is6+TN6xv68r1ej2MIj89v/I7P/ydQ4ugauybvMxOFjDGTA4Gw5nq3sdThuizr+/zsz0/NKy+Vcq+MTxMH85nHs6n4z6OvoE4XjG8ABrukwTH/WwNMxzsg1ptVcM0JRNFKpTaIEbSPEGMlFYdn3bngGl2q/t6bGxD2NyVVsd0M99Tas1q1e6cfTlgqewzp2MwwkMX5csvP3BeFmez4cygyvV2ZV1XI6LUgrZOFFMkX33WwRBh/aKH/j/Wz7350VrrEZjH3wWHvQfDS8RiRJrMD+2z96PTvbd6XIMFIRgN7dGzG4eDP1/7cQv2io0XHfPoh3vt0Vge6
IkMyZoch8M4mAZJQkRIkjidToY8HNdhl5fC/d9VMF0Ldp15yua9JIkppcNeacpjdjcESUiysbRtrz5/2n4/xXjE3/GK0YSCNmskBG7bxrauSE48PZysUSP2heJeSSnbwvPyxppm5pgpwWiQMk2Gizpe37s1t0Uy6/bKvm9cHi7HiWzPw7xNktvz1mKl0Gd+Tr27WVtzCqpRXE/ziRwbL9dX9taRm/Hj99bIIfCDr74iTZlSKmXbSJPN4W21sa6rNQclu73CGO+ovL68cAuBDx8+ICLM5/NBL2ytUWtH8htH2taORWHQg71P9gW7NrURxP77giA5oSinaeH0g5NBLiKGFQrH592tUoxi+P7piRCCGS46q8oO9Ihuxcy/ghFop3m2aqwWiIkkEyEOGMCgkaadoObf0oMQsR7CVjsxdceZxSdbWZajdGqpRqOLwvvHRwjCrs2DmS9qsQDbgEikbp1JvaF3urD4Oti21SqjXyBYSBDmZWaa5oOSGOPG5Xxi31YfqxttgwXvMyX3DWqN1gNTNH8jMza3XScYrGKld7JJas4AOS9362yrfIpZqZ8sS1/mxTJxwedDDAorR4+IbodxCNhaplszORlzKgYTAw4I7vF84csPX5iTq1pVK73TS8fM/QFtZgwZrfpQ/3zUR+H6Iff9x4+oKu+e3jEsQqKYLUMQ60tc3Yzy8XxhWU6g8PLywqKVvDzZnOUSeH55IeeJaVnQ5j5dEmw0bO82Z5oxC+JnP0uDbR3fl9/7d+PPO2XU+jfPz8/85b/8l2lt51/4F/47vH//4b4+xp+/C8P3d+UOo97V2sc+E7mvBwlEFw132+QGMbdOD5Gc3d9JcVuQ+6GiISDdYyRGQf/sIPLLGH0NUYGuRwySZmyqLpHgvYEa7u+hXTkUW2/RDzBvLc2+rzshTMxTp1SraLfvdz59/MT79+84LzPFPcGs+LLEYlRIe6vW5wSSID7IY6LuhbLtNv9AovPIrTQ+6JdvHmR3ZoT5q1gnfwh2cp78ZxrneeZ8mjlcL1VRDfRmGXaeklNWzTMJtYZPTve5C9I702xzcZMPksdVn6UU1uKnbG20GHj3/okcAttuE+zenU4ECdReUDh0B2P5yICFotsK+CIa/PBeq8+RsEx7TJs7fGfG+2APLfkDzJOpogUXFXqTSDB2lQY7rbd1Zd+N225VTKC3wpiaJxKIkxkBjqZ8KaY5eHx6JC+zZbzBlJ0pJiREeozmCuk9nIaSvNlf9p2ebDDNnKcDT04SDutyDYLWxmvfmfPEknxm7rDfcMgC7xuZl1CnxvtGH+NWb9uNnBemaTINQgw8PT1S/fv8Iq8UAl98+IAofPr4PRIij4+PPqzFFntD6GV3GxIL9mvZaV1NdS2C9MDByROD54LxFK1i9mBxOtkhcH195cfff0dvjQ9ffsnZRZRmeBdRrB9XvZqxz/CBOQ4l9GaDsFrr9NDYrzspZVP++/q/bRtBDVpbTiertFCE7qaJ4m4FlpsOBo4FsQbD8dRhkuW8ENTHjY6AiB1WCqzbzrfffsvT0yOP758Mhgree0TppRFyQCVwvV6Z0sTJB1zl2WixQrBMd0TgX+BluiE5BF9gCeH333/P7XazirPsfPnlV1wuF377t3+Lv/t3/w5/+k//B/yF/9t/wnq78q/9a/8z/ui/8q98Vsn97qrhZ36+Q9vHAeGv9qZHM6oUs2zpxtLzWDdgZGUkvp7mC4f7r9l0eP9Uh8fXXRiMV6OBQAzYLGoU4v2agio9Ru9z2jWqRXT7e8/Mqiebycoac4KUGdWNVhvn04k5JXfwFkLpR98ohEjZrmzbzuPjk40gYEwJTZkYM999/MQPf/g7/OHljzAnpUXLrlqrJHHvmN7N9kEGNdIOie72hgNO0tEgHdjdNIErPv0JHVkCaspVQnDaZ6Q437z6kJ3VJ5rNDhd1bcfJe17mY/F37UyXBQnC6+vrMUYzRRtQEnqwMkxNGBdiZJ4mdp+WFmPk6enJRgfK3dAM1aO6STGyrsYjz9mtStSsiXuzKiUEGxIiISCD7hqCCdZUaYzGIJ51KikHUrsPd+nNsPOybbYZY/DegDBNia6NPDm1VJsPgXGascMeCV8wvoirurXIqCR6Ix72Aka9ExWINuWtbJvh8qMpLDfk8mhVTMADD9RiGbAQkQDX5xfqvHO6nLHSvJn5G52cfSobhbrt3G5X1u3Gcj7/QpvbkX3SZHTUFCM1RvZSWbyJXWtxwaeYFQvW+A1esquPLu1iumCJBsds+0aowZp+KdIb9BjQLmwol4cH5tPCspijr4ZAi9wPSElEh9E0NGOPRRs7U8vdQiY52aHVesxYsQmQypwiyWeK9K5uBW8W2TlA7daQh7vVuE3D82a7GESlzeCky3K+V2mq3F6v1H3ji69/QHS34fcf3vN4udDbCHZq8+NTdOKCVZiSEiEFPxyT4/iR2qtx71FT9XubdxzCjsoD4eg3IvD9x2/5T/7sn+dv/1d/h47yt//2f8Xf+lt/k+fnZxcE7vzgm1/ii/df8Fv/4O/zW//oh/RaeP/0hLTCX/krf5H/8R/9o3eoRA6S+M95jQPlp5xpI9n1DL13q5gG6cR2k6/1o52NV2lywFCIQcJxzHOxGaa2BvFDBhyTOmoco46L9c6sT9p8rDQuWDWoydzG7lWXiJJjoCGg5oJrtuRujhgzDw+RbTcyEVGIXWh99ycmRt8/Z787xpxq2n3GdQi8f3pi3XYTjXDHqI9gnZLNS8BPVpQ8DLdq9cAO0zTTmzct/a5bECmHhfg4AbsO2T5Hcw6RI3sX/9kYgnHEnRabojlH0pQabVPfbjc72d1b6Pr6zA9/+CP+8B/5r5FTYrutlBg5Lwua7v0M4NBGqK8awyGrV0j2+dlX06gOMtkx64YndoQgrLdXyl55WM6MOfZvG18hiDVTW0dpBGx+d4yJ0xJ9mpwP+0iRvVcb+RncNRKO4U45ZtIlmVuA/16tlTicaeP9gOhik7tQWPeNHM18MKZsow+jZVQhyjFzYPKMUfZimHdv7K2wxOUYjtR7p3SHbqLRAT++PCMv8Acm61l0bwom1fvgnmtjyhO3deXl9ZWnh/eE/POtw3NMLPPM6+vV5hGnREzRx8Pi1WA+BtmUZgnHaVlQtYGQjDI7A47XE82e4vWjjZo9v3tEg6KlU3o3WqiIZXidw/NHmx24MWQfYm8CrBSgBev3ldKc8n3PV1vrdgC0wiTJIKicuMwTISSr+lR9IJg5p9bW2OrGKVjfTw4DP/cgcprs6/Vq410dwhqkDO3Wc0xviA05Z6Z5JqRkOg1vkBp0haELfechXqyPiE3Ok2aHakyRMCC9N0H489j7BoNQg5j+wd//B/yv/s0/xn/2f//LvLx8ZNuMDWmeW5ZF11r57d/+HcZsi5wS79490nplLYX/4r/46/yNv/Ff8k/+k//NN/f2bUv387pCPvu33+cleFV7twO/T8gc7rJ3e23GMeGB3nOyo1IZX9uqP3dFOCpYTxLtF4gBStWjd2j29F6RSEB9CJXAMXtE1cwFG+owo/mN
ISDdPfBqIsZqTfReEXW3DCqvL69M08QyT8zzTC3WQ22YuDIqhC6WBU7LzB/65W/M0iJFajV/pWUwgDzrb93oUepTjg6qWfUgr3ZTuzbPZzu9W6Y+rCx+95yJnNNh1TVGhA6V8sD8Y7Tg3wZtr43MBcA47wZvWnY4z2c+vP9wKJxbM1O/2g0CizE69DBWkx6H15imN1xZwX2VvA8wfKPw61pvG9999y2vrzdOpwvny+W4X9nhKDBl+vX1xu6KVpwuKx2D30TsIZWKSjB9g9Nnmw9qssTYDsNBb5TWzIKjqxuJDdaX0dkaA8dUmjRiEiQ5e6Iblh09N+oe/E7TzJJn5jQ4+X5/XMGdkgWkfR9wgV1vR/nw7j3v3n8w3YUObPguAmpqJITqszmuzy+8vD4zBtT8rNe0zJwfHpinbD0tr/Smyaq6gTfTlG1bqcMXy9lft+1GyhP5zVyEoPYcz6eFp3dPVFF6q2jtvLy88PLx+W4U2O7Y9vcfP/Ldd9/RBm2xd2PggdlSdJvieCsbt7L5EhsZqr3HHL1qlUQgOH5swSVaCWHfsXeu1yv7zRKGkIaIzSbfSbeq6vX1hd/6zd+klHIEtaZ6kDHmaXaigV3bEfx6P7RBGu70SFqnbHbtpRRu68p2vdHVKbHVZhbImOfg0dLElMNG5k3mHoRvv/0J/+b/8o/x5//8n6XuG7Va8tR7P9hCrTd3iU3M82xOyyLUvdKcIfYP/t7f41//n/8v+Jt/42+w7/uhq2pt3F9587+f/hqN73GIHlRT7g3wEYxr2Xl5eeHT8yfW2+2A4kcS+LYnMijHgmX5ZnL4+XWMXkJM0RnR3pOMuIbnPhWvdqsI7n4N3goX60EO88fjs936P/kzz9PsIw4ikqCJjTmdzzOzIxO92UFiCWVyUouSerASJwZzDW1lp9ad1+sVUeV0OjG7eMOU1uGYt2DNuHBYfw9KYivGzjmdzzY6MmejJzpuqv57iDgne0JSJBTLhoaflA7cSvUI9iG4h749DpoaJBYdXqjNvFbmeWJevj5QyhA9QxqNHrHGYUfuzU15owrHDPvUq5aOWRbvpXA+n83V9nYjxsDr6yderytffviC919+xbreaM7QkhCOmRbGSjJ9BqrWwFbjvNshq4cgR0QM+vHvPu5DG6V+iCZYU6N9RrFmWRbYujFiWqvsdbf7czE/q1DtHge1EaaKEntEQ6eqaSK6KNWGKDP44LU1+r7boTFYE9UbXb74DXOPPJ7PtrFwUzUZG+5+UFwWw/pv+458emY+feR8uRDC7zXd+92vGALz+Ww9plaPsZ5VMbFcCLyuVz59euZyuRCmidI7z5+eEQnkkMmTVaJbtV6LdDO0nFL2menQ6Oz7RqvWzCPa4aPBqomy76z7Tp4mTnk2UCUmOq6Qxkr+KU8sg4ygLtAjcA5CzNHHWipl2/jxtz/h6cFsu03vYx9b1e5ZyzYAp1UlpOymcGMKXGPJE199/RVLno4E621jdrBv7k3hYPRRzJAxev9Pezdrj9bIs9mWtFZpLbKVjYUTZrXSAbeS8MMNcVU++sajyddIb/zxf+d/z5/7c3/mgKQBo9MOTcMRB/VIKMf77L3T6OSYKVL5f/7ar/Gv/k//J/z3/qV/mX/un/tv8YNf+oaf/PhHvHv3jv/6f+OfOEw4Re4Hwlt19r0vqUcVHtPdgPDt//bd6f36phE9gKK37CAZ/6fcx9DdeybjsBhmh6O/gZiVecP6EHZgBWLEbHrEIM2m1foUvvc69wPoaJKrkILQuwklU1TWAfsJhG7sLxEhEWilUnrzuSDisdkOXYObVOwE6crr7ZVPHz9xu93IKfHoGP2glmlIzmwwJ9ERxJrjqgLs1YRgOWdz3vQvTQi8vr4SQuTycDFY6PmZW0q8f//eYZR+VA7Jg0q1b2+K3hih22yIEdBLbSxO/dy3nWGHMGicEoIxFILZlS/zfFc+94bhmOE4CBV1ha9x1EvZiSHy/OkjikEXVZW9FJJmHh7ecT4/cbmcjXbaG70a46T5NWvvdlh6kzGmxMuLQyY58/Hj9zw8PHLKkw1rLwXJyawscEjkTTbeUbP1xawZuq/Jqt2eF3e47+XlhTxl5pT54bffotWarylPIM3YTd0cVdu+c7lcKHVDPMNuXTktJ+LlgRTdB6kZb99mSEDXdjTotXfv4QQkm9NkG+K15GNbsw0uohTi5eIDWH724TBeIUZSCLx7/47b7WrQmEMo48DYNmuGn5bFmrnrjXmerf/la7WL+/YEOeZpqGf4DaNFv//iA63YNatYf2VUgF999ZUFW1UkZkKwYXl134x5lbPLztzArZgwdF4mgijb7vRfx7rNULJwyxtnH9kbvbMZxUbUDq5+KbtX0mJrSGyWRsyZH/zgBwfce4dJzHByzGEOMma2+LjcZv5gIdpIYIAeIgRhdq1NEJyK3dFW6c2mu10WY9KpwyIiweZa6+/Nnv/f/6+/xv/5//R/dKJAcvGeVTCD1g4+dc4dHgaFXRXkjYXELInLZeG3fuc3+eP/zr9l0GmceH35SEoT/+J/91/kX/6X/vv80//sP8Mv//Ivk9L8e5x/x34SfeOL5OswSDiC/6DYnp2KnCRRd2MU4r+jMnovv9/rDT32zYF6oHHdEjhaPZLC+3Xa79ZQCRW3Y5GjGjFfKe7VHGBDxTqBxtr3Aw8r+2YaCb+eoTXL2ckjagmmyd0CqXczYdhWa1ROeWJazLv/cFh1nvvuc1JTyuZUiH3o7x6iIyFQ9h1RY9C03pkmYzst8+xlqqtMY2SZJ/OsOXoUZn5lDW7xjNEW7pQz1+vK9dUC37vHR5scFcwrPTm7ZkwJG46xpXcCkWnyhzrwRREQW5y17HS1HoU1Ga2CWtedecqcLjYHY86Z0prpF96IkcBEfvtekTkQ3U65+IE2T5PNn9C711BwGGs8ME0BOSZlCTSzSVm3HQ3mBkuwkZk9GLbYxQJsR+2AEUWCu83mzOO7J2QIe0rjd378Y5bzmS/mieKNEztkjF1zOp8ZYxoVw5DP+UTpjXXfXeQz1rZl1YplU9nFX93FeUGE4XIpEg9NSnB6bcHuR2uVvWycpp9fSeRsxolTziAWXIZjaGvjMI2UUp3pFVmWE6dpoddmg6lasWDjGXaPSi/VMv8QmaxRZNVO2+h1N+jKGXX3DDPasCrxuQ77yrYXJuCUs01yHMyWCNknsVVVaOYia2NvG9M88wd/9Vcts3e4c54mfw7W+Iop2SHh6uJ1XRERJofQNm3Gqoq+2XEYQ++K68H/9yhuMKbYEKYuED2QNpS6bjRg6Z1pPrGx24S+00SMBoPu+048nQ8sfGDt9zbEHdL6k3/yT/KT775lnmdutytNOznPEBpl2yzeePERR7NP7/CbiPWZUgikOTPPJ2KE19ahNl6v3xOAl+dv+dP//r/Pn/5T/xf+0B/5w/yz/8w/zT//3/7n+R/8D/9H/OAHv/J5MeDXPHRW5qTbGDTXEYwHxVoQ9lIoZeO0nAjD7cHBbjkCvL5BuuTzP8c9wphFeBKNmp79rc06GJRea6E6A9J
s/D3R2hHzieIq9f/Yw/+5s/pZQZF2SEUkumFMOylDgDXlk8Cl6FEfLPx2TYXV5zc3XD8SSjy+9863O+9dknvL1/R+MaQtOQovy9J9dPuLm6kWhPD6YaPvv4E4Z+xX3eiwpWP2/vgJT1ktY43rO66ay1Ekd9NSiyWv+aJdfKZjXw6efP+eIXP+Wv//ov+d3f+X2MkfCr4D3NxY5B4Zun45Hj6YR3WzC6sLYGQyDOk7ynzkkUr4GhE8NizUlyqudZ3qfgabzXsKiiJjnthBrxCn391S95/fYdv/vbvyXcN4fuE0baJtB2PQZ3ljsPqxU5J46Hgzzbc2J/PPLzr74ipcj1xSUfPf8IXyrZWFKWdMPxdOTy8hIfgmZyJ3zOlZrn84Mxp5kaDJ0d5KCzTsYd1ipQyoKVyooCU4znyM1qLR9/9hnxpz9hHiNNIyEZRpeRFhmTnKYZ33Y8nPY0TcAZIBd8CExWgturHuJOzWtt05BSOcu4UpLvpebMnLOoeqxlHmVRur285GloyYgqYxxnqhEH+NJen9HlWMgSquIQ7XgqEjEZgizrnClY24vGO8mhZqzhl199RYqRZx89Y2d3GKzA4XQbdooTVauNUgq73ZaLyyse7u64u7+j6VqcsXRWzDLTNAqVtxSJocyZOfes+47NZo3f6tgtRTHXVENTZa7tvQgQcgTfyFIs5aQBT5ZTlIdQEOJ8WNKWSrFFsRGeEjPWyMWPkfyFZXzjncAFT6cD291OzYeW73z726SSFD0czkC1koSAWpMYEptVi/MyfonzeM7ktVUQ0LVUvAv0vQTdLP6FYbWi1cTCUgouO55e3VCtdKSn08R2k0TiV+v5Iv11vmoVoNpuu5U9VBJY4H6/5/B4oNZC1zbgPG9v38m4s5EdTUF2R8sObWGdOS/y7lrEyVraRkaiup+ySfwPnelYr9caDevPIyacFF2ffPKcx/2WN2/fkN68pe9aSoyyu6hFfzfy3yxUKJXQOnKcycZKYVESaYqaVSGeilFHlMse5eryUh3IorhzTnwbXSeYne1QeZEOHB7f4xSPY9AsmlKISR3VVVDXYpkUdM5CTTPWUVLhN37zt1ntLimpkHLGB0l8u7q44Az0tpYSI0ZHj+cdJZV+u6EdVtgHMaAKAl38JTHls/N+uSBSLviS8VaqcFMzxVhcReX1UpRUZ4hlptbEeuj48z/7E37vn/8LhSV2OOexTiStj/ePTJNIc4fVir7vzzuCKSVMELPcdDpB24oqTjuO43GkayvjONH33XlcVYsUgIV6Hp8uz+cYE/3QYbwwp0iyUDfG8NVXL/j4k+e0Tbc80Nze3TEejtzcXBF8YLfbEdSwl7P4LKSoSRivmT/WsLu4+CCucQ7nLL4LQaIAFb2RZgnssRXG/YncDXLZ6hI7IyoPjFH0tvxac87UaeJis6H5/g/lMK2FoNgDgz783rJdrbDG8OMf/4RpHvn+D3/I0HlSEf4TTsYl1fhzUp7EjmpgeFocpvILifMs88Ym8P7uXqrRlPDXVxK40jVS8dR6Tspbgudz1qU8Fdv1jKcTtULftsSaCEl02FhD0BXXIY0cjycuLi/49JNPgSI4E9kjnl3kc5aFfq0F3+6Yc5RZX9fTdh2Pj49sm61I2krCVUvX9dIitxZjduQUdRSIjJDUn1CyjHEc4ryuSJqdc1Y8G9YyTzOn6YBZrZFfYdZvUKo9bzhTPZe5ekmZu/d34kq/2BGso+laOfRLxngh2z558ozQir/AWEueZ9AZ/tItzKP4SfqhxzYBV+X3FlzDFGce7x9Yr9ZsNhtiynh1v9fqzyMX77WLrIsJsgKieErVUWf5uZrGcjwcaNtOOohcF8XjP/plvvHfcTree/fuHeNxYhz3IlENXg58NTSiuRql6DinVr1IBWpnjaj8fPDk/AFV45BxU8wSEZoaYQRdXd1IAmGpPB7FOZ9SJDethBbFxCnNOGPx3hJcIGngrUlSkUqhEAh4Xrx5x267Y7PbCEzPSW7xYTyyCh2hkWJojvGcxmeM5EDEGNkMgyoMlXrgIn/7d3+GsRGvShqrVfhUixaCqixaVJzfWFgvv7ph2PJf/av/NevhgsPxJL9j50hJqm2qkcCknOUZqJCizOatE5GN84GL3SVv3vxSPm/95xDvr8h1raQ8Lgq1nDPVLaNzJBnP6/drjFwMbUPbGPb7Wy43F/z93/4NX//i56wubnC5kvJMqJ5RY0JrKaxWa3Gs66K/loItBecDzlmmOjJNI8OwUrFOVluBoF2ck0yIWgEnSHOD4P3THGnUBPzJs6e0/aBLcPE0LNnvP/7JP3D38MBv/OD7dL1Qt4e2ZdV1soNUU64x8PHzT+Qy0zGwMUYYT1bIwouy0KsjHGOxNoh1WxhDGV+h94HOOa6vLvFWliRGW2fHB28DQBOW7X09B+q0rTo5s2iLHx/3nEax9JtSNWUuc/P0BqwlKq/ealVmMcxzIlexod8/7nn//j2pFknqclY1vzJr9c6x22xI08T93R1vXr8CJFKzbzsWg5hDyZsL4K4WwfUGGeXUXPBBKkRDlTmljv+Kvkwow+jh/p5SKrvLHRe7nQTXG5kjplpFUVULgypxFhqrzKAnulXH9fWFVMokQtfQb3uaxvN4PHA8HfDe0K17+qEnlUysVSSMLEYqg/cGnMDqUplJaZZQFgq4ShPas2Kl69qzUg1gnhOHxz3H0yi7JrQDsvL5mlIZo7Bg5BriGxGtAhOUP2c+q3v6phHsQFUKqRUJpgQtJc06F+nwZreh6RtO80iuiVIXV7gYOEUkUrAWpbyqP0NhfsE5qikcDgfAM8aZ0+l07j5+nQsCPdTapqVTKWxW/tR63XNzc8PV1TU5Feacuby4YL0WOjJFdSp6uTn3wR+wuN9TTPL9mnp+wRcj5rAWPlExlb4VJPjhcKIsZk3f4JDl97e/9S0++/hbXO8EJtmGRokERj8zicoVIUJm6DtCq/DIxWHrAxbJ2nbOgVXsd5YC4HQaGU9HVl13RsJInoHjZz//Ma9f/hK/pFHq/8wxS1zteUYut4NRR/559IanZHj28Wd8/MnnTFPGOWgaf1b9GSMeB+eUaZWEwzTnyGk8UvKy/LZcXl5jkkJAjaJsinzuOUWcMfqeiGR++b3IFEG9Ilm5bKYK1ToYrq8vmE8nGiCdTvzhH/1bVkOjcnAZt59OB6qBq8srdtudFptJD98qaqcsxXbTdQzDmqYRUUxWFE3TBJquw3/jz7WqTCs10zTtByGJtbi2JSVFb1A/5JI7y0fPnn9IRNSANescTd/p52PYrjc8e/pMTzaoVuTSy9ienDmejjJOVbFRqvKzeFM1c0DNNWlO+CDmOnJmGidsGzDWMsVMTBNt24nCq6ISK/MBiaFtjLTA4q40XlAXtUh2QM6ZX774mhwj3/78c7a73XkEVGrl9du3xBh5+vQJ3si/l3NijLNIPxE3onGWzWotCF3V6l9fXbJarej6XrwYephl9WRURLIZY6ZdlDajmNe6rsMaw32cmUuSF7cILjdjCCox3K5WNKGhaeTDlMWd
Low1cMiZgLVyIzeNuMJd8PhSzgdgLnA4PDCNJzKVz771CdXA3f0tfdefjYgSgpNxzogBLUPjghqtdF9kJWAEIJRA0zTISDScw4McXqSFWQ7hFCNvXr/mydOnDLZnnEbiPHF9c6WKJFFUWHXtFuDh8YFaK8MwENqGeZ65v31PBbYXO2rjVQdu2PaiToo5CxCywjSO+KdCG45O2+k8Y5oG487rxA9yzep+5dmw+nIYBLznnGO1Gsh5plYJ3pmmkRAcbe3O/+4/9mWdpW0blcM2bNdrEVW4pVOT8ZDOJgiN5+FwoG3FsX04nCTFcWFlIfG78zxzPBxlPq7PqHMirV3+mima8mcMp9OeYbXW7GYZ8B9OI10TqBbGNNPYRl3i+l4Zg6kC43NG8OFusyHqpex09OaMk0MzS9jNIkYRb0QQZzmN7GJUTegsnOYj/8uf/RGpTpp7oBiQKpklRlaE+vcAHBIWkKnGS8dhKqlY/ovf+n2c7Znm0/k9MLaSouA9pAMR5tXSGcRpIqeKbzSYzHkuthfgG6jTuQgyRkZMoqIURY9RW3rBClK9Lqq+RMnCF/tgtyhcbje8e7fn4XFPt9nwox/9Kf/7/91/R9tv1JtjWK/XXGy2xDkSYyKejvSapBmL7ENTiszTSGhF3p5SZh5HWaI7w2F/1DPDsNps6EIglypVvxOydZpncir41hOniXmONEEyfNq+o1pH27b8s9/+TfI807SdGJ+RcycYR1J+VdJur8wzPsgO8xzAlCuH45HtZiPZ21l+BpQObGupKsszpJgEU6tV336aGReEt7UC5XOC2GidZ86FedYsWGe/8c8FXVZLhb7qW1n2qRyuWtFL3z084NpGM6xlsfT+/p75dKLvOq16DcNqhTGG8bBnPBwIBihZQoGCZAHknNlttjy7ecJus8FZx6x00DlLvkE1MOcMWTKDi+qGS0mMGl5fa8H4QLDh/Es01khuQa26uHRs1oMspKK4pH21eGFCCwpZL8+l63JOKmpzDqYXN7i1ENPEfv/AdJpwxnFzfcP24kIuxv1RxkhWJLIBJ//Pe2xw0m10coBIDvISMaoxqVNciiiqhbZvdO4tFfCTp09Zr9ekLOMi1zSMx/Gcg933HUEvW6nWZF4Zo3QP43FkPJ047B+Zx5l0DnMRiBlqimwbCQdyznIcT8xVEtpKyTI/No6YMlETwGqR/Ib9/kEOP52bmuWCzfLcees0DEbkfyklUVCdTmfvxD/6pWfbstfpuo6m6yhlloUpmheRMvM08erVS+5vb7m6vKTvV5rE6BV0Z/WjqudnBycmVECQMxi9IAw5xvO3Idyd9dmgab3nMI7c3r1nXrDu1oKTl9t4x/54xFrH4XDg9at3HA4jsSR517wSFHLSTPgoRiwrO7dYqnZv0nm1Tct63alIxGM0x+InP/lrXrz4CT5UjPH6nIsKJmdRClktIxYkuJjXnEpZ5TJa9Wu+891/Rp5HClHMukopmONIySOlZqZ5ppRM18gOIYQgh+g0EZVE/PHzj8WrpDsMIbDK8nlx+hvF35RqzmpJlNYsV1iGmvQRkA7It4Wb6zV3h0fiHLl9+4o//fM/wmQ5TE3RPD29bJpGgp1STqRcmI5H5mkSNWfbaGEqoohhLaOpNrTsdltqSdw9PBBHuTCjdu0WMQG7EPBe1GxB/TYVlO8kIykKBBfo+xUPD498+eUXvPjqa+ZpYsqJ0+kksnVdQqf0AZ2SshQRKQvRuut78d1YzcR2cs544x2tc9RiEBaSOfNcToc9m8327JHouh7fZOocGVPieDxKJdR6grFUJyyfEpPST2XUcTpOYKpor61UKJ998glPnzzBesfD4x5nBUtwf3fLxeU115eX7I8n2uAZ2lYWLaUwn0ahwBqIJksspLEEa3UxU3k8HgXO1XiV2k6McWS1Gmgaz5wyjZWAI5AOwoWgudmw7gcqkaQgvTGKYartWkiVU5w57A9stAIIyIdu4JyAV2olW6g6W616+LWhkTZYO+B+WLHZrilFZZ41a6CPk4O8k0O2cTLCM04ugJKzPjhSLZhiBFseAgUoWbqtWDKuql2oVMZRXsAlKyB0w9mh6YLDG08p+TxKAjHoWJxmO2y1lZYLousazM0N3jr6rgNjmOJMY2V0gFkcwpbry0ve39/z93//D/zO7/wWE4b9/QPDMOC959WLV6w2a7rmRqoZ43gc90yTCANKlSVpUmNY08iFezpONL3ItMZxFECdjr/+SV+lqvfGa+HSUlPkcNjjjWOz2ZBrZbfbKaMf4iQ54F0nh2vRkcOyTO27DlsNp1kko7PmU0sHIfyoolLkAvRtJxeSkdlwN3QyJgyeHCPWONnNlYS3HksVr0TwbNZCd7WjjDmgyvzQySFqJIiFBSMRglyuZ6yEA4yX8UsByDw+3vGjH/0h1qr60EouCqCGOZWnCYVI/zxVeNUPfy0n+OzTz9iuVoxxogmBaNJ5Z9A1PXNZls5SRC2fY9O0mGCZS8TOE/Oc6NarM8blw6xLZbZ5UVuJpNwVQ8mQbSU5YR65KmpCWwNGDahYQYJf7lrevoW729fUavnRf/hj/uB3/yWbiyfMkwh1cs26l82yszFyBqzWK4lGsEJ7rUhhJd2tx1oR+bRNw263pes6QtexYOHjPOOtjGetd8IKKwVnPW7tefP6NW3Xy+h1nqlNQ1KBi3eWy6sr2Qt6T9Yu0luL8w0ln6RDQDq0h4cH3rx+Q62VzWbN1fUV/bBizhJ2ZICmaVkYGxiFlUlQiRiqutWKtm149eYNr9+8wSAYhP00UQqsdxsuLi/k8KqV02HkNE48Pj5yOByx6keIJfH29pbjNBJz4u72juPpJK1YCOdftms8Hz3/hIuNOP/aVtOwjMG7wKpd0ba9PAhGeEegjkljKCmxPx548/IVpxQxOB5PRxonS0dj0Jvc65xUUtT2pxPzPOtuRC7L/XEkzRFjDI8PD9w/PMgLjASPD30vYwNdj+VlsWrU8ZoTOc76vSqosAoSZHkpoSjWG5quAWS2PMWs5FTHqusEkFcEEuiMElOtjCGiVqJN20jWwSxmofqNF22MSYMVZbafU9bvC0pZxk/mDHUrSQ9LY4k5cnf/QNL/TghBHZuQla0UnMU30tlUnUejy/U4z/LMHA4Ka4RPP37Oql9hqwDzhtUKi8ylp+PIPMez/ny7uaAdOqFcTiN3t3dnFLL4JipNH/BWLrdlIQdStf9Tvrqhp21bQUdYK8hs73j3/j37gzjV53ni8vKS68sdSbk6ORdK+kY3ihjqBMFgddxRCKGlC80HBLyGONVUSFEMgVHn6DI6kYPHKX+iKKkYqiiDSiK0vXLVDKGVEahB/BpimtRuDVGvZcR0VapAEWedeYMgKUaVQQppOPK3//l/4f37XxBsxlb1yCCu8ZyiXEJGzKPZLDN+qzL5hUCbIcFv//Pfx3cdlYoLFq9xrTllppzkUjFIJEGVQscaPaKsCDe6bpALe9gxDHIWfDBKoOcFiuJYLo6iSp56joaFD7G5S/iViHAK1sPz59d4G0nzyM+//BlffvljgoV
cE6dJOVxO3uvFwS2ZMlafxUI8e9DqeXmfa1E1pjwPUKkp4ayhbTv69QrrHftxZK8jKWMcs6Ldry4vWa16SVVcZLM6qWiahouLC3a7C1wTxOVvrY4xj4xTFJ+ZtVIYGhmdb9drYsxMc/rgZTGG0LRkwJaczyRCCfBIjMcjY8oMfU8qhf3jo6ggVIvdaRtUaz0z9B9HGTk4Z9lPI/ePD0yK7G7awHq1whjLOI68evOa93f31CoyTzmQxOiy6nucC1ANwQaME7NME5oPjHRFHzRNoHENVfpw9uPMPCX61ZrgLPM8UbJgDLqh18Q8eZAf9nseHh85zTOxJLKpJJMV8TCT4nwGYe0uLlmvVtRSJKAl6CWBmHecEUv80h6kmPnlL37OF198STWyYLXWYry05TUVKIJWMAYd8Ul34Y2og8BSi7TJy2F3rpxqZZwj4zjKIaIqhVzEz5FigWzwXt3MVZzuNWWGfmC7XuOdpTGOrFC1xnmIMsLLSaqIFCMlyiIwJpEml5J1dCLmt5oqb96+4+H9HfNplDjPEMg58bh/5OtXr/jiyy/kInOe7cWO588/wWFZrdYYZzkejwTvefrkGRfbjXQEaWY8HLHOsFmtMdZye3/H2/s7ccfq/qskicsc55HT/oitleN+Ly/jPJ/n1b/OlzGG1Wqlju1AN/Q0TcNqtQIrs//QtBLUkgv7xwdAEuy8l66mfGMmPc2TjHFTFlyKE1fzeDypgcuqS7+QEIREmqMsg7WiDs5gvBzoMU4aIyqjqMVj4pw9fw/GyOxdsj9k/yWHJyym2GqrzKpr/YBVN1CMHF45F7xN3N1+xd//7Z/RhoxDolBrVeFElShVVNYukMT/f4h2GZF0qwt+43u/LRL2GtWcKOOl129est8/EHyLdwLprKUSS2bOBVN1D6XeEuc8u6srqahzETwHHy6FopduUUluYVklSZpkpghWxsrIaemwloPcuspuHfj+957ThcLx9MDf/qe/YL+/Fdx9zhjjqNZhnRej4unIfv8oXpjls9ALHWPo2p7gA8F5jC6np/HEm9u7M5oFFNOPZd33eC/j09PxwOk4UnPWEVSDVeNlzRl0rO9DoCBYcG8daZ55eHg4nxtN8Dgr0nJTDeu+5+ajZ3z2+Wd88unHWGsYxxPOO/rVIFk0qeBrrfJgmohvW4b1GrAEJ6MSZyzf+fzbgizIAn3rbMPLN2/YPzzw8fOPNd/Z0W22eO+52IlqJ8coaWmlslmv9QB1fP7553q7aqpUTByrOAVDgThnWY5bCRJx3jOpNrxVCWIu4uS2VQ/OUulCy6qXIKFTipBnVquOOCW6tsE5y8P+yOPDAzHO7HY7ALqu0apZbvScK5cXF5LylQp9E1j3nQSB6F5iydbwVhrm4zjivcNT8cGzWu9Ym8r97R39ek3XtQQn4TroAV0qxDmqPl0e0mmeCV4IozLmk3HNmR1fxWXdeI/TkcKcBNNwc3nFOE461pMD4WKz5TSOkhmhSABrDA7p/pZuT1OnyaUw6AK/1Ir3QYQFzpJKFpCc9SxxwoeYBMvSDXogLyMeQxNagh+5vnkijm/nIAkwEN8oCsVpmp58AKHrtRjxJF/0Mkhn4OPldiuOewo1RVwrXU0XGnKBd2/ecfPsCaZCXPwzv6b7Wg58f5YAtm1DSi2b7VZNoUbyw1NWDtVOLoQ8U4G+H5jHkTHNvLt9z+PjA08/ekbrW6r/4Az2Xrpn7ILiNpgGStIZ+uL1aAP74xHvRYgAooZBmU7OyXLzOM9YKzgG3zQYDQtaLgaj3iAJPbKMxxPWe1xj5PLTd9FaoLE0OKyv/OVf/nvG6Y3gt7OoiGpNYGfyrFng35CQFYq8j7Wo5Ff+XkyJH/zg+1xfPpGfJ3jmOTIdT9ih5/LygrZZ6U4tM0eZu6dZsDsxCD9OjJTw+PDAnGe2u0u+/uoLLVqW1TPUksi1pVW1kGj9Kq7ac1gUKsuXDi3iaiMvYDFYm6m2cnHp+d53n/Hjn7zk5Yuf8vb2Jd/65AccTknGjaIhZ5wi7+/v6dvA5cWFcNqo1CqUAFOlg3DGaqErAVYuBJ5eX9F1nRrx3Hlf5YM/izSaEAidGCPjPIsIxsp0pXovHT0iHLDGiVKwVJphOOfzrAY5F8dp4vb2Hc5Y1sMK1whqKM6z/vuyH6wV7u/vsdYJqt4YyUimZE28qqRUBROr5p9ixMlrDJINnQtd3zOnSKPqH6sStFXXERGYX60ikzXG8P7uTrDcm7XiLeQgXO82eCOwsXmOsoT1cNyPZCNz/pxkNJNKPi9RjTomRTL34aEsOeNNJVY58DP1jFhOUTC4u4sLLrZbstrR4QOWOai5RYxG0gVklZUtCOWiFapFkNcS+7eSihx4+vSGcZp58/YNbUqQxUdhjFEaZyQmcZq3NkhozSiV7zzvefnqFRbDxcUlT66uMNZy3B85TicutztyzRynE11pJPdXcQ2ZzKrrxYCTk8ianWRhxFToG6eeAFGNNVpdihROXIB1kb7py9dYeQ6WzykVmZcuKII5ZmJOrMOgYz2DCZ6m9TwbxPV8nEZSToJcUA9EKZVhWIlfIInKxixEXWN1nAjjOOFc4PriUnTl1mpVm4BIzAbvO9J04tXrNxRXuLy+5EN1+et/LW17SlEWstZgvWe7XmN9UPaPxIQKlNGSy4eRrWsCvbdc7C5krJTLmRs0zzPGwno1kKnnUWFRj4D1glGw1X0YYaRCKhNN42lDI91xkeCoJdVuOk0yrvABZyU7e472LGvt+xZjHPcPd6y2m/N7IkvdJeGwCr3YgPeZv/mbP+WLX/5njM+CsLZgykyuBrJUzxbOxUitGiBl1LNwjgk1kDy//Zu/I9WvC+Q8c39/x+FwZJ0zN1fXOMVcz3NknCeJFcgZ7wKNFUMgRnxaNjg2qy3Pnj3nP/+NyM3t4qBGMt9dSeQStDsQRVrNhZiMoHeMGCGh6o4waTEhMQJYgWc+ebpivf0OrTd8+Xd/wueffpvtekWU2xyLEUNcCHRdj7XSkWeWzwHmkilxhqbBVWViVWGUNU17HlWVHH/lPUNJA6UU4jhyiLLsX/c9IfRibrYWvJBehZAcBSSpju+26wSCCbgiP1/fDEBlP0oUr7VShPd9x2GcKfsDKUV+/JOf8tknn+GrYm5zreSaiVSNOoQGQVbUWinB6gcZGDpBOOAdRuNC2zbomEQenHGcOR2PrNdr+rbjNI2c5omVzn1NEmVDtHI4TOPMXCKh8bTB8+72ltA2tK4VMJwRjru4GnVxVpDLoQoSAVPPagJbqn4/FYIXPDgCKhtWA63e5mdTXRILe86FoM7IJ9fX8hjVD2HlxjpSnLFGTFEgu5zdZg0K44qqkgltEKWVmngEKSyH+fF04Hg4AZDblpweZT7f97x6dS/5032vHBtRaCVTaLuOwzTRNA7I3O8ftYrUXcg883g4sFqvhBBbxS8Sp5nbN69Zbbc8vb4B1c5b585uT2NFxfP4+MjD/T2+abi4vKDtu3NE53KweX1hi0GyNqqEHC1L/KgLWpwlThO3b95ydX1DE1ox7iF+h1
KdVqCikZlzwhuDbyxWUeB3d3e8e/eeJzdPWK/X8nL6gG90hGdEnuvbwMXlTp+TpBd/Oev9f52vZY9grVwADkfftkwYYpwE5a1zd5H1G4gWW2VMI2eVZ1j1hPAM69SRrRQBUeJUyIXgxCxqvRxWtX7wEC2z4e12c4YGGiMEz3QaqbN8BmBYbzcE5wSdn4QKOo0SV2m/QaXoB/GB+EbIrxV5Z1KGnB3OJfre8urVz/jzv/hDDFHGUAjvSNIVHTHLSEeenQ/eEBScpwJgvYgq3bDl88+/T1rEBBX60OK2kgFdS1UyajnvrYqzrFZbnBXcRTESJGSArmlo+pary2uhJqCYHv0dZyMJb6lKlsvywZeaVfgAJng5gPUcKZ7zWEq2SuiSubJeS07Hm9c/5x/+/s/ZXX4XfEvTS/Jf1/bYQS7JUqL+9wyxZvb3j3hvWa83+jHJ+Gwcj5QiIyA9WuQus4aisanzPNEPKwzo7ws6a/FtKyh/uyhKRVVXHJgiOdq4D6wsa2VEdjqdWA0DFxc7fPC8f/+OGBPt0ND3KzKFUApTSey2O37/n/9zEZUsElZ5eASgJ2aOhnGa6d0HVICpQlltgiAM3ty+Y7NagzHUFKkh6Nxf5X96aZSSabwjuMBpnLlIsoStVlQ7UxIuepoiNniaRirtthtEUmssTbP8uU4fRDmQa5EWLkWRSdpvjGSCl/l6sIY4zbhWuDk+i6bcKvG2lEwxwq0JVhzEyyH3QYMtVZGQQRtqBeeMtuCi9kp5pg2BU5zo+wHjJE/Y6PdUdf+TkQoiZ0l7897x4uULqPDd73+Xi8tL1pvNB+lfrZQoiolgHfvTid1mIKXI3fuXZ9lmrgLzmuZJF1qVfmiZ0kzTBq6ePhXzjr43SyVavtElmVI5jSNv373l6vICZ6/ln0sR74Qjk2sh6iXjvePjT57T+OYD5MzI796oI99guLy8Zr0aSElcyqlKdrJvGhm9mSxwRr1oZewWZOn9uGeaJq4ur0RqGBPbXYP1TkN1PGma6IcetzG8f7jncDgyDLIHs5hz9Os/+mVgaHviHJn9LMojI4wjgdSas4TZFDRIKpGMZLnXnDnsHzmPWmIme/lcmkaSDJ0LlDJLNol2zzln2qWStYbq1FmrVaao5BD0eNNIEmAVmaTRZe+SrWEQ1Ma5yldlkxBFhUoq1atUkalWySaxifuHt/zpn/1P5LQneEtMVqxnVqTPNltsrlKcGauxuBr+o5JTjEQL1ypS288/+ZSm78Wbgbi7/drSKNctppmu7ajF4IwjLCw4dXxnmyFNGCVH1+rIqfLsyXPx15RJL6lvvKtVLmJpjqy+dUo3TknySSyy90OBshmcrxhb1NtRoFqMnTF4knnkF7/8Tzz/1g/IdEwp4Zxj8O4cPgQKNTAG5sxq6AhNd15gU+SyGkeJ9MUoDLFq9rQJlCBcpryoyKwhzUlzsCukdPZSSNNmyDXhjSM5R5onQtuRa8YihUe1lhZh1JmSOT7suXv/nmG1oXWN5qIIwHK3u6TqiJdq8IsjmVwoWGl/jLheZQ4qC8xa5UWzOsmLcZa5uHd45ziOEynNdL05o5f7tczExpO0yJdXF/JDOiNSS0Sz3ZkWVpXcFVrvsMZysdlgnWMaRWG0BKVYZ88L6FKFJ+WtP1c8Dn6lavStsG5e3b3n6c0TWu95mEaMsfR9J+EdJUslFhq5vEqhcYIJEYKpl6Qm/WMF3ZApRuRspzGddcUi2ZPQnzxlpWomxv2ebrWSSzdJ/uzQ96TQUnLi4+fPWVyzFqG/1lo4HkdckMstx4prPDs1ejlj+fiTTzFFKnxb4emTJxxOI+/evcb4QLd6JvNT5xjWgxxCy8isSGLgIj6YUqRtGp4/e8b1zQ137+84Ho80fSN+mFY8MK5WCWg6zUD5cMhXUdlYhMrrTGBOE94H+n7gcf/IP/zDP9C0ge98+zty4XqHMQ50SW/lLcdh1Ci34tNPP+Xh8aBjGUun0Zs1F2zwpFl+dmsdxla6TkZc8xwJ4UMR9Ot+hbahmRtOpxPOLsFR4gROKZKMoc5RwpOoWCumNqxVCqyha4KkuJUikZUY9X7IkrOkTDcM5wIszTM5ShJYzZU4zjgvsbRoVrtvRA1jTMU4L8ttBOnw+PjIPM1YZ7h9/56Pnj/XKExLniMmePGj1IopaqQq4rrughgvx+k9f/zH/yMPDy/wbdXUQyNZ1XqYVnWZGyu/54IV4ch5SFIxRoN3jGcaI9/9/m/QtmtiypLEaD0Vi/Myn08pk7ImGB4nnBcywpyiXhqBXBOpFJHaa2F3cXVN2wwi4ND/ulOJPcg9YTUGUL11OINcvFrRg+D3TSy4GiXVD1E4yuhJjJvWZgyO23cveHj/ko8//R1evL3n/v6etBqkU5ApFcc4nfE+m80W76264mWXWpCRo1OUOKVgvMMZyzSN6k8a6Dq5UETdxjngq6pYpVLFk9FIbGlMiZwS+9OJIWd8CFgf1B+ik4IUef/wgLWG/ekoo6m8FrVoVUOkOsMdkLyX3HADsvzyMpaRvOQq0j01nyx+fIMeLrlgQzjfUqtOnHoLumIhaErlbTkVYQtZlU8aY865qo3ztE1zXqqKkiPikX1DjLKIaZtlxihKBaP6/tPhxBhHLnabc+BHQQx2KCX26upKsgtS0sWhKAMyguuYYiJXmYUvKbBCk6w4HNW789w1pZn96XRevM5xVI8DzKUw9CKVndNI0MNljBHGk0a4FkxWX0XXcBor6/UgnVFKTGrOylEqVZcF9kauZ88KCIws2EZeosWPUSveW66un+gBbGj7/tzN5JzEDFQyv/jlV4Sm4eNnTzHWEZwnGsBbrCk4JzGeaY7nSqgm4T/NoybGNSLdLVUW6DhLQLDuphbSlFipfyKlrC29oxZYb9ciC64iI7TWUoyhpkRxUrF671mvt7RtzxIRuUijSxFcvDUW30gnNI0nVqsNjT4rzjl+3VHTN78WT4oPQcaZOXHYH8ilsFoN7GMizqMuBjPWdEzHE8fjSHAW1zWqBpODupTCOE6UmuibltrJ0nrBdzgX2D8+cjzu2V1eSsCRrewf7nncP/LRs4/w9gNxtuas0lZz3qO0bUfMkX5YKdxOE+6soNdDK8iGmRFfHCH0lBzJ/gTM/Nlf/H+5vfsFxiRMSRrVKh4DwcLLSJoqzmYZxxmq03vsPGoy6vNwNKHjN37w2zgXiGkSZViURTsq7Rb3tyiX7t+/5/LmCmrm/uFA1zRstjsa37Lf34OeOTZbGt8QfM+xVF1TFlIBaz+olWQyndULseDxHTkvgo1lNyE8qpoLxRqV3ipzDkGXOOcgn/j7v/sjNrunkA2nw4GcZ5q2kRGsgRcvXvDu/T3rjTDqttudNBbzTFIkS1WKgjEGp4DGBern24Y5Z5wBZ73InlWuO08Tt/d33Fxdi8eiaZQanc77xKFrtdi1VPz5z856bj8eDrTB8/VXXxG+/W2cdrPi8hdfR+M9NaphtRiHpaippzJOMytlsH8zk9AUzZk24mBsu47W6M2dhE+zKENQL0Ct4n6c9YcPrpPln
JzyorYp0qJ6vS2XvF1jw7k4aZog5hTnWO73HDN3D/fUXGh6OUCO48x6vZLvtxbe3b6j7XrA4I2hGOmGViEQ4yxUTqrmAci4YKHbhuAYp8T93S1N07C9vCAn4QjVUvDe4JyMHtarlXgPSsJYWRKD4LmtNfgQuLm5JuuCl2yEhKpjgj40UA2zpv0ZzeswJpNjZMyZru/IFL5++RJq5ebqCowk1y2p7zEXXKiSdBdkYV/KoskWtUSMkePxII70eeL93XuMMTx/9lT+fpEHI8fMerfFGCOuY6eIa7k9Ca0n4Gm9kCWNsTSIgdBoClaak4wMrGXOkc1uw2/91m+KUqXtZD9TslzETjrUxlvmKql0y3MjAVTyIpeUSN5jjUg4M5G27bFWLo7NZiNquEkc4W3bUobhn3xBOOfP7lxjJR50tb0ANGin7umHQU1u6ezoXa0HzoRU1eifk9AMdEEu7Gmazjst3wRsEtTHaZyAKgTlms+X3TxNso+YZ0LbCE6/FlwIBBeUT2QIBEFaYMlxVlm2YZ4nvE4GYpyp1jDHREoH3HTiiy/+nHfvvsCbJMo647FWMA6mipGVIhJYjFEOlSiJ5Mue1U+1SvVNTewubwhtz+F4FJVVlcKtCR2JojJcKUBiipJDnzImOPq+JbQdxsguZBg2+udnxjiL6bFr4MF8Q3WoYU2xUJqM94acLM7ojB6rO0jhNZVF7KI7S2dAmT/UaqWjqMIMMylifebduy948cu/5Ae/9b/h+sk1h/FImtJ50rJZr3h43NPaQL9ZMyfJEX/z9i39smeM4ldyXoKnQPajXp37cn5K3r2phaD+mmmeCc6rEkliE5ZLDW/PLmlKPSsSF/Nmzpm+a+nbG0rN/M7v/i69Ls6Nc9hqyCUxnUa9fDRN8+7+Du89V7stcZxYd5L8ha1kST+Ul6aYs0HLnwmqWW+4zBQzHV4NPxLBJweE0YPdyq/BOKGBKtep5Ir10uZMKZ5bnrNKxohuGSdLVVOk1U05Mk0Tw2qgazxdtxXXZCnEHBnHSEmFzTDw1Ve/5OtXr/n+975H1/cMwyDy2eAkoa4WbJAPdzxE8jTStI1W+zKCa60nu0zU9hRtexckiUcWp7bCXMt5nzGrIa918gDNaabvGvFFUInzzHGKDG2gcZ796Ujbd/KzGIl/XWRsP//yS169fs16s+Xm6prVasM8z+cLeomxLDlTbCWnRakkM81vylNzLnz2+bdlSVoqyYgBC8RwOGXxGQzqbEX9FEbLs9Y34ESx462lmspxnCglM6xF5VWco2kb+e+qgWwIDcnIzDW0A9VIYltFxAM1G8nGAJ23Ow6HPeM04b2lbXvWxuDbRhVO8j3UKTKsOhEpZCjTCPSKnMjo+fhrXxLeOc0BkQ7ZOC/omCzvxMXFTp9HSNZIVGg34JyRqEvEiRuaRggEplCOkYfjgfVqi3Eek5MY3GJimmeGYaBf9eeFcynSOT598hRwGjvaysWTMlYvz2qqUoFlSR5jZDweaBqv76uHVvhO1UKJkDAEFzkdX/Hi5d+wP3zNqrXEaKklUFMllkIxRXYh1Z7n/tVWdV3L6KmqgKSeU9gM2Mw0Zy4ub4g18+7uHR89fSKL7hB4fLzHtAK/rJqPYIHVEh9bDaEVKbdgWVSiWz21Ot69fUPXWjZ9z9uzsVtHcShmXvekxkCqBluK4DWcyKNdlkKPqjiRCqYGpDjWPYuRjh8vXbipAZ8n/u7v/5BhteLjT39PYJZz4mGOrDrPdrdhaHtCP2AxTFlwNMEHyah2Dkqk7QLzLKTgQhGHtOa/OC2mS4ySMKdG1/Vqhd9udREvF5itjoR0nbnC9cWljNC1y55rxnhP1wSJtzUGVzJtyowx4udJRvhWOpdu6JnGCR8CNSX8q7evuby44Gq3xXpHAKZRsk2tlQUY1sg3WxFaqhWUN7rMtc7hvbyMIQRCNfIL1Qr3Yr1lHOUAyikxTiNdEMzt4vY0VhrkZZySFWxnbOX+URKdhrYT45leVE9vrsHoDqQqJ6cKKbWmyEeffEwXAlfXT3j7/j0xJdJ+L2Yp56DxzHmSGWUq4GHdd5ywEhoSDDeXFzRKxVy+rNXMAGT+WWPGBqGmqtlYOD5GgIkoibJQaXzAhYZCpIyRuUZhqjiHDdB3/bkCSLnQ6mEVj0emGLm4vOTj5x/LYTDPys8pYMuZIumdE9if+iJkUS0jnaZpNNPYMk0zh/09Xd8TlL8zIwefbwLGWXXEO0qSdnXBgPONSn+KgkeJujgVsq18XraKv8MhLk/jPK3+jsV45Iglyk7MVGYVR4DBIYvrmGaaIPC+tmlwTaMV/geonPcywnq8u6dZ9Wx3lwxDRyoCqez7Xx/2B0LVXa3WWCtKlFqLeG5iIufI4ZCY54kQPI1viEXFCdryj5PE+C4Kk5zFLWycxXmLq4YpFEVPyJhz+fcBSjHsD3uC/v6Njs1qNRrmlbQ6VGWMjoFTUj6RkWfRusKcR0rSLPcscZvWFx4efsGL13/NNL3EuSxBP8ZjnVFEfcIikmeQLtKqRj8b8YopohFrKtktY6gPaq2Pnj3n8fHA27fv6ZuO3W5NrYW5JMoJgnOCuymSi120my/qki6632zQACkjo+Z+6FmvGtabteAvSpHsm7pc9Jx3NjKLB6ohW9lJhCwyfyEmi6x+eVYBcok436qs12Cs+A+SjoHMfM9f/eX/hxQj6/X3aNs1tu1wpkLKkkGdIkW9B8EFPv74uYxXqeAHYs7Mc+L+/hXPnj6j6VYgKmRyFWaYa1phuJ1OwghrW/nhtAg1VvIlapIED4o8n1SYszjDf/bllzw83PPsyVOeP39+dpR0XUNTpCtZfs5MwRsnaJuceTid8LvdjovNVu5OA5TMcTxx++6Wy6sLrre7c6xoyontZi0uV2DRblkrwTzVGKwNGL2F0yxOX9uJ0SeXzIsXL8AaPnn2kS5/5S7J9YN8FSSEBGd58/IlL16+oGkDH3/yKbv1Wg6jUuTCqvWch13UguqtIXnP6XSA0tEEzw+//0NWg2iLg/cC+CraQVDOhjLrHW1bNFS90K9X4oqtBW8Xy1nVZauGzOgLbFTKaEvlcDyJtl3DhkQ5JL+cHKOM76yh9w3b1Vqq4VLAVGqSzqltGlytnA4HrIHvf/+HGAOrvpcxUEq0xjHlifv7Bx7u7ri6ueFyt8MWQ8oV46ssghfAoB5mLjiO+0d+/vOf8dGzj7harWn6ARNnkhr5rLUcDgfG44lu6Omb7mzAy7r/aFwgWeH9b7fbs9chadJhrrI3mOIk3H19g01BJMs4TMk4r05iI2O4WBLH44F3796zXq+4vNgyzzNd0+Cd1T0GjPOB42Hio4+e47yTLPNpFMduTMDIfJpo2iDO6V/zy1gZJTrniDEzjqPEqxqZKe/3j7x8+ZKUEp9/9m2Rqhoxplpn6fteKkArL7QzIjowrpMqMKM7uEKkCoOMivXaDcbENB7xw0pm61HGOqHtwFYaK3LWRd5blDMG4pOYphM1J6xvKaXwcHdHPI6EztENcP/+K75++Vfk/BbnKiQrahiTkXqo
4Axkk7F1uYksigGjVtl7e4ygq41mRmPxVvYRzlS+9fm3+fTTz7i5eaYRApbgW64uW8aYVTkniqwlltWHcFYoVcRg6ryXs0bpBUE7565bUZx4tLTmZ8mFLrnKUsLJtEMW7x5TDKlGBiMXtnGy0Ae9bFz+lYOzmIpFGoBcZhIenx11euDH/+nf8tFne773G/81put5+/YdcRQvk1HRg7MNsWTSLHaD4B3VGLxxOGcIXce797dsdlvaoJdhlWw9W0WOPUfZQXnv5QLS5yGrtynnLOrBrJGyaT67uJ/c3NA3LRe7C5xzTNMkeRRBY4p9oDonyqdaSVZl87YQ5hn//OlTUFxGyYnVRvWyTcOqHajWQwZvCxVPKcIsMdYS44lMofGCS05RDSEOvTwMOI/Vpc3Xr17w9u0bthdbCRiydnHzSVvjREoWc8a5ShxHvn79klwyT3ZPWXUDGAdG3K/WCJJ4qcCWByuVSue9RD/mkb5t2OreIjin2uiiqyorubFGxgRNK85LFyyNVmcgSzXdF0suhZV5c62C1xC1lYyPihFFGMaw9o1IyYw8gKdxpvfCOOqbwDRFUhwZVj3zNDHGSKuwQest+3FkPp3oVisGlbm+v78XtIa1zDHyeDhQamWz3dJ1LbMyqDrnMU6jSAu/IuktpdB1A9/61udcX16SqdScJJwmfki6i1Pk8eEBa51A7KpyeeBceZSUsaaKwcga+n6QoqEuuAFD41pAHvjxNPLw+MhqkIAgH4T2mUoiTYmhFR8LdimaZNnZDx04o3p9wViMJ5mv1ypT5e1up/scOJ2OzPPMxU7yfv8pfonlIDRuyeIuenl4cspstjve373n9us7Xr9+zWazkcu9illyVJ+Cc5JzkVLBWQ3WwZx3MMUYutAwGemelzFuTDPXV9d6+Bf1fRgaIxXwN13ktYpxNSWZi4cQaPtBZbIGbwKmFt7evmA1wHH/yJvbn2LZY50hFtlmGJtEhVMEyS0BDUWyC7AYEzBkkQll6TKqNXgNGxKxiuyOquaADL34pGTxG5i1AzJVhCPFWl3A68Wkz6YxIpowSfZhRke88umB1UmEUxk0i11jMaMg04lcJCMmpXx+9o3zMBtGwHUej0cs5YgKqGp2Ss06brKyDCdD0E63ihm1lDtevvpTQuP5zvf/NS2Rd4+3bNdr2tDjrEQRx3EkpcRqvWachYqAgfVmS7m/583dHY+Pj9xcX7Pd7bSiV/OgqgCtDQpuFLNqzvk80QmNGDa9QSJ9YxKFnbWst1sudjuRxaNdN4rdp5JLIk5SxBlrISVs2zLtD4zjiJeNfpYqjMwYJy52O66vnkCx2JIkyrLxdCqFElx0FvlajYC8HLUWzZEwxJQx3uByEU18zpRcePLRM549uaFRRc7ZieotznghyeYCxlIKdE3Lk08+4cn1zRlLXnJlfzxI0FBKNK08MHGeMNYxaFJUaMz5YFwOe2qVqsEYaf1EuiGVs9PkPWtoTFAPRTk/mFVlrmi+AMvFYQ3OekwuRMR05b10EcLxySwqspwyj6cDrWvxXeAwnpjGmbZraLtO3L456YNpZYEJZ6OiiheJc8QPvVzSbSuOYGuZ00yJCR+a84VYMVRTqFaWYaYY0hQJbeCqv8I6DxViFsR0sF5w06UwrAdWG0EmOHR+W4R5n5K4VHOuFFNEemolqlQYQfL5LmE8cZrYjxL7WEth8g7rLF3Xyfw9i4tX6QxMOUuLjuFw3GONuPxFkmglRrSVkU4ILWmamKYTw3Z1Dvax1kmUqOKil2yFf+zLWTHRlZJlGe4sx+MJZ72EBHUD3/n293n29DkOS4pJwodqIWVhc2UtHJwT4vBxmqQbVdWZPBPSSrsqo5KcE29ev+F0OvKdb3/rLApwrsE3gtSXQ1SeyrvbW7ph0EsbMb1pV26sqntq5snTC9r9gbu7L5iOr3FmouChiEow+ErNjpRnStTRQwWQotDiKE4YZaUUvDFEIwmWWHOGSi4j31IBK5X6YoZONVNjJHQdj48HTtPE0HX0q7WSHpzE6Krs1GBxfkHgf1BETvOknaLM3Jd3UV9wqtxWMiIPomScpvn8u/VGDs95jvhTixnkdxCqtHi1QHWZZRnvnPLR6LEuUUhS5QPFFnK545df/0fGw9fsnv6Azz7+Dn3fy2Wp/Kyf/eIXvHv7hn/5B/8S5yGmgndBeV6O7Wp1TuqbpomUIn5YYXWB3S4k7JQwQf1KpZ4X/yVLt+uc5XQaWW822BCI0ySFkZd8kuos1omMHQRk6q2n7z6MY40W633X0TYNPiv3xQWrfoMKVhQbHpEkOi+Gnpzzue3LQqMjA+MsQKmiaODFneytl66ggm89n33rU31JPCXJgT+nqJXxhsYtiqbKaTpSc+Fbn3+mXJ5CyhFjCs7Dbr3Sk7ucHbdjjHSNldkkktAVdH8gjlWDC0GqBaNwLKO5D2ZZyMucNdclwVg0+/r8YZPoIYw+iFC5v7ujOsduJQHpxsKUxOyEc1itpjHQtR1v797DYBhWA9e7HWMbP7wE2p46DfxoQ3tuQVOJnE4Tm9VKFDzjSKmV3XpNLpK/kGLGB0cbAgUJV4k5ESeJeG2C7GOyrZSUSRRs5yQvOyWslyVYUVWEMUDRRaoGlxgnVNKu6844kKAJhaXUs9Gucy3FKzn44ZHjSUiWz5/c0HQ9j/sH9scRZ73QV4OnwZGzAOgkwElUSyE0lGI4HUcwVUJevPyerXOUIsDD/X4PxtA3kkf++PjI+/d3bNZrejeI5vzXaSYMuOBpS8PhJGHy4zhK3rGzzFEgeiEEvPGyV0lF6LRzBAetlaW1wdC0Alsc44TXjvdwPGIrdF0vWeXGMZEpaabvOwEDhhYfHCZLwTJNwjPy6t1p246uE3HAsjfzxpFNBhsgVqwfOY1f8/jwY0p+j0Pc+zUVwKFrU0pJCqXT55WK9wZTBcNujYD3qoEaIEQx6omMV1VKsm6VfHQKTdNRYsbZwPF4YB5PtE1H2/eCs4kZezwSulZ2e87iQyNFfc3CEVKxiuTBSMbKnGae3FypmqngXCPjpQrVy+gtmUKoWT8b7QXnSgiQRhiPkf3xyHrVsBkstuccRSvvLnhbNVYArHpThIMlWfbWVazxeDPyuP+C+8eXbC9+xmef/QGbi8/kHFD8/v3dHY/7e549+4gpzqQcKVOibTr6YcWlIvjfvX2L854YhZ799Pr67FmzznI8Hnjx+hUfXd/Q9r1cHlUEQ02VHfE8T/TGiPq0FKiymzFV9k0pifemcUG4dprTro++xAEY8CHgg/eCtK0SrH4aZ+L0yHq9kaCXou2flfZNHgEhP1aKsEqQmdgyQ7TWY6y4iedUGLqA14O41EqJUTT1iFuz6zuNUM2inzbastZCMIGYCs5qpOiS7IXMvduV8IJyjCyZrblI5a5N8Ln6LXC+mLyVF22OE94LPnfZsXhjyEYYLiq5liWrXijzPGkVYFTvbb/xS5ADrC1CTy1qwislKz6j5ebykliEVOlDYLWRvUWuYqxZLr1Fny8+i0lmxabqoVjUQ2qI2q5P04xxVhHIuvy
rHzwILnh5wb3DOyMk0FS1TbU8xoRLi5Pa4xr57xiqVjRV9wmCrLbBYUqlbVvmcSLWSGibX/EmmCJVf9/3bNZrGie0ypwWblFknE6afR20IrZ4a6CRljx4R9PISPN4PFKKeGCOhwOgDH9TcM5KxsfQnXOEjTHsdltiKjQ58U+ROS3uatlLRMk4Dq3sY6xkV9RUqFaWq6J9F0e1tZKVLc88kqkCeOulKzRAzhzmmaZpZNQH5BQ5nEaurq7PlwFwliBPpyPBiYz2dBjJeeZ0qPQrubAXbEiaK20DU3pgPH3Fw92Pmed7jE3UMpPTCIjSUOsAjMk4l9UVXCXkpho1oUmGgkA15e3K1pALhGTJpWJl+Iu3llikqNgfDtxUQxpPBGexXUcxwh8aNOf9eDwxzbMm+wWcXQyfUp7NRWbzJRUxIpbEajXQ961+36L+q1XwHiZLp2uW94hK4x1jglIt02y4u33keJgx5sDlRUfsHeNgWG09bc30ncPZluANvnF0rYw/LTJGpaQPHY8x2FKV+JyYDl/ws5/dcrn/Lk+u/xmuecIPv/cZH390zXqzI6VKnmdOpxHvAl2/FlkrhlM60TRBfUGVRs+6Rb4eXGAyE8fDieki0VYBiuaU2a3XYvcuhdNxVFe2wyrBOwMmziq9lrNjGIYzx22RuFc9J1OM4AreWItH5sn748j7N28Yx4nNDyTTIZYKy8hgmekacQ6mKDRMlgq8yMXQNK083LWQamGK8/lQXrqMWgt905FdISIVGc5hqhBAqQnaTn7FZmnNrCir4JzoddwfMM7ShoZe5WMLZgLdAVSReAsqpBSCdec2TZyQszDddRQhH6EurdCllco/Y0okZA7sVU203mwU9JfOHYt17kycXMZd1jhp++LEPIkLvTecv59ai3RkxojHIcv3lEuCXCWBrkoFuT+eWPe9oCl0ib9eLUEsehlOER8kp8E6Sa+Ti1ks/IsWu1ap3LvQyMWiL5kcTg40Ba4itEjjxFNCNWRNohMeCmrUg2oFM2CMBC1JtyFdTEGq59D1PB4fefvmHeFxz831tYScqHpj+fxkZCimq77vqVkWi/vjSNvJBeuMoet66f4qOvJr6TcDq2HF7fu3NM1T+n44vyC/zpezXtz43nF1ec08J8bxJKoc33CMR4wVqSlO2E8BS8oyEnTekeeZ42kvXakLtK3neNwTc2E19BRgikn3WvDx849EuGGt/twqrLAW34jb/HQ4cn/3nsPpiDOWjz75lL4fBGdeAZexdWI6/ITH+y+pZY9zwsvKVdLPlJ2gBsdK4ww1WJG4xmVAKnibag0ui2S9lEp1VY1eBePkwgNRPpkacLFSy8zbd2/5wQ88pXTM88hqWJ89MNVYumFF6Fru7x85jBMb70nVKbVBxjW1ZlLK+NDQNC37ecYYS6mG0zhRsKS0jKkqOKPGWPV15Iq1gmWJc2Y+PQoJoIGbqwtam3Fupuk6Qm807bGh9R7nDE3Qs8ss0QgVbzsq8q4UI8oib+WadM7i0x23L/6Ex7t/4MmT32K9+yFXuytq6JjnwnHK5Jxou4aUZ2KSqYS1RoywTorU7XbDNJ1ISTrIpOfudz7/FjkX7h4fuLm6hsZSShXpQGjJTWa/PxKCF/y3E/9MaFvqNDFPkdVqUOVXwiwTolpFx6bFg8ZBGIqxuFpY9x3dx88JTmzeiwzWOVksShA9UGUfgdE5pB7CcnjLB5aSHFDOWF3c6tK0iuxPOC/yZ2aQb1LT5yyOaZwJTSA0AVsMqsWTx1pHHqYU5pRobNCDROeyqrMXMF6ibRpa60Q1oYdkVoVU32plmLNWQR+q4JzFzl5VpuusxI8G7SAK5Vy9Rb0cFlMgVVpzoTJKu55qoSmVOdezMqumjGmDtOnOCN66CvCuWhn3vL29xXvH2q9ovLB24jzDeiVVu/6Cl4hMqsy6Wy8vRi3aYlbJwwDIXhZjycoobjpJHq+xViV7YsiRWFRZAEoWsub1GiThy1pyFtOasGnUzDRnatvo2AEhlJoPL69GEZDmrKM+L1kNehjKsyKX9TxH9QxA21lsE6hU1puVyKT12ZqjVOuhCcS50LYC1hvHo+YkfIgK/XW/jBUMTNcNzPPMNMUzF8t7ia70riGWWUJ2jGFOQgCVyFVRMXXdIBGnTnZdLjSsVhWvY5jXr15wcXVJaDt6258VcfM4yuiv6/WzEQmlNY7t5QW+7QhWDjXvnX5OhcYm3rz+Mfd3PyHnRyBTM6J+0XcVA6ZkYpHK2BjJ/DBmyVyQ0emCqMBZUaJlR0kSXeqNSEfVQgLGCLSwgmss7+/ectg/ME3CVku1QJbxECxdrGMYOu4fH5jncC5GhmElXhTvcYD3hrAeiONInOVSff/+Vrptb3FVCkq5rSvGy7QjIf/sPE4QoelaLnc915cXXFz2eDtibaYLnm4TaIIEaQUfMKbQegsm45tA4xwO6W6rHqhZkRfG2DM9OBhH4w25PPLm9V9xeHjL9c33GdafEcyazdAzzYLfSFHk7s4Z5nnZ8Yr4wVnLPEVyrWzWEoiVS6YLUkx9+bOf0frA5cUlp/nE2/2e7UrOBe/kApDO1pwvGmslc2SeZxrdgbTILsLUyvF4kn2qE1O1r8ZIFWDBhkCj8rM0J8lZxVG92OyNc6SYmcaZJjTYoBI85EVIZGKt1Jhou8BCPnVWE8swHI4zxhtZXKsaY6n6UZJlTIlq5KUOiG671iqHg+ZWLxfPdrX5AKczcDodsEYw1bmK5rdrVPGjB53xgTrOPBwOxDhztbvAd6IuKlW8HinLzG75UHNKmlxnCQoa2x8kktA7h3GStwEoU78ihE0ZK6364XyYDV1LrY12MpJCZxQ1kdWtXQRCj7FCw2ydJVeYonRCT66vyTkzpxEwqjyq55HfIgcGWWZ673GlnruOWsWteTqd2Pj1GYDnnDvnXtQkMDYJlakU0UQKJhxVQJlGfFbL8p/K/njk9t07bp5cs95dyoJt4evp3HOeJ3COm5trLi8uiEUQ7csFURUnblR3H5wl5kycZzoFM0pgu+R627rskeQzTKeTZKPbTAgdu8sV1nqm6SRxmL+myskYQ+Mb6VIqWDvycP9Ammeur67o+xVxOhFHMUy5xil+fslujhJU1cl/sxrD4TiJF8W3zDnS9B1Pnn98/l1lNdFxXrQb5nHm+HggNA3d0BO6lnbo2G53ElQVHFMUdPqcHrl9/ffcv/spmEmc01lx3kaBcgt+xlj1hoOWeVgkgyJrF2109ydAP6/5HwZnOMcDmGJw1ZCQZ7+EQtNavvrqp7x4/QLvez66eSrgxHlm6Cs2tNKBF5nLP7kSU+XpdFBAoRQcjZPl+ThKVv14GrHec3f3jtt3L3BIVgRGdgTGVkSrK+OyaY6c4gljK+2m42IzUG1ku4N1O4tQwltCMLQBjDUqfZ3pvKda8Rf13st75C2Nk+LZKUW5FI1iLUHy4quhFIc3DusqOf6SN6/esTm94eL6n+H9BRAw6P40zljb0DYNNnhRugF4Txc80/Go61rDbrvVFYDls08+JtXKFGesc8Rp5M008u
zmCd57Hh8fGYZBvFYpcX9/z263wxnHy7dvqaUwDAOXTnIjSs74BSKpRbyfTieO0wzOsN6sZL6dZEndeLFY1VrO45fGebIXsJtDYkFzEdt4cJZaJoIX41MpCVMdsQj1cNW1MlsfJbRjtV7zZBjEhyCDZuFBhcBF2zLHqIeB+DI635O8IbgPwKusfP1YxHySCnSNbPu994RGDhTfNGeFUcnCnJ+nibdv37IZVvL9Vnkpqr4YwvyfWHWDjtKS5A07UV416psQjX/hNM2YWglOZrrjrLuXkimlFQWU+cBf8t7jFcCVkFGN8cqsrOrFwLDVZLYpjvAN8UDVM7dpNI51uYxLwTpLjFl4LKqkqaae5YXoUss5x2maWQ+DaNVrkhQ6HfHYLHJnrFHpoSfPiXmccaHgW3eWH5ZScMiBjjHEKDRX3zS4JFhxkLCU2/dvSKXwyccfAbKPEJyLfHY5qefF67jOQAjSfU2jQM2Wy8bYFlCzZ/DY6rAhSIFShTJaUqc6/JlQG37NO4KF4STvq9B2Ly8vOZ1O4heyBtO0+JSpVNI80/e9FDo6nk0xQZW9XrUGwUmb88jPOi9jtCou9zwlnBOlmEiRhfC67KKMSjULhporycPj/Z7OGVI98vrV33N6+CnGHhTAt3QOFedUvZORixWpvuXSdFjdrxQcrkqCm6mLdFqKPmMqNlSyLbiMsJBMIZdKMChFudAPjrv7lxQz8+3PfsBpTuzv96xXA9Y5cpwxXoCWVnd61cBqtZFnoRQsMoufoviLqm+4efqU4D1/9hf/ltN4T9M5DdFS34ApYAq1OCoZUxOboYeVo2lbuibQOEPXVlwjcnfvRd1kqyZJesPQNLhGitRVI3tT5zx9I05w5xzOOrwLlJr0nCzE3AgOJyZKhmDF5Vztkf39XxHjgY8++Rc07bVczsWQSyRH2Q2F4IhGcTS1sn888P7hnqc3lqHvz3uDnBIXux1zSqJ2DJ7VsDp7IZqm4WK308tFDvyFDxW84/mTJ4xxpg0i+797fJQd3nolZUMVNab33tOmpAA7SONMTJnVZiWzZaMLDxbssBh+zsY3fYGX3N6m70QbPp2IMdLoQmY99Fjvub685Hg48u74hmkcud/v2a3XcjDkLEog82F5EudJNMBoGl0p58OmJuG4OOcpU6btAoPtWXygjfNUVdq4ZYRh5BDuupbWCxitaUR5YpxkGMjy2nI8jjIH1zmwD0KGHe8f6fqOduhVcTJKzGXXapqbKAWGtieOM4+HE9Y2bNdeZvfLclrNVsZabK0K/tP9TIxyOFpRKInCyDGlRIozvc73KQsiQQ7pcRyFTmpgaL/hMnayZJbMYZiS5G9vNhudzguyYToKKMw3nhwzY5KHtelaxvFEF1qp2DQZbUoTjRNjU85VQqKGFZ8+97x885a7u/c8f/4RbddRYsQhMLLteo2xIjioRnKcZaRlWFhOi7Pd+4acI6UWmhBY4h6tEVf73fs7MIahX9FbS7GJaoXzJEvyyjieZK7tLyiNCiT+sS9dtzRNQ9d1zCnKfojCdj0wjhNxnqlZcqJzSZSiyH0rQEaH4TBJUVVEE6yftVS5XegQRA2MMXF/ewcG2n4Q1HoB4wxd2xGcl4x15yWeskSsl8u89QHrJu5f/h3z45c4M0vvXuPZAyCTP08hkas8X6YsyiRLxeBMI5kpNVGKlVYBA4oXt8VQjSXVGRBEB1k0gKEUYKYkOWxlwX3kj/7o/81HH32Myx0X6zW+9WcZfa3L7zmfvQcVkWFXI6gKH1qCsZRgSMapuezAn/4v/w6DIju0vPN4ZMsJxqHMN08j4w7aJtP6Sjs42jbQdB5nKt4J38l6g/UQvIXg1MslBrxSINhyHvs5L6iWWq2OdAqGwmACuUjIV0pV2GdGJbVtosy/5Osvb7m8/i/YXv4edrXCx0rOEzYF5mkm1pHgGhlh5cTpeCTGC2qPnmnyDFcd1QK8efOGUgrPP/lEYk8PB1brtcb9QgAuLy9lRGwtXd/TDytSjKQc6ZtGCmTrcNYzxZG721u8C4FglHVYpVL0VEqUoA9rBSFrjGAVKCKbqhbSJBfKsB5ETlkFrW2KVP7B2g/bfyNoD73U2F1dsx0GCuhoR9VTpYjGWQofYsq0rT3LWFNKzCmx6XuKEdBgTgmcsPXrPAvSQ2mzWV3docrIqFqLDQGf4JAFKOZ8gFrOWdGAkG37FkPH/jjy9s1rrm5u1K1dhVm0GJpKYZwnGg0yolZBHDiHb1vW1tIPElGYa6Zve1JSB60NHE5HTuPIsF7RhSAVlI4mlhyKcZ4Zuk72CFVUJLmKqTDpy56zeEYa4DRNpBx5fDiyWa1omkDMgmzw+jM2vYxAjsejXObOE4JjOs3UbFkovk0IeCuo8qnO9KuBdQjc3d1z/3DPzfU1q9WKYiqHxwd26y2h63DWcpgmjsejyDx1SZ9TkkxeZ8/jylI5L93jLHutJXN3GAagMJ9GjM5b52nGWEPfdcQSaXxH27UYZym5UMZEVXmgDUGVc1kUa7/OBQFnT5CMnCSf2FpZkJtaiXEvIgDfYMlELKGU88jQGElCfHh8IITAercl58JqGGQvh2RZlyIjC28N3aonuCDdwymRS8QbLzuwkmRZPE4UhdsFbeOm9I6XL/4z+/svMOaEMQlDpSzvlhZeNi93n470StYwnyLGWYUo+eyoNpFkHy/+CGMpiwiwCKW2VtmROFOxJpCqIdhMTuAr9KuGr1/9lB/96A/5Zz/4V1ztrkXabA1WuUpL1+4VTbKMoCyG+/0jQxfZrNdUPAGHt5Uf/dV/5OXXPyW0gvE42+xqxRl3HsEZwAWjf1/4T4uAsukCfadGOld0n1g0YtQJyBGR/lIL3gaaRp7b4Ay+adWwl4UaK/+07gYLNlQam0nWMiaoVXwmwRtyPvH6zV8wz0eub/4Fq/aS0+Q45ANpilDANAkTLG3b8uzpE/VCnHRfBU1jmKdZxqdUrq+uZDycEn//d3/Li69f8Du//dt8/MnHZzOhO5sXRRJc4yzIj1Lp+07OCGvOarqmafDj4YDRzf0Zx9C2nPYnrDO0vaht9OfGOJGxLhb4BSHtgscXwWCD5NHGMp5HK0LBFHXEOJ4YxwmQRLeUPmQYG2M4xQmfZD/h3ZLtjKKLJZmpWAtqxns8nZinEYdnTJGLzUY6HK04F7ctRtDEJSXQWafVsYqMBaJw3ZElz7pbkSiM7295/UZctc+ur5nPGcIyOnIaAlPLh0DzWCqNN6xWA3MWOmye9ZI0Ukl6J59tBcH+hoCx/szvMVXCULCWUA11CVbS0Vg1BusUA00gKS/fekfrOqaTAPdscDwcjjS63KzGYIuhpExB2mvn/JnDH0JDHxr240ScTmAqQ9iw3q5ISbTcx+OR169e0PaD7jMMaY6iAvGizGiGFj8HnLfn56JqlnI1ApBDd1XByghtGk8cT9qZDRs2q5VozSnYoNwpvVCaxoOzXF9e432Qtr+IFFb2DjKmwRiCD7Q60okx0ra/XidhdMu75IxbY2nbhjjNokBC/573IohUM1bOMn7COcmBs
FUySVBFTpEoV3GVS473nCTXZJpGYpwwRn4XgOx1DHgjCI9qPaEKFOPN+6+4ff23lPwKx4g10m2nHEGRMyJlFtrpmdBcs2QF4FTw4VX0JYiZmr0SETRetRisxJ/JqEspqVlHmNaAKUrsNYVTNfgKjTvx0y//kt/73f9KDZaOxhnSNFO1UPBGMjlqtdqxFKwz7DaSzJdLJmbDMHTUfOBv/+aPgRMuFEFyqKS44PDGarJBlYW2KuFdgNY7QmtprCFYQW1YLwIMiaw2NF5gn9Kpyu/HB09opYuRblwuFCoi7bVW6cBifgxOxnzFWHKWAK1SCzULaNFbi3WJx7v/RJxO3Dz7V8S6YRwzzhnyOIkZs4jC8eJixxc/+wL76Hn+7AnDsKJWwzw9ijS2a2XHC5jg2W52dN8buLq60hhgUXxaY/ChpcmFlBIPj49UY875Pe9evaKbI6tBIpE311fYqNkNx2mmtR6HlYSjWnh8kG8gTTN1TtRUOD4eZeZdjbyIzlBiVv3yB3dtqoVZl8xgKEXa65wrh9NEqvIiLRTRRatLleAd5xwmBLqhF9eq/jMuBFGNZPmzU0r0ytI/jidWbacSW1HG2CUTAKil8Hg6Mc6T+i00Fzpn2cG0Ld5Y3r6/5XA4UijEaWI9rPjWZ5+zWa/OwR6w0GqLzIfVDWl1zt+3rQLYhFxbUqJvW/phkBclK7pYu4ZGaaFxFoeksSIBLVbyNKaaVKoomOnbN285PO45nibevbtjCSwyzp6d7P2q5+mzp3jfsFmvxKyILMVtEJ6/tbLALHpRtj7IXLNtuVivxINQ4Hg8sCCHHZaUhPoKAmlLJfHzn/2cmCvBCeZaFDdSBbe6YB6Pe/ld10LNUVRmum8Y48xxmqgVfNOeu0pnBeexqDWWbPTTaVZTk/0g2bVO5/6ZBZWSoihGLJb94XCOYP1Hw4jMh//tgnz/S8a2846ubWkUNigWFtHpL18xZk6Hg/zem4bD6Qg5M7Tio6gpc//+jtN+D0AXGozzCmAUA2HMWQ84QWMkBS0Gayhl5vHuK25f/wNpfIVnwlrJSMlZqlqDgaohTsj6qOhoOCCBT0bHhxYJ7RJPkz2/I9J5iYPbOKRosQYnGw8aJ2MYawveL/+e8qpcwTeG169+ys9/8de0avQ0RkY2i6tbxlsybZ/GI3NOqprr2e62xBi5e/eemma+/OKv+frlj/FNwlEIFhoDjXN0HsHwW4cJ4L09QxUbVzXAyxBaRz+0hEaS6DAF4+SCQFWCNngalUA3TdBCRtR+zqBpfxCcIThJraQIuw01njpjaawUo+Jcj+LCrhZbM513TNNPefX6P9CYB1atgwTVWbyTBbZwpyo3lzc8u7lhtdrq783StKJ4IgtWfp4mnLF89tknfP7pJ4QmMJ6O8t5r8Zlz4c37W+YY6duWVd/LDnGeMFRRQuZEUVGTbfqOxnv6rj1LPYXHk2g7eVGDSqPQwJpxnNGUEXIsMt/O4okwVSqDaRolbs/yQX5lLev1wMcfPWOzXrPqO8VhiC4663JccLZLwIn4GbCG0+nIeDqdVQUhiKwSXSKth4HgPeeYRqymeaFqGehCoPMCGptSYtIUJmHMCwdlvd6w22xErhkCm82W5x8/p+tkTPSBgS9jH2H0WNpeMOuy4qsi9ZNCjEZ/qa5a8jhyPBy4e/9eNP8YcsoE58/7DDDneeR+PLLfHzjOI/uTHMztIDPxi/WKj589xTmDr1KdHR5FtSWIBiPKCJXIOpX4lpgEqVAq8xjPwL7TKPCvEqPKP+Uw7Pte5LrGkGrm3e1bhq7ls299qpUKPPvoGdvtmlIq+/2e9+/ec7Hd0g0dc854Y2hWgtUYx4nDaWQ/noRjn+VQD8Gz3e5Eqqt+D8FrWJZbUv45mSl7BeIdH/fknAmNRF9aYE4SJTpNHzramnXMAr+2wgmEPZarpN4FjRkVVzqqplF9kHaXInSYRbEFzHPCGc84i9u1X0YtWYB8pWTmPOO9o+la3cPICGTOkZqr8oOMqgkrlYlX737CNP0C3EiskYx8jrVK9yUrNv1ruYoKJ+czTkMuYAHzLfHEkl6XsbXgdV4frFwewTmpnJ3Hug88quX/YzKQsN7StBbrDd56ugb++E//J8b5UTAbVTI7TCnEOONUpFEKPO4PzOPI6XTg8fGe6TByOkaa0FDKzJ/86N9R66i+ETF9yjxJ3hnnKsaJZNo7j7MQXMUEi3dOZP2t5LU4b/FGfu7z8h6L9Q6vSFbn7Pm5y0WKRGNFvWhLJc0TKc5S9RvOAhljqvCfLBgjplRzBpIted6Vxjrq/DVvXv0hef6azabj4uoG6wyv374hxoy3jvVuQ7fqUR0JxgjBoWlbjBF8yZIZHnxD2w3sH/Y8Hg7ie9Cpxd37W+5vbwm6a0wx8vLFC756+YpplHOfiuyHa8XnWcw08jSJLR6dT3kjUDexxYvpzvkkXHVjOcWJGIWJsjhic83MaVZNvSamqeY7xcRRK6vr7QbXBInzWypUdVhmlszcqrJD2W2c5ol5nDEGMUUZXfoqvncYVlpBylLMeckcrtZyHGceHu55cn2NC55SC/M0yz7CyiE1zpGha8/spwWeNX+jczDL7AuYxoh1hvenB0LTsttssc7KknSKDMNC6iy40FJS5v3jI4f9AWMN680G7zzzJOym0zQKb6mIx8I5jeNsOi53Ow7jxIuXX/HZJ5/RdB0Xq7V8/0aUXHOV1LgUJ3IOmMYyzkdevn3Nk+sbhmHFnOWFlMO+UFOm7VtlMAnyuyDVt7OWxnlO8wTzsjOQqNSL3QWrzYDFnZVvGw0pylW8KduLCyxwOhxofUu/WuEV0+G9PXeEqSbSKdP4BrdgVVSSczweWPUrhd5FrHdSAQ0r2taKhNpaHWcqzgMJ7ZFdgCc0HfMc2e8fuby+BiR+1TunYfL/+GURQmDoeo4j1DpK4I91xPkoB4y15CjhW8YYptPIfn9kM/TY4Al9S0mCSElzwntZqm53O3mW5glRLRcZ/QXPmzfv2G0G2kYYUqY4Gek1DbUeefPq7zjsf0HTjBijF4AWQ+d8h7Iwy8r5MzUIWFDuHCsRrXBe1GOko6hGxkkeSADBYbIMrbwzUIP+41IwFopyliw1Q7aVzhtKseTgub39ii++/E/83u/8bzllidSlylzcGCmsrPNsLy9JcWbWC7WpSmVwji+//Cu++OIv8EFwIcVUmfp4yE6J0gYMGWc8wcqo3ARz3ikZUwhWPBC16H7OOxm3lSKLbhdwFdzyodSskm8R7NQKlEy2RWRiCtCU6CKDqXoJVNkLlSh8BHLFkERRTBVeqZFdXM5veHyf5D1xz6lNw+XugmGQGGiTRQyQYyGmKCq/IBGoD/sHHu734Czf+da3RFQyjqSSWa9WklsyRYz3bC4u2O523L59S9cLOfbxcMQY+OjZUzbrrYowFuqu90sPKUu0KOOntulogmzX5WCUm94ZuY3ByIvfLfp/ySd4/+5Wuw9/vtlr4ewtGOPM4zRig5JTdUdwdmKfX0uLrZrpqovzFBNv377m9dvbs/qpqPu271fE
FMl6uPpGeCmLIa4Lnt1mQwiB4+FErZXddsuq78RN2jX0fXvOkV1u3cW7UGtVBVbiYf+hajXGcDpNPNw/MMWJApxOIzGncxA9RivR/CGDYDUM9I3QZ/u+p5C5v7/jNIlqZH86EkumaTpWqxXWB1xwrPqNSNaU0nn3cM9Pf/IT3t7eYvX3M6xW9H1LrvLzb9drXHDc39/z+tVbgZ05wYY772mcPy9Al8u+V2EAaqiJJZGqPBu1VrbbDX0jC+Wk6WQWyDnKARwCTROYovCaDqeDtMVFWvBhWLNZrwnDIO553eeI1nyBLhqOp5HTPDNNE3fv3wPo4jdJzO04k1IkNIKHefvqFe/fvQUkw7zrBkoukvONYTwemeckAUu/fiMh8tempQ1C25xS0rGEyBtl5PQBsX73cMft+3cCFyxZUuKUaXQ6HZjnyKjZ8bXKMrwLgVF/Hmslcte5ICOmc6cViNMDX/7sR7x7+58I9gg1UmvUBasUJbJwl0O+5kotwtVapMWCm5GRkLciLDFWTbFy5slfR7INJHjMgStnoJ+3XnZoxlGNkwG/qWJEswZbC7ZmGpNxGBqf+Ku/+I/s9/eyZ8iZcTxyOI2ULHnruWZ847GhMqwGtrsdITQ0IfDw8IL/+X/+f2FMOit7MGL4xBmct+A8wTq8la7HhoDxTg3CBusMTfCar+21whaUkDMG471cVqIb1bNArsBlpzOnwjRHIfPWKjna3+hKq+ZTGCPInJIKXrtGoTcgiika2V/VQimCJi/5BV/97N/z/t1PwMBueyGij5yxVXDq+4cH0jQRrCEEhw+GN69f89d//VdM40nG1EVoEL2SXUtSLHstxGmEWjmo8Giz3rDb7NhuNlxcXOK9dJcpRVV6GkPbeLI35xjSEALzOHE6TlxfXqq5RBeP3mOyIeaEtZ6maWVpkgvvXr/BmMrFxmtLqSHizpIB6xy7i504nnWZJMoWdekao8lsa1rviHlmHCXTdzX0fPT0GX3osI0jeMmIrcbgmkDNhbfvbrHWcnN1JTGcCBisoPr+aZYXTTX2y6K85EiehZEkfBQ5HFkQG1UqMzFDWULTystkJIzk+UfPdLkpuJBh6IX/pJpycSknQttwudtyHEeGrgULJUfmGJnTzOV2J1jqCrvdluAC8zyxoJWD8zx99gSs5fF4UAKq4f3dHW/e3/LR8495+uwJQz8IHsSJgCAEAZPFeWI19HRth8ewzxN1LoIf6Fd4ldA2XUPjWmKSpa/cFe6D9NRa5iTfc9+vxHBVKliHr5ZZq9f9wz3ONex2VxjtJk9RfwehoWa51Gzb6WWcaF1DiQuXCpF4lsJmsxEHvu6rHh4eWa9XghEpBe8sKRdJ2soCIDRGDnFjIcbIaRoZVgNTnGly+CeNm0BhZ1G8LZ3GbK7XK6Z5EhgfVivbwvX1Uy53F5RSGU8z9w/v2e52tG1D0/U450k60061YK2jaRyd6uuxlRAstRriZKlVlt5TfGA+fc10/Io2JMmil5NJD6yqnR1ahFU52nSZLleDqBlVekJVCbWIQz4ooRZvhEUPZAQPjpXOYvEUZY11E+aY/FnGykI3IobWpgJ0vHz5M37y5d/wwx/+HjFB3/e0TSaWRGPNOfHOYs5jTHIh5RN/8qf/I+9uf05rGyIVazOlyE5Fpn1W9iJGCtTgJbTMW8lcd14uMBsqwVYab0lFphiLknCRVVv9EAR9Loe4NYLtpmRKtmRTcVb8IrbInqIiBFxTzbmTEGMwZ8uiCBUqzhrBHumAGlMJFmJ+w8Ptn7MedrTNjRzsdcbTAFJcOxdkZ+I8+MDNxSWX/+oP+PTTb519EX03SHGndAkDZ2qCMfDx06dnjtOTpzdQBYw4x8w0nZjntAhCggZsiNwNLw5jkXRxXoLWIiOI0AROGrrtvVjXc5xx1vL05gmmsVqTVuSKr0wpSYD8MOiMzgoDyFhZYhXFT+t4o6ZENd80MDUSoD6NvHn/jovdjrKp52hVi7i9d5udqHeskUhFhbx5J5b5eZ7YGyPdwzKj0wsq1oS1gZik+h66XqS5Vh42r/kGuVRaU4kqPQOdP2q0qzGGMk2MpeK8oWkC/z/K/qtZlu7M88N+y6Qps92xrwEawNtodM8MzUgUNSGGvoBC+ni60YUidCWGbhTBUFBBiaKoGVE0M+Rw2gzQaJhGA689fpuqyszldPF/svbBMMSGdkc3Gufss3dVVuZa6/nb46TpYZkmYuzYjiO0iidKedNFcyDrZF+aNSTb4te8RQ57a8kCbp7cSGnhHD/6yY/5+quvWKaJ48MD/ZMnqtkskhPOs6oxh2dPaQbrQWOZNAE173j5/DnjMOKC4/hwoHZZuVxA36kdq9lUh1VKLtMCm4b38qP41mixU9ZPa4zjjnk+cZwObPrxcRVqUQ+1N77JNPhdHMRX2Ka93e7Yby7Ybrd477jodtrsUuL9+3dM04knT27oItweHgjBc7Xfs7u8YJknam30Fx1pmfnZz35Ozpl/8k/+53z++eecjhP73d42kj/8y+MIXVQHRBU/lJJerzcRx/sPH8i1cLHbs5wmhqHj+vqGlBO1Frv/KiEM5FzMtdzItbIdtuSQOM4T0/GEknvlx+lD4d2brzg9/ALvjlAXW8SQctDuxWb68ZoU8tiqiGxXdTJbVWzODJI27OpLQc44ZzJ0586bge4/bxAwEhXUx83I2cax9rpUDzRPYaHzjuwKJd3z3Zc/5x/+5N/mkGYtYEArhfHJE1bzYYyDxacrtfUv/vK/5hd/89/SD0W1wm01739k9qXgDBXxztONg7KMvEeSYC34uEIIowQj1nuykszV5OTOYmAwuqk5qeukhlUeVAsqtnJVXoXmsZ9R1SJnajtlnxb7t+umro3b2SYi019HoGPnYC5veX/7L3j5/N/D1SeQHS1o89/sdAj0pXDKBRc9Lz79hOCUui0PTWWelJj8uEapTjp4pXv3Y29d8IXgHD5Eg/kWFjMO55Tw5AQli7SqxqTXSt8PPLm5puTCq9evuL3/wLHMHOYZ7x1DP0huVZVjXmvWDpWtL9Z55tPE+9tb7m7vON0/UJPG6OilQfa0xw5Zu7HG7faMI8oTkKle2fYPpxPLdGJ/sdMu33RrF+u2uLjYPeKr8PvyV+Dy6oKhi7rDg6P4RovqM4ircSsEOh9wwQnjx4H3Z0il5MS3X33Hq9dvBR3FyMNp5uF4OquznMMK7D3zlM5hcOI2pArbbHZyq+dMTlIvnOaJXC1Jt60PvzeRQDuby3LRw7AG+0Yf+P73fsAXX3xhhTeqn61YVEVDapLQkaaJaTkBjuuLSy6vLtkOI6HrqK7x+tUb/uLP/4rb+3sFN+qOxjU4HI6SCWf1CXRDpxRU16xnwTGniePDUVjyOOgkYidCB8J6q4rlvfPUJo9E33eE4JQLtcwcjgfmrBtYEJfC1IKDro88f/ZcNa2zEezes93siJ1au1JOeC+pYSuV5XTk6eWlHUp0orSn9Q/eIJpNj0PX69o4eXxyzjgj0JWZo6jm1R/UDz2b/ZYQOhbrGk+LpLIhSFcvtLWIJ/K
w2YzUWvjt777iNB2o7cjD/W+o01eEdqLzae3gsa5jwRmtymDalizYo+mzc/weIiIJJl7SYnc2zetAiwkvwOSeeqYe/1yGQu8qzhecUx4UFBHgTsR29I7ozaAWEjE4hiHz1dd/w/F4L9w8JU6HB2VwIUisC4Py20pirid+89Vf89/8N/83PBlXNPk46UtNdqrT+ApW55oIodF5J+WSTUDijixHbAjq9fD+nPVWLXNKir9iMt/HEMyaZKasTRBrS/q9jkpOM3VR6vNaopSK7ltvn2/OmVqKCtfcmmVn918uZtTULTmGxnL7N7z65p+TylsUNy6+rzXbjJzn4XhiOkw457i/v5UxrlZOxwP393cWs6PDpP5TGwUlncVEq1+ltDXA1en+MxVhbNaq1uCjXBZ9CLU1ur7j5vqScehZSmVJ5dxz7H2jtkKMTuFtJYEXPNBK5ZQT3gcud4PSJxf1DlAr9w/3jL2gldB19vsq1YqFXKumRGuUvNC6wNXFnos/+ROGYWS2aHJnHMKKxa44ceh0Qmi5agoAwjBCM2IcT8kza6GI9x7foI+R0HU68W+hVsWCqB0rkOeF48MdtTVuY+Dm8poherJlItVSmHNlcJVxGM6yMx+ieJsp0XXQBun9D0uyTdIz9j2eZmOpbqJSLdvJQR9kPtTRTtdLp3EVJs3zTG4NlnKO7YZGP45UCuTKvFgQoUWnXF9dk2omRnFKt7e3bHcbxQ+Xwt39PQHY73bWwCf1znGeCZbZ5Gqlecfh9EBJlaGPhC5QUqIfheGLEAzqRzZSrYt7puWkRX0cSWlhM2zwPrDf79mNG51ozWNzrgL1ke1upw4K+7w3Nm2suTuhi/gQOM0zm82Gf/zv/c9EAm4G7h7uTMHRfn/l/AO+hqGXgc4i6/u1A3xdRGPk5skTekswHsZRC3DNDENHK8K455Jo88R2v4dWyTVpMcrJPCWNftzy/NlzYOZ0eMPx7m/x7ZYuihR1zpvUVcqbZtyIy80UTYAV08jV3Utl06DZIaThbNH5eCK2/cJpIm/NqU+hGpTlBa2spsFg/TB1nTDQs+uo4JV8UMtM7wdaV3h3+zXv37/i+ac/Zr/JnJwKpLBmPkXAVPoAp+mef/b//r+Q8yu830D2NBZaUwpso6iS13a50gqeQjf2+KD1yVlmlWJoBCF1yK9w1ou5x2nKE+R0p+GjiGpXBRe5ZvdhLSTnqckTqln5fKO1fA5QbF6+ilVZV2qlLdpEcqecstIaLTUanlYz2aDt1gobNzAdv+Zw/3dcXz8nMxCcJ81Ktg4NLnZ7qtPmmiuUeSZ4z2azZdxshFgET+x7HUiXBR+Hc+J2wym7z+D1GqMdyEyoEiOxeY1T3715RYyRi8srhYoFjzf3qO86WuhI6SiOe3V5FcjLQnDaJNaTK03l6ftx1AkhhPMNmPMCOMZeeHqtRRlQXgFux3k2V2PAO9jv9+Sm78GjrKNWKfOMD4EpLQScGsuqTki5KoYgukjpzOV7zgVSrHTLhfm4MHSVuNlAg1/+6peclpl/8Kd/qsROIgrt1kIcaBznWSOb1W3VajElaX1woe8VkufNLBeco+RKIFhdZGJ6mLnY7dkFMzKFaMUqmqiaZcHc3r3ncNCp9PNPBzovGKCURPMOCmx3O5ZlJpVM30eWJWmzC4Fi8eU5a4HbbgX7lJIMyy3knMhLYhx7/vSPv8B3+sxKy8Im54XtdmRvMSTOgY+wzAuduZwP9/d8+913UOGTTz9l4wI+muBgmWWaqo+9HJrkm9znXTnHl8cQJVsMEe8xElfw2P3dPR/u7wgE9psNFyZTbjRiUGTK6uReF8oPt2/xL16yGbacTkdTSmpiyfOC7/moiOh/fMNY40JAME6thdB3bPc7+WoatJJwVRDlaqjLRSmeIQSaeWeC78gdIt9L4nSaFALpoVE5HSamknjy7JLl/hX3H35DaB9wQcbU0HQYc82UgEVKPLdC3A1c9Ra/4cUrtFWu6sGtJUFSVIlW8OeTrTjhJs9Jq8SqOYLWCNVTQ7XsqSKPiFd0hRW5a7NygoC8h+h7Sqf+krYsfPfd3/L97/8ZuRvoWiO3zKnA6aDK2W7YcLUb+PnP/gXvvvor6AK1KERPLvWsza7Y5L2S6rUSTK5LrYQmTq15bT4BZx0aDShnyHntjdGCfrZ+imuKmrKdWGYd/FzE10atnfjGEChBbmZx+BLeNNuM1nSGOatOITLj+hFMVGG7E5VK5wLOdbZ2TBzf/Y6by39A8UG+kpqt28fRhYElnch4+sHUpF7hi60+8k7rhFBKIc0z0zQzjj05CW3pgzxo8zSRc2UcTIpfK7E1VdotS6akyu4is0wz+0EGi4gML+phiCrR6aTJbw66vjfiR73OtVrhuxE/zjtqyThvuuPWOC0zwziYHM2fcb7YHBsL1dIpz9iN1lCLA8QqnbPCzgp938tjYael4Bx5WSgOwjBQloWvb2+ZThN4ePL0CfvtlpQTm01PH1TxF4Ln5fMXLDkzbrbKISqKT/ZO00Yuhe3Q8/n3JDEbuk7qhaZ8I6okgNvQCUpxVafs2OFc4eJSTvBcEqd5Zp5OMh7FjtNRyZelSuM+DMqrOZ0Sb1+/ogIvXz6n9yogag02vaI+umiYqNO1EtSlALlujCzLco7sxhRX83GCDXg0wRTr03Dbrb6vKsLi+skNsVZiCJymE33f6yQWAn4z6oCAZ7ff8cPh+2RTJaWSCAaJ+a6n894cwOKZpmU+E7ieQj9smdPMw+mB7bADl8lZo34/qLR1GAaexWekeTpDiGusR6Naubx1h9j7HTe78+Jf7dBzeXkJwHGZYZlVsBUCZwLxf+RrlXP3fc+79x8EKwVPjJ7jMZOmmX6Q4bM6nRJ97AmtSlmVMv3QEaOjJc9xViTKfjPqVO8cx/sjr779luvnV5Tynjfvf4krD1QPsQm/butKbgpBnIk0ilY78ZGN2jyuLjjXn6GT9VZY0epaq8ld5SDWQuzwLuPNebx+d6uV4hS8J8GjKlvxOknXVR5lr8dVbRY0LdzBaeP55svfcPy3DjQX6fueJc189+23XO4vaK4jhJ5v337JP//v/hm+6818qumo1Aal2ucu70GhyKJBJXYday+2W/PRUE2oBAG9DhLVSop0MYUsG5GtsEm99pKTTt7e4GQaNep6hCp/VzAyaKmZXdwo6cBiS7xTW+BSiw5fpTItR3ZowgjR0VqwDupKKSKXa4PoA1N5zZtX/4rd1b+F6y4Jw0DOC2k6yjMRA8mSEELQVFeaOOTNfs9Pf/ZTGvCP/uzP7FDg2O+3lFLposd1SitItdi1Ezzlu45WCpHgiAS+9+mn0t6al2E37ojrrWFW9Ijjdl7YhkDt9WaxXyoYItDIOtEbCcQ6AiN4JKyYv3PQFHIXvflBna6od4FlmugGJa7GKt6hmupIY3BlmRPdKI5jrSKtTkRWNELqeDpw+/4tmDdgPp642O7ZbJR6WluloxG7nqdPu/ODM+cZamPoB5yvZ7xut7vAeU8XA0vKkl/6DpkeM69uP5DmxJOrK7a7HZVGmk50fU
8MjpSFQ+63G5XNg+Xr6OZ8f3fHsiQ+efEc30e+/9mn7C92ysK3RarzKpRPWV3Yxciw4Dwu6gRTasEwAVusC3mWIz4EwTmlVHwUjLZWt7pWHxdZr8lMYY7wcDri54knT54pl6tqYS5FksSuH+nd2hCmbpFpWdj0I9kDQW5t5zyXl1d4M4qdTkeeXPdsw8ARPVg5WQ6Y3T8pzcJKtwO9iStA0EEI0XgcSGmh5UTc7ZXt1G/wQd3fXeioJZOXBBupb0rj/HD9/V8rNWsb0DhwPDywiRvoBurYrGayUlJm7Admt0DJRB9Z+h5X8nnBrS3r0GW6/OPpiPeO/WbH3x6PpK/fsVycqMtXxNAgN5rLj4GOrF3r2jBKrZAbc8rkkogEWk3glIYK4VwxG/yKr8iJLm6offRO1QsRXT1f62oQeq2K45Y6bE2Ibfacr5uXPsdQHTRP8Ir+l2N/4d39W9J8xIWRFgVJpmmibrdc7p8RY+Wf/rP/F9PpNd4I7LVlsZaVfFdont4bxsI1+n6rJAOLGHdOCkvxE3ZP+w5QxfAqXliPB+v7qdW6H+0vvJk5fdA1dK1SvTaSXCurCmCxA1dKhaF77Dvpukgrle1my7sP70kln3tlal2E1VVPLQstKDSxFs/QVR7ufk5rHZfP/22y30Hs6EdwMeCrx4+S/KclqQuoyBTXclaDYqu8u71lPh558eIZoRtI6cDhNLO8f8/zZy9wbs3NqqScJZrBEVWDCa4LxOi4f3/SxNkqBWfyM43VS1q4vb/j9v6eT14+pxsGjdReVu85zYoW8KgVzaRoPgS5qYsWrtFC+eq6sRhm52xcrjlzPJ1I9/c8ub62roR6rumkQDTMORvm32yCqQ02WxHYqVSeXj3h+vqGkhNLKYz9yFpSL520U0Cbs6A852gpUVJSbo5DxfZF6qvqRZwtx/mcWBmCLJD3Dwd+/rO/xvvA5h/9IwYqhEBNC7/98kueXF9zc30Dvic6T3GFbL6DYRBvcmPmqtIKLI7QB/abHWWz1UYYOwKOTd8xzzNjF6gOajbuY3UWZ4UQrjd9XrKFoVXGXlI6fKGkAr7ag9JoRSax4lSIFEKgtsycEuN2q1iH9eFsjXme1ToY1iTPJue4C+QWrEq12gIlAUC1RbPUTPSe3bgjZz2Y0UdKs4BIi2YuJRuk5FiWSf4ZSwM9n6sNV+1DpOApqdAiqr10hc78DffHE9++fsUff/Fjhs2G/W6DD+tx6O/nJ7z3bLc7Ukrs9nvu7+7IOfP27VtNKF73Yq6JbEU+Dcc0LZqAnKN4R4iBmALNR7a9qZxcI6XK0AVePh24vf2GMt3TBx28WjQewDdDdeoj8VkEPeVSqcsiItmtC7mgpOC0yYnaccbjGd+jd3d+n6UZHFWxyBbFcmhAEQflkHAFW1xqa2uikRJ9GxSnDujgHc4lWpuJvefu9jV3x/dc7F5Qlokudvzoix/RSqPrPV/+7uf87jd/iW+Z1oI4lGL8Q7PXXg0ywhsX2gixETsz5tp7cLbpuSp+s1jXjSLQV5jJ2X3abBTV1LQu/NVVUgx03kPVXaehSWF4lGZTujxRu90W12DJ1jPiFPnhx5FDS2x3e2qT8rDvpYJyBp225khpIkSHa5qEo19YTr/Et5fE8EOWEvAxknKi9508Mh5SOqqwab/He1XO/tlPfiJBQBfZb7fELpKmCe8C20HpzKXIfe6dQWNOfq/NZkNspUIXOT488Obde/a7LTdXV4/yUC9lTAkQh4HvvfyEd7cfZCZbF6VslZHTwsW4VXjdAC47ycWcooOXRdHGLkbrg7Yz1aoicp4aHS7DzdU1uehBCyFQi7PxrZFdJfhIa5l3795yeHggLZk//cmfKDG0VE4lM8+TzE/BE2PPMAQeTgd+9tOf0TnP02fPeHpzozylZo5q5ygWR4EhEL5hngg1q83HA6dZZUWlFt0IRqg+ffqETz//Ps9vbvR3aea3v/str9+8J6ekCsEQyVGBen4tLEqzKGunYIDTMbPf9dA8LkTSMjEMAyH0imluIkKVXeQoTYmpqSgjqnrFIayk3rYbcDHQWqbiqTWpXCgI3/VebWPeq22wZY3GSh/V5jwMggGSwV190NieXSM2dQhgCqDqK4fpgTQvXF1dQlP+0N3tiZoL11eaJKrh0ctplucmdnQelsWIXOMB1mKpmqEVtc7N80wcOih24qvVzFSSb8/Hhf3FRtxXKtQxEjtNHYfjgZwThyPsL+J6Bv6DvhQJMzCnhefPX/Du3Vv1dgRHK5BqOXcjeDzON+4PD5ymiTiO1FnywjInWuyopmzxfccQHMEtXO4L8/EdUKkl4NpMc0ZGNxGyZ71RKbSaJcrIi/2pP0NHq1+kGlkdvHiV0hSP3axRby17chZr0qqiQJzBWhIAI2iqWqO1QSTOzJnaRAyasnu5tELwDe8sFs855tOJu/v3PHnyGTELy6/e0fUdISz81Z//U1o60Qim2MI2NXClQZP4pBnnYMUY9HFgiJt1YTlv7OvEFbteP6tUXIhmQGxyQjeoJLyL5j8Rx4j105MhRQjNERGk1koxaEkRJM45SirUiqaYppiP5pqFWzY8QcKaVGzoKoAMkK1p8W/VUXMgRHFGIQZO8y23t7/m5eYpKQ1UO0CVkInNW0SNqnCHnKguy1wcFdSYUqLre1pz+BjJs/rVnz57oc+xKf3CRUm8v331mt999SXRBU8rhd12y5xEhjaPiEYHZUmkVujpKRTi0HP9VHpmSmPJC/Np4vLiksubazPdCLPWTizCpAHdZrSUTJF4xVJI+17qptIasSK/hoPQ9WysnjJ6JVBmhPPj/ZlYmqfZSM5MybrwWJSGC57gVsNQow+Ry90O7xU50ltXhW6pNQbEsRlGey88xgH7yDIdefvuvVQg11dnviIEz24z8g/+7B+y3Ww4HI9kUyXd3x2IznN/e8f87CX99UhJmXmegcZo1ZRLEadTqcTomOYT83xid3F5zgMqZA6HA6EL9LHjsJzEhXRRN16uNKeUzcc8LE1AvQ+kXCk5iZQ+CU/fjltyTbjg5KC1SIHOezME6posS2I7BFJKTMeZq8srur6no9oU5pGaUKe6cRwYu44VPY5V0s7VxDgvEyH0dKEnIXI+RP3boe/FM9TCUiv3H+65vrxit9vgXOB4f+D1u/e8ePHsHB2T0nKW/AXv8WMPPlpEgvo0+q7js08/5fLySmmrpbDb7VQg///HV6mJZZoInQx++/1eTnPvGGJkKrq3U1KufxgHrnZblkUb8nKacVVZWrkmclpoQREr9x9+y5vvfob3EwL+PYJGipoiqYKQWtUkiJ6pWqodaB7lvWuz3+9VkbcGeJNhWk1xM2anNbyJq5t1TKxk/wq9qFrXDBUG8rgGzUxlgpjFSeRacUgR1HnH4kcyes0f3r2i+9G/y0whpUIXPMO2529/9Rf83Ze/JLRGLs42o2ZreVWkhXtUaNHsmuDpglJzPZZW7daNMUjZZROEthirmtWWCVnQt16fvlcLvcF7tdrmYS57c6vXGuhco2aJWNbJwsXHe0qTjYS1x
YkfaWuUh0UJVZvaokeRIxmqT4ZWdIx9x+2H33C5/4xx88ccl2o5TkVoD9okTvNJDaMeqfBMsYTzpLRwOp1k9Iw61JUsGXkpieA6WirU5rjYbImIl1ZUd9dxc3lpGnzwdhN6U924iknuMsHJcbt2Orvthtk2ixg7xmEgxIjrZGT6cHvPdrvh6vKKZQFK0jhznLg/3DOOG26uL+miwVI+aAOaJuh6XNINepwmbi6vqE6GN+cDP/j+9zk+ecKyZHb7HTlJAeFCYLDXp7iHRmiS4/3xF1/Qx96kgZbG2tQbnGoxNZQauYwS5eHhge1mw5JV0CEorbHmEFcac0q8evMO5+DpzVO6oaeVxg/+6Ac8nCZVh9oiVlPm3YcPUCrPnsn4I5VB4NbSd68ur+w0iiRqOHV2DwOx78jzTJ4XiQJ6EXE+BCiZFkxil6Q+m08TcbuV9jlGet9zbEfA+q9rPmPM6sKNpmRqlFTY9hvuy0HqLufZX+5wUTddjJ0KZErFh54lnUQix0DfDcxVhPVxPjF0yiEqVa+j1ExKVQ+3cRFLKSpAsUa0WhtvXr9jO+zYboLh5Y3L60s2u52MTSg1NOBxwZHIBlVVI/EtrsWJw1L1a2UbOafKutXh+/d8KaVAoYepKNYgzwvZ0oXv7x5ItTCOA1999SXvX7/hydOnfP7JJ+RoMEwq9BGal9fj4XBgu9mSple8ffMzPEc7JVfktsYW5WpRQfWc5ip4URvqml7gmk0yJg/X19luxxpm51ZBkltJVovS0UFZsk4eOQYfFBUuh4UmJXWb6yhQKOL6fLVTfxUh3yog9EGzzonb96/UGojEDi4EyBN//uf/NSXNtJJtu9Lz2Wh22HPne8PBWVVYWqEbOlKRl2HsejngwZRWjw17LSkiKJVFajp7DohOIikPgQJVm0sji1tpgeKbnX0r1UWi3r4a/pIqCOa8QBjpY29ubhSg6mEcBpa0KPfJ2jwkIMhnWMzkaRZEFWlVU1/vEne3f8vn+0+Z/EhzgY7A4hPg2e33OGTi7fqOVHUYramy3WyIfc98mkjlse7AeXUAich24g594ObpM65bVSxHjOowqN6Tk7D7oYqEq97hWzybyZzdROtGEaPw5ylPdF3HYGX2zSyRx9PMNAsqca0xdsF08jMxRk6HI7/5zW94+fwFP/mTP7GTT8XFwMBAnheC9WCP3YolipBKlmZ5SonT8cDT9sQav7SwRJtmXBNG2oLaRmLsFATnRFwrw0j6Yx+0CNHABX14vQvsd3uad/St8r3PPmO32zLPC4stDGtXwDD0nI6TJgwCpSWePnvK/jRJNeIay6zcoM9evjQHtCI8ujBQciKZ6bDrOlx8bP1bb6bYqS51s93RDxuO00mfjXe4oIiBlGbwXr4UnE4wtXJaJoZhpOt7dmbbTynLeBbXPoZgTl7HdJw4HA70LwZ245al6PUly5iqtRGj1+TXR/o+4t1AbtnqZC2W2kcpN1oC51QMEwK1FD48fOBuuaWUzMuXn7IZ5FOhVYqZ1T77/DMKlV/+6pc05/njH/6IfuhoDVIr1Fx4enOtCbUJW25NCrWh65mnmY057QXLaFN8bzHKz58//4M2iPWr6zr1Vs9JcSCzWupi9Pzyb3/JzeUNm+fP2PUbhk8/ZYiRN2/estsrFmRZ7vFhg2viUK6uLqint3z53V9CvVWwWhN3tp5k0dKhE34ttKKU0SWVc8ieKIbwyFWARUyYiMSEHevCy+qXaOsmIs6hNvEm+halO7NuQM59JK985ATXoLr1hdZSsHOWLfLYdK4mxuP0jjTP5NBpXsmZL1//ji+//DWORd9vJH1pVSkCFqG3QkHmGdd/903IgR4GaMWUWZyfHR8Et5Vaycuk64M6RlxTtLnK1RwZiCZx1yVrphqrZh7uiE6KtFrEUygos0LKLH6hjz3Ry/fQqqbCED2xRXIFlyveF+ZZNQOlNGoBF7T5tSKoKwS3br8cHn7Hw+lvGTZ/yuHkmN2Mt3qH6ALDbjxvqjUXxe6M/Rl2DF3kdDwx9J1ZFpoiZ7powiCsObCqo8LlSrXxPFm2/3raaGDKFdP8em1uzRlkZBc+JTUpbfaST9ZsJK+D7X7LxXZrrmUzyxRpp72PfPGjH7PdbmnOka3hyaOwrcN8ZLvZnnHk7Thqojgd6PtRuoZa2G4G9psNzmmsw0Eqi5FyXpI9rw8SgzqcU3OevKP2e/vuXK+55IXqnBm64P7hgRgCw2jmqWp1p0mYcEo6/e6fPKNd6wMqVJMQerAohvXhPTwcSa1wvd/LvdsqD8d7DvcP55+f5pka5DQe+0ER6mHVdSt4sRZNPnHojKQVmZyXrCjgTWQpiT4GKpmSMn7jzkUxp5Mil/fDjgIs+WgjsGpYN7sNu3HUqbFhm1dvPcB2+nb+rCxa+7aj0/tOOUlb7iR9FeEstVGZZ7bDSD/K8b3f7BiGjiVlQjAc/+HA9mLPxW7P3d0dv/jF3xBCxw+//0eUvBZSPSYFhCDgW9JLZ7lVlWHs5Yx2Tu2FtdIHdQK3ZsVN3R82Sawtiav6KtfC6XhSwU0IfP7p52y3G6bTzMXFJZ7GNB1JLpNmndhc14msd4Wh61nm93z17V/i0ytCZ6oxy/5q64KO1Gfra2iUR4Wc/b2FOMtDY2ogb4ejc9QGkItTOVHzHy2kfLSJaE5oDlwVaX2GX50zaNZa71b2F6xjwYP3SNapRUgyVG9cpMqw0nSyQ4VJtmn8xV/8c5bpHa5K5uwJ5Nq0QbSVZDm/WqpTt01tjc1mo3bIVumd8QleacPBN1PvOEptpGXhdFAkeWuJECPjMJKLueA7lYJlCi4LPtc2ocw6mj/nYbXWaFFwkssZ13XiZnJhyTNd63CusVbiOefp4kAtsBQLN0XcXy2KCIoYmW5erFIKWNhq5xPv3/+al5sfMvZPcRRSnkgpU/vIfEoMQ0dxuvfD0EuMUgqlZU6WmtGPA2lZNMEXJRR4GjEO5FYUfNoFou+s+9jyTgIwbEZcjHIYOmc3mnb/6m0MjZ5QlMzqu57eO2pS3klbSaVc1L9qIW3aQLJuqibxmnOVTz55yVqtV2rm4XAkBgvoCsoJCjFySguvXr3i9avX/PhHP2K731NKYdNvGcZe8sa8WHe0Nj0/DAoL80Gub+Mq+q7nNE10fU/wkj92PpBM1nmaFr759is+/+z77LZbSmscDw/s2CrddJ7Z7XbEEHg4HNjttoSuJye17EkT22jRMMeauT8ecc5zdXGJ846HDweCj9xcXZKWibsPHwgh8vTJDRKeB2qd6E3Jg3P0IfDq9bfcXCvEcEqLOqbR51VrZQwdebQFzx6Ww/FI7Ht2F3udiIpqGjvTlKf6GBewunmxFNpkxr7qPNF1ZBJ4yTbPJqF54XhUo5x33qpoBeE1X5mWie040neaKMjq4Jhr4XK3O59CT4vKr97fvmM6nLi6vrGOjcRut+Hz7/0R281ICJ7j6WhGvw1LzhyPD3TdcOZhtlvhw85bbH2uhFDluB4Grq6vdRr9vYiOv5/Adk4TY2uFUhV1UF2lDz3zpPtvnhf6
7QDHiWk+MW4Vd348TWzGDTEEjsd7NuOIK/d89bu/IJ2+IoZsMKcOah4ZECn59zaDmis5FVqREsw5KPZwVqtcXRf82gzCbfX87oJN3Gree+QjdDrnUSrrjHRtZ+7arsHj9RC8pU1LCp9qU0ex/7WFriZqDTQnscXp4cDt3R3j/oYYA9PhPV9++QtcnSktA5qYW5XZrTZ3hox0lhXfgdN1GdbekZJ1KLGIntVf0UATEY772zvevP6KZVYsyW7TcxoOpDJzffOMod+wGXsLBlQ8TWkNnxyuK8a7CEZaN4rOvq+WhKPTAp8ktMEFgrd5ppnpzntCjJBUZpWzXre3tF7fK76olkoLq4Km0xw1TTy8/5b9zROmXFnmrCDL2cQxsZfs1z6L3Ap4z/H+SKuF7YVyy7pejXbTaTknVF9fj6RJSRD9MBCjCxCEQ8euU8xsa8rjQaSz4AggBEJpTHmWuilEM3w4S7W0yOgumFStcZomhs1gSjKdTh0yxDgHNS3MFhxYqu7XYTOwCb0ljxalLTbonOSgux+Z5Rydd5b5xOFwz36vfobizFiFyKbqIJhhaM1S8cHz5Tff8OTJE148e6ZduBbaWZrr+PzT77HZDKSU2G5H+hDYbAZNBlGLhe8iF1eXRCN8p5JwYaT3UcYyoBr2qEiHyrIspFLouiiFkhO0tNtd0o+qMK1FpTHBR+M91Bb29t17ptPMvE/0fWQ7bCRBrkmJuCiXiqpRleboQqccJTvFH49HYoxsx825Ba3kzN2tNqmb6yd8/c3XnOaJH//oCyMroS4Lk2vMJ/OwDHKdtgYx9lxeXHJ3d8tmt2Xt4R26Xgs0jbuHA9ttYewHKbwsAqZaoOE0L5I0+8rQbeiuB0YrQCm10JrjT378x+x2u3OjYKMR+sAubplPJ3Iu/Oznf0PXd/wH/8H/Um7qpvvAecESXTfSHJYRlXk6SILadX+4wsk5rxReHKfDd0yHE2Pf8atf/xpa4/t/9D1wMNVC6AeV8GxHQRcNTrPBufWOb7/8Genh7+h7KY1KFaxALTKF1kZxKy/QTLX0eJLXNRTuDjqBnnk2vyaaVttgPN4162V2FjCHWZQsAs/+u1ulrk2Tg8gLO1GvoX7rbnEmd1fprL2+1mgIig0+kGzzcM4zTemcW0ZL/Oxv/pL7u7fUsuCj4q4bjtoyrZpzujWUG2WuiBbtmqz5WZbKt+L8aLpptLORlKoq2ZRsKrN1JOdCyoWcCkMPqZjZt0Cx0NCCej3WDhO58FcfiXEJVUrMnDO+q9RU8KFCq/jY2VpoG+ljlonu0SrIuLZKzYoJqqVqfXZRidotQk4cH77l8uaPGNyO1uvQUptQkwa00ui78eOblrYZuX84nDknnEj4GANd3FjAaeXD+w/cHR74kz/+gphq4dV33+Gd4+WLl/IezLMsQyHQLHslg2SOSLFTcyVVEd4d7qyiyDmTJn3IVOi7jpISzTW60GvCwFPJLBbBgVebXK2VeTrxu6++5Mmz53z64qVOgdXJJOXhydMb5ln5J75BWRZ+9+WXAHzxQ3VgJCotaYRsDjpvGfEhUMwP4JwzA6F0+85Ony4G5uORGCLjRjBL7xv3DwdKbVzGkYKiJZzTdIWdglsTkVct8G3oNW5LRZHpQk8XhJPP84nWKkMnrbNzgd3F1jJYMsE7qv2e03yS/NV5QuctLC4QXc8hiXzeDBtKnuU7sekhdr3CyMyXgE0aQ+yMqNQYm9LCzf6K29s7SqlSeoBOns6iBXCqji2VUy3cvf/Aspm5vL6ijzIT3t0fpLAqle+++Y5PPnnJ0KsUpQ89cbP6JtoZ41WMM0xz4v7unu24IYy9xuUqcjaEaD4McxVbEqx3jqurPV3UCXLsBw6He1pe2F5fUvJCtdNozY3NOJCmmX3f0W23uFrYjQOhVVxL2Er7hwwTmry9xBdffvMVv/vyt5ymJ9Aqn33+maYfu9OX00QMF/RxZD4uLGUmLyd6X3j13c94eP83+F4RETJtavEK2g3Itmg3W9GEFxdyrmeuIeChOUXw29daO6oFbC3OahYPbUkKrpiwVV/F5M7KZgv6XTJBUIs6ttv5hQDO+DJn7ERbgWrwLQhCxnEOP7XpgqpNaRg7govUOvGrX/4lcz7QSmYM27Ofyf4lq8kN0195Ps6lcmy6Ad+SNj1khgtGiAtSjlSv+2/odwyx57QsNAepwNgP7MYd262CIqNFX7QGZAX5OW+hmanSArTcoNehwbtGdZLQUwsxdJScpPpqkURhNEsBeKtNrqY8cnS9kAiHDss5FwUkBn/+3PHWuUJinr/hePwG+h9C6Am+0hucXmtlKRlv/M3x/gBOJtmcFpPOBpZ5OUPIEpsoIsd5uLrYax25u7vnV7/8FT/4wQ/sxjEME33owQWqL7z65huOpxMvXzzn8vKarpO6yVUnvqEJa80lMww9Qz+QS9VpzeCYte4x1wRFkJVOoVEu4Zx5++GOf/4v/iU//MEPuNhs2O4uJEOTf5RlTgqp6jtKzfTDwOeffWYkb2TJi51eYOh7wx9nVfv11siWEq1W9hcXkpXmwrxM0FSOdLm/wFvybKoJ7wL92HE6nCi2sDmg1VUV486EnmvCO7/65lu+973P2G22pJSN2HsMFHt6fc2cpcBprZCqppdSsk1bXs14XUfXDbTcSDEzdB1j6HDmVu4sQMytTmcHDaXolpTwXTh7Po4PM6GrRB+lpR50Q/V9z5wWrm6uKCnhauPFJy+V6WRR8c4ZPegdF5ud/t3YSxVSCrU2NrsNwSnz6ZNPXuCDU0qqLTzj0J+jIxIZH4Kl72qifP/uHfX6ks+urwh9h0uJeV5kkOp7pLzKHB4emPPC2A1GcCtttesDzV3wP/33/31qLhwfDiwl8/BwpO96njy5ZowdC81CDW/ovNzpqTRG94cCTvZNVZ/78+fPLRfK8fbtO8ZxQ8mF+4cHnHM8nCbGYaPX6ixIM2Ru3/yK+/e/IXTFpgCnw4H1n0vFJTnmqtBbewHKOZZCRs/mO7yLCspEe12tmePxnmHYK27hrPRZyepVVqp7c+UmvJPQo9qm7IrIctcU3OlcNAhKp3Jn95zpj7BVWr+r+o/EVWndsoBMKTPzfGDYXvP6m7/lw4cvGUMgVU8taqwIxRz45qbGyFttOv68YXV9pIuSCrdWaC7YZis43UfZ/By6P1Oa6MeOVNSZEoIW6YvLHSFatazTyRweBQT+I6o8FS3YbnG4UGlNPezBiUfxpbC0RoyN4ORst/1O78N5QEZR7+36n9eIiPfq2Yi+U3hiq7gaqU73S60nptM3tNM1zQ9cXl3YdGninVzOKrg5J+bTxPX1Fc9fPIfmOB4PgAJWU84GramnZ1oS1xeXeOeJIQT+3X/8j7m5vGSphcPhSOeUbpnnGb/Z0HBM88zth/e8fP6COVkMcnMcpoMynZxj223UMRGCScI04vXjQN8P5Jw45oW8VEJQufzcFLm76Ud8CHz28gX/m//1/4rtZkNeMsfpyH6zowvWnNcqXd9zPB2YTjOXV5dyulqSbXaR4KQG8D7gXSU5mJe
vjiy18yHQ78m//m/2VWECxWb3vAsLuZmI7CvMzoNDAF35K2BSKvsrFq1/zeI0G79N7EYdbiPbNZcLhgi7eqz/7ODrwKRI3b4p6RyFpnilbSaj9WA0AGlOoc8ibFFw5l0EB3fjWKnnc11Qbp/b3gLaNWu/kesmwJEX3uYoyP6NVHn4VZ8WBsKfGtWl3P0NWqQen9llUYoVtgg6p/GPLl+vTKB9tLjC9z7wycTjVuzkoLTgaw+8H29dymWiSgLfn7vkBn9KFnV9zy7Evxh713JM9sKvy61D4f0maiqO5hw8U7By8M/Lsvn5NGUKuiO4IiasIp9blMUx9Ob5mxJ6K+d1kNg99e6/kgW+36NoMftYnb0qs5C+BKCellqNMjtX94TyDNNwd2RzIZqNUKr7IWznllLbCWSKuR2ho5K7kFIo2PXkZ+8bPP+Oyzif1uJsYnhrgjxUBrq/lUScfJn0FGaq7G4ronFGSzULcCwwga4o1vwK2T/BEeSBRWQIMtxgktIq261YRQi71Y9p0O63qmZvV1pIapbx0E0Ap89mZgN7pYLbiBpoCp9eUHzxBAle4o1RXQCnVF6TPPXnBuNDSCOiTrFVXVTCmZYZi4wJbG3JJ40TJY0LelZUFtwA4VSWHzYPLg5HGhFy4CIV58quTi2xWAXiH3RUreOKNOGAle6ILTucWhK08w61r46PWPTMslwBAZY+L6cCSGQM5u6Bej7eZZZkKM5FzIsTGmxHldmR8+sJsmpjRyPp3JxQb1N7sr0uvXr8ziwV0Aw9ANqyzbRmCJgcREt0KorSApEcfEOp9YTmeWZeVw2FMKfPrxj6Eu/Nu/+f96sFipcbhcGBFEE0GyB4hL9FDt1022RKE07+ZGtkssXTXpD2NrFqM2jLzfABPjbIWoBzbBB1KO3VMbpVU7lE7BNzaHPcIijUImub8SQ7dikN60eLXlME5bSTJsn0ykURFqs+pDgu0hcLsYMnb44hYB+3l1CrHqJSH4lexe9X1uIB3ieB6R9TKABX92ekW1QT3NFa++fKU70nagxKuwbWT/7PdvCF+H3foh9zTYA1WH/eDyPTz7rDz7o/a/bzaYDqE7qRpDpLkIT0hu6vZ8FuT/29+X9DZV+3u1pNUN0noMaf7+qvYKsHvmpo3Saq9pMKZJUJrHH+NeVbWBv82UvEPzZ8Yq024h7rOwjQqdzBuJxppXSlbWtXJaYS1uV56DE3oit9fw9dcf8ctf3HN9yCQphLRHBPMaohHDQCCy1OwB9jLs7Tu9L1YklgF8+meyjW4dLeIzi4BW8YG23e9B7CRpsH7UzFYrqDkDlEWgFNa6kNdGLgrNljH5Km47r03YjcqrV6/N8sWve+yzOwJseiq/r0GJ3tVcQnn1LsEttVswF4YexNVhMDGYTIDQKoPT6EX7LA8LyD4La94NJ6kIiaIusk39GrXtfkq0mYIbzrhWpjqVuBv5Pfsc4u/PId2ofYM5lwKn+e9sRoSxDticX+/vXxDjyMPTA6LCOIwcb2+oTi9elsVM/1JinEaWOnI6PXE87BnH0dxha7G11GLR8Nd/+ANDFG52e3a7yfTtpTUbXNfCFIcLfqa2ACQNA2M0Lm3L9VkCCeRc+fD4gZurWw77HU/zmaEmPv/0r8inmX/41b9G2wcblInh0MGZM+O4B+kDJNku2IbPbV+DBZ1NJ2FXtxG2tkFCNAKJ4A9f8+/zxOOMqs3Rk2BVAUqsgUzZhrnqFSxaqYLTAu2BzrVZtc3IkKC1BCEYS0DVLK+xi62Ok4cYLXAVCxBdrCYWBz0IKkUXajCzNUJzCKbTf40IaXqMjkt2nNO565ZqjMLag9Oz4NnPI914MeiGxds3GERVO0TY4SAuXcwWbLuKGTZYxzoIx7KpPxAtaRO6AM2xLJx+cPnvZ/MCGxBDX/OI6rMKWB2yqx7ULgM+h4FBi8/VjK77gy/BxYTqSbhY5+FMmw4tGXXXAkrH8Wv/PKL28Hr1aXHIgor9uVssKEr8gf1Gp7+22udqAa0wr5l1bcwVaoF1gVIxjUJTDvvAz378ip//8pr72yOqD7YXxc9nw5CPqJcZzxAmEk4Yac7l993UwWds/YBUxTDpaO/LjOjsrIVoyV49YDaHeMSv97YrIgpRQVJgWQpLWVjmEzmvNB+0tpqpz57xlpWXb64Y93tg9iVZOET3bF+0F2DSLXbEf4v25Ci+88ZWEFt3WonitufIdvzEE6CE2GmSFqR7PHE4KnR/Lp+buefEBjFte67p999+l/rr2HzQ4pTJj2y+Jhr6qMfp13ZW7HSly6/zJKRqA/alqFmmqLKuyo++/AUSIn/685/Y7Y68uL9jP0yMY+S8LqxrZr9TdoeJNWfyupqz622Emnj3/p0llqtrUrS48PThPe/fvuWvfvlXNucobssQUmIKgVwyKSZzDpRgE/AxEQd3N7R0w5QG1po5Hvfsdp+SMAOp28M1j+cTj+vKi9df0qTx61/9DaW+M9OtTjMj2YPkOH/wgGbRz6rS5sOhENg6jj5EsoqXH1AwbQ+yNcsds+98eQvyphFo/lqBQGll25KGNBPCVbcs9p/sLBhjqxTD2ktGJBGTiU6qBx2JfV4w+GEz9kDYXB0tYdVWHDILF58Y9xaqqDGJYtsCu/hB6nXGNtj306e4SM9NzYJe7CfMw8b75HbB70Ov8gElYZb/Nv+xwbPae6peTYkJ3GzPgw/9elWkFyime91v9tzbNwW6GVwIl+7Giv3LsNY+Vk/0/rCq/XxV9bNjxneikUq5fD94clDQ7jTUB9qBrmZ9PoewV7dVqfZvP2QzGZvaLUsUupdOcKiwv18zq7P7222h2e45/p6aU2eDm05G1rJa+5/NlK9lsYF9M9rC/ih8+cVrfvlXL3n5UhjDI7W+Q2QkSAIxSDGFRAidAWaFXOqBTHGmjZ1P4/y7zoV+3WxgqpqRmKxYEYUQXGPkYjF/BonBIUnbq6HNXIZLVOqqrMtCXU6sufjZ9sVC6GZDXhV2O3jx8pYQ7Lnpj7QVSb1D9WfRP4fNBMCcfNXFgWHTHNk79dmDqLehBg2JU0+bVtsO6We+US8Lt8DOqHApkHpUCCOB6q61W/96ed9iPKiiysWJuMtwoa+97acjONW9hf4wCNI1KP59MSitwOm0sJvse4omPvroc463t0wfHvnw/ntKXvnszaekceAPf/gD//APf8/9q4/4p3/914SUiDFyc7zisDsQUuTqeOS4P/gZbYQw8M/+yT/l93/4A/v9ntoqqZVCSsk9090qoYkv37CHeZwmW2JS1XDH0phFeHx4oNbKbrdDS6M0o9Z9//Z7slYiyv3LH4Emfv3r/4qcH0mxqzrdCFz6LbEuQbxKYess+gNm/4a3mDj3pFMmO2bvj6IPmNrGLgCrEI0LbTuZqxZKsS7KhGnd18Udpfy1SoMQfDGIVwlpGBBxvxqquaAKjBK9nSn+/pzC6e+huMhNm+9eCxaw+mGLYg9AAwe2bSAuYmpUdwqwJIpRTlVx7Yn/o4t+1OEvC4yBTvF93gXYxbHgXZqva/TdkVXxas0q+bB5
C+k2XrrYVffrrxvWbHOL8qwjgI6Fbcvi/e73fcb9L8z10zsIu3vOerJ7v9lyB1ta88O0Wbjsh4aqhUCiqDnUihrcaW17FyeyPSgbmwQz4mte0W09qNr3XYwU7aHd3DTEXYc9uGnrOw8sEYHYbu9qHlzrWphX6yp1DRRtpEG5u4t8/fUnfP2ze26uK/uh2WKmlpCQiaEYpbVekQJIzDQiyTtgUDfWdKhSBdW82Zj0e1hrv+Z2ncVtYWJyby+cMeYXKQbbw95UKSoW9ELvHKFV+0xrObPWszkQF8gZ9NnzCJbHP/nsntubHXhQtDxkrEYrFMW7v/6m7RwEhRA90bVGGIT+KcxC50KysEZP/XMbPI3vuIi+2AyNbI7UsDGy+jnsBWzJZwvukhCHGk1T0EWS3arFz7QY2aU/N52BiNjWum3+omHrsLr+y1hml/eTQrLYVSvTcMvtyzeU1nj1+pVZtSyzBfYifP/ue5vltcbbd++4u79jmkbGlKhaGcPAt99+x9v0ns8+/dT8pGpj2u/57NNPTQAZI2ldV1JKBiMFIaRoqkmwChKDnDoTQUJkiomn85l3372loRw+mlhKtr3H0aikHVv/cCrcvviUz/PMv/m7/wrVhSEUNAaXryWzefBb2xfcm3i2U8n82QsdArBnuM/C+xN+2aDVLoyp58NOEVOxXpoPUhBTqoZI8m1SQ/de0Q4PKFs9HCKDt+FVOn6qWyKoQV2xrCCD/2u/8XWDioY0oN1cT43eJpI2OKeLDdXb3eY4sNn5uouuWBLrq+Cls0+0z2vsffs5pcsGedbqXp5W4/93fQO9s9OAundOp7tad1M2rN1sQkyQaHqU7urL9rtaz9YidE3FRahW+cdf/aFWqls+N7e2ELYu3K0JNmEVlYv+wyCunghVLgZqaNiWBfVqri+/6VYHHWtu2E7t0L/Hh4uidp6037/N+M6Egh1zD8GSlAVHe92mQlmFNRfW1azrizlhM0yNj+4jv/irr/n8Rzuur0B1pdWZwJ4Q9jTxPRPilXzIXlFbQmitE6XtAbFKFKN/u/MBQEyFWqziVoDkynitKBlI23a2IFal+1Nj97ZkaAsXowkjCrSysuaVVjJ5zWi12UorUNoFxqwK+wO8fnNHTNahb9V2jIbqeOdycUrFdSuXYqEnOGk9wfRq3ouYZ2ddtzNoxZ0ttLKu35AJmy8IW5O9dRMAtWYrNqsSh+A26mquER2zCLK52l7eu0FMVfu59sIxiHX9fYrRCzn/tD03NbVdEDoF1pw5L5W7+3t2w4G3Tyvz6cxhv+f29pZpHHn7/h23t/cEAte3N9ze3XGaF4v1pfLw9MSyW3j7/Xd89/33oI0f/egL1jVD7p20TYDTcyOq5mV79iAkQcwufFkY04AMkVaVVk0kdHd37zin3wSB3GzD0fLwSKmNFAM5jdx/9DUffzjxl2//DuoToVVKCpfpvRbvKl2h2gehDjlsvGPtraJV/UZOCUbvU/VEcnkYjOlgr5Fc0BWx4Ktu9TyMiShhc36UGP0GKZDMiLA1YmukcbILjbnFqh8uJHoF1g+NvbfqmG7vbgaBpslfp7dKharFDin2e8TXn/ZOAXpBE4wxhDqdr1g152WhBf8tPWyUP/VlQAJOv70IuwwCuvwZsK5BsAdRFZFiv6f1rif4KzSkmk+W3ae+50HNv14uHaFBR8Ht5S87dX/wJbDRmS0ibGI2Baclj57DTMOi0mchFqCrDx+rr1C1OjhtkaLz5Jt3TKh3rz5Ebo6yS4W+PtIq8+BwZZ8zePetiyd4deFU9WMbqa2a5XKJVMxMc8kmjKsZShNocH8V+fjjI1//7BVvXk0cDgGtMyDYbnjbCRAotueEvjbWlLnqqspE7EQsiyy9mPJEHqKYi7NHMAlsorkYBtM7qHkVdYX16Peipf6zK2IFMDIMUBd3VmjkcmYtZ8rayGVl9eF7WX3u3gOvHXs+/uqGYchQoos8LdkFBI3VthhGhaL+GayTDT6zM0TsojHpNaPBThZsrSOpmOGfFTubM3DXUIj0UssghGAQpCm9TaEfevfa1fTiHbG9IH3uYYnAnv+gQhafK6lLg6WCJnPB1guTrxdzfY79wzLOYfKQiUGY18DrV5+jLfD27Vvevv0Ln7z5hOv9EUR4/+4905gYXr9kHEdEAmNy/zYRlvXM/rDn/vUrlnXh6enJknpe0RTR4MujWiPtjgdUYcmZlJK5PIq5GpZqVtoh2OEoi5naPT0+0RSO04GFwuPjI4pymp9IaWAaJuTGLMhTjCzrwvn/T9afdklyLOt62GPuHhGZWVMPQDeAPZ5L8opcpChS+iLp//8CcZFci+K94h3PsIG9ge6uyowIdzd9eM2jaku9zj4AuqurMiN9MHvtHbzz9P4Hmjeef/0nbvuv5F4pi9xPY5mCF+RrH+1h+nsYYwR95OPwlX6ionjT4+Bxi0P3TXfx5rknoJvJdiDnI3Mh54mDmx8HluXMlDPeKlaCWodhNsf3iqQ512a1pENqNzFKBvpoUTWMxLo4lyAOXysWFW2OwewrNKRZSxxUsWjkEAqi3WpxpmGXEXXN+L2B+Q+ICPY3r2FcmDC0B93qqxkemuDEU3sDzahNbrHrzZqgrq4q/GBEDQ4B4dgbbJKURlfwegkcl+IRcBMXI9vxHob53DH0DqaTNq4GyrsbbwfWh91yXHAtOpsxXG6j+wrMSLOG9toFthB/9h7UXW1abxvepziqN8XFBmmo4bCJq99bo7XGbYfbTedQmeDTxzP/6s9/4E9/vOPhXaakG71vep55Yi7D7iP0BzmHGHGsB6ICB+tVA+Y+zG6GINLAw0nZoNgrEaKm12dkSQmNQ3F8jFTG592lNnbv3NpKCsepXia87fQmN1oVo41anb2re3AXrNXHv+/w+GR8+PgUOibTXu/D/AUFUeUuUWvSwZxMYVEAOZv0T3SRGQZ5zlTs6aR3hl/acXs2wVUa4elwF/MsKf8FPw55DuhI5njK5IY8KbVO1vuVXGY6Cm8aTYJ7CDJ9dLC6vIQWBNFlFGWWw/I+4kwZBdSAc9/8d5eH2ruP31Nr5en+nnf3d8zLHEaYzk8//aT9lDRb7vAqCsT58P4jl9OJ9Xbm97//I+8eH+k9qMSW+fL8la/fvnA+XSiZxG/fvvBv/+2/4bvvvuOHHz6pYqv1SDLqAbd8fX7WhdE7Xju/9S/0rbG3XUE9e2Xdd9Z9Z86F3p3bduXbyzVWWubp4UdSXvj1l8TL+gtzX7mcZ2QlHnMKiIO6knuJiluHyTAuEEYqNo4w+qbs7FDa5lcnNkb8ZI8PBsRXzgEppRjYpdgUyYuSv6KiTIQ4yMbQF95896MVTSmhtLOEpah2jtfAcaABUbH1OMB1eLdeZdsc4fDmryIlc6iWsDQu1CS4KoEfVto6YLVYB90y7DDi8D2sF44DZhzQPeYFYfQXGyQhPrqNriGGbB6eMq+RoVGltaETCdaRSv74mgEWgbuMHS3Yba+v1RkD3+5voCQXt72P1h0dBpjJ6p4hYQv2TRex4PWN9njvMUz3uDh6ZJ2
F6K0NLHlcIGQFNMUKHeygGuIovZ4N4rm0BjS9Glk8VNpubLvLyK/A73848bvffeb3Pz3w/fcz5wXc16OLIxVdPN6D+RZFxvDIVlt6qI/NwIIurMt6VpaCvWHjhIZFoTlO6hvkidzSAckNSEAU0PH/YlaXE7mP0ByZflaPCFIXvXXfd1pTh0Q7U/cbtEqr/N0swh3yBD/99D3FGnmZRKJImhkpc6HKQ6j3A5oxNABO0S5Y0uzEx2Xi41CMgXEM8V7dVMfbfE2FGIy64bjspk7iuJRTCsqwclbkhpuYswoDGwzvsNixMDq06GZr6gH/af4x0inxV2uiodVxxmf9plgaRZXrQstWuPXG48NH3r//nr/89VfAeHp6pHVn71UX12CC4vKqayGDzcaSC9++fOXLtvH+/Qeu1xvPL1/JU2GvskBaloV1U55L2dvOvq5cLnfc3z/gTdDKHgO859uVvjfuTid6LIYpZVrufP3yFW+VeZlpnrCSqdtG6p2v12emnNhCWqmcZWOtTs4XHj78jvYL7Nef6UuhxbA3xy2eUCaC+MEhCBkHamx68xZcb46DtIcbqMWq7McZpcU8BC4HZzzgqGEnPLJms4kmm1zYv2EH86O7H2KcAY81BpVV9snVOXDteHUHg1eL0l4rGybZARzwS6RqJXDs6GYHW6l1Iyf5+zcXE8oQzHH8tOShb0A26b28qbg0wByvw4NtgQ1KLfHe1AuoBX59/RrepujoeuDy8dkMsk/AM3pH6ThQD2uVMbcgdDBxCKcQHZmp+nYb1NHBSuoHRsuwVHBotsUMQviK2YwgTCUmdu/0OmAXCzZlpkcXYT70Fy6aYrT83uWGaVZUkfqwth7VYpQSrktAy90Zc4neYC7O54+Zn378kR9//8Tn7x+ZJ0Gs9D1otfPxXGJBiipuUXx0eS9FWUpKuuBIoQpOCXd5rok+LKqwrv1EQyrbRCdl7ZNc5DJbew3WWHSJeQoXZWLIHbqigYUgOry5s/nGuq1s68667uzVWGOftzaxbjdqDdjHoAaV8/OPd0yzc739jcvdZ0jDyiZz2Kx4UJJHNY3y3zswlUJJS8yCNDMi67I37IASj7lC/LPHvDJ5RLZaYuSPaI6h8+GQ1/grxbpaeI2ZfNtyoACakR8DUgYZR9npDbMpKLF+rPkWDLiE5oPKuUFzlVFMWXxP72Kxof2zNecPf/6Dzpna+NvffsYyPD488vyystXKaT6Rwu+qVhOU1GR0+Xh3z3xaxJ5LiZSU9+21c7mcVBRZ57uP39PdKX2XCdjHjx/JZkoocljrhoz8bodQ9+V2o1iG4txuN16+fGNaMvM841XCjVZ3cpHR17rpIKoRWqI2WArMki98ePodf6vO1y+/8PCghCwMHImRRhaLBT/679LbGDdtZ4TwiEHWoDXhkce8RRUwwSTS0LHF3xnsKNUWyQdtTpRZwbHTEfhDCHXi9A0MviJlsyrrPoDRENVkUnCN9OGbtVB+zxyMo6hyPWUxpnpVBVdytKeqmr3FeKwbPWWsNbzYsRmGD5b3Rkeq0eM51WjraxO99oCmVFF57/TkjByQEbg1Npku5Kj44+c0T1K62jho1S2lYT0RXk3dCr3X4wIZVdJ41bIuiWqYTu/7AYWBHzMaHR6jG4shecBAIyZZ85AddRzy25E4MCi7Hr78wVahJzHUxjwUHfApgXQU+uytBfOsc9Bho0kV/N91KKXiPNwb33/4jo/f3fP5+wsfP9xTSqb7htTw8drjAtzjRk82cgs6lqYoXlpw48E9kXLnsE736VjjKalKHfCOEJwOlGBzVRI6sKYsSM3jMldg2shdUMFzDFnjsNVBr8/HXNWptx18p3aFK/VueG1sN7htjb0lvLXDRqL3xrzApx/ek/PKaXnHELe+ms/oB+6tHbqUNAhJOKXk8CAbBz6IXpuPzjbF/h8XxmDfmRs1ReyvZUq2A3pyU/dgZkeHP2i0675GwFQc4K1BuDsfiZgGA44ddt/q+ARnkaLQej3GAAubdOXcY/LHy1FgHZdcH4WHyDIf33+Pp8L95Z62r7w8v/B4/8hyPsN1xUzBQefpRG/ypkrN2faVbV359be/8XD/yMvLC1MucD5TcuaXX/5GKYmnp3fUKpp+aTGE8ercthuVRkYq49ort+erHtJNWRGeOnuvbOuN+XIiAdu2cV1v3LadKRnZFaazb9uBgZvDbd8wZOeLQ5pPPH74kedfM7eXv5EuHfIWUnpXpWed7jvFRWlL6PCJOv+4IICgSIbIjv6K8WFgYTQYGyq/uWR6fJVw8lcnHg+Ts2RjViC4x+PTO7IfxqEXFfmYlzST4liMiYgjCc+mFFbeHZmI6cCcKGbsISQjqt2/w4ZHBxMMDIZ6nFcoZ8RKpuahARseMv1Qy4ZCIyY1NgK+sOaMOE1pJtJxEL+CvqMzULVWXcwNvU4NReXKUPVZGWGXEAeAg0UYTVzNUqLS45LrOpwDMzagtpjvmI4x7yJB9xah9z0d70UD+eHJVAg9OwTv/qCn4micqv9O/qbifHMBuEtt7G+6CMGDcXgavHuAn354x+Wh8N277/nup09cTp1cwCxR951+0zMoc8GRDX+PA3cyp0ZXnEzDakGjNT7zIHkYwqQhbFiaum4GC1BVIT1EqgGdGYmcZprXIzBHC1+HrnsJOxatg2E+N+BV3Z8N76Ke9964bcqJkKW79kfzztYa+96oW1ez1PWsWjzT3/3pHU+PZywHgcM69MxIhFSuiUNqKkqR0aNo6ENVbcd617p7te4WbDnckOMQV2U1ViBmWUzMCFqK4zqKweick/aJ4XiNcyKMUA3TzCTmt+N19L7D6PhwkhWhCrHuX4kfdlgVtbiBu0tzYqa0v3TAtVkeVcC+VlK6cPfwHvdEWSa++/QDlo3WnSkV8jnTWmW93cgnnTy1qQu6O58hZ+7PF3KW/QpZ+eUpJdbbC//hL//Cf//f/Z8PoWT59vIifm9v7HWnm7ZNb43ujWmWAntvFaqzbjeyZWrv9K2y9oZlDXXmoipla1UOq7zy/3vvfP3tCx3n/cO9GEJupLzw7uMf2Nf3fH35F3J/Zp4Dqw6eyrau+JSYy4nexbRIcLBeDgjBB/74yo6xOAtC5BqHzJvFOBaR9+gq9Hc9huWiPrZxNgrKoOFpeL4Pv5uJPMQ63sIYbBJdkkjVA5KH6ZhrhNR9xZlIOVOQb00mK37RoRJK5NFVJdPhf1T5FplMGnYlDxOGblhWGpbFieDsssnIhHHdoDx2fRbDttkRTNMTiueyI8Rey1/4vSItg2XTdwhOtj5vi3lJlzK57QGXDKZaQ1oIsTzcd4a6QytXLrdiZ+lzrTgpDsBBVW10evWDodN6FYMt2FaiGedXNkoPGvKxGBrVBZMJNnp9tG0gmV5lw9GFpV9O8O7pex7f3XF//475BJ8/3fPhLlFKD9w6jP663lVJE/2UMTbmJCPJ3bMYWEYATnqWw1A0GVQfCuq4nEih9wgagQsXT7GORYXVTK277K97CDdJRuZE79eYhxGJa9KDYGL96xKMgyyeRUfq825Obz
trXWMQPdPbFesL3hJtq/QVtqrLorpsO6rcOnj3vvD580dyjss5Dm0SJJ910BIdvI2s7RZ6H331PE2CrqIY0E7NkLJgZl7xfnPBoRqEC6IW1XXSGRNwkh9IRdbFEdXCsHGJlzhOM8jzcQFzzLBGhGrA5PH1oh5znIejqxipiAfDiRBuZqNE1PIwOQTCVt1IqbCvnbV9BYPTcsIxvn37yu228eHdO3LKPD4+Mqx12q755TIrhbPc3fPzX3/hbjmTe2cqYuf98P1nllToBMzeu1xg3ZKoerWyj57bna1tcThm+rbpmLlt/PLtG9++fKHhvP/wgZnpEGINnxtLJsvk1nSQ9MA7d4nxSgnGSJnBE8tlIk8Lv/ztP0F95jwHZc0b82k+fEz+zn3VEhxBIWPgOSq90dOl44AfoTNqB5uYCLyZqBHf483fHcK15MrY1WzAXnUALhtuSwPeiIS8Lv/6lPyAMMYy8YDUMHnYTAP/POYrKYzTJlod1D1dEJhjk2y/1YWMw7sf/GsPF1H3qHR9kAHFEsnuYIWeJ6j7MaytMajNNoa2jVT14lrASDUU91bDsKwN+Oc1+0Jv5rWTa0c++fh89L17b/r83nRDakBGJRbPehgHdqe9yWMYlOhOE5Tmk6rhY/CuL229H/qbo1voweJq0hDkFDBLViFoCdIEpwmW0z2X5YlUCvd393z88MDjw5llViVfzlUtfWu0pmeckkRmRuD6KZO6kfoMofKdejmKk1q3WN+va8XMGD69uuECVj0gQel83I9lLsbeYJalTKNJgJXkJwZyF039FToxg5Iz1Rvdx5AUzKu+XxQq1Z3adnrb6FV7pdXGvlWum3Pds0KR2k6rTt3bcbniUGb4w59+R5kDck1ZhQKESr4fIrIcCWmW5NQ6XFRLnkW9jY596IcCSwZULJm/brqDPRVV+QAfxh72qPT/zgXguCiVBT1YYm5OEioUX29xbjRZfFiLbkHfe3hyha776MxydBYN6bRSvBBzdfOEVuLV6mZsK+Pz737PtJzpaY46xzGF7pGKyB+3b9+YzyeWeaabs24ryRLLMi41OM+LxIhw+MClkpnOC23fIHKvi+N8eX5mqxslvWLDv/32N67boJplylRIZry8vPD15RupJC65MGdxq+u+YSkzl0AWzfGswI8MMZiWWG9KYqTs2watUJaF1hM2Xfjw3R/Zb3+h9i+YqdpMAT2JAmfHzd2D5aIK8XV3je5BqNXrhw6qp9Pxe8Ks3waUB5aERqCvVtFDpCTtTtQUI4NgZB/7YJNo8aslFqYMxx7X9zcgG6dyVrB8i2AhfaXgk9zllxUMEg9a7EjdS0H7s+q06GKIKj2+jaoWlzvsAA7UcYBVo7tmIMRdFvaD8b5SPIVdz9MTqWe818DkDe8jI0Hc70HVGwwS+ewMm5TR5Y1f4WNkiyo6Xk/2t7jtwI+7t4A1BninFh23qNBG8p7mPEaPChGKQS6iRU8LpDJxf35kLmemaWJeFh4uJ07nE95X0jmTceZFVWvOUwxrFSaUbZNPUFb/OVlmD/aVMnq0VvM0SXjZghqdnMFCs5xYAvRLaaFWBSulsK3ukVuRXLDkwNVTdJz9uJy1t+Tf1eJyMmHg8bwL0NOwvHBaZGjTpFbGlEhmHtOzN/kaTTmz7Ps1LDYQ06cp36D3zG27cV0727axrZXbGzvw3vS/P/zpkffvZ3LR55IGFbrrnKguSx1L6myyuQqYgF7MiqI5XRfaUEdbSurYfAQrRdcYl4aCw4hnBfguiCg2nMSCY43m6HKJIsdprtx3E3aI5Uk6n0Mr0VVdgBhl8YwPqC8KTQlKRwiSirJBLhjEDMl5LDoLwXfqZhNt32kt8XT3kZwLde9K4jNj23X5318mSjHOdxe8NWrvTDnx+PSEu7PtO8sycds2Lnd3MesYGKqML5f5xLfrC2XbmeeZsq4rv337G3/9yy98+vSJaZlZX6789W9/02R7Klwu99yuV+Z5YSoTD/ePEMHadGf3nbXu1Ns37h/vyFl0sLa+4CmR5oUZ43w5s6RMngt13yMQQwdYyrqxU5q43H+i1gv7/lfq/ky2FVkajHs4YAELsVy3v+skJLsPq2uhJa8flo4msLchL2LQHK0k4oI7Hi2qPnwfALu9aWithS5Ah6D1hPuGXHc83FvHqw61cYrZQBs8/fGqYliF0fMIXeqH0dooewaX/7jcTBefM2Yy/bWDOBqvFkQJ8cFb+BoNH3tc3PghMtL0J0LRTdBPig6McfSYIhRH2lyiBUlB78mCRZXReng91HbGdYhNYWBndCvxWQz20ui2+ut7ssJhXuCqPHPWtkpFrJdpSkz5wjJPnM8X8rRwupw5LQvzbOqU2LmcZ3IpCtUKG5hSXLBYDtFfQGHFdtkwHJ2k+OQlZR2iaSIVQQaK6TVI6kQL4WTrhBW0Tg1LRrJBdR1alxwmc3rGcvbV5SzmXSHhJA/K9qAQWBIZI0X5SsK8UiwHJt8pMUzto0ghitRjL3XcGiPvAlOnsDVBzG3vBPCuOYRzUKJ77/TNue4uB1K5z1BdncSHj4kffvyApRuWE6kvJGs0M4hZT0qFYm/gZEMEki6RbEmdxCz4rAuOJrpeZ6AeCcZB70CqYS4Zg2vAbA7xJyHqHGs+lmUUnDlLMLm1FZLmGjmNLv0VzRAdekzEtG93I1oVXQx9APzeYcxeAt/0gGCThz+YpdBUIbjO1RnVXkk+M5U7fvvrV369vvD5+w9M0wWjc3d3Zr/deH5+xrszTRPnktnrEA4IiVm7prZVyk9yOGxI5GpBo21svXGaZ0qrlZwLHz5/R+2d29evakVOF5o7JVR6OHjrB2eaiCncu8J7TtOsTR6tn1NpyVimwOot8XB/r5VZd2kayhTYsPQNz88v3Lb1GICWpI1d8o26fQWux6GullOwxKAcwuCMpzjs41COm92DlfPG6Z2hpNSFF9Wzj0N/DP2MkYMsx8ZxgHn4ycRGi2vMKJol9DCVS8SsQZRDb2OBRVd0CG50QSRL4pWnNig2sWmDZBBlv0eoEGYRWKLWdDwP9x6VmBhGsgqIA8rFJKruB247VGopcG9Vsc7A+t508LSo0HE7IDnDY+FHpkIdn4HjvZGz6aBzC+sSdRkKuJmwVqkesKVHhkBvGvYj2mBOcDffczrdkXLDUud8ucdm593DIzkZS84sJZOmzDTJGmU+RTJhbEb3otTE+DwtaCfJRjvn9CxIw9tJGyyhw4KKuWFZ+e8Jk/jQtH5SVMOe1TXQ/FXl3+Twq+cVpAVPVGRZnyKTgYA7LFlc4qEjH064puGyxTrIqcSf62A9XE5t+P8Q8EcnRxfIG1adFrwfRAXlLDda0Etb20NtPbK3NfvY286+V3pL7N6gvYrmAoGjzPCv/uHPnC9JB70VXI6NFM+i59IoZToOaIarKx2yk11UUnzQULUlXhmMY3CfjgOcQ2Py6jbgr1+OTjCjxd8ZB/nYg3tTp7t7hS5tiMecZxR1PQ5c769Q81hThyTJ39qGj3Mn/tDUBVnUnx509uRIaNd0cvW20VpjOt9xOt2x7
sb1+sL1+cT9hztKyry8POPu/M//y//E3f0j/8U//JcY8PztG+TM+XLRuqyDIj2SOO2A5L7dbnKOPV9YppnaVVwI6/PMdbspM7VkTucL15crt/VGyVJip1axHp6aZvSkVlDry5nnCLhv4iDfne8Y2a5OV1fhCYpcZPfWDhO9jlNRMtLeG8USu2XOlyfuHk68fPtnntd/YqGSqfq5Il+q9je1TNaDEXAshjeCNnr8DYsLYUAqJbDNqLbxWFLp/+fvc2CdesUhLgub4c7AD2Vb0q2Ea7lojZ5fOZYHNyNa4kxQKtEQS5+d4BzRX6OK84Yf7qaAl6NVVpW40/t8HOgWNMfuEUnag6kVdMfh4Ck7c/1cT10lIKGBGP3yuA3Hf3mToLHFu4nXpz9Pol2GwXmj0msSPGTauAO4bb0ilb2YUefLA3eXRy6XO7xWis0SVCbnfJ5ZlsJyKpznGXKjlIylRk4TS5nAb9EVlOAvNKyMrA5dyhaXui7pznC4TWHtUdNwJJImQGltFeJSSKHitSh5Mx7sO+JzDL1PXAaDHl2SSZwYgrHhBGGGiqlmWOuqVlG+89gfObqYYR6nkJ/Ovt24Oz9EvKZejxqZgbEL+lBWhkoZdSuJXgIm6U5tldcLw+hVh2atY0gcQ2Wg+0yvnVo39mrszWm3nXVXF9FDM2IG/8Wf7rncd1LtpKUIKgzjSEfve/gnWZbjgIwxO9lrwEK6nI+4YPOjWPMRUWzj2bzhbqNhk3eZH+riifAyNJMiBv9as64LyhK5d/beD6KE4+RgfHVHhJkWlFf8KGZAc7DUMnFCHtD1gK9fKy79vBZq897DvQGx2Wo2tSVtp7fG+w9/5PLwHaU2/nw+sddGj4Lt+fnKh/fvuZzv+V//l/+VKWf+m3/9X2tPTTNLTmJSIfbYIB6DhRWRR5fUaHuF5cwyFcq6r/Rd6XPNVXVsVYvUcmKxSQcdHsrDuFVsfECj5XRqbYe97gAjk+VQ0boGaC66G+ZMcRBfby8kjI8fPlAfGt++Rh5ryUxTpqWMzR/Jm9Nsxfsz2W6YbSTCosMBK68eLlFh1K4KLJnu/lcDLTs20zGmdqmYNYA3VfJwuDwmjub+77oMece0qCDCL8gmUfA6UZkRcNhrRIoUpKPKG0qKMYGMA0gh4HhXJsFR97menRuiO3oY1HkS0ygyO2RhHWykPjqp40cwKsMhMnCEk+K8igb1hhn3AD6M1XKUP40K6jqsh5V4hz7H0ztR94l1vakazTBNmcvFuJweyHGC56yQmIf7hZQaywR353fklLhVAdzn08RcMmVKTLORpgkzxYamVME601SYyiSLjOzAFDDRwNn1cxTiguzhsUO8qCKiH/TbXCSqNCuk7OCJHkaOltMxBB+CqTE3SwFLjdnZq7+QiAmpm+BMG9dvQA1mgiRMQ+rcGy2p2s3I3t6i4yrdmM932ofdwTJ5aGMY9hRR+R8UUMOtYEHBpodNRRyEdd/BMq1vbK3R2ip4l1eLEK83rrcXbldn2xNtN/bW2W5O1QiLkVn9/Y+fmLNjWTO6lGK+ZyCWwGui4jB/TPHvzbKAWxsMry4Yr0WuDQMZC88kGys7ipxhnJkTuOi6NbLOZX+xSe2dA4pK0XMkOHJE9CMZyuoxixhkjXHYq+g7rhuOHeXRPCZigP72jIqWo8fBnexg6tmYvRnBaDvx/Xc/qqDImdqcL7/9wvmsDJqP79/jDn/+wx95/vJVoujemKdFc54W84+kHuo4x47n27lcLtyd71jXVeFEdxdKXTVP0CGoYG0N9YMKZom+7eSc8CQu7sCW4xkcdrYe0E/AcHF59MBnxGMvlkL70OmWyGac5jlUopnzaWJaZkZA+O7Ovq+0dObu3R+xvtG2r9T1V7p/pdiL2D8J3IeOIuYJ9ANG0EYcmKQGZm46G8c0QtzkwI69knsOe47YGG8cVV6ZOiHoYbSS8r5JxykfLJJgYuh1RKuKqlipr6Xe1qGiCyjXIZwLKCleB/6qXMYTjRTsE4s7vB2bZwzftC5HVxewg6lr8dArDIvxpL43PrNhlTLs08Pm2HVoGzp8TOcTy+mMMVPmmWwz1+s3MoXujUcW5tMTy7IwLwuX04W672xrY1t3cilMBaaky2CanNNJOoEyJRSrq9SHnBFnvzqpaD4wMPrXLA/ikiTgm1cmh7uRcyGc+GINjAIgkUsMRQM/7sMcDoXUpFbxoyC1UdzGZapLowVMm2Po/mo7k6KZjY6xe8AvcUlbwKQxtHRLb6jHShNURGaOz0gak6PijrU+WGRmOYRlb8sDfe4jeGiY3m11EzEhqLted/W5oag1T7Ra2RrszVhrYl2N7bZx22Gv+h8d7i7w5z99YpkFZeQUAxAbe7AdORCxq4DQKplFUmOWk26rDIYeXbOBFmvbjvc8CrExNA4kISBkC1ppa1VdcFtDlSzvLyySGpPRI0Gyth4miJM0EqaO1oKG1i0g1oMZMi7lkbXBUUzr5cclcTTlpvXT7fBdC6AjEunkjuuemMsdeb6wrht5mrndrjzfVva9jhwx3OD8cMf/8H/7v9J6Z123A57beo0LUDfe6JQ9oHOAl5cXci6knDjZzO22Umq0o+5ObaqGW1NjVmvjdruy98ayFO4u9/TaXqsdH+rpcnQUBLbVe1M851T0Yecso7c4MI0c+gZlMzTPeBWG3tlpDdbaKLlgDktSAoX3mXn+wHJ65PbyC9vtF+Z8o6eNbBL7eRtPOT64sK8YD3FY8Ro6RMYmUyuYtVFTonslDWaQx4JwsUj60GjEZmxYXBxx6BP02fgonAFt/f0Fiw2DOrQBpZbCaXSX8+rAOMdB37FDKOTjwCYftEggZih24NadjnvR4eVO94F1G8cO0wSUmpJYFwMopWExAExvTAdLgoe7zLKceP/4I+WyMOeF2ja2KmZWsQ/kbEzTBN5oe6PHxVt32cuv+29cr7/x7t1nTndnzlPicpkoE6Tsyls3aLWq2q8rfc+kqUCLDsSI9jkuxuzYgIwM6TVSoZiYRsViHaaCEZDGAKwT5KGyB7C3sGilMGmAHkwlPSHZ6udxAXQ/ig+IosKGgXsw1UYHHB2ulOpGTyG8dFOhFoeh/m/ARyUuABU2KQqPg7vP6GATYuu8WqhX90PIOAqHve7qILpMPcV+itXqw98K9q3Re6XtTt3k9bNtcNuddu20ncMh9h/+8InLZSEXe4WlR3GT4tIJ6wnjdYCsGcKrIE0wQFKxx8EdgTyKUMDD+Te2/euYRVohFbINeo2heEb+Yzk83KL4zWMg73EJgfAqCyKG4yk40v01FsBiL2YfBeUIoDpOGv1q/Zij6HwIEaCpWJEzhEUxU9V5d1Hl3336kefnjZf1Nz5++Mi63jjNhXXd2HMVQ2yvPD4+MF8Kf/3r3/DeWU7nyAPqTMuE9cZtr5znmdZEjy1lwl3ixdHJTNOM204Z2oE9PJtaG7hlSF1SLJA9UriycL1RpeWICZSDI2z7ymQTlrXwZvewnvXjAx4CJ70gaSm8tuMDEPxpnCe9udY6KU/cnp+5tp37uxOnvLDcfaYs
30/YoPFZ6HLdv2W0+mZZmTZXEvGNFkUdf2A1dHLB88wjCxLkkLSkLwE5zlfr3S6K3l8uGfoIj7IxFERGMpZCROyaq1Rahb6qIE5ZS6XI28f3nDYH4jOc7EX2VUEz6dPH1gXAK01scZt2n0hFy7nhPdWfKeMpxhJtAvNEPwoSXutkVoj5QVXFppp3N3v+fjTRxlJTSNTyUUYXaVWoomKSTbmPGmUZCP6jlQWvUmrhAVhZNmZGpMx7DdbTDFMNTF0PWaZicEzjiMWYWC1KkFHpVbJzS4rvi5dJsaogDKy+ho1MVaSwtVkinStYoLF1ib89yrYaAWa94y7t4Rxz4e/WI4vf6O2F6wpFBKmeVEAtzVnwOuiVxaoNatxoTVkp26drAIzxPitNbCO4iSMR/DfAKv/EwiP3oiwqVWw2QldVh/09VCjNbxCUyYbEpMUx9xhnEzWaLe+VMHkLZYyL/J6m0IZCPSSk6OQJPlPi1FwsL2HN4/3PDzs2G0sofM474BFQqPY4VzEGEnAk0N9PckNq6q3rrOAUehH+f3qB6yHhkyf1shriT7q4ZzFMgRDqY2aRYeT8pUQerV1kaUrbZ0YlParFgqSQGdlz3OT6Okv+/q/pcMX1XWtBtsKxnmxN0G1Gs4SjcW7nhQCP7cMX0/81sQLLlfJQa+rHbgWKoFmrMYuo42H6CjWIlNr1d2oketjkWwHKlRzW8Y3LRC0SimGtpIB1qKHTGkYp1G3OqkafcdtNQUszNkQ4yO7zZeE4Q3DsCMjaW+mNZZsJI8G2Y/UUrBO9FkyRcKm3wpyMy8sNRGMIWUxNa2mQcnM84INns5GvXRVp16HCZa5ziKEs56XpydMgzD0xG6UOAgrgWVrBol+jPIa0wLG0Pcjdw+PfP70USiwwQdyWrimRQVtTaJGj0f6vseVLPnAXsLa145EVjcVrCWqPNw4XUrWyvHpKAexMTi/HkJyC5YlU0NjWiZSWvBBYk5XOMt6Wd5hDMG+3pYuRi4/fWDYSXKSsdIVrZ70xlhilNHYWnGFfPr8CWg4p7bdTSadECNd3/GLr75mmieu1wun05lpmkg5UbLAEtO6J9CZ1hhYaiYWQYStXam7FlNEkHQ8nTi9nPjqy6/Y7feClVYJPHHO4aq/yfzRcbYsieuyEPtOrptyrksVIWDnItkkfAz0QVPEbIFJpoU5JYJ3bDcb/TseOzrFI8GrcVt28gBUqiiMV11AE7jHW8vL5SKsLhdwzjBdZ+r5SghRDypVQhSBDS+zuAk/vv8dLuz48PlP1PSEMQXLgmlBBE/I8k3cb0GWyEVUxdXgsVSHjvwF4yy39q+IvTwNjFWLiZYQ9581Ca2qpYL+tfaKowvOLIdqBTyG1BIlizdSouFXSqhZMw5EQGdboVZPWmYamSynETXDItIOug4OO3h8HHjz8CX7w0gzV3woWJMlydB4MB5nI16fiXXyXe2txRqhQbMKZ0jlkSnKkJrkNcuCVqaLFZo01hBClPdqDDVZcjHiXFvENSGlRRa6Zrppo9biYwygqXIrjt3WQs16PdcDVPtZIzsO52QHYdYPwLzafa9abHGGNqqX4hbfW/WQN1aX8GalmEpToHJN0U+sr0X+Iimrxkvff2uoDYnAXM01bDHaXNhX+5fbUkLtU5oq0nXv19A3V5tSa3Vi1eOo6dRqG6RsMWbDt7/8B7r+DQWnRavgQhDBqPoxyQ5G4N/j+cyHn37il7/8tb4eDWmyls4J67MYZV9NiXO64l3At0Cz6mhcwRthcjkrNHfjLTH2/OqXv8KotqXWQs6Z4IWIIEiCiGxrLqJjsjLV5ZTY7Taifbucrmy2o9j9IgzyZZ7oushmt4NSmCehuNaSeXl+wQbP0MlhEbxEmdbS5IA3kEthM44M/QC1YX1kyvMNCgixl6zWJVFLoe9H8S+ywqNelpndfk+larcr239rLDktjJuR7bBh9ZUJQSyRU1oIXuy3xUpD7KKHcZSFEo2WEzVETYKSjjiGwMP+kZoOalooPv2X84Xnl2ec88zXK6frGazsDC7TlXmeGAdhfpErxgs1dWiVz09PzMuCC44R0T3kJXOZL5zPF/qhZz/umVJiHAZqXaiI/cbQiSeUYKHCjGim4oJjZ3c0shwQxgr0N09sh4GH/YFUiu6XwHhHOp2xunxepzRvhf2SahNKaghQKnMSm3dTKyFEfPCUZaGu3lSxk8OlZEqTBWdNoiodYsd8nZjmSr99w1fdhs+f/8r55UcwC80srEmExjhoTpbpuhwU7N9Ss8WULG6XKvLLRuAulaoRVmFeTdrlVQoL1TScqmslG13tVVRjAVa1CUJ5zcgB4NbDu0K1aFd6veEYNQu81WpmWcTqAy+Hg4+w3cDD4453b/bsd5GuQxoSM+OseH0ZAsF1VKrQPu3a1Kz4h1N8qmK9x63TlikY7ZpX1e86Oa0oU2HtoHUKspaaEzUn/vzdD9Scubs7CGxprd5blpIqa5yqkAZ+1vFLtZLvCeoWbF7hpZUmpKe17ETEdsSZqAr/ogvyNUVCCrC5pcCJ64E0e5qVkYSd5Jqw5WqWF7CybWXGadSShWKsTUdVyx6nqjpTX0Vua2SvQHg/O/z1G5dqlJTgbhd1NcoTTdG6a623A3fdPaFMporj/Ve/Y//wFdiBZZ74fHqSfUdt7IZRpqIqTYCz4oVlrSVNC3nJOGc5n0+6GxUhcVmBxlzBWebjzPNyYrvZEd2Vzf5Aa+3GUJ3TwnWZ6M2g+ScGyrojE+KEaU1C1YyT6GJd9B9PJ/b7nUyfGfoQKUPBu04UksaKpN5jiL1EJuacWJbEuBm1kzOMnahaq2mcTleul5PgXF46Nmf0w23IuGyrZCe4IIdpLUzXk6bKRWLXiyusE6vf4+lI33W3ERLg9HLkOk1stxt2uz2bw6vtszBvJOi+ixFrHafTiRiDhLobx912R6MRjWTDOsRGwznL6eVIaZlvv/yaLvYMgzCASmu8eXzkenlD34tZ4fPLC0/nI+fThd244fl4whnDkpN2VoJZGuvY7LZsdhs8ntVrci4TP3z/E5txYDfsRG1thTU2T4a+6+hiLwycZcavedHNcrleiSHQxZ6c1V8qF07HI9fjhbTd0fcDbRE/llLUb2ee2NqNBKH7pvbdaj5mKnWpYBvPpzMUocK5riMiS7RqPOfTkdPlwuPdHfM0YywKiwnjomURZC4lQW0sBaztCN0jM1e8yZhyxZFBISGjjoFlPQSRwiHdvsHp8GYxNx2EGPA1FvF8wJiGbwKLWsSxtOihVI1AY4JlC55cjOQdGJzufKRrEoaINAfNCPOsymiD6Ii4nVDOQtfDuJHisB3vuLs70A8dMTZiXA9M8cQJcYM1DWOCHrr1Z5h21YNf/n/wnopV23SFAYgCJWiRk6plpUtH8j6o5raDaa1RU2JaFoqSOJIRxbQxkGoWyAZEiFblmjYlNrTmoKnxIvZmL22sxLCuB6w1BmPDbUIzRjr1defE7V0hqY9Kfmmtqq5D4Y7SBOrQvUnWDX9t4n7brFyPzGtAUylCS89tPdgr1ojNeNWd
UakynTRjqTVjcbK0Xr/JSnEoqzbC3ppYmQ5k+lmbDWkyMhZh2dWWKRpHW5ph2H3Fw7vfkIvB1MLxeOJ4PPL+zVswDmO9+J4tmXOa5JwABt/z29/8HVXfnzhUSB7PXLKGXsnZPAwjT63yp3/9F4lS6Ef+7u9+R86FTT+SW+F4fGG73ZHzQilB9UowzVfmaWYYR0mwNI5puuDsRoxbc2Wz6TFA9FE+dyPmrn4pmXlJjEPP5Xwh2ULsA7VAyoVlmnHBM8aelfssQhq5MbwPsnDU9qauN5Kusiownc84Z3EucJ0Xnp6f2W83tHZmyZKWdbi/o+sHWUpHEeVJESxczmf+83/5r7x795Z//w//IA+E2gA4uWtZU52gscyJRlMrXUiT8IzpBtI0yQc7VPJVvNbHfkOulaBj5JIyx+ORzWbDsBnFcsMY+r7nwTlGfX1pyUKBDOIFn3K60SG98Temk7OOVDK9j7x/907Cf/LC6BzWOXGyrVk46IBxRsbJeis9UgCNWFI3Xf5f56v8/BhUHZ/ItTL0HSmLUGrTD0QvDrMuiL/+8XpiM26xxjLsRlqDoevAikI9WMOcZA/jjEAc0Yr5XWsSa1qotyVXs4b5fMF6RzPClnHes+TGNFUe374hAOfnT6R0xLoF1yrOqqmyWReF5RWXNkInxCKwW1sjaRvGlhvUkfVuW21kVi+lCiQ9+CRfWWCrQkPC6DMNcUttVfKnCxIKTxWOvXTmsi+JAfrRsBlh3ARCrGw3d4zjnqEXexYbRDchVGSPi1YLlOYttHU3UhR2Ktq8e/29DppQOdcEiMZqoyF0S6MYeFN6cZOnWWFgSd+b5yz3Zsn4LhKaNn2tYlfdhF2zP+Qwl+Wr0725lyLaRENgjGc1IEQhinWKMbrTsk7dQ6tOR8YJ6WGFAZv8XGE8yUG/TJMI5wx4HwG5RmLNKF9nalXbcZkqihpMNoUshWEk6Yur96z8uHprzmR7I07KkkBnWcvYLfRK/uatEDWFxBRpul0n1XbLZ1EqpXn67i3D8CV/++HIm/sOZ4TGerff43zHssxYZzkdT+Scud/fM82TNM1BzgCn0KKNURYETZqgeUkseeZ6uvLNV1/z5uEd/o+By/XK5TKR80II/c05t1WxDjfeU4vA5ev0Z50TZEi1FiktXCfxk7PWU9JCaulW3Kd5ZrPd4D/+8CPDMDIMHeNmFBFUrcLjvtuTNr2OpWsdsFznq0wOSLzi8WXm8c1bMe1r8nBbb5iniWVa+G///N9xzvHNt9/w+PDIOMom/XQ6cjmfWeaZEAPjsGWz3bJK5GuF48sTqVZ+/8c/iJJQi4PYO3NjU1nrsNqZ3T/eQ6vM8yTMERuw3km3O8/U2jjs94ybDWt9i2pW2CrYIMpqgOkyYZ1lXmZOp7Pg+rqPiV3PkhNlumKMUIFzzUJhtKJgba2RcuJ8PrHd7cRwMC30vtNCKh3PnBLOy4fjnaeYona/vbxG22FaZZqu4karN3jwgc5Hlpz4fHqiH0cO+x3BFWWSNdKShDpnHXlJBOtZFsnS9d5RdeQX4zfDnIsY46UEwdHHjuDk+gVlqqzsjpIz1FWHIFOkD5J61YeeL774ilIgt8bm4UuW88Dl8pFWknbIRa3oRERXdIKUe83IstGIUEusSCw1S7eu4IgcAsZScYLX+zURoGLLOpEKQ01eu+gbGlWEfSguKycGwYlhbQjQ9w5rGmM/sNt2hEHgzt4jzgBdh7FJ/LhurqGSm2KMZkysLZOR92KtJhfq4jRYqFoobHOkVoX5osr7FXZriCnmytWvRayxS8vkZQFrmM5nyfy2sjyW+0uKgm0V3Gpfve7CmgZacbN6WXcR0DT7vdyaP0k4E4bPrcBoPghtTb3TZDfrMEU8cltbbUN0/2FkylvSBWM9vfC/WZlaTYt0bnLGOGNkB6UNAKsBo+6QDFYN8QQ6Wj211nVWa0iUboPW8u1nAIKk/IzGa3RvabSBsVY+B3fb1RQtEAbY8Pjuj3TdA8/nSfQHZmG72QnkbeQwXr+/c4JjGW8FdjPww/d/4+7hnrEfb0W0ITulGCM//vADp9OR/WHH4fDAm7fvAREcixZKinuuhcPhgVwrsYq9Ua1iwd/Fnr4znK4XrIoK05xxbqaukLMWfdmVSrPWhw5/9+aRPkay4tHeOM7TJJa0TtgBzluicRQrnUAIkVIzT8dnptMVaNzfP0qVNla81PWi+h62fc9fv/uOw/7A4/0Da2LVdiMwyGUW9lJFaYWtqYdQxhnH2zfvZCfhLTmroEQrZVUnrpwype9xRrCB9UGapit9J6IpC+zGkaaGY8u80EWhex4vFzabDeNmpORC10W892JpDCzTzDRNeCuYr3OOsZOlfK2Vy/V0k8r3saM6Qy4L1jqWZRFfFZ2UipPgJavxna0lnDEE66mtYKsRjNIGzVpA+OC6cCtVqJzeyP7CeEdMSd6jsxo56eXhQmi01QYV/FlalCnFOe0cnSMtAiMZY8jLzDzPYqqoHBopAuKblBU3Xn1uchHB0BraIwvDRs6L4s9VLcYNvt/TE5muJ1p9ERzaJYyG8nhltBSHwkjcYIBqGs5BrkYOKtOgZpyRYKGlGCCCHhIlV5zRuE/k9Um3KEmMIJx6qlh0DB30Efb7Du9k1Rq6SLSVzX5gswt0IdJ3PcY2ggfnsuLb6oNlKpZeH3b1B7WyxPVObKa9NbKQJ0AVzy9ZpuebQthYpfIqoUCW6dyW8gI5Sf9bU2W6zhjjOJ1esLan62RCLEZyLPSEV4qvuR1axjvWeNLaVg2AlF9ZJMv06azWlXXBvN47iu1rObpBxIVV84I0VcbIdAYsVXZ+Ta14WkuU4qFZcpmlKJp1mjL6/aQwrM8ATTQO67K7mdeFel3TLNZtvrqnNl53E5I/8Qrx3AaKNbvavWpSBNtfobhGLYu8ntLz5svf8f7tr2m2Z3sQoSbViSi2SvG3QSbJvpPkt1bBI3tByehIHI9HduNOjBAncYrwQdye7x9EVxFsYD6fCUMv6ZAKt+dcCMGTU+F0euZwuL/tboLSq8UdwLDpe7x1MlkpJySXwjydbwFqVGGKGiO+fH7sekIzLDrCnS4nhZgCtIqjkRtkhQKMhbHrWWqCLJztOWU+P33k4eENqSyy4PFO7Jqb45tf/5pJF0s5SWnPZcG5QOgCPZUQRDksKXPS3YJh3O/FNtcVgXRSJsQ1l3ml9SWOxxPBB/EvyUKztSFQSFJ1lwW6gDeWXmmyzmXaJAVhHEf6rsMA3333Hd47vvnmG7lvlI9traUbBplwTi/86a9/wXjPRg35QMKJmpOxOdpANZVh6Hk0jzSV+ltvRQCn2HRrhr4ftOOw5CZLamc8L+cTVPFlMUXwUWctJS9A5ZIWmJt0CqFndb0Eeb2mGqblytPLCxjY3x8wSbrS6D25FZbTlSktoq5ujawivBAi1VbWDJElZ1rKXNJMmia8C5ggOR+2yRVoGWrOlFq4LLMK8RyuwlyKBst46ba7e3KamOazFAibKU7orbYKZFeNTJXoAy7FSm0qjByW2RqselxIZJb6E7U
KJUmHrjs3o/486PkhmeOwGeHtm4EuNIa+o5UzPo6Mw5btxtH1huANLliCFvhXuGj1l6o4Myg7ydFMViRDFuNGFePowrwZZXnVwmpBvh50zmpn1wzVFO2YJXo0L6qURvQaQjiQqNjd7q24yIqgQOBPLf5i1aHmf0Y0Mc5YmZ61OFSdtl5/iaCvrDynJvtL9a2AJswxazV7nAJNoz352cDRJMpYWKiyu2pV9hMpSYjRGoG8BkY1vY7rZw8IbVpXEVX5xkbT8WozqjfQug23oihWKUp7rU4niaaT0ypclS6/WYE2+dk1oSE055vPk2fYv+Pd17+meElxa03T/rRwNmNY8izL+WCxRmx3aOg1bJRcePf1F3gXWDSv5+PzJ8ZxJFRP53t2caA5y7/+8z/z+fjMv/+Hf8Sr9mzsB6zzsjLoBuZ55nq90kchQEj0gRB4KmDWSAVjsTaIs3WF48sL/TgKdRrIeeHjxyfSMuOTFoToPFNacBX2uy0+eByG1GSrj7MaxSmwAq0RukBumWgMMYh673o+g2l03UAIQZZnLvDHv/vDLbp0ul6oORNDT0qJ6LwmTKmfjpOxU0z4dPmlLpGgGdGqBKfBn//tL/zth+/4xbe/5Hd/9wfZY1wn9ntRELrg+fzpI7vdjr6TzIbgA8F7McHzKydCHtD3795JYclif94ceOc47LZ4H3k+vvDjjz/x4fNHbAiMmwHvvHCZW2O5XPExkkri+emJ+/s3EnSkvOQlL7IA04kqGCm+3kXFmhvBydcvaYZS6aK8j950XK4nhmGDMUUyklvDNnnwvIkCxTi5UZc601rFBo+3lvNJDmQbpMMoS2LOC52PtFpZatJJqpeDplau80wrwo7IufDxwwfKkrl7eKBb/V9UILgmmuUmgr+GwVZDKkU7vyo4MzKJeN+rn9YiAT25YoxkkM8li5cVkvOhulgaRvk8cgjYqkWhCDGiNUOqTT13GmQoRuCm0EHfGbrgcM4xDB0xWrZjRx8b3mV8MET/hhg9IcpC2vtGCEZYZVZeA1WOauuEmIE6d1qnmRgmCNxkRcFdmtAn10zs9Z4rLSuvX7vYZtRfjBX4R/JcRA1+XWZAMstbWQ86QQhocrCuy+liFj0oBVZbrSnW7n4VwlWN2JXKKYZ5WHezmDZVQn0E82+3BkeCbEWdjlm7dXN7NlvVzYDRHWXVZb0zYK0WAYGImnWaVFdfGWcgeSm87kHk9cvXW4XUGgIDV/1f6xqkKTVW1Pr5tu8RbzNefwYVr+eMaG30e7SfCwYbqWVqtfhwz7B9x/la2PUKYWl3r3MfrRY524y/QTjGwpRmaipY7+i7nlTEhDKnBWM8+8Oe4KN8P9so1jFPE+f5yjBucE6Kf4wdz8cTP/30I5+fPvOP/+EfxbW5VnKrhFZZqqTdnU4vpLxwf/egz11gnsVHb7vb8vj27bqawTnH89ORoe8Zxx6fa+W6zORc6KLIwX1weGUCgSiNReDVKEviPBV88Gy3O3bbPc47tautDONG8qwV+1yFHqY2Um1EFyWjIXYYJ1a2QxwwViCdRiPagVZhyTMhyMI25UQIQTQGwQp+3GBZ5OJ+84tv2e53nE8vdMMgi0RtGafrlU+fnzDOsttupPM0aAyo4XQ64WNg8EJL80GzL4CXlyMxBunKnYgKrbUc7vb8JvzmtkRstWGCZTmfZRJojcv5yH/6z/+JP/zx79nu9uzGHcv1SjAO1wUqhpYWjucj93dvyEjYiEBcYgG8G3ecpzPTPDFacZwNMbIsky4VLbELeONZ0sxsZpxz+CZdYmnSPXQlM00L3luF6pTxYuVaZisH1enlTOw7TBGanBzgsnuY55l0najXhO09wVtaqSzXhXm+4EOgWDk8c0pyYOoDWEF0Dk0evr7rRFQoSyDJcggDtMScLrQsE2DACP23IctWI8KjJu2xLpll0qnIErUViZHNNYuBoEMmgQC7bWC77bCmEpxh6IIk+kVHtGJZHwPiXRYswamjTxHacWfFGsSYJp5P3osXWBBK9Y2rZOXec048rCQOayXxriCK7N1KNVgvjJlcGssi+c9C/5aDeikSd7nMF67Xs5YXIwFUNzM9OUEtjWQyrTlRkGhz15pQiVeRnHTh6CtWaEc/v6KUUwzkmpQFpHhRsdQ2Y0yUTtuKEK81dBcgRVv2fQKXZiqpNJo1SsCQn5tLIdpOvNvc6s/1qpoXvYIWpyJNR2sNiryXjKjdRZsl+wu7khduTCZtAFu40fxlDpPCY5oKRDUtE7NOR6sDrP59nUJ92LG9/4bjc8b4I3HcYxp448gV8jzLpsw7XIOXy4laKm8eHpmXWTRn0WMRHVc1Ej0aQsRgGAfZh6Y8MZdKtJbYd/zx7/+e4DUATncOP/30I3/723cALKUwhsBcZXfph1E+w1q53x84Xa/M86xwoyXGjq6bxJ5cG/qmHcd33/+NPvb89te/wXd9x49//Ru5JN5/8SXRCZc3mcrqB9MM0lU3CTI33im/3uG8YGGlFKwF7z1uu5WqXkVI4lvgeHrm3/7p3/jjH/6eoR9F1DPP7Dc7jLPUKtv04L14QTkEXmnCbvBeAonsDU4RfNwFxx//4R8IwfPp40cu14nd/kDnoyaFNeZ5ll1CFU/6Tz994Mfv/sYf//jv6MKGw/09XexlyTxnak6kKl0rtWBNL15VOWGcZdxsBAf0kdP1wvF4JNeKU8/7bex4vl4wxvG73/+BYRhwWJZ5Itcsr2XVAJSCs57PTx/AOu4OB4INMtYb0R+0BufpwpwX+tjLlEGmM7KjsU1TBYzYBjtXoROGTmsSTm+8x8f6KuAD0rLgnaMLkdaKYJM5M5qeuQhkJMpsy+nlyvV8whnHZr8TaKCJxXlqFRuiHNZVrDaqYrm0JmZry8y8JLFyMYa6LExKnwzO4YwjU/G2o3eRVBM1z+SSVah4xrdKK2J5XZR7T5Wpoi1J4UqBu4KDobd0nUwL202Hc0f6PmBt1rAqS7QOGwzRy+HlHXT9iHEV79TIsomLKU0MLL1qHPrQieml0UO2QXOrQ6r0pzcIyeprK7JsN7p4zU1KhgW9ZhlvPIVViSvHd06yzE438ePaNSMFa82QkNlGKLM3N9Z1d7DuO3TPoYeoWX/PyGdTmkyDt31SEUoqeN0PSTftnEr3FfOuN+dFI/bpTTUGzlA106I1jbRthWaaJDP2PaiWuxplzDVuLKbGK6mgtfz6Xhs3mDeVqhEEa9io2OHXJtbvzjhsa0oBDkiYRdNdkARR2XXBbtZFtlyjqs+LfFY9b97+jm7zBZgL42bP5XSi6zqcF1jHWff6GRURqJVcEDJTxLtALiKILKj3nZdIYufFpXpJC89PLyzLxBdfvpfm2Aam6UptQjTBQd93HO7u+fKL9+w3G9GLXU54HwkhEtdOwDk2m5ElJdnjtMbYdfTde1prr8XLRXwX+e0vf8XxcmVaZnyplYc3jwQfZNJU6pcsIeXieCf5vtZIwpfcig5jnWK/r3oFa8UBNqsVxm63pxlhgnjvOV0vdJ2EKFi1hWi1kdLMdrvT418dYHXf5t
XFsNYipl7GEHXhHEKUQ+x0ZJkn7h/eKqOh0UrmMk3UnME5klJKHx4fyHPCa0qeZR23IxipyJtxECjDi90zxjAtC5vNBt8Fur4jlUI5nSi1KtNGYDesJTrPEuHtsKWLkdpkkSsRowGrBcJYobX+65//zDdffn2jwq0CotQKzhq2g9ijr4C6s47j+cjT8wuH+wND19GsIfrA9TpJOpcxHI8nzucT0+VMqpnoAg/39/RDJ51/zkzTFeeCLN5rY1r0kMBxmSaJTnWGYbtVGqcEFqVWZfJyXjjd2hgYg1g5FIEo1kjM2EVaKaS8kFO5YcrFOlmaKy/fWItxlmA71TVUWo6kchWc/2beJrBNLUlYPxliB9udZTN2bDcbYmwE3+iCw7g9xspritbhvGO+PBPqhmIicYgEb7FejP5ylTS7PohDa6liE2JdRLZ0Bduk+7wRv2vB+NW91d+sS0rKGr6kWSFrTnp53ZfUKlh3qom8zKQkxpRYR8tyCDvr8E5YTQ6jjCRdQuvytiqeLwuAcjvw5JBdmywLrt1WWLL30YWy3s2SAd2oeZLdECuXjJvLqzGrz5ShUZSSKzkaQuqQiT8XZfopAuBwOBcJncE5rwyzRjHqqFrXycTeXmNbw6L09a20z6qTpFh8N5niqr1ZbOgCglylsbDrXqBajFk/Ob0qKw3dGFpJt3uyNUuqgTePv+Rw+BbrRnbfvuM8XTldzgybDbk1iTSwlrwsLCWJVZAxbIZR91eGksqtUQvWMi0L12litxeY0FrH5enC5XLifLlgjOXtu3cUK9OsN5YYIq0Z3r//irv7B2LwHI9HttutEhEan5+fGMcN3jvKSkxyjilnPWdlvs2t0cWO0/lESleMMbgQ2Q5CwvHPn5/ZbAZ6Z3l6fqYbevogYyRJunXjvS77ze3GkqFZDkfnXoVvEmuaqVWCddYVunOe3/zm97dpY5X9v8YtuluxabXdgtwtVj1OpPhISK6kBxj9PrXKctj7t0TfK/W06nRjOU5yaEZnKcvCsN2wGTb4GFlKlo5Lza4M4K0VqqoxGsYjHOdSC7YTy5H1wH759Intfi/pflVMzHJaqKVyGLYsRW0FnNolbFeXS1G/2i6yqVt+9+vfMG42wiRS5tHp+kJ0UQUx4oXTnIUKU55pxnJ3f7gdMjSh20mXBtTKy9MzH378EeMdwzhIBKazXNUaWiylI9N0pZXKMPZynZsUy5YryecbRjqdzzw/v9CPA6F5lpJUYFSkWBqYl8z1MlHzQs4wxEDXeTCGtFJZrWDwzlg0RIsVBDZVikRl7bI9Ju4JbQOtyHXIEylNtOWId3C/C8Te0AdD3wuN1fkLfdfhg9A4UeJEK5XSOWyp4HtC5+iGKHGwnR60ii8H38kuzFocARcs3lqsEztmGkofVtgCq8VLXEH1oaC2SiCKkhehKzYauVRaSbRFDuhaDWmZbrYQTgeACupEa2+d7Wp1cbPHAazxr7YRdlUorxqCV28rh0wxeiTqpV8BMf1urenkhuZ1vEI3WiqkEbjtReSJzU3wqtIyxq7sGoWOdAfSWiOGiPVOss+V2vvzgltZF87otWz6nuV6Ytadh3stgqZqlkeTqQOx+0mKbwVrb4WuqYofs15BZQwV+RkrUQICuRjuH7/h21/9A8ZsqavHWuw4aJZOsAjLr1ROk0A7d7udNDVePr9WGz99/sBuu8WYxny68nw88vHTR969e8+3335LA8Zx4Hq93na7rWW8D6rUXs9OOJ9fWOaE3R94evqMtY7NMPLpdOUyXdhsttK050zX9Xhr2fQDx4uEEY3Dhq4TUWTXDVQt5tYahmEUrY1pErqDLoq9tXx+epaOJTjmaQZv2W42N7XiDSXU5Vcpa7ciH74PYkNQVdGIjuq1CO10t99pQyyW47KkMmL+Z6QLq6kSho4lZWFhOHPbe+SqKtpbk1Al+jB0TMtMTpntOOCcp6VEDJGHwx19H3k+nRiHgVRmWZp7KWTeBa6nMx8/f6bWyrgdJe+iypO6TFdi8Dc5fSkF7x1v37y9WWFcLxdqLVymiRiDGAtez/RdxzRlyZ6+zoQ+6tTVJNuXTBcHWXCxakSKiqgMqSTm6xXjA9t+JCNsohClGz0dr4y9fobBMzhPzYK6Hh7uuC5XBtdxuN/jg0CFZF3xGRHdBeeZa9KbROBFKkLVLE1/Xw62cSf6kst0kcnTScdsmzwP8+XC+XQk2gjWMaUF4yH6ICwa71B5l+YINFquN4vyvu+waBA8QFNOutHOr8rn3cWA2+wJrjBsGn1odH4RfjoBF63c09ptp7aoKlughdwSfRcJ/ShKau8p6Sqwpvf0IdANgaZJfn7NK7HrwVpuxAqzQng1Q17Df9YDVa7xdTnjZA1PybNAAy3rQtjT9Nqjh62xq1WH5BcLyl9vS2fp9hXWQWCddeHKWhjWhk5xetE7CQffFScTtLXCsDIWh2QNNC0guRk5aBGWjFFPJZXFybPalDnUCq2IfkdsPMCUjHNBYONbcTOgzYKparutB6iEmgmcZNrqZ3UD1tT7SXcFbV1YK6NJr/Vt4jBeIU+5kYwSHlYB7usvKZLOyNmWdVqXzxVy89wd3vHNL/8B4/ekJOQK+R6W0/WKt5643UGDOSW6GMlpoVmDj56s97fzju1mZIgjx8uJp+cX5nmSIl2L2sg0moV3799rvsyah+H06yrNWt2PdGy2O5z1fPPNtyJCdpbHx0fu60GbZVhKpm/Is5pmXp6fOR6PPDzc8e7dlwqry5/fKNG1yn7s8eFBJO6IeV5plu12x8cPP7Hb78kl0YdR9nCoEZzS4gxQS1ZhkOF6ucg45ANNFbnOW9JcSHlWQzNhz4niTw7qruvwXv1sWD1fCrWIUVkzTbAxHdHXf1uFxnKRjsyYRloW5fgKg8HGwMOdhH37EHnse2qtYlzoLKkUrstELplPnz7d9AOnlzO7w56cEp2zjLsdQxN4o6R8k7sbL0yQdL2SlklyBib50KlNIl7V6vvD50+cj0d+8dXX0o1WeY19t5HkN8U/VzFZ3/VyMLbGy3xkFwbpskqV4KJFJqaWE0uRg8U5Q86ZeZnwzhOs5e3DI8YJ/llzwlQpPLlk0VNUyFU6trUvlV/tpsw0qgexxhGcTC4x9KRlBi12K3w0bDaiIs+VzovFRDW6YL4dbg3rZBdijCwTrQu3G1UWsHr4tvVcqVhmOnclMDP0hhAtQ/Q4l7G2EP2IbVqAjVib5zKJR34RxXjoIiVfiXFgCAPBRiyVl+MTMcC43RG9x/sOSrvRUS2e1ZjQNkPOVayejWRtl2yYpyvbsWdOM8uSyeUqHbbaWhhrZaLE0NIsh77uLcS0b+XlywKxqdCrGZlcDWI7oRQrVh8lp910vemDC9UUbdCsapj0UAeZyJw0QbKLVtuNKnsfgXgk1rfSWO2pW1swaDpbkZ5cmEeZkiSPYo0BNdaQQSEsFb8Z6dmNCbKk1mKSdcpoWrCaLvWluKz7FCkCFtF0iHi83SinBUnqa82KXkV3da0ZvE5gBtEmVRVjrnYkFi1IxtLaGvxlyRn6zYEvf
vUfCOEgGqFa5OsU2tv0o5x5pbHkhc8fP3I43LPd7vFWRKifP31gmq6M/YZvf/FLjDHszIHnT8+klPnNL3/NuNvqHkatMYpmsmsSoDxfqmzXQ1zuRdmFWHV3LUVQipLELsdaiSWY00ylcrlcGceRcTMSQ4QbtGdoOeOCkIuoSIDa56dnoa8qdOODIzpPHDeEEDjs9wTnWUrBr9XYigAl58r5fKLvhQKKUjp/tlKTZV9d7Raq4NJNllN919N1UT7gkplTJriA9063+AVnLZfrmevlym5/YByHG/S0sg9CkMOl1sbY91jvabrraA2s98LUmhfCZvwfDuLj6UgumesyM242PBwO5EWyHOZJMrhTSgxDT8mZl+ORWirBe+ZpElGLgX4z0m1Gnj4/4Vumj57z+cLj4yPevzrcbjYbSZPLks9hnd68TrMTWLvEphnFlWkVL3nH6mBKrqJHaI0wdLRcWeoibBdEZ1Bb4poyZcl0fcf5fMZ6yYu4nGUcnlNiN24kMa0K5TTlwjRNgmEqnXI1M1tKEYPAJsEkhYrR5DGQRXarld0w8nI6k0uh6yPRieVEU0+iprnltYnwynpD7z3NWLIuKV2pYCrOGDxnWjvRd5W7jcd7QxesMs8alqj/yHKztCyHwXIlq2tqBrxpjHEkbB7ovVjXL8uVvnN0QaYPbz3W9NAsaZm0wZA9RWuyS0rNMl9nvB9ZcuZynTg9n3DG8zRkSjkzpZ/ou55h2GBtoOs3gFCJb5naRmI39YOXQ0+dUG9GekCrGfuzdDWD2oU30S1UUH2B0oRvAUXKKGqNqhYrthVyBePkgG0gLrfKCKo6qa/+rdIulNsS19gs+QxNMyaKQKRF1ezWCoU3FXDGU0zC2CD5EhpGJJkdPysQrOwsha+shP8Y417dFVa6bDNgg8qpdRIzhbWfWMubMet1WJsvbr/q7bUIRCjBU7qo1vOr5kjo93z5i98zdg8sWeBUF4Io3avQmEsr0rg1hzee+zdv8FbcsI2BzgV2hx0pLXz8/Im7+wc2my0heB7ePnJ4vOduv+d0OdFwmhMjBdg6y/V6wbjA2PcqE5D39PT5M3/5y1/4/R9+T4yR6TLThoHj8YntbseyZKarPMdv3rzBWse8TKKwjpGh78mlMmcJf7PGcy0F7xH4LES597//8BN/+M3fiRpSm5BaC7thw7xMDENkDTxZ5iISfmWoWGPpugGrwTjB+xVcvt2gtVZC8FQnys7VFdEapBprB1GWTFpmsknc39+pla08IOO4kcW3UvlKzpzPZ4ZhoOs7wdCc1wfC4QyULPbCFOHYWyMq6efnZ2IMbPqBVgrb7QhYNuNGMr1Dh/citgtrNoXuD+YlSZZxzoqlCzX29PLMeZ7oQmSeZ8au53B/zzBuXnF2BDnwqmS0zoGzpLQwnc5stlvZZ+TEkjJdjEAjzVeOzyfRNaTM0mbJS7ZWFrxVjPjkhlWeebXgxKo4ukDtpLh8/PCB2HXcHQ70XSAGT305MSV5YDGVkiTWUKZDMe0zILnOrSl7RrDiJSfZX5hC7DqxEymLqte3HLYbSq10IerBLw+laWIXLS2HoyxJokODUHNNC1gz49wFby4MPXRdZnCGrndEJw+2d06OFmtxTUSyBpgW6dBNq2QSsdsQXKTkiRB6TAha/ADf6H2HDRZrPd6ui1hHyXC9LORsMDVjgmcpCxRDLZ75nDmfvuPpLOZpOc94ByEY7u8PxC7Qd2+kETLSqYmCV5YwraljKXogCrC/OojoZyCTgOxxBPJptt5UwAJJqT+SqtZFyKV6BO2U1+S1tcD4NQa2iahPNByacGhWKFmNs5sojFdmnM2N5uTAKK0J6UCbl9oyt55w3YioHqOxitu47SVKE9Fj06LX2qp9kZ1H1WW0vptb+6ljr1y91oTF9LNfZi2krLNxu51N1RhsFVi3KeKw6hvEkyzTSke1W7766t9h7Ibnp2eGYZQ8GIxmasvuNBjHdZ4x1jCEkWCFUHM6HUXTorvW/eFwa/zO1zPTRXYOu2FDmhPTdaGUzOH+wPUygRFU4+X5KPY7Icr+Vwvqw8M9MUjDFoOwpkqt9L00JTFaQvBczhPzIhbjq//b+fMR9/hWzBWtw2q++G4csFiCdQQnKIl/d/8GHyJ5mZU9IRUyt3o7742R7nMujc47plwobSGGQIyeWrSbUSzvdRmmH0QDU7UzRlxPhYZXmeeZEDuatcTQSdExwrM2mkplrRV+v+ZDYCSrNajc/ebyqRS43Cw1LwQnnSkIiiqsPMPT0yfeHO7wURb0nz5/Zr92+DQ67wlOPN9xhmgDKWectQzbDefzmZIrPrz6QaVlpguRN28e5bBfkjAKnKMpx/7Dpw9YDF98/aUsuKrwklMWkZr38t/n04mrFd8WWiOOA846TscXcqmkPDMMI10I6gEvD8GSE8wiPPIh3GwJJOu38PjmDa01pmnGqEDQ6PIPY6ipkMrCmh5YikBrLnjmumCbFNucE1gRBpnCLU4Va1UgFAXy6SKuCCYquKocwAWNskRgkGaNWGj4ijMzff/C0Bk6NzN0maF3dHpX+bXZqIs2IZnaLN4Izt0MdNHh3UipjXGzF+aedVjzIDkVNROsw7hGwOOik3hWGlW7z7QkaHA8JngxpKVynl6YLhnTPMfrhKWwXC+Y2IjBECLsNo67h3fsdxt8FH9/eV3mZj1tzc8Uvmvfa1eWjky+suNINBzOSvN1E4ZhwGmH3RpGbUWagbqK5qphjRHFgFVrCrFcX9XSorxeF8q1Wo1MfQUcC2IaKeeyCOdwQXcCUg2WJG60Rrv1qrwZozs1UGKL3iPSiFaqkelm3Ss2YzFVJlfRQ1RuFt/G3pTP64RlGhRlcFWF8lZTQrSorEXI8boj0gwjKk0twhF4C0OuhlI83o7sH74hNcnljlYoymXJnKcrwzBqXkchdhKL+/nDJ+pDYztuAJELnI8nhn7AOEPXd3zxxZf03cDL6SyWF1SuSQrJZjfy/Q8/0k89n56eSEuilsTbt+8YlPkphV6fhRB48+4L0TAtieNRYg3u7+4wxjLlRPCBh4eBlDLzMmuzLA3v5Xphv9trXreYZaYsDEgXg8QrWIPf7TeUZZKDTBdML8cz+/0O43pMKzfhSfROuk4ras6Xl2daLgy73frpSXHQpfK6gW+t4aLj9PRMrY3D4YAa1tP1g8BcxtOCpKnlWsUi18pHfTmf6fueqLYZN2YUMp5flivReQn3cJ5WMsHJ4tNZc7PpaM4TnMMOG7ktSsGGwGazwThH8AFqIbfGnDOxRboucj6d+fjpE199+SUhBux14jiduZzOUAu7uzvs5UyMUWikypJoTX+Glcrc9wPzMkOFlGdyqXhnxYq8VShFMH7t4lLKqgw3zNcrc06UlCEX6DK4QLpMhL6jLImXF8E3O+/ZHQ6qhM5QK8s8sxSBIiTYRWCTaqDmqgw1ZWAgTBvxnoeWMr45Ub42XVlW6EJHRmA+XL0JkIZeNCdNHUFFA1BpTmIkreLDAg9UxgiECednNnHmbl/Y9AOmFVo1AkG6Ri0J65ow3LzD
G0cjaw5xxOsLbk1CU5o1hCheXcKBz7cpzjm1jjYNjxe6ZIO2FC4l8fSysKTMy/OR8zFxvi5CKW2CDfsAwWf2d5F3b/eMfSQEwzD29FEXxK1hnBzKxjhqkxW20Y56pZFiucGjtWmcaltNK60G89jVSV3f48+6ZiM4uy7CBFZSCrRRIz/RYBRlYenBLBFv8rOaUbioaTGTA8k0lFCi+4SfeUnV1VajJPHHsmqVovukalaUWfTxTYknNOleSyuQoVpVOzRYWUUCWuvktdJU9SypVQ34jJSxdWn1ypSU3y0tI9Yk6N5Cl1useRjKM0cKScpQqyWZgW9++Q/0/VucjYTouZyv2OXK0I3S5LjAMl9v1zl6sdj/65/+wldffSXuCEPHWyduC6llTIVwM/as3O231CI7glwSD3cPPBzuFBJslGUWhmFDQ4HkDPNBdhSlVPJ8Zk4LH376kc/PzzzeP7Db7QjeiseYaWq1s3A6vuBDx3635+7wwOl8JuUZG4f14ssuuFqWnEilsjUDfpkXistY63CmcZlmXj59VMdV+ZSnqyihbzo2A61qRoRxpCVJ+FASo6lammSoGFkaWX0AxnEjN77XTAPbxGEW9cVv0AivC7Zab2rvG1kvBFnRaXdzPL7w3Xff0/WRb7/5lu24kY7EW1gWUMrYyqDyMfBwd6Dre93kW3GGzRnXRfGoMSLC8l0Uo7pSebi/x4fVvruRa2YuhWxkwhk3G9FAOGV/GLDOCnXUSLrc/eEg+oIqOQzT+YJzTmlqQvGb5wXrrBacgrUiBsxVxFvXRRLZqjBeZUmv1zjEiLWevovkKod2WTK5Zi7LTKdxrkbbzlqQ91d/xtUvhZqKsCFCJ9TeXDgej3Rjh7NibSwWBIInOyOHmG16gCuNsTUJijLhZxbX1ilskCFngr2yGxbGMLHZLIydHNrer/ss8btxztKUhluRbsg6A83hithEGOuYpxMlVXzsCcHrQae9rTVI9p0TJjWGVgpLqJhcZEpLlePxyH/7l4+cL7N0qwl8gO22Z7tx9P2BzRAYd5ZxNGw20gTUVui8ve1wrNfXZbQ0GN2/GIezijnfsPKmbqMyFVjraQhTRSCaldUuy0t5tn7WlNWkS1jZJVjjQLt15UNLgajcchUkRpjbruHG+LGGlgpLzTdiQQNdosu+YJ2CslqKrxCS/IfTOAH5xzovk0YVceAahCN/X+N0eYXebrBUVRq0kYwLWJfnRfUTFW6FQWN2McKUKu0GNlWruwezUn1/vjWVmFwhy0Ry7fnlr/8D797/Fqosf41pDJuB2gyXJFGiIUbdr0oRLLnw/vEtfYhU03j6+JEv37/HBM/peqZTYo6IZ51a8zv1kLP0vYSs5VoYhoF+s+F4PiFaFIEel7Rg84KxI8YH0jLz3/7p/+J6vmBjR2fNbSdojFz7tMjeMPjIw8MbWpEdX85FpnIn90nRMzX4AAj5KHh5jv3TyzP3hwexD6gwdD2Hu3su05XDdsecNCfBGmLsmOcJmy3jODCBxF9agQ1CFMdI49ADTrom4wytyKbcYG4HfE4qLzdWmE/6Bm300v2mhPeefnMnIq9J/M1pDZwTw72+Z7/b0HW9jEcpYwz4YDGdvGGa0GebmnO1JtDX+XzGOUff94DkSFhnCX1Ep3TOpyubvqcfB5brwlIytTS2/YY0JZoPPJ9O8jBYizVwuSyghcmqFXerFetes3iH2N9yGTBwvpzwvhOr9FLIpSmdUQ5bkAS+1DdcC3Rdj1YiUT0XycUmiqVAyQlKEwwdI6FDFloRi2awpJZ0QQbTsgCZlmX5nEtmMWJkN+eFJScGM8hkVpouOsE5cZ6lrAcKWNskdTCBjV5gFAO2ZkwN1HomxplumzkMhrFbiG4iOjnoVd4oTrmrmR6oWV3BEZCYR8mv8HozWwxx2MMgQrO1EzbGKb2xiqCyCnRRMswl8/zDT8xzYTs+kuaF55fP1GVmDJYQDOPg2O63bEYhcnSdw9hCjA5soXeeSsa7Tg5jvQ61SeimBOLIzkHqs1RqszJzmi4hjMO5cDuAq/GseXSlVayVMC9ZTEuHaXQaux3SiM2Hc9BsUChKSetaL6zmULebsM3fvK0oC0tZu0rdIxk5jKvCZQ61+TBOSAO2UmpC8ih0g2kcTYVo1IYhUEwFIzBr0QWsMwWav0WDSsGxt8bQNDnqMYaS5bpJoUAmAW1aa5M9Q7MGW1dPXp0atP6VtsJo9pbdsuqs5mppDHzxzR95/+XfqQWIQFIGybYvFY6XM/Oy8BC8UH010TNRCc5yd3egFogxyJSuxTfGjg8fPxL7yGbc0IeeXAt9F2imv+XKlCzX9fH+kfPxzPPTk0Cx1pEp/Pd//mfevXnD4X7Pd9/9wHKZ8X1PcI4YI0PXCZSPRKPWVuQ5Caq+N9zOAe8FNpzSQh8i2QjJyLsmAVgV5mXGPxweADm4smmYZtnv9zrKyWXqooxcqVQ6H4RCqMsY2wWWeaE1uRDHlydCiIQYxR8oz0TTCd2yOZwXdkNVF0jvBbes2lmklAjBczqf+fGnn/jVL35Bnpfb0nvFXleW9jAMfPHV1+IFNM/S/SiuWnOm70dMLwyqFf5JtVAvF75/euGrr7/k3dtvMNawTBMguPf1eiGESNdHHI7L6cI0zeofZTG+Z1xGpkmCw4UJ5ClLIiWJYHXOs3ErrdGuEx3WWJ4+P+P7IDbtOTFfEyVUYuxfzdZq43qekWwAgfw2/agWCdobGsPx42e+++F7Hh8fuLu7R/+QJS9ykCi3XjzYjLz/vLxSCFsTiGN1US2Fp09PbPdbhn6Qxe5e3l8tTf1tdA1t5X1ZKwfEkq/qFxTFjqFWaIbWEtYuRHukd1f2YyGMFecNnQVvt3qgyn232khYa7Dq6fP//6sq4VOoqVI8xBGhktP0qgXQhe+cGufzkfmaOZ6vOG8oS+JyyTQLT58uNNM43I18uQvsNnd0Q8fQR6KXxsO7gAuOVg3GiqUK1hBsh7dGfaTE7rwaaQBWy2zhn9sbrXelDDvrdRmpmL6ym9AigLPYW9O8Ct7kXlqHe4F5xKpf45sUNqo061iNXVtpJIT6XFumVKe9u6jLc0tyuKr1BVi1L9c6JgivQNBNc6R18YwR2MiuHRaGZtYIU5liSi2317wu6+u6uKb9zDZGM8arTFDFaIhQXaeBV6PCdb9jndcJRq6O0+LyeoHc6wW72StO5OppJfLF17/j4ctfUBBvt1qUTKHmhsZLzszx5chlEit9byy5ZHrnaaVyvJ5x1uKCuLR2IdJvetY8+M53gs7UmTWzXAqgwMib7YBtIlD+4ot3Aj9aQ0qzxBRvNxLH3OD9+7fsDztIYlKz3R3YbrcYrE4oEicgO57G9XwixE7txRe6OFBbFdPPvaGL/S31z1qJoe18xLtgmOfEECVXOddExN8CP5x3EkzTKtP5wuHuQMmFeRb3UdcFzUO2/PTjB77/4Tt+/atf4X1PNtDZKNXJq6eNsiEkNF1GNeclt8JYDW5BCkheFj59/sz7L94LHKaRkmY1LKuyMHdObmTvIy0LxGS9UFe
zRhdai04fwnvou4749pFhkDxa44T9lEphmmZejkc2o2CG2+2O5TQzzZLS18XIdZpIOTNNM51zzFlw0+fjC/N0ZakFXwS/74cBkK7DOkcuC1Oa8CXRUuJyuXKdrmzcRvsfeXBzqXozihuu+L3AsizUtJBrE2uRGPjy7RvwQejGRal6xlFqVtWocMtTTmIgaIQ7vgjmpN2FwTZhN1Ua87QQQhARXLM6lUhhCd4j7JOMN+Z2Ixojn2UzMs3UXAkeentkt7nSdRc2sSNYA/aMMR3eRmgVa8EFg4YGyFShHlGmVfG4aSosYl3mVqblSkozIfa0XGk1iwjJ9ZhSWOYJS+R8vXK+HMmpMS0SKtQFw8PbnuhHSrnQD4G3b99hzEKlEKwnBLCuYR1UFoKLsBYKY8BVmq0kORcFIqAIDGe8quHVBt8YcMLi8ioaq9bc9kGrbuDG5LGvnTSonK7eCKPc0vxWaAW1fGhGOlkdmaweis5VTPUKW4mO4Hq98MMPf+HLL7/B6HRWm2D9bd1DmZWRtZ67jtUbyqqqutZ623/ciqEyrGorOrXqZ6uit6YHeGnrchyBtJBxwFirUJBagSA7J7QAsk4E6O6nQTXuRjFu+nfWXab8W65ua43cPLkF3n35B968/y3YHqdxodY5sgpn+9gp1Ax9DLJ/SRmUlov3lFz463d/5f7ugbv7A7kW6jLjXMHj2T8ciE4ElDkX0ZSkRDf2/O37H3m4f2Cz3bFOo12/4atvvsEbx/PTZ3746Uec83R9kAneWmIcWFWswspcVKcmk19wXsW/FR8DQdEcmXqlYX14eLiRVYzub0ppvFw+c7c94IsqXJsu0oJ2vg3DfJkotTB2HbUhVUvtrr13TClhK3R94Icf/sZ//+d/oQueGHthYijjQGio0l3c9hSA8yvvuWkKl04n1uJ85O2bN8KIEq4ey5Kwg1M/n7WoaMejS2jfvHT6OuI1DKflQi2NN/f3hBjpvIxm3gmLaV3c+ehl7wB8+c3XTKcL83Vi7CXjOwYvqlxd2gXnpYCaxvX8QquN0/kiXlUhyhSDLo70hq0pkUpht91SssQcYgQOW+Ev5zy5ZFJKkh9treRTN0OZJ/76pz/zdDpS04Lxkf0wsr/bM/hITVK4YnBc1Bog9r0kzVUVODqvh02FmrFNwkVKEfPBPvZ0vSzlbDNcpiudj9jgKCVL8FNrXKYzl+vCZjMwdELZlVE3Q5twnOlj4rD1DP2ZEGQycGYBk3Et4IyXDGuEZGBrAbd6H0kTYa0XQkKT3ZlBsH5nI5frC9fTE7kulGUmpRlspIsDZb4wnS9cr8LZLwV2+44uDhhv6YMY+fVRulBjR6xNWHfGWkdve/08jFK2G9F4UPNLTMWp51kzbfWLk4MfwbvXZbJ1EkBUm6jljR6mtoltjcRq+ltrXNeGyqAQy5rxLEpvFEaTB1tgq6qmlHXtplf7aiNMtGoquYpdiKtRFtW6Wzwc3mn9N1T8jTlVb5zT10lOhxzqekYbqxbo7jYdyU5F3kOr4szbWrlNQPLZ2hsMBiu8haSe6hVc3WLl65xOlMJCo2rYkbE6rQpBwCgbaj30DEYzw7MULVbzS8NcB96/+xX3b78BE/F4oQYbjwEC4vBL46ZnGsYNOSdSq6LMsQZnDH/67q/87bsfeP/+C5ZFHAvwEoi2tIXgA4nCNC00Pbhrq/jS+Ot3f2O6zvz+91uErHNhWSSdMCMC0MeHR0rO5NTwDqbrRCmVu/2ep8sTT59/oLTCL3/xC8bYk10j58zpdJIJxaw09ar73sTQ9bojqhQqpRZ91hrn05lNv8GvJmnCrpBqXdQyAgPLIoeat5bBjkrVtDg8uYne4ccff+Cf/umfaLWyvTtImhry4Zgm4pMVJ2oKpMpwfUNp5ebQO8JYy+n4zMvxxDfffHPrwLou3rqkZuDzp4/cv3nDfJWs54N1EpRTZS+BNXjU9rxVjqcTIXji4wPTNDHNE+/fvaO3HbUWlmTYjCPTPAs/OHhqidRa6MdRokuNxVY5rJouZ8uyCAc5GN69f6cW3ZWkUam5iS23tYZpmVeXAMGYs6XrO2Inwq7pelEuNXz+9Jldv8F1AsPpKcG43WK943Q6Ya0Vk7EfJg6HPdvNwOfPLxwOW1G8topLoqtAYa8VsluK5GpXK+vclQJZW2W7EbviOS0sOnlEE+TBCZ55Fovz1hwlV2zUvtAuGGaCv7IJE5ux4P0VcRDJ4gHUCsEGvO2wNKqXZa0noKvH1wPJGsXbm+4gAKuTbklM1xfmdOV6bXh/oh96+tix5Bd5CLYbdneSeobp8d7gfAEKwQrrTthnEecdtSzg5N73qnZ1RvL5xNZCpt1GxXhDM/nWUWPAFBCH11e7Dsn2kINPwojk8FyFcBTJa7BeDuVX5s7r8Sx8icYtkGfFWuB2GBuzisbEh0wiG+Rzp1bKuuAnktaTnooPgd1+K4ywFQprXotRuR3gTSFQrMJi67MIQrnVj80Y0U2hsHDO9WdvxmGakDdWyqwRIYd8fW3ChKtaDJDFf9W0Qafl4waTrshDW511hawgUKSyvJq6CdQVRnMk1eO8f/d3vPn6V+QE13lm1zuu1yvGe3bbPSj80qw8G25VPXtPnheuReirMtBZfv3bXxFCZMkzrcEu9tCcuBZYQ1qSxCrHKA2T9VjreLh/4K9//TPffPstm3EArAhaZ4MLgbHv6PvIsljm6SoNXQhczkeu1xOX84Wai+S0Y2nOYUrm+ekT58sV1I681srj4xuMsTgXeDmf2Q6jpkxmMZU0EEPPV198SSoZj3blwXg5NJwcwCUpiwJLFwJDjKyOLdY4TueT2CaPI9ZY3r39gs1u5LA76FnWboZkrYj5XQhSpVupN6vpG+8bQ8oJU2BUUd64Hdlsxtcxfe2WdDQKoWO+TrRa2e92OLsKTQxGsfPcMtvdjrQkvv/+R/qxJ6XMy/mJ3//yN8LptxIQ7pzFB8cmbJANP8RdVJtyR62N+TxxmSdhMinV8HQ+0wy8XC4yzhlhjyzTzNI0p8PAMs/8+OFH4SZrrKAcfp6cZubrleu80Awcdju+nydiCIxhg5sbSxV67f6wZ7ff8nB3J1L6ljlfxaDPGNgeNhoVKTijOKbW28K06ti9CqCkdXJYCsd5lolSBZOuc3SxW9s5Geeb2BJ0YSCXorbQltImvF3owsIwTnQh470aACI278ZVXHMYKx15s4j1AJVWhBHWKGCFeaaorXSOrdFqkqTCVjm+fGJOZ84XiRIFcDZiXGQb7mGUXYm1YpdurVG2kTzQzlR1+XXClrIOe/OuWf2LnAAZRoRJAqmgnk3lf4Bg5HrKvSr+UcLZr/UmD8BY4f2LQl2pq9ZgmiPr5yKQjT4XTdqplcVTS9XFcb0RQKwVuLWaKhRba2801loLLYtnbc4VH9Qap1RWHUNpYkIpewBuRaFW0bKszrU0qGYW0z8TWJfbDfPqz6qJdCs8lm/LaCOK7tq0AKAitlcdU61G8CYtyevesSqjTVhqVqcDtNi+/iytVqzWJPVWRwOQqUYMS0sx1Lrliy9+y/
2XvwQ8KV8pS2LxDjRZ0rSimgvRnfTWU60ajjZDdIHT9STUZheIPjIOPcuciF1P1p0gTZhrxhqsa+x2O5yPLNOke8XKt998rfCRCOJijNzdH9RaHr3PhDJfaianJFooa6T59Y43777CNHh++szx+MLhcMAFy3a7IS3yd5YkdvOrlqOGQFKafvQdFisU9VpZrY38bZTSE9uqG+tpWQjWiz4hBnCGDz/8yLzM3N3dcT6e2N3f0Vrl7v6Bh4c3Mp4q64Iq8NAqeFtv6Ka45NoI5ayZAp2j60ahPgLv37+XsV2Lwg0z82Jf4Yxhu9uIpTS6a2jywCw58fl04rDdytdrAQh9xFjD//b//H8wxMAvv/yKWps4oDbYeg+I0M9bQ1WP+VQEv0wp8enTJ6ZlkaXUplcLEcd//af/i//zv/wX+nHgV9/+gl9++wsWKjUXqhV8erZGFkvWcJ2u4rUSIlBJufDh6Ykv378XdXfOXKcr0+VKsI44dLScb4Kh1TuqLGLAGKz4y9fScEY6l6IHiDDBzU3wRq0UA8F7UhV6sqnyHvvQyaTXdNnN688yzSi1VnQVm91ITYXaIOUX6vJMv7f4OOM44/GYFhCx8M/8eqxw37ErJCNou+iq5LAyuhhfoRTTNL42Z4Iu4H03MFoHd09YE7EmEuMW7+WwsIDR0CBnpdOX6EbZUVW1CDFG6ZVtZQnpjsXctLoC4+gC2hhdOlspeHKuGlAhaiDKNbQGX29eivJ6xJCINTva/hw+0YNvhZPkCnCDCa21UBu5iu1z1dhTg3T9QiSAWgpZn51aCzVnfT+Cm2u5YjXdy1U0GYpBvu77WIvNyrQz0Dopjk0mE4lB1eS1qp1/bmrYJzy1lYTUlLYtsJJMFLRCqVb1F3IfrEW3rsUE2UmBBFjZZuQ+blJA5GxQL6Wq8JLcUbqHEB+r0hqlOGDL+69+x/7uC87nK/M0sSyZYRgxITJo03i6XLDWiVW+aVTnb1NcrQ3nA4ftnZBygmd/OGCc0cwIy7wEJON+5unTZ0IMbMYNwXlccNB6pmlmmTL92PN3v/vtTZGdi7A+hYghKE/shAzkW9A1iOewPyButBGQomZ94PnzJ2rNbHcHnLV0oWPJRVMTXynMXdexpEQpCWMkXbGCOhgYhhDx4gWiSx1pXJjnhWg9PvibqApE6DaMooge93tRZs9yA8ZOKazKsKhVKZvTjPWONat2Hf8kGU1sEqzcETQgJ1lUSbcHXVwD7LkBU84Klhk0exdEHezWdq0WxnGgG0dazqRloRsG7vZ7/vy3v/KH3/2OX3z7LYfDHZ+fPjPPM28e3+B18y83QZXcWu9u5oa1iAzfOcu4PXCdrtTWmOaZP/35Lzhj2W+3bHd7cspEJ0HmrTQuLbHMCzHKIr+Wos6q4pPjnOH94yO5FCiZy/EksAGVl+uZh16S7MhiHpA0sEc44WINrn0WNJizdNzWWahF8xoM1EJBEryaFhxyolY14PvZSlQlTq9Lc2UcqcECxhrmWvnTv/wLD1vPw1sZyX2aqC5TslBkZRupSYUUUkOiIjEU226W202DcqxxSu2U08KUFT4QfUGq8hpWZ4bN8KiqYIv1FmOrsoJeO05r18hT2SfhjHohKTvKmleLbdqtSKGMOpSOLEeWZBIYNd1zulw2DeKqdJeVBWtOs9XXsargjT5sN5+gNSf6Z1AbKKvGSOYCyHtabdnlgarMZblBO/Wa1co+KH6vzZnxr/s7RDQm/YIWi7pOAUL6FMxfpyljaW21z9awn9aoCh8XJDu8aute7fr6LVUFjGXNSdHlsqikV3hMafTN0YySTtv/WCDkfJKkNyEsyN+tRuNO9f6q1dKsfE4r06k1gWFqM3h74O3739KND5yuC9N0YZoXnJUzLPpIUqv/55cjfddjNxtsq+Ql35pVvWW5JkEy+uAYh47j8cRlObPdHeS9Gs/x5ci//vnPeGPZ9QPb/Z73X3xBBY7Pn/n4+YlvvvmGYbuRBi4XssJJyzxzuVzYbHbiURe8LOWbfF2pjceHNxhr+Lc//Zn7uwPjZsPdwz2TZlGU0rgUIRpF04tUQfVMzlg6jUWg6qdfm4R9IWw0L4rG19uylIzzMnIUhN1UTaWlyo8//sTdYccXX3/Ny8sLy7zg1DOpllcVaDPrUrphxl7OCIv4C1lLzvKwBt/h3GtV88bydHzh6ekzXd/xcP+AtaLSLLnQBX+DTFwznKdJltz9oA6y+tE5z3Q+cn9/T3OWPDVKSsQY+MNvfss8T9QiLqbT+Ygzjt1mQ0lJqWOO5bqANUzHE+frhc24YbPd8PBwz3KdeTmf+PT5M89PT+x2O/6X/+V/ZVGVb0ozqSwEIxqGy3zFGEfse9Kibo1dvC3Dqj7wxVpenp4Zhp7jNBGjZ56n2y7CGTnASmlcLlexLalV8jXUjqEok8QbJ2Kikm9jedP/w6CimUYrM8eLiPpuyB8CDzorS8B1khAKceN0utKPPc9PL/z402eOTyeWyeHjFuctDotfHC4afAYTNJCqNWFRUSnGSeJgbVSv9hUr7p+tBjklrA3q35P0cEiyAwkDrcyUlmVS0FQ1u8qkDEhwmlE/poKzYl9vnLtZfDsA93Mtw2sec1OYR+Cuhq3iEXY7JBTtcFog1EtOeli1CDHa6/88tKciRVIpZdI0aFUx2vQYzZmWgmZ1KSGHnrXr3m0VLRaqijFzEctvasLaqIU0yJ5RKFQ6DRlaNZSab21B+9k/AvnIg1uR6Ud6coE7jDKA0PdeqGo1A6a6W4FoTcK+bvY87XWXJ+6ua2UWH6iWDbdLY4w2aMpU+hk7Ci1kzXis8bdiK38sTYWxKJTmKMUS4gMPb3+D7+9YmsPYzND1vHnzjqHrsUYcb1OpfPrwEecd3aanGMnwXtJCHztMM0xZPNrSnKRhjTKN1mo4ny+02nh4cy+WQc6yHXv5cxrPz0845+k3I3lObMaNFB9NtixVipSNhmm68r//7/8Hf/+Hv+erb77m6cMH+q5ns9tibMO2yqfPnxjHnof7O3GBTgtd7Dkc7iSLwyhKEAe89+ScybUSvRcKe84crxdpcvd7Wmt8Pr5wvVx5vL/DCyNUKrN44Ivz51ISzx8/MAwD++2O43Ll6fmJ+7sD1EoXOqx1dF0nXkBqRpWWRCrpNhrJDS9d+HQ9M4wbmRSbkchAy03QlVrlww8f+Pj8kaHvePPwKBTTXjDxpRRSWsTH6HRiul558+6tdOPWKX5tuc4TzVo+v7wQnKeLHoq4ll7nhW6I7Lcb8XtvEGLEuUBwkcvxTDdI4MdmHMW4LkaC95xPZ5aUhJvvPdFLHnU/SDDI6XoShXht9DEylcw8T3IjWSkIJYvt9qefPpFq4u3btzfqYMuJcegxxrDbbIjecj17RmWf5VLJSYJsQgzi4JgysUH2Cp/kSk1FFZTSofq1YOsBVmnkVvBNFnmFQr4sdHojCw3RSDeRxbpjVWpiDLt+5M9/+Z7//q//yn5/j4sbPr0ccf5KayOHuwFTZ7VUzgxWePveFPxq34y44Lq+h7oe7RWD/EwxNC1gMq02U
p1uB3JrlrpcAS+qayPdqNBirR60mWohrjClMcpC8RizUoTrzWbitiAzK/wvB5Ix8rlZlaqv99r667YYlZehhWotFlIU67p5tq84eV47elRjsjKVahNdQ4NWhPRgNbvIWQ+u0XLV7vxnC2Hk64X6GW6HalPfpKo054pOxk08m8qKINQiXf26/2OlvDYtEGiBaDQk9wTdD8i9ZjG6Vm5UhZXWXYESVVbUwhhadQKrAbdkuaZEFvkiLc6vC3G49UuaNWKUY6BQGKsSvWJbVXgpUmvAhTvu3/wK3++xcSQ62TWV0ghdj/GeaV4I1dH5yHW50i6V+/sHyXLHELqBKS2yeHaRKSWaqcxLwU2ii1ouZ9I88+FyYdiM9L3h8eGBoetYloxtjcv5Qhg6+r4n7WVCCDHKgjwXjJVoXYzsL/7jP/6juEgY0UmlKiFC1kihfXp6IqUt93d3WAc//vATw3bLnRHISFyoZQfq1s8b0UPlnHh+OfL//T//P+Ql83//n/9nDvf3fP74kZfjkbdv30p8KdboclE6F+cdronDa9cPzDlhGvzHf/yfCFFUzDFEcJZcMyHIYZmzdPYSU7maczU1ZYOuG25LLHlWpLto6CLQWHb7HZfrmS/ev6cfB9KsORK1Mqckxn8IjfPt+3fEEATbRT3mW+Kw2zG3wv/7//h/YZrhqy/e8+UXXxC9w/VRFrGl0UJju93SDYPASxYus1jrhi6SihQAMemTG9roAxOcox9G3r17x2WeuV7OfPjwkfdffAlNdhHzZea6TBjtyEvOlCzXBQt5UXsNa2k5s6Ry44I7Y/A+st0Fui7QlHrcTAVlklwvZ8nv3h+wVbonivRUwWoUaFtV23LFV/feVrR7BfbbHS8cmS4T/aa/4Xrr51QWtbb2igkHx3WapUeOljItVBP4fCykfGLJkbIHu/f4PlNqwxsozVJJxJ9RXNNSqK4K59yIKFN+qjBaBLfWUZSg+Pa6YBUufTWiF7FqvUJrWOcIqrtpTd2LG8p40dxk5f5bPZikM+dmiV9VVW5NpTUrLJz1dZmKrTK9NsWpzO0A06VyE6jEaFERgz45sGVyERSuNW5uyLU23UQIkcMZz2qvPucLvnZk4YjegCnTGqaqdYZZtygCd63vCdAYTrFQNxh5RvG8yvMUnrmdyDLpZJ1wqkI9VRYfN92AYGBZoCItuGUtCuuzXlVJXQvtFY+S6aLoVKCz1s+ADZmmftZs3oq2zmKytihYhIou71ns10uxFEaG4Q2xuye3gToXfKgsSRwCrpdZooErTOcrdB3d0PF4/0CtjWBF2Z9bYimF6/nCy+nEOI7EKs3j4W4jmphiGHdbvv/ue4btqo3SfIgu4ryn85F+uyUnAT+7rhM7FoVhSsl0vQSh5WXCOctX33wt+5/WGHYbulpZpgUTxLLm7u6O1oROH1zP4e6OUjK1JJ5fLhz2B2LwnKcZoxkXTs+HJSU2fcdXX37Fv/3pT/zX//bf+L/9x//IZrvl/RdfSK5Ka0CRkfZ8PZOmKw8Pb3DOsdttBXNfZlqpLLVigsdamPNC0Y4Gq4svMkuacd7rg6TOi0YsAISRIV2KMdqFaOfSjGDTj2/fst1v6fuBMmd8jILdG3FFNSYRQmAcJTNWcMOOzm9Z1rCNUliWiVqE9TP925VuHPniiy8UY7a6VA/0h5G8JF7SCwdj2G23OGfJOYtQ6zrdGEub7YaldORcuJxO5NUqm0bXD3z5/j0fPn4kpSwmhsETWi+eLWkRtkiD63Viu9uz3e6ppWoHJ099MQZKYZoSl/ORfuwwydz2PdWsfPdMboV+6BVTFkx9KhIaU42l7zqlDvKKia+dLypO0iix7bDjYq+kksTCoyhTxrlbQEsFzscL3kkaVuw/s0wLOUsnv2THp+dCWhZ82NH3FT+phYeVQuNB7MeN5DXnNFNrksPURAzgWpUJgUZbPYuaBElJt7jKh2X28LqUNethbQqmQnUa2QlYHFkPMqs0TlHm2ptHmbEGp11LI1NNlqJgRC1dm9rP6zG6Wqq4qhCKAbOaFjWoyP0oi2OB6zBGsgdMohkPzqhvV7kxenI1+pr11VdhCpbSqHW5HZCmiU17a7DUBMZh22sBFnvvFQ6y+uxVXQwXsEG6/FW8WHWB3gwNmSwEvpTprVS5Hq1IguHP0yhXDURpVemp69SkYrNiVDSoe491t6A1prTXwiZDmwZZGcBJUyCsrp/tWSzQCka+SJl3ospOWFLriJs3PD1X+rzwfi97mVKqinmF4nxZFmJtYqVRC/OS6Lqey3XieD2L47LvKHkilYLDME0TIXhCFxWudzQL227D17/4hm7oGEMUa3Gl0ZZUWeyC81HvVUOInZAJWqU1o8FYDVMSl9OFbhiwtioTTNAQayF29iY0nKaJoe9xTnJzrBEHWpxnM25orbKkInY6CsiqCTz7zY5Pz5/57W9+y1dff83Hjx+wwOPjo+jMSsHnJAo9a2RZ1PWjWPjWLJt0Zxm7jhYN12XmerlCE9bMqr4vWXx2Qoz4Jvbb3onqb13qOe+EXrvym2U216WtZlUrBrrdbEk5kw2U85VmYOg7WdpowlrL6p2/LgK9x1YJxMEagtvw9//u3/PD999TS+Zw2Msh5VV56sPNgdYFx2azESX5MrHdbbHZgBdRW71kwbGtlShCMjFEuq7j6elJTLucox835B9/YpquHO7usBic9yKmWRacczc2F02wQ4Bpnm8PtjWGl/OF4APee9KSsQ1SrmSqiB0N5JTpu0G/Rr73Uirn45F5uhJ9JL55hBUEsGuHrojzyuVvMNfMEAIhOJZF4mKLmn7RKiUVQpAez1tHTom/fvcdqQBFlt7iry//vs4LLy8z28HRdQW44k0iRA92NUlc8GbQw96JmVu9yoLWVqioEZ3V1wvrzmL9ZYzkkWO92h2LmR9GDpVSilrYCxFhtb0oFFwVfNhYYR8Fm6H62wHfkLNNMST93iKklAw0UdEa1kQ0bhBTMAJlWIVKbqLSptCK6gAakhJotJM0ugwW6ZwRV+YiD31tVZ8fmQZWLUQRmaYWfkuliN/RumCqSbp65O+tsFa7WWyjOwzZQdJk8kpNvg/OUpqXTBVWBpgskVeDwdKyLq2LTjRGp95XONmgSXMrfVeN69bCKDAbNxHhCuG9TiPtVpTNmh1RVphS6cgyvJGI5Nax3X1BPz7w449/5TR95v2X7wnB3yalGCLDKFOEaVIkaEjyJEVijufpdn3RCOVhO1JSIcaIkLcF4jLVkFrl8e6B1ipTXjTaF+ZplonIWUbvyQk+ffrI4+MbYhdYlisGf0Nbcil0w8D5dGSZ9UxywtQU3ytDq3If3N/fsywLp+ORZZp4+9VXGkRW6LpIKQInDoN879XjarpKYuP1fOF6ufLrX/6S+/2BTBNYqok+xIfYkXPG0xjGXimA8sH54JmXxPVy5vn5mdP5RE6Z3/7+9+KnjihFS8nkIqymEILYPyQRmK2Mjoo4jMr4X3UnITBMW6fdZnDWMWu2RQiB8+VIWhLx3Tt8EKvv0sTmGu/ptNqdL2cu5zNv
3ryh1kpaFu73Bx72B47XC6UULtOF7bjBecGhxcNE0vUOuz3P84V9EL1FdoKF2wY+Rk7HowpgLJfjmafnJ0l3Gkfa5SJMp1L48t0XXOZJHmBlgtSab3i+93LIL2lhzZMwznK9TLLHyZLGF73FuoFaFhbdC9jaFCJYEXNxiEXtHIKzjGPP0A2STaHFKyfxlbfWvHZsinFKAmAjZcGNrXHMadaFuChZnVh5Qm30u5Ef/vaRaU6ErheRH8JttC4I9bkII0bS9rLy3MXxdwkJ14RVIXbZTqCLlrlOibEXd9c137o2YQeteg5r1+Wl2EqUVnE100KUa2IEjsQp/bKpA21eaLYq/Vb2MlaN5AySoe1MXmsEkBD7BsXlrcFR5Htp1Gexa+EptwMTGtkq9dIahQHFDsUazapujsyaC+CgriJTcyuCZi1sxtzEd/IlVo3q1KxSvYwqa8oa2tWLOjnf2Ekr3LX+mRyUtbWfvWdpCnKFhiM1YbqsGS4VhddWhi5iy7J6M63aEtFJOL33m+y91LZbipUUoopML/nWBSgU2JSCrK9dFuFZ73qvP0tmr6zFSV6NTAe1bdhu3tINbzC2549//HtKSoTQiQUQkJYsdPvaQI3upIGV56YgYsTQ92KjXjMez91hT6nidRe9ZtI3oTKvNODiGg1xsJ7OVzEp7QfVyzRagZfnJ77//jtO5wu//PZbrFPY3K1Qp6X8/7j6sydJsuzME/vdTRcz8zUiI7P2KjTQy8j0NGREyP//nSIUPpAzJKe7gepCAZVbLO5uZqp6Nz58Ry1ymCIQAJkRvpip3XvOt+ZVibPjrG26tRs0qARtx5YX5sOBaZopeaUejrheuLxe2PLG/d2DDXCea1mJPlGcOLZ9E3z3/Mx1Wzi/nfnp5RPReb55957mHOfLWeqmGDWViOjSw7nLTAEdRM7z7ukdw6iOhbb30FZY1oVSCncPD4oHqJUYBxmIevvFQ16NVLSV14rZe1PQ11YKgylhxkER2/d3j9pIQsC1zmaS0TREvAsk79lqpeS8j36A3ujL50+8f/cNd/MBnGPNCt/rXbhvzpnoA8OoCG11DHTKTZYoNyoODscjeVm5vl1YLlf+7a//qkwpNCnmnM2sUm9ywLzlG1HdcJKf5mrKna4I466peUxRD1jrFmgnvbtlBNzgosu6KAAsBoXtmaO25EIIkTROOmaaJYfiyMtGjY3jUfJlWfD3Cc1TcRSTUUbv2S5q5/I+KLs+Jsl/q4IZf/zpk+DD2oneyz3rFGddeyV4x+EhkWJmL6ZxPur3rvJLONf139j7nROHQ5KMua87RP5/UhPhjAC3yd35RtgDzFqR+it4enWSElrSnfc7yr9zM9XIbjnIhehUmv0+N8lnE2chJZDXDlEzIVhfdFPiqhrOEKvX+QUhaxO32GlNuX2/3n9BCrsqWEakgvEKpirrIoCVE/X1cN+3ElpnD9LXMKb7vduhXH23Q9Sr2rb2GzQmqLlTumAkfUwl0doNl3uPNei917TuzMPgkKmu3C7jZhsN+4XSO273TtlEvs+E7Taw7H4GhydQTQnmuhoV9XPFXeAFLih23HucwW2tJ2qfCHFmGp4I4x2dxOW6cneauX94svdRZ0lK+m47PN1rU52AVxJqGBTxfX678Pz8zOlwElyJzsZau1JbW6XWqnpeAi/nLzw+PDCmgdaurMuVeHr4Gm/iHOu2suaNx6f3xBhY14X5OJniUJf5HrWfhgFHZV0zPogf2bbV8pn2c6Dw+vLCMCTmw4G8FU73d+x+lBijWutqvQ2U2OsQcPQQeHx44uePH/l//N/+71yXC//uT3/H7//0J/1dPZn+hjnvZdtbzoTgGceBp+dnvYEh8PLyhVYa0IwAg/PrmVIKD6c7OUpbB1dvnbr7kx1C0rTkAnVdGeNEA9ZyxVdHHAaCgzAkWf+bwVQGAxdTXxCtDc9V/DDizGT28Phg+K5ym86XK6VJNz6MA4ek3JLz9Up0sFxn7k53nO7vFBA4JNKovujgJf1dl4WYIkNMbA6u28IwjXzz4QM+BHIubOZ5WMvG5bqQ0qiSn3Vh2zbSoGm91XarAw1OZDV2qeqD3W4H+7Ip+RavyIZWNZVFZ2UtODOfIezWujudKUlwGFYPLgSW5co4yj2+lCJxgvNUpzbCbJHmgg4dtWi6298/j6O0wj//+V/59OUVF0VCDmO0S79Rq3iX013iODdCXHHeCEv0mvYm13Xv0Q5uhaQJVoDmMwEpO9hhGC/DlhQwRYofwLlRX9vLzd2cIzQ5uRvGD/hmE9wOt+lW8N0ZBu9uLvJ9u1GKhrdDWLh42OGu3qmGi/tm7Q7NqAbkYu67ka95qrd012akv3M76M6+edhdoHnADvS+h/iZzNXhbgeNx0QH1Vn4nv59bRpYdLlEfQabDn1nqjYd3u0mqd2tdb03ct9/Tx1mdJkYjQEwl7YTL7DDek0HUTc+YH8o98LR3vdQPrsc9s2OvlMVhO7Ypb/Vfn/Xld5rnD1th5TsjNLvAPRErZ7cJ4bxHXG8YxhmQhA0fclXPn1cOR2O+BAtWE8c6pYz+IQ3DijGwPmqifrp+ZEpjby1V8XrTLMJKaAGwV5bzhrAamUNmW0rfP/D9zrYXWAcZ+Iw4F1ky5sNRo5hHHh6fNLz7y2vq3ma77RaBLPS8DGCg+W64lNkDF8VYb2jVIQgKPB8veKAeeamUi1FicTBe4qvDCHoPYxaAILzUlIZ3/H4cM/v/vR7/tv/8V/53/4//1/uH+/5/R/+SPSGleIc0TXWWrlcz0pJHUTS5tykMKKyrSvj4WAeicA4zzw9PynR0Ik89VFf05m7ttRyS31trZBSgj4oXx7wVeqRtOewgLBNWzdrLdYNrayWfQopQOqN0pqwaZt+nE0wYxoom3KLgpWeL8vCmlcy0q6Do9WioOkQ2DYZ3qL31CJDy5gGwRoxMDNSa+N0Oin/aVlpeROHUox8ypltWTVl9E5sjWVbycum3yMELtuCR7Hk1YwxXZs5S175+PPPvHv3TlLH2uS2DmaOcxjmiylqHKH722EutZrbz0NCDPjNqVyoCc5RSYsFyxEYXGel0WvG7X/n63CL6/Dxywt/+/6jUniryo/onhAjvjcu25U5OX736xPTWInBGXiyk5xGm3XLJPUiTxvNYisK9CgzlvkEAHwTvBT2n8WED811vOv4fQpHA7kqSJVnEJs6oXcYp3ePD5WOxYJ0b4d+p6GJ0vdJv5ttEeIKzG8C+G5bt3O4vsMf7bbR0B2NZBe4Dme1+hVTVGki1F2+K/ycHfA2VbtGtXdxl3fup0PDmdqu0fY5j2abmf083tFRWY94iK8XBvY67hCWeAXLUjLyX/CXShC5QXzcBCmdYHdmux1at46G28XDjSSFjvv/UzU1m/6a6Wg94sokznC3P6/WPQ1/gpMq2biU0hK1zAzTE4SZEJR7BBIMjDGQhoM4HvtdnUmZMXw+BAVX5qxAxNPdnc6M5G8cW1rVmx6ix1VPL40hBFbdVFwvCx8/feR0uie4wOfPn5lM5rrlTDRFX2uZFBJhjvv
HU++Ba7YdOnwVoR1qpWQZ6w7jBB3ezlfSODAGj4+R3tSnfZhmqSi9hr8QAst1Y1muTOM79lc+t0zM2lZ9Gm4BjSF4gk/8/d/9O54fnvjbv/2r+OnWiDqcDPvzji8vn+XCu7/XgetkdCq1QOlMszT70QdCSjg64zwxtImSN2o2RYeeGnCBGBQ1LqzNsSzC0ZN9wMbjSTrtWq0cRgenKRT1ybeDwdMl4e4ikUoTOR6HQaFiMeF6I6XE89PIy/mNbV1lhfeO83Kltcbd4aj4365Vv3fwKZKXleY8fUiElBjHkd6bzjMf6K5QgVILa84yppSO71aRWQvZh5ujN9pk17JhsCai3zHo3Bo1Z0QYCgaopbJt2XLdtepjE0fVJ+b2Yd3xEucMk22mfy/KRRpiYkgRDgfoOwT1NfjMdxnwfAj46pVl1ET/9apI99pEpV6vCzFGNjNseTpbzYoEcYHDEPnVb0ee3wVCeNUh7Lwdjnsjndm0nEeE9R6rYJOpTjhqkwzYd2eRENKPO2QU69Hjaqe7oh4UZ6kS7EOPDsYKIqlbo/uO95Va1J0QgnKMxIc6mit4F9R5sE/UBgkZYqQPepfhSWVGDXqC3pRNxU5Qq+yl+WrSX4OfDEO/BRd6k3JWUw7ZZbVHewvzFxbv+Ip9K4FVRWHY5qOjXLxG16IBLpgZUje930M2u703bk9Zxi4VPaT7Rq4NwjaZim0PNsz13T1tHMrN9MZXOGu/oKyv4ga78DUBVuZDU5t1fTHX+k2663vVQNlkuGt1YKudViNDumM6PuPcxHK5cpqgJy/3eGnMhyPz6XTD31Vi5alFQZaKUs9c1w0fpFQMKbJmHebPj08i77H3rnRJop3EL4/3DyzXK99//z/Y1pU//OEPROvDyZd6U0b2W6R7oO37mz3bIPJbzyS3s9g1JVOnHA3pUYtkeTtTxyITXtTZOI0Hcl7s7FjIOXCL8ffOzKOdIcgDcj1fuXu443g4CEEIgVaU8Pz+3Ts+fPONfucO8dOXT2yXlfvHO66XK7lU3n/3nWoTs148gBAiy7YwpEG48aTC8lL0wjrnCEnpnN19JQj7DXd2t8ujlFXreRz0UNeqFFJvBSO9kNcV7xN+SnQa21oY3cBWK9Ggit471+sZ7z2TqWKCg+o8W6nQClOaTCKng8LVRhoHHh4eKbmwLqsySkIgNvBxIOBpW2U+TJKnOWfGnMrHLy+8vrxyvl5Zr4uhwcJ7e99ozZHLSquVFAIZnbnDkBjcQK3V8pUkBfRdIWg0gzuaIIn3797h+Jp55Ty6ILouCBGjuiBKFw/igyCZ1y8vXK5XUkq8f//esOcGUYbD6vR9YfcYBDaT8oYYhFsb5q9nuYvviIFcVHhUGxA986DiGUdlGiL3p0DwK1BxVdCgc9HMaP6WwaOAu0Jnh52+hsvtJUns+UndCZLq2HbRzGugyOfQO91H/V61EJxaweSTU4GTDF7e+IGvByn25+jaZnaSr1qla7OP9J5h5LwXTu/STVqqg1oHvKp1JU0srkAzg1mvVINfXRf3Fbpg02bSKNVoKna62jbU2eHbTCCSwWo9vQxjts2xk+pd8FXzTc1zRmjvh7H5yG/biifQb5uEDq39+RIUpuzVXQZ7g+yMR9QDYlBW/+p7olvardOr4pr8GSonko9EW5YgLUe0N8M2FtD24Pdwv4C8zY7SB/LqcX7Cp2e8P+BcYBj0nk3DRK2d43xkmCTYyLV9DW60DSiGwPntjbfLhXfffKPQzWrwJGp1vHUzbIVxGtnWTByiiqJKowVt0qf7e44nOBxmam3c39+Rc6FshTiMghudQY8mcPRR4Z7dSRXm0USf88ZlkZ8oRFWNrusb0zgyDaoQbq1y3RZmJlxyTIeJVNTw9/Zm8P/DI3dP97ogWlcyLDAPA+u6WtKD4Mx1XfAG0we7MFT4FYitVMZ5pONJ08S7uzuC97y9XXh5Udzy48O9oKVhJA5SL4Xu5Ii1FU7bqQxIHsFMrTVy3khpMMxRF8UYJ2qt1KwM8xQixamEpZqawvlAbRvLtTIcZuq6cT5fhKn1xunh3mSNtor3TnRxn1noQEG9F7FGSm9MIXC6u2eaJryDdVs4HGZKaVzZCEnE6VoyNNWo7quyiP3G50+f+cu//IvyqFxknEZpnvs+5QtfjUmFP5j8r1QplCiNXPdoR8hm2DGQjV6+lrzsHQLOfXWdSiQl2KZ164ZonrWIeO6tU0rleDzigy4dvCNOg4XlCb7ZDXut6aLoTfn2AWnWQ+jU0qlG7nWP6lfbbs7rpOhJaVAPMJn7uwMpgesZ3z09jgaHSSbqu6cb6L3XOnpXaC3SfTcYqdG6x2PksSWbYpJKZQgN2oDgttm5PabCK6NHXRgiNgvNXrMdmWkmjf0KZzkvklZqIE1+JS/U1hnSTA+CQVxTC54WOF3urXuR5l2GQY+ju4gaBM2xjvDovvfGlUDzxTwKRmRbQVbpHYIuY9cULeF7pO51EyYEqV2wCnbot17Nt2MXlTM+Cx3wNwVVrxYZbpyB/U/bDXn23rRmeUr6iprGd/bYLthySybEvk4Hb5Ed3X+dlnceAm0c3i4DKYNk0K3s0k79qX2r9M1RiLTmqHUgl4kURFKHMIB3TNNAvJv11Z34zXFUMutmr02pRTlkrUpCPk382w8/0crG+w/fAoJZVe/rZEy1yJSX1zfepcR1W7hPR1otbIb5+xB5fveOsmawLcs7qzBtzjhcbioy/U9luxTm496JU9jNzP/jL3/hx7/9wD/+r/8Fb50mJW9cF23y43EkxElcZm90q3kWZaC6g/NFfTBTsr5710l0mvOUBqfjEdc727aZ+ETx7QHHEBNxmDTY9EZ8fn6nMKmYboTHsix8/PFHXIy8f/eOcZrJeWMcRchsrVji4o4+7q+AM+lnpW8KGZPzUUS3clm6eQ4c1+sFeme4u9PmMIzsUjThwUH8RVN6YfAeP5rSouvwDTHx9vZG8EmRFt5bxIhj8FIMXbeV5fMXvvvwntPpQKmVXCpHw/l6h5wrec3Ew0yMgVIyy1nWem9y0W3bGNLAw/09D09PxJh4fX1lvS6SAdsDVWpVu1VtlK4IwlI7ronM96hjt7lOr1WwgElb9zTe3qua4sJX4l7wn5ck1Q69nVBuVYGEwzTw/OG9HTA6IIJJPiV37LeHsdiZEQzmURRHt6hpB7FZNIaULc/fvOPDxwt/++lHXTIhmdChEVxnGKWsKV29yRAorhLQh0lHpDO+QQ5xHRgF1zzdCWZx7P3CIidvpszeLYoA09UbBu5uZ9at0EcSSRUqBVcl1fZBh1CvpIa2QAeNQm/2d+uK9yN42JaMD4kSIVZnmndBUQKJoh3aluJvCr6C+ituFTu968B0ImKbwSstQ6OaI1tTeO5yU7uiLKqb0dI3fHPiX5z21z3FtpnCScoikDS4m+PeYkfc158Xp0PcmVGhW3d2N3gBHKX1GwqwqxH1tFW7nDSQyARmzyi6IPY470olEASP2c/lurRP+8ZgIJDktZZvLimtXNuhBzYcrQ0sq6MxaXMYJnEtIeKSxy
e5iKPXpRGj8r/WvIlXjFHBfUHfOw2J3hvv3z8yxJHonAkHYE8eVm6cwu5ckEhgCFHx+Dc+xe/FcKRRfKUhwHo9m4qeUtLAWW2DqdXx08vP+KCAPW+w4Lpt/Nf/+n9weTvzf4n/V4IPzNOBmEZyKVDFXQbvb4pOpWT02+t5vLvTmds7W8niVS5XcopM42Tcr2OrVfyajyQSwXXWslJrNWhVz0d0dgBs28o8zzhUvnF8uOf56QlwnF9fqb0yjpMmlk2XhPPepjtILhiRKDx5Wzda324Ss50YO55OjDFRKESntbtZpEStBRfDL3oh1BRXsqKRYwrW8KUHqZpS4TQfSYOySRKRQrN4Az2LMUgyu64LKSXVYnpw5qLFO1JMbLUQq3ovhiFRc6YjtcDb6yuXZeHdu2dTQ3XWVdlP13Vl6xmPVrR1WZimiWVbqauC6aRUCnjThXv6jZQqeaOWyjROdLp8AlXEprek1q/QA3K05yw8ssNWiiKAgzOlj9yxzfMVg3bcJkFvH4jr+cw4TYRR+Lf6G6C3Kj4kKJYhG0Q0DxP//j/8A+flwpfXN9ZNCcCxd2KEGFBlaXTUoGHBUQUvdINkK7Soy6FjHzAkeFAbmscbzIW58ptrtwtmL7qq9mEWWbwrfAxWsomU7mlOguaAeKVu2GxxVS14dmHjquJRms15veDSIEy/FF0U7atUs7pgB1qnt4LrjeIEi7jehEHbwdkd7KVevRVtml5FWA51sXdTUqkOs9mWU2xw02tVSsH5atBRB9tKaqnaxGzbrU6dHN0Zh7PLn5xtCe3rxN58xRVvx7/UcArw27fbr5uF68pD6qauwzbK/YLTxSyiX36cr/DWrn7o5k3B/vvNO7HzKqbeqQDVU3ok18h17azVK/stTRqSvGL9nfOkMOCdYxhnQmjIRhVYt03Cihj09aoiYnzaY4AeBPlhog8Tf3Q6MQVa94x+4ng86XyaEm/nK4dxkIM5eFzwLG9npmnEe0fySW2LFXrsxF9clD10vb7B8fz4aJ/fSoq6eLdtI8WBP/677/BBG3ipGUdgjGqp80a811IJMeGd5PZDGnTWOM/pdCce2UHA8c//7b/z7a9/zenbI0vexBO3DZcStYj3CiEyD5P8HduF43xgGCJx2yTh2tYrw6Bf8vHhwYLUlH7YnaNsepBLa7e0SxDBRDelQofuAzF4uajtsIY9j6UxjMoGSj0QDoN9cCq9Vi69cozHm+GsN7her0zTLG2ylW1sW4YO4zTYgT5qMmvFipN02LkkfX8IiaenE9EykgiSCLpR0khpp3VALNczoR/AO4ZxvEWHaPhx4D0pRDyVTy+2RZRNk3kTnHZ/ulPP9WpTIe6Gc+6Kit6hbCu5ZBnkvA4SZ/uB9+32IaVDLpWyFcIYb9LWPV5jGkZxPN4R8AqPM1nmPvHQu0lebbsJgel0IthGmFuzk9dRu6P1QnAjTXbfW/9Brpu1qKmzYh4OOFcg7Fk93nTtHag6rNqeheogdMO+AZepFnRxM5EhgvIGt9HwzZ4lkincKnKeREFnVjWp2IYolNVMm67pkMZ7c0tb6nHrFlPuKfZ391KHVjURY4R9M+VVMbc+XY7U2pWn1XqzaDspXXzQ+ytzl7PzsN02nE5ja9gUboclgeraDZIQfKUBznkNUV2nMMEgE25eaqDWW5f5/lzolLL48f0xqLLhtX2Gb+H253f5tO/Rtor9L6Hssd4MthJfVZAybr+ELN1MJj+DgesOMWlV1WfA7T/OLpzAXOjc/mxrkVoirY1c1s75WonTQIwDwQclEgziG1KITEMipIGUksLvvKn/gmfZMofDxDDKhWysmBKv8eRcSMmqDpw2K7tLAfXIbFsmt415GAX1Nse6rKRBr9/nz584TAee372jU8ANMs713Qfjb3yE6l49aUxKOGiNUgSJpiHxj//4XzgdVZHaWuPtcuYwzaQhEVAtbKmNlCKlVVUyXC9U7wlpUFR8UImQpM2ZrWz0pvTX9XphvLtnng9EHNdlJZfGMEroovKjwYrRIKZhYAqefjwYaaNfrOyrlxfGNU0TZStsTaatOARyXtR2FRKvXz7TkelsPhwoobItm4xfaSAlYcAxBmUxEWg0tnxlmg5a/1q3VE8IPpCbAvH2i2ZXvIQYLTTQ4AY92ry+fObl5YVvPnwDznGK8jfEmBSjXWXAqbnSQieaKUX9F5qwx6i+B9cc1RXiEKGok7qUyufPn4hRqqfSKosFAuqwKMrxd+IFvNfE2Cx36bqqwjBF5bmHlL7mrZjevcPNM9BLNf9Dt8DEKlimKcCudh1yDRFkaRxoHlxVV60UNLpt3U1aqPgA7x2HJHjv8nbhcn6lNzjOB4L3uKTkXRcjoQIu8Pp25f/1//x/83K+ElMAbybE5JlGyLUwdkU8qO9Y6gq8fnYZ9PY8L10M+6Sru8wbTt5Ndnm7I5EhT9O5oit0ubRusKbT91dnObtuko6XAqop9bX1IlgK8Qs17JN5tIgLRwzBUk29CHOQT8Wkur07mi82STfozf6mx7lALeYLsIle2Ox+GeiSwP58906bU8WmaGGARX9AysjuoGZAm3upFsTp/G0C94D37RdQkG0QFtHdW//aQcHOB9zmfH2+tG7quuu6DLtJMyuSd+9/v7WdrdOvpt75cFMkAfoM+a7IdrgFINL3QcF4HiOTJapwtBYodSTnxNYjLgaGyeOiw0crxUkyiQ1jEsLhPD50E8xgr6tI7Ov1Qs6FISaKB98UceJ6oJhKUKU7+p28Mzl96zLqlkquGrxKLuStMCSIybMuG3HoxDRSt8IQA5e3C80LmVEOXAenNk7fRE6XXii5WnWCYn68dyQ3cZgPdBB6cblwvlxwHdIQGVISlJg16JW8kWIgjuNNQRq9o9jbqvM58t2vfoXzgVI2xvlg6i0HacQFz5QGei2sWRaAIaqJ1LdOLKVQqz00Tvivnhk9rLUq7dF5r5Vt2VjrFR+PcjW/vfHw+MRWM+uy0ps8BL43rstF1Z/ekVuVIW2Tm7f2bsmxlVQKRJUc6QFXTlEIgWFUbMg4jSQfLYvQMZpdvnfhnut25c9//RfatvLttx8otRJNNtqK6jb3SU666MZlyYyjTHQgBVf33eR8jdfLG4/DI3gZU5Zt4Xy5MsRNue+9KuKjJUqRWqP3Tt5Eog5JmVT75QvdJKpf8ePuZXorXVkprSoQLAZ7gHvVoYXVTdqf9V0ZMz4ElJrgiSEKQ0VbjzKE9IFpTnyEI+DHoA+1HR7n8xuue+ZJ5SOO3eValTfjla/1+dNnPr98MVKrM0+DFDIekwqv2p6CZ/KeHiPNNXwvMrXZRV0jJBfZdat+P9Q0R4Jd2nilmzZfDI6yC4B4m/b2aA2B6lIVycS5h0gAPdJbpZj6zbskYxvta/eBr/KHub1JTtO9s/SBSlX0+f4zVk9rmT0YUPHUOtUVkmfx13A7iJ1xCAbW4G1r6s3ZlqOcqdqKeJtd1Fsre7nWni4Lt8UHTIEVbMrvpjbS5mEn8y4zRlueYDz72b4urBIIfL2fac3dypO4/R52Udqr5C2+Z
P/vhrQATTLl4MVFtH4TLnS/T0R66wz4o5VAKYlSZiqjBpYQOR0inkgMgzbEEAgxkuJgP+hGb0IUvHeUYkS7U0mYtuB665cecBTvKFmXeK0N5ztDGiwgNJOGgcvlwmGcGeYRuuJ/cpX6K4UBP8oKML070IuCD3OpfPz0kV/95jdEP1HqSu+OECZC1KVweTsTh8QQR3kRgtRdpWyKw6mwx7Z/eH5H6ZXXL68cTydiTILlI8R0NKd75XJZFPhpfOX5fFamXkw8Pz2Tc+XtfOH+eOJ8PfPD6wt39w/cnR6YppnL5ZXz65k+N4ZB9cHVO3EStSqsan+oQVkl3gc7ONstIM2FgM/KIp/H6dYr8fz4jlYqW1m5Xq9A143YO8vlwvV6YTqM9CZFzpgSdds4zAeuy1WrTzT5Bka2eW+ytKyLo3aZ8szkhPPkWvCt0IEPz8+M08QwTQy1sWyb2eMbl3UhRXUz7NK9YVAMdwq6oVtrtNJJY2LNmXXd+PLpM/cPDwwp8fTwSOudrRYulzPFtpyaM703lrzSUHCfsw9p3jJ53SA4hhjZSmEvGep0yRS9Y3m96OAKnpYr8XjQh6ZXcHqAnDmwva3uNpRZH7W6mkErtB8StMa6XGk4xmFQa1xKN5VP93C5XgA4nY5EF8k13yYSRUc0fA+ULZOGxDypcyIkRwqQonKRconiI3onDRM5Nsa+E+Kq2IzO4V21/gQ7XC32c3cp7zlAmo51iexQpZ0p2nBNRdK8EaFYz3KP7FEw3TgB3wFT87TmwIt4bHxdWHoJOC+PibOVbnc2926bW81QO4GgfkeS4CwavUW5G7w3DUegOhHszb6Jb/4GSUmh5PX+R5Nmur05zgvGlX5LsleDdHrvhBZoHrvUdlhI3E6z79ctGbViEFZzN9VXt21Fm0uwfKX98tkhKynQev/Fe9SDXTRy/u7vV987O5o3KEvDB/tr0fbtURcxnZtCq2GgWR/JNbJVz1YjtEi36PdcG3dRXfSdThoPdCRsiWGg9k5pzWTwDucD4xiU5Izyl1JSQ9z33//At99+S4xJvJt5jZz3bNeFnOVMXpeNcZyYpqMpGZXZ5UJkTEqPUBWvN+LYE8eB5jyH48H6QjTQpRg14NGopbFcLtRWGf0ENA3HDmrNLNvGPA6AqgiGUXlTow10Ptrl4f2tarq7indwPBxtv1ZMz//+v/3vQOfv/uHveX56R+uOjz/8TBySPEKtc7muPJykivI+UntnzasQFDtgYrTiHHZyCiWzLterfkAj87oTLh59IMxm7mhaQXvja9/A2viXv/4LUxx4fv9OklbXOBxnXTqjpY+GyBSsX9JLJre3oYFI1PPbG7V0bSNx/wAJulLQmRrYmncc4sT8699wXa63D8x6XUhBPbPXy4VzLfh3z8yjonrnecJ7T66Zvuj75JqFF4ZgPIPcwrUKDrq7O/Hlyysf316Ud4R6n3POfPr5I6WJ57gbZz6f33j5+JFlXTgd73h6/459m3B+N77J5XtdrxwPR8ZhIAdtCN6ilb3zjNMkAx3OVC/mvwjR3OydveeYEHF0qveaKrpeT9clcStVq28vjW3JzPPMPMsTkm2r3J34Keoy2srCh+cH6r/7Ay9vF9blTKsb02Hm8nallEwpjdQcrTp6dbJmeMlKBwIEj4vJqiY9t6Kgfeq/6fBN3dTc11Pc+B4dPtyGGUejmUzT9SY4QZov1AEBxWnLiwZltdZvP0Pzbcd0bBO0zQSLr3by1vSmJj8cCvar6IClm+xRF/+tRtUDXVWxFBkYJcZNGlK8bQwARZtrsIMfjHj3/TatSx0laWb1ujR2k5tk3/6G/9Ps9bhdrwC/zE+yC7cZzEcziLNSTNHUu7txEl0vtV3EX9viLE5QPiTnDMq0P2sXJISbdLl5iVekN243k2EuAzBQ6sCSTRXmImPwuOipW1FsvfeEkDgMkxkgBe8lD24AnMft3dD2Hjgjoh2Ow3Tg7k7ST8XIVFMHKongL3/9V5bLhf/yv/4jd9Gk/sE4NuSmH0IiWxq7952OhmgHOBeNvIfj6Wjepq9ydt885/OZ8/nM3d0d4zBSWyOvi34X7ziMsyZ4J7YuuZG3tzde3t6YhoHT6Siy3kQeUiYayRMEP3rveXn9zA/f/wgR7h8eeb5/ZBwT799/YIoRguebb79ljIM8cTbs1py5f3pgmiYcjlw2aeJ24tIH6+ztjvlwpLXK588f9UvPR2IIROfIXcYQcpPkzWuizlkmMlc7zNLWpxCYxif87SDr1G2DWnBBWPJx0v9uVYegN5L0er4yzhPH00GHIp0tr9Ri0Jd5AqZpYi9buZtPFCreO+6PJxE7IXCYJ3748UfedSwSoZnqCkqpuACHcVK0SPAsy8oQB54fHimmc66ls23iGsZpwuMIfuOyLuAjPkbK5SosMyReP33m9XpWFEfSNO5xN0WXbCyVbds4Hg8W2eFJIXHb+Q1XdgjyUbn9rhhpxstoSnHd+gDYUx470zwbnNNuSinXO657ruuF6DzH40FnlTPfhSX6ll5JMcpIliZKK/zut78BGn/5y585n1+Z55GWEzUbsThCC9YKtlU2GnPwCt5DxrPmOmmHZdi5mN0L0sAuDkwCu/8j8t8OUq/Dudtx5TQOQnVUb8IEM2P5lmjBOC3vcNVTvF4fmvEaO6ls5UK7M16nrbepWkGGnkRtWQ5r7yxKw75e7wa+R/N771CKyVXJ+iNl3wwM6/fOgvC8/d8Ku7O3GbF4XxVHFUusNV/Gnpq7q432etZqt1Bvv8yDktsd25IkD7BNwmA/1/vte9KDficHrhqjsquV6HT3VYzQvDZP+Vl2SKmj+HcFFe7ptzp3BrYyUuvAViS9HdKIi+JUUhoYx5mQJqYkP4UPnvEwmSBAn9dazZjapK4qXqBedwoODb2TUuD9+/e391wqR8foNISejjPff/9vtFxwMcpIi2VVoSHs9fVF+U9z5Xg44by2bvGd9eu1rCmH7qDkypYz0zQY/5DET8QA1TFMs7YDoro1qtz63els+uGHH/nx+7+R6fzHf/gH3r//hnVdqWUlHk7kbeX18sZ2WXl8/477OfD5ywtpjLx//57f/va3GhZ9YJyVI+cbzMOsHLSmZ3stG8MkwrpYU+G2bUStMInWwPVKaR0/BmIMfP7yynndmKeRLa8M8WgPtvT9W16Z5gO1VkIQJh6Pkfl45HI5k9eNOB3wKEZAeK8eyNIqkU7wCcK+w0iL3Gshec/90yPRJ3zwXN8u6EJrdC9XcHAykTmn6OlaK9mZ4qRUGFBDU1On9MPDvbBs+q0BLpeVznDDZYdxtJVcWfCvlyvBeeZxpOMoWR/ygOdqrlzXxWc8PjziumfLK87D87tn7u9OhDSwE5LVCc/09jC1UjRFuqiHIwR6qfgYhNvWpgdGOzp09KDFQBgGTYG37KGv22C1jcV7BSPWWojm1Iwh6pJ3nmmetb1V029vlfOyME0j0SdIDlecJK7Ise1957e//g3f//BvdFc5zider69mw9bPUoHNzvKtejqF7gIhNAoO55MERHahydBmk2cT
GdwwEvE2T4tJ0DZmc21z2gaQKauzT/lOl1JvNFcJJBpmduoVVwVLdMS5tN7ppYHXlqHuBfFYrnb7/zvqat40tJgIoO3ksR2UtSp6v9kku9eCCpaRRFa/iV0UTnzGDThr4lCalfzISxHlp8HR+2YRFvvft7h19pffPDh9h8K+Es7gbyqi/d/pefdQTTDRxTlIGNDtIg56ew0K7vALb4WeON2pxifdSG4U1tfFSTXA9URrgesWyH1g2SKd4SatD4eR4MGHSAiJGCIhRpz13afgSTHKm1QUAd67AWHB4kOqItrlM3Js24r32npBKELyik3RRdz59W9+c5vNBPMKjchNEOmUBqiNbZUaaDocCE2gZNt5imShlJZVl0vmcllZz2c+t0bpmW8+fMfxeHcbbLzT56CaX23bKiF00pA4rwvLcuVtuTKn4QbheweOSOjw5csLf/2Xv7AuGx0Yfj0wDom//4e/57e//o1ENwaLykxYyLUo9bpWiusa6GPg/niku27BhYoeis57YpQBbCv6kEe0shzv7jkd71i2hXmccE5yse6FfYagUu1xHCi5qmR7y7y9vXC6f8AFM7RcF9JhJoZgvQqRui3ElHBxvx4s5fW6ct108N/d3eNCp+bONA9g3oSYhFV2B+OUTBLYjY3XrV5aZWAgxGQPT+Dx/sHgpcphUCTI5XxhHCtumrisKylnDjZZr+tCK5FxHMi1aGrxkXy+8vLyRS70IOiNpt7v4+lAXKShn8aROgy0bt3UTi7grpJhXEuE1DnNM7Q9ilkQRQrCBPesfde8cPAqXX7vEfVDiBB1xlE4h7aaJlGAY28JdPgU6EXBfr6r+2OIVh4THK50lu1qUewjpcHgAzVUgkv4IG12pXM8HfmGD7y8vpBi5FrO9LrRW6IUtSDjHTV7vK926BRWYHQefGPY/Qk9CIGwq+C2RPVOIRN80ia0n3M2qd209QY9NK+E3WZXhD79OshKz9xMiV3xL+KyGntDnZ3wOszNRe/sa7WW8XuxksPiQfYMVQ++mxeiU8qGXOHxdtgqQ2oP9Qv4W55Ssw3BeCoLM2zYhkPDEXBd+DoNy73CHOXWyeKCcpH2WaLa70/fNShflxy3v77YFuFl9Oz7NqLnyt9+HlM3AXuwYrfXyXfBTLux9msZURNEhqNTLGsqUXrA9YE1R3KJLNWRS2CMgTBI+TPEhDNl4B79E6IjRE9AW7yzbLSff/gb9fGRu5N8DMnEJ82UW641QhKMWGsRx2Wb2/5jtmDG3BD57e9+S69NMfzO40Pg/OWFAMTHJ+6fnjjmTdemi5Sm3uoYk3K27PnrWZyu7451uXJdr6zLosy368r494n5cGKfTl0ItLWy1dX4GxV8zePA6f6B+8uV+9OJ+XAQ2kLABzmlT8cTv/v9H/jy8iLOsFZ+9atf45O31Ghuqd7RRXGkTsN+7VVS4JaZhknPTuu4aL6M1kRcDzGxXBe2kklpEFlXpc4IMTL5A3nbRMCYxCxEqyU0Lfe6XhU/HZIZXDB5ZrUavH7Dl533HOfpdrCH7iEEQmu85ZXruuB94NQqJXecK0zDZNyFJRh6bQKDH6wQ3mK0EaxyGkZiFDwjDFIk7WE+EFwkl0q/XPjrX//KH//4B759943C/ULgcJjZSiGFwOPTE9fzmX/7/gce7+/14TB1SIoqdhmGEe/ObDnTnHDIXAtjTLSS1SRlqbIhStKqKsFq6bqI5HMNfGSKkxneGoNXmiih0UojjAODc18Pdqf12gX5AryXI7s75TiFFK19rRFdwA0KH8N5xlFrfa8V14N6ElLk/v6OlEb2EMEWzEmeV+G7MeIcHOc7hjCxlgu9Zz5/+ZlaK7HLYb7lDskb0auco97ly3Clk1EseHCNViM9dJNg72a7Tu8yQOn/s65l+o5+3KZnTcxN5GZztzm9h2Joh9WUIhzZdbmkXat4F2nmlgBwreH87nHZu8fU1teDv5GD2ga8OGzZm1U/64Km2V7ofq8atcC9X0qHENnrDE4UpNpuf94Rbr9fbt2ErsYSdE/FE2+XoN2I7H4VDDoSdNW96k/Dvmkg45q2VakNvWU01d7NGY44H7fTIf5WWrXrC8p+63j9LjLq2eXjJDNtZCBQa2JZE3kbyHi26khhUPmPD5zGQa+vRUx0O4jTaeAwH8V70rWRoiHx06cXvE/cne4VteI0S3XvVUXrTABhMPIQByrGd2FS3yLe1Bu+X5raGF3ojMPEN+/fczlfFDjpPYSBvneUNITpYyS/ZUStuTAMCh4dUuLny0VDTYMvL1/42/ff84ffzxpELOkijQMvX77gveM4nLgu8mD99te/41fffitJfZSHpTWvDCcUQfI8jTw/Pd3Q2ZILg08034m+iZd02npyFRJRaVJKZW0gMYrDjF7Pt/caJKOzmyOkxOwDw2EihsDrl8/UnHl8fAKw1TcSgFwlT/U+UUqm984wzyQfTaf8wHW54qNc1Q3H29sLj4/P7OQfMdG3FdcdWysk5Nb1PvDr735FDJHrcuF6XZgPJ5EoqyRmMSTreUjqPLEHU3Cu9NMKZ+PWueuCJ1oio3Mi3a/bxvlyIVgbm/OOISVeLxdePn8ipIHnd8+03tjWhVyPNGCeZ8ZxZFkWLtfzTamlVMmv+f65Kben9UZ00WS4/lZU0p0UT8KIPeW64mOQRqk1s8/vNaJyVPuOacIdaZwsjE193N1w1m4f0uN01MFbPW4r5FqZhiRTkXE/y7IwDoEUEj0OfGMqktrKjSCOPprb12SXhnu7jtb2KgNOD57Xlx+oVQPClqvgpgQjNp0HQUWNLly+6msG75QHZhhw2/cKU93oSt1BJL0uatNT5WlAnoa2M/reYCg7zSQtrQy22mPKIHr8hZRUG4mglKD/y1m4xn6210JzCUkjdJpXIOzeBuMwOp3cYC/L7Du4dDvMjY9wu9kOerUsMvt9la/UUUCev8WQCGLsBsDZIW2bQms7Sa5Lla4LtlVrqpNN1r6uRb53j3ojpHZy9v27VjQzle0y1/0399ajYXvbTsdgUA0WJ949tU3UFsklcb14lrURh4HaCsn6UmqvEOUVCEGKw2qa5GkYmQf5AIIp44q9z7/7/W85TEcNqPYaV7jByfrHE7xMqOits88N+GDGRAvepIGLltSaC9FHhjiS7pN65Y2niPZMhBCpLUtW75D03g0QPEveOMWRh/tHlnWzSA3Jd58eH2+/R89ZkFrwnE4nZdfZkPjl5RXnI4fDbK16lZo1ZLoOITmdHa1wXa7EkHg4Hbm0lXVbGceR2m0gDHpvvRUSBSdVnYuRMSRylvKyxCTD3TgxzhOx5IIPA9M4aj1zjm3d8MB0PMmE1grjeIDerIVMb4BuNDNq5UyfdUCFqKjsmAaSD2yrurEPx4UYB+iV6CLDMIlP8IGyLUZaJZO9SSs+DBMx6GDd8ioyL3STwVqpt9P0M7honxd3i1cITiac0irzdDAOQQ9PGgd+/atfs5aV9bpxmEZCDCTnqK3T88a2bIzjyO9+81s2k7q6Hmk0kUdVUF2aJs6fP7Jse1aMorFz8dx81ON
eBuf69yklebayaWxPl8oVmJzm5eHFGI+6cYwzLLf7cAfX9mijNN1bDGaIPgzDD05LqmdhUxR7zq/CkXmIQr5auKlXNEHZ4W6xiHQaZy66QTJ3hOfcd2A61vaauG12/eXgc0i6GbJ75++ZcP7z9gvVeirJxfyXB9jo0VdDoGjA4/Xp1qgJ7TIm0WJSYcDodrFsuYTKgrggvcbLfsdlvIhX4aOB5PVNoc6YwTpaaqRKWRa1TB+/pqYy2pUIjUoaautZQ+Z/nBTydwjpvNhqI6HgaCrYTgOC8UZ/BOpn+bpT60qAQjU+0vj/XL4rsO9dW7HWPk6emJ7z9+8vj+kd0usJTM4XDg0nWkFHk6HPj98VG1YxD7moSxxmGimMK6aeWWQeZweNJrrGMaRjKFNw+viSRcFgSB845x6CXIst0SiywTkyuA+LilTnGBaSGbQk1FQnYx8yxIBJeFcDnNEze7W4buwrevX7m/v2NJ4viZ1IkzTxOVSkXd0HOz3VFXNbHId7ksgre42+/lD1gKm+2aEjPH5yPWOLarliVnlnHipbmqqPd/Gkf+/vSJN7+95aF+4HnsCVVg5VvGsedy6eS25MRdIwUqogtTMikrZwr4+PGTINOt5eHhQXVaWZRNWeStjLjIYkYtvI7j8SSsreAJtQwWc1wwMWEbS+XrKxEzLgt121DyS02s5FWmeZbRMslLvThHyoXa+6sLxVorS8plZhoHhR4uhEqaD5u6wW7F/TFNsrNoWoUpWo1G6U1WwrMShnTaWHh+vtDq4TNOC01dMWvX8sBLaBOsS9dJNqrsGHTh+/KZRetqhQTQ8/PHT54vF5FpVi3BOaZxph97dusN5+7C538+k4H3796y3WxYNORIzkyzJv+xLHEmp6KqrHAMUpJnzwYr+xxkR56s+O29cxSn3CcjFk+ZFguhCswp6Q3iFXUr8LtcEqt1K13Q2u8RnLCqnO59lqVIz7Ii0//78o1zd+H+/o4P797JIaQL+ZQyKWfWq5amrhjGEWERCuPIqOsuqpQS40LXSS5is9mKaSElYpqp/eZXAFAHuKzPhfdyQIcQ9LaogTMDlbNYZOeVU6K4TDAOCgq/hHmZOF8uQjr2nlotsUnl2qAav7WG6J2wm7LA/GLOrOpGh0zZBZUUub19Rd02gtzfbMTAkcUIFEsUY4N2nRhjZAHvHFinEqj8XsZb4kXOpnXb8tcff15lpO7SscoN6F5WAi4QrOduvxfJKqUrZXdOCecD/wO6KoanIvmT/gAAAABJRU5ErkJggg==", + "text/plain": [ + "" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from PIL import Image\n", + "Image.open('demo/banana.png')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sRfAui8EkTDX" + }, + "source": [ + "### Prepare the config file and checkpoint file\n", + "\n", + "We configure a model with a config file and save weights with a checkpoint file.\n", + "\n", + "On GitHub, you can find all these pre-trained models in the config folder of MMClassification. For example, you can find the config files and checkpoints of Mobilenet V2 in [this link](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2).\n", + "\n", + "We have integrated many config files for various models in the MMClassification repository. As for the checkpoint, we can download it in advance, or just pass an URL to API, and MMClassification will download it before load weights." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "VvRoZpBGkgpC", + "outputId": "68282782-015e-4f5c-cef2-79be3bf6a9b7" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py\n" + ] + } + ], + "source": [ + "# Confirm the config file exists\n", + "!ls configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py\n", + "\n", + "# Specify the path of the config file and checkpoint file.\n", + "config_file = 'configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py'\n", + "checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth'" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eiYdsHoIkpD1" + }, + "source": [ + "### Inference the model\n", + "\n", + "MMClassification provides high-level Python API to inference models.\n", + "\n", + "At first, we build the MobilenetV2 model and load the checkpoint." 
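The code cell that builds the model and runs inference appears below only through its logs. As a minimal sketch (assuming the `mmcls.apis` helpers `init_model`, `inference_model` and `show_result_pyplot` used elsewhere in this notebook, plus the `config_file` and `checkpoint_file` variables defined above), the inference step looks roughly like this:

```python
from mmcls.apis import inference_model, init_model, show_result_pyplot

# Build the classifier from the config file and load the ImageNet-pretrained
# checkpoint; passing a URL makes MMClassification download it first
# (hence the "load_from_http" message in the cell output below).
model = init_model(config_file, checkpoint_file, device='cuda:0')

# Run inference on the demo image; the result is a dict with the predicted
# class index, class name and confidence score.
img = 'demo/banana.png'
result = inference_model(model, img)
print(result)

# Draw the prediction on top of the input image.
show_result_pyplot(model, img, result)
```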
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 323, + "referenced_widgets": [ + "badf240bbb7d442fbd214e837edbffe2", + "520112917e0f4844995d418c5041d23a", + "9f3f6b72b4d14e2a96b9185331c8081b", + "a275bef3584b49ab9b680b528420d461", + "c4b2c6914a05497b8d2b691bd6dda6da", + "863d2a8cc4074f2e890ba6aea7c54384", + "be55ab36267d4dcab1d83dfaa8540270", + "31475aa888da4c8d844ba99a0b3397f5", + "e310c50e610248dd897fbbf5dd09dd7a", + "8a8ab7c27e404459951cffe7a32b8faa", + "e1a3dce90c1a4804a9ef0c687a9c0703" + ] + }, + "id": "KwJWlR2QkpiV", + "outputId": "982b365e-d3be-4e3d-dee7-c507a8020292" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/transformer.py:28: UserWarning: Fail to import ``MultiScaleDeformableAttention`` from ``mmcv.ops.multi_scale_deform_attn``, You should install ``mmcv-full`` if you need this module. \n", + " warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/local/lib/python3.7/dist-packages/yaml/constructor.py:126: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n", + " if not isinstance(key, collections.Hashable):\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Use load_from_http loader\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading: \"https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\" to /root/.cache/torch/hub/checkpoints/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "badf240bbb7d442fbd214e837edbffe2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0.00/13.5M [00:00" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "%matplotlib inline\n", + "# Visualize the inference result\n", + "show_result_pyplot(model, img, result)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oDMr3Bx_lESy" + }, + "source": [ + "## Fine-tune a model with Python API\n", + "\n", + "Fine-tuning is to re-train a model which has been trained on another dataset (like ImageNet) to fit our target dataset. Compared with training from scratch, fine-tuning is much faster can avoid over-fitting problems during training on a small dataset.\n", + "\n", + "The basic steps of fine-tuning are as below:\n", + "\n", + "1. Prepare the target dataset and meet MMClassification's requirements.\n", + "2. Modify the training config.\n", + "3. 
Start training and validation.\n", + "\n", + "More details are in [the docs](https://mmclassification.readthedocs.io/en/latest/tutorials/finetune.html)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TJtKKwAvlHX_" + }, + "source": [ + "### Prepare the target dataset\n", + "\n", + "Here we download the cats & dogs dataset directly. You can find more introduction about the dataset in the [tools tutorial](https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/tutorials/MMClassification_tools.ipynb)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "3vBfU8GGlFPS", + "outputId": "b12dadb4-ccbc-45b4-bb08-3d24977ed93c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2021-10-21 03:57:58-- https://www.dropbox.com/s/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip?dl=0\n", + "Resolving www.dropbox.com (www.dropbox.com)... 162.125.80.18, 2620:100:6018:18::a27d:312\n", + "Connecting to www.dropbox.com (www.dropbox.com)|162.125.80.18|:443... connected.\n", + "HTTP request sent, awaiting response... 301 Moved Permanently\n", + "Location: /s/raw/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip [following]\n", + "--2021-10-21 03:57:58-- https://www.dropbox.com/s/raw/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip\n", + "Reusing existing connection to www.dropbox.com:443.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: https://ucfd8157272a6270e100392293da.dl.dropboxusercontent.com/cd/0/inline/BYbFG6Zo1S3l2kJtqLrJIne9lTLgQn-uoJxmUjhLSkp36V7AoiwlyR2gP0XVoUQt9WzF2ZsmeERagMy7rpsNoIYG4MjsYA90i_JsarFDs9PHhXHw9qwHpHqBvgd4YU_mwDQHuouJ_oCU1kft04QgCVRg/file# [following]\n", + "--2021-10-21 03:57:59-- https://ucfd8157272a6270e100392293da.dl.dropboxusercontent.com/cd/0/inline/BYbFG6Zo1S3l2kJtqLrJIne9lTLgQn-uoJxmUjhLSkp36V7AoiwlyR2gP0XVoUQt9WzF2ZsmeERagMy7rpsNoIYG4MjsYA90i_JsarFDs9PHhXHw9qwHpHqBvgd4YU_mwDQHuouJ_oCU1kft04QgCVRg/file\n", + "Resolving ucfd8157272a6270e100392293da.dl.dropboxusercontent.com (ucfd8157272a6270e100392293da.dl.dropboxusercontent.com)... 162.125.3.15, 2620:100:6018:15::a27d:30f\n", + "Connecting to ucfd8157272a6270e100392293da.dl.dropboxusercontent.com (ucfd8157272a6270e100392293da.dl.dropboxusercontent.com)|162.125.3.15|:443... connected.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: /cd/0/inline2/BYYSXb-0kWS7Lpk-cdrgBGzcOBfsvy7KjhqWEgjI5L9xfcaXohKlVeFMNFVyqvCwZLym2kWCD0nwURRpQ2mnHICrNsrvTvavbn24hk1Bd3_lXX08LBBe3C6YvD2U_iP8UMXROqm-B3JtnBjeMpk1R4YZ0O6aVLgKu0eET9RXsRaNCczD2lTK_i72zmbYhGmBvlRWmf_yQnnS5WKpGhSAobznIqKzw78yPzo5FsgGiEj5VXb91AElrKVAW8HFC9EhdUs7RrL3q9f0mQ9TbQpauoAp32TL3YQcuAp891Rv-EmDVxzfMwKVTGU8hxR2SiIWkse4u2QGhliqhdha7qBu7sIPcIoeI5-DdSoc6XG77vTYTRhrs_cf7rQuTPH2gTIUwTY/file [following]\n", + "--2021-10-21 03:57:59-- https://ucfd8157272a6270e100392293da.dl.dropboxusercontent.com/cd/0/inline2/BYYSXb-0kWS7Lpk-cdrgBGzcOBfsvy7KjhqWEgjI5L9xfcaXohKlVeFMNFVyqvCwZLym2kWCD0nwURRpQ2mnHICrNsrvTvavbn24hk1Bd3_lXX08LBBe3C6YvD2U_iP8UMXROqm-B3JtnBjeMpk1R4YZ0O6aVLgKu0eET9RXsRaNCczD2lTK_i72zmbYhGmBvlRWmf_yQnnS5WKpGhSAobznIqKzw78yPzo5FsgGiEj5VXb91AElrKVAW8HFC9EhdUs7RrL3q9f0mQ9TbQpauoAp32TL3YQcuAp891Rv-EmDVxzfMwKVTGU8hxR2SiIWkse4u2QGhliqhdha7qBu7sIPcIoeI5-DdSoc6XG77vTYTRhrs_cf7rQuTPH2gTIUwTY/file\n", + "Reusing existing connection to ucfd8157272a6270e100392293da.dl.dropboxusercontent.com:443.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 228802825 (218M) [application/zip]\n", + "Saving to: ‘cats_dogs_dataset.zip’\n", + "\n", + "cats_dogs_dataset.z 100%[===================>] 218.20M 86.3MB/s in 2.5s \n", + "\n", + "2021-10-21 03:58:02 (86.3 MB/s) - ‘cats_dogs_dataset.zip’ saved [228802825/228802825]\n", + "\n" + ] + } + ], + "source": [ + "# Download the cats & dogs dataset\n", + "!wget https://www.dropbox.com/s/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip?dl=0 -O cats_dogs_dataset.zip\n", + "!mkdir -p data\n", + "!unzip -qo cats_dogs_dataset.zip -d ./data/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15iKNG0SlV9y" + }, + "source": [ + "### Read the config file and modify the config\n", + "\n", + "In the [tools tutorial](https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/tutorials/MMClassification_tools.ipynb), we have introduced all parts of the config file, and here we can modify the loaded config by Python code." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WCfnDavFlWrK" + }, + "outputs": [], + "source": [ + "# Load the base config file\n", + "from mmcv import Config\n", + "from mmcls.utils import auto_select_device\n", + "\n", + "cfg = Config.fromfile('configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py')\n", + "cfg.device = auto_select_device()\n", + "\n", + "# Modify the number of classes in the head.\n", + "cfg.model.head.num_classes = 2\n", + "cfg.model.head.topk = (1, )\n", + "\n", + "# Load the pre-trained model's checkpoint.\n", + "cfg.model.backbone.init_cfg = dict(type='Pretrained', checkpoint=checkpoint_file, prefix='backbone')\n", + "\n", + "# Specify sample size and number of workers.\n", + "cfg.data.samples_per_gpu = 32\n", + "cfg.data.workers_per_gpu = 2\n", + "\n", + "# Specify the path and meta files of training dataset\n", + "cfg.data.train.data_prefix = 'data/cats_dogs_dataset/training_set/training_set'\n", + "cfg.data.train.classes = 'data/cats_dogs_dataset/classes.txt'\n", + "\n", + "# Specify the path and meta files of validation dataset\n", + "cfg.data.val.data_prefix = 'data/cats_dogs_dataset/val_set/val_set'\n", + "cfg.data.val.ann_file = 'data/cats_dogs_dataset/val.txt'\n", + "cfg.data.val.classes = 'data/cats_dogs_dataset/classes.txt'\n", + "\n", + "# Specify the path and meta files of test dataset\n", + "cfg.data.test.data_prefix = 'data/cats_dogs_dataset/test_set/test_set'\n", + "cfg.data.test.ann_file = 'data/cats_dogs_dataset/test.txt'\n", + "cfg.data.test.classes = 'data/cats_dogs_dataset/classes.txt'\n", + "\n", + "# Specify the normalization parameters in data pipeline\n", + "normalize_cfg = dict(type='Normalize', mean=[124.508, 116.050, 106.438], std=[58.577, 57.310, 57.437], to_rgb=True)\n", + "cfg.data.train.pipeline[3] = normalize_cfg\n", + "cfg.data.val.pipeline[3] = normalize_cfg\n", + "cfg.data.test.pipeline[3] = normalize_cfg\n", + "\n", + "# Modify the evaluation metric\n", + "cfg.evaluation['metric_options']={'topk': (1, )}\n", + "\n", + "# Specify the optimizer\n", + "cfg.optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)\n", + "cfg.optimizer_config = dict(grad_clip=None)\n", + "\n", + "# Specify the learning rate scheduler\n", + "cfg.lr_config = dict(policy='step', step=1, gamma=0.1)\n", + "cfg.runner = dict(type='EpochBasedRunner', max_epochs=2)\n", + "\n", + "# Specify the work directory\n", + "cfg.work_dir = './work_dirs/cats_dogs_dataset'\n", + "\n", + "# Output logs for every 10 iterations\n", + "cfg.log_config.interval = 10\n", + 
"\n", + "# Set the random seed and enable the deterministic option of cuDNN\n", + "# to keep the results' reproducible.\n", + "from mmcls.apis import set_random_seed\n", + "cfg.seed = 0\n", + "set_random_seed(0, deterministic=True)\n", + "\n", + "cfg.gpu_ids = range(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HDerVUPFmNR0" + }, + "source": [ + "### Fine-tune the model\n", + "\n", + "Use the API `train_model` to fine-tune our model on the cats & dogs dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "P7unq5cNmN8G", + "outputId": "bf32711b-7bdf-45ee-8db5-e8699d3eff91" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-10-21 04:04:12,758 - mmcv - INFO - initialize MobileNetV2 with init_cfg {'type': 'Pretrained', 'checkpoint': 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth', 'prefix': 'backbone'}\n", + "2021-10-21 04:04:12,759 - mmcv - INFO - load backbone in model from: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\n", + "2021-10-21 04:04:12,815 - mmcv - INFO - initialize LinearClsHead with init_cfg {'type': 'Normal', 'layer': 'Linear', 'std': 0.01}\n", + "2021-10-21 04:04:12,818 - mmcv - INFO - \n", + "backbone.conv1.conv.weight - torch.Size([32, 3, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,821 - mmcv - INFO - \n", + "backbone.conv1.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,823 - mmcv - INFO - \n", + "backbone.conv1.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,824 - mmcv - INFO - \n", + "backbone.layer1.0.conv.0.conv.weight - torch.Size([32, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,826 - mmcv - INFO - \n", + "backbone.layer1.0.conv.0.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,827 - mmcv - INFO - \n", + "backbone.layer1.0.conv.0.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,829 - mmcv - INFO - \n", + "backbone.layer1.0.conv.1.conv.weight - torch.Size([16, 32, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,830 - mmcv - INFO - \n", + "backbone.layer1.0.conv.1.bn.weight - torch.Size([16]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + 
"2021-10-21 04:04:12,832 - mmcv - INFO - \n", + "backbone.layer1.0.conv.1.bn.bias - torch.Size([16]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,833 - mmcv - INFO - \n", + "backbone.layer2.0.conv.0.conv.weight - torch.Size([96, 16, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,835 - mmcv - INFO - \n", + "backbone.layer2.0.conv.0.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,836 - mmcv - INFO - \n", + "backbone.layer2.0.conv.0.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,838 - mmcv - INFO - \n", + "backbone.layer2.0.conv.1.conv.weight - torch.Size([96, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,839 - mmcv - INFO - \n", + "backbone.layer2.0.conv.1.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,841 - mmcv - INFO - \n", + "backbone.layer2.0.conv.1.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,842 - mmcv - INFO - \n", + "backbone.layer2.0.conv.2.conv.weight - torch.Size([24, 96, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,844 - mmcv - INFO - \n", + "backbone.layer2.0.conv.2.bn.weight - torch.Size([24]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,845 - mmcv - INFO - \n", + "backbone.layer2.0.conv.2.bn.bias - torch.Size([24]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,847 - mmcv - INFO - \n", + "backbone.layer2.1.conv.0.conv.weight - torch.Size([144, 24, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,848 - mmcv - INFO - \n", + "backbone.layer2.1.conv.0.bn.weight - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,850 - mmcv - INFO - \n", + "backbone.layer2.1.conv.0.bn.bias - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + 
" \n", + "2021-10-21 04:04:12,851 - mmcv - INFO - \n", + "backbone.layer2.1.conv.1.conv.weight - torch.Size([144, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,853 - mmcv - INFO - \n", + "backbone.layer2.1.conv.1.bn.weight - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,854 - mmcv - INFO - \n", + "backbone.layer2.1.conv.1.bn.bias - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,856 - mmcv - INFO - \n", + "backbone.layer2.1.conv.2.conv.weight - torch.Size([24, 144, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,857 - mmcv - INFO - \n", + "backbone.layer2.1.conv.2.bn.weight - torch.Size([24]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,858 - mmcv - INFO - \n", + "backbone.layer2.1.conv.2.bn.bias - torch.Size([24]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,860 - mmcv - INFO - \n", + "backbone.layer3.0.conv.0.conv.weight - torch.Size([144, 24, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,861 - mmcv - INFO - \n", + "backbone.layer3.0.conv.0.bn.weight - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,863 - mmcv - INFO - \n", + "backbone.layer3.0.conv.0.bn.bias - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,864 - mmcv - INFO - \n", + "backbone.layer3.0.conv.1.conv.weight - torch.Size([144, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,866 - mmcv - INFO - \n", + "backbone.layer3.0.conv.1.bn.weight - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,867 - mmcv - INFO - \n", + "backbone.layer3.0.conv.1.bn.bias - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,869 - mmcv - INFO - \n", + "backbone.layer3.0.conv.2.conv.weight - torch.Size([32, 144, 1, 1]): \n", + "PretrainedInit: load from 
https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,870 - mmcv - INFO - \n", + "backbone.layer3.0.conv.2.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,872 - mmcv - INFO - \n", + "backbone.layer3.0.conv.2.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,873 - mmcv - INFO - \n", + "backbone.layer3.1.conv.0.conv.weight - torch.Size([192, 32, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,875 - mmcv - INFO - \n", + "backbone.layer3.1.conv.0.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,876 - mmcv - INFO - \n", + "backbone.layer3.1.conv.0.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,878 - mmcv - INFO - \n", + "backbone.layer3.1.conv.1.conv.weight - torch.Size([192, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,879 - mmcv - INFO - \n", + "backbone.layer3.1.conv.1.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,882 - mmcv - INFO - \n", + "backbone.layer3.1.conv.1.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,883 - mmcv - INFO - \n", + "backbone.layer3.1.conv.2.conv.weight - torch.Size([32, 192, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,885 - mmcv - INFO - \n", + "backbone.layer3.1.conv.2.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,886 - mmcv - INFO - \n", + "backbone.layer3.1.conv.2.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,887 - mmcv - INFO - \n", + "backbone.layer3.2.conv.0.conv.weight - torch.Size([192, 32, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,889 - mmcv - INFO - \n", + "backbone.layer3.2.conv.0.bn.weight - torch.Size([192]): \n", + 
"PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,890 - mmcv - INFO - \n", + "backbone.layer3.2.conv.0.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,892 - mmcv - INFO - \n", + "backbone.layer3.2.conv.1.conv.weight - torch.Size([192, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,894 - mmcv - INFO - \n", + "backbone.layer3.2.conv.1.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,895 - mmcv - INFO - \n", + "backbone.layer3.2.conv.1.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,896 - mmcv - INFO - \n", + "backbone.layer3.2.conv.2.conv.weight - torch.Size([32, 192, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,898 - mmcv - INFO - \n", + "backbone.layer3.2.conv.2.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,899 - mmcv - INFO - \n", + "backbone.layer3.2.conv.2.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,901 - mmcv - INFO - \n", + "backbone.layer4.0.conv.0.conv.weight - torch.Size([192, 32, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,903 - mmcv - INFO - \n", + "backbone.layer4.0.conv.0.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,907 - mmcv - INFO - \n", + "backbone.layer4.0.conv.0.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,908 - mmcv - INFO - \n", + "backbone.layer4.0.conv.1.conv.weight - torch.Size([192, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,910 - mmcv - INFO - \n", + "backbone.layer4.0.conv.1.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,911 - mmcv - INFO - \n", + "backbone.layer4.0.conv.1.bn.bias - 
torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,913 - mmcv - INFO - \n", + "backbone.layer4.0.conv.2.conv.weight - torch.Size([64, 192, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,914 - mmcv - INFO - \n", + "backbone.layer4.0.conv.2.bn.weight - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,915 - mmcv - INFO - \n", + "backbone.layer4.0.conv.2.bn.bias - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,917 - mmcv - INFO - \n", + "backbone.layer4.1.conv.0.conv.weight - torch.Size([384, 64, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,918 - mmcv - INFO - \n", + "backbone.layer4.1.conv.0.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,920 - mmcv - INFO - \n", + "backbone.layer4.1.conv.0.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,921 - mmcv - INFO - \n", + "backbone.layer4.1.conv.1.conv.weight - torch.Size([384, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,923 - mmcv - INFO - \n", + "backbone.layer4.1.conv.1.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,924 - mmcv - INFO - \n", + "backbone.layer4.1.conv.1.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,925 - mmcv - INFO - \n", + "backbone.layer4.1.conv.2.conv.weight - torch.Size([64, 384, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,927 - mmcv - INFO - \n", + "backbone.layer4.1.conv.2.bn.weight - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,928 - mmcv - INFO - \n", + "backbone.layer4.1.conv.2.bn.bias - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,930 - mmcv - INFO - \n", + 
"backbone.layer4.2.conv.0.conv.weight - torch.Size([384, 64, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,932 - mmcv - INFO - \n", + "backbone.layer4.2.conv.0.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,933 - mmcv - INFO - \n", + "backbone.layer4.2.conv.0.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,935 - mmcv - INFO - \n", + "backbone.layer4.2.conv.1.conv.weight - torch.Size([384, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,936 - mmcv - INFO - \n", + "backbone.layer4.2.conv.1.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,938 - mmcv - INFO - \n", + "backbone.layer4.2.conv.1.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,939 - mmcv - INFO - \n", + "backbone.layer4.2.conv.2.conv.weight - torch.Size([64, 384, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,941 - mmcv - INFO - \n", + "backbone.layer4.2.conv.2.bn.weight - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,942 - mmcv - INFO - \n", + "backbone.layer4.2.conv.2.bn.bias - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,944 - mmcv - INFO - \n", + "backbone.layer4.3.conv.0.conv.weight - torch.Size([384, 64, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,945 - mmcv - INFO - \n", + "backbone.layer4.3.conv.0.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,946 - mmcv - INFO - \n", + "backbone.layer4.3.conv.0.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,948 - mmcv - INFO - \n", + "backbone.layer4.3.conv.1.conv.weight - torch.Size([384, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 
04:04:12,949 - mmcv - INFO - \n", + "backbone.layer4.3.conv.1.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,951 - mmcv - INFO - \n", + "backbone.layer4.3.conv.1.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,952 - mmcv - INFO - \n", + "backbone.layer4.3.conv.2.conv.weight - torch.Size([64, 384, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,954 - mmcv - INFO - \n", + "backbone.layer4.3.conv.2.bn.weight - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,955 - mmcv - INFO - \n", + "backbone.layer4.3.conv.2.bn.bias - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,957 - mmcv - INFO - \n", + "backbone.layer5.0.conv.0.conv.weight - torch.Size([384, 64, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,958 - mmcv - INFO - \n", + "backbone.layer5.0.conv.0.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,959 - mmcv - INFO - \n", + "backbone.layer5.0.conv.0.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,961 - mmcv - INFO - \n", + "backbone.layer5.0.conv.1.conv.weight - torch.Size([384, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,963 - mmcv - INFO - \n", + "backbone.layer5.0.conv.1.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,964 - mmcv - INFO - \n", + "backbone.layer5.0.conv.1.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Use load_from_http loader\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-10-21 04:04:12,965 - mmcv - INFO - \n", + "backbone.layer5.0.conv.2.conv.weight - torch.Size([96, 384, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,967 - mmcv - INFO - \n", + "backbone.layer5.0.conv.2.bn.weight - 
torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,969 - mmcv - INFO - \n", + "backbone.layer5.0.conv.2.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,970 - mmcv - INFO - \n", + "backbone.layer5.1.conv.0.conv.weight - torch.Size([576, 96, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,972 - mmcv - INFO - \n", + "backbone.layer5.1.conv.0.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,973 - mmcv - INFO - \n", + "backbone.layer5.1.conv.0.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,975 - mmcv - INFO - \n", + "backbone.layer5.1.conv.1.conv.weight - torch.Size([576, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,976 - mmcv - INFO - \n", + "backbone.layer5.1.conv.1.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,978 - mmcv - INFO - \n", + "backbone.layer5.1.conv.1.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,979 - mmcv - INFO - \n", + "backbone.layer5.1.conv.2.conv.weight - torch.Size([96, 576, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,981 - mmcv - INFO - \n", + "backbone.layer5.1.conv.2.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,982 - mmcv - INFO - \n", + "backbone.layer5.1.conv.2.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,984 - mmcv - INFO - \n", + "backbone.layer5.2.conv.0.conv.weight - torch.Size([576, 96, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,985 - mmcv - INFO - \n", + "backbone.layer5.2.conv.0.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,986 - mmcv - INFO - \n", + 
"backbone.layer5.2.conv.0.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,988 - mmcv - INFO - \n", + "backbone.layer5.2.conv.1.conv.weight - torch.Size([576, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,989 - mmcv - INFO - \n", + "backbone.layer5.2.conv.1.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,991 - mmcv - INFO - \n", + "backbone.layer5.2.conv.1.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,992 - mmcv - INFO - \n", + "backbone.layer5.2.conv.2.conv.weight - torch.Size([96, 576, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,994 - mmcv - INFO - \n", + "backbone.layer5.2.conv.2.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,995 - mmcv - INFO - \n", + "backbone.layer5.2.conv.2.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,997 - mmcv - INFO - \n", + "backbone.layer6.0.conv.0.conv.weight - torch.Size([576, 96, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,998 - mmcv - INFO - \n", + "backbone.layer6.0.conv.0.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,999 - mmcv - INFO - \n", + "backbone.layer6.0.conv.0.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,001 - mmcv - INFO - \n", + "backbone.layer6.0.conv.1.conv.weight - torch.Size([576, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,002 - mmcv - INFO - \n", + "backbone.layer6.0.conv.1.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,004 - mmcv - INFO - \n", + "backbone.layer6.0.conv.1.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,005 - mmcv 
- INFO - \n", + "backbone.layer6.0.conv.2.conv.weight - torch.Size([160, 576, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,007 - mmcv - INFO - \n", + "backbone.layer6.0.conv.2.bn.weight - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,008 - mmcv - INFO - \n", + "backbone.layer6.0.conv.2.bn.bias - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,010 - mmcv - INFO - \n", + "backbone.layer6.1.conv.0.conv.weight - torch.Size([960, 160, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,011 - mmcv - INFO - \n", + "backbone.layer6.1.conv.0.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,013 - mmcv - INFO - \n", + "backbone.layer6.1.conv.0.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,014 - mmcv - INFO - \n", + "backbone.layer6.1.conv.1.conv.weight - torch.Size([960, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,015 - mmcv - INFO - \n", + "backbone.layer6.1.conv.1.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,017 - mmcv - INFO - \n", + "backbone.layer6.1.conv.1.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,018 - mmcv - INFO - \n", + "backbone.layer6.1.conv.2.conv.weight - torch.Size([160, 960, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,021 - mmcv - INFO - \n", + "backbone.layer6.1.conv.2.bn.weight - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,022 - mmcv - INFO - \n", + "backbone.layer6.1.conv.2.bn.bias - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,024 - mmcv - INFO - \n", + "backbone.layer6.2.conv.0.conv.weight - torch.Size([960, 160, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " 
\n", + "2021-10-21 04:04:13,025 - mmcv - INFO - \n", + "backbone.layer6.2.conv.0.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,027 - mmcv - INFO - \n", + "backbone.layer6.2.conv.0.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,028 - mmcv - INFO - \n", + "backbone.layer6.2.conv.1.conv.weight - torch.Size([960, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,030 - mmcv - INFO - \n", + "backbone.layer6.2.conv.1.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,031 - mmcv - INFO - \n", + "backbone.layer6.2.conv.1.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,033 - mmcv - INFO - \n", + "backbone.layer6.2.conv.2.conv.weight - torch.Size([160, 960, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,034 - mmcv - INFO - \n", + "backbone.layer6.2.conv.2.bn.weight - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,036 - mmcv - INFO - \n", + "backbone.layer6.2.conv.2.bn.bias - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,037 - mmcv - INFO - \n", + "backbone.layer7.0.conv.0.conv.weight - torch.Size([960, 160, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,039 - mmcv - INFO - \n", + "backbone.layer7.0.conv.0.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,040 - mmcv - INFO - \n", + "backbone.layer7.0.conv.0.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,041 - mmcv - INFO - \n", + "backbone.layer7.0.conv.1.conv.weight - torch.Size([960, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,043 - mmcv - INFO - \n", + "backbone.layer7.0.conv.1.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from 
https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,045 - mmcv - INFO - \n", + "backbone.layer7.0.conv.1.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,046 - mmcv - INFO - \n", + "backbone.layer7.0.conv.2.conv.weight - torch.Size([320, 960, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,048 - mmcv - INFO - \n", + "backbone.layer7.0.conv.2.bn.weight - torch.Size([320]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,049 - mmcv - INFO - \n", + "backbone.layer7.0.conv.2.bn.bias - torch.Size([320]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,051 - mmcv - INFO - \n", + "backbone.conv2.conv.weight - torch.Size([1280, 320, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,052 - mmcv - INFO - \n", + "backbone.conv2.bn.weight - torch.Size([1280]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,054 - mmcv - INFO - \n", + "backbone.conv2.bn.bias - torch.Size([1280]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,055 - mmcv - INFO - \n", + "head.fc.weight - torch.Size([2, 1280]): \n", + "NormalInit: mean=0, std=0.01, bias=0 \n", + " \n", + "2021-10-21 04:04:13,057 - mmcv - INFO - \n", + "head.fc.bias - torch.Size([2]): \n", + "NormalInit: mean=0, std=0.01, bias=0 \n", + " \n", + "2021-10-21 04:04:13,408 - mmcls - INFO - Start running, host: root@cc5b42005207, work_dir: /content/mmclassification/work_dirs/cats_dogs_dataset\n", + "2021-10-21 04:04:13,412 - mmcls - INFO - Hooks will be executed in the following order:\n", + "before_run:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_train_epoch:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_train_iter:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + " -------------------- \n", + "after_train_iter:\n", + "(ABOVE_NORMAL) OptimizerHook \n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "after_train_epoch:\n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_val_epoch:\n", + "(LOW ) IterTimerHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- 
\n", + "before_val_iter:\n", + "(LOW ) IterTimerHook \n", + " -------------------- \n", + "after_val_iter:\n", + "(LOW ) IterTimerHook \n", + " -------------------- \n", + "after_val_epoch:\n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "2021-10-21 04:04:13,417 - mmcls - INFO - workflow: [('train', 1)], max: 2 epochs\n", + "2021-10-21 04:04:18,924 - mmcls - INFO - Epoch [1][10/201]\tlr: 5.000e-03, eta: 0:03:29, time: 0.535, data_time: 0.259, memory: 1709, loss: 0.3917\n", + "2021-10-21 04:04:21,743 - mmcls - INFO - Epoch [1][20/201]\tlr: 5.000e-03, eta: 0:02:35, time: 0.281, data_time: 0.019, memory: 1709, loss: 0.3508\n", + "2021-10-21 04:04:24,552 - mmcls - INFO - Epoch [1][30/201]\tlr: 5.000e-03, eta: 0:02:15, time: 0.280, data_time: 0.020, memory: 1709, loss: 0.3955\n", + "2021-10-21 04:04:27,371 - mmcls - INFO - Epoch [1][40/201]\tlr: 5.000e-03, eta: 0:02:04, time: 0.282, data_time: 0.021, memory: 1709, loss: 0.2485\n", + "2021-10-21 04:04:30,202 - mmcls - INFO - Epoch [1][50/201]\tlr: 5.000e-03, eta: 0:01:56, time: 0.283, data_time: 0.021, memory: 1709, loss: 0.4196\n", + "2021-10-21 04:04:33,021 - mmcls - INFO - Epoch [1][60/201]\tlr: 5.000e-03, eta: 0:01:50, time: 0.282, data_time: 0.023, memory: 1709, loss: 0.4994\n", + "2021-10-21 04:04:35,800 - mmcls - INFO - Epoch [1][70/201]\tlr: 5.000e-03, eta: 0:01:45, time: 0.278, data_time: 0.020, memory: 1709, loss: 0.4372\n", + "2021-10-21 04:04:38,595 - mmcls - INFO - Epoch [1][80/201]\tlr: 5.000e-03, eta: 0:01:40, time: 0.280, data_time: 0.019, memory: 1709, loss: 0.3179\n", + "2021-10-21 04:04:41,351 - mmcls - INFO - Epoch [1][90/201]\tlr: 5.000e-03, eta: 0:01:36, time: 0.276, data_time: 0.018, memory: 1709, loss: 0.3175\n", + "2021-10-21 04:04:44,157 - mmcls - INFO - Epoch [1][100/201]\tlr: 5.000e-03, eta: 0:01:32, time: 0.280, data_time: 0.021, memory: 1709, loss: 0.3412\n", + "2021-10-21 04:04:46,974 - mmcls - INFO - Epoch [1][110/201]\tlr: 5.000e-03, eta: 0:01:28, time: 0.282, data_time: 0.019, memory: 1709, loss: 0.2985\n", + "2021-10-21 04:04:49,767 - mmcls - INFO - Epoch [1][120/201]\tlr: 5.000e-03, eta: 0:01:25, time: 0.280, data_time: 0.021, memory: 1709, loss: 0.2778\n", + "2021-10-21 04:04:52,553 - mmcls - INFO - Epoch [1][130/201]\tlr: 5.000e-03, eta: 0:01:21, time: 0.278, data_time: 0.021, memory: 1709, loss: 0.2229\n", + "2021-10-21 04:04:55,356 - mmcls - INFO - Epoch [1][140/201]\tlr: 5.000e-03, eta: 0:01:18, time: 0.280, data_time: 0.021, memory: 1709, loss: 0.2318\n", + "2021-10-21 04:04:58,177 - mmcls - INFO - Epoch [1][150/201]\tlr: 5.000e-03, eta: 0:01:14, time: 0.282, data_time: 0.022, memory: 1709, loss: 0.2333\n", + "2021-10-21 04:05:01,025 - mmcls - INFO - Epoch [1][160/201]\tlr: 5.000e-03, eta: 0:01:11, time: 0.285, data_time: 0.020, memory: 1709, loss: 0.2783\n", + "2021-10-21 04:05:03,833 - mmcls - INFO - Epoch [1][170/201]\tlr: 5.000e-03, eta: 0:01:08, time: 0.281, data_time: 0.022, memory: 1709, loss: 0.2132\n", + "2021-10-21 04:05:06,648 - mmcls - INFO - Epoch [1][180/201]\tlr: 5.000e-03, eta: 0:01:05, time: 0.281, data_time: 0.019, memory: 1709, loss: 0.2096\n", + "2021-10-21 04:05:09,472 - mmcls - INFO - Epoch [1][190/201]\tlr: 5.000e-03, eta: 0:01:02, time: 0.282, data_time: 0.020, memory: 1709, loss: 0.1729\n", + "2021-10-21 04:05:12,229 - mmcls - INFO - Epoch [1][200/201]\tlr: 5.000e-03, eta: 0:00:59, time: 0.275, data_time: 0.018, memory: 1709, loss: 0.1969\n", + "2021-10-21 04:05:12,275 - mmcls - INFO - Saving checkpoint at 1 epochs\n" + ] + }, + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "[>>>>>>>>>>>>>>>>>>>>>>>>>>] 1601/1601, 104.1 task/s, elapsed: 15s, ETA: 0s" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-10-21 04:05:27,767 - mmcls - INFO - Epoch(val) [1][51]\taccuracy_top-1: 95.6277\n", + "2021-10-21 04:05:32,987 - mmcls - INFO - Epoch [2][10/201]\tlr: 5.000e-04, eta: 0:00:57, time: 0.505, data_time: 0.238, memory: 1709, loss: 0.1764\n", + "2021-10-21 04:05:35,779 - mmcls - INFO - Epoch [2][20/201]\tlr: 5.000e-04, eta: 0:00:54, time: 0.278, data_time: 0.020, memory: 1709, loss: 0.1514\n", + "2021-10-21 04:05:38,537 - mmcls - INFO - Epoch [2][30/201]\tlr: 5.000e-04, eta: 0:00:51, time: 0.276, data_time: 0.020, memory: 1709, loss: 0.1395\n", + "2021-10-21 04:05:41,283 - mmcls - INFO - Epoch [2][40/201]\tlr: 5.000e-04, eta: 0:00:48, time: 0.275, data_time: 0.020, memory: 1709, loss: 0.1508\n", + "2021-10-21 04:05:44,017 - mmcls - INFO - Epoch [2][50/201]\tlr: 5.000e-04, eta: 0:00:44, time: 0.274, data_time: 0.021, memory: 1709, loss: 0.1771\n", + "2021-10-21 04:05:46,800 - mmcls - INFO - Epoch [2][60/201]\tlr: 5.000e-04, eta: 0:00:41, time: 0.278, data_time: 0.020, memory: 1709, loss: 0.1438\n", + "2021-10-21 04:05:49,570 - mmcls - INFO - Epoch [2][70/201]\tlr: 5.000e-04, eta: 0:00:38, time: 0.277, data_time: 0.020, memory: 1709, loss: 0.1321\n", + "2021-10-21 04:05:52,314 - mmcls - INFO - Epoch [2][80/201]\tlr: 5.000e-04, eta: 0:00:35, time: 0.275, data_time: 0.021, memory: 1709, loss: 0.1629\n", + "2021-10-21 04:05:55,052 - mmcls - INFO - Epoch [2][90/201]\tlr: 5.000e-04, eta: 0:00:32, time: 0.273, data_time: 0.021, memory: 1709, loss: 0.1574\n", + "2021-10-21 04:05:57,791 - mmcls - INFO - Epoch [2][100/201]\tlr: 5.000e-04, eta: 0:00:29, time: 0.274, data_time: 0.019, memory: 1709, loss: 0.1220\n", + "2021-10-21 04:06:00,534 - mmcls - INFO - Epoch [2][110/201]\tlr: 5.000e-04, eta: 0:00:26, time: 0.274, data_time: 0.021, memory: 1709, loss: 0.2550\n", + "2021-10-21 04:06:03,295 - mmcls - INFO - Epoch [2][120/201]\tlr: 5.000e-04, eta: 0:00:23, time: 0.276, data_time: 0.019, memory: 1709, loss: 0.1528\n", + "2021-10-21 04:06:06,048 - mmcls - INFO - Epoch [2][130/201]\tlr: 5.000e-04, eta: 0:00:20, time: 0.275, data_time: 0.022, memory: 1709, loss: 0.1223\n", + "2021-10-21 04:06:08,811 - mmcls - INFO - Epoch [2][140/201]\tlr: 5.000e-04, eta: 0:00:17, time: 0.276, data_time: 0.021, memory: 1709, loss: 0.1734\n", + "2021-10-21 04:06:11,576 - mmcls - INFO - Epoch [2][150/201]\tlr: 5.000e-04, eta: 0:00:14, time: 0.277, data_time: 0.020, memory: 1709, loss: 0.1527\n", + "2021-10-21 04:06:14,330 - mmcls - INFO - Epoch [2][160/201]\tlr: 5.000e-04, eta: 0:00:11, time: 0.276, data_time: 0.020, memory: 1709, loss: 0.1910\n", + "2021-10-21 04:06:17,106 - mmcls - INFO - Epoch [2][170/201]\tlr: 5.000e-04, eta: 0:00:09, time: 0.277, data_time: 0.019, memory: 1709, loss: 0.1922\n", + "2021-10-21 04:06:19,855 - mmcls - INFO - Epoch [2][180/201]\tlr: 5.000e-04, eta: 0:00:06, time: 0.274, data_time: 0.023, memory: 1709, loss: 0.1760\n", + "2021-10-21 04:06:22,638 - mmcls - INFO - Epoch [2][190/201]\tlr: 5.000e-04, eta: 0:00:03, time: 0.278, data_time: 0.019, memory: 1709, loss: 0.1739\n", + "2021-10-21 04:06:25,367 - mmcls - INFO - Epoch [2][200/201]\tlr: 5.000e-04, eta: 0:00:00, time: 0.272, data_time: 0.020, memory: 1709, loss: 0.1654\n", + "2021-10-21 04:06:25,410 - mmcls - INFO - Saving checkpoint at 2 epochs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[>>>>>>>>>>>>>>>>>>>>>>>>>>] 
1601/1601, 105.5 task/s, elapsed: 15s, ETA: 0s" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-10-21 04:06:40,694 - mmcls - INFO - Epoch(val) [2][51]\taccuracy_top-1: 97.5016\n" + ] + } + ], + "source": [ + "import time\n", + "import mmcv\n", + "import os.path as osp\n", + "\n", + "from mmcls.datasets import build_dataset\n", + "from mmcls.models import build_classifier\n", + "from mmcls.apis import train_model\n", + "\n", + "# Create the work directory\n", + "mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))\n", + "# Build the classifier\n", + "model = build_classifier(cfg.model)\n", + "model.init_weights()\n", + "# Build the dataset\n", + "datasets = [build_dataset(cfg.data.train)]\n", + "# Add `CLASSES` attributes to help visualization\n", + "model.CLASSES = datasets[0].CLASSES\n", + "# Start fine-tuning\n", + "train_model(\n", + " model,\n", + " datasets,\n", + " cfg,\n", + " distributed=False,\n", + " validate=True,\n", + " timestamp=time.strftime('%Y%m%d_%H%M%S', time.localtime()),\n", + " meta=dict())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 304 + }, + "id": "HsoGBZA3miui", + "outputId": "eb2e09f5-55ce-4165-b754-3b75dbc829ab" + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAATMAAAEfCAYAAAAtNiETAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAFiQAABYkBbWid+gAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOy9ebRt2VXe91vNbk5zz+3vq3rVqaoQqpKKAmF6JGUQED226ZIQJzYYQ4IFIXEDdmC4ATuQ4dgYJMAjjQcgMKFzUIdBIAE2rQTCkkpt6dWrV69ee9/tTrO71eWPtfa+51VJ/JUxMpKhXeOM++o255y991rf/OY3vzmPCCHwyeOTxyePTx7/Xz/k/9tv4JPHJ49PHp88/p84Pglmnzw+eXzy+P/FoV/4jcc+7fEAIJCAQAgNCIIXgGBvb4+uazg7O6OqF+R5znQ6xgfLfLHAOcjKkhACGxsbXL9+nd3dXay1eO8RQgAwnU5ZrVYcHBzQdR1d17FYLAidBUAphdaaLMuQUuI9dF0HCLTSSKlxztF1HSEE8jynKDKMMSgt8d7Sti3WWoQQKKXiCescJTVZVpBnY7TOUCpDiAxBQAYYlTld12GMoSgLhJQY0xFEwKTnE0oghCCEQAgBpEBLBV4ggyDPc5qmIcuK+J6kxlqHUorNzU2Oj0/IsozP+ZzPw1rLe97zHsrJmFtnJ9z/8ENcuXwZERx/4ZWfyfHtm5wcHjI/OWakFBf397iwv8fOxgYSKLQC73DWUYzGeEAoQdc1fOqnvpS2bTg7OybLM2azGUdHR1y9eo2yKMnzkqpqKPIy3ucAs9mMhx56iBACz155lrppQEAA6rZlurGBkIKXPPIw+wcHPH/tGicnJ7RtS1c3qABCCHwI5HlBUYxwnnT+GXkxQkpN11kQAus8q2VFkRc401JoyUsffZSmaTidnzGeTDg5OaGzhrwoWNUVOzs7XLj3Ho6Ojzk6OuLs7AwhBPdduJcMQaYFITh0pvHecXR0SNs2FEWB9x6t4xryPmCNx1pHXPceKR3OW0QArTVKZfgg6YzDCUU+mtEFydGi5vZ8QW1BFiMcCmMdSoKUEiklhADBA5E5hODx3uOcxVmD955MKvI8pyxziqJAFzlVVdG2LcYYjG2HdS6lpOviut7e3mS5XOK9Z29vl6at4z5zEELA+fj8udJorfHW0LYNVV1x4cIFxkXJYrHAmpbxeIwQgrpuCMS1LaXEOcfZ2RmLRdzr29vb7O/vs1qt8N4TQsBai5QSpRTWWup6hRCCrutwzjGbzdjZ2aFtW+7cucNkMiHPczY3NxFCUFXVsJcWiwUBx+bmBC0DzizQ2vFFr/lcplPN9uaIve0p1tVY2xGC4wte/c3i44JZDzYkKa3X1HpprQeUoigwtiGEQNu2BBzee4zxjKYaay2TyQSAyWTCarUCwFpLCAGlFCEElsslW1tb3HvvvTz99NMYF3DO4ZyLiyG9B2sdxhgyneOcw3uGmxtCwHtP13XDRenPQ4hz0Fk/xxc90s+ctVgrMc5inEVahZTx/wPxdZACFSI4+viESCnTBobOWXCSRbViMo7nUBQFXnSczefITLOoVoxGI45OjnnFK57gox/7GEdnJwgh2N3dpcxzTo4OyfOMLMsYjUas5vE1i7Ik+Aiis9mMpl4xHU84Pj5mb7aBkIrFco5SitFoRFHkLFdzqqoarmdZFEgp0TqjKALBn1+zqqqGBWatRSlFZwyBgBSC5XLJ5tYmTdMwnU55+CUvQUnJ/OyM07ZFhIASEdSXiwV13TKdznDWIVBIBM66BCoZoyJDCkHXGcpRSb2Yc/XqVe677z688xweHqK0GgLchQsXsM5x5dkrFEVBU9UYY9jZ3mY+PyMXAi0Eo1FJnmV4H9JrWlamRUqFGk3IMo3MFB0Wb2uapkNITzFWBB/wIeCNQXkARQjgg6duGtogaE2LsRbjIHQtNgi88xR5jvduWGsyXfPgYiAcjUo64/GuX3UeiMDQdi3Xbt6gruM5SSnJ8njuWZahtQbivfLeD2vaWjcAqLUdRVGQySLtDTsE/rbt2BhPECFQ1yus7RKQllTVkrOzUzrj2N7eZjqd0nUdUkqm0ylbW1vs7OwMe61tW4qiIM/zBNBuICI9kPX7whiDEIKNjQ2Wy+Ww1mJQiaDZ71vvI6aQCTKdUxTpHE38na7rCN4Rd
+15cvkiMBuYRpCE0F8sASEyNWstZZkzmUxwPp6Qcx4h+ygWwUVrjTGG6XQ6MCdrLVmW0bYtbduyWCyoqort7W0efvhhzs7OOD08ommagclF8IxgqpRaAzgPnDMuAO9dXDzpewMwr51bD179jT9/TgnBE6SAjwd2/cXPs+EGAfh0E5z1BAnWWKSQdMZERucsZVmi8wyVaYyzWO+YzjbIdMYHP/xhXvWa1/CZn/UXeOfv/DbT8Yijo2M+8zOe5PbNLa4//zz1Yo63Fq0UwnvyLEsL2IAQdG3HdG8/LijrcMHGDddWHB0dEYJjPp/jveP4+JjJZMLW1harVUVdVyiVofOM4ANN07BYLJjNZkwmE4qypCgKltWKuq7RUkYG1rTcvnGT3e0dJhvxHmdZxnQ8oVoucYn9xKDlESIQb51HyIBpO5rWMB5LdnZ2yPKc6zeug3NoLTk+OWI0KrHO0JmWnJwABO/YmExwwXN2dkpVrajqilFRUOY585MTXPzFCBA42rbBOkOWKbrO0TQ13keGVJYTdKYpwwgQGNfgnMG6CLYKiRAKlWUR/FC0DgQCrTVFURCcwEmNCGClAxGBs9/MudYoKUFoECCVQDmJVwopI/AqpYZ1eeHChYENuQSA/b7s13iWRRAoyzLeD9MN2chkMolBTGdYa2nbeL5KKfIiY3Nzk9VqRQiOyWRCpvUQwIoiR2fx/RhjcM6xsbHBZDJhNpuR5zmXLl2iqiqcc2itGY/HA3jleY6U5UBI+syq3/vT6ZSzs7P4nrtu2H8hnJOYQKDrLMEL8olGKTEQmuDAdBapAloplDqHsE8IZv0m7QmNQA1gJkQxRIq2beNiTSgrRMbZYsHm5ianp6dMp9Pha9u2lGU5nJz3nrZtqapquAGnRIreX4SeHUC8gQK5xrLOGVHPztaPdRA6BzyPFz5RZJ/+xkNww3kgzoEwCAhSJOot0SqLP5MxjVLI+FwChJSgAjrLcM4xmW3QdRaVZ9jgKfKCvCzjhZeRmR6fnXC6mCO0QipFY1rE/Iy6jinD6ekpXbUkGIt1FmcsnemYlCXWOlarmGZMp1O2t7dZrCrK8Zj9C3vcuHGN27dvs1jMkQoeeeRhFovFEG3jNQvkuUrnLdI9hM60zLIZ+/t7jMdjTk5POLzj4mYZj1hWSzqbc+nSx7hw4QLVYpE20pi2qmjbDq01o6LEGEtbNwQh4/VQGQKBEgFrWpQS7O5sY9qGWzeuMS4Likxx7fpVHnzwQR588H4uXX6Guq4oxznPX73C5uYmD95/kSvPPcfu9ibee65fe56tjSkaiXdgnaOua7quQSnBdHMTaw137tyJKVznsDYwnW6QFxlCgjAB42pQUVYJCIIQCCmQWiNUhhQZMkhs5rB5SeYFVigcAmsdbVXhnYib01qCM5FVSYX3ntPTUwgBQSDTkXlnmUZIUEqytbMT1zRuCOxxPcZgHFmMo2k8CLCuw1XxNay1TMpRCngBaztsZxA+UGSaXMdAn+caKTK0UnhrMG2DRDAejzi4cB/z+ZyTkxO892xtbTGdTrHWcnJywvHxMWVZkmUZSqkhy4pEp0TKmNb2GZhzLuFEoEzB0XtP08S0v3+enomaxPIE0Booch1lLhczO2tBhxDviTgnMx8XzO4GtsiA+rwzAlJOlmVr1NLHjSyiVtS2LXke08G9vT3m8zl5nt+VtvTUdbVacXp6OqC1MSZSTGA8HqOUSulrTDOjttO/v7vfb9TW/IvSzPXzMcZAECgVo4AQMWUlnasSDMB4Hg3P/91fcKHPI6mUEqnPI0xjOwSQ6YJSaRrTQdvROcvZYo7Wmq7tsM4hlOLP3vdeDm/dZlXXVF3Dxfvv5/Lly9TLBTs7O4SNKdV8ztmxwbUGax07OztoEfCdYWO2QVEWXDi4wL4PbGxtUdUrVqsFt2/fHlKb6XTKo48+ytWrV7l+/SaTyYTxeAOlNMvliuBjOtyDfgiBoihiUEjX1rmo+xU6AwR1VXGUwME5R6GzgWUYY8iyHICmbVA6w3QxiI3GY4SApqk4Oj6kKHPyXDMeleAMnWlYVUuWqwWjccl0XJIpiQie1WqBS0xrc2NCXTcxIARPpjXCB0BgTIe1HudtWr0lo9EopuyrFaZrqOvIQspyBCKglQSVI53FSRfTeRzGW5RThCAwwVHbQNMFWhtovcAATmgIdwdRay3OeowxaKkQMq0n7xEECDFRMsbgQ7w2Ryen1M2KpmmGAK21QusRWkuckxgTgTrL1cC6JpNJSkPBGItzJqarbQxemYqBv2mqqFcBp6eneGsYjUYAVHVNkWvatmW1Wg3gI4RgPp9z69YtvPfMZrO79lfcowbnLCH4IesZ9lz6HYiyU13XAxPrATGu0xG+qRBS4V3M4Lqsfz7AQXCBkK6js+fY9SIw68FjEM3oQeGcJfWCX0/VhYhA0i/4uDhK9vf3GY1Gg+g3m824dOnSkCqOx2NCCJydnXF8fIz3nv2DAw5v32aRIv1oNEroHs5T4DVw6i9QvCByyNtfCGQvPF6cQkbxO/6JjxdLeAIg++cRAmMcQoMWEinjJldKoVVMqVzhuX10h83NTequ5eGHHubq888jtaLpOvKyoOlakAIhFEVW8PyN65R5wWx7k7CIqYPtou4xKkoefvBlHN26yYeeqqjTwji4cEAG3Lpxg9lsxvxsznS6QdsZgpSsVgvm8zlt26K1pmkann76afb29tBao5Vie3uHra0dhFAcH52wXK5w3jHZiCx6uVwCDCmBMxZ8jMCj0SiClVTUq6jFSWB+eoaSkkzHDeGdQ0hJpiPb3t7exhpHVdcIKZESqsWcm9chLzKkiovfmo7NjSmHt25yenLEhXvuoSwLnLMc7O3SNA03r19jujGlXi1xtmN3e4uuaZBCoITAmBadKTItqOuaE2vZ2JiglaTIckSIaWvbVOADWmcILQbQAXDB4bxHWEsIEotl1TlWJrBoHEtjqS10CDzxuhY6ixtagfTgjMd7hxcm7Y0CYzq89XSmI3gzMBvvPZubm4zygjLLcc4O178J1cDyhJbYLlBmOTJACI5CK3wnaOtq0NRcZ5CEIUgDZFrjncO0DaZtYvqZ5wRn8c5xeHjIcrkkz3N2dnbY2NiIr5/AVWs96GmLxSK+n/R6URYKKC2QSg/pZ9STBU1bMZ1OadqAD5bONGDOC36j0YjOWZQW2MbSdQ3GSJTKyLK4/5VUKAESkUTrTwBmgyYle+DoBfK7q3f9I7KTpGcpRV23Q/Xw/vvvp6oqNjY2yLKMe+65h2eeeQaIhYSyLNFaU9c1bRsrKg9dvB+t1JBmCSESFY1ipknVzhAYmELPkIT4850mUdeJFLh/ZFkWt2FQeC+QBFAS6eV5xJASqRRBRI1M9owtBGQQeA/exwpiXo5p2pa9PMcYw/0PPsjx6SlFMWK5XKKU4vi5E7a2tiHA6ekZOs958lWv4pnLzxC04uT0lE95+CVIAs9dfpYHHniAQik+9pGPUBO1ssl4ghYBaw3BB65du4YxhqOTUx546CG2tjcjuLUtzlmaqqVtG5599lnm
8/mwIJXS5HnBZDLh7GxOAMqypGka6roeInIv1AohyHVGcD6yc+dx3lEWJd65QTLo76vtOibTKeVoTFmMeOSRR1jVNR/56NOE4NnYmEU22yxpW0HX1Ggh2NnZIoTAzZs3UVJQLResqoqDgwMmkzG5lsydZXF6QqEzys0Zy/mSuqqYTqaITIPwqZqasaoWLBZLhPBIqdCZBJFhjcPaNrFPUEHikxh/99pJMoTUZHmG9halPMIKAj1YCZyDxjq0SjpYAnnnLFGOjffMmviQUqBENsg2zrkBSIoiJ08aLelvlZK0bX1+L/IM5wxNY6nrmvl8zjgRgBACUgkkek2XMsy2d7lz5zarxZLRaMRkOsG6Dtt2ZFnGYrFASsn29jYXL15Ea81yuaRt24Eg9Olj1NmK4fvOGRCBQo+HlLgnOOtyUK+R9Yy+z9SUTi6GXOFNQ9danEvf0yCES2lpQCvBOpp9XDCLua5MeWyMlL0m1oOC1prJZDqUYts2MBqPKYqcrM04OTkZBNLxeMyNGzcGQbjXbHr6ubu7y0c+8hG2t7eHHLrP0fuqmlLxRkuhkp4mhijR63DOmUHzEQLyPB9y93NWJ4coqJUjy+I5OxtTgq3tTQ4Pb7G9vYXOM4pixNnpGXleIjON95FVBUCrjLOzOZtbW1jn2d7ZZL46Y2dvl4N7LvDcc8/xrj95F7u7exwfnVLXNdPplKIs8SHEEnUZmavKNBfvu4+LUrJYLHj++Ws8eP9Fjo6OuHLlCquz0wTcsRDz7LPP8pL77yPPc46PjxmPSo6Pj5ltbrG9vc0DDz2AD46zs9OhUJBpzeHhIUVRcHh4yGg05lM+5QJ13bCzs8PR0TGn8xO01+SFJuCo6iVZrmL6gAPhcT4yjLaJOkte5JiuhgCT8QgRoGtbSNVmawy1X7GYzynKjHsv3sf+/i537hyhJGR5wY0bxzR1xXhUEoTk9LTFh4DWsTq3WlmyLGe5PKOpl0N1zlmLdxalZNJWcup6Cb5EEOjqmuAM46KgzBQSj7eJzSOROkMgo5gsAuDx3kZ2oQQCibee1niarkblBZtbezhR04UOspyJKuiCpGoNpuvQQiadF7JcowQ0TdzUUsYNrjOFUiIGRynRWg1V+iKL1ozgHEVR4GyXUugMfGQkEijzjHq1ikK70rRNzWQyiiDiLd4km1OWISV0bUdVVdxsO05PjxmPR+zt7yADXL9+HeccW7u77M62WVUNy+USay3zeawuj8fjuG+05urVq1y4cIGLFy9yeno6YIJznsm0JASH9wEhYjXVmHOSk2WKPI/Brq5XA6mxtmNn5x5Ozha0yxotGIpGITisdRzs7TKdFmTKgXcY231iMPPeY41lMY/Csnek1E0gZRTb6mqetBVP27WYrqOuAqvFCiEUznuOqorf/93fxaQLenxyzNnRMXVdR9BUcQF57+mMYXFyyumdI+aHR5yendLUDSFpCkpJpNQpvVVoFYsRPkXPahVR3zub8sRY9VlPl6XsNS6FNZaiHOHsYaS8TUemc6yzHB1plssF8/k0RRCSGBv1QOscxtiUao3xwXN2cgMlFXduP8f+wS7d4oSPvP+Euq644wPdxfs4OYlgdPO5FpEqgnmWo3VG0za87Vf/Tw7299nd2+P6jRvcuXWL5y/NOLx9m7ed3uKJxz8tgVnUtU5PT6l2tplONzi5c4hLVV+hYkp58+aNoXwOsLGxMQSA3ps0n89ZrZa0raGTZoia/Ub8eIFuYO7eE5wnSEnw0bIhE9D2qX6mM0LK0K01OOc5Pjomz3PqaokxDePxPkVRcM89ezjnWC0WNHV9bmcgCr0Q8M7gTACv4mtbG887QNAKgSCTgnwUq2nRJ5jFQCR13OBJeCb0mjAEIisPQUIQBOfwBAgCke5/nimkEnghqVcVwUZ9DqExMsNbH1k9otcq4oZO9gutFEL3oBVlGqTECZHWtxwynZ7FOGex7vx69hJI/LrGHNNL9synaRpEArz+fQgRCw1FkbGcL5hMJuzubUdXQmeSjrWiaRtG1rK5uRkLVMfHPPvss+zu7nJ2dsbu7i7GGG7fvj1kNycnJ2xtbXFwcMAzz3yM8aREJw25107XdbPeb7qeWvZp6jIFz9DF4CuIdhTnLV4EMi1pmgonPM53dF3zicGs6zpu37wd9ZGkI/WUW4h4wdsmvgkh46L23g3A4n1AaUXbdlzpOoKP5tmurjFdLHkDqKSxhRDtBAjomobVfI6zbgAyCEilkEJinR3S3V7Li++rXzwBrVUq4fthM6zLfzJFzbZtkmGyoesMOssghGhmDB7rovZijENphXM+LbgE+NbRNstY0W0iQIXjQNecUa1WdMYghYjakO2i4OkdUkiMNRCgAYyN0ffq6R1sM+f0+CZ3Du+wWMxZno1xznJ6dBNvGrZne0gZvWPL1ZK6rrl4zwW6pqJaLMiznNVqxa3bt1E6MuvxeExdx9TPWjPoL1Ip5vM5R0dHOBfT96ZpoigbItPpwSkkkVopibdRZ/I+4E0HUkY7i1JI1ZutPSgYjUfoJPo3bYvWgaapuHLlWWazGUWmuXH9Go89/jjTjQnXr18nyzTWSILzURzvaXYIsermLCLPkUISDTUhsmQp0DqLUoTtNRpL01iE9GgtyfO4mZUSOBfo2o6usxjT0SJRbYPMFCqXA/ApIVEqR6GiwRePMx0iSGSQBOcwIdBZh7cO4X1KO+PfOwIieJCgpIrB1JpYWAsxCAy2BG8IRGuKCPH5gpIIH1CEQd6QaV+EEJ87hnaBDCC8w3UxQ+nRzDtHUFFnKrKcSkrKUdQMjw/v0DZVtE5ojUpBv65r9vf3eec738k999xD0zRDaqmUYmtrawiWfTUSiJpX1wA6ZXIepaIU1QdJrWUiFhllWTKbzXAuVm67riPPNV3nY3DKFEpLnLMYbwnBkcmYcovgwZ8H3heBWV3V2M7Ey3OXfn53VdB7f25XEwKRgKPPf52zmM7gg0/Ljagx9XlzCMjg8cn82dtWe9Qefs+f/6wPfCHEovnHa5HvndyIcA58glStDPjknYsmUTkUEPro55OhVAiZ3P8RkIWWaRGmMn2KpnmWY41FKU3d1KxWVRSgVfxZXde0XRvTamvRuUQHjU+VwrCqCN6TFwVdZ8iybgBoawxZpiHPuXnzGpvTneRpsyhgsVxQ5A9ycHCBG21HlueoEDg5OeHhRx5iuVyglKKqKmazCRsbG+cXKgjatuPo6BgpFfOzBR4fr09KDyMjSpczifp9lA2pEtVvFgEIrZFSYG28M0opirxAICOQJ4f4fDEf1lbbdZyeHeOc4+T4iFGRUxZZSh/P9RDvfGT5wSOCR2cagkcS7QlaQplr2rajaWryItodjG2Zz0/w3g1FqJ4F+HAOOiBjISvIBJwRTJASvESKgA8gREaea6yNXrlVY1h0ltZDQEe25fygFUFIjA1cAO/tILMkwonn3LEvELDGXqBPtVI12UfzaTRNn2t7kTFH54HWmjzPECJWSYeUV0kgRAkhBOqqivczsZs8z1E6SjBNG+9z13UcHBxw584dvPf
UdU1Zljz44INcvnyZpml44IEHWCwWHB4e8pKXvITnr12mrs1Q0ex173Nbybns09uwAJz32DY+vw+Wznq0cDEjDI7O1jEwb0/QBCwSb8918hcp5tG09okPkaJkX1aNZebzLoFzt31MA53zCXEN3qcTib9NTF1jRfBrvvqr+cD73peEWDFoQwFiqpGqjkop/tE//AfcvHmd+dkJ3/mdrxsKAH10CMEPjDH4EEXY9PP/6q/8Ff7oD/8QHzxaK6yzSBXZmjWpvSTL4+aTEiUVzjuyTPN9/+Pf5+qVj3F48yqv+45vH1ziOssoR7GEbdouCb8iCsZCYNqO0WhEpjSmjYUPEWKU7KNSkeUs5nOqqh7oe11XgzcPQmKbnuVqxWQ6oUq2lo2NaaxCOxcjY9umTd0MxuRVKsSMx+O4sKQkz3OWyyVN01BVFTYxmp7RhkSA1y0ouc6i/UKqWLWUCi0kWsRr1S9e7xy2jWlA29UY02Jti1Qwm004Ob7DfHHKfRfv4daNa1y+9DRSBoxpYpuPi1GY4FI1LqAk4B22a3Fdi7Md1rSYrsW0NW1dYU0X2ZwPZEqxMZmwNdukzAusMSzmZxwf3WF+dkJbp0CSZUxGY2bTKdPxiEIpCq3JlUYhIttyjmAiwHlnY5rrPM4YbBcfzpiUTru0VkUS/UXayBEcrLUYG786b7GuozNNCg5hzQ0fdbvBx+W6oa3Jexf/3ll8YiwhhHQ+ikxHOUYKEauUqXUqOkYF3likCGxvzdjb26MsS4K1dG3NYrFgNBrx3ve+lyeffJKzszOyLGO5XLK9HVNTpdRQKJhOp0m/jq164/EEIcRgju/dDUVRDMG1rmtWqxXL5XJ41FU1MECCxzmDw0ZWXUbdr+1WdKbG2DYWG8J5wPvzy39r5tHzb/UsKfk81tK5CBpiSPekEBEw0iaJqWkCJanIsywaSfM8+tToSVTvb0t8sK+cyghwP/CD/4R7L97PH/zBH74AaGEyGTMaxfSmNxqee2xFAsZ08qkCWpYlRV7Ev0mtFV0XUzLrbGrFkPzw//zPuP/BR/mjP3pXtGlYS9vE9pg8L5hOp2R530vqaJo2XaqQ9Ip8sL5kWUZd18Nmmk7H/PzP/TTv/4/v4p2/+TZe+yVfTIBkas1RSiO1pixHCAG7O7tkec58scA7T5bHvtT5fEHbdVy/fp2NjQ329/fZ29sd/Hx99cgYkwRbO3j/dGJW4gVeu/4cetYdK0uaPM+HIouUEi0lZZYzKkfn0TZtTADTGZq6AgJb25vkecbJ6THeO7Z3tsmLjKaJonDT1LRtk7xiXVpTkizXaC1jgUr2elPs/mjbGvCMR0XcCN4wm23wwAMPcO+99yZLRGQcPeuTsg+oMWwKIFOKMssYlSVFlqOEjCZfKREBTNPgbIcSgjIvGOUludYxO7EuMY5zQ/fgRex1MclwTfv91P+eTq79nrX0GmWvNUWQO78/3ofh7yFShP5e9Taq884XEDIku5O8i5D0+7PvKe7d/b2Z9ezsbLBpTCYTDg8PEUKwv7/PfD4nhMDFi/dxcnKcAG08BLZ1fawv/PXrwhgzgFu/L1xwyEwiM0WUmWA0KshzTQiOrmuwtsMHx7p2+OLezP6R7BhhQJj0c9nrUz3SpKP/uZDRGa80wXuU1oTgh54ya100DIqYqimpht7G4al89HeJwd1/Dlah1wpS+rhuUAw+oFN7w7rRFWL6mNTe4eY765BKJp+MoutahIO2a4GQStExSbA2Atrd4C7ojCHLc6K7eRQZiDFD72HwsUWqqRtAkCVBRPkAACAASURBVOcFdVNTFmWM0J1BKc3/8s9+mOVyycsef5Iv+7Iv5X/9ydfz7ne/m6M7x7FRXSm0lORKolWsEhcqGkOPjo/Is5xlWOGsoSwKbty4wUMPPcBjjz3GRz8Kz1+7yuHh4fDOrXExBRQi6RR51AZ91HOs7fA+vhZSEhA4G2UCQQBvEd4hZOxHDCFglERnOVqp6GUKsSdylBcoIaia2BlgrImN0U3NtevXuPfivezubHF0dBTZQzgXuZ33BB+1Tq1UrI7hktQQtTKhdGKDMUXu0xn8muE5rZvIdHKUiuwSorWm6zq61iCVjMxmWJsxfRMBkBIpNN54hI+paOhf0xNT70Gj1fQN5UoweLH6h/duAJ4+WPdVPmxY+z0/gJKQYSgWrLfUrbfmAUMrlXMGZ7tURIvDCELqac7zDO8cZ6enQ9W0KEtQCpUqmA8//DCXLl0a7Bn33nsvx8fHFKmvt//atzHlecZzzz3LhXt2h/RxvajUVzx7538PoOvm2fFkRFUvY+DSBW1VpUporAxHK0s00Eoh0GrNNP8iMBNqqMYgBB946v384A/8AB/64FM8e/kS3//935fAJ17sV7/q1Vx97jm+4Ru+gQ889QFuXr/OD/7jf4yUgunGBj/++tfz0Q99mKfe9z7+9t/6W0NUcd7xvX/3e3j/+97Hf/zTP+VzPvtzBjBMsSaBx3mzOWkBiz7qEStoMjmbSYL7t37rt/Lud/0xN65f44MfeIrvfN3rojYRRQ+kUvz4G97AlSuX+I1f/zUeeeQRQrrg2zvb/OiP/As+8uEP8P73voe/87f/B5RUGGMHVtdfPqXkwFCNMSAi+3jHb/46f/1bvjmZiGNbzXw+x9jkkE5tXLPZjKKMEefLv+y1/PN/8aM0Tcuv/dpvcOmZy3zlV3wFbdtydjaPgjuSzno662iqmkxmOGM4vH0zis7Bsr+/x70X72G5XHDnziFbW5sAg/drWFj0moWgbRuEjNqXs3FzBufPN6sPcaMag+06TGcGLcZaO3RttG1DXa+o6iVtWw8RVKnYigYBJaMeenx0hDWG/b092rrh5vUb2M6QZ5pRWTAqCwqdJY+WS+lUrHyvViuqqqJpW6x3KR0WeGIgqqolISpRLJdLbty4wa2bt1IlXSad1g9rLBYWBYiAcx2rakljOoy3uGBx3uBCrJRnWUY5HoGSdF3HKvU0Nm07tOF0bUvXVLT1iqZa0TY11iQmIQKktLVnRX1rHWltI2OKGbNlG1PXpJX13kcp42ZWMhbTtIrMUa2BnE+TSlwa3tClIs/R0RFZlrG1tcVoPCbLckajEVJKVqtVKjZ4tBR0bc1qOefzPvezURKeu3KZ27ducPHeC2gtOT09ZjabMh6XHB8fsVqtWC1W2M4hySAInPHYziGCJFM5prXgBUpqRJDx58aAF2RCs5wvEECpM4L3dE2Ndy72sKYAppUiU3rIAGL4eCGYSTVEDJFYzWtf+1pe/Zr/hPFkwjt+8+28933v421ve2sCh+jk/7Iv/TI+9ws+n7ZtecUrXoH3np/61/8HTdPy5Cs/g+lkypt/9Ve5dOkZfuEXfpGv/Ut/mf/ym76Jr/mLf4nDw0Pe8qZfjVHF+8EC4tcomfchqWwRkHyw6Cw2tkd9Li4G7xzL5Yr/7L/4Jp5++mN85is/g9/49X/HH/3xH/OeP/szlFK84uUv5yd/4l/xXd/93/P3/9738mM/9iN80Rd9KQA/8eOvx3SGz//8V5PnOf/23/4il5
65zC//0q+k6lqswEohY2TIc0ajktUyRpO8yHnpSz+F7e3tWAVUMtk6CqyN+qLzUI5K2qTRfcZnPImUkqc/9jF+8effyI+9/if5yEef5uWPP47KcrwQWO8R+Yix1GzPZnSNobIOrWBcljTtCqkC124+x+7uAbPZlGeffYa9vR0efPBBTk9PyXRBpouhg6MH5MlkhJRRAG4bh+1iuuW9iyblNK1BCUmmNFIInNIDiPWmSO89q2qJznPGk/Fgg+hM1EEyJcAnj14qt/fGaIFM/6X97pIAnyZeiGT5kFJSjseDVusJWO8QwhNEIAiPS8wutu40Q5DUKnrKYt9iz/jP2buUgSBiFddg8cYhhEIohUBhCVT1kpNVgxGaLggcAS9JhQIBzpNrEYsTMnY1CCFS5S1WX513CFIf8Jr5u+8VLvOcYhxN4q3tQMSg462J8x5yjcAjRUgs2cWJEr7D+4DzCfSEZlSOCTbgjKHDISTsbO2CF7gutp+hHKvFatDLZ5tTbly/xWw24/FHH2V7e5vbzz/P889eZndzRms6nn3mUmxfxaMkbG/NuHbjOmWesZw37O3t4azn+o1rADzwwP1MJmPm8zkLVdE1sWk8y3ofKHgRmJ/M2Rxv4mxHPa9QBmbbGygPzapmezxDCmLlOPkCPyGY2b5CEv0NAPzUT/0Ud47uwPERv/wrv8JXfsWX8+a3vCkK/8mc+v3/4B+wWCwgBP7sPe/hwsEBX/kVX8lLX/apLJdLVquKN/7cz/H1X/d1/NIv/TJf9VVfxS/9yq9w/fp1VqsVP/XTP8N3vu5vpiJCwrAEZH1K2wv5vebjfaxq7e7uslgsWK2iwfanf+Znk8/M8e4/+VPe/9RTPPnkp/Mnf/oepJTcuXOHn/nZn0MIyY//xE/yvd/zd7jv/vuoqhVf+tov4VWv/iKqquL09JSf/ze/wF/+i1/Dm9/01qE1SKRK73g8om27WGwInqaJ/Wb79z6Ac33ETWNOVNRUbLApFfdIIRhNJ2xMp3EihdY8/vhj7Oxsc3Z6xnQ6IctyjDVY59nd20O3UcuQJJHdxQ4AIaOfKMsUVbVgOp1yeHjIlStXki3DJtNmBLGheNOn7HisaaM5U0bnuu0DiA+p3zGCBWsTHqLZMVbLXIiVxVixa+i7Q7yPzvNeuA4h4EWUE3A+3mPh8MKnFic1+MCg11DDXcykBwCXvg4TUES0dbxI6xWKEOSgS51rTokRiVR0UiK+j3PBJaaaQiKFYjwaM9074HTZUFrBpi4wMgIbKsM7w43nLiPww/NHK9B5VTHP875adldlr2egdR0nTEjVM6w0Lkn1aZlJYB4tTNHGERLjBKGKc3nF+0EGABC+nzQjY+1TKDyBpmsHK0pbN3hnyHPN3s4WZVlwcnZGtVyg84yuqRHbM/b2tglA29as6iVd19A5S2gsq2UVZZGsQMu4h00Te69HeUHnOrwNOCmRJJeBtwiRx+AlIxgLpdFCIjzRepIquTZYPB78efvii8CsP9b9Wbdu3x5Y0e3bt3n5y1/Omg0sUvmbNyMIpUX0wAMPAvDuP37X8JxKKT74wQ8l4XCPp556KlZ+pLxLzzkHtF4cXXtfqYlVazX4vQarSNqo3/gNX8d3fdfruP/++1FSMZ1O+MVf/KVhYxwe3gEiTa/rhqqquXDhIJpugbe99U30Jx/f84eHjdRXqkxamFHoj20/xpi7Np2A5NWKvxeNxmF4305H3apJPajWWj7tyc/Ce3jtl3wx88UiLmgTy+VKKopSoXVssSKkipb3qCz69jKl6Iwdel2vXLnCK1/5Sra3tzk8PLyrtaQHszi2JTr1CdHg2dsz+s3+8Rr4+yrbYHVIoNq3qfSCL5wbcdcBZL0drX9OUiX4hZpnf+/WwWddkzn/2bnw/qI1nc6pv4/9e4A1Q/CgFfcg41EEks2M4D3zszOev3HIybLDZwVtkCxaAzpjczZlf3/v3I6RAm/v7+uvw7koLwaxfjB1dy1ZplFKR+fAEDhUtPOEVC0V/fDHPjAlLVGk74XovrdpHJIS0VLvCXTWgEwsOwiaLmpa4zIjIJlubKJ0zsnZAndySmcMKsvJipLT+ZK66djc2iEIhgLAaDRBSs3R4R2qds6oKCknkdG3pqJp42iqYqJpl5bOupRpRV+f8YGcHET0EWqhwWegJNY7jItZm1JxXBUOPH8OmGWJeayD2cH+fhSyheDg4IDDw9uDdyuK+r1XJwxC/7Vrz2Ot5dFPfSlNXcPg+YqL7Pbt2+zv79OmRtSDg4PhJg8LW6xVT/sFSXSWR2NuO7RbVFWV+j/v5V/95I/zjf/5N/Hbv/07GNPx+7/374fX9T6wv783VE8n4zHjcWwpivYEyyue+PS0IeOwxzzP6fpJID52PWQ6i5MXrGUyGtPUdWycF+ebSqkoILdNizF2KE1LFUfDNE2cJnHlynN473nsZZ/K+977FEIqHnv8Zbzxjf8mit/JuX90dMTBxgyR6aSpxEUeRGR5UivqroMgKMuSnZ0dnnvuOa5cuUJZlozH46F5vDc69kGgDwR98ae/B+sA0rO5vsrVM7OeLffAZL3DpsCwPoXjhaC0Do59Rc0GhsGT60C2Loj3ldhYIY7Pf94IHSiK8nwNrb1mf6x7ndZ/Hr/P0JccPBBi94cKgiAF2ajk3nsOCLqkPFsxbyy0Njaay9g3+cylQ6QIA6j2AN6L3P2Mv/6cepBdZ7t98UtpTVEwMLPeMN6vrRACwa1PdjnveolpWP/1vKratG2SkjJ0keOilQ6JRGQ5q65hc2sTpTV3jo6oqorRZEJelngCF+69Bxc8q7rCe8/Z2RnlZMxsMoujrsqMgMELRVHE4ljdLGjbWCUdT7aRMuCVjcUYDa4LeGFwviXa+ySZjjJNr5W2psPYKNME32upf04BYFjRfSUT+JZv/mZ2d3d58IH7+fqv/3re8ta3DZR2eKK7KpKCW7du81vveAc//D/9EBuzGVJKnnjiCV71BV9ICIG3vOWtfOM3fgMXDg7Y2pzx1/7qf33XW0g4+aLFXORx8QYf+NCHPsyrXvWFa5pDYJx6027fuo2Ukq/72q/liVe8IvnO4o3e29vjW/7aX0VpxXd8x3/D+9//FM9ducr169f53d/99/zTf/IDbG1tkWWaJ554BZ//+Z8XN0FaMM9evsIXfsHnxdcbjWi7dug5DT7wrj/8D/yNv/7NOOvQUlIUOQRP2zRp9Es8QWM6FvMFy+WS33rHO/nu/+51jEYjvvqrvpyXfsoj/LtffztCEJvcg+fO4Z0438rFalW0C4g0+C9en0cfeYSt7W2899y+fZvd3V1OTuIE26qqWK1WAzs4bw6OzICw7gG8+1hPmXrxfzA4y/Om/EEICnHFBe8I3kUNSYBOlgoIw/9rlQykREYRnAXvortdRLPn+kMSsF2LaRuc6YbfiZMUouYW5ZQQHy6ms8FGd7xtu+izCnH6iSJqXc4YnLEIDyIIZPJkrYNiU9Vcff4qTV3TdS2np8ecnh5j0lQR23XMphOm4xGTUclkVDKbTtjanLG3s83WbIO2r
uiaGtM22K4dHv35ZFkG4e6pzH3xJtpVzgsw/b/7h3MOiUMJjxIBLUFpgc4kQgtQYH3UFY0LWBdAKFRWgM6wBOq2pek6bAhsbm8z29oahop6Ag89/BKK8YhFtWK+WkZpijg+6OgojnNCggsm5obS44LBC4PMAo4OlafqpBYIFVBZtI20pqFzbfQmWotNGnNnLHXTUXeGk7MlJ6dLTs5WnJzOPzGY9Wlb77kB+K13vIM/+L3f4/f+w+/xxp99I29685tfvODF+Truf/Zt3/5tZJnmPe/+E65dvcpPvOENwxykt/7a23jjG3+W33nnO/jd3/5tfv3tb4cXsLBkxLj7/SXfl3OOH/2x17Ozs8PVK5f5oz/4fYQQfOTDH+VH/uWP8eY3/V9cevrDvOY1r+Zd7343PTP0PvDBD36Qz/7sz+Lypaf54v/0i/j2//Z1IARKaf7m676bsix55zvezsee/hA/+i//OZPJmOl0Orz+//a//2u2d7Z57srTvP033orW2TBJlxB45OGH2dnZHlLozdkmk8kk3hwbq5mxGgtNE8cj/93v/T5msxlPf/Qp/tE//H7+xre9jlu3DwmQ0kJJ0zXJyCs57z2NV8kn8+Xm5oz9vb1hhnxZlmkCQ8HW1haz2WwY4rc+zbdt23PP4MdhROuPdR/TXSOXUpuJlOcTgNc35PpDrrGQnuX1d3/9PfSv2TOXflwNMAz2bJqG3suXZdEtTm958OfaVc8I131O668REivsiwda9dNVzicc++DxzpEXGeWoiOO8taAcxRn+UophjTZNbNZeLBYs5vP4dbG423Ae7l7f6+9xGL2UtLSu66jryG6Cf2F6zXBto63G0ptutV677krFvmitQYo4Dl5AXhZIlWGMY2dvl6ZruXN8RDEq2dic0VmD1Iq9vT3qLmpf/UyycZrp33UdxlmUlgjRt2h1BBxKi+FzDqIEocjyWARyLv5c6r6HN6Xm9ly28Q6s9XTGR1BrO5rO0Z4vP8QLL+Y9Dz0Yzo6OYiVOCD70gQ/wPX/ve3nLW96aqooenbQSH0JiBSlNgCF9e+Gi7MFESol3HiHjYoltJWEQ+3ux8jzVgd7fJcQ62LI2EcNHX1uqivSbt28badt6YJG9QbQftxtC7JnTWZy62dQN040JbdMmxhff69bWFk3TxJnvxrC1tR1H46hYgDg+OcElBqLTgMKqriiLks3ZJqtqNXwOQv9eIijE6ZxZntG1MUXsDcLGWqQSTKZTcJ4HH34FTz76Ug42pmhn0cEiMfhgCMKlYsyIg4MLsXqZ5xwfHbFYLNjb3+exl70May23bt3i+Pj4rqpmDzgCgZaxotcDZg8kL2xFWWdlQzqVBnX2ulSfeq6ntT2r6MEshHOpQqWeS7HGiF6of71QvF/XyPruk6SD3PV+18Fj3cgaxbD0nL1ul4KGkrGS6YNgqEHqgi4oTlYNN07OWNQGrzOC1HjX4U0zeMde+ACSf42h06M3j/Zg7V1KQdMHo6ikIVoT12Sep3FMqRIanElgFtuVVlXsGe610JiP9KbagOkcWZEDUR7IdIHKM9qmYbFccmF/l6pacnx8zO7ubhwLVK3Y2dlBKcXt27cBBv/jdDpFCBEBT0qEM0MQy7J+zmC69sKn6TFjgPgBJiGwsTFBCEld14xGU5zpKHLFqFDs70z5zE9/gtOj6zx0/wVGZY7wJp2P4yu+/jsFfIIRQCLRrPPevJhyihTpgpTnjEmkuo+Qa0J9v6BefPSRXXA+HYBkLu13ef87/WtDD25imPnUf3hJf8Q2iToxJMdoFDdj/KCG1fB3vWgdgkdJHUv71g7tPaI3uCaDrnMOEWKJX+v44RjZODZ0KxXboZq2YXd3h+Pjk1QMiM+lVfxEpjqxr34z9+luf1R1xXaxHVMJ55FSI5WiNS3BxVHGudbcc+Ee9vb22BoVuGpJMAGcg34GF9GkeXx8RFXV1HXNxsYGBwcHNE3D5cuXeTSV2k9PTweXdw9ESipkCixRKz7XcdZZWZ9uroND/JnHA5k+B6l+znufKvVRd309DP8mjrdRLwKn88DYA+poNBo+SKNvUO6LG0rpHp/uep4XguN64UgIEcedh0BrbbJjJNbWZ6sOLNA0HafLmpNVy7IzNDbQNQKhc5QUdHUT24mSrreuC/YN1evHOrsCcNYPH14ihIhdBcO5yWHtx+GSceR7fA2NTA3dsYoc11Gv2fSWq67rUFlsArfWkRc5k+kILSTVasHVq1e4ePEeyrJktVowm21xz4UDyqLkgx/+IDjY3t4kkwpjWkzTxuZypbn33gssz+bIBJTBxZ5WEAQXokTSeZSI0NM1kYlNxxOyXCNCbANL+JzMtzlZliOlBjKUzKO5FzHogvAJPp1JyOQMvUuzOv9/kz5cQ2uNs5YszxmXJXW1wiVQWk8Ph7I352+wr1jB+YeCKB39MzevPf/Ct5WiJ7z+DW/gn/7QDw2z1fofmdSU3buu+w2jEqUWQgyzmNJf4YNHIIcWpGq1umvD9tM7pJC0bUfXmTiRc20TuzQSSIj0MWbOxTantmV7e5uTk5M05JC7evas7UcBa4RzdKl/M6Z/ccErGRlC13aUecFjj72Mq08/jbpwgQtbM3wHpnG0pkUKqOoWQWyxivpiFvsWuzh//ez0hJs3ricWHUdEhzTxJIQQ3etC9G6v1Jq29kExUibLiRvA7u6Wm9jH6I3BE+InJOUxiNi2ARcBTniHDFHDMs4ilUJHCxo+WMJa6rAOZv31DiEMk0qVUmS5Qqp8sCH0wa+vxt6dUpKYvFoDNZ8ykciasrRetFRoGSdJOATSQSYk4yync4F53SKJAxeUlHE4Z5axPdlHirsZY39Ya5PNJR3JRpLJtfcjJWVZxIAaAlme03Utzrr0uRtNHD/EeWfLAHzBD+76oUKqNQJiA7x3bMwmw1rc3JgyGo3SlFlDkWkIOavlAq1id4SzHV1bUy0WFJmmaiucNYxGYwSxjSzL4nzDYCynR6cDMxRC4owb1lAmC7JxAV7Fj0bMSsiAoLCdJ7jA/PQMnSlE5yi1QAlFV3c8/tgTTMc5XV1jXUNw7i7O9HHHZnNez+TxJ54YNn/P3vMsHy5GBL5Y3o99d5/gCGFIR33wyDQkMSSrQiAuchBcuPe+2MOZ3oWSam22eZosIHlBgzsDODjnBzCr64Z+IN46UEWkV+mmxxJx/IAEOegsvT4VN2pPk+OCt8YwGo9wzrNYzOPH6qXoXtVVtE+IOC2gqqq0eRhSiqLI6SeG6CzD+RghY/9eh0utJ9YaFssFUkBdV/zfnL15vCVXdd/73XvXcM65Q/dtdatHtWYhBMLY2PhhbAQYMKMtIgY7xgwGnAQ8JM5g7JhgMFOCiWMbiOA5GDAeAggbEDjEYnDMGCSMQJI1oqFbLfV4+957pqraw/tj7V1V93ZD3nvnQ9Pqe89Qp2rX2mv91m/9fjt37gQChw8fojSB5cWBZJsg/qUegu9Kx355FUJoh4PTte6XjAk38sFvCgZbs6N+oOn/TIKCIwTdNgf6uE7/OPqfmy5gv/O3NYil/+5T
Qvqzolonk5ywaRPeCqP0H5srABDajGxOIUCNZJW5ztHG4NFYJHNPA/pNbSWcGUDJFMZ01sh2sCUTTH8nQ482yEZYIeGMjRPVmrZMJrRrJXU+tdYg7IT2e3YdYik3CTFh8D0FDhS2btrvXTNDJZu7+Rzn6ggzBPErcE5ksubiOBVsQ54p6dZ6S6EVupBZ3RwPdUNpcjKdk6sMrSQ4e0zkksWu9Uw22FKLjuB8XaqnUTlgcWUREN7ZqMgZlkOGwwWWF7axtDhktT4Rv7PDh+8jAVQOhwyGQ+bTaQfuxO5mys6SjVza0VyUgPG95ytUHxzq9wW6C6vkmCX97QengArye590rfTmebRY9wq3qQWnpbxrGtFNCy2fKTY0eos34TgQRz7iziEs6BTIRGgycbwGgyTTk7MQ2+spSCbZl5Yr1FjWTq+xtLyEjWBuFtnOJpXyiamOzEo2dSPKos4xr+J8qhL+2rad+7jlllt43COvYMf2bay7Buw8lkKxjIiBVikVibwhZpbx1CvFZH2tBd1B2OiGqBsW1UaIjZKkypuwyBBScOrwKqKqmJxn3xKIHX5TcFJK4UjyS4ItprUhwUyjiFr1qrdUetcsQAvGW2ujrpiIIoYQsHUdy/RUWikSU96HTmBARBGSkEGaLJEOplKawuQ4D3iPDrq9plqL5+fc1gKua0VmJMA5wFsr86guYEOXDcLmLFagEMkiXXLkCLGjqxMhuBs5Szw9Z5tNZX86P4QtG42WeeiA3FPGJQhBzojz0qiS8tlRWenCe9egnKjsqpChvEf7Bl9bnPJikK2hCo4wn9LYOmZbGuPBzS2NC+TEz1SOLKpCOxrqWhoZkmkKZjrIcyrvGc/HGO8oR8ucOnaUPDcsDIboosA3nmMPH2d6+jQqePnc0MTz1PCTPyNf+6xl5sq5u5hNplFfXhZ3ylLEC3OJ06dPi5lp3aC0Ys95BzCDgqYRQ4MQVAuC+vZmCBidobVIggzKksbaVp5XcK0gKhB5hlLi3ix69UsRVO7mAW0sA402kWSYc+65u1ldPU1RFJw8eZxdu3Zx8uTx1hEmcZSGw0UO7D+P6VQGwDc2xljXsFAMqJsapaVkmM5ERmdpaRsHDx5kPJkQgAsvvJD7Dz3A4UMPct55B5nN5zR1TVNJsKvrmvF4g8uveAyz6ZRTqyfbYFoUBetr6zTWMRiMomMUDIcjDp53gMl0wrETJ1CZgLdea4aL2zh+4rgMZq8ss3fvHiZrp6jnG7jgyDIlFBE0OhrW9m+kFPD71mUp0KTgthXg934zk35rJpZ+tolYG0vQ9LKtWVb6XGIAaXErug0uHWv/Nf3ssd+QSETU/vF2zZ1u49J687/bQJoCmeqw2aLoglnKBLXRoA1GG5aLgsZDEwy+ipQMJ6KWznkyZc4MMPEzEt6YmiN9VV/nxHMTr9pqJXEW07pPJOTQw/TonS9HQClh+CfJKAjo4KLIqby/EGvjzK11EriVolQwryuU8WKcHBwqgPaGIi8o8wEaB97iqgajNEVZyobiBIMzVstQvBKlk8IYYfo3MA+WTEht5Cagtahi2EyDbZiPxyhl0XlGnmmG5QCNYmN1gxOzDeaTMUvLC1E1RehS6XFWqzmlFMOF0abWe1rwS1nGZDJh+zk7ADCFFW1w7zlw8LzotWeR7LYjC6YNOM9E4z/5ZDaNxeRFZwiqsjbgJLmQoihaakRil8/nc7ItZYZSBnTOaGEbu87dSTkccc45O7Des7S0xJK1rK2tsbi4yMbGjJ2797J6ao0rr3wM373nfo6fOMYVlz2Chx9+kOPHj8txLp3D4tJMVBayIZddfhF33XUXKhuybfu5aD1iz569rK6dZlCU4GFjTcpO5xpWT26wfWWZH37cZZw8eYyTJ0+yY8dOtm+bs762EcvdnMwUNE3FdOpYWNzBBcvn4I1mbSJmsMaIWe6JE8d58MFl9u1YiRlOEKfwXlfSt/aAm6+pUqo9V30wfSu4vzV4nFmOnRkYtt64/QDX/9lWDKk95vS7IJtZks3ufxZI9p8c01XM0FJwljWadak+KWeUBpbviYjJWgAAIABJREFUH3N8Ths4Qzp3DtAixJgKC+exPmC9RWUZZjAAJbOcWtGqwOrQzg5ICeT72ZkEIckqO79WrTsc0HmLUoayHGIi+dw5J1St3obUupfTla/pjw8KOSIAK6Wmk7F7o0PKoQVWwQmFw9v2M7TWqFzHUlNhghFtOGAYpe5Hy8tUM2m6GG0odDfgXmiFDQFlFGWhKIeGYVHglcfmMG+0cOnKQqZVXIPOcxZH25isb3Bq7SQ79+/BBVElsXWNrYSfptOg+txDocRHoYfFn90E2HsmGxvYaDwrs3+dQsV8NmNhYTFmYQrfNKweP862bctMJlOSmcFm/COeeMTFeT3Po1S1qI2mHUorAZmT1IngIob11ZykLJDmHmOt2ZUPQXHsoQcZDUesnz7KcDjkruNH8N6xvnqCoiiZTMbMJxusr4+59dtQVw2Zspw8tcra6VVuWj/OfD6LHSNZhNLUgBPHH+C++26nrmsOH7or8qlyjj70XbZv386R8UQGfp2nLHPKQcnxY0c5ckRz7OhOVrZv49SJhzhx/DA7d+5mNl3jodOnGQ5HNLUlywu0hnJQ0jhH1dRUVvS81kyO8paFvGCyfpzSKAZx96rrmeBcRoieBN3Wad1YjuwmxmRxokDkZC7at5/d55zzPXGqfvDpd/7OFuC6529+/db3PVu2BwKia8Qm7ns90nGmoJyA/CQAqJSmmncmF2lx9LO9/phW/5HwQsG+ehr6hDb41XVNPa9Y35gwnkyZWY9N6ErK4hSgzKbvvrUr3Je8St8rqSoXRREhEGk+qRCwjYkO6D0cU6k2O2v/BAloggzlhOAAGepGgTLE4X2R+NaEGJjBaNCZYpQtoKIJsnOG4MTlqYhldZZlGGfJg8SFjABB7tNBlkNBNDMqGAzKliLiXNFm06OFQZus6EhBWR1l+FBx6sQJPCJgMSpKwjAadzce5wLTjQ0hAmeieJIeZxqaOMepY8ewTZe6d0KJshiyLGM+ndI0UjP7mObX0VcvYWmdDn98rVItLtbqLynBtLxzkW8mn9GZOrj2+bLgNks6i9qrBJuEXdl6FrGJvDX0kK5Qjm0sU7+BtZaHjxwCFLdVIh9cVxVHY6e27YR5v2nxWSt0Bt+7CUOA2eS0AMKNJTcZ3otWGkG4ZOtrJzjnnB1MpzOapqaupjSNjGKtr0lwSbN3SivmlTD9Y33EJIjywXRQMhlLWp9pLW3saLzaYpXtWUwgchc0dOQFpt9+98EHedqPPJ6VpaVN5Wb/sTWb6geGs/0usflDwqlU+nxiRhB16gjSTY3llUIs/YxOrvUp66O9hpCGs2MDSgV0JOumLmv33M3HuvX7pOe0gSUIpGK0BieYkgJUJAFnOkM7z2S8QdNU1HVF4zxNENwsaEPQCp1U+ZVuv7vcXPKGWey6hyDySipExyWjUTrbFPxTcuFDJxXUBsZ4T/VLWuchKA0RfI8SJN2m5gO5FtEDraUkFoXgOAl
hNNmgjDOPGhCZHvBkKiMzimADg8xQKvEXdbXHuhqDpizETzbPNYNBRp4rjJHMOTeglEEtiHBjlmuUEgJ00zR4W4A+l7sPH8UiJebywiJLi4voADMrkwSDssR7i2ss/ZV6RjCrplPx80PhVcI15P9C7BiOFhaiRnsCvBWZEZcho7Uksb4jZ/Rre3p8l3anJ91+yXS36+xIqt4tyrakiLiIDTZJn0v5FCkfWZ5RRyJi09QUeY5Cdn/nHEYbvPMkGd8QRHcsHafzafg3iczRknDn1ZzBYIhWCuscdV1Fr8OcQVnQ1FG3LBYCaWdfW1vv2PzzOXlekEXF2Tx2NBvbtN9d9wK6QuRSmloImiKdksxgxPQltLt7BNRV4lalwNBlaErJhtBYyz0PHuKxl1xKnschbxWiIkq6IRP3TIJTe+/H4JSCo1bEmcROUHPzIwWYdil0v/Ey+5sy7u/Vzez7S/YpF/Lv6Cep85aI7QGTNPJUXHsoQsqaFKIEAhD/9mGzAmuhCkyuMRkUecYolEybirLSNL7BuQbnwCtDcIZKCdG2f9z9zSQ1eRJ+1nZptQatYnND8GoXG1k2asi12V/MYn2sGrquMZH0K/eSpJdxaiMEDGCKHO1kdZZGURgTR8pCVNKQLqkCsiwn08JJy1SGyRTVrCYzBZmOFnazmulUFFuGRU7dTDGqkN5voLP2ixMICqjnM4LLGQ4HOOeZjddwdcXywogLzjvA+niOd55qLjLehTF4PFlZkOdDqmpGNZts2njPLDPFfoeQcIX+ikNKE2cboUsYI8CnFRayQhj8PnbFQm9nSy47wSQUo9eVBJ73vOfy9re9jSsefWXkgxkJGFqMN/q7qLD4NSbL8LWP3JtKOjZGJgxmc6FkaKRTlxb+oBRXbGe9cHlQzOZVJFoKZmMjwbUsynah6TZoxwDTNPhYNog7thiN+Miza8vkGGB09NrUWlQtvK9BiRJClmm8t/J9gsc1XSczUr/iqKNgRSF2DZO+fKK8CIAdz3kKYKELIB2OlqSxoxmrbciNJjiLNhm5MUI1iLc+QRa+ybP4Ty8E6piBQyqTHJi+GfPmtSPXL8EC8t46OrtD2rCSckUMZKkcVKoNTnXqAKr2i3WfEiWwk5y0bG/pjMioUWrwEDzeNxgf8ZdcNh/rLBhFXibxP8/adJWTp06xtrFB3TiC0kznFePpnJOr62R5yd59+3GUVF6yGwVtZp8y3qZpGE8l6KINJldRLFMyKg2CUWlDORwxr+ZMJhsYrVgYjhDDmEZEM+PMpXOm9RMQNKwmOKmajBKgnJicNADWYgjkPjCzlnmYob1H4VEqRLKvQeuMKjhqJV6XdRA7x4XRItPZjEkzpigyynLI0vZFrPVUtoLgmc0n1M08cs1Um7ik5pg4oltWVx/CGJGNHw4XGM9qdiyN8I1nPqsj5BBYn8koWNNUZMawuLwAClbH3WzmWSSAQvxfD/dI/wpgG7F700ZIby6ahoSWj2XbV2nV3T9tdpd+lG5EItAauhtHsinL4lIkWcZMRgDKCF1GLK3FQYyO5iIdwN2+Z7p1Il3BO4dL2uk6SXPLkRkTO4ghyMBzkCxDaRXpGYNNHamW/wORAAn09d9ReJ9SEflbjs+LFlmM+M985k/xa7/yWh5z5ZV885vf4mf+yYvi80EFcc2GrgzvP87ZuZO3/Kd38CM/+qMcO3qUt/7O7/D3f/fF9pkvePGLec2v/RpLS0t8/oYbeP1v/DvqusOVUrdPTuVWLCmuh4jHyLXsoAet+5iU8JvSbtkvRVMGv7XEO/N5tL9PuND3fJzld877aGeoUCojhXUdr58LnuAVC6MFlA5Mp+s0TSVD0Xis82hdUgyGTGdzHrznMEeOHAEV2L1nD8vbllnf2GB5eZnRaA+ZKThxYpXjJ04xGZ/kxHrN8p4LIxbU8e2gUx3Zipf1my8hSDJlm0aqnChQCUnXrJ+xdnpo8kax3I6OVSjJspEwhQkhqr7UlFlck97i6ynYJqq3amxdEcqSPE/XOGCDj/zNmvHGRtsVT1MOqaGE8uDENi8Fr1RdJGipaRqWIqyRutGpGpnXlu0r5zJeH+MIsXOpKMsca0tMLiNQehY5oN8PM+sCQLers6kkkIMW5jGbbi5jRCaaXoeou2IpQ/JbFuHmaQFtDI2VbuZ8Nmd5eZmNjQ2yPIsllY7elw5jOkC6KAp579iVCTFoJZUKwWjibGc85rppECUVEd/zTcOgHBKyTvwxZTAqiIpsWzIQA17EWVImobRqTVsSZpJGLkSkMHKufKCpm9aybn19nf967Xt5zJU/wOMf/8NA8jlQ7fkMIUjG3Dt3AG9481sYjzd4wuN+kCc96cn8/rvfzU89+SpOnTjJIx/9KH7zDW/g1S99KXfecTvv+eP/xq/+63/DO976FjlGOvBYjtGfEXC2AvYdOXMzmN2NOXXBrJUWOstz+zdif5KgX6Ke7Ri+7yNiTEZnoiUXEAnwKKWUm4ws8sWaWvwaMmMYjUpEehxMXvDQQ0c4fuI4aMXll1/E8val1vBj9+5H0zjPbFbjg2JhsWTvvp00Fr57ZJUjp22XUfa+R9txjOd4a7dYfp48DCxNIwGsXc9pKNsYUJ4Q0iYsZOUWUFDpjlKtGq/UoqE9P0plURNPCUPfNgLDKJjF8i1p/KdjTMPvYhjUCQS0gYweEZszR9L6nfLkGzAcDlt2grVCQD5x4gSzuSXPDa4RDwUTfTYyK0HPZDKhURTL7Tk+qwRQSIsC+Mdbb+PNv/tm7rj9dh64/37e8IbficCt3LhXXXUVhw89wAtf+AK+ffPNPPzQw/zum96EVjKc/e53vYu77ryT22+7jdf9xm9s+oK/9Vu/yV133sEt3/kOj//Rx0vWojrFhaSGoLUYa6QhdYAXveiF3PSNr3Po/vu46Rtf5wX/5JqYglt8cDzj6T/JFz93A4fuv49v/cM3ufrqq9tZvp/72Z/l5m/dxN133c5f/sWfcsEFF8S0X1x/Pn7dR3jdb/xb/uzDH+T+++7m5ptvbE0drnryT/DZz17PnXd8h+s/9XEuvfSSXsYoO+uX//6LvOIVL2sZAul8eedomlgyB1o8RCvNV77yVT75yes5ceJE7+ZNGUuXzXZXSBbIaGGBJz/1qVz77ncxm03528/+Dffdey8/+fRnEIBnPee5fOnv/o5/uPFGxhtj3v/e9/Lcq6/uLrbaHHTStTkbW/9sPLP02rTrpkyt/17pTz8Ifu/H2X93ts7pWV+txAw4y4oeMdi2Lk8gvKbxZIPJZEKe5YxGMvQ8Ho85cuQQt9/+bdY3TrJ9+wIXnr+XC87fy94957DznGWGo5zTayepZusYbVlaKti5c5ld525n2/YRg2G2qcPWJ3snGlHil/UH5ftEWMnoNqthFEXeTZHgzn49VJcnKJXOfRzxiqNM3rtYQnYqJVprgS8ily11U/vHna5vmi/ur5mkEJLESdP16uOaW9fANHonbJ3MwQeBWaJRTLCO4ByZgiI3lEXGznNWKGO3tyi7fOzMzCwFm/b/kgfAkxgOh3z+c5/jpp
tu4vrrP912FJMHwFVPeQqT8ZgrHnUFSmne9973Mp/P+YHHPpalpSU+8dd/zd1338NHPvZRfvq5z+MlP//zPOmqJ1PN51z/6esFY3COwWDIZDphOBi2vLDxeBzxsobhcIH3XftfeeGLf5bPfe7z7N27l/MOHJBy1zt+8Ad+iD9+3/t42StewQ1/ewPnHTyPR1/xKIqi5JGPvJj//J9/j595/gv45je/xZve9Abed+27eOZzrmY0HLYn/6UvfQmvec2v8PKXv5KLL76Y8XiDRz/qCt537Xt4yS+8jK985Wu85CX/lD/90/fzEz/x1Fbtw4fApZdewo4dO2KGkdJ82RVdYyMh+Cz0hnZNqriReqnBQ7fBtOSn+Dg/BuJ777mH93/ow7z33e/i7rvu5OJLLwUCF15yCXfcdhvPePazedpP/RR/8Hvv4Nxzz2V5aTma8Z4ZPL4Xp+z/3ePswaxbXmd2F/vP627CcNbj+D9laAoVicPx1AXBGuU/FCo4gg3gHIMiZzgsmM+nPPTwEU6cPEbjG5a3LbJjxworKyvkZcZ8PmY6PY3JMlxwbGxMWFhcYjhcFAULdORwOVQ0LWmJwWe5zv3ftWB+LEkT+9+5NEQdWxbpvKmukZO8MvvCBQpxNvexwkpvIRuo8PcybcQd3jvhmGlNMEa8SJuafQf2t8GsxYzjfzdN096L6Zg7lVzdCgroOo4NZp3bORAliixVXTGbTAUfLHJhAFgpo3NtCFqcwHwM/Pkgi/OnomZczaeMx+vRU1YeZ6pmtKBF97MPfOADnDx5khAC1113HT/9vOfxqU9d31IviqLg3//2b3PyxAlCCHzzppvYvXsPz3rWs3jE5Ze3Jp8f/NCHeP7zn89HP/YxnvPc5/Cxj13HAw8cQmvNhz74p7z2tf9CWtDBx26ltNslegu/TOtOfeHiiy7k619f5PDhw5w4cby9AX7hJT/Pxz52HZ/73Odw3nPfvfdx5MGHRPjwuc/h81/4Il//+v8my3Le+Z//kHvvvo29e/cyGY+ZbKwRQuB//I/P8sUvfhGF4o47bkehePGLX8Bf/fUn+MpXvor3nj/5kw/w+tf/Fo+8/BHccuttYriqYOfuAyRTlj7u1d60hGhzFsvUeLKTm3harMmkRfBy35b1/dt5NBKVW6U0l172CHZEP4SUbYyGQ8bjDfbu28vFl17KeEOUZkcLCyJ3zP85UG0NQFu5Y+nm7ALS5rK0/3cqR84WrLoS98zX9z/3+wW0EESZQWuhgECf35U6sYoiammdOnWKk6dOsL5+mkE54MJ9BzlwwT5Or60ymayjZ4rhaEhR5nEuOHDeeftxzjMezzg+n6N1xtLSthgEK7579CiOTm+uT07uZ2HpHPRvdjn+BmtVL5MNbfmm1OZOb/88pmZQtyn2/lYhjmtF2hOiBpMZmZ5R3mCVwjrp/KdOej8LCyHEaZzFNhC3M8wxiC2OBuSZYT6Xe1Q05jrlkJSdJh06rTVlUbK4sEhVVcxmFXUVIJOuP97F5uKQTMsgWqLjoALOd9jvWdyZdP98AHD82PGYsCmOHTvG5Y98JH1IbDwe8/DDD9M1vhX79+8D4Otf+1r7PsYYbr3tNkII7Nq1i1tjAAA4evRhOaBMuh15lmOtoxyUTKYTMpMRokrCeDzmZS//RV71ql/k9a9/PXfffTe/8brf5MYbb0Ipxf59+1qD4MQ9a5oGpTW7du3i6MNHUVra9Rsb68xmM3bt3MHG2uk2mNxzzz2Cj/boJ/v27eOJT/wxnvvcZ7ffqSgK9uzZw6233ioGwv0FplIjRTpsBNksQqSXy2IBkC7lpqy4fwUithV65WbiWM2mUwbDIbapuer/Eru+pzz1aaytr7Xp/MLCIu/6L7/PB/74jzkQvRmmkwkBWeBbg8oZGWPv55tKAjaXpKJa4TbhRS24n37g+xwxSAupFRvU4mXw/zszSze0F0kksZLrhvxdzCKcq1lbO83q6kmcs5y7cxe7d++kGBWcPHEKa0WlYtZUTGdThsMBi8tLlGXJbDIjqDh4jVBiZtMZ6xtTjh87Kb4NLk1hdOemw3ezHmnXt+dDmi/9gXyHtWJ0G0Igyw2ZSnPRMtAfQgc5dPdtZPqn06fiJqiFMOKcFcu2THxYS6UhaEqtcb5gY32tLWv7mKb3IuSZRB9sXAu2nuOaCrxlriFEfmffjrB//RLc471veaApQwXItUFHzbYs4p7YhkZ56npOMYySRbkihO9DzVDt3dItpN17drenate553Ls2NG0uuRL2bjbet/iFIcOiwfApZddJs7dQUiT0ggQA5M9u89tF+6uXbsIIZDHIfY0q5nIuw7X7gTWWj7/hS/w6b/5DEVe8Pa3voX/9Pa38ZSffDpaaw4fPswFF5zf7R5OyK/OOh4+epRHP+pRZMbQWMu2bcsMh0OOHz3OdDolDSA7KyCu9zJkrpXmwcOHee+17+Wtb3krgW6oW+gYvuuZpBsxpfdbApP3Hp86e1vS+LQoleoyMK1VnKdT3cKMz7z/vvvw3nPJZY/grjtuB+CSyy7jI3/x5wB89567ueSyy9qezqWXXcbxY8dY31iX41NnL+m2ZmL9P+lnW7EQ7z0+qu22z29fvzkbgc7sJc0gtsD41kX5/+Eh+KppTZjli/vYfLHUdRX162A6nWCtY8eOFfbv34fWiuPHT3D81FGGwwGj0ZCFhW1kmWBWeVZASEa7gboW/C3PM7Qq8K7BWTGi7gbUu8Cc1mNytNqapaY/OpKh5bxFnqWWOUelNE1T0YkAbG40KKVai8h4yds1o2XXYDyeMMwzMQLW4LxFB7EtzEzG+ngD78tW0Tc9mqahrmuWlpba406f2Q3Sp/JYHlvVUgBms1mrHJJ0/qAvVqnJlQKjMZkSF/mmobI102qD3JZkufhcyLmI1/5sC6LthcSF+9KXvpTtK9s5ePAgL7jmGlGdTb8Pm5+bDvjow0e54YYbeNtb39qaI1x55aO56ieehDGGz3zmM1xzzTVccMFBdu3cyctf/jI5IGMYjUbkhZBIQwjCpKcTt9u1cyfPfOZPsby0LIHOe9Y3Ntob6MN/9ue88AUv4KlPeSpaa86/4Hye/vSnUdU1n/rU9Tz1KU/mcY/7IYaDAf/u3/w63/72dzh06H5mkzFrp1eFuuEarBXrteAdTT3nQx/6ID/3T3+OH/7hH0IrWF5a5PnPv1oWGUJWzYzixv/9ZV79ypeL/n2I3VQSjSHJ3nQ7VVqUZVm21JKiKFrNqlTO/9IvvYqvfuOrm14zmYz5u89/nn/+2tcyHAx42jOeyQUXXcQNn/0sIQT+5lPX8+NXXcVjf+iHWFhc5OWvfjWf/uQnJRzGrTwFo36wSQu0331Ku2wqN8qypCzLVkgw3ahZZPF3kEWkw8RAp2OjZ+vPTawKEoWhvTk33Sy6LVX6wpIpCGitqRvLZDrDBU9eFFhvGU83UEaxsLTAydUT3Hb7rayunWLfgb3sOGcHh48c4ebv3MLDDx9nUCyj9YgQSjQD8myEyUYoBihVUg6WGZRLDAZLBJ/jrWE6tZw4uUETd
eqKIqMoMrJMU5ZCDs1zg3NN1PGvEPs98X+U8tHFTrBvy89ykGMy6SROJtOYGCRcLY4AppteS+A2OhJ/vcV6S8LdQsTjiixnUORopWjmFfPZNFYScq2H5QCtYD6bMt5Yp6krMqPFmLnIRbfQNrExJbp4RR6dnqJeW65N+8egWq8FDQzygjLLKUyGQbXeDOl3menkp2azGXUlHhvJEGg8HrO6dprVtdNMerJjZ/pmWtehMjEwff7zn+fLf/8lRqMRf/KBD/Cp6z/VLfR0olKCG7xQHwK86tWv5s1vfjPf+Pr/ZnFxkbvvvpu3vf0/EkLg05/+DI95zGP43A03MJvN+dh1H+MF11zT6ocpp7pRJdKgbey8ZBm/9KpX8a4//AMC8O2bv82//Fe/3gbSG2+6kV981av5D6//bT74J+/n1KlTvPF330xZltxyy638q1//N1z7nj9i586d3Hzzt3n5y19BU1cYrdi+vBRVNQbs2L5dgmi8mW+/7VZe92//LW//j/+RCy68kNl0ype+9CX+6rqPysCx0VgvHgAr27dJbZ9Mx4IMUKu4S4YQ2qwFFC960Qt51x/9QXsdHjz0Xf7iL/87v/Kr/yphvqysrHDhhRfSlhVx633jb/973vKOd/Dlb36L48eO8a9/+Zc5Gbuit91yC2974xv5/fe8h8WlJb5www384TvfKa9HEXq76FYs5vs9+jhQP6M0xqBbOfPv3w09G+ajUym75fPSc/pBLv28y+gEFMuKkm2DAdV8xmQ+ZWE0IC8zHjx8mEOHH0BrzZ59+9i5YweD4RBjDNt37AClGE/mKF2QZaUonBYD8qIkKwx5loOCprFYp/A+E6qQLvFBEXyGc4p5VdPQURVS+ZTOWSKSbsXOUgMEJSWhwK6m1/20OJfksdN5SRMAfXgjBfh+ma+iR4MMw9PLtLWOjRExM0XpdDV7o15b1odPlKeIl/Whh36X9owGSEDk5WGTmETyjc3zHFt7yaCDZzAqcVhOr61iCSItZSLlSW+pILYu2j0HdofxetSq94Hbbr2N3/yt3+T66z/djlOId6WooIakJ6ZVm0H0NuT2d8J5SWz4bkC4j7kEJM2lt1C7hdvXLgPVK1HSvCfQBr+UZBdFQRGZ/E1dx/IjLjSlW2JvZgx5ZlgYDciM3qRCISWqFWXM1jpNgOYmjpmIFIlCZVnksakOr0HhYou8hTHSzddei7QYu2sRiIx5rVA+EJwjN5phUZJnGh0S9oYQJUmBMnWvus9Kn7M1gybAJQf282NXPrr9fQpS/YXYL4H62VCfaxScF42v6OfYLzF8mtdtS0wVr1doA7s2RqadlRT7ZwuA7cLdclwg0xVlMcRkORvjdQgyHTKbTzh27CinTp0gBMeuXbvYvXs3JstYX1sn2dN5F5hXDcVggeFoxMJoSDEopIzMYvnqA+PxhNpZ6lo4i0VWYB0cO77K4ZNrbARDTbf5buXWJUB80/r3vYTAQVmUFGUBBOq6am0VlSIGQ1JnKL5Hp9KhdNYq0CqlyDPh3Il+pCPYhmFuGCgFvsEES6EQiCVCRSF6sqbjzaNxcV3X1HXd+myY2KhrDXJCII9wUHroiE8LWd3T1DXLy8sYY1hfF8x6MBiwfft2BuWIWSXjgR5Y2L5MVhZMqxmVt+RlRlZkeNVtau97/18qOGs3s1d5pqAUM6Q0ShSixIzplQBKKZxKxiSxDZ5EA3130tsbtY3yvQ8PMI8pZSpblIqD6D5seU1HzGxxPhLepNtFYq1DKdELs1F9NjPx5vMWYzIWRwsURQ5BJFlCcCK056WUk+8vi8PaGhHq05QmJzeaJpOxrgA0NhKKiTtgAJSKLs2x/Z5uah13TNXNMiYhxdRap9+9TNlcbLmHGDb7Hcktp7P9ryR2eUbGpeiG9nubR7/c3Aq692cKU8mZAmYIARdcBPp7wSb04vbmj5fvHTc4D1jnCVuevBXDS8ebAiYQCckZvmk4dWqVLDdoA/c/cB9Hjz7Egf17ecTlj0Ipxfr6RnQ6kg12Op9TliMGoxEmlwmAYrhAOYh+AlrhlMLhaILHug7jb3ygqhoms5logQ2WZGi7973SfyeopL+Zb7LtU4KL+dAp0W4u+2PGE7GpBAEkXloIAa26QNZuTj3oSOlEwAYT12YIwufy3pFluktE0qazRXwgnfc8T5MftJhpYvSnTa9v9JzmTfuGNimzS94GWhtMLvejiybMWZFTVY7prEJb166PPmJ4pmx2lrUnL5V84jpEi480TdMKLirVfem2m9LeL/FW691AfXLgpkd8YZ5lPPzQg2f+Pj7+6F3v5i1vfdumG6/L1uRWb5nk8QSnIOY8Fgk8AAAgAElEQVRD1MEiDkVnGcOyZFCKGqbI90AgzpYSgzIyQI2mJRZufeRGRry8d+14ViDgbSy7VXfR26/rJZB0RVqblMrsIyk7Ue37bXoSxPS3/3fvvfrXo3/Kt/w7lb0p84IzSwXaYzlzU+oEAzqF2/7HpRsxPb//2v7v+02Qs722/7o+htdZtwXW1sdY76mtZePUGmtrp3Desv+8A5x3/j5MYTi1usrq6VWyrGBl+w5CEE9RjMaUkjUFE+TuMAplZGg/lX+mMDKorsEHjfMiaDivZ9S2wtuSJnYaOzKx3nT++lnZphveCA8sBOncpWuhlNrETzRGbnofm1QJZ0tlZLeOEMKsCoQIe2ilCUFG+jJN7GhHSevQneP+fbo1sPYrqhSs+z/vm0P3x5XSe6YqT2vdcsWapqFuGorhgLzMcU3NdD6FTDPatoTVitXjx/BVUlzZ3HU/I5hlWeSzIBfx0Vc+OnYWC+no5AWz2YwqShT36/T25kuF1tbdVcghZyxm+SVtENyz74AMkpts0wnqX9ju9RJUQ8yCQrypda975mJ9r2O56qwlN4bF0ZDhcCBD4nWNDNjLeyrY5BKUMkMV9dZShup7EkQyOJ5F2ofI+ljrZFYwJOWGPlMsbLrxVfp9SOTR9L16VIf0akUrKUMMfG3208NKNudqvUcvoA0ihiMfv5lDtjW4dCVdF3jawKfUWYPY1vfeiq20nxGvW/L+Sp/f11pLHbDUWUsBzVpLYy3z2jKdV8zrio2NNfJcc8GF53Pw4D58cDxw+DDj8VgaCBo2put4BwRNVgq52WSKoDweizI5eZHJRkYguEAxLDG5x1iZ5ZzNaypf4YKVTbGffdNtuP3zufW799e0ZDQdKRaSJ6acS+tqdMg7vKtXIaWLmzZ2YtIRQiDTCnRs9FhhCHilySBCNdKc2QorpM2jDzMA7SbSNxdK/hnpmPpquimD77MSkgpK+ndjLUEripHgYvPZHJVnDFkCo7EuMK9qPPG9e6nZGcGsKAsGwwF1VKoIESyUg/cYE+tyaG+4eBV69026U7rMrM3cQoiKHFs5Q1KmJOlgpRRZ1n3xrYt6KzCZjjUEJMjqLmMTeRnaDpqc2DgiomL56S15luHxHf4VF0vwoQ0ggm9IB0poAKqlffggQUvRyYxrpdFaROV8aFln7VlqA1Es01R7X8foHnpnU3VaZGc80oYSIClmtOf3
7LUnBNi2uMAjDp4nn7nlpjvbUHSfJClrojOz1XHzkc7umZtWv2zt39Ttbu8DIZbeSYKp796dFnzCbc4IZo2lso5xNWde1+zYsZ2LLjqfXbt2MG9qxpMNateQl4IBWefYWJsAmsXFbeRFQTkoKUeD2MgQY1qTK9BEXwmHMtGNXWvxbnAVtZtjsSgT12zo2POtyXHaXDe5RalNY0KyPjfLIKm4Q6UAmEo0YzZnJmmus1WibS9dt5lorciUigIR4m4UdLofBaPtbzjpGvXxs3SsKVClYCbYaAyCsXx11mFrkddWcZ4ymCxuDFK1KKXauVGREBOjEpFlU9TOsj7ewALlaMhs3eICWK+w7v8gm33unl3UVVwsjWBBRS68kxBEv2EwGMWuikzGG62Z1fO2JayUGIIkop3WhjzatCl682ptvS0DsVmWUTXC6hU37rKtswFm8zmFMRRlSRZPamPFKMF5j8kNjXWoqDwKUM3m4B0Dk6GdZWlYsntlhe1LI4KtsNWcstAsLS1gCiNDr84JxcAYuUBeBsq9D3EOrW5rfucc8+mcqq6YzOeYrGA0WqQcjtC6YF47JtM5k1mDDUoMJ3SG8woXJA/UWSZWe8GhARdcvFCOTBvRofKObYMBO7ctMSwysDUKL5I3QRamD6BV9I6MgTgp5XYJoQT+LDPs3XEOg1Lm3ERT6MwA1P43tIE3yTr7NPsXpMsmoSwQ6/U2Y06tD1G6FY8Ch9jbScngkRRJtyYywgifMZ/PmScSppPdu64bqrppy0xxERcPUx88e/fu4YorrmD/eXuZzsccPbaK947l5W1C36gbvA8sjJYpigFlOWJxYZHR4hJZkeNDuv45xmgCHu8sTV3hGg+q085rXMO8qbDeEVTneNUvyVIwkXNgNmU6LebsRKU4hZ9Ma9mEoaf2IlJL3jiCk9+rGAhUkOZJsA0+62V7Ed/GpBliwYJdJN6mzF/3gtnWMrPPFdNat82+vt8tIZzxXVtNtjYB6hKUtL5kQxTJ8DzX+BjQlDGMRgMqF5hMJqg8F8n78RQ8eK3QvebfmbOZsRIsRyXFsJAXeY/yihBRt0ExRGNkFyuiHnpQLC4tM7EzvN6ciaWLFIK0pTOVkfwX8zxnMBgQHEwjuTYfDDCp3FWKbSvbyZTm1KlTLCwuCmi5JTPzEaupbMMgN9BykALDIicPigGavLFcftFF7FwcsTgocM2M9fXTbNu2wLYd2whaMiWtVOS3yUIhcsa8s2xsbDCfT+OF0VhnRaol3qzHT5xgOp0zzD1LS0OGowVs0KxuTKisYn1aszFv8LogmAKHQZmccjBosQvxj/TgLNp7jLcY59i1bZld25dYGQ0pNITIvFZKrp0yojumjSZ5ekJARVXfhCNKJpWwHCdBUBucV0znc6qZuEmVpcaFRrTU4myibSwBj8lEgkVnYrjcuIqghU/kvIs8KNXu3HVT4+s5xki3raprbGMxmZEOndcEJ2ois6rCWs+8mjObz7HBUTUCss+qOXXTYL2A2D4E5lXFdDYhD56LDuzhB668jPPO28t4NuXoww+xPt6gKEqUdtEZXAYVFkZD8myECoZqZjG5Jx+WZEoyXO/ANeLwpGxA20AWoKobyMSAZzKdM6ssXmdgDD7qzhF5cwSBNlzMwkUePG4OsRIxWgJRE8Q7UmvQmCgUKUPibdPHy4xv40FnBhU0RmWtYOposIBPG0WQDcf5QG0rTPAsDgfozBAay7yeoXLNsDAo56nnc/KyJMuKNhgFF8S0VwFeEZxsmJKMiKx2CAHtldzbwWM95MYwGC6IG7yHxskQO0507BKyncaTJFZYyAwuJFqLEv9OAk55gqsZDXPq2sr79ALuWfTMJI2djqfSHUKyJjyIC3ghNXAAMzZxziumikZR+SZqKNGWnp18tgCQWWbQykTBQ81GBAqrum6pFYmioY2mmU/ITcZsNmO87uIq6Je1tDdq1dTiRp1lKKNk0wkBHSD3kPnAsMw5mufYusJ7izaKwakB/p4HyMpc0m2VBCXT58hupoIXnpGtYzYYMYG4Y+V5zmwqLlD61AylTmCynKwocB5mVUPtArULNA4ar8R3MdE4WmKtNAHwXnTVnAPvOHV6iYdODBkWhdiyOUtw4vspC9dB1nlLbioXEE15j9BZ8iLjwr17WFoYiPSNzqgqy8b6hHrekBdC9hSZshB1q+T9mjKSelVUR4gYoonnzifXH2REDa2oY8koXhKiZVVVYlg8GAwoywG2cYw3xlTzGuc987piVlW4IKB+5SxeaaoQYtA2jDfGjGczRsMhl52/n8dcdiELg5KHHjrE6Y0NtMnYsbKDtbUNjq+fZGl5G96Bd4G6tjgXKIuCpcVlFpaXcYJJiAVfFBY12pMrkceuvZMGjROFh6py1I3H+mQo0tFN+glx2uBTmdjPgHwq71PjCQNGTFICSHe4/z4Jk0tYbsRJdfQsTaNvLU6bGl+ozoIwOBwyYSBD+HTH3kMoQg+DVTHAQWTcW4/1opbhrUP5QJbleB+oQyPdyN7saZqzbt+r/SzJ6T3E86vFn7Mo0N7TOAmqTdMQUivZO3BdM+6MYOas4+Sxk7gWWZNOR+LBGN0InyVKAEULjVbDy5EuRsw+W3C6izxa6XYGNAXCVMq5ZI0Vn6O1ws6rVtXWRpmR4LektEr+bqyV99aqxxWTwKADFNpw6OjDGJXazWLWYLJMPCSTEm46wV3l1ZVD7ffrOlEpmCX/zP7FC4FY9hlCRJScD9TOYa2n1X6KUtYK1V7wNiv00mGdVxNOromzjY54YER4E2wuGZra3D0LcRGq2KhJ1I/b7z3E4x5xIYvDIcbkNI1nOplTzUSNNcukQys3oKIoSjFrKQvyQgJ/aoh4F7WnlEglW5dck3KZW40mz0YbrBP4IeEwqIbGyvtM53PqqmmD2byuCArqaIbhrKRVLjTYSpj021e2cXD/fh558QWcs2OJU8ePsTEZA5IB1tMZRmfs3LmTvBgQAtha5NOTAmxX+niUEsA9M9E4Q+loKGLwzuJjJ62pLfN5Q117fDDtBtsvz/uTC92a6O6HTY2WuBGcrYvbNlpicNi6WW3SFhMZYhEKTQ0CpTFKsh/tRS1Y1i84w6ZOZsK0N9E7lGrxcxUbWQlPA3m90gqTieFvGnM0Wcd9tNaSt6TfbnBegqWPDQgJXHlWUI5GVLUF63Chxs6qTlrIeXqjmWcGs/lsLmqysRtIxHRaMDp11trsLvYu4xbUuSnLzRLa5KYHCKegENh0wpQWImhIQuTpggWPa9uwKr0DbTdTUDgpd7Xe9NoEpacLZTIxG1GoNo1NZEOzafGknagLZSruahA7d8mwQqfg2ge0E/u5k3UJzpEXZRtQMq8JKmC9AwymvRfkXCfEI6TTodKi8DED2HSo9J6ID1EMMm48LR9LyflzbTMlcOs997P/nJWI2RmqylLNm3hTi6t6Y4XeMhwMKAclWSY3u2DHnfGNidtbGp0BMVvO8hxPYGFhgaJQbIw3aGpxhR+WBV4p8U9thGJR2ybiURbrxT44gf9KafIsYzKdMq3mrKy
scNHFF7F/7x4yAw89eIT5fEoWu+/NRMQGt6/sYHFpO+vrG5RFQS5jxHjvmUwmEBTkmuHSApmWUaQ802QGdADX44YBWBck064dzhFVi6Xs6wer1DQ5U7NsMwdLxWrAbWm89PleW//db6qk3/UDXNvpVOJCruIgu0qbnPeCXTqBitrjVWHTe246xvhdDJ0cvdw/IqUf2nuveySJdtOqDacYQftvpXQnphACeV4yKkc09QQFFKZgHKYQCcmtekZ8nKUB0L87+o/4RUNvTCLeOABb762QDjKkONfr5KVNKJIAFYrnPuc5vP3tb+NRj76yM5gAAbCdaxfdlqOV94wxLgRYWlrkrjvuxBhDVVfsP3h+e1RdshPpI0q4YWKPE486puObFpAiSvh0gK7zvrWbT1lOCJJib5KNVhlK+fY1EKQjFiDP42BuE0mTIQJfXUhrN4U0jtIH6H13wVrwNX037/vieD0KSXqt7455bWNMoQKZyXEWmlh69XHDdNO46ZzJbE7quKmYBbaEzvQNfOf2nmY4pQOXxcxUeH5aZzEjqxlPZzQxy62qSppHTUNVVwRou5Z5XhC8JTOKnSvbOf/geZy/bx+DsmD91AlmG2ssLizgfKCazlle3sbepW1UdcPx48ep6oaiLDFIMyLTmcApxE41HpMFslyysiyW862YAFLl1LHEbGpPIEPpXHDl0BZ4Z330R5z6pF8gOq9vvv+2Ujy2/kmPNoNKWVkfwI/rSAd/Rjdd1gqYNLzuPSiL1uLw5HVoEwIBRZTMLAMhOIKXTqfg7V3WZeK4UdMIITbTCpOVsbzsmAlpvCqVw5nJcHiKYkCRD3BuPVJAchQZAsF68Aqj8/Y7nqma8b2uQK+G7p+8FKZUek5fg6v9rxTIuofMWJrY8YxfppfmpjIvAdaJ2Nm9q4q69MQFJscwm87Yd94BnvhjP8Zf/Nmf40MyPpWD905KSU1ijDtMJlhV3UjXNfSOpd1E2p1EftCWl4DRQmCUXSujbuouoMUMMAC4OFkQAlpnZEaTx+ymrhucd3Gn6QIaKo09dQPq3SB2aBfFC1/0Yl75S7/E0598VZshulYzPpWZIQb9zR3K2jqmlcUYhbdRBSVOcCjvN104rSUDS/LYUopmkqlpjQndzpuMXAyaQueoPGPuPM28Is9zijwnKMXGdEpVVZItIgu/amqCDzS2obYWRcdrahoZt9u1cycXXXgh5557rtj/WUtZFOiFJbI8I6YbMWBaprMZjbUUZYmic+lKZPCF0YjBsBQlDKMpMkNmFIRk9SZgPEEGqqvKMZ/XNA4UGVrlUXqo2/T65WKfI5nWefpO7USA99Ig2EIyTl3A7xXM+iVnSjQ2Z0ihDWiZFtd77XR7f3n5WgQShCQjaS3eprrayvUkfXyEP1I31oeAKQfppd3m71yrpyad7t7x0pG10ZpMCUVqUI4oigF1LZ3qPBtIsNM5QXtptvQSn7OMM5lUmLVZVT+AdGhmukXi34o4FkEb2BQIU5ogNJIUCiJ4r5VgECnzaN8rfVra4EJXx/dr9lbjvY2C9DwG0898WyYLbUHKQxXLVx+iGzUdDyjmYHI0gfYgusCaspEYFJRGRc0sH0KizHQBhAhfoMQVWwpWAe415JmB4JlXtUj99AZ92y8Wj8W6OMKsaBdSCF46hSEwm1ftIhYwOZ1OFcvGns5+/AgXFI1HGjdKo7McFeJITfS1bEuJmKnJHJ6PUjcBlNwsliCcIqUISgT2lM4wRUFRlm2GqgP4xEZPHkrKYL1lbhuaiLd5Asro6GAO3osp9eLCAvv27GbvubuiksIGwQfK3BBMTlMJk3xUlqyNx6w+fIy8LFnevp08K7BWXMUWhyO2LW+XETqjZbZUK4yBLNcRW6XFDW0cc6sbGa2ZzxusA0xGIBOxQNVNpPQDTl+ZJAWDfrDSOkq3h80KJnKPd5JCW8vWjt7g29+FOLHQ3b+CT6X30jGAOLqg5FMAJEAQEF4pTTcKFWKFZDdxPk3yVg+iZzfMF5jOa6zzwl7QCpPiQRxVk7UpeG9Sw1VGQdDxGA3D4YjBYIiznrq2DEeBxKhINUx/czhDAkgpsykLu+Xb3+FNb3wTt91yK/fdey9veP1/iGC/POfHn/hEjjxwiBde8wJu+c53eOjIEd74O7+DQbGyssJ73v0u7rrzLu684w5+63Wv6waMveM3X/cb3HbLd7j5H27k8Y//kXiQ/YXQ4V/peIhl8Lbt2/jDP/gD/vEfb+PQAw/w0Y98hLzIezd42PyieHMfvOB8/uTDH+JrN/8D37rjdv74Qx9iz969Av5nGdu3r/Cu976Pr970Tb7yjZt4/4c+HAOAcKie99NX89kv/D03fecfueF/fZXn/vTVOOeoq4r5vKJuaj77+b/jF17xynZUSYaA48UjSgGFEA01aoySgPaMZzyDz37ub7nzu3fztW98jec899kEb9m7dzd/+dGPctudd3DPA/fz5x/9CHv27aVuai6/4pHcd+Qhfu+//AFXPOpRHDp6lMNHj3L1858fS+HAi37u57j51lt56PgJbr71Vq554Qt6i1xEHx1gfaCqLfPGSnMiBBwKTIYuCkxR4JSi9p4mgEO6cE4pbJAMT0xxAw0KBzLPqDXoDJ3llMMRRVEyr2omsylKK5aWllhcXEJpTd1Ih7Nqamor3TAfIvaqBX/btWsnl156Mfv27sFay4njRzl5/BinV09yevUUk+mUylrmdcN0Psd5H/mAnsl4wmwmRsxKKwajIUvLS4xGoy5QqNDegBrQ8Wa3TjIz7wK28VRVQ1U1OBfkvgli6JMglq2jYCmb7+NcfZXWoigks9zyuvZm7WVsKQBuDZb9Py2LoF/R9DCvM1U7fOyg94Jst0riJr4lgCgZsyqKvJ2YybKsndTw3rbfT4H4MMTPStMC8tkuBrYQHakUZZZHG0eFt04oILGrKcmNiFmmx1kxM+8iThMP+ulPexpPevJVjEYjbviff8s3v/lNPvM3fyP4GYHRaMSznvlT/PiPP5HZZMJjrnwMRiuuvfa9zGczfuixj2UwHPKJT36Se+65h4985KNcffXV/Pw//Tme9rSnM53N+MQn/iqdtR6RtpPMabGhiN+899prqeuGJz7xiWyMxzzn2c/eDFLG8+1DlAdWUtYOh0P++rqP8yu/9M9ompq3v/OdvOEtb+FfvPIXCd7zsle+kuFwxFOe+GO44HnCE57QpvjD4YC3vuOd/Opr/hlf/fKXWFlZYXFxiaZpWnng2bzi4ksuZceOHRgj2l6SnSUf0aaNsQk78N7xgz/4OK79b+/n1a94OV/43Bc477zzuPjSS/HOUeQFH/3vf8nLXvLz1HXFH737Pbzjne/k+T/90/zDt/6BXTtW+Plf+AV+5Vd/jSc8/kfagA+K0XDEte97H9c8//n87f/8Ww6ef5Dd556LMVksQ6XEQBlqa9vRHqskCxOgP5MArCDUFT54jJIZXpNlFINSTIm9j6W77Oi5yVhaWmRhNJRS3hiCAhs8OsuwwXF6fT2WkJbGNsyrCo9mcXk71WzG2unTlEVOXc
2p5jO2LS2zf/8+HvvYx7J68iT33Xsvy0uL7N+7m4cfPsrRo0fZvn0bg8EIMNS1ZV7VaJ2xsrJCAI48dITFpWUuPH8fF190EcNywMb6BpPJGK0lY7GR3V7EQWgTx9iaytI0Qs85vbrGfN4wHC4yswHb1AyGIyo734QJ9QNRCh792df0b2ttS2vZfE+G9n5I/06zjykrSwx9iNI6kbtnipyAzD3OpmPqeo4qCjLlyYNwDZWNYqgmp8g0rfVECrohYGPzKMsyXCPUrDRO1yfFZlpj6znB2S4PCR68JXWJpdmnMK6TEEuYdsKrp9Mp6+vrHDj/IHUlKhuT6UTeKyhpFKrQNuTOGsx6FVsblT/wQfEAOHXqFNd9/Dqe97zn8Zm/+Uyb6iUPgLXTp8m14rZbvs2B/Qd45jOfyZVXXMFsOmEyHvPnH/4w1/yTa/jYRz7Gc57zbK677uM8cOgBlNJ88IMf4rWvec3mqJqwqwRhBUHGdu/ezbOe9SwuvfQyTp1aJRD4+Mf/inCW9LzNzOKXufeee3jw/vvJ4g53/V//FW95x+91XQtgcXGRA+cd5O677uTv/+5/kQx8VZxoOHDeQYzJOHzoENoYFheX8MHT1A2EwP5zd7SYkHOCbRlj8E1DWRb4KOMdgsdb+dAXvPhn+cwnP8HXvvxltILvfvce7r/vXsqy5IH77+Puu+9smwt/+Rd/wbuvvZbUx22t7UhBLD4ULXn2oosuYmlpicOHDvHggw92u7rWoBVBK0xeoAwoJ4VwFm+ILBfOmrMN2hjKLHWB064qp85keUvyXFwYUhYlw9ECWS7KE3kROYoKyoFMkzR1ja3FNWlQlljnmcxOU6+toUBwtSBLfvv2bTzyEZdz+SMewcb6GkeOHGY6G6MR/t+OHSvs3r2bEBTDhUWU0WxMJzQBGtswnkwohwP27d+Ps47V1VXuvOsuVpa3sbiwwPLyElqDx5EpIxhPIHaD4/oIgaax4msxmVA7Q17KDV01HpWflbrZW9abx5m2MuulS9g9t1+SpsdWPO2MUhU5nnk1h6aWhkVUxNBKhB7RgTyWmjrLyILIYGWZwTVdMO23MtLxpNEyHWj9cpUSw+Gg+iIFMWtMOHrC0NJQPL69ZxMLQqHwjaXMC+qqYjQYAjJ0X88r6mpOHiWQlFGbSsuzuDOlllmHXh07dhyFtHdPHD/BIx95BfTq9vF4zLGjD5NpQ55pyizjgvMPAvClr361zaq0MfzjbbdhtGLXrl3cduttsXMCx44epQ/SSQxLsH6SvJEjO7B/P/P5nGPHjkVcTbVHm0iHm9LkAMn7cvvKCr/75t/lR5/wBEYLC8Jfi7rwAfiT//uPGQwG/P4f/RErO1a4/hOf5D+97a3gpUT51df8c37hZS/nV/7lr3Pfvd/lTa//bb7z7W8JYbYoUCZjoFTE7TwmMwJ2Ooc2cYzHe4JS0tmLu/LuPXu48RvfkPlQ76mquXQUtWb7jhXe8MY38sSfeBILCwutkWq6GWL6ugXwle89Ho958QtfyGt/+Zf5nTe9ibvvuot//eu/zo033ihUGGi/e17kBCc3syIpmEZpHu+wMVvPMjHBsK5hPp9hG08gp8hLTJ4RlEcZg8lkSqQocpy10ZxXsor5RNRK8zxjNBzgvYvvJSKZ86qO5UvBdDpheWmRyy69hL17drO6eopTJ0+iteaSSy4hM5r19TVqV1Nby4kTqxTlgMFwxOK2Jc4/eAGjJQluVV1T1ZXY0cUh5+FgCFqzevo03lv27D4XlOCutnG4xlLPK1xtxe+0aVjf2GA6naGKRbnB2zLUoU0fae4CwVa8LD36AY0QWomq/mv7gS2RULf+PN2PCTN2taPxLjachGaS5zk+bh5aaxl9Cxkmgf5ApgG8OE2hUUFk4RWgInFcx2ZCLHA3VU/Bm/+HsneNtSxN6/t+7/uu676eW51Tl67q6e65z+AI+5sNJM7Fl0TCE5CiJFKYyB/CWIrk5EMiwHIi2YAJBieKiaLIcSLbkQFZVlAkCwNjDwMDxmB7mAEMw/T0dFd3XU+dc/Z93d5LPjzvWmdXVfdE2a1SdVWds8/ea6/1rOf5P//LoIzTEeLRcWGnFUJ6De46NUvH2TwIfm6tYzSdstmsKYocoxVNdOZtmmrPUrwvgvJ4eQFgTOyIrg/m2dnZcMO/efMm50+fDsXNKI2zFo0avJFUCDx++ABrLX/0k59gs90OzSRKLo5n5+fyvD4QlOP09IYUn+d/9P51Ofzfew8eUhQFN2/e5PHjx1wD5PHgBpHEmEjckpNEju4P/qUfYjKd8j3//n/Aer3k3/p3/l1+5K//9WHZsNmu+fG/9mP8+F/7MT784Q/z937mZ/j1X/sSX/qVXwE8v/6lL/Frv/qraK34b37wh/jLf+Wv8h9/z5+LXmVygjRNOzgc7HYVWimyLKVru+sNqDFDCnuwjgcPHnD7zh2sE+FuURaiSax3/OgP/Q9MZzP+5Hf8cdabLf/en/rT/M//y0/JODTkL8QjNGxir50TPv9Ln+fzv/R50jThx378x/nJv/E3+De/67uGrZeKSxqtIzbhfcxm6HEPF59PQjDS1KCNQvnncZuAp8hzTCLyIq2El61/e8oAACAASURBVJemCQTPdrNGEUgTzW63wdmO8XhEnhq869hu1lRNS2LiptOL7lEpuHXrJh//2MewXcu7776DtY1YOzcV43HJaFzSdR1ZWvLGGydstjuquubZxSXvPXokG1GjmR8ccHJyIoRd7ynLkhvHJ5wcHzM9O0PhB9zGBpEgdU3NbrujqhtsJ0sAGYdl4SDnrEcnUsB788+XzuEXOqieE9bf0F7kdV3z996/OO6PnfvPaTu3Z1L68qLBRIMEIyIDlBeMSjaOe0G+sbjCdb8fQhhuuPv/5iPE4PsFEIqe8hm4NmiU92WHBVV80uFnqTjjJknCcrFAKSjLEl9tCFlCkkpAsjZgVIrSe6ThFw+SHoz2+gkWPvt938fx4RF3X7nLZz7zGf7xz/+8tMPq2uddE1s+5wjO8uThA37lC/+U//6Hf5iD+YzEaD796U/znd/xHaRG849//uf53u/9Hl69d5fj4yM++9nPXr8I1S8Y9n7fe41PnjzmF37hF/iJn/gJjo5kpPvu7/5uyrIcWtlvfONNiqLgj/2xP7q3jRGwebPesF6vOTo65vv+/J8ffibAd3znd/Gh115DKUUX3Qd2uwqlFPODA/70n/2zjCcTnBNVxHa7FdF1gLZt2Ww2/PKXfp3/7LP/OW3bDsTctm2HcdAYcekVO24Zp3/2p/8+n/ne7+VPfOd3obXi9ddf50/9mT9D09RDbujV1RUHB3O+/y98DhA2fpplJEnCkydPuXfvHkeHhyLBiXfpk5MTPvMffobxZDxcAKv1ejgJe4xCxzurnKRy9w3OYtuGrq0hOIo8YzIek+fZ4IpQZBnTyYTJaESeZfR+9s7ZGCbjItFVSLy77Zb1akWepcxnExKjqXYb6u1GOqUYQIMLNLuKYB23Ts944/XXydKM1XolORGTMcoYLpdXXFxdSuHxjmcXz
3jvwQOurpZY6/EO1qst6/WWyWTG4dExi+WS5WqFD4HZbMZsNiNNM4q8YDyeyoY9GLwDGzdpVdXSNh2dFY2oc44kTUnSBOckeDpJ9HNC7evT+RrMf7HjerFA9ePmB3HKngfs3z+suVfJ9PK63ju/vzmF/hpj/2Z0PQYOQc4x/yLEG5mYggrxVehI0YJoryAJbaelZ/Nf31h7I0j5WoGMREEqDG9ZAgTvJWglWOp6R9NUHBzOGE9KxuOSg9mULJd8hSw15Om1VOqlYtaf5fs0jM//k3/CF7/4Rb7whS/wsz/zs/z8P/pHwwtT8cAYFTlfcUsRvOe/+gufI00Svvgb/5yvvf0O/9Pf/CmmsylaKz7/C7/Az/z0T/P5L/xTfvmLX+SXfvEXB9znJexnr9j0Hd5/8f3fz9XVJb/2a1/irbe+wfd932eHu5sCLi6e8YM/+AP83P/9czx7dsFnPvMZjNb8jz/5k7z2+mv8xld+m//9//p7/PqXflXa4fihvnLvHv/b3/7b/NaXv8z/+Xf/Dv/H3/pb/PaXv0xvgf0f/Sf/KZ//5V/hN/7ll/n4Jz7JX/3v/jJtK6ZyIQSyNOVDr73G8ckJ0+l0sO0ejyeIg2ZCkqWDxEdkV4av/vZv819+7nP8tz/0l/hXv/f7/P1/8A+j9CblR3/4r/D662/w1rsP+Ic/9//whS98AYCyLCiLgjRN+dKv/gq/9Iu/yJe/8hX+8M03+e7v/nOAOFl8/+c+xx+++SbvvPcen/r0p/mv/+JffO5Y9u4fhBAT5eO2yyhMomImQsl4PKIoshhc0WGtGOzleUpepCRG0zUNNi5EZrPpkIIUvKMoMso4NkiHl5CnhjxNyPKUPBO+GlGxYIzhxo0bfOpTn+LWzVtcXDzjwYP32G43dLZjOpOwXpPI+Ng66aSapmGxXLDarEnSlJMbN7hxekpZlsxmcz72sY9xdnZGludY56jqCms7xuMxR4dHpCbDqASjEpQSQLw3fxxoMN5htLhpdFZcXrRROG9fKlr7haoH+nvAfr8zG7aQLxSnFx8vdmv7m1Jr7UAhCiEMHv0mJlY18fOx0eq9x6x6QnO/WOi1viH0Z0k/9eyRXfvv03rA23oIRKakeIMcuKeCn+nYue0tRa/fL44sS+jqhjzPaZuK48MDRqMRozLn6OiI6XTMZDJiPC4pR9chwC9lANz9yMfC5ZPHEYCHf/07v8MP/MAPSII5kGdZ3E70b1DR1z1NwOBFN2j6iPkQ1ZpS+n0gXsTRVhcGPaOo7aNU6bkatjd79oB+BNAUveuknGhaMXBZ+u4neElWLvOccVmSGhOdNxRJmmBSkTiJ5bW8E/3CCpp+9tcCVA4SpfiynBPvLW2MyIKUinZHIi7ov36QicH18fG9ZEs89L13UgTzjKZuJcbeCw6i4t22l0VZZ2mbli5a4YRwLTqWn8Vwcg8nVSzccndWTCYjjg7mciF00hX17zUvCkZlQVFIF2i7jqresdvtaLuWNDED9th2oq/sHKRpyp1bt9Eq0HUNZZ7T1jWJhsQomroiOEuWGhKtaJqa9WpD3XpaKx3RdDrh9Tde48MfeQPvLO+88zZNW5FlKXW9YzwZYbRiV23J84zDwyNSVbC8qDh/tmDXVBRlQTkZ4RU0bYNKDIeHc7Is4+jwiBs3bjAdj0mThCLLyJMc21q6qsY7oRZsViuurq5o2pbOB86vFrz36JwmJKispLLgTQpGoIT+xrgPyO8Xp54k22Nf+0VMKUVuXgb4Xyxe/fe/H+UjTVM6J+eDSgxJKiz5rq2wbUOmNUWqGRtIlSNxFu0thTbimKz1MIb3xU2ccyJ0sUfd6Ce0/n1Y71hXFXlZUGTZsO30Xuyv+6/3wQ5/r0IYpgmShHR2yGKz48bNU/7Ed/5xLq4u+do3vkFdV4ymE3HXwUN0hf5f/84/UPB+QvMof0Eu6ecuZlmh9x8WUckfosOoihdAz4iX6u6DG7SWSulY2WPx0zGSLM69Qgnxe6UMQlDDmNhjYqFvXYO6lj71jWTodZt7o+neiaX7BYcCFfE7ucMGhG3Vt8AvjgERfFXXciZJWBdhrTFZlP4orBOiqXfSomdpGsMwdPSyioUQuS/0Okmzd0cUukdGWZaEEFhvNnEbJTbmKmYRPOfyapIBh+g3lddMdAY7p+HdxA9Ca02W5XgX8AqCEnfc1Ejk3WgSMxJ8wBspplmWAn4gm6aJRpGQZynbqsV1ElwxHhWkuoAQWK8WpKnh7PQEY6CpduyqHU0tcWJd5/AqoesCWhsODubMZjOq3Y5Hjx6y2Wy4ceMYZ1usTkSGpCA4cF3AdYE0NxyfnXJ8dpPFcsXF4gIXPNPpjKP8iKAUZVkwHo0YlSW+s0KwTVJANn3BWemw9rola61YHymD0Uk8l+VASiRcgfWKutth0hRJ5jLI7bq/5YKLpp4mUTF3NeC93OSDlw+pc3Y/tva5wvbin1/kmCmlmM8P2dUV1lV4J865xihJl/KOVGtSBUrJeOecR7mA1w4XFCY3wzmy/1N7yXSeZ893h0qstqQZkabEcX0N9aakPh6FxGjxXuulVT3+jxyHREHjO45PjkUuWxR03lJ3LdM8IQ2pEHetYx+efHmbadTgaNG/lYBo8Hrb6b4tkigsmaP7mbkP++i3KtcYokIhpMIklTGrrzZ5lsnFXu1IjeKdb77z0svqH3/zp36KH/mRH40Hsedq9T9EaAIixRhGcykeOtDrxoIPIoWKbcvAkTHmOefK6xPn+v+11nSdjBgYQ93UJN7EbAQ7MLBNbO+laF+nMy03G1CSGjUsBmLyk3NWPNoRblrbtJRlKRedd2y2WxEAB0mL8spdA6fxWHitsJ2NhfuagNxr/pQSDpXRmizPYrsvLHy02CY5L2RZBay2GybTCSjFtt7hrNBPxqMRiZb3VuQpk9GIJMmoKkeqOtnsNR2VlwzS4DuKImM+n1KMCqqdQ3Uar6ANHqsUXZB1/WhcMp3OmM0nECzPzs+pdxVGaTarDW1TM51MsbXwvebjIybjMZvNlovzx0wODkmLHKs8+aikLEuOj48ZT8ZcPLvgzT98k/F4zPHRMWdnp5hgqLcNk8mY44O5UFUSBUpzcbGWUckFlErI0hxNTfAK33rZEGpD1zS4YMiTVDzoQgTAgaBkmRIiLqTQSB2xkjOpDSZReBuwLrqyqh4Uj6e67pcD16XFuvb6sk0UBsH6msbivSHROT5+vY87MJ0aXNdhg8c4hVGBJIgPmbKydd+5CpMmpGkyqDoCgdZ1dFXHJOKvYe+ciqMZXml0loI2WB3fB5rgLV4ptFFUrl8o6cGgk+BJgiEEJcdSgS4y3nznPuvtirprGc2nNLbD4rA4gnL4vQXAy9vM4ROQP3/6j3wbOl7oJq4alepXrtcHtv89BD2MiT4IP0dkRBpUnKuTNFosy/q/KGRkSrKE9W7LK6++EvlZUnh66VLPaXrxnvE8QVFJx7b/pvoiHIbZlKGSxq5SZFUix7r2+1d7P0+KgvOetu1ACwcqI4+ET+FPJTEQph9vtVYCJEfP+izPSZME
k5hY/KSDI5JUcUQxdsB1ji7pKIqcIs+p62rosrQRYzwhDfphC6W0pkta4UUNR2GfcX7tqnCdMg677U7kLUE2UtODOSBBsBeLK8q6xHtJee87TRvHkdQYQlHKRtV7siSlzEtMorC2oa1rfLCMRgXOOzabDVW1pa4qqrah7SRcw4aAtx2JSTk6OmA2m9I0LdVuR9s0TMcT8iwn1Qm2syQmIVGGpw+f8tg7zm7e4vDwkPP1ityVcRxPaduW86dP2W3HFEXJt33qj0hHqTWpTula8aMLzpMYw2ic0bUJ2/UGVKCqKgCqqiENYpXt7D5E4BCz3YAKkdYSBpHWNV4mznUM2k1kcyfKEAOGGDq2N1Zen6YvwB7PP/obMwq6thuuO0z8nLXohrUXcq8L4EKIkX5DL4BDo4xCpYksA/sRNwSx2wbs3tg75D4ocAos4FWCjdeVUQZlGCIEA9DZOt4qg3j5yzMN76VtOkxe4LXh0eOHXK1XYmhQZFxeXjCbT+gNcoO+vtJfKmZlOaIqRzS73XAsVXxhAoJz3YkRj1iQD6i/doKXD+46Hu7aX8zo6FMWR5iyKBiNZZQyqaHqWowT/Z0fDtp+AXqh3e5/H0ZPBgePoSPc2yYN39QXxvj8SvUraa5dAGJn03/YSinqqiZJE8qiICBC6N7bzWhDXVXCio/R9m3TCFaDkIt7fEPsvjuSuO0EFU9AH2uvikTcVswso1Zts91gow25FD2iCaK8z0T1KUmW3n23JyTKlkyLDXQQ0X2SJBRpKjcqRC8btGIymmBth2s7ukai+rQKewnknqIck6SZuO9uthAqElMQfIgbYHGeSNMU52FX7bCuJc8z2rbG2g5QJGlKqjR5XmCU4fT4Bm+88QZpmvLs/Jy2Ect1tGK92QiZshRSbp7lJGlKvas4f3ZOWCZMj48pokuHszaeZyVaGeq6QnlZ9ydFQVnmHMzmsRgEmrYmz8SQso1by7brKMqSzbbBB2g6j2gE5dd1BxWnk75NHq6gMFxDQ+HZL0Lq+iu10gPWuv/Yly2931Jg/+Gjc/AwWQWFjiJvpSVEWxMGHbMLKcr72P1ZKcxe4XrUJZ5bTkleZeP6vxNdZh9BY4On8yJzszbQhY408vm0klLjcQQVGRPe4ZXv5ZgEpaWjC70dkZEMUCfnatM01E2DWoutNiHIDTY+Xipmk7RgdOce282aYMVsLzWaIklJDJRZBs7iuhYf8QUfnTc1gcPpRCxaMNSNpbYBkxSk4zFpWkCSMj884JVXbnN0ckQRSZVVvePR+WO+8ge/y5OLc3brnSTwNEKPyNIMH+/8wtXyglH5gHXC/wk+RGW+wSH4j9ayOh+lBWdHR7x68xapVhRJwmiUM5mOKUYFJktQBnznWa3W1HUzAPltK0B/lhcYo5lMJxijWa/XbHdblFG0bc2zZ+fcvnkbazuqXRULVyeZAZGe4Kzl+OiIsixZLK7YRa+tJDEkSU61aQZbEx8cOtGkWcJoVDA/OuAb33iLp+dPaTsrI08iwRxaG4piTFmW4jpR19i4oAhxvE7SRAI4uhbnxVJHhcDRbM7x8QmXiwXWSeJQ3z0UeUEai2nXNUzGUwhC6p1OZxidsFxcsVlXJCahKMA7Jd2M8oxGBaNxgdKBtqup6wofpS09kJ2mWUzuMeRpxunZmWyy2jbaq0uY73a3kwIcgfNnz55hjOGNN97g9qdvc37+lEcXz+iAt956izRJODs7E5lZVTEeTTg7PWMyGrFer1mv1xitODk65ujoSDrsZsdms0YrRdO21G2HR7qaoCQ7oHNWCKBJr5XsO63YnqiXC1bf6bNfkPYWMdedjhYrohcwsf8/j57f5YOYBwRvI+H5enpxQYKDe8zWeJGZoQP1tsFYwQZ9cIL9iRcCre3QiG2WVgZtHImW1+w66FzABRWJ144kCWRpRmpihqbv4jYzFloH3itQAWsUyiu8gvVqjbWOm2e3ScuS+w/uUzct5XhMXddSZb1sPz+wmJWJyEfGh8ckSlGmhjLPKJOMPBHxbWhbXFfjmpbgOhSBNILNRZYTULQ20JaKJB8xnh2Sj+eQZJAYPv7JT/Dt3/5vkJc533z7Gzx98ojUGI4ODzg5PqK2FVpJ4HBTWbTSZEmOjVl71loUniQV7KGuFY1q5K4YFFqntN4TtCbNcpxzjBLhSN05u8UkNYyLgum4YDofM5qOScsMnSpSpbm6umKz3qF1gkdR1y1KGYqy5Pbt29RNzWa7luQg29I0Fbu64s7ZCfPplO1my2q5lEi+BnIdyA/GzOdzZpMpk/GYxWKB6iqmmWY6naCUomsdYa5pKkvdNuR5RpIZmq5iNp/wyU98lNs3DvmDr/0h7z14iPMwmc7JsgKiM+94NJYLwrq4KXZsdzuqqqHtOpRJYTQCoGoalNK8cucOJycnOOeomi7CA15iB52kfYu5HxwfH2O0HCPvA9vtjhBgfnCA1obdpsI7xWw2Q+tAZ2t22w1JqinLAue62JHtX+jSBRgNXWdZb9Y8OZdgmdVyRV1JoXTWMpvNGI/HtPFm46zlwYMHVFXFaDTi8OCQre24d/cuu92OuqoZj8acnt4g0Yam2oK3FEXOeHTKfD7j+OgQreHq8opqt2U2KWMAi6Kua0JQXC2WXC2WVI2ldREMNwlBK4LbgzykVuxNB9G+au/99sXs2mqHAZjVWvEyasse7PGtu7IBTY+vQ8XX0JPGFeLYq5y4tzbWYrzgZgbhkFWdmGwa5SQ+j4BONMpD66LwXnmhYxlFqr04TNsQcwk0Nm4/DQGrPMYJu0F4lQ4zFLPeNNTHNDaLN0I89wHy0YiybQheYS2kRUqRa7zyQ4bABxYz3wqzOksSiixlUpZMyoIiS0kVhE4Y2U4DSSJpw1rL3TtJWC63dEHRBcAY8nJKOT0gLUq80mSjkqvFgq/+7u8Qgufd997h6uoc61p2Xc1qtaDa7QYrXh0/bB+tjZUSprKOLpOKPYM771FaUoMEt7rm0DjvqaoKa1tCUgg3JhG7I+ccyjoMGpMEsiyhKDNsJxIVY675NJeXl+S58F02mw0PH11hreXg4JA0S/kX//w3WC6XEAJlWcbOMyNErMgozXaz4cGD92iahuOjA05PT9ntdlzWC4q8RJPQ2oamqdFJSVmMAcWjx49Jk4zZ7IDiYsFms6XtWnFx9Y6usrimY1SWEMSuOqDw1uK6jmqzpShLXOQBTUYj5vMDjuaH5GnGqCxAlI7YSHq1bTdIpow2rFdrkjRhNp3FAmw5Oz3j3r17bNZb3v7m29jOMZmMcL7Dblt8kFCVPmu0H5l6d1rnZFR22sgyKEkYj8copVhcLWLGgGM6kaJfVRVpknDz5k201iyurnjnnXfIipxiPKacTrkbi9njx48lSjAE6qpitVpxeHhAUYi+b7tZ8SSIWcJsOuFwPmG1vKTrWvFS6zqUTlhs1izWa7Z1h0oyWmtldLJecjKRDtjFTkbHrXqPmQ0bmiAFzDk3JIr3GKuOx1i/0K31xexFP7SXHkq+zraNQBem39xHYXfsJJ23eB1vIiEup5QiUbJ
ACoDTogTqnPBJjUrRRjaSbSeYnJaIJFLjBj9KiyJRqbgva6IFmInyJ43SAr/YiPMG4/FKoBWvjLSVwHgyoxhPadqObd2SZEL8bVuHTuRu4V/ogF8qZoWWzWVuAkWiyUzA0OE70XmlgHIOg4ucIU2SphRJhkoz1pUjJUVnChsUXiW0ncXqTgiNzvL2/XfougaTaQJiSBhcx2Kzog4tbSOrfZntox9aHC/7EUPMYa8Z9dLq9tgCSICqorfW9V0Xo8t2lKZPfjKgtAi/rRWOmYc0TSjLnK2voLVCCEwztIaiyCNDHJHDFCUnN05QCr785S/z6NEjzs7O+NCrrwLw6NFDrq4uyVLDfD6nbRsODw55/fXX8d6z3axYLBZUVUVR5tiqQ2lNnmVUdY1zjizPsK3jyeNzDg+PmM0O+NCHPsTV1YJdDAQxWkBbo0Su4qwjBItRhjIvGBcj5tMJbWtZbTbXwchIqvW4LDk7ucHVasNqvQEvG+0yL6RD856ua1ku1xhjmN6dM5mMMSbl4GDO6dktxpMdq+VGOqnEYOsWYxRZXqBUYLNdE4KLtA6BA4jb5+CFqpPlBfPDQ05u3ODy4kKSm4p8uMDFOUEWLSjFbDrl+OSEg8NDtDGsdzvefvtt7r97n7OzU06Oj9mu13zlt79Mmiacnp5S5BlHh4fMplOUinbcTUNwHVkmcqSmaajqGpSitbKgqDuLR4tGM8QtsIp4cSw4IRaxYfjpx0WuMTPicuj6X+RcClqDvnbS2H/O/aL2YjbAiw/vO5QyPUIXqxxyvI0myXN8Z7FBydZaeVwgJngpvDKxuwrYoOUa1HItOgwhGAnycdK9egSGUCoQFHQx8k5HqzAvZUwoTLEZUTpER+Ioi1KANpCII4Y2CF2IlCTJUHGd4gM0uwaMj5m01+/7pWJ242hCCJ5EiaA4MQFva/kAbUORJGjv0cERlEZ5jVEeH51Qs3yCykYENLumpbGWdrMhVDVBwbbaUVVbkjwlCwl1vUUFR56npEnCrqoGtrX2ftAc9h9q77aZRIdT2c6lEUvrCO56ff08X024W9vtjnGSY8fuOnzEAS04EyBVjMsClSp27PDekZqUNN6xZtMZl4tL0sxw48YJ4NlttyyWC95++y1h7acp9+/fF0972zIajZhOxC/r3p1XCCHwzW++hejcDJPJhC4W29XVhjwrMYlE8Hk86/UaHwL5qGC93nBweMidO68wGo158Ogx291O3CyKjFQnFHnOttvS1HWMAnOMpmOmkwmr1RqFcOHqpmG5WPLKnVe4dXbGfDYnffw4+vOL79RkMmE8HlPXFRcXNQcHB9FZNokhLZrLqwXzyysO5nNGkzHgmU7HFFXK1cLhXIvWirIsqesdVVUNGJlWmiSmUiVJKlY1sYO6uLhgtVqRpimj6ZRnT8+xXceoHLHdboUGozSu61iv12hj8LG7mR8cMJ1OmU2n3L1zh499LLBYXNE2LWmWUFUbvGv3Qm0l0zRDCMJ1XdPZDp0mNPUOF2DXdOi0EPxJyySijBh9oiSHQgX3UgF6sZvqpwkA9oiv/RJm/+vfr6h9cDGTzbYxJsYAxoi2SHDHClTgQ6DzntZfx0BqFClSbIIy6Jhj0HkZkxMflwBeYdKCYC0+DsReS1gPUdpU1TUmiQqKfimiNUqlmEzTbnZCwB3el1yzFkgcpBpWyzW7quXs9hmNc3z9rW/QWst0PkEZJ+eO1pEe9gHFTPkWpWTGVsGiXO8B34HvaKqKNDGkBOHPaC2cqLphZ7fUzCiKCSCbyV3dkOYZ3nbsmoqqqvB4EtfSNQBecvGc5DD2DGasxaBI8kzEzlbiwJy17HvOe2uxMdlaRQqGD04SYQg0bSUrYq9o24bHj5+QK8PhbErXWpLUYFIzYHFFkrLb7fBOQOfpRDZxIQSUgaracnRwgHOO1XKJUrDZrHh2/pSz0zNG4xEhBJq6Zr1eCxP+zh2863jw4AHT0ZjxeCwa0c2GLEvRWjMajQih4ujoEIWW9KbO07UeayW1ym4rFss1PsBrb7zBdDqntZarr3+dQODw9h26XcNmuZRUdwI2gsmr5YJAYDSa0HQNs/kBnXVMJhPqasdut413QNnm2q7BGM1ms8a6Dq0hSY2MX13HcrkcQqF3VcXV1RX/9p/8kxwdH7JdL6nrmsvLC6zrYppTGkdXRdsmgol4J55UHtI0w5iENDVUVcXl1RVt1zGeTCTSDsSpFomYOzw6hABPnj4hhMB0MqEsS8rJmNnhAWmWsV2vWa9XbNYrFIrxaMR0OmY2GTMel9FJQnA3nSgmk4lsNJtIhylyNlVLVcsioHWOPBWrJJQmKB0VK5J27nwva8quu7Q9oL+3om6aZnDsGDiA8frrN8z7rP4Xi+OLgSUvFkDvPb5zMuJpRR9jGEIc9T3i8uFjMpgWh1sbIYYskdfvgiNo2ZjbSLPQaSJhx1pcU+T9iTuxZDa0BJOASWJepo/6TdlStlULymBdG6/1BB0UXdcKdcWk1E2FR1K7zs7OcMFTjkZkBOquIc2kiTHaDGlq71vMgpcgCRs8XQtZoiWpRkGSJ/jWYoyoA2QjpYb07aaDOjSoxoL2VG1D1VSSpQlsd1sa22E0uBjuqTUEq7BdKxwp38n6P7pL9B/CsA2KK+feVNBbx7WUA3qvIIEoYmwVChWkQKxWa6r5Dtf1qUnyPAb5wJumJdgOh4yhwYsyQZuUTBua3VYoB4mQCp1v8b4DLGWZUBQ57733gMlkwoc//GGqatefdozHY9ZR5G2tZTqdkmdJ7Fhq6WyLDLyitRbjpTMWd1MhiGZZxtXlAhfe5Pbt2xwfH3NzveL82TNWywUmaOh5ZKFn/Em7r7WJdjoz6qbhQx96jTRNWa3XXF1dSqfaFyN4vwAAIABJREFUNcL8T8WM0lqH7WqU1oxGBVVViz2R92x3HYvlkrPTM4KC3/uDf82dszNu3b7FerXg8OhAcjeV6Be32w3OpQPdpW0t1orPlveQZQVaK8EC25b1ek3TiBRKxkqhz2RZxp07d5iMJ6zX60F6s9tu2W23pHmGMSbibkKJ6bqG1XqBIqBxaHOD+XzOZDKirRvW6zUXF8/wzouvWWKEya81QcU8TJ2g0gxlcuFkRVy2F2K/6Ny6X2QiwShij3qYMOQfnw8x+SC2/wcVs+e/N+KRET/rf3CvWdYoYjCcjMkaVF/QVFxUJAmht+FGOjnlJEhY9YR4JTZWvddroMfH1PVxcw4XBPRXpqdzSZco3FOgd5OONCMUqCTBW431DmNS8vGIrCjogsNpL+MsQl3aX768VMw614tEAR/ogsMrWTUlxuDZa12Do1WA13FNKoxuF9zggRSCw7lA5xxNWw8HNTEZaRJ5a97jOkcbOppQ07VdPMm9RNR7H5c012JdHSmIzsbk7yEUZX+0jEeH65Ora6Wz6MW+1goBU6Uy6oRI7ZA1gqKxcsGlcXcyGo1kAZEZyjLH+YqyzLh954wsy+k6z2/+5m8xKku+/du/nelUVsl1JcXZOSdR8/FkS7NscNXc7Spmo5kkWfcyJaMHy2fnLB
cXV0ymExSK1WpF56Koez6lriq6pkUHjdFifz4QZHUioS1Vi9ptpctRIkF58vQpzy4umM/n3Lx1i+1uw3a9Yrfbiv9XI9idSVKapvfCSkjSVBQAo5wkS3n46CGv3XuFG8eHPH70gKurS7SBrmvIi4wsCuyF/xXoWklcl8AKj9Epo0mBMgpbW+qmxkbnkl614Lzn4uoSvqk4nB9IZ6vkGB2dnFC3DYvFItonVTx88IDZbMbrr3+IV+/d4/atmzRNTZYlMi1sV9i2Q1xBRMVR1xVeaOFYLwG0jbVCvdBGuo5e2B2NG0MPRiticvnzBai3xlIqwiOqN2boFTTx696HS7a/xdxfBLw0vvZdWmJQvrdplxLWY5RaG0LXRcdpKTC9SkDF5+j5iT4EeS+h/zp5D1nE1oLq6RYe19eMvUJtoyEkgPdiGxVgyKsd8DwFxsQYReQybruG9XZLa+X81kbj2y7iiW7QMgf/LagZbSttbpImMcPS01nhJsm6VmNV3HkFyIywmFWQiu8BFzqCF92k0ki4AtK99Nl6WWookhSTKDG/s3LStI0khQ+EW9+Pj9cfsnSOor8Mbs9lwAfZ1ISAU2rgV6loVaSR2DOlRNlf143wuIoM7QLBOelAlcaYVGAGI41OkiTkWca9V+9yefkMG8RTyVpxu5/NJoxGI5TO+dSnPoVzjt1ux8OHD2iamhsnx7zxxhssLq+4urwkTRN2ux15lvLkyRO22y3WWiqdoIKis5GYGUNBAgrvAlmWY0zCarVmsVyQFinT6QSdQL3b0rY1RiWQZINqwkkiB9onTGczdnXNR157TdLio2vqYrHg45/4GHdeuUXA8t6Dd9iulpgk5fh4Dkqz2e6o6x1dZ3G+4uTGDT50do/VeoPbiMj/d373d8i1gOiv3L2NUoHlcoF1lqraCe1nPB4wN60T6fSVxg5ifemotZFzRCslN6EQODgQB4XlYsEzazk8OBQdrPecnp5ycnLC/OAgUmZKRqOSsihom4Z/+S9+izcPZty7e5fJZCQyrPGIrmtZb1bUleQ4TCYzTJJRN/IZVnXDtmponSdxAaUs1kv4k0AQ1xmkqh/tXigwciNmKGZybl+7ZuwXp+fMGvce/9+0jL7w6cj2CNHk1aOdjr6rAd9F7XBn6bxH64DvOy4g4Ies1X2c7v2sifrXtT9OC5ZJPB5SJof0uzi+9msBEJt1Y4T6EYIsBH0IVE1N4wTjVImiq1rQ/ZLF4921P+D7FrOuQ7RiQYib1nZ469EkoIxIN0BCBnygM5LuY1A0ATbdjhb5QKu6wdqG0G8wkBW7CgnByGZFuRRlwbeOthV7kl5l2ye46CjTCSGAk1QjhzjHDh9yFL07a6VzVBqvewJhBAuDik6tCXXdsE0ELxvHjZoCmu1W1stZXDb4OGZH0PvJ4yc8fvIApQPzwynONWjjMdrjfMvXv/YmRVmQaMPTp09xzjGdTqUb2XM2qOs6uhM0kTiaMhqNwIrTpmBeenjfIUpnRuMJdVOx3q6ZzaeCGz15TNPVlEUBXkTXPkiiuLXRadRokiidUVpMDcvRmM1mQ9PVnF+cM3s05eLinM1mQV1vqaots4MDXrl7m+l0ztOn5zxMEy4uL1mv1+R5ytHxEVfLBcvVkvFojLUdZZlx55XbhOD45je/wWJxxdnZKaenp+I+0TSDTUyWpWRZgdHiztA2HdtKRnmjDWVRooyMHN7JuTCfz4cAkNMbp1S7HVdXVyxWS1abNXmeUxQFqUmYndyg6xp21vLK3VcYjwqCs7R1hWsbFHIxZ6khS8YEPFdXl6T5COs12+2Opu2o2wYXxFE2WC83ShXTu2NB894Jw90L+N5f7IMRw3MFZ6+QPVejnve1fz9uWb8AeLGwDMVQGim52YeAcjI8ORfND0NA+evouRDcnqpnQHKkAOnr5x5ssAkI80OkSEpW+zIyEsTaOomLOiPwRmqEbeeCQ7HnjxbjCk28AbjoGFOUGQGPtR2jyYg02qcTC5kUxMD+wXupmFmbRDBcCo/rWozSTEZjRmVJ21ToALaTAArvZF2qAzTeSi4iMk9b28WoMGklk+hHr4KX/Eht8Fo6M9dZbNOKW0EieJePmERvlTK0wF4SdPbvaHLBi0FfUBLKev2BRicOFUN/CVgrWsmuk07OaINJMkJtCc7StZ4WG51WpTsFxbvvvs16u2I6GzEap2S5prWa5eqSum55/PAhk+kRHbJQ+PCH3+D09JRvvPl1fv/3f5/DuWzZLi6ecXJygu0aDuJCYTQaYRuL0R0mkfHF+kgsjO9luVyiDIwnU4qiZL1Z8fDxewQ8t27epCxLrPbUdU3TtcOYpqM2brVdE1D87u/9HoeHRzjnWCyXjCYTrHc8fPSAutpQFCl1k2Hbhs16jdaa7XZF19U0TQVKTsztdsPt27cYjUexe82xtuP4+JAuyoGqaif+YuvVwNMLkUAZgnibqUj6tZ2jbmsURC5gQpqNGI/HQ6e5Xq8Hadh2t8UoMaE0xtA0DUVRsFqt2G433Ll9myxL2eFZXF1x/rThYD5jNh0zHpU4l2KtY7fd0tQtq/UGHzQ2aILKaK2sx12QEdP3N889c8IEIy4OXosI2vlhZOsL2TBWEgF654b0JHFmRS7UWEmeC9V+Yczcvx72i5oUMiXnTFAMOiGlxEnWBrQJQ2fovARPy3jIUKRSE63e/fMUJ+nAHJpkwAjjSyGEyCT2FmdbjE5ITSL0CSXYeHxC4W1G/p23naRgmUQwt9jlmVRTtxUXl+dYDulcS9s1YOR8QQmHz+ytM18qZnUTIn/ESdhE15GZlDzTeAqSTGgQKnTQ1SKZ8ElsawFanGtxIPOy0hI6qgxapygvgL4m6vycihtJi+1EDY/ZsxT2LvLIzHOWLHJjFC2i51qe0ctDeiGrHJwI9ofrlG05IcQBtq4bSudIs5T5/IC2rthta4k8axvpDrWma1OxkjmeU4xSRuMcpR11vWKzWbDdtXz62z5N1ymWiyXb7ZaLi0uurq5YLi5lW8b1GFHXNd7ZoX0HEfTrTKGdXOy+c4AdxhMUJJmMyU+erPA4jo4OadqG+/fvc3pySpGVkmmpNVkmALYLga6uybKCsizQpqXpGhH8G82HX3+Nk5Njbt46I/gjylHO06fnnD97xtXlOau1MOBXqzXeWw4O57hgeXbxlFu37zCZjkT47WG1vOTp06fkecZoNOL45IQsS4UEHJ1YkzRFKU9dt1hb49MQgyp01LWGAbsxxpAmcmEkaUpWFKTGsN1uWS4WeOc5PDzk5k2JnnPOMR6PhL9XV9S1XER5lnEwnwqlJjjqasvlRStMKaOZzSeYNOXiYildi5KOuC8eRMBbKdG2hgi067hcMRiUV1jXDhhaD3H0QIki+nu5SPLex716uIRA0C9jYh8kceq7PECwviB475C1GRdA8QvEDcN7EtcNWPQA2gUdUfEwXEMRQI//LuNdkgiJPBjBmcVkAry3JArJyQxxNRACKkjUnFYiQ9QB4Xa6jmCUOMXEn9O0IgVcrVbcf/c+x00ly6C2QaeGNCukSBp1/
brer5gt64Yih9QkdEHRukDnOsyuAVMxn0wJSi4uZQRgFHKrIfgG2nYQUjsCWgeCl3ndKIVOEpyVmbzrOiyWrrHUbU3TtvjEo0I0rXOgsISQRtAvrqrlmeVDldWoLB58TA+P4LqJlkPBBTwdzmsJFkkS0kKwJ2stu21FOW3EBaMssG2DdS1dJ1yrPM+ZTSYcHM5ZrxZMyhE6FSBzPCq4dfOM+XxClk945+2n/MY/+2dcXVzJOJQaqt0OgmN+9y7r9Zonmw1JYvjGm1/n9u1bbLdrRiPBkZquGTrOYeERROaSZka6gBDoYuJ3UEgX2VpCUKw3W7pMkrqzoiBJJdFpvV6zXK05PDxims+4++qrGGN48PARWZ5F/3rNjdMbECzVbssqdmRVVUEjNzatFKenNzi7eYuLi0sWyxXL1QrbWQ4PD/j46x9hUhScnz8lhGu8zDlL27ZMJlOmkylZllNVNZeXV+y2DVY5skzep/ZSAFrX0bUt9a6iRuLStBYFRRbH8unhIVcXl1xcXHD79i2SxHB5cRGxspz1esXR0QHT8Zjl8oqmqXnvvfcoiwyjYTQq0CB+WVVD23QShB2itjEE0DH5O4B1HpEEy4ZZBTkFk8iwJ36vQjoxrfe6MtXjWNeKFQn+VQNFw0dAXm74H/CIi64eFyN+nyJuHmV9GQ1IrzesvZcgIaADMSNTCr344AmJvaMDfz319F2m9Al9mI4eujPfJ3wj12eWptJ8hCBdqvaRjyrLiMTI9UwnCygpxDJ6BuR7UIa6abi4uBSuX9NACCRGSeBKLNK9rdf7FrOdtrQBcgImsvjrtoNuh24TdJuIsZsGlcqJhxL+inU2FhCP8tI1ibQjgnuA9eKUFVA4xK+8sQ116LBatqQZGm0SvIlJzcHGFbBHJQoTZPPoCbLN0GIr1N9ffI8NQIyiF0xPE2hdS1ak6DRlNJlwtbji6VvfJCtyTg6PaNstnd3hXU2iPFkp3UWqYH11ycXFM9pmgskUnW1QKXSu4WpxSVW1TMpjTg+PGWdCvvTeYYLc0YJ3zGczijzl9q1b7LZr8ixnV23Y7SqaOpBmBbbzVJU4BDRth/UekyVkSY73nsVyxXa3E+PEsqTtOlIdODwoCEHwhBAC2nX44GjaFhds9KFyJKlhsbxiPJlw4/SY1jYsFpckieL84hk3jo/QSYoxKTpJCS6wuFywWm9k3MPQVA3b9YZqs2U8mXJweETbNJw/fconPvYRCAdYK7yyi8sLdltxpz08PKRpWtq2Gza0o1EubrVJwm67JbSWtCiFA2g09bYmyzJmkxlaKZFdKdhtNqzi5vKjH/0YaZLQNhVvvH5XjjewXOTSYUcvtsMDSS933rJaLqhrCZBp6pb1pkUrTZEVKJOz6xxaJ2y3awKQZJnEDe71MjpcA+3Bil6wzNIBt1IxW1JOAT8oL1Cg4ujv6Jc9SDRShFe88zLORgxaIRZNNoazKIjp4IOJ0MAO6O2H8NdbVh3ZBjI6ehlDvZPmy8jSqzf8DMHHmyiD0w1Brk9tkoHNEGJxjQQgkiQTHpsVCRVagw6iHIjvrw2S1qRTSTvvU8tkzE3wRkJPdk3HUZqj4rb69tkNLi7OUan4pKnoLv2BxazTHocFpUhNgjUBpz2Nb6lsTdoarEkwHjGhQ0MwMjb2H0r/S113qAHkQPqo5A8B6xyd62iDo1Mer/sv6i2H9vCw2O4qHegBBRdkxhdyoCZoIxY46jr12ITeV0l+pWnKttpR5DlnN88wiZGN1bam3uyYzVLGo4yuNjxbLdisLW09I0sSmqbi8vKCtpmBCqx3a5LSkOYJ682GxWKNnyUkKObjMUmacnl5SaY1t27fkmOaGGyZS6rPaMyu2gCBqtoK87rucF42lyjxeDOx+03TjO12h7T0EhRi48nddTaegB1pasgGGxvpMNM0ZTKecrVcslgu2O0qTs/OuHv3LkdHR1xcXg1//wfPvsY779zn9u3b3Lv3Ku+88w4BGI1KjDFcXl6Ks0hVkaUZxweHzGYznj59wrPzJ6xunnLrluB3eZ6zXC7puo48zylLeY7dbidjtndRZSAcNkIgS3KClQs6UYYsTSmLgslozGQyJkkMy+WSoigwxnD37l1Oz26gDTjf0bUibldaM59NGI1OCSFwcXERlw4Z63WL94qqathsLodYO+88yThDBYe1clOw1gktg8irci4ygaIfmIq+ZCraXBkhgu6PhfvWT0NU3N5154Y/9R15D8RHypFSBCV/b4PHBOGrAbI9DSKhMvSFrB8TiWUmFlAFwV9L3wdbor1r7vnXFr/phb97v22rfKkWmoaKGSFRohVTMhHfviRiUhZCjF90oLyTdLKsENaEl5QvWYhZyS5oGygTUq0iefZbdGYqrlR9EO9+bTTBiCPkrqlITSxmQYBpMQZOSdDRnE2+zyNFR/WtOtf+ZN4L+Ng54Xp1TjaQIRaz50iy8fF+a+leytsTQ/tf4ny5Z+6o5dbYF8a6aQY31uPjY7z3ZGnGYnFFmowocoMx4pm+21SS6Jxm7CIBtq0brLdUdUW13HG1uuJqcYn3gWejJZ/4+KeYTCbMZjM+9clPsF6vWK6WvPvufebzKWmaslyuMEaz3WzF78sJG77uLKiExCRoLV74kvQdorVyrx0t2FUV9Vb0g0PyTrCEeIIkiUGblLKcDA65znreffc9us5y89Ydnj59xsWFvPbLqwW/+Zu/xcnxIa+++ipJkvLWW9+M+FceN9KaXVNRVzVFUTCdThmNRkMC0HpZ8fDhQ5SC119/nZs3b/LVr/4eV1crTk+PI9k4HS7uPrkJIsaK4FRN20R+kZHgirKULVchmJ+IzBWPHz+mqioePXxItZsxm4xp2xbvPEWac3x0wuHhIVVd4azn/Nk5m80VCsXB/JC2a3n08AkHBwdMxjPapqXtHL5raDo/FDQfwXlPiBiRfBYhzgyCLnmhmRgd6881QL8P4D93Dr/Pn/f/U0NHwHPUh4EKwXWx7K8JzbWioL+m91Cxl36u53l+2/u91hcVBu9bzJ57qOvXrPef24NJhlczZITsbVPbtiUpM7x37HYbkuSmGHsGSzkupGtVBqdBqW/RmQ2bEqSFVtqg0xTXWeq2JU9avJF22jv53RFItURZ9cVMyotUYtGuCQExRBmFc24oZja4WKmv7wAv3tV694sXP4wX2dZ96k3PP1NetjeJSdCxIzg5PCLPc/G4Kkvu3buHRrFZXVDVO4LTeCs2McqHKNwWcu5sMiVomI1mvH78Oov1ki9/5V9RpAUfev0N6q2l61ratmW5XPLVr36Ftm34xCc/zsnJCU1TDe4RIcgSQilNWY4iQdEKmJxIyApORvi6bWhbS5KkkspkStpOQHxjErRKZHscxORR8gkMSWpIkoS2Ff2i0oayGHF0NOLw8Iivfe1rvPvue4wnU/Ii4+Bgzquvvkqe53zta3/Is2fP2G637HYVx8fHHB+dsF5vaNuW2WyGUnrYWo5GI6rthrquOT8/5yDqI7UOlGU2yKDm8znHx8fMZjMePXo06Cxly+dpmhatdezY
RkymY3R0OcmydFik3L9/n4uLC6bTKfdevYfRElry9PFjJuMR47FoXsWuSM6h46Nj6rpmPL72flstV4QQWCwWtE3LbDKnsY6q7mibLsIWct7pKN0ZzrsgkIYimmqq5zuyfcxqX0C+/9j/u0iW+JbF4gP/Lch0ZIZC8sK/q+try4fn/354HeGDf/aLvLL3J/aG/W94frqKhbrrWiEQOwfeCkyEQhlDqjy7rqWc5TGf0zIqU+bTMevNEl0KdIMOOCfecv3jZdtspa7Fv3G1TJJEGxZP4zrQMrJ5bwlOWMK9RkrrKG9Q0sLq2I6LCVwYxN3WWToXxd5Euoi6ZhW/HynvgwS4+79bZ+npJ6IUkAOdpilFmkm0fDyxxG+sESeFLGc8SjFsqLYLnG2YjErKNGO3q2lr0d2VZUlrO1T06tcYbOdoallkpFEgvtmsSJKEs7NTtttt5GXl5Hk0XvSe7XZLGl/TdDKj8w6larRJB31emudMphOcczRtx2q1FlDZaLIspbVJVC5cn5RFUTKdTAkhsNluWSxWwzE1ieGjH/0E84M5bWdpGwdBs15tuH37o9y9ey8Wi8Dh4Zyi+AT379/n6dOnzOdzbpyekKQJu+2O0WhE13UYo5hMxozHI+rddui+qqri4uICYwy3b9/m0aNH1HVNWZZMp9PhazYbyTFNkoTgBFQeT8YcHM6GdKB+Y7fZbCT53Fru37/P4eEBxydHlGXB+dMn1NWOUSGYmiew2qwjP02zWCw4OT5mNp8znU7xwVOMSj78kQ/z9Pycd++/S1O3lOWU3a5hW7U0rZwvhJ6KYQTPjUB6CC66oorgPET8eL+QXW/PGfiG7/cIe//zXIHrF0IvFJD37aC4pqPGXeTeABufuy+ZEQ3qf54PIVr1fOvHt+zI4j+pflDa//rh+14omkGMIzWKRCmK1DAvc2bTGT60jMqU6WTEdrMA7RmPxcK967ooopfHy2Mm195dvRay/zCslpHQpAmDdAmxAtFBR7xDDy9Sa42O4i37/3L2nl2SJNmZ3mOuPbTIiBSVpbpmgJmG4GDJQ4Bf+R/4j3d5Dnd5sMRCNGZalMjKShnaw7Wb8YOZe3hGZfWA9D7VmREZrizMrl/x3vdVBxqfSlVN/2fddGGZfERT8VGHakoNp9CD9NRlPx7Y2i0XHGSwHMfB9306QagZRNOU/X6vEftCK/I4ls3JyZQ0KtiuU5I40mBOR7dPCKHod7rcXH9B2Jr65fHxkbAb8tvv/oLXrwr8wKc/GPLxwyeyLGM8HjVeR1GU7HY7xuMhrutqbytNsW1BlmVaYxOFbfQ0QVCVEt+2CIIAIbQ2pOdpL6usJN1eiLA1I2pRFKa1J8B1XM2IWpRsd3uiKEJg0el0EZZ27fO84u7ujjiOjaZiiSVskjgl9jVXWFVV3NzcMZvNuLy8ZDQakecFSkmSVLcaOYZX7OzsDFlV3F5/ZrfbopRkMpmQJAn7/Z6iKIjjmCAImnt3HId+v0+/39ehodQFAd/3Gl0I3XJWADUrrc1oNMX3fXq9LsPRkLIs+PLlGs9zOTmZMhmNuL+/Y7VaNWGwlLJpcxoMBoSdENAhDeY8YaeD4/hEUcxml5BkJaVOVWMbbi4LYfBhB7rsSpkEh6pDOvlkEdeGrW7zec4IKaUa4yMMtOh4nh+vi/r1oYuAgwX5lp0xBQNl0j/1+drXUUvLPdlX1WGhfHIPX20G7HzYT2o8rTB5PKHhR5pNQ7c6IjVzB5aFqCQdX9BzbF6eziirlPmgT9cTBI72xrIipaoK8twm+zUNAFVWGnFfx+aVKR+b5GZRldhS0zlLjYqgUrJBEFelbGJfq2XpK1k1IiWVeVxo6SnNH970h4mvc2bfet0e5GYsa9dWHSZIbYwdx2HQ77Ndaf6woih0Namq2Gw2OHZJtF6SxnuqqkBWNpbSZWY30ISBvV6f8WRKVhQ8rh4Jux2m8ymVrFhtNGHiaDpiNjshyzKur7+w2q7JHjPeffcdWVFiex6VUniBT15oTF5RaT72TreDZQmDki8R0gZDtlKUOf1Bj90uotjH+L6DsDtU2xIpBWHYAyyiXcx+vzUYJxvfCzRmTUK32+Pz5y8IIYj2e1arDUVREnZC4jhhMpnQ7Th8fP8Lj4+PZHmOYzkM+l1OJmMeHh71U19Jot0G3/d1a5rnst9rJoQk0TihPM/pdrtIKYlN9VUpRRRF3N7eMp1O8TyvYblVSrFdb1CV0nxoZYnvG8FkAanhd5NK8uHjLyyXK8bjET2TJxsONUX2er1msVihlOLVK5der0+SJIxGE5IkxvMCgkCjyq+vr7m9vePm5o7AD0jTjGiXEiUZEgcn6OK6DoWskFJoqimpWih7PZn1TNbGzLYs3SljPLBjcGtbS7P++cRACB3RNHmwliFrFxDaAsJ6N62ojpRfe2T1+kZfe8OmZiAh5kTAIdV0bGzrcx3j3J6uvQMfm1RG61UIjaAwPeWWZWngvLk2/SfNSitQOGWFlWecDfpUKuCk2+PiZMTFdEReJHz6/NGoo2lHo96e1c2sR6FGKtcDjNBsDo6UDSsllgZkWibBX5mEKZhyr7SaClBV1SrQze0/ER+pm3O/9QR6bnvOmNXXq/E8B7VopVQTAtWLTns0HqtdxIcPD4hqjU1FGATYtkuVa0ClpSzKsuD29oE002K/j48Lirtbbu5vtepQvKc/GnB3f9eoAiVpgh/4rNYrPl1dMZlMmM3nFF5Of9Dn8fEREOxjDV14cXmB4zgkSUKepwZ2YhvGjy29nhYa2cd7I1dnN2PQ7XbYbvZav8B26PV6CMs2zBIlvV6P6eSExXLJar1msVo1EzdNdHvVarXCc0cEgfZ8Jt6E7XbHn/74Rx7u7wmCkF63i6wki8VC48CShM1mzXq9oSwLRqMRg8FATzDH4eTkhLOzM1arFT/++KMO9aUWmJ3NZvR6Pe05dzp8+fyF7doYSU/v2+v1UEpxf3/HZrshL1KiKKLTCZnNZrx4cU6e59r4ZhmnJzPe+AFpmjKZTJFScXd3z6tXrxBCMBwOieO4IQ2wbf2gcl2Pjx8/s49ykrzC9QN6QVeLclSa1lwrlhtBGDOH4eD12EJX8ETL0BznfJ8zBu2HtRCCY0mTtgfWNobt49dFh9qQ1UdoVog4rDPF8+svoB5SAAAgAElEQVSpPv+3Ujrt839rv9qz0x5shVJodSihe7uV0fCgVrKyhCZAtW0tpVhmWElK39YdF15ZMPJ6DH2HNPOQkymdbpfReNzMM3jGmDm2Zv+sygpZV3JkHQ4qMH1yZY22N6wWldK+dl3ENnevga4mWLdsi7wo9aCbXsjat66TnlqNSF9WLTdfu+jtATv24Oqyt2PI7jDIbWWekHWSOjfHr4+rpGSz2ZBlGVG0xSVmNPToBF1Nm11kOK6NMiIdGgZRMJ9OGJ1M2CcR692W3X6rGVj7PZSQbHcbsjxjPB3heZ5u9kbQMYnn0IQ+6mFBVSmWywVplmA5FufnZ03/YhRrNg3bJJ2llJyentI3ikJ1TkeHbhnCspjNZtzd3VOWFePxAN/
3mc81xXSvP6CsKgOI1UbUdVxNSQ0MB31OJmOur6+xjR7kZrWkzHPWyxWvXw8pRUUc7ynyDN8P6HV72sAZaut6PMuyZDrVFcyaqWQwGLDdblkuVw31keu6zGYzI+mmOD2dNyBbgO12y2azptvtcvniBUkaN90e6/WSKNJGXgidoF+vN3Q6XTq9PpWEaL/DdjyifcIuivl4dc3Z2SmPixXb3ZY4yViutAGVCA13qSp829YizJbDbp/ovmHHwxJuzV/TtNwJNKmhQguE0DIAtYd2nPdtt+c1DkDLUB0/1I8LCHXE0TYuUkps0+gvpTTU3LZZZ8aEGW+sNGumPm9d9ayqqsn3teEk9ZprX287lG7WoqgaCcQnm9TaAa7raFW9SmMh6vyeg+737IcdZv0+2WrNYNAjXay432zA0q2KxXpHmuVEeYHcx83hnykAHPrEvrUppaEVtm1rWFhTRq5BsWawZcu9Nl5YXecUZlCPS8LlUQWo7d7WugDtJ9jx1rjrou45O3wBdbjjCKsxcEmSGNiBFuL1Agth2ZSm728fpSB1uFZVOgy0HZt9EoOt+cfOB+f4S48PVx/56ZefAOgP+ibXpUPGwWDA+fkF79+/1x6VMdCe79MfDEjSlOnJlH0ccXd314SZ4/EY1/Vwfb+p8qEwykUa71SH0KB1KnVv5FQn6MuqmYhRFCFRujqZxCgp8RyH4XDIZDKhP+hTFLn+XFUxGY9ZrVZNk31Zlnz8+KkxMsPhkMAPyNKUxeMjVVUxHo/xPM9ALgT9fp+TkxN+/vln9vs97969Y7Va8f79B758+cJsNmM2mzV50cViwWQ04vz8nMXikdvbW82oYDwPz3cpSk0SmWWa634+n9Ptdvj48QNZmvP2zXekWUa0iMiyjBcvXuD5Pj/9/HPDGxdFEYPBgMfHRxzHYTyZcHX1Gcfz6A9dvFDS6fbo9bso4VBIhZXkFBLSvEQIl5oc4/D4FrQbn9t5ptqjaT+M67+1mTPaRrAdkTzx2loFsqcPdk3iaTumY6EN7RCGYkfXMp5WTM3YYo5TlmXjUBx7ld9K77QNtYax1EK/raS/IcEXaHowbWcqVCmo7FLj5GSBKAuqXcTj1Wei0EMiKcoUJSqErVXGhKPb3lzX5X//P/QZvi4A2JZGJqPzIjWCWJj8E8okECs9VMZvbVVE6q9VND91s2mNvam/CE0pUveNKQAlqUz1qm3M6nwKfM2PfrwdKkJPzGZzHA3H6ICALMvYx7F+qpvdpIQsL8nzim20J9rF2DgEQYjjePT7HfwgoJQl0X6P3CqCjk9eFXihhxu6rLdrJJKgoxP3cZwQJXs+XX9iMB6iBHhhyGQyYbfdkmUZHSPWIZB4nsN6vaYsFX4QkKQpu+2ezWZDr9fDth3ifWIeIRZVpfB9j/G4wz66ARTdbkAYhtw/PFCWJefn5yYXl1LJkiLXgimaX0xQVgVJokPd/+fjT3Q7IbPZHEsoumHATRqTphndTp8X5+ecnMyI9hGLxyWLxwfiaAdCsNtFrDfay6kT/99//z2j0YjFYkFdWfY8V7fBAHsjt1d7y8OhDh00XVM9PySVLInjmM1mQ7/fN16fTVkWbDZrttstVSVZrlcsFkvCMOR3f/k7kizl//wv/wWlFCezGUEQsI/39AZ9gjAEFHf399iOw269RQgHpYTGEiYR2J5RH88oCt1vrK/L0SrhyhgKoYVfdCvP1wWq9rw9hhm1K37HD/Jjo1Fv7Z7e4yJBayXUzX9NKNxOpukCgJn75s+NQBAHY9Z+fWzYvlqDdduhMNmp1t/q4wuMDSkrLUhsWZSVxJI5dmBBUZJst2R7yKucqioMlZwWZKmE+srxewY024qXa4uNNgp1+7bVmHjVQB9qi2+1Sbmh8UAcMwFkdUggNjF9K6gXol0JeToRavf3W1td5pVSmsrSYRjrSiomvJVSK1XXSlCu7VCWEsuS5JXAsR2EZdp5pKlYKQ112MV7HM/Dci2yNOHhyz2O5zKajFBCkZUZg+GA6ckUS9gNQ+xmsyFJUjzPZ7vV1NK9Xo8kTbm7vefkZEoS7xkNB2y3e+azObt4j+/7FGXF5eVLlssVWZbgulr8NstzIAMEnU6P8/NzVus1p2enbDc7omhLp9vj6vNHlNIh0HK5IMtTbNui0wk1XESgacVvrnm8u+Yf/v5/5W//9m/I85z//J//M1WpPbXJeMrl5aVB0W+1RmiSkmdZ0yBeFAVBEOD7PldXV7iuy8uXLxmNRvz0008IIbi4uAA0C2wdaq7Xa87mc2zb5sOH9/T7fYIwINptiZMY27Y4OZliWcLsY+O6vg7Ho0QXUDohw+EQ0GmBh0dtzH/z299i2zb/+I//yHw+Zzweo5QiTmIWiwWe5yEyPV8qJXE8H8fRfS22pfA9m9QVGtSsezLAkppx2ViBxigo9WQBt+dnO9l/7KU9N++fe/84v3Z4n+Yhrlr7tO3XAWB+yPM9OUfr9XGurP37saFtX0dTCdVNqhxWu/YeawHlmomjqiqqQlBZmnq8sG1yKt2qKCSadEziOBaWcEiTxERbT6/hV42ZTiaq5lJQCse2sYXuercUOqGnLRwSrZFXi9ADRu/YYLKEhWPLJkdGdQDSHapC+qYPFZqneJ3jwTwuEKj6S1BP70WaQavVncqioCrLBmuGq8iKHNtRKKE783tDD8vxyBNNpVOkCfs4Jor3uIHH2fkp/fEAt+dRqRI3cHh8eKDX6/Lb3/6WMAy5vb3FsuDly5f87ne/Y7Xa8Pj4iOf5WigkydisdeVxs9mB0hTS8T5hF0R8ub0lCDqkecZoNKYoKvK80JVMbCxLtzlVpdTkglnKcNgny1LSLGZ+OqMoStbrlW4jERa2LRiPh3iej+fr3jeNnaqYzy549/qS9XrF+/fv6fd7SCn5q7/6PWEYst/HrDcrsjTT4igCsixlsYjodnu8ev0ax9UGqt/v4zhOYyxqTvfa46qxZcPhkCAIjB5mypcvX9jvI05OJmRZwma7IQwD8jwjjmPCMOD+/o4k2dMf6HPIqqIThigl+PTpM57r8erVG8288PEKpdDMGqcX2I7NcrlmNJzwhz/8J+7v7vjjH//IP/z9/8bt7S3/8sO/4/kBvu/hei6u7+FKRVmV5GVBWkoEJXWTc8NQbPoXiyKv/aBmDrd/Pw43nxgG0bKKf2ZrVzKb98z/FXX+Wqsdaarp1ufE4fP1+5axevX11on8r2zCkXE73qSoWs5Pq5DQVBZVUxCoTHAnZYlEqz5VUpBKaRrOFVVVolSJU0CWm+qrUtpDbh3/WWPWHqzGYJikpm4e1/6pjWbC0J1eGn0sTK9/bTW1myl05RMjP98yZE3VQyqomWm/4Up/qwp0fL21MWvvU+cC+r2eziHVXOlKNTgzx3GJko2mCcfGd32CsIvApkoSqqpkNBnTGXRJ85QoifD6PvPzGY5nmwKBFsMty5KHhwfu7ozgRn+I53ns9/sGsLtcLvHcgLu7ex4fHxkOh1xeXFCWEiF0tVQpQ+RoOyZnZlOVin0ZQ6xVcHwvJE40QWGWZ5Slq7
n9txsm0xPOzkakacpsNmM4HrHd7thGe6pKKz+lacx0NqPXDUmzFMfS+cTr68/Nd+B5Ho+Pj+R5wWAwNMn7zOD1wgYLt9lsifY6JE7TlLdv32LbmmnBdd1GyEV3FcT4vg/oUDMMQ52ULjRmLwgDhCVMJ0GPx8cHlssF5+dnTKdTLSqz3VJWBcPhkPnpKbvtnvv7BednF/i+z5nheLu6uuLu7g7P6HL2upoZ+NPHT9i2zfff/5VWg4oiNEo9o1IVTlnilAVKCfI8QclSsz9YShMx1rkgW5MfWApU9bS3uD0H2z+PjYZ500RETw3I8THa8/14/dYeGhwMSJMNUgp1vI4ETzw0YR1yZMdEkMcFi+e8swagjd36G8ZrNPfe8kxBavbI+q+2hbRBubYmbZQCKcWBcKIqTU5cPHlofA3NMFiPGqfVWH+lY1xhQLFCSg0yE+AIYUjrNC6+agWzz3lSGMoQpTlWENI4oUJg2aJpum274seD961cwnPGTCmlK64ttRth6LvrPJrnuHS7Pda396SZyQ8FulxfFZKq0iyjQRgy7U/JZcl6uyItEipVMuj28AOPMAj48vmGH374N4oix7L0EC+XazbrDYvFyngkLlUptQGNIoIg1Ohyy8FzfU6mM9brNZ5vkaQJtuORZ3pRaS9AUBQ6G9Lp+mRZwT7b47g2p6dz4niPbfie4mRPp+vjB57ORwndKVFJxX6/w/Nc3rx+xXQ6wZIFy/sbiqLg8+fPWJbFH/7wd6RpytXVlcF8CQbDAWmacX9/z4sXL5hOT8iynOsvN9oDdhyiKMJ13SaklFJqLxjdSXF+fo5Sqiky9Pt9Qt9nvVxSyZL7+3uD7etRmkJGLQ1Xe3cAlSzpdrv4vse62uK4Dp1el09XV8YTEvhBgB8GBEHAeDRmcjIlK3Jubm85mZ1QKUmcJOyiHd1ehzTVOcWiKLBKrSgkK6VVv6SgEoqafUJia74+28IxeWRLfO15tQtRvxZK1hTV7ejkeB3U49ne9HiIBoherx/NWaw4PqWqc2b16/r31rmecySeCz+fHJc2bs566mgqXTyUAhx1yKHpbgQLKRTbLMaT4CkH19asPIpKs9IeTqIZSVrb155ZK5l/nJjUr/XvUmk2DS3VrisnUum4VimohEDKSmsJGk1NywZZ6ieavh7zZVggsBG2ZqW1n/mi4JCM/Grw2gPaCjdbe+pEuaqI9juoSjpBCMJlH0dYa4FjW4ynQzwvIMsjtpuYOMqxHc1PVeQFRZnxsHhgcjLm5GzGZDbBC118w3rqOA7SUSbB7eH7AVJKvnz5wuJxSSfsNMBP23ZIs5TQEiYv4+A4Nnd3twSBDm+GoyGrzRqlFIVpoYrjGCEE4/GULMvJigLL0nQ9nhcQJxFJkuL7umWoKktydH9pvNc6np5jMx4N6Q8GdMKA/X7PZDzUmopScn7xgtVqzWAw0s3exmudTCYMhyOiaM96vcJxbAOKVdze3pFluVFx1/c9GAyYTqdahcr3cV2X+/t7bm9vGQwGuK6W9asqzZyhpGS/j1BKMegPWG9W5HnOycmUssw5Pz83OTrdtTCZTOj3e+yiXdM/GoQBUgnW6xVRtOfx8YE3b97y+9//jn/8x//eqGVVsmzEWRxXS5dNJlM6PZ0aqAqdgihlgShKhOVgOS6e7RhFJs2wrLPmetFo2KWldTXr56kxXJXBbNbVw9pg1BXEOnQSTQhqkvZCG0YlQCj9Wpi5rPn0teYlFljYhnvtCEfWtljttc7z7yMVyvo6P3e83p54ifXaE2b/egDqCoM65BGrSlHZmlNNKgz1NtRdlnr8FFQlpWzl2mQJSuE7br2sn2xfGTPXspGmf1JVT9syLMc2NB7oJ1JVohyTb5GCQsoD2ZoUWIah1Ta02VJVDQ2Ilm5HMw/UpHFoapM2R9FXg9Z6usHTkrVlkNfYICSG16vSTyXdrKkpvB2LTGa6hQYfO7PYpT492aHIC5K4wLYsLYqxy/FdLWZSSslqu8bvBaSfrxgMely+vqQsSjZLDRgdDiakac54fEISx+z3MXlWMByOuLu7w3FcptOJlrQzxn847NPt9hiPxux2GwaDDjc3d1xfXzOaTBiNhuyTGCFshOgAwtDqlDieR5bm2JaLwCGJMqocKiHJkhwQfPebS8qy4NOnD9zdfNaN3pMR0+mEwLX453/+F/71n/67zselOePBiDhJePnyBX/5l39BlqUkZUqvp3Nx+/3O0PXohbdYLNDOu2LseZyfnfLx40fNoqtdcGRZ0Bn0mYxGXLsOyIqrjx/p9rpMxiPyLGO73uM6Lq9+c8loOGj6MJM04cv1F8qioNfrsNtuCeYzwsBnu93y5fM1WZ6S5wUns1POzi5QEjzX4eL8lDRN+enHP5LnCUWREfgeV1cfKYsSz/eI44i/+qu/4uRkzM8//YQNzCYjPNvm6uoazwuZTIe4XsBqu6cf9tinGWWu4SSWI1BlRaEyKmFhodv3hMFnIrXRQRneMeMRSWPYaoyXafjUwFEUNJ6fpm7QJKMVvhcgjIiKRe0FopM9Zl3ooE00ECW9hsxaOVQDjOFqLTZhjIeUhs7ogC9Tx45CO++mI2/9Wh7Wo6YI0xGCIQXCsi0tBCMrKttF+YJSQanAVgW2UjiqQinNIo0ywjHS6HRUNVf3UyP7Nc6sRu+24vomXrc0U6tCN40jS1Sp8wW6ETrHauJqpVsr0KyRNWOTQmpeJktpQ2YZrnHjrVlHVv+4glOD+GpjdlwcOBg3fbZ6oEXtB1pa/9N3tVyWbTskecLtwxfSPEaWFbZwiZOELE20l+FAlGgesW6vh+d6SPR1xJHeJ00TPnz4wN/8T3/H+18+8OLFJauVDikvLl5oehwjCjyfz7m5uTXA0My8b/PwmBFHOyxRARbz+QmlguXqET/QVbovX24pi5I4yagqycV4Qq83oCgq1usNjuNT5BW7bUS0ixmNBgy6fcKOTyf0CHyfXbQj3m3ZLBeAYNjvcH/3wHq9o9cds90m2JZNtEtJ4owsT/F8n2z5yM8//4SUFUEQMp3OyNKC9XprsGemxzEMTTJesdlsAFgul7x8+ZL1esXtzY0GDochL15c0DNA28FggO95BL7L4+Mj0+mUIPB5+PGebqeDMlTgk8mEeB/zyy+/kOc5Yegzm01ZrzdUZcF6tWQwGCJlSZ6ndDqhIaZUrFZLzs7mvH79iiiKKAotIH1/d6erpvs9QlZcX3/BcXzO53PKErbLDbN5yPnslNUuxlYWjgBhWVRYWi2skvohbRagVIfiljpa/LJO59Rz1MxWpSS2aSNUSrPRIizN22eBUFbD4ScNU0cjBmUMXJOXEkITN4qaufiQo2u3+9XGyRQfD3kvnuacG4PW2JBDtwFoKkILsG0H2zJwrGYsjJVD4diWGRtDuy2sZk3LykLW92065pXSob1SWrvAtmqz9RTZ8JUxaxuw54F5qrnhpjigd0IYY3cwNhbSkroJvR61J6PHk8FpwtiWETv+/dcSkPW1m8up9z6aSDqsCDsa0KoqyW4Xs44jNus1/e6A0/mcMt4Tx
TFBJySvKqJox3gy1IrKQcB3797Q63b4+eef+dd/+4Fut0NRSv7bf/2/+fTpipcvXzEYDBpsVWzaleI4ZjAYGIbVUHcMKEm322WxWOC6Ho+PuivgxcUl/TDUXjKQpgmb9ZqOUUWP4wQh6r5XTX00m80ALUnXCTvMZnPDn55gCa1Q/vjwQJpmbLdbTk9PGY8mrBYbsjSjyNdUBUxPpkyn03pZEAS+IXl0qCphdClT8qxs5kdRlLiuFruoVdsnkwmOo0Vy7u7u+PLlSwNcnU6nzOdzDY1xXU5OTlivlqw3epwc19G0QllGVRb07B6u57M3xIul1J7VYDBAodhF1xRlhe8HDcatxrTpPJRoQt40TVgsHg2Y+Zztdst6vWbQ73N5fobv+2w3e3bbvTawYY9OJ2SfZsiWjoSwa89B6xXoPJTVzLWn1Xozh+ucc2s5HnK9mpG4LrjVc/m4ENY2NOZkZv+DMXuyPlvL7smaaTsr6Py15tbT2NLj7oLjTbTXmjls+1rbn/wq4mr9k63fDzqi0hhd/Z6S9WxsH+GwPWvM6hs+rrq0vZ922bZGoAshSOP4q2O0E/ntAXkukfjcgP257Tgx+vwNmS+rvp/ay2uKAtrbjPYxk1ISBCFJkJIXJUWlqUY8P2C1WvP5+gvCErz7zXe4XkAUJyglePnqJe9/eU+v1+OXX35pkrlVVTGfz+n3+/zbv/1bwxFWM6XWHsxyueS3794hhODjB83V9f1f/zV+EHBzc0+WZfi+z2g0MsDegijak6SZKQ7IxoORssLz9bE/ffpEmsV0u1pH8vz8nDTNDOreavi+BoMhWVZhCRiNBrx8+ZKyyjWerPIZDIbMZjMeHh64urri4eEe0MKymlVWNT2WsZkH0+m0WRCfP3/m5uaWJNG4uCzTBrXeiqIgTlIcM2ZXnz/zm9/8hr/4i7/g7u5Oy8e5HtPpCY5j8/D4oA1bFGnSxk4Xv1JkWUGW6Z7b6XRqGv6vsW27MZ7r9RrQCy/LMqSUWnNzMEAphe8HhKEiSwsQuj/W81xKBUlRIqpKRxl1jCZ1zhilELb7ZH4/mYZHD+bG4KiDj1Ovr+O10xQ8vhGdfGPaP/lZX8Nzy0Upra5RVzwP4enTaxfW8+eqz1RVugRYtyDWLVdCPD3ecXGhNmSa5t7QgFMrE9ShM1CVfFWC5RvGrJ2XapeP4dC3VX+mMg3k7VLuc1/icXn52STiN6ojzx3nubI1aLZSXW01eT5xiOkFLcOLmUyGvLHTCXEdn6LQyP/A9/GCkGgXaYbL0GcXxbx+8x39QY8sS3hcrowBTFgt15ydX9Dp9Hj37jdYlsVyuWQ+n/Pw8ECW6XzZ73//vUG+CzN2Et/XlNHz+RnL5Zr5bM5sNifLMj5//kycJET7lG6312hG3t7e43o+QujJkmZbsjwhyzNOplM2mzWzkxnvP2ij2ul0ePnykv0+otfrMZmcIM3jcLlaE4ZdfL/DbhuTpgWTyVizVgTaOCG0yG5Vlez3e05PT9luI9I0J0szhBD0+4OGtNF1XW5vb3n79i3z+RwpJa7rMhwOGjiHlJLr62smkwlBEPD4+Ei8j+l2OgwHI85Oz+mEXX0vsznj8YirK91ONZudEPg69O52uti2bRSxtNBwGIY4ju6kqPtChRANOBdoWr4A0xql5/DHj1cIocPZTqfLYrFmvY3o9gZaoT7XRKWyzI0+q2F+qSqqyuLXgN/H7z19rYwXZjcG7dhwCSGaRv1jQ9esg7bB4OuA6JtVyCPnpb3m2kb310xZ7YEpDt0CtcNT5+JqD612LJ5ctwLXsvCEwhWHqq4tNN0+iEZ85dgz+wpOf1yCbdOMKKUa8GNNmlf3TNYSX23l4+e2b325bc+sTct7PPC1lW/nytpb/eW3qzmWid/rfXXooatrdeXRcd2G3TROYtabLUmSsk8Tk0i1eVwsCTtdzs4viPYJ//qvP5AXJe/e/ZbT8xcoYfGXv/sdYdhhu93xww//DghOTmamcqYn6t3dHYvFUisTxUlzLS9evGDQH2DbNpeXl7x+/Zr9fs92qw2QfnhIU2XTfZt1w3o9/vtoT57n+L6HxksVdDod3r75jnfvfoNtO/zpTz/q8BcL3w95++Y7Xry4JAxDEzJorcuffv4TSRLz+vVrTk9PTRfDFt/3OTk54eRkwmjUJwh9ExKGnJ7OjbGcIITgy5cv5HneeKmj0YjT0zmvX7/WuqHGyCiluL+/RwhBt9tDSvANJc9ms8P3AtarDff3D4bUMWWz2ZImOVJqYz2ZTBvGjpqJ49OnT+z3e9OxsGvmW5qm3NzcEEURl5eX9PuawWS73TEcjfjurZbeK4sC33d5dXlBpxuy3qywHa0DWXNyKVmBrAzjbI1yV8/O4ece3s8ZlrpzpoaiPJu7+sa6fWLcjn/S0tR4Zl0+ZyC/+vet/4w3V6/P9vXUNqRstSYeX7O5gEavQ8PCBS4WtgBXGPJGx8J3BIGtf9bbs7TZtUVtd/vXm+/7WJbV0FM/h3VpD3zTRtS64G99uc+9bg/Gt3J49b/2tdSDqyeFpcG+rXtqmrMto1xOnXAUWLZDkqZkaQoCLMdF2DZht8vVly+cnM1x/YDH5Zqs+NFQclvs9wnuuUdVVliWTZKk/PTTzwwGAwOXCIy3q+Ertu0Y7nwH1y2QUhGEHR4eHrm708yuVakZGWQleXh4oBNq7Nt0MqHbC1lvdg3C3vc90jjl4eGe8XjI4vGRstXl8OnjJ4bDEXEcs1ysSRLdm/n69RvOzs5RCubzM9IkJctSPn36SJrG/O3f/jWe52LbDo7j0ut1jajJ1iD4e5SlxPNd5vMZcZwacsoxu92OOI7pdrv8/PPPXF1d47oaUzZoQrpDH2ecxKRZhmO7rDc74mTPcrkgivcUec70ZM7bt9+xjyPyoqAoC/KiQFg2F5eXfLn+wj7aN/Ohbupvg3Y1KabW3ayNbD03bNum1+uTFyVJvCPLU/q9AYPhiEJKFsvKdJIofN+BSpJXOgfm2DbCcjRrhvjKT/hqAT/zV1DSMLvU5AE0ha/ncsfHD3R1tIYaj0yp1jWJQz7uaJ9fM5LPXr/SHlHjKz2TTnrycVX7iYfjPT2mMt1FtZaBibDqfYXCtb0nUJV6+5oCyNA1f8u7qge1qT6Yf83njwxZ+4J//Yt83v39ysX9xhPq8OXWCdKDIatFhC3LIs81P3/pOYZfTU8aWwSgLFbbHeORTnxHUUS3G2I7No7r8vr1O5bLR9brDXGSopRgv0/pdDqErsdyueaf/umfefXyBZ7n8Yc//KG579PTUxaLBePxmPl83lD8FIWh7hG6vacyjKybzQbLspiezFlvNigh+O6776hKyWKxxPU8FosF0T4hTRNev36FbdssHhb4nt8sXiklYdjRLU0bTWZYlpLtZoEQFkJk3Nzc8fvff8+gP8+IZIwAACAASURBVOLkZMJ6o0Oz1XpFWRbc3NxwcjLl7XdvdGLeKGI/PDxwcnJi+OEqyjJnvVkhsI1Gpu62CIJAy8jFMXmecnb2mjdv3jTe3s3NDTc3NyiliOOU
x8WSbqdLFEVYlqDb7ZOmmgZpNpuTZjl5XtLp6KT8bD7TD9ksZ73eEPg+l5eXjZF/eHhgvV4znU55fHxkt9uhlOLy8pI8zxvPrWbv2Gy2DPp9/TAx5KS6kgqeZ7Pf7LBtmzBwsEpFlWiVLNd2sV2POMv+rOf03CaMjUnTFN/3sG278cra+9SGt02/3TZGTx7qQgsV6/VTv98ytEeOAaqWrRNPjmteHK6/XdQ0R9R2xWDqzDW082a1GLdS3yZ41OepiwCmoGHYqlGqqbjW52rZsmdwZq5LmqbNoNWhpW3bDYNoOzFZh5rHTeDHHlpN8XLcKH78JT/Hjl7v17b4x537B9dWNgNSnxf0lyOERq57ntfck1ZaklhCYQsdgu6iLULAxYtzfN8DoKoKPn78yHw+4/bmljiO6XV1I/RwqD0MxxKsFg/c3NzS7+tQ6/7+nv1+z3Q6Jc9z3r9/T7fbJQiCJkle521WqxWhH+rStu2yXK6IYk0fNBwN9X1YmkjQDwLef/jAqzdvzD25KKVpeZRU7PcFjmMzGo24uHjBfH7C9fVnNps1AovRSOtXfrm+JcsqwqBn4CGPjMdDM24Sx7F59+4d3W6H5WIFSj/wwjDk5cuXeJ5LmmY4jo3vu+R5hm15VFWF53lcXl6y2WwacOx0OuVv//ZvePv2rTFeegyCICBJErT+q02aF0gEo9GETjfUtN62YDgeG4GXHN9xyMuKDx+vePv2DXGScnp6ysX5OZ6ntRU6nQ5ZpuEvuhFezwvb1oBfx3EaY9btdtlud+Rpzs8/v2c0GgKKMi/o9fqaYMDVEj7dbk8LZO9iBBKhpKb3Frbxgp6ug3Zo1V4X9XZIyms9DaVUE/0cG7Nvva47der1cPjcAZqhc25P1ZXqy9CfwRg0HS4LakX0w9q1DaOtLWwN6FWKSmr+w1IqbKvuCHrq9HB0vzXD7VMomB5fS+iuCmXaIi0hkMZDPaYCq7dnPbN20q6dB6tP3E6+t5OTysS77UE+/tKee93e/iPe26/tV5ambG4c1MoMlG08tclkYlhFXZTKdOJWatMvkVRVwePjgvn8lNFoQBTt6fU6BMGQn3/+yeRbEsbjEa9evSJNU75c3yBVxW/evqHf7wPKYJh0Ra3X6+G6LldXV/T7/UbkQxuaiyZJfXFxQbyLiYiYzbS3cf+4IAw7jEcjfvj3f0dWivv7B8aTMXmuYQKvX2sG1YeHR9PkLLi8vKTT8fnxpx/ZbNZ0OloMV7PQ2gbToyhLie/pPkvLCrEtm+l0wmLxyKtXL4miiD/+8Y+EYUi329Wycp7Py5cvdUXx4YFev0tZlqyWK2RlMZ0ODGWS07C53tzcsFqtGI/HjMdjQGPP1us1y+WSzUYn74Owg1KQ5TmdbpfBcMh4PCaKdiilxZGvrj7R7XU5PztDKt1fuotiVsslVVnhOC673Y7RaMR8Pm/mtVaKsuh2u2YsUrZbTao5GAyYz+coCff7B/zA5+z8nH20o8gzXNfGzgR5UdLrhXihB2mB41h0Ay2zl5WmWm5Zxus9CgFbBu04yjCf0N6HOBizOup5LkfcLs613y+LqgmfhRERNnt8ZUDr9/9jyX19Tttcj2sb3UqlKEtBaQyPbN3jkwhKHSq28HU+UR9fGzTL1vqjNcmjsMCSWkhY1Q+LpxHr86DZ47xUm9Oo7ZG1c1X1ZwLD8/6trW34jnNnv7ZfvbXP+WvJU8dQESlZNb18vu83IVJR5E2YabkWstSiIFWRYwlJmWesl48kScZ0MuTFxTkWksfFgmi74fbmmulkStgJybO0oXYuy4rJeESWZcT7hMFwyOnpGQIY9O94eHig2+sx6Pcoy5Jot8f3Anwv0Pxqe4307/UGnJycICyH0WgEQguvhGFXQyZkhet5DEdDbMcmDAOurhKWD0uGwyG+71FWpWk3qri+vm5C281mx3a7A6WPqduChozHQ/qDEIVkNj+hkiX/9f/6b3z8+JHz8wv+7u/+wLt37/A8h7v7W4IgYDIZc3d/y93dDWVZMZ3MCQKfTqfXTP7xeExdhev1eo2oSWbCsVqdKcsysrwkTUv8IMS2c+J9wunpGUGgc3/b7V4LxmJR66+GQUie5QRhhzTZc3NzS5YlWJbF7e1tU319+fIlaZpyf39PFEWEYUhRFEgpm7kRBCHD4YhbU6RZrR7pdQJ8f0gYuOzjHYHvolSl5woS33ORAsqqMAzMdhO+/bmH8zdmOUodilm1Y9EuBBwqhDx1PmRFVuTNkXTuuO0lPbUAtSf23HU2a7N1LNSB8cKuvTWpNPCVgydYX2N9HE3Lpe+rDqfrauZTA6t7BaTQRTeFwbrpIW0iKIXJo7UCva8FTUxoVhuLqqrIsgzHyM3VT4njZGSTuzLG4jm3+skAHVttDpWUY3e0fcPH+7VD3vqLtW1NN6TfEHiuS7ergaY1Q6tj6WvTRHqKvCyJIq0ufnF+RhKnLBePzGdzqjLnwy8/a6yRbTMaDSmLnCLPwLjYFoLddsdysWAyHqOkYrlckSQpspJ0Oh06nS5VdYdjO/h+gOtKk1TXoc5isURViuFgSFmUZGnGeDLBsizujACIEDYX5xfs9hH39/ecns55XCwARX/QwxICx9Y9p6vlEs/XSe+Hh3uSJNaN81VFluWUhWYHqUrJbqsXt+PC4+KGFy/OOTmZ8rvf/SWfP1+TZTnL5Qrb/sAu2hIEHsPhkCTZ60b1N6/YbiMs4bBardhsdozHY7bbrenvtIyxCJrURKejRUWWy2WDt9tut/h+yUg45NmGfn9AEqdYwiYMO+T5mtev35iChI1SOUmekySxZsndbdnvtkynY3q9Hsvlkv1+33jAu92O9XrNarVisVgAGKjHDMuymZ+e0un0uLu/Z7fbMh6P6YY+vV6HzWZFWeR0fJ+8qpCyACURQoKskFWhW+gsH2U9z5hRz9Ff3wy84ShkBL7KodWORf0ZS1oNE8lza7s2lL8WLf2aYQM0vEII0zZo1nk7lyWeYsgaYyYPHQptZ+m4gFFUkrJyKKzGLBrFdZBYaM5EDVD+Vc/s+Ea0F1M86wU9d9PfYgQ4tsC/Nli/9rdveXPtn2WpuaaU0q0T9SLqdru6L05KI5mmGmOmJdEiTk9njEdDkiiiqgqGgx4CxXq95OLFC6LtBoFiPjthenJCVZbcPzwQ+C6vX74kjWOUVOx2keH7slitNkSRlpx78+ateRjoZvFer2sqfhqi4QiH4XDMbrelKEq6YZ+iKImiWAuh5BmL5SMnsxmPjw9kWYrnOSwWmkLodHbK1afPlGVJEAZst5sGEpFlGUmSkmU5nbBDZhXYtsd+nyBljOe5VNJlMu2z2awZjyd89+47bNvh/fuP/I//8c+cnEz5/vvf0e/38DyH0WjIl5vP+L5mrE2SBFnmRFFsKoM98lxTXAshGhqkGt4jpWzC/jRNTQjjEEUxnW6H0WRs0PWS8WhEp9tlMplg25buGU1jov2e5XrDzZcvuLag19VV45ubm6a7YLfb8enTJ5RS9E1yv4bpPDw8EMcxP/74J4RwKAuJJSzG4xEvLs4
o8kQLLMuSfq9Dd9ClKBVKCWynJC8VaZYbwKcmd1Tqzxmsb2xCmDwUT6KP43ler6m2sawf5J1O5wCXkk+Noj7m8bp6anDrdy3zot1RgInQLCGQpRbj1d3iWrC6drmOHZjnjFl9D+1NKSgk5JUEWzUFWMvElRUYlbdadPlXqpm15W8sfYuKpO3mtm/+OSt8/Lf68/9f3O7/SNh5vNUudaUqTQZpNDNrfJxQWlE9LwqKPG+S/o7r0u/16YYhq+UDVZUznYywLHBswfnZnPFwwKvLC95/+MhquWIf7RAIup0OeZaxWCxQSmk2iyzXua7xRGuOllqp3HU9fN8niiK+fPmC67omp+ZRVal+MhUFk8kU13UpDFzg4uIFSim645CHhwekrDg9nbNarwznfsbd3R1n83MNERCCwWBgyAx1H2QcJwaBn+B7IWWpFXK22y3j8ZTReMxw5PGb375is17z8PCIEJqdoiiqpnNhNpsznoz4+PEX03pVEccZldRkgP1BH90Ur0PM2pgBjZpOlmXNuASBpvhWgOf7gKAoS4IgJM8Lcq/Q1UrbptvrURQlZalYbzYslwseHx804DaJdb+kqrAtge/7jeHSvG1ayXw+nz+pbM9mswYy4zgO3U7I5YtLPNdhF62JtlvuH25IEg0xGfT7lBKE5eK4GdE+Zx+lpi/RpsJCPZPj+o/OZ72P+GrB12urfu+4sAAHyFEdVdW4L30A/aMN86jDzKcXcPis0tas+UOd+5JVpXURhECYSiMm51aHj/U11aGyVRtlcTCMT9AK6J7VSkGFoOTAV1Y/GqrDGXTV89c8s/ZAtZVfjg1bG0wLB1f42DM7drO/VRT4te3PJf2fuvA22gPWaO9ORy/kupiRpwlKSs00W1UISyeHO0EXezhkt1kS7zeMhmNOz84AKIoMVVpcX33k9OyMLIkJA5/7+zvCIOTNm9dae/H6s+7nNKFUt9ttME5KqQZ97nke2+0WpVQzxjV4FCDLcs5OzyiriqyIkEoxmU5ZLpfkedGEZt1ul916iWVrL+Knn37Bd31evnzF1lBaTyZjTk5O+Pz5s1ET1zCJJNYGd5fvyfOSk5NTfM9nNpsRRTuGwwGe52sIRNhltVrT6XTo9wf86U8/0ut3ODubMT+dcXv3mTTNyIuCZF8S+NqA1A8/19XtPUEQNKK8SZIghGhCoslkwsPjgjjOyfKMMOyglOLz52suX7xgOByS5wVxrHsqPU+TT15dXbHfR40wDarU2EAzH/M8Z7PZIKXk1atXlGXJdrtlMBgYuu0IgJubGwB8r4Nt+7x69YptmbPZPBIGri6sCIVlC4oyo5I66HEcLUxs25YW57EshCG///+bM3suRdNea3qeP9XJaP9ucdDNsG0b4WjVqLI8aNc+XTfPr1H95uFn08CuP0wljYxcqxqq0zuqMTJPCwDykC8TXxtrlPZ2K6UZNCyFJnFFYVOnwCS20yJ9bMWZX+fMkEhZIpXOA7U9LSE0VsScl1rOqrkRy0KW5fEhn92++SWr50PSb3l6z72nlFZVdh1HQzAsi6ooicuSbL/H9Rxc09lvWzaubWmQoi1wPYfL6UtsYREnEZ2g03B0TadTPn74wPv3P3Nxfk6v3+X1y9f8/vvf68qirPCC8CtvpJ5sWs4uahDq4/G4qfhp4wau7eLaWvj2cbHEci2WqxVZnmtPrShwPZ/AD3hYLsjygm5XG5nxeMx+v2/yjtqIHLRCHdeliPZIpUiyFMvWidRKSWzXJUkznVTPCpyhx8uXc66vvwBQVSVxHBlVpJj5fIZtQxC4vHr1mn/9139hu4lw7JD1Zqs7CoKAOE5wHJvZbMZ4PDaGvs4JScIwMA3gPbrdkH2cYtmaaiov8kb9XZYVXuiCUgz6fUCyLAoWj4/s4z2D4YAXLy4QqsT3bGYnUzQZZMpmszY9mQUfPnxgOBzgug6r1Zqq0gwgYdjFQjCaTNltI0ASRTu6nS5h4NIJe2R5wvRkxnK1RipBXiokHrbj4PoeXl5SCgeVVSYgEo0kXS0UIOuE9pOuSW0Ya5eoLdBbLzbdLiWbHFEtpCLrEM54OwpFVeTNnLNtG7tRWjpUE5VqKairxtepo8RnF6as/1Y7Na3w1zLMHpo63IDkD4tSO391Olw0hzS/t6qtQCYlqbQ04we6NVEorehUSollOnxqvrd6+8qYlUWKZSlUVVBUBZYA17ZRNc11qXEw3U4Hy4I4jjUbJ+iw7Si5WQ9qG6fy56qR39p+LWd3yAvUmoH6tSNsXGxUJanygn7YIcsSqrKi3+9pYKcj0MrLiiD0mUwnmjFXaQ+uPxpgWxa5GZNXb1/zcH+v8wdIHpYPLJYLsjJnHyVMpidstjrR3Ov12O/3nExPWG80Yv5xseTi4oI0y7mczTXItazwPJ/RQIdEX+5usGybTqfLdhfx/v0HXr56SVVVhE6HUuo0hW15COExGs84iTLWiyVJkjCdTon2EZuN1hwoq4qw1yXOUvbRnqwqyasSZVsIz6USUKDY7RPSZIOsBLatiRk9z2F6MiLseERRxNjuMxrrPkzLEqRJSZ4BysN1u1i2j7B8ihIWyzWWEYo+mc/xA79R2smzmDzbG/BwjG1JXNdGGqm/xABvhcF6Cc/DRbDPC/IiRciKLNnTCXwuzk85Pz/F9x1evbzk4eGO/W6HVVkUVU5epMzmE77//i/x/YCb21u2mw2zk1PKsmKz3NEJQzzH5+HhPTdfrnn16pIXL17zxz/+gFKwjzOur+/JixLPD7E9TfBZFBlZURGlKXmVYjmBsV2WYR40WC8sLGE8FGolM+2NgBH6ELIxc5bSgNeaALU2PrYwFDpKE3cLpel3UJKqgpof0LKENgIG4ynQQj1KliihxVqqGsRqmpgFYNmHsFQ1a8sYYgWFlMYQHaQipahbmkyzvTJ90QITLLYLhvpnJQ4GVGfBLITlUArJXgqKQiIt1fCkCSWosFFFacal7tfU21fGTFYFta6dlKrpaZSV5kZCged6jEdjPM9luVyw3++RqmzAbE+OJ79G+z5nwBovq/Xe12XbP19YsCzb0CYpqPRTxzKPBKkqQs9DlTm+53E6m+H5HqvVkjRP6XYDXrx4QbfXwXNdKin54YcfyE1/Y17kTGczlqslf/8P/0BRFlxdXRFnMb1hj4Flk/Rz9vuY29s7k+jXd7WLIpQCx3EZjzvEccJ83ufqSpMlPjw8cHp6ys+//Mx4PKbT63J1dcX9wwPDoWaFXa5WeH6AsF22j8umTzPapzw+rHAcX4NMww5CaJqePNeA1k63i+25CKEV5u3NjjLNdVXKslAWuIGP6+uc4VpssWyLXq9LGAacnZ3y6tVLfvjhB4SgycNFuz2r5Y6yACECbDugqjQwsz8ZkWcpd/cPOjQ0IaXneziORVkUlJUGmnY6PoNBlygtSHa6TcnOTYuRpQkPKTXq3bdtJsMZvTDgxx+HYNn8L//zf2IwGurqZRyTZjkn8xmWBdPpGFDE8b45/2DQpxv2GPRGbFZbPMdnt425v3vg4uJc48c6OiQfjSbE8Z7HxXv6hc
S2NZ+dQ0VeVWz2MWlZYHsenrIoK4OI5+mc1iyxAK2EvjYXzU/q/yvDAKinsTYstftkH9hplXlfCNGIbdfGUyoNXaEW9QVsy0JaNtLSD+86ZBRKNOc+Yupp7qA+R9UKKxVtp8IwxJp7r1sKqUG76qiYx+E9XTDRjNAFFpWUFOb+bfSc1cZdIXCb9inZMpJfGzOp2S0PFQmNU6l/L0vtPmvci969He49V3lsJ/5rbEz9+ePP2S0b/lwF59fyZ0q1vnElG/QwSrv2jmFW6HZDut0OWZpSlJrqudvvYDuCm5sbpidTyqLEdmy6HU0L/fHjJzzPpdftYwmbwXDIarni8sUl+zjm/v6B4WDEarXCcwMuLrSgxnq95vT0lE6nw3w+N3kvjWkTQkuuRVFEp9Nht9uxXDxSVSWD4cB0IgiKMidJU6SU7JNEC/gqTSypUOzjPX/6058IgwDfdSmyDMdzWC4X3N/f8+rVS6pKJ9qHwyHbaIdl2ea70Bz5iMN3o71omzjWVbyiyEjTRGPXyoKqKpum/LVRlnJdj7zIcD2POM5YrlZcvLggDHwWi0fyPGexWJC/fsVkPCTLdL6p2+vg+y69bkcLrKSSTbJGVJrc0zUsv7ZtU1YVnTDAsQWLh0csx+K7t2+xHP295kXB3e0tQghOz+a8ev0WgWIfbaiqku1mw26300SSCqPdqXUJAt8nDDu6GONZdHohVVWSpCmj0cgwrEBVSt0Z4DhILAoj6hJFeyoEjhM0eZxD8HSIqFq59HpFP/3A8d+f2Q7HEE8S4HWeDo5R9YfYtA3laEOsnuatnr73VW6r/qgxnO3q5KEQoJpbeg6C8WyaSRsarQwvBFLZ2rnVrJRUylCsCoESRtlAHCzvM8bseBCOBgxdiVqv11iWxXq9Js9zPN/5qsLSPmb7Jtr/9P0fgLCW4Nlj1NuvhaVfFQTMsSzbxrUsrW7uVKYsr6mnXV+zVXiBy3qzJAw7DPoDQ3Ht8O7db5BSUhT/L23v+SRJll35/d5zLUKnrCzVjRlMYxeKi13akmb8w/ltaYs1A0jjADsCMz0tqrt0pQrlWrzHD8/dMzIysroXWD6zrKiMDOHqXr/i3HNqptMpb9684dmzZ5SF6V56noclDfbr48fL7ndjJMvlkqZp+OKLL3Bdl0+fPgEMoz2e53F+fk6SJNzc3PDp0yfCMKAsc25uq04f02W5TA1tjZCEcURd+xwdnfDx40equibNDNe9JSWL6Zxf/tmfYVkWZ2dn9DUz3w8IwoCyKOnJ621LIqXBgDV1TVFk5JnpLJq6XkBVl/Ro9n7Gcrs1nPtxx7p7cXGB74dcXt2AsGjbhu12Q5alQyrvug5Vh7rvGyK7uoee5zEZTxiP1kTbClvWeK5hNJGWKR0UVUWRZybtbBre/fiWvMz5y7/+K2zXoa0bHMcjikIc2+P1j+8YjyPapiSOQl6+/JJ3796htSbZbNluU7Lkhu16y2J+jO+HlGXB1c0K2zETGoEfEPiB2Ubfp6prQ6LpB3hBaIg+fZ+8qGlaw+Hfto8Wnv6nrsfwYPtNg4HogbsOq5QPRwvvgofD33eozLPbfLjbnMczqM81RETnzIS0kcJMiJoc0+rSb/P3dgD+9j9m/STObH+5rtuhyA0dcp7nQ4F5V/1ot/uy+9xjXn84WDuO7FBEts/93/9/35HJrrhvWaYR4DkOEkXo+bSt6WTOZjP80KDR02VCWRUsZvMBxDmZTHj79i2e5w16j+PxmI8fP/LmzRtevHgxkAv+6le/Io5HgMX11Q2r1XpIu7/77jvm8zk3NzfEcTzgq3qKmp6R1jCiOkhhQLyeZ+Ye4zikaSuyrMD3PfwgYD6fcnV9iWhMjWS7NTOFbVUzGY/5In4xQBKCIGC7XVOUOVVlmhKWNE0PYbT9aOuaMi9Yb1aIDgyKUIShz3w+pSwDbm9vB/HcHptnWDMCpLRJs4L3Hz4ZObqOw9/cnMzwNALW6zWz2Zg4CtHKgGQvL/OhdhJHMZNxzYYEoTVNU5tItmkQQFWUWLIjcswyjk9PODo6BgFVXXF0dIq0LGazKWmWMIpCri4/kucVz58dIYXFarXi7et3LG9NFN1Dd1zPxhEOeZlxc3vDs2fPCcKQ5Y2hF5/N5rx69QNBqJBVRaA0theabVctRVGBbLFs/7M29G9dn2t87T6/mxWZmtldgNK/7lBQIZD3bHC/tt1H8PcdmRic4E/58UNd3iEQkmJIn4UpIRon2ZXpUD0F904npFsHx5l25zDNY5/TmlZvnyb1fEu9FmE/TL67g/1n7E7475+Afaf1uRrbfg3uofPtU0o5cEHZtt0xY5j3GyOUHXOpAVRWdUkYBeR5znJ5w3g8Rko5OKCLiwscx+mIBUOqqhrGcPqa1/HxMR8/XpF2CkqLxWJghXAch/F4TNjxj52dnVFVFUVRDIPQJ6fHqLZCqRbbkpyfn7Farcy0gGsTBCGN1liWTRgF/M3f/DW3yyVpmvLmjYFeqKo1qa7rorTGcezhZlNVBrgaRSGjOIIko65bBApLmlER27JBmPG09WqF6x4BYoBV9HOVWpsh8bKoaRoDQl6v16zXK0ajiRna3m6wLEEQmMjm6srgwS6enFM5DWVZUJUVdVV3DsUMf0/HRoylyHPapkEIA+vwHIfKdUmSDU3bcnFxwfnTC2zbYbVemaH1TcLR8SlV2TCfLRiPYxzbZr1e8eH9J5RuaVuN5wcotaSua47mR3zxxQuyouCqoy4/OTmlqVs+vP+IFALX9fA8nzCKaZUmTzOKqsULayNeIgS+5yFsl6p+cKn/T12H6tD9Oekf9yEbSt1x+it13yHu2qzpiO7b/33w7u5EwoPITNNF8g8Dkd1AY9+BDqvT1dXa6AeInQisJwS6myC9fwwOsmbcKbKYTodxZvfnMe9ybvOBbdsOtDP9Qdo/WP2O7B/w3Z3cLY4eOhC7fzvo3ekm+m3TkpayPziG1tixHUajMUIYgr6yNgyjQRgAwojJnhyzXm+68R0bISTbbcLz5887JL/GcVxAcHHxlPF4zNXVFXX9gR9/fEOR14zH42Hy4OjoyChBKTUYds9+GkVmSNt1zZhRU1dYlgGeIgR1XeG6Ln7o4/sBZd101D4Nk8mUm9trRuOYly+fY1k2H999JE/zblRHEcUR4/GYkT1CoweSyLIokdyy2aYmDbctcwkrhdvh8/Ii7WpMJWD41PoRICPg6yOFjVIQx2OCwGc8Hpl0tiwotwWjOOLli+c8f/6My6tPbDab7li0VGWO7xsacHODaHC3Ja5rdARk58TGkwnjybgrLmt8FeK4bgfrsPj40ZQEomhEkmVUZUVV1WbY3XKYTGa0TUuaJLiei1YwikccHR3huS6hH7JNNnz89IkkywnjEUHgk2Y5WZYyn82YTaaGp61qOlX7HIWxE1U3XepssVMS/v9l7Tqp/TTx0MTA3fv6LqexZaV018G/X/bpJwQ+V5vut+OhQwMTTHT8gAe2W2s98LQd/mxpusBSI7RESA26I87W4g5GKxRG4eUzNbNdoKwZTO1bqXfgWfMa4
437+gfC/myta3eDd9PQB87pEVK33YPy8ADcPVqWYSywLQtrAPKaGovQiriDZgihGY2mzBYzHMehrIuuSH3NkydPiCLDp5UkKZvNluXSREibjSEb3G63fPXVOV999RVv3rxhPl9QFCWO7SFDuxMcyWia7cqXuwAAIABJREFUhpOTE16/fj0MvINhjPA8jyiKBi4tz3OYTs9pmorJbEZZlkwmBgJRVsZAXcdhNp93bBRGZ9IPAqazCWVZMxlPkJhJg7woEFIYvYHQJ45jIEGpgMViTtu0XUNHYgtBU5ZkicCZGlZbfxjOt9huN1xdXbHdblFKcX19jeO4HB2d4tguQRAwmSg224SmqdlsN4BRBg/DgJcvX/Lu/Tta1ZIkCdPppEu3IU1zQGBJm7ZpKNKMqiiQQhDHMdPpFMdzWd4aR3p6dsZmszZ6m+MRQRRiOy5CCoI4Zjye4Lg2220CypAm2rZLEIQo1dI0ijwryPMCARSZIa9sleLs/ALHD8mLkropkNImz0uSJGW92ZAkCVmeo5UgiAL8KCLJctKsoK7b7vr9V44y/cy1e2PftblDAcKdbdwNr0M/1vSQ7FQIQwS6/x27Nra7Hfef24n2uB957dr7g9r27hIGxgO7DZROWU2beUxpWcbhacEuCebBNLOqqgGRbjBbYMk70YU+gtplou2pSvYpgg6diEOOrF+HCB13D+xAX6PUwCTQR5I9p77jWGhNB1y1CV3PjDAVGZ5tdwpCaqBzjuMYp7L5lH5is0mwrEvG4zHbbWI6hrohimKSxHStFosjisIQAYKgLGscx2Y8nvDVVyOWy+VALdOL9jrdsHuvatTvZ57ngIlAJtMpQjRDShBFEdPplO12S92YY73eGGqh6+trVqvVIO0WRRFZeg0wkCKGkc9iMTc001lGVRY4rkPgeVjzGU1VkSYJVdVQlRnWZISQpkbhOA5+4BIEHp7n4jg2s9mM09PToelj1JWO0Uowm03x/IDb5YrN1qikG4bdnE+fPjKdTvnlL3/J9eUly+WS46Mj/Dgmz82NoW1bw/e/WnFzfYVEc3JywosXzwyfmWoRtiF6FJ7Ntixom5bJ8TG21uRFgR/4xGODG6zrEq1bClvg2DZKtRwdHQ3zmEWes1qtzfxsd10tFnNG4zGfrpdstgllkVNXJWHgU9UVSZIihSQMIoqiIs8yFAYM3TYNlrQ5ZJ9AJ+RrTNzoxZpMYTfd69OvQxCn3fVoikafRt4PCHqbNHWou5rZbl37Lsh43GntbuuhrMrYK90Ylbo3PbT72JejDu2XNliPToqvP1g9jMMU/oUwKWffhe/XwXhvoB7hITtG7333GWbvDtjDAv+uU3qMTnf3wB06ALsHbd/L7x6Y3gkLAbpp8FwbIQWu7YBqh7m12WxKmqZDhLTeGOVw3/cBA02Q0uLo6Ji6romiiDwvuLq6Rik91NIcxwBJl8slz549YzKeDWysnucNA+695FpfZ2tbsy09SWT/A4YZ1unIBbfbLdfX14RhxHyxwHY9bm4ME0TTtJycnnJ6eorv+7SNoq01t9eG3cH1jlitVgSBZ4bI2wZRm5pb4PuMRzFxGJCqjLoqadsarZ1uhtQZjnOe59ze3gwjWr0S12Qy4cn5E6S0KQoDvWiamjiOmM6mzOdz3r9/z9dff82TiydmX22LzWZD1o0fua5h34ijiDCIuL5echOuaFrN0WLO2dkpYRxT1hXKgLdQluTJyxddqcPi48cPpGnGiXsMQvP+/RtWq1tcx2U+nzOfThFCkmcZYRgaphLHQQhh+N0E+L5Hq1qWqxXvP3wysJS2oi4LqsjQNVVVPWQrCGibhqaqaOsWrTTCgqauEHbQVa13rv8+JRSHG1ufrwP/vPVYxKO17sxW3DkMHjYNhjTQ8R5Ee7vNvMeK90ZrQ9NWtQEHc58hZL82d6gR0BX2TGTXdYV0V4vrBULbDsNmTsO/oZt5KKoyz+l79bBD3vtfsz73fq3vGGz77bqL0hqUaii7ArPvuPi+181KlsOAd6tbg5K/uSYIA05PT8myjJubG7IsGyTiqqpiuVwOw8mGUrnl17/+NdfX10ynBmO2XK5xHZfz83O22+2AwPc8b5A96424n93sI1wTMRSdHQia2giUGPYNgwnrCRVd18P3I5TWnbBHaQrXp6dY0iFNtswXc66vrmiaijiOaNvadEN9geP7xFHIZDSix2vWVUngLxiPRoxGI5qmHGqnSZJSFAVZljGbzTg7O++G541U28ePn6ib1qT50kTJRVHgOjbr1ZJPnz51s5MwGo+ouqaHgQ40RrCkXBP4AS+ePaNuWs7Ozjg+WuAEPlXbMJpNzYUsTT10mxi2EW80wh+P0RLef3rHp7c/kGcJIPj46S2BGxAGEVmad0IsR9zc3BinmmU4ljRizGkClk9WKsq6QbeKoqhRzYaiyCmLAsuSeH6AYzuGKEIpqrrqaHEUTatxHPHAmfX/33cgu8Y+DGT/BEXQTwUEjzlGcQ+kehhdcL9+zb3ndtEK/Wvu19zocB33HeGhJsMhxztsSxdxmfJV95k7QZjq7yY/Bc04tHY9+edfd9/L779nv6Z26HWf3ckDJ6nH0fTOLPCNuG/b1Kb+UuSkWYpjWTiWQHtwdHTEer1mMpng2i7L5RLf97m5uWExnw8DyFVVsdlsCMMQKSWj0Wg4KScnJ0wmE7Is48mTJ3z11Vd89913FHmJZdkURcF2ux0oaIzwrAFg+r6hqOm7qb0TzrL07vRoMehEti1YtkVZVDRNSxCETKczPM9ntVpzdX1FXTWEYYRju5x3QNwg9Igio8ReViVRFJi5RNvGcz3aqmE8js3satMS+D6jOB7omm3HYbtdM5mMOT83dSqtDYXOl1/+meEFu12zXm8JwwA/iKjbhqIsAG04xppmgPKEYYglRCddZ+iCBuUmIVGtoWQ6ms1ASk6OjwmCgEo3KDReFIBl0QpBEIUUQnFzdcn7Dx8MyFlqmnSFbStOTmeoVvH+3Qe+//ZbxqMJZV51IGBzXpfLJVma4jkmWs+LglY62N7I0D8LqOoGrYyWaK8P2tOWrzcJ621KkeU0bYsrDc2zELvDO3dGZMz8jjlCcEfe8HMK5L0t/JyAY9++DtWzd6Ou3c/cpbjfr3HvihPt8qj1KSr6fgS3H4HtpsH76SsMASRguqPmNfvb/DOd2X73cf8A3b1mDz2801XY9b6HDuBjKeTwed1r98G2++/vv2e3dterZQvV3qlFKwNacRyHo8WCyWTEn/50NagDrddrnjw9x3Eclss1aI3reMSxSUOKvDTDxLbNdDIz8I3bFefnF/zqz79im2x58+YdWWoodk5OToYB8iRJyLKMJEkGTvq+EeB0qY7WJqr13ADbgqLISJMMjRGjtW0zWrXZpNiui+v6BH5IFEe4XkAQhl09L2GTJcymc2azGVJqTk5PaeqStq04Pl4wjmPqpkZogWoaFvMZ88kU1Socz8Af1psNjuvgeQYga4bAI9q2HWp/UkrCMOTrr7+lyEtevvySsqrxPQ8/8PB8z0Ar2prpdDIYQtO2
bDZmgFu1iroyNT8pLI5PF0zCEWEQUpQlfhwzGkXkTY2qKsq2pqxKbjdrPl5d8frtG96+e88m2TIZT/izL17wi7/4JSeRRWAbtaw4DojDiDCISbYZ8/kMz/OGayXLcvC9LqI0o3tIyxhlZ0x1rXBthdcN+GutTRc0CKnqlqppsJsWaTmUjXHOPSp+1xbUznUuEIOYbu/QdqEQn1ufc2a7kdMDx9fjwMT9Gen9aLAnYd23s12ndihyM+ms6lK/zwcj+/7hnn8Zjr0p/mu9s/GDw/wfcGb7B+jucf/vnWNiJzTcO6iP7cju84/dNfrX7O/8/vv67a6qyjgLS+J6Dq7nYTsOTgeTCMNwGAB3HIe8NNHBhw8fODs7YxSNTUG3VcyieKgfGlrnasCZbTZbvv/ue2SHjn/31ihmG2UdQ8E8HptOZBiGg5Raz+MFBoAcBIFRd+oe67pEStuMiglwHJfAjxCWxLbNTJrZJk2W5li2zcnJGScncHtzy/XVLY7j8OzZM2azMZvVkuvrS8OC2kVcruMgEdgyJgpDPMcz1DDCQgmB5TometTG0ZRlyXJ5Q9M0RnS349DPOxyY67qm8KtaojiiaWvC0GhwNk1NHBmgcFEUrG5XHTGmwS+2rYECtK3CkpaRoAtHpHlGIwwwN2tqsrpilWdcr5f84dvv+Jc/fc16u0ULsBwbGXi0lmB2Mudi7hN7NmVR4zo2lpDUlWY+n3NyfI7nGiX5tmnIsxzPMTTYddOC49NIn7yoaOuSIs8o8wzVNjReg++bCQOtjHOypMR3PVzXKJnXqgMl7NvQznXdOwO9V2zfrQc/tn5uVPbwD/c/43Ofs/+33fLRw9TyoT+Q8q6Stb89+7Z80KHtQ0MOBDsPXdnPTDP3N+q+Q+mcSgfRPfS6/rW7jYK79993gHLnvYec4SEvv/v5jtMRMtoWnm2cGN0FooGbmxuWyxsuLp4Y5H/nnJJ0Oygm1XXdaRfarNcmtWqaZtBclFJydnaG7/v87ne/Mwy2loXrupyeniKEmfEsimIAy/aK3VrrIe3cFSHuNQosKbEtp8OZmVO33aRkmWGqlZaN6hxBnhdUdcVkOuXoaIHnhcxmgqrKh9S1VUY7MgxGZNnWAIptC9d2DG0LAt8PDL0MFpVSxBPD9bXZrgZm2J7v69mzZ4RhyM3NLUmS8MWXXzCdzFFK4+cFGsXt0jQCbNsIhYzi0UBImW5TwsCk7ZZrAM1xPCIMAnND1FAWObaUVHXJ8vaWtK1pHZtWtdyu17x+/5ZNliA9o2faKMU6z/hwc8Xl7Ygjf8Lx7IzpdEyR53z/7Xe8//CBv/jVX7JYzPB9c77iKDKHWGlWqyXbJKERNqusxbIUum2RHc6wvxQdx2U8nlDXLdvNlizPaVqF5Tq4toPnaUot0Xu6mfsuRml90MHsZhqHVu9QPrf2i+7D/wfrOpx9PRbE9K99LP3dtVMppZGp0w/1Pna/cz9qHHwAusuk9DBwr3v6ju6AyUcc6WfJGe+fgf2C4c7z3Emu9+//qfrawTvDkDA/npo+Vj+7y+EFtpTIDi+nNdQdnkq1muVyTRBElGXN5fUn6rri/PyMp8+eEoURX7/+IwLRdRsl2+0GKQVBYAbF+y5kH4VdXV3RKkUYBEjLgGuzPOfTx0sur666YfM1s9kU3Tt7DWjd1fQKg3IHbCkZx2MaZRSGqqqmqgqub6758OkD6/USy3ZMt0dI6laRFxXSdvD8kKZpOT055Xgx58379xR1iUSzOD7BsQSjcTxIonmO222DCdkty8F1fahrmtJgsKrSAHht2+bJkyeD1OCPP/7I1dU1x8fHPHnyBJC8e/uBsqpo2tpMDngei8Uc2zL7EQYhi9kC1ejuc20EgjD0ODk5w5ISt7sheJ5H0zbUeUqlFbZnE8QjmjwnL2s2SYG2PKTjULUtWBY4Lts8Z7lZUyxcNpsVo1gTRRGz+ZxXr94NUnNRFJNlOWFopliausZKt/iBj3B8akqU1tRC4To2pRA0bU1eaMqqQmvI8ow0TU1Ea1mgNG1r6muOZd9FYtB140RHc2NSMY28u8xlT5/T5VYdQ+yhpREHWC3uLynkUJMbHultC+AuhTxkh7vIhENRWG+Du6nx8FlC49gWSt0FL/1x2GVx67dm9/ehqtin6Rinv/uoBUYBruux7N4zDqSZ/Vfre6Gy7NuiQ33qbhMQd3cSAaazs+fV6XNuZYat1CAE2p1AbYbM2XNWh9LW3Vx/txGglDKki0KglaKoi4580aXRUDWaIIiwHZub2zV1pbBsl7bVOLbH7fUt84kRws3SjOVqxfF8ytHxcSecu0IpM8ycdarfnu+iULi+R6uhahRpkZNXLdl6i1YKz7k2uCrPo64rVNMyn5l0LfQDw9V/u+JaWozHU1oN0jWMGEm+4f2ntxTlltHYQloKP/Dx/JBaC1ZpxeUq5e31R9abhCdlxpMqI0sSFh9njMOQ49mco9mEk8WUOPbwXIcP795RVzXHR2c4bkCyzSmzGqSRU1PKDMK3qsG2JUGwYL1e8fr1j1xdX2FbNnVd8+OPPwCCy8tryqJkMpnSNopPN58QSnB+/gQpFFVREwcxL54+5+Z2SVM3eJ6HagVlWXF8fIJj2VRaI2hZbleUdUk8meKPR8ggIq01L19+xcWf/RX/92/+O3/49ht8x0KLlrouyOuG2+Wa4mxCnhZYeHhuyMnJE8ajt6RpRVUpbNvDdQNspzKcW2gcz6aoNW1bodsSWzY4gU3mWRSupCw0eZWT5gm3q1u2aWqo2e2ue940tG2C0BLfMTTmPfwjryqEZWNbFkVdm7E6Aa02ND1SWtBpZapWYYteHO6Ao8FQcd01E8zrevvsHYpxCr27kMbwO3u7S2X1YO9K3Y0i7qfI/ZaIHcM3ttwau+Z+tFS3zXDD7qtbA+gVo640UFH2zqrb7J7OaPB0e6UrpcG2ZDeSeX/O4IA6kxp+QO5FXPv7ufu3zgO3e87m3jTBQye3X2QU3Dmyg2Govi9tt0tlorVGKkWDRigDPnQtj9pryYuatknYiJRRFHWGfMJ8PmW1uuX2Zsl0MmKT57i2hQwDrq8usR2HwPewLYfVdsuHj++pajP4LC0LYUnWmxWO65JmBa4XU9YtRa1Is4q2rimcmqr6hOc4CKFRdYOuGxJvS+j7TMYTytKkpJvbhI9XN7RCc3pxSlquicYev/r3zzk6ipGiwQ88sG2WacX7m5Ty9SVXSmO5Lktd0SyvyLcJlRCUraZqFHlRcHt7w9nRhIsnZwYD5/m4nofjhDiuhSstHN9GC00cRyjVsFzeomnNeFJZcnt7Q1Hk/OIXvwQtePXqey4uLvjyy5fcXN/StpqTxQlxEON7PnVZU2YFq5slr757xfn5BePxhOvrG5o6N2K9dTeaYlnUQpOkG65W1wR+wOk4YjSbYeg/JX4w4vjkCZO3V2C/xwk98jJhk6+JQpeqaypURUNpNQQTj8CLCYIRyTanaQVSOgRBTJ4X5EVKXVUmYpKati5pmxzXsfBcjyzyyHObtpW0yhjqJtmQZnmn0doiOkS
9JS1cR6LyhDorsOMI23GRTYkQGtuR1Gjazm6UNnz3sjXOoG1bVN0QWjZS33coZi5RdBxeJsYxqaru7FR09dv+pt+LYd+R8fT21Kuh7zqw3s7vgK79tz50qXqIxowzvHNkxmZ7au4+ktrdB0S3ZeLOe+idH7MR4u4vO3X/u88xTlwNztysgxRA9zsrLWaW6zBif39Pd7se/e+7z+++7tDrLSkfvPexdei7GtUC3VyXEEjbRiMoi4qSisV8biAHmw1FWTKdzggCn9XqliRNkVKy3mzQLTSt4ub2kqwouLi4YDqds92mNOsNbauwXYe8yCmrirJqWa23jEYLqrolz0zB3+pSQqXMMHbge4bZVoNqTf1sOp0gRQfszGvyH15zvbplfjxhPB7xt//xr/iLf/cS11M0TQoWLLcJy/qKxgIZeFhRgKoVeVNiqRrtWFiBTzSbMoljYt/D1g1vP7zn8vIDz54+ZT6dc7taY8mKprGYzhaotsZxXZM5KVPTurm5paoNb1cQhExnU55ePGW7Tfj06RNBEPDixXNmswU/fP8DqoWjxQLX9RBCUuQFaZJxdXWF6/rMpnN8z6Oqao6Pj7m4eGomO9BUTc3VakUrBeEkxvYdtGgoypq6LXj99gNvf/MbPm62aEuQ1xWVYYak6YDeZVlTFBVhYI5xluckSYrnglYmEoqiiG2y4frmkm2ypizTO4gEisAL8DzXNBCsO3Gfuq7ZdHATpbrxP2FmgB3XMcPwhdFetVC4jkWjbBpt5OgcW5rUXoAy9LBmFFEKQCJUT0m9Y1xdhKL63KrTKhJC3Bn5vbKSuGfkfUzQ20nf1OrrqrCLGZNDOeRQMAHcy4o+h3rQ7AQm8OC193yH3vm925H+qeFRmIS5faRB8qhupsGa9A7jLiLi0EY9svYd2O57P9fF/B9Z+3n87knppbfoTroUgsD3kZbFydkZoW/z9ddfA4rRKOLTp4/8+3/3lYlCqgJpu9SNYrVJ8IIVaVFTNy1ph5+SlqSq6+74ZIbjP/2AlA6uLc3YFKIbgg6Jwogo8BFoAtclikKeX1zw8uULBIJku2W92vL04oxNuiLLUr781Uu++OIljuOiRYXletRtRdEoaqXJ65pVlpLVFd4oRGqPukhpdEOFwg19xvMZJ7MJi1HI7ad3/OF3v6EsC05PTvHcEaPYZbXaorXEdgWjSYDWLVpbjMcTrq4vTVc1CDk/P2c0NoLHZVlycnLKaDQaSCln0xmXl1ckScNoJLsObUVR5JydnXF6ejoY0mQy5vT01IgcA0mWcn25ZJlueXJxxvHZCUjFNluzyQvqNmOb3/Kn779GexFa1BRlhedaxHGEbluqqqYoahgL4miMUpClGev1mtOTGNs2qV8UmQH821uP7RaqqqEoMprmrkHVCwSborsYpkdME8fFtg0XXNuYBlFRFOimxXNdatseZqYtS5JlObqpEI5RCbMRCKWoWt01z1qENkytuvdlQ1hi6te6y8N67QZTCRo8VVePNTi3fVvarX8dUke/X/uyHrx/d/Xd2N3P2bX1Q8w2P9dnsLPf+zW1uzL94e066Mx6h3YvD/+Za3c+c/fA7WNT9r9zcJQ/0anZvyM8PFB3Hr0XtABwbJswMFipDx+vefrkgidPn/Lqu2+4vV0xnY2ZTGem7lW3SNthOpuDNBS+eVHy6sfXZEVJmudGpk0YaIHtOMhWYdkO0rIMXYzrmpPRGkbVIAyJRzHnJ8eGWUS1BJ6Zx4ziEb7nGkcrBKenR7z9EHJ7c0UYGhDwNklwA4F0NLebLUlREE2n2OuSt5efWOaK//V//z94fnHKn373W775l6/J2worcHHjAG0LtC148YsvGI8C/v6//F9sNlv+03/43xBCMBlNkMJCCsOm6gcuVWWG76MoYrPpxT984nhEVRrantlsRhyPub6+wbYKhJSD0adpQpIkAz/c06cXnJwcU+SGzttI5BkB3yAIQEOSZFStIoxibNcly9YgFFg1SuS4XovrKZJmY9SG6gbphuasK4MFs6SDxsII8IAUFtPJlPFoRBgGWJahRIrCsBMyzlivl9zeLpGWzbPnL2hbAwsRUnQcfg1CGMdmWWZG1PPMuSmLuou8TVmhKs1xa3TTiX9omqZCCfBdhyDwUUIg6xpdllRNQ6salALdKCzp3I/MHjG/HT/WXfnqgaHvdhL733u72f39/uNDLOc+hGL/cw593v57Ptd0uLNeHtTKfuo9/To4aL77AeZ3OTipn1zivsP5XCS2W/8a7oB7G73v4R+7W+w2AvrhOaUUVdMY1LznY9k2Whi66eVySZaf8uz5C1zPQUjJ8fExWV4gpIXr+lRNie341GXBarPi3YePWLZNmqX4foAlLZrW9F2qukEIw05qnILokPauoWTusGeO6zKdTHAtSRAEuEFAXpREUcTR8TFVWQAt0SggLfNuaF4zns+wHEVRp9QKiqYhU4qkLCjaGisIWZyf8jf/6e/Ii4QffvyRtC5Im4K0yrFFyzhyCaIxs198yXff/Inf/+ZfePXDD5yfvWQcH2PbHn5g0eqyY7AwqXEcj9hsVmbCIgjxXB/P9btB8qIDBCtUW1FXDXmeo7UBpVqWwdvN53MWizlKtfiBTxAG3RwteJ6L1oq6qQ0TrWoQSEPlU+bYnsC2Fb6vOVqEnJ2M+fbtR3RTY0uLKt9SJBnTowWnixPiKMT3QupKUZQVq9WG2WyO63lmSsIxHdamaXEcFykleVGSZwWj8ZjRaMx6baiKAj+AqeikCvUwBN477KZpjE6BZeG6jimKqxY/cJG2g21LHNdDejatBj8MjZCHAEcJKqGxhEYKUFKCZepisksnB1uhTy53r3nju0TXUDPXv8ay7kdK+w7lrmam2MeN9QiAn5Mh9fb72ED5odd/7u93+6rvOeXPIRl216PkjPcjKwtLWo/Xze5vyaOv2cfIPIY/2d2JQ7n7fki7+zdbGuZUww5iCo4911pVVyyXFV+8fE662fLtt9+ymE0MMDNLDBi0rgn8gG1aslzfsl0nVE1FVZWEkcGZaSSjjq8szTKAjsvNRjUNwgLb8xiPR8ymU+IoxrHkMLTteR5WGBJEMUEQ0LSKVgscz8N2JFrX2LbA0RZKK1qtkNIyoNuqwXY96lbz6s1r3n68JhjFuF7M24/v+Mf/V/OHb/9EqRoc16FoKjbZBtm6NPOIsi6wHIu//Ou/5Js/fcc//9M/Yf9dhEXIaOJ1w+RjlDZ1qPl8QVmmHB0dkWUplmUIK6WUTCZTsizn3bv3eK7P0dExVbk1KbqwmM1mWJZNVVUD9dFms8G2HQPctSzGkzGj0YjNZmOowz9+wg4DmqqhrSssNHWe0oiWUWBzfjzhZrNgvV2bIe+2QasOPjJfcDydE4U24/EMpQRv37zn++9fMRpNsS1niKbSNDX05Y2ZIU22KbbtMB5PSBJDU14UBUdHR4xGY5Pma3O9eZ5nNEe3Cck2oSzr7tLXaNVgS4HrOTRtS5anhKKn32moawth2wgpESgsobGFNnqbUqItq5P+7KbqjSXcK5LrDgmgEUMpCP
hKWrwgxVE/twVdMpCgVvvS7De4X8mucZuqqiOyamYcrIbBjomkIQrAnyjDSRLkRVIEtTnEYD27ZY+x6aYXHlyhXyosBbeZesZFQcx8I0LXRd2PXn5+e1Zc5oNGJra5M4kTEwzXI6HV3GE9uk2+3W4NN0OuXRo0dsbW3JOZcrjEab7O/v03BdTk5OKQr49a9/zb/8y7/geR6dTodut4vvS9dX/Y593685VGmakKQxjiN7ysViBigl9SWuO4kwDJlOJ9y8eYM4DonjiPPz8xoZNXWz3gdW/MLJZEK/3+fq1auMx2PG4zG2bbO3t8eVK1eYTqc8efqU0zNRlJyPL+oCMRqNSt6awWq5ZO2HGLaNXlI9qnE2ClOWywVXr+ySJTGaopAkokIQOlSBqim0Gg2SVNLSp9MZt27e4vzijPPzcxynwf6LR5jX7boJ2tjYAKDT6fD+++/zySeflPuwixp8qhqnIPCRRAi1pqjohlqDLlVxq+79Chn+zmJWtdWVAWJlulgVtArWrVC+NwXpqq7VBaLIpR1SNLUuFnXbdOlRXFq0VcWygmmVEmG8POJe7ggvc7Dq8vinlna8XsTe/BAfLfklVlZHcRwzDsdkaUaaZDTcBp12uyxwRo0ONhoNwkgSqeM0odlsimFi+br9krW9vb0NSGxZnuc8f/4cz/NoNpv0uh10QwqJZTdwXNnh5XlOp9um3+/z1u2bgvKZGqZlECchQZixWC5o2Ca6bnB+flF32ABxnNBoqKzXPlmWo+uKZAwoSkkItUvETbJEdcNguVpycTEmTTKKQmW58sTnX9MwLZs0zVh6HmtftJt6KmJg29QwdKvkxKmYuoZt6qSRWOLINfWK6Ow2G+i6hh/5qJpKlRkpaHG/JKCuarpMUR4kjUZDtJ3ITmo+F9H61tYWz1/s8/z5c168eEGn06XT6XLr1g3yrODo6IjpdMLdu3exLPESazbaBL4AFdPZgvPxlL29XXq9Ho8fP8ZxHJ4+fVoXtOFwyIsXLwC4cUOi5R4+fMTm5pDd3bdKswOwLJu5tSxpHtIQiOGAX6KJYnd+cnJCq9ViMV+wtbElNtXlQVN1mNX1WB1Opmny4sULjo6OSjQ5YDQaoSgKOzs7nBwf4a/XvP/uPSzL4uDgRQ1OOU3pMhMKDl+eMF8sUBWDbqdT339JnpPmOUZJEBeSuU6UpMwWS3Z3rzAenzOdzXj58gjbsnDdBkEQ1LpskEmoSq5yHKeuM81ms75/6wlLU0XdUK6MbNsW1L9c1YRhyGgglkVpeWB/bzGrxN2XHSsu766qJWIVmVZBulXRU/XXXS6qru1yIam+15vOG/Aq3anqKGR8TFDy11PUq4Jaj7CquHiG5aj5pwoZvBK2v1nMgsBHN7X6FK081cnkNTfdpvysZWfTbLRQePUznpTI5vnFBe/ce4cwDNnbu0LgC4hiu0Ic3N3dpdvt8tt/+G39Ru7t7bFaLnj48CHD4bAe623b5vT0lOl0yocffkie5zx69Ihut0uj0eDBgwf4vk+n0y73WEJpiMtIOsuy6g6i6rjX63W9hB0OhyyXy5Kr1qHVaqHEMfPZnNlsIYaLukEUxSyXK3TDoIVCHCesvTVxkmDbDqZh4lgmpq5gGTq2bZVEXKFTRGGEWhpeghTRVquJoop7qaZr4mSbyu9j7a0piqLeSQZByHh8UQIWcg20Wi0M3Sydgo8xTIMgCOssTNl1SRGN45AoSnBdR4T2WUan02a5XLJaeWxsbHJxcU671SDLUj777DPeffddHMcRj/x+n+VyiaIoDAaD2ozRdd3aIHQ6nfHJJ38ACjwv5Nq1PoZusVp55HlRGyQcH5/w8OFD+v0ueSGuqo1GozbC3NwU5PP58+d1IcuyjMViUeZ4vloF2bZdA3eu6zKfz8kK4X6FUVTvqaQRkYzKte+jqSlueT0enp5g6Baj0ZAsSclfW2yLlFG6fomFW61W3Ln1Fufn59y/fx/TNBkOBuXeUCIKXdetUUzXdZnNZjx//pygNITc3NzE8zzm83kd5KKUcrskS2u1ius6pS25AGrz+bwqFq/Xrje2O7hTAAAgAElEQVRv9gqFuEyLuEzNcByHVqtVv4HVh6IIEpjUnvCvOrY3C1j1ue8CFFTtFaO3+rdSFHltrFV4PT1dxts/J6b6c49CzOKUvE5/qkZN25DRoeE066VmHMd0diQ2LI5jgeoXS3q9HoYl49BotCEX2GKB4zgUKAyHIyzLlqzJNOPw8IgPP/yQs7MzLMvi7t27tFqt2qf+zp07pGlaZxdMJhPm8zndbpeTk5M6tXq9XpNGEZ12q058397eRlEULi4uME2TOI5fk5A4jsPJyUlt+2w7Lul8SRzHLJdL0TyioKoaWZoTRbGo7xUNRStAUVEUDcu2abVbtB0TXUnRNQXbkVWBaZpYto3rSKJTAXUiumkL01+StQuyPKbVForLi+cvGI/HFEVRSnZgMpkKelpec81Gs15ug3Q+k+kUKBiUvlnSuYt0aLFYYhgmSZJycXFRLqBjFosVw+GG2CYVCc1mB4CDgwM2NjZ45513ODqSPExBic/rsOsvvviiBGwGnF+ckq1kegkCOTRURS+7KqXemdm2Jb9v26TZataSpCROcC23poWYplm/b9U9OZvNqNQo1bhVqRSq9/WTT/9IryvcxJcvX3L79m3eeeedmit3Nr5gPJ5jOBZZJtSOJBai9aDXR9PFtEHusarByF9lNhQKk/kM3TIxMwmkjqIARVV5++23OT4+rg+iKgV9MBgwnU7r/V9lje84jiSmr9ecjy/QNBUNsYlqNNy6caqUBBV7wbq0ioLv2ZldtqWGV+NW9bm8ZPxeJthW5NC0DKm4PBqqpb6T4tsM/MsII5TazDfQ0oo0W+3SKgDi8usRXlUmAMOfGDPr5+PV8776ELJsFAtzuoKCbdsmSzLpHlRd3DzKtne1Ev5TdYqkcYJlWWz0uqxWKxzH4fHjx6iaxnA4ZLQlUPTFxcVrp1JRiBnjarHgvR+8W5MDr1+/DsCLFy/KVKCl2Kc0pJ0/PDyk1WrR7XZ59uwJoaLSbDj1XqHiQVVdTMUqrzqyMAw5OjpiPB7TarXI85zFQqyLKtNF0cuJKiK/9H5VII2miUOD0AFsDCVFJafRdDFNq0ziNlBVrYyQg3a7Q15AGEW4jQbtToesKFjOIzzPw3EcNjY22NzclGXv6Rl5LgdktQB3HEdsiMxyNZKmNNst9vb2mJUW5dXk4Dguu7u7uG6D4+MTokgKzcuXh3Q6PRzHIcsKBoMRL/afkKYJe3t7dVL53bt368JxdnaGqqr0+/16/1gFmoyGQyxbVB1xPBNdraKXIJNeI583b97g6OhQbtDy0Dk8PGS9XtfC8izLGI1GtSytypAsioIrV64wm81otVqcnJwwHo+5du0anV4Pz/Not9v1dd3pdHBd2Zfqus58PpciaRhMpjOSWKYw3TGJM9mRFWSlgYOBOMXIf4OAZIqq1BGHb91+C9dx+eMfnzIcDgXtL2tEGIasVqs6THq9XjMajeoCVa0abNuuuWaGaaD
n8v5WFJgwDMUg1HUpsrzeDV+uU98qZo7j1F3VZVSx+u+ojBar9lXViVghDIZlkuU5WdmVaWUhUypC6iVqRXlX1H8qQJ7l3ypwVaGpqBl5SUnX9Fc7hFdqhdczN18TqF/6n+JbhawQ+kFJVK0AiGqPFvkiFE9iIT9WxotnZ2ci89F1tre3mVs2y8UCVdeYz+e12eS1a9cwTZNOp8OTJ09otVrcv3+f07NTRhsb/OY3vykDd0U1cHxygm1ZzOdzjk9Oyje2xWq55ODgoCb0jkYjPM9jMpmIJXjg15whoV6s6XTaNfG0slKWhPK0BjpGo1FtXez7IVGSoKjScRl5TpJmFAXYmnxO1cSlRFFFfN5ouDQaLs2mjanEaBR0e21MS5Kuk0yixVTNBORQipNEjDKjBEXTxaVAk9c5m80YDobs7OwwHo85Pj6pD41OpyORd5ZJs9EkTeXiXnkeliOOHXlR1Be7gCEJqioOu4KSy/U7m83Yu3KVKIp58WKf4bBb7hAN0jRjd3eX5fL/pezNliS50vy+n+/u4bFHRq5VKKDQ6Gk2ZjhmMpFjQw5NDyCZbB5w+BBzoxvZSMYLmdHYpFqcrRd0A6gt18hYfV958Z1zMipRAKkwK1QhMxYPdz/f+Zb/sudXv/oVf/mXf2lKc+34vlqtWC6Xarq657NXr7m+fqfOY0ZdNwyiIb7vYVmy4V1fX9O2MqgYjYSX2LSycep7UbMzbFs06LQckP5ZmqbmHj07O8OyLO7v73n34QMvX77k5cuX3N5ck6Ypl6+/oK5rHh7uWC5PWa3vxVVpPqdoax7SDU3fMRxNiOIB+8eNuCopmXdbi616Hr0Fu8NeSmL1+VlRkKupf9u2ppeoM2bNy9RZ6W63M4FsOpvR1LVS6RWmgxeFuArzFkWRoUsFri9VYRjJ5qUSqh8NZqGyk9IIciPFo06w47jU9ZPJr/7Csmu2H72XxROw1bKsp/780b91nmSpzO0Y+a//dhwHOvld23WqHGmxLRfLdXAsC1tprcsx9MjKaLGw6WkRFYwOepmqYlkSILGU0octlKeux3V9XM+laTo2271ACYKQ+fKUrm6Uf1+tRCptRiOZagaBR9f33K8e2Fx/UMfT8/OvvuKXf/o1juvy4foDlmXxm9/+hs1mzatXr/hPv/oVtmVRBSG557BPUtoOxpMZH65vub6+4d/+m3/L999/z+PqEduGaBKxXq/NEMayLPEDOCQc9mvpY7ou09kcL4jIsoR31x8YRBG267BaC4C2amqm0wlt27Hebajbjr6Tc+8rypBtWzSNbFx+4CvLNE/UKpwYy4YwDESdNPLxbDmn8XCIHwY0fUedF7R9K5kaFk1bYdngeDZFlVHVBV3fCVsilB7T3d09eVHhuR6TyZTNZkvTtFycX+L5AUWeU9fS03Q9X9HPPCx6okCcs2zHIVfDgSQRV/JoMMBza6X4IfaIlm1ze3fLIA6k/+e7YnQMKuiM+Pv/+l85O1vStuIyFQQuRZnw/kOKZb+kbRv6TkxBwiBiPNaekjJUSdODqHssF1RVYcqoJE24ub1hNptADw8PK4MRbNuW+4cHArUOxStC+mKxUqAIgoCT5ZL7uzusvmd/2HHYbQl8H7sXB6R6OMR1fXrEYGR/SKmqmsAfMJ4Kz7gqS4aDAUHoiakPlvjAokQdOqmQmlbiwsur1xRFzrd//I5BHPHixWdst1uydM98JqDw27s7A3SP45jLF1fc3t+x3Ykw6fnZGX4UUu+2JHlGZ8HAG+H1HpEf4DsuVVtB04ELgerXlUVBnmXSz/6xYFZVDU3T4bpPLHfdeJTmqYfjeNi2a7ImobmAbbtUZaPAqsqgt1WkcNXzqor6I0OUvj1C2SOgVZ3W1nWrAppNR0ejXJMcZUnVdj1t1SiCuCqBLCVb1Eufx+otwJUEsOuUdlpHp8KbZTtPYoNdT1f30GmvARfX9dUU1lbI+BHvb26YTWYsZnMmszl1UTOMR8znU3bpgcvXn4kB7z/+I8vFgl/8y1/S9B3/8T/+P/iez+PjI+vVo0w7Vyum4zH7/Y54EIHjkuYlg2jAf/n137PbCSjz/fU1u/2enUKcN01Pngl2Z7/fczKfsXp4oCxr2q5nPB6SZRk39w+8u74hDCUordYfSJNEHMYVsPZxu6LrJFv1HZu2b2mrms62iGPBfDmWiOqFiufXNBWu04mdmG0TDyOqsiTLcy7OT2Vw0rZ0TUvT98TjEVme87hZEQ+GRNFA9T4bosBhMIh5fFyDZWM7Hk3bUdaNwRF1XUeSZgRBxGgyYTgcGQDr43prsuP5dI5lddw/3JMeEqJBxGQ6ZRTHDMdjBoOW9x9u2Ox2FFVFVdf8wz//E19++SWfvX7Fdr9jEsdYdDiWyyAMOJnNub+/Ix5GRIHPYjYmz118r+dsOZEJeJNSlgVvvv+Gquqoy4a2kjZMU1b0PYZorWWllsslbV3TVjWDIMSzPbzAwwsEnlSqzCNUqhGXl5d4QUCnNoqiLHlUG5qgB+QaufQErkPgOjwqH9hS4eA6y6aoxX1+v09oe+lDxfGQvChJk5TA9yirlLwqcR3J4l1PHKCqQgCu4+EEYY1YzOcn1HXJdp/g+gHn5xcGoxmo6eXj4yO333/Pmw+yyfeWRQvcr9cA1G2jDIQ7qkLI83EcY/UWvitOYnVVs1PcYh0vdPn8yWCmMyMNjtX9HNu2DQZMP44hEvq1z6ef+ka0bfujPtpH6P6+/+i5xwTx4/fVVBBDZdIBse9plCqGZ1vaYlEkco/rTNuW3cVyVNaobgJVmlrYBGEo5a56pT7xddtS1jWP6w3D4Yg4HrF+3NA3PV98/jlnp2fsk4Noc7Ud682GX379NS+urri+vaUoCt5fX+M7LmHoq5F0QVFmYhDr2lKCO470r8ZjHh5XbDcbXn32kt9/8w11VeHYNrvtFt9zqWqBkfiBy3a3ZbPdUBQVQRjiehLsH1aijBFGPsPhgDzPKCrJgqCjVFM427aZTqc4rmS3QSiOVEHoKzSNhx+4RFGoskGZVgrVxsf1HPb7lqqqafueqm5xfQEt264IBtplhef66joKjkq7dU+nNX0v3Nf97kBdt4ZP2PeWmtbVildakuclD/crBXVYEsdD+r4nLwqsviVWnMCyqri5uaHrYbDdMp8vhPMZBkTDmJOTJW0n2fTF1QW/3W746quv2O+20iPsana7LePJiCxLefPmO15/+ZqqimnbmrOzUw6JOEC9uLogLypub1eEQWz6QE/ORr4yQ+64uLgwrAIZFuSqVzggiCLTF9V9I8FwRWYdaIcry7LUQEE+q2tq41Tf9h23t9c0jfSfslI4rL1Slanblrbt6ZDrIeu0ZxD6BI5N00SGWTAajUwfdrvdmmlqkiTsdjsAg3LQk3M9tDC0yKLAbRrSPFNKM5GZ1Ea2KMoetjuiIOTl1RULNR29v78nPRxkM0gSY333vLf+k+5M+iLoYKJ7Z8/hFMfQBqMuqx7Hr3/+fHhC/OuH/n/9Ov274+lo3/cms3seENv2Y50z/XnPH89/IpSpHsdTMt1db6
SALAtaFUhlMQl9ZhDIwl6v14RegONKhuE0LpPRhM8/+5yLy3P+/u//nuv371jOZ+RZhuvYLE/nQE/oy67jWlBUNZVSh/iv/9//i+u4hIHPy5cv+PDuHZvNmtl0wj5N6HtR/uyahsAPBF6QHHA9n6apeHi4RWhIYmhclmL+EEUhw+FISiIlie15gtGbTMZYHbiOje8FDAahgb+4nsNwqLiGXUuWpeY66YnlaCiqK/Qy4WybHqniHOUQLxJBvm8TRTGj0ZCiqJQrUU0QhOR5QVFUOLarhkoNTdMalodeyGApIj6KonQJWDR1weP6gbu7eyxsFicn/Pznf8J8sWCz3TKIh5yoauGff/sboffE4tb0r//Vv+Zf/OIXZFlK33c8Pq6oa6FNff75K9Uf7phMxpRVbmwCy7KkArregt7GsV0DBNXEch0UbNvm5ubmqG0jPcDD4UDbtiwWJ/zh22+NvJRu5AMmsdAIA61TpgNJ3/e4tkXfNYi8tQOWY0RLNQQoiqTnFBaC77IcGWC4TottyWDL1T64YHqsGgKi4T56HUorQuSHhsMhnpra6hiSpim1GqjN53O8/U5EJzsx9Wnbluvra+q6ZjGd8fDwwJ1ys3p8fKSqKmO4MhqNzM/yPP/pnpkOYPoE6pvoeNEfB6Xnwex5VnYcbD710AtCv4/hQj57j5963fOfHx+H/TyQmWN5At3KC+XnZVXJUMIWDwCZkMru4jgOo+GY29s7Ke1ezQnCgM1mQ5mVLJZzgtGAJE/4q7/6KyzL4h//6R/wPI8vX7+mrAriQUhVFSxPLsiyjIvzM77//juwWixLJsO+79FZ4IcuX37xBU1bMRgG7A89YeRhWRFVJQDctm2wrI4gdJi5IxwvoKwq2rKh7WqlhfVknGzbFp7vUlctZdUQBgFRNJUFWRREYcBkMiSKQoIgpO87mqZWSg0DhsOB4vxFBpogk6sGsIjjEX3fEYYeXSdDA8uyqapa2aq12LZI9IShaJRJcG05qD6ObTlG/6ooSmVj97SgBFIyIAyECL5arYy8jg7AZ6dn2LZDmmVcX1+T5zleENJ3Pe/fv+Pu7tbg7hxXBElXqxW/+JOvuPvwnul0zGp1R1nlTGcTMyh5+fIF+/0WrJ6zszPevv2etoXxaEaSpIShZFQabH1cWuqFrzOpNE3Z7/doVQ7btqmbmtPTUzUNrcxmoXvZbdsaJQqd0ekFHUURw8mYMkt5/+4OyxKOtZ6Oaoqc+Hz3NJ2sF2wBuGZphu95lGWOq9oAeuinBTzFeEU4km0rE3490dUZflMJNUwHPzHBll7lxcUF8UgyzbTIeXx8JEkSJYgQspjOWCwWlAofp/XcNAj3SSGnf8KAqscPDU26J6L3cSD6qYD0PEv7qUBkMjiTSX0s7XMMuTjOzI6zLf2642PVfzqFc9PP658dS9+20jfjCcMmv5BAVyujXJ3VVFVNb2FGwXleiDpmJPV9mqSMRyN8VyaP5eM9/9O/+p85P7/kP/2n/0iZF3z28gV5ntF1DU4UMIgDppMhttXhuD2ebxE0yFDCcmi6jj/7+iu22w0nJ1N+/7vfEccDzs6WLOZT4edtt9D35HlGVZdMRjN6LIqqwcls5rOJSPhEEVUtNmPz+dQ4azvjJ/liP/BIUwHZ+oFLHAtQ0fN8NbG2Dck5SRIDDxmPxwZCIhxcsG2Huu4V/9XCshw818O2OwK/pw5ayqLi9uae7WZPNIgI/Ii2a8mzHV3b44fuUTtB3agKl9a2Hbe3t8xmc+KByOI8Pj5yf3/PcrnE86W8/uzzz4njmG+++Ybr6xtubu84PTsnLwrevX/Ht999x9WLl3ieR55lxIMBs+mE+WSC3TUkhx2z+ZRhPTDcTC0LtF4/cnq6RI1fCYKQ2XSB6wb0ncVo1JlAKQMAT0FeZIqnJ8m6zKzrmi+++ALP8/j1r3/NZDY3UBpd3u12O+P2pYUUbds2pagESp94MKBXYgBNU6ljDnDd/gkEXpTmuHRQbKqStm7wfA/ftYkHsQHqaqSCJqofV1o6S9RT4yzLsOmMhJg4twt/1VLA+izLuLy8JMkzvv32WzabDfP5lMlkQp7nvLi8wlEwrMlkguM4ShNuZaSJ9LT3J+lMx6KIOoDoLOg40BwHp0+BXz9Z2j3L4I5L2uMyVQemT70enkpOnVU9L3v1z/q+/4EYZNeKK5NjPbETLEuMUXSPTOPiuq6j7Xpxj1I/d2xPjYcHDIKITPEHbdvCtlxOFlOCIOBXv/oVm82G6WTKzc01YeSzOJmz322YTsf0bU008DgctlxenpMcdiRpRmd5rHfibj4chpRlyqvPX5ClKadnp/RtK9mH0+PYNus1JElLELrUTcsinhAEHo7r4bjC62vblvF4RBwPSNMDo9FIqTxslGKD/JnNpgS+i+vYBKEr4MXOwg9cgiAkCHyyLP/omgV+iO04DAY+nuuTJClJkZmsoGtRDtkuYq/ZQW8zHI5pmp7tRuzmLAt83zcEZMlQBcMmFKxWqbf0ijgPtmICjEYj9XlS1iVpYqALp6enDIcjHtcbul7oQVdXV5Sq+S/qG5KZjccjmaZG5/znX70lCEQ7H+Dk5ITT01NFXevw/ZDd9kDgD2gbKePHoxk3Nzdm4WtsVRzHZhKpndzX6zUvXrzg66+/NkFOB5h3794ZSWmN7NcYrMlkYhazzrQGAxFFyLKcIi8oC2GbNG1LWVYsFh0TJVMkRHope9veUhuFZlSA59gyhfZ902LQYhOistF9hPXU5PfjhGc8GZt+mua0dl3Hzd2daPXtRKvPDXzjOarpTYMgVEbEar0peMf+cKDIc9O37yXl/2jdf3IAcBwo9As1OFYf+POA9amAcvycT5WizwOPyaZ+Iqt7XpYev8fzsvOTpa4eLvC8h2dj9T2uct1u6loFN0XHUSKNriNNUHqx2yrSjEOSMI5HXFxccHZ1xj/902+w+o5f/Pzn/Pa3v2W7XfNv/vIv6OlwrY4oCtjtMuJhSNz7TCcT7qyKjgbHG9ACeZrx8uUL0vTA2dk5q4cHoiikyDI8x+ZkIT23Is+IByG+L6oS48kMP/Epioow9FkuT3CVDV/XNQxHA6JBIPxTqzdmvCDCh4PIo2vF+xIEviI3rBiwxkPxQ9QKFq7rSzblCZVGRPlE7siyHAXp6BTwsSRJUmzb4eTk1DT8y7Imy1I8T4jklbIPcxzp25RlpUjIObPZnC+++IK2kUWlxQpt26Eocg7JHtt1aHvY7g/YtkNWVKLJ1jSUVcnF5SVnp6dcX18TDyKCIGK/33N7/QGLhi+/eMVgEBolFEGphybDkknqnv1eI/AbVQpnyuvg0Sx2ERYcGZHPOI558eIFURRxdnbGYiESUVoe6FgdQ2cxmty+XC65uroymdJ+vwcwpaAeJmz2O3ZJajKv3rbEtazt8VxPZbgtbVFQFTW1ZVGXOVbfmuxTZ1s6c9OlrC6h0zQ16rHHyQkIvEszSLSK7Gq14r3qiw2GMR8+fGAwGjKdTk2JGkURv/j5n7BdbyjLks1mw2a3k0Cd50r4YMd4PFb2hdD/V
DDTJ0crZuhphA5mx2Joz6eNwA/Kuqdg8cOe2vGfTwXC5+8BfBRknw8B2rbFsaxPN/eP+nCW9UR4NyUqT0qzbdvStA3YogEWBAG+anJu1hvG4wlx+KTLP5tOmc/FJec//+f/ws/+5CteXF7y+29+w3w+56uvXrPb7bm8OiP0HTabB5kY+j5np0v2hy1VJW7Q2A1B6DGMT4jjAYvFlPfv3zMZT0jThEEkVveTyYi8yEmSPSeLE1Bgk6KsadqIMIwMpalpGrq+JQh8Xr16BWAUORaLxUd0GM+FPNubYYpG/oM0oOM4IBoOqKpG4bECEtXrKsut+AMoVyt9z5RlZTIrLWTZtq1qCC+Yzebc3t4qaeodTXPks9g9mU5rVHkQBPT0pkQD5Ng9X0QzQ6G43N7eM53NzMQvyzLqpqbIC7H2syxsx+ZhdccXn38BwO9/+zvW97dMJmPTApFNFq6vr81xfPfdd8q8tyTwQ+I45v5uxXg8YrE4oSwLo6Sitdc0Lent27d8/vnneJ5nuIqa0dG2rclkJpOJoa41TcN4PDb9s2N9tSRJxFPV95nP5/i+0LVcJcduqwl523SEUchisSDLnrJdHUSllSLXWveuj4VS9XRZOykdB18NrtfKxhq9oMnhuqUxnU7Jy8IoCD8+PiqO7ITT01NmJwvjbaAzwtFoxMnyhPXjmv1+b7Tcnk80P9kzs23bkM11tqN3iufQC32jGd6k/bGDuSnPFB3qeLDwnPZkWSKiqH//qQB4nBl+ckL6XAbIsj4aAuiFaR8NAORzpcFYlYXg6/yYpm1pmpYsz0lV6TCbLuTEeS7XHz7w+PjIX/7FX/CLn/8L/ubf/3tOzk4os4L14yOzyZSqqiiLktD3qfOCokgZD0fY9NRVxffffUeS7BmPR4xGE1brhDgUqzPPcbGwGA2HzKcTRoMBrqJ0aDmX0AuEQ+e64sPYd+y2GwEknp7gui5Z1uA4Ho7tEHoyZRrHY1zLJfIjpqMpruVC2yuvQNEAE+6cxkgVCKRFJoy1gqDUVaMkhPZ0Xc/V1QsuL19w2Iv7j86mHx4eePv2rZQUfsTd7QOWZZmxfxgM6EY9h8Me27ZZLE5wHIfV6hF4Ckae54qEdQ/v318bruFutyWOR8TDGNf3KKuK4XgCFhRVaZrt93d3BP4tURRxcXFJEPjMJlMGUUgcD2hqIXd/+eWXnJ6eMRpNFCxiILQ0laXE8RDX8XEdgfokScbhkBLHAxzHVhnckIuLCzOpu7i44M2bNwa/qbXxkyThZz/7GWmaCqn9/olHqxf1cDhkNpuxWq3Y7+UcbbdbU3qmaaqYBS29BaPxWGAhdUOeFwRhRFlXdH2H53oUWU4UBriOgHNff/E5y+WS1cMj799fm3JWb3qnp6dsNht+85vfcXp68pEQhB5saJ6kph3paS2IR0Ko2C1eIP2+qtVDnsKgAm5ubvj222+5ubkRYciZDEaiKGL1+Ejbd/L6tsV7FgM+WWY+//t5APuxHhU8EdV1gNNfTvPX9O91Wfg8Q3s+fTz+vB8rRT81gPixR6M4Ztq63rAT1GdNZzNaNSWpW0GsD6IIT42pLaSJ+eb7NwyCkL/+67+mrWv+5m/+hhcvXmC5NkVZkKYutt3TtR15kZM2FVWVM4wjwUitZHpzeXHGxfkFaZbwu99/Q9M7hIMRZSFuUOPxiMloxtnZJfQd282aphGz2slkJj0ryxLJ7LzAth3OTk9Zbzb84Q9/MNr1dV1zdnamHKJzw2esypqmbkkT2altp8fzbNq2V8Ro0Y0PA03I3iuZF9k1Z9M5GugyGAxI04ztZm+yBz3xalvx6dQ0Fq3dnue5ESIMw4CrqytsG+q6UjZ9YieXZeLpqEGSaSpaWePxmOVyyXQ6o6xq7leP1E2Nq9Qs7u8f6dqGzXZtMhXXEeiE9q10XYfxWExopfrwWa83fPbZK7bbHbvdnrIUY1qtMnE4JOy2exzHYb8/mHNgqSb7brdjsViYvtNwOOT09JT7+3u6ruPt27cGO6Y9ODW9SvcB7+5kan52dsbJyYnpLWn5H615pku1MAzxfJ/V44owFKXXjeL+niyXWF3H43rNZr3hxYsrJZkUKMXXkOl0yps3b3j16iV5XhpV2q4TVY/pdKomuG/N+tZUJfHVlPV0OBzM7+M4VnjCjH2SiFeGMiapm9p4HSyXS87OzhgOhXSvnbF0vDhOrLbbLXEcc7I8UVWJPH4ymGnw7Kea/p96nWVZBlh3nD0d19bHPz9+nn79sczPp8rDT8Exnh2I4Xv2vcwsj4vetmmwFev+OFvsFaWqKAr6TlzMPRU4szwnAsHQuAFv377FsRz+91y5KXYAACAASURBVP/1fyMKI/6Pv/s7+h4836dqK9LDnq6tCcMAB0h2e7LswCCOSA4+vi8mHlZvU5UttuXTVj1t3eP5AYEXYVs2+/2OupCJlGsLAT4KAilHO1Hz8EciMZ20Ca7jU7cN+90ex7KZLU4A+Q7RcMB+e8BzfJJEejCjeEyWFeLS3fQUacVwMiAMQoq8xrZ8caNKK5HGDiI2mz15XpOlGZvNhqbuCcOIoijx/YBWcQgBJTsjShVpmhIEgVHqMJAYde2121IQ+FQqk5LSxjfOPXqiWpYlSSIL5kkOOmez3XG3eqTteuEezmR6q2EHYeATBCFd20Lf0VRPZerelU0q8HwjOng4JLiOS5rkotTrutDbuI6P74ms0nAYA0Ll8wOXthXjZ8sSuWstJaWhH/r+vry85PPPP6dpGt6+fct6vVauRWf83//hPxi6YN/37JTiioZibDYbgTjEMfv9nsfHRzNwELhCSRDZYLvYjofjBUSDIaNYWg/0HdPZFE9tNiJ66ZMd9iznC27u7lVP1Db9LO2SNR6PzTQWMNAPPVHt+57temWuy/G1tVV10/eiTjMYDU3lluc5b9684d27dwyHQ3rbou5aWqTUdQOfyXyGFwbimtX4NF0nuPgfC2bHgeUYnqH/fp7J6If+udboPwa4wg8hH8efoz/rGENyjHX7//OQ5vwPy9Dj45Ap58c4NdtW4oxqJ3B9oe6AZcqUqqqgt/nTP/1Tfv6zn/PHP/6R/XbH6ekpH95fs1rdM1/MsGwJkK7j4Ni6tBUgap5leO6I8Xgi4oa7jKbusXqbxeyUrGzxPXElKooS1wsIfB9ZfzauG5DnJU1d0Hc2jmNTFBV9bzObTtkneyxrx2g05vz8jEIBI/u+5+7uAdt22e0OjIYy9j/sxUgiimKGw5jZfEZVF6TpI5ZVYeFRlrIRTacjAn/Pfr9XvZaOwyEhSVLTv7Jtken2NpI5OK6U75PpmNl0RtM21FUtYFTXxbbB9z3GkxFRGGHbjrnZdUYQhtIs14Oo9XrNYZ8oGaUnE+OiqinyHNcP1PM2CnBbYtuOmeqOR2MsegJfWhZZmtC2LbPpjPBkwPt3H9j2Cd9/914JOAogVTe0tQT0fr9jOBoQDyMeHx9FSTiRocCLFy949+4dh8OBV69eGfjIbDbj+vqa77//3sArbm9vqapKOSmJTLgmsPu+zz//8z8zmUx4
/fq1Uey4vLwkCAL++Mc/8vDwQF3XeL6P5/ucqmy66xpGo5FRnYgC6Vn5nsvN9Q3j4ZC7uztGI1H6ffPmLf/u3/0v3Nw9GKlyDUzWXNE3b94YkQWdqOgkQyc/GmyrhzZaGUMT6bUm2/gIjyfKJpKlfv/99xwOBzMIgSeV60Yp3HZdx/39vWEfwE9kZscE5uPA8jyYHT/ftm0qI7L2lGnp1+iDOf4c/bf+wloC6KeGCMfB6QdZ4rNA9jy7M4C7uqFrn2p+y3ENSruqKtq+Uxr0oTl5XdexWCz5+uuv+e4P34myhe/zuFrhez5JknB2viRQWB3fFXiD53lEYcR4OKIfxDJ76KHvOsJwwMlsSV3X5EVP2xf47oDA9ekbi8bqsenJ+gLbsrCRrK7rOpq6py4riryibx2iaEg0iDlZCujy4eER3xey9GazxXU8wmDAupHSqa5bttudcpY6YbE4paMjSUpsKyDZl2w3qWSknk/T9IxGU8AmDAfMZguTKUDPdltgWUqF2LGwOyGGaQyUH7g0WUXdlPiBr0QSa/Iio+9bBnGoyp2RsR/Lc9lcNE2maQT5vtvvGQ3luup7ahANCMIUx/cZjkayIJQ0T9e2jGczgiBkOIwpC8k2ag2StmC7WePsU9rWZjE/Zb3ekmUF0+mcm5sPqlUii6uuhShvWZgMMI7HZNnT4tXBSt//VVXh+z6LxYIPHz4YAUNNHczznO1uhx+IGKfObHSpqgdwh8OBJElMxrfZbEwwaPuOopS+Wd8LFAkaM43Vr5dj7wGRTsqzlqYu8TyH16+/oCgqDocDVVWZnpgOUjr50MemKxy91oeD8KPKTq87XZKmeWZ04Y4DoIZn+L7InOuhhFbn0YOFs7MzM6D8SXem572y55nVcWD5VAP+U1Z0z8vJ4/fXr9Mnhf5j3Jl+6JN3jIP7VOZ2/L6f+r3mc+oyRwu96fetlbkDtkWa52w2awI1Fp9Op7x88Rl/+7d/y/nynPl8zh9+93tcx2G8GLOYzwQVn1TYQFVW+IGM14ssxVHBsms10BS6zqJrLYq8pm0tHFsmUU3V8LjaSmkWSZN/MIiYTW18f4AwGARR33ei+FEXLafnJxyyvbleRVHgez7z2YKmac100rJswiBmPBKV1ywteVxteFyvabtWSTvvWa/XvH49xBuEEhBdl/F4os6d6LLt93vTf9JTt9lMzoXufXRdayRehkPhUep/S8M4MhtGFImpsr4+qSppy7I0sjAic/TUAmmaBltxOHvbZaBc4DUmrShKBoMBYRhwdnZO01S8e/M9eZZxfnbGZDJmtXrgw4dbIODi4oqyrHn/7oPSkUt59eozgsCjbSt6OiXnbBHHEYNByPn5KcLnFDzbixcvcByHh4cHg9zXLvK675XnOQ8PD3Rdx+vXr5k4Du+vb8yk07Zt4/r93XffGU8KjVfTwoY6mGE7bPe3LE4WtG0rDXzHNv27zWZDVRZ8/fXXbFYPXF1d4ToOm82G16+/5PbmhuFoymq1lp6aMiXRE0YdEPXUUvfLjte9Dmwa5nG8tsqyNDJXGuqltc3u7++Vn+xQDa4yww/X4o62bRuxx8FgYAYV8CNl5nFw+hSO5DhAPQ9S+ksdA231rqJv4E8NGUzwsiys/mMy+vMMT7//RxlcL38s28Y+Oj6e990U0r/vnxqYnudhIYDdspKbTAw7ejzPN+J2bdvy61//mtPTU7I046CsyEZxzGeffUZTV1RNQd2USryyYhQPaaqawyFlvz9wenJKT8cwiqnrijzJKPKSuqoQJRKbIivVOeuZjKZMpiOqqpIxu+VB3+FYFmVRUZU1tu1Bb5NlBXlRcnd7T3JIGY3GFKrnNpvOOBwSkcG2hOEQRzGDMBa12jSnruXmH45GFHlNXYnl3n6fSnm6PVDXFcNRTBD4lKU4YjmObGJxPFBikyVtW+M4NsOh3HCTycRo4clzKvpevCNdV1RFm6Yiy2Qiq6docTw0QVnfQ1EUcbJYEATSP9tut5JhRiL54/quul/FAGe5XGIxJ1D4p5uba3xPBBR3+x2TyZgw9Nhutjw8POB5I5bLc4bDEbe3N4zHU8IgVOVlSlUVpJkO0iIAidXhuDZnp2fSEuh7Zecn08owDI0lYBAE7PZ7qURs2wByZ7MZWZ5zfn6OZYm5yX4vysXj8di4eBdFYUrTsiyNFth4PGY0mfLq81eMxlPyTMDDtRKibCpBzceDSJmqKKkoG7IsIQoWApXobTNs0S0f3T/T9CQNNdEcTC3T3ratENWPDIsdxzF4uTzPsVV/UrNIgiAw50nzMeM4NqKOWqzz5OQEyxKDGAtlbnSUr/x3M7PnGdJPPfpeyLOWwmzJy8Qd/Aek7/4oAP3I5PTp76cy9bj8PQ5klvq347j0dFg99KgsrxeQK1aP3fW0nXg26lLAdQUk2DYN8WhM2VRkuz1hFPGzr77k8sUVq9WKf/qHf+TP/uzP+f7771mvNlxdXPDV659RFQXv3r/l9PSE4TCmqm2yTJDV0hi1yLKUw2GPZVtkqUJqVwW+EitsmpbAD5gtT0Sho6gYRAGDOMRRY/g0Sbi8vFR6YC2HvfR6ZCdzaJqO9eMWy3KpqobVas1kPMWxXe7vH7AsVfJGMUmScEikwZ3nuSkjBoMBvi9IfgkcAzbrjYJiiBhluAkYDge0nZZIF9rSZDxjPJ4qkK6t0OsOFxeXzGYz7u7u+fDhPff3Dyoj86iqTJy0m1aVs55ZOL3iEGoTDL2zN01jrP92u70SL6yZhlNGkwm4NuLB0RKGPpcXZywXCx5W97RNw3r9yEh91ng8xvWEpoYtjlfbbc7hsCcMtQFxwcnJguubD5RloUqjBseV65amCYd9yv3diigcsl6LTM16vTELvO970jRjOp2w2x+wbJvBaETXtWx3e5I8Y7s/GE9Tx3G4uroCZHq3WIib95s3b4xnpKY6dV0nXFfVc8vy3ChXRFHAIAzxPJc4luBRlQW/+93vqPKCqiqIwgjfDynrmvOzS96+f89oIvi8t2/fUlUVs9nMYNu0GOV0OjUQksPhYDLOohA+5eFwoOk6qqYhUVlWR89A8Wv7XhQ4tC9vEAhyoEgzhtEAWim/26rG7sHBwsHiX379pyYwHleM/0PTzOe9seNA8px25NpPN6ERY7RtnGdZVdeLUKKFJYKJLTSK09VZHXZvoV2JBM8mMAfPsX6grCF9Nmh74bu1HGeOqGxMnjMcjlmv13RNwyAegeWQqb7MSBGe66bl9c++4pe//CXr9Zq/+z//LwD+4l//Bd99+y02Fq9eXEm5c9hiWRaT+QTXdUizhCzb0zUd9C3rzYquawhCn+HonOl8AnbL42qNZVu4gUNeZQRBiD9wubu75uz8FC/2GA0jPNcCWpLDHsd2ePniBXVdc319Tdd1fPbZK8kYsowsS4mygEEcUVbSX7lfPZqyxHU8kiQ108bs5oOUOqWYuDiOxWIxM5zM/W7Dw8OD9GwUODIKBuRZBh0q4PT4fsR8coKNA3ZPUeR4nq8UNjpFR+oVaHJGXbdUVUldN4zHU/reUuKJGRd
XF+ySlO1BdvpIod8B4+6+PewYtAMs18aPfF5+/hLLsoyreFkL6LaZDCjzHevVDV2TU+SlBIBhxPX1NWVRcXp6ymevviA57Pnjd28oqhI/cEjSLftDh2V33N3fqj5Nyfn5uUHkr1b3JIeM5XLJbptyf7fm5OScwz5l9bgiSzOC0Gc0HGMrV6PVekvVtNiuy8nyjDQ90PHA/pBQN9eMRyOur29FCywIyDLBPWZZoXw3QyW7c5A10QlYOo6H1FXD9fU1ru+TZiJPHUfiuhUPJYB8eP9B/Eubhvlshm1PzTouy5KsKOiwTQao1Y9172o+n5umvS73dM9Pr+ueniRPsRyLrMiomgrLtrA9B98JSIuMopbq5fLy0njCdl3H+uGRrmnJDhl921PmFa7tUVcV3/7hO87Pz6nLBtf2CH1p5fxoMNOPH+tHPf//H0wMe+sHzz0uI5/33p4/2ralo/+oRAWkDOSJYG6YAD0GzW9ZcpJ762MmQt9LJkaPyApfXRGHckNrv0jXdXnz5g3n5+f8yS9+SRSG/PrXv2a/33N5eckginj39i1FmpmLW1UVtmUZnE9WZESeKy7PbUZViZGqVnqQmzOh73sm07HpKwC0fcs+2eF7LmUp6fZme49ltcoxKcRxXLJ8T9M29DQUVcFqdc/+cKAsSsoyZzQZ4ofnBoekJ3CafqKnQvqzbUd8CR3Xom5yLIQWJNO6DU1TMRiEWFZP19XkeULbdkTRVPXJhHok1J+EwTAiiqSpvV6LsGJZljw8PABir3dycmIwUlobazKZ8P79e3b7PUEkMku6H6Pvm6IoKIpCGvdq0KQpRr7vkxcZWKLsuloJKXs+m5ieU5qmrB4fWC6XXJxfAAIlKquK1XpDUVW4nktW5hwOW1HwsG1cd8r9/S1RJMOUxWKhZJMwmv5RFFNVDWmSE8dDmqbF9wIF+BSDEvEV7RlPJlRNI9Lkjoevhht9UZDlOWWWGxPd5XLJfr/n+vqaFy9eMJvNeP/+vXFI1430vu+NjPxkFIsNoSq3Pd+lyHM2bKibWpQxlISOzoQtq6NpBEC+WCzoe5Gi0vgxfQ0cxyFJErOujm3iJBimTGdTLMc2PTPTf28arL7jz//8zzkcDgaSoilPp6enjOMh3/3xDXQ9dD1xGNG4Htuyoi5KqrwgmM7kmMOILv6JzOz48Skc2POS8AfP44dB7L/3eCo9FQe07+jbp56YZQnNyLJENvsjzTTbOsoOVbB79h3UW2MBLy4vubm5oW1brs4vjDN213W8ePHCXMjVamU8LyUz6ZTbeWTMPhaLBbbjcH9/z2Aw4Orqgsh3yfOAzQZzk2nEt9Z9EmK0RZIkZhI1Ho+Zz2eMxhF1XVJUPbbbc8g2WG5D1WY0ZUtzX+K6HnmZsN1tyfK9mgrJYKNqhfMpN19NXRdqUPCkqzWdTs33sm0IwgDXtUhTcS7K85w8z8BqmM4GLJczWvX9Z3PJ8rq+ZDFbEkVLDoc9TZMzHA3oVL/LsixDU7Ft20zytJyMLmdvbm7YbDZcXUmm+/C44pAeFGVLXqsXT9d1psQBDAFbN9ObpmG1Wpnm8mAgw4K6bikKcZTfbrbcXN8aHa3D4WDQ89vtlnggNnMiPBlycXHBfD7n/v4eiLFtpMRWhP8oCrCsnjD0KQqPsixM+aeb5hoF3yKQINd/mirqhrp+HJKEgR+YXtjV1RXn5+eUpWSFrusymUzYbDY8Pj4aUOlkMiEexFiObaa7RVEYilMURUxHY7744gtW9w8fSW0BhmlQliXYrsGs6b6YDlie5/Hy5UszxNGbkRi61KR58mRMcuSepHFzjuPw7bffGt9T3Zfb7XY8Pj6yW284W16YJEZTpHTQBFGt1b3un3RnOn78GNL+UxmZwZFZPy6I+KmBwUe/R2VmvYgjfvQZqpzk2fEcj75tCxzHorM+IV0kIk6s12viOCYOBYS42+1wHIezszMuLy9J05S3b99SKCt5Qcxn5FmOp/0OjjiqyeGpz3F2dk6ZHmjb+qOTrJudYRgamRO9QLWonnDTlgSBTZLtcL0pWKLNnheJ8nyspMcRRdRNRVll9LT0tISR7KB1XVGUimTswXQ2Eu/BOGRURPS9ZBYy1ZQAIZmmy3q9AhrarsBxYb4YMxqNWCwWJMkBx+1YLE5omobDYY8f2ISRQ914ZFmN7fS0tShw9D0ma9Cbms4QLUsUJHS/RxOXoyiSMroVSIIGYOvG893dnThcKceivu/J88I0pNu2JU1Tg6gXD1GZovZ9z3J5SlGUlIWUOPf39yawrlYrNpsNdVUTeIGZ4mlkvnbG8jzPuA7poKFbIYOBaIDpJrlWg9VyP47vKRl3WJ6ckCpmhKbrDAYDmqbh8uzc9KC096k2BdZqIMfgct3rHAwG9BbEcUTZ1KLHr/TAkiShzHJevnxpfDd9BSHp+97g57IsIy9r8710sNWKGXrKCJJx39/fG5K44ziUVW08D3SvUK9jHeD0FFIDofX56boOB9n4Bsp/QT9nuVwaXq7OFHXA1Y//4TLz+UTzGO+l/3RdJ0HH+qHK66fgFM8DmnmvTxyPfqZl29A9GQw7yujVtm06hCfXWU9aaaZUVW/QNAIkdFyXqqqYTqd8/vnnOI7Dhw8fjEnuMa5Gjkv6AmmSsFwucV2X9XpNpXbMi4sL9vsd6W5L29XmJtPja+01qikrYto74dWrV4ZAXdcNTSsBaziMsawpTSPihN5kqDKcRknrBLiu0EWKosB1xU3I9yfCMWw00VhnCQVtOyTPC4bDiK6DXrlGT6cT1SerqOoSy5bzq01lw8jDsmMGcaDkg2KSJOXu7o7bu2uiKMLzba6v31GWLUE4MHvO4+Mjti0qpEmSGMzSzc0N+/2e+XyuguWT2UjX9uz3O5pagsjlxRV93/PHP/6RLM0pR5VZbPSIechoIuTlc+F7GvxV3XK6PDPQn8APeKl0zHRfSDeTBdeW49qugSF88803eJ7Hcrk0g4k0Tc2E75isrf8/UbQdXf5GUSRCBa6j+LO9lKpJwlpp4OuApMvrIAg4Pz9ns9lwf3/PdDo1juAab6apXXod1U1D2zUCQPZcQ3TXCPtdIyR2z5GeGUdWbSZ7bDsTtJ73x8MwZDwemw2ormsD09AsgI6e3X5DpYQ7dUYmbZYQ3/dZrR5pmtZkrVrkU6vXtm1nNjA9nNK0vK7ruLq6oigKM8XVj08Gs0+Wj8+C0vPnPwWNJ+zPMSbsf+ihJptqCGoeAtk4/vczxQudmWHR030ExzDvIW/P2dmZoLVth6+++oqrqyuyLOP9+/ccDgcjd+wrQnuWybStawQnRCcp8XEZpbO33W6D71h4nmN6Dfr86JRf44106QmYxRHHEfS9GXt3arwfBK3B1bRtJxPbTgLzcCjTsyyTbO/s7EzhsIT4HUWRujGkHxIEPZ4XABxJDlvmGoZhwHw+UZmbpXbMhjgefCSVPBhEjEZDbNsyCHPXdaibHt8PTAmgcUSff/658QnV5YvOBKbTqQn+kvUdDJRBss2GXrlvOY5rrn3bdibjHQ6HxPHQ6I/lecF2u8
e2Wy4uLrBt2/Tb9ORMy1VvNhsTcFu1yADmc1Hz8H3feKDulCSNzhD0wtc+srovKefDZTQaiZ6ZbVFWFUVZsnpc0SPS3bpMO15nSZIYmpCWiNam28ecTJ2hPbmV25R1SVWVxOORut7BkzJtL5nO6v7BwBt0EBZTk5ggCEAlCLpHqUGxolrsm+CvFSx61R7q+x7P97At28gUaV06fR/K/RAxmUxM1q1l6NtWIB7DwRNPtqrFt1U/1/M8CnXfHA4HwwOFH/EAOP77o4DwiQHA8eu6rqPnSWb7eV/tU6/96DmKhyWt/u6jC2w998Pkh1i3np6ulwECRxmiZJXynP1+z8uXLzk7WdJ1Hb///e9JksTsdJoBoFNY4QEmOJbCpNkOj4+PuK7L+fk5o5G4BG23W8bjIaPBiCBwjbqH7kNopQPANLX14tZyL57n4vgubedg2S62I/pkTSO0nrazGI4makReYTetOPk4DtgOfhDhuL7SBQtVo7zF80LCcKAE/UQRVhD9shBlKtZQ16LRP5sNzObUdx2HJGU0tLFt6fOEQaEAr2NcRyZnXdtzcXGFZXtE0dDgjIbD4UcYRF2iOY6YImtApkZ9dz24nmjFTyYTRVl5VAOGHMfxFH2rV47gIUEQsdsdyHNxx9aL3nFctcgsNpudKvk63rx5w2g0Yr/fm4xspIQZ149r6HqGowGLkxmzuYAyP3z4QNNWPKykP4rVUZRyT7muSxB6YHX0vWPkrXWQDcOQJE0/Em189/YtoUL/+77PdrtlvV4zm06xOwGYaiCsJmsHQfCRSYiuPgQPKeWb24hprx6M5HluNpC+kQATeL4p+XSJp7M8PwhIso1p+Ovvofu9esqqYRF609blqiQw1kf+D74XEvgRju3Rdxaz6ZzAl41hm+9NKS5GM1Kq75KDkejuLdglB8NUubm7NRm13ng+Gcx+LKh9ClH/Y1na8xL0+eN5EHz+OwtAlZPm5/p1z4jmH/XGQHliPh2H/uMoyZ9/+ad/RpIkfPjwwfQfNIlXlwij0YjGtmV8X4q1lu96rNdrIl8oF1pbSu/0Wr4YZGKlp4V9/3RjAiYt1mlyrAC3g8GA9fqRyPbBcqlrsPCYTCQbq6oHyqJhPPLpWotcK3+2Dmla03cOw+GEMm+xqehah7Josa2O8SggDIZYJOx3OYc2Zzh06TsH3xuYjGcQjVlvHkgOOSLOKN9lv0to6g42ex4eVlxdXXJxMeZwSLi9vTclWRR5lGUF5KYvpssX7fGpg02SJMaBSC9GmSzWRvtdduvWYKz0otRQAI0m9zzPZAmWZZMkKZ7nCxi5qSmK0kzOhLQu5cvhcKBU5rXTyZR4KLATz/EUHCLjxYsX3N/fG2qRlsLWaiAamS7BU1ko9k9KuTqD0y2PwWDAy1efcXNzQ6Qa4LLZHXh83EnW7gcmCz49PSXLMh4eHoxMuZ6m6w3HdV2jNtEhaiehajUcN+h9xzUVjOu6jIdD9vu9yXpATLu1JI++XrOZOL4fcy31Pa17alpM4hiJoIdMeiqtoV7v379nPp//QGHHtsVuuOla9lvxQDg9PTU9VZ29awNirYunHz85ADiGUejgoBf9MXFcj1+lh/UxreFTPbLnQU5/EceR5l//7PnyGvWfZ6XvMRm+R8bTTd/RqNS7V6XacnnKfD7n5uZGGrNKQuYYpKl3o+12S1PXRjq4bVsqVbJ1dWNEBx3HYaDSZ2kYuzR1Q983SkPfM6nw/f0933zzDaenp+YiH/fRZHdqaVtoesiKEt8LaLuepmmJ4xnu1CPLc2zbJR7NubiasNvuCAJp9tJDMApwrAjbqsjSmsB3qMqesqhIkpKy6EiSPYd9SRwPuLkWgvTl5QXDeEZdteRZo3BgtRILFA3329tbsqzg7nZNU1uC2j6UqnflsX7cEanv8/DwYKg2RVEYVPdXX31Fmqb8wz/8A20rtCmN8t7tdjiuTMt0BqcVVU9OTj4qTX3f5+LiQpy7b29ZrVa8fv0llxcvuLu7F9HEIMJxXA6HDM8NODlZkueZyiykZ9n1HUWR07S1abbXZUVR5GRZCki2MRhEqqyPDdi2bRvVwK7JczVIeNhQVVL+azrTdDrl4eFBDSxyUZ4YDJSje6My2Jj5fMx2s2E8iE22KnSulNlsxmg0MiKWL168MKj7k5MTwza4e7hnOh1TqSHKcCgMCiHZj0nTlGR/II5jc2/rQdhsNmO9XuP7PpvNRlgt6tqkacp8Puf6+tp4E+gApU3Dm6bBJ1ACjKXJInWPDQQArNECmoephwvb7VZaOAoB4IcB4+kEPwywHJv5YkFW5Li+x15lbmEY/nQw+1Q29inc2PHPj3tXz5//PCA+/4znn/tTD8sWeevnmWPf97R9R5kVtMhOo2kgvu/TNy339/fc393Jbtz1ZmKls6hj8N9x8AaZ0lqWxXg6Nb2DpmlEUkhdUM9zqPIU28YADrUBhG6QauyX7mVYlmWcf2zHYeLNKNKUvKhxhgF9b5EcSvN9utYmzwuSQ0KWNiTJgbKsCYMhk8kUByiKmiwraBvoWqirTgWenKpsaBtIDjmBH9G14AQe9A7a9DhN97RtT5aV+J5PEAzwPckI9/s9aZby/t2NG+ZBVQAAIABJREFUTFCdgNZq6TpL9fqES6onZJowrS3XdPNa3y/7vcgDJYmwGYIwoscypZW+B3VWNRwO+frrr9Hj/Pfv3xNFEZeXl1xfX/P27XsW86UpNYtCwLlRFGPbT2Rv13XM5olqb4ShsC1q1/uonMqyzByLViBpmobFYmHoSavVijRNSbOUqmwMAj+OYzOpdQOfqq5Zr9cMh0MBJqsmtu4jRureWiwWghFTwSxNU1N2Pj4+8v79e66vrw2sR5eLWi5J9+E0Qr/rRD26Kisz3YzVedMcTj1hdryAOI6ZTCb4vs+7d+9M9qVZCLr81FmS7/v0KvBHUWR6e1EUGcpSmqbKTFxAzYCRE+r73ohRjtS1L9X/z2azj6bY+nyt1+uPYsaP+mYeB67n/z7uRen/N6lq/0Mjkp/qw+mfS6b338ek2Qpz9oN+2dGY2vaeJixaxmS7WbPb7owfoGs7RArMalmWwbEcK4R8FLT5GPirsxbnSKbEth3hT9pyUTUfTafiGliojSgAs1gAPD+kKK6pVEnTtZKMZlmmpnHSyC/LiuSQ47oyfZPzD0VR4dmWOItnGX1v0XWiEitlVaos31r1XcWFvutgtztgOxZlmbPfC3q8LGtc16dtevBsxuMp4/FUcSG3eK5vApM+z31nmeuo1V1FYigyx7pYiMS4XnhPvRbZQPK8xPMCNenqlLaWKFYIf3TNcrnks88+58OHD3z4cKPMM6aMhiOSJFNNc8xUcTg8x3GEYC8l8QjPc0mSA01bK+pZgOu4dH5gBjXb7ZbHx0dDcj4uoRaLhVGR1RmWRslrPJ9u3M8XC4pKwMO3t4JzSxXJXHMsXddlqnisWqAwiiIzydOKETc3N6zXa9I0ZzSKGQ6HYgYSD4hHQ+7ubqjrypThuhTTgoie41KqNsfp6akZKBwDlZumUXptQ1PK6
paK3Fu9qTA0TEIGgBau4zMaTZgoSlSe5zSNCHZeXFzx5s0biqKkaQ6m//kENbEEEeE4dH1P2/eEgwGL5ZLheMzDwwOn5+fkZclecTn140et5nSA+FSvTJ/Y4yCiA1zffZxpfSqD0o8f4NU6/e8noKt5rvr/rv/Y0em4N2ZbNlEU4kehIa9ut1sxWK1kcqeDiKsa+se4IL0bfxoDJybBpeLaGbCj5+Gri+q6Dll6wLI6wxDouo7RaGQyxDdv3hi1g/1+b6YxMqVZUdUd8VDgFNfX11RVRRwL+FP6TkISr8onrSrHcTgkKbvrDVHo03cNdVljWVCWGWUl/pNNWxIEHpbd4XoWnmfhuiFlVfK4fqCpK5L0QBQF1HXFYjHn/v4Ox7HJsgTPEwDmdDrGdR0DbRDoyZBDsqcsc+q2pCwLM3EKgoDxeGya/avVygj2acOQruvo2h7bcvG83gQGHYyWy6UBo263W9I05fXr1wazpP0TBU7ikRxSurZVkj0Nh0Oipo2tCWi+7xkVFd1qcB2Xtm/NRqgBoHIulQiBbYnJje+z3+9p29ZkYLZtUZQZ89kJV1cCKXl4eCDPM8q6Zr3ZYDvCfNjuNeDZNj22MIp4eJBp42q1+khZQqtvLJdLbm5umM0mLBYLc6wWFmEok2pdout1qSeSGoRsq/tTDx50j66ua+LRxMgM6R7VcYmvh1vHgU2Xq13X8ebtW1zfU34EgdrUn3iUw+GQoijNcKzreuWEFRhsoYZF6RaWLkO/++47BoMBd3d36rz+hG+m1sT/aCEfBaLjbOUjfFn/wwD0qQHAj5WXfd9rJtSTc9LzGNqLs3h/VAb2xyR09Z56NK7/dF1HpAJIXcpO0PFxINa7jPmez49NgW4926GzjnTZgFyVHa5r05YFtiOCg8dltc7mBEIQMxqNDLVIXzQsi6bPqLuCIs3ZrDeKnNvS0Ro9ft2ILpuMMIgYDkc0bUGSbkmSFvoGx3KIhzFZ0ZjjCCOX8XhIkeeyMSAlV1YUVHWmeKkpXVZQVkOqOqPra4oywe99LLvjcX1neoZlWZLlGVWdYTtqc6ElTTNDsH4+FcvzXNzgjT1aZjKzum+EqtVD38kkLAwEVhAPRji2h225jEdCcLYtlzw7EAYDFvOlUGOKlp99+XOKvGSz2ROEvmEaJMmB6XRCWQZoF/dWfZemkeNs+oa+a8UwJs+xbVgsJMMoy9xkmKLzLw7nWZbw1Vdf8vCwEtnw7Z7FXFoLGkf239p7zyZHjjTP8+ceOgIqgVRVRVHd22zu7M5M28zHX7sPMDZntmt382aaZJMsxZTQQOgIvxcuMhIl+t7uGsMsLasyAWQgEP74I/6iKitmi7m5Xs0z2IPFWimlfSHW67XTiVuv14Y+FbhgYrOh6XTqMifbW5S+5+AhNmO0MCKboQ1NSCzY2BL47TDMIv/LUjMabGCz+C/7mT0bspn30ncdu11FGMacnS2YzxdO6ujm5sZULx6gM2WlWtq2x+LMgyBwWEGruLFcLh0n+ezsjL7Xxsnj8fjvB7NTkKz9firOOLxI+odPZeanJpWnfbPh37Ael0oYzuVJIANcv+xT01akvqDH8kk1wH4oUukg59lyRuF6ZLY5aaePNvjYnUQphRmSUpQFnnzSaurNeFun6oLFbEwYWrUMz42v7Xu2yGe7y2vdfI0zG41TetFTViV+4PPH//KN65kcDjld35FlY6SQlOWRzWZJ3+PQ4Yieps5BtcgoRHqRwx9pPJZHFAu6HoOW39HFMW2XE4SK2AtIM11OITrqpmY01u7hFmJRloWmlHUNfiBIs4j9fs/NrS5Nozim7VraVjqwpBURHB52wdkMzRqkdG0HCMbjiWte28/FkpvH4zGXl5eOzmQXv73ev/76hu12x2az5uxsxnQ6McTsWvNb85Kua+m6kVuUXde7xWOz9mFWeTweHV7KZo16OKKdlBaLBV3XO8cquxgfHx91BltpkvtLXvL2/btnEAubwSul2Bk8m22y236UbaJXVcX79+9ZLpfM53O3Tq1B83ADt6oww2GKhUAcDgfOplMXGD58+ADgcGTW79L6gD48PDhVEwuWdbxiI57Ydh1N2zIeT+kPeyfrYzc2S8Wy6rS292z7kHoYFzCbzZ3kEeCA7Lbv9+OPP7pJ8dCh7bPBbBh1h8HjNMDZN+IyHJ6sqU4DmYtLX8rOLLRiEMDAdGEUTn3jGSTDnl//VJ4AT8yAvqdtdCl5NtUATdE/fw+n59abia292T1hlHSLkobGDQwEDK4VZidUZJl03EJbkmqDjXuHGLeuzBZnU1YFwoco9phMx1xdaeT6w8MDXV9R1z1BoHmAvRq53bhp9SKbxSmSEGmAy3Ec064LPE8QRgIhW6r6oMtMCdLrkF5HNgqfTaVfvHjB3d0dWTY3WB+fKNYBKYwkYRRT1xpVHoQ+beeb8yiIk9jRo06R6nb3t5uFUsqZ/up+jKKqtIXdZDJxaHMb/O3mlKYpDw8Pz6gsdV0zGo0RaFs1zXjQ9CY/0L6hk8mcIAgoypy61osnSSMHO2jbFt/zXc/I0mfspjabzVwbwva57BRRg6IDlIKq0s7ef/3rX3n37p3LkOpWL+L1Zu1ezzrETyYTzR3dbPARzkzk8fHR9Vu32y37/Z7lUhv5LhbaKUw7Tb1guVxyc3dDbQjz4/GYs7MzTWs65m6Dn44nSFOJ5HnuSlDd92w45IUbKtSDTLGua9fWsPf8cI0ABprx5OW5XC7p2hYhpXOY2mw2rsVjoSue5xn6YOLek4NrKOX+vt0Uh+XzZ4PZMEuS2AzKfimkfIowNnvpuqc3JIRRuVAfTzx1sHre5n/WwLdIMZuFuccMniAEmImmvXid6vVjFCy3G4dBsTd513UE0nM7klIK3wSnoeyvPQ8LFh3CT+xXmGgdJpt/9krR9T2i6/CUIE5SfKmboAJPW7FtDxqU2urGv/U23O+3KNVxdqb11tfbNX7oM5pkpGanbaqatu+YTWd0fc/93T2qV2RpQppo81/rUB5HIXHkAS11VSGA4/GA50l8X5rsp3GI8CgKng0n9Iak3dm/++4754Lz888/u77JaqU10oQQ7HdaujmOErq016yEpiMI9YI/HnWA1FlL5+AZFpM1nU75+uuvAQwequT29oHe8DXsdCvPc9fTmU6nvHz5kjzPubm5oW608GBRFEbsb2RKwhqEMt4NJUGgHZi6vqNtG5Tq6PuWKNIQi6Zp9UDE84nj1EwPe0O8LwHpGtqj0YR3795RljUgkNLn4WHpSue6blC99iK1oNy26/jpp590loXSbkQXFyil2KxWzM/OuDy/oDOUqoPhsC7XK+bzOQrdytgftdBjp3q94K3Q4ShjvV5zOB7pUCQmo5zNtN3harXSa6Hv6VRPnCQgJb++fauz5yAgMwDwttWPeXh4oKj0oGAynbIzzBebbdu1YkG3aZriBz5VXdOqmtALuXpxyTiboKSiKRv2+Z5xmlCWB/qmR8qeQAqSLOLF5QXnV5e8++UNUvrEgSRKY1TbURQVVVGQH49cX10hfZ/AE8jgC7LZ
CM/QhQTKOFoPv7oe8uIJo6WUsBxuLdPjsq5BFFIfl6mWNO5KWk/ige5DDIcCJyVo3drM0UwW6am7Goy6pwWu2mxxuGs0TUPXmF6X9DjNOu0hhQDvuaBkj+ZnuqxRSPD0l/C1e0wUhCgh8YKIqu7odzor0OcMl1cvmEx0z8wPJKN9SpToJnivdA9ASokfhCRRRpJmSGCz23F/d8vjckUSR0zGE+ZncxCCVy9gvlgQhyG/3d5okw46yrIjPx6JE01rETJgNB7rzCOKWD486tIzyPR1aGseH+85W8wRQg8UDkaHbDKdMxlPSZKU+/sNP/70hiDQopKBHxJFGVmWEkYBTdtwe/+WP7x+zcuX19zd3dG2NXXdcH2tLdPevHnDfq8pO+uVlto5HA588803eF7Ij3/7G0LoSxyGIdNw7LLctm2pmxLPFyzOz3j79i1xErLbdzw83tF2tSulqqpiNI5IkpjttiOMJFk25vp6wWiUsVwuOey1ge50MtX6a13Hz3/7G03TcGlcz+u6ZjqdslqtefnyFXVdMR5PmE6nPDxo0cswjIiimDwvycY6y8qrkna/Yzo/c4DT+XxOVVXstzvyyZSr8wvaquJ4OHCItEHv3d0d0vc5lgWe7/Pzr7/QK0VdVdRty9XFBbPzBUmWEqcpwtNB6f72lsMx53xxye3tLV2rOJstSOKMqmwo8orXr19T1bW+f6R28KqqCs/vSEdjzhbn3N7ecnN7S1PXPK5WHPY7erT9nR2UBUFAID1QZpjm+cRBiBd6LJf3hIGiawuOxzVZEpghW814FCL6mkkT0VQtcRqRRClK9ByOK6p3R7pWcNyWBL7PanXLcX9A+pK6bFB9z3x2RpLFeIYi9tlgdtozswFl2Oy3ZZmdKNoFb6cy9jmnmLJhgDp97ud6dOoTgU0PCz6NZxPDxwx7azZQyeceAk+wkEEgs691kq09ew9CDyw81esenycRvqQoKrqmJ/C1BHYQBEjh0XYNeV48GagqQ3j3PLpOg1QnkxlSPmmcPd4/sFqt2G63horUkCUpXdNyMFPQtm0RwIsXL/jDt695WD5or8QWgigjyXzdZzRcyziMSeKExeJ549nzIl6+/Ja2q1FKEIYRnpdTFjlCSGN+ckcUJbx+fYbqoSgql9FUVUPXKcJIkKUJTathAaNxxna7YTyeoJSmd2lZHa2n9cMPPzhisZSSyXTG+fm5YwFYnqp9r9boxP7b9lKeJoIB43GqzX3PvnZletumTKc6KOr+kFa5sNmTvc5Xl1dk6Zj7+3vWqy1hECOFT9voDET14MmA9WpLkWtQ9sX5FfP53ExLH+nB8XAtCv/bb79lu906WtJ4PKbve25vbnh80LJFZVGQF4We2hpTXQVPzmDo1snucCDJMgLTa/xwc+OGXl3X8de//pWvv9bv/d///d9p29YZhPznf/4nf/rTn/BMt72sazbGjPjqxQtm8znL9ZrUTCwrg4ss65qZp6FHlqcc+YGDZPi+r+0aleL777+j71vyvERj+CR5XlAUJUo1pGnC3d0N7979hlJweblgsTjTMuhFiSDmuM8dPbJtWw6bPU1bMR1PuL29ZTzO8AOPsvwCNOO0P/apQDIMdPYC22nGp/pPp68PTz0TG2BOBwuf+v65n52ez6f+Zo96xu60C+B0mCDkpyWMTnsD9jrYRrC9cSU6y6mwkim2hBUO8KeUQnraCEOPoSP3+n3f0fc+RV7w/t1v3NzcIKU2tbg4H/NP//RPdF3nRO2k8KnKhvu7R90gbWuKsqIbUKz6tjMiigIvDFFCECUpdVWxNJLYcRxzdXUFUnI8luxvHwz1SpCmGU3TGllk3USPopixH7iyo64brelVHQlDqTmURU0Spxz2RwMelrx584bZ7Mz1VM7Ozthud47j13atw2nZAULXPZG/7f1iNeytzdwQGqBJ9S1hGBsoyMqonGzw/ZAwjFku1066+/XrP7Ldbk3J5LtGu9Ws8zzvmbadFShs2/YZS2E+n5OmKb/d3rrP2WqSWWrO4+Mjs9mMy8tLyrJkvV47eEnTNBwNnGVhFCrs9ND2isIw5PLy0vGCrcGyBejaprol79uenpWpHhkKkw1ClpYHaBXfzcb5CYxGI87Pz12rxr7fOIzculdKOb6pqARtC/fLB9I0xvMCpIS2VVRVQ123ZlIqmU5n9L1ASri6esFiMTfUuT2PDzuE0Dzg7Xatp+xNjaIjyzTQdzabECfRl6EZwwDxucAwRGYPF7kNEKfBZvh6w6bhMLDZ34mPxpj//48h8PKjAQFPkAsNs3jKLF3zcgD5OH0Pp0F+mEna7KCSHmmYIBTa3KSuDDctMlxHwcPDiiyLCaOAx8ectm0clUv7QWpeYV23xqFIcH19zevXf2Q2m7Beb4whiMaRCeGhVEFRVOwOe0bjMR2KtukRsqXrSupK34xJkrBa79muN3iWi7rTdmx5XhPFGecXc7bbNX/72y9OFQSECyaWT2l12Oy1KcoDRakbl+fnGhD72283jEYjLi4uWC5XJEnK+fk5IJxVmhYc1NzK1WpF3bSMp9Nn+l4aIqGR5cN7bFgF2DF9lqUURe42GovNevXqFcfj0TkO2amfpZTZIUBoDGy++eYbiqLg6uqKsiwdHtCyAeI4dhpnZVk6A5bYSPTc3d0xGo24urri8vISKyZghz6aOH90KhwW7hIEgfET1eyEYeN9NBo5FY62bZ1jlQW9alFNXcpayIsNeMvlEqsWYvmlSuns1GqK2RLSMgIsj9ip/ZrNuIt08Doaj1MrA6TbR5I0yTg7mxH4IW3X0LU9ucwpih2Hw5HVcstonHF9/VLzSCOts1dXjRleCC7OL/A8uL+/wfM8Xr66NtP0JzaGJ32SJHVr9KNgZqcDH/WRzA1k9YmGWcmwbLRR/kuL/zTgPc+s9FRwGJROoRzDctRhzMxOjlLPnvsUjHCBbBg8rXqADWadmZB9LvMbgg9tYLYpvup6Sr8gMnzPOI7MeDxC0ZkpkJ0I9waeoGWntcvQlsfHDWEQEYYRWTbm6mrMq1cvGY8nFEXFmzfvCMOQq6sr+r5juVwZRsGILB3TdIpjoSdX7eNS72oGArE4m7Narnjz6y/4njZlnU2njLORhp0UlSGVd6RpZq6Pz/GYu6xEQzSg61p3L+hpY4mUgvF4BGgnck1V6RiPp+R5RZKkvHr1SjvCez6+F/Dh/Q1ZpjMk2+KwIoyWlmM5eEIIl4nZhWmFAi4vLw1UAd68+dVpk9lgYR9vs7kwDJ0w588///zE4hCCyUiXo1aOyE6cLXDUBljrzr5YLFx2tj8cHGK/LEv+/Oc/s9ls+POf/0xVVcznc2cObAOODYaWjvTixQty87O7uzvqumE+P2M+nzthxeGUd4jwt9NHO4yw3pcWUrHZbLi4uHBa/lYsoWkarq6umM1m/Prrr44naTfqyWTCxEwj21rznnMjK26ZDnaD88KA0XhE1ynKQ0XXK4T08HzdU81GI8Iooldw3B9Yb/Z4nkApPcUVCra7NUV5xA80EH4+nzn4yXQ6NdJH3bPk55PB7LSUG2Z
VFveiJ0aV0zOyjzkt3YbHMCjYgHYaOECZjONj/8tPZYvDEa1mIPTPgpV7nPnCvLbVThsGs67r6Lvuo/MfBi8LWByeu02z+7ZD9oLUlAiLxYLFYmE4mRUIhe9LUzbUSE+f1Xq9ctLdk/GYNB2TxClBGBh56ZjHhxU3tzdcnJ8zX8yJo4TD8eBoP3GUkJda7mV72LPb7TkcNF1ESsFkPKGpO5qqpm466kbLSmejCUGgpWV2h4J3798zP5vw9VevEQKnVhqGPhjoiZXf7rqWyWTMeJwRRQFpluJJj/fv33M8HvE8z3HxgkBPTH/99Vfu7+9ZzM8Bi6qPXIbUdh3748EFEnt/1XXNdrvl7u7OqcMOnbXt4+1jLei0qir2+z2r1cqVVEOfx6H1YZ7nHA/amm1iqDNWWaMoS8ZGzkcM8GFW7NPKSCugrCs3sNDvv3L3i1V+tYDZxWLhsr7RaEQ6yp4JFlqSu82oVqsVi8XCab1ZalnTNK401EMJ4QQQoihy0tO2TLZrxHKEbXZnp8dCCKfOYfmjsQHNhqn2gm2NmYj9O1JKyqbm4XbJIR875RN7zbMsI8syXr165T6Xug7oO12C2swyCkI+fPhgSupzd75DcxUrTPBFCaDTzOf05xb2YLMSm5Gdlp3Dr2FZOuyP2ZvI7jJPQfE5pu3jLOvj/p17PfHkKvUs+JnnWHwYRqZnKNUzfI49d/u6tkFtM1en92XeUxAE4AeMkhFnswXT2cThq+xN7/nS7aRCQGeyOt1E16CVy6sXxEmGUnr6ujvkrDZ6AFDWNfu8ZLn5xagqBIzHE+I0pahrfru91zpQTc3+UHA4FKZPkuEHEVXd4PsB8/MLlILp7Iw4GbFerxyebLVc40nJxcW1OSeN0o7iGCEgSeLB0EKZ6yJou5qiUKA0LGK93ppBhuB4XJsyrmK/OzAajZ2ixVdfTUy2VNC2Pdk446uvvuL29smhyJaEw0AlxBNQVCnlSiMLFI1jQVU1xvouNEGlIUkyfD+kKCp8P8TzAgd8VUpnlHGaIAMf4Xs0fYcMfLzOp2xq/CgkCSPOLy/MhHNF1Wrc4dyYkPzHf/yHK+EeHx85Pz/n7u5OT29NgLXkczuhHYJjt9ut68lZ6psF8VrQ8HBdWIu3IeZqu90+K/9sz82CtocaYvZnSimD0dPX12L9LKjZnvfZdPZEzDfrx/b1jmVJHKeMsglS+Oy2B+qqJfCVgwHVVUtTd3gyIAoT2qanKgu6Vos/XF9e0LY1X331Fd9++y339/cIIVy5PhqNjKCm/2yj+GQw+1y/C3AoaVte2FLDBpDTQDYMPsOg6DKpwYeidwv1PAgNjs8NFE5ff/h3npWjJph1XQdCPQP/DR93ev42MxvKBA2Dmf1APeEzGk1JsxQpPFeeQG92uoS2rZlMx8RxxG63pSxqR3Ha74+EUQxIjkdtdrLb7QAYj8fM5+eOs6eU4urqijCMOB41VeT9hw8oz0MEPgi9MKMoYrFYkKUpdVXTdx1hqG/CKElQwHq9oes6xuMRF+eXJHHEygwGwDhRl9qd3ZZ4GEyglKDoyfMjbduB0ooXh8OR29tbXr/+g6PbWNS453kmwzh3O3ZRaOjCdDZ1O3SSJEZJd+UkcdI0fSZyaGWV7ed+NCKIdvEvFgu++uorrq+vnUv6brfTgowOV9UanFpmCPaFQ6nb881z7apuaT1xHDvyuc0at8YU2oJ5pZR89913+L7vfB8sHcqJCxiakoOdlAXBwIty6EJl7z+r4jp0qmqaxl0Xz/O0uq3JHrWZcuYCnx0sWEUX25M8Pz93maL9sv1RTQWs3MTU930iP3iG6bRtk7womc5mTljSBk0raWQ15Cx20OrWnZ+f8823X3HYbXn11Uu+//57JpMJ9/f37u9YCS9NXm+fBfCPgtmwTPxcdmbTX3vRhhmZTfuG2ZINBENYxJAuNJxsnh6fCq7DrOkU9IrQzuS2/HDBSFh/TYPoH7g6DTFp9pzsa9rzt6WMBQnaoGhhAp7nEYQBnic57A+UfkkY+qRZ6lQNkiTCDzzu7+/48OE9Qjzh4rSm2EvKsmazWxkepsI3qPzAyGivNxtyI+/y/sMH3r5753anKIlRUrDZ7ygLXRbpckSiVMfZfMpus+VwKMiyGWkas99vSZLIAVn7VpOhJ11r6EJaMqbve46HA2mWGIpYgJbMDtyGBjrDlFKyWCyIjWnMdDrFk76j+0wmU9fnWa83VFVNGEZOtFH60kkmWTOLDx8+uB16Npu5hXh1dUXXdS5jSdOMt2/eEUURo9GE9+9+I000Ub9rlaEx9Xz91bd8++23/PDDD4ZgHvLq1Stm8wn/73/8P6zXWm11PJ1wc3ND1dSMpxPyPGcxnXB7f0deagrVy5cvqduGh8dHJuPxs57ydrvl9evXxHHsNMPsfWUDkg3a9jmlcTufTqdOlNCWfb7vM5vNuLu7czaCw/6hzdLsfW9dw22gs1minR5bIcYkSTT7wPcd88JmhOu1nigGppIRQnB+fk4SRs54JQgCXrx4QZQm/PLmHUWtYStRmpCaLA5wctuWHhYEAePZlKbv+HB7QzZKOO53vHh5zWq1cvppSinTa/Vc9t116hlN7rM9s9OJow0Wtkk7LB+HwWv4fBdIBo97Prl8juPSgeJJQuZTgez0b3qDC/y5DE1KiTTAjDDQrHyheBYE7TlYjNfwdYbvYVhm2iDkykygLCtkmJAkAWEYGR/Kmq7bURQBcRySJlqkUO+esN/vWK4euL2/Q4iAoixp6g6EIQV7gr7vaLsGBERxCAqqumS33VOUBXEUE0TaD7HvGi3dHXp0fUNXN7RtZYjR2leyrkv2W+24LT2IkxApJHXfotHxHZ4n8bwYz9OczKLMnXepEDAej0j6RT+RAAAc2klEQVSS1Khl6A0uy1KSJMX3ddBwn2vsmwa95PHx0U3vDocDQnhu5y6Kgk7pxrbFgGVZ5qzn7OK+urpym4gQWo5nOp1SlVr/Sp9L5iAVtnyyChdlWWq1V6N8m6YpZVWyXnUkScbj4yOPjxpYbPXnrCyQVs6Qrry1clO73Y626wj9wIkftm3Lbrfj8fHRwUxGo5HTx7eBZDQa0XYdRaV5le2g6rEEb7vx3t3dOW24YcloM7eXL1+y2WxYr9dOB83en7YPOQSE29e15tC26rLDF+0n0ROZIcJsNmO/37OpdR/yH/7hH1gsFtw/PPDDDz/QtD3no8xxWu/v710LIE1Tvv/+e3766SfatnUltQ262/0e+o44Spxyx2g04ng88vDw4NgzT22pp7X62czMllOnGdppL+p0MnlaOp4GltOM7bQ3NQxyw+d+KZh9Clox/LA8z3PBzJHJ++dTVbvo/EGAOz1v++8hH3WYGfaqx5OS0XjEZDyh6/T0qOtbLi8XXJpeQDZKiSItw/3hg55spWnKeDZlt8s5lgVt04KApm51f031yFziBRLRS3bbHXl+REjJdD5DIDgc91R1RTCQFlZmwYS+TxyFqK6HvqPrWva7HQJdwqZxTNe3hH6KEC1NU7Hfd09QCKHchFabXejdv+
u092RZaBzVYnFOUZRstzuzY+vg3dQtq9XGlT/aMMWjaxVC9GY8rzgWB8q6NGWvlk6yO/nLly85Pz931Bzb2wHcVDEItPuPLcVGo5GTelZKOQiG/RyTJHHlnBB6g5jNZjwsH1BCa2vNjTO7HRZFRkraNriLqqJqGuqmoWs7vES6qafNGh8fH3n9+jWPj48ADvBrg/RsNtNZy17LfxcDyIXlL9r7dDabOe9QO9wA3MZugcVTo432ZGqjhyT2nrVZjjVGsXi+0cg6gdXuZ0IIPOPWtVwutfLtmQYKv3nzhtvbWyJj5Lve72h77aGphMIPPeptxWa3RqwFf/3xP1mv11xfX2tISFPx3dd/0kOXu3tmk3N++eWN8dUc8+HDDfP5nL/85S8OBzes7D4bzOzxqd7XMMCcZlqfe9zpaw7Lt9Pf65/ByY8/+5qnQNu+712j3/7cfmg2mLnzVKfSRU9BdfgeTzPIoRKt3Znt730pieInIrIOeBAGEZ4X0vdWwFJQ1y0gOD/XAaAs9fStbju6tqYx0i1t12juYFujZZoTuq6hqkt61THKUqZTTQp+fDwymWQsLhaMspFDyUshGI1SxpmWYw78GMx7CU3ZkqYJRV7QtDpT65uGumnd7m+13MuywtrubbdPJiVCCMpS05Y00ttgv0YT4jjh/v6Bw+HIeDzm/Pyc4zE3Tt8hZVEymUwMvejI/rjH86XbrW3AvLm54fLy8plyg5WUiaKI29tbFvMFDw8PrvyyQwKbDVmak+/7pldXOF2tyXRC3ysO+4PLoJRSXF9fa9qUea5V0rANc5tBKqDteye4aDFhcRxzfX3N2dmZI4oP2zH23m3MBNMqitjycai7Z+EY9p4byvzAE5k/iiK+/vprptOpg4D4vrbJsy0fu4ZsMLODONvzs+cBTxPLLMtIY43322w2rt+nM8sWJTSI+lgczETbnndjMuOG7XbP1dUF33zzDdvtGiE85vMZTfOkwHM8Fvh+SJJkBEHkbA8tMd+ubd9/QkV8ks70qankMDicBqJhQ3wIufhUI3/4s0812eFjWMWnDhtITgcIDq0mniAgtvmPmRDqMlOB96QAMOyZfSrQDt+r7RcOP3zP8/B8j76H/e7AUR4NqfxMSxc3JT/++ANJEvHq1SsuLs/xPMF6bQUIdaDv6kqXk0AY+kZ3S6vwVlVpXMcDZjNNkNY4N21rpv+WR5qExKHHsS1pqpy2aajLI7u11rpKopgkjpEopPDo2pLtpmB/2LtSLAxDJpMn53WlFE1Ts16v6HvlemxJkjixvoeHR5aPa9pWg21n0zmeF7DbHdhstuZz0ni2qqwJ/MipmZRlRVFU1E2DH2ihy9vbW25uboxBsuYb/vWvf9WNYqNPf3t760b2P/zwA0msm84W6FvXemB1d3dvCNIe4/EEKSXj8YSm0SodWrLJJwp8lssHwjB00koW8mCxW3azGlKrbH+q7zomWcZ+v3co/Lu7O77++mt+/fVXNwixrvZCCFf6WVCvXRPWcckKFtogYwdASikXtK3MuB1s2MxztVo5KSNbng0leez9bGWxLRTGKoHYrMze67ZUtOcshHAOVFVVsTseGJ2NHVi2rFo26y3rzQrVw2ishQb++S//hO8F3N3/RppkPDzes3xcMRmPWS03/Pm7/8rbt2+5vbnnX//1X7l/uOV//I//i6++emkya2OF+KVgZi/YpzItGyROs5Xh44dKE/b7KRTDBqJhwHyeCX0+mNkL+6mSUinluJXDIKeUotcUAJdZSUOMH6LI4Um4Dp4Hbvt6dhezfQW7c0kp8Y21WRLEJMZh/HjULkDSA98PAM9Jo6xWK+7ubumNGOB0OqVqGzwpCUPfZRy2X2IDjT1P+2/bt+v6lqYuUF1N24Ine7IspO98VN/TNDWCjiCAMBRIoaWig0AaH8WWIMDc+BFxHCLEkzyMHb9bGEnXKrpWMR6lbhCgp9E6QK1WKwODKAZeo70WbzRNf6vUqp2cfJq6phc9UaSDpV3ggBs22cmiZRDYa6NVaLecnS2cLZsVGLSfn52Ibrdb51A+Go24vLxkuVqSpAlVXT3TsLdyOxahP5vNHILfrhkruX44HLhcnDOfz5nP52w2GwfL2O12jlZk+2B2cmtlx+u6JjCNcZuNSCldA9+ev83ArF6fhZfYTMkGMqWUA/cOMWSnOE+7KQ8TBLtp22DadVrHfbPZEIWaKXE4HLi7u3N9y5cvrynaiqI6sj8caZoaBEwmI5TSDJ88byiKI1XVUJY5i8WCsszZbNd89eoVWTRF4HM8lnRdw+Pjks165zYO3Xts6NVzLcUvymZ/KoANf3/a/zr9vQ1kw2A2DHr2wj0PHGBRvadZ3LDPNsS4DYPhMJscPtZKe7jf83Ep3fdaw/xLpeawtB2et4amdHR0yEhbt9V1bZqnLWdnU6N7Hxoxw9+QUvLVV6+ckufN3Q1RFDFfnOH5nut3WBClH2ijWtsc19nF2OlS7XZbxqMZvoS+74hCn8V8RhLF+GYwIxAkcewyWCtFbRvSxzyn7XpqszgFkjjW6b5evPq97ncHw7XTHgJKCaIoYTabIATc398bCaGRyeQi6rphuVzpMquqkdIzZSvs90cWizl1U7HZbxiNMicXdDgc2O/3vHjxguvra47HI+/fv3ecRjttm81mVFWtVSekloeez+fUde3wVjazsFZz1iZOCEFTN9SNhlmE8ZMgouVdDqXVn7icHuv1mrIsjaCgtrWbz+cu85lOtdv6y5cv6fue29vbZ8KGNruy95FlodhNyn7e9v6zZbFNPOzfyfNcezOYgGvvW+u3YDM6G6DstHgoSmo3aDvJtNlh13V4QhDFMZ7QRjR7YwdnMWdWaig9y0AqkAo/1N4Ro1FKXbccDjv+2z/+d9q2Bin4L9/9kYuLK+7vb5G+oG4avn11wf/9b/+T/FgynY15+/Yto1HGP/7jP9K2tdlMKuqmouu+MM18dugN+OMel1LPGltSCBgEDvuhDJH5pxnOMKI+7QJmaOD+zPPppA1Qp3264Ws/O/3B+dBruW1pA696/jiXiZ4Ex9O+3BAgOHyvfd9rORJfcL9+RC0VSRwzHhuyd9/x/ua95juORsxmU6Qn2G51DyVJMl69eIUMfPbFAU8IsvGYKPBp+05PJJuaqihRKKbjMUmaEng+hyKnLiukEkRBTFkVrJYb+q5xgnhZlhH4PmezGUkUcTw+B2Xa5naaF3Sdom5MBis9uk5r1t3f615QmmR4XsBolOD7IU3bsd0ukVJyPOb4vue8F8JwR103CCGdEw/YQZI0i7U2OKwj09mUjp5edQ74eXFx4VRFhdBy0XVVMclGrB6eXIWOYg+9ctpa1m3+4uKCsiyd2KW1irPii1EUGRf1iKoqmYw1DjBNM4SEkTGH7pXip7/9jfv7e0dw9jyP1XpFVRZcX1/rZruUzCaaOB4EAReXlzRdSxhHLJePTGb6vDzPo6wrkAKkoEfLwjdth/Q8egRt33MscucVEIQeeXF4ggh5oVsXwzUyn88B7dNaNw1Vo69xEIWaqSKfpLE9oUG1f
dvSmuzN93ykkuBBahR97f2/XW+eKSVLKWk77SdQNiXz5pzJbMx0PHOfd5amnC8SpHzBH//4R/7t3/6NIj9wNltArxBKakWYtuPtr290iduUSKn165SxBPR935DfS6q6pBkIdH48zRxokulGud7l3e/t4u6tmKIJCBZAKnWcC3xNntZxoKfvnqYPljYkBQhpA4ai73qdlamBQKMLKgIhJI1prHdNb2Kqdmvq+6edZXi455t/+0HgHiOljyZqa8J2GPpEof+suX86ybXPFeIJpvFUkvccqwYhJX7o03o1xxbKvDQ9u57ZZIIfBBpNrrQQYBBMiEONE3pYLUEKwigkCVPm8ynL1QN3mzVSKMriiOdLfC+gaxp8JKJThF7AeD6m7dBUpn1nms9zylLwuLynLktmswlpEtOaEsr3E9abg5GYntM2PV2nNLzC09I7io68OBD4EeOx9kQ8GKybBT7mVc5qucaXIRfnFzRNy/FQkYuKttPTUb1hSeq6oaobpNDwjaqq6TrFbn8grwuQWj0k8H38IOCY586yDKXwECxmc46Hg55KIzju9tRlhUKw2x04bHeMx2NeXF7poIOgKStEqBBRjOgVdVFy95smMk8mE+5v7tgf9lxeX1JXLZIaz9fleOAFdH3HzCiw9m3Lzf2NaVs0JGnM337+kcV8wYvLFzw86omrBuzGvPvtA/tjgRdEBFFCpwS7/YFdXqCCgN4057fHnKJomZ9NKcqa7f6AFIqzxZymqdjsNnqwMM6QCNq2o29rjvstTdORxjF9B/vtgThN6Ht4eFxqvUDp4YcR2+MG4ZUEQYQvfHLja1rlejoc+hF0IISEXlHmT59daR4TxzF5fmBypqfNURpzNDCa68UlYRgTBLoaaNqarmgo6wNBEPDbL2/xOwh6j3KXk6+PWpG26Hn47Y4kPFAWW/78pz8gpeTl9RU///wTkzMtY1SUNe8/vNdUKQPY/WQwO+2Bfe73pz2zj4LdJ373qezp+dG7oGQpTUp9GrLx8WFxYM//nv1ug5nFwQn1MU5O/+5j/bIhzONTZfjwPdWqwRN6WtTJnrqtoEWXucBus9G9hesXWie+qtnv9uzVTgNje+P/WVYcDjs+vIeyzGnamixLBpleT1U1jjdXVTVx3bHZHNjsjtRNT7k+cP+4oWlqojDgbDbl7m6JlIIo9MnS0r2fw2HP43IDvSBLRkxnyikz7Pd7OmVYDKMGhe6rVVXF3ozJlVII40NpgbCHwx4jpIzv6z4gCKIodDpvRVHRtr37XIq8oKUhjDStKE4TJLr8aJoGKQR1U3HY7zkejtSmRIujCIHAjyIW83MKg72yGY3t+1kGhTWUARxtR5fyPruNLulmX0/p+44iz/ltt+dh9UAcx7z+4x94eHzAX/mmxNKlmh2UCCMcWpalJop3Wm0iThOEJ9kdtNN7Xhbsjwc6pak+ZV3TKQiigN0hZ7PfEccR89mEOAlothV53tI0W3xfsjjTDAoPaeSlNNxjOjnT16muqVsz8AIQ2v91fr5gMp4wG09oq5q721s2yxWhHzCeTqiqBoFkMp5otydh+8Xes4pLCJ2pRXHohiMoQSAD6qJmt86p6oK+7/B9rTQShB7v3741n2WICAVN3dPVHXTQq57fHt8biMnBKB3/xN3dHevdVrt8+Vr2ars/Ig5f0DP7XCD60gIelmme90TE/nv9t88FO6WGjABx8pxPB8mnDM4fVsBPva7Bv/U5P2ceuNend8HsNHDb8vhTfx9g+FP7OIm+gQJPlzcvL684OztjOpkihaAz9LDAQB+kF9B0jQGn1pRVwXQ64+JiwWQ6Ic8PBgPUkOcrR+1ompZeab7ibH5B2zXGjOLI5eUrzudzuq5B9Z2WXQkD02TX5cZ0OtHlYA+jbEIY675QVReUlZZdieKYssxR6Elr29YUxZG6qU3fSdE0FduqRkupd0CPkBDHIZ4v6NrWGH6YkXteGhmaGD8M6FQHnsL3PZIoNhlATpHnRGFIaiR2rFilbYJb8cHD4ehQ7Zb8bMtTO8WzgVgpLZFjBQ37vmdxvkAIHIQB9P202+/YbDbMzrRuv1Wu0G0UPViwtDRtn6cngw+PD1RNQ9M2WsRTCDf9rKqKttMlpe2PKSXNICFHtQrf8wzbQprpsa4glHpi29Ap95rz+Zyryxc0bUte6lLY9338MHCVRBzH1I3ub6lW36dZliHNOYF08JU0zSgrDWmZzXRpbsnjlrMJmrA/Ho/pmt5Qykr2+y1llZvfe0beSL//JEkIg8QNUuz5SymZzSZmI9BQHI349zke91RVQZxGVHWhxRsGxxdVM4YTw2eLdvCz02P4mOHzP5VZnTbYbaB6VuaiXHamnyM/+Xy7+0r5GUiI+XKlYqf7Z6fTUCE+PofTyeiXjjiK6XkaoweeT5REZGlKEidOGnu72ejFgOaohYH2cNQZok8YhUCCHwSMxwmLxTmgDNRAummw73tG9aClaRVxklCUJcuV7mF9//33fPPN1/hS8vh4T5alCBRxpA1nlcm4fF8rXBy2B4oiZ7Ves9lqf8og0By8XinyoqTtWtd0thM0bQAMyiiLBkFIHEfoj6tnMhnR9frGTdOYs9kZcZyS5yXacVyboLSqI4gC/MBnZHpfy+XS9akkQpeanm7kV6b3s1gsEFLS/XZDWTWMDfWnN2R+uxFZus+QU2yZB9PplPPFgv1eT1gfHh6YTifMZlMm0wnSlxzzI7/88ovLvgAnS2Qb5ofdESl0k71pWwJw0AeLj7PwBwYNeSm1q/x2uyNNUjw/IwwDqqqmLBt8z+P6+oowDOhaPcmrq9pl/XGckGUj5vO5zqiPB7emHd5SavzearkCegKpKYmz8QShYLPbMh5PORq3q67vzBCrYzRKnR5bb6bjeZ6DUE8uXG1DvtdWhr7vk3qpuU801EOhteXSNEP1kuVy6YJpmo7MlFc+42BaiIu9hlaJZMi8gU8EM+c6dJKhfSqb+lzTfRjITpvpp8OAZwFnEDS/FDTs1HP4QUlpkftPAelTh3p6kZMg+NyJ/TQgfg6mcvrd8wJU1+phg5SEfkQYxHgyoO/h9vaewA8YW6G9JMX3tNdg07SkWUqnejxfmgxVN2u32y2Py0fattI8w06fjzbX9dnt9mx3Obvtjl/fvqcoc/7lX/6Ff/7nfyYIfHbbDS9fviCKIjbrFUHgOxzV4bCnKHI2my33N3fkx5KiLLXlnQHVxnFMa6addnpoWQaW4weQJilZnLoAKYTC8wXT2YiuawkCj9FoxGw2IYpiPF9QFloiCCEpC33Dx5HW0b+6vCSOInLjjr6pN7qBLSRxFNH6PgqQnkeWar20INQLa7fb8f79eweWtdZ0m82G+XzOfr/nxx9/dKqtURSR57qRb1VhNaFZTzmbtmGz3qBQBF7g+I1NUxtGQqkB0tLT5xfH+F1HYgLfztivWUB1XhrHoe7JTayqGvM6IVLqhaqzn5ZkFBsgr6bdRYGGXPjCM+DfmuPx4MyXj/kRhHBqHE/ZXcJ0NiUJIvqm5f7ujiCKuTy/YHFxThDGrAbqHJqS
tUcILQllhRxBK3wEoS63oyiiKmsDacnIsgnSw2DWjrSd9hl1MJAWl1lrF/dzwjDkw4d3jpO7Xq+xvqfJKKPve/e5AE5SHT5DZxpOHe1CPs2sTgOPDYCnpeXpgj/93WlmpjPDIWD39HUkUoKST/QpHdCsokX70bk/e3+mN6aNesNnelae59F3zbPANrwep5nq6XsAaOoaJcQTTcQQ0i0KexSnzKYZi8VC35hdT1PXCKXldUajEW3fUzcaSJtmmvpU15UpqzqEKA0dSE8INRYsp2u1WON8Puebb//C69ev3W4W+IEmnPc9RZGz25SUBv1+OOyRUpdhZVUhBGSjjDAaTMqkJDQjeLvhxXGsR/X+U5k0SjKyRJcPVa0nr74fEoQeodB+nH4gQfQIoctVzQPFeJqGCANIfnx8pDMLx6q1lHnBZr0mS1Pk4HPZ7/cGI/jkoWCnzEN7siRJ3HR0qHu23++N03bJt6+/cTZuZVmy2axYb9bEaUyvesaGldCh4SBZppHp1qg2SiLddxKCsqqQvodnpuC25G0t4LlpqI0CiA5cvSlFO7RVdUCWxKRJRBxFeJ5w8tNgqidzj9ogY+lfXdeRpKn+HM39bQP648MjvpB46NLy6vyC+WLBZruhrp94nhbd33WtM2Wxw7Eg0CBsP/CcHl9dNYjeGCq3um3hStFAtxp2u52m6+Gj1HPhV7sO7d+tqoqLiwseHh6MwsuaMNZDp8BMou3xRWjGKQ7rUxnMcNE/Pe5jPuPfb+DzLKg9L+mGz3uabto3/pxs3j0LZC4wm1exr2mNfO2Nf9ofOwXMWpDql4KzAuq6xQsCvEDjr8qyBqUIg4AwCEniFCl98mNJfixpTO8mjiKy0YhmuabtW8pKgxvnC61a2jQ1YRgghJ7cNnWH7wujHKBvsrZr6ZViPp9zfn7Bbrfjw4f3SCnwpCA/HpACqqqka2tub29Yr9f0fcf5+cLhuoRCN1k9YQJoTVn29EpnWRcXT36NbddRNz6ep7O0OAyZjEYGCAu+L8lGCUkSmaCly2gpFX4AqYwMod3XPM4+xQ8i2laXeaUBCldlRRJFBCOPqizxfB8ltFKIEIL98cBmtwUkvh8Sxhrxnowy8KTR2ipojcLvaqu9G8ezKUopjmXBbDHHC3zevHnjsG22jO9VT5Jpc97tfuf6ZUmS6CY5ODkiBAMp65zWPLaoSpfN9qrX5XvT0J1UMlIGhEGAkBYFpafb2iNB05wskPWwPyARz9ZBURT0SpHET2oceVm4Pu7j4yNv3r4hwGM2mZDEehOVQrBer2majta0D/Rno3ttVrLaqsUEgecgPbbH2HcK0Qsj912gMVBaKioIAgd96voOKXS/sW46I6wpHKRG0dF2NaNxShQHXF6ds1yv6bqGKNIu7tIX9OoL0Ay7sIfl4vBnfz8oPS9LT79Og8zpoR83lCH6+Pe6h6b/fwrMfd7/GmSYJqB1re419YMAOXyfchAo7TnbG+2UrvWJk3ffBFqUsS50MzqenWkpHIv6zgsC3yczWlGB56N6xbE8goSm0TtfaJRk+x4CP3I9IBtYq6pyWmAA19dXHI9H/tf/+p+MDV7qeDxw2O9A9URhwGym6UFS6GxQN461we1us6cqNJzC9leSJEFISV7kdEppfFsQ0Pc9y9XSuYnbjUFTnHyiyCdJY9I0Qbv15EYKKTG8w9iUy4XLypumohUtaZrpm9qUOV2rDXotadq+f52Z6kW4Wq2Qnk+Sjihr7SCuyfFafsb29qxKhpXlsYRzC6ydTqdOn0wPSAK22y3vPrzTjXJf97gCs7kcDroEruuaUTZ+hpwvy5KmKh3n8un+eVKZETxVOErpIJImqXYfchgzpTmRqkUr/E5IIt3DUmYarAUCGofUt5u1bbLb9Wc12a4Xl0zHY47WCd31Ej3oe3dt4kSXqcfj3vWubCVmKxC32RsxT/3/Hs+3rBnQUvG9C46n60sIQZpqWpTtLSul6WAWWPzixQuCSF/Xoq4cTxNA/L2G9u/H78fvx+/H/w7HF9KM34/fj9+P34//fY7fg9nvx+/H78f/Ecfvwez34/fj9+P/iOP/A2MP1qPnCw9LAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "%matplotlib inline\n", + "# Validate the fine-tuned model\n", + "\n", + "img = mmcv.imread('data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg')\n", + "\n", + "model.cfg = cfg\n", + "result = inference_model(model, img)\n", + "\n", + "show_result_pyplot(model, img, result)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "MMClassification_python.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.11" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "31475aa888da4c8d844ba99a0b3397f5": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "520112917e0f4844995d418c5041d23a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "863d2a8cc4074f2e890ba6aea7c54384": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "8a8ab7c27e404459951cffe7a32b8faa": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + 
"_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9f3f6b72b4d14e2a96b9185331c8081b": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_be55ab36267d4dcab1d83dfaa8540270", + "placeholder": "​", + "style": "IPY_MODEL_863d2a8cc4074f2e890ba6aea7c54384", + "value": "100%" + } + }, + "a275bef3584b49ab9b680b528420d461": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e310c50e610248dd897fbbf5dd09dd7a", + "max": 14206911, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_31475aa888da4c8d844ba99a0b3397f5", + "value": 14206911 + } + }, + "badf240bbb7d442fbd214e837edbffe2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_9f3f6b72b4d14e2a96b9185331c8081b", + "IPY_MODEL_a275bef3584b49ab9b680b528420d461", + "IPY_MODEL_c4b2c6914a05497b8d2b691bd6dda6da" + ], + "layout": "IPY_MODEL_520112917e0f4844995d418c5041d23a" + } + }, + "be55ab36267d4dcab1d83dfaa8540270": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c4b2c6914a05497b8d2b691bd6dda6da": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e1a3dce90c1a4804a9ef0c687a9c0703", + "placeholder": "​", + "style": "IPY_MODEL_8a8ab7c27e404459951cffe7a32b8faa", + "value": " 13.5M/13.5M [00:01<00:00, 9.60MB/s]" + } + }, + "e1a3dce90c1a4804a9ef0c687a9c0703": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e310c50e610248dd897fbbf5dd09dd7a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/docs/en/tutorials/MMClassification_tools.ipynb b/docs/en/tutorials/MMClassification_tools.ipynb new file mode 100755 index 0000000..ee87e71 --- /dev/null +++ b/docs/en/tutorials/MMClassification_tools.ipynb @@ -0,0 +1,1249 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "MMClassification_tools.ipynb", + "provenance": [], + "collapsed_sections": 
[], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.8" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "XjQxmm04iTx4", + "tags": [] + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4z0JDgisPRr-" + }, + "source": [ + "# MMClassification tools tutorial on Colab\n", + "\n", + "In this tutorial, we will introduce the following content:\n", + "\n", + "* How to install MMCls\n", + "* Prepare data\n", + "* Prepare the config file\n", + "* Train and test model with shell command" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "inm7Ciy5PXrU" + }, + "source": [ + "## Install MMClassification\n", + "\n", + "Before using MMClassification, we need to prepare the environment with the following steps:\n", + "\n", + "1. Install Python, CUDA, C/C++ compiler and git\n", + "2. Install PyTorch (CUDA version)\n", + "3. Install mmcv\n", + "4. Clone mmcls source code from GitHub and install it\n", + "\n", + "Because this tutorial is on Google Colab, and the basic environment has been completed, we can skip the first two steps." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TDOxbcDvPbNk" + }, + "source": [ + "### Check environment" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "c6MbAw10iUJI", + "outputId": "8d3d6b53-c69b-4425-ce0c-bfb8d31ab971" + }, + "source": [ + "%cd /content" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4IyFL3MaiYRu", + "outputId": "c46dc718-27de-418b-da17-9d5a717e8424" + }, + "source": [ + "!pwd" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "DMw7QwvpiiUO", + "outputId": "0d852285-07c4-48d3-e537-4a51dea04d10" + }, + "source": [ + "# Check nvcc version\n", + "!nvcc -V" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2020 NVIDIA Corporation\n", + "Built on Mon_Oct_12_20:09:46_PDT_2020\n", + "Cuda compilation tools, release 11.1, V11.1.105\n", + "Build cuda_11.1.TC455_06.29190527_0\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4VIBU7Fain4D", + "outputId": "fb34a7b6-8eda-4180-e706-1bf67d1a6fd4" + }, + "source": [ + "# Check GCC version\n", + "!gcc --version" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0\n", + "Copyright (C) 2017 Free Software Foundation, Inc.\n", + "This is free software; see the source for copying conditions. 
There is NO\n", + "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", + "\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "24lDLCqFisZ9", + "outputId": "304ad2f7-a9bb-4441-d25b-09b5516ccd74" + }, + "source": [ + "# Check PyTorch installation\n", + "import torch, torchvision\n", + "print(torch.__version__)\n", + "print(torch.cuda.is_available())" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "1.9.0+cu111\n", + "True\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R2aZNLUwizBs" + }, + "source": [ + "### Install MMCV\n", + "\n", + "MMCV is the basic package of all OpenMMLab packages. We have pre-built wheels on Linux, so we can download and install them directly.\n", + "\n", + "Please pay attention to PyTorch and CUDA versions to match the wheel.\n", + "\n", + "In the above steps, we have checked the version of PyTorch and CUDA, and they are 1.9.0 and 11.1 respectively, so we need to choose the corresponding wheel.\n", + "\n", + "In addition, we can also install the full version of mmcv (mmcv-full). It includes full features and various CUDA ops out of the box, but needs a longer time to build." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nla40LrLi7oo", + "outputId": "a17d50d6-05b7-45d6-c3fb-6a2507415cf5" + }, + "source": [ + "# Install mmcv\n", + "!pip install mmcv -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html\n", + "# !pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html\n", + "Collecting mmcv\n", + " Downloading mmcv-1.3.15.tar.gz (352 kB)\n", + "\u001b[K |████████████████████████████████| 352 kB 12.8 MB/s \n", + "\u001b[?25hCollecting addict\n", + " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from mmcv) (1.19.5)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from mmcv) (21.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from mmcv) (7.1.2)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from mmcv) (3.13)\n", + "Collecting yapf\n", + " Downloading yapf-0.31.0-py2.py3-none-any.whl (185 kB)\n", + "\u001b[K |████████████████████████████████| 185 kB 49.3 MB/s \n", + "\u001b[?25hRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->mmcv) (2.4.7)\n", + "Building wheels for collected packages: mmcv\n", + " Building wheel for mmcv (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for mmcv: filename=mmcv-1.3.15-py2.py3-none-any.whl size=509835 sha256=13b8c5d70c29029916f661f2dc9b773b74a9ea4e0758491a7b5c15c798efaa61\n", + " Stored in directory: /root/.cache/pip/wheels/b2/f4/4e/8f6d2dd2bef6b7eb8c89aa0e5d61acd7bff60aaf3d4d4b29b0\n", + "Successfully built mmcv\n", + "Installing collected packages: yapf, addict, mmcv\n", + "Successfully installed addict-2.4.0 mmcv-1.3.15 yapf-0.31.0\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GDTUrYvXjlRb" + }, + "source": [ + "### Clone and install MMClassification\n", + "\n", + "Next, we clone the latest mmcls repository from GitHub and install it." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Bwme6tWHjl5s", + "outputId": "7e2d54c8-b134-405a-b014-194da1708776" + }, + "source": [ + "# Clone mmcls repository\n", + "!git clone https://github.com/open-mmlab/mmclassification.git\n", + "%cd mmclassification/\n", + "\n", + "# Install MMClassification from source\n", + "!pip install -e . " + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Cloning into 'mmclassification'...\n", + "remote: Enumerating objects: 4152, done.\u001b[K\n", + "remote: Counting objects: 100% (994/994), done.\u001b[K\n", + "remote: Compressing objects: 100% (579/579), done.\u001b[K\n", + "remote: Total 4152 (delta 476), reused 761 (delta 398), pack-reused 3158\u001b[K\n", + "Receiving objects: 100% (4152/4152), 8.21 MiB | 19.02 MiB/s, done.\n", + "Resolving deltas: 100% (2518/2518), done.\n", + "/content/mmclassification\n", + "Obtaining file:///content/mmclassification\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from mmcls==0.16.0) (3.2.2)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from mmcls==0.16.0) (1.19.5)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from mmcls==0.16.0) (21.0)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mmcls==0.16.0) (0.10.0)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mmcls==0.16.0) (1.3.2)\n", + "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mmcls==0.16.0) (2.4.7)\n", + "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mmcls==0.16.0) (2.8.2)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from cycler>=0.10->matplotlib->mmcls==0.16.0) (1.15.0)\n", + "Installing collected packages: mmcls\n", + " Running setup.py develop for mmcls\n", + "Successfully installed mmcls-0.16.0\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "hFg_oSG4j3zB", + "outputId": "1cc74bac-f918-4f0e-bf56-9f13447dfce1" + }, + "source": [ + "# Check MMClassification installation\n", + "import mmcls\n", + "print(mmcls.__version__)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "0.16.0\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HCOHRp3iV5Xk" + }, + "source": [ + "## Prepare data" + ] + }, + { + "cell_type": "code", + "metadata": { + 
"colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XHCHnKb_Qd3P", + "outputId": "35496010-ee57-4e72-af00-2af55dc80f47" + }, + "source": [ + "# Download the dataset (cats & dogs dataset)\n", + "!wget https://www.dropbox.com/s/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip?dl=0 -O cats_dogs_dataset.zip\n", + "!mkdir -p data\n", + "!unzip -q cats_dogs_dataset.zip -d ./data/" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2021-10-21 02:47:54-- https://www.dropbox.com/s/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip?dl=0\n", + "Resolving www.dropbox.com (www.dropbox.com)... 162.125.67.18, 2620:100:6020:18::a27d:4012\n", + "Connecting to www.dropbox.com (www.dropbox.com)|162.125.67.18|:443... connected.\n", + "HTTP request sent, awaiting response... 301 Moved Permanently\n", + "Location: /s/raw/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip [following]\n", + "--2021-10-21 02:47:54-- https://www.dropbox.com/s/raw/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip\n", + "Reusing existing connection to www.dropbox.com:443.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: https://uc88da1070f63f9a78ee48b59098.dl.dropboxusercontent.com/cd/0/inline/BYb26ayxWasysNPC1wSer1N9YqdOShCMIBzSIQ5NKaIoKQQ47lxZ3y7DkjKNLrYiSHkA_KgTE47_9jUHaHW79JqDtcSNEAO3unPfo8bPwsxaQUHqo97L_RjsSBhWg4HZStWRbLIJUl5WUOtpETbSQtvD/file# [following]\n", + "--2021-10-21 02:47:54-- https://uc88da1070f63f9a78ee48b59098.dl.dropboxusercontent.com/cd/0/inline/BYb26ayxWasysNPC1wSer1N9YqdOShCMIBzSIQ5NKaIoKQQ47lxZ3y7DkjKNLrYiSHkA_KgTE47_9jUHaHW79JqDtcSNEAO3unPfo8bPwsxaQUHqo97L_RjsSBhWg4HZStWRbLIJUl5WUOtpETbSQtvD/file\n", + "Resolving uc88da1070f63f9a78ee48b59098.dl.dropboxusercontent.com (uc88da1070f63f9a78ee48b59098.dl.dropboxusercontent.com)... 162.125.67.15, 2620:100:6020:15::a27d:400f\n", + "Connecting to uc88da1070f63f9a78ee48b59098.dl.dropboxusercontent.com (uc88da1070f63f9a78ee48b59098.dl.dropboxusercontent.com)|162.125.67.15|:443... connected.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: /cd/0/inline2/BYbEOCLrcXNg9qXvYXbyZZ0cgv3fSQ1vs-iqDCz24_84Fgz_2Z5SkserjAUpmYgty-eQkchAlzxQPbgzayZnie5yCipe42WVTChJJiIQ6m5x7GxgWJOn6_5QP3eRbFuYyrc1yV61BKlYuCJDHH0eyNaN8paR6bjevwMJ7Alip-gvf3c9JfjJmMgZrzcpknENyaI62FSgxFkX-Kc-FS41RYQadnMfUmhZCfMrFDSzTcmRprDiC9hQ-zJkcW_kbjI0whA1ZLQ-OG9-8Qf7jn8qd4g_tQLneL8X44qOUX4hRs2LE23g4n0jz8DeNt8KZ48WhGs8_20rBIgHH0dut3OjHF5DZMI8dVyHFAiJGyxOknZ5aCfImtz6MGgHDwbiipkICxk/file [following]\n", + "--2021-10-21 02:47:55-- https://uc88da1070f63f9a78ee48b59098.dl.dropboxusercontent.com/cd/0/inline2/BYbEOCLrcXNg9qXvYXbyZZ0cgv3fSQ1vs-iqDCz24_84Fgz_2Z5SkserjAUpmYgty-eQkchAlzxQPbgzayZnie5yCipe42WVTChJJiIQ6m5x7GxgWJOn6_5QP3eRbFuYyrc1yV61BKlYuCJDHH0eyNaN8paR6bjevwMJ7Alip-gvf3c9JfjJmMgZrzcpknENyaI62FSgxFkX-Kc-FS41RYQadnMfUmhZCfMrFDSzTcmRprDiC9hQ-zJkcW_kbjI0whA1ZLQ-OG9-8Qf7jn8qd4g_tQLneL8X44qOUX4hRs2LE23g4n0jz8DeNt8KZ48WhGs8_20rBIgHH0dut3OjHF5DZMI8dVyHFAiJGyxOknZ5aCfImtz6MGgHDwbiipkICxk/file\n", + "Reusing existing connection to uc88da1070f63f9a78ee48b59098.dl.dropboxusercontent.com:443.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 228802825 (218M) [application/zip]\n", + "Saving to: ‘cats_dogs_dataset.zip’\n", + "\n", + "cats_dogs_dataset.z 100%[===================>] 218.20M 16.9MB/s in 13s \n", + "\n", + "2021-10-21 02:48:08 (16.9 MB/s) - ‘cats_dogs_dataset.zip’ saved [228802825/228802825]\n", + "\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e4t2P2aTQokX" + }, + "source": [ + "**After downloading and extraction,** we get \"Cats and Dogs Dataset\" and the file structure is as below:\n", + "```\n", + "data/cats_dogs_dataset\n", + "├── classes.txt\n", + "├── test.txt\n", + "├── val.txt\n", + "├── training_set\n", + "│ ├── training_set\n", + "│ │ ├── cats\n", + "│ │ │ ├── cat.1.jpg\n", + "│ │ │ ├── cat.2.jpg\n", + "│ │ │ ├── ...\n", + "│ │ ├── dogs\n", + "│ │ │ ├── dog.2.jpg\n", + "│ │ │ ├── dog.3.jpg\n", + "│ │ │ ├── ...\n", + "├── val_set\n", + "│ ├── val_set\n", + "│ │ ├── cats\n", + "│ │ │ ├── cat.3.jpg\n", + "│ │ │ ├── cat.5.jpg\n", + "│ │ │ ├── ...\n", + "│ │ ├── dogs\n", + "│ │ │ ├── dog.1.jpg\n", + "│ │ │ ├── dog.6.jpg\n", + "│ │ │ ├── ...\n", + "├── test_set\n", + "│ ├── test_set\n", + "│ │ ├── cats\n", + "│ │ │ ├── cat.4001.jpg\n", + "│ │ │ ├── cat.4002.jpg\n", + "│ │ │ ├── ...\n", + "│ │ ├── dogs\n", + "│ │ │ ├── dog.4001.jpg\n", + "│ │ │ ├── dog.4002.jpg\n", + "│ │ │ ├── ...\n", + "```\n", + "\n", + "You can use shell command `tree data/cats_dogs_dataset` to check the structure." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 297 + }, + "id": "46tyHTdtQy_Z", + "outputId": "6124a89e-03eb-4917-a0bf-df6a391eb280" + }, + "source": [ + "# Pick an image and visualize it\n", + "from PIL import Image\n", + "Image.open('data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg')" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAASwAAAEYCAIAAABp9FyZAAEAAElEQVR4nJT96ZMkWXIfCKrqe88uv+KOvCuzju7qrmo00A00QZAcoZAznPmwIkP+p/NhZWVnZTAHSBALAmig+u6ursrKMzIyDj/teofqflB3S89qckTWpCTKM8Lc3ezZ0+unP1XF737+KQAgGEQDQCAoIgD04MGDul7f3Nx0fTsalbPZJCa/XC5dOQ4hGGMAoGka55y1tu/7LMu89865zWbz+PHj+Xy+2WxGo1G3rokIAJgZEa3N9L3C4Jwzxngfu64TkbIsq6pqu40xJqXU922MkYiMMSKS56Uh61zubGmtM5QjGkTMDZRl2fd9CKGoSkTx3gthSgkRBCGllFIKnAAAEbGXPM/1q7vOM3OM0ZBLKcWYUkrM/PHH33n58mWe5yklnlW3t7e5M6fHJ7PRaLOY3769zAxNsvz06PDJwwe5ofnV1eNHD2+uru7du3O7aeu6Nha97/7n//l/fvHimQ8dIi4Wi/V6vVk34/H0+voa0Xzy8XdfvXrVtt39+/c//vjjt2/fPn32TZ7nDBKZY4x37997+/btn/zpj9++fftX//E//vCHP1yv1yWS98FaK2i+//3vv3nztutDluWcQMjEwNfXN/cfPHrz5o0IVlVlQvfpp5+mlH72s5+NZ9MY48nZ6Xy5XK1Wn37/e3fu3Hnx6uVvf/vbPoQ7d+6cn5/317ezg8lqteq6phqVV1eXm83q0aNHTdOklFKSlASBrM2yLHM2ny9eiAgB5nleFBWhjZG7kMAUybibTXdxs1yHaMspOtf7aAiMMdZaRERhRHTOOUNd13FKIfQxRokJUay1xpibxTzGKCLWWiJCkizL8jy/vb1JKT169Kiu133fHx8f397etm07nU67rqubdex9nueTycQY5JjeXF4cTmfHx8fG4Pz2drVaTSaTO3furNb1YrHw3mdZllKq6xoRx+NxWZbM7JyLMW42G+dcVVUppc1mlee5916/6/T0tGmaZ8+e3bt3DxHLsgSAvu+dcyIyn8/J8PHxIVFs13Mi/z/+9/9qOrWzcX7n7lEKTQi9QLKICAAg8K2j7/uUEiKKSAih67rEIYRQ94uiKEII0+n06urq5OQkhAAAMUa9iJQSAFRVZa1dr9ciwszGGN36RDal5L1HIGYGABHRaxARXe5vXwrAVoaBmVl2B4CIiE/RGOO9DyGQNYMQhhCICA3q240xiGj0f4jW2rqui6Lquq4sS9/HoiiOjo5fvXo1mUx+/OMf379//2/+5m8ePnz4y9fPfvSjH/3t3/y1M/aPP//8F/+0qKpqeXuTCSBi0zTZeDSbzbquy/P86uqqmB5WVeUyU9dS1/XV1VVMvqqqs7Oz5XK5XC6rajydTlerze3tbUppPB4bY4qi0KUmIu9749xqtdKlOz8/J6I/+7M/CyE450Lb6foj4dOnT7OsCCGEEIu8stYSwsHBASBba5m5qoo337yq6/WdO3em0/Hd+3frun79+uXZ2Vni8PTpVwcH0zx3JydHTdN88+zrsspzgL7vjTHT6bQo87atQ+ibpgkhWGuzLBPBGFIIqWkakM7lGcfEzMzsvUdIPiYfOJ9UQGSMIWsNCxElkARCAqr4AACFRcR7j8LMzCnF6FNKFinLbFEUWZYdnZ40TdN1XQghhOBD1/e9bjYVifV6HUIYjUZElBeubVuBlGWZI6OLEH3o+x4AyrIUkfl8EUI4Pj4moouLCxa01qrCreu6russy0RkPB7Xda17O8syIkopxRiZue97773esu7YsixVjJ1zRDRsY2stS4wxWgPWWmt1JwMiJh9i4pQSS6Rhl8v7h/ceEVVy9r9YHwYijkYjABiNRnoPw14XEbWBjx8/zvNcf6NXP0ia3ttwV2ru9CnuiyUi6tbUW8L3D/3GuDtCivsHMydhERARRgAAIkLEBNLH4FNcbtYhJUHMigKtuV0uNm2zbuou+Murq+9//rnNsuvb2zzPz8/PP//88+l0aoxxzumNq6LRVTo6Ouq67ujoaL1e3717dzwep5SyLBuNRrPZjJlXq9VqtXLO6T7I81y1qd71crns+17dAedc13UoYoy5vLw8OTl5/vz5D37wg/Ozs6PDwzzPmVkfZmbp5cuXKSUU0MtAxLars8xJ4uOTQ/VN7t69+0//9E+67Z49e6ZKJ8Z4cHDw4ZMnP//5z5tNrdvx0f0Hq/miqde31zd92xhCjoEADdJ6ueqaNoVokMo8K/LMEkYfurZGREEQER9DCMHHwMyM0HVt27ad72OMMaXOt13X6T3GGL33IXh1Urz3XddlWWYt7R7rdqsw87Nnz54/f/7ixYu3b99u6lVKyVqbZS7LsizLdFPpNiADRJQ4OOemo/F0Oi2KAoD188fjMQC0bRtDyLJMDd1yuby+vo4xZllmjCGi8Xh8dnZ2584dAPDe13UdY9RtrIrDWqsbTLdTCAERp9Op6ik9Qfe8un4JpO9DCMHaLM9LvVRh8N7HGEUQwZCezcxpdwwibq0djUZlWaoa0C/QbeScCyGMx2MRybJMnQoR6ft+vV5fXV0h4ocffnh6ejoajVSRqIx1Xafr+L6PalWwU0qDJRzEbBBLRCQilVh98U4g3700QoiI+pCstcZYEBSGFDlF7r0XgN57Y20fvMszm7nRZFxUZe/94dFRluf/5e//7uPvfPKTP/9n63pzfHz8/Pnzf/2v//WDBw9+9atf6dqpsiyKIqWkznnTNKenp9PpVNVn0zSbzeby8lK90BDCb37zG2vt2dlZ3/d1XRdFoU+rbdurq6vlcpmEp9Pp0dGRcw4AiqLo2857//vf/q5erfURTMbjsiy7rh3WIYTgMiPCSGJAdspbPnz85OT0qOub2XTcdvXzF98kDin0PnQI7H2XGTo7OyGU2/n1Yn5jCI4OZ02z6ft+uVyq8VksFl3XuMxYR71v62a92azUThZFUZalc86nkFIMKaaUYkqIaDNXlCUQCoIxJitcVhY2z4xzLs8QkZm93+p0IrKWdD/oHTnnNMbRM+/cuXN0dKSO1aCRdds4ZxCxKAqXGR+6EEJd11VejMtK/UlriQCNMS6zR7ODvu/bth6NRqPRqGmaul7nuauqSpcxxjgej+/fv3/37t3pdHp9fb1er9u2TSk553RzWmuLotDtp0ZPhVDV7mCo9F7UzAij97Hvg6AGVsAhMbP3UZIYpNwVpHel4qcirnKoFl9XRKVFhTDP89VqhYjz+Xwymcznc0TUB6OGmJnrum6aRgVYTZBzTkVCpUtvYJCbQa50lQch3JdD3h3f0hr6sIczhRCAEI2xmTHGGAN7QktEYMgVeQKZHh4kEJtnPsWsKNQe5lXJCJfXV6t644ocDLVtu1gs1Dm/vr6u63qz2YiIOkjOOdU7RHRwcPDJJ58Q0ZMnTz755JOyLL/88stvvvkGAL773e/OZjO9AFVn6l8YY5yzIty2zXg8un//3tnZ6cHBjAhR+PDo4JunX2eZ+9//978cFcXLZ8/KLJtOp23b6uecHh2v18uUUg
pxvV7H5HNrEofet5PJ+N6dO9Px+M2b13/ywz/65puvr64u//zPf3JyeHB5eVGV+Xqz/Onf/5c//qPPLSGBfP797/3iZ19Yg9aStSTAXdfU9TrGOBqNzs/PsyyLMa5Wq+Vy7n3nMluWeVnmiCiEYBAIhRhIyFrnXDmqqqoqRuVkNj04OJhOp7OD6eHxgbWGCNWl6vs+hF631mJ5u9lsvPcAbK3VHQIAR0dHBwcHk+moKDPdIQCit8/MbdsmDjHGuq5VIZZlqVYhRh99UIMxLisAcM6URWGtTSl436FAmRcff/zxeDxWpVMUxWQySSldXl7e3Nzoe3UXqQEnIjUY+vtB8FRpMrPamGG3ExGA+rEphMBJhJETxMgxcIoCQIho33dHh+BQ+r4vikz3SkoJgMkYRHRZpjAMEZ2cnCyXyyzLxuNxjHEw6HVdLxYL9bJ0rVXwjDEpSQhBRPKsGELBwdskIpb0LXdUzwkhgKAxKaWESAxRBEXEUhRJzFFEEggyi0hiFo0JrUE0REQWt2qCpe07RCRryqpq2lZEYpLFallVo7quRaQoy//rr/7qiy++QKLb29vvfOc7f//3f99s6ocPH8auXc9vl32neuqDDx719abv+7OzMyL66KOPlq2/d+/exZtXb968bttWndLxePxv/s2/+cd//Mem3pycnIzHY2a4vVmUZbl14Jv65OREg73cZcvlEgBIoCzL4L3v+8s3bzil9WpV2swgoUDXdbPZbLFYdFknkDarFRG5zPjer7vm9cXLsiwn01Gql865er3Jsmw+nxPR2fGJc261Wi2Xy6urq+l4TACb1SqFUGU5JHCZTSm1bcucEocYXVHMRqNqs9m0bde2YG2mqJgxxpmMUiINNwQCJ0keAGKSNvVNkN5zx+LFJiRBHFRvSomjD4GMMYaAmSUxAItYA5g46Mbb1Ku2bdu2Vf1rDDkqHRm2FCO1bZ1lGSQGxEk1MiDq76UUuqbt+56IMmvAmKbZHB4cENF8fpNCrKoKcthsNhoX6Is8zxFxtVpdXl4y83Q6HZS7ao0YI2AkA9aRCIfYAwBLBOTRuGyaJnFIHIwxxiKSLas8Nd6iYW77nr2DIfISZiGQJAn4nQnat0tqZwerqLpqeL96I0+ePDk+Pp5Op6PR6IMPPhAR3U+j0aiqqsVicXt7KyJnZ2dFUXjv+77Xc4hoELwhStRYVj/hW+7oPlTzh2Hh8JOR9YUAIKL33qeYkjADoDHkrMmczV2eXd/ekDVN137w5HHne5u5pmur8ajre2MtGVONR19+9fvJbDo7PCiKQt3OqqpGo9Gf//mfP3z4UL0mZn78+PHx8XGM8fT09MWLFymlm5ubzWYzn8/VCy2KYrFYfPHFF8fHx/pPdXjOz8+n0ykR5WUxnk4Wi0XbtqvV6sWLF8wceq+e6t2z83q1PpzOXnzz7PToeDVfKKwXY4g+1HVdliVymk6nHzx+GPo+BC+QMkvPn399c3PVdc3sYHzx5uXJ6WGemf/8N/+xblZHx7PQN5NR8dGTR7/77a+Cbwn59avnTx4/9H3TtQ0Bhr7zfZtnVlK6vb5ZLm4NQe6yIssIxfdtW29C7xHEWLSOjLVElERSSiHGEEJdr9fr9XK5nK/m8/n8dn59e3t7u1wMDpuxuIsPO0U1jNmhgH2j/vxqtQKALMum0+l0Os3zTLXDpl4horWkf82yDFGcM9bavm36tvFdrxGawW1smTnHzE2z8V2v77KOBNLbt2/X67VzTjezWjPd5JPJJMuyrus05NZ4SiN5NbaDX9Z1nbqsek7btgMU5JxzRW7IKXZojHMuR0ZDzhpjAFFkcNNokMB9+6PH4DFaa7uuUxX44MEDNd/OuTt37uhbFJO01up1VFX1ve99786dOxrOqgevTrmKlt6DSvtgAP/wUIR6ODRm0AMM6X/qNuDO+UyAaipZUASZgRlEsKiqdV0XVeVj/Og7n9g8Ozo9sXk2OZjdLhfj2TQri6vbm1W9+dN/9hO05uTk5NWrV9/5znfOz88vLi5++MMfnpyc6FV57w8PD4uiUP/wt7/97cuXL//xH//x9va2KIrZbKaPqmma29vb3/72t4vFYjabVVUFAOPxWCMTl5lqVLRd3fXNerO8ur5EEmMRgcsia5vN4cF0ubh1lgyBcJzP5xqBiPBmtT44nOaFOz06/MlP/jQvXAy9c/b09CT6vl4v1svbm5urosgeP35EBASSO/v0q99H3x/OpkXmzk+OFzfX7Wb9wYP7yffL25sQeiIQSEQwHo+so029WixuQ+jJQF44a01KofdtjB4GvBoSAACKSNI9swvIt3uJmVmiCLddHZMnAxrpEMHOSw9bwCb2KqXOuTzPVZdZa8qyqKoqz3PryFrbdY2i9+r3xRjbtl0ul7rXU0pkMLPbXAincHBwsFotrt9eEdF4UnH0vu0Kl63Xa0Q8OTm5d+/eeDweHDcAUIezaRqVKI3xUkoqhAPerkZF10FxpqZpFNExxhhnsywja3ST6+5VxzAz1lpryWzFTz9XDaC6ixqeWWvH43Ge53pxAKB46Xw+1/iwqqrXr1+/ePFCjaR+AgAcHx//5je/2Ww2RVE45/RD6rper9d93zdNo8pG10ghx6Io9BtVC2RZpiC1Yll6q3onapb1nxpDF0VRVVVZVU3TZFmWlZVzzthMAej5fEHGhpgOj45Diud375zdOe98/3/8X//ng0cPL6/edn0vAOWoCilOZtO79++9fP1qNBk/evzBZ599Nh6Pf/Ob30wmkxcvXnzzzTfq1Olz+sUvfqH48KtXr87Pz3//+99/9NFHZVl++umnZ2dnuhSImOf5V199dXx8/Mtf/hIATk5OVqvVJ598olFQ3/ez2Wy9XnddN5lM1DXS9wLAcrlURHS5XFprD2cH6+UKRRWT2azWfdt99fWX8+ubTz75KMssp74s86LIvv76933fssS2q1+9fhGTn87GL14+G0+q3rdX15fXN2/bzVpiiH3XNzUJHx/M2ratmzUixhgXiwURHR0dKDaY0hb922LxBgZrYMlYS5mxItJ3zaZeT6fTssrzLBuX1fHJ0Z07Z4eHh7l1VVXps3PO5blTxA6AQwjWUVFuN7e1xhgS4aoo+rZt64Zjij6AJGcssBAgsFRF3tZN9CF3WdvU41FlLLEkH3oOERGJIEXfNM3Fxaury7cicnJ6NCrK29vb+Xyu9uPo6EizoLe3t8+ePVPtUJbl8+fPnXP3799X4+acSykVRQbAKQVEEUld1zTNxvuu79s8dyH08/lNCH1KIUY/m02Yeb1eA7CGuCIphP7wcDYZV+PxuMwLYwwNDqe8n6AbPEZFhBQUUo9xOp2GEN68eXNxcaEJrsvLS5UuPV/zM+pfPX369ObmRmGMIRuxb131ngeveECi1B/eoRdOA9+iKPq+L4qirmt9Wl3X9n2fhPvgF8vbLMu64K21LitijHXdANJ0dti2PaLp+/CTn/zk5ORkvV5XVaUZBSIajUbX19eHh4eLxaJpGiIqiuJv//Zv792790d/9EfMfHNzs1gsUkp/+Zd/q
eqQiMqyfP36dYzx+Ph4sVhsNpu+71erVQjh6dOnimDleX5wcFCWZdd1mqW4uLjQ/G9d18457/2ACetT2OpXQGCRxJIYWHTbWTIaKRFRkeW5y1KKIXiO6eunv0+hn89v8txNJ2VV5p9+7zsHh9OUYowhxpBSJAACkJRSCNH7FAJw4hii70PfpeCB08F07AhD16bgncFxWYyrYlTmKAklISfgCJwgReSEkjgGjoFDhMQAkFlbuKxwWb1ZJR+MMWWRFVnujEUBBN45WSmloNspy22e58agKmUVv8E7G2DzEPvEYReA6OfwLkoRpG1g0tVN6HqJiZlFEiE658oy36zWVVWdnh1PR+Msy0ajMsts1zUhhNlsNh6P5/P5L3/5y+l0ul6vDw4OFK3RdMAQWCkmR0RqG3Qn617VZ0dEapk0mdQ0jXPGWHSZzXNnLaWUhKOztm3btt40m3W9WtKQlhjQkSESVbOjO7Isy0G01Ka9ePHi9evXCl71fT9cxAByKtz6/PlzTdnroQs9JItUzDTqHdB/VbT6NFTX6mld16niVJtZluVoNGq6thyVWZbNZjMRKUYVACmQBYAhxjwv79+/H0JC6y6vb44ODo8PjzimB/furxbL3GUoIMxnp6dd007Hk+jDqxcv7925+4uf/fzDx0/UNKl38fjx46dPn+olaf7m+vo6hPD48WO1FXme39zcvHnzRm9EobajoyOVHMXfXr9+rVn7V69eGWM630YOAIpJJJFEBJk1qm45eP1PYkBOFoEAUMQ5d3BwMJvNcpdZa0ej6sWL519//dXHHz55ffHq9cWrH/7x5yAxc+QyQwYA2RAYi2SAJYa+TaFHSQRCIChMIJk1ZZ4hCiICSgh9Xa839SqEYC2Nx1VVVdYRAHvfNW29Xq+Xy7mkFL3vu8b3vXAkAINkkKIPwGxAJHHsve86jh5YmCNzHJQsIA/wIzOL8ADux+RZorUWkGPyQ8CieQqVQGYW4P19q/Kw3cJbdQm5y4iorHIDeH19/ebNa9/3usOIqGmau3fv/sM//IPqSn2UxpjDw8MQgtJllOalSXl1d9VfVauufqb6hppn0qxy3/dZZgUZgF1mjaXEIQTPHB0hGQRg4bgVwn2kZIgJY4xd130LUGmaRhHh29vbm5sbvRoF69WHUYd20O6avdAIFXYZc13NQc4HRYCIm81G8564lz9UKUVEBWY1Lq3rWgHihw8fZpl7/Pjx4fGxc246nW7qtu/7LM+Losrz8vT0vKjGs+nhcrH+zW9+o67vvXv3UkqvXr2azWZ1XSuGmef5d7/7Xc2YP3r06OnTp4vFQnXe27dvz87OTk9P27a11qqKQcTb29uDg4O7d++KSFVVRVG8fPny/Px8Pp+rlFZVNZ1O9QllWdb3/cXFRdd1mr1Q/bKfZbFksiwDAEgcYxRmYU4xgojR3AVHg5LlNi+cdZQ5MxqXhHJ9/db7DlJcr5fX129fv37ZdU1RZFlmM2s092AMIqcYve/bGHqWSMAEbFAyS6M829RrlpRl1lrbNM3bt29vb68VnFQIepcqiokjS0IBSZxCTMGLjxxTip6jz52xRBxiW9eL+c1yfts1LSRWy7/NMO0g0BB67733vUqmYjPqPaEkSIwsJGCRLAIkTqFXNS8pShxOYORkLWWZzTNLiIr6pBBFJMstsrRtu1ot6s1Gcwm6/k3TXF9f931/fn6uN6jpQWVfXlxcPHr0SDfAkydPUgptW282q75vEUUZkESwg+gTEThnjEHm2PctOWKOPnTMUdWLD13bbVyGuTWZM84Z2reBg/iptCgSpYGpopcas+nmUxF9/fr17e2tAkqDN6Uepoh47zUS1bSP7NKsKpYauw/uqMZ+aiSVQaIGTe07Ik6n077vJ5PJcrmsqmo+n6uLeHx8vKo3eVmWZbnZbE5OT0XE+1CWozzP27btfDw+Pn748IOTk7OLV68OZzMCCH1fuOzy9cX3v/tpmeVvXr3+3ne+29XNw3v3/+xHP754+eqjx0/+z7/839+8eXP37t2yLH/5y1/e3t5+97vfnc/neZ5r0uz09FRd8fPzc9XB0+l0uVxqQL9arbqu22w29+7dIyIlH45Go4uLCyLSuPpb6m/AyQqXGWMMbQEni2QAM2N1GUMIoetVB6nWK8vy5Pj4//u3//nxk0enx4f/7//X/7Oqiq5rUgocg+4PADYoZMAQeN8l33PwMfQx9MF3vm+7trZkJDEKVGV+eDCdjCoQburNzfXVYn7bbNYpemuwKsrpeDKbTDNrCmty6xwSsEBMEhPHxDFJYkicQuA+BO+TD7zTXMbgAMkwb32iAZBPvHWLBiWle8aYrW3w3ouoalJ2VFRASFFQ986HikqpA2SLFGME4MODg5OTk7IsU4p93y2Xy/F4/Pd///d/8id/ogp3vV4fHh6ORiNjjKbEx+OxhkWz2Uz5cX3fa/6jKArl0zRNo3jMZrNZr9dqS7quMxYBJKXA4q2DIndE0Pdt7zsfuph64R13VB//gMfssgUppcSSiIAIALd025TSeDy21k4mk/V6rRen1qksy4ODA2WNrlarGCNmOHgIuOPEqEwOmY99yVe4YrFYNM1GBVt3YUrddFKuVpuiKEKIjx49IrLe+zos1019fX395s2bpmnmy8VHWVZV1XK1NsYgmtvFyl1cHB+fnp+fA8DbN78rsnyzWndNm1kXen/3/M6ds/Pr6+u2ae6cn//i5z8/P7t7dnL6i5/93JL52c9+dufOnclkotv90aNHX//ut6EzKlH3799/8fTr29vbs5PjLMvatu2bPqX0+9///uOPP95sNnnuXr16dXh4qNB23/cnJ7OLi4vRaKJ2nnbMISAgwMFByLIMAaJzFqlwmbr6lkxZZoMHgbx1zrqmjpyOjo7unt9Rnsfx8eHRwfTNmzdNu2XbEpElEiKDRFlGAsYYQjTGAAsKhN4H8LPZtG3brm8Oq8N79+50XfPm8kKNkjHGbqM1gyTqP1siY50lI1HUxAEZFOPbLoJDkCLLghj2sWNIwYNRdU8iggCIAkAxBjIIrNe5TVYrQKraX3dgSluUVcVywCQHDHbA22XHQ1a7YkBZ/q1qAQBAhMzYLMvmu/S6eqF1XaszCQBv375FxPPz89VqxcwPHjy4vb2dzWa6jXU/a3ilnBtVqcqXUt2R5zlzsg5ALKRABGWZIzuQFPoOLSBHRKFB++qLAZgZMgHqDQ8KW/Njmp0fjUaKT04mk4GHpXmL6XSqkOY2Zef9AMmoXA3wxi7Pg+rgTafT8Xisfp0ybJQ+PgikgoeHh4f379+fTCaj0ej169dEdHFxoV7TYrHQ98YYwRAiXl/fImII4YPHTxDx+vq667r1eq3a9+uvvy7L8vz8/He/+50CZUpMf/v27dHR0fPnzxeLhTHm6OjIe399fX16eppS0rsry1KBzaurq4ODA72A8Xj8q1/96t69ez/84Q+rqmLmZ8+e6bPR3aO+9MHBQYxR3csQ+hi9ujQx+tC3gKzhk+71FH3wXYg9wZb2oKsxLqsss+o7LJfLzz///uXlxYsXz773
ve9tF00iICMJS4zRh9CrXySQhKNIMghZZrPMWgLgqMrxnXkB9W4kz12e51nunDOIol7cer1u6ybGaACJiABRwAA65wgQZWsVt+moFHkXCirYrlDC4IKJvMtwqBnIc6ffrn/VN+6CoyGdtkUitgQpgF3A6bfMctmGPGqylsvl7e0NJFbc3lo7n88//vjjL7/8Urfuo0ePbm5u6rpWL0xdQt2lr1+/HtIn+gjirsJDYYI8z/W1ZjJGo1Fdr40xZZkLQggeCVxmicAYdNZkWVbm7r+ZIRygywEgUeHUr8mybLPZxBjX67X3frVaDUkVBQkHVTF4XCrVGsUOf8rzXNOGKjZE9PTp04uLi9VqS9VVudUiKb2qt2/f6jemlBaLxQBsKmxTluXNzY3q0YH+qr7N9c1tlmVv31w+/+aZJVNkue/7tmn+y9/+re/6zWqdQnz14uX/8G//+8l4/PLFiz/70z99+eLFwcGBItQi8uzZs7/5m79R1RO3Kem6LMumaV69ejUajbqu+/DDD8/Ozl6+fDmfzx8/frxcLu/cuaPMJg2PVe8qPNu2rZq+fXhsx2/eHvpdmsno+36z2SinVF0yDd5ijFVVdV33/Pnz8/Pzo6OjL7/8crPZqE4cXKZ3JlREKwbatt1ua9waYV1YY4z3/vLy8uXLl8vlUlXk4M4MWhsAmvXGt12MUdKOSAhgCcejkTNWVU+7qX3bcYiay+77XsOcATMfLNiA1WvegraS/c5BUIB0gFDxD+jEg2cxwA0q8FdXV87aw8PDqqo0A6EKHXcpQe/9YrH4i7/4C2vts2fPLi4u7t27Z629vb1VRsrt7a3m2BSe2HeMrbUDCUF/PyiL9WYJwM455qgR1iBuxqB15JwhMVkQamPsUoiYxAlbjuSpgOqgooK8BDboRRabdr5ummYlEvq+nc0mGpuqsW7bNkWxJntz8fbe3YfrVdPUPYKtu5SVU3JV6yWKZbStT00f123XxXS7Wv/5v/wX1XSSEPoUPafFKgiVqzoKlU0PJhv1EUfT4/HsKAh5Rsoyz/Ly8uLt7Q1YO6tm/+Zf/ZsMs9RHi1lh8na9aTZ1ledXby6OpiNH8cGd48cPzn/9i39Y3l7c//BhE/2ybT2Qq6bgRid3PnpzU883sfZ47/GHXz57Fk367/6Hf/XXf/9Xk9MiXne0kcrOTk4fZrOztbjrjt3ouKyOTmYn9fUybVqHPJ1k8/XbYmp/9et/StyenR39zd/8ddNsfvSjH725eHtyfJZn5aiajMfjEPqiyMbjIsR6dlCaLhYJ8wjY+LhusAuZYIamcnmZ5dPR2DnXtm1iNtb23i831ybj6awwNq3W14vltaR+XGXs20mZZYSxaTAEJ2KZnYgVxMjiI4SkJREi0ocwnk5NniWEgOKB6xRaSdFS4l4gWEshhOVyzQmLfMTJ5NnYmlyYmJXDxGQYKdE485br1NXcBytSuGig4fD87cVNvWlBAoInis5GlwUwI8s5+ALDNMdRBlYCpd5RwuQJIgGjJOaYUlBvxSdfjArKsIstYDKGUvIxdCCJJBpIBsRwgNClrvb1qmtDDGLAVcUkyyqJEtoUfTo5OkXG2MfC5IXJmk2zXqySj8eZbS4v4vzmBx9+8IMPP7h5+c2r3//mbDbC2D7/6jeWosFAGA4Pqq5dFjm2c5hmp7P8rJ1zO+fj0b2HZx9O8+OSJqnB1CB6wy1QsOjN+ro+KU6y4Py8cR6PyknOGNdNJuBAJETugwS2A2VMHVy1IUR0e3ur8WiMsSzLoihU38TYISLAFkce3Nc8zxG2/rEa4sGNfN/f2Lq1CqLc3Ny8evXq/v37P/jBD7744ouvv/7amhIARqNR33eIqKFmCOHg4ECkSUkyl2/LF3xKKV1dXX322We///3v9bSu6+7du/f02YuBF/bJJ588f/ZSWazffPPNarUqyxKA6roGgNlsVlWFiKzX6yyzbduen59+9PHj29vru3fvapS7XC6b6GcnR6enp/Pb67quHz9+3Ly90uWKMcQUWZBRdB1ub2/v3bv3u9/97ptvvlkul8aYuq5VTW6x0B1BbyC+D5CVrs8+WqZRB+8IGQMdAnaFY/qZqvXVYvCW7gsiYmkbTQwxhT4LDcsHaFofunNOJO7jc8O71BPZ2aitdTLGgGEAQAEEQjSIhGCI7EcffXSzap1P0+MiUN4LMJksL5//7p9gR8dXbE+vX9NO+5tEz9EsubFb+5ZSMgadtYPjoC7sYELRGBCWrX8RgBkAGMUYs80qEjKIOmtE1PZdSNFm7ujoqKjK1Wq1rmtF4Bnh5OSERbZMtOBDiqFfbWpnjMkLhwgCse+7ELo8dyGEmDoyGSKyBBZBsogGjLHGQbJExCAAQGQSgzAjRBHZAjODd7vLsbxb/WF/6J/UNVVGGO6x24hIQzje5ihdlmVN02h+dvAThvM15z6dTp8/f350dHR0dNT3fZ7nIEbLL66uLvXzFa/fppUAlElDZBEiIq1WN2/fvh3SNVojOx6Pl8vl4eHR119//fjx46Zpnj9/PhqN3r5969lnWUZkEXpni5RSXdd3794PIfR9cXt7O5tNHj38YLmc3717d7Vaee99DHXoJ0cHZVm+DWE+nz84Poq5HYprYgwCQM4YY8hkSjguiuKnP/3pv/t3/+7ly5dd16l2U19F10oXITdGwzCidxUtvKtiGVZVAa2UEjrUDbTrS+B1M/GuUlbPHNBsZ+ywWYf119M0IlA3VU/Q/Bj8QR2Zao0BAJe9hPCe2LBBQdziS5vN5vXry8v5WrLCk1s2XUI7Gk8fnZ0MW04dRfW9YVfXpqpBP1m5oAMwkbYLYlRiCWSLJsp+ols0fbiVSRHljTNICGn7+YJtUMl3LDiZHliXL5bruFiGEIzLsqK8Xa271h8cHgPAYrUUkbIcgbHzNzd1tymKLK8sInahafyq67p8lPt1E2IkAUMUJQBLgozQEJIxVtiBoRQ5cBIN01iAhVOyWhk5RCOqQTUStdaqa6vPVTl1k0kJu+rgfb3lvdfiWY0lFIQYlPc+8DNsCAV8r6+vLy8v27Z9+vTpyclJvfFaLFtV1XK5HI8rxRWbpjHGGrPVx0WRgVDf+9PT01//+tdKBEPEk5OTb7755t6DR+v1ej6fHx4efvHFF/P54t69e7//4vc//vGPw3rjW49opqMxkX3+/OXyZvnR4yeTquybenk7f2Xo8vVFmZUWLYqs1+vJbCrONE1jcqsG/M2bN2OirUEgIiEAJgJj3HrTKOnx7Ozspz/96TfffFNVlYhoQkLdBNxh0SIylBUMcRHs2hToybQrXdumUolUCDWEVuMwRE2D/Mgu9k4hiAiIbEHHnVfSd53G/EPFatu2XddV5RgRQQB3ZWXMggASE4hovg6UzSOJkRModiPILGCMSSAIQOf3HyRy2Xi16tO6iz1DBIMkX3311QBAyC4OFJGDg4Mh4Bx0BO5VvRljtP59wEJpKAZIMmgHRAEQFXNEERJNhrR9TwAMeZ7nQEaQ0JDN8rprZ7OZyTOtUytHo7wsg/Cde3cjp01Ti8h8tay
qajYeReG8IsaeEfMiF+G2W3ddl1KoxidoBCSSY2MpQWJIUQxGQyTWIBExQxd870NIHJPWuKaU0rtSpm8dbdsOyk+XRsml+td9G4i4LTVC2C7K27dvNbi31qb4zhHaX2U1fUqUcc5tdvnTtm1VkouiePPmzWQy0hJYpdID0Gq5Xq/X1mYx8nq9/vjJ9xSeKctSAIeLF5G/+Iu/+Puf/gMzf/rpp0+fPj06Onr58uVkWoTQp+Q9hXv3Hty9e/err756+fIlACCJiFxdXf3DP/zD5z/4bL1et02vd0E2C6AsmfL27eXr1+nju3eVTumIDCBuZVJ+9KMf/fKXv1Sq0Gefffazn/3sRz/60d/93d9pC5NBcoYQQP5rbEG1ErqxhjTGcAoIg4BwEhAQJgRD6KzZuSrvaboYw/DI9qtljCHlsgFIWZZa5qdyKvLeT70MVcGD+hiEB40dPh9xdy8sX331e6E8pbRY3C7qPtkMbVbX4Xg82jv/3aG0Kg15UkoxBACQZIuigF1hgKokDRcRty1qmFlSGKIqJ1vpFQOAuNOTGAMzAHJCTgmBMsfMEaT1fRY8WDM7PDR51jQNgwjCJx8+eXnxet3UzJyYBaBp26vr60nlUkpJApADkQSeMZDDBN5kAMaQBTRinGGJfWwFMhRGJGROzD6ktvNtn+bLlSTPKcQY7a7MXoabBAC1PIOVG57/sGm0bcT+7jHGcAIi0mhQ/as8z5vYyc6hVX2mLzabzWazSSlpNkKTE4N3VBRFSkYLYfWXTdOMx1Nrs5RSSn0IwZpMKX8ff/zxixcvrLU+pM1m8yd/8ie//fKrzWajqY6UkoLOeVn87ne/m84ejcdT7/311c3HH1ff/e53U0qvXr84PDxMKRVF1nXd119//cHjRylJ13WT0WHTbKjIEkEm+Xg8Xt5c13Wd5zkipJSM0VgupQQMcOfs5Ory6Je//GVVVZNRObc0KvOzkyNmRpTQt2qXfBdUnwuqchIkQRAkAREkAWQWiUkTzRYQkIQAkohyjwaO0YAhx105GO6BmcLvMTEGVFOLPwa+oTo+RVFs6yH2NYIwAIS4rY9BJERAEBBmThYdAFi0iIigKDomgOhDORtVRG5hyfSuyMjl0IeUwhC1yh5Tcthp72ucXTG3JGOMEsS998H7PM+1TGlwH7YheuqNUdwR0zb1bZAImYksEIYUBSjLyz6GNsTj05OmaTZNff/hg2lmF6vlKM8Ojo+avgshtG1LxoxGozzPlbJiTMbMiUNM26xbnmvTsFYh28RBIBlrkE0IwWAQjiZYSwKEzBAj+5CQQTiyMCSm4c6H4qDBBRpeD5pvoHoO1n9YO+34gIiHh4dKUsM9LFt2xfuDy6QtrtRgdl23XC5ns1kIQX3R4+NjRPzoo49SSrPZTJdAO7jpJlND/eDBg7dv3ypTnJm7rqvr+uTk5Pj4+OTk5H/9X//XP/7jP14ul7/5zW/+/b//903TPH78uO9DURTnZ3estX3fzmaz4+NDIlByCRE5Z+q6/s2vf3t0dORcruvgnFssFpt69fHHH3/22Wd5kZ2eHk8mEzVTuyAnppT++q//ejabnZ+ff/LJJ1999ZX3/vnz5//8n//zDz74QNnnGrnxjoe97yAM/KH9/g6Kd6f0LoGm7cZSSpp93SviBtiVfeKOG2h2x951bg/nXFVV+uBUF9PgY79fUzYo3OHRDxec4raHDxFtjakIS5zOxkSEzFlmtVhWhK0j/aLhdoYb3F8Hs3foJoGdgz1ETMPGG96orrX3XYxRgJFgl0vcfhEaAIAueCGsJmNN4drMuTyr20aJX8WoOjg+MsY8e/ZMCZsiorlxETk9PdXLAyEtjSciLXHue2+MtdZ1ra83rTAacpwASZJsOdjO5WQzQStC5ArryiyvsmL0Tvb22Rv6gIfFGvaHBpDfEr/BEurJVVUpJU1XbT/a2cfWtBJPmT6qj6uqurq6stZqn6imac7OzpqmUS7bkJ3Teo7VaqUdNKy1T58+VR9PE18vXrw4OTnJ8/zTTz/96U9/enp6ent7+/Lly//wH/4DIq4Wq67pZtPpdDKZ394+++br9XJ1MJ2hAMcUfKfX+ptf//r0+HRcjYno9PT09PR0tVp0XffDH/7gz//8J7PZTDvKqBYkA7Rl8XNK6eLiou/7L7/88u7du2dnZ1dXV7/+9a8fPnw4Go1UDQ3aTUEX2KGgeuw/jkF5DSoMOeXWZIaQEwdvQHJrHKGBLXnSgOjrLZfSoP5HKIQCkkCScOza2hBMJ6OD2STPLKfg+zb4ThPuKECA+lp/WnpX2KEMG2ds7jKOkRXy3WX4tK9E37Rv31y8fv16tVr4rms2q3qzijH0bRN9r5WNVZGPyqIq8qrIo++j71PwWpYhKep/cddeSaEs9bC0h1W/O/b3qsYIzPE9cw5pvz8akozGpXaXefbsaZbZqirqep1SuHPnbDodf/PN13W9zjJbljkC+75dLee+bw8PptbkzhbW5JwwJRE2nMj3MQYgskS272PbemYxxgKgMQYFNOfpnHMuI7KAzlBGrnC2zPKRVX9mXzzMrtRotVqNx2Mt7jg+Pv7oo4+ePn2q4ZbITvkZY4zDHYxJRH3fi4AGeOPxWBibpjG7ekWtu48xLpdLEdFWUSrDNzc3GigulvPb21vNvE8mE2PMYrE4Pj72PhJR13XWZsfHx1mW/frXvy6cVVWq1ZnW2ouLC6CrO3fuNE2j4UTTNIvFIqT45MkTjun2ZlEUb/6n/+l/+l/+l//l6dOnxuJqvsrzvKzy1Wo1Go20edazb1589zvfe/P8ddNubt6+Oj09Lcry6dOnn3/63R//+E/++q//+l/86Iez2axvlr0PKXljzXK9zh12bWMIq7LYrFciQgiXby4OZtOubTJnASDFkBTJjASipQNGy8ri1usz3nsRVl2uh4hyuaXve+U5DICKcgn1KWgdwNZMpRT3ukjum46Ukmp6RT4066Nu4uD+hBCHOFOZDztojfWTVDWDVk4Y48gxgomCDLPJgQdadp4AkIQEwaAlODg7GxzjfUs7pLv0RoawBRGLokACZlbZ8z7kRdF1nbW77MsuctZABgBiZMRIWu+fOMY4nY1jYEA+mIyrqoLEHH2RWeB8vV4agylxjD50bbNeFc5uNhv2/aiqjHDXNrlz4/FYgp9fLZ1zWW4NmeQ1IpDM5FmVYzQppdKOwDGxjR1DpOX81jqDPhUWDdq27b//vc/HVebbNqaOY0B6v8fMoJX1cZ6enmrZnppjbb+3c+Jxf/kQseu6ELbdINOOLN/3PcC2y5PZFoamAePBHe2Ad2VQzrnRwUHT1tqv7dmzZ1o66JwLIXgfjo9PM5cb4xaLFaF1zhWF0dimKAqXFQDQtq2PbK3Nsvz29vbTTz/drJv/8l/+y2c/+DzGeH529vsvv7x7544z9uH9By9evDg8PDCIoe+vr68//PCj5XKJAqfHZ4v5/L/7V/+qX7c8F9c1Mcj19dt//Md/JE5N0zx58sFms7m8eTut8sm0jLFHIwcHBxzfsfz2j9vbW6VfDjHMgB
VraPeH79rT5O8dg084OBf7zuq+J4mIKcRviZ++yF0GAByTkLHOWWsiYOAgIPp0RYR2SIs2z3r31He/BO39wxIALFKwmTGGgQBwuZyvN+uurb2PLARkARmAm2az20Lv/czzbTcjZhZR/U5E1Ie4vw78fo5kf/du/4RGTURktjsKCwFGH5gZEgSQThgAfNdx6KzRloPCMabQ+Q5TSpJ8ZtECE8eMgDJrrbHAEvrC5tZYh4aQxFhGZo4ShQyEtheR3NgYY7tqnHOjvBofFQAMHKvMlWU5LsfT0WwyLuf+GgAEDHN8J4S8l4RVRTiZTHTrpF1nYg0bREQJuMO7hueiSlQ16DaGTDDkKrTtpHL2dDvKLv8xCOHp6en1DTRNE6NXOyYimizRMOb4+DiENJ8vu64zxmo0qBWPdbMlxT969Oji4uLJkw8/++yzGKP6t1p+VdhiOjm4urr6T//pP//Zn/345ub6+vr67PxE/V7FWqzNmHkxX11f3T558mTT1G3bUm6stW8uX//TF6m+vvl//Ot/nXOKm+UWXiFBRAEevKZ9SQOAq6urIevDu654zAx/IIT7Dv8AaA1SJxw5SQIWEU5JRPSfnKIwIwhoty4RBCAUNIql6ecP2hY1uZxS8J4RxRgTow/BE2X7KmAQ6W9FicPWz2zOW27A1jkiskAm9pF2smQAkwinlCjwnr7YnU9q7lJKnJJoxk+2tObth7MwM/KWPmr3QlzFZId1I+N0WzFLSgyAJICEMUYEABYW6aMXkRgjpoQYSEBEiAP7GJERsSRASalv+uQJ0REZgdRFn5ITR0lrhQ0RJhGfJCbvKO99MERllvcM63ppUirK8vryjXOuKnN0Lvp0efF2M58jp5gCSmCOKUUre3Ed77IRiKgFjog4VDG+efNmUD/b9i3bnbEtjLDO7ZoFy1A9mOK2eGxnJLc0drNr9pjeb1cBAN772Wz2/Pk35+fnV1eX2iKp7/s8L+q6fvjgUdsu7t27d311G0I4mEzU7RQRbZwxnU5PTk7UT/vTn/zZf/7P/znG+Omnn15cvhGR5XJ9enq6Xq+/+OKL73zn45OTE+0sjoiPPnjw4vkrAJzNRqvlpiiKv/u7f/jxDz8PoTcGq9HI5FlTr+fz+fLq8vnz53/83Y+rR49Wt5dt2wqIIbPZbCq7SwCqAdmlBdbrJjOEYCQGFMmc1bWFbUF9Gnb5nrliFRnmbTKDmWVPYvchCt7rjcB7CcPBr5FdRmT4Pe11lYYdFlAUTvEXAEAUIv18VkxH3qv8Vli1ZGaJCREtGTKGyCSk6WzsAVph6byEGCNHjhykIDNc8L54D1TPwX9O24p1YWZAdbi22MyWijCkWHeOq4gAGr1XSQIgJAIGDRICATIgCAeJzBwNoiVsfEAkQ6SNok3iLMuwqqxq0r5HJJvnRi/ABxOFiAwmZ8GREySK3Idkk1datqVEhtkRJ9+vF2KjBVtYVxWlA1ovlldN3debyXRkichAkmhlh3QNClhFsaqq29tbhfiJqKqqi4uLg4OD21st5YJhB6gKxL3ksnqwupTaUk6ldyiY0EUcdsCAqqeULi8vQwhaJzHgE0P6/vr6On2cmqb5sz/7Z79Mv765uf3kk0++/vrr6+trtYez2SzG+PLly3v37n3xxc8Ojg5DCKenp/rkZrMZFDCfz++cnR3OJv/H//aX53fOPvv0e8+ePd3Um+ODw8PZbL3acIiT0cjZvN3UP/3pT0eT8cnJiVjazOsQwmw0Oj8///1XX96ZjWd5posQUyT3bhm/pdcQceDjD+yF4a+yo5IN0jJsTXmfljR4lcPD0m9Je/zB4YTtY43v3LnhJwAoOc1pjLBzN/KiUCuq56g7qo2ctsqXRa8SEYUFQGKMqG6gAANLEC8xCIyzDAA0sDOgrSgQgEXe9ZseLph2hD7YFbKKOroANq+stYACAGYXIOsWSgj7cK6+TmAFBCAKCidmYENEgAJMIAiJOUpMBtgQGTJgyRkgAuNIEjvk3EBmTWXHSpQ3iBmBIUAkJudRjAHnqCxNnjsA8Bl7TyGEvNhG6ZVz03K6Xq8Xi+uDR/dFUgo+9H1wOTprwYgtQ8eQYUbWorHqB6oIfUsVad8hremoqkpLrXY8xndjJBDf8dZ3rd23n5lSgoEastNVKg/69oFBMnhcl5eXxydHbds+fPjw7du3h4eHfd8rUmJ2pZMppcPDw4ODg6Zp5/M5ACgpHtAolKqklrIsf/WrXznn8ry8vr7+5JNPnj17Bv0W0z88vHNx8WqxuL1z5+QnP/nJX/3VX3Vd8+d//pMv/ukXFxeX9+8/vLmeHx4eC3ej0Wher7VqhAhTCr7rzo6PF4tFw+FoOqqqar6ouy5ZS5hwX2x2QY4ob+adbOyA/uFM2mOBKYI6iOX+MUSS+4L6LYu37wkP5++bQdkVpuj66++13qLetPsfPlwS7eo/9w+VJWSBrQuscs6SZLFYLNfr9bpuQgxAyIAIZCzBUE84FJGqMZOBCQKACkGJiPY6ARQV5gFD7vte5B11ZDiECZCRHKQEAMwCCdACsBAkbeQBJAaBCCxBUY220puMehkZgTNorTMpOmBEtCha5GWdHbltC2wlkJCBlHS8Sqc9rELoVVPM55ZS+/bqrYiMq2rscimZkdknDly3tbGoUNe2JTbtVfQOUO/Z2VlZltrNQWuC6rq228r6fRSHiCiEwCnsdNK29CmEEGHbutvspk0MmeU8z4cpNgNRq+887AiQfd9rq3OteAohFEWxWq2I6NWrVwo6f/nll7PZbDKZaAcATbIx0OvXr4+PTwRB/dKUUlmWIQRfx8PD2e3tbQj9/Qf3NpvVN9988/3PPj05PWqb/uDg4OGjB5eXV1ptmFK6f+8ug6zfrNvYlVVpDfV9v1ksvvvggXMOeu1Mp2wpZGbkd1zZfediu18RtQWQuvqKaorsLyZ+a2N9S9gMwZ7carsVARAEJNWGwsJRREAIwRhjB29lPyZEVLsESrhDBJGkcfhwPYOd+dY10C73KyKOTJJ3aXdjjDXOAc3XdfTBex9iCkBREMiISSgWAREJcPdFAsJsrdsqd5ZtgoS22c7hfvn9lRzUFsi7SqiIQEgGELYXGEVEkmTGkggIWyDrjDO0raatCjXjAA4AaIf0SkyFNTnliJhCDCEQ2sJmZNg5Kgp0jo0JiJg5wMxgNTZW21Jti2AlZvb+yepiISJVUU4mk+lkQgJtEsZY5hVLTCFEZDssK+3R+UQkz/OPP/5Yy7TVvTw4OLi4uChLx8wAabBvtBuKMFg55959GoBkWab9KbSoQs/vum40Gk0mE96lJTWNIZXVFoBv374tiuLm5kr7fFZVdX19qz2wqmr8u9/9jhNog0O1kMvl0mXF0dFRjPHi8mo8Hj979uzj73wiIutVfXFx8bOf/ezw8NCM3eXlZe9bY1AgqgH/j//xP5ZlaUz86quvjo5ODg4OLi4uHz54slwuq9JkRU5E4/EYne3axhlzenr6zTff3PvjH47H465rfEjOubwq1vVKb0Rxf7VmqlO08GxoM6OnDUOs9j3YP
3RQh9/DLnje/82eS/Jt1rXswuxv+aIiot3r0l57Ic25OVsOH7j/Ff+tQ3YJdxQYTKs1bjSCJsa8D530feSUYmIBlC7G/c8c9JROaOFdUy/ctfTcpvKBY4wkoDxH2v11q+n2yhHFCIAB0HZViKyOGGSZwSQgkBvKHVmDBCiSIhILE24vHgWIyJHpu84ao01Bfds10lhriyzvwwIBQEgYtEPX1psz0LXBOVdVZUqp3iyD7yfj8tGjab3eSExd067sKicLzLnLXWV737ZtijFa4h5iFDHIbCRaSAwMSSZFUS9WVVFMy9F6XberJrYhQ+ebNoQQ+sgpGSJrjTOOiGxeKo5NgBjZCFQuL4zrYtJ844MHD6y1V1dXcdcFeLVaaS3z1dXV8fHx69evi6KgSblpm/rVsyKzjrBw1K1uoa8f3H/ULRZN2374/c+7yE+/eTk7OBTnkPnm5mYymdy9ezcGdsZm1pUum1YjOpb55VWe5xnRw/O70IV2vuq7W0tQzAprsGmS75OzJFDkxcGL118FeUtFNjsfz9u38+7F5Hy26dvYbLRCP8VYWMddl3p/UFTduq4mJSFaaxPKstmgpcwCIhNEFEZAAnZGDIL2ESOIvvMpxtwhEUTfZNbsQBwBiAhoDGbWisRdCzbRdg+KHLJx+4KmLxCBCDVGQARrDcC2GALJ6LnMPETvCNArpxS3BEvZ/gPEcoqRWQNCIrSMLkHsO6/uInOPws4Zm5El8cEzsikwyzJjsWmXb19dLxaLruusy9u2596v3t6mKE+efGTYvKHDIs9VuhAxt5aZ+75f1w0RIRnjUBR3QQIylFKGWFWTvu/X66UBmY7GRNuuakhiHSWyMfrESUASrhMPlcfIiAKYQFJiC2gFYwhtH4iZhBHFWnLOCVHfbNsjiUid0ng8rpt6E4JW7UwORjHGNjYA0nRtH/zQhFtRfe/9eDxmz7fzt8aYyWRSVrOmae5Wo6uu71LKDCDyslutNss+Bj0HTVqsVnZQZoNl0ye8XC6JrHY3bNtewRJEbLtaHU6zK7cfyBzGuK1Fhb1OaizGmPl8/oMf/EBd0GfPng2emJIwlTiiI7smh0dlnqXgtcslAgAnrTGv26btUxJWVpcirjqobOjemVJyzo3H4w8++GC1Wi2XKy0eTykxWwCwWY6IeVYaY0JiFyIYUldA/VVt02StjTH0TQtBABEzq/xnIlK0feuw7aRh61wJ7E1b2AHoIog6tBQHt21A2AdD9IcWZvjTkOfAve4m8H4stO/9Dif/V79C3s8lfuvYdZt+l4IzFomydlUfHEzzPNvUi7bbiKQYIcQ+chyPxyL8zdMvv3r6+67r7tw9v//o4ZuLy4PD8b37d8ajg5ub29cXl9533zx7WTz5M82Xpr3iD94l9IYrl11kKwmGdpiyQ01T2kISsHM6hze+t4AAoD63sO/7wmrOI6a+hRgdaeqLNfWl66yyrdMptNbxWx0hQPodB80Nj1Ur67Ub/T4B0Hs/Pbq/Wq0SyGiigyizyJULYbVa9FtbDnZ42O9DxsjM2h9aZFe1hViWZdstjTGGnOwBANbapmkQeZf94303Q9u/P3v27Ec/+tEXX3xxeHg4FEbUda39aYhIh54Cc5ZZJuzbOnhvAKwxArhcr4EskVzd3BblpCgrtK7btKcPzxTI1baLIfbW2rt3z3VMD/OWf2etJQLN5xhjjMvQGPQRjGWGvu+bts+ybLNZ31xd51VWlqXWEkIAmzmLxhhjEcHaRJR2vd9FRDNy260g/xXiJe4Ym/sCM/iHw7J/SzZ0a8oOsBlcL+97+IOIEfcqgPSj9uKL9wR7P7T7rx5DvICImnwi2gLUyuTUcorRaGQMth0eHJy+vHj98uXzlNInn3xwfHzo8izG+IP/8b8TxvW6RrJFaY+OZtZm9+6efHH17ZpV3UL76zMIp4gQGM1VKLPXbJt0JGMMIO/QnS3KJdtSJlDhZBESYAEBDSMtEYEgx5RCT9Y4Mpu2Tikpdq3fq/kw7YKxTx4EAGutvCPEvbuFIcdjjKmqSvt3aCBwc33ddZ11LoVoEI0ydV0PMDUWLZkin1nciwNhANNYk6eiXM0sM8ysA4ZCnCBiirIbcGGKwunAakTULwZB3BFqxVgFBl+8ePGDH/xAu6NqCTwRDYMju67TvvHL5XwyGjuDHFPf92WWj0ajqhwtFqvxZOayeHl1nRfd9ODYutzaTmlK67VhjsrzFkiz2eznP/+5cy4lSSkaQ3meAWDTNJ4zS2AASIQFEzOHVDddiFdFmanqcdksd5khAkmKyLldffpWGFAAeAdOsQgCAgnIH+TlcC9sG3wN2iHGgxb/Q6nYV/D6pX9owYb34q4rwrcO3BUZ/qEc/rcOZ60h7YUHacsyT8aYMs/apu66xloajSfWbGdUfPX110gwnRQHB9Pj48NyPAKAkOLt9YVzOQtOqrwsRmFSJIbrGxos3rDpdavoNhgiveFqdzIQAECjrxD6lHCr9N5fPcR31VQqhCAALAicOWcI1e8mIiBi5hCCoq8DRE97fN1BHIY5MESEe43S95WpiGjhkfakh53aTYElMViRmDiGzOaZM0i2Kg+bpo7JF1n+bvDi8LmIyABEpKCl5up41yTb7Goq425ksYqic455V/8uqIOmiqJ4/fbq4cOHX3755QcffPD3f//3n3/++c9//vM8z3Wmmq6+RgUHBwchhL5ryiLLbEEGRCSBlNXo9OxO04YuRLCYfIwsPgbK8icff7RcvlXKpQhro38AbtqNy4xzhqLE3TRibQhPMZGxrGNDBQQpcep8X9f1eXaa57lOJmFRgJsdkUFi5tB1AIIAkN7LcaEAIpIAoiC82z3f2tw0tDbcQxe/dfL/jYF693XvS/Xwe2XqDP/8loTvn/x/L4ckoN3yQUkCwigKmsfk+8zSZDIS4VcvXlxcvGrbdnSYHx8fn54eF1UeQttcL40xNssuLy8ns1melVVhgSwSoDCIR3S8x80aFoTfJ2zpb5iZU0xp4M/B0PcJcJtyHIbS7BZHbzltH44IQCIRp+MEQzQpkEFkE30ffffg0aOhKe4gVzFGnTlJu+JPHMpcCAlQPdhB8Wl6sO/7tm6AJcsyZyxTSill1gJnhMQpcYwAzlpCsoDYdnW9XhVlZoe7hf2c0p47FEJANPpivV4nbq21hrbtfZWkol4l7Lx54XdNfjWc03Y1Oo+BiNq21czeeDwe2hlqIgESh67tYevUNk2z2jTTg1hNptcvXorgdHZcVKNN3Qv2f/qnH//8P33JzCjJEqTQaVP3xe3VpCpTSol70NGFQRCxzE2P6JAYQQQYAY0zwAAQOaWU8rzQftAsgUC8D5kTZubAffICkBEyR0hsUVEodUC1heYWFv/WXtefQ58Y3qOzDHI17KFvOZmyxw3U9xr4tvRuf2rlu+wq3wUgsbDA+wTL4ev+W3Ko+SlELQ7czuchQiLI85wlrtfrm5vrq+tLY+jBgwc//LPvzec38/m8blaj0ags8xhjs1l9+NFjBLNYLK+uLkVwMpkdHZ7cu3Mmr1aaYgCwzMi7
kkjtlyiS5H2Xvu+3TEnmLb0OAJCE+Z2wicjW+3r/nkh4C0Vtqw3FMFprM2PR2YjoA2SWrEFEUJDVEokAGpyOK93AzGy3mUNbVZWzOEwC1bAQ9iIy9e/yPB+Px+qU+s6AdUja8D4hgCUSIESxRpBYYrT7fs47Db2rbBhyMgqE9H0PGEW2LfJUVaj16/vemG0VXIoMAErS12TD8fHxer2+f/++pvWU4anW7/Ly0lqroAgAOEtt0yQdI+5svWqur6+tzWyWA4B1+RaPstTVm2a9Ikwh9taAiHRda60tclOvV7vJctGQqrdEaLIia2JKwpET8DYHjYCAnFnXdZ11VebcZtOSkVEx4hA5Re3EM3h9aAwbRERLGomhJsWRUQnO+9H18EZr3/XUgvexlj8UiWEXDqF1Gub27DCh4S24o33ty/AQYpBz/39ZQoO0deO2zxdEmFOKAgjSbNZvry7btj2cHdy7d+fw8PD1ywvmCIy99zqnZHowOTo6atYNWRcjg5Ag9l24vr559uyZMSeD4dqJFoqIpk/3HezhancrEPSFy4wj1/e9SNrl6/c8CFQlhMiyKzVAQokxGkg6OqogQINioIzZarUaRrvAziZr5/jh6WhKSTEYzswwpFnhHL1arfIbCNLj8Vgly5GxGQCRIwRgiSFG7kOXldloNLLWoIT3aBm4A9mIcGjppc9eCym895NpzrueQrJjXROR9z7PjZJRIiWN+rz3J3fuzufz+Xx+fHw8n8/VuI1GI21We+fOnYEMoFWFRZGv15sQwmg0Go+mXes7H1abGqEdjSZZll3fzIloOpldX19//btfZyhd6Lf+QL1y42lGxWqzzMwUUp9brMoxETVNE2OPmGLElGwKKIIpBZRtTwxjsOu6IrdIkFLKXVYUed+3yYslAmssMBFkWSbMYq0ytpGEGBGBEAnf8dr3PUN9kKrRBhH91rIPz/IPj8F4bsk3u5Kl/ZgKERWyG97yTgj/W9L23zi0DUeMmiAB2XUequt1URRNW7dtOx5XT548Kcvy9nb+9MWLoijGk6qqptZa50zmcmGLIJzI9zoP1BgqQCgGGjwCtWm81zzlW8W+sq0eBO2Zv9VWJDvGTLv1muUdTxARaadqgDR3AwhCIpvVqspcljtAjswkbIlcTovVUvGOYQUUmJnNZvtCMQSrOo8gCUBioB3VHmDdtHmeI0vyIZKhEVhjc+tYCGg7sx0FQgh99E1Xu2SsNUWRhSjbTl4AYK1NiZumQcSyGGVZttk0tCtLSynpqmm3D2bRFzokNaWkkyQ0Ka+sF4Uly7LUpL/S35RWNjCttaC+bdt3A1CNVFUZQqzrOs/K4+Pjum4vLy+Pjo5EBFiOZtPMke/WBOm3v/5Fe/nryWSiTdYcudRvbppl2/bEARGzrODQJBGIyYgYwYPJRFhS6JgBGAxuya5d22ora4NSZnkIvu87YxwYk1ICBIvAAL5vIXiDpIN+OCbmmBkQ3A7rMiZLu8YT+7EN7MovB6EaktG0O4YQYDhZa754x4D/Nq6o+1VEH98OVNum4AmRjAnbru/vObrD3sJd8b7sKITMuFrXZGA2mzHzfH7jnLtz53z+5c1vf/7bxEFHL15dX79584aZZ8dHxhgRC5xbk1nnCK0w5WUlDOORrZtOUuZ7WS4XvhdEyTILAEQ6FVN3V+z7ranfqRJgTlqbO1ybD526XTrTN6XIvJUQ3LWqMwZj3HZ8tGQAEZhjilmW5YUziH3fBo5Vnim9qCoLAOjaRvNnVVU5awihqTdbxhyANQTOikiKQex2Wq7Z9YLhHbklyzIVCgVKtr9hA8Axhab1NrfFuEBLXeo361ooaXNxq7tfRdEYN51OU0q+D1VVFcUWrQ4hKdUQERWG4fROcxuznRmg4R+9T2LW7nQqk4PhNsZoenD3CWbgyoHwsEcnk8l4PO37fj5fAkvTNBG76XjifbdY3EYfRlX1Z//inw1CKIJauLTZNNbaruu6tu/qOoRAZEej0XRatn1iYWBwZMlaYQzBhzYaY5BFUmLablDVsgQsKQoy4xadEyLZoqMMYLboKChSAPtCsi8z74MH2xMGs7lvOQcFvJ+i2LdUQ6C472fyXqpDD33XPmq6b6WH7T6YHdzt/dPTUx20WFXF/fv33rx587/95f+n7/t79+7duXNH1ejR0RERXV9fI2TWZC4rsjzPstxZZzJjjPE+JuGUDEKBJgdyLF1i2/luX8vAu+qNAne1vO/fguCOcWB4O6s0pW0/KwAQSCwMu4o5JJKUgBnRAACBUpsNCKMAkhChAUJJzNtWUYNKgj0XRvYycPuqc3go+6uq8cLQZ0Sb02iCMfap8z5KLPMqQZwvbiNIgkTWCCq1Ba2GmLDthyk6I7betJvNJoSkGTzEd22aWLRv+buGJeoVqBCqM73Ph3z69KmurCI3tIOGBwap3r+6u2ppNUvp++1IDSKq15uubogTSAgNO0PHk3w2Pjk6OPzux8daPyU67rzzmeGM5IMPPggh9H1Yr9c3Nzc3Nzf16qbdzO3hnZQSImUG87wAwYa4TRFAOMUArNMqCS0gJwm4I50DonVkjAXC+C7bzpoVlsQi2u3vnRgMUoe7STiwxzuT9ysJ9yVQdvn9ISAf7NguE/seYAN7Xu6+SA/vkvePQS98S1mIyGx2dDu/9t5XVdHF8Ox3v7m8vOhif3R6dHLntBiXN7e3bduPRiMim1Uj68q8KMvRqCxzZa2RRWNMv+kDp95DAiIwiclH6QN/ywnXK/wWODl4BIgIyID7Dvz2T9YqLLHN16ekk8kEaRh0lVAMEBMSQRoSSoaIIAGApJQ4kXtXSER7lMBBx+n6w676PMWIAIa2FF7Q2hDm4L3oOBaiPsa+68qicNYai6kNgkyORGRTNz1Hl1ubOQYEBkTZNurCXTSo3Ss0hRDj1iJpabxKl3K+hXEQJ5EteWfQavsR9sXFxXg81lntuBuolnblhd573PEAt7cagoYHbeeX65W1mfbCsCCzUSEhhHYzPTr8/qefnB4eeO8dhNj6br0drxVjCiEQx8XVhXOuKkeT08ODUX40KZumAaDny42RXdUZApDNiEzp2s4z67RZq85/gsSccmcjs7AXABFLIAwASaEZ1PZnIEPi/tv5Bt7V76Y9QGU/bBu07H4QOMjGALeYXU9eiTu2pwiKwBYOBQRE0Yp4UIDIkjFkmvRecnn4in1XindV14jYB399c5NSQnPw+vWLX/zyZwcH07/4iz/XgbUvXj1nBmbo5rd5Vo7HY6HcFkVRlnlRkAElsANgAI5JkjALJAHfh7rp6qYz06kumAZ7qGE1Q+JgjAEEEY4p6MUMgpG2JRFxZ7KEaNtqUrYlB2lno5JaCEQUSMg0GDqRBEy715xS5JicMyxsQAiBhCUGANAOPZAipN0kKkOQIHCKvI0jhhhhsIQxRrNrESQiurfROJvbJJxSMmhdnvmem7ZHvSMAALDaOcIYUxSF99si9Lt37x4eHs7nyy1DD2XwgHc7YEi+JZGtKTPmXcf14cyjoyO1hIr06HUrFroDALYToLYZSCK
XZS4rEm/6PszncxTg6F2eEbKzcng4efLg7P7pzIH0i3lEUTdYRFDIgpCRjGy7mW9SmgMNnsPBuCzL8rrZIBgRDLFrVm0SYygzrsAtSxM5JjZGSPW0yYwRFuMxgkjihNpoNhKhQVXDiRlFy7Pl27jLvmYdfrm/PrjjbeFewn3fZH1LRFN8Vzo4/HL4/H2TqDtYOxjum53h7UPlmiIiehmXt2tErJv66Yuvmqa+++DukycfFONysVxe3rxNkc/Pz7OsWC7WImjzLDAKgRgBC2CRwGhzKOscYEwExCQgXeia0HapD3uTpb8VVg3XP+xvVSUi4n033Jpzdmc/cQfgx2FBCDQjvzMJIAKIBIYIOSUI1hgSBE779h92ePXgsOCeo067ahLZVZlpMK9SN6yqloYT0dArNYTgysrlLgXfdI04Gh1MI+HN25X0yCDCKAq+D1w4haacc9qX/vnzl5eXlwrIqkpWl09ECN+NTCMy6tDuXfe7/TFYAJ27pC6rao7Bs1KLui2rZ0+0lcmUYtd1lrDIHIqs57cns9H3Pvn4g3tn3Dfr1cLElPrAkR1RURZqq30fUkqz0VTHNbZ+O+RIRKy1ZydnxjhhU7d+uWrqto2QmJnIERBvnU/GbaWkRUSLFIh01ntCIP52Da6IZvATsRnEabBp+1L3LaM0/NR12A//BudC9oKT/bfsX8OwzvJ+Q+FBsId1HmRbt4iujO4nVYvz2qeUFsvbvq8fffDws8+/m2X2xcuXV9eXzuZFUWyamjd1ilKWo8RCFsGkJFEw2iwnIgCOwlnlTDSUOEXxPvbcBe5xx5L8Q4gY9jzAQSSYmSzIXpf7YfhSiD2JttV8j7gn70Zq75hrBlGIDEpSi7ol+oqIMpf2JW3AqAaZVCLx8ASHWHowIYqVKOqrYqmx1bZxhMGyLAW57RvIbCkTMBRYfO8ZMKUUGWzadddSdajPrGmaoigGyZa9Y/tE4V3DBUV41CXY3oy8W5R10wxokgbfQ1cL1Rm4NzdbRFigaZo8z53Ls4x81yeBPHPsO2fkYDI+Ppw65Pnqln1/dDhbxcgpEYo14iwAAyEDSdusfd8agumkJBq1bbtZN33fQ+iJKMttnlWFy5Z1X7fBR06SCBHQ6HQRECFrDRnmLX0J49ZWGMQhSwuwNYHvjBez2auy2d9YfyiHg1wNdmz/YQ+PU4VE/Zw/tKWw52TCnuUcXgyfMHQqYWZtf64/dT/pc1lHXC4XZZV/+tn3P3j8IEl6c3XZ9U1WFEVRANBivQo+jUaTSZ7ZPCsnFREah2TBWkKLKYGEBJTQgjEklELX+9BG9mjA7LVRU6ALd3nOwTXY4bS8j2ztqycRUVRZZ/furSeEMDQuEI3XEC0RWKLILJIYAczA9SPZC4kHoBjfr3G39h27E1i0GSTHFH0wSGjBkhFjRUQSi0YGzNonUiRFiUREzvoUV5t1BKiqql+tE0NkiilZZtZEn6a2EVGHSVxcXNR1S7s04LB22/IF2AJrMUbmbVofBwWDZlBmx8fHOuTg9vZWa9jUqOr4F8VUNDjU3TAuy9b3jJDnuYikECVFAhGC89Oz87OTvqlf31zGZjmucodyOJ1sNpsQAqQoSCgps0SU5c4RcN9tk6qOCFksweWbl1U5ns6Oq/FsMq6AsiRN2HTAghYNQhLYGnMjiBhDxAGIE0ER5ZHQtiRINdI2Rh+YxIN07RulQU5x79jfQ/siKrt2r2mv5/Jwzh/+1EzXvrnbZjXUgoegY2q6rtNRFnE3qTPuzfELIdw04ejo8PMffP+zz74rEJ8//6bruvF4rOSHGPn4+DhzRZYVo9FkPJrkoyKlgIjkCJ0gQgyhD10IXsiAEHMKyXe+jckj8f4iDC9g17eW9+jjaa9nx55DuAUdYorGGOZ35cUADLCNvfUz6V2967YrsbCS3QBA64Zp/xpgzxoPOnQf4BDZUaMAZFf+PiiU4QENiJpzLlgQETJYVVWfuK5rdG40Hq/qRhBYkAQsGmRgESFril242dY1CkBMAmAI8zyjCBDT1JXXkBAzQIwA6CjLjDD3HIpi20MlxpRlWOYFMzdN0/k1GnDW9mHjY304neWZvb6+nlQlokhsY2gBwIoYEkHYdGvnLJi07JeJGY1khiRxFvCDo/sHWLroesB1CMYdzu2Me4/ZyBSEBAlALKNI4pgwJPKRWEBSClGSLbPZqDg/P3v58uXtm2/idDaZHpyOZgelu856L3C7Xq67hK40tgiMEcRY22IGkgCIrKEo0ncpJeAYebxsVliVxjruehAuyKFAKqMzCJB8vy1pQWs0lRclAYDNHOzCG2utj7hcb2KMVVWhlb7vACDPs6ZpEnCm7eUdEEkS38VQGmTmwDFFVtsYY2z6zqTtwOembQGoKArLKbUNJdpsNiFJjHFdbzwnBlm3Xd21XfA+BCYDhIHTum/btv3OlH78Rw8+/6MPEndfP3t+fXNlrAsR13UDQNFzntOonFow/YYNBxqfkhMEhsjcobMmiz71YtHVdW+yPImpN2HVpkBFdE5iop27DtpoUGTrAao9T0kArHNIlEIQ7t/BM3HLq0YAwyA+KkxigABsTJB8mhRjBawTSBTmxH1sNpKmVYEZYuA21Chm7BywD21D+di5HLaYolhyAACMEsWAscZmYCmCCJuEObqeODA4Z/JqlAATi48hz3NIrOADQSLaNUKLMWbImKx1gMYaIUDGKLGd5M772MeYhO0gygCIhCKoVfMuy5iBmWNKFAICEZFB3OIPahZY0q5HXdt3uSusddaStRYIY0wxRi9bzaR6XafGxhhDrw2jWEmAg/YKIQiCMUS7Rg64y0keHByMrK3Xi8jh4OBgMpm0bTseF9sP3zY810dgRCwBZ1kWow/9dviWMcbm2Wg6cb7YMveB8moym01uFptRVYLl1kvnvRBLhLbeGAskQCgICCDaBQCjz3KbZZnNnEmJOXFSshc3bZsza4SQQAREQtAaSGZOOl1ExKctIrVarNfrZUpp1PdK3UKS3Od932VZlnNuzHvueqJ3LBZjjMnylFLf+5Ral2UiqMWfQzHO7XJRb1pmjpzW9SYyC0LdtDbPYtsg4nhU1k2nI8EfP3z0b378nclkcnl5efn2mpHu3r1XN82zZ89Oz+7ArkWA9z4rq9FofHJysgF9vqAt5y1u5+uGtM2zxyB9CNtG/vzOO9j3CGhHWhgcbLMb2MZ7qbx952I/lh72j5qBbXkTbkNiBCQgUTKmvDtwz6Xfd0n0i1RLGsC0ayKRUpLELnfMyfvtQGwkAOUMAgCAQRTZYxQKpJTAkCFnMpdYQuKQxIe0c0AkJbHDRRAZ7e6onm1pDFIS5iTsUyQgi1aHrWl3Ld6dLIwikFgSJSKKBBCYk7qHVnZjD9XR1yY54/F4GRbqFEncBl1DJCA78ioRgTAkBgH1iFCEiDKblWWW5a5PnhkQAVGSiAFBJCVmx5gyV2RZlpKO9UD12ep1jUhlWbLgarXa1O0osHVFVRQ5ZbaLPqyT74Qy6wCMNeTIgEFjkAVsTFGAWSSl5LeCseOhKqpnMAoIS2
TxIWnHjz74siy74AeI2Mft2Oe2hrquvfebpiUi7zvF38oyz7IsLzLFuzWoY47ljuafUjLbelNUfkbb+T74EIIhR8Y1bQ9t33a+7Tt1SjvvGUQIIwe/CZAYCNtN3XbtqMjvPbj/0UcfffDBvYuLi5vbhTGGBd6+fQtoHjz8wLncGNP3Xu+1Cz4LQfcmOGMMWUJryRpKbKy1ne91k4SQutb7PjLS4G7vQ01q6OKO2bPvYyMip3ftZAZHXXasN9nLtQ54iWYoYPcuQjIoKXoSpm0XDEhE2p5+P0AgQG3tiEMCc1eerhK19Wt2v7FkdH5OCtG590rJEBEFgFADycxmZVm1IUJMSXwInYYA6u5aEUFBAiJAwXcKxseYQIRQCBmBgQEZQXLrtpC6XqsQE4sIIXICzzrgKVhrHdk8L8TIUGisSUIiUshHVzC9j0flmSXlRyRmBNqS5UUbrgXm6WSSFa5uVyGEsix3HTs1ZjDacAnRel9bS7k1iJhlKcaYQkwp9T7GJLm1jsjarO26erUMsrr/8EkfORiZFC543/QNEY4q5zlSQiJRIM2KCBm2KYTgU98G4xAERNEoFiRbMFDwqW37rut8itq7JSua7dQEYQ3ANGbg6JS1p3bAh06Xazodq85S9G8ITgraRh0pimJduoPvPXywWS5Xq5W19vBw5IpC5+TkWdmnXYNJTiLCvB22Xo1GrQ8319d5UXz3O9/58MMPR6PR119/s1qvjTHVdFY3jV+vJ9ODR48evXp1kecFkVHPpm1b9uycy04mFo2z5Cw5g0SgfVmEEYA4Qdv7vg8xCjqjlSZDlMW7JOqQ4pIdf2OwbHGv6H4/iv6vGsb9SHKQQ8JtRgK3USEnkESEtGXADJI/fObWt8KdVLOICOmov/e+MXFiEbFbT1rj0l2zEKOqwSCDc3lRVH2oEdnaDLhBNERICGCSJdgHvklAe9NhHzwzEBE4BDTMnBCIhBT90ZQMKKEBRYi23GXU1STasr+MmAE9DyFo4aP2UPiWR6EvxmUVhQNLSL0IEqBhVA5NSokkIlZ57pIUZG2C5FyOiFuaC2k9H8vO/ds0Xehb5TqJCAc+ODndLFcA4JybzqgoyxBSaFqOXd94g+b8aFoV+eX13KfOcZ6SERHRShdOBIzOkMnAAAAETglYUiQAnV4SIjNz23XDxMU+buswO9+HEBi2WXjded0GYowC7yZAAIAALFeN7LqYkXnXiWujpaIpKdWpTEnZQk3b+RDJWJflAti0bV3XddMEgbpvVYabrmVmIGzbNssyTglSPJiM7969+8mTx7PppGmay1dvjk6OyZrb29uirD77/g+S8IsXL9br2vtARAQmcwUAiNliUWTYOnSWLKlHshUDYfACfR98zwzWYf4H5Lx3x+AH4Q7O1Rf7juK+qHzrGM4hIiCEXWEFM4NyDBHhHXosKalfL8wRUQxZRGSdTrd9Aru6YU4sUZSSuOvUaMkgiffsvbdkjMtEBCSJCAoIyja+Q7QmY+YsKzJXxLhOgbMsI7TWEkAERqZt3lMLzQd9YBAlCoM2ijQGtn3VGQg5RLXX246uAsIiSRgAALPMTSaTIq+0ikIzs1vvGVFEdDvGGM22AGg7OHowjBllMXGKPih7BW0SEpa+rauqsJzqek1WxtMx5fbN1ZtqZIkIGBFZBLeeoSQBYo5d55umNSg6BdEY44ppSNzWTRQgorIsi5wRcT2f+5iq0exgXExHFUm8vpmHzU1enQKqk8CIWrwkBq33fiPcdW2MPoaemTnEGL1PkFLquq5uGpW0JBxCsNb2MWyZH7vUjmPqg+ogirBt4AkAkIIxJsawTVvZd6rNsFL/gBlBKCeHrjBEN4ultbaaTK21rQ+NfrvLWt+3fjviS31gFNQs1nK5nExG3//edx8/+iDLMt82BuTw+CjLchbJs8K5vPN914e27cvxyJBNIQBSlmXO2jzPq2kFJMZQbo2zhMASZeetUeTUe982wUdBsEg60utdr/EhGQh7Gc6BeqL4pNnmG95J4JDVwPdR6MGxlD2JBQAWBmCrBBsmQB3XyYzbTomwI83hNtcPAMAh6mVt/T5tJ8OCZQksQqwpAI4pGQbIUJJoIIcCu17JBtCgBeIir7Ks8H3wMbisRERLBgjBqkiTfd8iEaKgNRnaJIwEIsKg8TEBCSniZwjRCFIU7ll0k6bEkNiaLM9zRNP3ISUv8G7teI8Cq5lGay3gO8J3Sin0fYghxsAsYChBSIw+bqdYgt+RPCRlxtrMKUmOSJiREEWSSEKRtu2cswp1MEdD23GLddf1IbEgpBT6AMDOWmtwuV7neY7Jt6t5WY3Pj6YQ+1evLrL8YFC3hKzWKUq6rZfGGBHu+ib2fpeKi8wEu1Q47wrefQxlWcbIUcQYIyBBGAGEBY01O8Jk3JXMMcc8p5BSVOeKDTEAQmJOQAYBkISQyZB1Li+std53USAmMdvCbQMoaLDrW5+2Dp4YskhElKXgvS/z7O75nQ/uPRiX1Xxxq1hOVY3Wm41z+fHx6aZrv/rqqQCc3rlbFAUn6TpfuOzwaOZsDlsoQawR68gQYsLA2+SHiMTAbeubrg9BwGQINkWvvsqQcoC9rADuEV+HWJE4DUIoeyTPtGtkTjteGw8jHxEGUQQt6dUWISBIxLtIklHofWhHY0IRIYG4az+s7zWAIJA4lZkJTc+BVZA0USkpCYEkHUIvoA0KjEo5EphRURV5mVLyPpbMzO/lriyhRe08AGrhtkuQZVnkxMwC2whSa2UMiEGwRMbYKGwEQSRItNbVofNd37dt7pzmP40xWnugYPqg81QIt0uD24SMon++awNHFRNCHUnLEMPR0VH0PrTNqMizzK7X64p4Oh13rRJNlPEgKCiCIknb5ufjsXNuuVwu1qubRUop2XLW9T5zziK2bRv7jp2TxIagLLKU/M3lcnZweHb3XpyMLyH6zRJ2BCsRSaxMg+BjIMKUUtc1uudYYkgJowEABUKMMcAcUup7b4siioA+ZtAAUoC5770GfiICZDSGIXFdCImFAXXbJUSFVS2RJpcAUBCZCKwlayfl4XK5XK5XU5xOp9OsyNfLVdM0jQ99H8hs8cZk0JAg4ng8fnD/3uOHj6wzby/fzOc3ItLXG4MgCIK8WteBU1mMQorL5VILtZnBTfLJdGrJaS+jHMkSGQTa9uDdTYNNECP7PvZdjImNNQyUmIDSt3xIeR9cUZFTJiMisu/3fdFBPtOuB8zwOe9SqSqE9K26LVQhlL2ErQgIJBFEMQSoHWkIdKC9oqJskZzVMd0ppe3An+iDM9uaJo4xRp9Zt20nxYKonTc027ntiZhlGQlxTKLde0CYBVlEwPKWhC1qKBInffdms6mqKsvyzvcxBCIS5q5tpzGVWZ65LMbku9b76EPsvC+KKiND1qbgu6ZFQwbQIkUkLTrue51jTDtmzDbrqvxj2SF+ZW4cGBbxiX1sDdoiyybF1FkKoS+yPEbfNDw6mCCi9z4mgpSIyBp0zjhnsyyzBvM8P5xNYozz+fzt22uRdHp6enh4+PPfPs1yW5aj8biaj
MfJ98JRUry9ZkvALM5SDL6v10cH43/24z/+T3/3m6qq8sxpqts5h4Rd15I1b99etb4vy5xBdDjUaDSCd8OPrSBFQAY0LgM048mImVf1RuuyAaHpehKnII1WxO58LY4xWpMxKTEqL8ttr84UkYwBgKLMTo6OFcxwed40tTEGskKT8syszXK0brMsy2dPvzLGTEbjFy9eHB0clmXxL//lv3Rk/tNf/V/GmO9+8slicfuLX/zi7OyOdh5JSZq28zFMDw/OTs+/+PnPsix7/OjJZ599dnxwuFwudd9nFlMK3ps8c87aFCMz+z72PgHA9e18uawnk6MmyGq1mc6O27CQPZIK7UYwDCJEuyrHbVlcfEdAlx2cM+xYld6dD6LMUkfWZFlm8wwQvfdNvW7aBnNnEZx6IhFjjAbRWScMW4OlwpkSKCMlRBFBFsZtAE+AlozvWo5hQMskBREmsmQQmEhAKGk0qMQag7Sp6+Vi8ejJY20lUde1mkoAYSIRtrtoEAC0/eY2SwNaIBeTA0BjAMABOGNOp+PpdJqVRdf5xZI22GSElXNgbIicUpIQJQuIGUhKHBjfURBU9WxVEG51vArhAN4k30cWhm1HdMaoEK4iaXnhCKzNyTkDBMCQZYX+NTFDRCJOiRHN3bv3r64uL169atv6yYcfTqfTzXr9+vUbk7nlYlUVpUi5WCxKZ8ej8vrt5ePHjxeLRXc97/tWhyWdnZnDw6NPP/7wzZs3r14+Ozk5+eTjJ6/fXNzc3Nx/9PDizRu0iEGSsHG2Go9CSmLICDGzziVDQ1u/SMtHgldhE9z2ZcuKHMM2JNaqU9qWhPY2c1mWqTSKSOTkDNnM+eBB8OTkJPownk6Wy+XBdKLEmsy5osySD13XpBQz58aj0fV6vVgstMckIjZNM5vN7tw5/7f/9t+G0H/51ddNuymK4sWLZ1VV/It/8c/rTRhNJ3meb7ouAnDXtk33+s3FRx995H2s2+bnP//5wcHB0exgPBk55wLXzlhCJEFh5iTIW1b6el2v12sfxIKklHxIre/fG3G4J10Dc1r2yJy0BcphL1x6j2/9LfO4r82l70TNY0rGmBACEFoSQ6RET2vRWpti0GopAgbgpKMBmELotaaGiBBQXRTQ4lGl+CBqfIuIhMDMkFhprjo8BhABIYRQ5oX3vsxyBFbGUt+1LssQwZDANnggFBbeVnMYInDWFVnuuy7FkDtXWBdjNGRGeXZcFAcH07IsV5s69R1EF41jIgbs+tCGKJyi95ZFOGFkcPsJUmEZOjRu6020cxHvampC9FFYkNTLkJiYEjnWTGNRFMagdWgzl4iBPVGmFBZmFmHEhBhE+KuvnlZVcXx6upzb5XLVtt1oVD14+LDY9KOPPmrWm03dnpycBN8tl8vDw+NN07RNj9YcHBwpivv27VViOTs76bpG5yePqqIsMuZYVdXp6anJ3PXtTd02ITIYQoS+D9mu1dU2ttnB630MUTgrcpdnGEk7RGZloelWJDF2W9KlQC4ROWeIHDN3fZOYRMQ5V4zGOpo3xqjjeqy1y/V6Mpls1svlqrEIVVWNR2XbtvP5zXaydNMeHBxs1kvvuz/6/PNPPvpouVy+vbxoNquPv/PJ8eHB1dVVu6lD9F8/e+6cK8pyNJ2cn58fnRwbl4cU58tllmWZDskqysJl683m9ubmk+88BmBOECFK4r5pvdeaDFiu1pvNRkwFAFE4CseUjPmvVE4O0jXAE0Og6Oy7+knYA2D2iemDWCqWo50afYqsWWVnMudSHwGQiAwYEmtSwl37Q9y6hTt5VlKbgDquQ05/QDRo+1h3k60JcVtilkREYRuRXXFCiOPDg/V6rbig9x3H0PftQKHVwVAW2SFHEBIRBDRgCOBoOruNIXCqbGYM9iKGqDTOSjTRYyDLMUeIBiMBI4J1zpALzidOwftdh4XB+x9+7uszRER493u9T2RBw4Ysx5g47vSNNnsnY4CMIYcMhBG3DwuRyOqixSQisayqEMJ6vVpvNpnNRqORMabr+izLvvrq6fHBzFr7xRc/m00n9++cP3vxLPTeWjsaTfKi6PvQ+dVytal7f36Gp2dHH3/y5Pmrl7/5za+q8ejo+ODq6vLo5BitScKd77vWW8qMMUmYZNvHdptU2AlhTFvKi7ZP7/s+gZTWtrFGRBRKIfodO8Qa1M4sAEwoBncfxDw7nHVdN5/PtRxsPB5772+vr/K7513XNetVkbuqzEGga5vlYk7FdFxW9WYTQ+i6bjaZ/OAHPzg7OfmnL34aQg9Em83KOSpHJRlcN/WP/uRP1+v1arNumu5Xv/rNum0EIc/z73z66e3tbd/3uXNnJ6d37tw5PT09Pz+X5GOMiZOghBCaTdM0nfeRWbTlkSscIDKzMegyk+I7IRxEbvBC6Q9my+n+GcTyWwL8h0IYY1TMTwOo4UOMMdYaa8GqwVO6LIu1dnDWAEAjQ2TZVpnv1/gmrdwHRNEWHCJiCEXIAMYdOgrIoIkGFkRABGvNcj4HlKoqueEMnettSkpAN4RgrXUppaD9a4UEOKVEKFVR9nnRM5TOWjIUGQUyQG5rT8i+jyFYiaUlz+JZMmOKLK8A2t43bd+FiIjWZmk3RWgLKMN7oqgrtL+y2VBkbLSrR9RhD9p+N6WECMYZIiJIiBhjQkTnMiI0qN4fg8jp6fF8frtaLazNxtPxaDxeL5dv3rx5dnnz8OHDzWbTtfWd+/fY+5cvX84Ojha38/F4bG1WN33XdSbLKaMY4/X88v69h5999r0udL/+9W8fPnp0en7+1dOnaKgcjU5OTgRhvaoZQS8mttsGWZr4gl2ePQrrMx7kE0ALoOKOrxxSAmN1RNmYSIOfRISjUaXNZrQWwlrquu7u3fPr6+vj48N1V1tLF69fl5k7OT4kQt93m3UnMR1OJ1ervigK7z2HOB2NP//8s/Pz85fPn4vIZDKJyS+Xi7przk6OjDXz5XJx82VIsSgKk2e9X61Wm/O7Z9/77LOrqysdv3P3/Pzk7DTPc2PMaDRq1qsEzJxEMPnUdb7v+5TE+xhCIGvzPI8cIkfjLFmM4V1ufZClQVT2LR7sWOzv9PUfvEv+4AghoCFjjDNkt9Are+8tvPuW3Wdtn8XWZDHT3hB49T4HydxOQBZJuzJrEQEZulduc/RqUrd1ppiYJcsLSaHrm75tjg6mQOJTJILOt7oNLJEV3BYeASECYhIB4SgQU06WXGYEjUBBhgBsEkoeghEQEsgBjLOGxbAAgXMWXaaFEa7tAydEQmTaNTnfFzbeVZQleOeKEJElDJQUUVZo2FpblaVzjhBTCsZYYzHLMuCACJo32nUsRxRhjszx+vp6PB4/+fDjxfxmPp+/fv3aew9kHz165L3/+MMPJ+Pq+bOno+Pj6WTy9PdfHhwdTyYTYfC8Ikbn8igc6hYxzRdXjz/8+NNPv/P6zcWmXk26KQDU9Xo0mZweH00mk+ub+XK53LRtSh609MkaIiAC1p6vMRoEMATIMXoR2TU4ClohSqTwOGZZNhqVZVl67/u+875PKWW5LcpMRBKH1WpljHEu
Ozk5+fJ3v5mORwbw/8fafwVJlqXpgdj/n3OudC1D60gtK0tXV1frnumeQcNmsRhgbWcNtnhZmHEfCBiMLyQf8cAXgHzh8oGEGWwAcgcYLGZmZ3pqplpWl+ysqqzUkZkRGTrCPVyrK4/gw3H39IysbgIkr4WFRXi4X79+4/znV9//fdlsttNuUoaUEkbRQMOgikexEIIRIuKYKHBc5/Kli9evXgs8b2Njw7SYTzGXy8zOz/mDfhBFhGp2Quz3+z1vUCyVSqWSm0path1F/JVXXjk4ONBCHf1+H6TKZDLT09P7QSxjKQiCEqCIrosiEk10QBnCkF5IMNPUn/2URelHdG9wPGI6dk1jB3jKPeoawfhP43QGxvPTbCipHUdxFIcWRQpKIJEg6XClMYNQQlAIDSuVGjZGlMZmiDF2kqKuFaGUOIhjwzC0eKoc4UuHmwVwlDqJAqWEjkhNk4Wh7zhWGPn5fDaIA8KJ7bD+gOJIt4TFeqoa1Gg2EkEpVGowGICUBhDgAlAZenhSKAPBZtS0TA0NUUJalDJGQiERpEHRchKu63pB2B/4YRgOSVieDSKOlCWfL9GMjRBAaLAoGKBvgWmarmtreigdVziO4ziOisaOdDj8MYw+pFBKuk4iDEPPG/iep4UQe72O53nJjDs9PR0EQafdTCQSDEmtVqOm5Vq2BARC3EQKjTgK4yiKJKBpms1OO9k4KU+VX3nlxubWdrvdpmyYfhBA23IdJ+j1+iCezTExypg5FAzR4OVxkyOOIsqYZVkaDG0aVEqpJDcMw3Vd17W1k4mjEJRSkmuGIgIolQApGDN5GNkZBxEtyxKSI6pBvzc7Xfb6/TDwJBLbMU2k/qDf7XZNlh0MBulkanZ2dmVlBRGfPNkElIVC7uSk0u1iOpPqewMp+dTU1NT09KDBAGm91Ww0Wplc2jRNz/Pu3btXq9UMgxYKhdLUVC6TYYQGQbC7t82kCUBAciSoFAiuhBBAqQClhhAKLoSyLIuYpucHDJ+TKz0VWI7tc2xg/Hlpt0lXeeoMurbnOE7E9Qy6bqRRSankE20MkCglGXfnCSGj9s/IrSnQ/Ktk+MNkVzOOY0YoMJCjUWOdBTJCNY3NMCIdKdVoUuDpuVkh4nQmeXyilIiTmbTWltRnYJrbYIxYRaQAAhH8/sBilKJSsRAomWEwJFJKXf13HcsLIi+UQnBC0KBmzCPFBUhlMiNp26Yd69hAied2tbF2I6gRqQE8SxoJIYJHQojx1BchRGO+dTNNt1y0pmwkY333uRBSoi70AyiiZUlTjPNI4zZt29Tk0I7jSGY2Go3I98LAAyXmpmcymUwQBG4q2W51+p6PQCMhB56vgCSSSaH6cRydnJwUisULFy54fvjw4UMBKITo9XoAhJkGj2JCiG3bjuOEg6Gco2EYSMmIaIREUURHlESUUk3vGYYhIZpBTHNGWslkEhG5iA3DkJJzbgkhGCOUIqWGlDKVyB3t7wnhDvrdbDZLKY2i8ODgYKpUSCQSBiOB12/WG74/EFGsQColAs8vFmZLxSIhZHt7+8mTJ+tnVjX4wTCZrtZyKSIeD3zPdacvXbrU972Do8OB7znJRKFYFKCYaSSTrms7mog9k0pblqH3PsGVlEAVSCkF53Eck2cUjMM6p+VYwMxOt0ufzUM/d5ARgh8ngLIAwONo8mmT/nDCbGDcZ04kEuB7Y4C7/l+A5AaB4TYohBACheCgkCiDWc9dhgLdzbdMc7g+1dDxDucNxHPjhUSBntMnhIAiUmoU57MrJCA5jwqFnN40OedRFDI2HNceRkNggGEYCFKKKOQxRbANajBK0ANEStBMAiqJyhM8lko6juPYaFlGECsujTBmiC5jqUHkZdL5dD4nQAipyqVSKpkJw0fFLGk0Wopzkxp6rsIwzCgIkSAIqUBSRgjKOI6E4AAykozZyVhBJJAL5ftxwo8JtSSSMBIGEgIUJPJQEMR0OjNoj/lFqByOnQAimlai2eq46aykuL3ztBjmC6X8Xu0AY5pIJLKZDGazeuqq1R0MwvCzn/40m0rNzc0pEMsL0wcHB77v9Zq1lDmdYSXRj092TtbWV65fOCfDwaNHj1yWQNELej6CEQlJubARFChuYq1WLRaLCpVBTcMy6s2Gm0qGPK4326Wp8szslO+HQRQRYgAxJaURjyWQGNVhrXZ1ZqbTagSeBwAEoVTInZycpJNJilAsFsMw7HfYfHEBQA5aPQDZjTxC5OrakuWaALLrd4hrd1oNK5Hsx23GLOTe8tqcZVnpbOLoYD+KIsdyMMbNp09npqeZYvWDtm2n1ubOhmG493DPnBLJKKmUAgsybrZcLudyOYL4k/d+lnCcbDY7Pz/v5u36cdM0zbm5OcP0wshLJ5M7OzuO5QouAahrpsNwAMLkgW8Y1KZG6EUCedKyMAp0vYoorXqiuJJKSlBKU8XHnKNWzlMyiiKLTjAbAZCJFjyA5GJoopQhZQwAvD4KaTJCJUoAJRQoUMSgIoqEjONIMURLMgMojSRR0BFd0zQN05RSxoorVJGIwjBMpVL6TRGRoF50SlJUxI4NhpRQpJKgjoYIxZ7gqAAIEUopgaAkBSSIQRiFlAnLvPd0q+N1uypyStmuCjiNJEoBsULBlAAkChUSZIxRg5GEZTkGIVIYCqgWu5QghUCJKDGKRSwk4TLkIop4LMEmjBpGrpB3k0nbtoGCk3AWFuYME4ul/Kdbt30/9DxPSR0wEFTPxqhhJF2hYRYAkiqqw1ZtS2QELxzVdSiORxPRBUrEcFRcMcYIVTxWSilK6WAwqFardsKemi5Kudjvd9utlmO76VwmCILBYKCU0nt5s944qVWuX7+eTiRs2z46Omq1Wp7nMcay2WzUE4VCYTDo1ev1bC49uzA9MzOzvb2tlDIYSyRTBnMGQTiAgFIjkUxS0xiqiIVcckG0MoHjMsa8QaCENE0zCKJBr0cpNSjV8xPT09PddkepeH9nlzIUMUeEhGsrpISQbrebSaU1JYxUaNu2aTJAPhj0giCwLEop7XQ6ALLf71NK41gQEmt/4vf9qamp9fX1brdrMqNWq6XTaSnlwsKCVo9Lp9PNZvOjjz4ql8uXL1++tbMlpdQBfxRFR0dHnXY7mUx+7e03DWLoRlEcx5zHpmkAqFQq5TiONwhs2263O4yxQbOF1NI54RALBUIIEKiEksZwxEfXu4c4ax0L6aoaGfYIhkUsJb8iIXyusPf8EfNQSikUBwqIlBCChIGCKI4kgAQUSsZKEY20BkUNkxomZYyMY11CFRI55NNHDbIBAAlKapQSoNDsUWQ444IgRcyJFjgBlFr1VaFSGIahYVuI9Lhy1Op2qEGYZXQa3Vw+g6h0h44RhShAZx2IaAAzgBpg2BZBwYFTGYeCc8WFkkoJqVwWxahQBCEPhQA0kVFmmsx2mGmYtl2aKi6vLK2tLXERJRLORn2n3/dEFEeR5rEbMl8ITRen2zI4zq2FpsyTSkqgSIY8RRoIrltq2okDAGPMMRyZJDGP4jhGNBCJUpFSSCnd3NxcWFhYXl7sDrrHx8dRxA3
DSDjJhw8fzszMzExPR1G0u7t9cnKScNy1tbV6vR75PiGk0WiUivl0Og0AWmAUUWmS/Hq9nswk0+nMxYsXP//yFqEskfJcJ+lHca/bt22XGYZFiRJy4PmaYdEwTQLACHVMK+m4ANButnq9HiqwDDPiscGsOI6z2Wy33bFtt9frJZPJOIp0wc515fTUbKfTieP45KSulEo4icFg0G4HhomWZSSTySjyarVaOp3EkTSdaWr4LjJmFtKF8+fPr62tPXnyRA9/6c5hrVZLJBJWKkUpLZfLmUym22p/+OGH2ZXFRCJhGUYYhqZpJnNpg7Eoio6Pj9OJpGVZmVS2XC5rNcwgCAKLaeYLABgMBvl8gdbbBGkQRVIpRKqGfmz4X4bni5y6SgkAZGJGaSxnT5GMZzImLfC3GKFSQik54vBHpYYrmzJGQKEY9hC4BKQKFYQ8VERx9SwnFIoIoCIetQFHLBhcgBAqVoRzGcnYZGpYVAPJJUpigC7voAQKAFSTOiqFUgIgNQyLIOMxD4Mojnm301cwZGpkSdNWSikhCSiGYFFqokEVsZmlFAqpdzBUiqBCQhiyRCRp4Md+LKnhWGbCTqUN2wJGy9PTFy5dnJ2dNh2mEAaeF/GYkSFoeHjrhwPBkiGJR4mxbuzAqMxFCEopJShKh0odQ5E2BZLKOI6jMGZRxKiBjCYd2+uRiIBuJCJIQMIYOXdmPZvPBUHQajTS6XS5XGy320+fbv7gBz84OTlpt1pKqWKx6DhO4Pn9fp8QMjc3p2El3W5bCKEzTw/CZrOplHBd1/f9jY2NQin/+uuvH59Um612v98HRYhhas1TAGBIMslUwIyE7QAQXS2zTavT6+ZzGaWwWasLIc6cOZNMph8/fpzO57qdTuhHOo20UmkAKTifmZlq1Oq+H87PzLdanV7fC8MwnUoN5MDz+mHom5wZRtJ2TCFCz4+DIEA9jKbQdV3TNB0nQSlN2YlCoVCv103TrFaruVyu2WzqLcx13W6v19rdXVlZef3116MounXrllMobGxsDAaD5eXlYrHY67ZN05yamrp47vxgMDg+Pq7VqtlsemqqpP8v3W5Nc4L1BgNEFAqR0SAKIy4kEsKQUoODIkgQqe6gDf/XI0scVwSelekmquV6avZUGea3HIxIQRQqlKik5FIolIhKSlBEylAoTRLMpGQKKWLPC41YjqeohsWz+Jm6DpWoWfc5V5xLrlApCSAYU6aJBtVeIR7SAYNSQkmJAEAZUARBoNfqxDGfm11wEqmtvad+GCWT6YHf1+UkUIo51FBKEQYGoZbJEqbpGMzSwH6FwgKBRHGToDIoEkBFjTCGMOaxok4ykcwUrERaUQMYXVhaPHv+HGO48ejB4eFuFAdRFHmep5mFUI8x6lm4KNbJLoJUBJXmvudCFxKUrtngEFWolAqCQHAFQ2VEzfYtFCMI0mJoWsyKKUUQSr8LJYTMzs42Ws1er0eQ2YbV7w84l/Pzi/V6vdvt6mE/3x9wzpNuolgsZjIZxzQPDw+bzSajWCqV9DRWKpXs9/tRFCTTSQDodDq2a9m2c+XK1Y1Hj/YPj+I4zqTSqWRaSpCKM8B8OhMlEoSQKIq6nb4fBq1+3TQtygwASDquk3BXV1Yopft7O8lEQgoRhmEqlRl0e+l8odfrJRKJ2dl5iqzbbXf7g06nWyoVCwVLSlk9qudyuUIhF8WB5/XDyLdtpgNmXR7QwNQo4oaBSqluv1c5qQaeH0VRo9HQaFLXdeeXFuMgjKIomUrpcDSfz8/OztYD7/z58/1+v9vt9ru95eVlx3H6ne7BwV46nZ6bnXYcZ25+xrKN6nHl5ORkupQMw9CyjV5vwEyzXq+3O71u3yeGLaRCyhRBJbQPHJU0dLCmRwdx7P1QKQVCAR3NhIIidEhCObZA9Tyn44uHVLEeNwcEBVKzAgLBOJKcc+CCcE6FYqCYBnYKZSiFOCY71/YGenwPh6OquqKmhADK2BBfDcCJpIJIqYcohhAcJaRUHACYZJSiohDGXCpkpmOYvpIoOCogrpMcMosrxUTgEUIMZjgGS9pmyrFtw2REqShCYiqCymAUkFE0mUEpPWkNhJIcKDJqOik7mWG2LQGtRPK4Wvnww1/Fkh8e7XZbTSHjMAy6YXc42zacJgHFh3PlUghERQjgsN4rpRIMNWuY0HUmHbjqMyhiUkoZM7UlA0cEajCwLENKHkcKhKQUGaOUkP39/WQyOT8/32zWd3arOu9ijP3VX/w555xRmkqlUqkUIoqY1+t1IUQcBJq0f252emZmplqt9vv9lGsDyFYrHAwG6Uwym80KoR4/3rQtN5XKWFYjjngQBGiTOI6DKFRh7Lou59yyXRAyDn0Z8X63Vy47rU7HsqxyMZ/JFWzTEkKkkynGWNJxlVKmabYbTSEUj+JEIvHk8VYi6czMzPm+n87mL1+9ls/nNx486LXDZNK1bIP3AkTUPX3PCzXjE2PM933DsMIwAIVCiFKhYBhGebl89+7dVCrVaDSKxaJtWu12GwByudzy8nKn09nf26tWq8VicYDq6tWr8/Pzjzce6QAkDv1K9SiXy6VTCUTa63X2duJkMplMJq9cuVSr7GoYXRCFScPs9gfdwaDVG1gOBBEnBuFCxlxIQNDyZlrFTvP8g8JRf5yMBiMoosRhFYA9rzM1jpV+i1eMooAQApSgREUVIZQxhoxKKSSlCrkEImE4fScJAQJSgyU0WQ4xAEAB0+y4AECkolJbPpWAgpgaECIQASjXieuQj0+3yJRUDBG1oAKAzKRzyWQ6CKKe5xumQw0SRLqDioiKIjCXAaKymHIYWKhQxEJEQgkDEQQHJQxUjFDLoJbBGGPESlBEpjBWwIH6YUgUcFCDOKw+qnqhZ1qUMWIZKATvdDoh5XEYCc6RGdrudRnGGOm/0RGOnlKKSmMH9C0XiAgoBedaxckGUK6rgy4hpIwFKkkFt0ytpTEQIjaZyUyGqNKZrFJKUzxlMpmlpaUg8H/yk590Op2LFy+ur615nre19aTZbKYSyWIp7/v+/MxMLpcDgNpJ5eDgoN/v53K5fqNvmMx2LM/3hRDUMAI/Ojw8nJqZKZenTctpNpteGAEq0zIIowyoyYw4jFAKg7JUIpkoJ3K5XBiGWgZIciF5xJCk08nFuflBLGr9gWb+z+VyjFDTNMMwbLfbQmQXF5Yty6OUJhKpmZm5Wq0x6Aau63IRSSkt23AcKwz9TqeTz2flkFQfDcMIg4gxphSm0+np6elkMqkZbNPptGVZvX5f92l1hJJIJM6dP4+IjLHPH9z/8V/+ValUunjxvBDi1uc3oyhaXFy0TSubzZRKJUqI7td3um09AMl5NPA9wzD8KFaUeWFEmOnHsQREMuQyH/sxAc/q9/C8Hqic1KUR+tUAEx35ye9kQpJl8lAyVshGAS4qpSQCQ2SWJWPOla6pSKEAKdUCMYpSqRTX0qiEDQ3SpHLEXCiR0hE5Oh+GyQYiSpAEkDKDUiJijogU1RDshgoJRUoJCKRATcsGyxr0FRKuAAAHXoCIlAAhwKaKGaUERa3MLhQP/S
iWUegYDJSkSkokwAgFk2CMwAwnb5qmAtIPwiCKgl4fiKcQeoO+H/mOYxNC+v1uX3LbthzH6QUdOTpASAUElAKldEeIETQMhqhAClRKCFRiSESiS9aIKJWK43gwGCSobqkpIYTkClCCJIxxy7IAjL6SGkVhECoActn0/uGBZeeXFhcPDvaOj4/q9fr+/v7Zs2fjOL5582a326UU8/l8KpHknF+5cqXbaj19+tR1XdsydD+g2+026r1sNmuaJhIihGg2mwohnU43m+2pmelsLmea5u7BYRRFlmU5ju0q0zGtdrs9XC5SJpNJwzCazXa5WBRCdFttKeW1K1eXlpYYxf1K7WBvl4AKgmBxcREAENXR0dHly5drtZrOmqNYPHq8mcsXi6Wpva3ddCZpmowQ1em2dIMxl8sNBv04jhkz4zjW+RZjplJxrdFYjqKjzc3BYNBoNC5evFirnuzv7y8uLvba3TAMbdsOw7BWq8VxnE6nOedLSwvFYpFSOj87e+XSpXa7tb+7l0i4fjA4Pgp1n1aJGKRAZboJJxa8XTmxE26t3kJCewPfspNSoSTUZAYhDKlEBIKUiOcsatKhqdEBz0oDZPI5OGJAkxOq0i8ejDHCKBAiQQkllFCKDxuJMZcR19xiQBANpQhRAFQoopSKJQKAQRARhURm2Aq51AyLGotECAB4XmAySoEioFJAkCAalNHQj4aDxEM/D5QqCsoistvs9gf+3OJCpMTG5uMwjrP5DGMGDikMgQne15+RAJGSICgETgj3A89mVF+mUkwqFfpBL4p61E5nbSDoBYEXBMyxlCADvz8IfEQQMmIeEAIGIyMAK2o1L4bEdMwo4iKWOpWnSAijACCiOAg0wZH+B3DGGIDyfZ8ApQpCHu7v71vz88V0OooiwtA2HUQS8ZiYpNvtSylt2zUNNxZSCMGY2W6352fnfH9wfHxICPZ7nWajdunieTeRiOM44bq+77uuvba21m62tp4+yWaztmHMzMxowLuu3ff7/cXFeanH5qWKAiElSFDtdncQ+IZpnz1/LpFI9bzBo0ePUqnU+sKZ6tbhYNBjmgQIFIA6PNiTCqempnZ399fPnjk+Pl5bXX/48OHCwoKUUshYKh5GPmPG/v5+JpPRsOzj42POuU7VBoOB53m9Xu+f//N//sufvLdkLezubvf7Pce1bJvZjqVAmibzPE9Hhr1ejxDGOU+lMlJGWkjQtO2FhYUoiphpaEbmmbnZdDJ1dHTU6/XKhWKhULBte+n8mVKpdHBwEHjezs5Ov9+bmZpOpRPJlJvPZpRStm2LOAy5KBbzhJBBv6fp83jX84OIS4yFpABomI5BFRkKJAohIhEJKU3TmbQ3HOG2fc8zDMO0rCHIUbudOB4r4E6+amyKMFHXGXoqGUMUSyBAEDSXklScc6mQS8GlUkjBAIJMAAgA27SH56cAABwIJZQyM+ZcEcaFxqYRykwuZRiGhDFgTGgpQUREGkkVDnykLIpjgso0TQTgYaBAUmYEQYiUmo49NT0tQCTTWRfEIBjosTVKiUGRKRlLKWPJoxBMxgxKTEKpRTlSQslI4UVKyaWUkYgCwc0oBIJ+FPhhQCSXSvW9PuccGBhEa8eAjFUUAgXkJJKcy2FSCGPAge7SCCEoDrkJdBkbUQ/djxJxEEoRKdWYN0mNaD8oEEpZMPCEEEoCUN2QUswwGSPBoI+oDMNIJtyB35eSMwaIYBjG5ubm3OzsjRs3Wq1Gr9dDxGKx2Gw2s6lUGIbFYpFRTCQSnU4HEU2Dca61uJg0QICSgvNY5LL5/f39ge+trKwsLi56ntdsNg8P9w0cj9gIBQpRK5+SSqVy5sza1tOnP/rRjx4/fpxMJra2tmq1mmUaBiWuYzuO0+v1gtAjhOQL2UqlQik1LdbpdXd3d3//93//5s2bf/Pe37766suEEMsyFxbPISohYs8f6MjTshylAh6LKI4BYkoNznkUBc1m0/f9ZrNpmmbSdZVSGuVTKBSWl5fn5ub0Teh2u41WKyYKpMym05zzRMKdLpdPahXP86LAV2I+m0unk66UVq1Wq9dr3W53ZnoaEahhIKGU0kgoRRk1TDQsociQmkGjkyklE9DQsQPUKZ+eKZ9M/+AU0P/5Y9IIJz0qIQxGAEl4xmzKQElESoimUBzOQehdQPvlIfGPlFwIHZnrkwz/NPLdzBjOfEqpWw+EAlFIheJKKc15QBCRGQpAIjFMI/a5pro0HduwzFjGFtoAGu0qpFRsjKlVUnHOlQCgjFGUSlGtdqPZhygBBQoJECVBaAAgoNDEmjEPCSEU0WQGMwhFApKLmMeCBxgM+7aoGIHhdMioUUFAciAaDQRSaSIPADKsjqHSA1YAONItkXHEKWNxLMCgiiACoYQCAaUw4jHnQg+CZjKZMAqoYyRsJ4r7yZSTzizZtj0YiK2trSgMv/71rxsG9X3f6w8454Zh9Pt9RIzjOOGm6vV6u90Ow9DKOUJwIUbcwcqIueCc7+zszC3Mu26iVqt5oee6NkC27w1EGFFKkTIFoKRSiiM1CWIQBPVmo1jMNxoNpPTe7dvdbjeZTF44t/Z0+8lg0A9Cv9frcc5jLmzbbrfbqVSKC5VMJl3XtizDde1bd27/j//4v//www8qlcrA6/X7XUIgl88iomHYmsswDKIgCKQEBIMSw06wMI78cCjNGUQRATBNs9VqPXr0qFarpVIpRiilNJ1OT01NPdne0swgX355Swl5/aWr8/PzP/zd3zmpVWzbDj2v3WkIIXgcJpOuaWQ8r0+ZCbpkLZQXRlKBQMIIlRI5KC64lFKNJC75hLo1Uc/MTJNZDJe6HOq9kd9skzqbnTS/oVFRJqXU8w0AgIoQoIQQPxa6+KdGMaN+iRiRf8oJqpvJY7zp40g3e/wqgKHyCKAUXOgWHIwmpzTdkUTlR0G31wvjiDEGFPTcj+bOAK6k5CyMhGEYJjN0uVLDMogCRjAmhChAkCbVVV/KlZIghIwVokIJFAgqQE5QUAKMMtOgtmEySkUUBrGQQgY8EEIqISUCl8NcdNiNFVLvHkqOlcqVkEIqLpDoyS39yQmgazsEWRRFvu8jReYYRCghOAMNmDaVUiYFzbloW9bZc2d2d7f10HQYeqDidCrtum42m3rnnXdMw6hUKo8fbyilVpdXXnvttc3NzXajkUgkKpVKMuHs7e3phTXwepxzHksglKAWBkEhVD5fVAqPj4/jOEplU/l8ljJstOoYEwMMgyAo1Kp0RApCjYWl+cPD4z/8b/7h559/ns8VT05OYsG//vWvnz23fuvLz5v1EyFEIpUqlRYHg0Gz1SEEgygYDLz1M6ulmdLP3//l9PR0q9X4kz/5nyuVyrnzZ1KphO8Pjo6OXNd98uTJ6uoqY4wSRqnmQFSIGEUxs4aex0m4tmkFQeAFgSPEK6+8cnh4WKvVTNNUplk5qZaLpXQ6ffnyZc/zlJCvvfJqIpEAlLdvffHZzY+vXbtWzOUMk+bzWQLo+d1Wu1av1wv5KcO0FbJuv+eHQa8fhLEwhRJccAlCgQaO6dE+JEoOSXcBETWSW
FsL0xetFIjhikdEVCAmFDjGdqi+ijNbP6LtV3N3a4dJlAKBMuaSc8G5EAIpIYoQZADDnVI+ryc3aYSjuGZonFJwRERQhCAAUgJIlJKAcsjPj6BluVApikoSShRAEPmRiAyLUUo5j2CksSRkLLlgUUyRGIyaoBSPleCUAiOEcsGlQJAKJMRUCV2lFdgXXc4jRVQQhZxzhcilUErEMQcpBEFJUCmQXIlQhH4kkOs6gZQSUBqUoa53CSm0rLEiI1C3ktobAwhUcjh7hwSBAEkmk4QQ3/cpAjNZcliwIoNWn1JqWJJSQylkyBg1GWMbDx7uH+5ZNi2WcgpixkABD8Lep59+kU6nGaXb29vJZDKbzSql2u22lJIx1u/3bdv2PE8Py+ZyubAfyBEp+LCfKaWUkEtnKyfHCuXMzLRS6uHDh4ByemaqXekhIh/q/gmlRzYJC+KIWWx/fz+ZSu0d7Nqu9fTp05WVlf297W63FQQDzsXUVOnCxTOBH20+3TJMWq2dKCUT6SSltNtruwkbKXEcZ3p6+vr16z/96XutVqPX73734nd93x8MBjpYIIS4rkuJQSkDgEajoSsKtm2PQcOxEIyxqampZDK5srKiyVpiwR9vPtHBOedxPp8HlNXjyrlz54qlfDAYKBCDgWdQYlrMMIxk0nUc6/CgZpi25ST7/X7EhR8GElAoFccCCAUAwhgq1E1UNam1OKxfIpkcMR1GeaAFTwGfCzsnQ9Nx2eaUwWgNU83YggIkAckVEI4KUA19rJIKQCmqzQYB9GzueIBDW5kkCJSgQD1NKEEBKBkGoZZIYhQJISbVjG0xGYrpAUihkV5aaMO0DTdhIyLnUcJJGhaTIAgQAVKBkCiBKMYFgwjjiHPORRQzStLJpJNIRH6AACKKYxEQgVIRooBzCNGTKlaIseAAEhklqAyGQkgCSnIuCRcERcxFHIuQc8Y1q6KGnSmiEIGo4UATQQV0dCul0qTjCoiiw5heKaEm/kmc8yjiccx1OZ4xSixHSskjFcogiiKpeBzHUvHDw4MgHhQKWSlTrmvH3Ds5OfKDQavVtm17EEWI+PLLLxcKhY8//OiTTz5ZXV0tFAp7e3tra2sn1eO5uTltpchBCCm4EgoEf5bPHBwc2K6VL+YYY4eVw4OD/WTKzeQy6XQ6DMPBYBBEoZSSMUNTJtdqNcMy//LH/+v5cxcrtZNOv7d6ZrXZae7t7SDIYiHXbLfa7ValchzHvF4/iaK41+tkMrlOpwVAvv+7379///65c+dq+5V0JqnBPQDged7+/v6DB/ey2TwhRArNo8mIOUyNQh4TLReNmEgk3GTCcRzXdZ/u7MzNzCilarWabdvpdLpUKmlITRiGT548VkrNzk2bJut0W1tPHuXzuUIx51imSgshVLfbFnHUaDQALSGBWk4QRZRSjZhRoAMZQgihBJFQEEQqopTy4yHdPZKRsqAa5nhCKzMoRYeGqTEbz6FGxynlqdrp2FtybYFAQCuHSVRcKSJMw0A9ykS4ZgDSuFXDGColD7dZeKYGqcc8KdUAHjFCTMYGQYKMIBAikRCQCpUyhkBzIblUCITBqOihCEMv9Kq1k6yMhgxGFA2DAiqiCEFgng+MAVEqigSPhcXANNF2DMMyQUglIn2XlGICQCEjZCAlF6CU5Eh1JkgoNVCaSkoKKIUQAiXnPBIy5gKHeFGpZwjp0OUN4+whTzdSQKGUlEPeSDWBYhNKcAljympEDMPQ9307mbBMo5CfGgwG/X4/9KMwDgDAMDjnfGFhznaY6TDLogp4t6d6/U4YBr/zO7/TaDSajcZgMNjY2BBCDHr9tbW1MAz1jFmz2RRC+L6vUeaMMUqVoDLmIAVXEglhpgnEYKZltZqd/qBjOua582fa7davf/3rly69HMVcIWgaC4mgZckyubxpmgpxEPjpdJoQ8s43v9FsNs+eXUdU/X5/Z2+3Vmvs7++GYdxsNcIgSqUSs7PTlUrFNM2FpXnDMFKpVI3v1uvBxsYD0zSz2Wwi6Uop8/m8lGBZFgLxvCAIAh5L23YYG85Yj1vPhmEAVQCQTqeTyWSn0xn4XqfTUUotLi5qnHc2mzVNo9vtHh8eSSmYwRYXF5aWFwiBZqNWq9U0L2CpVMrn81tPD3SoJoSghgkAgJQrSRGlUhIUKgSlKCGUGADgcT6emyNIYFSCGyL4R434ob1JOZJrPd2dHz+iRlNy+lepNO0ZQUQKqAVbpATLoEAkJwACpVIwhqQqAUozbymAZwYvhSDaw1DNNKN5rrlBkVAALWUmhh6ZgKSMooKYx0IKJpGAoUCikn4YCSE63dbu7vYgKHe73TAMqElNO4F6mgEo8wahbaNhGACUi1gKafQjAoNcJgtaf4LaRAEzTAIoiFC8K+UQ8IogKEWlh8gZihilkLGIpRRxGMVBGIahZHJsTqg0EkKpZ6ovEwEAgFKKMqoQpB7i0gLVCnSlWIs6EUDO+WAwcFJJalFmuQAwisSY41j5fDaTz1SrR6m0I6UIoyibTS2vLM7OTaXTyY8+3P6Lv/iLOIoWFxcRlZ4toJQ2Gg2/308mk59//vn1a1f29/dnZ2dBC+5M7AgAQ8lyg2DMuVLKsiypZLvd9n1fQ1IIIaZpJxIJYrAgCJq9VqvTzuTypm1/7fr1drt7/+GDVCbdbrcJIQuLc8eVQ8/v61Q+8LyIc8YY2LC2fiaTL3zyyScJN/Xnf/7njuM8fMi+8/bbX3755e3bt/VgRzqTOjo6Yoxls/lUKgUKa7VGFDU5F9rkZCTHA+ae52ksnoi5ZVm9TieKoitXrvgDb2Njo16vr6+vb25uJhKJdDpVq9WymdTa2srR8QEPw9u3b+cyaSQqk0kxZsRh0Ov1NDxFjlwZ0qGn4rGEEdGRQKREKYrmaDoWxuNIBHXYqUbsvSAlRZ16IygllRqDuV+0Q3iBZkYppWmvh05M5zmgtf+U0hSGUkoQIIfg1UhGL4a1iKgZ93BEezN+L4sZlFClJBccERkhehyeaVuNQQkuKdHESFobiRCIoqjVahmOEfFYaRJ8AogUNT1Tww0MEluGySwaM8GDMFJCgFIxWkgJVWBIVKAgirmI49hESwhBpFQEqaJEUJSoADSZhSIgpezLIJRhSENuSzUs2VAlUAghFScAkkhiAiqCiDFILlAxAwAVZ0P2MaUMBQwJGfI3QsxDI2lxk2ZS6ZNKZffexqtOaqY4MwgqMfcUDEwmHCeRdC2I/Ppep1mrYugDU0HkH1LpR8FJvdoddNdnX3rj6kthGFqWEYahi4QQIv1gZXbG9/1z587UasuWbTgW7XRajDEnXdIZVxAEIY8VoOmYhmlGUdw8qYdBnMlkTMceDAbKdNNJM4o7UspYhhgrEUnfD6mJ03NTnX57Zn768y9vrq6snT27qlR878Htc+fO3bn/pDi1sLVzlEjl+GGVGvbmxj3LMouFstf1ZCSDjmcqtjq7kMlktra29maevvXWa/v7+7nc2ffff991Enu7+7lcDoEc7B9ms1nP6wPw2blpKaVtk8qBX3ByKJWbzkZR1Ot2FhYWhBB6
0LnRaDze2Dg4OFhcXDxzZv3BgwdvvHI5DEPGWDHr7O3tdVv1TqMxMzNTyOdN09zf3+9141TKHnjQ6/cBIGW7hmmFvkg66d1KM5LScGxghkA1gnAKANDVQAWYNp1hECT0fAGICYV6ZFQhxiBRKUBAg47y8FEtlKBCIqUMeDzkaEXUJU/tXB0eAAAqBDFkk0dESUFJ1N4PJRJFgRJGGaWUD6ujihCkhOKQz1tSZgqphgzoQJQCRGKYTMSEK0REYAoQQ83ARYArSUChRSg1CUWpqzLIlOkKEfcDLE5lGLUdaszM5I6PD00CBBVIJSVnRI3GiHAYZGvahZCGQBlVILlgCoExRACCfFgAVqjhsXq70o4ClJIqHivLgtIDY6fKWZPpNbwAkNdFAiklyOHQPQISRNu2O50ORTI/O2cyFgXhYDDodDqFrG0yI47j6lG11Wim01mTMs/zas2aAhGLuD1ouUnHck0pIQ7iVqsVRZFpMsZYtVq1LGN9fX3g9S3L0FQOepoJAHTXvuedSClBEcMwtHQHYyYzjMHAY4xFJBoMBoPA17z3ngdCeolEwrZtpVQYxFLKRCKRzuYfP358clJ7urU9VZ4ulUr5fPGoUm23u4jqz//8z4Mg+M53vhOG4f7+fiaTTiaTURjt7Ow4jgMAmUxGTw8JIZ4+fbqwsJDP5/WU4BdffJHL5TRoZqzFa5qmZkPsdDr5fF6HCdq0MpmM5tGYn5/d2trKZNJhGL7zzju6ZmOYtNvt6t5MPp+/ePGi4ziJRKLVaiWTyX6/L4QwDOPp06d62LLf79vUVEBj+az3oBSAUoKPWmugBAqFIBEIKGoZ45EaHA3R6wx/vDYmazDP6WHqFHJysP2Zgq86ta5e9Jn6h1Nn+MpXTSafX+V8n6vHjuJ8hkoqMYyNpRAoRRzHxDEJMaTig8EgnXE5571BP45DiRaj1LQppa52/MMLopQCpVIpL/ANQgVlRAEKSQAtUAyJRD3aqIbOXLfxcWiBQiohRDysC3KlFBAE8WwsGl+oL0/eDngB/SClxFH2rpeRrq0VCgXJhWEYjUbDpMyyLIoQR4E3CEzGhGEMvL5jWVEURTwSsTw+rFRODhvtFjNZzQ2+9rU3TdPM5/Nf+9rXKpWjo6Ojp0+fLi7OE0Lq9bplWb3uIJNN6aKo53mGYTCGoBSPoijiQnSFkgSZ49iI2Ov1okDLmqsgiAgF3w/jWDDGTNtKJtJKqVarxZhx69YtZljZbP7mzc/jOHZdd2tru1Y5OHfu3Jkz5/b29m7duqWLlvV6PZvNtnZblNJCIaflPnU7u91ub25uuq57/vz5hYWF//gf/3J1dS6RSEgpXdcFgH6/r0ag0CiKHMNtt9uGYQCA41oas55IOHEcnz9/fmFhXncLhRD9QbdYLEZhjEAs087nCvNzC1EUxRH3Bv7hwREhJJvJFYvFzSdbiUQik85aps3DiAv0hYpCTVmNKJXUdJ2jf7dUoMFDkgCdaK/jaEJicq2f+lnh6U18WMWbYIWZNIZnrx1jw0eHHKNtEEZzR89Fs6fW3lcaIQ6fpvSQudJRMwiltMqT7n4rRYYam0HgOa4lJQz8vmnOJNJJLoJkJikUF0hjUEJJxiiFMRUfpcQweBQHUWQZkdQkcFygAg7KoFQpJRVyQD1nBEgkICjgursghRAi5oJzTZyjG7Ry8iZqKxrDYU8ZIQDoWFxKqYQkCpDptUd7vd5UseS6brfbtQxjdXVVKVWr1aKgL3kgeZxKJW3DNCiTXKCCfC4fC14qlWbmZir16vsf/NKwnRs3blT2W81m03XdXq/3H//jf7As67vf+07MI9/3DYNKCXEcm6aJQDOZDCKGnFuWOaRwRilEHARBGHPHcdxEghDieQMQyjCZkhCGoSYy1/zblBiIpN/vNdqtXLZAiTE1M21Zzu3bd13XLZSKqVTqpWsXFhcX6/X6zZufuq5brZ5wzpeXV3K5XBjE2Ww2nU53u/3BYJDNZovFot9ra97RTqdzfHy8uDilDe/o6Gh6erpQKBiGsbOzU6lUtHShpnzWk5PJlJtMJrWHZ4xNTZU/+uijdrvd6bZ++MMf+r7vuu7du3dc152bmwOA3d1d27ZbrdbMzIxhGLlcjjFWKpX29vbCMKzX6/V6fX5mMYy4F8ZBEPKR96OAxDSlnqlVAqQkIBEVUaMOxATv49gNvmiHv8UjfeWD4/KMmrA/OS60vhCCfeXJxyY9Xpww6T9RV1Oe1YqUUgpUHId6uAIkB0IZAjJGAPt8YLKEYVIE7rhmKu10Or5tm1JFiMAl5xFnupA8pMgkqBhDISTnoeBIiSb5UkLGsaKCAIBBUKtAoxadGkEKtPgv5zyWQg9c6Q80NjwYhR9f6QnH38VI5FEpBYCEENM0HdOKwlD/yfO8MJl0XVdzqDE47rTaMQ/TSVdYdrfTC/2IEmLbtvC9KOS9bn/Q8/xB0O13T45OEM1MJnNycuK69je+8Y16vb63t6er9gBSCNFs1h3H8X2/kC8FQcBVqOnPhDDthEtKRAgRhHG1WiUUDKCWZQgQqDV1UCmJmXQ2mUz6vl9vNDwvYIxZphOG8fe//7u9QX/76W4+V+ScV45P/u7f/bvZrCWEcF338uXLmUzmZz/7hZRydnbWNM1sLm0apmmaUvJEwikWi4h46+Yn2Ww2juNWq/XZZ5+9/vrrt2/fbjabcRzrMFVKWa1WdcGTc86U4bpuvpBljOmAQjN8P3782DDY/fv3F5fmV1dXLctqt9sbGxuri3NBECAlQRTWarWFhYWt7adra2u26+QK+W63G/H44uVL9Xr90aNHzXZrtrzo+0FvEAQR17V9RKTUoIRqIxwiREGhkkCGGrfjiFRXvMlIqfeUHSqlJt3Zi1v2i5HUUAVswgKHdDQTbnDMNqaeT4VOLcWvPLQPRNSjvajGqqAIBNRIcEwAGNqxJSyWSdjZXJrzMGkbQcIZdJQEnk6nhIijKJKSM30j2LCUq6cYKackFpwqxgiVBLiUsYypIkopm1Ht2obTgRp5M2QclAJ0p0oB6h3omd7VJFsMvBCaT+5Do/uCDIfcaq7tpFMpPWmacFxKqe/7QohyudxvVYLA8we9hJ1GRCUFYySdzj68/8B2HS1wmS/m3nrrbSEEEMwVih9//LFhGMlMcmtnmxk0mUzuHx0uLi4iEiF4z/MTSPq+V56ZBkqIZemxa87RMaxEIgEAnh9IyYWScSwy2RTzWRBEQsSJhGMwWwjVbnd93+92+57nWaaTzBiUklqt4QfB9vaOUkAIZcwIw+j48GRpaWV5cbHX6dTr9QvnzqwsrymlqtUTxUWjXY3DkBGyvLhYKBQoqjiO9/f3NfeEBqweHx+7rouIvu+3Wi3DMIrFolJKY9+ogY6bUEpFUSSECENfCNFoNFZWlqenp7/7vW/rucqbN2/Ozs5OT5cvXry4sbFxcnKiuQX0Z9/e3kbEVCplGEaz2eScN5tNx3GWlpY6/X670xv4MScUkVFKpUICugg3ZtYTQxEuAAXPEDB6sZGRAsxXrn5EeM7njEK+cX6oU0o1noG
aOIEcmxZqYMyzQaoxKHRc/Jw0wvEqVS+EwRpPrWsiSikgCkEiISajqKQAKcWwrA9EKiESFiYoLk2VOQ/KuUw2YaUdFovY8/pcxpHBAkqY4oJQpusxSkiiaW0IiQVnUhBGBUUpUSklQCpQko/kYxUhisCowzMckFdSIShyOsqf9OnjD3bK4+vjGRGGJj0ekeFls9lapRoEgSYRlFI2m01E7DUroe8pJYWMURLDMKjFXNd97bU3ylNTfW9wcHyQyqSn56b9MKhWq9Vq9cyZMysrK5XqcbfX0fNNb7/9dqVSKRbzQohMJhOGISEsCCLfD/PFnBBCy8ozgDhmnMtur1ssFaonNc4jx7GQ0jBsGAZNpVJhAJqs2rbdZCJNiQapqHK5/OEHH6WzmXq93un0CCGFYvHosPIP/uvvPXr06Cc/+dtWq5NIJBhjCkSpNLW9vZ1IJNrtdrvdTCRSmkV7MBg4jqMT1zAM19bWdnd3c7mclFKPIwVBMDc3l8/nNRY0lUo1TxrJZDIIAsdxEglHr7mTk+r58+e+uPXZ7u6ulPLatWtB4BkGTSRy29u79Xqz1WpxLl966aW9vYPV1fWDg4N8Pm8Y1uzs7ObmZqVysrHxeHp6ut/3+u2o2x/ECu1ExrAMU0rOhZRcCiLgWb1EgKSAEsAwhkp4MAr8yGiQYtIRjfdoTa82GR/pHV9nuXKC83d4ktEz5cgf6tB0bIE6QRq/1+T7jteknNDoPuUeNP+FVEJKhagIaOJ3hQSI0MMHEpFQVBQpEGVEPvp+OZlQyio6iUQpv1DORlGwtb1JDMYYAaRMfzxUoIQcf1SdCxlSMgCFQyQaaDj5KO7XDNNqJIY+Cut1QqrUBK/2ZAj6lY7+xahgeH/lSOxOSkTUG3O/39cI42az+fjxY4c1TWradooAiQOp6Xx83//yyztLy8tA8LByHIvIuu/EIvJ9f2554f6Dewpkr9cbDAaFQmFra+uzzz6bm5srFEqUGrOz80+fPiWENJvNTqdTni5JKTnnYegLpQzDCIKg0+kkEgnPG/R6/UwmAyg1gXQqlapWDoVQmUwul8tFcSyEQsR8qVgslI6PKp1O5/iktjg3ryPGfD7/8ccfr62tcc41ZuXLL+/85V/+5Y0bNxBxaqocBEG73fa8frfbjmNRrVYdx5mfn7dtW88EahP6kz/5E7036fTPNM10Or2wsFAul282f51MJoPAy+ezU1NThJAwDI6PjzYePWi1Wrlcbm5u5uzZ9ZdffunRo0c7OzuXL166cu1as9m0LKs0NfWXP/7xH/zBH+wfHi4sLdWbTTeZ9MPQcpx8sTgzN/dkayvoq74XUMOw3LTFjEjEhEshuVIUFMhngSFq1D+ZoJ2ftMYXw8JxxDR2euMXjgOr8XnGT9NObXyucXlGDhEycGr9jU8++f0rF+TwDSgOC01KKAVIFAClSGTMFQwLGZSCQRmjlFFqBT0a+ElABYQFfirpJm3XR1goFFOpVL5YzBXyzGQGSBXL+NmUECihJGGUSxGLoS6v1MpVBMUQ1QcKlVBC89cZ1NAyQ2PwK4x2qTAMNMxlTETNRjP1pzY/bWymYegXMsYUF7pfIiw7CALGWBiGhVxeKVWpVHTfGVmQKKQSdiL0QwnCMAzBYTAYJJNJy7LmFubPXbzQ6bUr9Vq313YTiUIpf9G40Gw3opCvrq0RQoIwNE0zm8sxw8jmcpzzMIrCiB8dHSmltra2lpaWNDtg7fg4lUppcSjd5u50uoyx7qAvuGKMHR0d5XI5x3Fu3bqlgammab/88st7hwfz8/Obm5ux4EnH3Ts8KGRzuXy+2+1+42tvSCkf3n+Qy+Webm51262pUrl6XHnzza/dvHmz1+3EYTQ3N5fP5hBR8rjf7dRqtWQyWSwWC4WCdo+zs7OtVuvkpAYAtVrt8PDw8uXL+/v7zWZzZWUpnU5vb29r4M7GxkPHcV5//fVur23bdrvdHAwG77333vT0tG2b8/Ozu7u7hUJBA1Dv3Lmzurp6eHjIGPvlL39548aNzc1N7Yf1uKNpmiHjEhQjJJlMGo7VHQyiwEfDAqJ1FIfsLABACCWEcB6ON2K9u+kKzTieHKc2oxXyjKl6aAUjLnN9Er2c1KigYE5KHbKhsLbQmB4lY8GFbvNRggQVQBzHmtVBozJ0GVCfajJMG+8IABq7+Ez0WykhJDimRZXk2olqaCqAAignk8Vc0avWssX0oFrvVY8VUYRCp9v27Vb3pH7i6hbFRGMAEWEiVdP3YrwJAUAshh+eTmwPoAC0HAs+M0KtPCh4NDknppTiL8g+njrGD46dp5Sy3++blElmBEHQ7/cNOmwnmrYDQPwo7HZ6/b5HwKCESQmpdFohHFaOJYhUNrO4NH9cYQ8ebex+tGua5szMjOM4A68XRVEunzl/7uJ7772nlAqCwLKsfD4fxzwMo9nZ2e2dxzs7O77vSwnnzp3jnLtustVpM8a63V61WjUMQ8/Laf/T6wa+P7h27YplOSe1WiqVOjo62NvbY4x1Op3eoI+ocunM9PR0Kp3O5XJPnz7NZDKOa62sLh0eHk5NTW1vb6fT2R//+C8RaTabLZUs3ZozTTOO47Nnz56cnOhKxvz8PCHkV7/6lW3bP/zhD//Df/gPnufdvn377bffbjabFy5caDQat27d+r3f+735+fmDg/16vaYj3jDyCSFRFLRaLULI9HT50qULT548effdd7/zre/HsVCK/+3f/uT69euLi8uffvop5zyKol/96sNr1661Wh0p5RtvvPXBBx8YhpHJJk3LMUwrlU4wK8EVUMsTinb6fVBICRUAOKSSHaoyTq6u8RobpyHjHG/8tMnlNz5OvWq86Ws6DAnPXJkcucSxb4SJ7sUYJ6BPKCfULOD5MHXiykeQN5QwbHaAklyqcayIksRc24vf7ymoxGGjwoSIQh4qEEjBdR2khBiMPVMjmVBoGQ4R6TMKPeSnUD1ru+unCS06P7oR4/muYYgPwyvWbzH+AOMRaT00Of54k592+OCERI4uvJqJJCIGQeB5XiaV1ucMY8GCUErZHXj9vmdS03UThmml02lmmhGPu91+q9+2XZsrmc1np2YLx8fHSolEIqs33Vqt9knrk2vXrsVxPD09PT09s7+/L6XMZrNCiNdff9M0Te0NkBqtVmt3d7fZas/NzcWCx7FApBqaYBjW9HTq+OhOqTRFKTVN1ut3hIwvX768f3TIOWcGDcMgCMJUyoiioNuTVtOYLk7/u3/37374wx8+fvy4VC406q1arUYIyWazr7zyCiHs6dOnOslMJBKBH1YqlV6vt7q6ure3FwTBW2+9VSqVPM978uTJzMzM1tbT1dUVfbWPHj06Pj5++eWX4zjiPC6Xy1EUOY5Tq1cNY35j40GhULh06ZLjWJ1OZ3t7u9lsGobhBf7Tp0/7/f73vve9hYWFf/kv/6WUcmVlJZ/Pc865FLbrlMvljz/+uFgu7e7uEoiEkhxUq9M0LD+MZBgM+kGExCTAFQIhRElUChVIPVMytsBTRjWZhk16ocnvL+7dk1atzW/MKayUUs
8HlS+2LtTIbZ4ywnHYDM8fQxFCAKDaLCUgEqWpuBVolI1SHBGFBJC2aVAlY9+LQ+A8kpKjoRhjzW5PgoZGaxaA8dfEgYg6FEClKCLTGk6EEEZ1PKDrJVqM3jAMRqkeb3wmWCO0fvdzNRj8DVpzkzd3sg8LEzID2tQ9zxuSrykVx3HMVawAqWVajuW4yJhQGAtRrdX2Dvbbva6TsIUS2wc7zXatPFu0E66dcPOlYnmmXJouLa+uLiwtWa5zcHwElGzv7X1x+0tmmZWT+mdffBnG4t7dB81Ge3dnP5vJVyrVYrGUyWTffPNrrVYnDONMOpdJ5xgzlVJSQjKZvnjxolLi4sXztVp1MOjlcrm/fe9dpcSjRw/b7SYXUSLhZjJp0zQIwSgKH9y9C0K8/sqrv/e7P3jr9Tc2H2/MzcwsLSysLC3kM1mv1/X7A5Rq0O15vT4BVS6XlVJTU1O5XO6TTz7Z3d1dXFyklH7xxRfnzp2bmZm+fPlyu93OZDL9fr/f76+sLH/55Ze7u7vpdDoIgkazVq1Wq9XjqampXq/XaNQ453qoP4yCZCqRTqevXbu2sLCwvb398ccf37hx45/9s3/26NGjR48eaSkFwzB+9rOfTU1N6XBUobRt23EtxqhhUDdhJl3boIDAAQVBhagQFRAgFBV5NkQ76eLGv45Tu8mN+5TVvWiHX/mgUkqOSjIS1Fda4IsvP+WBX3xQ52uKDLWHQNdTRpaJRKECxQXnXPBIcO6Hnhf4/cGg3+/7vh9FkYwlCBnHMY9iHnEZxWycmD5ngUoZlFIkQ93tkUKbAhUCII4/7bArTYDoiUSlNwPts4etwmcfY3IG7Ld/VP1C/TydOTi2rZSKoogD6gY0pTSKIsclgNSyXWY4zPSCQRBFkR8G7V631+vZrrW4tlyem0qXM7GIFMqjo4NSqXT9+vU4jh8/fsyoeebMmWKxvLe3t7OzUy6X0+l05fik1+tNTU0dHx8nEknGjCAIT05q29vbYRC3Wi2pqFIYBjGjNueSEGbbbuCHnXb3pFZdXll+uPGAMnL9+rVKpcp5FIQegMzlMrlCNplMAwDn3LCMKA5+57vfHAwG/9P/9H/9gz/4g2r1pFwuf+tb39ra2vY878GDBxpAp3Owk5OT2dnZl199rdVqdbtdDVe4devW6upqKpX6+te/ns1mL168WK1WX3311W63++qrr968efPhw4eVSuXq1auIeHi0Pzc3p2n8X3755Vqt2u22wzBMphJKKc3e/2jjSbVa/Z3f+R0p5c2bN4+Pjzvt3o2XXgGA2knDYNY3v/nN7333d/7Fv/gXf/RHf8Q5/9M/+wvbti3LMkzDtE0kVEgZxGbPjxA4IAUAJIgKFCGEYORF8FyKNTx0G3OyoADDwd/nGlovms2p3sZw7WmdUKXUhGOctMCxzx1XMSayUJh0g6feV4JC1GzVz10YSoGEEiSCgE7DJFBUwKlCjexUSohYKo6xIv5wbSMiAmUSQZ2yfl0XIVQTglIClKBWMJVK4uiTK6VQKkEBFQBRRM+FaaiMeBbWa7nsyQ/82+/pZAygE1SlFOdcgzxQAUUCAHEcE0Icx2l26iFXsUTXdCzblQq5kkqK5ZWVmIe9Qa/dbZkJY2Fplhqs3W4tLy8jYqVy1Gi0jo6OTNMcDAZhGO7u7jqOW61WNx4+LpfLu7u7tVpteXn1zNpKv+/Ztru3d1AqToVhnM8XgyBwnWSz3YrjjpTSdt1UMl2v1yuVE6XE0dGB4zjb20ez8zMvzVz/27/92zfeeD2K4lqt1un2pZQHBweEkHOL55XCL7/8Us/pPnjwoFarr66uPn78+MmTrenp6Ww2u5Zd29jY8H3/zJk1zwsMw/jkk0/CMPz88887nc6rr766s7ODqIdg4M6dO7qo67ru/fv3HcfR25brusmUi0TNzc2VSiVK8enTTc0ymkwmDw4O9H04f/58IpH44P2by8vLvu9nMplvf/vbBwcHn3/+udYDTSaTV69e/fzzz3u93j/5J//kww8/DIJAKREEXhj6hBmG71FKvTAOQ58RVIRJkJqJHgklFJEimTC2SV832bQY/wkm5hjG1nLK3U3+ipqzdIRYBt0h1M8ZmZ18Hs42aXjjtx6Xdk6tRqWUBKEpEIdXq7mShk+SOhHTrwSpCKAwKTEoMSmlhEkqJVeSA0AUhwQoAhClmPbaI2zZkDVRCYlIpZIUUQkJSCghUilQQCYUP8c2o/eaYf9FKqUlowARUVI6uaPgC63CF7+f/tgjVSpBqMkMRlkcx/1+nzGWSqWqTT8II2/gu27SNkzJVcwlF4oxli/mcrJwUj/2Q88LvFyykCvm0m7m4cOHW1tbSql0OhuGYeX4SbvdPjw8brc7uVxOcHVwcCClXF8/q6PuZDK5ML9Ur9dNx261WoSwQd+jlBFCbdvu9zwpwbZdQlgcB5ls4sKFCw8ePEink2EY1mqbC4tzrVZramqqVqsxRhgzCYFCoXDjxg3f9xMk+uyzzwghn3/++fz8/DvvvPOf/tN/ImQIZC+Xy/fv3xdCvPTSS/1+PwzjT359M5PJcM47nc7s7KxSam5u7vHjx7rDWSgU3n777SdPnly6dMl13StXrlQPDxNJ5+DgwDCM5eXlWq3mOFY2m7VtEwBs287mMtlchnNuWZbneUEULi4v7ezs6Lgxl8tlclnGmCYpD6Lw4OhwfX395uefuclE5aRaLBUGfW/gexAJLiUhRAJathHFQqDej4lmTyAEKGPMNMcWOF5y6oXyzOQaGNvk5AuHFjWRWOrXak4mNS40nPZjz2/6XxWUjs37RSMHAAXjEsazqgwAcM4ZstGAMpdINOlTs983DGpajDFKUAFIPbyOgERJUASVGDYJxDODenbgyKehVCA1Rb3USSCllGl6rSEGcOS+pQINHRzJzY1F5ya3txfd4Fc6xsm/djqdfr8fhmEURb1er9FoaAEJN5VExN7Aq9fr1WrtpF47qdfqzcYHH334+Ze3Or32zPzcysqSbdtRFCAqKWUqlZqdnV1aWtLdtoODg2QyKaXM5/MahmLbNgDxfZ8xdv/ewzCIEbFUKnU6HUSqaQVbrZZpmrMz8/l8XodSjJnJZLpSqVQqFe1ker2Onk6o108AwLLM+YW5a9eura6uFot5w6CVylGn0/n617++sbHxwx/+EBGbzWYymVxdXb148eLu7m6tXi0Wi6lUamdn51e/+tXu7vb58+ebzebU1NTa2prulywuLq6srGxsbBwcHPi+3+v1Dg8PLcuKoqjZbO7t7ywuLtq2/fTpUyllr9dJp9NvvfWWlFKL/i4uLmoqxGazGUXRtWvXHj16hIjb29ubm5s//OEPdXr59OnTSqXy/vvva2EcXRZ+4403MpkMM5kQPAxDPxj4vs85Z0z/xxWgUjDKl7T+5BjeGMdRFIVhGARBEATy+UM935Z48RhXccZeC34zGelXL6pRxngq/nrxh8lfXzRLbdrDduXzua5SAkymGOGAoRBBzP0w6Id+3/O4FLEUkeCxkMymBudccK6egV+RmEaslGb4QBnHSGzLk
BLCMFaxP747hFJCEVDEikuUwEBRNXkHAcAQz+BpMOEPx7cbJrAIiCgkR4YoFZexjuwpoFRRIulQmwZy4McqwVxUYMcsibbXEUEgTNOMlWh7bde1E4lEPOhGKjYs48mTJ5mT1PnzZw3DCCM/DMOZ2ZVmo7u+vl6pVMIgRqBnzpy5devW1NSURqjk8knAeGY2XywWc7lc9fiwPJ358MMP9/cOX3rlZUpZo94ybbNcLinEhw8fUmok0qlOe+A6yZOTWtBH1yycnFQ6DS9fyLz26svbO1t37ny5Z9FsNjtTSGdT5pnF6ffee69VOajVaiDS5XIxm17kkflf/70/+vjjD4rFYr1e29i4T4h68OAeALEt9969hzPTc/VaY25u4dqVyx988MH8N75RP6lm06mdp1tn19cSjl0oFOon1YO93Rs3brRarU6r2Ww233n769lsNplMzs/OJBNuu9n68IP333777f29vVKpaDCadBN/9Vf/ayaTefTwQaFQuHDx2o2Xrjx48ODv/uiHtVrt//F//79ZltXvtdbX1w8Odvr9fiaT+fLW/ptvvnn50rlf/OIX8aA/l0/P5jI3f30LgU3PLpbKM082d6amptt9r+cHiGgaVHAZiyAOQwPpcysflCKodIRFkIMiqCdkiZJSSGmBBCWEiJWUusmhzdh1Xc0nQ4Y+UIESinNAXajQ4ocjrkAc6q2hUkNaaQCt/kkQlZRIiA7pYcipARp6CQDyuYKlIhyJ5iZVFCUSJTVxBDVYpFSoFJiOZFY0MoG5uK2UORQ+UlRKoutFQUyHjlQR9uIeA8/3EqSUOgETQoRhyMgQdjR2gJNbxW9ycS9+nzTC8QamtPa30uwWMMpch7NWpmnapouIFInv+/v7/Xa7baOpQZK+P8jlcoi00WgwgxSLRcZYOp1OJNxeb2CarN/v375z6wc/nL5z545pmk+fPg2C4M0339RYMMMwFhcX7969q0d46/U6Iu7s7IT+IAiiQqEwP7dYrdfq9cbc7EKxXP71p5+5yWQcx5zLM7PnKDEGg8HOzm4ul6vVahpLoMmplpaWSqVCPp+7ffv2wcHBzZs3y+WpK1eubW5uIuL6mTOVylEun3306FGxlB8MButnlj/44INer5tMJnO5/Orq+p3b9wBgd2+bUfPu3buGYVy9enV7e/vs2bMbGxv9fj+dTj969CibzepPsby8XKlUzp8/3+l0CoXCxsaGUuqb3/zmn/3Z/1IulxljzWbz+vXr1Wrl7t27lNJMJnflyqVCoRDH8dbWlg527t27Z1nWt771rVqt1u/3Hcf53d/93VarpVPoSqXy13/915zz6WLh448/Nk17dXXVYPaDjU0u4Oq1yzv7x6DLLZQKRKEkgCTEGOsNvuh/vvIYgiKV0msDR7yDp+Bm4zPIySI8PkOujQc1EBEmIjIppSbBUBNwGTlRySHquVKqhptP4nX0NZjGs/M/l7gqPUfCQLNFSQVKKoWUapsngMAmDeZUsD52WZMoWzGUCnzWToHny1zwvENXLzyifzjVkx1f9yj6/4pggDGmiw2SiyAIBr1Ou90uZEoLCwsS214Y5Sjxo7DTH8zPz9Zq1bnFhWvXrlqW8Ytf/mxjY2N+fr5QmvrTP/3TTqejad5v377d6/WOj48Hg0G3202lUmEYFgoFvQqnpqZardbc3EKlUuGcX7l8bWFhAQDjOK5Wq91ud2pmxrbcdrsbBAGPvTiOGaNra2vVanVubu74+HB1dfXOnTvFUl6IeGdnWzcAq9WTpaVl23ajKBr0/bt3bwdBUCjmX331VR1nci6FEOfPn9/f32+1WicnJ2EYplLJdruDiIuLi0dHR1NTU7OzszpPS6fTH3zwgeu6UkqNQygUCr1eL5vNRlF0++6dUqn0+PHjge8hZe1uR0r55ptvbm9vOY6TzmYQ0XVdpKxyUj04OLhw/qpuAuVyOb271et1TUizv79///5913VfffVVPZUPABcuXEilUo1G68njbduKzp5bz+aKBwcHQgBoqRWKCMi5Ekqh4AoovGA8z+zheYtSE90LeH4A6tSSm1hvEysKQBdG1W/OgHR/W02IW0yWiPRBX3ihmoiTx4985fUMWd50tUgB6tEjbZCjy2OT55WjyS4cVWxhDB4f9QYHcXzKnE6Z/viejj3eixc3ebyQLo7pZ8a3FWDEpazTibECrmEYvX4/jKJkKhMLPvCDMPQJpaZt+WF0+869k1rt+kvXCsVp09rlQp05e6ZR76VSmZ/85GdKqXw+L4S6ceOVTqfz/vvvV6s13w8tK6CU9vve1tb2yUn96uXLiUTi1he3Dw8P3/7GNwihW5vbJqHlcrlQKBweHBsGPTk5AQDfC7UQfCqVIgQWFxd3d3c3Nzc3t2SxmM/n89///vefPNlUCsIw3tzcLORLCTcVeCSby8zOTi8uLr7/q59HUSBk/Morr6yvr//4xz8OguDBgwe5bEEIUSwWNAHy7OzsnTt3vvvd7yYSienpaULIX/3VX/X7ng7q6vX65uamnsbqdrsEmWMnms3mzZs3v/3tb29sbHQ6rSiKpqZmFhfnnz59Wq/Xa7WaZVmpVObq1WK3MwCAZrN58eJFpZTOD7WAx+HhYT6fTyaTzWaz0WjMzMwUi8VardZotKSAbDarUbupNC8W8/VmN5ZCciEAFKEAmmRTmuw5rOKpZTPOdMZP0FTOkwtJP2EsAwzPV93hq4zhK+1k0gGMf3j2oEabvGDpGu817pCPsQdj2t9RZ214fjb6IoAKlVA4HPDi2o6Ifs5wExpbnd4ShBBDwPgEu5GcoCHAiSLyqf3gOU/4G27KZA49aYSaoQilAgCCY8eI+gPjCNNgmqZB0bKs0JPNZtt1XdOwm80moZBKJaqV2ptvfC2bS1cqR416k3NeqzWq1dr8/GIqlXrppZeEEAcHB+vr6x9//LHmz/3BD35Qq9VgNFWsFSBefvnlzz777OqV6+vr6/1+/6OPPmo22pTSkpsoFoue5+3sPj2zfk4Auq7baGwLrvr9/vz8/MbGg5dffun/9T//29dee6Xba7/zzts///nPDcOcm5uLY+G6rmMnXDcZBEFD9DmPZmZm7t2/c/Xq1Tt3vkylUoVC7uDgYGpqilJ6eHgMANVKLZ8vFosFXSy5d+/erVu3zp07Rynd29u7cuWKHmLK5/PdbvfBgwfLy8vVarVSqWTTmVar9d3vfL9YLO7vHbbb7W9969ubm48//fRTjYY/f/68RsN1u93Dw0PTcPSM0t7enpaIOTk5oZRWq9U4jsvlsmVZ6XRaA3empqbu37svpVxdWc/ni/Vao/pkM5VKlaZmB37ox5HkoQCiKANAJTiXihFj0ga+cp1M/jze/cdQfr0etBGOPRKMYyiYXJajX/Grl+I4xD3lCSbOdvqQExeMiFrQdtJXP+9RlEkoI9QgI/MhIKUCpCPnotf5RAg6WVNxHEerz47xrPrQd2TS6sbH5B2c9IST8IgXb+5kADzeDsY3YgzNIXq613G0gK5pmvofk85m+t6g1qh3ev0gCgk1FJBqrSaUsp3E9s7e
hx/9mhrmN7/13StXX1KAX//612u12q1bt/7sz/7M9/1Lly4RQjKZzPHx8e3bt3d3d3V0qgd55+bmFheWEXF9ff3ixUue55mmqQX9giDo9XqpVGpubs4w6GAwUEoBylar1e22S6XSyclJKpWyLOvq1av5fLHf77/77rudTi8MQ9dJvv32O9lsFgA5jwBgd3f7s88+syzrtddec103CAK9RyDi/Py8aZqFYo4QyOUyL7/8cqfTeeWVV05OTnZ3dzOZjBaEymaza2trZ86c0ZIy2k6azeba2roQsl5vcM63t7fzuWKz2fzlL3+1tnbGMIxGo7W3d3B0dNTtdmemZ+dm5xcWFhBxdnZ2b2/v+Pi4VCodHw+zOwB49OiRhgecO3euVqvt7u7Ozs6+8fpbKysrJycnnj+4fv3q6ury1uYTQiUjoNkBFQiQHAAYQfVVx3jRv7hOxj5AE3xMgkX/i44X7fxFy3nRGvGFr7FmBk7AueI4PmXh44/Dhl/IFBpAGICBaCA4Bht/Md2PmrQrbRLjeeexkxy+8cTN0vHhqTsIL2xyk1c2dqGT/nPcGH1WLAVERErJsBdCIIoiOaLGQAKEEFRCKRVxbrtur9fzPM+yDWoayOjUzNztu3cLpVJ5ZvbguPL+Bx8xxpLJJDWsmXLfdd1yuZzNZm/evGnbtu/7nudls1kNwjRNM5PJCCFM02y326Zp3b17T0q5sLBAkAkVN5vt3d3dleU1LsXq6ioXUa/Xq9Vqi4uLcRzXq82Dg4OZmanNrce6BWKY9MmTJ9/73u98+OGH9Xq9WqlFobx69drq6hqP1Te/8e2NjQ0/GDx+/PiXv/zlxYvnw8jP5Qqu687Pz3/00UeImMsVNIupkPHZMxfff/993Vd4+PDh3/k7f8dxnI8++ujoqFoqlXRLXZdegiBwXffWl3fOnF1rNBrbu7vNdjvi/O79e7NzCzdeflkIsbTsD7xeIplutNqLyytCwe72juM4rVarUCgAQLPZTCQS+Xw+iiLDMObm5nZ2drRKlGEYlmWls/njkyoI6fuDXK6QTCVs28zlU34QIBGubVEpg1hyyRllhmEE8VdLfL74oH5EN4S0M8DRJBSMREVfLOw9t/YAlJI4JM6dOO0Ly1WdjkVPH2R8VvLc/PEp//niIyPrHY24j/i/2bAYiwAw7OZNOmV9oiiKoijSRF3jCtWkacnRyMmk7zp1nDLLU99PeT/9LuPLOAVP1VA1fUlKKS25nkwm2+22TlSklJpVxTTt69evFwqlarVaO6kzaliWk83mE4lUpXLyk5/8NIpiRPKDH/xwdnYum81dvnzl+LiSSCTn5ubjmHue3+32Go0mpaxeb3S7XcZYv+8BkLNnzyYSCcMwfvSjH+Xzec/zhBCPHj3y/QGAvHz5crlcnpoqLS0t1ev1lZWVZrN59uzZTrs3GAz29/c9z9vbPbAsp9vtPXm8NT+3KCUsLs2Hkf/aa6/atlUqlW7fvp3LFtbX1zmXlA7p1bLZdLNZZ4w0m/VHjx5p73ThwgXHccrlsl6pc3PTFy5cuHLlyhtvvPH666+fnJzoIZ1Go1GvNU+q9adbO/Nzi8VikVJ27tz5RqO1tbVtGIZtudev3Ui4qXQqe3h4eHJy8s4775imefbsWT1xn0ql9Kz95uZmuVxeXl7Wqvdnz54lhOzs7Ekp55cW19fX0+lkHIQxj2zb9LwuKuXYLGFbjCBIwRBcyz71r//KZTO5oPUY93j/1UBlc3SM/dL4tF/Zb/zKWEwfQ8D3ZIo4+pmoZ184+hpbhLbb8fqc9OfPfQSJ4y+QGsk++SVRSWaapu/7ajTnPx6cxVH2NeYg1NOAMLIQGHk2bTnPgDzPQ++EeDa4NLYuHE1djKec4NmMswAYQsBjKaSUQBkwMjMz47puIuHGcez1B0EQMAKU0kwms7+/77jWxUvn9e44GPT+5m/+5uWXX3769KlSSvfNs7lMEASlUulw58nW1lapVELEw8ND/T8GgA8++KBYLJbL5b29PQ0Z09JL8zPz3W5fi6483txcXz9bKpW2trZSyUy5XE6nU7du3fpv/tv/9vbt241GLY5DnSvq0cdz587NzMycO3fu019/XDk+mZmesyynWj15tLFhGu7Pf/7LVCrzs5/99Fvf+ub9+/f10Mo//sf/eGNj4xc/f//MmTONRmN9fX15eTmKwlQqyRgtFPL9nqeFEwuFwvT09L/+1/+6VCqFYfjtb3/76tWrpmnu7OxwzqenpweDAec8m82bpu374blz5xBVoVBAoIVCARFN06pWK2trZ/7Nv/njH/3o93d3d/O54ltvvKlXtsb3HR0d2bady+WazWY+n1dK5XK5Xq8npczn87pO+9lnn+3s7OXz2Watvrq6+uTJo0KhBEpkc8lYkUazA0oggSgMCepxgGfH2CrIV024A4DeWOM4HnfFJt3AZGFmHEaN0xwBI5AnwTgWw/MrpZ5TCFZKKQGSjOqd+i10FqfhKEopzrmIYyEEY0yHh8P0bCRPgqPGnhztA8MPJUH7OTkxi6yxCmN7GcJfTkENcKI6qibKxFJK9lUjHpPbzH/mPqeeP8aP654kQ/0uEoa1Ncjn845lE/Ksk6GUjON44IUntcra2oplGZVKZWpqampqtdVqbWw80DrP8/PzlUrl448+sSzr1VdfzWbylNJGvTWw/bnZBV3Hf3B/QyNUHMdZXzu7uLh47949wzDeevPtJ48egSJrq2ds237waKNcLqdSqV/+6v1ioXzn3t2rV6/atrWzu/3Sjevdbvfk5CRUMaX4ta99Lebh3/zNu3fv3p2ZmWk22oyxIIja7a5SYNvu/Pxit9u1TDtbSObzWc/r/+Ef/uHNmze1AMbly5c554lE4o033vj0009937vx8vXt7e3dve1SYVGb0MnJybVr13K53K1bt2CUY+uuhud5W1tbOsw+c/bq5tOdfLEc8nh9fZ0QEu7u2G7y3XffNQx648YNapjf+u53ugNPSvnw8ZP1tZUHDx7k8/lsNqttT0fpxWKx3W7rjSmVSsVxPDMzg4g7Wwf5fP7b3/52pXKUz6YZY3MzU/VmM5dPu7Y5CCPDoGnimpbwgphSRuG5mGu8GH7LOhkvPzI6xvHaGPY5PmccBuPtHhFBzwSLUywyvy2rVEppV0MAKRK94GE4y6DEb5hAOGU+zz4doUgZUkaI0iT/qKQatk+GA5Zs8l5IKfWIkM70JvFockSTTC3rK2/ZOFA+ZY2TofbIeJ79dbKyNLxmQhhjhpYnlYIx5tqOY5saKsBRaX4RwzB4FARBADJKJSx/0NvfHXDOk+7S/Ow0gZf29/efPn363t/8TalUKpVKxXy+VCqKOOr3+6urq+12W5dVNA5zfn7+5s2b58+fz+Vyh4eHug0gpXz48GEwCHQvpFQqzfa6iUSi3++7rru4uLiytlqr1XK53Pz8fL1en5qaqp58rGKyuDjf7/ePjg/OnDkDAA8fPkwmk41Go9/3qtWTTDoHALOzs4LLc+cu9P2D48rht7/zzQ8++GBra2t6ejqfz6+urk5PT3t+/8M
PfzU9PRVG/k9/+tMw9PU0vRBicXFRx8nz8/OffvppoVDIZDKWZWkumTiOe70eYyyKoq2tp3qJbj/dXVs9I6Sam1vY3Hw6MzMTxzEi1fLAukV54fzFo6Oj/f39tbW1hw8fdjodwzAuXbokhDg6OiqXy8lkcmlpSatWcc6DIFg/e6byQfWLL744ONjLpBILC3OWZXARmaYlZRz6nuAhY44BjEDEowCo+1sM4DdZxbgwcSp70vHXZPc8iEJ8VnQApUBKKZQEeAYsUc/X9odvgUBG2kPjtUq1nUulkAgcmc5EW04+G8Ianhafb9dJQKlQAVGgEToEQJCh5x9eBhundkOfO3K4zyEMJmb8XizIjt8PXnCD6oUcVz0fb0we41CWDJG4ilKqyaRTSdfzPIpEh6AmM6SUmn/FMoyz6+uVSqXTap07d67X6fz8pz/N5/MEYGVpKZVImKYphOh3u65t8yiuVCpLS0tBEBweHvZ6vXa7nUwmE4mEZsUFgLm5Oa2sVK/Xu91u0knrUqdp2jMzc73eYGtr69rVl1rtxrVz1wmB3d3dpaWFv/7rv06lEufOnSHSiONYVw7X19dM06zX60EQJBIpzwsdOxEEkWFYJ9Xa3t7BwsJSp3/cbLa++c1vXrx4MZPOPX78+Oiocu/eg1/84hf1ev2b33pHiOjjTz58++23Egnn888/55x/8cUXCwsLpVLp4cOHUkqtRm7bdrfbdV1XZ0qFQqHZbBJC9vb2Ll26tLu7pwmCK5XKpUuXfn3zk9XVVcdxoigol6ebzSYAefDgASHk0427c3NzQRAopXZ3dy9fvryxsaGVNmq12oMHD2ZnZx8+fPj9738/juPl5WXHzhYKDzudzo0bN0BGs7Mz9+/f9/v9QtkRUiqQBCSAVFzwKJZSCniW9r8Yjr544ETlHF844Pn4EwA0nGO8BE/5i7EbHL+vtr1T61MpUEJqL6qE1COykxY4sTuM3+u0Yesj4AqFJuAf5pmAyBUwJMPWIoFnQbZ+5TDQff5Ek8Y2tsYXTevU41/5tFPH+MHxD5pURtdgdBZqmqZt247jaCIQz/P6/b4u0Pm+X57KWzaN4oAyTKZcypAZZHZuOoz8bq+dL2QvXb6wtr6iGV++9rWv5fNFIVSj0QrD2LKcIIhqtUa/77322hvpdDaORbFYTqeznMswjKenZ5PJZCqV0ngUSmkYhkqpZDJpGMbNmzeXl5ejKKpWq/Pz8w8ePBhDxtyEvbKy8vjx4yiKLl26JIRqNBq+75fLZdM0C/ni4eFxFPFWq+N53ve//733338/l8sVi8WpqSlE/Pf//t8rhf/oH/2jdrudy+VWV1d/8YufdXvtVrvh+z4hRLfjpZSa996yLMbYzs4OY0ynT4VCodVqcc6npmc9P2y0mmfOnbUcJ4iiIAovXb5anppZWV3N5Yul8jRSEvH40ZPNh48eLyws5HK5vb29jY0NAIii6NGjR9vb267rnjt37urVq6+++uq5c+cIIb/+9a9/8pOfvPvuu4OBXyqV5ufn0+l0r9fjIpqaLuTz2WIxXy7k8/lswnUoAYpK9zn+i45TIejE6n+OPWycECYSCcuyJlf1ZEB3alkO/Qo++1VNNOqEECLmIuZxHCsu9JA6PB+OTlZ9XjQWpVSsVCRlJGWsMAaIAWIFHNCPIz/mARd+HA8TQv1dC2jhaD5t8spwVFA59fnHzzm1sb14/KY/jT/P6ANoVCtoPiXHcXR5Wn/aMAzDMGSEOo6TSqUcxzEYebr1hKBaXJiJAt80zXNn1l3b+vt/77+6efPm4eHxg3v3TdPMpJMn1eOPPvwV57zf7wNAuVyenp7W1q5r+rlc7vj4+NNPPzVNc2VlJZVKdbtd27Bbrdbq6qqUst3rJpPJGzduVE9OVldX792/PxgMrl+//vDhw0KhQCn95JNPrl58SYMwdaw7NTUlpcxkMnt7e3HMwzDsdvsGs5XCs2fPz88tommvri1fODz/2We/tm334sWLnEs9S6Vjvw8/fH/gddbW1iqVytRUOQzDa9eu3b17lzF25syZ27dvdzqdpaWlsevzfV9XTXSTybCsZrM5VZ7xvXBzc3NhYaFeb6ytrepNwfcHu7u7nudVqkda62J/f7/b7RaLRdd1L1y4sLOzQyl1XVeTcGvU6NzcnBBienradV1GUlPTpUTCeXD/Do+8J5sbQsSU0pn5OUBDIUFq9vyo2/MAQE/eTPqx3742xstj8vt4Bb7YV6OUGsYzODQhiECklEoNSRO1J1QTWdJz16AAAKieldeoaRkLQDJUiFXk+e6alBJHKDmNByCEjONSbTgcUCAROOTdp1rpGRA1EgglIn22M403DP0e+qInyQj0E+REr+KU/ZyyzNGHe26E99RrJ1+uf9VhMENiWVYi4TqOg4hRFPX7fQIoRzVVy7JSCYcxdrC9oThfWFiYmZkdDAaDQa/fbfd6vaODg3q9XigU2s16IpO6du3Kndv32o1md9DX6RMhpN/v60omIaTZbPZ6vWazmUqlCCG9Xk/nfsCRxyKdylSqx0opz/NKpdJxpXJ8fJxKpZ48eTI1NdVut4WIL148/2d/9mfn1i5+7Wtfu/Xl5wCwtLSUyWQ++OCDMAxnZ+fiOD6pNqWUBwcH+Xz5ymWn3+9fubH8F3/xFz/43d978mQznc5KAfv7+8vLy4lE4l/9q3917vyZP/zDP3zvJ3/9y1/+fHllYXd3N2HzYrE4Oztbq9XOnDkTBEEikXBdV0fUUkrHcXQSqzPVdqejlEqn03t7e6Zprq+vSykbjeadO3cMw8hkMvfu3UPEoWnxMJVK5XI5SulgMLh//363233jjTeklBqFI6Ws1+uZTGZ7e1srGTbr/rXrVwyDVo4Pzp9dTafTntdNJpODbk8h9cNYSDAINShhBAlD+I3m9tWHnJizgVEFcrxyyAjUNV70WqJchy3EYKCQc87HpdEJOxxb7+kFqxQqoBS1aJLGUOKzOtBzNR6p5IuLf/KcQgGXigxh61Iq1KB0ZtDhmD6oZznh+NBp4fgqJ9uAOCJO/crj1O7yWxzjZCBx6i4MadIp0/5ZKRUEgS+57/uu7WiSC4pkvE3oFdnpdJ4+3dIlmHQ6c/bs2XfffXdz87FlXZmZmbly5cqFCxe2t7cNk85a8zMzM61WS8d1k9ZYKBSy2ezc3JxSynXdw8NDSqnNHC2yeXh4OLsw/+DBg8FgkEqn/WCQTCYXFhY++eSTXLGQyaRN0zxz5sze3p6ueUxPTx8c7CPi9PR0s9nc3d1LJtKtVqtQKJ5UG8lk0jTNRqMRx5lcLieEWF1dff/9Dy5euCyl3Nvb63a78/OzhJA//dM/PXtu9dvf+ea/+3f//vz55UEX9/b2Xn/99YODg263a5rmG2+8kU6nB4MBpbTX62mfHARBPp8/PDx0nIwQsW4wZLPZyvHJzOyUUuratWtCCN/3AeD4+FgjgZLJZKO6c/HixV6vl0gkbt26NT8/H8fxu+++WygUVlZWjo+Pdb6wurraarVeffXVW59vrK+vP3nyaHV11XWdTCbTatVeff
XV+/fvS2BRLNFwKXUsy7JtGykTg/+ynPDUHg0TIFIcUdpOeDmIo1Abp27fgcLxEMZvWrdf8Y4KCCFKyJEVAn3GUPwcEdszp/obCq4xl5RLJFLz0BAplFJCxGGsADRtKZA46pqGUDII/R4jYJsmSpQxyBhRsnymOF2aTdgJRggBCDxvMpaQE5AimLDYF2ue8LzrG0cU8ELB1zRchqYShAi0wHTRMAQQXywWpmgYBe1O2rbnp8vFbFozRKLJ0GSl2ekLVy+jyRLZtBcHH938JAbxO7//g53Dnb3jvYPqwa8++dXm7maz1/zy9t16o7W5tX3/wUat3tx49EQBqVRr2Vzhzt37SNj9BxupdDbmkjKzUCwvr68EPPzi7peLqytuIlkslW/duUOZ1en4Cu39w3o2PwNg+z5dXr7suuVCPt/tdF575VUd7208eSxAOalkfmYqUEKY9KTbVo4ZMezwAJP28WHv0cO9Rxs7J9Xm4uKi7bBLl9cTSWY7mM44c3MzjFkEEj977/Nz6y/Vq6o4taSII9HeP6pVa+3Np7tcqtnZWd8fGCZaJpyc7B0ebnW7FX9QTyVBmBgz9Hg0CHyCiinJ+x4LedzpfvnRhwlGTg73Svn07Gzx7LmVRufkf/jf/I9WIjkIg67fNR1ycPyUWfKP/ru//0f/3d/vdRsH+7ulQjlhpB7d3dq8f9A4GlRre//n/8v/6enWxtWrl3/x/ocDjwtpv/ezjxsd7seGmSpZifxAQDsITwa9p0dHJkhDCUMJJjmTnCrBQDKQqLj+UsClivV3qeIYZAwyUkJ/xSAFAUEADMpR6QcFAUmRowol1/QWAlQkeBiGcRwjom1aJjMMpFQhSoWxIFwSLqlQTFGmdMZKlEKtdxQjDoTwQAWMhAYNLDYwSI9ClyiqJJGCKmkgmAQZpQSRIEqpxxuBS+QShSISqEI2sJNNNE4E1pXRRKuh7CY4HZquS7cu3RpP1HiCTSa1zw9bgW3b2WzWsiylRH8AnHPdwDiV48oJYYBTlga/oSOjXmhdwAuec2zP4z9ZlpVOF+YXF6QU+/v7YRzl89nLly/rJDYMw62tLc0qrcsht27d+qf/9J/W6/Uf//jH09PTL7/8MiKeWSNHR0e1Wm1qaspxnGKxWK1Ws9msUurVV1/d29t75ZVXfvKTn5w7d+7hw4cvv/zyL37+829+85uXstl33303lUpduHDx0qVLd+/dnp6a7XRae3t7qVSmenKSyWS+/PKLbDbtMJrL5bRchC5dLi8vd3s9ANREt1IBoEJE0zSTyaROIOM43tzcPHtuHQDOnDmzu7t7//79a9eucc7PnDnz8MFGoVCoVCrZbE5H5lLK69ev1+v1RqPR6bRmpsprayumxUAKyzYBIIqCQa8vZNw6juM4FlLpd9cgeCFEFITvvPNOPp///ne/92/+5H/5P/7v//nV61cR8Ve/+nBvb+fSpQuZbGp5YZEy3N3dvn/33vT0bCGff+utt9KpfLPWXFlZ2ds7uHXr1oXLF37v937v6GD/6OjozTffBICbN28uLC0KIYIgEApiSToDPwzDdDKVzxU73fA/0yP99mPSHY0fmdzNdXN/zP8yLmoMR1Wfr+hMvvwrUqqJdXsqdpsM6L7yhZHgiEjU6drSJLkhG+PRxkaoa6kanqNpiXU55CsvaNzTP5Vwj211nAScMjn5VUi3UzasxvP7hnF0dDQzXTZNc29vjzE6MzNju04chz9576fr6+ual/7C+YtBENy9cy+fz6dTmVKx/HRre3t7+7vf+d7Ozs4Hv/pwfX393t3Hq6ur3/jGN3Qb8OLFi6lUan19/csvv+z1eouLi9Vq9R/8g3/w2WefXb9+/fDwMJtNP368kc5mbMfMZrP37t31wyCdzuwf7Nqum8lkGGPZXNr3gi+//NK27aRl7u3tZbPpo6OjarWyuLK8u7u7tLzc6/UopQQUNUwA1E1ZAOh2u5ZlWZbl+/7+/r6UPAwDRNTRMiJms9l2uz09Pb2/v6+USiYTe3u7u7u7b775+v1790ql0t7eTqPRuHjxvB9wPwhMyygWi4wRg7I4jveaNSEECmnbpk4XpZRRGN145eU//uN/owd//w//u//tF5991m639/Z3pVALi/MLC0uua5+oI0Q4f/5iPltAxHarG4YRo3atVgsDXiqVlpeXj+p7m5ubUkrGOvqC8/l8s9k+f+ESECoBu4Mwijrtdjvi0rJdJM5vWuL/XxwvLv1xjDa0llEbf4yChOez0skh4MkFPF6HL3qXU+/1m4xWn4oaxvgCYMLTTK7/8TTU0AjHl6jrbHpSrtNpScUdx5m05smkEV6gMxz/iZLTO9aLHwZeMD8yum5KqUEpYUYy6Rby2TgOB4NBIuG6rouUVKvNV199tVwub25uptPplZUV7asXFhZ+/vOff+Mb31BKxXF8+/btTCbzox/96P3333/ttde63e6jR4+63W4ul7ty5Uqj0fjTP/3TpaWldrstpZyfn6eU6uwuDMN8Put5XrNZDwKv0235weCkVqvX6zMzM2EYLiwsvPfee4VSsVqtaVrOs8ur77zzTqfX/s53vnPnzu04js+cWTNMO4oiEcUAYJkMkIFS/sDrW53ATS0sLCQSiStXrrTajWw2fXh4UC6XhRC7u7v5fKFSqayvr6dSGQDodLqDwQARG41at9u1LCOfz29vbwFAs9m0HRMAeCw6nQ4iUkqTyWQqFTAkgkd67oRLLuI49AfvvvvuK6+8tre384tfvD87O/vWG28KJQkAEDY3N/fe3/5seWXJYjTm/jvvvNPr9A3DMJldOT558uTJ/s7++XMXUqnM/v5+rX0Sx4elciGRSMwOG6omYca9e/csx83k8nYiU8jmJKAfxoyZvQGH/38cL+aK8HytYZgrqmfSDCNjeL4iODH6BJMR3MjAXrTDF4+vtMahLSADRBjqdg85tAFAPA9QIafeXh+6DaU7clJKnVjrf+SYvuk3vXbypkzGrpM/fGXqOP60Y1zsuEmYz+c1IGt9fV1XBfb29gAgjuNPPvlEKZVOp//6r//6888/L5VK/X7/H/7Df/jrX//6z//8z8+ePcsY63a7U1NT3/rWtxYXF3UhsVwuSyn/+I//+MmTJ7lcDhFzuZzWCfvFL37R7/cfPXo0Pz9PKJSnismkm8vlwtBfXV2eX5jN5TKFQi6Xz5anStMzU9lsNpNJeV6/1+vU6/VPPvkklczoYk8unfnoo48OD/YAgDFmGQYhBJRUggsehZFfrVaPjo7u3r27v79v23a5XC4Wi4ZhrK+v625kv99PJBKaTK3dbm9tbeXz2Ww2u729rZTq97u1Wq3X61UqFcMwEomUlHJ7e/vevXudTs80zWI+m0g6OGIuD8MQCBq2pQemHj58+Ps/+OHS0ko2nalVT1658XIymb5w4dI3v/ntt978Wqk01Wr2OJcXL1xZWV7d3zu8f/++ZVnLy8uGYSgl5udnL125XK2dzM0urK6uPnz4qN3uXrx4eW9vz7UdKaU3CHQ7d9DrN2v14+PD32pZ/wXHi0vu1AJTEx2/U
wUIMnFMwjbVuD34nEjmc0CU33Qxp2LAsc1zgTGH8VcUK/1dSDL+YmM2NCE0Y4VSapgWju3Btm3TYoSQMAzHVaxJKC28MA/2lRsVTDhAOTG5rx8Z/zBErhnGeOaQAnY6nSiKbNuM45jz2PM8REylUp1OB0aVaz1wqAljPvrooxs3bnQ6nTt37gwGg7W1tV//+teXL1++/fhhtVrVyz2VSt29e1drSDiO0+/3X3nllY2Njampqd3dXT1SqJTsdNrMNC5cOLe/v28YtFgsahZ6J5FCVH/v7/3Bk82n6+trX375peu6x3tVQumnn3565eqlmZmZIAiuXLnS6/XSqYSfz/V6vTCMfS8kjmOajBFaLucZo8VisVarLizO1Wq1TCaTSqXS6XQmk6lWq4Swg/0jRKpRAaZp53I5ANjb33ntlVc1E/b9+/eXlxejKGo3WwDSdRKO42i5mGLRDf2g2+4IyQnBZDKZTqYQZNtoV4+OX331dWaxZDL55MkT23b/7b/9f77y+tc+/eTm1HQpCKLFxSXbtr+8dUdJaZpseXnZ98NarZZyU6++9koYhre/vNMJBm+99bUgin76k59PT0/nsumnT5+eO3uh3W4PPK/T9RJBSA3bNM1SqeQ4Tq0d/Bcb3Fcd4/UzubTURINuHJeOVuZYHez5EVZAnFBHnDzGS3rSE04iV+EFz3nqQESlkWfaGU5c53PhqG7pwnAbEFKCUkgIjoY1dDNAjmnnTdMcv8Gkyx6DEiaNcLjrP98knPw+PtX4B0IoHYnj6LvDOVcKUqlUsZATIu71eoRgKpVCSnzfO7N+NpfLPX78eHt71zRtzuX+/uGNGzfm5xd3d/eiKJqfn5+bm0PEzc3NL7+883RrL5FIZLPZIAgIIcvLyycnJ4i4tLRECHn48KHWkCoUCoPBQJMapdPpYrnUbreZQROJhOXYvV4nm806iUSn0zIM4/GTjQsXLpw9t57P57kvd3d3FxYWHj16lMtl5+dnj6r+0tJSrVabKpfDIGg2WjyWtmEalEgedTodPZWjuyD1+onj2I8fP37y5Ekcx61We3FxOQxix0msrKycnNSIaUVx0GjWhBC2bV6/fv3zLz472j+IosgbBK1WK5VKZTJpPWVfrZ4QACUFKOladrFYzBeLFLHXaZuWNbMwbxhGMpn84tZtyvDi6ophGEopy7JmpucMg0khHMc1TWduZqZer3f8ZiKRWFlZQakePXr45MmWEKI4vxALhSCpYbTb7cXFxYXFZakwlUp3et2IAxqmH4Rerx/EkWe7QBP/v9je+JAThNnjR04trbEdSikBhgRlOKEdNo7G4HlfMml+MGGHAEDoc4IZpyz2xXdHYpw6J2gsJ04Y4Rj8qi9VSkCgAKB9kZ7eD4KACeI4zrh/OPl+k1c/6fr1g5pd68Xj1LWOf9Dx8ThCkFJy3eNErFarhkFX1lbn5+eUUt1+L47DDz/8UHMNEUJu3boVx7Ee+avVaqZp6r7Z+fPnf/rTn66srHiex2PQ0XW32w2C4NKlS/v7+ysrK7pDvbOzk8/n9cWsrq7atr22dqNSqUzPzrRanTNnztTrdT8IPM8HgJmZmUQioUkxwjDMZrP7+/vlcjmTyZgmk4rHcby3t5fNZm2ToVSJhFMoFASXYRhbJlOSD3qhbchisaBRL+lMslDIPX261e12k8mk53m1Wo0xc3FheTDwi8WilFJy3uv1PK+fy+UajcaZM2fW19cNQvf39+fm5rSsfL3e9DxP19l6rXav3eFhZNtmNp9xE3a9Xt8+2Hv77bf7ne7jx49TmfRrb7yue8Wm5czOL5oWe7q9mclkirlsFPJCodBstsIwHAz84+Nqt9vVknUA8pVXbqCbabdavV7HsRNB6B0dHW9tbbZarU6nC0gz+UIxn7YdHoQx+tS2nX70n2Vj/x+P8dp7MQQ75ZrGbmfkCUcrcyieeRp8MunT1Aup4FfWRH5TmIqIoVQwhJU+F9NOIvgYIcTzvCiKCKGccymBUSaEFiIdyneYpknokHVGf8fnDzVBkXjKSjXGZfyS8XNM09R4cUTUBq+U0nNWzLKEEP1+nwI6lsGF8Lo9QqBYLAoRHxwchGGwsLCg0cmpVEoLOGslCaXU7OysnrRYX19/8ODBgwcPdK1F45tff/31vb29vb29dDqt1Y7+36z9V7AkWZoeiB3l2sM9tLj65k2dpUVXy6kW0z0zmCUGoC3Apa0ZZ7kGGtb4vEvjE1/JB7yAfADNCBJY0NpADBc7GEwPMHpaVFd1VXdVV6WoyryZV8vQruURfDj3Rkbem1XTDdCtLCtuhIeHe8T5/Vff/32O42iatre3J0fRMcYSdVmr1Vqt1kcffYQxxgpRFK1SqaRp2mg2CVEODg4kVf5kMpHU19J1F0WhqiohqNlqr6wse97k6OioP0Ddbrcsy+WFXhrFEwQELaej4fXrN7M8khUUXVcRQpqmrq6uTqfTer2+v79fq9W++93vnp4Mut2FSqVy69ath08e27bZaNTW19ffeeed23dura+vO5b92WcPRqPRjevX4zgsy3IymdCSFzl99PBTXdebjdrXvvpVx3WTNIUIbVy9WnB2PB5OkygTLMjT3d39ZrNZ73bzIh6OglqtBqFgjHmet7K6dOfW7X6/f+ocS9gahFAOGWq68slnjzzP46wMw7BiGghAhLDrVhcWFoMwPu4PipKVjE+nnqabeZ4CaM0WKIQQn9fz+PlgHhVndT6ZKF0uy8ttFr7Nm9AssATnGYosziOE5BQFnxMXku/FWLlgvfK9EigyM7AZfrUontHhnGE5ZR8BPOtUZse5bNJ8bhLyGUCtmMOYz+jEhQSji6d3lHnv/J+/yY+Y2XBZUgwhApBAwDknhBiaZqqa49i+76+vr46nE8kjdnRy3Gq1To5Lw7AqFbfXW9Q0LY7jPC9939/c3BwOx3JOvNtd+NM//XPP8958800ggjzPpceo1WrtdlvKm5ZlGQSBEAIhJNlydV1njF25chUhVHEdKbV9fHSyvLqyuLjY7S6wkxNKabPZrrhOq9XinOd5fnrobW0/BoDrhvoXf/EXGxvr7U5THlzBxDRN17HdSoUxTss8Cn2AhK7rnHPf9xvN2sHBwZMnjyuVShiGb7zxxo9+9OOiKK5evQoA+qu/+qsoivM8v3nzhm3brVbrXVb+8R//seu6tmFKzhvp/23buX7dWV+/4jhOf+AnSRInicwSBYITb5pl2dif3nn11Ve+9CUhxOPHj70kspibsnKwt7m1tZWmebPZvLK6Vq3WPvroow/ZLzY2NnwvCMMwCAICgbwvk1M0nnpRFGEIsrRAAnh+mMShEGI4HBJFkwBDzIWu6wjjLMugYc8WK4RQXB4VeFYS74uRNJe3iyUGMYvRnoad8w/mSxtiDh8Gzxtvs1M9C80uAGXmPuuC2zz7FAjALBA9w9ZAeRx564EAPDNPeOEi5/2seFbI+8L1XDih+S/oaSg898YL39QMdsQ5N00TQ0iLsijLJEniOMYQYgCyLJNTFI7jMEYnk0kURYyVt27d2t/ff/DggZxk
73Q6vu8Ph8NGo7G0tCQJiP7pP/2ntm2//fbb9+/fZ1TIafetra2dnZ2Dg4Pl5eWrV69+8MEHvV5PUjPFcSxTNdu2t7YeyHYIoxxBWRDGEOLBYBBFiarqiqIM+qO93QOpEb+xseG4dpZlN25cG42GnPPj4+MiyxcXexhDXdcd26q6lSwrhBBlnt24c8uyrFqtJqnya7WaBFL3+/1vfvObf//v//3RaOJUqh999DGEcGNj4+6n9z3POzg4SNN0ZWVFOoqTk5OlpaWFhYWjo6O1tbW7d+/euHHjvffeE0IstDocAsrZldUV1TIKynXbNCp2EEeKqh8eHnqBr1n6a1/5MoRwa2vrcPshgEA3UJxM//wvPr2yfvXo6KTTahdFkaeZnGnSFVKW5WQ62t/fh5Ue5wBiWDLqhVlZlkkcYozr9bpl2zhKiiyPkiRLUt1ArKTEfGqB8OmA3lOMKEZ4Zg8zM7i8PbfPfHm7sLw552cZ2fMqmTOvw8+nZ+ed3gXruPzGC0sdPC+rvOAJ5UZm38j80Wfl3HPEC4Twojk99/MuHOfyns+1z/naFEIInPvGLMviOFYw1jCxLGNpceXJk83l1RVVNQ4ODhYWFh4+/HRxYcEwjFqtFgSB5GKwbfvmzZuDwWAwGKytrb311lue50nZsKIoIMBJkty/f1+mi2tra1mW3b179+rVq+PxWCIwW62WEKIoiv39/V5vgXOua4bS0LtdRVV1RPBoOKnYrlWpSfmKvf2Dk5MTQghCOAiChd7SyemRhI8rilKU2UsvvUAIYZRqKtE0rV51OAOU8qpTURRla2vrtddeK8vytH/81ltvZln6l3/5l9evX8cY37hxI0k+efz4MSHktddeOzo6RhhoulJvLIeRT2lxdHSAEMAAGoZTFFTXzNFwoqlGmuSKommaBqnQNK0UXFGUJEnCNIGahi3DS6KNhYXbneYnd+/9+Mc/Pjw8tCzLsqyuCXu9HsbKZBx88N57mqbs7x/bpjWdTvI0H4/H4/HY1HRCUBQHeZ6TRCnL0jL1sqQInBGOSAxDrVZDiIwmXp7njDEExXysePbg0vqR5Amz4sLnGeEXOA/wjCeYfcLMxp6prEomh1kVcMYqKF3ObGh4Zp/oeRYl0zTwrNWdfeoZH6l8aW79CwbOWaDIvOVcvs4zjbdzpOnsOufd8bzhzYfFZ7udv2vey4NnXeK8Hfq+TxBCACoYywl6TdNs3VhbW1MVLKPWPM89z2t3O91u9+S4jxCCALtOLYqiMIgDP1JVtdFo6Vr85PF2p937ype/9uDBg3/5L/6Vruv379//2te+Jif3fv7zn3uel+e5pJp2XVf26Hzfz/O82WwuLS0F/jjwgzQryrJ0nGoQRIqqcg5s2y5ZGQZRpVLpdDq1WmM0Gu3v7xcpsyxreXkZQlGvVuI4rNdcSUiRxBFC2K1YhqZpmlakpWmaaZpKNc80TSV+TRaiNU2TCsHdbvff/k9/+Lu/+7+Qx3/ppZem06kUeFnsLchwenByypjY2dl57ZVX33333TfffPPg4OC1V1/VNO360npW5MeDfqtZLwColPkoDIfeJCny/8f/+C8++uTj0XgCCe52u9evbrz55pu3LF9VdM8LojBVCVpeXt16stuotW3LGY9GxyeIUsoIU1XiVKqoCrjRyLJEVRSaF3mWJElGCHIBCMOwYruSpFRAmGclVpU0zeeNRC4jcY4gma/+P9dj/OpGOGchswDtqRz3fC4373bAnIu+YBczI7wccF5Y/5f8zVNqtmdf4s8Y4YVrkKf0bMQIwbNgnwuObv7GcNlhXth/fof5T5eXTQWFEKqKqitEahLKud6yLHe2nywtLem6PpmMAQD3799/443XmvXWZDIZjUb1er3dbkMIT09PkyT58MMPV1dXGWPvv//+4eGhZIw3DEOGrJ7nLS4uvvjii91u9+TkZDqdGoaRZdnp6Wmj0eh0OlJGoixLjBXHqVbrNc65ZVZURbcqtqKoSZLkZcmY8LxA1bVuZ2FpcWVxYfn48IQQ8vrrr1cq1mcP7g4GQNeUzz777M7tWwghS9fVBlGJYpp2HMcK0WLGGo267FIsLS1tb29//PEvFxYWqtWqYRij0bgsmZQBjeN4dXU1pzljbHFx8eTkBCFkWVaj0ag57vb2ruu6YRjWarXT01PTNPf3969cuTIdDpGq5ll2cnTsZXGOoJ/GkyT59NHmz+9+nJel3aojhLws/WznSXtlcblTtFqt3kJzPPJqdfvRo89sq1qvVzudnm1ZEMKVlRVT04sik1FDobdCP+CcJklURCUNSwyFrmrNVl2CrvI0lWJehmkWRRFfWNyXVshs4aA57unL2+flihcynVlO+FxnMx9tzm8SYQuetUl5evjpeV5c4fNWMNtHiAu293TlP/24p+f3bKJ7vuvFZy4ccXau8833+X3QXKg9u/LLXnT2vGmaCsYqUVSMIIRSQKtUy93d3Sj0Fxa60+k0DMNWqzX1PYTQhx9+KE2UMSYVMw3D2NjYkL3+9fV1Xdffe++90WhUrVYxxrVabTAYfPbZZ/1+X9M0ORk8nU7zPJdxlIRxyuJNo9GIIwIAqNcaEpJ/dHQcp0me565Tk0xbnhdEcdzpdBYXFxFCuq7HcfzZZ581m/XDw8NGo1arOoqCMcaGqlmWxSjFiJimiQE0DIMnmaap/X4/z3NVI8PhsCiKer3+xhtvSNF5RdH+0T/6R3lenpycMMaBwj1vsr+/a1lWp92R+hz3Dg51XW80GlLBdzKZvPHGm48fbXba3fh02F7oOZYtiWdU16k4DtNUP4kAQablCATTggIAwjQZTidDHDSa7sLiGkKo223/hz/5s29/+3v1etVxHMAhhFBRFBUTqbmNMd6bZEmWyuEyTdMwhAqGtm23W92iKDzPi6KEcoEVYnJTURQsnh9eAjZTin+6QAEA5bnswoXtgg3Mthl88uxfMbNJNHfUi8eZDSGAZ7PQy0t3/tYw840zw7vsJwV/DvgGnLFuyx4+eD7dwOyDz1NBIQSfN5sLbvC5j794E8/mkPIxQmiuKPuU2hQAkGWZ1Ljc3t1RFPLSSy+9+vprURRI/XrGmOM4Evgix8CHw6HjOMfHx5PJRAKgZWlnf3/fsqw4jnd2diQc/MqVK/J2GEVRURSS4VvTtDAMi6JYX1sLgkDq/pUlHY1G/eFgNJxomlYwLnF8aZYdHBy9++7PptPp22+/vbK0vLW1hTHsdrsbG+vHRwerq6tJHDFS6rqexHFRFApK5aoFSTadTqXWYlEUq6urGxtXDg4O3n333c8++8wwzN/6rd+ZTqf7+4ebm5tLS8ubOw8BAJ1OZ2lpZTqeHB4e/vZv/3an2XrnnXfDMOy02kdHR6+99ppUtknTtNPpWIZ5kB2VBZR0VVGZH03Gx6enOS0rFTtJUwGh67pMiKP+qbFcOz4+KgvGGLx567pbrVSr1dPT09XVjdCPJENkgXAURUKISqXS00zdUzllRZ4mYZSkcZnnUIBarSbp3hBCpq5xIMqyTNMUW/aFdTC/HjjnApx5p3lzeu76ee7
z8/BmaYTnK/ai/cHzouDMhcBnyzBCUhg+K5OIVMznZoh/lXU+/+CpEc5hrQlEDEIBoayccggBwgAhPLsDibNRYikezqFA880TCKDgZ3VUxsCMqRsAAIXAUPINX2AiPbs3yCuE51g+eRBTVRSsAEaDuMAYI9wouRLlotlZHftR4MWdzqpC8KA/zZJiMhm9euuaFEK6d+/e2mL7ypWrg9HwF+//1LAsADkmsN1t6abmxf76+kYURZXmchRFBTTDqV/SwND0NNuqWHYYeLwsrqytMz0DhglNff94U1XV7bt7jHPN1qGOBpOjadCvtbQXX9ugNOp224phT2N25BUP90eHB+PQ0D/2Dh6l48Od/esnfVDQ5fanL968rhtOo95u1Jw/+P/+fzauXL/9wmu+nx8eDUZhXnG1ovAcxwVAJGnAGKO0LGm++fihaentdvODD96L42R1ddXzR0QBb73y5Xffffev/+PffO1rX1tcXGSOePDxZ4ZhfPMbv/mzn/0sjuhCb/3e3c0XXnjh5q1X9vb2kKs9PHo8CYZra2uL9SZSTNNs3v20/8f//hc/+vmH/6f/y/9ZrSuNpv3w4ce9Tv10MhmewKsbNxE3LMPByNhYf2l/d7S8tE6palh1t8a4KBSFVEGe8mgcjcbTMwknQytME6iqNp0mUEWD6TCOU4EgxkgIXhZFliQYoSbKfd/XDUvR1IkXKIqqmZafxDrGDMJSACY4g4goaiF4URRVeO6XhFx/QIqoMM6FlLEFApwPCkEIgaCcPxNznZcYz9pgMoxlrGQMYIwxIjOPBM+A1tJYJLSFIygABAid2WdSzPUtz5NBcN7qkO+VL8mTgORchercp8r/YYzA2ZS+OCP2nXOvQPqgz7PsGSgWzqXU/NKE7vnRBL6kIzf/6lzofPYgTdOyYAAAwABCiFIqmSa2trbWVpYBAK1Wa2lxYWvrcVEUb7311ns/+qs0zSGEDMDxeKqbJ7Va7c23vvKnf/qndhAWZalpmh/FeZ5LtqWFpRsSylwUBYCiLMvxeDwZjS1Tp3k2GAwEK23TarVaAgJN07Ks+OUnHxFdfeVLLwdB8JWvfOXll68ppGA8UzUSpQWaJiEItGFsOhXFUaN4gLFi2laz3bJV3VKU3YP9z+5/dPvGxtpq7+2338ZIPT09dd1O1aktLa+leZ9SurK6dHx8VJwmhmHs7e1CCI+Pj2/fvv3yyy+//7OfD4fD733ve3fuvPjRRx8dHBx885vfLIoCIbS3t5ckme/7Jycn/+v/6r+u1+vD4QgAsLKyAiE8ONh3HCcryvF0ktMSYmBVTKxaJ9OIw/Lf/MH3d0cTiBjCOEziJEujKFpe6RGixnEchsnGFUfGF+NRIPmRdV1XFGU4GuV5lqSBEMAwDDWmqqpKnvwojGXPWla2JWk6hFBisBRFkQUw3/ctRm3oQiQEYAIwVVXiPOcICybkxCoXgnNelJTjc7qjp0b4nO1CKDh7EpznRzPm7NlME4QQYyxX+uX1OctF5x3j7NVfK/q7fHA4ByoAM8pDxhiEZ10Uzrn4vGu9ZDxgzoTmP/LCPs89lQvPz79F5h4SQi0Jj998801WFluPH2EMFxd6N27cODza/+u//uuX7tzgnGdpQZk4PjjojyfXrl3r9LpxmmQlZYwRTT0rPFI6HA7D+GxUEgqmEEVVVU0hBGHL1IGh27ZtaMrCwsLaympRFJSV/iS1bWvojUxTv3Pnzne+851Wx84yryhDiFG4fzTyp35SKJYFtcwbBy7mKcu5ggtIia7Vm4121TYJfPcnf/Vk67MvfelLluUcH41cN4vC0nXq2/ubvV5vd3fXMHRGxUcffdTptA8ODr797W/X63WZsrbbbUqpZVWuXbv20c/vjkajOI6vXLmysrJyejqQrI2MMamvWK83X331VUldRQj+bPdxWZRX1tfcei0p07JIh9NJtWH+wR/9a8WpqSZOioQIUK1WqWCMsSTOYiPpdpYQQoEf7e7ucobRKqKU2rad5+5gcOr7fpZHElAYx7FlWfM2INHF4/GYMQEhlBQ+CCFZfDKwIgTXDMO2TaSQOC8YKwnGqkIEQogAwAXCClYVDoRSMp7O4dwE5BBIsXg4K6UCB215mAABAABJREFUAKAA4pmexOXU6UJ2d2EFznsO+YDP6688b60+jfieV4mcP7pM5y5YFD+PB4GcJzxvTYLZSV6GqM82eGm2av5cL1z2F5zchXfNDkgwQRBDCME5dhSpqorU4+PjimV+9RtfR4L/0R/9Eef0lVdfwgqp1lqnp6cZY4ZdqVRrRFHCKBl/9khR9ThLi7xEWV7QkhDCudA0fTQamaZpmjqGkHNOCK5WqzXXMQ1DUKYouOq4Gxsb6+vraZwMh0PE0At3bvz0/en+/t5v/Rffwxg/ePiZYytE4UEa+2mqmFYyip/s7wYFvnL7BoiOB6dDVtICCrddc6uuW3O7Nfs7xnf/3f/0B59++qnr1peXNpr19uPNXyz21izLWlxcFIBVqy5jpedPFheXiqKwbEPX9U8+vgchfOWVV/K8/OCDD770pS9Vq26WpRjjoijSNL179+Nms/29731P1cje7gnnvNVqyN5XtVrd29s7PBkuLnRXr15hZer7U4FRkI4Vw4iSISKMKHo0ndqO1e22R8PTLMsYU5Mka7U6gkMIcOBHjUZbUkhJqFq9Xs+LhE6KsT9N09SwaoqipGkqqfoAAIqiyJ1loxIhlGdlHMfT6VQIsdBqckGLIs+LTGoWUM6IbmiWLRAWlJWM0TJlvGRCUEqJmCtbiFn1UK5pLteOEAKcq2DPFub8DR2eswbPIr5ZWw8h8txVOmuZyG1mwxdM47I5/K3bZaN9qk94Phr//MrvbJvhqmdZ7HlR6xnDm8Wo8FKzdXbGzzVCyhhCEAFYlqxIs0KnhJCKa7uu++DBvTgJf/Nb3/zG279xcnRYr9c1TTkdjTJK7YrjVmumZZeMjkajew8e2Lbt+wHEZ9T/mmkAWmJVWVnpyrYH4JwxppwT5CyvrGCEaJ5pisoEjOPYtu0OwYjziW9UHPP45NAwDD8KNdXQbaOk6XB6RAFeXr/yeBQ/3N5url3/vf/qH6w2lH/5//of7//iEy8Lsa1RAsbBBIN84+qV/8P/8X/4b/43v//b3/svNjY23n3no7/7u7/34MHDRs+5d+/etesbn376YDIZdbvdd955p9Np05K3l7t5VvZ6C4ZhaJqRJMnhwTE6J6e6d++ubVcIIdevX5XC4IZh9Pv9yWSiaZptO47jCCGKkmu6yTiY+FMBcrda1QIeJpOr1xe2T0ZJmgKeIa7GkZ+GQXNjw7Z1VdF9P6jX2lE0vX37NqWiUqnoui5XW6VSSbPqdDr2/XA6HX/rO6/leT6dTqXu1Tl5NC+KgpCz6VNFAXIinFJ6OjihJSeEACSIqiMEEACaplQdmyOcFwVI0rQoGC/OCFuwMrd0ZjO4s6xPyBUr7ZALgcHTkgnnF0mZnpsWzfYRz7KufF78eWG3L7bDC5Hg7LPmWyxPBd/OjQpjjOd95XO3C37sQjg6yzOFOE
+EL4XOs1sLvFRoEkJwCDgXZVkKAUu7hBD6YXjz5s0sjbd2d9rNhmlbnud1Oq3JNDQtlxDk+/5w6mVZNplMsqzgII7TrNlsAoSyLEOQFDmVWk5lWQrGTNOoVqu2aRGEGCt93+92Oo7jVCs2QiiM02q92ak1/MGJPx22m/UCMkXFiqKsrVwL4lEeZrphjcfTx4NHx4NBc6HbaLf60/GdazeBSqChelE4CTyaJW3bchoLJaBxFPzDf/gP/+gPfyCY+spLX7l7916vu2w5jBCCIDEMs9PpKCrudruu69Tr9SiK2u12peJ8+OEvIYSNeivPc4nelLTi9XqtVqvdunXr4ODA87yl5YUgCCCEq6urEq1aluVgMLl9g5RlycuCgyyLPdOATU2/cXXxdDIIJoGtq4iV/aO+AtHtjQ2FTxYXV8qCIUR++tP3bKs6HEx0XbcsK8sy3/cluD8M4zTJTdOWA9O+75um2W4bsr6f56V0MpLQrSzOiCoRQpZONE0TAGiaYVZsh9WKsjRte+wHACPGBOQFFoxghIhGFY6edihmqxYCAATkQMilyyF8imgBc9DTeTuRswGSThqdb/J+8dxgbWYtF1zi5f7kvHU9d7t8/AtRLpmdEDzTSJKn+LlN0vnYev6eMSNcE+c6TfCckQ48z2vPF4TmD6UrhHMAITxjcRUwL4soicu8uPrWm6enx5988kmjVpUaRv3hKReqbdtpmg5GwyxLAAAY47X1jdFopJug1ekIITzPAwCkaWoYxmQ6NjW9Wq02m816vW4ZJoRQsHJw2lcUxTSMarWqa3qWJQUtSVlAxE1Ly/txiVAcx05ePT0ZJHlAQa4ZlbwcPHryeOSlrYU2V+CPfvLDn/3ojz755ce9WttwLAp4lIQaoF44BUxrVCpf/upbP/jBn967d29p4ZpttTgH0+l0fX398eNHlUpFVev7B9s3btwYjUae5z169Gh9/YplBZTSx5tbb7zxRqVSQRioGmk1W2tra48ePYqi6ODggHO+tbXV6/VUjTSbTV3XKaVpGt+9+7E/DlnBYi80VK0s0lH/ULXNTr0WZu4LGyuc7k29WEfqSq3RqNc7lqukiW1VVVXf2d7/7NNH6+sbhmHpuo4QCENfckxlWTYejyGEVzeu9fv98Xg8nU7b7bZTceWvXJas2WwmSTYcDqMoCsNY/sqUUsizaq2R53lWFpVKBUCc5BnR9HqrhTCCUBAEORYQAogFAZCx+UX/1A6Z4BCdiS6dCQc9uzIvL9HnesLnGszMU8086sxG5LjPcw3h+Rt/jtohAM8QoJFnEANn7Zm/3QjnW/PS0mQ6PjPCp/oWZQmeV7yakZ3K52fXTAVnXBCIsKIQQCjlSZJAATRFPTw5btVrnXZzOp1wWtTr1TDy949GWVH4vh9FIcY4iKMsy6pVx3YdkqnVajXPc7l0ZLw0HfmSQtNxbFqWcRLKNvrrb74RBeHp6WmSpq7rIoXoli0gXF1brLj2J589OJ1OOAOKouzvH968c3U0HaRlVKs2bt++UzzZe+/+I6aGt15uZ97IMAzHcQqac0HtimmamhCMENRo1LzheG1tzR9njx9v/db37uzvHXVWdM/zTNOMk5BzpqnGaf9YCKaq6pUrG6urq7u7+1/60pcG/VGa5kFw1G63R6PR1BvfNK5Pp9NutxsE/t7e/srKymAwcF13cak3Gg9URZcyiXWzWoTZwdbexnqz5bpZPIK0YHFY15U379yOvPgXB6eVhvPCyy+0ay0eRLV2y/fiTqfy859/WK3WVVW7c+eOpmkyBWWMqarCqIiimBBlZWVtc+tJURQQwmq1WqvWx+OxEAJCvLu7WxQ0DMM8z+UUtWmaQoiNK4vdbjdNcj8K3VpV07Q4zYmqYKJyCJIkHU8mXhjkeU6ZYIydnkbPLkB0vn7kWubnz3AAuLgk/ALOfRQhTznNZhV+earzOz81krkYdb6nhxCet4JfxbAvH/bCxxGpHyKtglEOISYEUPqUgu38bOTbhGTBgOcwvNkdQhI8AgAopUVRSA5fCKFyLm8ozjm85RtnsofgvMEqznuPEjQkmBAQqKqqqxpRFUzI6elpMJ1YtlGxLKKpJycnU2+cUZzmOUJIM8yyLDudjqZpZZlLrlvD0FhZtho1AECe50mSdFvdXq8n2RxHw2EQBAsLC2mafvjhh9VqtdlulWV5dHxsmrZuBJK2rNqovvTSS8d//cPxeHxTu7OyfOXRp4+RwoECNVVturWVRdYP0n6YRv6kaTu9ZjsK/bXOEuAMAaASJABzHPvDj37RqjZef/317/+//y1CzuMn206lUa06hmHJryUvEhky5Xkqv42yLI+Pj994/Uvr6+sLC0vvvvvu4kJ3aWmJcz4YDG7dvoEgQQg1m804ToSIkyTyfR8hJKsgo/HArOij40HrxmLqxXmYmETRFB2rRrVi+Rn71htfXm2shF7UMCvXF5fWlpZZcHLr1p1/+S/+1Q/++D/8t//tP3r/Z7/43/2jf7yzs3d6etputz3P++lPf7q5+bBarVYqlYcPHym6urKy8vjx448++qhRb8p46ujopFKpEAJee+01QggtOedcesvj45293YOJ743H45JRy7J008YYE1VhHCRJsriyXBSFomqHuztxHGtW+3zCG3HOqaAAACAQwgicEWmflWfmSy+zuHFmJDOhMXGu6TBbhxdMbj6aA+dB3Aw0oigYPGve8BzZMxtBlGtYsiRr+tM5+Hm3KQ3kGSNECHF+ZmZfbNyXM0B4PtQ478HRpSneCzWl2V1E7iZzCTFHbM6ogEIICFIAGGN6tVaWZY6hUuBRniVxSGmhKJKNDY/Hw6IoGo0GpXQymRiqstDpagq2TR1Cg1KKIJBAtsFwQhTkuLamaRDCPM8fP37s+76u64ZViePU8wLXdevNFlbIcDx68+byLz+5ZzmV2y/c8f3w3t0HVzZW6tVWyZM4DSM/CLMUFpkOBcjjcHiKPMLzrGraTcdxdN1QiKVqFdNgtKzXq4KJ034/itMkzSeer6qO5OGXGUueF3lecgYgxJqmHR4eUsoVRRmPx5ubT8qSdbu9Ws2RpKkff3x3fX1d1+FPf/reCy+8ILWZLMtyHFv+lJJh6fT4sa3h9W5NdetIEF6UhGITmWFcsIx13PrG166CEsReoCJsUNpcvf6z937x/e//6//+v/8ffvjDH373u9/9/vf/9T/4B/9gc3NTCLG1tbW9va2qapqk3jS4fv26bmt7e3vT6fTVV191ner7778fx3G1Wh8MBmEY+75fliWCRELVfd+nLJtf4rTkvDxjTqlWqxXLrujmydRPo1jDuNbtTtMZ0b2AEMwGnQAAQvDzsI4DIOaC1V97uxypfcGTF8xBPpCsKNLZzkA2GGNB2cU3CgEAwHMBKZGKKzPACgDPcGNf+NTZ85ftUHZp5+808t+ZdsWFs39aPj3Pd88OiyCngjN2JuWNiDymqmu0yKUvjaJ4MplgDFuthmCo3z9ZWFhYXV3lgrl2pVZzw8ifjieclnmWWpalKDhJkmA6Kcuy2V2SFfw8SygtVFWdT
qcyoLIsy3acwWBw0u8TVau41bUrG15wZNiV6zduPtk7+uu/+tHXvsFevHMn8Ee9Vmc0hb7vGxC27cpyozGd+mNvnEDV0PSeW207bkVRFSBUCDSMCBA119nbPtjc3AyT2I/inYMDy2kR7GiapusqQkDPVdma4YKORqPV1XVK6d7uwd27dw3D0DTd9/04jnVdf+edd7rd7ssvv/yjH/3oysbawsLC9vZ2GIaGYQAAKC3KsvQ8bzQaxaF6uHe003KrNm7UdShIFpUCpu2FFRgkcUphznRFdet1BSFdUf7kB3/67/7dv/vOt793/96nN67farU6v/M7v/uTn/zEMAxJJhDHsaZpACBVVTnnH330Ua/Xe+WVV3q9XppkUq1xZWXt/v37um7K2xxEghAiKSFP+4Ner3fz5m27UmGMSf2PMAw9LxhmQw4BLUuMkKkZkEvWWUEpo+WZaDTA4NwBivm1PcsMf93tuUv68/aEc3rA84VJMIcJmwHCznBg50rVF6xgHqFKJAwCISS58sEZYOZp1+EMjH5++3nu6V5w+uBZ/w4+56Zy+bsQQsivWABAMFZUVVd0NIfxgxBSzhVFsSsVBGUNIL9x40ZZlnEYrK6upmk8GQ9ffvnlvb2dyWiYRqFOcKXqqArGiKcpiMPg+Pg49L0sK4Ig4Jz3+31CyPr6+mA0PDw+ct3q1atX6/W6LAiJMGh1e3a9bdm/2Nt7b2nlIPRix3QQBRZW67pBMGwYJgAgDkOFlYaw3EqlZ1VdRTchMiDUEVIAUAnOkuSzzz579OSxgCBMk+3d/cWVa+MxBgAoKqEyaOM8y7IsTwzDkFXHjY0NTTPSNOec37hxI038/f39lZWVOI6fPNms1+v379+vVCqyPWDblkQ4AAA8D1uWwbh2Mjz5+BNqqOLO7Y2KpVHBC5+O/J1mZ7lRreVpAblAEB7v7zx++Nnf/OyT27df8Lzg5ZdfXltbq1Qqm5ubpmlOJpNf/vKXMmQYDoftdrvTWTo8PIqiSMo2QQgXFhZ0Xf/ggw/+/M//8stf/nK1WpU/epFTibiIoqjV7uYFvXvvQRAERVHIQbBud2Fxcbnb7kgl8Pv37+9v7cjBTlmeKGkua6EIQelpEIZn6R886xqeL7BfI0Obt4rPC/Fmz8AzSbIzK0BzIsGzHWQ8jM5lVL44opw9JjIWvbDHPKj84pmBZ56fWSa6JKN99ufchP6FS5pPUp/WgdiZ8CKfUXcDAITwPM/UNQEYpUXFMhcXF4syCwLvxrWbjx8/rlRsRXHu3f3YsoyNjY2tx49u3bp5Yqj9/gktU29aAiA0jJ1Go0RGGIamaUrpIkppGCUQwp+9//6NG7ekWj3n/OjoyDRs16l13KoAEGLy1a//xnAUPnm08x9/8B//m//6f5V4I0MnDcvBCaAEL7huvNB1NVVjtq7ojm03NLNqW25FrzuGo6tlmjzZfPThhx8OBr5VqZWUnQ4HI8+3rEXLsjRdBUBGXDAIzgg48jxP4qxer+d5SYhyfHwchmEcjQEArVYny7LNzc1vfOMbUjxYVRUt04QQnjfVNB0AIHniJnHqTb008xUVlKzodFpmxdYs5+B0kFCj1uBJFKeRlwaT3ScPHz246zauJ3H+0ouvttvtdrs7Ho9dt7K9vb23tyeJXgEAiqK4bg1jZTr1v/vd7wIApFc8PDzc29vjnP/O7/zOkydP6vXmeccil3NPZVmGcVypVBqNZqfTjeM4CIKTo5OToxMAwEeMVSyz2Wwu9HrLi4uMsTzPx4OpDNdnEdYswQGAnYWgUAAA4Fk4+p9ohM99ad5U5J8zZwDOvZ9czFJXFyEkXRpCSApFKucTueCc2+IsX5sPR2fWMosM590X+JU9IUKIzammgfOAEz1vygM82/SfN3g+tydjLAMFgQgCgMQ5SoJzRdeazSZlBQA8TZONjStREKoK/s53vsWKcjA8NW17f28HQ9BuNfIk8X2/yFNs27pmUwZs25aaoWVZTjw/L4swiJeWVlzXHQyGp4PhK6+8IrOav/rh33z7a7c13QJQuXLl6mtvvLGzs/s3f/k3q63WG6/dabWbnZp7PD6ZponQxJVuu1N1Se5CCFWEK5bdrruNaoXgktHk0aOHH3zw/vbWYyGAomgCgKyko/H45KTPGLNsEyEgBBuNRoPBwPMmkqzRsqzJxEuSxDAM3/cffrb5W7/1DSmTeuPGje3t7R/+8IdXr17t9/sLCwtydv7w8LDb7cnKDcY44rlQcQrY48ODsTd13Uqr0220F3THff/De0kWZ2kYTAaJPzQUXqtZnIPl5dVvfes7JydHw8HolVdf/tGPfvRnf/Zntm0vLi5KxNyLL77suu7x8fHS0tLW1tbp6anrujdv3lxcWMqybGdnZzLxbNuWNE2yNysl3IqimEyDOErLgkmbLPICQqyqhJe0Ua0tLi4iCIus8DyvUqmYpiFbAnL1I4TgWaGSF6WUsj2rjkIIf13zmy3d2eN5nzaPjp49ObMUmb7NFvYsHZtvQs6CVThnAjNDmD8ymbUKwFMn+0WcGQB+ruMW562LeRc3M/3nxqWz/qTE6wghOORIIQRhCDGGCCGiIEww1oiiaYpCEIZAfgWWZa2srAhKgiDoLXQajUYSRrqhtpvN05Oj8Xi40G03mw2nYhm6qhHsuq5t23/yo/d93x8MBlLp8nQwTNN0MvEsq1Kr1Uy74nneZOKNx9M0TRcWFj57+OjK1Wu64RIOGvXWV9/66kc/e+8P/+0f0sRTv/b62nrP1UzGSoghxrioMK1oAQAwQpaqVyuWjsBoND493nvn3Z/cu38/yzLbUZjgEEOiqSPfT9NUCKGqKsaQsVLX9Xq9btvm3t6e7/tJkty4ccuyrOPjk1q1ceP6rTxPfvCDH3z729++f/+u69YopQ8fPnzrra+UZVmrKUmcCSFc18nzIsuySqWiWInlWCrkoMjHUeIn+TjKjX6o206YZhwwVQGs9AXNalWnt7xYc178O3/n7wjBoii5c+fWD3/4w3/2z/7Z1atX5OylJOGv1+tCCCDg4sLS3vHj27dvb2xsJEmyvb3d7/cVRel0ekIIxrgkFqlWqzJq9X1fMy1KKaUcAESIqiqKZCSheTGdTvf39sqybDdbk8mkf3I6nU4bt+9I2S94Rj9RzhdgZgtyzop+bU/4zHL9nJee+cRz5yHmhErBeUAnzguTMiiVStlfvJFZTw88Hbf9Ih89s+bLkeeFWYp5v/rc65n5dEl6L30jA0xRNE1RESIIQIwVXVE1VRVCYCAIhgTBsiynga/pzXa7vfV417T0KIqGpyeaprVbjcXFhS9/+cu7208whnmWjkIfA1Gr1bI8yYv0zp07w+Hw8PAQYyxDi4WFBdtyHj/e2tnfM3Sz2+1eu3bj8PBQMilyDpI4U1THsuyFhQXXcrIg+MW7P/wX//z/mYWjb//m13Rbg1AYWBEIlYIbik0QxggpCCoQRb6392T7088+uffxJ/vHUaWmVlTbC0pVgbqqx3Hc7/cdxzZNU1ERpQXn3DRN1634vn/jxo0kSYIg8jwPY2yZ
Q0/dTp09vb24IgbW1tzc7PHTl6iGGYbqtdKJeazfr995+nhJw5c/prX/saw+BcLscwjKIohw8fZhjGcbzp6dlOpyeKsqrqpmmXS9O1Wg3IGzhWgIGwLGs8HmuaVqvVZmdnT506BaPYkLFBZAs6FZiEgDxJCIFhImDhEELAuCaRDxew5wWZPEmn05lMptftA6tBKRWQhDFGiDAM4hisp9PAHwIpALOCwBTE+z6A7D79RgjJpFKUUhiCyefziGHa7fba2lpv0IcNNrBSBQTiPM+7vg98NScIcRyPTNPzPEGSEEIJpQQhijEHsMpE0gk9A7yIN8bM98VLDiRJdKDr20to+3cmPwhp8I2Pf7A0nXw/hONeikbfPQWYPbUHm1AaRRFFCGNGEAVBEHhOgEYIIZTJZGzTGo1GumrMzc1Vlme/9a3HZVm87577nn7mSde13v9D7+1229lsWpVFa9xPZ/TQ5yqVih/YLEdZOR1G7mjcO3H02Pb29r333n3l1Uuarlj2iGeYSqWMGSqJfCGf5TjOMLRyOR+GLsa0VCoUi3lVlSVJ9H0/k02xHHa9EM5soD0qlYrrdDmOYzBLKWVZDGq7hETFYjGKYkIIy/IIJXFEctkCxth1dwDq4nnBth2O4z3PSxLS7w8qlYphpDDGpmmBlnVqatrzbADPgMICkg0hDFMOMEoCLwk6GdcPJEVpdTqAkfh+yLJ8r9crlktHjhwBsqGYyzeN1Kg/uHLp4nz1raoqE0Isy+p223Dx9fv9fD7veb7juCzLBkEYBCEh1Bw77U57coGBJAtS1uLiIkKoWCwuLCyIothut0EDDM2FIAhQ+4FZ4OzsLOQrqF0RQmEYAlgyHI1Gls3zPGIZvG+8AMkKwsz3XSZkQt/HlAocN1kxAHwDNJzgAQNT/JNrG65hoAEhg8my3On1oP3TDH1nZ0fTtHQuKwgCkIRHjhyxBiPQS8DqX3hMMDqZpBAO7atV6D5bckDl/QbS77Xhhw6AMcxrNwpOMuf3S55wpOF9/hDt94fca2vXvRimrzkC9jId2ntJe6o6llUUFSAsBnOtVqtYKCiSCv4uiiRlMpmpqalv/P++8cADDxSL+e985zuzs7OVcmFjY2Pl0KI6Erq9JkJUlZWlhflWuxEEXhgGkhBpspLPZAWBO3r08LWrry4uLtTr9WIur6hSJpMZD0eGoR09egQUxmPTK+QyGONUKkWTiGdxEgUpXc0cO04pHfb6uqJOV6c4zLiuOzc9U28G8BHEEdk/nmLfT/K5IsaM4zj5fD4M4uFwGIbhcDhmMOe4Vqqc6XUHqqrWa02e54fDYafT1VQjiqJ8Pn/k8DHbtm/evNnvDcPIgcwAOw/hpIvjGGY7RqORoiitVmtxcVHTDN/3VUPN54sXLlyYm5sDQzRZlkF5t7a2Zprm/Pz8hVe+c/jwYUWVdnd3vvjFLwJvBnlDUZSdnZ0oinTduHLlSqVScWyPYZhSqXRndT2XKxQKBbiIofWChlDX9atXry4tLaXT6dXVVcuyNE0D2QogosVicWK+JkkSKJ+AexwOh81m0/M8QRCKxaIsy8Bk5rJ5gRfbrU4cJbIsFwoFsCYALCQMfUIIwyAoNwghE5UYnA6T9ZtJksDlBiprkFhDyMApwHFcOp12fU9RFIwxWGal0+lqtVoqlUgQEUJAb2AYRqFQGA6HMMQMyY9Syk2OyQlGAo3j5KKn+1bce4Xfa123mQMzfnsXUxwn390hwUymePEBZzS6b6t6MG73ytc4+u4/J3KcPWkfwhShfQAJMyzDMK7rKLqm63oUx1EU27ADIEyAWZZleWNjo9FovOdd7zq8cuTXfuVXT993zrIsjGmlUomjwLZtTdM827Ftu1QoDoZ9MP81x8NqpUII2W1bMJ7PMAxGKJVKpVIpEsWGYaiKxPN83bY1TYvCUJYkeKNqtVqlUikUChMVMshlgiDIZrOAts3MzDSbTdd1YcXsVHXGdd0giDiO6/UGmXQuiiKGYaMoMU2TJKjX67VanSRJ7r/vIXCaAhwCBuRv3759+vRpkDgPBoNcLifL8tTUFAwKViqVTCYzGo1qtcbhw4cFQbBtR5blbDbbbLRXV1dlWR4MBmEYp1IpNa15gZ8vFlieI2EwHo/7w0Gz3QIkc3F+IZPJnDpxkud5nuUW5uZfefn5EydOFAqFo0eP3rhxa2Fh4cUXX+Q4vt/vq4rOsQLLhjzPO443Gpm6nuJ4zvM8sCmo1Wqj0ejkyZOrq6scx2WzWfDC6Ha7hw4dAsxjt94AaX4URZZlgTdxv98HZrzf7zMMA6PS4M3F8lwqnQmCIEpi1/cUTQ2ikCAqy7JtWqqs8Cw3GAyWFheKxeJwONxY34aJwU6nw/N8pVIZDAYXL16emqpM+jIIEKgNAUEFNQtCSPI8mF3mRQFaTdt0QUQOczPrd+40m81isZjJZUHR1h8OEkpEWZqoPrmD2QwfgENfUw0eiJZJz8rsD0BABTXZWU8P+HbT15KKBzPkpFJ9Y2pFb7jBccWyLMdyUDZMXmShUPCj0HGcOEkkSVY0led5kiCMsSLLt27dknjh53/+523T/PVf//UHHnjAJYHneRy7t2zD9VyUxEkU6JpsWU6r2S7kM7MzM3R62rLGV69e5dVCFEW+68VhlMlkioXy3PRsxkj3e90wjEVRLuTyoW6AvVoURYIgwTaLCxculMtlx3EGg8GhQ4d0PdXpdFRVJwSZpk0ptW3XspyxOc5kcq7rc5yAMR+GYcrIQMWYzeYHg4FtueVymWX5MPSKxeLNmzeTJIFuCopA0IuEYWgYBvhwN5tNjuPAz+Lc4jmQXGezWUlSJjsFwPEJlD2gaEWIGY1GyRjZtr2wsLC5uanreqvVAvkYgzBYg3qeQ2mSzRZUVW42m7lcYXt7d3n5UBTFjuPs7tbS6Qyg9o7t9ft9WK6CELPfX6GJ0hI6uunp6UajEYbh1tYWyI9g4pFSCm9ppVLpdDrdbrdarYK4FOb34VxDCGUymYkquN3tybKKMTscDsHzBiHU6/V6ne7c/OxgMJBluVDMAz+0ubm5vLwMCieQlUHufeSRhzY3N+Eih1IWFqIwDAM6GFCTMwxjuy6UpnrKmHAhuVwO7PwAHYUxRXD6msxIgGo3n8/ncjluEn7Qp33PGDh4m5BOUEBCKsf7HOAkSU6+n933U5mgNXRftobeANVM7h98BLTvZcjsewTvFbqIYow9z4spYRiGZxiouVVV1VSD5/lbN28KnPhPf/pn4jD6+7//+2w6JwiCbbvmeBiFviQJlJLxYOj7rudr1ljgBZZnBUJw4Cc8z8YBiQOipSWRlyml3e4wDGOGop3txmDYy6YztmVxDKdpKUqJ53kkIiInekHY7/Y0RU2lUlEQirwgavLudm1+nuu2e7OzsyzmXNdlGCbyiTVyZSUl8MpwMK5WqzyHBv1xJpOJIrK9vc1xQqPewhjD0btnGToaRFEEck2QhmUyGZZldV1nGGaCrQPiBxUUDK1PGGRZlg0jxTBMr9cDYTpwGHFMNjc3WUXp9TqZfK7RbmU8z3MDe
DrXdiilYehHURQFwXg4TKKIxFGpVL506VK/PyiVSo7jYsTxPI8RyzJ8KiVsbGyUSiWMcRQFkiRgTEejMcMwpmnW63Xo2ZrNJhR4pVJpdnaW4zjwdMrn88vLy5/7/Bfife8vKJ7B9wmsQ8DWudvtQo3tOI5p26qqIoZBDMdxgqqqQPQnUTwxblMlmedZz7ZK+Vyr0wfBAAwWg0g1k8koigLPC9tjwZ8OITTodSbkPqgOgNXA+6N/iGVABrSxsbG5uZlNpYH/oPuegFCntFotCCWWZV8ThAfz1esgTbRvYMFz3CTXTUpQdMAl8WAjCzf6P2DidjDe0GvzMEII0dc4ke6/Howxdl2XMhhOQUKoF/imafpeSAi59957Txw7+cILLziWffTo0bXV9U6no2ZVOCw4jsOEgg+ayEuj0bBULGSz+SiK+r2RpkgYceXSlEcYSVQ5jrFMW+JlhmGiKMGE4znZc/oMclVVRYgEbsiyQqlU3tzdIoTAmFy73YVf//btO/l8sVFv5bKFOI6HAzOTyWQy2Xy+kMqqg8HA9yPPCxmGCQMShbRYKK/d2RgNzSiKGIbrdHqwGqHRaFiWPR6Pg9Cr1Wrz8/OixJcrRYRQGIZh5AehJysiZqhuqJIkwUQvAOJhGEbRuFqtEkIYhh0Oh51ORxIVnucBAlFV3XVdFmNF0ba2dliW7w0HYG3GZtjl5eUkiSRJEnneskzf9xkGHzl0uFbvZDPFSxev/vAPL4uCyvPieDwEWoXn+f6gm82lM9lUq9USJd6yx45jr6ys3L59u9FonDx5stPpbGxsgGfk1tYWz/OKorTbbZD4yLJcKpVgTl9V1e985zuqqq6srHQ6HYQQqDTBkBKQSVEUp2bkKAhBM81xnOcFgFjKorRb20mn083arq7rqqpeuHDhrW95S73ZS6VSYDkHF61lWRsbG6COoJTClFyyv1MMIgda0Em5QSkdDofgUMzwHBAwMPWyubVlmibQZjzi/SicjHqGSdxot/qjITeJwEmCoq81R5uUlBBRYRRNIu3gOG+0//XJ33vTum+wEj4Ipb4xJv/vwt6zSY4syw5877lWoUVGSiQSKABV1VVd09NLLm2bazPkrPiD/BHD5bddcse4NNoOh5zp7mlZ1SUgMpEytHTt/sR+OBGOAFDDDSuDZWVGRkRG+H333nPPOXc/CN/9qwheIeabeGqqbemzSZ6hF7VtRzcNzrng6uDg4J//83/+y7//1fDhoVlvoM6ZzWbd4y5jxDZ02zAJVZZl6Yw26i3LsnRNJ0pKwT0/6HX7URTFUS5yahquZWiyJLwkjJE4TAnRNM1k1JCCFgUXRZkkBaNmLWgdH/NHjx5PJpOrq+t6vZ5lxXg8bjbaumbmeTGbzYVQk8lM181Wq31wcDhfzuKocJ36/d1E07RGo8W5VIoeHh4LIY6OTgzDAIXK85yrqzf1egtHMmMM6lIooeGf32w2MXCL4xiiHsNgtVpts9lYlvX27Q0E7JRS1Mm1QAZBkKYpAI96vb7Oi5OTY2wLFsJYhZtWvWEYhu/7eZ5mSbIpcimlKPl6uaJErpbp4/Onv/nNb8bj6fHx6dXVFdzzNE1LsxhOH4PBwdu3V4PBACYmEKGT3QJ2qIGOj4+vr6+TJAEzBjSM+XzeaLUfHh4wi68k7eAw4I8qyxLsE6T0lO/Sg2CEcGVZwDjCMJRSMrUt35I4ztLYMLQnT57AcBH5EP0nak62076ynQKBMea7NgJV7pw7gafEaYJfVIwiVtG04wiQUlbkUFgfDQYD8GTey4T7RebHubEKwn0Xtv3f+qCrpDsmhNpx1vaxzSrsP45D+v5MsvoXVx6yrr5zy0FYQnISxXEYRo7n9vv9Rr31+PHjf/Nv/s2nzz87PDz8w+9+H3ie67rddieNQkJIQmlZlrqhpXFSFMVisXIdC6cmEYQojSgtS0sldVNjZVbkiRyPZkRpusYUViApZtu+bdtE8LwsiNKklFlaHh+fYjEQISwMY8t0BgdHZcmjKLYsRwhVCxpxlMZR6tjlaDj7/uUPrVaj2+2+/OFS07TTk0ebzebm+r7d7sAKDRNexojjOPP5XAje63WbzaYQvCwLTWNhuJFS9vu9+Xzuug5jrNfr2rbtOPZ6vXry5DF2DD158kQIAYn98fGJ7/sAEjAGwKW5O+Otbrf33Xffdtrty8vX56dnjNFer2dZxu319fX12267c3xyuF6vb25u5jPe7w9OTx/94fff/OxnX/23//bfnj9/7jgWYyRO4DIqazXfsoz+QVupFqPmarWCdeJwOCyKAk0jIFPLsvI8H4/HWZY9e/as1Wq9vbl1HOfNmzeEkE6nszWeCYJmswl8FXJN1NW6rk+u7nr9jlJqOp1qlLXbbctyVqtVFG5++tOfLmfT09NTQ9cXi9mLFy/u7+/9em8+ny8WC0AvWP8MXQj6QDSEpmnuUhFFAoR0Tu2szDzPw7Fo2BbCcjQa3d7eVgbTkASCZ88YgzjT87xGo6Hvx8+7Cfs/XTSCZYfeD8cS6l1AumqP5oZgo7scu59OqzS7X9Zug20X+WrP/JeSvXHFDqqiCudlZjo23g4otaAF+du//dtPPvlkuVzOihJ/6tOnTzerdVFm5e7mB15ZltiedXgwKIqiWasnSZqlPE3KKIosU5eShWGqlCKEtdtd27TSLO73+2UhNWZoSouStOTCMKyiKDarMGjaV2/emqbZ7/am07ltWZ129/r6erMKPcdVgrSbHSXI7e1tHMab1WYynjYbrShMlWSlFLPZSimBq9DzHSF2K98MA7vchsNhq9XIssT3XSnlyckJpRQ80vv7W0qbZZl3u+2yLB3Hur6+evz4ERwA+v0+pm1gY+FqgAsBxuWaZkgpTdvCQR5F0dHh4cXFhW3blKnr6+ter7MlfNgO53yz2QzvH5LEXi7Ck5Ozv/u7v/v888891+ech2FRlnkYrYXgjJEwDJlGlFKHhwe2VXv9+jVUrW/evDFNs9PpYKgwn89RtmFXdq/XS9O01+udnJxcXl5Op9NWq4W1MHme53kOOAevBNt+arXa+cXjdrOVZnEcx1mSxnEsynKzWTm2VZblYrHgnJcajaKo2+1OxxNBLEoppgtkJ2rHEBLmVJjjQxcuhHDtLdcCjHZML9M0pRqD3jJo1G3bBpYLD3iYdHDOwVwDGIudGWDzvpcJ9/NVFRsffIFNRtVN29ud9EHNSd6HW/eDs+oVq8Rb/W7FTkD/VwUhbnSnvzQMQ5QcHMI0y8IwtB3ns88+O310dnd39+tf/eYv/uIvfv+7362Xm4tH55+9+DRcr7///vuzk1PHqYEuZJi67/tUESysB3WD5wUWGGqaFkcR8f3u4HATroqicBzP8wJR8ulkXhbi5PhQSpXnBerAVssjkuZ5OZnMfN+fTqd5noPIcnV1ZVmOZakgqK9WK6yCieOk2+3puu55dcb08XhaqzWKInvz5k2v15OSfPPNNxdPzsuylPKdA93x8bHrugcHB5hJ6rr+/PlzrEa5vr5eLBZPnz4FYDOdTrEUAcWP4zhJkgCTbDQa
SIme5/V7A0rpdDrFBKLX6zlCWI4dh5HjOIZp/tVf/a9v37zWdDqdTk1TZ4w26w0gq2Ve9Hq9V6+Ws9ms1WoJIWaz2ePHj2/vbsJwbRiakKUQXNO08WQYx/H9/W273by/exNFEezlcRwsl0s4AhNC0MgBb4uiaLlcekGNMYadSrPZrN1uHx8fv3r1qpJrSilvbm5Wq5Vpmp7nbfL8YTSUXKAlBnmlXg/yLP3222+zOCry3HEs0zSzJD05OXl9PcRKXXhbVTJiLC+B3wKYt3BIytMY7DmkH3CPpJS+7zmOQymFpBs1s+d5wJ/Vzj20Cpkvv/wS7HnMxt91aHJPOaHtrF/IXn9ICNGZodSWUbYtO3dw5XbSKGHGxpQgQknNMqiUGmF0B5wKriSVmkbVbljJyFY6KKUUrrZT0BNFKN3iMqRRb6Bf94M604wwSiiljWYrz/M0589ffP7ll18+PDz8H3/971zb+d/+6n/57W9/65l297zlOPZ8MSWEtPpNachws4zjWCmla2o2Lcqy9AO71a7VW36UrheLhWmazKbrdGV7tuEbV5d/fPr0aRzzZt3UWS5IuViMGOOue7FaLe/v723bef78+cPDg1JqvV63eMM0vSgaep53eXl3dHS0WsetljUaTwGpfffDt57nFTwTqrRt+/io57mWFM7V1RUMLxazOS9Eu9m7fTvq9/tJnNu27Tudg+6jxWLRbvcuL6+fPn2KtHZ3N2SM9fuH63V0dmbf3Nw/fvx4s9nUavy3v/3D8fHxbLmazBdCiG+++/6TTz755rtvYZAzW85N01yGC8/zjh8dwVC03W7HcUgpdxpmr+WEy4fbt1YUhY7jXDx5dHV1tV5tbNt+9OSJ67qvLq/DMHZ9pWjy8vUfWm3vT9/+/he/+MXvfv+Pz58/H4/HR72Tu7u70XD5ySeftJvp/e3o4pyEm2Q6XazXa9j1Usvkko4m86wUpaLdwVFeluXdwzrNv7982263R6MJEO/1OrQsqyzFmzdXpmmHYbjZRJSOKaVK0SCoC6Fub+91x9hsNv1+P/Dt0WgUOL0g8K6vr03dKMuy1WrpjOGSzrIsSkulFMbu6OKAZAJHLcsSNpYIIaQBzlSeRkqn6yyOeU4ZpaZm6vYmDpM85ZwfHR2dnp6A+zadTmku83XChOJJbhOjzMrrHy4PDg5kUtrE0HSS57t12eTH5gQfNH67MpXu32H/nj/6nYqlUZEBKNm616gde1vt5UmAUVUyZFujDHX59u2Tx48ty7q5eksphb0vFOtffvmlZZr/9b/+1zAMP/nkE8eyv//++6IovJqDAaa7VwP4jgllDQabOC8x5WOMtVotVCB43xeLheOam3C5XC6Xy3m9Hriu22rVgsBbrqYlzylTSRqORsP7+zuUhZqp+v2+41pZniyWM9PSh6P7+WIahiFEVZZlFWXmerYiIssTSrTZbBLHcRiuKSW2bZZlrpRcLueu6/q+m+epEKVp6kkSTSajg6PPANyNx+N6vf6P//iPYRjCNRTGnoyxy8vLp0+f5nn+u9/97nmRYRlLvV5XShmGgcMb5RDZ8Y3QH87n8263PRqNgKm22+16vZmm+Xg8tiyr2+32ewfoIWezGWrgNMpRa8VxfH5+/vLly1qthkwSBEG1GhEjk8Vi4bouMg+KQMO2CCFQ/SA9sp2zPdwrw+Wq2+2apnl4eLhcLi8vL4+PjweDwc3NzXq9Bq4LA2w8Jnow6BVN00zTdDabZVlmBSZEEtbOVRATiG63ixeAtIlhia7rMDKEZAmFKH693qmz3VZpuqN5Sil/9rOfgVyOp4OXXKPRuPruDcIHbBsYGsHTiDEGexS9ipaqINwvSvf7wyqE9uOtusn3FUnVYwrYtwn5LpIJoZAj7TCb6oGUUlyW+79eVbjnZ2dXV1dCqfPjk1qtBu/08/PzTqcjpZxMJnB5qSp4WAzAuZkxhrUQx8fHpsYty1osFmBvwrgFvwVcDsYzSqlms9lqtYKamSQJ1STR+GI94arBVbEKZ6XIKaVZEa1W64KnsI0pimI0Kut13zBYUaRFkWZZnOdJHG9M06zVvDRNXdcSgjcagRAFjIxm8yEhxLRovV4/GLSn0ylT6vCou1qtuEgfX5wQQkajUZaHp2eD+/v7zWZDCAHxGrpYuBhiYZBt27e3t48ePcLU+9WrV5RSz/Ngb6OUSpKkXq8/e/YMC70wtQPgvlqt0jQGCbPRaOm6Ph6PHceBveejs3MU7cPhcD6fT6fTfr9fFJnnOUKo8/OzJ08e397etloNzstGo0cIcV3bsixCpGUZvu8mSeR5XczWtr5eGiOE5Hnu+h5MFl3fxyGoadpyubR1A+NQWGmt1+ujoyNYVKxWK1zxIOUBGsGkAaS8drvt2U69Xn/8+PF0PCl2y94RdSgvqWYAnYI/Nb7Wdf3s7Ay8cKxwYrBKSZKUp+CyAqGsxtevX7+GpxMGQnhti8XipHuEPITjHtQfpdRsNsMzGoah70fgx3G4D5kgzBhl++H3cQB/8MU2j4t3kw9V9Yq7e7L9lS/vr0yjuy0U0+m0Xq/DNwkd/OHh4cnJyWq1evPmTZ5lEKRtNpssSWG0yvMCHycMPxqNxsnJyXxyC9QLD4tGAkqTOI6hcENx32w2B4MB0xOp8v5BS5EiDOMoXpY8SRJeFInruiVP02xj2azSE+Z5GidrqQrT0jrdRrMVcNHGWOX45GCxWHielyRJr9fNsiyMlorkRRnV63XXa3a73VarESdLxhg4K5QK22GMMdthQqaO6y1WOWbBwN/wlq5Wq3q9jvXu0L+u12sp5enpKZeyLEvTsGzLWS5WvhfMpvPNOvz000/hHJMkSZpkRFGN6VEYF4WBOQekepPJ5Pnz50KI1XJNKR2Px+PxGKo/4HuiFEDCBoNBkiT9fh9eZsA20AXN53POOcAYvMIkScD1ofo2a5Vl2e/3cTICe4N70tnpWRRFeDQImpCmcGrgsgSVbLvvqeaCUApaQhJGSZKcnZ2B2qbrumUYSikEYZIkab71pEYFhLGBrus4sJCN8VCoqjRbp5QJIZUq1dagUIciCnvXwGjVdQPzWCiw8RTYeAFhJPI20ux7mbCqS6sESN8PCSkl0Bu2J3v/OIA/eMx9wKa6A2OM7IwS93kwTN8ybLZSQ7VNsMBgNE1Lk7Tdbl9cXFBKsfAM8HRF5VFKodw6PDhgjI3HY16Wp6eng8FgOp2uFgs8IBjAsFfFuGk0GsVxjIXGOLHSNFVsxUXWbDal7JRlzhhtNuuAxfzA0w1NN2i71XEcq1YL4jjudpumqXNeeJ5Tq/m2bSslkGSCwCuKzHFsTaOdTivLsjgOLVtvtmr1es0wjFrNMy2t3WlYluX59ld/9sVoNBqN7gkhjWZjuVz+/g+/se02qizTNL/++mus4xwOh0+fPp3NZm/fvr25ucHsC06HXhBg1TYGVo8ePfrDH/6wWCwAmuOUxMS52WzCQlfTtMVisVwu2+22bTmtZns8mjx79mwwGIBvBT2ulHKz2Xi2h9HC69ev8zw/Pz9XSnmeB94sGipgUfg
aK4crv6Mt0E1pWZYgi8+XSxyOIMegigNfdDgcttvtbfRSCu5Yde1xzouiCFo1dMsYr2dpgimIbVqoM/chD1CoMUureJTQJSIU8VPbtuFsr5RahEs8EXKgZVnYWzidTkFbw0GD+lYpxbnwfb9izMPzW0p5fHxciX3fy4T7QfJB5FR9HaGyiqL9+3xwf/JBq/kRGQ5VKHl/aKGUEuTHVcWHh4fYtvX5i0+Pj483mw1WDiA9WqYJel4URUQqePvDgpYQ0mq1sCBtOp06pgKUXNHhweXnnNdqNSyvAniIQBVSosdwXAvFm67rQVCHSQznknNeMTmVUhcXF2AJ4ngmhOCJMMXCB2NZFhoSIQSeER85qql+v49xWb1ex3Cl+kRN0xRCgKHW6XTm8/njx4+BEyLMTNMcjUZYKA9e1WQyAf3ScRwEAwbfOPtxt16vB27XYDBAZwHk/dNPP6WUWpaFMx5ygVartVwukW9RvIVh+NOf/vS7777D4pdarYZJAKxx8GfiAgUiCoC92WzWajWqa8gGs9EckzcEuRACDhebzQbTlNlsBufSJEnw+VJKTdPMdzfM/W9ubur1OiEE4KSp6ajgHh4eoGNQQqCeRPAouiUMgDNAKcXRgLJos9lAYITGD68QoGhlgYEmCFQHQggeDW1qGIY1J5CUlFJkZZHzUhClG7pGaVrkcZqsozAMw3fAzH4m/CAg94NQEbE/fvg4YD6I4S13lLxTD6IF3C90q6dWO8UGqSaNO73SYrGAL60oym+++Qaek7AAQtmDiiWKIkPTYcuNhanHx8eB78NUE4d91TpiC5W2cx8PgoAQAhCGUroFaXSfc6mIrul2s91RSsVRKiRpd/pKqTRNNd0yLVdRRjVmux5jpmVpppmnaWrb1LIs163lec6YyTmxbR/igCQpGGN5LvK87Pf7i8XCNLdiv9PTWpJky+VS102laL8/WC6XWVYwpn/55VfTafTkyZPZbGYYxvn5Od5e7AbGZjz0eGhxDw8PuZTNZvPZs2fwAr++vp7P5+jKHMdZrVaoJEGb7Pf7cN02TVsIZRjWZrOZTGau67969QOuKgx4TNPs9jrT6TQJ46Dm+YH76WfPm83mN998o+n05vbedV2mkZLnuHaxRACnTFmWkC/5vh8mMfyUCCFXV1e+7wul0By+ffu21Wppiui6PhwOUbngOMOYju3tBcMoQtf1QpXgciRJslwuRVFCxGybFmaA6I/AKbVtO0qWbLeTE2HJGIPSBd0mwkzX9SrxIH/iCEBhqZQCI3S1WgF6QXWGF4w3DY8AHm+73R4Oh6iHi6L4kUz4cQTuJzelZJW4Pw6/DxAdpRRjKC/fEXEQhFJuTdLUBzf6jiVHCNG07QP+7Ks/Wy6Xb9++zeIExxgaCciu8yy7vb3dYdDaeDxGjY7VnCBegoaLfLXVzgqBK0DTNDjSwlqr1Wo9fvxY07TxeNzsOESVZSE1Znfa9bIsw/V9EpetphvHaRQWnHON2UlcBkHQbPjz2cZ1XaKMJN74HqnXAkN31+v1bLq2rcxxHKIM32smSeI6ruc2RsNpq9lN4tzQ7Tzj08miXmuNx+PZbHZ0eOrY2mw2u357V6vVNGZqzIThClySWq3WYrHA5Yj1w0VRnJ2dIc5x1pSlmM3GpmnHcex5wcXFU8fxxHYdHU2SjDHmOF6aput1WK83oyiJ49T3/STJlKKTyeyHH77DzobJZAJZY1mWrXbz4OBgs9lQSUDv/OKLL/Da8PIAuSGn9ft9IMO2bTOqAzuxLAv1Ht7/zz///PLy0nEc1/dxuk0mS8/zbN2AAzpQqLu7u0ajUZYlRAwY1kFXAeMMpW9Nn5BRDcdFCapRBpp7tNkgaHEZAHRBAsSyKogqdF3HX4qhPN+thxBcSUEo0XRdd2zPtlyNGVKoq5trrAPYNorMYFSnRENKL4oC+g+U8Shizd2KoXdBWMGbdNf1iT0DGBTulFKNvaOtbaNmj3Czn8QwlEzLnBBC1XtJdRvAO6iG7bFkdEOXUqJvVkrV/aDf77fbbRALkyTB0ILthFRSyvl8zssSLMqyLCUVkIRW6mTPdcFXZIyVZY6AhLESYO5vv/325OQEJGBs+cCFIoQQXCtynme5bdt5JotCdNoDwzDms8i27U570G53b29va0FnuVw2Gg3XcjRmM1qulvHhwImjQggRbjJe0rdX9+122zCMX//q951O5+TE63YOpzM5n22WiyiOijRNn1y8GA1Ho9GMEPLyh6sgCGazZZGrOCps2758c3t4eIhF8P1+v9FoYCvQxcXFs2fPvv/++1//+tfPnz/HdoSiKLAQt9/vYy53fX2NYtgwDCz6Mgzj5z//uRDij3/8IwyzdW24XC4p0TrtXriJ4zg+P7+Yz6etVgs7QDVNcz1HKQU8NlxtcBS+fv0aBx/nHP/2er3b21u0bZizTyaTPCsNw3j69Cnn/Pz8fPTLCVI3fDHKsuRSQkF/eNidTqd1z4/jmBDiOM5kMmm1Wu12+/7+/urqCppDuMWhiXh4eKi168irqGtEUTYajTiO18sVrnv0csvlEvW8ZVmz2ezRo0eYsoBJc3d3B12Y4zhIjABslFLAC+hOdYTeEjLI2Wzm+z5aCcTeer0mUjqOYzl2vdkwbYtqrNVux2mim0YYRxBkvQNm6Pvyv/3MRveat/3v/P/efjTB4sYYo3tVKNlR4fI4xRHVbrdrtZptmEIIbGvYWg9YdqVjlDunRrXTEFNKGWWU0na77ZgW6Py8LDGcNQwjixdgFcODAH0a8LrFYhEEASp7bE3Tdd20g+U64ZwbZlByuVqlrusdHB6G8ds0l0kSpbmcTNeEMM9vtdr9IkmTuIijnChdcKqkzLI8CjPBqeB0uQgPDw+LXNqWz0timZ6u2etVrDFrPlt1Oh3TcDvtAyW1JEke7ieWtXZd17EDJZXgVNdsYBJRFAFuAevi9evXp6eneZ5jbzF26y6Xy3q9LpTGGOt2u8PhsNFojUaTbrcbx/Mvv/zKtu3lcvny5et2u/3s2YskSf7tv/13n336xWq11jSdMe3+/l7XzE6nXREdTVOXUgrJlVKo08x2K4oiz3OlFPf3d6j0iiKXUobh5uCgH8dxrRas16s4juI4yjPOOUfSE0JUBvtAvwkh+KNAswQU1+l00GHGcRyGoRDC87zxeHx3dwfTGtiWojFDV4IMWRSFLDkOdMwe1+v14eGhUmoymaCLZroJ5o1t29fX1wBIa7Xa27dv8fJwVFW+UpbjGYal66XjuLVaw7bdOI5Xq81u/sEcZzvZLwqe52XgOWznsIoFYd1uF4U9qOdKKX2/qvwgwOieFp7u1vcQ9d6yiA+KUrWnCZb/XXUi0FHyUTmKjwRWJaZpllmOJVKmbpimqWmaZ291A8BFP0jgVbmLzAxqvLbL5HDIwk/BVkP3WA1kUaUAQEfyTC5vUNAzuuGch2FclnIymZWF4rwMN6kULE1z27Y5F0lSEM7RNYFyAXMNFCGcczwUcuxkMjFNM8/L9Xr9+eef397eP3nyyXodNptNIdTRkXN5eSmEME1b10
0URZgQ4l3FNhLMpkejUavVsm370aNHzWYTmm7UbOPpEo0QCj+M2nVdn81mlmV99tlnl5eX8/n89va21WodHx9DamxZDiFsPJ4MBgPX8RljlGq1Ws333SzLsjxVSrmuyxhxTAfjnzRNHx4eut2u3FlCZFmGRYX4mKAh7rT7Ukq0r1j/hFeFfSFFUYxGI4QN2vJ6vQ5vC4gScFifnJzc3d3NZrPNJnJd29/dgiC4G99jsy/+ZCK2rtbMMOH1cnhwgF1RWIxr2w6AFjDgwOFWSoFWRXb7nrePRohm2bVaDa8EPtG2bR8eHoL1ho9YKVXJaNnOJ0pKCZ+OWq0GgTI460mS6B8P2fcrzP0acssRVXsE638i46k90OWfCkIpJdkT4LMdmch2HcBTIA3FmxCzJkyBGGOWYeJNwbB1axb+EYaU57lkHNcuNYyqfA03S0xE8GahQcewFbDNdDqFcQiQxkLQZrPJmHZ9fS2EAFY5mUwM3cLHgJmkaZoYnTUDF0CRrmtRtAEYqJRwXUeI0rIMSlWjUQvDNT7mxXI6GAzCMHzx4sWf/vSnn/zkJ69evTo6OrJtu9fr4XLMsqzRaKDvD8MQi9rhHo8KEAEA6g98hNBH0d3Wx+FwqGkalntiDkEp/earxkMAADSASURBVPbbb3u9nq7r2P4nhDBN03ebgquyEIwxw7DKsry7u8uz0vMUUk1lDgRLVUIIIBagWbiIISMACo2cjKGfYRhxHPu+f3p6aprmw8MD/Lan06lQcr1e76jkGorGWq2GdYVYxgpd8u3tbZ7n/X5/OBw2GjUEKmBYlAl5nmPeyxjT6VaOFIahpmmY3MxmM6DNRVH4tQasa6o+DdaGMIACUKx2mnK8EsMw2u32zggrA0gDO2PEv9qtBDVNk0vpmqZNiGYYhDEu5WK1enN1BdnkaDJJ01QXe4a/+4BnlVcQb/LHVLn731E78LOKq+33K/bnu197LwgZY4puxU34Oa48/D1ESDi3F1m+fVW7lTI40fdfzPZJ5a64JXR7xey0+ZqmlVmEAMNprZRC3QJvvCAIqnkx7lOkWV5meZ5PZ1NKqW7qpSjjOGZMI4QkcUY1ommaYTUKnq43YZ7MMQSr1WrrzYwQQhn1fLPdbkiVe54XRotG07u9vcXBX5TJJly0O/XFcuG4xmw+0nSVpJtNuNhqi7I4juPFkjVUw3GNakaHTw2RVqvVxuNxJagROxfJPM8p0aQgUhDLNAO/XgsalmkrSV3PO+gf+l5tPJr6vn90eJKm6Tdff/uXf/G/C6Fub681TcPIYTi8DwK/KNws02zbrDyEgIjEcdjptHSdFUXW73dd1767u8MvYl5qmvr19egnP/lsNBo5jvVwPwU8OJ/PdV1fzFcnJyfdbrcUHFc/AIhqhIOFVpAgdzoddFzAxgkhGN8DyNlsNug+it3ePsaYVFtHIvwvIhkcNOhuAZkCjwGpqHLmrdAKXGk4u4VQaRqZpt1stlutDhakPTw81Ot1xnRCSkqZELwsha4rQhikmBiTwoe7LMuHhwesfMOcZhuEVbxV4cT2RL2oZHYX+4/vpt/vJ98LTvYu8PaDUO2USlW4boEfTQMAAxDJdk2EJQZxZLdRAHMFBKHaU/orpQACJUliMG07jTAMvttz2mvXtZ1BCIp+PHu73ea7ZWb4SPAeFWRSlLFlm59+9pQxbbPZxPFSERrUPF7KzWY5md4LIYQ8KnnGNJFkK8aYwRzN4KvN3Pd923FsWzNtZVhSM/h4PD48PCxF1Gg0TG74tQPGWJpt8iI6GPTRzcZxLJUgvBSCO65OqDmZ3sfJ6uTkZBMmhmFgLAZeVXVq4DQpiqLT6eBszrKMUl0IcX5+nqYpBsSGYQyHQ9u2sTsByvrZbOa6brPZvLx8m2XJ3d2DrrN6/VNoZ7vdTp7nnBeUKlyLZZmj1OecY/EtmjdcbUiMKOxRJ8NkLc9zKTSMT0H7xDr7R48eXV2/xbJUdGVoDWAJ43nexcUFsMRqhn57ezuZTNBcoDaGFBD7xfTdSm2MAaB+BrCEq//u7g7jB+zcBtiTpulkMoFfKJrSqnfA0YNhMnx6wG7FcwGIIoSgGMHpCdS60ajXajUQlbG1Al3Sy5cvqwnHNggRcvsZbD8mxc7QjlJKiUb+u64w1f9uUyi+uReE+Bq9m/roBmSJEILhtZQSosl2s7UFYNS7cWL1CuW+kzdlIHYJul23yPb+wHLnbwcYwNyt3QOYAbwL7y/OV2ZI29Db7eZgMBBCkHtRcsziqes6RennebFabUoe2w7z/EBTOt5PzzNn88KymWlRRYo4WVk2o4xruiS0bLZ837fxgp8/f/6f//N//pf/8l/+8pe//PLLL1erlWVrhDLPD8A7qTfcOFlF8ZKyAV75er0uyxKzKYyq0QOj5Ts+Pp7P5xhgWLYLClhV5KMoaLfbUsp//Md/rK57gGGj4Qy1XByn6OK63a7neUkaxXGm63qr1fA8LwwxjOAw8Ib+ixCS5zmWH4LFBgVdp9NBLcoYs+1cKfXdd9+9efOGc76Jt4UJnAKbzWaSZdh81mw25/O5pkiSJOijgAVEUfT27dvpdLrZhK1WixACpeVisRiNRjiesCt3vV6DtsY5bzWalNIKjcNvlWW5DuNarYYzGgw1jDH1nXB8H/ZTSimxbXaw4RSHHQgMEP6CIAr0wfM8QjggVvw6WpVyZ0OBzKxXhc3+nKBq0sjOkxsVCKbn5CPg9Edve0nuRzLhvlRK7Q1I5tMF6ml8ohkXwGmASaDKxxtEd/IrKWWl36/4N47jUKn274M/AQvu8OtorNEb1Ov1wWAALwkMADRNm0wmrmcFQWDb1mw+zrOCMdXtNoVQNzc3R0cn9UaglGKaajYb0MK4ZolBGQa8lqUxJrMsk9IAaH542HNdp1ZzkZkJYZwXf/EX//Pd3d2f/dlP/+Zv/uYv//IvX79+zTk/PT0VopxOJ91ut9GobTabNI2VssE+JYQ0m816vQ6EBmoA27b7/f7FxcUPP/zguu7bt29X6ywIgtVq5fs+xt/oyhBa8G5DtKB/xu7hZrN+d3cHpOH4+BDvHg5y0zQJ9bIswTdrtRr6Xsdx0EFgoHd2dvbNN9+AV+R53v39PYg+yJ/T6bRKC69evbJtu+Blu90+ODiYzudSykajAQu5Ikkx4J5Opxi1qZ2boOPYqGgw+gcFDNyAer0OQs9iOkN1KoTANP/t27fw9oVZVl5wQgj0n+v1utForFYrSikip6q6wcw2DCMtJQ7xwWDg+37VgsIAju/cij3POzg46PV6b9++3jKuHAf0LFBE0I1rMOemTN8mJUIxusN/QpIkTZBVFWFCEqUIZQwe2Eh2iKh35evu+5RSyii6JiJKQray3G1kEkIIKbiglFKNUko5kVwWSihCCAAYHNhVrGLMQLeuNu97CmNvzM4IhFIqiVJKKkqURhljSmNSZ8TUNSwJlaZhGGmqhMg0jRal0jTt+OTM8xw/cBljtmtacyPLEqF4t9+kVLeo7ZuB23Axk8RGsW6jX3frGByfDk5hoPTmzRu33YuiqEjWq
zBpt09gsdVstDRNc23nzZs3FxcXUkqDabyM7h8eXvz0RZbzMkqTlM/mm6dPf6LrtfPzz/7jf/x/wg0pCm6a9eWCHB6e5+nEd08vr/84HN3863/9r//2//27NI0ajWCzWf7iF//Tcrn84x+/Nk0TC7fvbh9OT0//2f/wP/6f/+H/3oQrz/Nsx2x3mo5jnz8+e3h4MC19Mh09f/HJr3/9a8Mw6o3g/uHW853lctpoNOJ0ZNp5p+fEqa1I1u32nFj7Z//sZ5eXl9gV4Thev98PguCHb38Yj8effvrp7e3tfD4/PDy8vRk9efIkz6Tr1A/6x8PhkChD1xzfa87nc8t1er1eccUn81m73QZi2Wq1yrIM15vlfNFvd7KT0ySKV7P5oNu7ebh3a8EqCm3fe/32CkdzmqYHwcHJ+SPLcx3X1W3r8uZ6PB4vNuvBwdHt7W1ZiE6753u1rOCbKHn69GmSps1WW1BW7/byPJe6YXj+aac7HA7vRsOiKBaLxTrcSKJQPaJu1zSNKaJTZmq6bZimac7ml5Zech4nIfXsrpCyzLK6b1MpeMCKgjiObts+ISQJRw/5QuVespK6ro2miyjeaBrlqSBcdZsN13MYI0WRvdcTvldGSokyd39KQfesCj+oQquEtn9n+j49bf8L9f7aw+pHbO9lVJFeJerqR9XL0N7fpbGfVPefC4+gaRr2EKLWNQwLXU0cxwcHvSxPtN2ChIeHO6VUu90WYrso7u7ubj6fQ/NWlmWn0wEHEt2CZVmDweCzzz67vrtJkkxK4vs1TdOIkEopypjreLZtHwyODMNar9fKskzHffLs+Xq9Pjw8pjRnjCVxBl+J29vbp0+f+l4tiqKi4CjI0yxJ0hin+Hw+Pzo6Wq1Wd3d3jx8/uby8fPLkyaNHjzqdznfffXd/N9R1/c2bN2i3lssl1Iz4q0Evhgk0pq/41IBG9Pt9xhg8MxeLBdjtjx49+v3vf39/f48ar3o3Hj9+3Gg0xuPxaDQCmFmVu0hNQDLhVaHrOp4OboKMsYeHh88//xyvStd1rOZ9eHgYj8foxDabjb9bz0gprdoEYNHghQkh4FUD8ucf//jHR48eWZb1y1/+siiKVqslpfz6668/+eQTYDlgaGRZNhgMgA/h+9AlwmijXq+LkmNVliy3lA+UVM+ffwrIlBCi63oYhvi4PS+YTGY3N/dKkV6vDfg0TXNGnCjagO/JebFahWVZ1uv14XAY1Hxd17NsN6L4IAj3o6jCIZGIYLf6QfjtR291xbM9Pf5+MOzfuQqe/X/fv9uHs0dF2f5zsY/CuGoOqzgXuxvnnCtdiIKQjBBiGBqjmiACnI8sywxTw5wRHGsppRRMSbpZR1eX15PJxHXdTqfjed6/+Bf/YjgcrtdroqTGWBylD/ejPM9zWaRZTil1Pd82rSzLiqIwNM2wLKFUvd5I0nQ8mTqWbVnW+fl5PIlXyw1wAkpps9mO4zjLcl3XhVO6rmvbUtd1368pJQ4OequX96enp9PJHIzhly9fnp2dL5fLP/zhD0dHxy9fvuy0e+v1er1eP3nyZDwed/oH6NmwTQXgId5hXddh0wIUfgcJepvNptc7sCzn6uracZwwjO/uHrrd/tu3b4+OjuI4ffTocbO5hi0NlZxS+vDwMBgMANIWRfHw8IA+EMxJQJf9fr9er89Xy++++84wjPF4fHZ2tl6vj4+PUaweHR1JKV+/fg34VCkVRdFqter1erAtw+tHfdTr9Q4ODprNJoYEUkoQ2SmlQGtA3MMEC1u1q/oZzGFMRDabDcgxsLqRnKNotM2tHxyVquLicc4nb4bYn4GDAycyCs56vX50JBhj/X4fMtfNZjMdrymlYRhiS2xR5IoIz3POz88bjZrtWGka/4ioFze526akdtgjmlT2vgXbxwHw0Y8+TIDk/Zt6fyz5rmrdaymVUrB1opRKKqsuWe75cXxwHLCdpxuyN/AeTdOoCeZkXpalaeoVD/jhYRQEHqFyNBoLwXXdlFKORqNwU2Bv3nK5Ngzr5OTs/Pzcdd2XL19vNhuxE3THcRqG8WKx6J0c5aVSShoZL/LtkVkL7LuHyXqxDIIA1vS+KylNO92s0+nd3Nx8/fWfTk5OTNM0jEgIifYDJ3FRFIZhFEUShuvZbEIUOzk+u7y8nM1mL168mE7nd3d3R0dHWZaDqXz+6KLb7f72t79N0/T+/j5KM9u2W60WRqCAfB3HQT5J0xRYa61Wa7VarVaLUrVcLh3HGY/Hy+Xy8ePHaZoOh8Pj42NCCFw8oC2CfIxoOhzHB4MB8Eyc1MvlUt85iwF1NAyj2+3qun59fY3dwEdHR9DBgGwNdBpMSwCBuq4zZmiaBlkZ0hSG8tXyerClwQ2IoqjX6wFah8B/Pp8LIcDGVruJOXjVUBULIaDPIITYtq1RyjmPokhYHF9QqTRNA6mDEOK6frPZhABFCJEkWZrmUZTM50vf9weDI9xZKVoUvCi4rrNer6frdDIZapp2eHRAKc2yBFemrhmu6+v7AAluCDMUDFUaqbLKvmyPvF927sdnFRhVEqPve+xXd64eYYsg7U3w9x9N7qAXqlPMdvCeit0S0g8OBcQGngVBiKkDNzLUP4QQXTctyxSSa5pByNZHvCiKsixw6SyXy+kkAiul0Wg1m83T0zPLcjab6OXL14PB4PBwEMfxZDLhXPq+X683ozgNoyRN09lsgbQTBIEQ6ocfXt7d3MKpodtum7ZTFMU6jOxSZ9So1xu+HyRJslqtCSFgSyF5Z1mqaSxNk5JnRZli8fJgMBiPx0Ko4+Pjm5ubo6Njzvnf//3ff/GTn37//feDweD4+Pj+fnh0dPL9q5cHB/jgs/l8jkEiIWQ43J7osPfu9XrdbrfRaDw83KVpqpRChQnC12azubq6ajQat7e39XodyYoxNp/PfcdttVpwFgMh3nEcVBa46bp+cnICet319bXv+ycnJ9Pp9Oc///nbt2+/+uqrLMsODg4WiwVKfaVUURTT6RSzkEfnj2BAOhwOkyRtNhvNZrPb7YLqgLEH4BDkydlsBgtM1JbQvy8Wi263C6kkCn78yVBOr1YrcN+FEI1aDWgnL8qiKKIo0inDUhqcXJodBLWaECKPIiEkZRrT9KLkQeCbli0ViTbhYrkC9SrLMqr09WaRZrFuMMex2+22bZs4elB6UEp1bc/djOyVhXBWpJSCvVrFD/Lh/hVP90xE2UfOa1s54ftuolXcVk/HdoRvuWd8Wv06Ie/hsZj+KchYsIN893RVGYwivsrkqCiEELnU4NeANQCGoeV5zjSq6yzPM0II3prJZILTsdnuQPOCqaui7Pb+4f7+/umz52DrJlluWLbjeYZlx2k2WazBUUziGEms3SwFJ3kp80JEcdZ1g3qzk+e5UGw6X91ev/7iiy++/OJnQpR5NiSEuJ6NIieOI8uykiRqt5uaVnMcCz4rX3/99RdffGFZzuXlZa1Wq9Xqs9ns9vaWl9KyrCzL8rwkhJydndVqtQspCCGgYqH9gw/feDxGQQtaAiGk8sYsigLJQSn1m9/8BoM+KSWiC1wT
4Jyz2Sx0Xdtz11F4dXONqToMrW3bprqW87Ioinqr6fieJR0cbZiqwTYC6Q5L2tBjdzod5BmkO13X4YVBCHFdB+a8GNxjBcV6vQagHQRBEATD4ZDueELAq1Eu4joHQwOnDxIsShV8Z71eY1hvGIbpGtvnpQRTRxBuJsNxHKdJkqxWK7iSel7gecHx8TGkLZpWSkmKgqdpmiSJZZh3dzemafZ6HU3ThCgtK8DT4TopiuKdoHj/WkfGQwrGT6uwqQZ0VetVpcHqd8lusKGUAk/74zjcj1X6/o3tbcWgO1gVH4xS22ExBoDV3aqCGeHH9gh71Y/gXxA49VazU6v7ux0MinNuMD3LCiEEY0RImec559uRzOnpaTWuhJsgLtYwDK+vr4uiqLT5YRje3t4mQuV5HkUJVvMFvgtvON+v9QaHtm03m23dsC6vrh3HSdJ8NlvEcRoE9eUyMQyrLPPAr0u5bDYbm81G11mWMdPSizIryiyMllGUcC6//fZ7vJM3N7dHR0cvX74scn5+fj6bzZ49e6Hr+tdff/306dOrq6s/+/mfAx2BIhmwBJrA2WzW7XZXqxVAdlTX9Xq93x8sl2vX9U3TnkwmcZzatlur1ZbLtW27nEuMCjabTavV0XWGQR/Ga47jAFdUSsF/5P7+Ho1co9EYDAb/8A//ALwEHSbAHlhoA2vB+I4xBifbyiQSgQ1b/jiOoYTG1QvcH4c4YwysQ9ScOHcajQZqXcijq9k69ltRStEWWpbFiwKUzlajicISlA8ACmma2pbrezVG9fUqLHJu6BI4X5HzshCM6rblCh4nWcZLaZnOQb/NeXF8fHx2djadTgkhvV5PKQU+g6YZWZa96wk/uGHgCDCtovBUmGTV++3DoWxvTZrYmawR8iNL7ck/AdLsp+L9tIkEhTtru2lEdf+9BnI7VK2IDghCsnNcZ4wFfuC6LlFss9kkSaKUYIz4vp9mSbfbNgxtPp/ned5sNoWorVZL23azLNtsInhDYQh7eHg8nc7AgW63u6ZpL5frh4eH+/uh3WpKJYnGYOkHVDCJYi4EnJdMx4aVxtHRkWmaF4+fZGk+HA7DMHRdO0kylCio5TzPUUToul6W+WazEqKMo/Lp06f/6T/9p6Ojo08+efbLX/5yMBgsF+snT55Mp1Pbds/OzoQQnU7v/n6ICQqiCxplAIkAaWDVA55KRUi6ubmBI3273ca2pru7O0JIlmWwloIrJFLrkydP1utlURR4h5GmhsMhSsTz83NMvdFcgVuLqMiy7MWLF8jb8N3BpY8PFIxcYCeW51awGWaA+m6zJ5b+Yh0qIgRJDyU3RrWYeEGSbxgGki02i0HQAONguLA6jsMcB50LWFmu61q6gak1MOosy+qtpuU6buCXZWk6NgwN0iKnlJqOLSnJ16tC8Have3p6moSLI3L4/MWzWq0Gyr5tm8PhOIqiNM23xAD2Tyh0q7lq9Zcg8KrSFLfq4q5iUu7WZf9oq7Zf+lYISjV8V7t9GBVBZ7++rQbxamc3UCVkRF1VGFdKQmNHWEPZY5omY3rFzzYMo1Zr+r5r26ams7u7u9lsYpo63EQsy3r06HyxXEKryjRNRzI1zZLz6WxWlmXJ+ZvLS1wBmqa1O52UKGAMvXYHezl1XYcIUEnZ7XYtyxoNR0dHRzjLeZE+PDycnB6jPoEUkGnEcWy4d2n6tlPFNFzXbaXUJ598outGkiTPnj0zdGswGPR6vWZzuw5+NJr4vo+MPR6PNU07Pj7mnB8eHqLRHY/HFxcXEA21Wq0gCO7u7hhjZ2dnYRh+++23X3311a9+9Stwu9FxgVr5i1/84r/8l/+CGq/dbj99+vQffvX3P7x6KYQ4OBzc3t4u16v+4GA4HD5+/Hi+XHz7/XcwtJdE3dzccCkqwAxLCBuNxtdffw1COUrlKIrg6YLrYbVaYWZQXWOO4ziO0+12R6MR0jigF+BMkF/gbrDrLori4OAABHfDMDDJABseFF/M8S3LWi6XkvPqaux0OrZtx5sQ8NXBwYHneS+vr4HxgIQAnhC2hYL2be6WTzw8PHiel4QL7DyE3x+OOayHkZLgk9Wrsm0fayG7RZxqbz1TFXXVpU/3tE4fcFCrUlDX31lC7d8qygvbGT0h0ioLtu2P6DYIcfjt/6LaOW1VQV7FIdmtcEPxjKkreuU0Te3dqleiWJIknBeGoUFU1u93fd9XSiyXy9l8slgsmOGnWbwFZhWXigpZckGYRhzDUkqlaQJALwgCz3eSTagRZdmWptGiyJSUghdECccGPT8URV4UmWVZjmMVRZGXSiohhLAsQ9cZITLPc6m4aRqEENe1a0EDiqeiKG3btq26aVr1egMDZSWpruvn5+dxnMKJdDKZIUv4vg/8EHDFeDx2XReNVr/fv7+/f/LkCdIFpdR1Xfg49XuDNMk77V6R80a9dXf7UBYCUn3H9r75+ttGvRVFUeDXKdHubh8ANn7//feAW3BSwEgbgQSSDQR7nHOdaVtfiSjK8/zy8tLzvDzPgyDYLzvhkorsJ3d0PHNnKC6lHA6HcGEDlQekeSklDPhQtgghcH/gLmzPnQxtBQorPEKe55vNRgkBRBqyj8ViUfeDFy9edDqd6XT6/fffp1JCuQJADj4jjuM8f/4cPCdMPtCkbDYbKqRju2EYhpvIHwRxHE8ms1arZZqmECUyjY46hO7tY8INLWyVFfczpNqDOvfjs4rJ/TaSvg+T7j8O28mXqiJWSkn2EiBjTGdbK0Q0gWoPVsXHY+jvrdPY/wJ1PNktkMHfqBlarVYLgiDP09lsRpk6PDzsdvtRtGk2m7quj8fD29trSPXare5wugR+QCkFXoLXg4sGY1/MiMCnSXgOL1NdI4LnmqbZlm5bupIlkazI4jTeaJT5Xt2xDcHzdrtZlnkUbRgjnHPTNKXihmE4jpskcRjGyOqr5cZxnIP+sWW5o9EI3iT1WhOoBucSAzHOpWVZ4/H45PjUMu0oipIixmu+uLiADljTtE8//fSHH36QUqLAhr5OKfXzn//8P/z7vwmC4Pr6+uTkZDabHRwcYFMSfnE4HGLzKex97+/vj04PwjBEhun3+5jZ4AM9ODjAe4L3DR+H57iAQFEn393dffHFF1jHjSUTuHzb7Taq0DLcgBzLGAPfAL9br9fR5UZRBLkTKjKgKZD8wamNcw6QtkIT0MVBSwEjRry8oigYIYwxz/NmsxmMZ0VRXl9fj0YjUAKn0ZrLMk4jRZVuaqUo1uGKUvry9Q+LxeLg4IBznpf505MnMKFsB+2rq2vP8zwvuLt7aLVaoAdjc6iUklLyDkKsqj7y0U3uqW/3I6qKq/1Wbf/R9mLwwyCs/pftyTXkznimyrraLgirmlnuiarU+zPG/adA74H7VHItTdMcxyWE5Hme5yWl1DQsnIWmaUqhcpEzxgaDI0pVHMej0aikWiFKYAA5L1Spcl6UZen7fpqlWZlLqryaDxXMcrM6OOxiKJfGCXzB6nXfcxwlcs/yAPB4ttPtNDRN40XCGNE0WpSJpmlZlqVZjEQNvB4qONM04zilVMuygjE
jiiLH9uq1JhSx0+ncNNNut0uplqbpxcXFv/+//sPp6enDwwNyICEExAAU+dPpFPzm2WzW6XRev36dpulXX3318uXLn/3sZ9hDdnt7OxgMLi8vUaRh9LxYLD7//HNM4WazGei1myhah2G7210ul5999tnvfve7TqeTJEkpxJNaTQjBpVyu10VRCKUYY8vlErAKOrfDw0O4GIK9SfaafDB70Inx3RK4CqTAnQG9oItG1gUj6uTkBCxZpRREjNVcChAdzvTqmK44DLphQFjo2g6K4SLNMJxEVeV4VpIkcRzqus55IUSZprEQYrXa9Pvds7OT9XpNCGm3m2VZClESwqIo0TTDcTzDsGzb9TwvjmNwoTWN6jgP9svRqiLVdosH99OO2iN279/UbnXZfqzuEto7ff0HcYi7VUNIPAXbC6r9+Kx4HkwjVeZUO6Lcx48ssfltZyXEOQdIIIRYrdaaxjzPA2oSx/E334xqtdrZ2Umr3aSULhaz7Uei63GWCQGwDuXi9lxaLGa+73e77aIopORZlpimfnDQcx3DdUzDYJEs8jQqsrjI4jmlWZa16g1d1xkRTFN5FqORKHgphPB9H0pzsEySJLm9vYVSybIspYjjuFlaXl3eCCHKsgRhoCiKxWIVx3G3203THOif4BISHpRYEEys1+vf//73Uspnz55Np9M//elP8GI7PT29urqSUvZ6vT/84Q9//dd/Xa83wjAMgtpyuQqC2suXr0zTyrK80+k+PDw0Gs3FYkEpOzgYcC7a7c5o/sAYg6k7shN8lnADalJuV6b4EONiv5Jt22/evHn69Onr16/xV4MPTSlF8GCwCcEbgAnozvjOhx+5AT0hCmBkNnz6i8VivV6juEWWQ39eYaoIPLzCrRs3pYhMPN3Wlp9S2J/iTHS1gItSCJEX2Wq1WiwXSik/8A1T/+LLnxiGMRoPXdedziaz2SwIgvls9eyTT29urocP45/97GeT6ehv/uY/npwcpWlKqaKU6oam890u4v3g2W8FP7iy99PX/lhivw/c7y1xl4/jtoq6Kly3ma36YrcTuzoXEW8aofs6pqps3u8VKaUVQRQHKt2hpgCpHcfRNLbZRFJyTaemaaNdnE75cDicTEaaprVajU6ns7m/1TRq21ajUUdvqfa8DxDe1ahNCFHka1GmjHDLoLXAwTdFWWpUGCY1NGqZZuDbVAleprqmJGGe55imaVkGIbamab7vr9cb7EKIo1RoSgrSaraB7DHGCSFY/cMYm80WSZJMp/Plcn1/f1+vNW5v7uI4/u1vf1u5qkkpMXBP0xTerZBKdLvdy8tLmI7e3t4eHBy8evXq6PC0Xq/f3d1Bjlh13d1uN4qi4XAI1OHTTz/F2gnginCnHw6Hg8FAKeV5HkibWFmFwg+DuLrnNxqNXq8npYQrB8brKEaAl8IgGPQxw7ExpvM8D9TfsixBJ8IRg5as3W4TQvCeYFwppbQsCzxVDAP3QT6xkwgi9tATmaZJdzjCcrmEV28SRuCyttvto6OjSMZZlsAhBqsXESxxXGRZsl7nWZa0280sS1arxdHRIDC6lLI4zjgvZrM5ti9j3CpEKZXgvNCrUrOKjf2Scr/LYjsK2C66WDUZp7uWshoP0t0KKOxV2o9Dtev65O5W9Zn7ob6NaiU/yNJq74by9YOKVO3kF/tfsz32D3qDLEvX6zUhstvttlotXWdYcmJaxtOnTz3PWywW9/cPzVbdtHQwv+D9WhQFZQqe0GEY2natVqtRSqF/PRz0MBqp1zyn33EtG8+ra5rneWVZapShjISt7XSxwI4h09Q1TfM8Wq830zQ9OztjVC/ykWnaUsKOhwVBrdWqgRht23at1iCE4MxOkuTu7i5uJLBCiqKk0+mAbw0/MiBbr1696vf7x8fH4BALIU5PTzVN+/bbbx8/fnx6evr61dXJycloNOp2u5PJBLp1nDIQKDYajXq9Dihhs9ngcgfoP51Oge4sFgsArQB1N5sN+iVCyOvXr1ut1mq1siyr1Woxxo6Pjw3DuLq6QrQj9hD8mqYhSeD4wyxRSokDAkGodoRKCDJ0XUdaVkrBOA85E3g4alqMMQnZgpOgByL7Sc4p7BiZttls1ut1meUYGBZFMZ/P7Y5LGFWUaIberNWwgCQMwxeffSqEIIxePH3S6/UmkwnTtYKX3UH/V7/6VRyl9UZwfX3t+95PPv+y5HmjQUteFEUhRKmrvSZwP7lVViIfRKDck+3t94E4gcT71r2apknJP+4G9zPtByXlfqRJKSnZPqn2fhn8QVjul69qx06uCqFqyJnnucHMyWSilHJdeGyZnPO3b99KyZvNervdlkpMJhOUT48fPxaGBPcCcjgkZCjoGGNYU053JCld1y3LmM3WWKnVbjZZs2nbtqGzfr/HCF0ul5pGHceiSmlUBUEgKfU8z3EsZGNeSinJbDYbDA6VpIZhtVqtNM2jKJpN52XBV6sZDAsty1ou16vVqtlsUqoVRQEkxvO8yWTiuj6qvk6ng9IUVOl6vV6r1ZbL5U9+8pObm5uzs7PVaoXtOghXaO1fvHjBGPvzP//zt2/fws+bEALwSQhxcnIyHo/hcQZ+DMpLLFGB5ngymUDdZ9v2er0ej8fYREC46Ha7eK9OTk4g2AUbDgwkDKWra6bYbelCqKOVgk9hdZ6iV6w+esQ29O+YBoOOj6MEJylGi+ghAfYg0kRZSikNw1gvV2C9ebaDswAfcU3W6vX68fEh4C7Hsdrt5uHhwcXFxT/8wz/EcdhsNpUShEjXtYUor69vs6woioJR/fDwiBCZpqlusG63W5R5nqecFzoj78AVolS1kpIxRhQhUqndltzthIARxoihM01jlJKdA7LEGSaFYJQwDcinFFwwppSs0uzWk5BSvchzzrkopVKEMqoIQfwKUe7uLCmlbHcQGKYuJaRVOqWalIpSzbIMy9yipsjA1UvFdVNhsFXQztVCM5iu66lJJRdaqlFFiFKddotoWpJnBtN8r24aBo7M2+HIcWxDmb4VNBq1m5u3i8nE85xwOTdNkxIpi8IwDCaUo9u1Vi1M1GK5iSKv223rVmuxTDebKef8aCEoVWkad9ttL+Cz2SwMw4uL8/WsEJlp2y0iCSFEqnK1Xraa/X7v6OWb10meTRdzTdc55ylPf/PH3xz3HjebTV4aSZxYVhmGcRynus4opVmeUkrjWClF1+sw8Os31w/CzAkhPI5qtdomDLE4xTTN2XjyyflFFEUqLx3NiONsdvuA6mtYlFLKFy9eOKbVbjTLLBdCUKk0QmXJN8vV6P7h7Oxstlq/ffv24snjMisF5SUvDMls3aKCdJutgpc13725ueFKJkl8MOj+8PLber3++PTpfLECNfzw9Ozq6mqxCYlp6q4rDH2RxIs44qbBTDNK00WWbjZZu9VKBVmPp4zIZrfNebFYz4LA8wOPMb0sSyXKOAw5557j8FKu16HneZLQyWxONaZpmm7bm8WCJKlhWAYzsjxdTGdpnDBKbc2hnOlUI4LkccYY0zQ9S1JCiG2bcRzXW/X1ZmV5ZpSFTmAfN05N0z
RNXZmiLAuRlkUeG6Y2vrqxOLOFxtf5fJktZ0uSsEW2SuwsTRbPPnlEKT0adF5fXWJJa5rxu7vher02bOs9G/wPUg39CDVVSmm69kHKIu/jqx/c5HvqPlkVjR/fk+4ATLKfgfe+L/eIcrhpO2M5+b4/AH1/Vdv+ayt4YRoGChK05kQqqsibN28Gg8HR4NC1bJD62Y45lWVZksRhtGaMRNFGKkEIwQ7XsizzvIiiaLMOy1JQSq/v5rA5gTGmKHPPc9vt9uvXr13X1nVGlZJSYCqdJFEey16v5/n+ZrOZzWZ5nhNGCSGz2YxSCvQlC0Nt56mz2Wy4KBaLhRClplEuSs9zOJeO43ieo+umFCrLCkIk0wjnfJ2sALe6rqvT7e4NSilXPNqEYRhmaco51y1TpyxNU6y2XywWIGSjkNN1/U9/+hMMPuDF5HkeiuowDFer1fPnz9EXzefz0WhUFNlP/+yrzWal6zolClgoHg0FC+d8Op0KotbrNYhEURTphYGGsBRC7LyYXMdJkiSM1qZptJt113XDsExTURQbnWmdTs+2bZ1qQqg4jrMsq9eabGe2r5Siu4uz1WrVg1qz3hBFORmP5/O5qRuNej3KC0IIVsphz4phGAAlgcNVHanc0ULyPF+vl3mRCsF1ndm2bVr67e1tkXPGGCGsLDlKMCHE/f09hkCe511eXo7Go/V6XavVNF0H0qPiSK8u2Y+jaL9Pq26apn8cXeTHVLy7R94vGj8Ecj74dUJI9fjboPqon9wHfiilSr3jzexHb1UY/+gNVSVThDFmaLppGKfHx41Gox7UiNgqXNHWu0wry5xSWpZFlke9Xq9/0PN9dz6fw1xos4miMM6yjHMZx3Gz2Tw9PY3j+Pr6Stf1i/OzIPDjOA58F4WoZRiEkHa73el0kiRq1dxGo0EYXa7KNIuLogBFcj6faqbhunaWJZtwBT69ptMo3mR5VJa5rjMuSl1nvu+GYShE6bq279eEQENueJ6nFA2oAyJ+u9EsyxJiBc/zDEODwQTwCeibZuMJDAthG0F3usosy+r1Okhb0CL88MMP6/X69PQ0Lyz4poEvJimZz+eWZaBN5ZxrpkHIdgkPBjloW2azWc7LrQu9xsqy5HIbeJQxfbevU0k9iqKSSwAn6HEcRzMMjRACyC0XinOJk/FwcFwIjh5V13XdNHCsO45T5sVisSBCSik9z9Moy7KMUJZlWavV8jwP86Fms1mWOfg0OGcJISAhQv+dJEkYrtMsJkRBDacbrCxLx/YsyypLAfQI12ezWU/TdDR66Pf76FaiaJNlie1aeZEWZSYJeaeikHvaPPpjZOvqKlfVhqb3udcfRCP5qHOrMuGPRu8O5Hx/pr/7AueKlFLf80fEUfcBZrOfqPcjEzfH8ZSQZSEIEZZu2Jbte55j2Y16Swo5ny+JkIwxx/ZM06REwwXturYQXEtou93EygF4eOMlwZU4z0shRK1Zm8/n0KQ/efLk5GiQpsnDw8PpydFms2o2641aLU0TsPLv729FTlar1SYKF4uFEMLzPKZpgM5BQFU7U0ac7ppmMEZqdQ8+vJZldLqtkueGodfrQbvV4Vyapm6atu8FRVFIs+k4DlOk3+8XRcHLMssypogeMEkUdPpJHNdqNThDj6fbZbpKKczicNmBsYCxPiZ7Sqmjo6PLqzcwaA2CoN1u2p776aefFkX26tUry9oaLidJjGbBdd3VbOuqzDnXiUKGzNJE13WhJCZD2m6Up2nachHZtu35jmkaZVnmeapptN/v27Ypyh28KYlSFNm+3W7PV0tQW7XdjVLqed48SZWQpqa7rlv3A0boerVq1htYCgScSQgB1CeOY6UEZBaKSJgjcs43m1jtRiZSCil5nudpxo+Ojjw3UErN50sEuev6pmkahgZyDzgPGlGw1apU/PhL33k9qT2zQ7mnJ9q/oNXeHELtaWc/ntTv56UPHqHSN+GLHccCheg/qf2tIrZ60h8Nsx99AdW/uq6XosAnVLXpQojb21vLsmpBUPcDx3E0uh0KB41GnuemaQjBhLQJYbPp/P7hDodFnpe6rteCOiFsPp9HUTKbzf70pz+ZpvlXf/WvPv/882izyrL0k08+MQ1ts1mBdbFabVf8LZfL5XRTlmWUxOBV27bNNA0Tv8V6ZVlWrVbbLj/NMkJIo1lHv+04FtOk73uNRhBFHmh3zVatKErOC00zdEPjgkZh6Fq2tUN316sVCDegpzRqddM0geADAe72DyGJwOZnOBTZtv369WssbPj+++8B8FiWNZ/PgyD46quv7u7uiqJYrVbpZMwYg3oY9bzjeXmelWUZx/FyudTY1qW3LEuLUThE5GWBTAjyJytLzJMQA4ZhGAw7DAspuWW5QRAQInWmGYbluq7BdIxJYQ8HcaOiW6ojHgrvj2PZiotRkuimddDrd9ptzXaw/oBSCm6NpmmrFbSgGSFktVoZpg7YJs/zohCQL2k6LcsiyxLOuSJC7NkimqZZrzVbrY5lWff3t6Zpnp+fg/h+d/0Af7rZbIZ0bQIWr/LeB6lp/ztV37VfuO5f3PvX/X4mrNo2pKvqV7YVKduOK8AoIoRIyX80m+E+lFJEDtkxCgR/xwncZ7Ttv7zq9VBK8zynioCJjbMt5lwJWfeDIAi63a5nO2VZlnmBzifw67oeoyINgsC2zSSJyrJUSt+y4DXD87yyFErRsiyHk9nTp08vLi5OTk5gvwXyVFnmURTd3ZVpHE8m4zzPYeFcUSsxSatgOtu26YZSSn3fN20Lf5RhGI3ANwxjE65KnmkaMQzNMLVa3WeMUaaUErpOLVsnhOg6sW29VA74N3e3t0EQVPF2dXWFckujjDEWJ8l8sciLoh7UFSHM0AVROS8Vo4Iow7YGx0eW60hKxEyto3ATR77vx1l6fDg4Pz9Hoo6izc3dLfDYVqcNnga86vI8x96IVs1HUk3TlGisMghkjIlSglEkd7Y3ZVlSaggh8lwqJT3Hct26bZu6ruV5qtFttlBU4SQFkTCKIs6547mWZUmiIO/gnM9mM40ynbIsy3rtTrvdXq9WScnBnmWMBUGA2nu3zaLcSrQMDZm/KAoiNSkl51wqUu78RXXD2mw2vJR0T91eOa+BhwBHvNF0QghZLBb4TMEl+JFNvep9+UKVx9ie8pB+dCM/dkPe2itK6Xs/2aKldL9sALxRvQz6PjIEEgww5eqVkL1JoNwZNLI9tu7+Lc9zyzAxscBRZJmmqRtYHhSGYbhaZ1lGFcGmWOygTrPYMLRefytTwN4VpEqN6egGcW1JKQ8PD1ut1u3tbRRFrm0qJVerlefaq9VyvSZ3NzeTydhxnMPDQzQJtm0pSgjBDpOMy7Isy3ojIBqBkGIdbhzHwqTLNs1ms0moKIrMD1yAPY5jWpYlpSKU27bjc1dKAtM+O/dbrdZ1koxGI2h2EBjQlUopDcuoNepCiIeHB875YhMppeB1DbMWMJIZY3d3d47jgJSH+rPZbN7d3ZmmuVgsIOFrNpuu615cXMyXC6UEpdSyLNu2MF7HS8LUIcuynJdZloVh6AU+AgaHI
9/NnKWUGqOmaeIiIoRYlmUYuuCllNKyTEJIFEVMUV03cW0gfmzbBrM0ThMM2+bz+fX1tUZZs1a3TBO7D5bLZcpFxSVA9wiZhVKqLEs0GsCNQBAnAh6+GWVKKckYMQwDLw8jE8ZYWRZhGCpFXdetNwKpOBeFH7iWbfT73eVyKURpWb6UEhY2Otnr3HD8VHXmfrrbuyn60W0/sD6oJBnb92WqQrBafL/NYFVB+67Zw6Gye0AB26UdAKMq55vdC2B73HH5Y7todq+NUaoRwrIsL9JM13XP8VutNiEsjtMkSm3TdF3XsdADyDhPUBcZhmZbrlRcCGVbTpLGnHPBpdBFlmVRlOR5rjHj/Pz85ubm+vq63W7quj56uIvjSNd12zK63Xa3286SRNc1ZF2lxFTMCCEFL/Xd4vWCl2EYmqbZ7XZt28bmUwzWpJSM0UajLmUpZNnptCglJc8NUzs6HsRxbJmOZdlKqTTJGaO6rsWLzcHBQb1eL9LMcZwsTjCIOzo6wjkCSU4YhqPRKMuyVkeDDblSCqRwpRS0WoZhtFotCGHBWU/T9OjoCL5SUsp6PSiK4rvvvqvVfMKo41igXEdRiHIagcEYg+kLdPd5ntca9eoz0jQNkyV87kmcOo5j2UaSxGEYahr1PEdJURR5zQ9c18+yTJZC103GWDW+r1RskEowxsAp7Xd7zVo93GygQSnLkmk6ukHOueNaUso4jqXcajLozsZB7HbpaNVSRJ0ahg4DTugYqxCQUirJKaWe52V5hM2thBCsKpBSDgYD04YHQr5er/8/mJNilez1C2gAAAAASUVORK5CYII=\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "execution_count": 10 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "My5Z6p7pQ3UC" + }, + "source": [ + "### Support new dataset\n", + "\n", + "We have two methods to support a new dataset in MMClassification.\n", + "\n", + "The simplest method is to re-organize the new dataset as the format of a dataset supported officially (like ImageNet). And the other method is to create a new dataset class, and more details are in [the docs](https://mmclassification.readthedocs.io/en/latest/tutorials/new_dataset.html#an-example-of-customized-dataset).\n", + "\n", + "In this tutorial, for convenience, we have re-organized the cats & dogs dataset as the format of ImageNet." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P335gKt9Q5U-" + }, + "source": [ + "Besides image files, it also includes the following files:\n", + "\n", + "1. A class list file, and every line is a class.\n", + " ```\n", + " cats\n", + " dogs\n", + " ```\n", + "2. Training / Validation / Test annotation files. And every line includes an file path and the corresponding label.\n", + "\n", + " ```\n", + " ...\n", + " cats/cat.3769.jpg 0\n", + " cats/cat.882.jpg 0\n", + " ...\n", + " dogs/dog.3881.jpg 1\n", + " dogs/dog.3377.jpg 1\n", + " ...\n", + " ```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BafQ7ijBQ8N_" + }, + "source": [ + "## Train and test model with shell commands\n", + "\n", + "You can use shell commands provided by MMClassification to do the following task:\n", + "\n", + "1. Train a model\n", + "2. Fine-tune a model\n", + "3. Test a model\n", + "4. Inference with a model\n", + "\n", + "The procedure to train and fine-tune a model is almost the same. And we have introduced how to do these tasks with Python API. In the following, we will introduce how to do them with shell commands. More details are in [the docs](https://mmclassification.readthedocs.io/en/latest/getting_started.html)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Aj5cGMihURrZ" + }, + "source": [ + "### Fine-tune a model\n", + "\n", + "The steps to fine-tune a model are as below:\n", + "\n", + "1. Prepare the custom dataset.\n", + "2. Create a new config file of the task.\n", + "3. Start training task by shell commands.\n", + "\n", + "We have finished the first step, and then we will introduce the next two steps.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WBBV3aG79ZH5" + }, + "source": [ + "#### Create a new config file\n", + "\n", + "To reuse the common parts of different config files, we support inheriting multiple base config files. 
For example, to fine-tune a MobileNetV2 model, the new config file can create the model's basic structure by inheriting `configs/_base_/models/mobilenet_v2_1x.py`.\n", + "\n", + "According to the common practice, we usually split whole configs into four parts: model, dataset, learning rate schedule, and runtime. Configs of each part are saved into one file in the `configs/_base_` folder. \n", + "\n", + "And then, when creating a new config file, we can select some parts to inherit and only override some different configs.\n", + "\n", + "The head of the final config file should look like:\n", + "\n", + "```python\n", + "_base_ = [\n", + " '../_base_/models/mobilenet_v2_1x.py',\n", + " '../_base_/schedules/imagenet_bs256_epochstep.py',\n", + " '../_base_/default_runtime.py'\n", + "]\n", + "```\n", + "\n", + "Here, because the dataset configs are almost brand new, we don't need to inherit any dataset config file.\n", + "\n", + "Of course, you can also create an entire config file without inheritance, like `configs/mnist/lenet5.py`." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_UV3oBhLRG8B" + }, + "source": [ + "After that, we only need to set the part of configs we want to modify, because the inherited configs will be merged to the final configs." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "8QfM4qBeWIQh", + "outputId": "a826f0cf-2633-4a9a-e49b-4be7eca5e3a0" + }, + "source": [ + "%%writefile configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py\n", + "_base_ = [\n", + " '../_base_/models/mobilenet_v2_1x.py',\n", + " '../_base_/schedules/imagenet_bs256_epochstep.py',\n", + " '../_base_/default_runtime.py'\n", + "]\n", + "\n", + "# ---- Model configs ----\n", + "# Here we use init_cfg to load pre-trained model.\n", + "# In this way, only the weights of backbone will be loaded.\n", + "# And modify the num_classes to match our dataset.\n", + "\n", + "model = dict(\n", + " backbone=dict(\n", + " init_cfg = dict(\n", + " type='Pretrained', \n", + " checkpoint='https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth', \n", + " prefix='backbone')\n", + " ),\n", + " head=dict(\n", + " num_classes=2,\n", + " topk = (1, )\n", + " ))\n", + "\n", + "# ---- Dataset configs ----\n", + "# We re-organized the dataset as ImageNet format.\n", + "dataset_type = 'ImageNet'\n", + "img_norm_cfg = dict(\n", + " mean=[124.508, 116.050, 106.438],\n", + " std=[58.577, 57.310, 57.437],\n", + " to_rgb=True)\n", + "train_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='RandomResizedCrop', size=224, backend='pillow'),\n", + " dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),\n", + " dict(type='Normalize', **img_norm_cfg),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='ToTensor', keys=['gt_label']),\n", + " dict(type='Collect', keys=['img', 'gt_label'])\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', size=(256, -1), backend='pillow'),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(type='Normalize', **img_norm_cfg),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='Collect', keys=['img'])\n", + "]\n", + "data = dict(\n", + " # Specify the batch size and number of workers in each GPU.\n", + " # Please configure it according to your hardware.\n", + " samples_per_gpu=32,\n", + " workers_per_gpu=2,\n", + " # Specify the training dataset 
type and path\n", + " train=dict(\n", + " type=dataset_type,\n", + " data_prefix='data/cats_dogs_dataset/training_set/training_set',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=train_pipeline),\n", + " # Specify the validation dataset type and path\n", + " val=dict(\n", + " type=dataset_type,\n", + " data_prefix='data/cats_dogs_dataset/val_set/val_set',\n", + " ann_file='data/cats_dogs_dataset/val.txt',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=test_pipeline),\n", + " # Specify the test dataset type and path\n", + " test=dict(\n", + " type=dataset_type,\n", + " data_prefix='data/cats_dogs_dataset/test_set/test_set',\n", + " ann_file='data/cats_dogs_dataset/test.txt',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=test_pipeline))\n", + "\n", + "# Specify evaluation metric\n", + "evaluation = dict(metric='accuracy', metric_options={'topk': (1, )})\n", + "\n", + "# ---- Schedule configs ----\n", + "# Usually in fine-tuning, we need a smaller learning rate and less training epochs.\n", + "# Specify the learning rate\n", + "optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)\n", + "optimizer_config = dict(grad_clip=None)\n", + "# Set the learning rate scheduler\n", + "lr_config = dict(policy='step', step=1, gamma=0.1)\n", + "runner = dict(type='EpochBasedRunner', max_epochs=2)\n", + "\n", + "# ---- Runtime configs ----\n", + "# Output training log every 10 iterations.\n", + "log_config = dict(interval=10)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Writing configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "chLX7bL3RP2F" + }, + "source": [ + "#### Use shell command to start fine-tuning\n", + "\n", + "We use `tools/train.py` to fine-tune a model:\n", + "\n", + "```shell\n", + "python tools/train.py ${CONFIG_FILE} [optional arguments]\n", + "```\n", + "\n", + "And if you want to specify another folder to save log files and checkpoints, use the argument `--work_dir ${YOUR_WORK_DIR}`.\n", + "\n", + "If you want to ensure reproducibility, use the argument `--seed ${SEED}` to set a random seed. And the argument `--deterministic` can enable the deterministic option in cuDNN to further ensure reproducibility, but it may reduce the training speed.\n", + "\n", + "Here we use the `MobileNetV2` model and cats & dogs dataset as an example:\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "gbFGR4SBRUYN", + "outputId": "3412752c-433f-43c5-82a9-3495d1cd797a" + }, + "source": [ + "!python tools/train.py \\\n", + " configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py \\\n", + " --work-dir work_dirs/mobilenet_v2_1x_cats_dogs \\\n", + " --seed 0 \\\n", + " --deterministic" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/transformer.py:28: UserWarning: Fail to import ``MultiScaleDeformableAttention`` from ``mmcv.ops.multi_scale_deform_attn``, You should install ``mmcv-full`` if you need this module. \n", + " warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/local/lib/python3.7/dist-packages/yaml/constructor.py:126: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n", + " if not isinstance(key, collections.Hashable):\n", + "2021-10-21 02:48:20,030 - mmcls - INFO - Environment info:\n", + "------------------------------------------------------------\n", + "sys.platform: linux\n", + "Python: 3.7.12 (default, Sep 10 2021, 00:21:48) [GCC 7.5.0]\n", + "CUDA available: True\n", + "GPU 0: Tesla K80\n", + "CUDA_HOME: /usr/local/cuda\n", + "NVCC: Build cuda_11.1.TC455_06.29190527_0\n", + "GCC: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0\n", + "PyTorch: 1.9.0+cu111\n", + "PyTorch compiling details: PyTorch built with:\n", + " - GCC 7.3\n", + " - C++ Version: 201402\n", + " - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n", + " - Intel(R) MKL-DNN v2.1.2 (Git Hash 98be7e8afa711dc9b66c8ff3504129cb82013cdb)\n", + " - OpenMP 201511 (a.k.a. OpenMP 4.5)\n", + " - NNPACK is enabled\n", + " - CPU capability usage: AVX2\n", + " - CUDA Runtime 11.1\n", + " - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n", + " - CuDNN 8.0.5\n", + " - Magma 2.5.2\n", + " - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.9.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \n", + "\n", + "TorchVision: 0.10.0+cu111\n", + "OpenCV: 4.1.2\n", + "MMCV: 1.3.15\n", + "MMCV Compiler: n/a\n", + "MMCV CUDA Compiler: n/a\n", + "MMClassification: 0.16.0+77a3834\n", + "------------------------------------------------------------\n", + "\n", + "2021-10-21 02:48:20,030 - mmcls 
- INFO - Distributed training: False\n", + "2021-10-21 02:48:20,688 - mmcls - INFO - Config:\n", + "model = dict(\n", + " type='ImageClassifier',\n", + " backbone=dict(\n", + " type='MobileNetV2',\n", + " widen_factor=1.0,\n", + " init_cfg=dict(\n", + " type='Pretrained',\n", + " checkpoint=\n", + " 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth',\n", + " prefix='backbone')),\n", + " neck=dict(type='GlobalAveragePooling'),\n", + " head=dict(\n", + " type='LinearClsHead',\n", + " num_classes=2,\n", + " in_channels=1280,\n", + " loss=dict(type='CrossEntropyLoss', loss_weight=1.0),\n", + " topk=(1, )))\n", + "optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)\n", + "optimizer_config = dict(grad_clip=None)\n", + "lr_config = dict(policy='step', gamma=0.1, step=1)\n", + "runner = dict(type='EpochBasedRunner', max_epochs=2)\n", + "checkpoint_config = dict(interval=1)\n", + "log_config = dict(interval=10, hooks=[dict(type='TextLoggerHook')])\n", + "dist_params = dict(backend='nccl')\n", + "log_level = 'INFO'\n", + "load_from = None\n", + "resume_from = None\n", + "workflow = [('train', 1)]\n", + "dataset_type = 'ImageNet'\n", + "img_norm_cfg = dict(\n", + " mean=[124.508, 116.05, 106.438], std=[58.577, 57.31, 57.437], to_rgb=True)\n", + "train_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='RandomResizedCrop', size=224, backend='pillow'),\n", + " dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='ToTensor', keys=['gt_label']),\n", + " dict(type='Collect', keys=['img', 'gt_label'])\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', size=(256, -1), backend='pillow'),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='Collect', keys=['img'])\n", + "]\n", + "data = dict(\n", + " samples_per_gpu=32,\n", + " workers_per_gpu=2,\n", + " train=dict(\n", + " type='ImageNet',\n", + " data_prefix='data/cats_dogs_dataset/training_set/training_set',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=[\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='RandomResizedCrop', size=224, backend='pillow'),\n", + " dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='ToTensor', keys=['gt_label']),\n", + " dict(type='Collect', keys=['img', 'gt_label'])\n", + " ]),\n", + " val=dict(\n", + " type='ImageNet',\n", + " data_prefix='data/cats_dogs_dataset/val_set/val_set',\n", + " ann_file='data/cats_dogs_dataset/val.txt',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=[\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', size=(256, -1), backend='pillow'),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " 
dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='Collect', keys=['img'])\n", + " ]),\n", + " test=dict(\n", + " type='ImageNet',\n", + " data_prefix='data/cats_dogs_dataset/test_set/test_set',\n", + " ann_file='data/cats_dogs_dataset/test.txt',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=[\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', size=(256, -1), backend='pillow'),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='Collect', keys=['img'])\n", + " ]))\n", + "evaluation = dict(metric='accuracy', metric_options=dict(topk=(1, )))\n", + "work_dir = 'work_dirs/mobilenet_v2_1x_cats_dogs'\n", + "gpu_ids = range(0, 1)\n", + "\n", + "2021-10-21 02:48:20,689 - mmcls - INFO - Set random seed to 0, deterministic: True\n", + "2021-10-21 02:48:20,854 - mmcls - INFO - initialize MobileNetV2 with init_cfg {'type': 'Pretrained', 'checkpoint': 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth', 'prefix': 'backbone'}\n", + "2021-10-21 02:48:20,855 - mmcv - INFO - load backbone in model from: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\n", + "Use load_from_http loader\n", + "Downloading: \"https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\" to /root/.cache/torch/hub/checkpoints/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\n", + "100% 13.5M/13.5M [00:01<00:00, 9.54MB/s]\n", + "2021-10-21 02:48:23,564 - mmcls - INFO - initialize LinearClsHead with init_cfg {'type': 'Normal', 'layer': 'Linear', 'std': 0.01}\n", + "2021-10-21 02:48:38,767 - mmcls - INFO - Start running, host: root@992cc7e7be60, work_dir: /content/mmclassification/work_dirs/mobilenet_v2_1x_cats_dogs\n", + "2021-10-21 02:48:38,767 - mmcls - INFO - Hooks will be executed in the following order:\n", + "before_run:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_train_epoch:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_train_iter:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + " -------------------- \n", + "after_train_iter:\n", + "(ABOVE_NORMAL) OptimizerHook \n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "after_train_epoch:\n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_val_epoch:\n", + "(LOW ) IterTimerHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_val_iter:\n", + "(LOW ) IterTimerHook \n", + " -------------------- \n", + "after_val_iter:\n", + "(LOW ) IterTimerHook \n", + " -------------------- \n", + "after_val_epoch:\n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "2021-10-21 02:48:38,768 - mmcls - INFO - workflow: [('train', 1)], max: 2 epochs\n", + "2021-10-21 02:48:44,261 - mmcls - INFO - Epoch [1][10/201]\tlr: 
5.000e-03, eta: 0:03:29, time: 0.533, data_time: 0.257, memory: 1709, loss: 0.3917\n", + "2021-10-21 02:48:46,950 - mmcls - INFO - Epoch [1][20/201]\tlr: 5.000e-03, eta: 0:02:33, time: 0.269, data_time: 0.019, memory: 1709, loss: 0.3508\n", + "2021-10-21 02:48:49,618 - mmcls - INFO - Epoch [1][30/201]\tlr: 5.000e-03, eta: 0:02:12, time: 0.266, data_time: 0.021, memory: 1709, loss: 0.3955\n", + "2021-10-21 02:48:52,271 - mmcls - INFO - Epoch [1][40/201]\tlr: 5.000e-03, eta: 0:02:00, time: 0.266, data_time: 0.018, memory: 1709, loss: 0.2485\n", + "2021-10-21 02:48:54,984 - mmcls - INFO - Epoch [1][50/201]\tlr: 5.000e-03, eta: 0:01:53, time: 0.272, data_time: 0.019, memory: 1709, loss: 0.4196\n", + "2021-10-21 02:48:57,661 - mmcls - INFO - Epoch [1][60/201]\tlr: 5.000e-03, eta: 0:01:46, time: 0.266, data_time: 0.019, memory: 1709, loss: 0.4994\n", + "2021-10-21 02:49:00,341 - mmcls - INFO - Epoch [1][70/201]\tlr: 5.000e-03, eta: 0:01:41, time: 0.268, data_time: 0.018, memory: 1709, loss: 0.4372\n", + "2021-10-21 02:49:03,035 - mmcls - INFO - Epoch [1][80/201]\tlr: 5.000e-03, eta: 0:01:37, time: 0.270, data_time: 0.019, memory: 1709, loss: 0.3179\n", + "2021-10-21 02:49:05,731 - mmcls - INFO - Epoch [1][90/201]\tlr: 5.000e-03, eta: 0:01:32, time: 0.269, data_time: 0.020, memory: 1709, loss: 0.3175\n", + "2021-10-21 02:49:08,404 - mmcls - INFO - Epoch [1][100/201]\tlr: 5.000e-03, eta: 0:01:29, time: 0.268, data_time: 0.019, memory: 1709, loss: 0.3412\n", + "2021-10-21 02:49:11,106 - mmcls - INFO - Epoch [1][110/201]\tlr: 5.000e-03, eta: 0:01:25, time: 0.270, data_time: 0.016, memory: 1709, loss: 0.2985\n", + "2021-10-21 02:49:13,776 - mmcls - INFO - Epoch [1][120/201]\tlr: 5.000e-03, eta: 0:01:21, time: 0.267, data_time: 0.018, memory: 1709, loss: 0.2778\n", + "2021-10-21 02:49:16,478 - mmcls - INFO - Epoch [1][130/201]\tlr: 5.000e-03, eta: 0:01:18, time: 0.270, data_time: 0.021, memory: 1709, loss: 0.2229\n", + "2021-10-21 02:49:19,130 - mmcls - INFO - Epoch [1][140/201]\tlr: 5.000e-03, eta: 0:01:15, time: 0.266, data_time: 0.018, memory: 1709, loss: 0.2318\n", + "2021-10-21 02:49:21,812 - mmcls - INFO - Epoch [1][150/201]\tlr: 5.000e-03, eta: 0:01:12, time: 0.268, data_time: 0.019, memory: 1709, loss: 0.2333\n", + "2021-10-21 02:49:24,514 - mmcls - INFO - Epoch [1][160/201]\tlr: 5.000e-03, eta: 0:01:08, time: 0.270, data_time: 0.017, memory: 1709, loss: 0.2783\n", + "2021-10-21 02:49:27,184 - mmcls - INFO - Epoch [1][170/201]\tlr: 5.000e-03, eta: 0:01:05, time: 0.267, data_time: 0.017, memory: 1709, loss: 0.2132\n", + "2021-10-21 02:49:29,875 - mmcls - INFO - Epoch [1][180/201]\tlr: 5.000e-03, eta: 0:01:02, time: 0.269, data_time: 0.021, memory: 1709, loss: 0.2096\n", + "2021-10-21 02:49:32,546 - mmcls - INFO - Epoch [1][190/201]\tlr: 5.000e-03, eta: 0:00:59, time: 0.267, data_time: 0.019, memory: 1709, loss: 0.1729\n", + "2021-10-21 02:49:35,200 - mmcls - INFO - Epoch [1][200/201]\tlr: 5.000e-03, eta: 0:00:56, time: 0.265, data_time: 0.017, memory: 1709, loss: 0.1969\n", + "2021-10-21 02:49:35,247 - mmcls - INFO - Saving checkpoint at 1 epochs\n", + "[ ] 0/1601, elapsed: 0s, ETA:[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n", + "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. 
(function pthreadpool)\n", + "[>>] 1601/1601, 173.2 task/s, elapsed: 9s, ETA: 0s2021-10-21 02:49:44,587 - mmcls - INFO - Epoch(val) [1][51]\taccuracy_top-1: 95.6277\n", + "2021-10-21 02:49:49,625 - mmcls - INFO - Epoch [2][10/201]\tlr: 5.000e-04, eta: 0:00:55, time: 0.488, data_time: 0.237, memory: 1709, loss: 0.1764\n", + "2021-10-21 02:49:52,305 - mmcls - INFO - Epoch [2][20/201]\tlr: 5.000e-04, eta: 0:00:52, time: 0.270, data_time: 0.018, memory: 1709, loss: 0.1514\n", + "2021-10-21 02:49:55,060 - mmcls - INFO - Epoch [2][30/201]\tlr: 5.000e-04, eta: 0:00:49, time: 0.275, data_time: 0.016, memory: 1709, loss: 0.1395\n", + "2021-10-21 02:49:57,696 - mmcls - INFO - Epoch [2][40/201]\tlr: 5.000e-04, eta: 0:00:46, time: 0.262, data_time: 0.016, memory: 1709, loss: 0.1508\n", + "2021-10-21 02:50:00,430 - mmcls - INFO - Epoch [2][50/201]\tlr: 5.000e-04, eta: 0:00:43, time: 0.273, data_time: 0.018, memory: 1709, loss: 0.1771\n", + "2021-10-21 02:50:03,099 - mmcls - INFO - Epoch [2][60/201]\tlr: 5.000e-04, eta: 0:00:40, time: 0.268, data_time: 0.020, memory: 1709, loss: 0.1438\n", + "2021-10-21 02:50:05,745 - mmcls - INFO - Epoch [2][70/201]\tlr: 5.000e-04, eta: 0:00:37, time: 0.264, data_time: 0.018, memory: 1709, loss: 0.1321\n", + "2021-10-21 02:50:08,385 - mmcls - INFO - Epoch [2][80/201]\tlr: 5.000e-04, eta: 0:00:34, time: 0.264, data_time: 0.020, memory: 1709, loss: 0.1629\n", + "2021-10-21 02:50:11,025 - mmcls - INFO - Epoch [2][90/201]\tlr: 5.000e-04, eta: 0:00:31, time: 0.264, data_time: 0.019, memory: 1709, loss: 0.1574\n", + "2021-10-21 02:50:13,685 - mmcls - INFO - Epoch [2][100/201]\tlr: 5.000e-04, eta: 0:00:28, time: 0.266, data_time: 0.019, memory: 1709, loss: 0.1220\n", + "2021-10-21 02:50:16,329 - mmcls - INFO - Epoch [2][110/201]\tlr: 5.000e-04, eta: 0:00:25, time: 0.264, data_time: 0.021, memory: 1709, loss: 0.2550\n", + "2021-10-21 02:50:19,007 - mmcls - INFO - Epoch [2][120/201]\tlr: 5.000e-04, eta: 0:00:22, time: 0.268, data_time: 0.020, memory: 1709, loss: 0.1528\n", + "2021-10-21 02:50:21,750 - mmcls - INFO - Epoch [2][130/201]\tlr: 5.000e-04, eta: 0:00:20, time: 0.275, data_time: 0.021, memory: 1709, loss: 0.1223\n", + "2021-10-21 02:50:24,392 - mmcls - INFO - Epoch [2][140/201]\tlr: 5.000e-04, eta: 0:00:17, time: 0.264, data_time: 0.017, memory: 1709, loss: 0.1734\n", + "2021-10-21 02:50:27,049 - mmcls - INFO - Epoch [2][150/201]\tlr: 5.000e-04, eta: 0:00:14, time: 0.265, data_time: 0.020, memory: 1709, loss: 0.1527\n", + "2021-10-21 02:50:29,681 - mmcls - INFO - Epoch [2][160/201]\tlr: 5.000e-04, eta: 0:00:11, time: 0.265, data_time: 0.019, memory: 1709, loss: 0.1910\n", + "2021-10-21 02:50:32,318 - mmcls - INFO - Epoch [2][170/201]\tlr: 5.000e-04, eta: 0:00:08, time: 0.262, data_time: 0.017, memory: 1709, loss: 0.1922\n", + "2021-10-21 02:50:34,955 - mmcls - INFO - Epoch [2][180/201]\tlr: 5.000e-04, eta: 0:00:05, time: 0.264, data_time: 0.021, memory: 1709, loss: 0.1760\n", + "2021-10-21 02:50:37,681 - mmcls - INFO - Epoch [2][190/201]\tlr: 5.000e-04, eta: 0:00:03, time: 0.273, data_time: 0.019, memory: 1709, loss: 0.1739\n", + "2021-10-21 02:50:40,408 - mmcls - INFO - Epoch [2][200/201]\tlr: 5.000e-04, eta: 0:00:00, time: 0.272, data_time: 0.018, memory: 1709, loss: 0.1654\n", + "2021-10-21 02:50:40,443 - mmcls - INFO - Saving checkpoint at 2 epochs\n", + "[>>] 1601/1601, 170.9 task/s, elapsed: 9s, ETA: 0s2021-10-21 02:50:49,905 - mmcls - INFO - Epoch(val) [2][51]\taccuracy_top-1: 97.5016\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"m_ZSkwB5Rflb" + }, + "source": [ + "### Test a model\n", + "\n", + "We use `tools/test.py` to test a model:\n", + "\n", + "```\n", + "python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]\n", + "```\n", + "\n", + "Here are some optional arguments:\n", + "\n", + "- `--metrics`: The evaluation metrics. The available choices are defined in the dataset class. Usually, you can specify \"accuracy\" to metric a single-label classification task.\n", + "- `--metric-options`: The extra options passed to metrics. For example, by specifying \"topk=1\", the \"accuracy\" metric will calculate top-1 accuracy.\n", + "\n", + "More details are in the help docs of `tools/test.py`.\n", + "\n", + "Here we still use the `MobileNetV2` model we fine-tuned." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Zd4EM00QRtyc", + "outputId": "8788264f-83df-4419-9748-822c20538aa7" + }, + "source": [ + "!python tools/test.py configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py work_dirs/mobilenet_v2_1x_cats_dogs/latest.pth --metrics accuracy --metric-options topk=1" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/transformer.py:28: UserWarning: Fail to import ``MultiScaleDeformableAttention`` from ``mmcv.ops.multi_scale_deform_attn``, You should install ``mmcv-full`` if you need this module. \n", + " warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/local/lib/python3.7/dist-packages/yaml/constructor.py:126: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n", + " if not isinstance(key, collections.Hashable):\n", + "Use load_from_local loader\n", + "[>>] 2023/2023, 168.4 task/s, elapsed: 12s, ETA: 0s\n", + "accuracy : 97.38\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IwThQkjaRwF7" + }, + "source": [ + "### Inference with a model\n", + "\n", + "Sometimes we want to save the inference results on a dataset, just use the command below.\n", + "\n", + "```shell\n", + "python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}]\n", + "```\n", + "\n", + "Arguments:\n", + "\n", + "- `--out`: The output filename. If not specified, the inference results won't be saved. It supports json, pkl and yml.\n", + "- `--out-items`: What items will be saved. 
You can choose some of \"class_scores\", \"pred_score\", \"pred_label\" and \"pred_class\", or use \"all\" to select all of them.\n", + "\n", + "These items mean:\n", + "- `class_scores`: The scores of all classes for each sample.\n", + "- `pred_score`: The score of the predicted class for each sample.\n", + "- `pred_label`: The label index of the predicted class for each sample. It is an integer.\n", + "- `pred_class`: The name of the predicted class for each sample. The class names are read from the model; if they are not saved with the model, ImageNet class names are used.\n", + "- `all`: Save all the items above.\n", + "- `none`: Don't save any of the items above. The output file also stores the evaluation metrics besides the inference results, so you can use this option to save only the metrics and reduce the output file size.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "6GVKloPHR0Fn", + "outputId": "4f4cd414-1be6-4e17-985f-6449b8a3d9e8" + }, + "source": [ + "!python tools/test.py configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py work_dirs/mobilenet_v2_1x_cats_dogs/latest.pth --out results.json --out-items all" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/transformer.py:28: UserWarning: Fail to import ``MultiScaleDeformableAttention`` from ``mmcv.ops.multi_scale_deform_attn``, You should install ``mmcv-full`` if you need this module. \n", + " warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/local/lib/python3.7/dist-packages/yaml/constructor.py:126: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n", + " if not isinstance(key, collections.Hashable):\n", + "Use load_from_local loader\n", + "[>>] 2023/2023, 170.6 task/s, elapsed: 12s, ETA: 0s\n", + "dumping results to results.json\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "G0NJI1s6e3FD" + }, + "source": [ + "All inference results are saved in the output json file, and you can read it."
+ ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 370 + }, + "id": "HJdJeLUafFhX", + "outputId": "7614d546-7c2f-4bfd-ce63-4c2a5228620f" + }, + "source": [ + "import json\n", + "\n", + "with open(\"./results.json\", 'r') as f:\n", + " results = json.load(f)\n", + "\n", + "# Show the inference result of the first image.\n", + "print('class_scores:', results['class_scores'][0])\n", + "print('pred_class:', results['pred_class'][0])\n", + "print('pred_label:', results['pred_label'][0])\n", + "print('pred_score:', results['pred_score'][0])\n", + "Image.open('data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg')" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "class_scores: [1.0, 5.184615757547473e-13]\n", + "pred_class: cats\n", + "pred_label: 0\n", + "pred_score: 1.0\n" + ] + }, + { + "output_type": "execute_result", + "data": { + "image/png": [base64-encoded PNG data omitted; the displayed output is the image data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg]
308mk59//vn1a1f29/dnZ2dBC+5M7AgAQ8lyg2DMuVLKsiypZLvd9n1fQ1IIIaZpJxIJYrAgCJq9VqvTzuTypm1/7fr1drt7/+GDVCbdbrcJIQuLc8eVQ8/v61Q+8LyIc8YY2LC2fiaTL3zyyScJN/Xnf/7njuM8fMi+8/bbX3755e3bt/VgRzqTOjo6Yoxls/lUKgUKa7VGFDU5F9rkZCTHA+ae52ksnoi5ZVm9TieKoitXrvgDb2Njo16vr6+vb25uJhKJdDpVq9WymdTa2srR8QEPw9u3b+cyaSQqk0kxZsRh0Ov1NDxFjlwZ0qGn4rGEEdGRQKREKYrmaDoWxuNIBHXYqUbsvSAlRZ16IygllRqDuV+0Q3iBZkYppWmvh05M5zmgtf+U0hSGUkoQIIfg1UhGL4a1iKgZ93BEezN+L4sZlFClJBccERkhehyeaVuNQQkuKdHESFobiRCIoqjVahmOEfFYaRJ8AogUNT1Tww0MEluGySwaM8GDMFJCgFIxWkgJVWBIVKAgirmI49hESwhBpFQEqaJEUJSoADSZhSIgpezLIJRhSENuSzUs2VAlUAghFScAkkhiAiqCiDFILlAxAwAVZ0P2MaUMBQwJGfI3QsxDI2lxk2ZS6ZNKZffexqtOaqY4MwgqMfcUDEwmHCeRdC2I/Ppep1mrYugDU0HkH1LpR8FJvdoddNdnX3rj6kthGFqWEYahi4QQIv1gZXbG9/1z587UasuWbTgW7XRajDEnXdIZVxAEIY8VoOmYhmlGUdw8qYdBnMlkTMceDAbKdNNJM4o7UspYhhgrEUnfD6mJ03NTnX57Zn768y9vrq6snT27qlR878Htc+fO3bn/pDi1sLVzlEjl+GGVGvbmxj3LMouFstf1ZCSDjmcqtjq7kMlktra29maevvXWa/v7+7nc2ffff991Enu7+7lcDoEc7B9ms1nP6wPw2blpKaVtk8qBX3ByKJWbzkZR1Ot2FhYWhBB60LnRaDze2Dg4OFhcXDxzZv3BgwdvvHI5DEPGWDHr7O3tdVv1TqMxMzNTyOdN09zf3+9141TKHnjQ6/cBIGW7hmmFvkg66d1KM5LScGxghkA1gnAKANDVQAWYNp1hECT0fAGICYV6ZFQhxiBRKUBAg47y8FEtlKBCIqUMeDzkaEXUJU/tXB0eAAAqBDFkk0dESUFJ1N4PJRJFgRJGGaWUD6ujihCkhOKQz1tSZgqphgzoQJQCRGKYTMSEK0REYAoQQ83ARYArSUChRSg1CUWpqzLIlOkKEfcDLE5lGLUdaszM5I6PD00CBBVIJSVnRI3GiHAYZGvahZCGQBlVILlgCoExRACCfFgAVqjhsXq70o4ClJIqHivLgtIDY6fKWZPpNbwAkNdFAiklyOHQPQISRNu2O50ORTI/O2cyFgXhYDDodDqFrG0yI47j6lG11Wim01mTMs/zas2aAhGLuD1ouUnHck0pIQ7iVqsVRZFpMsZYtVq1LGN9fX3g9S3L0FQOepoJAHTXvuedSClBEcMwtHQHYyYzjMHAY4xFJBoMBoPA17z3ngdCeolEwrZtpVQYxFLKRCKRzuYfP358clJ7urU9VZ4ulUr5fPGoUm23u4jqz//8z4Mg+M53vhOG4f7+fiaTTiaTURjt7Ow4jgMAmUxGTw8JIZ4+fbqwsJDP5/WU4BdffJHL5TRoZqzFa5qmZkPsdDr5fF6HCdq0MpmM5tGYn5/d2trKZNJhGL7zzju6ZmOYtNvt6t5MPp+/ePGi4ziJRKLVaiWTyX6/L4QwDOPp06d62LLf79vUVEBj+az3oBSAUoKPWmugBAqFIBEIKGoZ45EaHA3R6wx/vDYmazDP6WHqFHJysP2Zgq86ta5e9Jn6h1Nn+MpXTSafX+V8n6vHjuJ8hkoqMYyNpRAoRRzHxDEJMaTig8EgnXE5571BP45DiRaj1LQppa52/MMLopQCpVIpL/ANQgVlRAEKSQAtUAyJRD3aqIbOXLfxcWiBQiohRDysC3KlFBAE8WwsGl+oL0/eDngB/SClxFH2rpeRrq0VCgXJhWEYjUbDpMyyLIoQR4E3CEzGhGEMvL5jWVEURTwSsTw+rFRODhvtFjNZzQ2+9rU3TdPM5/Nf+9rXKpWjo6Ojp0+fLi7OE0Lq9bplWb3uIJNN6aKo53mGYTCGoBSPoijiQnSFkgSZ49iI2Ov1okDLmqsgiAgF3w/jWDDGTNtKJtJKqVarxZhx69YtZljZbP7mzc/jOHZdd2tru1Y5OHfu3Jkz5/b29m7duqWLlvV6PZvNtnZblNJCIaflPnU7u91ub25uuq57/vz5hYWF//gf/3J1dS6RSEgpXdcFgH6/r0ag0CiKHMNtt9uGYQCA41oas55IOHEcnz9/fmFhXncLhRD9QbdYLEZhjEAs087nCvNzC1EUxRH3Bv7hwREhJJvJFYvFzSdbiUQik85aps3DiAv0hYpCTVmNKJXUdJ2jf7dUoMFDkgCdaK/jaEJicq2f+lnh6U18WMWbYIWZNIZnrx1jw0eHHKNtEEZzR89Fs6fW3lcaIQ6fpvSQudJRMwiltMqT7n4rRYYam0HgOa4lJQz8vmnOJNJJLoJkJikUF0hjUEJJxiiFMRUfpcQweBQHUWQZkdQkcFygAg7KoFQpJRVyQD1nBEgkICjgursghRAi5oJzTZyjG7Ry8iZqKxrDYU8ZIQDoWFxKqYQkCpDptUd7vd5UseS6brfbtQxjdXVVKVWr1aKgL3kgeZxKJW3DNCiTXKCCfC4fC14qlWbmZir16vsf/NKwnRs3blT2W81m03XdXq/3H//jf7As67vf+07MI9/3DYNKCXEcm6aJQDOZDCKGnFuWOaRwRilEHARBGHPHcdxEghDieQMQyjCZkhCGoSYy1/zblBiIpN/vNdqtXLZAiTE1M21Zzu3bd13XLZSKqVTqpWsXFhcX6/X6zZufuq5brZ5wzpeXV3K5XBjE2Ww2nU53u/3BYJDNZovFot9ra97RTqdzfHy8uDilDe/o6Gh6erpQKBiGsbOzU6lUtHShpnzWk5PJlJtMJrWHZ4xNTZU/+uijdrvd6bZ++MMf+r7vuu7du3dc152bmwOA3d1d27ZbrdbMzIxhGLlcjjFWKpX29vbCMKzX6/V6fX5mMYy4F8ZBEPKR96OAxDSlnqlVAqQkIBEVUaMOxATv49gNvmiHv8UjfeWD4/KMmrA/OS60vhCCfeXJxyY9Xpww6T9RV1Oe1YqUUgpUHId6uAIkB0IZAjJGAPt8YLKEYVIE7rhmKu10Or5tm1JFiMAl5xFnupA8pMgkqBhDISTnoeBIiSb5UkLGsaKCAIBBUKtAoxadGkEKtPgv5zyWQg9c6Q80NjwYhR9f6QnH38VI5FEpBYCEENM0HdOKwlD/yfO8MJl0XVdzqDE47rTaMQ/TSVdYdrfTC/2IEmLbtvC9KOS9bn/Q8/xB0O13T45OEM1MJnNycuK69je+8Y16vb63t6er9gBSCNFs1h3H8X2/kC8FQcBVqOnPhDDthEtKRAgRhHG1WiUUDKCWZQgQqDV1UCmJmXQ
2mUz6vl9vNDwvYIxZphOG8fe//7u9QX/76W4+V+ScV45P/u7f/bvZrCWEcF338uXLmUzmZz/7hZRydnbWNM1sLm0apmmaUvJEwikWi4h46+Yn2Ww2juNWq/XZZ5+9/vrrt2/fbjabcRzrMFVKWa1WdcGTc86U4bpuvpBljOmAQjN8P3782DDY/fv3F5fmV1dXLctqt9sbGxuri3NBECAlQRTWarWFhYWt7adra2u26+QK+W63G/H44uVL9Xr90aNHzXZrtrzo+0FvEAQR17V9RKTUoIRqIxwiREGhkkCGGrfjiFRXvMlIqfeUHSqlJt3Zi1v2i5HUUAVswgKHdDQTbnDMNqaeT4VOLcWvPLQPRNSjvajGqqAIBNRIcEwAGNqxJSyWSdjZXJrzMGkbQcIZdJQEnk6nhIijKJKSM30j2LCUq6cYKackFpwqxgiVBLiUsYypIkopm1Ht2obTgRp5M2QclAJ0p0oB6h3omd7VJFsMvBCaT+5Do/uCDIfcaq7tpFMpPWmacFxKqe/7QohyudxvVYLA8we9hJ1GRCUFYySdzj68/8B2HS1wmS/m3nrrbSEEEMwVih9//LFhGMlMcmtnmxk0mUzuHx0uLi4iEiF4z/MTSPq+V56ZBkqIZemxa87RMaxEIgEAnh9IyYWScSwy2RTzWRBEQsSJhGMwWwjVbnd93+92+57nWaaTzBiUklqt4QfB9vaOUkAIZcwIw+j48GRpaWV5cbHX6dTr9QvnzqwsrymlqtUTxUWjXY3DkBGyvLhYKBQoqjiO9/f3NfeEBqweHx+7rouIvu+3Wi3DMIrFolJKY9+ogY6bUEpFUSSECENfCNFoNFZWlqenp7/7vW/rucqbN2/Ozs5OT5cvXry4sbFxcnKiuQX0Z9/e3kbEVCplGEaz2eScN5tNx3GWlpY6/X670xv4MScUkVFKpUICugg3ZtYTQxEuAAXPEDB6sZGRAsxXrn5EeM7njEK+cX6oU0o1noGaOIEcmxZqYMyzQaoxKHRc/Jw0wvEqVS+EwRpPrWsiSikgCkEiISajqKQAKcWwrA9EKiESFiYoLk2VOQ/KuUw2YaUdFovY8/pcxpHBAkqY4oJQpusxSkiiaW0IiQVnUhBGBUUpUSklQCpQko/kYxUhisCowzMckFdSIShyOsqf9OnjD3bK4+vjGRGGJj0ekeFls9lapRoEgSYRlFI2m01E7DUroe8pJYWMURLDMKjFXNd97bU3ylNTfW9wcHyQyqSn56b9MKhWq9Vq9cyZMysrK5XqcbfX0fNNb7/9dqVSKRbzQohMJhOGISEsCCLfD/PFnBBCy8ozgDhmnMtur1ssFaonNc4jx7GQ0jBsGAZNpVJhAJqs2rbdZCJNiQapqHK5/OEHH6WzmXq93un0CCGFYvHosPIP/uvvPXr06Cc/+dtWq5NIJBhjCkSpNLW9vZ1IJNrtdrvdTCRSmkV7MBg4jqMT1zAM19bWdnd3c7mclFKPIwVBMDc3l8/nNRY0lUo1TxrJZDIIAsdxEglHr7mTk+r58+e+uPXZ7u6ulPLatWtB4BkGTSRy29u79Xqz1WpxLl966aW9vYPV1fWDg4N8Pm8Y1uzs7ObmZqVysrHxeHp6ut/3+u2o2x/ECu1ExrAMU0rOhZRcCiLgWb1EgKSAEsAwhkp4MAr8yGiQYtIRjfdoTa82GR/pHV9nuXKC83d4ktEz5cgf6tB0bIE6QRq/1+T7jteknNDoPuUeNP+FVEJKhagIaOJ3hQSI0MMHEpFQVBQpEGVEPvp+OZlQyio6iUQpv1DORlGwtb1JDMYYAaRMfzxUoIQcf1SdCxlSMgCFQyQaaDj5KO7XDNNqJIY+Cut1QqrUBK/2ZAj6lY7+xahgeH/lSOxOSkTUG3O/39cI42az+fjxY4c1TWradooAiQOp6Xx83//yyztLy8tA8LByHIvIuu/EIvJ9f2554f6Dewpkr9cbDAaFQmFra+uzzz6bm5srFEqUGrOz80+fPiWENJvNTqdTni5JKTnnYegLpQzDCIKg0+kkEgnPG/R6/UwmAyg1gXQqlapWDoVQmUwul8tFcSyEQsR8qVgslI6PKp1O5/iktjg3ryPGfD7/8ccfr62tcc41ZuXLL+/85V/+5Y0bNxBxaqocBEG73fa8frfbjmNRrVYdx5mfn7dtW88EahP6kz/5E7036fTPNM10Or2wsFAul282f51MJoPAy+ezU1NThJAwDI6PjzYePWi1Wrlcbm5u5uzZ9ZdffunRo0c7OzuXL166cu1as9m0LKs0NfWXP/7xH/zBH+wfHi4sLdWbTTeZ9MPQcpx8sTgzN/dkayvoq74XUMOw3LTFjEjEhEshuVIUFMhngSFq1D+ZoJ2ftMYXw8JxxDR2euMXjgOr8XnGT9NObXyucXlGDhEycGr9jU8++f0rF+TwDSgOC01KKAVIFAClSGTMFQwLGZSCQRmjlFFqBT0a+ElABYQFfirpJm3XR1goFFOpVL5YzBXyzGQGSBXL+NmUECihJGGUSxGLoS6v1MpVBMUQ1QcKlVBC89cZ1NAyQ2PwK4x2qTAMNMxlTETNRjP1pzY/bWymYegXMsYUF7pfIiw7CALGWBiGhVxeKVWpVHTfGVmQKKQSdiL0QwnCMAzBYTAYJJNJy7LmFubPXbzQ6bUr9Vq313YTiUIpf9G40Gw3opCvrq0RQoIwNE0zm8sxw8jmcpzzMIrCiB8dHSmltra2lpaWNDtg7fg4lUppcSjd5u50uoyx7qAvuGKMHR0d5XI5x3Fu3bqlgammab/88st7hwfz8/Obm5ux4EnH3Ts8KGRzuXy+2+1+42tvSCkf3n+Qy+Webm51262pUrl6XHnzza/dvHmz1+3EYTQ3N5fP5hBR8rjf7dRqtWQyWSwWC4WCdo+zs7OtVuvkpAYAtVrt8PDw8uXL+/v7zWZzZWUpnU5vb29r4M7GxkPHcV5//fVur23bdrvdHAwG77333vT0tG2b8/Ozu7u7hUJBA1Dv3Lmzurp6eHjIGPvlL39548aNzc1N7Yf1uKNpmiHjEhQjJJlMGo7VHQyiwEfDAqJ1FIfsLABACCWEcB6ON2K9u+kKzTieHKc2oxXyjKl6aAUjLnN9Er2c1KigYE5KHbKhsLbQmB4lY8GFbvNRggQVQBzHmtVBozJ0GVCfajJMG+8IABq7+Ez0WykhJDimRZXk2olqaCqAAignk8Vc0avWssX0oFrvVY8VUYRCp9v27Vb3pH7i6hbFRGMAEWEiVdP3YrwJAUAshh+eTmwPoAC0HAs+M0KtPCh4NDknppTiL8g+njrGD46dp5Sy3++blElmBEHQ7/cNOmwnmrYDQPwo7HZ6/b5HwKCESQmpdFohHFaOJYhUNrO4NH9cYQ8ebex+tGua5szMjOM4A68XRVEunzl/7uJ7772nlAqCwLKsfD4fxzwMo9nZ2e2dxzs7O77vSwnnzp3jnLtustVpM8a63V61WjUMQ8/Laf/T6wa+P7h27YplOSe1WiqVOjo62NvbY4x1Op3eoI+ocunM9PR0Kp3O5XJPnz7NZDKOa62sLh
0eHk5NTW1vb6fT2R//+C8RaTabLZUs3ZozTTOO47Nnz56cnOhKxvz8PCHkV7/6lW3bP/zhD//Df/gPnufdvn377bffbjabFy5caDQat27d+r3f+735+fmDg/16vaYj3jDyCSFRFLRaLULI9HT50qULT548effdd7/zre/HsVCK/+3f/uT69euLi8uffvop5zyKol/96sNr1661Wh0p5RtvvPXBBx8YhpHJJk3LMUwrlU4wK8EVUMsTinb6fVBICRUAOKSSHaoyTq6u8RobpyHjHG/8tMnlNz5OvWq86Ws6DAnPXJkcucSxb4SJ7sUYJ6BPKCfULOD5MHXiykeQN5QwbHaAklyqcayIksRc24vf7ymoxGGjwoSIQh4qEEjBdR2khBiMPVMjmVBoGQ4R6TMKPeSnUD1ru+unCS06P7oR4/muYYgPwyvWbzH+AOMRaT00Of54k592+OCERI4uvJqJJCIGQeB5XiaV1ucMY8GCUErZHXj9vmdS03UThmml02lmmhGPu91+q9+2XZsrmc1np2YLx8fHSolEIqs33Vqt9knrk2vXrsVxPD09PT09s7+/L6XMZrNCiNdff9M0Te0NkBqtVmt3d7fZas/NzcWCx7FApBqaYBjW9HTq+OhOqTRFKTVN1ut3hIwvX768f3TIOWcGDcMgCMJUyoiioNuTVtOYLk7/u3/37374wx8+fvy4VC406q1arUYIyWazr7zyCiHs6dOnOslMJBKBH1YqlV6vt7q6ure3FwTBW2+9VSqVPM978uTJzMzM1tbT1dUVfbWPHj06Pj5++eWX4zjiPC6Xy1EUOY5Tq1cNY35j40GhULh06ZLjWJ1OZ3t7u9lsGobhBf7Tp0/7/f73vve9hYWFf/kv/6WUcmVlJZ/Pc865FLbrlMvljz/+uFgu7e7uEoiEkhxUq9M0LD+MZBgM+kGExCTAFQIhRElUChVIPVMytsBTRjWZhk16ocnvL+7dk1atzW/MKayUUs8HlS+2LtTIbZ4ywnHYDM8fQxFCAKDaLCUgEqWpuBVolI1SHBGFBJC2aVAlY9+LQ+A8kpKjoRhjzW5PgoZGaxaA8dfEgYg6FEClKCLTGk6EEEZ1PKDrJVqM3jAMRqkeb3wmWCO0fvdzNRj8DVpzkzd3sg8LEzID2tQ9zxuSrykVx3HMVawAqWVajuW4yJhQGAtRrdX2Dvbbva6TsIUS2wc7zXatPFu0E66dcPOlYnmmXJouLa+uLiwtWa5zcHwElGzv7X1x+0tmmZWT+mdffBnG4t7dB81Ge3dnP5vJVyrVYrGUyWTffPNrrVYnDONMOpdJ5xgzlVJSQjKZvnjxolLi4sXztVp1MOjlcrm/fe9dpcSjRw/b7SYXUSLhZjJp0zQIwSgKH9y9C0K8/sqrv/e7P3jr9Tc2H2/MzcwsLSysLC3kM1mv1/X7A5Rq0O15vT4BVS6XlVJTU1O5XO6TTz7Z3d1dXFyklH7xxRfnzp2bmZm+fPlyu93OZDL9fr/f76+sLH/55Ze7u7vpdDoIgkazVq1Wq9XjqampXq/XaNQ453qoP4yCZCqRTqevXbu2sLCwvb398ccf37hx45/9s3/26NGjR48eaSkFwzB+9rOfTU1N6XBUobRt23EtxqhhUDdhJl3boIDAAQVBhagQFRAgFBV5NkQ76eLGv45Tu8mN+5TVvWiHX/mgUkqOSjIS1Fda4IsvP+WBX3xQ52uKDLWHQNdTRpaJRKECxQXnXPBIcO6Hnhf4/cGg3+/7vh9FkYwlCBnHMY9iHnEZxWycmD5ngUoZlFIkQ93tkUKbAhUCII4/7bArTYDoiUSlNwPts4etwmcfY3IG7Ld/VP1C/TydOTi2rZSKoogD6gY0pTSKIsclgNSyXWY4zPSCQRBFkR8G7V631+vZrrW4tlyem0qXM7GIFMqjo4NSqXT9+vU4jh8/fsyoeebMmWKxvLe3t7OzUy6X0+l05fik1+tNTU0dHx8nEknGjCAIT05q29vbYRC3Wi2pqFIYBjGjNueSEGbbbuCHnXb3pFZdXll+uPGAMnL9+rVKpcp5FIQegMzlMrlCNplMAwDn3LCMKA5+57vfHAwG/9P/9H/9gz/4g2r1pFwuf+tb39ra2vY878GDBxpAp3Owk5OT2dnZl199rdVqdbtdDVe4devW6upqKpX6+te/ns1mL168WK1WX3311W63++qrr968efPhw4eVSuXq1auIeHi0Pzc3p2n8X3755Vqt2u22wzBMphJKKc3e/2jjSbVa/Z3f+R0p5c2bN4+Pjzvt3o2XXgGA2knDYNY3v/nN7333d/7Fv/gXf/RHf8Q5/9M/+wvbti3LMkzDtE0kVEgZxGbPjxA4IAUAJIgKFCGEYORF8FyKNTx0G3OyoADDwd/nGlovms2p3sZw7WmdUKXUhGOctMCxzx1XMSayUJh0g6feV4JC1GzVz10YSoGEEiSCgE7DJFBUwKlCjexUSohYKo6xIv5wbSMiAmUSQZ2yfl0XIVQTglIClKBWMJVK4uiTK6VQKkEBFQBRRM+FaaiMeBbWa7nsyQ/82+/pZAygE1SlFOdcgzxQAUUCAHEcE0Icx2l26iFXsUTXdCzblQq5kkqK5ZWVmIe9Qa/dbZkJY2Fplhqs3W4tLy8jYqVy1Gi0jo6OTNMcDAZhGO7u7jqOW61WNx4+LpfLu7u7tVpteXn1zNpKv+/Ztru3d1AqToVhnM8XgyBwnWSz3YrjjpTSdt1UMl2v1yuVE6XE0dGB4zjb20ez8zMvzVz/27/92zfeeD2K4lqt1un2pZQHBweEkHOL55XCL7/8Us/pPnjwoFarr66uPn78+MmTrenp6Ww2u5Zd29jY8H3/zJk1zwsMw/jkk0/CMPz88887nc6rr766s7ODqIdg4M6dO7qo67ru/fv3HcfR25brusmUi0TNzc2VSiVK8enTTc0ymkwmDw4O9H04f/58IpH44P2by8vLvu9nMplvf/vbBwcHn3/+udYDTSaTV69e/fzzz3u93j/5J//kww8/DIJAKREEXhj6hBmG71FKvTAOQ58RVIRJkJqJHgklFJEimTC2SV832bQY/wkm5hjG1nLK3U3+ipqzdIRYBt0h1M8ZmZ18Hs42aXjjtx6Xdk6tRqWUBKEpEIdXq7mShk+SOhHTrwSpCKAwKTEoMSmlhEkqJVeSA0AUhwQoAhClmPbaI2zZkDVRCYlIpZIUUQkJSCghUilQQCYUP8c2o/eaYf9FKqUlowARUVI6uaPgC63CF7+f/tgjVSpBqMkMRlkcx/1+nzGWSqWqTT8II2/gu27SNkzJVcwlF4oxli/mcrJwUj/2Q88LvFyykCvm0m7m4cOHW1tbSql0OhuGYeX4SbvdPjw8brc7uVxOcHVwcCClXF8/q6PuZDK5ML9Ur9dNx261WoSwQd+jlBFCbdvu9zwpwbZdQlgcB5ls4sKFCw8ePEink2EY1mqbC4tzrVZramqqVqsxRhgzCYFCoXDjxg3f9xMk+uyzzwghn3/++fz8/DvvvPOf/tN/ImQIZC+Xy/fv3xdCvPTSS/1+PwzjT359M
5PJcM47nc7s7KxSam5u7vHjx7rDWSgU3n777SdPnly6dMl13StXrlQPDxNJ5+DgwDCM5eXlWq3mOFY2m7VtEwBs287mMtlchnNuWZbneUEULi4v7ezs6Lgxl8tlclnGmCYpD6Lw4OhwfX395uefuclE5aRaLBUGfW/gexAJLiUhRAJathHFQqDej4lmTyAEKGPMNMcWOF5y6oXyzOQaGNvk5AuHFjWRWOrXak4mNS40nPZjz2/6XxWUjs37RSMHAAXjEsazqgwAcM4ZstGAMpdINOlTs983DGpajDFKUAFIPbyOgERJUASVGDYJxDODenbgyKehVCA1Rb3USSCllGl6rSEGcOS+pQINHRzJzY1F5ya3txfd4Fc6xsm/djqdfr8fhmEURb1er9FoaAEJN5VExN7Aq9fr1WrtpF47qdfqzcYHH334+Ze3Or32zPzcysqSbdtRFCAqKWUqlZqdnV1aWtLdtoODg2QyKaXM5/MahmLbNgDxfZ8xdv/ewzCIEbFUKnU6HUSqaQVbrZZpmrMz8/l8XodSjJnJZLpSqVQqFe1ker2Onk6o108AwLLM+YW5a9eura6uFot5w6CVylGn0/n617++sbHxwx/+EBGbzWYymVxdXb148eLu7m6tXi0Wi6lUamdn51e/+tXu7vb58+ebzebU1NTa2prulywuLq6srGxsbBwcHPi+3+v1Dg8PLcuKoqjZbO7t7ywuLtq2/fTpUyllr9dJp9NvvfWWlFKL/i4uLmoqxGazGUXRtWvXHj16hIjb29ubm5s//OEPdXr59OnTSqXy/vvva2EcXRZ+4403MpkMM5kQPAxDPxj4vs85Z0z/xxWgUjDKl7T+5BjeGMdRFIVhGARBEATy+UM935Z48RhXccZeC34zGelXL6pRxngq/nrxh8lfXzRLbdrDduXzua5SAkymGOGAoRBBzP0w6Id+3/O4FLEUkeCxkMymBudccK6egV+RmEaslGb4QBnHSGzLkBLCMFaxP747hFJCEVDEikuUwEBRNXkHAcAQz+BpMOEPx7cbJrAIiCgkR4YoFZexjuwpoFRRIulQmwZy4McqwVxUYMcsibbXEUEgTNOMlWh7bde1E4lEPOhGKjYs48mTJ5mT1PnzZw3DCCM/DMOZ2ZVmo7u+vl6pVMIgRqBnzpy5devW1NSURqjk8knAeGY2XywWc7lc9fiwPJ358MMP9/cOX3rlZUpZo94ybbNcLinEhw8fUmok0qlOe+A6yZOTWtBH1yycnFQ6DS9fyLz26svbO1t37ny5Z9FsNjtTSGdT5pnF6ffee69VOajVaiDS5XIxm17kkflf/70/+vjjD4rFYr1e29i4T4h68OAeALEt9969hzPTc/VaY25u4dqVyx988MH8N75RP6lm06mdp1tn19cSjl0oFOon1YO93Rs3brRarU6r2Ww233n769lsNplMzs/OJBNuu9n68IP333777f29vVKpaDCadBN/9Vf/ayaTefTwQaFQuHDx2o2Xrjx48ODv/uiHtVrt//F//79ZltXvtdbX1w8Odvr9fiaT+fLW/ptvvnn50rlf/OIX8aA/l0/P5jI3f30LgU3PLpbKM082d6amptt9r+cHiGgaVHAZiyAOQwPpcysflCKodIRFkIMiqCdkiZJSSGmBBCWEiJWUusmhzdh1Xc0nQ4Y+UIESinNAXajQ4ocjrkAc6q2hUkNaaQCt/kkQlZRIiA7pYcipARp6CQDyuYKlIhyJ5iZVFCUSJTVxBDVYpFSoFJiOZFY0MoG5uK2UORQ+UlRKoutFQUyHjlQR9uIeA8/3EqSUOgETQoRhyMgQdjR2gJNbxW9ycS9+nzTC8QamtPa30uwWMMpch7NWpmnapouIFInv+/v7/Xa7baOpQZK+P8jlcoi00WgwgxSLRcZYOp1OJNxeb2CarN/v375z6wc/nL5z545pmk+fPg2C4M0339RYMMMwFhcX7969q0d46/U6Iu7s7IT+IAiiQqEwP7dYrdfq9cbc7EKxXP71p5+5yWQcx5zLM7PnKDEGg8HOzm4ul6vVahpLoMmplpaWSqVCPp+7ffv2wcHBzZs3y+WpK1eubW5uIuL6mTOVylEun3306FGxlB8MButnlj/44INer5tMJnO5/Orq+p3b9wBgd2+bUfPu3buGYVy9enV7e/vs2bMbGxv9fj+dTj969CibzepPsby8XKlUzp8/3+l0CoXCxsaGUuqb3/zmn/3Z/1IulxljzWbz+vXr1Wrl7t27lNJMJnflyqVCoRDH8dbWlg527t27Z1nWt771rVqt1u/3Hcf53d/93VarpVPoSqXy13/915zz6WLh448/Nk17dXXVYPaDjU0u4Oq1yzv7x6DLLZQKRKEkgCTEGOsNvuh/vvIYgiKV0msDR7yDp+Bm4zPIySI8PkOujQc1EBEmIjIppSbBUBNwGTlRySHquVKqhptP4nX0NZjGs/M/l7gqPUfCQLNFSQVKKoWUapsngMAmDeZUsD52WZMoWzGUCnzWToHny1zwvENXLzyifzjVkx1f9yj6/4pggDGmiw2SiyAIBr1Ou90uZEoLCwsS214Y5Sjxo7DTH8zPz9Zq1bnFhWvXrlqW8Ytf/mxjY2N+fr5QmvrTP/3TTqejad5v377d6/WOj48Hg0G3202lUmEYFgoFvQqnpqZardbc3EKlUuGcX7l8bWFhAQDjOK5Wq91ud2pmxrbcdrsbBAGPvTiOGaNra2vVanVubu74+HB1dfXOnTvFUl6IeGdnWzcAq9WTpaVl23ajKBr0/bt3bwdBUCjmX331VR1nci6FEOfPn9/f32+1WicnJ2EYplLJdruDiIuLi0dHR1NTU7OzszpPS6fTH3zwgeu6UkqNQygUCr1eL5vNRlF0++6dUqn0+PHjge8hZe1uR0r55ptvbm9vOY6TzmYQ0XVdpKxyUj04OLhw/qpuAuVyOb271et1TUizv79///5913VfffVVPZUPABcuXEilUo1G68njbduKzp5bz+aKBwcHQgBoqRWKCMi5Ekqh4AoovGA8z+zheYtSE90LeH4A6tSSm1hvEysKQBdG1W/OgHR/W02IW0yWiPRBX3ihmoiTx4985fUMWd50tUgB6tEjbZCjy2OT55WjyS4cVWxhDB4f9QYHcXzKnE6Z/viejj3eixc3ebyQLo7pZ8a3FWDEpazTibECrmEYvX4/jKJkKhMLPvCDMPQJpaZt+WF0+869k1rt+kvXCsVp09rlQp05e6ZR76VSmZ/85GdKqXw+L4S6ceOVTqfz/vvvV6s13w8tK6CU9vve1tb2yUn96uXLiUTi1he3Dw8P3/7GNwihW5vbJqHlcrlQKBweHBsGPTk5AQDfC7UQfCqVIgQWFxd3d3c3Nzc3t2SxmM/n89///vefPNlUCsIw3tzcLORLCTcVeCSby8zOTi8uLr7/q59HUSBk/Morr6yvr//4xz8OguDBgwe5bEEIUSwWNAHy7OzsnTt3vvvd7yYSienpaULIX/3VX/X7ng7q6vX65uamnsbqdrsEmWMnms3mzZs3v/3tb29sbHQ6rSiK
pqZmFhfnnz59Wq/Xa7WaZVmpVObq1WK3MwCAZrN58eJFpZTOD7WAx+HhYT6fTyaTzWaz0WjMzMwUi8VardZotKSAbDarUbupNC8W8/VmN5ZCciEAFKEAmmRTmuw5rOKpZTPOdMZP0FTOkwtJP2EsAwzPV93hq4zhK+1k0gGMf3j2oEabvGDpGu817pCPsQdj2t9RZ214fjb6IoAKlVA4HPDi2o6Ifs5wExpbnd4ShBBDwPgEu5GcoCHAiSLyqf3gOU/4G27KZA49aYSaoQilAgCCY8eI+gPjCNNgmqZB0bKs0JPNZtt1XdOwm80moZBKJaqV2ptvfC2bS1cqR416k3NeqzWq1dr8/GIqlXrppZeEEAcHB+vr6x9//LHmz/3BD35Qq9VgNFWsFSBefvnlzz777OqV6+vr6/1+/6OPPmo22pTSkpsoFoue5+3sPj2zfk4Auq7baGwLrvr9/vz8/MbGg5dffun/9T//29dee6Xba7/zzts///nPDcOcm5uLY+G6rmMnXDcZBEFD9DmPZmZm7t2/c/Xq1Tt3vkylUoVC7uDgYGpqilJ6eHgMANVKLZ8vFosFXSy5d+/erVu3zp07Rynd29u7cuWKHmLK5/PdbvfBgwfLy8vVarVSqWTTmVar9d3vfL9YLO7vHbbb7W9969ubm48//fRTjYY/f/68RsN1u93Dw0PTcPSM0t7enpaIOTk5oZRWq9U4jsvlsmVZ6XRaA3empqbu37svpVxdWc/ni/Vao/pkM5VKlaZmB37ox5HkoQCiKANAJTiXihFj0ga+cp1M/jze/cdQfr0etBGOPRKMYyiYXJajX/Grl+I4xD3lCSbOdvqQExeMiFrQdtJXP+9RlEkoI9QgI/MhIKUCpCPnotf5RAg6WVNxHEerz47xrPrQd2TS6sbH5B2c9IST8IgXb+5kADzeDsY3YgzNIXq613G0gK5pmvofk85m+t6g1qh3ev0gCgk1FJBqrSaUsp3E9s7ehx/9mhrmN7/13StXX1KAX//612u12q1bt/7sz/7M9/1Lly4RQjKZzPHx8e3bt3d3d3V0qgd55+bmFheWEXF9ff3ixUue55mmqQX9giDo9XqpVGpubs4w6GAwUEoBylar1e22S6XSyclJKpWyLOvq1av5fLHf77/77rudTi8MQ9dJvv32O9lsFgA5jwBgd3f7s88+syzrtddec103CAK9RyDi/Py8aZqFYo4QyOUyL7/8cqfTeeWVV05OTnZ3dzOZjBaEymaza2trZ86c0ZIy2k6azeba2roQsl5vcM63t7fzuWKz2fzlL3+1tnbGMIxGo7W3d3B0dNTtdmemZ+dm5xcWFhBxdnZ2b2/v+Pi4VCodHw+zOwB49OiRhgecO3euVqvt7u7Ozs6+8fpbKysrJycnnj+4fv3q6ury1uYTQiUjoNkBFQiQHAAYQfVVx3jRv7hOxj5AE3xMgkX/i44X7fxFy3nRGvGFr7FmBk7AueI4PmXh44/Dhl/IFBpAGICBaCA4Bht/Md2PmrQrbRLjeeexkxy+8cTN0vHhqTsIL2xyk1c2dqGT/nPcGH1WLAVERErJsBdCIIoiOaLGQAKEEFRCKRVxbrtur9fzPM+yDWoayOjUzNztu3cLpVJ5ZvbguPL+Bx8xxpLJJDWsmXLfdd1yuZzNZm/evGnbtu/7nudls1kNwjRNM5PJCCFM02y326Zp3b17T0q5sLBAkAkVN5vt3d3dleU1LsXq6ioXUa/Xq9Vqi4uLcRzXq82Dg4OZmanNrce6BWKY9MmTJ9/73u98+OGH9Xq9WqlFobx69drq6hqP1Te/8e2NjQ0/GDx+/PiXv/zlxYvnw8jP5Qqu687Pz3/00UeImMsVNIupkPHZMxfff/993Vd4+PDh3/k7f8dxnI8++ujoqFoqlXRLXZdegiBwXffWl3fOnF1rNBrbu7vNdjvi/O79e7NzCzdeflkIsbTsD7xeIplutNqLyytCwe72juM4rVarUCgAQLPZTCQS+Xw+iiLDMObm5nZ2drRKlGEYlmWls/njkyoI6fuDXK6QTCVs28zlU34QIBGubVEpg1hyyRllhmEE8VdLfL74oH5EN4S0M8DRJBSMREVfLOw9t/YAlJI4JM6dOO0Ly1WdjkVPH2R8VvLc/PEp//niIyPrHY24j/i/2bAYiwAw7OZNOmV9oiiKoijSRF3jCtWkacnRyMmk7zp1nDLLU99PeT/9LuPLOAVP1VA1fUlKKS25nkwm2+22TlSklJpVxTTt69evFwqlarVaO6kzaliWk83mE4lUpXLyk5/8NIpiRPKDH/xwdnYum81dvnzl+LiSSCTn5ubjmHue3+32Go0mpaxeb3S7XcZYv+8BkLNnzyYSCcMwfvSjH+Xzec/zhBCPHj3y/QGAvHz5crlcnpoqLS0t1ev1lZWVZrN59uzZTrs3GAz29/c9z9vbPbAsp9vtPXm8NT+3KCUsLs2Hkf/aa6/atlUqlW7fvp3LFtbX1zmXlA7p1bLZdLNZZ4w0m/VHjx5p73ThwgXHccrlsl6pc3PTFy5cuHLlyhtvvPH666+fnJzoIZ1Go1GvNU+q9adbO/Nzi8VikVJ27tz5RqO1tbVtGIZtudev3Ui4qXQqe3h4eHJy8s4775imefbsWT1xn0ql9Kz95uZmuVxeXl7Wqvdnz54lhOzs7Ekp55cW19fX0+lkHIQxj2zb9LwuKuXYLGFbjCBIwRBcyz71r//KZTO5oPUY93j/1UBlc3SM/dL4tF/Zb/zKWEwfQ8D3ZIo4+pmoZ184+hpbhLbb8fqc9OfPfQSJ4y+QGsk++SVRSWaapu/7ajTnPx6cxVH2NeYg1NOAMLIQGHk2bTnPgDzPQ++EeDa4NLYuHE1djKec4NmMswAYQsBjKaSUQBkwMjMz47puIuHGcez1B0EQMAKU0kwms7+/77jWxUvn9e44GPT+5m/+5uWXX3769KlSSvfNs7lMEASlUulw58nW1lapVELEw8ND/T8GgA8++KBYLJbL5b29PQ0Z09JL8zPz3W5fi6483txcXz9bKpW2trZSyUy5XE6nU7du3fpv/tv/9vbt241GLY5DnSvq0cdz587NzMycO3fu019/XDk+mZmesyynWj15tLFhGu7Pf/7LVCrzs5/99Fvf+ub9+/f10Mo//sf/eGNj4xc/f//MmTONRmN9fX15eTmKwlQqyRgtFPL9nqeFEwuFwvT09L/+1/+6VCqFYfjtb3/76tWrpmnu7OxwzqenpweDAec8m82bpu374blz5xBVoVBAoIVCARFN06pWK2trZ/7Nv/njH/3o93d3d/O54ltvvKlXtsb3HR0d2bady+WazWY+n1dK5XK5Xq8npczn87pO+9lnn+3s7OXz2Watvrq6+uTJo0KhBEpkc8lYkUazA0oggSgMCepxgGfH2CrIV024A4DeWOM4HnfFJt3AZGFmHEaN0xwBI5AnwTgWw/MrpZ5TCFZKKQGSjOqd+i10FqfhKEopzrmIYyEEY0yHh8P0bCRPgqPGnhztA8MPJUH7OTkxi6yxCmN7GcJfTkENcKI6qib
KxFJK9lUjHpPbzH/mPqeeP8aP654kQ/0uEoa1Ncjn845lE/Ksk6GUjON44IUntcra2oplGZVKZWpqampqtdVqbWw80DrP8/PzlUrl448+sSzr1VdfzWbylNJGvTWw/bnZBV3Hf3B/QyNUHMdZXzu7uLh47949wzDeevPtJ48egSJrq2ds237waKNcLqdSqV/+6v1ioXzn3t2rV6/atrWzu/3Sjevdbvfk5CRUMaX4ta99Lebh3/zNu3fv3p2ZmWk22oyxIIja7a5SYNvu/Pxit9u1TDtbSObzWc/r/+Ef/uHNmze1AMbly5c554lE4o033vj0009937vx8vXt7e3dve1SYVGb0MnJybVr13K53K1bt2CUY+uuhud5W1tbOsw+c/bq5tOdfLEc8nh9fZ0QEu7u2G7y3XffNQx648YNapjf+u53ugNPSvnw8ZP1tZUHDx7k8/lsNqttT0fpxWKx3W7rjSmVSsVxPDMzg4g7Wwf5fP7b3/52pXKUz6YZY3MzU/VmM5dPu7Y5CCPDoGnimpbwgphSRuG5mGu8GH7LOhkvPzI6xvHaGPY5PmccBuPtHhFBzwSLUywyvy2rVEppV0MAKRK94GE4y6DEb5hAOGU+zz4doUgZUkaI0iT/qKQatk+GA5Zs8l5IKfWIkM70JvFockSTTC3rK2/ZOFA+ZY2TofbIeJ79dbKyNLxmQhhjhpYnlYIx5tqOY5saKsBRaX4RwzB4FARBADJKJSx/0NvfHXDOk+7S/Ow0gZf29/efPn363t/8TalUKpVKxXy+VCqKOOr3+6urq+12W5dVNA5zfn7+5s2b58+fz+Vyh4eHug0gpXz48GEwCHQvpFQqzfa6iUSi3++7rru4uLiytlqr1XK53Pz8fL1en5qaqp58rGKyuDjf7/ePjg/OnDkDAA8fPkwmk41Go9/3qtWTTDoHALOzs4LLc+cu9P2D48rht7/zzQ8++GBra2t6ejqfz6+urk5PT3t+/8MPfzU9PRVG/k9/+tMw9PU0vRBicXFRx8nz8/OffvppoVDIZDKWZWkumTiOe70eYyyKoq2tp3qJbj/dXVs9I6Sam1vY3Hw6MzMTxzEi1fLAukV54fzFo6Oj/f39tbW1hw8fdjodwzAuXbokhDg6OiqXy8lkcmlpSatWcc6DIFg/e6byQfWLL744ONjLpBILC3OWZXARmaYlZRz6nuAhY44BjEDEowCo+1sM4DdZxbgwcSp70vHXZPc8iEJ8VnQApUBKKZQEeAYsUc/X9odvgUBG2kPjtUq1nUulkAgcmc5EW04+G8Ianhafb9dJQKlQAVGgEToEQJCh5x9eBhundkOfO3K4zyEMJmb8XizIjt8PXnCD6oUcVz0fb0we41CWDJG4ilKqyaRTSdfzPIpEh6AmM6SUmn/FMoyz6+uVSqXTap07d67X6fz8pz/N5/MEYGVpKZVImKYphOh3u65t8yiuVCpLS0tBEBweHvZ6vXa7nUwmE4mEZsUFgLm5Oa2sVK/Xu91u0knrUqdp2jMzc73eYGtr69rVl1rtxrVz1wmB3d3dpaWFv/7rv06lEufOnSHSiONYVw7X19dM06zX60EQJBIpzwsdOxEEkWFYJ9Xa3t7BwsJSp3/cbLa++c1vXrx4MZPOPX78+Oiocu/eg1/84hf1ev2b33pHiOjjTz58++23Egnn888/55x/8cUXCwsLpVLp4cOHUkqtRm7bdrfbdV1XZ0qFQqHZbBJC9vb2Ll26tLu7pwmCK5XKpUuXfn3zk9XVVcdxoigol6ebzSYAefDgASHk0427c3NzQRAopXZ3dy9fvryxsaGVNmq12oMHD2ZnZx8+fPj9738/juPl5WXHzhYKDzudzo0bN0BGs7Mz9+/f9/v9QtkRUiqQBCSAVFzwKJZSCniW9r8Yjr544ETlHF844Pn4EwA0nGO8BE/5i7EbHL+vtr1T61MpUEJqL6qE1COykxY4sTuM3+u0Yesj4AqFJuAf5pmAyBUwJMPWIoFnQbZ+5TDQff5Ek8Y2tsYXTevU41/5tFPH+MHxD5pURtdgdBZqmqZt247jaCIQz/P6/b4u0Pm+X57KWzaN4oAyTKZcypAZZHZuOoz8bq+dL2QvXb6wtr6iGV++9rWv5fNFIVSj0QrD2LKcIIhqtUa/77322hvpdDaORbFYTqeznMswjKenZ5PJZCqV0ngUSmkYhkqpZDJpGMbNmzeXl5ejKKpWq/Pz8w8ePBhDxtyEvbKy8vjx4yiKLl26JIRqNBq+75fLZdM0C/ni4eFxFPFWq+N53ve//733338/l8sVi8WpqSlE/Pf//t8rhf/oH/2jdrudy+VWV1d/8YufdXvtVrvh+z4hRLfjpZSa996yLMbYzs4OY0ynT4VCodVqcc6npmc9P2y0mmfOnbUcJ4iiIAovXb5anppZWV3N5Yul8jRSEvH40ZPNh48eLyws5HK5vb29jY0NAIii6NGjR9vb267rnjt37urVq6+++uq5c+cIIb/+9a9/8pOfvPvuu4OBXyqV5ufn0+l0r9fjIpqaLuTz2WIxXy7k8/lswnUoAYpK9zn+i45TIejE6n+OPWycECYSCcuyJlf1ZEB3alkO/Qo++1VNNOqEECLmIuZxHCsu9JA6PB+OTlZ9XjQWpVSsVCRlJGWsMAaIAWIFHNCPIz/mARd+HA8TQv1dC2jhaD5t8spwVFA59fnHzzm1sb14/KY/jT/P6ANoVCtoPiXHcXR5Wn/aMAzDMGSEOo6TSqUcxzEYebr1hKBaXJiJAt80zXNn1l3b+vt/77+6efPm4eHxg3v3TdPMpJMn1eOPPvwV57zf7wNAuVyenp7W1q5r+rlc7vj4+NNPPzVNc2VlJZVKdbtd27Bbrdbq6qqUst3rJpPJGzduVE9OVldX792/PxgMrl+//vDhw0KhQCn95JNPrl58SYMwdaw7NTUlpcxkMnt7e3HMwzDsdvsGs5XCs2fPz88tommvri1fODz/2We/tm334sWLnEs9S6Vjvw8/fH/gddbW1iqVytRUOQzDa9eu3b17lzF25syZ27dvdzqdpaWlsevzfV9XTXSTybCsZrM5VZ7xvXBzc3NhYaFeb6ytrepNwfcHu7u7nudVqkda62J/f7/b7RaLRdd1L1y4sLOzQyl1XVeTcGvU6NzcnBBienradV1GUlPTpUTCeXD/Do+8J5sbQsSU0pn5OUBDIUFq9vyo2/MAQE/eTPqx3742xstj8vt4Bb7YV6OUGsYzODQhiECklEoNSRO1J1QTWdJz16AAAKieldeoaRkLQDJUiFXk+e6alBJHKDmNByCEjONSbTgcUCAROOTdp1rpGRA1EgglIn22M403DP0e+qInyQj0E+REr+KU/ZyyzNGHe26E99RrJ1+uf9VhMENiWVYi4TqOg4hRFPX7fQIoRzVVy7JSCYcxdrC9oThfWFiYmZkdDAaDQa/fbfd6vaODg3q9XigU2s16IpO6du3Kndv32o1md9DX6RMhpN/v60omIaTZbPZ6vWazmUqlCCG9Xk/nfsCRxyKdylSqx0opz/NKpdJxpXJ8fJ
xKpZ48eTI1NdVut4WIL148/2d/9mfn1i5+7Wtfu/Xl5wCwtLSUyWQ++OCDMAxnZ+fiOD6pNqWUBwcH+Xz5ymWn3+9fubH8F3/xFz/43d978mQznc5KAfv7+8vLy4lE4l/9q3917vyZP/zDP3zvJ3/9y1/+fHllYXd3N2HzYrE4Oztbq9XOnDkTBEEikXBdV0fUUkrHcXQSqzPVdqejlEqn03t7e6Zprq+vSykbjeadO3cMw8hkMvfu3UPEoWnxMJVK5XI5SulgMLh//363233jjTeklBqFI6Ws1+uZTGZ7e1srGTbr/rXrVwyDVo4Pzp9dTafTntdNJpODbk8h9cNYSDAINShhBAlD+I3m9tWHnJizgVEFcrxyyAjUNV70WqJchy3EYKCQc87HpdEJOxxb7+kFqxQqoBS1aJLGUOKzOtBzNR6p5IuLf/KcQgGXigxh61Iq1KB0ZtDhmD6oZznh+NBp4fgqJ9uAOCJO/crj1O7yWxzjZCBx6i4MadIp0/5ZKRUEgS+57/uu7WiSC4pkvE3oFdnpdJ4+3dIlmHQ6c/bs2XfffXdz87FlXZmZmbly5cqFCxe2t7cNk85a8zMzM61WS8d1k9ZYKBSy2ezc3JxSynXdw8NDSqnNHC2yeXh4OLsw/+DBg8FgkEqn/WCQTCYXFhY++eSTXLGQyaRN0zxz5sze3p6ueUxPTx8c7CPi9PR0s9nc3d1LJtKtVqtQKJ5UG8lk0jTNRqMRx5lcLieEWF1dff/9Dy5euCyl3Nvb63a78/OzhJA//dM/PXtu9dvf+ea/+3f//vz55UEX9/b2Xn/99YODg263a5rmG2+8kU6nB4MBpbTX62mfHARBPp8/PDx0nIwQsW4wZLPZyvHJzOyUUuratWtCCN/3AeD4+FgjgZLJZKO6c/HixV6vl0gkbt26NT8/H8fxu+++WygUVlZWjo+Pdb6wurraarVeffXVW59vrK+vP3nyaHV11XWdTCbTatVeffXV+/fvS2BRLNFwKXUsy7JtGykTg/+ynPDUHg0TIFIcUdpOeDmIo1Abp27fgcLxEMZvWrdf8Y4KCCFKyJEVAn3GUPwcEdszp/obCq4xl5RLJFLz0BAplFJCxGGsADRtKZA46pqGUDII/R4jYJsmSpQxyBhRsnymOF2aTdgJRggBCDxvMpaQE5AimLDYF2ue8LzrG0cU8ELB1zRchqYShAi0wHTRMAQQXywWpmgYBe1O2rbnp8vFbFozRKLJ0GSl2ekLVy+jyRLZtBcHH938JAbxO7//g53Dnb3jvYPqwa8++dXm7maz1/zy9t16o7W5tX3/wUat3tx49EQBqVRr2Vzhzt37SNj9BxupdDbmkjKzUCwvr68EPPzi7peLqytuIlkslW/duUOZ1en4Cu39w3o2PwNg+z5dXr7suuVCPt/tdF575VUd7208eSxAOalkfmYqUEKY9KTbVo4ZMezwAJP28WHv0cO9Rxs7J9Xm4uKi7bBLl9cTSWY7mM44c3MzjFkEEj977/Nz6y/Vq6o4taSII9HeP6pVa+3Np7tcqtnZWd8fGCZaJpyc7B0ebnW7FX9QTyVBmBgz9Hg0CHyCiinJ+x4LedzpfvnRhwlGTg73Svn07Gzx7LmVRufkf/jf/I9WIjkIg67fNR1ycPyUWfKP/ru//0f/3d/vdRsH+7ulQjlhpB7d3dq8f9A4GlRre//n/8v/6enWxtWrl3/x/ocDjwtpv/ezjxsd7seGmSpZifxAQDsITwa9p0dHJkhDCUMJJjmTnCrBQDKQqLj+UsClivV3qeIYZAwyUkJ/xSAFAUEADMpR6QcFAUmRowol1/QWAlQkeBiGcRwjom1aJjMMpFQhSoWxIFwSLqlQTFGmdMZKlEKtdxQjDoTwQAWMhAYNLDYwSI9ClyiqJJGCKmkgmAQZpQSRIEqpxxuBS+QShSISqEI2sJNNNE4E1pXRRKuh7CY4HZquS7cu3RpP1HiCTSa1zw9bgW3b2WzWsiylRH8AnHPdwDiV48oJYYBTlga/oSOjXmhdwAuec2zP4z9ZlpVOF+YXF6QU+/v7YRzl89nLly/rJDYMw62tLc0qrcsht27d+qf/9J/W6/Uf//jH09PTL7/8MiKeWSNHR0e1Wm1qaspxnGKxWK1Ws9msUurVV1/d29t75ZVXfvKTn5w7d+7hw4cvv/zyL37+829+85uXstl33303lUpduHDx0qVLd+/dnp6a7XRae3t7qVSmenKSyWS+/PKLbDbtMJrL5bRchC5dLi8vd3s9ANREt1IBoEJE0zSTyaROIOM43tzcPHtuHQDOnDmzu7t7//79a9eucc7PnDnz8MFGoVCoVCrZbE5H5lLK69ev1+v1RqPR6bRmpsprayumxUAKyzYBIIqCQa8vZNw6juM4FlLpd9cgeCFEFITvvPNOPp///ne/92/+5H/5P/7v//nV61cR8Ve/+nBvb+fSpQuZbGp5YZEy3N3dvn/33vT0bCGff+utt9KpfLPWXFlZ2ds7uHXr1oXLF37v937v6GD/6OjozTffBICbN28uLC0KIYIgEApiSToDPwzDdDKVzxU73fA/0yP99mPSHY0fmdzNdXN/zP8yLmoMR1Wfr+hMvvwrUqqJdXsqdpsM6L7yhZHgiEjU6drSJLkhG+PRxkaoa6kanqNpiXU55CsvaNzTP5Vwj211nAScMjn5VUi3UzasxvP7hnF0dDQzXTZNc29vjzE6MzNju04chz9576fr6+ual/7C+YtBENy9cy+fz6dTmVKx/HRre3t7+7vf+d7Ozs4Hv/pwfX393t3Hq6ur3/jGN3Qb8OLFi6lUan19/csvv+z1eouLi9Vq9R/8g3/w2WefXb9+/fDwMJtNP368kc5mbMfMZrP37t31wyCdzuwf7Nqum8lkGGPZXNr3gi+//NK27aRl7u3tZbPpo6OjarWyuLK8u7u7tLzc6/UopQQUNUwA1E1ZAOh2u5ZlWZbl+/7+/r6UPAwDRNTRMiJms9l2uz09Pb2/v6+USiYTe3u7u7u7b775+v1790ql0t7eTqPRuHjxvB9wPwhMyygWi4wRg7I4jveaNSEECmnbpk4XpZRRGN145eU//uN/owd//w//u//tF5991m639/Z3pVALi/MLC0uua5+oI0Q4f/5iPltAxHarG4YRo3atVgsDXiqVlpeXj+p7m5ubUkrGOvqC8/l8s9k+f+ESECoBu4Mwijrtdjvi0rJdJM5vWuL/XxwvLv1xjDa0llEbf4yChOez0skh4MkFPF6HL3qXU+/1m4xWn4oaxvgCYMLTTK7/8TTU0AjHl6jrbHpSrtNpScUdx5m05smkEV6gMxz/iZLTO9aLHwZeMD8yum5KqUEpYUYy6Rby2TgOB4NBIuG6rouUVKvNV199tVwub25uptPplZUV7asXFhZ+/vOff+Mb31BKxXF8+/btTCbzox/96P3333/ttde63e6jR4+63W4ul7ty5Uqj0fjTP/3TpaWldrstpZyfn6eU6uwuDMN8Put5XrNZDwKv0235weCkVqvX6zMzM2EYLiwsvPfee4VSsVqta
VrOs8ur77zzTqfX/s53vnPnzu04js+cWTNMO4oiEcUAYJkMkIFS/sDrW53ATS0sLCQSiStXrrTajWw2fXh4UC6XhRC7u7v5fKFSqayvr6dSGQDodLqDwQARG41at9u1LCOfz29vbwFAs9m0HRMAeCw6nQ4iUkqTyWQqFTAkgkd67oRLLuI49AfvvvvuK6+8tre384tfvD87O/vWG28KJQkAEDY3N/fe3/5seWXJYjTm/jvvvNPr9A3DMJldOT558uTJ/s7++XMXUqnM/v5+rX0Sx4elciGRSMwOG6omYca9e/csx83k8nYiU8jmJKAfxoyZvQGH/38cL+aK8HytYZgrqmfSDCNjeL4iODH6BJMR3MjAXrTDF4+vtMahLSADRBjqdg85tAFAPA9QIafeXh+6DaU7clJKnVjrf+SYvuk3vXbypkzGrpM/fGXqOP60Y1zsuEmYz+c1IGt9fV1XBfb29gAgjuNPPvlEKZVOp//6r//6888/L5VK/X7/H/7Df/jrX//6z//8z8+ePcsY63a7U1NT3/rWtxYXF3UhsVwuSyn/+I//+MmTJ7lcDhFzuZzWCfvFL37R7/cfPXo0Pz9PKJSnismkm8vlwtBfXV2eX5jN5TKFQi6Xz5anStMzU9lsNpNJeV6/1+vU6/VPPvkklczoYk8unfnoo48OD/YAgDFmGQYhBJRUggsehZFfrVaPjo7u3r27v79v23a5XC4Wi4ZhrK+v625kv99PJBKaTK3dbm9tbeXz2Ww2u729rZTq97u1Wq3X61UqFcMwEomUlHJ7e/vevXudTs80zWI+m0g6OGIuD8MQCBq2pQemHj58+Ps/+OHS0ko2nalVT1658XIymb5w4dI3v/ntt978Wqk01Wr2OJcXL1xZWV7d3zu8f/++ZVnLy8uGYSgl5udnL125XK2dzM0urK6uPnz4qN3uXrx4eW9vz7UdKaU3CHQ7d9DrN2v14+PD32pZ/wXHi0vu1AJTEx2/UwUIMnFMwjbVuD34nEjmc0CU33Qxp2LAsc1zgTGH8VcUK/1dSDL+YmM2NCE0Y4VSapgWju3Btm3TYoSQMAzHVaxJKC28MA/2lRsVTDhAOTG5rx8Z/zBErhnGeOaQAnY6nSiKbNuM45jz2PM8REylUp1OB0aVaz1wqAljPvrooxs3bnQ6nTt37gwGg7W1tV//+teXL1++/fhhtVrVyz2VSt29e1drSDiO0+/3X3nllY2Njampqd3dXT1SqJTsdNrMNC5cOLe/v28YtFgsahZ6J5FCVH/v7/3Bk82n6+trX375peu6x3tVQumnn3565eqlmZmZIAiuXLnS6/XSqYSfz/V6vTCMfS8kjmOajBFaLucZo8VisVarLizO1Wq1TCaTSqXS6XQmk6lWq4Swg/0jRKpRAaZp53I5ANjb33ntlVc1E/b9+/eXlxejKGo3WwDSdRKO42i5mGLRDf2g2+4IyQnBZDKZTqYQZNtoV4+OX331dWaxZDL55MkT23b/7b/9f77y+tc+/eTm1HQpCKLFxSXbtr+8dUdJaZpseXnZ98NarZZyU6++9koYhre/vNMJBm+99bUgin76k59PT0/nsumnT5+eO3uh3W4PPK/T9RJBSA3bNM1SqeQ4Tq0d/Bcb3Fcd4/UzubTURINuHJeOVuZYHez5EVZAnFBHnDzGS3rSE04iV+EFz3nqQESlkWfaGU5c53PhqG7pwnAbEFKCUkgIjoY1dDNAjmnnTdMcv8Gkyx6DEiaNcLjrP98knPw+PtX4B0IoHYnj6LvDOVcKUqlUsZATIu71eoRgKpVCSnzfO7N+NpfLPX78eHt71zRtzuX+/uGNGzfm5xd3d/eiKJqfn5+bm0PEzc3NL7+883RrL5FIZLPZIAgIIcvLyycnJ4i4tLRECHn48KHWkCoUCoPBQJMapdPpYrnUbreZQROJhOXYvV4nm806iUSn0zIM4/GTjQsXLpw9t57P57kvd3d3FxYWHj16lMtl5+dnj6r+0tJSrVabKpfDIGg2WjyWtmEalEgedTodPZWjuyD1+onj2I8fP37y5Ekcx61We3FxOQxix0msrKycnNSIaUVx0GjWhBC2bV6/fv3zLz472j+IosgbBK1WK5VKZTJpPWVfrZ4QACUFKOladrFYzBeLFLHXaZuWNbMwbxhGMpn84tZtyvDi6ophGEopy7JmpucMg0khHMc1TWduZqZer3f8ZiKRWFlZQakePXr45MmWEKI4vxALhSCpYbTb7cXFxYXFZakwlUp3et2IAxqmH4Rerx/EkWe7QBP/v9je+JAThNnjR04trbEdSikBhgRlOKEdNo7G4HlfMml+MGGHAEDoc4IZpyz2xXdHYpw6J2gsJ04Y4Rj8qi9VSkCgAKB9kZ7eD4KACeI4zrh/OPl+k1c/6fr1g5pd68Xj1LWOf9Dx8ThCkFJy3eNErFarhkFX1lbn5+eUUt1+L47DDz/8UHMNEUJu3boVx7Ee+avVaqZp6r7Z+fPnf/rTn66srHiex2PQ0XW32w2C4NKlS/v7+ysrK7pDvbOzk8/n9cWsrq7atr22dqNSqUzPzrRanTNnztTrdT8IPM8HgJmZmUQioUkxwjDMZrP7+/vlcjmTyZgmk4rHcby3t5fNZm2ToVSJhFMoFASXYRhbJlOSD3qhbchisaBRL+lMslDIPX261e12k8mk53m1Wo0xc3FheTDwi8WilFJy3uv1PK+fy+UajcaZM2fW19cNQvf39+fm5rSsfL3e9DxP19l6rXav3eFhZNtmNp9xE3a9Xt8+2Hv77bf7ne7jx49TmfRrb7yue8Wm5czOL5oWe7q9mclkirlsFPJCodBstsIwHAz84+Nqt9vVknUA8pVXbqCbabdavV7HsRNB6B0dHW9tbbZarU6nC0gz+UIxn7YdHoQx+tS2nX70n2Vj/x+P8dp7MQQ75ZrGbmfkCUcrcyieeRp8MunT1Aup4FfWRH5TmIqIoVQwhJU+F9NOIvgYIcTzvCiKCKGccymBUSaEFiIdyneYpknokHVGf8fnDzVBkXjKSjXGZfyS8XNM09R4cUTUBq+U0nNWzLKEEP1+nwI6lsGF8Lo9QqBYLAoRHxwchGGwsLCg0cmpVEoLOGslCaXU7OysnrRYX19/8ODBgwcPdK1F45tff/31vb29vb29dDqt1Y7+36z9V7AkWZoeiB3l2sM9tLj65k2dpUVXy6kW0z0zmCUGoC3Apa0ZZ7kGGtb4vEvjE1/JB7yAfADNCBJY0NpADBc7GEwPMHpaVFd1VXdVV6WoyryZV8vQruURfDj3Rkbem1XTDdCtLCtuhIeHe8T5/Vff/32O42iatre3J0fRMcYSdVmr1Vqt1kcffYQxxgpRFK1SqaRp2mg2CVEODg4kVf5kMpHU19J1F0WhqiohqNlqr6wse97k6OioP0Ddbrcsy+WFXhrFEwQELaej4fXrN7M8khUUXVcRQpqmrq6uTqfTer2+v79fq9W++93vnp4Mut2FSqVy69ath08e27bZaNTW19ffeeed23dura+vO5b92WcPRqPRjevX4zgsy3IymdCS
Fzl99PBTXdebjdrXvvpVx3WTNIUIbVy9WnB2PB5OkygTLMjT3d39ZrNZ73bzIh6OglqtBqFgjHmet7K6dOfW7X6/f+ocS9gahFAOGWq68slnjzzP46wMw7BiGghAhLDrVhcWFoMwPu4PipKVjE+nnqabeZ4CaM0WKIQQn9fz+PlgHhVndT6ZKF0uy8ttFr7Nm9AssATnGYosziOE5BQFnxMXku/FWLlgvfK9EigyM7AZfrUontHhnGE5ZR8BPOtUZse5bNJ8bhLyGUCtmMOYz+jEhQSji6d3lHnv/J+/yY+Y2XBZUgwhApBAwDknhBiaZqqa49i+76+vr46nE8kjdnRy3Gq1To5Lw7AqFbfXW9Q0LY7jPC9939/c3BwOx3JOvNtd+NM//XPP8958800ggjzPpceo1WrtdlvKm5ZlGQSBEAIhJNlydV1njF25chUhVHEdKbV9fHSyvLqyuLjY7S6wkxNKabPZrrhOq9XinOd5fnrobW0/BoDrhvoXf/EXGxvr7U5THlzBxDRN17HdSoUxTss8Cn2AhK7rnHPf9xvN2sHBwZMnjyuVShiGb7zxxo9+9OOiKK5evQoA+qu/+qsoivM8v3nzhm3brVbrXVb+8R//seu6tmFKzhvp/23buX7dWV+/4jhOf+AnSRInicwSBYITb5pl2dif3nn11Ve+9CUhxOPHj70kspibsnKwt7m1tZWmebPZvLK6Vq3WPvroow/ZLzY2NnwvCMMwCAICgbwvk1M0nnpRFGEIsrRAAnh+mMShEGI4HBJFkwBDzIWu6wjjLMugYc8WK4RQXB4VeFYS74uRNJe3iyUGMYvRnoad8w/mSxtiDh8Gzxtvs1M9C80uAGXmPuuC2zz7FAjALBA9w9ZAeRx564EAPDNPeOEi5/2seFbI+8L1XDih+S/oaSg898YL39QMdsQ5N00TQ0iLsijLJEniOMYQYgCyLJNTFI7jMEYnk0kURYyVt27d2t/ff/DggZxk73Q6vu8Ph8NGo7G0tCQJiP7pP/2ntm2//fbb9+/fZ1TIafetra2dnZ2Dg4Pl5eWrV69+8MEHvV5PUjPFcSxTNdu2t7YeyHYIoxxBWRDGEOLBYBBFiarqiqIM+qO93QOpEb+xseG4dpZlN25cG42GnPPj4+MiyxcXexhDXdcd26q6lSwrhBBlnt24c8uyrFqtJqnya7WaBFL3+/1vfvObf//v//3RaOJUqh999DGEcGNj4+6n9z3POzg4SNN0ZWVFOoqTk5OlpaWFhYWjo6O1tbW7d+/euHHjvffeE0IstDocAsrZldUV1TIKynXbNCp2EEeKqh8eHnqBr1n6a1/5MoRwa2vrcPshgEA3UJxM//wvPr2yfvXo6KTTahdFkaeZnGnSFVKW5WQ62t/fh5Ue5wBiWDLqhVlZlkkcYozr9bpl2zhKiiyPkiRLUt1ArKTEfGqB8OmA3lOMKEZ4Zg8zM7i8PbfPfHm7sLw552cZ2fMqmTOvw8+nZ+ed3gXruPzGC0sdPC+rvOAJ5UZm38j80Wfl3HPEC4Twojk99/MuHOfyns+1z/naFEIInPvGLMviOFYw1jCxLGNpceXJk83l1RVVNQ4ODhYWFh4+/HRxYcEwjFqtFgSB5GKwbfvmzZuDwWAwGKytrb311lue50nZsKIoIMBJkty/f1+mi2tra1mW3b179+rVq+PxWCIwW62WEKIoiv39/V5vgXOua4bS0LtdRVV1RPBoOKnYrlWpSfmKvf2Dk5MTQghCOAiChd7SyemRhI8rilKU2UsvvUAIYZRqKtE0rV51OAOU8qpTURRla2vrtddeK8vytH/81ltvZln6l3/5l9evX8cY37hxI0k+efz4MSHktddeOzo6RhhoulJvLIeRT2lxdHSAEMAAGoZTFFTXzNFwoqlGmuSKommaBqnQNK0UXFGUJEnCNIGahi3DS6KNhYXbneYnd+/9+Mc/Pjw8tCzLsqyuCXu9HsbKZBx88N57mqbs7x/bpjWdTvI0H4/H4/HY1HRCUBQHeZ6TRCnL0jL1sqQInBGOSAxDrVZDiIwmXp7njDEExXysePbg0vqR5Amz4sLnGeEXOA/wjCeYfcLMxp6prEomh1kVcMYqKF3ObGh4Zp/oeRYl0zTwrNWdfeoZH6l8aW79CwbOWaDIvOVcvs4zjbdzpOnsOufd8bzhzYfFZ7udv2vey4NnXeK8Hfq+TxBCACoYywl6TdNs3VhbW1MVLKPWPM89z2t3O91u9+S4jxCCALtOLYqiMIgDP1JVtdFo6Vr85PF2p937ype/9uDBg3/5L/6Vruv379//2te+Jif3fv7zn3uel+e5pJp2XVf26Hzfz/O82WwuLS0F/jjwgzQryrJ0nGoQRIqqcg5s2y5ZGQZRpVLpdDq1WmM0Gu3v7xcpsyxreXkZQlGvVuI4rNdcSUiRxBFC2K1YhqZpmlakpWmaaZpKNc80TSV+TRaiNU2TCsHdbvff/k9/+Lu/+7+Qx3/ppZem06kUeFnsLchwenByypjY2dl57ZVX33333TfffPPg4OC1V1/VNO360npW5MeDfqtZLwColPkoDIfeJCny/8f/+C8++uTj0XgCCe52u9evbrz55pu3LF9VdM8LojBVCVpeXt16stuotW3LGY9GxyeIUsoIU1XiVKqoCrjRyLJEVRSaF3mWJElGCHIBCMOwYruSpFRAmGclVpU0zeeNRC4jcY4gma/+P9dj/OpGOGchswDtqRz3fC4373bAnIu+YBczI7wccF5Y/5f8zVNqtmdf4s8Y4YVrkKf0bMQIwbNgnwuObv7GcNlhXth/fof5T5eXTQWFEKqKqitEahLKud6yLHe2nywtLem6PpmMAQD3799/443XmvXWZDIZjUb1er3dbkMIT09PkyT58MMPV1dXGWPvv//+4eGhZIw3DEOGrJ7nLS4uvvjii91u9+TkZDqdGoaRZdnp6Wmj0eh0OlJGoixLjBXHqVbrNc65ZVZURbcqtqKoSZLkZcmY8LxA1bVuZ2FpcWVxYfn48IQQ8vrrr1cq1mcP7g4GQNeUzz777M7tWwghS9fVBlGJYpp2HMcK0WLGGo267FIsLS1tb29//PEvFxYWqtWqYRij0bgsmZQBjeN4dXU1pzljbHFx8eTkBCFkWVaj0ag57vb2ruu6YRjWarXT01PTNPf3969cuTIdDpGq5ll2cnTsZXGOoJ/GkyT59NHmz+9+nJel3aojhLws/WznSXtlcblTtFqt3kJzPPJqdfvRo89sq1qvVzudnm1ZEMKVlRVT04sik1FDobdCP+CcJklURCUNSwyFrmrNVl2CrvI0lWJehmkWRRFfWNyXVshs4aA57unL2+flihcynVlO+FxnMx9tzm8SYQuetUl5evjpeV5c4fNWMNtHiAu293TlP/24p+f3bKJ7vuvFZy4ccXau8833+X3QXKg9u/LLXnT2vGmaCsYqUVSMIIRSQKtUy93d3Sj0Fxa60+k0DMNWqzX1PYTQhx9+KE2UMSYVMw3D2NjYkL3+9fV1Xdffe++90WhUrVYxxrVabTAYfPbZZ/1+X9M
0ORk8nU7zPJdxlIRxyuJNo9GIIwIAqNcaEpJ/dHQcp0me565Tk0xbnhdEcdzpdBYXFxFCuq7HcfzZZ581m/XDw8NGo1arOoqCMcaGqlmWxSjFiJimiQE0DIMnmaap/X4/z3NVI8PhsCiKer3+xhtvSNF5RdH+0T/6R3lenpycMMaBwj1vsr+/a1lWp92R+hz3Dg51XW80GlLBdzKZvPHGm48fbXba3fh02F7oOZYtiWdU16k4DtNUP4kAQablCATTggIAwjQZTidDHDSa7sLiGkKo223/hz/5s29/+3v1etVxHMAhhFBRFBUTqbmNMd6bZEmWyuEyTdMwhAqGtm23W92iKDzPi6KEcoEVYnJTURQsnh9eAjZTin+6QAEA5bnswoXtgg3Mthl88uxfMbNJNHfUi8eZDSGAZ7PQy0t3/tYw840zw7vsJwV/DvgGnLFuyx4+eD7dwOyDz1NBIQSfN5sLbvC5j794E8/mkPIxQmiuKPuU2hQAkGWZ1Ljc3t1RFPLSSy+9+vprURRI/XrGmOM4Evgix8CHw6HjOMfHx5PJRAKgZWlnf3/fsqw4jnd2diQc/MqVK/J2GEVRURSS4VvTtDAMi6JYX1sLgkDq/pUlHY1G/eFgNJxomlYwLnF8aZYdHBy9++7PptPp22+/vbK0vLW1hTHsdrsbG+vHRwerq6tJHDFS6rqexHFRFApK5aoFSTadTqXWYlEUq6urGxtXDg4O3n333c8++8wwzN/6rd+ZTqf7+4ebm5tLS8ubOw8BAJ1OZ2lpZTqeHB4e/vZv/3an2XrnnXfDMOy02kdHR6+99ppUtknTtNPpWIZ5kB2VBZR0VVGZH03Gx6enOS0rFTtJUwGh67pMiKP+qbFcOz4+KgvGGLx567pbrVSr1dPT09XVjdCPJENkgXAURUKISqXS00zdUzllRZ4mYZSkcZnnUIBarSbp3hBCpq5xIMqyTNMUW/aFdTC/HjjnApx5p3lzeu76ee7z8/BmaYTnK/ai/cHzouDMhcBnyzBCUhg+K5OIVMznZoh/lXU+/+CpEc5hrQlEDEIBoayccggBwgAhPLsDibNRYikezqFA880TCKDgZ3VUxsCMqRsAAIXAUPINX2AiPbs3yCuE51g+eRBTVRSsAEaDuMAYI9wouRLlotlZHftR4MWdzqpC8KA/zZJiMhm9euuaFEK6d+/e2mL7ypWrg9HwF+//1LAsADkmsN1t6abmxf76+kYURZXmchRFBTTDqV/SwND0NNuqWHYYeLwsrqytMz0DhglNff94U1XV7bt7jHPN1qGOBpOjadCvtbQXX9ugNOp224phT2N25BUP90eHB+PQ0D/2Dh6l48Od/esnfVDQ5fanL968rhtOo95u1Jw/+P/+fzauXL/9wmu+nx8eDUZhXnG1ovAcxwVAJGnAGKO0LGm++fihaentdvODD96L42R1ddXzR0QBb73y5Xffffev/+PffO1rX1tcXGSOePDxZ4ZhfPMbv/mzn/0sjuhCb/3e3c0XXnjh5q1X9vb2kKs9PHo8CYZra2uL9SZSTNNs3v20/8f//hc/+vmH/6f/y/9ZrSuNpv3w4ce9Tv10MhmewKsbNxE3LMPByNhYf2l/d7S8tE6palh1t8a4KBSFVEGe8mgcjcbTMwknQytME6iqNp0mUEWD6TCOU4EgxkgIXhZFliQYoSbKfd/XDUvR1IkXKIqqmZafxDrGDMJSACY4g4goaiF4URRVeO6XhFx/QIqoMM6FlLEFApwPCkEIgaCcPxNznZcYz9pgMoxlrGQMYIwxIjOPBM+A1tJYJLSFIygABAid2WdSzPUtz5NBcN7qkO+VL8mTgORchercp8r/YYzA2ZS+OCP2nXOvQPqgz7PsGSgWzqXU/NKE7vnRBL6kIzf/6lzofPYgTdOyYAAAwABCiFIqmSa2trbWVpYBAK1Wa2lxYWvrcVEUb7311ns/+qs0zSGEDMDxeKqbJ7Va7c23vvKnf/qndhAWZalpmh/FeZ5LtqWFpRsSylwUBYCiLMvxeDwZjS1Tp3k2GAwEK23TarVaAgJN07Ks+OUnHxFdfeVLLwdB8JWvfOXll68ppGA8UzUSpQWaJiEItGFsOhXFUaN4gLFi2laz3bJV3VKU3YP9z+5/dPvGxtpq7+2338ZIPT09dd1O1aktLa+leZ9SurK6dHx8VJwmhmHs7e1CCI+Pj2/fvv3yyy+//7OfD4fD733ve3fuvPjRRx8dHBx885vfLIoCIbS3t5ckme/7Jycn/+v/6r+u1+vD4QgAsLKyAiE8ONh3HCcryvF0ktMSYmBVTKxaJ9OIw/Lf/MH3d0cTiBjCOEziJEujKFpe6RGixnEchsnGFUfGF+NRIPmRdV1XFGU4GuV5lqSBEMAwDDWmqqpKnvwojGXPWla2JWk6hFBisBRFkQUw3/ctRm3oQiQEYAIwVVXiPOcICybkxCoXgnNelJTjc7qjp0b4nO1CKDh7EpznRzPm7NlME4QQYyxX+uX1OctF5x3j7NVfK/q7fHA4ByoAM8pDxhiEZ10Uzrn4vGu9ZDxgzoTmP/LCPs89lQvPz79F5h4SQi0Jj998801WFluPH2EMFxd6N27cODza/+u//uuX7tzgnGdpQZk4PjjojyfXrl3r9LpxmmQlZYwRTT0rPFI6HA7D+GxUEgqmEEVVVU0hBGHL1IGh27ZtaMrCwsLaympRFJSV/iS1bWvojUxTv3Pnzne+851Wx84yryhDiFG4fzTyp35SKJYFtcwbBy7mKcu5ggtIia7Vm4121TYJfPcnf/Vk67MvfelLluUcH41cN4vC0nXq2/ubvV5vd3fXMHRGxUcffdTptA8ODr797W/X63WZsrbbbUqpZVWuXbv20c/vjkajOI6vXLmysrJyejqQrI2MMamvWK83X331VUldRQj+bPdxWZRX1tfcei0p07JIh9NJtWH+wR/9a8WpqSZOioQIUK1WqWCMsSTOYiPpdpYQQoEf7e7ucobRKqKU2rad5+5gcOr7fpZHElAYx7FlWfM2INHF4/GYMQEhlBQ+CCFZfDKwIgTXDMO2TaSQOC8YKwnGqkIEQogAwAXCClYVDoRSMp7O4dwE5BBIsXg4K6UCB215mAABAABJREFUAKAA4pmexOXU6UJ2d2EFznsO+YDP6688b60+jfieV4mcP7pM5y5YFD+PB4GcJzxvTYLZSV6GqM82eGm2av5cL1z2F5zchXfNDkgwQRBDCME5dhSpqorU4+PjimV+9RtfR4L/0R/9Eef0lVdfwgqp1lqnp6cZY4ZdqVRrRFHCKBl/9khR9ThLi7xEWV7QkhDCudA0fTQamaZpmjqGkHNOCK5WqzXXMQ1DUKYouOq4Gxsb6+vraZwMh0PE0At3bvz0/en+/t5v/Rffwxg/ePiZYytE4UEa+2mqmFYyip/s7wYFvnL7BoiOB6dDVtICCrddc6uuW3O7Nfs7xnf/3f/0B59++qnr1peXNpr19uPNXyz21izLWlxcFIBVqy5jpedPFheXiqKwbEPX9U8+vgchfOWVV/K8/OCDD770pS9Vq2
6WpRjjoijSNL179+Nms/29731P1cje7gnnvNVqyN5XtVrd29s7PBkuLnRXr15hZer7U4FRkI4Vw4iSISKMKHo0ndqO1e22R8PTLMsYU5Mka7U6gkMIcOBHjUZbUkhJqFq9Xs+LhE6KsT9N09SwaoqipGkqqfoAAIqiyJ1loxIhlGdlHMfT6VQIsdBqckGLIs+LTGoWUM6IbmiWLRAWlJWM0TJlvGRCUEqJmCtbiFn1UK5pLteOEAKcq2DPFub8DR2eswbPIr5ZWw8h8txVOmuZyG1mwxdM47I5/K3bZaN9qk94Phr//MrvbJvhqmdZ7HlR6xnDm8Wo8FKzdXbGzzVCyhhCEAFYlqxIs0KnhJCKa7uu++DBvTgJf/Nb3/zG279xcnRYr9c1TTkdjTJK7YrjVmumZZeMjkajew8e2Lbt+wHEZ9T/mmkAWmJVWVnpyrYH4JwxppwT5CyvrGCEaJ5pisoEjOPYtu0OwYjziW9UHPP45NAwDD8KNdXQbaOk6XB6RAFeXr/yeBQ/3N5url3/vf/qH6w2lH/5//of7//iEy8Lsa1RAsbBBIN84+qV/8P/8X/4b/43v//b3/svNjY23n3no7/7u7/34MHDRs+5d+/etesbn376YDIZdbvdd955p9Np05K3l7t5VvZ6C4ZhaJqRJMnhwTE6J6e6d++ubVcIIdevX5XC4IZh9Pv9yWSiaZptO47jCCGKkmu6yTiY+FMBcrda1QIeJpOr1xe2T0ZJmgKeIa7GkZ+GQXNjw7Z1VdF9P6jX2lE0vX37NqWiUqnoui5XW6VSSbPqdDr2/XA6HX/rO6/leT6dTqXu1Tl5NC+KgpCz6VNFAXIinFJ6OjihJSeEACSIqiMEEACaplQdmyOcFwVI0rQoGC/OCFuwMrd0ZjO4s6xPyBUr7ZALgcHTkgnnF0mZnpsWzfYRz7KufF78eWG3L7bDC5Hg7LPmWyxPBd/OjQpjjOd95XO3C37sQjg6yzOFOE+EL4XOs1sLvFRoEkJwCDgXZVkKAUu7hBD6YXjz5s0sjbd2d9rNhmlbnud1Oq3JNDQtlxDk+/5w6mVZNplMsqzgII7TrNlsAoSyLEOQFDmVWk5lWQrGTNOoVqu2aRGEGCt93+92Oo7jVCs2QiiM02q92ak1/MGJPx22m/UCMkXFiqKsrVwL4lEeZrphjcfTx4NHx4NBc6HbaLf60/GdazeBSqChelE4CTyaJW3bchoLJaBxFPzDf/gP/+gPfyCY+spLX7l7916vu2w5jBCCIDEMs9PpKCrudruu69Tr9SiK2u12peJ8+OEvIYSNeivPc4nelLTi9XqtVqvdunXr4ODA87yl5YUgCCCEq6urEq1aluVgMLl9g5RlycuCgyyLPdOATU2/cXXxdDIIJoGtq4iV/aO+AtHtjQ2FTxYXV8qCIUR++tP3bKs6HEx0XbcsK8sy3/cluD8M4zTJTdOWA9O+75um2W4bsr6f56V0MpLQrSzOiCoRQpZONE0TAGiaYVZsh9WKsjRte+wHACPGBOQFFoxghIhGFY6edihmqxYCAATkQMilyyF8imgBc9DTeTuRswGSThqdb/J+8dxgbWYtF1zi5f7kvHU9d7t8/AtRLpmdEDzTSJKn+LlN0vnYev6eMSNcE+c6TfCckQ48z2vPF4TmD6UrhHMAITxjcRUwL4soicu8uPrWm6enx5988kmjVpUaRv3hKReqbdtpmg5GwyxLAAAY47X1jdFopJug1ekIITzPAwCkaWoYxmQ6NjW9Wq02m816vW4ZJoRQsHJw2lcUxTSMarWqa3qWJQUtSVlAxE1Ly/txiVAcx05ePT0ZJHlAQa4ZlbwcPHryeOSlrYU2V+CPfvLDn/3ojz755ce9WttwLAp4lIQaoF44BUxrVCpf/upbP/jBn967d29p4ZpttTgH0+l0fX398eNHlUpFVev7B9s3btwYjUae5z169Gh9/YplBZTSx5tbb7zxRqVSQRioGmk1W2tra48ePYqi6ODggHO+tbXV6/VUjTSbTV3XKaVpGt+9+7E/DlnBYi80VK0s0lH/ULXNTr0WZu4LGyuc7k29WEfqSq3RqNc7lqukiW1VVVXf2d7/7NNH6+sbhmHpuo4QCENfckxlWTYejyGEVzeu9fv98Xg8nU7b7bZTceWvXJas2WwmSTYcDqMoCsNY/sqUUsizaq2R53lWFpVKBUCc5BnR9HqrhTCCUBAEORYQAogFAZCx+UX/1A6Z4BCdiS6dCQc9uzIvL9HnesLnGszMU8086sxG5LjPcw3h+Rt/jtohAM8QoJFnEANn7Zm/3QjnW/PS0mQ6PjPCp/oWZQmeV7yakZ3K52fXTAVnXBCIsKIQQCjlSZJAATRFPTw5btVrnXZzOp1wWtTr1TDy949GWVH4vh9FIcY4iKMsy6pVx3YdkqnVajXPc7l0ZLw0HfmSQtNxbFqWcRLKNvrrb74RBeHp6WmSpq7rIoXoli0gXF1brLj2J589OJ1OOAOKouzvH968c3U0HaRlVKs2bt++UzzZe+/+I6aGt15uZ97IMAzHcQqac0HtimmamhCMENRo1LzheG1tzR9njx9v/db37uzvHXVWdM/zTNOMk5BzpqnGaf9YCKaq6pUrG6urq7u7+1/60pcG/VGa5kFw1G63R6PR1BvfNK5Pp9NutxsE/t7e/srKymAwcF13cak3Gg9URZcyiXWzWoTZwdbexnqz5bpZPIK0YHFY15U379yOvPgXB6eVhvPCyy+0ay0eRLV2y/fiTqfy859/WK3WVVW7c+eOpmkyBWWMqarCqIiimBBlZWVtc+tJURQQwmq1WqvWx+OxEAJCvLu7WxQ0DMM8z+UUtWmaQoiNK4vdbjdNcj8K3VpV07Q4zYmqYKJyCJIkHU8mXhjkeU6ZYIydnkbPLkB0vn7kWubnz3AAuLgk/ALOfRQhTznNZhV+earzOz81krkYdb6nhxCet4JfxbAvH/bCxxGpHyKtglEOISYEUPqUgu38bOTbhGTBgOcwvNkdQhI8AgAopUVRSA5fCKFyLm8ozjm85RtnsofgvMEqznuPEjQkmBAQqKqqqxpRFUzI6elpMJ1YtlGxLKKpJycnU2+cUZzmOUJIM8yyLDudjqZpZZlLrlvD0FhZtho1AECe50mSdFvdXq8n2RxHw2EQBAsLC2mafvjhh9VqtdlulWV5dHxsmrZuBJK2rNqovvTSS8d//cPxeHxTu7OyfOXRp4+RwoECNVVturWVRdYP0n6YRv6kaTu9ZjsK/bXOEuAMAaASJABzHPvDj37RqjZef/317/+//y1CzuMn206lUa06hmHJryUvEhky5Xkqv42yLI+Pj994/Uvr6+sLC0vvvvvu4kJ3aWmJcz4YDG7dvoEgQQg1m804ToSIkyTyfR8hJKsgo/HArOij40HrxmLqxXmYmETRFB2rRrVi+Rn71htfXm2shF7UMCvXF5fWlpZZcHLr1p1/+S/+1Q/++D/8t//tP3r/Z7/43/2jf7yzs3d6etputz3P++lPf7q5+bBarVYqlYcPHym6urKy8vjx448++qhRb
8p46ujopFKpEAJee+01QggtOedcesvj45293YOJ743H45JRy7J008YYE1VhHCRJsriyXBSFomqHuztxHGtW+3zCG3HOqaAAACAQwgicEWmflWfmSy+zuHFmJDOhMXGu6TBbhxdMbj6aA+dB3Aw0oigYPGve8BzZMxtBlGtYsiRr+tM5+Hm3KQ3kGSNECHF+ZmZfbNyXM0B4PtQ478HRpSneCzWl2V1E7iZzCTFHbM6ogEIICFIAGGN6tVaWZY6hUuBRniVxSGmhKJKNDY/Hw6IoGo0GpXQymRiqstDpagq2TR1Cg1KKIJBAtsFwQhTkuLamaRDCPM8fP37s+76u64ZViePU8wLXdevNFlbIcDx68+byLz+5ZzmV2y/c8f3w3t0HVzZW6tVWyZM4DSM/CLMUFpkOBcjjcHiKPMLzrGraTcdxdN1QiKVqFdNgtKzXq4KJ034/itMkzSeer6qO5OGXGUueF3lecgYgxJqmHR4eUsoVRRmPx5ubT8qSdbu9Ws2RpKkff3x3fX1d1+FPf/reCy+8ILWZLMtyHFv+lJJh6fT4sa3h9W5NdetIEF6UhGITmWFcsIx13PrG166CEsReoCJsUNpcvf6z937x/e//6//+v/8ffvjDH373u9/9/vf/9T/4B/9gc3NTCLG1tbW9va2qapqk3jS4fv26bmt7e3vT6fTVV191ner7778fx3G1Wh8MBmEY+75fliWCRELVfd+nLJtf4rTkvDxjTqlWqxXLrujmydRPo1jDuNbtTtMZ0b2AEMwGnQAAQvDzsI4DIOaC1V97uxypfcGTF8xBPpCsKNLZzkA2GGNB2cU3CgEAwHMBKZGKKzPACgDPcGNf+NTZ85ftUHZp5+808t+ZdsWFs39aPj3Pd88OiyCngjN2JuWNiDymqmu0yKUvjaJ4MplgDFuthmCo3z9ZWFhYXV3lgrl2pVZzw8ifjieclnmWWpalKDhJkmA6Kcuy2V2SFfw8SygtVFWdTqcyoLIsy3acwWBw0u8TVau41bUrG15wZNiV6zduPtk7+uu/+tHXvsFevHMn8Ee9Vmc0hb7vGxC27cpyozGd+mNvnEDV0PSeW207bkVRFSBUCDSMCBA119nbPtjc3AyT2I/inYMDy2kR7GiapusqQkDPVdma4YKORqPV1XVK6d7uwd27dw3D0DTd9/04jnVdf+edd7rd7ssvv/yjH/3oysbawsLC9vZ2GIaGYQAAKC3KsvQ8bzQaxaF6uHe003KrNm7UdShIFpUCpu2FFRgkcUphznRFdet1BSFdUf7kB3/67/7dv/vOt793/96nN67farU6v/M7v/uTn/zEMAxJJhDHsaZpACBVVTnnH330Ua/Xe+WVV3q9XppkUq1xZWXt/v37um7K2xxEghAiKSFP+4Ner3fz5m27UmGMSf2PMAw9LxhmQw4BLUuMkKkZkEvWWUEpo+WZaDTA4NwBivm1PcsMf93tuUv68/aEc3rA84VJMIcJmwHCznBg50rVF6xgHqFKJAwCISS58sEZYOZp1+EMjH5++3nu6V5w+uBZ/w4+56Zy+bsQQsivWABAMFZUVVd0NIfxgxBSzhVFsSsVBGUNIL9x40ZZlnEYrK6upmk8GQ9ffvnlvb2dyWiYRqFOcKXqqArGiKcpiMPg+Pg49L0sK4Ig4Jz3+31CyPr6+mA0PDw+ct3q1atX6/W6LAiJMGh1e3a9bdm/2Nt7b2nlIPRix3QQBRZW67pBMGwYJgAgDkOFlYaw3EqlZ1VdRTchMiDUEVIAUAnOkuSzzz579OSxgCBMk+3d/cWVa+MxBgAoKqEyaOM8y7IsTwzDkFXHjY0NTTPSNOec37hxI038/f39lZWVOI6fPNms1+v379+vVCqyPWDblkQ4AAA8D1uWwbh2Mjz5+BNqqOLO7Y2KpVHBC5+O/J1mZ7lRreVpAblAEB7v7zx++Nnf/OyT27df8Lzg5ZdfXltbq1Qqm5ubpmlOJpNf/vKXMmQYDoftdrvTWTo8PIqiSMo2QQgXFhZ0Xf/ggw/+/M//8stf/nK1WpU/epFTibiIoqjV7uYFvXvvQRAERVHIQbBud2Fxcbnb7kgl8Pv37+9v7cjBTlmeKGkua6EIQelpEIZn6R886xqeL7BfI0Obt4rPC/Fmz8AzSbIzK0BzIsGzHWQ8jM5lVL44opw9JjIWvbDHPKj84pmBZ56fWSa6JKN99ufchP6FS5pPUp/WgdiZ8CKfUXcDAITwPM/UNQEYpUXFMhcXF4syCwLvxrWbjx8/rlRsRXHu3f3YsoyNjY2tx49u3bp5Yqj9/gktU29aAiA0jJ1Go0RGGIamaUrpIkppGCUQwp+9//6NG7ekWj3n/OjoyDRs16l13KoAEGLy1a//xnAUPnm08x9/8B//m//6f5V4I0MnDcvBCaAEL7huvNB1NVVjtq7ojm03NLNqW25FrzuGo6tlmjzZfPThhx8OBr5VqZWUnQ4HI8+3rEXLsjRdBUBGXDAIzgg48jxP4qxer+d5SYhyfHwchmEcjQEArVYny7LNzc1vfOMbUjxYVRUt04QQnjfVNB0AIHniJnHqTb008xUVlKzodFpmxdYs5+B0kFCj1uBJFKeRlwaT3ScPHz246zauJ3H+0ouvttvtdrs7Ho9dt7K9vb23tyeJXgEAiqK4bg1jZTr1v/vd7wIApFc8PDzc29vjnP/O7/zOkydP6vXmeccil3NPZVmGcVypVBqNZqfTjeM4CIKTo5OToxMAwEeMVSyz2Wwu9HrLi4uMsTzPx4OpDNdnEdYswQGAnYWgUAAA4Fk4+p9ohM99ad5U5J8zZwDOvZ9czFJXFyEkXRpCSApFKucTueCc2+IsX5sPR2fWMosM590X+JU9IUKIzammgfOAEz1vygM82/SfN3g+tydjLAMFgQgCgMQ5SoJzRdeazSZlBQA8TZONjStREKoK/s53vsWKcjA8NW17f28HQ9BuNfIk8X2/yFNs27pmUwZs25aaoWVZTjw/L4swiJeWVlzXHQyGp4PhK6+8IrOav/rh33z7a7c13QJQuXLl6mtvvLGzs/s3f/k3q63WG6/dabWbnZp7PD6ZponQxJVuu1N1Se5CCFWEK5bdrruNaoXgktHk0aOHH3zw/vbWYyGAomgCgKyko/H45KTPGLNsEyEgBBuNRoPBwPMmkqzRsqzJxEuSxDAM3/cffrb5W7/1DSmTeuPGje3t7R/+8IdXr17t9/sLCwtydv7w8LDb7cnKDcY44rlQcQrY48ODsTd13Uqr0220F3THff/De0kWZ2kYTAaJPzQUXqtZnIPl5dVvfes7JydHw8HolVdf/tGPfvRnf/Zntm0vLi5KxNyLL77suu7x8fHS0tLW1tbp6anrujdv3lxcWMqybGdnZzLxbNuWNE2yNysl3IqimEyDOErLgkmbLPICQqyqhJe0Ua0tLi4iCIus8DyvUqmYpiFbAnL1I4TgWaGSF6WUsj2rjkIIf13zmy3d2eN5nzaPjp49ObMUmb7NFvYsHZtvQs6CVThnAjNDmD8ymbUKwFMn+0WcGQB+ruMW562LeRc3
M/3nxqWz/qTE6wghOORIIQRhCDGGCCGiIEww1oiiaYpCEIZAfgWWZa2srAhKgiDoLXQajUYSRrqhtpvN05Oj8Xi40G03mw2nYhm6qhHsuq5t23/yo/d93x8MBlLp8nQwTNN0MvEsq1Kr1Uy74nneZOKNx9M0TRcWFj57+OjK1Wu64RIOGvXWV9/66kc/e+8P/+0f0sRTv/b62nrP1UzGSoghxrioMK1oAQAwQpaqVyuWjsBoND493nvn3Z/cu38/yzLbUZjgEEOiqSPfT9NUCKGqKsaQsVLX9Xq9btvm3t6e7/tJkty4ccuyrOPjk1q1ceP6rTxPfvCDH3z729++f/+u69YopQ8fPnzrra+UZVmrKUmcCSFc18nzIsuySqWiWInlWCrkoMjHUeIn+TjKjX6o206YZhwwVQGs9AXNalWnt7xYc178O3/n7wjBoii5c+fWD3/4w3/2z/7Z1atX5OylJOGv1+tCCCDg4sLS3vHj27dvb2xsJEmyvb3d7/cVRel0ekIIxrgkFqlWqzJq9X1fMy1KKaUcAESIqiqKZCSheTGdTvf39sqybDdbk8mkf3I6nU4bt+9I2S94Rj9RzhdgZgtyzop+bU/4zHL9nJee+cRz5yHmhErBeUAnzguTMiiVStlfvJFZTw88Hbf9Ih89s+bLkeeFWYp5v/rc65n5dEl6L30jA0xRNE1RESIIQIwVXVE1VRVCYCAIhgTBsiynga/pzXa7vfV417T0KIqGpyeaprVbjcXFhS9/+cu7208whnmWjkIfA1Gr1bI8yYv0zp07w+Hw8PAQYyxDi4WFBdtyHj/e2tnfM3Sz2+1eu3bj8PBQMilyDpI4U1THsuyFhQXXcrIg+MW7P/wX//z/mYWjb//m13Rbg1AYWBEIlYIbik0QxggpCCoQRb6392T7088+uffxJ/vHUaWmVlTbC0pVgbqqx3Hc7/cdxzZNU1ERpQXn3DRN1634vn/jxo0kSYIg8jwPY2yZZhzHUTR4+eWXdV3f2tr61re+s7Oz8+Uvf3mWmAGQSVJjSqksOFlV21AVHSHMmSqQgnHJYJKxNEwUXSNEFaKoONWVhdWbG8uLvWav+Zbk1b969er3v//9e/fv/t7v/V6axr7vHx4elmVpmvbR0ZFcf1mWSR1lSRgnhEjTNE1TySk8nfplWZqm2Wp2FEUJw9D3fctpIYQMTbcsC0KYxkkSZ3mSFkVR5oXquqxM5SCibdudTiejdLbSGGOUczkQe7kW+sVFh8/b5nPCv3XPy4t/vtEtzkkDAQDSJRJCePH8wsz8RggHsGSgoJADLgADgmBVanHOducASKlSABCY4z6chZSzNuCs5jOrfFqKJq0RzY1Lzk56hpvheSmTwjTPVLWgigIA4JRBCDVNM1QtjmNVVTVVPZObREoYUQATQwNxNEYItdp1AMBgNDwd9B9tOqZpW1ZF0+pYMz0vmCZFl9V7vd6Cgxa7PVPTf/GLDxlj167e0HW9iPP15ZWDJ9sKUYswnp6OKpVKw3Z2Hm7S5UVdy7JkdHxwWq/X7Yp27YXrQe7/8pe//ItPHh/k6ObNm6urq7WaW9E0AIBmar7vjz0vjuPJZLK7u/vo0aOjo5PxCHBhT8YEIUyAAuNMFaGmCH9QUzYUR7dVlTBI8yROBbd0c2mxE0UTzrlhwDyH/cGx1MfOkrRZ16sV9bd+81tlQZvV2ulBX1W05eVlb+ClWdLptGge58nUsbBgQaPQVFY6FRsiHAc+wqBXdXAFj8d9XOLXXnuz013gCDY6XdO2sEKqLTVJaCHYJDhZudLLWRIm/vb27rWrN/KS5SVvd3sYK5ubm5RSRVFuvHRlaWnp8fZ2HEUY44k3zcvC96cPHtzTVdU2rSSYHoY+hFAn5LUXbwqkaJo2Go08b6CqKqMlRqDb7Z6ennIhNBMvrV4Nw1DL1FIUmqbdqLpFUYymXpDEUIA8yxhE1UYjyQoAEBAIMAYFRAArBGOMS5Ryzjk762ALhBjjjDHBhQIBRpBxSUgtIIQIQoFKAIC4hGyBT4WMoPzvrOCPz94Lz0QEzwyFi7OJLbm7AFQAIQAFc33FWU4HzqkTz4xQ1icUReEcQC4YE5TSgrIZ3f3l+8FzrfnCHWVmb7KJP/tzZoQz1MIF2IGiqUKIPM+FEIALIURRFAmIpKbXzP7l3de2bauuHh0dlSVzXdcwTMdxbctZXl5+8OAzSdq/uLh49epV3w/39vY+/fTTrJj87u/+7te+9jVFUX7845882vzs6sb1Gzdu+L4fhuHh4SFCqFKpWJaVpiml9PT0FGN89erV5ZWlVqtFCGKsFIKtrq7+7P13/+f/+Q+rVffmzZvr66u9Xq/ZbCZ5Eoah53lh5Pu+PxqNhoNxGMZu1SFYEwIWOeMMqaqpEFKW+f7+/sbGhlutQCgAEWmaJkkWx+HW9uNrN64RQo6OjtI0dR3H87yf/OSd3/nOV4+Ojr3p9PHmzvLyqoLJytIyxsSbThVF0VUtDqNHnz0cj4fedOrYlb3jsYqJ6zjLS4sV87ptGY5tQChM0+QQLK+v6aYhMLFchwlAVIXmdGdn5+4n9waDwXg81jSj1WoRQp48eSKJsGzbFgJ2Oh0IoRwKk97A87woisIwpJQeHx93u908TeXcY9VxJddzkiRPdva73a6maZJtZDweSzwqhHA6ncqCsLxHt1qtRqNx75f3HccxbKvWakKVHA+GByen/f5ptd6QSwYJDASEAHBOOecQw/l7/SwQO/sTPNW75vCMYuK56/zX3Walo/mkUVZrwFwl5XKKByTvKDwXIcMAUsplQfnzjHB2bZcrNBf+lEfmJbvwpHww60xe+JpkxfJsCBqcpbyUcZmgz2fGckg0DoMoSgEAQoAsy05PBhAO87y8c+fOw4cPP/30U1VV19fXF3qLL730wq1bN4Lo9Ec/+pu/+Zu/efXVV7/73d/85S8/3j/YlfGbaeqqSqI4HI2HktuGMZZlYnt7GwAgAJ9Op7Wa67ru7du3d3d3S/q667rT6eT09PTo6EAS3XJRyAhfCIEQxBhbhmabZlkwTTM4gyFLqWAYMlZmWVb0anVCiKropcgF55pmaJrmeWGr1UIASZ5Vx3GSOM2zouaKvb296dRbW7tSr1fX1tbiKEuSaDSaXLt2TbLIcA50Q1VUwjmXqteQi2q1euvmnZWlrmloGiGMl7ZrCwg0w0jyQiCYJNnx6YlC1CeffRaFsVTCkqoHp6enjIk4StfW1mq1epIku7u7nhc0Gg0AAPKZN51maer7/ng8lgO+0/Ek9H1d1y3HgRBKy5TJv2maiqJIpnMhhGEYEMJKpbK2tra/vz+dTiULhvzdB4OB41Qghr7vT0Kf6EZeUsexDctOs6LkDDAmECFIgQIwzigtVXyGgBEQ8fPqBhMAzFkghwiALzK+52ZPX7zN1vB8asY5n+8HXqjQzJ4neZ7Lm5mqqgQTQgAEZcn+9r6nmKvciGdHM+bDX3xpygk8heGhWUNydk5Zll3IIQkhCJ89D88rsVEU+b6PMRZ5Zhp2pVJp1FsQQsExIaTT6QwGA0V
RNjbWTdM2DMMPvKIoOOclDVZXV03TVBQ8mXgQAl3XsjzBGHc6raLIfD/M83Q8KQBAjUaDYMUPvP7gNC+ysixXV5dv374NIej1ei+9/ILv+++99+4nn3wSx3FRFOPx2DAkaz5RVdW2TckmDACaTjwhIC2BpqpZWpQlZ1SYBg6C4OTkpNVqKLpCWU5ZVhSFELDV7ERxIDFoqqrmeW6a5uLiYh4PbfuMpuX09DSKAjkemaZxWZZ5nqoagVDIFfzw4fbqxutFkUnlqVqtoavKGeaEM83QAUB5XgKMfD843D/UDYuWLMuyRqNh23a93oQQJknW6XRUVV1dWY+iSPorhEClUoEQcp4NBgPf9yEXGEDXdbvtDispFKLVarUaTc6553lREMrf2jSFJICSN744jgEARVHI+4WmaVmWycQ4SZK9vT1qFKZlIYwBgkIwVuR5nlJKEVFYyShlGBKocYIUDIWAnAMoIALwbF1SfiaGIgCSNRsOEbhU87ywdP8Ttpl6CpgLAy87KvC85JPMqjKzPWSZZC7oFGdXcXaSYmYk86Yyw6xdCC+fjnvN1XNlJHPZDQIA8iyVyoRCCEoZAEBVVYUoUpWNYIwQkhM9UIAwDF1DM00TAHJ8PJCGKimG6vW6hFD2+ydhGBJCVldXr11bj5Ix57wsS8Hh8vJip9N6/Hjr008/vXbtmqoRw9TyPFdVQikNQy/PY8epCyEURSnLMgi8MKwGQUBpIesKlUrl6tVrUm17OBw+efKk0bAoLeSQq6oSRcGc0zzPq7VKFCWMlYoK8pzFiQ8EsqzKxPM2n2xbTmVpqct4UZSZXN9pmkoM7XQ6VVVV0zTLsDEkSwsLYRD5fqgrqj8Z16uugqBVqRzu766trfU6a0WRTSaTg9290JuaGgrDsMyLwI88z/erDrPNim1qmtofDvSSCoymU09AGCUpLQGxlIWFhdFodHx8jBDpdrsY435/6LruwsJCFCabm5ue50uV76WlxbIsp0F/MhxJo1JVlVOW53mZ50tLS5qmyV/Ndd2KZQdB4Hme7H9KPlLGmCTRYoxtb283Gg3LsjjnhmGsra3FcRyGoe9NDNNUFcwgABghDIBgrMyFEKygJS05JBgCRQMAAYwhnaO3ZbMSI8RACA4RhwDIkQuAZnWOeV/yn2OH89tz1/bnbWS2CSHKomBMcPYrnYeYi4Bn+8/Z3nmvjz4jFAPnkLVg7spnm6xHS3i3YE8LvrZtE0JkYUbi7CSFScVYtJ2a67ppGpdMlnjK0cSLszQIgiiKyjKngqqKatiaU3OSbCphPRCJJI2iMFFVcv361a2t7V6vt7a2CtdhmmYnJyejcToaD6bT0LIMVSUY4zRNZemSECK5qJut+sbGxu3bt2XxsNFoKLiQM41B6CkKllzURVEYhhUGcRBE06kX+gFnBcGKpgJccZIkGY/H1aoNIGe80HUCOdQUJUkSAlGv3YEQjsfTlKWGphUFK4oiDP12uyvN+Ojo5OjogDGqKNgwtLLMkzTOsowQ4rpuzgVCiFMWRZE3DRACmqYACDVNS/OCcjYeTcMkLilPo5QQdZpGCBEIseu6tVotCCKJAjk5OeFnrIQwTZN+n8pSwng4Ojk5iaKo1WppiprneRyGMpLnlOU8l+m9/NVqtZofpaPRSN7UpCcsyzLLsuXlZdu2IYRSyVSqJiOEut0uRCgMgzDNDNsyTNtxKpppHh6dAAIQwBACDDnnJQIYAsEYRAgJiAQAZxSBECP4FMsFziwQzC/U/3zD488O5cFz2Pe8LXxeiEtkNogQopRleUEpx+isJ/MFnnD2eU+NbQ4jOl8LlTetmfOcvQTmzHj+G4ECzSZH5IyixLmbpgnO+xkSdM5KWhTFg/ufTcbe8vKyW62Ypk1pIb8RBAkhxDR1IbQkiZIk2tvbo5TG0WRpaUkIsbm5CSG8du1avVHd3toVggnBGo2abdu+76uqYlnm6enpdBJRSkejEUKoKIokibIsWVxcrFaraZoGfgTPZ8yWlpZu3LiRBKcAgDRNoyhQFNxoNCzb4JxHYQIhPDnpf/LxgzLLLUPTNZMQcnRYTCaTvb09zgvLNqq1CoF85E+XV3pxHIdhUHOrCCF/6uV57tiVTqPmjb0sSQenfdu2gRAYAoxEwWj/9GTQP5WORcGIFuWwP20tLAjCMMZZlkVZZGZqGJKcTmzbnvoeAEi2SYFAURzHccJoKvWqgiAaDEZlWdZqNcdxNjc3u52FdrutaVq/3/d9jxDMOR+PjvI8B5wrGOuqmkKIEDIMA4ozYighRJ7niqJUq1VN06I4l2mIrusyQpHez7IsmRkJIbIsk0KurutOvSGCpGDUtoyFxZ5pO1ESx0lmaSrXVcHPOPMpZSUrGGMM2QAADKGY08FG5zT1f+v2n+wM4RyaR65zcd5RBM/OWzwnHOVzrBtlWTImsKrIm/cXn+uFY81GM+D5SKK0bZGXYq6JL/vy0o9d8KLwfBJKsi0ihBQ8i0vPtllOyDmnRZnned1t5Hk5Gk2EEISQvEgxhpZl7e3tWbbR63Wq1epoNNjc3Nzd3T05Pb62sXp4eGjb9q1btzjncojh+o2rQeidnJwM3ju1LadSqfR6PcexAeCGbkMIJf44SZLBYLC9va0oiiT2RAjFcSwBkLpmpEm+0FsyDEMAFoY+Y8yyDEXFlFLLspxKlRBy9+7dLI8VoikqTJI4SQrJ+KbrRFE77caqZRlxEhBCVpYWj4/h0dGRRpRuqwkhFpTlacYZbdTrURRhBMbDwVlHWJRFnsqalq7rFdvWNU1wkMYhQijN4igKk6SSOTaL/CAIHj3ePD09tStuWbKJFxBCwjDyfR9AtrKysrd3kGVZq9WSSdrDhw/7/X5RFEmSMMZ0Q7NtuyiKzc1N28ASHGMZJiEEAYgQ0hSVc+77viw3OI5TrVYRQp7nmZar6zpCSGZ9cviGMWbb9mg0AgDIzv5kMpmpNRe0zPNcFnVqbqUsy7AMTF1HCGFMKGdZliUsKYq8LEuhm9IYEARM+gN4tlbROYkgBHzmDP/W+uKvuM0a3TPsqFztn2f88+ZzNkSDMcZYUEoJUXVdT7IcnutjQCQAQjPwmjSh+dbCzCCl7YE5zdT5aFs8C02QG3+Wq4afz3fJ90IAZ/cV2eqY/TAzp1pSzgT3Ap9y5rqVSqVimjpR0GA08E8CAKFbq66uX6k16v1+fzgcAoH6p8OD4qjT6aysrPR6PTkAKTFfjuPohnpyekQUJAXrb926dXp6Oh6P5WC+FPTNskwKJzmOs7S0BACQyvL1ej3L1DwvEEIQKrquQYhoyRln7Xb78eaW3G0ymTiVaq/X8zyPAwggz7IkTsIktU5PTxEW/cEJEGWWZVHgs7IQBGOECMFFUQxP+0Wec86btVql4jSqtTAMoWCAKxgyhFHBKCtyQ9V67U4axaMwKcsSIVGxdFVDELKClsfHx2EcRVFk6BNV1bOsiMIYEgwAeu3VV588eUKwevXq0vLyMmPs6OhoNBrJezQhxPO88XgsDa/eqAKaDwaD80DdPT09VVV1eXlZiuEcHBzI8u
+WrP/JeSvXHFDqqiCudlZjo23g4otaAF+du//dtPPvlkuVzOihJ/6tOnTzerdVFm5e7mB15ZltiedXgwKIqiWasnSZqlPE3KKIosU5eShWGqlCKEtdtd27TSLO73+2UhNWZoSouStOTCMKyiKDarMGjaV2/emqbZ7/am07ltWZ129/r6erMKPcdVgrSbHSXI7e1tHMab1WYynjYbrShMlWSlFLPZSimBq9DzHSF2K98MA7vchsNhq9XIssT3XSnlyckJpRQ80vv7W0qbZZl3u+2yLB3Hur6+evz4ERwA+v0+pm1gY+FqgAsBxuWaZkgpTdvCQR5F0dHh4cXFhW3blKnr6+ter7MlfNgO53yz2QzvH5LEXi7Ck5Ozv/u7v/v888891+ech2FRlnkYrYXgjJEwDJlGlFKHhwe2VXv9+jVUrW/evDFNs9PpYKgwn89RtmFXdq/XS9O01+udnJxcXl5Op9NWq4W1MHme53kOOAevBNt+arXa+cXjdrOVZnEcx1mSxnEsynKzWTm2VZblYrHgnJcajaKo2+1OxxNBLEoppgtkJ2rHEBLmVJjjQxcuhHDtLdcCjHZML9M0pRqD3jJo1G3bBpYLD3iYdHDOwVwDGIudGWDzvpcJ9/NVFRsffIFNRtVN29ud9EHNSd6HW/eDs+oVq8Rb/W7FTkD/VwUhbnSnvzQMQ5QcHMI0y8IwtB3ns88+O310dnd39+tf/eYv/uIvfv+7362Xm4tH55+9+DRcr7///vuzk1PHqYEuZJi67/tUESysB3WD5wUWGGqaFkcR8f3u4HATroqicBzP8wJR8ulkXhbi5PhQSpXnBerAVssjkuZ5OZnMfN+fTqd5noPIcnV1ZVmOZakgqK9WK6yCieOk2+3puu55dcb08XhaqzWKInvz5k2v15OSfPPNNxdPzsuylPKdA93x8bHrugcHB5hJ6rr+/PlzrEa5vr5eLBZPnz4FYDOdTrEUAcWP4zhJkgCTbDQaSIme5/V7A0rpdDrFBKLX6zlCWI4dh5HjOIZp/tVf/a9v37zWdDqdTk1TZ4w26w0gq2Ve9Hq9V6+Ws9ms1WoJIWaz2ePHj2/vbsJwbRiakKUQXNO08WQYx/H9/W273by/exNFEezlcRwsl0s4AhNC0MgBb4uiaLlcekGNMYadSrPZrN1uHx8fv3r1qpJrSilvbm5Wq5Vpmp7nbfL8YTSUXKAlBnmlXg/yLP3222+zOCry3HEs0zSzJD05OXl9PcRKXXhbVTJiLC+B3wKYt3BIytMY7DmkH3CPpJS+7zmOQymFpBs1s+d5wJ/Vzj20Cpkvv/wS7HnMxt91aHJPOaHtrF/IXn9ICNGZodSWUbYtO3dw5XbSKGHGxpQgQknNMqiUGmF0B5wKriSVmkbVbljJyFY6KKUUrrZT0BNFKN3iMqRRb6Bf94M604wwSiiljWYrz/M0589ffP7ll18+PDz8H3/971zb+d/+6n/57W9/65l297zlOPZ8MSWEtPpNachws4zjWCmla2o2Lcqy9AO71a7VW36UrheLhWmazKbrdGV7tuEbV5d/fPr0aRzzZt3UWS5IuViMGOOue7FaLe/v723bef78+cPDg1JqvV63eMM0vSgaep53eXl3dHS0WsetljUaTwGpfffDt57nFTwTqrRt+/io57mWFM7V1RUMLxazOS9Eu9m7fTvq9/tJnNu27Tudg+6jxWLRbvcuL6+fPn2KtHZ3N2SM9fuH63V0dmbf3Nw/fvx4s9nUavy3v/3D8fHxbLmazBdCiG+++/6TTz755rtvYZAzW85N01yGC8/zjh8dwVC03W7HcUgpdxpmr+WEy4fbt1YUhY7jXDx5dHV1tV5tbNt+9OSJ67qvLq/DMHZ9pWjy8vUfWm3vT9/+/he/+MXvfv+Pz58/H4/HR72Tu7u70XD5ySeftJvp/e3o4pyEm2Q6XazXa9j1Usvkko4m86wUpaLdwVFeluXdwzrNv7982263R6MJEO/1OrQsqyzFmzdXpmmHYbjZRJSOKaVK0SCoC6Fub+91x9hsNv1+P/Dt0WgUOL0g8K6vr03dKMuy1WrpjOGSzrIsSkulFMbu6OKAZAJHLcsSNpYIIaQBzlSeRkqn6yyOeU4ZpaZm6vYmDpM85ZwfHR2dnp6A+zadTmku83XChOJJbhOjzMrrHy4PDg5kUtrE0HSS57t12eTH5gQfNH67MpXu32H/nj/6nYqlUZEBKNm616gde1vt5UmAUVUyZFujDHX59u2Tx48ty7q5eksphb0vFOtffvmlZZr/9b/+1zAMP/nkE8eyv//++6IovJqDAaa7VwP4jgllDQabOC8x5WOMtVotVCB43xeLheOam3C5XC6Xy3m9Hriu22rVgsBbrqYlzylTSRqORsP7+zuUhZqp+v2+41pZniyWM9PSh6P7+WIahiFEVZZlFWXmerYiIssTSrTZbBLHcRiuKSW2bZZlrpRcLueu6/q+m+epEKVp6kkSTSajg6PPANyNx+N6vf6P//iPYRjCNRTGnoyxy8vLp0+f5nn+u9/97nmRYRlLvV5XShmGgcMb5RDZ8Y3QH87n8263PRqNgKm22+16vZmm+Xg8tiyr2+32ewfoIWezGWrgNMpRa8VxfH5+/vLly1qthkwSBEG1GhEjk8Vi4bouMg+KQMO2CCFQ/SA9sp2zPdwrw+Wq2+2apnl4eLhcLi8vL4+PjweDwc3NzXq9Bq4LA2w8Jnow6BVN00zTdDabZVlmBSZEEtbOVRATiG63ixeAtIlhia7rMDKEZAmFKH693qmz3VZpuqN5Sil/9rOfgVyOp4OXXKPRuPruDcIHbBsYGsHTiDEGexS9ipaqINwvSvf7wyqE9uOtusn3FUnVYwrYtwn5LpIJoZAj7TCb6oGUUlyW+79eVbjnZ2dXV1dCqfPjk1qtBu/08/PzTqcjpZxMJnB5qSp4WAzAuZkxhrUQx8fHpsYty1osFmBvwrgFvwVcDsYzSqlms9lqtYKamSQJ1STR+GI94arBVbEKZ6XIKaVZEa1W64KnsI0pimI0Kut13zBYUaRFkWZZnOdJHG9M06zVvDRNXdcSgjcagRAFjIxm8yEhxLRovV4/GLSn0ylT6vCou1qtuEgfX5wQQkajUZaHp2eD+/v7zWZDCAHxGrpYuBhiYZBt27e3t48ePcLU+9WrV5RSz/Ngb6OUSpKkXq8/e/YMC70wtQPgvlqt0jQGCbPRaOm6Ph6PHceBveejs3MU7cPhcD6fT6fTfr9fFJnnOUKo8/OzJ08e397etloNzstGo0cIcV3bsixCpGUZvu8mSeR5XczWtr5eGiOE5Hnu+h5MFl3fxyGoadpyubR1A+NQWGmt1+ujoyNYVKxWK1zxIOUBGsGkAaS8drvt2U69Xn/8+PF0PCl2y94RdSgvqWYAnYI/Nb7Wdf3s7Ay8cKxwYrBKSZKUp+CyAqGsxtevX7+GpxMGQnhti8XipHuEPITjHtQfpdRsNsMzGoah70fgx3G4D5kgzBhl++H3cQB/8MU2j4t3kw9V9Yq7e7L9lS/vr0yjuy0U0+m0Xq/
DNwkd/OHh4cnJyWq1evPmTZ5lEKRtNpssSWG0yvMCHycMPxqNxsnJyXxyC9QLD4tGAkqTOI6hcENx32w2B4MB0xOp8v5BS5EiDOMoXpY8SRJeFInruiVP02xj2azSE+Z5GidrqQrT0jrdRrMVcNHGWOX45GCxWHielyRJr9fNsiyMlorkRRnV63XXa3a73VarESdLxhg4K5QK22GMMdthQqaO6y1WOWbBwN/wlq5Wq3q9jvXu0L+u12sp5enpKZeyLEvTsGzLWS5WvhfMpvPNOvz000/hHJMkSZpkRFGN6VEYF4WBOQekepPJ5Pnz50KI1XJNKR2Px+PxGKo/4HuiFEDCBoNBkiT9fh9eZsA20AXN53POOcAYvMIkScD1ofo2a5Vl2e/3cTICe4N70tnpWRRFeDQImpCmcGrgsgSVbLvvqeaCUApaQhJGSZKcnZ2B2qbrumUYSikEYZIkab71pEYFhLGBrus4sJCN8VCoqjRbp5QJIZUq1dagUIciCnvXwGjVdQPzWCiw8RTYeAFhJPI20ux7mbCqS6sESN8PCSkl0Bu2J3v/OIA/eMx9wKa6A2OM7IwS93kwTN8ybLZSQ7VNsMBgNE1Lk7Tdbl9cXFBKsfAM8HRF5VFKodw6PDhgjI3HY16Wp6eng8FgOp2uFgs8IBjAsFfFuGk0GsVxjIXGOLHSNFVsxUXWbDal7JRlzhhtNuuAxfzA0w1NN2i71XEcq1YL4jjudpumqXNeeJ5Tq/m2bSslkGSCwCuKzHFsTaOdTivLsjgOLVtvtmr1es0wjFrNMy2t3WlYluX59ld/9sVoNBqN7gkhjWZjuVz+/g+/se02qizTNL/++mus4xwOh0+fPp3NZm/fvr25ucHsC06HXhBg1TYGVo8ePfrDH/6wWCwAmuOUxMS52WzCQlfTtMVisVwu2+22bTmtZns8mjx79mwwGIBvBT2ulHKz2Xi2h9HC69ev8zw/Pz9XSnmeB94sGipgUfgaK4crv6Mt0E1pWZYgi8+XSxyOIMegigNfdDgcttvtbfRSCu5Yde1xzouiCFo1dMsYr2dpgimIbVqoM/chD1CoMUureJTQJSIU8VPbtuFsr5RahEs8EXKgZVnYWzidTkFbw0GD+lYpxbnwfb9izMPzW0p5fHxciX3fy4T7QfJB5FR9HaGyiqL9+3xwf/JBq/kRGQ5VKHl/aKGUEuTHVcWHh4fYtvX5i0+Pj483mw1WDiA9WqYJel4URUQqePvDgpYQ0mq1sCBtOp06pgKUXNHhweXnnNdqNSyvAniIQBVSosdwXAvFm67rQVCHSQznknNeMTmVUhcXF2AJ4ngmhOCJMMXCB2NZFhoSIQSeER85qql+v49xWb1ex3Cl+kRN0xRCgKHW6XTm8/njx4+BEyLMTNMcjUZYKA9e1WQyAf3ScRwEAwbfOPtxt16vB27XYDBAZwHk/dNPP6WUWpaFMx5ygVartVwukW9RvIVh+NOf/vS7777D4pdarYZJAKxx8GfiAgUiCoC92WzWajWqa8gGs9EckzcEuRACDhebzQbTlNlsBufSJEnw+VJKTdPMdzfM/W9ubur1OiEE4KSp6ajgHh4eoGNQQqCeRPAouiUMgDNAKcXRgLJos9lAYITGD68QoGhlgYEmCFQHQggeDW1qGIY1J5CUlFJkZZHzUhClG7pGaVrkcZqsozAMw3fAzH4m/CAg94NQEbE/fvg4YD6I4S13lLxTD6IF3C90q6dWO8UGqSaNO73SYrGAL60oym+++Qaek7AAQtmDiiWKIkPTYcuNhanHx8eB78NUE4d91TpiC5W2cx8PgoAQAhCGUroFaXSfc6mIrul2s91RSsVRKiRpd/pKqTRNNd0yLVdRRjVmux5jpmVpppmnaWrb1LIs163lec6YyTmxbR/igCQpGGN5LvK87Pf7i8XCNLdiv9PTWpJky+VS102laL8/WC6XWVYwpn/55VfTafTkyZPZbGYYxvn5Od5e7AbGZjz0eGhxDw8PuZTNZvPZs2fwAr++vp7P5+jKHMdZrVaoJEGb7Pf7cN02TVsIZRjWZrOZTGau67969QOuKgx4TNPs9jrT6TQJ46Dm+YH76WfPm83mN998o+n05vbedV2mkZLnuHaxRACnTFmWkC/5vh8mMfyUCCFXV1e+7wul0By+ffu21Wppiui6PhwOUbngOMOYju3tBcMoQtf1QpXgciRJslwuRVFCxGybFmaA6I/AKbVtO0qWbLeTE2HJGIPSBd0mwkzX9SrxIH/iCEBhqZQCI3S1WgF6QXWGF4w3DY8AHm+73R4Oh6iHi6L4kUz4cQTuJzelZJW4Pw6/DxAdpRRjKC/fEXEQhFJuTdLUBzf6jiVHCNG07QP+7Ks/Wy6Xb9++zeIExxgaCciu8yy7vb3dYdDaeDxGjY7VnCBegoaLfLXVzgqBK0DTNDjSwlqr1Wo9fvxY07TxeNzsOESVZSE1Znfa9bIsw/V9EpetphvHaRQWnHON2UlcBkHQbPjz2cZ1XaKMJN74HqnXAkN31+v1bLq2rcxxHKIM32smSeI6ruc2RsNpq9lN4tzQ7Tzj08miXmuNx+PZbHZ0eOrY2mw2u357V6vVNGZqzIThClySWq3WYrHA5Yj1w0VRnJ2dIc5x1pSlmM3GpmnHcex5wcXFU8fxxHYdHU2SjDHmOF6aput1WK83oyiJ49T3/STJlKKTyeyHH77DzobJZAJZY1mWrXbz4OBgs9lQSUDv/OKLL/Da8PIAuSGn9ft9IMO2bTOqAzuxLAv1Ht7/zz///PLy0nEc1/dxuk0mS8/zbN2AAzpQqLu7u0ajUZYlRAwY1kFXAeMMpW9Nn5BRDcdFCapRBpp7tNkgaHEZAHRBAsSyKogqdF3HX4qhPN+thxBcSUEo0XRdd2zPtlyNGVKoq5trrAPYNorMYFSnRENKL4oC+g+U8Shizd2KoXdBWMGbdNf1iT0DGBTulFKNvaOtbaNmj3Czn8QwlEzLnBBC1XtJdRvAO6iG7bFkdEOXUqJvVkrV/aDf77fbbRALkyTB0ILthFRSyvl8zssSLMqyLCUVkIRW6mTPdcFXZIyVZY6AhLESYO5vv/325OQEJGBs+cCFIoQQXCtynme5bdt5JotCdNoDwzDms8i27U570G53b29va0FnuVw2Gg3XcjRmM1qulvHhwImjQggRbjJe0rdX9+122zCMX//q951O5+TE63YOpzM5n22WiyiOijRNn1y8GA1Ho9GMEPLyh6sgCGazZZGrOCps2758c3t4eIhF8P1+v9FoYCvQxcXFs2fPvv/++1//+tfPnz/HdoSiKLAQt9/vYy53fX2NYtgwDCz6Mgzj5z//uRDij3/8IwyzdW24XC4p0TrtXriJ4zg+P7+Yz6etVgs7QDVNcz1HKQU8NlxtcBS+fv0aBx/nHP/2er3b21u0bZizTyaTPCsNw3j69Cnn/Pz8fPTLCVI3fDHKsuRSQkF/eNidTqd1z4/jmBDiOM5kMmm1Wu12+/7+/urqCppDuMWhiXh4eKi168irqGtEUTYajTiO18sVrnv0csvlEv
W8ZVmz2ezRo0eYsoBJc3d3B12Y4zhIjABslFLAC+hOdYTeEjLI2Wzm+z5aCcTeer0mUjqOYzl2vdkwbYtqrNVux2mim0YYRxBkvQNm6Pvyv/3MRveat/3v/P/efjTB4sYYo3tVKNlR4fI4xRHVbrdrtZptmEIIbGvYWg9YdqVjlDunRrXTEFNKGWWU0na77ZgW6Py8LDGcNQwjixdgFcODAH0a8LrFYhEEASp7bE3Tdd20g+U64ZwbZlByuVqlrusdHB6G8ds0l0kSpbmcTNeEMM9vtdr9IkmTuIijnChdcKqkzLI8CjPBqeB0uQgPDw+LXNqWz0timZ6u2etVrDFrPlt1Oh3TcDvtAyW1JEke7ieWtXZd17EDJZXgVNdsYBJRFAFuAevi9evXp6eneZ5jbzF26y6Xy3q9LpTGGOt2u8PhsNFojUaTbrcbx/Mvv/zKtu3lcvny5et2u/3s2YskSf7tv/13n336xWq11jSdMe3+/l7XzE6nXREdTVOXUgrJlVKo08x2K4oiz3OlFPf3d6j0iiKXUobh5uCgH8dxrRas16s4juI4yjPOOUfSE0JUBvtAvwkh+KNAswQU1+l00GHGcRyGoRDC87zxeHx3dwfTGtiWojFDV4IMWRSFLDkOdMwe1+v14eGhUmoymaCLZroJ5o1t29fX1wBIa7Xa27dv8fJwVFW+UpbjGYal66XjuLVaw7bdOI5Xq81u/sEcZzvZLwqe52XgOWznsIoFYd1uF4U9qOdKKX2/qvwgwOieFp7u1vcQ9d6yiA+KUrWnCZb/XXUi0FHyUTmKjwRWJaZpllmOJVKmbpimqWmaZ291A8BFP0jgVbmLzAxqvLbL5HDIwk/BVkP3WA1kUaUAQEfyTC5vUNAzuuGch2FclnIymZWF4rwMN6kULE1z27Y5F0lSEM7RNYFyAXMNFCGcczwUcuxkMjFNM8/L9Xr9+eef397eP3nyyXodNptNIdTRkXN5eSmEME1b100URZgQ4l3FNhLMpkejUavVsm370aNHzWYTmm7UbOPpEo0QCj+M2nVdn81mlmV99tlnl5eX8/n89va21WodHx9DamxZDiFsPJ4MBgPX8RljlGq1Ws333SzLsjxVSrmuyxhxTAfjnzRNHx4eut2u3FlCZFmGRYX4mKAh7rT7Ukq0r1j/hFeFfSFFUYxGI4QN2vJ6vQ5vC4gScFifnJzc3d3NZrPNJnJd29/dgiC4G99jsy/+ZCK2rtbMMOH1cnhwgF1RWIxr2w6AFjDgwOFWSoFWRXb7nrePRohm2bVaDa8EPtG2bR8eHoL1ho9YKVXJaNnOJ0pKCZ+OWq0GgTI460mS6B8P2fcrzP0acssRVXsE638i46k90OWfCkIpJdkT4LMdmch2HcBTIA3FmxCzJkyBGGOWYeJNwbB1axb+EYaU57lkHNcuNYyqfA03S0xE8GahQcewFbDNdDqFcQiQxkLQZrPJmHZ9fS2EAFY5mUwM3cLHgJmkaZoYnTUDF0CRrmtRtAEYqJRwXUeI0rIMSlWjUQvDNT7mxXI6GAzCMHzx4sWf/vSnn/zkJ69evTo6OrJtu9fr4XLMsqzRaKDvD8MQi9rhHo8KEAEA6g98hNBH0d3Wx+FwqGkalntiDkEp/earxkMAADSASURBVPbbb3u9nq7r2P4nhDBN03ebgquyEIwxw7DKsry7u8uz0vMUUk1lDgRLVUIIIBagWbiIISMACo2cjKGfYRhxHPu+f3p6aprmw8MD/Lan06lQcr1e76jkGorGWq2GdYVYxgpd8u3tbZ7n/X5/OBw2GjUEKmBYlAl5nmPeyxjT6VaOFIahpmmY3MxmM6DNRVH4tQasa6o+DdaGMIACUKx2mnK8EsMw2u32zggrA0gDO2PEv9qtBDVNk0vpmqZNiGYYhDEu5WK1enN1BdnkaDJJ01QXe4a/+4BnlVcQb/LHVLn731E78LOKq+33K/bnu197LwgZY4puxU34Oa48/D1ESDi3F1m+fVW7lTI40fdfzPZJ5a64JXR7xey0+ZqmlVmEAMNprZRC3QJvvCAIqnkx7lOkWV5meZ5PZ1NKqW7qpSjjOGZMI4QkcUY1ommaYTUKnq43YZ7MMQSr1WrrzYwQQhn1fLPdbkiVe54XRotG07u9vcXBX5TJJly0O/XFcuG4xmw+0nSVpJtNuNhqi7I4juPFkjVUw3GNakaHTw2RVqvVxuNxJagROxfJPM8p0aQgUhDLNAO/XgsalmkrSV3PO+gf+l5tPJr6vn90eJKm6Tdff/uXf/G/C6Fub681TcPIYTi8DwK/KNws02zbrDyEgIjEcdjptHSdFUXW73dd1767u8MvYl5qmvr19egnP/lsNBo5jvVwPwU8OJ/PdV1fzFcnJyfdbrcUHFc/AIhqhIOFVpAgdzoddFzAxgkhGN8DyNlsNug+it3ePsaYVFtHIvwvIhkcNOhuAZkCjwGpqHLmrdAKXGk4u4VQaRqZpt1stlutDhakPTw81Ot1xnRCSkqZELwsha4rQhikmBiTwoe7LMuHhwesfMOcZhuEVbxV4cT2RL2oZHYX+4/vpt/vJ98LTvYu8PaDUO2USlW4boEfTQMAAxDJdk2EJQZxZLdRAHMFBKHaU/orpQACJUliMG07jTAMvttz2mvXtZ1BCIp+PHu73ea7ZWb4SPAeFWRSlLFlm59+9pQxbbPZxPFSERrUPF7KzWY5md4LIYQ8KnnGNJFkK8aYwRzN4KvN3Pd923FsWzNtZVhSM/h4PD48PCxF1Gg0TG74tQPGWJpt8iI6GPTRzcZxLJUgvBSCO65OqDmZ3sfJ6uTkZBMmhmFgLAZeVXVq4DQpiqLT6eBszrKMUl0IcX5+nqYpBsSGYQyHQ9u2sTsByvrZbOa6brPZvLx8m2XJ3d2DrrN6/VNoZ7vdTp7nnBeUKlyLZZmj1OecY/EtmjdcbUiMKOxRJ8NkLc9zKTSMT0H7xDr7R48eXV2/xbJUdGVoDWAJ43nexcUFsMRqhn57ezuZTNBcoDaGFBD7xfTdSm2MAaB+BrCEq//u7g7jB+zcBtiTpulkMoFfKJrSqnfA0YNhMnx6wG7FcwGIIoSgGMHpCdS60ajXajUQlbG1Al3Sy5cvqwnHNggRcvsZbD8mxc7QjlJKiUb+u64w1f9uUyi+uReE+Bq9m/roBmSJEILhtZQSosl2s7UFYNS7cWL1CuW+kzdlIHYJul23yPb+wHLnbwcYwNyt3QOYAbwL7y/OV2ZI29Db7eZgMBBCkHtRcsziqes6RennebFabUoe2w7z/EBTOt5PzzNn88KymWlRRYo4WVk2o4xruiS0bLZ837fxgp8/f/6f//N//pf/8l/+8pe//PLLL1erlWVrhDLPD8A7qTfcOFlF8ZKyAV75er0uyxKzKYyq0QOj5Ts+Pp7P5xhgWLYLClhV5KMoaLfbUsp//Md/rK57gGGj4Qy1XByn6OK63a7neUkaxXGm63qr1fA8LwwxjOAw8Ib+ixCS5zmWH4LFBgVdp9NBLcoYs+1cKfXdd9+9efOGc76Jt4UJnAKbzWaSZdh81mw25
/O5pkiSJOijgAVEUfT27dvpdLrZhK1WixACpeVisRiNRjiesCt3vV6DtsY5bzWalNIKjcNvlWW5DuNarYYzGgw1jDH1nXB8H/ZTSimxbXaw4RSHHQgMEP6CIAr0wfM8QjggVvw6WpVyZ0OBzKxXhc3+nKBq0sjOkxsVCKbn5CPg9Edve0nuRzLhvlRK7Q1I5tMF6ml8ohkXwGmASaDKxxtEd/IrKWWl36/4N47jUKn274M/AQvu8OtorNEb1Ov1wWAALwkMADRNm0wmrmcFQWDb1mw+zrOCMdXtNoVQNzc3R0cn9UaglGKaajYb0MK4ZolBGQa8lqUxJrMsk9IAaH542HNdp1ZzkZkJYZwXf/EX//Pd3d2f/dlP/+Zv/uYv//IvX79+zTk/PT0VopxOJ91ut9GobTabNI2VssE+JYQ0m816vQ6EBmoA27b7/f7FxcUPP/zguu7bt29X6ywIgtVq5fs+xt/oyhBa8G5DtKB/xu7hZrN+d3cHpOH4+BDvHg5y0zQJ9bIswTdrtRr6Xsdx0EFgoHd2dvbNN9+AV+R53v39PYg+yJ/T6bRKC69evbJtu+Blu90+ODiYzudSykajAQu5Ikkx4J5Opxi1qZ2boOPYqGgw+gcFDNyAer0OQs9iOkN1KoTANP/t27fw9oVZVl5wQgj0n+v1utForFYrSikip6q6wcw2DCMtJQ7xwWDg+37VgsIAju/cij3POzg46PV6b9++3jKuHAf0LFBE0I1rMOemTN8mJUIxusN/QpIkTZBVFWFCEqUIZQwe2Eh2iKh35evu+5RSyii6JiJKQray3G1kEkIIKbiglFKNUko5kVwWSihCCAAYHNhVrGLMQLeuNu97CmNvzM4IhFIqiVJKKkqURhljSmNSZ8TUNSwJlaZhGGmqhMg0jRal0jTt+OTM8xw/cBljtmtacyPLEqF4t9+kVLeo7ZuB23Axk8RGsW6jX3frGByfDk5hoPTmzRu33YuiqEjWqzBpt09gsdVstDRNc23nzZs3FxcXUkqDabyM7h8eXvz0RZbzMkqTlM/mm6dPf6LrtfPzz/7jf/x/wg0pCm6a9eWCHB6e5+nEd08vr/84HN3863/9r//2//27NI0ajWCzWf7iF//Tcrn84x+/Nk0TC7fvbh9OT0//2f/wP/6f/+H/3oQrz/Nsx2x3mo5jnz8+e3h4MC19Mh09f/HJr3/9a8Mw6o3g/uHW853lctpoNOJ0ZNp5p+fEqa1I1u32nFj7Z//sZ5eXl9gV4Thev98PguCHb38Yj8effvrp7e3tfD4/PDy8vRk9efIkz6Tr1A/6x8PhkChD1xzfa87nc8t1er1eccUn81m73QZi2Wq1yrIM15vlfNFvd7KT0ySKV7P5oNu7ebh3a8EqCm3fe/32CkdzmqYHwcHJ+SPLcx3X1W3r8uZ6PB4vNuvBwdHt7W1ZiE6753u1rOCbKHn69GmSps1WW1BW7/byPJe6YXj+aac7HA7vRsOiKBaLxTrcSKJQPaJu1zSNKaJTZmq6bZimac7ml5Zech4nIfXsrpCyzLK6b1MpeMCKgjiObts+ISQJRw/5QuVespK6ro2miyjeaBrlqSBcdZsN13MYI0WRvdcTvldGSokyd39KQfesCj+oQquEtn9n+j49bf8L9f7aw+pHbO9lVJFeJerqR9XL0N7fpbGfVPefC4+gaRr2EKLWNQwLXU0cxwcHvSxPtN2ChIeHO6VUu90WYrso7u7ubj6fQ/NWlmWn0wEHEt2CZVmDweCzzz67vrtJkkxK4vs1TdOIkEopypjreLZtHwyODMNar9fKskzHffLs+Xq9Pjw8pjRnjCVxBl+J29vbp0+f+l4tiqKi4CjI0yxJ0hin+Hw+Pzo6Wq1Wd3d3jx8/uby8fPLkyaNHjzqdznfffXd/N9R1/c2bN2i3lssl1Iz4q0Evhgk0pq/41IBG9Pt9xhg8MxeLBdjtjx49+v3vf39/f48ar3o3Hj9+3Gg0xuPxaDQCmFmVu0hNQDLhVaHrOp4OboKMsYeHh88//xyvStd1rOZ9eHgYj8foxDabjb9bz0gprdoEYNHghQkh4FUD8ucf//jHR48eWZb1y1/+siiKVqslpfz6668/+eQTYDlgaGRZNhgMgA/h+9AlwmijXq+LkmNVliy3lA+UVM+ffwrIlBCi63oYhvi4PS+YTGY3N/dKkV6vDfg0TXNGnCjagO/JebFahWVZ1uv14XAY1Hxd17NsN6L4IAj3o6jCIZGIYLf6QfjtR291xbM9Pf5+MOzfuQqe/X/fv9uHs0dF2f5zsY/CuGoOqzgXuxvnnCtdiIKQjBBiGBqjmiACnI8sywxTw5wRHGsppRRMSbpZR1eX15PJxHXdTqfjed6/+Bf/YjgcrtdroqTGWBylD/ejPM9zWaRZTil1Pd82rSzLiqIwNM2wLKFUvd5I0nQ8mTqWbVnW+fl5PIlXyw1wAkpps9mO4zjLcl3XhVO6rmvbUtd1368pJQ4OequX96enp9PJHIzhly9fnp2dL5fLP/zhD0dHxy9fvuy0e+v1er1eP3nyZDwed/oH6NmwTQXgId5hXddh0wIUfgcJepvNptc7sCzn6uracZwwjO/uHrrd/tu3b4+OjuI4ffTocbO5hi0NlZxS+vDwMBgMANIWRfHw8IA+EMxJQJf9fr9er89Xy++++84wjPF4fHZ2tl6vj4+PUaweHR1JKV+/fg34VCkVRdFqter1erAtw+tHfdTr9Q4ODprNJoYEUkoQ2SmlQGtA3MMEC1u1q/oZzGFMRDabDcgxsLqRnKNotM2tHxyVquLicc4nb4bYn4GDAycyCs56vX50JBhj/X4fMtfNZjMdrymlYRhiS2xR5IoIz3POz88bjZrtWGka/4ioFze526akdtgjmlT2vgXbxwHw0Y8+TIDk/Zt6fyz5rmrdaymVUrB1opRKKqsuWe75cXxwHLCdpxuyN/AeTdOoCeZkXpalaeoVD/jhYRQEHqFyNBoLwXXdlFKORqNwU2Bv3nK5Ngzr5OTs/Pzcdd2XL19vNhuxE3THcRqG8WKx6J0c5aVSShoZL/LtkVkL7LuHyXqxDIIA1vS+KylNO92s0+nd3Nx8/fWfTk5OTNM0jEgIifYDJ3FRFIZhFEUShuvZbEIUOzk+u7y8nM1mL168mE7nd3d3R0dHWZaDqXz+6KLb7f72t79N0/T+/j5KM9u2W60WRqCAfB3HQT5J0xRYa61Wa7VarVaLUrVcLh3HGY/Hy+Xy8ePHaZoOh8Pj42NCCFw8oC2CfIxoOhzHB4MB8Eyc1MvlUt85iwF1NAyj2+3qun59fY3dwEdHR9DBgGwNdBpMSwCBuq4zZmiaBlkZ0hSG8tXyerClwQ2IoqjX6wFah8B/Pp8LIcDGVruJOXjVUBULIaDPIITYtq1RyjmPokhYHF9QqTRNA6mDEOK6frPZhABFCJEkWZrmUZTM50vf9weDI9xZKVoUvCi4rrNer6frdDIZapp2eHRAKc2yBFemrhmu6+v7AAluCDMUDFUaqbLKvmyPvF927sdnFRhVEqPve+xXd64e
YYsg7U3w9x9N7qAXqlPMdvCeit0S0g8OBcQGngVBiKkDNzLUP4QQXTctyxSSa5pByNZHvCiKsixw6SyXy+kkAiul0Wg1m83T0zPLcjab6OXL14PB4PBwEMfxZDLhXPq+X683ozgNoyRN09lsgbQTBIEQ6ocfXt7d3MKpodtum7ZTFMU6jOxSZ9So1xu+HyRJslqtCSFgSyF5Z1mqaSxNk5JnRZli8fJgMBiPx0Ko4+Pjm5ubo6Njzvnf//3ff/GTn37//feDweD4+Pj+fnh0dPL9q5cHB/jgs/l8jkEiIWQ43J7osPfu9XrdbrfRaDw83KVpqpRChQnC12azubq6ajQat7e39XodyYoxNp/PfcdttVpwFgMh3nEcVBa46bp+cnICet319bXv+ycnJ9Pp9Oc///nbt2+/+uqrLMsODg4WiwVKfaVUURTT6RSzkEfnj2BAOhwOkyRtNhvNZrPb7YLqgLEH4BDkydlsBgtM1JbQvy8Wi263C6kkCn78yVBOr1YrcN+FEI1aDWgnL8qiKKIo0inDUhqcXJodBLWaECKPIiEkZRrT9KLkQeCbli0ViTbhYrkC9SrLMqr09WaRZrFuMMex2+22bZs4elB6UEp1bc/djOyVhXBWpJSCvVrFD/Lh/hVP90xE2UfOa1s54ftuolXcVk/HdoRvuWd8Wv06Ie/hsZj+KchYsIN893RVGYwivsrkqCiEELnU4NeANQCGoeV5zjSq6yzPM0II3prJZILTsdnuQPOCqaui7Pb+4f7+/umz52DrJlluWLbjeYZlx2k2WazBUUziGEms3SwFJ3kp80JEcdZ1g3qzk+e5UGw6X91ev/7iiy++/OJnQpR5NiSEuJ6NIieOI8uykiRqt5uaVnMcCz4rX3/99RdffGFZzuXlZa1Wq9Xqs9ns9vaWl9KyrCzL8rwkhJydndVqtQspCCGgYqH9gw/feDxGQQtaAiGk8sYsigLJQSn1m9/8BoM+KSWiC1wT4Jyz2Sx0Xdtz11F4dXONqToMrW3bprqW87Ioinqr6fieJR0cbZiqwTYC6Q5L2tBjdzod5BmkO13X4YVBCHFdB+a8GNxjBcV6vQagHQRBEATD4ZDueELAq1Eu4joHQwOnDxIsShV8Z71eY1hvGIbpGtvnpQRTRxBuJsNxHKdJkqxWK7iSel7gecHx8TGkLZpWSkmKgqdpmiSJZZh3dzemafZ6HU3ThCgtK8DT4TopiuKdoHj/WkfGQwrGT6uwqQZ0VetVpcHqd8lusKGUAk/74zjcj1X6/o3tbcWgO1gVH4xS22ExBoDV3aqCGeHH9gh71Y/gXxA49VazU6v7ux0MinNuMD3LCiEEY0RImec559uRzOnpaTWuhJsgLtYwDK+vr4uiqLT5YRje3t4mQuV5HkUJVvMFvgtvON+v9QaHtm03m23dsC6vrh3HSdJ8NlvEcRoE9eUyMQyrLPPAr0u5bDYbm81G11mWMdPSizIryiyMllGUcC6//fZ7vJM3N7dHR0cvX74scn5+fj6bzZ49e6Hr+tdff/306dOrq6s/+/mfAx2BIhmwBJrA2WzW7XZXqxVAdlTX9Xq93x8sl2vX9U3TnkwmcZzatlur1ZbLtW27nEuMCjabTavV0XWGQR/Ga47jAFdUSsF/5P7+Ho1co9EYDAb/8A//ALwEHSbAHlhoA2vB+I4xBifbyiQSgQ1b/jiOoYTG1QvcH4c4YwysQ9ScOHcajQZqXcijq9k69ltRStEWWpbFiwKUzlajicISlA8ACmma2pbrezVG9fUqLHJu6BI4X5HzshCM6rblCh4nWcZLaZnOQb/NeXF8fHx2djadTgkhvV5PKQU+g6YZWZa96wk/uGHgCDCtovBUmGTV++3DoWxvTZrYmawR8iNL7ck/AdLsp+L9tIkEhTtru2lEdf+9BnI7VK2IDghCsnNcZ4wFfuC6LlFss9kkSaKUYIz4vp9mSbfbNgxtPp/ned5sNoWorVZL23azLNtsInhDYQh7eHg8nc7AgW63u6ZpL5frh4eH+/uh3WpKJYnGYOkHVDCJYi4EnJdMx4aVxtHRkWmaF4+fZGk+HA7DMHRdO0kylCio5TzPUUToul6W+WazEqKMo/Lp06f/6T/9p6Ojo08+efbLX/5yMBgsF+snT55Mp1Pbds/OzoQQnU7v/n6ICQqiCxplAIkAaWDVA55KRUi6ubmBI3273ca2pru7O0JIlmWwloIrJFLrkydP1utlURR4h5GmhsMhSsTz83NMvdFcgVuLqMiy7MWLF8jb8N3BpY8PFIxcYCeW51awGWaA+m6zJ5b+Yh0qIgRJDyU3RrWYeEGSbxgGki02i0HQAONguLA6jsMcB50LWFmu61q6gak1MOosy+qtpuU6buCXZWk6NgwN0iKnlJqOLSnJ16tC8Have3p6moSLI3L4/MWzWq0Gyr5tm8PhOIqiNM23xAD2Tyh0q7lq9Zcg8KrSFLfq4q5iUu7WZf9oq7Zf+lYISjV8V7t9GBVBZ7++rQbxamc3UCVkRF1VGFdKQmNHWEPZY5omY3rFzzYMo1Zr+r5r26ams7u7u9lsYpo63EQsy3r06HyxXEKryjRNRzI1zZLz6WxWlmXJ+ZvLS1wBmqa1O52UKGAMvXYHezl1XYcIUEnZ7XYtyxoNR0dHRzjLeZE+PDycnB6jPoEUkGnEcWy4d2n6tlPFNFzXbaXUJ598outGkiTPnj0zdGswGPR6vWZzuw5+NJr4vo+MPR6PNU07Pj7mnB8eHqLRHY/HFxcXEA21Wq0gCO7u7hhjZ2dnYRh+++23X3311a9+9Stwu9FxgVr5i1/84r/8l/+CGq/dbj99+vQffvX3P7x6KYQ4OBzc3t4u16v+4GA4HD5+/Hi+XHz7/XcwtJdE3dzccCkqwAxLCBuNxtdffw1COUrlKIrg6YLrYbVaYWZQXWOO4ziO0+12R6MR0jigF+BMkF/gbrDrLori4OAABHfDMDDJABseFF/M8S3LWi6XkvPqaux0OrZtx5sQ8NXBwYHneS+vr4HxgIQAnhC2hYL2be6WTzw8PHiel4QL7DyE3x+OOayHkZLgk9Wrsm0fayG7RZxqbz1TFXXVpU/3tE4fcFCrUlDX31lC7d8qygvbGT0h0ioLtu2P6DYIcfjt/6LaOW1VQV7FIdmtcEPxjKkreuU0Te3dqleiWJIknBeGoUFU1u93fd9XSiyXy9l8slgsmOGnWbwFZhWXigpZckGYRhzDUkqlaQJALwgCz3eSTagRZdmWptGiyJSUghdECccGPT8URV4UmWVZjmMVRZGXSiohhLAsQ9cZITLPc6m4aRqEENe1a0EDiqeiKG3btq26aVr1egMDZSWpruvn5+dxnMKJdDKZIUv4vg/8EHDFeDx2XReNVr/fv7+/f/LkCdIFpdR1Xfg49XuDNMk77V6R80a9dXf7UBYCUn3H9r75+ttGvRVFUeDXKdHubh8ANn7//feAW3BSwEgbgQSSDQR7nHOdaVtfiSjK8/zy8tLzvDzPgyDYLzvhkorsJ3d0PHNnKC6lHA6HcGEDlQekeSklDPh
QtgghcH/gLmzPnQxtBQorPEKe55vNRgkBRBqyj8ViUfeDFy9edDqd6XT6/fffp1JCuQJADj4jjuM8f/4cPCdMPtCkbDYbKqRju2EYhpvIHwRxHE8ms1arZZqmECUyjY46hO7tY8INLWyVFfczpNqDOvfjs4rJ/TaSvg+T7j8O28mXqiJWSkn2EiBjTGdbK0Q0gWoPVsXHY+jvrdPY/wJ1PNktkMHfqBlarVYLgiDP09lsRpk6PDzsdvtRtGk2m7quj8fD29trSPXare5wugR+QCkFXoLXg4sGY1/MiMCnSXgOL1NdI4LnmqbZlm5bupIlkazI4jTeaJT5Xt2xDcHzdrtZlnkUbRgjnHPTNKXihmE4jpskcRjGyOqr5cZxnIP+sWW5o9EI3iT1WhOoBucSAzHOpWVZ4/H45PjUMu0oipIixmu+uLiADljTtE8//fSHH36QUqLAhr5OKfXzn//8P/z7vwmC4Pr6+uTkZDabHRwcYFMSfnE4HGLzKex97+/vj04PwjBEhun3+5jZ4AM9ODjAe4L3DR+H57iAQFEn393dffHFF1jHjSUTuHzb7Taq0DLcgBzLGAPfAL9br9fR5UZRBLkTKjKgKZD8wamNcw6QtkIT0MVBSwEjRry8oigYIYwxz/NmsxmMZ0VRXl9fj0YjUAKn0ZrLMk4jRZVuaqUo1uGKUvry9Q+LxeLg4IBznpf505MnMKFsB+2rq2vP8zwvuLt7aLVaoAdjc6iUklLyDkKsqj7y0U3uqW/3I6qKq/1Wbf/R9mLwwyCs/pftyTXkznimyrraLgirmlnuiarU+zPG/adA74H7VHItTdMcxyWE5Hme5yWl1DQsnIWmaUqhcpEzxgaDI0pVHMej0aikWiFKYAA5L1Spcl6UZen7fpqlWZlLqryaDxXMcrM6OOxiKJfGCXzB6nXfcxwlcs/yAPB4ttPtNDRN40XCGNE0WpSJpmlZlqVZjEQNvB4qONM04zilVMuygjEjiiLH9uq1JhSx0+ncNNNut0uplqbpxcXFv/+//sPp6enDwwNyICEExAAU+dPpFPzm2WzW6XRev36dpulXX3318uXLn/3sZ9hDdnt7OxgMLi8vUaRh9LxYLD7//HNM4WazGei1myhah2G7210ul5999tnvfve7TqeTJEkpxJNaTQjBpVyu10VRCKUYY8vlErAKOrfDw0O4GIK9SfaafDB70Inx3RK4CqTAnQG9oItG1gUj6uTkBCxZpRREjNVcChAdzvTqmK44DLphQFjo2g6K4SLNMJxEVeV4VpIkcRzqus55IUSZprEQYrXa9Pvds7OT9XpNCGm3m2VZClESwqIo0TTDcTzDsGzb9TwvjmNwoTWN6jgP9svRqiLVdosH99OO2iN279/UbnXZfqzuEto7ff0HcYi7VUNIPAXbC6r9+Kx4HkwjVeZUO6Lcx48ssfltZyXEOQdIIIRYrdaaxjzPA2oSx/E334xqtdrZ2Umr3aSULhaz7Uei63GWCQGwDuXi9lxaLGa+73e77aIopORZlpimfnDQcx3DdUzDYJEs8jQqsrjI4jmlWZa16g1d1xkRTFN5FqORKHgphPB9H0pzsEySJLm9vYVSybIspYjjuFlaXl3eCCHKsgRhoCiKxWIVx3G3203THOif4BISHpRYEEys1+vf//73Uspnz55Np9M//elP8GI7PT29urqSUvZ6vT/84Q9//dd/Xa83wjAMgtpyuQqC2suXr0zTyrK80+k+PDw0Gs3FYkEpOzgYcC7a7c5o/sAYg6k7shN8lnADalJuV6b4EONiv5Jt22/evHn69Onr16/xV4MPTSlF8GCwCcEbgAnozvjOhx+5AT0hCmBkNnz6i8VivV6juEWWQ39eYaoIPLzCrRs3pYhMPN3Wlp9S2J/iTHS1gItSCJEX2Wq1WiwXSik/8A1T/+LLnxiGMRoPXdedziaz2SwIgvls9eyTT29urocP45/97GeT6ehv/uY/npwcpWlKqaKU6oam890u4v3g2W8FP7iy99PX/lhivw/c7y1xl4/jtoq6Kly3ma36YrcTuzoXEW8aofs6pqps3u8VKaUVQRQHKt2hpgCpHcfRNLbZRFJyTaemaaNdnE75cDicTEaaprVajU6ns7m/1TRq21ajUUdvqfa8DxDe1ahNCFHka1GmjHDLoLXAwTdFWWpUGCY1NGqZZuDbVAleprqmJGGe55imaVkGIbamab7vr9cb7EKIo1RoSgrSaraB7DHGCSFY/cMYm80WSZJMp/Plcn1/f1+vNW5v7uI4/u1vf1u5qkkpMXBP0xTerZBKdLvdy8tLmI7e3t4eHBy8evXq6PC0Xq/f3d1Bjlh13d1uN4qi4XAI1OHTTz/F2gnginCnHw6Hg8FAKeV5HkibWFmFwg+DuLrnNxqNXq8npYQrB8brKEaAl8IgGPQxw7ExpvM8D9TfsixBJ8IRg5as3W4TQvCeYFwppbQsCzxVDAP3QT6xkwgi9tATmaZJdzjCcrmEV28SRuCyttvto6OjSMZZlsAhBqsXESxxXGRZsl7nWZa0280sS1arxdHRIDC6lLI4zjgvZrM5ti9j3CpEKZXgvNCrUrOKjf2Scr/LYjsK2C66WDUZp7uWshoP0t0KKOxV2o9Dtev65O5W9Zn7ob6NaiU/yNJq74by9YOKVO3kF/tfsz32D3qDLEvX6zUhstvttlotXWdYcmJaxtOnTz3PWywW9/cPzVbdtHQwv+D9WhQFZQqe0GEY2natVqtRSqF/PRz0MBqp1zyn33EtG8+ra5rneWVZapShjISt7XSxwI4h09Q1TfM8Wq830zQ9OztjVC/ykWnaUsKOhwVBrdWqgRht23at1iCE4MxOkuTu7i5uJLBCiqKk0+mAbw0/MiBbr1696vf7x8fH4BALIU5PTzVN+/bbbx8/fnx6evr61dXJycloNOp2u5PJBLp1nDIQKDYajXq9Dihhs9ngcgfoP51Oge4sFgsArQB1N5sN+iVCyOvXr1ut1mq1siyr1Woxxo6Pjw3DuLq6QrQj9hD8mqYhSeD4wyxRSokDAkGodoRKCDJ0XUdaVkrBOA85E3g4alqMMQnZgpOgByL7Sc4p7BiZttls1ut1meUYGBZFMZ/P7Y5LGFWUaIberNWwgCQMwxeffSqEIIxePH3S6/UmkwnTtYKX3UH/V7/6VRyl9UZwfX3t+95PPv+y5HmjQUteFEUhRKmrvSZwP7lVViIfRKDck+3t94E4gcT71r2apknJP+4G9zPtByXlfqRJKSnZPqn2fhn8QVjul69qx06uCqFqyJnnucHMyWSilHJdeGyZnPO3b99KyZvNervdlkpMJhOUT48fPxaGBPcCcjgkZCjoGGNYU053JCld1y3LmM3WWKnVbjZZs2nbtqGzfr/HCF0ul5pGHceiSmlUBUEgKfU8z3EsZGNeSinJbDYbDA6VpIZhtVqtNM2jKJpN52XBV6sZDAsty1ou16vVqtlsUqoVRQEkxvO8yWTiuj6qvk6ng9IUVOl6vV6r1ZbL5U9+8pObm5uzs7PVaoXtOghXaO1fvHjBGP
vzP//zt2/fws+bEALwSQhxcnIyHo/hcQZ+DMpLLFGB5ngymUDdZ9v2er0ej8fYREC46Ha7eK9OTk4g2AUbDgwkDKWra6bYbelCqKOVgk9hdZ6iV6w+esQ29O+YBoOOj6MEJylGi+ghAfYg0kRZSikNw1gvV2C9ebaDswAfcU3W6vX68fEh4C7Hsdrt5uHhwcXFxT/8wz/EcdhsNpUShEjXtYUor69vs6woioJR/fDwiBCZpqlusG63W5R5nqecFzoj78AVolS1kpIxRhQhUqndltzthIARxoihM01jlJKdA7LEGSaFYJQwDcinFFwwppSs0uzWk5BSvchzzrkopVKEMqoIQfwKUe7uLCmlbHcQGKYuJaRVOqWalIpSzbIMy9yipsjA1UvFdVNhsFXQztVCM5iu66lJJRdaqlFFiFKddotoWpJnBtN8r24aBo7M2+HIcWxDmb4VNBq1m5u3i8nE85xwOTdNkxIpi8IwDCaUo9u1Vi1M1GK5iSKv223rVmuxTDebKef8aCEoVWkad9ttL+Cz2SwMw4uL8/WsEJlp2y0iCSFEqnK1Xraa/X7v6OWb10meTRdzTdc55ylPf/PH3xz3HjebTV4aSZxYVhmGcRynus4opVmeUkrjWClF1+sw8Os31w/CzAkhPI5qtdomDLE4xTTN2XjyyflFFEUqLx3NiONsdvuA6mtYlFLKFy9eOKbVbjTLLBdCUKk0QmXJN8vV6P7h7Oxstlq/ffv24snjMisF5SUvDMls3aKCdJutgpc13725ueFKJkl8MOj+8PLber3++PTpfLECNfzw9Ozq6mqxCYlp6q4rDH2RxIs44qbBTDNK00WWbjZZu9VKBVmPp4zIZrfNebFYz4LA8wOPMb0sSyXKOAw5557j8FKu16HneZLQyWxONaZpmm7bm8WCJKlhWAYzsjxdTGdpnDBKbc2hnOlUI4LkccYY0zQ9S1JCiG2bcRzXW/X1ZmV5ZpSFTmAfN05N0zRNXZmiLAuRlkUeG6Y2vrqxOLOFxtf5fJktZ0uSsEW2SuwsTRbPPnlEKT0adF5fXWJJa5rxu7vher02bOs9G/wPUg39CDVVSmm69kHKIu/jqx/c5HvqPlkVjR/fk+4ATLKfgfe+L/eIcrhpO2M5+b4/AH1/Vdv+ayt4YRoGChK05kQqqsibN28Gg8HR4NC1bJD62Y45lWVZksRhtGaMRNFGKkEIwQ7XsizzvIiiaLMOy1JQSq/v5rA5gTGmKHPPc9vt9uvXr13X1nVGlZJSYCqdJFEey16v5/n+ZrOZzWZ5nhNGCSGz2YxSCvQlC0Nt56mz2Wy4KBaLhRClplEuSs9zOJeO43ieo+umFCrLCkIk0wjnfJ2sALe6rqvT7e4NSilXPNqEYRhmaco51y1TpyxNU6y2XywWIGSjkNN1/U9/+hMMPuDF5HkeiuowDFer1fPnz9EXzefz0WhUFNlP/+yrzWal6zolClgoHg0FC+d8Op0KotbrNYhEURTphYGGsBRC7LyYXMdJkiSM1qZptJt113XDsExTURQbnWmdTs+2bZ1qQqg4jrMsq9eabGe2r5Siu4uz1WrVg1qz3hBFORmP5/O5qRuNej3KC0IIVsphz4phGAAlgcNVHanc0ULyPF+vl3mRCsF1ndm2bVr67e1tkXPGGCGsLDlKMCHE/f09hkCe511eXo7Go/V6XavVNF0H0qPiSK8u2Y+jaL9Pq26apn8cXeTHVLy7R94vGj8Ecj74dUJI9fjboPqon9wHfiilSr3jzexHb1UY/+gNVSVThDFmaLppGKfHx41Gox7UiNgqXNHWu0wry5xSWpZFlke9Xq9/0PN9dz6fw1xos4miMM6yjHMZx3Gz2Tw9PY3j+Pr6Stf1i/OzIPDjOA58F4WoZRiEkHa73el0kiRq1dxGo0EYXa7KNIuLogBFcj6faqbhunaWJZtwBT69ptMo3mR5VJa5rjMuSl1nvu+GYShE6bq279eEQENueJ6nFA2oAyJ+u9EsyxJiBc/zDEODwQTwCeibZuMJDAthG0F3usosy+r1Okhb0CL88MMP6/X69PQ0Lyz4poEvJimZz+eWZaBN5ZxrpkHIdgkPBjloW2azWc7LrQu9xsqy5HIbeJQxfbevU0k9iqKSSwAn6HEcRzMMjRACyC0XinOJk/FwcFwIjh5V13XdNHCsO45T5sVisSBCSik9z9Moy7KMUJZlWavV8jwP86Fms1mWOfg0OGcJISAhQv+dJEkYrtMsJkRBDacbrCxLx/YsyypLAfQI12ezWU/TdDR66Pf76FaiaJNlie1aeZEWZSYJeaeikHvaPPpjZOvqKlfVhqb3udcfRCP5qHOrMuGPRu8O5Hx/pr/7AueKlFLf80fEUfcBZrOfqPcjEzfH8ZSQZSEIEZZu2Jbte55j2Y16Swo5ny+JkIwxx/ZM06REwwXturYQXEtou93EygF4eOMlwZU4z0shRK1Zm8/n0KQ/efLk5GiQpsnDw8PpydFms2o2641aLU0TsPLv729FTlar1SYKF4uFEMLzPKZpgM5BQFU7U0ac7ppmMEZqdQ8+vJZldLqtkueGodfrQbvV4Vyapm6atu8FRVFIs+k4DlOk3+8XRcHLMssypogeMEkUdPpJHNdqNThDj6fbZbpKKczicNmBsYCxPiZ7Sqmjo6PLqzcwaA2CoN1u2p776aefFkX26tUry9oaLidJjGbBdd3VbOuqzDnXiUKGzNJE13WhJCZD2m6Up2nachHZtu35jmkaZVnmeapptN/v27Ypyh28KYlSFNm+3W7PV0tQW7XdjVLqed48SZWQpqa7rlv3A0boerVq1htYCgScSQgB1CeOY6UEZBaKSJgjcs43m1jtRiZSCil5nudpxo+Ojjw3UErN50sEuev6pmkahgZyDzgPGlGw1apU/PhL33k9qT2zQ7mnJ9q/oNXeHELtaWc/ntTv56UPHqHSN+GLHccCheg/qf2tIrZ60h8Nsx99AdW/uq6XosAnVLXpQojb21vLsmpBUPcDx3E0uh0KB41GnuemaQjBhLQJYbPp/P7hDodFnpe6rteCOiFsPp9HUTKbzf70pz+ZpvlXf/WvPv/882izyrL0k08+MQ1ts1mBdbFabVf8LZfL5XRTlmWUxOBV27bNNA0Tv8V6ZVlWrVbbLj/NMkJIo1lHv+04FtOk73uNRhBFHmh3zVatKErOC00zdEPjgkZh6Fq2tUN316sVCDegpzRqddM0geADAe72DyGJwOZnOBTZtv369WssbPj+++8B8FiWNZ/PgyD46quv7u7uiqJYrVbpZMwYg3oY9bzjeXmelWUZx/FyudTY1qW3LEuLUThE5GWBTAjyJytLzJMQA4ZhGAw7DAspuWW5QRAQInWmGYbluq7BdIxJYQ8HcaOiW6ojHgrvj2PZiotRkuimddDrd9ptzXaw/oBSCm6NpmmrFbSgGSFktVoZpg7YJs/zohCQL2k6LcsiyxLOuSJC7NkimqZZrzVbrY5lWff3t6Zpnp+fg/h+d/0Af7rZbIZ0bQIWr/LeB6lp/ztV37VfuO5f3PvX/
X4mrNo2pKvqV7YVKduOK8AoIoRIyX80m+E+lFJEDtkxCgR/xwncZ7Ttv7zq9VBK8zynioCJjbMt5lwJWfeDIAi63a5nO2VZlnmBzifw67oeoyINgsC2zSSJyrJUSt+y4DXD87yyFErRsiyHk9nTp08vLi5OTk5gvwXyVFnmURTd3ZVpHE8m4zzPYeFcUSsxSatgOtu26YZSSn3fN20Lf5RhGI3ANwxjE65KnmkaMQzNMLVa3WeMUaaUErpOLVsnhOg6sW29VA74N3e3t0EQVPF2dXWFckujjDEWJ8l8sciLoh7UFSHM0AVROS8Vo4Iow7YGx0eW60hKxEyto3ATR77vx1l6fDg4Pz9Hoo6izc3dLfDYVqcNnga86vI8x96IVs1HUk3TlGisMghkjIlSglEkd7Y3ZVlSaggh8lwqJT3Hct26bZu6ruV5qtFttlBU4SQFkTCKIs6547mWZUmiIO/gnM9mM40ynbIsy3rtTrvdXq9WScnBnmWMBUGA2nu3zaLcSrQMDZm/KAoiNSkl51wqUu78RXXD2mw2vJR0T91eOa+BhwBHvNF0QghZLBb4TMEl+JFNvep9+UKVx9ie8pB+dCM/dkPe2itK6Xs/2aKldL9sALxRvQz6PjIEEgww5eqVkL1JoNwZNLI9tu7+Lc9zyzAxscBRZJmmqRtYHhSGYbhaZ1lGFcGmWOygTrPYMLRefytTwN4VpEqN6egGcW1JKQ8PD1ut1u3tbRRFrm0qJVerlefaq9VyvSZ3NzeTydhxnMPDQzQJtm0pSgjBDpOMy7Isy3ojIBqBkGIdbhzHwqTLNs1ms0moKIrMD1yAPY5jWpYlpSKU27bjc1dKAtM+O/dbrdZ1koxGI2h2EBjQlUopDcuoNepCiIeHB875YhMppeB1DbMWMJIZY3d3d47jgJSH+rPZbN7d3ZmmuVgsIOFrNpuu615cXMyXC6UEpdSyLNu2MF7HS8LUIcuynJdZloVh6AU+AgaHI9/NnKWUGqOmaeIiIoRYlmUYuuCllNKyTEJIFEVMUV03cW0gfmzbBrM0ThMM2+bz+fX1tUZZs1a3TBO7D5bLZcpFxSVA9wiZhVKqLEs0GsCNQBAnAh6+GWVKKckYMQwDLw8jE8ZYWRZhGCpFXdetNwKpOBeFH7iWbfT73eVyKURpWb6UEhY2Otnr3HD8VHXmfrrbuyn60W0/sD6oJBnb92WqQrBafL/NYFVB+67Zw6Gye0AB26UdAKMq55vdC2B73HH5Y7todq+NUaoRwrIsL9JM13XP8VutNiEsjtMkSm3TdF3XsdADyDhPUBcZhmZbrlRcCGVbTpLGnHPBpdBFlmVRlOR5rjHj/Pz85ubm+vq63W7quj56uIvjSNd12zK63Xa3286SRNc1ZF2lxFTMCCEFL/Xd4vWCl2EYmqbZ7XZt28bmUwzWpJSM0UajLmUpZNnptCglJc8NUzs6HsRxbJmOZdlKqTTJGaO6rsWLzcHBQb1eL9LMcZwsTjCIOzo6wjkCSU4YhqPRKMuyVkeDDblSCqRwpRS0WoZhtFotCGHBWU/T9OjoCL5SUsp6PSiK4rvvvqvVfMKo41igXEdRiHIagcEYg+kLdPd5ntca9eoz0jQNkyV87kmcOo5j2UaSxGEYahr1PEdJURR5zQ9c18+yTJZC103GWDW+r1RskEowxsAp7Xd7zVo93GygQSnLkmk6ukHOueNaUso4jqXcajLozsZB7HbpaNVSRJ0ahg4DTugYqxCQUirJKaWe52V5hM2thBCsKpBSDgYD04YHQr5er/8/mJNilez1C2gAAAAASUVORK5CYII=\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "execution_count": 15 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1bEUwwzcVG8o" + }, + "source": [ + "You can also use the visualization API provided by MMClassification to show the inference result." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "BcSNyvAWRx20",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 304
+ },
+ "outputId": "0d68077f-2ec8-4f3d-8aaa-18d4021ca77b"
+ },
+ "source": [
+ "from mmcls.core.visualization import imshow_infos\n",
+ "\n",
+ "filepath = 'data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg'\n",
+ "\n",
+ "result = {\n",
+ "    'pred_class': results['pred_class'][0],\n",
+ "    'pred_label': results['pred_label'][0],\n",
+ "    'pred_score': results['pred_score'][0],\n",
+ "}\n",
+ "\n",
+ "img = imshow_infos(filepath, result)"
+ ],
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "display_data",
+ "data": {
+ "image/png": "iVBORw0KGgo...(base64-encoded PNG output truncated)
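The cell above builds the `result` dict from the batched model outputs and hands it to `imshow_infos` together with the image path. For running the same step outside Colab, here is a minimal sketch that writes the annotated image to disk instead of opening a window; the prediction values are placeholders, and the `show`/`out_file` keyword arguments are assumed to follow the pattern of other OpenMMLab visualization helpers, so verify them against the installed mmcls version.

```python
from mmcls.core.visualization import imshow_infos

filepath = 'data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg'

# Placeholder prediction; in practice the dict is filled from the model
# output exactly as in the cell above.
result = {
    'pred_class': 'cats',   # hypothetical values, for illustration only
    'pred_label': 0,
    'pred_score': 0.99,
}

# show=False suppresses the interactive window and out_file saves the
# annotated image; both keyword arguments are assumptions to verify
# against the installed mmcls version.
img = imshow_infos(filepath, result, show=False,
                   out_file='work_dirs/cat1_prediction.jpg')
```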
KbVh4j3Nt1wu1HxcJU9PzSPXUuxMtEmaCNtHopU0iev4qSOOZEHzs5XC5x74bJ1UkDcHUZV11cyx9dvccS1l8YEFMYj6fxyoUcUjaVpptG6NwWC5tbrCxeYnGeukipY38afrg8vIeAEcPhtnOXL7XBz79qU/zp5/5U975znd15RTSu1JUUBOjXGnV1WIONuTuNeG8JDZ8XyC8MKkRN7fDjobAJkPtMlCDECXVe0IsFE6jhsTnRWTyt00Tw4/IiVG6I/ZmxpBnhqXJiMzoDlMLQYiGzor0kPS+FM6YdZY2lpkIXqhQWRbNlerxGhQupsg7o6PihtE957SoF567TDSthEfmHLnRjIuSPNPoEPgn3/4ixuOcNrQxnDDSXHXQK2GoqtqXiS0akt3h4zCs3B0CDb2h4WILzovGV+znOAwxfKrXHRgteV7x+5HyLWJTac/l4dfw2H1dsngMZTHGZDnb0y0IPso47XD27DoXL54nBMfBgwc5fPgwJsvY2pS62LIc4V2gqluK0RLjyYSlyZhiVEgYmcXw1Qem0x0aZ2ka4SwWWYF1cPbcBk9e2GQ7GBr0ZZtvGothy7hu/g9wzuCgLEqKsgBCZwhTFy1h1RPnTtoYepUOpbNOgVYpRZ4J5070Ix3Btoxzw0gp8C0mWAoFChFPFKzMDXT8c/LYuLhpGpqm6fpsmJioS167D4E8wkHpkDJDIlnd0zYNq6urGGPY2tpiPp8zGo1YW1tjVE6Y11Ie6IGltVWysmBWz6m9JS8zsiLDq95z/+Cv/ZaCK2YzB85aZ5RkMqZSIonTRd1gAYNRqTFJgJBi5xCrahYnZG/lB18eoIouZQpblIqF6D7s+kwfMqkEDJAoCv1uZ61DqVZwuKg+m5m4+LzFmIzlyRJFkUMQSZYQnAjt+RDDxdBNDmsbRKhPU5qc3GjaTMq6AtDaSCgW3zLaVBW7NEfNsLSoNV0mMo1DUiz3HdY2wJBCMpNSnBySAYwgstz95TvpggqJvwLGsgvL2h1u7gbdhzWFKeRMwHsIARdcJz7QnTdc7jUOpli3wXnAOj8Uy+2uafffhyEyEAnJGb5tuXhxgyw3aAOPn36M9fWnuer4UW686dkopdja2o6djmSDnVUVZTlhNJlgcqkAKMZLlKPYT0ArnFI4HG3wWNdj/K0P1HXLznwuWmCjFSnaHtxX+vsQKknjvNC2Twku5kOvRLsY9kePJ2JTyVtNvLQQAlotev2yWQy88dhfNigwcW6GIHwu7x1ZpntHJG06u8QH0rjnear8oMNME6M/bXpDbDbVmw4b2qREQ+ptoLXB5LIeXWzCnBU5de2YzWu0dd38GM7my4xZlvVdqoeS0QQ6fKRt245P1bnFEbgNpP+kqbpoxHaHD90RP5jHFlg/+pY38eY3veny98Uj9QDoPYg0yQf0g3jtyYj5EHWwiDyxLGNcloxKUcO0rUilBGJtKdEoIwXUaDpi4e4jN1LiJSVJSBKAgLexfEj1D727XY9MrG6sOqdUuEEk70R151t4E8S+lLp/PXgIizI9uzeR3UZqt+fVPfNdz+tKXlIyKELCVF3J2PAJD3G7K21ouzE8vsZnh58bYnh9yBvY3Jpivaexlu2Lm2xuXsR5y/Grr+Lqa45hCsPFjQ02Lm2QZQV71/YRgmI2mwngXIrXFEyQ1WGUhDMqhmZaYQojheoafNA4L4KGVTOnsTXelrTrA1SYAAAgAElEQVShT0gteK+D8R8a427BG8i00I6apuqehVJKKBTxuRkji14+b0k4m6zDMJhHCGFWBYIKaHRMfklJX6ZBBYm28L7XcBt468BlhnUYUSVjPfx9z5lzC+VK6ZwpytNaR4ZBTDC0LcV4RF7muLZhVs0g00z2rGC1YuPcWXydFFcWs+6XGbOOz4I8xBe/5MUxs1hIRicvmM/n1FGiuJvcSvWLLwVau3dXvSgauDBx+7wyBHj3L7yHO9/7/oUBSg92cUGJUU0ZPLkWFiRwXIrvY7jqrCU3huXJmPF4hAoB1zSCP0VvUsFCl6DkGSolTT2Sh+oHEkRSOC5lIqLeEaQjtnMQknKDGvhaYWHhq/R6kMnY39eA6pA+rUQ2Jo/F6d57sdDeQ1gE76+EfV3JaKQ/d2NnV/rc7hIo4vheyYjtPvfwWS5cS3xuqffXbmwtfSbhZamuL/201lI1lllVUzU129ub5Lnm2pPXcOLEMXxwnH7ySabTqeB5GrZnW3gHBE1WKjxSrB+Ux2NRJicvMtnICAQXKMYlJvcYK7Wc86qh9jUuWNkUh943V/Z+d9/7cMMQj0YSAkOowBgZS+sadEjJrh4O6I1+MkRxE4zXkmkFOiZ6rDAEvNJkEKGayJXctbmlzWMIM0CPm/ZNglhIrimlFtR0kwc/ZCUkFZT079ZaglYUEykFrOYVKs8YswJGY12gqhs88dwD1+wyYzaajCiKvOtPGQidVXLOY0yMy6FbcPEpDDwyCY+GnlnnuYUQFTl2c4YkTEnSwUopsqy/8d2Tejcwma41BMTI6t5jE3kZugyaDGwsEVEx/PRWDAO+x7/iZAk+dAZE8A3JQAkNQEVMzcXMYhSu1jpm9DRai6icDx3rrBsl4u+IYZrq1nW07mEwmrHWMx3feMONQrL1vp+0XsIMwev6cHs4TsNxGx67F13yuIbHkCQpc6IHm3XMLkuS48qJheF3pPN1u70PhBh6JwmmYffuNOETbnOZMWsttXVM64qqadi3b41Tp67h4MF9VG3DdGebxrXkpWBA1jm2N3cAzfLyHvKioByVlJOReFOxMa3JFWhiXwmHMrEbu9bSu8HVNK7CYlEmztnomQ2B/mFmeGjEhmVCMj8XZZBUxMbSc0whmjGLnkmq6+yUaLtH128mWisypaJAhHQ3Cjqtx1iWN5gz6RkN8bN0rclQJWMmcFQ0gjF8ddZhG5HXVrGeMpgsbgwStSilurpRkRCTRiUiy6ZonGVruo0FysmY+ZbFBbBeYd3fIJsNcPjoYap5RdO2QtoLgTwvu7bxmfMUeRkpE1oAUq2pmhofohRvBL/TTqqVFv0sL52ZVCqB6DwfRK8oEx0ySGFtEb0rKaSdVxWFySKpL+twMectzntMlkUJl0w6pwP1vALvKLMM7SzLo5KDa3tYXRqDbbBNRZFrlpeX0Lmmjm29TCbCdalcSkVvq6orbOS2mcg/q2PvxllVo7OCyWRCUY7RuqBuHTvzmnllRZVUZwSV4UJ8hmiUkS7p0qADXPBdU2GjBbw13rE6GrF/zyrH9+/l+NoeQnAor9DR8xOlVOH7JIxGdmZZEEoNPJ7Og47+nu4TElfyzhKKh1KdrLNPtX9BhAbElAVivN55zCn1YUyG1oYQRK8+ygZKnasXQcDUREYY4XOqqur6N1gnu3fTtNRN24WZ0kVcepj64Dl69Ai33HILx68+yqyasn52A+8dq6t7hL7RSBf5pckqRTGiLCcsLy0zWV4hK3J8cEIxMbnIIyEZ8Lapca2McdLOa11L1dZY7wjKdGM2DMmSMZExMAuezpD3F1wSQhVjGVQU3A6hA9C9d3jjCE
5eV9EQqCDJk2BbfDbw9iK+jUmqM4IFu0i8TZ6/Hhiz3WHmMCLSWnfJvmG/W0K47F47TbbQzzUzSBD0mJ9Ihue5xkeDpoxhMhlRu8DOzg4qz0XyfjoDD14r9CD5d7kxi2nZyeqEcQjyIe9RXhEi6qaVQWNkFyuiHnpQBAM7do7Xi55YekghiGxzpjJS/8U8zxmNRgQHs/mcEAIjRAUjDejq6iqZ0ly8eJHVPWsCWu7yzHzEamrbonIDHQcpkE/G5EExQpO3lptOneLA8oTlUYFr52xtXWLPniX27NtD0OIpaaWisY6WIKatvbNsb29TVbP4YDTWWZFqiYv13PnzzGYV4yUpcxlPlrBBs7G9Q20VW7OG7arF64JgChwGZXLK0ajDLqR/pAdn0d5jvMU4x8E9qxxcW2HvZCyeYvQqlQKlITNK5I+Vw9kmNksJKGM6Y5SMs+6wHIcPkg10XjGrKuq5dJMqS40L0tjGZBkuOCEy4zGZSLDoTBout64maOETOe8iD0p1O3fTNvimkk0oBOqmwbYWkwmdQ3lNcKImMq9rrPVUdcW8qrDBUbcCss/ruNF6AbF9CFR1zWy+Qx48p646wjd83Q1cffVRpvMZ6888zdZ0m6IoUdrFzuCygS5NxuTZBBUM9dxick8+LsmUJFq8A9dKhydlA9oGsgB100ImDXh2ZhXz2uJ1Bsbgo+4ckTdHEGjDRS9c5MHj5hAjEaPFELVBekdqDRoThSJlY0tJH+UDrrW0HnRmUEFjVNYJpk5GS/i0UQTZcJwPNLbGBM/yeITODKG1VM0clWvGhUE5T1NV5GVJlhWdMQouSNNeBXhFcGIDxBkRWe0QAtorWdvBYz3kRhoH+aBwHtrYxBonOnYJ2U7lSWIrLGQGFxItRUn/TgJOeYJrmIxzmsbKeQYG94qeWdM07GzvRIUKFa0v0SrnkeRIlDg2XUYOo2h8GzWU0tMaZrR6t9ukwUipfudlsg8wgvT6fHuTzBiqquqavKZzd0fE7BrbiteXmU5yGg86yCQ0Xtzxx7MsTmqHNopyVOKDJyske6UHAHkyaAHhj0lY05LqRqXiQELfPMs6+Wytt1HqGUzsHOWDompaGgetC7QerBfPDNV7q52yaxDXLXgn7bS945nlZZYnI0Z5LkmJJHMUr88HB6bvLem9X/C2lNJ9FlQRvc0kBqlxVkLmQ6trHD2wXzBFTfTOcxLjvC0LSdao0PGbQhSBTAXCziY+UQ5a0cSQUSuNNqJlVdc1SgmbuyxH2NYx3Z5SVw3Oe6qmZl7XuCCgfu0sXmnqEDC5dOyebk+ZzudMxmNuuOY4X3/DSZZGJU8//QSXtrfRJmPf3n1sbm5zbusCK6t78A68CzSNxblAWRSsLK+ytLqKE0xCtOGisKjRnlyJPHYTdeeCE4WHunY0rcf61FCkp5skiECmqGzwKUwcekA+hfcp8YQBI01SJFnUpX/kPAmTS1huiGFk7FkaOzX2OG38bo2KcId0bHJIhQFBRYgjXdPCEu5xz2jgIObNrcd6Ucvw1qF8IMtyvA80oZVs5KD2NNVZd+fqvkt8eg9xfLX05ywKtPe0Toxq27aElEr2DlyfjLvMmLVNy4WzFwfcLcl0JB6M0ZHPEiWAVJSGThpeIv6XQhR6TGhgeHRauPTUBB8fpEutseJ7tFb4qMtvncOmJsADqkaiBSilaK2Vc2s14IqJYdABCm04t3ERo1K6WZo1mCzDeYdJSrhpgPvIqw+HuvvrM1GEVErlF6R8U2cqYzKUNoSIKDkfaJzDWk+n/aR0Rxju7Wj0Cr1kWK2r2Z5JZxsd8cAUR4bke2kxTMNwJ8RJqKLRT9SP/j09QO+s5/Qz69y4cxUH1/bgfUqlK4qipCxzyrIgL3KUokuIeBe1pxR467EudU3KpW41Nnk22mCd1PklHAbV0lo5z6yqaOq2M2ZVUxMUNE6aYTgrbpULLbYWJv3a3j2cOH6cm6+7lv37Vrh47izbO1NAPMBmNsfojAMHDpAXI0IA24h8elKA7UMfj1ICuGcmNs5QOjYUSbW5MlZtY6mqlqbx+GA63HgYng8rF/o5sYhd9kkQNZCjvxyvhMjHo49IunU1AOsFMhCjnDZmrTRGifej4wYo8xecYSGTmRyPBXqHUh1+nqCJhKeBfF5phckEmklNikym4hQVGkpuhomKYUjrYwJCDFeeFZSTCXVjwTpcaLDzupcWcp5BaeblxqyaV3Ewe42tbo8JdDtzj+/G3GXcgvpuynGxRFB7mKzvjEJgYcCUFiJoiAKP3QMLHtelYVU6Q/y3fL8W0pZMgMFnE5SeHpTJMqFPoDo3NpENzcLkSTtRb8pU3NUgeW46AqjJuA4B7USPSAbPCum1KDuDknlNUAHrHWAw3VqQsU6IR0jDodKk8NEDWLhUBm/EhygGGTeejo+lZPxcl0zpJaZTBkwazHgee+YZXNtibUtrBcccj0aUo5Isk8WuVb+oQvCxZEV1pTMQO3HlOZ7A0tISRaHYnm7TNtIVflwWeKWYTqfYVigWjW0jHmWxXtoHJ/BfKfGAd2YzZnXF3r17OXXdKY4fPUJm4OmnzlBVM7KYfW93RGxwbe8+llfW2NrapiwKcikjxnvPzs4OBAW5ZryyRKalFCnPNJmRQn434IYBWBeY16m5LdGzlrBvaKyGPL/k4Qw5Z8Nsr1aSUBoasiGNZve/h0mV9NrQwHWZTiVdyFUsZFdpE/NesEsnUFF3vWqRSL1wjfFeDL2og6wfkdIP3drrjyTdZTq14Q7I7f6tlO48SokESiblhLbZQQGFKZiGGURCcqeeEY/LlWb90DEeHvFGB2qoaeEA7F5bIV1kSHZukMlLm1AkAarkoynYvLDBS26/nc/efXd3PeL5sPCQ4tXKOeMlhwhCC7jd7zDpqnpnJ4aMSrhh0h4nXnV0xxcmkCJKRg9qKb3v2s0PvZwsyxdqIFEZSvnuMyBhowqQ57EwN2mcBQkEBiat2xRSOcowZPTd6yyE8yEaqF4cb0AhSZ/1uzwziPyjnoYyU4qt6U73PqUUblaxM69IGTcVvcCO0JnuwPfd3lMNp2TgsuiZCs9P6yx6ZA3T2Zw2erl1XeO9p2lb6qYmQJe1zPOC4C2ZURzYu8Y1J67mmmPHGJUFWxfPM9/eZHlpCecD9axidXUPR1f2UDct586do25airLEIMmITGcCBxEz1XhMFshy8coyRdcbI01f56GJIWbbeAIZSueCK4cuwLviMVSIuKzcLPRGpJvlg0V/pZ90dB5U8sqGAH6cRzr4y7LpMlfApOJ170FZtJYOT14n2AMUIlOlUhldcAQvmU5RXOq9LhPx97YVQmymlZSL0Rs3WV6hu16lpFmRw1MUI4p8hHNbkQKSo8gQCNaDVxidd/d4hULzKxmy+Gu1+IiSQesMmQTU4qJ2H+mNyPDMwv6VLsmpmcgwazb870LlPWkA4s2j4nX1ix6l4kPq60QTvuCdhJLaSEjoIxs761LjcpYFOxgYeIL97iU9EASEzExGkUvplMn6kqzkAZrIdPbW4mwLQQp0R0XOqBDOU
/CpYFzA0be//We5+57Psn5unR9/649LCBwkTGmtlYXeNhKK1YItzauK+bxmPq949Q/+c/7rvV/modNP8sEPfYjJZJnWOtrIffupn/7XfPXxJ3jszDPc+f5/izIZzvcCk84HqtZSWUftPJV1VNYyaxq2ZhWXpjMuTedsz2t26oZ5Y6lqx7zxVNZTu0DjwaIJOkflJZXzTKsaneeMJksEY9iezbi0vU1tLS545k3NvG2o2pbatjS7yLGz2Q47Ozusra5y6003cd211zLKc7y1lEXB0tIKWV5gdBbFAzNaa5nN57TWUpQlCk3qelWWJUvLyywvLzEal6KEYTRFZsiMhBbC5RMwniCUg7puqKqG1oEiQ6ucxCfc7ckwmMfD+ZxC3HR/YeDFia3pDVmicHwtgzakuQw9N/nx3XrQWnfJH7WwviRt4JNxWjCivW101mIjzy/Vx3b3EqGDPmru6SgdXhZcvz6TMU/fpySJoFTGqJxQFCOaxtLWLQq590znmNg4e8hxu8yYaaU7Tyl5Vb09Gwye6oI9+V8qZQrJgtN1Rhpk/DsXkhCzolp+lOpLerpv61zO3V7Worvb7U5BcCGfVBe8qIt2DzP4LjxURD5NSGDtgP8UH0byYDqWdkjh2dAbSViX1Kr6EBJlpgv1fPyFNDcR8FjhJZMVPHlmKHIjKqU+dskBHnzgAX7ix9/KX37+L0nekbUyieomGa6K+XzOrJozr+bMKzFq3/Kt38qP/cRP8MpXvIIbT51idXUPP/eud0ZjJRUU//D7XsnffeEL+Z+/4ev52899Lj/+1rd2gDFakhIuKFrraVpH3Via1tFaT2strXNYJxw66wKtC9Te0/qADQqnDEEZlM4wRUExGqOzAheU0FLQoAwh9VBSksWqbEvjLFZ8JZTRmMxIeZKWnXxlacKxI4c5eugghTHMptvMpzsYpclMTltb8rxkZXkPs1nF448/wdb2lOXVVVZWVhmNxxSjEcvLy52q6mhUSm2pVhgDWS5cLhQdbmhjmVvTSmlNVbVYB6iMQCYlTqgrzs8h2383+Tu9P216Q74f9GVRiXi6WOC92IOh04YbeuvRM0vn2m0w5dr6eU9wIpYQn46sZfl78BZnG5xtpCQQaealgvSsLXJD8BbbtOCFrBuHUXAuHzlmUVmGKOutBmKnWhvG4wmj0RhnPU2TyueSiyT2Zdgw+gqy2aYPZYA/+MQned+d7+XP/vQznHnqKe768G9z4MCBzkqH4LnvS/fy+te9jrt++8Oceeop7r//fm6+4Ua01lx/3Sl+567f4fTjj/OVe+/ltXfcEQdemr/+41e+kv/+Xz/P448+zJve9COdwVMdJjXE2tJ/FAnwv+OOO7j3S1/izFNP8aUv/TWvec1rIlgug3Lq5Ck+/J9+k0cffpjHTz/Or/6HXydEDtdkMuZfve1t/PHdn+OPP3cP73zPe1lb2xvdXVnUH/qN3+SNb/kxfu6dv8Bf/Le/5nN/+d95/gu+mSzLOHLkKO95/y/y2c//d/747r/gtT/8RqwTvhtKSl8OHTnCf/6zu/mnr34NMTBER5KiNNRoMAryzMTwzEaA2fKhX/01Pvtnn6Gu5kg1QduxpJu26X/i71onP9Zavu+Vr+R377qLv/qrv2Jza4t3/NzP8b0vfwVFWRIIvPJVr+JXf/mXefTRR1lfP8t733snr/qn/5RhHawHHApMhi4KTFHglKLxnjbIa15JzaIN0FgXm+IGWhQOpJ5Ra9AZOsspxxOKoqSqG3bmM5RWUZp5BaU1TSsZzrptaKxkw3yI2KsW/O3gwQM861nXcezoEay1nD+3zoVzZ7m0cYFLGxfZmc2oraVqWmZVhfM+6tF5dqY7zOfSiFlpxWgyZmV1hclk0uFYWvULUOS3Y52vc9GbD9jWU9ctdd3iXJB1E6ShT/LMdpeCJUM29J6GRqooCsrY/Xv4uW6xDgxQMoC7PbMFz2+XwRwazSsrmfg+g96FgPFz9ET5YWirlZRZFUXeVcyk/hqSVLG9EQbpwxC/K1ULyHeLt6ZCiB2pFGWU3FZB4a0TCkjMaorBFTHLr2nMQpCUtXe9QXjZy17GHa99Ldc963q8c7zrnT+PVn1GBeB1r/0XvO997+Oq48f4npe+lO2tTZbGEz72sY/zxS98getPneKlt9/Om9/8Jr79778IrTTPfvYtvOud7+C1r30d199wEyeuujrWdPUPpxu8QcyXsqsnrz3Jz//8O/hnr341R48e4+9927dx7733RsATliYTPvXJT/LgQw9x6623csvNN/PHf/RHhCCh4eve+Ea+/hu+gZff/l285EV/j7379vEv/9X/0bnJ6d7+4fe9ks/8yZ/wd297Lv/oe1/GuXNnyYuCf/vLv8a8mvMtz/tGvucl/4C//x3fySu+71VdFYMxGUtLE65/1g0cPHhQmv4mBYu0U8XwJTOGleWlztCJUqh4mSngFgqFp7VNp6LggnhJtW2xLrbSC4Ebb76Ze+/9Mq/4h/+Ij3784zz44IOMRiOuu+46tDbcdPPN3PvlL/O617+eX/3Qh/jyvV/mxIkTjJcm3Tl8iF5RXpDlBVlZipxy9CC0yTBZQTEaMV6aMJqMMUWBynKUyciLMatr+4QbWOTCdVNgg0dnGTZ4Lm1tcf7iBTa3N6maOVVd49Esr65hTM50Oouh5awTBD1+/BjPec5zGI1KnnrqCcBz/OhhVPCsr69TVVXErQxNY6nqBq0z9u8/wPLyCufOn2Ne1xw8eJBn3fAsjh0/ysrqMsWokMyl1pI5j1k6oRIJLNHWQs5tW8uljU2qqmY8XsJ6mNcNo9GEodDBEBMbqsV0VRO6FwAYhtLDZiDpHKk7UqqGSK8NdeaATmtueXmZtbU19uzZw3g8wfsg+mAxrE1cQzEy4nEmg5SoH0qpuMn2dZghRlYSegxCSGJVRFMRnO39kODBSy+DPtmn6Fsl94YzOTKz2YytrS32799PU4vKxs5sJ8JYHh1ipcsgdrwcMwsLfwDwsY9/nPvuv4+6rnn/v/0A3/3S74ZoqdMJfvuuu/jsf/kv5EZz+vFH2bx0iZe85MUUec7P/cy/wTvHY488ykd+9yO84uWvAAK3v+Ql3H3357jnz/+ctml457vfvWhVd1/PIBZPh/ee66+/nuXlZZ55+mnuueeejrbxnd/xHWit+cmf/ElmMxG2u+u3PizXrTUvvv12/u9f/zU2Ll5gujPlV/79L/Lt3/Ed4hEOwux7Pnc3//n//X9wzrNx8SKnHz/NDTfcxE0338LPve2nMdpw6dIlPvyb/5HbX/rdC+KCpx8/zfVXH+ED730PwXuhNGRZ10TX2UQLcORZxngkEkRt2xC8oyyLuHv6TsolQqZ9SkUPvQD5WV5aZnt7i+NXHeemm2+WBQ5MlpbwIbC8vMx0e5trT57kpptuYmtbmqmurK721RnaUJZjRuMReVFEIrGhHI2YLIlMTha7vQfAZDlKS6g1Go2ZLC0zniyR5QVKG/KikFmjoBwVjEYjVGpqGzyjsmRUjmjbls3NTepG9OhTgmJtbQ+33HIz
t9xyC9tbm5w58ySz+ZTp9habm5fYt28vN910E4cPH+PIsas4cPAgk+UV8mIESjHd2cF6x7Hjx1maTNjY2ODBhx7i9OnTtG3D6uqKELSznEwZ0cwPxLAtTsIgTWmm0ynTnR0a6/BKcMy6FXzvbzqG+NHwJ5VttYPP91niRcN4OR62iKuBXI80EBZ8cT6fC6dSqQ6jIxpTE3XnskEYmw6169oTvSIpSy98t44VM76PQIaOSHJMAulzsY8GKekRGQOtpcwLmrpmMhoDUnTfVBVNXQGxnNBIyWM6LifNdimznqS3vr6OQtK758+dpyxL9q2tSQ+AOHgPPfQgRksqu8wkS3TNiRPsP3CAx04/0Z1eG8M9n7sHBRw6fIj19fWOK7X+zDNx1BhkQONNdr+jG5hHH32UH/iBH+D7v//7efe73sWjjz7Kv/qpn+IP/+gPCcCJq09w+vHHY82f71PmSozZwUOHOHfuvPBzFJw9d5ayLNmzdy+bFze6+3/k4UfEJVaa1lZopbjqxAmMMfzp3X/RXZRWmqefPsPy0nJX/G0yI2CnE8ltAdY9IXpuaaI2TUsIsXDce+q6wrlU2JvGThIXKKAVLfeObqFY0HWb7kxZWVnhzve8hzvvvJP9+/YB0kDVe890OmVldZUfe8tb8CHwnP/pOQDMdna67xPOUJTm8Q4bJ12WSRMM61qqao5tPYGcIi8xeUZQvivPKkclRSFEaxt5fVmWUe2IWmmeZ0zGI7x38Vw1RovAoYQvBbPZDqsry9zwrOs5euQwGxsXuXjhgsAY119PZjRbW5s0TkLu8+c3KMoRo/GE5T0rXHPiWiYryyijqSPemGV5bOaRMx6NQWs2Ll3Ce8uRw4dACYZkW4drLU1V4xor/U7blq3tbWazOapYlgXehaEObS7PZCYvp0tyDXblBfwsJoauxENLv0sGa/fvOy8vZq9d42i960D/PJOuSj4KRGqt0cpgQobxoeNlZhpAOlmB7jtbIbiYip5RcmggZkjTvXnT4eSaHi8OMWIKVtSRQwKXdYzNg3BlrHVMVlaYTrc7HLOOyrx1PR9IikcoKh6XGTOhKgzDOun9mBbs8WPHqOuaSxuXOna0Arx1nTaSANyBM08+wYMPPMC3vuA2OvhRaXwQd3R9fZ3nPfd5ImMDHDly9LJJ0D3Qhb/11vj3fu9jfPSjv4fWip/8yZ/k3//SL3H9dadQKE6fPs2Ja65Z+JisexmIc2fPcvjw4Rj2BQ4dOkxd12xuXBLMK36pjQx/IfXJRHvyiSeYbm/zjV9/i4SOWnodBqTUJSkczGZztFIURU7btB1twxiDNpIwCDZJ2FTkRUmWZYzGI6lJrGZdWOydxWTS7UachJ6YqLqssniU9993H7feeispn3zLs5/N/0fZm8VIlmb3fb/v++4aa0ZmVmZV9Trdw+lZKBs0BVuiIAkyCAugDBC2JcCwYcggKEgkIArScLFhwA+yRMsgKIMPhmXyyQ+ibZmUJcgjboIlPlHmYpJDDmd6lp7u6q69MjP2u32LH853b0RV14zhGBRqujIyM+LGveee8z//pa5r3nvvPUIIfOUrX+Fz3/md/NIv/RLKOT772c9x7949drvt4RCHMHSNcveUEIw0NeJY6p/HbQLSXZlE5EVaCS8vTRMInt12gyKQJpr9fouzHePxiDw1eNex226ompbEpNKRedE9KgV37tzm0++8g+1aPvzwA6xtxNq5qRiPS0bjkq7ryNKSt98+Z7vbU9U1z66u+ejhQ1prUUYzPznh/PxcCLveU5Ylt87OOT87Y3p5KcuXiNvYIBKkrqnZ7/ZUdYPtZKSTcTiQp31YikcnUsB788+PncMvgP3H4+Zxx3VMVD02V3jxZx3rG49/pu3ckUkpfKx7iwYJRkQGKC8YlXNCGRqCfMPRRvPoZ/U33OOvCR4oCxujerOFWEo4GDTK+4pgfk9JOfpdKnhQcsNbLZcoBWVZ4qstIUtIUp/8efUAACAASURBVAlI1gaMSlH622BmesB0+pEFvv/7v5/PfPozlEXJD//wD/NL//yfy0FRGhMr4zByOkdwFm87/q9f+VVOTk74qz/0Q5RFTpok/PHv/m7+xL/zb5OmCb/0hS/wPX/qe/iTf/JPkCUpP/ajkgEwAPAc/f38RwnAW2+9xfd+7/fKm40f+nq1GjCpX/21X4EQ+Lt/5+8wGo0oy5L/6C/9peFD+bVf/iX+07/8lzk9O2UynfKDf/Wv8Wu/+iuHE0oLnjCE9IKs+o3hD774Re7d+4D/4r/6r5nO52il+cRbb/Fd3/3HCcgo0bYtd195hV//jd/iP/nP/vNBJWHMYfPpXIjLAtnodV0HBEajkul0Ql9ZksGtQKTZ0rXk/PW/8Tf4wz/6Mq++JnijiaqJ//Xnf57/8C/+Rf6t7/5uTk7m/PhP/AT/+Bd/cUjD+V9+/uf5gR/4AT7x5ptcXF7yIz/yI/z8P/yH8fyMnXDw2Laha2sIjiLPmIzH5Hk2uCIUWcZ0MmEyGpFnGb2fvXMxSSseB6Vks7vf7dis1+RZynw2ITGaar+l3m2lUzImhtYGmn1FsI47F5e8/dZbZGnGerNGG8NoMkYZw/Xqhqubayk83vHs6hkf3b/Pzc0Kaz3ewWa9Y7PZMZnMWJyesVytWK3X+BCYzWbMZjPSNKPIC8bjKVobCAbvwMZNWlW1tE1HZ2WcdM6RpClJmkiHHFzE2z5uu/TiWHhcZJ47q+PXj+2mX/bnuCN72dayV8lofci37KMZu64bNtaHRduBeiFFsd/u+dhohNgECEBvtMJoFc0M1MBQ6JcIErbso0tNPJ9UbwQpzyUEGTdxA/YmnDUvQSvBUtd7mqbiZDFjPCkZj0tOZlOyXPIVstSQpwep1EsDTYjdS19B/uk/+Sf8T//gH/Duu+9itOG//ImfGF6YigdG951M3FIEL3fi//g/+H7+9J/5s/zOH3yJL3/jPf72T/4kZVmSGsPXvvpVfuzzn+dnf+7n+Pp7X+fevQ+PjA/VC69JDWWsx4uyLOXHf/zH+NrXvsq9e/f4nu/5Hn7wB39w2Lrstju+7y98H++88w5/9OUv80df+Qr/3p//88MQ/T/8zM/wR1/6Q/7RP/1n/J+//C9Y3tzwd/+bv/3cpqn/lGWzqumsjZ1Wxl//ob/GxeUl/+yX/wW/8Tu/x0/99z8zjHN5njOdypbszU98gouLC8RBMyHJ0kHiI7Iroaf079E50e79b7/4f/BkueHP/Nk/y9/8/I/y4OkVf/PzPypFNZGQjFu3bvH222+TZ1l/jgDwq7/yq/y3P/mT/MI//kXe/frX2W63/GhMjFdK8XM/+7P84i/8Av/q13+d3/v93+d3fue3+e/+3t87LFziyW0S2U6VZcl4PKIoshhcIcoAIcWm5IWEs3ZNg+060jRlNpsOKUjBO4oio+zpD4m4reSpIU8TsjwlzxKSxAgf0Ium79atW3zuc5/jzu07XF094/79j9jttnS2YzqbcHq6wCQyPrZOOqmmaViulqy3G5I05fzWLW5dXFCWJbPZnHfeeYfLy0u
yPMc6R1VXWNsxHo85XZySmgyjEoxKBMz3vdWRXKxt1+K9w2g9LGQAtFE4bz9WtI4L1YuA/cswsY9TKp5/vNitHW9KrbWCzfecsyMKh3Dj5POx0epdpFv9VvUQRKx4kdLRU6+ex8l6qyvB2+T39Mnm/ejZd4ny336gfx1Bc4f3iyPLErq6Ic9z2qbibHHCaDRiVOacnp4ynY6ZTEaMxyXlqDiUiBcP1t233g6rZ8/kqlDwy1/4Av/yX/4rfuqnfgoF5FkWtxP9Gzxs5jQBgxfdoOkj5kNUa8rV4QPxIo5bHRj0jKK275lrxzXsaOzt+Rn92EjvOiknmlbI+lYxdD/Be7n485xxLKQm3oGSNMGkInESy2t5J/qFFTT97K+FoT9IlOLLck68t3QkyColkh7p7Bier9Qx7ygc/gTxUTNRXpVlGVme0dStxNhHHpCKd9teFmWdpW1aup50GQ6iY/ldDCf3cFKpniuonrtphRB6TyIAiizjlYtLRmVBUeTiAdZ1VPWe/X5P27WkiWE0HpMYI0Jy6+mcdLCv3LmLVoGuayjznLauSaKzR1NXBGfJUkOiFU1Ts1lvqVtPa6Ujmk4nvPX2J/jkd7yNd5YPPnifpq3IspS63jOejDBasa925HnGYnFKqgpWVxVPny3ZNxVFWVBORngFTdugEsNiMSfLMk4Xp9y6dYvpeEyaJBRZRp7k2NbSVTXeCbVgu15zc3ND07Z0PvD0ZslHD5/ShASVlVQWvEnBCJTQayGPCbPHxam3zT7OYzgmvObmecLsyzq6/vtfRvlI05TOyfmgEkOSCku+ayts25BpTZFqxgZS5UicRXtLoY04Jms9jOF9cRPnnAjUH1E3tD5MZ957rHdsqoq8LCiyDNTB885EnM4ojQ+H4BUVoouG1pAkpLMFy+2eW7cv+FN/+nu4urnm3W98g7quGE0n4q6Dh+gK/T/+z/+7gpdgZj7KX5BL+rmLWQHO9R8WUckfIm6m4sVObEGluvvgBq2lUjpW9lj8dIwki3Ovd3HDcYyQBTWsX6Xa93UtiN2vOmhFh3aXEL8vPo5OrH4bKLU13rV0723qhqIVjk6moZghrXIIMeTEeXQU1hqTRcsghXXCoRNaRSBL0xiGoaOXVSyEyH2h10maozti13WkaUZZloQQ2Gy3cRuF2MHELIJhc6QUyiQDDqHi6DpIXBSDndPwbuIH0Y/Rrje6U5HoqyQ8YzSJGQk+4I0U0yxLAR/VEwIuKxLyLGVXtbhOgivGo4JUFxACm/WSNDVcXpxjDDTVnn21p6klTqzrHF4ldF1Aa8PJyZzZbEa13/Pw4QO22y23bp3hbIvViciQFAQHrgu4LpDmhrPLC84ub7NcrblaXuGCZzqdcZqfEpSiLAvGoxGjssR3wi7PkhTimB+clQ7rqFuy1or1kTIYncRzWQ6kiRte6xV1t8ekqWzbMMjtur/lglP9BlG2f0oFvJebfPDyIXXOHsfWDuf+y/77RY6ZUor5fMG+rrCuwjtxzjVGkSYpeEeqNakCpUKkVniUC3jtcEFh8gNh4vi39pLpPM+e7w6V6HmlGZGmxHG4hnpTUh+PQmK0eK/10qoe/0eOQ6Kg8R1n52cily0KOm+pu5ZpnpCGFO/FdusYnnyJn5kaHC0Ob0U0eL3tdN8WaRO/1s/AIdCHffRblcPoo1AIqTBJZczqq02eCf/KVXtSI1Yxn/9bn+dH/9bnP/by+scP/pW/whe+8IVY3PpfItQNpXVfkwghdn460OvGgg+EvjsJhyIpzhwv6j+f24WgtabrZMTAGOqmJvEmZiPIdrFvvU3k8Gh9SGdabbcQx9RhMRCTn5yz4tGOIfhA27SUZSkXnXdsdzvh1gRJi/LKHYDTWMy9VtjOxsLdYyFq0PwpJRwqozVZnskqXO42h63ocPLBerdlMp2AUuzq/SDFGo9GJFreW5GnTEYjkiSjqhyp6mSz13RU3uFcR/AdRZExn08pRgXV3qE6jVfQBo9Vii6IMmI0LplOZ8zmEwiWZ0+fUu8rjNJs11vapmY6mWJrT9dZ5uNTJuMx2+2Oq6ePmJwsSIscqzz5qKQsS87OzhhPxlw9u+LrX/064/GYs9MzLi8vMMFQ7xomkzFnJ3PQipAoUJqrq42MSi6gVCKmpNQEr/Ctlw2hNnRNgwuGPEnFgy5EABwIMXA6RFxIoZE6YiVnUhtMovA2YF10ZVU9KB7rhe6XA4fSYt0hds4kCoNgfU1j8d6Q6Bwfn+8DYvSYGjEPCB7jFEYFkhAlRFa27ntXYdKENBXfORtnpdZ1dFXHZDIeusUDuR3Q4JVGZylog9XxfaCFDK4U2igq1y+U9GDQSfAkwRCCkmOpQBcZX//gHpvdmrprGc2nwqfEYXEE5fBHC4CPpzMNn4D89/f9+38BHS90I2sblOpXrocD2/8dgh7GRB+En+OD3P1Rca5O0mixLHSDopCRKckSNvsdzjt++u//ND/993+aPsqu3yKGCEzLweiB1WMMQUnH9tybim8oDLMpQyWNXWUfHBzCQWh9kFgd7oIu0ijQkKUpGbmMmNaCIiYyh2G81VoJkBy1bFkuixCTmFj8pINDBdFeOqIYO+A6R5d0FEVOkefUdTV0WdqIMZ4IiP2whVJa0yWt8KKOb0ZDd3pwVehHXt+5KC5m+OySNCEvChyBq+UNZV3ivQXvh07TxnEkNYZQlLJq954sSSnzEpMorG1o6xofLKNRgfOO7XZLVe2oq4qqbWg70bjaEPC2IzEpp6cnzGZTmqal2u9pm4bpeEKe5aQ6wXYSAp0ow5MHT3jkHZe377BYLHi6WZO7Mo7jKW3b8vTJE/a7MUVR8sc+929IR6k1qU7pWuFcBedJjGE0zujahN1mC0qIpgBV1ZAGscp29hgicIjZbkAFIY/qMIi0DniZCLgioB7du5QA4eJfRgwdOxorD6fpC7DHC9dtjycosfHqrztM/Jy1EuG4D9iuxQVwoTdkGHoBHBplFCpN0AMfUp6QKFF12KOxd8h9UOAUWMCrBBuvK6MMyjBECAags3Vc6QXx8pefNLyXtukweYHXhoePHnCzWYuhQZFxfX3FbD6hN8gN+nClf6yYjccT6s2GLm69ho0HUoG15tCJ4QeMJRwVt+DlgzvEwx38xYyOPmVGk2UZZVEwGssoZVJD1bUYF/Vlw0E7LkAvtNv938PoySB8HzrCo23S8E2xMPYFTql+Jc3BBSB2Nv2HrZSirmqSVMitgUBnu8HbzWhDXVWYKE2RD6YRrAYZ2Y61eZ3tYkiuAlQ8AX2svdIxdW1LkphBq7bdbbGtjJhS9IgmiNFxoNedeksvLO5tmWRLpqOzSA8pxBV5PEa9LrfICm4tzujalq6RqD6twlECuacohRDrnWWz3UGoSExB8EE4WEqcJ9I0xXnYV3usa8nzjLatsbaj39SmSpPnBUYZLs5kqZGmKc+ePqVthHCLVmy2WyFTliV5lpNnOUmaUu8rnj57SlglTM/OKKJLh7M2nmclWhnqukJ5WfcnRUFZ5pzM5vTkzqatyTMxpGzj1rLtOoqyZL
tr8AGaztNrBKHX88qJ1dNojjAOhpsJfXd1KErq6LmCr+oBaz1+HMuWXrYUOH746BzcXx0hKHSIXXqv3kA+dx88LqQo72P3Z6Uwe4XrUZd4bjkleZWN6/9NnDb6CBobPJ0XmZu1gS50pJHPp5WUGo8jqEjM9Q6vPCHWsSApxdjQ2xGJrleMNGV5UTcNaiO22oQgN9j4+Fgxm+YjylffYLte411HgnCLcpOQaEWRSRvtu06yJV2UKcRR7WQ8IkkzQFO3jsYGtMlJRyOSLAeTMJvPuXPnNienJxTR5K9uKp5cPeFLX3uXZzfPqHY1ne1oGxcJf5l0ZL0lEF4wKh+wToIt+l5aK4Mj4FW0gnaOMs25dXLCq7cuSbUiTxLKMmM8LsnLAp0ZlIZgA5vNlqZtJeBUQdcK0J/lBVprxpMxxoj/1r7ao4yibRturq+5vLgUHlJdSTFx4mXfda1gjtaxWJxQFgWr1Yqq2kd7E0NiMuqdSG/6k1LHrV9RFsxOZnzwwQc8vXpGZx1pkqGTVDAPrSnyEUVRPCd7kVNaum2TiLWycMegsx1dZ5lOJ0wmU7b7Pb3YeLE4IYlgbRqLadc1TMZTCELqnU5nGJ2wWt6w3VQkJqEowDsl3YzyjEYFo3GB0oG2q+NxsUM3Lek8WWSeG/I04+LyUjZZbRvt1SXMd7ff451IvwCePXuGMYa3336bu995l6dPn/Dw6hkd8N5775EmCZeXlxIEXFWMRxMuLy6ZjEZsNhs2mw1GK85Pzzg9PZUOu9mz3W7QStG0LXXb4ZGuJijJDuicFQJo0msl+04rtifq4wWr7/Q5LkhHi5hDp6NFXv8CJvb/59Ev4X0Ar4KIvhHKTHxF0pV5N2C2xovMDB2odw3GCjbogxPsz0gn1NoOjdhmaWXQxpFoec2uEwdlF5R4+ONIkkCWZqQmZmj6Lm4zY6F14L0CFbBGobzCK9isN1jruH15l7QsuXf/HnXTUo7HUc0SImZ/AM0+VszKJCWYhOmtgkQpytRQ5hllkpEnIr4NbYvralzTElyHIpBGsLnIcgKK1gZar0jyEePZgnw8hySDxPDpz36G7/quf5O8zPnm+9/gyeOH1E1OXqYs9ytC4tjkewm1qCxaabIkx8asPWstCk+SCvZQ1zuappG7YlBondJ6T9CaNMtxzjFKMhbzE95+/U0mqWFcFEzHBdP5mNF0TFpm6FSRKs3NzQ3bzR6tEzyKum5RylCUJXfv3qVuara7TZSftDSNuFY0bc18OmW33bFerSSSr6loW6EdzOdzZpMpk/GY5XLJQ+OxXc50OkEpRdc6QqdpKkvdNuR5RpIZmq5idjLjs9/5WT75+m2+8u5X+ej+A5yHyXROlhX0ttvj0VguCOviptix2++pqoa261B5IWc7UDUNSmnefPNNzs/Pef+De1RNF+GB6ErrJO27dz85OzvDaDlG3gd2uz0hwPzkBK0N+22Fd5LboHWgszX73ZYk1ZRlgXNd7MiOL3TpAoyGrrNsthsePxWb9PVqTV1JoXTWMpvNGI/HtHUzFOf79+9TVRWj0YjFyYKd7Xj9tdfY7/fUVc14NObi4haJNjTVDrylKHLGowvm8xlnpwu0hpvrG6r9jtmkjAEsirquCUFxs1xxs1xRNZbWRTDcJAQtx6h/xGHgaDrwHFZYPNddhZ51P0wIgkF+HLXlCPb49l3ZgKbH16Hia+hdDxXi2KucuLc21mK84GYG4ZBVnZhsGuUkPo+ATjTKQ+ui8F55yaA1ilR7cZi2IeYSaGzcfhoCVnmME3aD8CodZihmvWmoj2lsFm80TdPiA+SjEWXbELzCWkiLlCLXeOWHDIFvWcx8K8zqLEkospRJWTIpC4osJVUQOmFkOw0kiaQN63j3ThJWqx1dUHQBMIa8nFJOT0iLEq802ajkZrnki3/4B4Tg+fCjD7i5eYp1LfuuZr1eUu33Q1eh44fto35Ntm5O7EgivDeIeb1HaUkNEtzqwKFx3lNVFda2hKQQbkwi+QXOOZR1GDQmCWRZQlFm2E4kKsYc+DTX19ci5zo9Zbvd8uDhDdZaTk4WpFnKb//f/5rVagUhUJYlRZGTZRkhYkVGaXbbLffvf0TTNJydnnBxccF+v+e6XlLkJZqE1jY0TY1OSspiDCgePnpEmmTMZicUV0u22x1t14qLq3d0lcU1HaOyhCB21QGxHXddR7XdUZQlLvKAJqMR8/kJp/MFeZoxKgtA4xHKhzEG23aDZMpow2a9IUkTZtNZLMCWy4tLXn/9dbabHe9/831s55hMRjjfYXctPkioSp812o9MvYeWc9KxOm1kGZQkjMdjlFIsb5YxY8AxnUjRr6qKNEm4ffs2WmuWNzd88MEHZEVOMR5TTqe8FovZo0ePJEowBOqqYr1es1icUBQZSsFuu+ZxCIxGI2bTCYv5hPXqmq5rZTLoOpROWG43LDcbdnWHSjJaa2V0sl5yMpExycVORseteo+ZDRuaIAXMOTckivcYq47HWL/QrfXF7Fgl8NJHXN7YthHowvSbezVI0+RasHgdbyIhLqeUIlGyQAqAi7zHzqlotZOijWwk29jZa4lIIjVu8KO0KBKVivuyRsKS4/EQuEngFxtx3mA8Xgm04pUZbrTjyYxiPKVpO3Z1S5IJ8bdtHTqRu4V/oQP+eG6mls1lbgJFoslMwNDhO9F5pYByDoOLnCFNkqYUSYZKMzaVIyVFZwobFF4ltJ3F6k4Ijc7y/r0P6LoGk2kCDq0huI7ldk0dWtpGVvsy28s6t8ea+hFDzGEPjHppdXtsASRAVdFb6/o4elXVntL0yU8GlMY7cZ7wQdbEaZpQljk7X0Er+QNJmqE1FEUeGeKIHKYoOb8llki/+7u/y8OHD7m8vOTNKKN6+PABNzfXZKlhPp/Ttg2LkwVvvfUW3nt22zXL5ZKqqijKHFtJIEueZVR1jXOOLM+wrePxo6csFqfMZie8+eab3Nws2cdAEKMFtDVK5CrOOkKwGGUo84JxMWI+ndC2lvV2ewhGRlKtx2XJ5fktbtZb1psteNlol3khKffe03Utq9UGYwzT1+ZMJmOMSTk5mXNxeYfxZM96tZVOKjHYusUYRZYXKBXY7jaE4CKtQ/zLiNvn4AUSyPKC+WLB+a1bXF9dSXJTkQ8XuDgnyKIFpZhNp5ydn3OyWKCj0eP777/PvQ/vcXl5wfnZGbvNht//vd8lTRMuLi4o8ozTxYLZdIpS0Y67aQiuI8tEZdFEo0uUoo0mnHVn8WjRaAbBkENcmvQFJ8QiNgw//bjIATMj4pSHr8i5FLQGffDyP/6Zx0XtxWyAFx/edyhleoQuVjnkeBtNkuf4zornHAqlPC6AilJGr0zsrgI2aLkGtVyLDkMIRnJNnXSvnt6TMBAUdDHyToTm0sVqIoUpNiNKh8GNo6cIoQ0kSpYnBrIsx5OSJBkqrlN8gGbfgPExk/bwvj9WzG6dTgjBkygVcZyAt7V8gLahSBK09+jgCEqjvMYoj4+e4Vk+QWUjApp909JYS7vdEqqaoGBX7amqHUmekoWEut6hgiPPU9IkY
V9VA9ta+4MNUf+h9hYqiVaDXZD3KXgvFAB3WF8/z1cT7tZut2ec5NixO4SPOKAFZwKkinFZoFLFnr24WZiUNN6xZtMZ18tr0sxw69Y54NnvdixXS95//z0JzEhT7t27J572tmU0GjGdiF/W66+8SgiBb35TNJJpYphMJgPOtb7ZkmclJpEIPo8XcXgI5KOCzWbLyWLBK6+8ymg05v7DR+z2++hmkZFqyQvddTuauo5RYI7RdMx0MmG93qAQLlzdNKyWK1595VXuXF4yn81JHz2K/vziOzWZTBiPx9R1xdVVzcnJiaSBJ0kMadFc3yyZX99wMp8zmowBz3Q6pqhSbpYO51q0VpRlSV2Le0mPkWmlSbRBaekku66jih3U1dUV6/WaNE0ZTac8e/IU23WMyhG73U5oMErjuo7NZoM2Bh+7m/nJCdPplNl0ymuvvMI77wSWyxvapiXNEqpqi3ftUahthw2KDCEI17VgtjpNaOo9LsC+6dBpIfiTlklEGS0LMSU5FCq4jxWgF7upfpoA4Ij42hsNHj//ZUXtWxcz2WyLs7EGHSPaIsEdK1CBD4HOe1p/iIHUKFKk2ARlJIc1Pk+hSHxcAniFSQuCFetMAK8lrIcobarqWkwmVULPm5TlU4rJNO12LwTc4X3JNWuBxEGqYb3asK9aLu9e0jjH1977Bq21TOcTlHFy7mgd6WHfopgp36KUzNgqWJSLy+TQge9oqoo0MaQE4c9oLZyoumFvd9TMKIoJIJvJfd2Q5hneduybSqxI8CSupWsAvOTiOclh7BnMWItBkeSZANZW4sCc7QOGhd3urcXGZOveRsQHJ4kwBJq2khWxF5D+0aPH5MqwmE3pWkuSGkxqBiyuSFL2+z3eCeg8ncgmLoSAMlBVO05PTnDORR0obLdrnj19wuXFJaPxiBACTV2z2WyECf/KK3jXcf/+faajMePxmOl0yna7JctStNaMRiNCqDg9XaDQkt7UebrWY62kVtldxXK1wQf4xNtvM53Oaa3l5mtfIxBY3H2Fbt+wXa3Ej46ApdesLgkERqMJTdcwm5/QWcdkMqGu9uz3u3gHlG2u7RqM0Wy3G6zr0BqS1Mj4FS16skwCmvdVxc3NDf/un/tznJ4t2G1W1HXN9fUV1nUxzSmNo6uibRPBRLwjKKHvpNE+PU0NVVVxfXND23WMJxOJtIPBWNKkCYvTBQR4/OQxIQSmkwllWVJOxswWJ6RZxm6zYbNZs92sUSjGoxHT6ZjZZMx4XNJHAzpr0YliMpnIRrOJdJgiZ1u1VHUbIwIdeaowaQZKE5SOihVJO3e+lzVlhy7tCOgXfEzkVr1jx8ABjNefd24wNzwOCzkuji8GlrxYAL33QrdRStLKYyB3CHHU94jLh4/JYFqD1tgIMWRJDN4OjqBlY24jzUKniYQda3FNkfen0Ukq7i9dSzAJmCTmZfqo35QtZVu1oAzWtfFaT9BB0XWtUFeMLAM9ktp1eXmJC55yNCIjUHcNaSZNjNFmSFN7aTELXoIkbPB0LWSJlqQaBUme4FuLMaIOkI2UkmBY52k6qEODaizo6OXeVDS+wwO7/Y7GdhgNLoZ7ag3BKmzXCkfKd7L+j+4S/YcwbIPiyllwMnHrOEg5oPcKEogixlahUEEKxHq9oZrvcV1vYSw/xyAfeNO0BNvhkDE0eFEmaJOSaUOz3wnlIBFSofMt3neApSwTiiLno4/uM5lM+OQnP0lV7fvTjvF4zGazAWS0mU6n5FkSO5ZaOtsiA69orcV46YzF3VQIolmWcXO9xIWvc/fuXc7Ozri9WfP02TPWqyUmaOh5ZKFnjUm7r7WJdjoz6qbhzTc/QZqmrDcbbm6upVPtGvG/Tw1lWWCtw3Y1SmtGo4KqqsWeyHt2+47lasXlxSVBwZe+8ke8cnnJnbt32KyXLE5PJHdTiX5xt9viXDrQXdrWYq1EF3oPWVagtRIssG3ZbDY0jUihZKwU+kyWZbzyyivRs20zSG/2ux373Y40zzDGRNxNKDFd17DeLBEbaIc2t5jP50wmI9q6YbPZcHX1DO88s9lUHDBS0c8GFfMwdYJKM5TJhZMVcdleiP2ic+txkYkEo4g9Hkwa5YvPh5h8K7b/typmz39vxCMjftb/YqGGyLgXg+FkTNag+oKm4qIiSQiRPuSRTk45CRJWPSFeiY1V7/Ua6PExdThuzonVvAso09O5pEsU7inQu0lHmhEKVJLgrcZ6hzEp+XhEVhR0weG0l3EWoS4dL18+VszAwQAAIABJREFUnpvpepEo4ANdcHglq6bEGMShPrauwdEqwOu4JhVGt4v+4SJdcjgX6JyjaevhoCYmI00ib817XOdoQ0cTarq2iye5l4h67+OS5iDW1ZGC6GxM/vaxgj2HL8Sjc0RC7GIIQy/2tVYImCqVUUdOBoOsERSNlQsujbuT0WgkC4jMUJY5zleUZcbdVy7Jspyu8/zmb/4Wo7Lku77ru5hOZZVcV1KcnXMSNR9PtjTLBlfN/b5iNppJknUvUzJ6sHx2znJ1dcNkOkGhWK/XdC6KuudT6qqia1p00Bgt9ucDQVYnKK1oqxa130mXo0SC8vjJE55dXTGfz7l95w67/ZbdZs1+vxP/r0awO5OkNE3vhZWQpKkoAEY5SZby4OEDPvH6q9w6W/Do4X1ubq7RBrquIS8ysiiwF/5XoGsdXWclsKLzGJ0ymhQoo7C1pW5qrBVqTq9acN5zdXMN31Qs5ifS2So5Rqfn59Rtw3K5jPZJFQ/u32c2m/HWW2/yxuuvc/fObZqmJssSmRZ2a/GqD44iFxVHXVd4oYVjvQTQNtYK9UIb6Tp6YXc0bgw9GK2IyeXPF6DBGktFeET1xgy9giY+7yVcsuMt5vEi4GPja9+lJQblxZU4glH0GKXWhtB10XFaCkyvElDxZ/T8RB+CvJfQP0/eQxaxtaB6uoXkYyiAo0JtndyoALwX26gAQ17tgOcpMCaGCCGXcds1bHY7WttFpxqNb7uIJ7pByxz8t6FmtK20uUmaxAxLL4k+XRvXtRqr4s4rQGaExayCVHwPuNARvOgmlYagAiDdS5+tl6WGIkkxiRLzOysnTdu00XQwHkTfj4+HD1k6R9FfSjBCGFbdzlv5EJQSEh7IWKrEsibPc5QSZX9dN8LjKjK0CwTnpANVGmNSgRmMNDpJkpBnGa+/8RrX18+wQTyVrBW3+9lswmg0Qumcz33uc4PV84MH92mamlvnZ7z99tssr2+4ub4mTRP2+z15lvL48WN2ux3WWiqdoGKISAgKlJE7KArvgqQ/mYT1esNytSQtUqbTCTqBer+jbWuMSiDJBtWE8xKUon3CdDZjX9d8xyc+gTZ6cE1dLpd8+jPv8MqrdwhYPrr/Abv1CpOknJ3NQWm2uz11vafrLM5XnN+6xZuXr7PebHFbEfn/wR/+AbkWEP3V1+6iVGC1WmKdparEm208Hg+Ym9aJdPpKYwexvnTU2sg5opWSm1AInJyIg8JqueSZtSxOFkOAzcXFBefn58xPTiJlpmQ0KimLgrZp+J3f/i2+fjLj9dde
YzIZiQxrPKLrWjbbNXXVUNcNk8kMk2TUjXyGVd2wqxpa50lcQCmL9RIyLxDEIYNU9aPdCwVGbsQMxawX9b8sUenFsJP+8f9Ny+gLn45sjyBFC492OvquBnwXtcOdpfMerQO+77iAgH8uFEVq1CEz4PhP/7qOx2nBMonHQ8rkkH4Xx9d+LQCensitvCzutJZusGpqGicYp0oUXdWC7pcsEp5yHLj98c6sQ7RiQZT21nZ469EkoIxIN0BCBnygM4g4FUUTYNvtaZEPtKobrG0I/QYDWbGrkBCMbFaUS1EWfOtoW7En6VW2Id5det/5EAI4STVyiHPs8CFH0buzVjpHpfG6JxBGsDDIqGdMQl037BLBy8Zxo6aAZreT9XIWlw0+jtkR9H786DGPHt9H6cB8McW5Bm08Rnucb/nau1+nKAsSbXjy5AnOOabTqXQjR84GdV1Hd4ImEkdTRqMRWHHaFMxLD+87ROnMaDyhbio2uw2z+VRwo8ePaLqasijAi+jaB0kUtzY6jRpNEqUzSoupYTkas91uabqap1dPmT2ccnX1lO12SV3vqKods5MTXn3tLtPpnCdPnvIgTbi6vmaz2ZDnKadnp9yslqzWK8ajMdZ2lGXGK6/eJQTHN7/5DZbLGy4vL7i4uBD3iaYZbGKyLCXLCowWd4a26dhVMsobbSiLEmVk5PBOzoX5fD4EgFzcuqCK+QDL9Yr1dkOe5xRFQWoSZue36LqGvbW8+tqrjEcFwVnausK1DQq5mLPUkCVjAp6bm2vSfIT1mt1uT9N21G2DC+IoG6yXG6WK6d2xoHnvhOHuBXzvL/bBiOG5gnNUyJ6rUWEImj5+7vGjXwC8WFiGYiiNlNzsQ0A5GZ6cC0MCkvL974l60dCPgQOSIwVIHzuA9L8jIMwPkSIpWe3LyEigbWrSJC7qjMAbqRG2nQsOxZE/mndRPSOv3UXHmKLMCHis7RhNRmJIOrxGHwti4PjgfayYWZtEMFwKj+tajNJMRmNGZUnbVOgAtrN0tsM7WZfqAI231M7TIfO0tV2MCpNWMjExqzB4UQ9og9fSmbnOYptW3AoSwbt8xCR6q5ShBfaSoHN8R5MLXgz6gpJQ1sMHGp04VAz95RDZ1nXSyRltMElGqC3BWbrW09Kz5aU7BcWHH77PZrdmOhsxGqdkuaa1mtX6mrpuefTgAZPpKR2yUPjkJ9/m4uKCb3z9a3z5y19mMZct29XVM87Pz7Fdw0lcKIxGI2xjMbrDJDK+WB+JhfG9rFYrlIHxZEpRlGy2ax48+oiA587t25RlidWeuq5punYY03TUxq13GwKKP/zSl1gsTnHOsVytGE0mWO948PA+dbWlKFLqJsO2DdvNBq01u92arqtpmgqUnJi73Za7d+8wGo9i95pjbcfZ2WLIVKyqvfiLbdYDTy9EAmUI4m2mIunXdo66rVEQuYAJaTZiPB4PneZmsxmkYbu9xMudn59jjKFpGoqiYL1es9tteeXuXbIsZY9neXPD0ycNJ/MZs+mY8ajEuRRrHfvdjqZuWW+2+KCxQRNURmtlPe6CjJi+v3kemRMmGHFx8FpE0M4PI1tfyIaxkgjQx4zMEHpnVuRCjZXkuVDtF8bM4+vhuKhJIVNyzgTFoBNSSpxkbUCbMHSGzkvwtIyHDEUqNdHq3T9PcZIOzKFJBowwvhRCiEziGENndEJqJLNCK8HG4w8U3mbk33nbSQqWSQRzi12eSTV1W3F1/RTLgs61tF0DRs4XlHD4zNE682PFrG5C5I9IoKftOjKTkmcaT0GSCQ1ChQ66OmZDJrGtBWhxrsWBzMtKk2gdMzJTlBdAXxN1fk7FjaTFdqKGxxxZCvsY0qvNc5YscmMULaLnIM/o5SG9kFUOzlHupZPnyAmhaNtWEnacI81S5vMT2rpiv6sl8qxtpDvUmq5NxUrmbE4xShmNc5R21PWa7XbJbt/ynX/sO+k6xWq5YrfbcXV1zc3NDavltWzLOIwRdV3joxFjfzImxqAzhXZysfvOAXYYT1CQZDImP368xuM4PV3QtA337t3j4vyCIislI1RrskwAbBcCXV2TZQVlWaBNS9M1Ivg3mk++9QnOz8+4feeS4E8pRzlPnjzl6bNn3Fw/Zb0RBvx6vcF7y8lijguWZ1dPuHP3FSbTkQi/PaxX1zx58oQ8zxiNRpydn5NlqZCAoxNrkqYo5anrFmtrfCr4oVI66lrDgN0YY4YwlyRNyYqC1Bh2ux2r5RLvPIvFgtu3bw8JR+PxSPh7dUVdy0WUZxkn86lQaoKjrnZcX7XClDKa2XyCSVOurlbStSjpiPviQQS8VUx+DxFo13G5YjAor7CuHTC0HuLogRJF9PdykeR9jHv1cAmBoD+OiX0riVPf5QGC9QXBe/vzSnNkhBh5lN57EtcNWPQA2gUdUfEwXEMRQI9fl/EuSYREHozgzGIyAd5bEoXkZEZDCMEFJWpOK5Eh6oBwO11HMEqcYuLvaVqRAq7Xa+59eI+zppJlUNugU0OaFVIkjTq8rpcVs1XdUOSQmoQuKFoX6FyH2TdgKuaTKUHJxaWMAIxCbjUE30DbDkJqR0DrQPAyrxul0EmCszKTd12HxdI1lrqtadoWn3hUiKZ1DhSWENII+sVVtfxk+VBlNSqLBy9FKkRw3UTLoeACng7ntQSLJAlpIdiTtZb9rqKcNuKCURbYtsG6lq4TrlWe58wmE04WczbrJZNyhE4FyByPCu7cvmQ+n5DlEz54/wn/+jd+g5urGxmHUkO130NwzF97jc1mw+PtliQxfOPrX+Pu3TvsdhtGI8GRmq4ZOs5h4RFE5pJmRrqAEGJ6UyQptn1IqmKz3dFlDqMNWVGQpCnOOTabDav1hsXilGk+47U33sAYw/0HD8nyLPrXa25d3IJgqfY71rEjq6oKGrmxaaW4uLjF5e07XF1ds1ytWa3X2M6yWJzw6be+g0lR8PTpE0I44GXOWdq2ZTKZMp1MybKcqqq5vr5hv2uwypFl8j61lwLQuo6uban3FTUInqZFQZHFsXy6WHBzdc3V1RV3794hSQzXV1cRK8vZbNacnp4wHY9ZrW5ompqPPvqIssgwGkajAg3il1U1tE2HViZa9Ah2gxYMygewziOSYNkwqyCnYBIZ9sTvVUgnNuRkRLy7F/b3ihWtDyPpEPEXn/ctqbFx0dXjYsTvU8TNo6wvowHpYcPaewkSAjoQMzKl0IsPnpDYOyS8dxhf6QszA94MeujO/GDoKddnlqbSfIQgXar2kY8qy4jEyPVMJwsoKcQyegbke1CGumm4uroWrl/TQAgkRkngSizSva3XS4vZXlvaADkBE1n8ddtBt0e3CbpNxNhNg0rlxEMd4qekgHiUl65JpB0R3AOsd0KZQOEQv/LGNtShw2rZkmZotEnwRjopH2xcAXtUojBBNo+eINsMLbZC/f3F99gAEpyiEUxPE2hdS1ak6DRlNJlws7zhyXvfJCtyzhentO2Ozu7xriZRnqyU7iJVsLm55urqGW0zwWSKzjaoFDrXcLO8pqpaJuUZF4szxpmQL713mCB3tOA
d89mMIk+5e+cO+92GPMvZV1v2+4qmDqRZge08VSUOAU3bYb3HZAlZkuO9Z7las9vvxTixLGm7jlQHFicFIQieEEJAuw4fHE3b4oKNPlSOJDUsVzeMJxNuXZzR2obl8pokUTy9esats1N0kmJMik5Sggssr5esN1sZ9zA0VcNus6Xa7hhPppwsTmmbhqdPnvCZd74DwgnWCq/s6vqK/U7caReLBU3T0rbdsKEdjXJxq00S9rsdobWkRSkcQKOpdzVZljGbzNBKiexKwX67ZR03l5/61DukSULbVLz91mtyvIHVMpcOO3qxLU5OxCDTW9arJXVt5SZSt2y2LVppiqxAmZx959A6YbfbEIAkk8g8f9TL6HAA2oMVvWCZpQNupaLJpZwCflBeoEDF0d/RL3uQaKQIr3jnZZyNGLRCC98whrMoZIsqLjXiINSzA3r7Ifxhy6oj20BGRy9jqHfSfBlZevWGnyH4eBNlcLohyPWpTTKwGUIsrpEARJJkwmOzIqFCa9BBlAPx/bVB0pp0mpFoNaSWyZib4I2EnuybjtM0R8Vt9d3LW1xdPUWl4pOmorv0tyxmnfY4LChFahKsCTjtaXxLZWvS1mBNgvGICR0agpGxsf9Q+j/q0KEGkAPpo5I/BKxzdK6jDY5Oebzun9RbDh3hYbHdVTrQAwouyIwv5EBN0EYscNQh9diE3ldJ/qRpyq7aU+Q5l7cvMYmRjdWupt7umc1SxqOMrjY8Wy/ZbixtPSNLEpqm4vr6iraZgQps9huS0pDmCZvtluVyg58lJCjm4zFJmnJ9fU2mNXfu3pFjmhhsmUuqz2jMvtoCgaraCfO67nBeNpco8XgzsftN04zdbo+09BIUYuPJ3XU2noAdaWrIBhsb6TDTNGUynnKzWrFcLdnvKy4uL3nttdc4PT3l6vpm+PevPHuXDz64x927d3n99Tf44IMPCMBoVGKM4fr6mvV6Q11VZGnG2cmC2WzGkyePefb0MevbF9y5I/hdnuesViu6riPPc8pSfsZ+v5cx27uoMhAOGyGQJTnBygWdKEOWppRFwWQ0ZjIZkySG1WpFURQYY3jttde4uLyFNuB8R9eKuF1pzXw2YTS6IITA1dVVXDpkbDYt3iuqqmG7vR5i7bzzJOMMFRzWyk3BWie0DCKvyrnIBIp+YCr6kqloc2WECHo8Fh5bPw1RcUfXnRv+q+/IeyA+Uo6UIij5dxs8JghfDZDtaRAJlaEvZP2YSCwzsYAqCP4gfR9siY6uuedfW/ymF/7tZdtWeaoWmoaKGSFRohXi8Cq+fUnEpCyEGL/oQHmHdZYkK4Q14SXlSxZiVrIL2gbKhFSrSJ79Np2ZiitVH8S7XxtNMOIIuW8qUhOLWRBgWoyBUxJ0NGeT7/NI0VF9q87Bn8x7AR87J1yvzskGMsRi9hxJNj5etpbupbw9MbT/I86XUrF7rzHCAUitm2ZwYz07O8N7T5ZmLJc3pMmIIjcYI57p+22FCgGbZuwjAbatG6y3VHVFtdpzs77hZnmN94FnoxWf+fTnmEwmzGYzPvfZz7DZrFmtV3z44T3m8ylpmrJarTFGs9vuxO/LCRu+7iyohMQkaC1e+J3tqJsQrZV77WjBvqqod6IfHJJ3giXEEyRJDNqklOVkcMh11vPhhx/RdZbbd17hyZNnXF3Ja7++WfKbv/lbnJ8teOONN0iSlPfe+2bEv/K4kdbsm4q6qimKYght6ROANquKBw8eoJSkZ92+fZsvfvFL3Nysubg4i2TjdLi4++QmiBgrglM1bTMkYY3H4habZRIcXJZFFJkrHj16RFVVPHzwgGo/YzYZ07Yt3nmKNOfs9JzFYkFVVzjrefrsKdvtDQrFyXxB27U8fPCYk5MTJuMZbdPSdg7fNTSdHwqaj+C8J0SMSD6LEGcGQZe80EyMjvXnANAfA/jPncMv+e/j/6mhI+A56sNAheBQLPtrQnNQFPTX9BEq9rHf63me3/ay1/qiwuClxey5hzq8Zn38sz2YZHg1Q0bI0Ta1bVuSMsN7x36/JUlui7FnsJTjQrpWZXAalPo2ndmwKUFaaKUNOk1xnaVuW/KkxRtpp72Tvx2BVEuUVV/MpLxIJRbtmhAQQ5RROOeGYmaDi5X6cAd48a7Wu1+8+GG8yLbuU296/pnysr1JTIKOHcH54pQ8z8Xjqix5/fXX0Si26yuqek9wGm/FJkb5EIXbQs6dTaYEDbPRjLfO3mK5WfG7v///UKQFb771NvXO0nUtbduyWq344hd/n7Zt+MxnP835+TlNUw3uESHIEkIpTVmOIkHRCpicSMgKTkb4um1oW0sSU5lKU9J2AuIbk6BVItvjICaPkk9gSFJJqW5b0S8qbSiLEaenIxaLU959910+/PAjxpMpeZFxcjLnjTfeIM9z3n33qzx79ozdbsd+X3F2dsbZ6TmbzZa2bZnNZiilh63laDSi2m2p65qnT59yEvWRWgfKMhtkUPP5nLOzM2azGQ8fPhx0lrLl8zRNi9Y6dmwjJtMxOrqcZFk6LFLu3bvH1dUV0+mU1994HaMltOTJo0dMxiPGY9G8il2RnENnp2fUdc14PKYsS9q2Zb1aE0JguVzSNi2zyZzGOqq6o226CFvIeaejdGc474JAGopoqqme78iOMatjAfnx4/jfIlni2xaLb/m1INORGQrJC19Xh2vLh+f/fXgd4Vv/7hd5ZS8n9objb3h+uoqFuutaIRA7B94KTIRCGUOqPPuupZzlMZ/TMipT5tMxm+0KXQp0gw44J95y/ePjIcBKHcS/cbVMkkQbFk/jOtAysnlvCU5Ywr1GSusob1DSwurYjosJXBjE3dZZOhfF3kS6iDqwil9GyvtWAtzjv62z9PQTUQrIgU7TlCLNsFaoGD2w3TSNOClkOeNRimFLtVvibMNkVFKmGft9TVuL7q4sS1rboaJXv8ZgO0dTyyIjjQLx7XZNkiRcXl6w2+0iLysnz4W/571nt9uRxtc0nczovEOpGm3SQZ+X5jmT6QTnHE3bsV5vBFQ2mixLaW0SlQuHk7IoSqaTKSEEtrsdy+V6OKYmMXzqU59hfjKn7Sxt4yBoNustd+9+itdeez0Wi8BiMacoPsO9e/d48uQJ8/mcWxfnJGnCfrdnNBrRdR3GKCaTMePxiHq/G7qvqqq4urrCGMPdu3d5+PAhdV1TliXT6XR4zna7ZbPZCGXAMRhgnixmQzpQv7HbbreSfG4t9+7dY7E44ez8lLIsePrkMXW1Z1QIpuYJrLebyE/TLJdLzs/OmM3nTKdTfPAUo5JPfscnefL0KR/e+5CmbinLKft9w65qaVo5Xwg9FcMInhuB9BBcdEUVwXmI+PFxITtszxn4hi97hKP/81yB6xdCLxSQl3ZQHOiocRd5NMDGn92XzIgG9b/PhxCter7949t2ZPFLqh+Ujp8/fN8LRTOIcaRGkShFkRrmZc5sOsOHllGZMp2M2G2XoD3jsVi4d10XRfTy+PiYycG7q9dC9h+G1TISmjRhkC4hViA66Ih36OFFaq3RUbxlw8HGxwU36D970YWOeMSw8Q
mHbUpPp5CD9HzL/uKB7dtyxSEGK0kS8jxnVJTiIFrX7HY7YewrSeRJtOH8/Ix627Fe1lT7rZA5E5FPKBWYjsY8vP8AZcT65dmzZ5Tjku9461O88XpHXuRMZ3M+eP8eTdOwWJwMXUfXWTabDYvFnDRNoxttjTGKpmmYTqdkBEzM0wSFs57caIqiQCnJhswy6bKs84wnJcqII2rXdVHaU5AmqTiidpb1Zsd2u0WhGY3GKC2tfds6Hj9+zH6/j5mKFq0M1b5mn4tXmHOOhw8fc+vWLV599VVOTk5o244QPFUtUqMk+ordvn0b7xyP7n/EZrMmBM/p6SlVVbHb7ei6jv1+T1EUw3tPkoTpdMp0OpXR0MtCIM+zIRdCJGcSjCzkYsPJyRl5njOZjJmfzLG248GD+2RZyvn5GacnJzx58pibm5thDPbeDzKn2WxGOfp/WXvPLkmS7EzvMdceWmREqpJdM8BMAxgOljxc8Cv/A//xLs/hLg+Wu1igMdOiRFZWytAert2MH8zcwzMqqwfLQ+9TnRmR4crC7PoV733fENAhDeY8YaeD4/hEUcxml5BkJaVOVWMbbi4LYfBhB7rsSpkEh6pDOvlkEdeGrW7zec4IKaUa4yMMtOh4nh+vi/r1oYuAgwX5lp0xBQNl0j/1+drXUUvLPdlX1WGhfHIPX20G7HzYT2o8rTB5PKHhR5pNQ7c6IjVzB5aFqCQdX9BzbF6eziirlPmgT9cTBI72xrIipaoK8twm+zUNAFVWjaq5lFLTg5gmVGFZ2phJTecsNSqCSskGQVyVsol9rZalr2TVSJlV5nGhpac0f3jTHya+zpl963V7kJuxrF1bdZggtTF2HIdBv892pfnDiqLQ1aSqYrPZ4Ngl0XpJGu+pqgJZ2VhKl5ndQBMG9np9xpMpWVHwuHok7HaYzqdUsmK10YSJo+mI2eyELMu4vv7Carsme8x49913ZEWJ7XlUSuEFPnmhMXlFpfnYO90OliUMSr5ESBsM2UpR5vQHPXa7iGIf4/sOwu5QbUukFIRhD7CIdjH7/dZgnGx8L9CYNak1Hj5//oIQgmi/Z7XaUBQlYSckjhMmkwndjsPH97/w+PhIluc4lsOg3+VkMubh4VE/9ZUk2m3wfV+3pnku+71mQkgSjRPK85xut4uUkthUX5VSRFHE7e0t0+kUz/M4OTlpuiO26w2qUpoPrSzxfSOYLCA1/G5SST58/IXlcsV4PKJn8mTDoabIXq/XLBYrlFK8euXS6/VJkoTRaEKSxHheQBBoVPn19TW3t3fc3NwR+AFpmhHtUqIkQ+LgBF1c16GQFVIKTTUlVQtlryeznsnamNmWpTtljAd2DG5ta2nWP58YCKEjmiYP1jJk7QJCW0BY76YV1ZHya4+sXt/oa2/Y1AwkxJwIOKSajo1tfa5jnNvTtXfgY5PKaL0KoREUpqfcsiwNnDfXpv+kWWkFCqessPKMs0GfSgWcdHtcnIy4mI7Ii4RPnz8adTTtaNTbV8asqqpmFGqkcj3ACM3m4EjZsFJiaUCmZRL8lUmYgin3SqupAFVVrQLd3P4T8ZG6OfdbT6DntueMWX29Gs9zUItWSjUhUL3otEfjsdpFfPjwgKjW2FSEQYBtu1S5BlRayqIsC25vH0gzLfb7+LiguLvl5v5Wqw7Fe/qjAXf3d40qUJIm+IHPar3i09UVk8mE2XxO4eX0B30eHx8BwT7W0IXLFxc4jkOSJOR5amAntmH82NLr9SjLgn28N3J1djMG3W6H7WZPmuo8Wq/XQ1i2YZYo6fV6TCcnLJZLVus1i9WqmbhpoturVqsVnjsiCLTnM/EmbLc7/vynP/Fwf08QhPS6XWQlWSwWGgeWJGw2a9brDWVZMBqNGAwGeoI5DicnJ5ydnbFarfjxxx91qC+1wOxsNqPX62nPudPhy+cvbNfGSHp6316vh1KK+/s7NtsNeZESRRGdTshsNuPy8pw8z7XxzTJOT2a88QPSNGUymSKl4u7unlevXiGEYDgcEsdxQxpg2/pB5boeHz9+Zh/lJHmF6wf0gq4W5ag0rblWLDeCMGYOw8HrsYWu4ImWoTnO+T5nDNoPayEEx5ImbQ+sbQzbx6+LDrUhq4/QrBBxWGeK59dTff5vpXTa5//WfrVnpz3YCqXQ6lBC93Yro+FBrWRlCU2AattaSrHMsJKUvq07LryyYOT1GPoOaeYhJ1M63S6j8biZZ/CMMXNszf5ZlRWyruTIOhxUYPrkyhptb1gtKqV97bqIbe5eA11NsG7ZFnlR6kE3vZC1b10nPbUakb6sWm6+dtHbA3bswdVlb8eQ3WGQ28o8IeskdW6OXx9XSclmsyHLMqJoi0vMaOjRCbqaNrvIcFwbZUQ6SiMIMp9OGJ1M2CcR692W3X6rGVj7PZSQbHcbsjxjPB3heZ5u9kbQMYnn0IQ+6mFBVSmWywVplmA5FufnZ03/YhRrNg3bJJ2llJyentI3ikJ1TkeHbhnCspjNZtzd3VOWFePxAN/3mc81xXSvP6CsKgOI1UbUdVzQe3TIAAAgAElEQVRNSQ0MB31OJmOur6+xjR7kZrWkzHPWyxWvXw8pRUUc7ynyDN8P6HV72sAZaut6PMuyZDrVFcyaqWQwGLDdblkuVw31keu6zGYzI+mmOD2dNyBbgO12y2azptvt8uLykiSNm26P9XpJFGkjL4RO0K/XGzqdLp1en0pCtN9hOx7RPmEXxXy8uubs7JTHxYrtbkucZCxX2oBKhIa7VBW+bWsRZstht09037DjYQm35q9pWu4EmtRQoQVCaBmA2kM7zvu22/MaB6BlqI4f6scFhDriaBsXKSW2afSXUhpqbtusM2PCjDdWmjVTn7euelZV1eT72nCSes21r7cdSjdrUVSNBOKTTWrtANd1tKpepbEQdX7PQfd79sMOs36fbLVmMOiRLlbcbzZg6VbFYr0jzXKivEDu4+bwzxQADn1i39qU0tAK27Y1LKwpI9egWDPYsuVeGy+srnMKM6jHJeHyqALUdm9rXYD2E+x4a9x1UfecHb6AOtxxhNUYuCRJDOxAC/F6gYWwbErT97ePUpA6XKsqHQbajs0+icHW/GPng3P8pceHq4/89MtPAPQHfZPr0iHjYDDg/PyC9+/fa4/KGGjP9+kPBiRpyvRkyj6OuLu7a8LM8XiM63q4vt9U+VAY5SKNd6pDaNA6lbo3cqoT9GXVTMQoipAoXZ1MYpSUeI4WWplMJvQHfYoi15+rKibjMavVqmmyL8uSjx8/NUZmOBwS+AFZmrJ4fKSqKsbjMZ7nGciFoN/vc3Jyws8//8x+v+fdu3esVivev//Aly9fmM1mzGazJi+6WCyYjEacn5+zWDxye3urGRWM5+H5LkWpSSKzTHPdz+dzut0OHz9+IEtz3r75jjTLiBYRWZZxeXmJ5/v89PPPDW9cFEUMBgMeHx9xHIfxZMLV1Wccz6M/dPFCSafbo9fvooRDIRVWklNISPMSIVxqcozD41vQbnxu55lqj6b9MK7/1mbOaBvBdkTyxGtrFciePtg1iaftmI6FNrRDG
IodXct4WjE1Y4s5TlmWjUNx7FV+K73TNtQaxlIL/baS/oYEX6DpwbSdqVCloLJLjZOTBaIsqHYRj1efiUIPiaQoU5SoELbWdBWObntzXZf//f/QZ/i6AGBbGpmMzovUCOJaSg5lEoiVHirjt7YqIvXXKpqfutm0xt7UX4SmFKn7xhSAklSmetU2ZnU+Bb7mRz/eDhWhJ2azOY6GY3RAQJZl7ONYP9XNblJClpfkecU22hPtYmwcgiDEcTz6/Q5+EFDKkmi/R24VQccnrwq80MMNXdbbNRJJ0NGJ+zhOiJI9n64/MRgPUQK8MGQymbDbbsmyjI4R6xBIPM9hvV5Tlgo/CEjSlN12z2azodfrYdsO8T4xjxCLqlL4vsd43GEf3QCKbjcgDEPuHx4oy5Lz83OTi0upZEmRa8EUzS8mKKuCJNGh7v/z8Se6nZDZbI4lFN0w4CaNSdOMbqfP5fk5Jyczon3E4nHJ4vGBONqBEOx2EeuN9nLqxP/333/PaDRisVhQV5Y9z9VtMMB+v3/ihQ+HOnTQdE31/JBUsiSOYzabDf1+33h9NmVZsNms2W63VJVkuV6xWCwJw5Df/fXvSLKU//M//keUUpzMZgRBwD7e0xv0CcIQUNzd32M7Drv1FiEclBIaS5hEYHtGfTyjKHS/sb4uR6uEK2MohBZ+0a08Xxeo2vP2GGbUrvgdP8iPjUa9tXt6j4sErZVQN/81oXA7maYLAGbumz83AkEcjFn79bFh+2oN1m2HwmSnWn+rjy8wNqSstCCxZVFWEkvm2IEFRUmy3ZLtIa9yqqowVHJakKUS6ivH7xnQbCteri022ijU7dtWY+JVA32oLb7VJuWGxgNxavXs6pBAbGL6VlAvRLsS8nQi1O7vt7a6zCulNJWlwzDWlVRMeCulVqqulaBc26EsJZYlySuBYzsIy7TzSFOxUhrqsIv3OJ6H5VpkacLDl3scz2U0GaGEIiszBsMB05MplrAbhlitk5nieT7braaW7vV6JGnK3e09JydTknjPaDhgu90zn83ZxXt836coK168eMlyuSLLElxXi99meQ5kgKDT6XF+fs5qveb07JTtZkcUbel0e1x9/ohSOgRaLhdkeYptW3Q6oYaLCDSt+M01j3fX/MO//1/5wx/+jjzP+Q//4T9QldpTm4ynvHjxwqDot5RlQZKk5FnWNIgXRUEQBPi+z9XVFa7r8vLlS0ajET/99BNCCC4uLgDNAluHmuv1mrP5HNu2+fDhPf1+nyAMiHZb4iTGti1OTqZYljD72Liur8PxKNEFlE7IcDgEdFrg4VEb89/89rfYts0//uM/Mp/PGY/HKKWIk5jFYoHneYhMz5dKSRzPx3F0X4ttKXzPJnWFBjXrngywpGZcNlagMQpKPVnA7fnZTvYfe2nPzfvn3j/Orx3ep3mIq9Y+bft1AJgf8nxPztF6fZwra/9+bGjb19FUQnWTKofVrr3HWkC5ZuKoqoqqEFSWph4vbJucSrcqCokmHZM4joUlHNIkMdHW02v4VWOmk4mquRSUwrFtbKG73i2FTuhpC4dEa+TVIvSA0Ts2mCxh4diyyZFRHYB0h6qQvulDheYpXud4MI8LBKr+EtTTe5Fm0Gp1p7IoqMqywZrhKrIix3YUSujO/N7Qw3I88kRT6RRpwj6OieI9buBxdn5KfzzA7XlUqsQNHB4fHuj1uvz2t78lDENub2+xLHj58iW/+93vWK02PD4+4nm+FgpJMjZrXXncbHagNIV0vE/YBRFfbm8Jgg5pnjEajSmKijwvdCUTG8vSbU5VKTW5YJYyHPbJspQ0i5mfziiKkvV6ZUSNLWxbMB4P8Twfz9e9bxo7VTGfXfDu9QvW6xXv37+n3+8hpeRv/ub3hGHIfh+z3qzI0kyLowjIspTFIqLb7fHq9WscVxuofr+P4ziNsag53WuPq8aWDYdDgiAwepgpX758Yb+PODmZkGUJm+2GMAzI84w4jgnDgPv7O5JkT3+gzyGrik4YopTg06fPeK7Hq1dvNPPCxyuUQjNrnF5gOzbL5ZrRcMIf//jvuL+7409/+hP/8O//N25vb/nvP/wrnh/gG4Fq1/dwpaKsSvKyIC0lgpK6yblhKDb9i1rw+elCb/9+HG4+MQyiZRX/wtauZDbvmf8r6vy1VjvSVNOtz4nD5+v3LWP16uutE/lf2YQj43a8SVG1nJ9WIaGpLKqmIFCZ4E7KEolWfaqkIJXSNJwrqqpEqRKngCw31VeltIfcOv6zxqw9WI3BMElN3Tyu/VMbzYShO700+liYXv/aamo3U+jKJ0Z+vmXImqqHVFAz037Dlf5WFej4emtj1t6nzgX0ez2dQ6q50pVqcGaO4xIlG00Tjo3v+gRhF4FNlSRUVcloMqYz6JLmKVES4fV95uczHM82BQIthluWJQ8PD9zdGcGN/hDP89jv9w1gd7lc4rkBd3f3PD4+MhwOeXFxQVlKhNDVUqUMkaPtmJyZTVUq9mUMsVbB8b2QONEEhVmeUZau5vbfbphMTzg7G5GmKbPZjOF4xHa7YxvtqSqt/JSmMdPZjF43JM1SHEvnE6+vPzffged5PD4+kucFg8HQJO8zg9cLGyzcZrMl2uuQOE1T3r59i21rpgXXdRshF91VEOP7PqBDzTAMdVK60Ji9IAwQljCdBD0eHx9YLhecn58xnU61qMx2S1kVDIdD5qen7LZ77u8XnJ9d4Ps+Z4bj7erqiru7Ozyjy9nrambgTx8/Yds233//N1oNKorQKPWMSlU4ZYlTFiglyPMEJUvN/mApTcRY54JsTX5gKVDV097i9hxs/zw2GuZNExE9NSDHx2jP9+P1W3tocDAgTTZIKdTxOhI88dCEdciRHRNBHhcsnvPOGoA2dutvGK/R3HvLMwWp2SPrv9oW0gbl2pq0UQqkFAfCiao0OXHx5KHxNTTDYD1qnFZj/ZWOcYUBxQopNchMgCOEIa3TuPiqFcw+50lhKEOU5lhBSOOECoFli6bptu2KHw/et3IJzxkzpZSuuLbUboSh767zaJ7j0u32WN/ek2YmPxTocn1VSKpKs4wGYci0PyWXJevtirRIqFTJoNvDDzzCIODL5xt++OFfKIocy9JDvFyu2aw3LBYr45G4VKXUBjSKCIJQo8stB8/1OZnOWK/XeL5FkibYjkee6UWlvQBBUehsSKfrk2UF+2yP49qcns6J4z224XuKkz2dro8feDofJXSnRCUV+/0Oz3N58/oV0+kESxYs728oioLPnz9jWRZ//OPfk6YpV1dXBvMlGAwHpGnG/f09l5eXTKcnZFnO9Zcb7QE7DlEU4bpuE1JKKbUXjO6kOD8/RynVFBn6/T6h77NeLqlkyf39vcH29ShNIaOWhqu9O4BKlnS7XXzfY11tcVyHTq/Lp6sr4wkJ/CDADwOCIGA8GjM5mZIVOTe3t5zMTqiUJE4SdtGObq9DmuqcYlEUWKVWFJKV0qpfUlAJRc0+IbE1X59t4Zg8siW+9rzahahfCyVriup2dHK8DurxbG96PEQDRK/Xj+YsVhyfUtU5s/p1/Xvr
XM85Es+Fn0+OSxs3Zz11NJUuHkoBjjrk0HQ3goUUim0W40nwlINra1YeRaVZaQ8n0Ywkre1rz6yVzD9OTOrX+nepNJuGlmrXlROpdFyrFFRCIGWltQSNpqZlgyz1E01fj/kyLBDYCFuz0trPfFFwSEZ+NXjtAW2Fm609daJcVUT7HVQlnSAE4bKPI6y1wLEtxtMhnheQ5RHbTUwc5diO5qcq8oKizHhYPDA5GXNyNmMym+CFLr5hPXUcB+kok+D28P0AKSVfvnxh8bikE3Ya4KdtO6RZSmgJk5dxcBybu7tbgkCHN8PRkNVmjVKKwrRQxXGMEILxeEqW5WRFgWVpuh7PC4iTiCRJ8X3dMlSVJTm6vzTeax1Pz7EZj4b0BwM6YcB+v2cyHmpNRSk5v7hktVozGIx0s7fxWieTCcPhiCjas16vcBzbgGIVt7d3ZFluVNz1fQ8GA6bTqVah8n1c1+X+/p7b21sGgwGuq2X9qkozZygp2e8jlFIM+gPWmxV5nnNyMqUsc87Pz02OTnctTCYT+v0eu2jX9I8GYYBUgvV6RRTteXx84M2bt/z+97/jH//xvzRqWZUsG3EWx9XSZZPJlE5PpwaqQqcgSlkgihJhOViOi2c7RpFJMyzrrLleNBp2aWldzfp5agxXZTCbdfWwNhh1BbEOnUQTgpqkvdCGUQkQSr8WZi5rPn2teYkFFrbhXjvCkbUtVnut8/z7SIWyvs7PHa+3J15ivfaE2b8egLrCoA55xKpSVLbmVJMKQ70NdZelHj8FVUkpW7k2WYJS+I5bL+sn21fGzLVspOmfVNXTtgzLsQ2NB/qJVJUox+RbpKCQ8kC2JgWWYWi1DW22VFVDA6Kl29HMAzVpHJrapM1R9NWgtZ5u8LRkbRnkNTYIieH1qvRTSTdragpvxyKTmW6hwcfOLHapT092KPKCJC6wLUuLYuxyfFeLmZRSstqu8XsB6ecrBoMeL16/oCxKNksNGB0OJqRpznh8QhLH7PcxeVYwHI64u7vDcVym04mWtDPGfzjs0+32GI/G7HYbBoMONzd3XF9fM5pMGI2G7JMYIWyE6ADC0OqUOJ5HlubYlovAIYkyqhwqIcmSHBB895sXlGXBp08fuLv5rBu9JyOm0wmBa/FP//Tf+ef/+l90Pi7NGQ9GxEnCy5eX/PVf/xVZlpKUKb2ezsXt9ztD16MX3mKxQDvvirHncX52ysePHzWLrnbBkWVBZ9BnMhpx7TogK64+fqTb6zIZj8izjO16j+u4vPrNC0bDQdOHmaQJX66/UBYFvV6H3XZLMJ8RBj7b7ZYvn6/J8pQ8LziZnXJ2doGS4LkOF+enpGnKTz/+iTxPKIqMwPe4uvpIWZR4vkccR/zN3/wNJydjfv7pJ2xgNhnh2TZXV9d4XshkOsT1AlbbPf2wxz7NKHMNJ7EcgSorCpVRCQsL3b4nDD4TqY0OyvCOGY9IGsNWY7xMw6cGjqKg8fw0dYMmGa3wvQBhRFQsai8Qnewx60IHbaKBKOk1ZNbKoRpgDFdrsQljPKQ0dEYHfJk6dhTaeTcdeevX8rAeNUWYjhAMKRCWbWkhGFlR2S7KF5QKSgW2KrCVwlEVSmkWaZQRjpFGp6OqubqfGtmvcWY1ercV1zfxuqWZWhW6aRxZokqdL9CN0DlWE1cr3VqBZo2sGZsUUvMyWUobMstwjRtvzTqy+scVnBrEVxuz4+LAwbjps9UDLWo/0NL6n76r5bJs2yHJE24fvpDmMbKssIVLnCRkaaK9DAeiRPOIdXs9PNdDoq8jjvQ+aZrw4cMH/u5/+nve//KBy8sXrFY6pLy4uNT0OEYUeD6fc3Nza4ChmXnf5uExI452WKICLObzE0oFy9UjfqCrdF++3FIWJXGSUVWSi/GEXm9AUVSs1xscx6fIK3bbiGgXMxoNGHT7hB2fTugR+D67aEe827JZLgDBsN/h/u6B9XpHrztmu02wLZtol5LEGVme4vk+2fKRn3/+CSkrgiBkOp2RpQXr9dZgz0yPYxiaZLxis9kAsFwuefnyJev1itubGw0cDkMuLy/oGaDtYDDA9zwC3+Xx8ZHpdEoQ+Dz8eE+300EZKvDJZEK8j/nll1/I85ww9JnNpqzXG6qyYL1aMhgMkbIkz1M6ndAQUypWqyVnZ3Nev35FFEUUhRaQvr+701XT/R4hK66vv+A4PufzOWUJ2+WG2TzkfHbKahdjKwtHgLAsKiytFlZJ/ZA2C1CqQ3FLHS1+Wadz6jlqZqtSEtu0ESql2WgRlubts0Aoq+Hwk4apoxGDMgauyUsJoYkbRc1cfMjRtdv9auNkio+HvBdPc86NQWtsyKHbADQVoQXYtoNtGThWMxbGyqFwbMuMjaHdFlazpmVlIev7Nh3zSunQXimtXWBbtdl6imz4ypi1DdjzwDzV3HBTHNA7IYyxOxgbC2lJ3YRej9qT0ePJ4DRhbMuIHf/+awnI+trN5dR7H00kHVaEHQ1oVZVkt4tZxxGb9Zp+d8DpfE4Z74nimKATklcVUbRjPBlqReUg4Lt3b+h1O/z888/887/8QLfboSgl//k//d98+nTFy5evGAwGDbYqNu1KcRwzGAwMw2qoOwaUpNvtslgscF2Px0fdFXB58YJ+GGovGUjThM16TceoosdxghB136umPprNZoCWpOuEHWazueFPT7CEVih/fHggTTO22y2np6eMRxNWiw1ZmlHka6oCpidTptNpvSwIAt+QPDpUlTC6lCl5VjbzoyhKXFeLXdSq7ZPJBMfRIjl3d3d8+fKlAa5Op1Pm87mGxrguJycnrFdL1hs9To7raFqhLKMqC3p2D9fz2RvixVJqz2owGKBQ7KJrirLC94MG41Zj2nQeSjQhb5omLBaPBsx8zna7Zb1eM+j3eXF+hu/7bDd7dtu9NrBhj04nZJ9myJaOhLBrz0HrFeg8lNXMtafVejOH65xzazkecr2akbguuNVz+bgQ1jY05mRm/4Mxe7I+W8vuyZppOyvo/LXm1tPY0uPuguNNtNeaOWz7Wtuf/Criav2Trd8POqLSGF39npL1bGwf4bA9a8zqGz6uurS9n3bZtkagCyFI4/irY7QT+e0BeS6R+NyA/aXtODH6/A2ZL6u+n9rLa4oC2tuM9jGTUhIEIUmQkhclRaWpRjw/YLVa8/n6C8ISvPvNd7heQBQnKCV4+eol7395T6/X45dffmmSuVVVMZ/P6ff7/Mu//EvDEVYzpdYezHK55Lfv3iGE4OMHzdX1/d/+LX4QcHNzT5Zl+L7PaDQywN6CKNqTpJkpDsjGg5GywvP1sT99+kSaxXS7Wkfy/PycNM0M6t5q+L4GgyFZVmEJGI0GvHz5krLKNZ6s8hkMhsxmMx4eHri6uuLh4R7QwrKaVVY1PZaxmQfT6bRZEJ8/f+bm5pYk0bi4LNMGtd6KoiBOUhwzZlefP/Ob3/yGv/qrv+Lu7k7Lx7ke0+kJjmPz8PigDVsUadLGThe/UmRZQZbpntvpdGoa/q+xbbsxnuv1GtALL8sypJRac3MwQCmF7weEoSJLCxC6P9bzXEo
FSVEiqkpHGXWMJnXOGKUQtvtkfj+ZhkcP5sbgqIOPU6+v47XTFDy+EZ18Y9o/+Vlfw3PLRSmtrlFXPA/h6dNrF9bz56rPVFW6BFi3INYtV0I8Pd5xcaE2ZJrm3tCAUysT1KEzUJV8VYLlG8asnZdql4/h0LdVf6YyDeTtUu5zX+JxefnZJOI3qiPPHee5sjVotlJdbTV5PnGI6QUtw4uZTIa8sdMJcR2fotDI/8D38YKQaBdphsvQZxfFvH7zHf1BjyxLeFyujAFMWC3XnJ1f0On0ePfuN1iWxXK5ZD6f8/DwQJbpfNnvf/+9Qb4LM3YS39eU0fP5Gcvlmvlszmw2J8syPn/+TJwkRPuUbrfXaEbe3t7jej5C6MmSZluyPCHLM06mUzabNbOTGe8/aKPa6XR4+fIF+31Er9djMjlBmsfhcrUmDLv4fofdNiZNCyaTsWatCLRxQmiR3aoq2e/3nJ6est1GpGlOlmYIIej3Bw1po+u63N7e8vbtW+bzOVJKXNdlOBw0cA4pJdfX10wmE4Ig4PHxkXgf0+10GA5GnJ2e0wm7+l5mc8bjEVdXup1qNjsh8HXo3e10sW3bKGJpoeEwDHEc3UlR94UKIRpwLtC0fAGmNUrP4Y8frxBCh7OdTpfFYs16G9HtDbRCfa6JSmWZG31Ww/xSVVSVxa8Bv4/fe/paGS/MbgzaseESQjSN+seGrlkHbYPB1wHRN6uQR85Le821je6vmbLaA1McugVqh6fOxdUeWu1YPLluBa5l4QmFKw5VXVtoun0QjfjKsWf2FZz+uATbphlRSjXgx5o0r+6ZrCW+2srHz23f+nLbnlmblvd44Gsr386Vtbf6y29XcywTv9f76tBDV9fqyqPjug27aZzErDdbkiRlnyYmkWrzuFgSdrqcnV8Q7RP++Z9/IC9K3r37Lafnlyhh8de/+x1h2GG73fHDD/8KCE5OZqZypifq3d0di8VSKxPFSXMtl5eXDPoDbNvmxYsXvH79mv1+z3arDZB+eEhTZdN9m3XDej3++2hPnuf4vofGSxV0Oh3evvmOd+9+g207/PnPP+rwFwvfD3n75jsuL18QhqEJGbTW5U8//5kkiXn9+jWnp6emi2GL7/ucnJxwcjJhNOoThL4JCUNOT+fGWE4QQvDlyxfyPG+81NFoxOnpnNevX2vdUGNklFLc398jhKDb7SEl+IaSZ7PZ4XsB69WG+/sHQ+qYstlsSZMcKbWxnkymDWNHzcTx6dMn9vu96VjYNfMtTVNubm6IoogXL17Q72sGk+12x3A04ru3WnqvLAp83+XViws63ZD1ZoXtaB3ImpNLyQpkZRhna5S7enYOP/fwfs6w1J0zNRTl2dzVN9btE+N2/JOWpsYz6/I5A/nVv2/9Z7y5en22r6e2IWWrNfH4ms0FNHodGhYucLGwBbjCkDc6Fr4jCGz9s96epc2uLWq727/efN/HsqyGnvo5rEt74Js2otYFf+vLfe51ezC+lcOr/7WvpR5cPSksDfZt3VPTnG0Z5XLqhKPAsh2SNCVLUxBgOS7Ctgm7Xa6+fOHkbI7rBzwu12TFj4aS22K/T3DPPaqywrJskiTlp59+ZjAYGLhEYLxdDV+xbcdw5zu4boGUiiDs8PDwyN2dZnatSs3IICvJw8MDnVBj36aTCd1eyHqzaxD2vu+RxikPD/eMx0MWj4+UrS6HTx8/MRyOiOOY5WJNkujezNev33B2do5SMJ+fkSYpWZby6dNH0jTmD3/4WzzPxbYdHMel1+saUZOtQfD3KEuJ57vM5zPiODXklGN2ux1xHNPtdvn555+5urrGdTWmbNCEdIc+zjiJSbMMx3ZZb3bEyZ7lckEU7ynynOnJnLdvv2MfR+RFQVEW5EWBsGwuXrzgy/UX9tG+mQ91U38btKtJMbXuZm1k67lh2za9Xp+8KEniHVme0u8NGAxHFFKyWFamk0Th+w5UkrzSOTDHthGWo1kzxFd+wlcL+Jm/gpKG2aUmD6ApfD2XOz5+oKujNdR4ZEq1rkkc8nFH+/yakXz2+pX2iBpf6Zl00pOPq9pPPBzv6TGV6S6qtQxMhFXvKxSu7T2BqtTb1xRAhq75W95VPahN9cH8az5/ZMjaF/zrX+Tz7u9XLu43nlCHL7dOkB4MWS0ibFkWea75+UvPMfxqetLYIgBlsdruGI904juKIrrdENuxcVyX16/fsVw+sl5viJMUpQT7fUqn0yF0PZbLNf/1v/4Tr15e4nkef/zjH5v7Pj09ZbFYMB6Pmc/nDcVPURjqHqHbeyrDyLrZbLAsi+nJnPVmgxKC7777jqqULBZLXM9jsVgQ7RPSNOH161fYts3iYYHv+c3ilVIShh3d0rTRZIZlKdluFghhIUTGzc0dv//99wz6I05OJqw3OjRbrVeUZcHNzQ0nJ1PefvdGJ+aNIvbDwwMnJyeGH66iLHPWmxUC22hk6m6LIAi0jFwck+cpZ2evefPmTePt3dzccHNzg1KKOE55XCzpdrpEUYRlCbrdPmmqaZBmszlplpPnJZ2OTsrP5jP9kM1y1usNge/z4sWLxsg/PDywXq+ZTqc8Pj6y2+1QSvHixQvyPG88t5q9Y7PZMuj39cPEkJPqSip4ns1+s8O2bcLAwSoVVaJVslzbxXY94iz7i57Tc5swNiZNU3zfw7btxitr71Mb3jb9dtsYPXmoCy1UrNdP/X7L0B45Bqhatk48Oa55cbj+dlHTHFHbFYOpM9fQzpvVYtxKfZvgUZ+nLgKYgoZhq0appuJan6tly57BmbkuaZo2g1aHlrZtNwyi7cRkHZlijgsAACAASURBVGoeN4Efe2g1xctxo/jxl/wcO3q9X9viH3fuH1xb2QxIfV7QX44QGrnueV5zT1ppSWIJhS10CLqLtggBF5fn+L4HQFUVfPz4kfl8xu3NLXEc0+vqRujhUHsYjiVYLR64ubml39eh1v39Pfv9nul0Sp7nvH//nm63SxAETZK8ztusVitCP9SlbdtluVwRxZo+aDga6vuwNJGgHwS8//CBV2/emHtyUUrT8iip2O8LHMdmNBpxcXHJfH7C9fVnNps1AovRSOtXfrm+JcsqwqBn4CGPjMdDM24Sx7F59+4d3W6H5WIFSj/wwjDk5cuXeJ5LmmY4jo3vu+R5hm15VFWF53m8ePGCzWbTgGOn0yl/+MPf8fbtW2O89BgEQUCSJGj9V5s0L5AIRqMJnW6oab1twXA8NgIvOb7jkJcVHz5e8fbtG+Ik5fT0lIvzczxPayt0Oh2yTMNfdCO8nhe2rQG/juM0xqzb7bLd7sjTnJ9/fs9oNAQUZV7Q6/U1wYCrJXy63Z4WyN7FCCRCSU3vLWzjBT1dB+3Qqr0u6u2QlNd6GkqpJvo5Nmbfel136tTr4fC5AzRD59yeqivVl6E/gzFoOlwW1Iroh7VrG0ZbW9ga0KsUldT8h6VU2FbdEfTU6eHofmuG26dQMD2+ltBdFcq0RVpCII2HekwFVm/PembtpF07D1afuJ18bycnlYl324N8/KU997q9/Vu8t1/bryxN2dw4qJUZKN
t4apPJxLCKuiiV6cSt1KZfIqmqgsfHBfP5KaPRgCja0+t1CIIhP//8k8m3JIzHI169ekWapny5vkGqit+8fUO/3weUwTDpilqv18N1Xa6uruj3+43IhzY0F02S+uLigngXExExm2lv4/5xQRh2GI9G/PCv/4qsFPf3D4wnY/JcwwRev9YMqg8Pj6bJWfDixQs6HZ8ff/qRzWZNp6PFcDULrW0wPYqylPie7rO0rBDbsplOJywWj7x69ZIoivjTn/5EGIZ0u10tK+f5vHz5UlcUHx7o9buUZclquUJWFtPpwFAmOQ2b683NDavVivF4zHg8BjT2bL1es1wu2Wx08j4IOygFWZ7T6XYZDIeMx2OiaIdSWhz56uoT3V6X87MzpNL9pbsoZrVcUpUVjuOy2+0YjUbM5/NmXmulKItut2vGImW71aSag8GA+XyOknC/f8APfM7Oz9lHO4o8w3Vt7EyQFyW9XogXepAWOI5FN9Aye1lpquWWZbzeoxCwZdCOowzzCe19iIMxq6Oe53LE7eJc+/2yqJrwWRgRYbPHVwa0fv/fltzX57TN9bi20a1UirIUlMbwyNY9Pomg1KFiC1/nE/XxtUGzbK0/WpM8CgssqYWEVf2weBqxPg+aPc5LtTmN2h5ZO1dVfyYwPO/f2tqG7zh39mv71Vv7nL+WPHUMFZGSVdPL5/t+EyIVRd6EmZZrIUstClIVOZaQlHnGevlIkmRMJ0MuL86xkDwuFkTbDbc310wnU8JOSJ6lDbVzWVZMxiOyLCPeJwyGQ05PzxDAoH/Hw8MD3V6PQb9HWZZEuz2+F+B7geZX22ukf6834OTkBGE5jEYjEFp4JQy7GjIhK1zPYzgaYjs2YRhwdZWwfFgyHA7xfY+yKk27UcX19XUT2m42O7bbHSh9TN0WNGQ8HtIfhCgks/kJlSz5T//Xf+bjx4+cn1/w93//R969e4fnOdzd3xIEAZPJmLv7W+7ubijLiulkThD4dDq9ZvKPx2PqKlyv12tETTITjtXqTFmWkeUlaVriByG2nRPvE05PzwgCnfvbbvdaMBaLWn81DELyLCcIO6TJnpubW7IswbIsbm9vm+rry5cvSdOU+/t7oigiDEOKokBK2cyNIAgZDkfcmiLNavVIrxPg+0PCwGUf7wh8F6UqPVeQ+J6LFFBWhWFgtpvw7S89nL8xy1HqUMyqHYt2IeBQIeSp8yErsiJvjqRzx20v6akFqD2x566zWZutY6EOjBd27a1JpYGvHDzB+hrr42haLn1fdThdVzOfGljdKyCFLropDNZND2kTQSlMHq0V6H0taGJCs9pYVFVFlmU4Rm6ufkocJyOb3JUxFs+51U8G6Nhqc6ikHLuj7Rs+3q8d8tZfrG1ruiH9hsBzXbpdDTStGVodS1+bJtJT5GVJFGl18YvzM5I4Zbl4ZD6bU5U5H375WWONbJvRaEhZ5BR5BsbFthDstjuWiwWT8RglFcvliiRJkZWk0+nQ6XSpqjsc28H3A1xXmqS6DnUWiyWqUgwHQ8qiJEszxpMJlmVxZwRAhLC5OL9gt4+4v7/n9HTO42IBKPqDHpYQOLbuOV0tl3i+Tno/PNyTJLFunK8qsiynLDQ7SFVKdlu9uB0XHhc3XF6ec3Iy5Xe/+2s+f74my3KWyxW2/YFdtCUIPIbDIUmy143qb16x3UZYwmG1WrHZ7BiPx2y3W9PfaRljETSpiU5Hi4osl8sGb7fdbvH9kpFwyLMN/f6AJE6xhE0YdsjzNa9fvzEFCRulcpI8J0lizZK727LfbZlOx/R6PZbLJfv9vvGAd7sd6/Wa1WrFYrEAMFCPGZZlMz89pdPpcXd/z263ZTwe0w19er0Om82Kssjp+D55VSFlAUoihARZIatCt9BZPsp6njGjnqO/vhl4w1HICHyVQ6sdi/ozlrQaJpLn1nZtKH8tWvo1wwZoeIUQpm3QrPN2Lks8xZA1xkweOhTaztJxAaOoJGXlUFiNWTSK6yCx0JyJGqD8q57Z8Y1oL6Z41gt67qa/xQhwbIF/bbB+7W/f8ubaP8tSc00ppVsn6kXU7XZ1X5yURjJNNcZMS6JFnJ7OGI+GJFFEVRUMBz0EivV6ycXlJdF2g0Axn50wPTmhKkvuHx4IfJfXL1+SxjFKKna7yPB9WaxWG6JIS869efPWPAx0s3iv1zUVPw3RcITDcDhmt9tSFCXdsE9RlERRrIVQ8ozF8pGT2YzHxweyLMXzHBYLTSF0Ojvl6tNnyrIkCAO2200DiciyjCRJybKcTtghswps22O/T5AyxvNcKukymfbZbNaMxxO+e/cdtu3w/v1H/tt/+ydOTqZ8//3v6Pd7eJ7DaDTky81nfF8z1iZJgixzoig2lcEeea4proUQDQ1SDe+RUjZhf5qmJoRxiKKYTrfDaDI26HrJeDSi0+0ymUywbUv3jKYx0X7Pcr3h5ssXXFvQ6+qq8c3NTdNdsNvt+PTpE0op+ia5X8N0Hh4eiOOYH3/8M0I4lIXEEhbj8YjLizOKPNECy7Kk3+vQHXQpSoVSAtspyUtFmuUG8KnJHZX6SwbrG5sQJg/Fk+jjeJ7Xa6ptLOsHeafTOcCl5FOjqI95vK6eGtz6Xcu8aHcUYCI0SwhkqcV4dbe4FqyuXa5jB+Y5Y1bfQ3tTCgoJeSXBVk0B1jJxZQVG5a0WXf6VamZt+RtL36Iiabu57Zt/zgof/63+/P+I2/1vCTuPt9qlrlSlySCNZmaNjxNKK6rnRUGR503S33Fd+r0+3TBktXygqnKmkxGWBY4tOD+bMx4OePXigvcfPrJarthHOwSCbqdDnmUsFguUUprNIst1rms80ZqjpVYqd10P3/eJoogvX77guq7JqXlUVaqfTEXBZDLFdV0KAxe4uLhEKUV3HPLw8ICUFaenc1brleHcz7i7u+Nsfq4hAkIwGAwMmaHug4zjxCDwE3wvpCy1Qs52u2U8njIajxmOPH7z21ds1mseHh4RQrNTFEXVdC7MZnPGkxEfP/5iWq8q4jijkpoMsD/oo5vidYhZGzOgUdPJsqwZlyDQFN8K8HwfEBRlSRCE5HlB7hW6WmnbdHs9iqKkLBXrzYblcsHj44MG3Cax7pdUFbYl8H2/MVyat00rmc/n8yeV7dls1kBmHMeh2wl5cfkCz3XYRWui7Zb7hxuSRENMBv0+pQRhuThuRrTP2Uep6Uu0qbBQz+S4/q3zWe8jvlrw9dqq3zsuLMABclRHVTXuSx9A/2jDPOow8+kFHD6rtDVr/lDnvmRVaV0EIRCm0ojJudXhY31Ndahs1UZZHAzjE7QCume1UlAhKDnwldWPhupwBl31/DXPrD1QbeWXY8PWBtPCwRU+9syO3exvFQV+bftLSf+nLryN9oA12rvT0Qu5LmbkaYKSUjPNVhXC0snhTtDFHg7ZbZbE+w2j4ZjTszMAiiJDlRbXVx85PTsjS2LCwOf+/o4wCHnz5rXWXrz+rPs5TSjV7XYbjJNSqkGfe57HdrtFKdWMcQ0eBciynLPTM8qqIisipFJMplOWyyV5XjShWbfbZbdeYtnai
eOlMzCFUn1/DOminKtm06QmM2mxMEgbK5dmwG/QGu43N88pw4VtvR09NTloslrusqNcJySa/XW1+/8/mcMI7W1uPz2YxlGGLbHZUmVRTq2tPVwlDTFBOhUWY0Vkm6kCrdyjARlSIl53muYvVqEbthW7i2C7pQXWaqoIys+JZiRh0FpaGKwXocrOrOrKpqHonSSck6LRoEotGKbXRmm6TY1x2vMP2FeqKIVxn8su7UGsPC5qN5odbctFwJtGWlvRyBa8V+gfr5OjqykBRSEVjHu2Pu3LnDwcEBnW6bzz//jK+++oqL8yuWyyW6UOtr3VB6sPHWGNdxEFpJksXohkPX97FsgyxJSaKEJJEUuawzKLX1i9gkWhf1c1/ji1K9yVqei2nZvDg9RWg6k8kUoWkMRyro9er6ilHt1nB2eUEpYX9nl0ePHtHtdkmzvA7wUC6rw1Gfrx5/je///6S915Ml95Xn90lvrnflq6sNutFogBiCbnaoIGNnN0TuPmj1oIh92QeF/pPVPyATMn+E9mVDoYjZl9UMxRlwZkCQME0Q7buqy9f1N296o4eTmV3dADgR0o0odKOAqrp1b+b5nXO+Tk7eR48ecXF+hreULILVYk4cBnRaTXrtlpgnBjFBEJeGe7K4zfOE9XpFu92uk7BsywYF0iQlqzh5cSKjoaJKhmUYEEbyEUUheZqDBoWuim1yoZS+agp5nrJcBigZFElBHKegqERJSqEEpJlcmoZhMBwMeP7iBcONTX70w5/wn//mr+n0vuSHP/gRx69OODx8hfWOwfvvP+Di4pzd3R08b8752Rlff/01xd2c3d1dvv76a1ZLj/fuv89sNufly0MUVfbCqqHz6aefsrOzzfvvP8A0DQ5fPKfZbBIEAc1Wk/l8zsuXL7hz5w66rvPq1TG+H9Bwm2yMtsog3zXtTptOp0OSxKxWS4LAJ44jdvd2CJ56pYtwUvL35NDO0owwimhHkYBWkYzkVR5nJSLv9nokZRhxFEVomsZoNOLLLz8HVeH27dvEUSqHVhzJisBI8TwPRdEE6AkC1v4ay5TxMUrF72xzMJT7LM3p9/slo0DE71EU1fZFVdGpCO1h5JPE4hRrlIVLUUBXNXRdZWtjs0yikrR7yV4F13Zxeg7n55fkeYFhmaBJ3muz1RF5napQoJKjo2eQ5n9iZ1YXmdfV5htjZvWkr3dP1Ye4cArZtcp7vK6vvL53e7ugUe7c4iQhjqJaAKvrOoYmqFqRvk6Fum72GMcxfhDiui2ha1RWRoXIMaqfZ5XUgUazycZoxNb2NoZh8Mc//pGnT5+CUlkXlfIdReLnVU2V+K4oxFsvMXSVwaBLt9el0XCkE1QVyAvyrLhG8i1K2xdhEKWp8GsajQaappKmMYaq0Ww1aToOy/mCdqvNyfEJcZLUwmdd1zEti8FgQJymnJ9fcOvWTZbLFXleoBsmi4XkBMxmMz788EPW6zUPv3zIRz/4iK+++opnT5+iAJ1Oq1Q1xHRaTTrtVv2uO9brjAHICEKP9CpiuZzhuI6MBKYpyFleEEdRTXKOowjHkv9HkoIi6VZ1SWs3DFX2l2pOp91FIWexlKKnKAIOGaZJqqTCYcoLKKRbTJOIrFDw1ktQoN1u8ejR13T7fd7/4AOePX3GrYPb3Lhxg1/99X8m8tf85V/+nF6vy8M/fMH5+XndYbx8+ZLbt2/zve99j0dfP+Lvf/OP/OVf/gv+1S//Ff/P3/6a6WxWagZzTk/P2NuTpHVT1zg7O6PX67FcLjBNEY1Xfv+r1YowDOl1B1xejlFVjXfeuctyueDFi2fM57PSBkqAjlarVYb26iyXpdur7ZJqOY7joGdpqRIQgMjV3ZrRXyWNt5pN4k6H1WpFEsX1gdnv90nzjPlsDqjs7O4SJ2L4qSiVB5lQQ8IoEhCpJ7vi2JNO6+6t24zHYw6fPqfX60FRcHV1Vcv/BL1WavCg1WphNxrkuZB9wyAkSSMoCgxDE+qOqdd+f7quvkHsFU5pgWU75KiYaSamjiVomANRHGFaDoWmUwh/57uLmfa2y8VbBUtT1ZqbVRWLioUvth+vEcvresrr1I23C9n17q86carCV/mOUf63JIyA1yaQFcKqKAqGblDWEyjEV9wwTaxypq/QvI2NDVRV5fnz5/ztr3/NcrWk2+kyGg7wfE881tIy1wD53ppSEMdqmb4jPDTD1MizhLywURA3Ase2ubi4Io4yBoNhGVRSRpGVST7L6ZzlMqPTadHptBmO+kRRxJdffkG31WW1WtPrD5jNZoxGG4SxILY7O3vcvHmb/+M//Ac+/P73sSyXrx89KV1Oc8bTGWvfJ4wiDm7e5Pe//z27u9s0XIfPfv+72i3BMDTa7Sae57G3J8V8PB6jKCp+EOE4NvN5TtOViLU49AnWK64uxOFU3E81kTVlaXltCAiwUjXIczH0U4X46nTb8v/nCpqSgSLXUZZndQYDJffPNA0atvjHLRdL/DAkyXLSfAWKiq6rRIkEZeRZwWw240c/+hFxEHH08iUPHtzn+PAFT58+5ubNfX75y1+QJBFXlwL9D4cDTN3g1atXdDodbt++Qxgk/PaTT1it1vz4xz9mOpvx+ZdflO4mGX/84x8Zj6/Y392h3+/z/OkzuQHzjMlkWkvXkiRmNpvRcNu1+PtXvzpCUaDX75S7Rgm8sUKL8XhcdmNanfztBwFxkuE2Gpi2ycoTVHwwGAiAZkkXNBgOiWP5eRVQ0+926Xa7jMfjOulJURTiMgVdLQ1Jw7VfU7CqddB8Pme1XBOnITk5rWaDi4sL1us1gLjm2rYUVdctR70IwzLp9XoMNjdYLpeMZzNm0wWGoaFgoaiFWPqUrjO+nxKHEa12k43BkG67Q7crluZpkhDFKePplLUfEIcRzU6bOPAJYznMLsdXHBzcotfuYJjeG9uwb3ZmNSv/2t9f82KlcF0rTEVeoCivnVvTLBO/7mtF8O3CdR0UeEP/WXYEWplnWbWvRSEiZFkwa7UtN4BRFjvbtlEVXfL8CvnelmnRarXoD/r0e30azQanJ6c8fvSIs/NzKAqGoxHbW1v4vs/JyTHtdgtFKUmbteSqLKq6TqfdkgTnosA0TGy7ygyIUcqC2+v1MA23vmAqy5fZdEYd4aXLDkouvglHR4cUikKW50wmM3q9Hgc3b+KHAfP5HMMw2Nnd5Ve//jX33n2Xre0tPv74Y87Pz7l37x6Pnj5hMpmgFAX/+l//a6bTKa9eveKj73+fTz/9LbPZjJ2dLQAm4zHbW1u4ro2pq4Thmt3tDRRFJclysrRA15Sy44rQVTnkTN1EURXSJCCJxMJYVcTrXlEgJxWJSbngVhSxUMrSmKJQyfKEKAplPE0FbZ5Op1LwbFucPLQ1TbdNGERSmJOopOqIP5ZlOfT6W6iaQZYrBOsVx0eH3HvnDk+ePOX87Iwf//jHzGYTvv76ax48uM+HH36Pdtvlq6++QlUgDkNevnzJw4cPeefOXe7fv8/J8QlfffUHHv7hIR/94Af87Gc/44svvuDJkyfcuHEDVVE4Ozvj1q1btNtt5r
MZo80hDx48II4jnj17xnq9xnVdHj78gq2tbQaDPnEsLq+C/Arp/IMP3uf07BTPW3J5eY6uV+7MSom65+iahuu4sg8NfK6mU/zAJ05TDg4OaDaaJEFIGEU0bKd27/B9n+VyQRAE4vTSbBHpMWogSKiu66zWPovFgq2tHVrtNqdnp3LgOzZXxxd0++J3ZpV74jwSA1W1nHCCIGBvb49Wu818uRCQIQpRFCG7X11elmoeKfIa1WRUoCgq/W6nliYKsruQbl8XYqxhWhhZXsrrEpRCyM6KJilh3toDBZJE3Ji/u5jlr4tZUYIBSoUuIjIhvVysZwWk5FCAoQrqSCJL4Le5ZN/lxvGGRVD5p+u6aKpat+2KoqCpWuksoNdoWfUcxf43IElzBu0+vX6Xzc3N0iBPxoDDF8+ZzWa1C0HDNtE0ncBb4S3mEhCyuUkcrykyASxMTRM1g6rjWDau69Bpt/FWC9kH+RGxE0GulciOzuFLOfF3d7qYpoXvCyITR1m9KDYtnbt377K3t8d0OuH09KTuRC+vrjA0o/SDV0kz2W/8xU9/ysM//IHlasmf/7M/5//+67/m448/5vs//AHPXr7g8PCQe/fukSWCxH788ccMh8NaYRGGPkkSCZWl28Z1TGzLZjabUWQpRZ6haqArBUka0m469XVgmIbsSJJERowsl5TvAgzTxCg78SxL0VSt5gkVFGITbRmomkqeqxR5xtpfs1gs0DSFLMul+0ozkbplPsulvGZVoK4ACpUxHzTVJqahEMUZaZpxcXbMD3/4E/ydLabjMR9+/3v88Ac/5Pe//y0XF5c0my5RFPHh977Hxx//LaEfcPfuXeZzCSppuC22d3bwg5DZfM5vfvMbuv0ev/jFL7h79y4nJ68IAp/lQkam73//+zx//ozVYs7FxQVhGTVYubC0Oy3iJGS1WpAXKXEccnV1KWJxy+Lzzz+XqL5el52dPWzbLmk6M5aLFc1mC0VRiKKINBM5XTWhGGWsnWEaKKWcSCvBt9PzM5arBdvb23jeql716JrBcrnk+PSEleehanL/xHFMGAR1WE6apNilw62ma/U1KT5uUsxcV5B3QVBXrNZigW07NpPplJeHh9jlcr/io+nXLOo1TaNhy2hplkW8KAqUQojrmq6jqCrNdof1OiDNC1RFuGXz+Vx0p8sFuqaQZznqNXHmN+RM/8v/+D/8+7d3ZG8XpLc9wKoZ3LSt0qco/9bO7Nseb9v3FBQ0W603UEtN01CVMtGm7NDCMKxTf1ot8bPf3dnmndu3cV2H9drn5OQVz5+/5OTkFcvlkjiO6PX6RHFAGIRYlkm/36fZbJDnBVGwLqVRObqqYhg6pm5gGgaOY+HYLq1mkzTNCNZrAn9NlgqCaZZe+JUn1mq5Jo5jsgpQUY2a1nBw8wZFUfDq1SvSNCEr3Rtct4HjyM/oD/p4nsezZ8/46KOPePzkKYeHh/yXv/wFn3/xBU+fPqXf7+M4Lmt/zWAwxDRNNkYjNE3jD3/4A9/73vcIw4DpdEK/1yXLxWZmNBrRabVqhxLT1InjUCLBoojVaoll6hi6iqaAXTLCDV3FNk0gK1OqwNBVQUGzBCiwTQPXtjB0DZS8HClFwqZqAg5URGgxCIQojksgwCDPYe0HeGtfCrou2l7T1HFch2arwdbWBo5ts/Y8hoM+a29F5PuMBgMODw9ZLOfs7u5iGAar1ZIsS9jZ3Wa1XLK5uSEmmGWI8vn5OXGUMJvNMQyTMAq5vLzED3y+/PJLdna2+cEPPsJ1JBDlyy++4OjwiIMbNxgOB1xdXdUopa6L/32v1y0leiFFkdccSyjqgGfZhQW8ePGSMIiwLIeN0ZbYF1mSRRmEofi6aRp+GLL21iSZqGgG/T5ZkhKFIaZusFqtGF+JiL3f7zOejIlK6k8cJZIiFUcoiPHozs4OUSS7seFoSJbJyNxut9BNuU7TOBFL+NJGqEJD4ziu1zYFsq+OkhgFsF0HQ5eiVNUAw9BLfqSDaRi1L167vM+LkkC8XC6ZL5ZkikZ/MKgBBlVTS+v4vFQRIWseXWgu//bf/bffLmd6e2H/XSPh28Xoumrg7SJWfZ9/0rqnRMfkhJZ8gMpvKS8LRcUUbjabOI5Ds9HAdd0a6n305GvSJCGOMoHFM5H0qIqOpqssljMgp9Vq4DZskjSWYFJFo9lyyTL5GoUMpRCPpbQoiCMVXVFJoyaOaZE4Dov5jNXSE76MLfq2wUCkInNf+Dqj0Yg8zwl8iSO7efMmKAUvXjzn6uqCbreD60oU3Xq9FmjasMiygouLCzrdniSlqyr/1b/5N1xdjXny5CnNdofBYFB2Fk100+Do6Ij33rvPF19+wa07t8qg4TmWZdHrtFEUGctVBRFdhz7NhoOqKhSFQxrHKEVGy7XQdVVkZaXrqaZpuA23DKiATJMTU1Xz8tBJ0HWNJM4psogsk6zOLBP0WzdNcQ0tChRFVhcyUkkUn2GItY+qJkRpTuL7kGVomYapGNgNm9FwJJF1mmh0/bXN5sYATVG4ujhld2uTG/s7HJ+d4zZc7t+/z+9+99t6bFv7a0bDAc1mk6urK8m2HI44PTnD0A329w/44IMPUFSFs4sLNE3jyZMn4qHW77G1tUWe57w6POL45JiD/T329nZJ05STkxOWyyWet2K9XuI4bnX147oN+v0+vu9zfHxMEDSvoZKiQZxM5rJrjWMGnT5RHAv62GkzGA7JFMSoMQxqNQbILsssl+DdbpdWu8nXX3+NqsLejX22traYjGdcXl3Wi/rBaINOp4uq6qxWq5LmlNWuNoVaWntnIp2ySkeVKjt2sVhIarphkObiyuuVz6vd69BouhRFThqndcZFs9lEQ7rB4aAn+QBOkzzPicrGxPM84izDaKlohoXjNln7Yf28iywVSlORErcb6GoB+Z+gZryNWl5/1HyuCs28NiYKz0jQlJw3vcy+rfi9/e+CZgp4EF6Deq+rAFRVpeG4dRqMaZooUI8L6/UaDcmglI6olC1pOqZZdZPyxkVxiOIXOI64lWZ5jh94OJYulkC5dBtZmokTZhSTRjENuylM/labPJFTQsmFYlBkAqm7rkuradZqCIHMTYbDIev1mvOLU7IsY2Njg9Vqycpb0ii9owa9Aa1mm9l0xsbmBsPRiL/7u7/jF7/4BZdXV/zN3/yNtP0lYdFxHMaTCZfjK95//wGKKojbe+/eYz6fkWc5g0GfLIkZjTbQNZXpdEIQrHEcG89b4TgWtmUxnguhstl00RUNXYUkEHWCrqhoFBiqQqGrZKp0TIZhoBY5pCmGrpHmMVEmjr9FkaGqClmeUBQKRa4TlTexZZmyR5zNsWwb23Lw1mviNKNQVApFoVDlIwcUVaHRsGWZPz5HHW3h2gZFGtFp2gSeyWo5Y393lyhNWCwWbG4MGQ6HHB0dkecpt24ecHV1wc9+9jM+++wzzs/PicKY/f19JpMpJycnzBZzDm4eUJSk38PDI+7cucV8PueLz37PT378Y+7eeYfz8zMZ0Ytc0sHLgp+kMZCh6aDroi0uyFitlkLUth3p3Mt0c
1Bw3QYUEPgRq5WHkgnAFceR2Fh3OjQaDXq9Ls2sRaslNJpeb4BpGCiFOItUe6iDgwMWixmLhfiwzWfCQ+t2u7iui2HZzOdzGg0JCT6/vKDVarGzs8PxyRFaptFwHRxT9KD9VofpdMqyVKB0u12uxmN838d2Hba2tvDjqKRkJTScJrZtYWiSt2mU10kcRCLFKrNzK2DMMAw6nQ7tdhtF1zm+mrP0vFLUn4sVUJ5haAKeLJcLup0WKpI1+53FTM4SGfcKBKWqk4UVqKRE16oRIAvdrJDUYZQ3fcrekD1d+5o3illJBnEchyAMZXwrBbbCjpc32HVl/1EhQcU1s0btWviqqpTk1rwgSWN8f02aJVimg2FqJcM8JQx9TNMAVIoshULGJgClyCmygiIryJSCVNFJk5gsEZZyw20IVyYr0BSNRqdHkgudIs8kVNb3fbrdHjvbe5imzeMnj2k2BWJfrRYlaCKJ04PBgLW35tXhK0abm9y5cYMnT57w4x//mCRN+Prrr7l58yZXkwnDXo/9/X2+fPgQRVXY2Njgz/7sz3j45Zd88MEHLJYLojBkd2+H8dUlnW4XTZP9oa6V9i2qdMGGpjLzPFbeChVoN100VaHXaaOrarkTUsiLnCyJiYKAwPewHQfLMDB0jURVZFdaOqMYloWlmuiGSZxIHmSBQprlYuqoaqwXS9ZrH1Sd9dpnufQI45ikyNBNg1azQ7vTQiVHUQrC0BdqjWUQhh66qjObjUnjjDQOeXX0kjgTkfzz589oNxv0+z3G40tA9ntRFPPJJ7/lxo19wjDkyeOnRGEiuxh/jR/65EXOjYMDrsZj0jSBomA6EQfX3/72U24eHHD3nXfw16vakvv27dv0ej3yPMEwVeI4Iori8nmHLOZLXLdR8i/FX2wwGNHr5nS7XcIwxDBs8RDLUtqtFpPZlMViQRTHrNZr1t5aMk5VlTAIcEabKIC3kPi5IAxYektMw8APvFoWWBQSpqMbBr4foKdiCmnb7hv296oq45yGGDZW+7Jer0cUhnirlRiQrtc0Gw06nbbQJaKoFr6jFgRBiOs4OI5dht6k4iqcSKd2fn4uul5X9NVco2wVikQSnl9cyFSgafhrjzgKGPZ72LZNQihrB9tEV183XN9UACiUUQHygSKfUxWFQpHbvChtjOBLOQAAIABJREFURKpuqihK56lUFPQoChpyqip5QaZKiGdKjlpQ+41V4Z6FQpmGXCoG0lSitFQN3ZTZu0jlSXueV0K4saS1lMtmrZyfxd4oI8vTWrAuF7KOiU6SpuS5LlHveY5qKOiq5ARGmXhSKaXbrKLIwllFQVM0NK3Ul2UJSqHVQEeaSgfYaDSYzSdYpoFmipC81XDp9QdEsc9Xf/ySW7duo2kqURTh+x6XVxdsbW2xvbPJ1dWEPJGU7Q8/+oj5XEa8e/fe5T/+n/+RIAgwDZMb+zdwbAtv5WFbFqEfcOfmbY6PXpHGEe+++w5nJ8f4WYJhiBWybZnEcch0NqbTbtM0XcZXV5LmraksVit5DQs5ypIsRkkhL8TjSlEVsiQjK3RQVcTURcTGjmmi6AZZmhCmMVlRYGgahmWVSe+lCVOhoBo6SqHgByFRnIAmcWzBLCIIIwk6KTJcy2Jrc0Sv2yWJQ+IwIEszzKaBYjusVx6aqkNpUyTLYJ35fMKN23fI8oTlYkaj0eAnP/4Jp6eneMuAnc19PvnkE3qdAQoS8KwoohTwvBWmaXF+HrFYLnjw3ntsb23w5NFjJpMxvV6H4XDEfD7n0999ilmuDbI8Z75YkKUZN2/dYrmcY5rGNQJ5TqPRQtV0ZvNzbtw4QEGh2WyzmM+5vLxk5a2wTItGw+Xq8gLHdepFexRFeOs1URJL95IXzCczZs02RZ6zKB1mzNKkYb5cYJg6lm1DAf56RRSBmQniSeDTbLYoiowoSSXbIsm4mp4QJxkWOWkUEaoqke/j2WIfNJlOyHOZRlqtFromnXZRpDimThjnrEq5YcO20VSdMPSJfOHF5XlOnuVYlmQQCOdOqbXXnuexDiPszpCLywnr5arO84wCn16nTcN2cLpdBoMBrYZDEr/uzL4JAPzP/9O/p0SQXsuMVLF4UdTSxOy6i9k15dM15n7FUStKaodwtdQ3mP3134sS5SyK8jRTStZwqTLICymKaVq7aYjRiCKFRlWFolHeGNIdKmWhK0+d8qdpaml3XZS0jrLlLTKhmOS57OVQVPJCqBKKqgr7WdNoNF1GGxs4tsV0NiWMAtod4cmcnp3R6wq1oygK2q0mFDlPnzzi/PyUGzd2CaMAyFgsZpyfn9If9tjb35VADBVmsxU3b91BLR1v/+zPvs/5+TmBH7D2fNZrn4bbYDTaZDqZ0m13GQ6G3Dy4SRJHbAw6+N6Cs9NXOLZFkUlgsWWZ4ienSHsfRiG6YdDvD0rScYpmGPiBz9pfo2oa85UsZKNEwieiNCUpCuIsI84ygjhi6fmopoXbaDFdLAjCCNOyaTRb6KZJnKaMp3PCOCVHxQtCwjhhufYIIklEz4E4TcnJ0fUCQ8khT2i7DnkSkYQh7VaTVrNNu9nG0A1syxH5lKLiOg2W5U00HA0YjgZYpsFsOsWxHFRF48XTQxazNQ2nQ9Nt8Zvf/D0P3nuPmwf7HL16jh8sgATTNIijuDRIdNne2mBzY4SmqhwdHomkL88xTYur8YRmqy1KhThmONpguDEiimPanQ6379wlCGMePvwjhmkzGG6QZWVuQCEp4IVSsFqv8MM1mqGJj99gQBiFHJ8cE0VRDRiEYYjruAz7fQLfl5DoomDlrSgo6HQ7Qr8wTZI8x1uvcWwH2zbRSl/BIPABySloNJq4jSaFojKeTlksVxiGzmqxoChyDFURP7kSVU8S8Rx0HUduoFySy7IslvfHtUmiGNdp0Gy20FVDJhingYJKEieoispotMHt23fIsoyjoyNZbbRadVHw/JhOp0uz4bJeeaRJTKfTLr3tFB689wDLMknKIv/f/Lv/7r//1mL2v/3v/+u/l9TNqohJAcuLQuxIyiJRM2Xf+vhmhPCbjz+FblaOsPWYex1NvfZlRVG8ll2VELBectNEO/q6ba40pG8L6CuVgKFqpQSnsqCp9nwSpaZpmnhJuQ1c12HQFyKsqkmYQp6/9mlrNBtC87Aldi5NE9ZrjzAMiOJIsginYy4uzvF9n3feuYNpmXQ6babTKYvFiht7B9y7d5/JZMJPfvITJpMJs9ms9pOXPAGbMJDE8K+++grHliDiV0eHbI2GzGZXeKtlffEWRY7rilBc07Q6C7Lf70sqUZ6z8jxJrC4KwsBHUVVBv1SNJM3IigJF0wRd03Vs2wVFZe2HZLkk60xnc6I4rlHZNBNL7fF4ytLzSbNckMwowvdD0kxO6yhJyIqcNEkwVHBMjVZJg2k2GqIySFICP2C5WNVhvvJ+KjVPa7Q5KjNL81p25K9Dmo0O7957jxcvDjEMk2azXaJnc3Z3t8mLhCQJ8APhiZm6hes0UBRYLpZ1QakW75ubm7XPXlEUHB8fM5vNuLi4II4jbt46IMsylkvJ
FVh7AYZhcnFxKUG97Q6GYZYdesh8PmO5nJPnGZqu0XCb7O3tYVkWnleRuFOSOCZLU9JSzjccDhkMBvT7fZEkrdfMFwu63a4YGMYxTsPBNs2yG7LEmXdrmyiJWa8DVFPHtBw8f4238lEUhXbTYdDroWminVQV0UyLkkOpSeOu66Agtk5qOVF53grDtFFVvQTzomuouQQDV/vC6XTK8fExSSJjflEU5eSkYFm2NDdlbF2n06bpOmJqECegFDTdJu12i1/+1//229FMwzC+4b1/Ham87oTx7cXq/18xqyvNtf+36sQqaVWe55B/My290mrCNZ5b/mbwsHwbhest4nU5laqUZo7Fm0Es1R5wtVqVOsqSm1O+UbZts7O9TbvZIIp8Ts/OmM6muK7L1tZObQPuNFzGpa9Zo9Hi2cMvieOUu3ffxbFdpmORJP3zf/7P+fzzzxkMBjW4UhFtNU1jMhF/t3a7zb1798jznP39XYpMnD2HwyFpmpYqghGu63J0dIRt2zVXqEqU98u4PF0XrmCoG3iesMQVTSXNM7IkwXVdFBRUVcGyHVA0Vp6MUoqqEiYxSgFRknF5NcHzfVAUkqxguVwxnc7FItwQmZteCqbJcjRVwXJtmo5Bp2mXYcPaN9432TkJf8kod0BRGJe/S4qiqsRRzDSZ4jguD7/8I5eXU+7dvU9R5HzyySf86Ec/4P0PHvCf/tP/xWx+xb/4lz/n1auX9PtDihysRotup1+7RSwWCzY3N7lx4wanp6c8f/4cVVXZ3RUk0/O8+mDwPI+/+qu/4sGD99jY2GZ7ewd/HXJycspisaDT6ZRmBxZg1VPIdVlPmr42VTBNAUqqQztNU5bLZS0pyrKMra0tzs7OWPt+nbEJ4kADcs3mec7GxpBev89wOGR56Mm13HRpt/q0Wi0mYymoRolSKqXLi3aNwC73l0xmiqZiWBZuuZNTVZU4zRhPFoRRUtM3rv+OjuPg+z7Pnj17A9xYlfu4ra0tklzn8dNnTCYTcShxHObzOYGhMRj06lBjcTl+HU357QBAueSvitfb/+1P0Ssq3/3vLFj/VGemayjXqBw1peOtr89LMu/15/f6uSlvFLProvO3f5830dTSelsRpnL1+WqmT5MUpZB9F3nOfD4nz/PXF/Law1stMA2NdqdNf9BHVYVXlRcFg06Ho+NXdLpdHMfh0aNH6LrOYiEM6M2NLc7Oz9nc3Obzzz/n9PSUZ8+eiV41jlmv1+zu7tLpdGopy+3btzk/P+fx48d0Oi32doa0W22yPGU+n9eHT5ZltUtBlaY9mUwkr7G8+IsCTFP8ysbjsYRdFNJ1+X6ApunohkEYRqD4aJqBZdsSzlHuhrIsh5WEZqzXHrpukCQZ63VIFMcYho6iSNpQaVeASk6RQbPRoNduMBi0UYEgCMXZoShwXQfXbcoyfbEqgQAJ43UcR9CwTpvFckWaZaxXHklS0Ol0mM9XfPbZ7/npT/8LFos5r14d0W7f59133+X49JCHDx/ys5//nMeP/8j5+VVtRul5Xn0PXF5eChl6d5d2u11zyxRF4fDwEM/zymIT1fyz3/3uc1qtNu/ee48HDx7Q7faASvcKuq6UNvIauqESBAHz2ZzIjzg/v2A2m5WLerteu1y33RL/tJjBYFAXvHa7LRzMNCGOBEiL/HVJwm2hluaObwcEVYUzKK2mPM9j2Ouyu7eHY9scHh4yn80ZDgdsbm4RhkHNAy1yiWmM45jxdIKqmgD1wVlRrHxfcgyqz7muW/PzKmuiOI5ZlK97o9HAsk3m8ylnZ2e0my79fpfRhki5JpMJURzW9/I3iln1g6/nWFaPGnH4EwVJ+aZ70BuP/E98LZTTqqK80QEKQ5haNP5tz0GeKyUt5E2+mxS1b/q0XS9kVTHLsgyl9J+vmNeqqqIWan1TBUFAsJYLpF8iLOJ0u8BQFTY2hgw3RvVplSQJ3lpY73rZXjuOIxIsTUXTddrtNpeXl/yzP/9nNU2gKAqiSODsTkd4ZRXZdjwek+c5rutyfHxMo9FgNBoJqmToTC7HpGnK5uYm6/Uaz/PY3d1lNpuxXq/rHcx6va4zMUVZIfItzTDQTBNDVclzmeot20HXDRYLj5UX0G53UBSNdSDfJwikoDluQrvVRjOckqu2ZLn0cBsu3U633CPIXjPPc6jci8kxdXEVNUwRsnueV3YhRdkZiNC9KmZ5XtDtCAcMRWE6m5EkCcvlCgWNfn+IaVicn1/xh6/+wK1bt/j8889otV329/fI8pjJeILjWLz33vtMxr9BU+WGv7i4QNd1dnd3WSwWzGYzWcCXHeLFxQXD4RCQwjIaDVksppxfzPnzP/9zhsNNnj17znw+ZzyeSCp7yYcMQzEONQyt7H56tS15vyvGnp7n1YXFNE3yXEijlZddFQ5S0Zgqhr5lWZxdnDOZTCSwxbFxHAdVVVkuFrIf1YVsniQpszJtPU0F3W6YDo7rYDeaaMZrswi1PMxsx2W5WuGtVkJgdRwa7TZmGGIvlnQ6gzIs26XZbLJYLOppz/cl43NjYwNFUQStjaJ69fH48WNmS587996l2+1ydHTIbDbDsky63Q5pGbsnr19Uk9LhW4pZ5RB73RH2uj/Z9SLwbY9/ivH/Tz2uk2uvi9Mrt4z6ZxcFSrUXq62GcqFKXBszK8Dgux5vWxAVCHKrlfIsy7LkJs/lxhPhrcKg12M4HBJHYrsThmHZjQ3odLokScLp6Rm2beM4ckItl8uy65lwcnJCs9livRK/KE3TuHHjAFVVePr0KbPZvMw7LEqE7YybN2/hui7n5+d4nsft27c5PT3l6OiQra1t5vM5s8kZ29vD+gKvWn2gJCBL9xWGAUUh6HCv1xUpGkLcXS0WqJqK7wc4tkOcpBR5UWZnZiRpih8EWJZDkiVMJzMmkwlJmmHaLlGqEETCMQsDnygUUrJafagQlNF8pqGjYWAaOpauk+cZi+WSfr+PXVo2K4pS5ohCt9urxftJktTd5mw2kzCZqytQVC4ur7hx4zbNZpM8h26/w9eP/sAPP/oRu3s7oorodxiNRlxdnXN6es5oNOL+/fc4Ojyp5UPVGuG6j5dhGDSbTcbjcWmlI9dG5QbhOC55nqEoYuaocEWSpGxsbNa7LZGHRagqmJaJriu13Gk02qjdXB8/fsx0OhWDT0v88KqfX42gQGk35AuSWFqlh4GP4zhYlllboCdJwnK1otPvYZom87XHaukz95Z1qEyWZXVq+vHxMZZuoOl6CUQUXFxccnl5QegHWLZRd3imZdFut7Bsm9VKVB6VVKrX69UC9el0KqTbsnj75XhcdZ3V+H58fMzV1SWmadJqNRiWhOfj42N5nU1DTCfLxzeKWZqm5Q3o1PqwIAjqN7W6Mb6zGPH/fcwEcd64HohSf74oY9vfCkuppCLyQuQEcURRvM4pqBb913+s8N2+netmWRYoeX2BVg4e5EKIzbNCwIBGQy6G2aI8OSyGw4Gk4TjC6TEN8XP3Vj5BFGGaNkWhcO/eu1xe9siyjAcb7xOEIbblsr29w+9/95kYGJo6ilIwGFT
uGUMsy2AyuWI+nzIYDLh7907ZHQxwXbsEGyICP8CyHVarFZeXY9l1KbJnkwKty35JUTDNFEXRKAqBXnw/IIoTWq02eQ5RnJYGiTn+OqAAkiRD101BP0NZxguXzEDXTXw/ZLlcY1oGWZpi6hrtdptmw8V1HLIkIULQbdu0UC2RO5mGThgEEMrNpKlafQPHJbAgO6G1hLr0+wwGshucTqd1hxHFSSkpW9JoNvH9EIqCu++8Qxj5PHhwn9/+9hOxBFovcFyXra0Nzs4u2d+/gbfymc/lkFmv18zncw4ODsp9Vspisag72dVqRbfbpd1ulyafTbz1gqurMbbdEMfXTOzgpSDL96j4cr7vMZ1MyHIRozu2wxmn2LZDURT1qGZZ4lpcdTeVa4xhGLXAXVHEJUZVxVqn3W6LDVcZBVd9Pqh2jHFSri/C0koqkzDqLCVOUlgHsnIpf9c8l1GxKO8LVddI0pzx1RTfDzBNg8CPaLX7dZGt5E/NZpNmyZG7urri8vKyliHqul6P6d1ul95oyPOXR5ydnQEFrVabZtOtQ10eP/oa13XZHA5qAi58hwJAGNpWXSmrE+f6Huu7HkpRUFzfcXFt7qyaqm/9Svlsdg2lvC6Vql5AXXvTqrtqt6tiJh2ZTKMVSbeil7wxtn5Ld1kAlmmCkpNn1D5dgp5Kh7i3s0+j0SDPMi4vL/E8j1arJadenLL37g2Wyzknp4cYJfWh6h5UVWNjMOCrr75ie2ubzY1NprMZuqHzzjvv8A//8I9Mx1POzy/Y3hbXhbOzczqdDrZtMxqNWK1WbG9v0Wq1OTw85OjoiG63S5qmolvceJcg8BD1gyo+WXFSXvhmnd7daDRIkpRu1yw7W62UO5k0mqooI0xTEE1FYueSNCNJE9IsxykDX/JcoUDBsm0ct0GBTrqWrzFNiyAVNUaz0WRnZ4tWo8FsOkFRBNRxbBtFKfNUE4UoCbBcs/RJy2pTger3rygrlRFho9Eg8EMWiwVhGNJoNEGRjsS0DJ4/f4qhW5ydnRAEYk64vb3F97//Z/zDP/w9Nw72eefOXV6+fC6csCRna2ub42NZ2FcIZhUkUrHqayeU2awep+bzOf1+h1u3bvHy5QuSJGF7e5vFfMVyueTy8rI+HPf2dsuCKFZIeVnMet0eURBzdHRUX+Pz+ZyTk5M6CX1Y2v9UE1O1L2s2m1ilbX2lXy6KArvMWUjTtO6W4jL0N8/FPbpa/Hc6HbzljNXKo+m6DIdDOt22dJRxDEhj0W53UVSwTIugvJ66vTZxlOKWUsOqGao60SAIxHctEYv3YZm1EUXyvgBM53Omy2fops329lZpQU+99Jf3uFEDA9d34N8oZhKXFdQjSpX2UhRF2bLndZd2fSSsdlSapqGpWl08sqwyOxRyaxmoWReV13+nLGavgYdq9KlG3epPXdeFTV4mWldi9ySJywvg9ffPq29evO4YhbLx2gW3ch5QFWTh6ohxXbWzUhSFTlfkFuRvZhm0Wm10XQwp1+s1X375Jd1Oh0F/VIpwE+bzJWEUCwFwHdDryk4hywqWK4+f/vSnfPzx38s44Ta4f//dekG6vb3FYrFgd3eHOI756qs/sL29zY0b+3z22WcURc7Nmwfs7+/z+MnXvHz5Etsy2NvbYzjYQEFjtVpR5JJylCY5Chq25UIhF3uv1ysXuQWbWztcXl6w9gN6loOmGqxWHvPFksFwiGlaJKm8r4ulRxhJOEwcpeh6JlbilkEcxoS+j64IV8m27TqAVgEsQwT8pilAgK0bKGqBjYHbcqlsgqqOREY+IScfHBzUS/iz01NUVQre/v4+jutwfnEpNjJl99JuN3FdB8psx4uLcx48eICm6bhOgywr8P2Q4XDIeDxhvfZ5//33uby8ZHt7G9d1efz4sUh7+n0cx6kF5r1er+Tpxdy+fRtFyQmCNXkuZo2r1QpvFXB6esp4PJWC1euR5wWTyQTLMtjc2GRza8iLFy/Y3d3l4uyS09PT0h59wPPnz2sAxzCMMhFKNJEiGBdFzO07d0g8jydPnmDZFtvb20wmEy4uLhgMBjiOxfbOjiSypwl5ruBF4nzsOA6aasq9X4AfRnK4rZYkmXR1hmWhlPdvu9Phyy+/5Ec/+iFZlrKzJ8DU559/ztVEJoeqIRqNRnUx6/V6TCaTevc4mUwYl9IowzCwSvPKNH1tjW+aJrqh1oVLRPo+53HCZDL77mJWoV2VK8V1jeb1xKW3KRFVcav/LHdVefX1iiL+V6VvU9UJVT1R9XfbtuudXTXSZllWx7Bf9y8qiqKeucXn7Nutua8/3qZavKEfVRQaboMkjZhOp3VH0Gq16kg1b7kmCAKUosBxHFyngaZpBEFAURRsjUaEYchgOCSKo3qBv7OzI4EYpTvAnTt3UBSFn//85xwdHTGbzTg9PSWNE+7ffxdQGI/H5Q2pcHp6yvHxq9oCWy8XuHt7u2RZxvHxKwI/qDl31YVQHQ5RFNXi50of12w2ARErLxYLDg4O6i7NsiS2bLaY4wdh6aowQ9XEPlzVdIpCwVuvy/BeIb/meYauquiOiWmYMjIbBrqmEARrgjwjTaQLURXI0hSn0cC2Lda+h2ZY7O/vkxcF3sq7ZiWj4jgWpmmh68Kuv7y8rC1zRqMRW1ubxImMgWmW0+noMp7YJt1utwafptMpjx8/ZmtrS865XGE02uTw8JCG63J2dk5RwC9/+Uv+8R//Ec/z6HQ6dLtdfF+6vuo19n2/5lClaUKSxjiO7CkXixmglNSXuO4kwjBkOp1w+/Yt4jgkjiMuLy9rZNTUzXofWPELJ5MJ/X6fGzduMB6PGY/H2LbN3t4e+/v7TKdTnj57xvmFKEoux1d1gRiNRiVvzWC1XLL2QwzbRi+pHtU4G4Upy+WCG/u7ZEmMpigkiagQhA5VoGoKrUaDJJW09Ol0xp3bd7i8uuDy8hLHaXD48jHmTbtugjY2NgDodDp8+OGHfPrpp+U+7KoGn6rGKQh8JBFCrSkquqHWoEtV3Kp7v0KGv7WYVW11ZYBYmS5WBa2CdSuU721BuqprdYEocmmHFE2ti0XdNl17FNcWbVWxrGBapUQYr4+41zvC6xysujz+qaUdbxaxtz/ER0texMrqKI5jxuGYLM1Ik4yG26DTbpcFzqjRwUajQRhJInWcJjSbTTFMLJ+3X7K2t7e3AYkty/OcFy9e4HkezWaTXreDbkghsewGjis7vDzP6XTb9Pt93rl7W1A+U8O0DOIkJAgzFssFDdtE1w0uL6/qDhsgjhMaDZX12ifLcnRdkYwBRSkJoXaJuEmWqG4YLFdLrq7GpElGUagsV574/GsapmWTphlLz2Pti3ZTT0UMbJsahm6VnDgVU9ewTZ00EkscuaZeE53dZgNd1/AjH1VTqTIjBS3ulwTUVU2XKcqDpNFoiLYT2UnN5yJa39ra4sXLQ168eMHLly/pdLp0Ol3u3LlFnhWcnJwwnU64f/8+liVeYs1Gm8AXoGI6W3A5nrK3t0uv1+PJkyc4jsOzZ8/qgjYcDnn58iUAt25JtNyjR4/Z3Byyu/tOaX
YAlmUzt5YlzUMaAjEc8Es0UezOz87OaLVaLOYLtja2xKa6PGiqDrO6HqvDyTRNXr58ycnJSYkmB4xGIxRFYWdnh7PTE/z1mg/ff4BlWRwdvazBKacpXWZCwfGrM+aLBapi0O106vsvyXPSPMcoCeJCMteJkpTZYsnu7j7j8SXT2YxXr06wLQvXbRAEQa3LBpmEquQqx3HqOtNsNuv7t56wNFXUDeXKyLZtQf3LVU0YhowGYlmUlgf2dxazStx93bHi+u6qWiJWkWkVpFsVPVV/0+Wi6tquF5Lqe73tvAGv052qjkLGxwQlfzNFvSqo9QiriotnWI6af6qQwWth+9vFLAh8dFOrT9HKU51MnnPTbcrvWnY2zUYLhde/41mJbF5eXfHeg/cIw5C9vX0CX0AU2xXi4O7uLt1ul1//7a/rN3Jvb4/VcsGjR48YDof1WG/bNufn50ynUz766CPyPOfx48d0u10ajQZff/01vu/T6bTLPZZQGuIyks6yrLqDqDru9XpdL2GHwyHL5bLkqnVotVooccx8Nmc2W4jhom4QRTHL5QrdMGihEMcJa29NnCTYtoNpmDiWiakrWIaObVslEVfoFFEYoZaGlyBFtNVqoqjiXqrpmjjZpvJ6rL01RVHUO8kgCBmPr0rAQq6BVquFoZulU/AphmkQBGGdhSm7LimicRwSRQmu64jQPsvodNosl0tWK4+NjU2uri5ptxpkWcpnn33G+++/j+M44pHf77NcLlEUhcFgUJsxuq5bG4ROpzM+/fS3QIHnhRwc9DF0i9XKI8+L2iDh9PSMR48e0e93yQtxVW00GrUR5uamIJ8vXryoC1mWZSwWizLH8/UqyLbtGrhzXZf5fE5WCPcrjKJ6TyWNiGRUrn0fTU1xy+vx+PwMQ7cYjYZkSUr+xmJbpIzS9Uss3Gq14t6dd7i8vOThw4eYpslwMCj3hhJR6LpujWK6rstsNuPFixcEpSHk5uYmnucxn8/rIBellNslWVqrVVzXKW3JBVCbz+dVsXizdr19s1coxHVaxHVqhuM4tFqt+g2sPhRFkMCk9oR/3bG9XcCqz30boKBqrxm91f8rRZE3xlqFN9PTZbz9p8RU/9SjELM4Ja/Tn6pR0zZkdGg4zXqpGccxnR2JDYvjWKD6xZJer4dhyTg0Gm3IBbZY4DgOBQrD4QjLsiVrMs04Pj7ho48+4uLiAsuyuH//Pq1Wq/apv3fvHmma1tkFk8mE+XxOt9vl7OysTq1er9ekUUSn3aoT37e3t1EUhaurK0zTJI7jNyQkjuNwdnZW2z7bjks6XxLHMcvlUjSPKKiqRpbmRFEs6ntFQ9EKUFQURcOybVrtFm3HRFdSdE3BdmRVYJomlm3jOpLoVECdiG7awvSXZO2CLI9ptYXi8vLFS8bjMUVRlJIdmEymgp6W11yz0ayX2yCdz2RwiUaIAAAgAElEQVQ6BQoGpW+WdO4iHVoslhiGSZKkXF1dlQvomMVixXC4IbZJRUKz2QHg6OiIjY0N3nvvPU5OJA9TUOLLOuz6iy++KAGbAZdX52QrmV6CQA4NVdHLrkqpd2a2bcnrbZs0W81akpTECa7l1rQQ0zTr9626J2ezGZUapRq3KpVC9b5++vvf0esKN/HVq1fcvXuX9957r+bKXYyvGI/nGI5Flgm1I4mFaD3o9dF0MW2Qe6xqMPLXmQ2FwmQ+Q7dMzEwCqaMoQFFV3n33XU5PT+uDqEpBHwwGTKfTev9XWeM7jiOJ6es1l+MrNE1FQ2yiGg23bpwqJUHFXrCuraLgO3Zm122p4fW4VX0uLxm/1wm2FTk0LUMqro+GaqnvpPgmA/86wgilNvMttLQizVa7tAqAuP58hFeVCcDwJ8bM+ufx+ue+/hCybBQLc7qCgm3bJksy6R5UXdw8yrZ3tRL+U3WKpHGCZVls9LqsViscx+HJkyeomsZwOGS0JVD01dXVG6dSUYgZ42qx4IPvvV+TA2/evAnAy5cvy1SgpdinNKSdPz4+ptVq0e12ef78KaGi0mw49V6h4kFVXUzFKq86sjAMOTk5YTwe02q1yPOcxUKsiyrTRdHLiSoiv/Z+VSCNpolDg9ABbAwlRSWn0XQxTatM4jZQVa2MkIN2u0NeQBhFuI0G7U6HrChYziM8z8NxHDY2Ntjc3JRl7/kFeS4HZLUAdxxHbIjMcjWSpjTbLfb29piVFuXV5OA4Lru7u7hug9PTM6JICs2rV8d0Oj0cxyHLCgaDES8Pn5KmCXt7e3VS+f379+vCcXFxgaqq9Pv9ev9YBZqMhkMsW1QdcTwTXa2ilyCTXiOft2/f4uTkWG7Q8tA5Pj5mvV7XwvIsyxiNRrUsrcqQLIqC/f19ZrMZrVaLs7MzxuMxBwcHdHo9PM+j3W7X13Wn08F1ZV+q6zrz+VyKpGEwmc5IYpnCdMckzmRHVpCVBg4G4hQj/w4CkimqUkccvnP3HVzH5Xe/e8ZwOBS0v6wRYRiyWq3qMOn1es1oNKoLVLVqsG275poZpoGey/tbUWDCMBSDUNelyPJ6N3y9Tn2jmDmOU3dV11HF6t+jMlqs2ldVJ2KFMBiWSZbnZGVXppWFTKkIqdeoFeVdUf+pAHmWf6PAVYWmombkJSVd01/vEF6rFd7M3HxDoH7tH8U3Clkh9IOSqFoBENUeLfJFKJ7EQn6sjBcvLi5E5qPrbG9vM7dslosFqq4xn89rs8mDgwNM06TT6fD06VNarRYPHz7k/OKc0cYGv/rVr8rAXVENnJ6dYVsW8/mc07Oz8o1tsVouOTo6qgm9o9EIz/OYTCZiCR74NWdIqBdrOp12TTytrJQloTytgY7RaFRbF/t+SJQkKKp0XEaek6QZRQG2Jp9TNXEpUVQRnzcaLo2GS7NpYyoxGgXdXhvTkqTrJJNoMVUzATmU4iQRo8woQdF0cSnQ5HnOZjOGgyE7OzuMx2NOT8/qQ6PT6UjknWXSbDRJU7m4V56H5YhjR14U9cUuYEiCqorDrqDkcv3OZjP29m8QRTEvXx4yHHbLHaJBmmbs7u6yXC755JNP+Iu/+It6NK8S38fjMaPRqERXl9w4uM3p6avydfRJkhTXaWKaBooiB97p6SlZJkBFqyW6xDSTg7O6Fit1hqqKB11lB1R9br1e19fo5uYmiqJweXnJq5MT9vf32d/f5/zslPV6zc7tWyRJwtXVBaPRBuPppaQq9fuEWcLVekZa5DRbHZyGy3Iyk1Sl0uZdrcxWDYNCgcVqKSNx+fP9MCQoUf8sy+pdYtUxV7rMqitdLBZ1Iev2eqRJUrr0itLBcGz0kvPmOE4tl7J0U6ZC25HDq2yovrOY2WWcVMUgr614yhdY03SS5HXIb/ULy6mZvfG9FF4TWxVFeb2fv/b3qk9Sys7tOvO/+lPTNMjlv2V5Xo4jGaqio+gamqKgll7r8hwK5M7IUFApyBAXjBwKQVVRFCmQKKXThyqSp7xA1010QydNc2bzpVAJLJv+aIM8Scv8vqQ0qVRptQTVtCyDvCi4HF8xOz0pn0/Bvbt3efDB+2i6zsnpCYqi8Mev/8hsN
uXg4IB//OQTVEUhtmwCQ2PprclyaHf+X8rebEeSLM3v+9lu5ua7xx6ZlVlZXT3NqVkAQZwBySH0ABKEecDhQ/BGNwIl8EIAwSHV4uzd1V1VucXqu9u+8uI754RnVFYNZUBWVkb4ambnO9/yX2Z8vLnj5uaWf/Ov/w0//PADq+UK24ZoErFer80QxrIs8QM4JBz2a+ljui7T2RwviMiyhPc3HxlEEbbrsFwLgLZqaqbTCW3bsd5tqNuOvpNz7yvKkG1bNI1sXH7gK8s0T9QqnBjLhjAMRJ008vFsOafxcIgfBjR9R50XtH0rmRoWTVth2eB4NkWVUdUFXd8JWyKUHtP9/QN5UeG5HpPJlM1mS9O0XF5c4fkBRZ5T19LTdD1f0c88LHqiQJyzbMchV8OBJBFX8mgwwHNrpfgh9oiWbXN3f8cgDqT/57tidAwq6Iz4m//23zg/P6VtxWUqCFyKMuHDxxTLfknbNvSdmIKEQcR4rD0lZaiSpgdR9zhdUFWFKaOSNOH27pbZbAI9PD4uDUawbVseHh8J1DoUrwjpi8VKgSIIAk5OT3m4v8fqe/aHHYfdlsD3sXtxQKqHQ1zXp0cMRvaHlKqqCfwB46nwjKuyZDgYEISemPpgiQ8sStShkwqpaSUuvLx+Q1HkfPf77xnEES9efMF2uyVL98xnAgq/u783QPc4jrl6cc3dwz3bnQiTXpyf40ch9W5Lkmd0Fgy8EV7vEfkBvuNStRU0HbgQqH5dWRTkWSb97J8KZlXV0DQdrvvEcteNR2meejiOh227JmsSmgvYtktVNgqsqgx6W0UKVz2vqqg/MUTp2yOUPQJa1WltXbcqoNl0dDTKNclRllRt19NWjSKIqxLIUrJFvfR5rN4CXEkAu05pp3V0KrxZtvMkNtj1dHUPnfYacHFdX01hbYWMH/Hh9pbZZMZiNmcym1MXNcN4xHw+ZZceuHrzhRjw/t3fcbpY8Ks/+UOavuM//af/B9/zWa1WrJcrmXYul0zHY/b7HfEgAsclzUsG0YD/+uu/YbcTUOaHmxt2+z07hThvmp48E+zOfr/nZD5j+fhIWda0Xc94PCTLMm4fHnl/c0sYSlBarj+SJok4jCtg7Wq7pOskW/Udm7ZvaauazraIY8F8OZaI6oWK59c0Fa7TiZ2YbRMPI6qyJMtzLi/OZHDStnRNS9P3xOMRWZ6z2iyJB0OiaKB6nw1R4DAYxKxWa7BsbMejaTvKujE4oq7rSNKMIIgYTSYMhyMDYF2ttyY7nk/nWFbHw+MD6SEhGkRMplNGccxwPGYwaPnw8ZbNbkdRVVR1zd/+w9/z1Vdf8cWbV2z3OyZxjEWHY7kMwoCT2ZyHh3viYUQU+CxmY/Lcxfd6zk8nMgFvUsqy4O0P31JVHXXZ0FbShmnKir7HEK21rNTp6SltXdNWNYMgxLM9vMDDCwSeVKrMI1SqEVdXV3hBQKc2iqIsWakNTdADco1cegLXIXAdVsoHtlQ4uM6yKWpxn9/vE9pe+lBxPCQvStIkJfA9yiolr0pcR7J41xMHqKoQgOt4OEFYIxbz+Ql1XbLdJ7h+wMXFpcFoBmp6uVqtuPvhB95+lE2+tyxa4GG9BqBuG2Ug3FEVQp6P4xirt/BdcRKrq5qd4hbreKHL588GM50ZaXCs7ufYtm0wYPo4hkjo5z6ffuob0bbtT/pon6D7+/6Txx4TxI9fV1NBDJVJB8S+p1GqGJ5taYtFkcg9rjNtW3YXy1FZo7oJVGlqYROEoZS76pn6xNdtS1nXrNYbhsMRcTxivdrQNz1fvn7N+dk5++Qg2lxtx3qz4Q+/+YYX19fc3N1RFAUfbm7wHZcw9NVIuqAoMzGIdW0pwR1H+lfjMY+rJdvNhldfvOS3335LXVU4ts1uu8X3XKpaYCR+4LLdbdlsNxRFRRCGuJ4E+8elKGOEkc9wOCDPM4pKsiDoKNUUzrZtptMpjivZbRCKI1UQ+gpN4+EHLlEUqmxQppVCtfFxPYf9vqWqatq+p6pbXF9Ay7YrgoF2WeG5vrqOgqPSbt3TaU3fC/d1vztQ163hE/a9paZ1teKVluR5yePDUkEdTonjIX3fkxcFVt8SK05gWVXc3t7S9TDYbpnPF8L5DAOiYczJySltJ9n05fUl/7Td8PXXX7PfbaVH2NXsdlvGkxFZlvL27fe8+eoNVRXTtjXn52ccEnGAenF9SV5U3N0tCYPY9IGenI18ZYbccXl5aVgFMizIVa9wQBBFpi+q+0aC4YrMOtAOV5ZlqYGCvFfX1Mapvu077u5uaBrpP2WlcFh7pSpTty1t29Mh10PWac8g9Akcm6aJDLNgNBqZPux2uzXT1CRJ2O12AAbloCfnemhhaJFFgds0pHmmlGYiM6mNbFGUPWx3REHIy+trFmo6+vDwQHo4yGaQJMb67nlv/WfdmfRF0MFE986ewymOoQ1GXVYdx89//nh4QvzrQ/9bP0//7ng62ve9yeyeB8S2/VTnTL/f8+P5T4Qy1eN4Sqa7640UkGVBqwKpLCahzwwCWdjr9ZrQC3BcyTCcxmUymvD6i9dcXl3wN3/zN9x8eM/pfEaeZbiOzenZHOgJfdl1XAuKqqZS6hD/7f/7f3EdlzDwefnyBR/fv2ezWTObTtinCX0vyp9d0xD4gcALkgOu59M0FY+PdwgNSQyNy1LMH6IoZDgcSUmkJLE9TzB6k8kYqwPXsfG9gMEgNPAX13MYDhXXsGvJstRcJz2xHA1FdYVeJpxt0yNVnKMc4kUiyPdtoihmNBpSFJVyJaoJgpA8LyiKCsd21VCpoWlaw/LQCxksRcRHUZSuAIumLlitH7m/f8DCZnFywi9/+QfMFws22y2DeMiJqhb+4Z/+Ueg9sbg1/dm//DP+xa9+RZal9H3HarWkroU29fr1K9Uf7phMxpRVbmwCy7KkArregt7GsV0DBNXEch0UbNvm9vb2qG0jPcDD4UDbtiwWJ/zuu++MvJRu5AMmsdAIA61TpgNJ3/e4tkXfNYi8tQOWY0RLNQQoiqTnFBaC77IcGWC4TottyWDL1T64YHqsGgKi4T56HUorQuSHhsMhnpra6hiSpim1GqjN53O8/U5EJzsx9WnblpubG+q6ZjGd8fj4yL1ys1qtVlRVZQxXRqOR+Vme5z/fM9MBTJ9AfRMdL/rjoPQ8mD3Pyo6DzecOvSD06xgu5LPX+LnnPf/58eewnwcy81meQLfyRPl5WVUylLDFA0AmpLK7OI7DaDjm7u5eSrtXc4IwYLPZUGYli9M5wWhAkif8xV/8BZZl8Xd//7d4nsdXb95QVgXxIKSqCk5PLsmyjMuLc3744XuwWixLJsO+79FZ4IcuX335JU1bMRgG7A89YeRhWRFVJQDctm2wrI4gdJi5IxwvoKwq2rKh7WqlhfVknGzbFp7vUlctZdUQBgFRNJUFWRREYcBkMiSKQoIgpO87mqZWSg0DhsOB4vxFBpogk6sGsIjjEX3fEYYeXSdDA8uyqapa2aq12LZI9IShaJRJcG05qD6ObTlG/6ooSmVj97SgBFIyIAyECL5cLo28
jg7A52fn2LZDmmXc3NyQ5zleENJ3PR8+vOf+/s7g7hxXBEmXyyW/+oOvuf/4gel0zHJ5T1nlTGcTMyh5+fIF+/0WrJ7z83PevfuBtoXxaEaSpIShZFQabH1cWuqFrzOpNE3Z7/doVQ7btqmbmrOzMzUNrcxmoXvZbdsaJQqd0ekFHUURw8mYMkv58P4eyxKOtZ6Oaoqc+Hz3NJ2sF2wBuGZphu95lGWOq9oAeuinBTzFeEU4km0rE3490dUZflMJNUwHPzHBll7l5eUl8UgyzbTIWa1WJEmiBBFCFtMZi8WCUuHjtJ6bBuE+KeT0TxhQdfzY0KR7InofB6KfC0jPs7SfC0QmgzOZ1KfSPseQi+PM7Djb0s87/qz6T6dwbvpx/bPP0ret9M14wrDJLyTQ1cooV2c1VVXTW5hRcJ4Xoo4ZSX2fJinj0QjflcljuXrgf/qX/zMXF1f85//8nyjzgi9eviDPM7quwYkCBnHAdDLEtjoct8fzLYIGGUpYDk3X8cfffM12u+HkZMpvf/Mb4njA+fkpi/lU+HnbLfQ9eZ5R1SWT0Ywei6JqcDKb+WwiEj5RRFWLzdh8PjXO2s74Sb7YDzzSVEC2fuASxwJU9DxfTaxtQ3JOksTAQ8bjsYGQCAcXbNuhrnvFf7WwLAfP9bDtjsDvqYOWsqi4u31gu9kTDSICP6LtWvJsR9f2+KF71E5QN6rCpbVtx93dHbPZnHggsjir1YqHhwdOT0/xfCmvv3j9mjiO+fbbb7m5ueX27p6z8wvyouD9h/d89/33XL94ied55FlGPBgwm06YTybYXUNy2DGbTxnWA8PN1LJA6/WKs7NT1PiVIAiZTRe4bkDfWYxGnQmUMgDwFORFpnh6kqzLzLqu+fLLL/E8j1//+tdMZnMDpdHl3W63M25fWkjRtm1Tikqg9IkHA3olBtA0lfrMAa7bP4HAi9J8Lh0Um6qkrRs838N3beJBbIC6GqmgierHlZbOEvXUOMsybDojISbO7cJftRSwPssyrq6uSPKM7777js1mw3w+ZTKZkOc5L66ucRQMazKZ4DiO0oRbGmkiPe39WTrTsSiiDiA6CzoONMfB6XPg18+Wds8yuOOS9rhM1YHpc8+Hp5JTZ1XPy179s77vfyQG2bXiyuRYT+wEyxJjFN0j07i4rutou17co9TPHdtT4+EBgyAiU/xB27awLZeTxZQgCPjrv/5rNpsN08mU29sbwshncTJnv9swnY7p25po4HE4bLm6uiA57EjSjM7yWO/E3Xw4DCnLlFevX5ClKWfnZ/RtK9mH0+PYNus1JElLELrUTcsinhAEHo7r4bjC62vblvF4RBwPSNMDo9FIqTxslGKD/JnNpgS+i+vYBKEr4MXOwg9cgiAkCHyyLP/kmgV+iO04DAY+nuuTJClJkZmsoGtRDtkuYq/ZQW8zHI5pmp7tRuzmLAt83zcEZMlQBcMmFKxWqbf0ijgPtmICjEYj9X5S1iVpYqALZ2dnDIcjVusNXS/0oOvra0rV/Bf1DcnMxuORTFOjC/7LX78jCEQ7H+Dk5ISzszNFXevw/ZDd9kDgD2gbKePHoxm3t7dm4WtsVRzHZhKpndzX6zUvXrzgm2++MUFOB5j3798bSWmN7NcYrMlkYhazzrQGAxFFyLKcIi8oC2GbNG1LWVYsFh0TJVMkRHope9veUhuFZlSA59gyhfZ902LQYhOistF9gvXU5PfjhGc8GZt+mua0dl3H7f29aPXtRKvPDXzjOarpTYMgVEbEar0peMf+cKDIc9O37yXl/2Tdf3YAcBwo9BM1OFZ/8OcB63MB5fgxnytFnwcek039TFb3vCw9fo3nZednS109XOB5D8/G6ntc5brd1LUKboqOo0QaXUeaoPRit1WkGYckYRyPuLy85Pz6nL//+3/E6jt+9ctf8k//9E9st2v+9b/6c3o6XKsjigJ2u4x4GBL3PtPJhHuroqPB8Qa0QJ5mvHz5gjQ9cH5+wfLxkSgKKbIMz7E5WUjPrcgz4kGI74uqxHgyw098iqIiDH1OT09wlQ1f1zUMRwOiQSD8U6s3ZrwgwoeDyKNrxfsSBL4iN6wYsMZD8UPUChau60s25QmVRkT5RO7IshwF6egU8LEkSVJs2+Hk5Mw0/MuyJstSPE+I5JWyD3Mc6duUZaVIyDmz2Zwvv/yStpFFpcUKbduhKHIOyR7bdWh72O4P2LZDVlSiydY0lFXJ5dUV52dn3NzcEA8igiBiv99zd/MRi4avvnzFYBAaJRRBqYcmw5JJ6p79XiPwG1UKZ8rrYGUWuwgLjozIZxzHvHjxgiiKOD8/Z7EQiSgtD3SsjqGzGE1uPz095fr62mRK+/0ewJSCepiw2e/YJanJvHrbEteytsdzPZXhtrRFQVXU1JZFXeZYfWuyT51t6cxNl7K6hE7T1KjHHicnIPAuzSDRKrLL5ZIPqi82GMZ8/PiRwWjIdDo1JWoURfzql3/Adr2hLEs2mw2b3U4CdZ4r4YMd4/FY2RdC/3PBTJ8crZihpxE6mB2LoT2fNgI/KuuegsWPe2rHfz4XCJ+/BvBJkH0+BGjbFseyPt/cP+rDWdYT4d2UqDwpzbZtS9M2YIsGWBAE+KrJuVlvGI8nxOGTLv9sOmU+F5ec//Jf/iu/+IOveXF1xW+//Ufm8zlff/2G3W7P1fU5oe+w2TzKxND3OT87ZX/YUlXiBo3dEIQew/iEOB6wWEz58OEDk/GENE0YRGJ1P5mMyIucJNlzsjgBBTYpypqmjQjDyFCamqah61uCwOfVq1cARpFjsVh8QofxXMizvRmmaOQ/SAM6jgOi4YCqahQeKyBRva6y3Io/gHK10vdMWVYms9JClm3bqobwgtlszt3dnZKm3tE0Rz6L3ZPptEaVB0FAT29KNEA+u+eLaGYoFJe7uwems5mZ+GVZRt3UFHkh1n6Whe3YPC7v+fL1lwD89p9+w/rhjslkbFogssnCzc2N+Rzff/+9Mu8tCfyQOI55uF8yHo9YLE4oy8IoqWjtNU1LevfuHa9fv8bzPMNV1IyOtm1NJjOZTAx1rWkaxuOx6Z8d66slSSKeqr7PfD7H94Wu5So5dltNyNumI4xCFosFWfaU7eogKq0Uuda6d30slKqny9pJ6Tj4anC9VjbW6AVNDtctjel0Sl4WRkF4tVopjuyEs7MzZicL422gM8LRaMTJ6Qnr1Zr9fm+03J5PND/bM7Nt25DNdbajd4rn0At9oxnepP2pg7kpzxQd6niw8Jz2ZFkioqh//7kAeJwZfnZC+lwGyLI+GQLohWkfDQDkfaXBWJWF4Ov8mKZtaZqWLM9JVekwmy7kxHkuNx8/slqt+Fd//uf86pf/gr/6d/+Ok/MTyqxgvVoxm0ypqoqyKAl9nzovKIqU8XCETU9dVfzw/fckyZ7xeMRoNGG5TohDsTrzHBcLi9FwyHw6YTQY4CpKh5ZzCb1AOHSuKz6MfcduuxFA4tkJruuSZQ2O4+HYDqEnU6ZxPMa1XCI/Yjqa4loutL3yChQNMOHOaYxUgUBaZMJYKwhKXTVKQmhP1/V
cX7/g6uoFh724/+hs+vHxkXfv3klJ4Ufc3z1iWZYZ+4fBgG7UczjssW2bxeIEx3FYLlfAUzDyPFckrHv48OHGcA13uy1xPCIexri+R1lVDMcTsKCoStNsf7i/J/DviKKIy8srgsBnNpkyiELieEBTC7n7q6++4uzsnNFoomARA6GlqSwljoe4jo/rCNQnSTIOh5Q4HuA4tsrghlxeXppJ3eXlJW/fvjX4Ta2NnyQJv/jFL0jTVEjtD088Wr2oh8Mhs9mM5XLJfi/naLvdmtIzTVPFLGjpLRiNxwILqRvyvCAII8q6ous7PNejyHKiMMB1BJz75svXnJ6esnxc8eHDjSln9aZ3dnbGZrPhH//xN5ydnXwiBKEHG5onqWlHeloL4pEQKnaLF0i/r2r1kKcwqIDb21u+++47bm9vRRhyJoORKIpYrla0fSfPb1u8ZzHgs2Xm87+fB7Cf6lHBE1FdBzj95TR/Tf9el4XPM7Tn08fj9/upUvRzA4ifOhrFMdPW9YadoN5rOpvRqilJ3QpifRBFeGpMbSFNzLc/vGUQhPzlX/4lbV3zV3/1V7x48QLLtSnKgjR1se2eru3Ii5y0qaiqnGEcCUZqKdObq8tzLi8uSbOE3/z2W5reIRyMKAtxgxqPR0xGM87Pr6Dv2G7WNI2Y1U4mM+lZWZZIZucFtu1wfnbGerPhd7/7ndGur+ua8/Nz5RCdGz5jVdY0dUuayE5tOz2eZ9O2vSJGi258GGhC9l7JvMiuOZvO0UCXwWBAmmZsN3uTPeiJV9uKT6emsWjt9jzPjRBhGAZcX19j21DXlbLpEzu5LBNPRw2STFPRyhqPx5yenjKdziirmoflirqpcZWaxcPDiq5t2GzXJlNxHYFOaN9K13UYj8WEVqoPn/V6wxdfvGK73bHb7SlLMabVKhOHQ8Juu8dxHPb7gzkHlmqy73Y7FouF6TsNh0POzs54eHig6zrevXtnsGPag1PTq3Qf8P5epubn5+ecnJyY3pKW/9GaZ7pUC8MQz/dZrpaEoSi9bhT39+T0FKvrWK3XbNYbXry4VpJJgVJ8DZlOp7x9+5ZXr16S56VRpe06UfWYTqdqgvvOrG9NVRJfTVlPh8PB/D6OY4UnzNgniXhlKGOSuqmN18Hp6Snn5+cMh0K6185YOl4cJ1bb7ZY4jjk5PVFViRw/G8w0ePZzTf/PPc+yLAOsO86ejmvr458fP04//1jm53Pl4efgGM8+iOF79r3MLI+L3rZpsBXr/jhb7BWlqigK+k5czD0VOLM8JwLB0LgB7969w7Ec/vf/9X8jCiP+j//wH+h78Hyfqq1ID3u6tiYMAxwg2e3JsgODOCI5+Pi+mHhYvU1VttiWT1v1tHWP5wcEXoRt2ez3O+pCJlKuLQT4KAikHO1EzcMficR00ia4jk/dNux3exzLZrY4AeQ7RMMB++0Bz/FJEunBjOIxWVaIS3fTU6QVw8mAMAgp8hrb8sWNKq1EGjuI2Gz25HlNlmZsNhuauicMI4qixPcDWsUhBJTsjChVpGlKEARGqcNAYtS1125LQeBTqUxKShvfOPfoiWpZliSJLJgnOeiczXbH/XJF2/XCPZzJ9FbDDsLAJwhCuraFvqOpnsrUvSubVOD5RnTwcEhwHZc0yUWp13Wht3EdH98TWaXhMAaEyucHLm0rxs+WJXLXWkpKQz/0/X11dcXr169pmoZ3796xXq5WWMEAACAASURBVK+Va9E5//d//I+GLtj3PTuluKKhGJvNRiAOccx+v2e1WpmBg8AVSoLIBtvFdjwcLyAaDBnF0nqg75jOpnhqsxHRS5/ssOd0vuD2/kH1RG3Tz9IuWePx2ExjAQP90BPVvu/ZrpfmuhxfW1tVN30v6jSD0dBUbnme8/btW96/f89wOKS3LequpUVKXTfwmcxneGEgrlmNT9N1gov/qWB2HFiO4Rn67+eZjD70z7VG/zHAFX4M+Th+H/1exxiSY6zb/59DmvM/LkOPP4dMOT/Fqdm2EmdUO4HrC3UHLFOmVFUFvc0f/dEf8ctf/JLf//737Lc7zs7O+PjhhuXygflihmVLgHQdB8fWpa0AUfMsw3NHjMcTETfcZTR1j9XbLGZnZGWL74krUVGUuF5A4PvI+rNx3YA8L2nqgr6zcRyboqjoe5vZdMo+2WNZO0ajMRcX5xQKGNn3Pff3j9i2y253YDSUsf9hL0YSURQzHMbM5jOquiBNV1hWhYVHWcpGNJ2OCPw9+/1e9Vo6DoeEJElN/8q2Rabb20jm4LhSvk+mY2bTGU3bUFe1gFFdF9sG3/cYT0ZEYYRtO+Zm1xlBGEqzXA+i1us1h32iZJSeTIyLqqbIc1w/UI/bKMBtiW07Zqo7Ho2x6Al8aVlkaULbtsymM8KTAR/ef2TbJ/zw/Qcl4CiAVN3Q1hLQ+/2O4WhAPIxYrVaiJJzIUODFixe8f/+ew+HAq1evDHxkNptxc3PDDz/8YOAVd3d3VFWlnJREJlwT2H3f5x/+4R+YTCa8efPGKHZcXV0RBAG///3veXx8pK5rPN/H833OVDbddQ2j0cioTkSB9Kx8z+X25pbxcMj9/T2jkSj9vn37jn/7b/8Xbu8fjVS5BiZrrujbt2+NyIJOVHSSoZMfDbbVQxutjKGJ9FqTbXyExxNlE8lSf/jhBw6HgxmEwJPKdaMUbruu4+HhwbAP4Gcys2MC83FgeR7Mjh9v2zaVEVl7yrT0c/SHOX4f/bf+wloC6OeGCMfB6UdZ4rNA9jy7M4C7uqFrn2p+y3ENSruqKtq+Uxr0oTl5XdexWJzyzTff8P3vvhdlC99ntVziez5JknB+cUqgsDq+K/AGz/OIwojxcEQ/iGX20EPfdYThgJPZKXVdkxc9bV/guwMC16dvLBqrx6Yn6wtsy8JGsrqu62jqnrqsKPKKvnWIoiHRIObkVECXj48rfF/I0pvNFtfxCIMB60ZKp7pu2W53ylnqhMXijI6OJCmxrYBkX7LdpJKRej5N0zMaTQGbMBwwmy1MpgA9222BZSkVYsfC7oQYpjFQfuDSZBV1U+IHvhJJrMmLjL5vGcShKndGxn4sz2Vz0TSZphHk+26/ZzSU66rvqUE0IAhTHN9nOBrJglDSPF3bMp7NCIKQ4TCmLCTbqDVI2oLtZo2zT2lbm8X8jPV6S5YVTKdzbm8/qlaJLK66FqK8ZWEywDgek2VPi1cHK33/V1WF7/ssFgs+fvxoBAw1dTDPc7a7HX4gYpw6s9Glqh7AHQ4HkiQxGd9mszHBoO07ilL6Zn0vUCRozDRWP18+ew+IdFKetTR1iec5vHnzJUVRcTgcqKrK9MR0kNLJh/5susLRa304CD+p7PS60yVpmmdGF+44AGp4hu+LzLkeSmh1Hj1YOD8/NwPKn3Vnet4re55ZHQeWzzXgP2dF97ycPH59/Tx9Uug/xZ3pQ5+8Yxzc5zK349f93O81n1OXOVroTb9urcwdsC3SPGezWROosfh0OuXliy/49//+33NxesF8Pud3v/ktruMwXoxZzGeCik8qbKAqK/xAxutFluKoYNm1Gm
gKXWfRtRZFXtO2Fo4tk6imalgtt1KaRdLkHwwiZlMb3x8gDAZB1PedKH7URcvZxQmHbG+uV1EU+J7PfLagaVoznbQsmzCIGY9E5TVLS1bLDav1mrZrlbTznvV6zZs3Q7xBKAHRdRmPJ+rciS7bfr83/Sc9dZvN5Fzo3kfXtUbiZTgUHqX+f2kYR2bDiCIxVdbXJ1UlbVmWRhZGZI6eWiBN02ArDmdvuwyUC7zGpBVFyWAwIAwDzs8vaJqK929/IM8yLs7PmUzGLJePfPx4BwRcXl5TljUf3n9UOnIpr159QRB4tG1FT6fknC3iOGIwCLm4OEP4nIJne/HiBY7j8Pj4aJD72kVe973yPOfx8ZGu63jz5g0Tx+HDza2ZdNq2bVy/v//+e+NJofFqWthQBzNsh+3+jsXJgrZtpYHv2KZ/t9lsqMqCb775hs3ykevra1zHYbPZ8ObNV9zd3jIcTVku19JTU6YkesKoA6KeWup+2fG614FNwzyO11ZZlkbmSkO9tLbZw8OD8pMdqsFVZvjhWtzRtm0j9jgYDMygAn6izDwOTp/DkRwHqOdBSn+pY6Ct3lX0Dfy5IYMJXpaF1X9KRn+e4enX/ySD6+WPZdvYR5+P5303hfTv+6cGpud5WAhgt6zkJhPDjh7P8424Xdu2/PrXv+bs7IwszTgoK7JRHPPFF1/Q1BVVU1A3pRKvrBjFQ5qq5nBI2e8PnJ2c0dMxjGLquiJPMoq8pK4qRInEpshKdc56JqMpk+mIqqpkzG550Hc4lkVZVFRljW170NtkWUFelNzfPZAcUkajMYXquc2mMw6HRGSwLWE4xFHMIIxFrTbNqWu5+YejEUVeU1diubffp1Kebg/UdcVwFBMEPmUpjliOI5tYHA+U2GRJ29Y4js1wKDfcZDIxWnjymIq+F+9I1xVV0aapyDKZyOopWhwPTVDW91AURZwsFgSB9M+2261kmJFI/ri+q+5XMcA5PT3FYk6g8E+3tzf4nggo7vY7JpMxYeix3Wx5fHzE80acnl4wHI64u7tlPJ4SBqEqL1OqqiDNdJAWAUisDse1OT87l5ZA3ys7P5lWhmFoLAGDIGC330slYtsGkDubzcjynIuLCyxLzE32e1EuHo/HxsW7KApTmpZlabTAxuMxo8mUV69fMRpPyTMBD9dKiLKpBDUfDyJlqqKkomzIsoQoWAhUorfNsEW3fHT/TNOTNNREczC1THvbtkJUPzIsdhzH4OXyPMdW/UnNIgmCwJwnzceM49iIOmqxzpOTEyxLDGIslLnRUb7yz2ZmzzOknzv6XsizlsJsydPEHfxHpO/+KAD9xOT06e+nMvW4/D0OZJb6f8dx6emweuhRWV4vIFesHrvraTvxbNSlgOsKSLBtGuLRmLKpyHZ7wijiF19/xdWLa5bLJX//t3/HH//xn/LDDz+wXm64vrzk6ze/oCoK3n94x9nZCcNhTFXbZJkgq6UxapFlKYfDHsu2yFKF1K4KfCVW2DQtgR8wOz0RhY6iYhAFDOIQR43h0yTh6upK6YG1HPbS65GdzKFpOtarLZblUlUNy+WayXiKY7s8PDxiWarkjWKSJOGQSIM7z3NTRgwGA3xfkPwSOAZs1hsFxRAxynATMBwOaDstkS60pcl4xng8VSBdW6HXHS4vr5jNZtzfP/Dx4wceHh5VRuZRVZk4aTetKmc9s3B6xSHUJhh6Z2+axlj/7XZ7JV5YMw2njCYTcG3Eg6MlDH2uLs85XSx4XD7QNg3r9YqReq/xeIzrCU0NWxyvttucw2FPGGoD4oKTkwU3tx8py0KVRg2OK9ctTRMO+5SH+yVROGS9Fpma9XpjFnjf96RpxnQ6Ybc/YNk2g9GIrmvZ7vYkecZ2fzCepo7jcH19Dcj0brEQN++3b98az0hNdeq6TriuqueW5blRroiigEEY4nkucSzBoyoLfvOb31DlBVVVEIURvh9S1jUX51e8+/CB0UTwee/evaOqKmazmcG2aTHK6XRqICSHw8FknEUhfMrD4UDTdVRNQ6KyrI6egeLX9r0ocGhf3iAQ5ECRZgyjAbRSfrdVjd2Dg4WDxZ9880cmMB5XjP9D08znvbHjQPKcduTaTzehEWO0bZxnWVXXi1CihSWCiS00itPVWR12b6FdiQTPJjAHz7F+pKwhfTZoe+G7tRxnjqhsTB4zHI5Zr9d0TcMgHoHlkKm+zEgRnuum5c0vvuYP//APWa/X/If/8/8C4M//7M/5/rvvsLF49eJayp3DFsuymMwnuK5DmiVk2Z6u6aBvWW+WdF1DEPoMRxdM5xOwW1bLNZZt4QYOeZURBCH+wOX+/obzizO82GM0jPBcC2hJDnsc2+HlixfUdc3NzQ1d1/HFF68kY8gysiwlygIGcURZSX/lYbkyZYnreCRJaqaN2e1HKXVKMXFxHIvFYmY4mfvdhsfHR+nZKHBkFAzIsww6VMDp8f2I+eQEGwfsnqLI8TxfKWx0io7UK9DkjLpuqaqSum4Yj6f0vaXEEzMury/ZJSnbg+z0kUK/A8bdfXvYMWgHWK6NH/m8fP0Sy7KMq3hZC+i2mQwo8x3r5S1dk1PkpQSAYcTNzQ1lUXF2dsYXr74kOez5/fdvKaoSP3BI0i37Q4dld9w/3Kk+TcnFxYVB5C+XDySHjNPTU3bblIf7NScnFxz2KcvVkizNCEKf0XCMrVyNlustVdNiuy4np+ek6YGOR/aHhLq5YTwacXNzJ1pgQUCWCe4xywrluxkq2Z2DrIlOwNJxPKSuGm5ubnB9nzQTeeo4EteteCgB5OOHj+Jf2jTMZzNse2rWcVmWZEVBh20yQK1+rHtX8/ncNO11uad7fnpd9/QkeYrlWGRFRtVUWLaF7Tn4TkBaZBS1VC9XV1fGE7brOtaPK7qmJTtk9G1PmVe4tkddVXz3u++5uLigLhtc2yP0pZXzk8FMHz/Vj3r+7x9NDHvrR489LiOf996eH23b0tF/UqICUgbyRDA3TIAeg+a3LDnJvfUpE6HvJROjR2SFr6+JQ7mhtV+k67q8ffuWi4sL/uBXf0gUhvz6179mv99zdXXFIIp4/+4dRZqZi1tVFbZlGZxPVmREnisuz21GVYmRqlZ6kJszoe97JtOx6SsAtH3LPtnhey5lKen2ZvuAZbXKMSnEcVyyfE/TNvQ0FFXBcvnA/nCgLErKMmc0GeKHFwaHpCdwmn6ip0L6vW1HfAkd16JuciyEFiTTug1NUzEYhFhWT9fV5HlC23ZE0VT1yYR6JNSfhMEwIoqkqb1ei7BiWZY8Pj4CYq93cnJiMFJaG2symfDhwwd2+z1BJDJLuh+j75uiKCiKQhr3atCkKUa+75MXGVii7LpcCil7PpuYnlOapixXj5yennJ5cQkIlKisKpbrDUVV4XouWZlzOGxFwcO2cd0pDw93RJEMUxaLhZJNwmj6R1FMVTWkSU4cD2maFt8LFOBTDErEV7RnPJlQNY1Ikzsevhpu9EVBlueUWW5MdE9PT9nv99zc3PDixQtmsxkfPnwwDum6kd73vZGRn4xisSFU5bbnuxR5zoYNd
VOLMoaS0NGZsGV1NI0AyBeLBX0vUlQaP6avgeM4JEli1tWxTZwEw5TpbIrl2KZnZvrvTYPVd/zpn/4ph8PBQFI05ens7IxxPOT737+FroeuJw4jGtdjW1bURUmVFwTTmXzmMKKLfyYzOz4+hwN7XhL+6HH8OIj9c8dT6ak4oH1H3z71xCxLaEaWJbLZn2im2dZRdqiC3bPvoF4aC3hxdcXt7S1t23J9cWmcsbuu48WLF+ZCLpdL43kpmUmn3M4jY/axWCywHYeHhwcGgwHX15dEvkueB2w2mJtMI7617pMQoy2SJDGTqPF4zHw+YzSOqOuSouqx3Z5DtsFyG6o2oylbmocS1/XIy4TtbkuW79VUSAYbVSucT7n5auq6UIOCJ12t6XRqvpdtQxAGuK5FmopzUZ7n5HkGVsN0NuD0dEarvv9sLlle15csZqdE0SmHw56myRmOBnSq32VZlqGp2LZtJnlaTkaXs7e3t2w2G66vJdN9XC05pAdF2ZLn6sXTdZ0pcQBDwNbN9KZpWC6Xprk8GMiwoK5bikIc5bebLbc3d0ZH63A4GPT8drslHojNnAhPhlxeXjKfz3l4eABibBspsRXhP4oCLKsnDH2KwqMsC1P+6aa5RsG3CCTI9Z+mirqhro9DkjDwA9MLu76+5uLigrKUrNB1XSaTCZvNhtVqZUClk8mEeBBjObaZ7hZFYShOURQxHY358ssvWT48fiK1BRimQVmWYLsGs6b7YjpgeZ7Hy5cvzRBHb0Zi6FKT5smTMcmRe5LGzTmOw3fffWd8T3VfbrfbsVqt2K03nJ9emiRGU6R00ARRrdW97p91Zzo+fgpp/7mMzODIrJ8WRPzcwOCT36Mys17EET95D1VO8uzzHI++bQscx6KzPiNdJCJOrNdr4jgmDgWEuNvtcByH8/Nzrq6uSNOUd+/eUSgreUHMZ+RZjqf9Do44qsnhqc9xfn5BmR5o2/qTk6ybnWEYGpkTvUC1qJ5w004JApsk2+F6U7BEmz0vEuX5WEmPI4qom4qyyuhp6WkJI9lB67qiKBXJ2IPpbCTeg3HIqIjoe8ksZKopAUIyTZf1egk0tF2B48J8MWY0GrFYLEiSA47bsVic0DQNh8MeP7AJI4e68ciyGtvpaWtR4Oh7TNagNzWdIVqWKEjofo8mLkdRJGV0K5AEDcDWjef7+3txuFKORX3fk+eFaUi3bUuapgZRLx6iMkXt+57T0zOKoqQspMR5eHgwgXW5XLLZbKirmsALzBRPI/O1M5bnecZ1SAcN3QoZDEQDTDfJtRqslvtxfE/JuMPpyQmpYkZous5gMKBpGq7OL0wPSnufalNgrQZyDC7Xvc7BYEBvQRxHlE0tevxKDyxJEsos5+XLl8Z301cQkr7vDX4uyzLysjbfSwdbrZihp4wgGffDw4MhiTuOQ1nVxvNA9wr1OtYBTk8hNRBan5+u63CQjW+g/Bf0Y05PTw0vV2eKOuDq43+4zHw+0TzGe+k/XddJ0LF+rPL6OTjF84BmXuszn0c/0rJt6J4Mhh1l9GrbNh3Ck+usJ600U6qqF2gaARI6rktVVUynU16/fo3jOHz8+NGY5B7jauRzSV8gTRJOT09xXZf1ek2ldszLy0v2+x3pbkvb1eYm0+Nr7TWqKSti2jvh1atXhkBd1w1NKwFrOIyxrClNI+KE3mSoMpxGSesEuK7QRYqiwHXFTcj3J8IxbDTRWGcJBW07JM8LhsOIroNeuUZPpxPVJ6uo6hLLlvOrTWXDyMOyYwZxoOSDYpIk5f7+nrv7G6IowvNtbm7eU5YtQTgwe85qtcK2RYU0SRKDWbq9vWW/3zOfz1WwfDIb6dqe/X5HU0sQubq8pu97fv/735OlOeWoMouNHjEPGU2EvHwhfE+Dv6pbzk7PDfQn8ANeKh0z3RfSzWTBteW4tmtgCN9++y2e53F6emoGE2mamgnfMVlb/ztRtB1d/kZRJEIFrqP4s72UqknCWmng64Cky+sgCLi4uGCz2fDw8MB0OjWO4Bpvpqldeh3VTUPbNQJA9lxDdNcI+10jJHbPkZ4ZR1ZtJntsOxO0nvfHwzBkPB6bDaiuawPT0CyAjp7dfkOlhDt1RiZtlhDf91kuVzRNa7JWLfKp1WvbtjMbmB5OaVpe13VcX19TFIWZ4urjs8Hss+Xjs6D0/PFPQeMJ+3OMCfsfOtRkUw1BzSGQjeP/f6Z4oTMzLHq6T+AY5jXk5Tk/Pxe0tu3w9ddfc319TZZlfPjwgcPhYOSOfUVozzKZtnWN4IToJCU+LqN09rbbbfAdC89zTK9Bnx+d8mu8kS49AbM44jiCvjdj706N94OgNbiatu1kYttJYB4OZXqWZZLtnZ+fKxyWEL+jKFI3hvRDgqDH8wKAI8lhy1zDMAyYzycqc7PUjtkQx4NPpJIHg4jRaIhtWwZh7roOddPj+4EpATSO6PXr18YnVJcvOhOYTqcm+EvWdzBQBsk2G3rlvuU4rrn2bduZjHc4HBLHQ6M/lucF2+0e2265vLzEtm3Tb9OTMy1XvdlsTMBt1SIDmM9FzcP3feOBulOSNDpD0Atf+8jqvqScD5fRaCR6ZrZFWVUUZclytaRHpLt1mXa8zpIkMTQhLRGtTbePOZk6Q3tyK7cp65KqKonHI3W9gydl2l4yneXDo4E36CAspiYxQRCAShB0j1KDYkW12DfBXytY9Ko91Pc9nu9hW7aRKdK6dPo+lPshYjKZmKxby9C3rUA8hoMnnmxVi2+rfqzneRTqvjkcDoYHCj/hAXD89ycB4TMDgOPndV1Hz5PM9vO+2uee+8ljFA9LWv3dJxfYeu6HyY+xbj09XS8DBI4yRMkq5TH7/Z6XL19yfnJK13X89re/JUkSs9NpBoBOYYUHmOBYCpNmO6xWK1zX5eLigtFIXIK22y3j8ZDRYEQQuEbdQ/chtNIBYJraenFruRfPc3F8l7ZzsGwX2xF9sqYRWk/bWQxHEzUir7CbVpx8HAdsBz+IcFxf6YKFqlHe4nkhYThQgn6iCCuIflmIMhVrqGvR6J/NBmZz6ruOQ5IyGtrYtvR5wqBQgNcxriOTs67tuby8xrI9omhocEbD4fATDKIu0RxHTJE1IFOjvrseXE+04ieTiaKsrNSAIcdxPEXf6pUjeEgQROx2B/Jc3LH1onccVy0yi81mp0q+jrdv3zIajdjv9yYjGylhxvVqDV3PcDRgcTJjNhdQ5sePH2naisel9EexOopS7inXdQlCD6yOvneMvLUOsmEYkqTpJ6KN79+9I1Tof9/32W63rNdrZtMpdicAUw2E1WTtIAg+MQnR1YfgIaV8cxsx7dWDkTzPzQbSNxJgAs83JZ8u8XSW5wcBSbYxDX/9PXS/V09ZNSxCb9q6XJUExvrE/8H3QgI/wrE9+s5iNp0T+LIxbPO9KcXFaEZK9V1yMBLdvQW75GCYKrf3dyaj1hvPZ4PZTwW1zyHqfypLe16CPj+eB8Hnv7MAVDlpfq6f94xo/klvDJQn5tPn0H8cJfnzJ3/0xyRJwsePH03/QZN4dYkwGo1obFvG96VY
a/mux3q9JvKFcqG1pfROr+WLQSZWelrY9083JmDSYp0mxwpwOxgMWK9XRLYPlktdg4XHZCLZWFU9UhYN45FP11rkWvmzdUjTmr5zGA4nlHmLTUXXOpRFi211jEcBYTDEImG/yzm0OcOhS985+N7AZDyDaMx680hyyBFxRvku+11CU3ew2fP4uOT6+orLyzGHQ8Ld3YMpyaLIoywrIDd9MV2+aI9PHWySJDEORHoxymSxNtrvslu3BmOlF6WGAmg0ued5JkuwLJskSfE8X8DITU1RlGZyJqR1KV8OhwOlMq+dTqbEQ4GdeI6n4BAZL1684OHhwVCLtBS2VgPRyHQJnspCsX9SytUZnG55DAYDXr76gtvbWyLVAJfN7sBqtZOs3Q9MFnx2dkaWZTw+PhqZcj1N1xuO67pGbaJD1E5C1Wo4btD7jmsqGNd1GQ+H7Pd7k/WAmHZrSR59vWYzcXw/5lrqe1r31LSYxDESQQ+Z9FRaQ70+fPjAfD7/kcKObYvdcNO17LfigXB2dmZ6qjp71wbEWhdPHz87ADiGUejgoBf9MXFcj1+lh/UpreFzPbLnQU5/EceR5l//7PHyHPWfZ6XvMRm+R8bTTd/RqNS7V6Xa6ekZ8/mc29tbacwqCZljkKbejbbbLU1dG+ngtm2pVMnW1Y0RHXQch4FKn6Vh7NLUDX3fKA19z6TCDw8PfPvtt5ydnZmLfNxHk92ppW2h6SErSnwvoO16mqYljme4U48sz7Ftl3g05/J6wm67Iwik2UsPwSjAsSJsqyJLawLfoSp7yqIiSUrKoiNJ9hz2JXE84PZGCNJXV5cM4xl11ZJnjcKB1UosUDTc7+7uyLKC+7s1TW0JavtQqt6Vx3q1I1Lf5/Hx0VBtiqIwqO6vv/6aNE3527/9W9pWaFMa5b3b7XBcmZbpDE4rqp6cnHxSmvq+z+XlpTh3392xXC558+Yrri5fcH//IKKJQYTjuBwOGZ4bcHJySp5nKrOQnmXXdxRFTtPWptlelxVFkZNlKSDZxmAQqbI+NmDbtm1UA7smz9Ug4XFDVUn5r+lM0+mUx8dHNbDIRXliMFCO7o3KYGPm8zHbzYbxIDbZqtC5UmazGaPRyIhYvnjxwqDuT05ODNvg/vGB6XRMpYYow6EwKIRkPyZNU5L9gTiOzb2tB2Gz2Yz1eo3v+2w2G2G1qGuTpinz+ZybmxvjTaADlDYNb5oGn0AJMJYmi9Q9NhAAsEYLaB6mHi5st1tp4SgEgB8GjKcT/DDAcmzmiwVZkeP6HnuVuYVh+PPB7HPZ2OdwY8c/P+5dPX/884D4/D2ev+/PHZYt8tbPM8e+72n7jjIraJGdRtNAfN+nb1oeHh54uL+X3bjrzcRKZ1HH4L/j4A0ypbUsi/F0anoHTdOIpJC6oJ7nUOUpto0BHGoDCN0g1dgv3cuwLMs4/9iOw8SbUaQpeVHjDAP63iI5lOb7dK1Nnhckh4QsbUiSA2VZEwZDJpMpDlAUNVlW0DbQtVBXnQo8OVXZ0DaQHHICP6JrwQk86B206XGa7mnbniwr8T2fIBjge5IR7vd70izlw/tbmaA6Aa3V0nWW6vUJl1RPyDRhWluu6ea1vl/2e5EHShJhMwRhRI9lSit9D+qsajgc8s0336DH+R8+fCCKIq6urri5ueHduw8s5qem1CwKAedGUYxtP5G9XdcxmyeqvRGGwraoXe+TcirLMvNZtAJJ0zQsFgtDT1oul6RpSpqlVGVjEPhxHJtJrRv4VHXNer1mOBwKMFk1sXUfMVL31mKxEIyYCmZpmpqyc7Va8eHDB25ubgysR5eLWi5J9+E0Qr/rRD26Kisz3YzVedMcTj1hdryAOI6ZTCb4vs/79+9N9qVZCLr81FmS7/v0KvBHUWR6e1EUGcpSmqbKTFxAzYCRE+r73ohRjtS1L9W/Z7PZJ1Nsfb7W6/UnMeMnfTOPA9fz/z/uRel/m1S1/7ERyc/14fTPJdP75zFpLsCOkgAAIABJREFUtsKc/ahfdjSmtr2nCYuWMdlu1uy2O+MH6NoOkQKzWpZlcCzHCiGfBG0+Bf7qrMU5kimxbUf4k7ZcVM1H06m4BhZqIwrALBYAzw8pihsqVdJ0rSSjWZapaZw08suyIjnkuK5M3+T8Q1FUeLYlzuJZRt9bdJ2oxEpZlSrLt1Z9V3Gh7zrY7Q7YjkVZ5uz3gh4vyxrX9WmbHjyb8XjKeDxVXMgtnuubwKTPc99Z5jpqdVeRGIrMZ10sRGJcL7ynXotsIHle4nmBmnR1SltLFCuEP7rm9PSUL754zcePH/n48VaZZ0wZDUckSaaa5pip4nB4geMIwV5K4hGe55IkB5q2VtSzANdx6fzADGq22y2r1cqQnI9LqMViYVRkdYalUfIaz6cb9/PFgqIS8PDdneDcUkUy1xxL13WZKh6rFiiMoshM8rRixO3tLev1mjTNGY1ihsOhmIHEA+LRkPv7W+q6MmW4LsW0IKLnuJSqzXF2dmYGCsdA5aZplF7b0JSyuqUi91ZvKgwNk5ABoIXr+IxGEyaKEpXnOU0jgp2Xl9e8ffuWoihpmoPpfz5BTSxBRDgOXd/T9j3hYMDi9JTheMzj4yNnFxfkZclecTn18ZNWczpAfK5Xpk/scRDRAa7vPs20PpdB6eNHeLVO//8T0NU8Vv276z91dDrujdmWTRSF+FFoyKvb7VYMViuZ3Okg4qqG/jEuSO/Gn8fAiUlwqbh2BuzoefjqorquQ5YesKzOMAS6rmM0GpkM8e3bt0btYL/fm2mMTGmWVHVHPBQ4xc3NDVVVEccC/pS+k5DEq/JJq8pxHA5Jyu5mQxT69F1DXdZYFpRlRlmJ/2TTlgSBh2V3uJ6F51m4bkhZlazWjzR1RZIeiKKAuq5YLOY8PNzjODZZluB5AsCcTse4rmOgDQI9GXJI9pRlTt2WlGVhJk5BEDAej02zf7lcGsE+bRjSdR1d22NbLp7Xm8Cgg9Hp6akBo263W9I05c2bNwazpP0TBU7ikRxSurZVkj0Nh0Oipo2tCWi+7xkVFd1qcB2Xtm/NRqgBoHIulQiBbYnJje+z3+9p29ZkYLZtUZQZ89kJ19cCKXl8fCTPM8q6Zr3ZYDvCfNjuNeDZNj22MIp4fJRp43K5/ERZQqtvnJ6ecnt7y2w2YbFYmM9qYRGGMqnWJbpel3oiqUHItro/9eBB9+jquiYeTYzMkO5RHZf4erh1HNh0udp1HW/fvcP1PeVHEKhN/YlHORwOKYrSDMe6rldOWIHBFmpYlG5h6TL0+++/ZzAYcH9/r87rz/hmak38TxbyUSA6zlY+wZf1Pw5AnxsA/FR52fe9ZkI9OSc9j6G9OIv3R2Vgf0xCV6+pR+P6T9d1RCqA1KXsBB2fBmK9y5jv+fyzKdCtZzt01pEuG5CrssN1bdqywHZEcPC4rNbZnEAIYkajkaEW6YuGZdH0GXVXUKQ5m/VGkXNbOlqjx68b0WWTEQYRw+GIpi1I0i1J0kLf4FgO8TAmKxrzOcLIZTweUuS5bAxIyZUVBVWdKV5qSpcVlNWQqs7o+pqiTPB
7H8vuWK3vTc+wLEuyPKOqM2xHbS60pGlmCNbPp2J5nosbvLFHy0xmVveNULV66DuZhIWBwAriwQjH9rAtl/FICM625ZJnB8JgwGJ+KtSYouUXX/2SIi/ZbPYEoW+YBklyYDqdUJYB2sW9Vd+laeRzNn1D37ViGJPn2DYsFpJhlGVuMkzR+ReH8yxL+Prrr3h8XIps+HbPYi6tBY0jK4uS6WKuzlf9CexBY636XnwhNpuN0YnbbDaKPuWZYKKzoclkYjIn3Vu0XcfAQ3TGqGFEOkM7NiHRYGNN4NfDMI38LwphNOjApvFf+pp9MmRT36VrW/b7Et8Pmc0WzOcLI3V0e3urqhcHkEy57xuapkPjzD3PM1hBrbixWq0MJ3k2m9F1Ypw8Go3++WD2HCSr/34uznh8kuSHT2Xm5yaVz/tmx++hPS57S3EunwUywPTLPjdtxZYTmhZPqgH6oti9BDlHlzM9pkemm5N6+qiDj95J+r5HDUnJixzHftJq6tR4W1J1i8V0hO9rtQzHjK/1d9bIZ73Li26+4MyGowGd1VGUBa7n8uarL0zPJEky2q4ljkfYlk1RpGy3K7oOgw7H6qirDPoGO/CxncDgjwSP5RCEFm2HQsvvacOQps3w/J7Q8RjEUk5htVR1xXAk7uEaYlEUuVDK2hrXsxjEAYfDgds7KU2DMKRpG5rGNmBJLSJ4fOgFpzM0bZDSNi1gMRqNTfNaXxdNbh6NRpydnRk6k178+nz/8MNbdrs92+2G2WzKZDJWxOxK+K1ZQds2tO3QLMq27czi0Vn7cVaZpqnBS+msUYYj4qS0WCxo2844Vv339t6rSZIjPdd83ENHpKrMUt0N0TM7GCx3SY6RP//Y/gAa1+wc2+UNIQZAK5RMnRk6wvfCRUVlC+7tOYYwK0OjUlREZvjnn3iFXYyPj486g600yf0lL3n7/t0ziIXN4JVS7AyezTbZbT/KNtGrquL9+/csl0vm87lbp9agebiBW1WY4TDFQiAOhwNn06kLDB8+fABwODLrd2l9QB8eHpyqiQXLOl6xEU9su46mbRmPp/SHvZP1sRubpWJZdVrbe7Z9SD2MC5jN5k7yCHBAdtv3++mnn9ykeOjQ9tlgNoy6w+BxGuDshbgMhydrqtNA5uLSl7IzC60YBDAwXRiFU994Bsmw59c/lSfAEzOg72kbXUqeTTVAU/TPr+H03HozsbU3uyeMkm5R0tC4gYGAwWeF2QkVWSYdt9CWpNpg494hxq0rs8XZlFWB8CGKPSbTMVdXGrn+8PBA11fUdU8QaB5gr0ZuN25avchmcYokRBrgchzHtOsCzxOEkUDIlqo+6DJTgvQ6pNeRjcJnU+kXL15wd3dHls0N1scninVACiNJGMXUtUaVB6FP2/nmPAriJHb0qFOkut397WahlHKmv7ofo6gqbWE3mUwc2twGf7s5pWnKw8PDMypLXdeMRmME2lZNMx40vckPtG/oZDInCAKKMqeu9eJJ0sjBDtq2xfd81zOy9Bm7qc1mM9eGsH0uO0XUoOgApaCqtLP3Dz/8wLt371yGVLd6Ea83a/d+1iF+Mplo7uhmg49wZiKPj4+u37rdbtnv9yyX2sh3sdBOYdpp6gXL5ZKbuxtqQ5gfj8ecnZ1pWtMxdxv8dDxBmkokz3NXguq+Z8MhL9xQoR5kinVdu7aGveeHawQw0IwnL8/lcknXtggpncPUZrNxLR4LXfE8z9AHE3dNDq6hlPv7dlMcls+fDWbDLEliMyj7o5DyKcLY7KXrni5ICKNyoT6eeOpg9bzN/6yBb5FiNgtzzxm8QAgwE0374XWq189RsNxuHAbF3uRd1xFIz+1ISil8E5yGsr/2PCxYdAg/sT9honWYbP7ZK0XX94iuw1OCOEnxpW6CCjxtxbY9aFBqqxv/1ttwv9+iVMfZmdZbX2/X+KHPaJKRmp22qWravmM2ndH1Pfd396hekaUJaaLNf61DeRyFxJEHtNRVhQCOxwOeJ/F9abKfxiHCoyh4NpzQG5J2Z//uu++cC84vv/zi+iarldZIE0Kw32np5jhK6NJesxKajiDUC/541AFSZy2dg2dYTNZ0OuXrr78GMHioktvbB3rD17DTrTzPXU9nOp3y8uVL8jzn5uaGutHCg0VRGLG/kSkJaxDKeDeUBIF2YOr6jrZtUKqj71uiSEMsmqbVAxHPJ45TMz3sDfG+BKRraI9GE969e0dZ1oBASp+Hh6Urneu6QfXai9SCctuu4+eff9ZZFkq7EV1coJRis1oxPzvj8vyCzlCqDobDulyvmM/nKHQrY3/UQo+d6vWCt0KHo4z1es3heKRDkZiMcjbTdoer1Uqvhb6nUz1xkoCU/Pb2rc6eg4DMAMDbVj/n4eGBotKDgsl0ys4wX2y2bdeKBd2maYof+FR1TatqQi/k6sUl42yCkoqmbNjne8ZpQlke6JseKXsCKUiyiBeXF5xfXfLu1zdI6RMHkiiNUW1HUVRURUF+PHJ9dYX0fQJPIIMvyGYjPEMXEijjaD386XrIiyeMllLCcri1TI/LugZRSH1cplrSuCtpPYkHug8xHAqclKB1azNHM1mkp+5qMOqeFrhqs8XhrtE0DV1jel3S4zTrtIcUArzngpI9mp/pskYhwdM/wtfuMVEQooTECyKquqPf6axAnzNcXr1gMtE9Mz+QjPYpUaKb4L3SPQApJX4QkkQZSZohgc1ux/3dLY/LFUkcMRlPmJ/NQQhevYD5YkEchvx+e6NNOugoy478eCRONK1FyIDReKwzjyhi+fCoS88g059DW/P4eM/ZYo4QeqBwMDpkk+mcyXhKkqTc32/46ec3BIEWlQz8kCjKyLKUMApo2obb+7f86fVrXr685u7ujratqeuG62ttmfbmzRv2e03ZWa+01M7hcOCbb77B80J++vvfEUJ/xGEYMg3HLstt25a6KfF8weL8jLdv3xInIbt9x8PjHW1Xu1KqqipG44gkidluO8JIkmVjrq8XjEYZy+WSw14b6E4nU62/1nX88ve/0zQNl8b1vK5rptMpq9Waly9fUdcV4/GE6XTKw4MWvQzDiCiKyfOSbKyzrLwqafc7pvMzBzidz+dUVcV+uyOfTLk6v6CtKo6HA4dIG/Te3d0hfZ9jWeD5Pr/89iu9UtRVRd22XF1cMDtfkGQpcZoiPB2U7m9vORxzzheX3N7e0rWKs9mCJM6oyoYir3j9+jVVXev7R2oHr6qq8PyOdDTmbHHO7e0tN7e3NHXN42rFYb+jR9vf2UFZEAQE0gNlhmmeTxyEeKHHcnlPGCi6tuB4XJMlgRmy1YxHIaKvmTQRTdUSpxFJlKJEz+G4onp3pGsFx21J4PusVrcc9wekL6nLBtX3zGdnJFmMZyhinw1mpz0zG1CGzX5bltmJol3wdipjX3OKKRsGqNPXfq5Hpz4R2PSw4NN4NjF8zrC3ZgOVfO4h8AQLGQQy+14n2dqzaxB6YOGpXvf4PInwJUVR0TU9ga8lsIMgQAqPtmvI8+LJQFUZwrvn0XUapDqZzJDySePs8f6B1WrFdrs1VKSGLE
npmpaDmYK2bYsAXrx4wZ++fc3D8kF7JbYQRBlJ5us+o+FaxmFMEicsFs8bz54X8fLlt7RdjVKCMIzwvJyyyBFCGvOTO6Io4fXrM1QPRVG5jKaqGrpOEUaCLE1oWg0LGI0zttsN4/EEpTS9S8vqaD2tH3/80RGLpZRMpjPOz88dC8DyVO21WqMT+2/bS3maCAaMx6k29z372pXpbZsyneqgqPtDWuXCZk/2c766vCJLx9zf37NebQmDGCl82kZnIKoHTwasV1uKXIOyL86vmM/nZlr6SA+Oh2tR+N9++y3b7dbRksbjMX3fc3tzw+ODli0qi4K8KPTU1pjqKnhyBkO3TnaHA0mWEZhe44ebGzf06rqOH374ga+/1tf+7//+77Rt6wxC/vM//5O//OUveKbbXtY1G2NGfPXiBbP5nOV6TWomlpXBRZZ1zczT0CPLU478wEEyfN/Xdo1K8f3339H3LXleojF8kjwvKIoSpRrSNOHu7oZ3735HKbi8XLBYnGkZ9KJEEHPc544e2bYth82epq2Yjifc3t4yHmf4gUdZfgGacdof+1QgGQY6+wHbacan+k+n7w9PPRMbYE4HC5/67+d+d3o+n/qbPeoZu9MugNNhgpCfljA67Q3Yz8E2gu2NK9FZToWVTLElrHCAP6UU0tNGGHoMHbn37/uOvvcp8oL3737n5uYGKbWpxcX5mH/6p3+i6zonaieFT1U23N896gZpW1OUFd2AYtW3nRFRFHhhiBKCKEmpq4qlkcSO45irqyuQkuOxZH/7YKhXgjTNaJrWyCLrJnoUxYz9wJUddd1oTa/qSBhKzaEsapI45bA/GvCw5M2bN8xmZ66ncnZ2xna7cxy/tmsdTssOELruifxt7xerYW9t5obQAE2qbwnD2EBBVkblZIPvh4RhzHK5dtLdr1//me12a0om3zXarWad53nPtO2sQGHbts9YCvP5nDRN+f321n3PVpPMUnMeHx+ZzWZcXl5SliXr9drBS5qm4WjgLAujUGGnh7ZXFIYhl5eXjhdsDZYtQNc21S153/b0rEz1yFCYbBCytDxAq/huNs5PYDQacX5+7lo19nrjMHLrXinl+KaiErQt3C8fSNMYzwuQEtpWUVUNdd2aSalkOp3R9wIp4erqBYvF3FDn9jw+7BBC84C327Wesjc1io4s00Df2WxCnERfhmYMA8TnAsMQmT1c5DZAnAab4fsNm4bDwGYfEx+NMf//H0Pg5UcDAp4gFxpm8ZRZuublAPJxeg2nQX6YSdrsoJIeaZggFNrcpK4MNy0yXEfBw8OKLIsJo4DHx5y2bRyVS/tBal5hXbfGoUhwfX3N69d/ZjabsF5vjCGIxpEJ4aFUQVFU7A57RuMxHYq26RGypetK6krfjEmSsFrv2a43eJaLutN2bHleE8UZ5xdztts1f//7r04VBIQLJpZPaXXY7GdTlAeKUjcuz881IPb3328YjUZcXFywXK5IkpTz83NAOKs0LTiouZWr1Yq6aRlPp8/0vTREQiPLh/fYsAqwY/osSymK3G00Fpv16tUrjsejcxyyUz9LKbNDgNAY2HzzzTcURcHV1RVlWTo8oGUDxHHsNM7KsnQGLLGR6Lm7u2M0GnF1dcXl5SVWTMAOfTRx/uhUOCzcJQgC4yeq2QnDxvtoNHIqHG3bOscqC3rVopq6lLWQFxvwlsslVi3E8kuV0tmp1RSzJaRlBFgesVP7NZtxF+ngdTQep1YGSLePJGmScXY2I/BD2q6ha3tymVMUOw6HI6vlltE44/r6peaRRlpnr64aM7wQXJxf4Hlwf3+D53m8fHVtpulPbAxP+iRJ6tboR8HMTgc+6iOZG8jqEw2zkmHZaKP8lxb/acB7nlnpqeAwKJ1COYblqMOYmZ0cpZ699ikY4QLZMHha9QAbzDozIftc5jcEH9rAbFN81fWUfkFk+J5xHJnxeISiM1MgOxHuDTxBy05rl6Etj48bwiAiDCOybMzV1ZhXr14yHk8oioo3b94RhiFXV1f0fcdyuTKMghFZOqbpFMdCT67ax6Xe1QwEYnE2Z7Vc8ea3X/E9bco6m04ZZyMNOykqQyrvSNPMfD4+x2PushIN0YCua929oKeNJVIKxuMRoJ3INVWlYzyekucVSZLy6tUr7Qjv+fhewIf3N2SZzpBsi8OKMFpajuXgCSFcJmYXphUKuLy8NFAFePPmN6dNZoOFfb7N5sIwdMKcv/zyyxOLQwgmI12OWjkiO3G2wFEbYK07+2KxcNnZ/nBwiP2yLPnrX//KZrPhr3/9K1VVMZ/PnTmwDTg2GFo60osXL8jN7+7u7qjrhvn8jPl87oQVh1PeIcLfTh/tMMJ6X1pIxWaz4eLiwmn5W7GEpmm4urpiNpvx22+/OZ6k3agnkwkTM41sa817zo2suGU62A3OCwNG4xFdpygPFV2vENLD83VPNRuNCKOIXsFxf2C92eN5AqX0FFco2O7WFOURP9BA+Pl85uAn0+nUSB91z5KfTwaz01JumFVZ3IueGFVOz8g+57R0Gx7DoGAD2mngAGUyjo/9Lz+VLQ5HtJqB0D8LVu555gfz3lY7bRjMuq6j77qPzn8YvCxgcXjuNs3u2w7ZC1JTIiwWCxaLheFkViAUvi9N2VAjPX1W6/XKSXdPxmPSdEwSpwRhYOSlYx4fVtzc3nBxfs58MSeOEg7Hg6P9xFFCXmq5l+1hz26353DQdBEpBZPxhKbuaKqauumoGy0rnY0mBIGWltkdCt69f8/8bMLXX71GCJxaaRj6YKAnVn6761omkzHjcUYUBaRZiic93r9/z/F4xPM8x8ULAj0x/e2337i/v2cxPwcsqj5yGVLbdeyPBxdI7P1V1zXb7Za7uzunDjt01rbPt8+1oNOqqtjv96xWK1dSDX0eh9aHeZ5zPGhrtomhzlhljaIsGRs5HzHAh1mxTysjrYCyrtzAQl9/5e4Xq/xqAbOLxcJlfaPRiHSUPRMstCR3m1GtVisWi4XTerPUsqZpXGmohxLCCSBEUeSkp22ZbNeI5Qjb7M5Oj4UQTp3D8kdjA5oNU+0F2xozEft3pJSUTc3D7ZJDPnbKJ/Yzz7KMLMt49eqV+17qOqDvdAlqM8soCPnw4YMpqc/d+Q7NVawwwRclgE4zn9PfW9iDzUpsRnZadg5/hmXpsD9mbyK7yzwFxeeYto+zrI/7d+79xJOr1LPgZ15j8WEYmZ6hVM/wNfbc7fvaBrXNXJ3el7mmIAjADxglI85mC6azicNX2Zve86XbSYWAzmR1uomuQSuXVy+Ikwyl9PR1d8hZbfQAoKxr9nnJcvOrUVUIGI8nxGlKUdf8fnuvdaCamv2h4HAoTJ8kww8iqrrB9wPm5xcoBdPZGXEyYr1eOTzZarnGk5KLi2tzThqlHcUxQkCSxIOhhTKfi6DtaopCgdKwiPV6awYZguNxbcq4iv3uwGg0dooWX301MdlSQdv2ZOOMr776itvbJ4ciWxIOA5UQT0BRpZQrjSxQNI4FVdUY67vQBJWGJMnw/ZCiqPD9EM8LHPBVKZ1RxmmCDHyE79H0H
+[... remainder of the base64-encoded PNG image data of the notebook's plotted output omitted ...]\n",
+      "text/plain": [
+       "
" + ] + }, + "metadata": { + "needs_background": "light" + } + } + ] + } + ] +} diff --git a/docs/en/tutorials/config.md b/docs/en/tutorials/config.md new file mode 100644 index 0000000..16e43ac --- /dev/null +++ b/docs/en/tutorials/config.md @@ -0,0 +1,417 @@ +# Tutorial 1: Learn about Configs + +MMClassification mainly uses python files as configs. The design of our configuration file system integrates modularity and inheritance, facilitating users to conduct various experiments. All configuration files are placed in the `configs` folder, which mainly contains the primitive configuration folder of `_base_` and many algorithm folders such as `resnet`, `swin_transformer`, `vision_transformer`, etc. + +If you wish to inspect the config file, you may run `python tools/misc/print_config.py /PATH/TO/CONFIG` to see the complete config. + + + +- [Config File and Checkpoint Naming Convention](#config-file-and-checkpoint-naming-convention) +- [Config File Structure](#config-file-structure) +- [Inherit and Modify Config File](#inherit-and-modify-config-file) + - [Use intermediate variables in configs](#use-intermediate-variables-in-configs) + - [Ignore some fields in the base configs](#ignore-some-fields-in-the-base-configs) + - [Use some fields in the base configs](#use-some-fields-in-the-base-configs) +- [Modify config through script arguments](#modify-config-through-script-arguments) +- [Import user-defined modules](#import-user-defined-modules) +- [FAQ](#faq) + + + +## Config File and Checkpoint Naming Convention + +We follow the below convention to name config files. Contributors are advised to follow the same style. The config file names are divided into four parts: algorithm info, module information, training information and data information. Logically, different parts are concatenated by underscores `'_'`, and words in the same part are concatenated by dashes `'-'`. + +``` +{algorithm info}_{module info}_{training info}_{data info}.py +``` + +- `algorithm info`:algorithm information, model name and neural network architecture, such as resnet, etc.; +- `module info`: module information is used to represent some special neck, head and pretrain information; +- `training info`:Training information, some training schedule, including batch size, lr schedule, data augment and the like; +- `data info`:Data information, dataset name, input size and so on, such as imagenet, cifar, etc.; + +### Algorithm information + +The main algorithm name and the corresponding branch architecture information. E.g: + +- `resnet50` +- `mobilenet-v3-large` +- `vit-small-patch32` : `patch32` represents the size of the partition in `ViT` algorithm; +- `seresnext101-32x4d` : `SeResNet101` network structure, `32x4d` means that `groups` and `width_per_group` are 32 and 4 respectively in `Bottleneck`; + +### Module information + +Some special `neck`, `head` and `pretrain` information. 
In classification tasks, `pretrain` information is the most commonly used: + +- `in21k-pre` : pre-trained on ImageNet21k; +- `in21k-pre-3rd-party` : pre-trained on ImageNet21k and the checkpoint is converted from a third-party repository; + +### Training information + +Training schedule, including training type, `batch size`, `lr schedule`, data augment, special loss functions and so on: + +- format `{gpu x batch_per_gpu}`, such as `8xb32` + +Training type (mainly seen in the transformer network, such as the `ViT` algorithm, which is usually divided into two training type: pre-training and fine-tuning): + +- `ft` : configuration file for fine-tuning +- `pt` : configuration file for pretraining + +Training recipe. Usually, only the part that is different from the original paper will be marked. These methods will be arranged in the order `{pipeline aug}-{train aug}-{loss trick}-{scheduler}-{epochs}`. + +- `coslr-200e` : use cosine scheduler to train 200 epochs +- `autoaug-mixup-lbs-coslr-50e` : use `autoaug`, `mixup`, `label smooth`, `cosine scheduler` to train 50 epochs + +### Data information + +- `in1k` : `ImageNet1k` dataset, default to use the input image size of 224x224; +- `in21k` : `ImageNet21k` dataset, also called `ImageNet22k` dataset, default to use the input image size of 224x224; +- `in1k-384px` : Indicates that the input image size is 384x384; +- `cifar100` + +### Config File Name Example + +``` +repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py +``` + +- `repvgg-D2se`: Algorithm information + - `repvgg`: The main algorithm. + - `D2se`: The architecture. +- `deploy`: Module information, means the backbone is in the deploy state. +- `4xb64-autoaug-lbs-mixup-coslr-200e`: Training information. + - `4xb64`: Use 4 GPUs and the size of batches per GPU is 64. + - `autoaug`: Use `AutoAugment` in training pipeline. + - `lbs`: Use label smoothing loss. + - `mixup`: Use `mixup` training augment method. + - `coslr`: Use cosine learning rate scheduler. + - `200e`: Train the model for 200 epochs. +- `in1k`: Dataset information. The config is for `ImageNet1k` dataset and the input size is `224x224`. + +```{note} +Some configuration files currently do not follow this naming convention, and related files will be updated in the near future. +``` + +### Checkpoint Naming Convention + +The naming of the weight mainly includes the configuration file name, date and hash value. + +``` +{config_name}_{date}-{hash}.pth +``` + +## Config File Structure + +There are four kinds of basic component file in the `configs/_base_` folders, namely: + +- [models](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/models) +- [datasets](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/datasets) +- [schedules](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/schedules) +- [runtime](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/default_runtime.py) + +You can easily build your own training config file by inherit some base config files. And the configs that are composed by components from `_base_` are called _primitive_. + +For easy understanding, we use [ResNet50 primitive config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb32_in1k.py) as a example and comment the meaning of each line. For more detaile, please refer to the API documentation. 
+ +```python +_base_ = [ + '../_base_/models/resnet50.py', # model + '../_base_/datasets/imagenet_bs32.py', # data + '../_base_/schedules/imagenet_bs256.py', # training schedule + '../_base_/default_runtime.py' # runtime setting +] +``` + +The four parts are explained separately below, and the above-mentioned ResNet50 primitive config are also used as an example. + +### model + +The parameter `"model"` is a python dictionary in the configuration file, which mainly includes information such as network structure and loss function: + +- `type` : Classifier name, MMCls supports `ImageClassifier`, refer to [API documentation](https://mmclassification.readthedocs.io/en/latest/api/models.html#classifier). +- `backbone` : Backbone configs, refer to [API documentation](https://mmclassification.readthedocs.io/en/latest/api/models.html#backbones) for available options. +- `neck` :Neck network name, MMCls supports `GlobalAveragePooling`, please refer to [API documentation](https://mmclassification.readthedocs.io/en/latest/api/models.html#necks). +- `head`: Head network name, MMCls supports single-label and multi-label classification head networks, available options refer to [API documentation](https://mmclassification.readthedocs.io/en/latest/api/models.html#heads). + - `loss`: Loss function type, supports `CrossEntropyLoss`, [`LabelSmoothLoss`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_label_smooth.py) etc., For available options, refer to [API documentation](https://mmclassification.readthedocs.io/en/latest/api/models.html#losses). +- `train_cfg` :Training augment config, MMCls supports [`mixup`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_mixup.py), [`cutmix`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_cutmix.py) and other augments. + +```{note} +The 'type' in the configuration file is not a constructed parameter, but a class name. +``` + +```python +model = dict( + type='ImageClassifier', # Classifier name + backbone=dict( + type='ResNet', # Backbones name + depth=50, # depth of backbone, ResNet has options of 18, 34, 50, 101, 152. + num_stages=4, # number of stages,The feature maps generated by these states are used as the input for the subsequent neck and head. + out_indices=(3, ), # The output index of the output feature maps. + frozen_stages=-1, # the stage to be frozen, '-1' means not be forzen + style='pytorch'), # The style of backbone, 'pytorch' means that stride 2 layers are in 3x3 conv, 'caffe' means stride 2 layers are in 1x1 convs. 
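+    # With out_indices=(3, ) above, only the last stage's feature map (2048 channels for
+    # ResNet-50) is passed on, which is why the head below sets in_channels=2048.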
+ neck=dict(type='GlobalAveragePooling'), # neck network name + head=dict( + type='LinearClsHead', # linear classification head, + num_classes=1000, # The number of output categories, consistent with the number of categories in the dataset + in_channels=2048, # The number of input channels, consistent with the output channel of the neck + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), # Loss function configuration information + topk=(1, 5), # Evaluation index, Top-k accuracy rate, here is the accuracy rate of top1 and top5 + )) +``` + +### data + +The parameter `"data"` is a python dictionary in the configuration file, which mainly includes information to construct dataloader: + +- `samples_per_gpu` : the BatchSize of each GPU when building the dataloader +- `workers_per_gpu` : the number of threads per GPU when building dataloader +- `train | val | test` : config to construct dataset + - `type`: Dataset name, MMCls supports `ImageNet`, `Cifar` etc., refer to [API documentation](https://mmclassification.readthedocs.io/en/latest/api/datasets.html) + - `data_prefix` : Dataset root directory + - `pipeline` : Data processing pipeline, refer to related tutorial [CUSTOM DATA PIPELINES](https://mmclassification.readthedocs.io/en/latest/tutorials/data_pipeline.html) + +The parameter `evaluation` is also a dictionary, which is the configuration information of `evaluation hook`, mainly including evaluation interval, evaluation index, etc.. + +```python +# dataset settings +dataset_type = 'ImageNet' # dataset name, +img_norm_cfg = dict( # Image normalization config to normalize the input images + mean=[123.675, 116.28, 103.53], # Mean values used to pre-training the pre-trained backbone models + std=[58.395, 57.12, 57.375], # Standard variance used to pre-training the pre-trained backbone models + to_rgb=True) # Whether to invert the color channel, rgb2bgr or bgr2rgb. 
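+# The mean/std above are the standard ImageNet RGB statistics (on the 0-255 scale) used to
+# train the released pre-trained backbones.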
+# train data pipeline
+train_pipeline = [
+    dict(type='LoadImageFromFile'),                # First pipeline to load images from file path
+    dict(type='RandomResizedCrop', size=224),      # Randomly resize and crop the image to 224x224
+    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),  # Randomly flip the picture horizontally with a probability of 0.5
+    dict(type='Normalize', **img_norm_cfg),        # normalization
+    dict(type='ImageToTensor', keys=['img']),      # convert image from numpy into torch.Tensor
+    dict(type='ToTensor', keys=['gt_label']),      # convert gt_label into torch.Tensor
+    dict(type='Collect', keys=['img', 'gt_label']) # Pipeline that decides which keys in the data should be passed to the classifier
+]
+# test data pipeline
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='Resize', size=(256, -1)),
+    dict(type='CenterCrop', crop_size=224),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='ImageToTensor', keys=['img']),
+    dict(type='Collect', keys=['img'])             # do not pass gt_label while testing
+]
+data = dict(
+    samples_per_gpu=32,     # Batch size of a single GPU
+    workers_per_gpu=2,      # Workers to pre-fetch data for each single GPU
+    train=dict(             # train data config
+        type=dataset_type,                  # dataset name
+        data_prefix='data/imagenet/train',  # Dataset root; when ann_file does not exist, the category information is automatically obtained from the root folder
+        pipeline=train_pipeline),           # train data pipeline
+    val=dict(               # val data config
+        type=dataset_type,
+        data_prefix='data/imagenet/val',
+        ann_file='data/imagenet/meta/val.txt',   # when ann_file exists, the category information is obtained from the file
+        pipeline=test_pipeline),
+    test=dict(              # test data config
+        type=dataset_type,
+        data_prefix='data/imagenet/val',
+        ann_file='data/imagenet/meta/val.txt',
+        pipeline=test_pipeline))
+evaluation = dict(       # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details.
+    interval=1,          # Evaluation interval
+    metric='accuracy')   # Metrics used during evaluation
+```
+
+### training schedule
+
+Mainly includes optimizer settings, `optimizer hook` settings, the learning rate schedule and `runner` settings:
+
+- `optimizer`: optimizer settings, all optimizers in `pytorch` are supported, refer to the related [mmcv](https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/optimizer/default_constructor.html#DefaultOptimizerConstructor) documentation.
+- `optimizer_config`: `optimizer hook` configuration, such as setting a gradient clipping limit, refer to the related [mmcv](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8) code.
+- `lr_config`: Learning rate scheduler, supports "CosineAnnealing", "Step", "Cyclic", etc., refer to the related [mmcv](https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/hooks/lr_updater.html#LrUpdaterHook) documentation for more options.
+- `runner`: For `runner`, please refer to the `mmcv` [`runner`](https://mmcv.readthedocs.io/en/latest/understand_mmcv/runner.html) introduction document.
+
+```python
+# The configuration below builds the optimizer; all optimizers in PyTorch are supported.
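+# Note: lr=0.1 below follows the inherited '_base_/schedules/imagenet_bs256.py' schedule and
+# assumes a total batch size of 256 (8 GPUs x 32 images per GPU, as in the data config above).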
+optimizer = dict(type='SGD', # Optimizer type + lr=0.1, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch + momentum=0.9, # Momentum + weight_decay=0.0001) # Weight decay of SGD +# Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details. +optimizer_config = dict(grad_clip=None) # Most of the methods do not use gradient clip +# Learning rate scheduler config used to register LrUpdater hook +lr_config = dict(policy='step', # The policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9. + step=[30, 60, 90]) # Steps to decay the learning rate +runner = dict(type='EpochBasedRunner', # Type of runner to use (i.e. IterBasedRunner or EpochBasedRunner) + max_epochs=100) # Runner that runs the workflow in total max_epochs. For IterBasedRunner use `max_iters` +``` + +### runtime setting + +This part mainly includes saving the checkpoint strategy, log configuration, training parameters, breakpoint weight path, working directory, etc.. + +```python +# Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation. +checkpoint_config = dict(interval=1) # The save interval is 1 +# config to register logger hook +log_config = dict( + interval=100, # Interval to print the log + hooks=[ + dict(type='TextLoggerHook'), # The Tensorboard logger is also supported + # dict(type='TensorboardLoggerHook') + ]) + +dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set. +log_level = 'INFO' # The output level of the log. +resume_from = None # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved. +workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. +work_dir = 'work_dir' # Directory to save the model checkpoints and logs for the current experiments. +``` + +## Inherit and Modify Config File + +For easy understanding, we recommend contributors to inherit from existing methods. + +For all configs under the same folder, it is recommended to have only **one** _primitive_ config. All other configs should inherit from the _primitive_ config. In this way, the maximum of inheritance level is 3. + +For example, if your config file is based on ResNet with some other modification, you can first inherit the basic ResNet structure, dataset and other training setting by specifying `_base_ ='./resnet50_8xb32_in1k.py'` (The path relative to your config file), and then modify the necessary parameters in the config file. 
A more specific example, now we want to use almost all configs in `configs/resnet/resnet50_8xb32_in1k.py`, but change the number of training epochs from 100 to 300, modify when to decay the learning rate, and modify the dataset path, you can create a new config file `configs/resnet/resnet50_8xb32-300e_in1k.py` with content as below: + +```python +_base_ = './resnet50_8xb32_in1k.py' + +runner = dict(max_epochs=300) +lr_config = dict(step=[150, 200, 250]) + +data = dict( + train=dict(data_prefix='mydata/imagenet/train'), + val=dict(data_prefix='mydata/imagenet/train', ), + test=dict(data_prefix='mydata/imagenet/train', ) +) +``` + +### Use intermediate variables in configs + +Some intermediate variables are used in the configuration file. The intermediate variables make the configuration file clearer and easier to modify. + +For example, `train_pipeline` / `test_pipeline` is the intermediate variable of the data pipeline. We first need to define `train_pipeline` / `test_pipeline`, and then pass them to `data`. If you want to modify the size of the input image during training and testing, you need to modify the intermediate variables of `train_pipeline` / `test_pipeline`. + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow',), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=384, backend='pillow'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +``` + +### Ignore some fields in the base configs + +Sometimes, you need to set `_delete_=True` to ignore some domain content in the basic configuration file. You can refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html#inherit-from-base-config-with-ignored-fields) for more instructions. + +The following is an example. If you want to use cosine schedule in the above ResNet50 case, just using inheritance and directly modify it will report `get unexcepected keyword'step'` error, because the `'step'` field of the basic config in `lr_config` domain information is reserved, and you need to add `_delete_ =True` to ignore the content of `lr_config` related fields in the basic configuration file: + +```python +_base_ = '../../configs/resnet/resnet50_8xb32_in1k.py' + +lr_config = dict( + _delete_=True, + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + by_epoch=True, + warmup_iters=5, + warmup_ratio=0.1 +) +``` + +### Use some fields in the base configs + +Sometimes, you may refer to some fields in the `_base_` config, so as to avoid duplication of definitions. You can refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html#reference-variables-from-base) for some more instructions. + +The following is an example of using auto augment in the training data preprocessing pipeline, refer to [`configs/_base_/datasets/imagenet_bs64_autoaug.py`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/datasets/imagenet_bs64_autoaug.py). 
When defining `train_pipeline`, just add the definition file name of auto augment to `_base_`, and then use `{{_base_.auto_increasing_policies}}` to reference the variables: + +```python +_base_ = ['./pipelines/auto_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies={{_base_.auto_increasing_policies}}), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [...] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict(..., pipeline=train_pipeline), + val=dict(..., pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') +``` + +## Modify config through script arguments + +When users use the script "tools/train.py" or "tools/test.py" to submit tasks or use some other tools, they can directly modify the content of the configuration file used by specifying the `--cfg-options` parameter. + +- Update config keys of dict chains. + + The config options can be specified following the order of the dict keys in the original config. + For example, `--cfg-options model.backbone.norm_eval=False` changes the all BN modules in model backbones to `train` mode. + +- Update keys inside a list of configs. + + Some config dicts are composed as a list in your config. For example, the training pipeline `data.train.pipeline` is normally a list + e.g. `[dict(type='LoadImageFromFile'), dict(type='TopDownRandomFlip', flip_prob=0.5), ...]`. If you want to change `'flip_prob=0.5'` to `'flip_prob=0.0'` in the pipeline, + you may specify `--cfg-options data.train.pipeline.1.flip_prob=0.0`. + +- Update values of list/tuples. + + If the value to be updated is a list or a tuple. For example, the config file normally sets `workflow=[('train', 1)]`. If you want to + change this key, you may specify `--cfg-options workflow="[(train,1),(val,1)]"`. Note that the quotation mark " is necessary to + support list/tuple data types, and that **NO** white space is allowed inside the quotation marks in the specified value. + +## Import user-defined modules + +```{note} +This part may only be used when using MMClassification as a third party library to build your own project, and beginners can skip it. +``` + +After studying the follow-up tutorials [ADDING NEW DATASET](https://mmclassification.readthedocs.io/en/latest/tutorials/new_dataset.html), [CUSTOM DATA PIPELINES](https://mmclassification.readthedocs.io/en/latest/tutorials/data_pipeline.html), [ADDING NEW MODULES](https://mmclassification.readthedocs.io/en/latest/tutorials/new_modules.html). You may use MMClassification to complete your project and create new classes of datasets, models, data enhancements, etc. in the project. In order to streamline the code, you can use MMClassification as a third-party library, you just need to keep your own extra code and import your own custom module in the configuration files. For examples, you may refer to [OpenMMLab Algorithm Competition Project](https://github.com/zhangrui-wolf/openmmlab-competition-2021) . 
+ +Add the following code to your own configuration files: + +```python +custom_imports = dict( + imports=['your_dataset_class', + 'your_transforme_class', + 'your_model_class', + 'your_module_class'], + allow_failed_imports=False) +``` + +## FAQ + +- None diff --git a/docs/en/tutorials/data_pipeline.md b/docs/en/tutorials/data_pipeline.md new file mode 100644 index 0000000..4b32280 --- /dev/null +++ b/docs/en/tutorials/data_pipeline.md @@ -0,0 +1,150 @@ +# Tutorial 4: Custom Data Pipelines + +## Design of Data pipelines + +Following typical conventions, we use `Dataset` and `DataLoader` for data loading +with multiple workers. Indexing `Dataset` returns a dict of data items corresponding to +the arguments of models forward method. + +The data preparation pipeline and the dataset is decomposed. Usually a dataset +defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict. +A pipeline consists of a sequence of operations. Each operation takes a dict as input and also output a dict for the next transform. + +The operations are categorized into data loading, pre-processing and formatting. + +Here is an pipeline example for ResNet-50 training on ImageNet. + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=256), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +``` + +For each operation, we list the related dict fields that are added/updated/removed. +At the end of the pipeline, we use `Collect` to only retain the necessary items for forward computation. + +### Data loading + +`LoadImageFromFile` + +- add: img, img_shape, ori_shape + +By default, `LoadImageFromFile` loads images from disk but it may lead to IO bottleneck for efficient small models. +Various backends are supported by mmcv to accelerate this process. For example, if the training machines have setup +[memcached](https://memcached.org/), we can revise the config as follows. + +``` +memcached_root = '/mnt/xxx/memcached_client/' +train_pipeline = [ + dict( + type='LoadImageFromFile', + file_client_args=dict( + backend='memcached', + server_list_cfg=osp.join(memcached_root, 'server_list.conf'), + client_cfg=osp.join(memcached_root, 'client.conf'))), +] +``` + +More supported backends can be found in [mmcv.fileio.FileClient](https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py). + +### Pre-processing + +`Resize` + +- add: scale, scale_idx, pad_shape, scale_factor, keep_ratio +- update: img, img_shape + +`RandomFlip` + +- add: flip, flip_direction +- update: img + +`RandomCrop` + +- update: img, pad_shape + +`Normalize` + +- add: img_norm_cfg +- update: img + +### Formatting + +`ToTensor` + +- update: specified by `keys`. + +`ImageToTensor` + +- update: specified by `keys`. 
+ +`Collect` + +- remove: all other keys except for those specified by `keys` + +For more information about other data transformation classes, please refer to [Data Transformations](../api/transforms.rst) + +## Extend and use custom pipelines + +1. Write a new pipeline in any file, e.g., `my_pipeline.py`, and place it in + the folder `mmcls/datasets/pipelines/`. The pipeline class needs to override + the `__call__` method which takes a dict as input and returns a dict. + + ```python + from mmcls.datasets import PIPELINES + + @PIPELINES.register_module() + class MyTransform(object): + + def __call__(self, results): + # apply transforms on results['img'] + return results + ``` + +2. Import the new class in `mmcls/datasets/pipelines/__init__.py`. + + ```python + ... + from .my_pipeline import MyTransform + + __all__ = [ + ..., 'MyTransform' + ] + ``` + +3. Use it in config files. + + ```python + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='MyTransform'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) + ] + ``` + +## Pipeline visualization + +After designing data pipelines, you can use the [visualization tools](../tools/visualization.md) to view the performance. diff --git a/docs/en/tutorials/finetune.md b/docs/en/tutorials/finetune.md new file mode 100644 index 0000000..98538fb --- /dev/null +++ b/docs/en/tutorials/finetune.md @@ -0,0 +1,236 @@ +# Tutorial 2: Fine-tune Models + +Classification models pre-trained on the ImageNet dataset have been demonstrated to be effective for other datasets and other downstream tasks. +This tutorial provides instructions for users to use the models provided in the [Model Zoo](../model_zoo.md) for other datasets to obtain better performance. + +There are two steps to fine-tune a model on a new dataset. + +- Add support for the new dataset following [Tutorial 3: Customize Dataset](new_dataset.md). +- Modify the configs as will be discussed in this tutorial. + +Assume we have a ResNet-50 model pre-trained on the ImageNet-2012 dataset and want +to take the fine-tuning on the CIFAR-10 dataset, we need to modify five parts in the +config. + +## Inherit base configs + +At first, create a new config file +`configs/tutorial/resnet50_finetune_cifar.py` to store our configs. Of course, +the path can be customized by yourself. + +To reuse the common parts among different configs, we support inheriting +configs from multiple existing configs. To fine-tune a ResNet-50 model, the new +config needs to inherit `configs/_base_/models/resnet50.py` to build the basic +structure of the model. To use the CIFAR-10 dataset, the new config can also +simply inherit `configs/_base_/datasets/cifar10_bs16.py`. For runtime settings such as +training schedules, the new config needs to inherit +`configs/_base_/default_runtime.py`. + +To inherit all above configs, put the following code at the config file. 
+ +```python +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cifar10_bs16.py', '../_base_/default_runtime.py' +] +``` + +Besides, you can also choose to write the whole contents rather than use inheritance, +like [`configs/lenet/lenet5_mnist.py`](https://github.com/open-mmlab/mmclassification/blob/master/configs/lenet/lenet5_mnist.py). + +## Modify model + +When fine-tuning a model, usually we want to load the pre-trained backbone +weights and train a new classification head. + +To load the pre-trained backbone, we need to change the initialization config +of the backbone and use `Pretrained` initialization function. Besides, in the +`init_cfg`, we use `prefix='backbone'` to tell the initialization +function to remove the prefix of keys in the checkpoint, for example, it will +change `backbone.conv1` to `conv1`. And here we use an online checkpoint, it +will be downloaded during training, you can also download the model manually +and use a local path. + +And then we need to modify the head according to the class numbers of the new +datasets by just changing `num_classes` in the head. + +```python +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +``` + +```{tip} +Here we only need to set the part of configs we want to modify, because the +inherited configs will be merged and get the entire configs. +``` + +Sometimes, we want to freeze the first several layers' parameters of the +backbone, that will help the network to keep ability to extract low-level +information learnt from pre-trained model. In MMClassification, you can simply +specify how many layers to freeze by `frozen_stages` argument. For example, to +freeze the first two layers' parameters, just use the following config: + +```python +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +``` + +```{note} +Not all backbones support the `frozen_stages` argument by now. Please check +[the docs](https://mmclassification.readthedocs.io/en/latest/api/models.html#backbones) +to confirm if your backbone supports it. +``` + +## Modify dataset + +When fine-tuning on a new dataset, usually we need to modify some dataset +configs. Here, we need to modify the pipeline to resize the image from 32 to +224 to fit the input size of the model pre-trained on ImageNet, and some other +configs. 
+ +```python +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False, +) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) +``` + +## Modify training schedule + +The fine-tuning hyper parameters vary from the default schedule. It usually +requires smaller learning rate and less training epochs. + +```python +# lr is set for a batch size of 128 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[15]) +runner = dict(type='EpochBasedRunner', max_epochs=200) +log_config = dict(interval=100) +``` + +## Start Training + +Now, we have finished the fine-tuning config file as following: + +```python +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cifar10_bs16.py', '../_base_/default_runtime.py' +] + +# Model config +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# Dataset config +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False, +) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) + +# Training schedule config +# lr is set for a batch size of 128 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[15]) +runner = dict(type='EpochBasedRunner', max_epochs=200) +log_config = dict(interval=100) +``` + +Here we use 8 GPUs on your computer to train the model with the following +command: + +```shell +bash tools/dist_train.sh configs/tutorial/resnet50_finetune_cifar.py 8 +``` + +Also, you can use only one GPU to train the model with the following command: + +```shell +python tools/train.py configs/tutorial/resnet50_finetune_cifar.py +``` + +But wait, an important config need to be changed if using one GPU. 
We need to +change the dataset config as following: + +```python +data = dict( + samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) +``` + +It's because our training schedule is for a batch size of 128. If using 8 GPUs, +just use `samples_per_gpu=16` config in the base config file, and the total batch +size will be 128. But if using one GPU, you need to change it to 128 manually to +match the training schedule. diff --git a/docs/en/tutorials/new_dataset.md b/docs/en/tutorials/new_dataset.md new file mode 100644 index 0000000..24e6fe9 --- /dev/null +++ b/docs/en/tutorials/new_dataset.md @@ -0,0 +1,239 @@ +# Tutorial 3: Customize Dataset + +We support many common public datasets for image classification task, you can find them in +[this page](https://mmclassification.readthedocs.io/en/latest/api/datasets.html). + +In this section, we demonstrate how to [use your own dataset](#use-your-own-dataset) +and [use dataset wrapper](#use-dataset-wrapper). + +## Use your own dataset + +### Reorganize dataset to existing format + +The simplest way to use your own dataset is to convert it to existing dataset formats. + +For multi-class classification task, we recommend to use the format of +[`CustomDataset`](https://mmclassification.readthedocs.io/en/latest/api/datasets.html#mmcls.datasets.CustomDataset). + +The `CustomDataset` supports two kinds of format: + +1. An annotation file is provided, and each line indicates a sample image. + + The sample images can be organized in any structure, like: + + ``` + train/ + ├── folder_1 + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + ├── 123.png + ├── nsdf3.png + └── ... + ``` + + And an annotation file records all paths of samples and corresponding + category index. The first column is the image path relative to the folder + (in this example, `train`) and the second column is the index of category: + + ``` + folder_1/xxx.png 0 + folder_1/xxy.png 1 + 123.png 1 + nsdf3.png 2 + ... + ``` + + ```{note} + The value of the category indices should fall in range `[0, num_classes - 1]`. + ``` + +2. The sample images are arranged in the special structure: + + ``` + train/ + ├── cat + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + │ └── xxz.png + ├── bird + │ ├── bird1.png + │ ├── bird2.png + │ └── ... + └── dog + ├── 123.png + ├── nsdf3.png + ├── ... + └── asd932_.png + ``` + + In this case, you don't need provide annotation file, and all images in the directory `cat` will be + recognized as samples of `cat`. + +Usually, we will split the whole dataset to three sub datasets: `train`, `val` +and `test` for training, validation and test. And **every** sub dataset should +be organized as one of the above structures. + +For example, the whole dataset is as below (using the first structure): + +``` +mmclassification +└── data + └── my_dataset + ├── meta + │ ├── train.txt + │ ├── val.txt + │ └── test.txt + ├── train + ├── val + └── test +``` + +And in your config file, you can modify the `data` field as below: + +```python +... 
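+# train_pipeline and test_pipeline are assumed to be defined earlier in the config
+# (or inherited from a _base_ file); only the dataset-related fields are shown here.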
+dataset_type = 'CustomDataset' +classes = ['cat', 'bird', 'dog'] # The category names of your dataset + +data = dict( + train=dict( + type=dataset_type, + data_prefix='data/my_dataset/train', + ann_file='data/my_dataset/meta/train.txt', + classes=classes, + pipeline=train_pipeline + ), + val=dict( + type=dataset_type, + data_prefix='data/my_dataset/val', + ann_file='data/my_dataset/meta/val.txt', + classes=classes, + pipeline=test_pipeline + ), + test=dict( + type=dataset_type, + data_prefix='data/my_dataset/test', + ann_file='data/my_dataset/meta/test.txt', + classes=classes, + pipeline=test_pipeline + ) +) +... +``` + +### Create a new dataset class + +You can write a new dataset class inherited from `BaseDataset`, and overwrite `load_annotations(self)`, +like [CIFAR10](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/datasets/cifar.py) and +[CustomDataset](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/datasets/custom.py). + +Typically, this function returns a list, where each sample is a dict, containing necessary data information, +e.g., `img` and `gt_label`. + +Assume we are going to implement a `Filelist` dataset, which takes filelists for both training and testing. +The format of annotation list is as follows: + +``` +000001.jpg 0 +000002.jpg 1 +``` + +We can create a new dataset in `mmcls/datasets/filelist.py` to load the data. + +```python +import mmcv +import numpy as np + +from .builder import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class Filelist(BaseDataset): + + def load_annotations(self): + assert isinstance(self.ann_file, str) + + data_infos = [] + with open(self.ann_file) as f: + samples = [x.strip().split(' ') for x in f.readlines()] + for filename, gt_label in samples: + info = {'img_prefix': self.data_prefix} + info['img_info'] = {'filename': filename} + info['gt_label'] = np.array(gt_label, dtype=np.int64) + data_infos.append(info) + return data_infos + +``` + +And add this dataset class in `mmcls/datasets/__init__.py` + +```python +from .base_dataset import BaseDataset +... +from .filelist import Filelist + +__all__ = [ + 'BaseDataset', ... ,'Filelist' +] +``` + +Then in the config, to use `Filelist` you can modify the config as the following + +```python +train = dict( + type='Filelist', + ann_file='image_list.txt', + pipeline=train_pipeline +) +``` + +## Use dataset wrapper + +The dataset wrapper is a kind of class to change the behavior of dataset class, such as repeat the dataset or +re-balance the samples of different categories. + +### Repeat dataset + +We use `RepeatDataset` as wrapper to repeat the dataset. For example, suppose the original dataset is +`Dataset_A`, to repeat it, the config looks like the following + +```python +data = dict( + train = dict( + type='RepeatDataset', + times=N, + dataset=dict( # This is the original config of Dataset_A + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) + ... +) +``` + +### Class balanced dataset + +We use `ClassBalancedDataset` as wrapper to repeat the dataset based on category frequency. The dataset to +repeat needs to implement method `get_cat_ids(idx)` to support `ClassBalancedDataset`. For example, to repeat +`Dataset_A` with `oversample_thr=1e-3`, the config looks like the following + +```python +data = dict( + train = dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( # This is the original config of Dataset_A + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) + ... 
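+    # With oversample_thr=1e-3, images containing categories whose frequency falls below
+    # 0.001 are sampled more often; images with only frequent categories keep a repeat factor of 1.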
+) +``` + +You may refer to [API reference](https://mmclassification.readthedocs.io/en/latest/api/datasets.html#mmcls.datasets.ClassBalancedDataset) for details. diff --git a/docs/en/tutorials/new_modules.md b/docs/en/tutorials/new_modules.md new file mode 100644 index 0000000..5ac89de --- /dev/null +++ b/docs/en/tutorials/new_modules.md @@ -0,0 +1,272 @@ +# Tutorial 5: Adding New Modules + +## Develop new components + +We basically categorize model components into 3 types. + +- backbone: usually an feature extraction network, e.g., ResNet, MobileNet. +- neck: the component between backbones and heads, e.g., GlobalAveragePooling. +- head: the component for specific tasks, e.g., classification or regression. + +### Add new backbones + +Here we show how to develop new components with an example of ResNet_CIFAR. +As the input size of CIFAR is 32x32, this backbone replaces the `kernel_size=7, stride=2` to `kernel_size=3, stride=1` and remove the MaxPooling after stem, to avoid forwarding small feature maps to residual blocks. +It inherits from ResNet and only modifies the stem layers. + +1. Create a new file `mmcls/models/backbones/resnet_cifar.py`. + +```python +import torch.nn as nn + +from ..builder import BACKBONES +from .resnet import ResNet + + +@BACKBONES.register_module() +class ResNet_CIFAR(ResNet): + + """ResNet backbone for CIFAR. + + short description of the backbone + + Args: + depth(int): Network depth, from {18, 34, 50, 101, 152}. + ... + """ + + def __init__(self, depth, deep_stem, **kwargs): + # call ResNet init + super(ResNet_CIFAR, self).__init__(depth, deep_stem=deep_stem, **kwargs) + # other specific initialization + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + # override ResNet method to modify the network structure + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): # should return a tuple + pass # implementation is ignored + + def init_weights(self, pretrained=None): + pass # override ResNet init_weights if necessary + + def train(self, mode=True): + pass # override ResNet train if necessary +``` + +2. Import the module in `mmcls/models/backbones/__init__.py`. + +```python +... +from .resnet_cifar import ResNet_CIFAR + +__all__ = [ + ..., 'ResNet_CIFAR' +] +``` + +3. Use it in your config file. + +```python +model = dict( + ... + backbone=dict( + type='ResNet_CIFAR', + depth=18, + other_arg=xxx), + ... +``` + +### Add new necks + +Here we take `GlobalAveragePooling` as an example. It is a very simple neck without any arguments. +To add a new neck, we mainly implement the `forward` function, which applies some operation on the output from backbone and forward the results to head. + +1. Create a new file in `mmcls/models/necks/gap.py`. + + ```python + import torch.nn as nn + + from ..builder import NECKS + + @NECKS.register_module() + class GlobalAveragePooling(nn.Module): + + def __init__(self): + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, inputs): + # we regard inputs as tensor for simplicity + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + return outs + ``` + +2. Import the module in `mmcls/models/necks/__init__.py`. + + ```python + ... 
+ from .gap import GlobalAveragePooling + + __all__ = [ + ..., 'GlobalAveragePooling' + ] + ``` + +3. Modify the config file. + + ```python + model = dict( + neck=dict(type='GlobalAveragePooling'), + ) + ``` + +### Add new heads + +Here we show how to develop a new head with the example of `LinearClsHead` as the following. +To implement a new head, basically we need to implement `forward_train`, which takes the feature maps from necks or backbones as input and compute loss based on ground-truth labels. + +1. Create a new file in `mmcls/models/heads/linear_head.py`. + + ```python + from ..builder import HEADS + from .cls_head import ClsHead + + + @HEADS.register_module() + class LinearClsHead(ClsHead): + + def __init__(self, + num_classes, + in_channels, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, )): + super(LinearClsHead, self).__init__(loss=loss, topk=topk) + self.in_channels = in_channels + self.num_classes = num_classes + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self._init_layers() + + def _init_layers(self): + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def init_weights(self): + normal_init(self.fc, mean=0, std=0.01, bias=0) + + def forward_train(self, x, gt_label): + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label) + return losses + + ``` + +2. Import the module in `mmcls/models/heads/__init__.py`. + + ```python + ... + from .linear_head import LinearClsHead + + __all__ = [ + ..., 'LinearClsHead' + ] + ``` + +3. Modify the config file. + +Together with the added GlobalAveragePooling neck, an entire config for a model is as follows. + +```python +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +``` + +### Add new loss + +To add a new loss function, we mainly implement the `forward` function in the loss module. +In addition, it is helpful to leverage the decorator `weighted_loss` to weight the loss for each element. +Assuming that we want to mimic a probabilistic distribution generated from another classification model, we implement a L1Loss to fulfil the purpose as below. + +1. Create a new file in `mmcls/models/losses/l1_loss.py`. + + ```python + import torch + import torch.nn as nn + + from ..builder import LOSSES + from .utils import weighted_loss + + @weighted_loss + def l1_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + @LOSSES.register_module() + class L1Loss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(L1Loss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * l1_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss + ``` + +2. Import the module in `mmcls/models/losses/__init__.py`. + + ```python + ... + from .l1_loss import L1Loss, l1_loss + + __all__ = [ + ..., 'L1Loss', 'l1_loss' + ] + ``` + +3. 
Modify loss field in the config.
+
+   ```python
+   loss=dict(type='L1Loss', loss_weight=1.0)
+   ```
diff --git a/docs/en/tutorials/runtime.md b/docs/en/tutorials/runtime.md
new file mode 100644
index 0000000..b212744
--- /dev/null
+++ b/docs/en/tutorials/runtime.md
@@ -0,0 +1,257 @@
+# Tutorial 7: Customize Runtime Settings
+
+In this tutorial, we will introduce some methods about how to customize the workflow and hooks when running your own settings for the project.
+
+- [Customize Workflow](#customize-workflow)
+- [Hooks](#hooks)
+  - [Default training hooks](#default-training-hooks)
+  - [Use other implemented hooks](#use-other-implemented-hooks)
+  - [Customize self-implemented hooks](#customize-self-implemented-hooks)
+- [FAQ](#faq)
+
+## Customize Workflow
+
+Workflow is a list of (phase, duration) pairs to specify the running order and duration. The meaning of "duration" depends on the runner's type.
+
+For example, we use the epoch-based runner by default, and the "duration" means how many epochs the phase should be executed in a cycle. Usually,
+we only want to execute the training phase, so we just use the following config.
+
+```python
+workflow = [('train', 1)]
+```
+
+Sometimes we may want to check some metrics (e.g. loss, accuracy) of the model on the validation set.
+In such a case, we can set the workflow as
+
+```python
+[('train', 1), ('val', 1)]
+```
+
+so that 1 epoch of training and 1 epoch of validation will be run iteratively.
+
+By default, we recommend using **`EvalHook`** to do evaluation after each training epoch, but you can still use the `val` workflow as an alternative.
+
+```{note}
+1. The parameters of the model will not be updated during the val epoch.
+2. The keyword `max_epochs` in the config only controls the number of training epochs and does not affect the validation workflow.
+3. Workflows `[('train', 1), ('val', 1)]` and `[('train', 1)]` will not change the behavior of `EvalHook` because `EvalHook` is called by `after_train_epoch`, and the validation workflow only affects hooks that are called through `after_val_epoch`.
+   Therefore, the only difference between `[('train', 1), ('val', 1)]` and `[('train', 1)]` is that the runner will calculate losses on the validation set after each training epoch.
+```
+
+## Hooks
+
+The hook mechanism is widely used in the OpenMMLab open-source algorithm libraries. Combined with the `Runner`, the entire life cycle of the training process can be managed easily. You can learn more about hooks through this [related article](https://www.calltutors.com/blog/what-is-hook/).
+
+Hooks only work after being registered into the runner. At present, hooks are mainly divided into two categories:
+
+- default training hooks
+
+The default training hooks are registered by the runner by default. Generally, they are hooks for some basic functions and have fixed priorities; you don't need to modify their priorities.
+
+- custom hooks
+
+The custom hooks are registered through `custom_hooks`. Generally, they are hooks with enhanced functions. Their priority needs to be specified in the configuration file. If you do not specify the priority of a hook, it will be set to 'NORMAL' by default.
+
+**Priority list**
+
+|      Level      | Value |
+| :-------------: | :---: |
+|     HIGHEST     |   0   |
+|    VERY_HIGH    |  10   |
+|      HIGH       |  30   |
+|  ABOVE_NORMAL   |  40   |
+| NORMAL(default) |  50   |
+|  BELOW_NORMAL   |  60   |
+|       LOW       |  70   |
+|    VERY_LOW     |  90   |
+|     LOWEST      |  100  |
+
+The priority determines the execution order of the hooks.
Before training, the log will print out the execution order of the hooks at each stage to facilitate debugging. + +### default training hooks + +Some common hooks are not registered through `custom_hooks`, they are + +| Hooks | Priority | +| :-------------------: | :---------------: | +| `LrUpdaterHook` | VERY_HIGH (10) | +| `MomentumUpdaterHook` | HIGH (30) | +| `OptimizerHook` | ABOVE_NORMAL (40) | +| `CheckpointHook` | NORMAL (50) | +| `IterTimerHook` | LOW (70) | +| `EvalHook` | LOW (70) | +| `LoggerHook(s)` | VERY_LOW (90) | + +`OptimizerHook`, `MomentumUpdaterHook` and `LrUpdaterHook` have been introduced in [sehedule strategy](./schedule.md). +`IterTimerHook` is used to record elapsed time and does not support modification. + +Here we reveal how to customize `CheckpointHook`, `LoggerHooks`, and `EvalHook`. + +#### CheckpointHook + +The MMCV runner will use `checkpoint_config` to initialize [`CheckpointHook`](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/hooks/checkpoint.py). + +```python +checkpoint_config = dict(interval=1) +``` + +We could set `max_keep_ckpts` to save only a small number of checkpoints or decide whether to store state dict of optimizer by `save_optimizer`. +More details of the arguments are [here](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.CheckpointHook) + +#### LoggerHooks + +The `log_config` wraps multiple logger hooks and enables to set intervals. Now MMCV supports `TextLoggerHook`, `WandbLoggerHook`, `MlflowLoggerHook`, `NeptuneLoggerHook`, `DvcliveLoggerHook` and `TensorboardLoggerHook`. +The detailed usages can be found in the [doc](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook). + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +``` + +#### EvalHook + +The config of `evaluation` will be used to initialize the [`EvalHook`](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/core/evaluation/eval_hooks.py). + +The `EvalHook` has some reserved keys, such as `interval`, `save_best` and `start`, and the other arguments such as `metrics` will be passed to the `dataset.evaluate()` + +```python +evaluation = dict(interval=1, metric='accuracy', metric_options={'topk': (1, )}) +``` + +You can save the model weight when the best verification result is obtained by modifying the parameter `save_best`: + +```python +# "auto" means automatically select the metrics to compare. +# You can also use a specific key like "accuracy_top-1". +evaluation = dict(interval=1, save_best="auto", metric='accuracy', metric_options={'topk': (1, )}) +``` + +When running some large experiments, you can skip the validation step at the beginning of training by modifying the parameter `start` as below: + +```python +evaluation = dict(interval=1, start=200, metric='accuracy', metric_options={'topk': (1, )}) +``` + +This indicates that, before the 200th epoch, evaluations would not be executed. Since the 200th epoch, evaluations would be executed after the training process. + +```{note} +In the default configuration files of MMClassification, the evaluation field is generally placed in the datasets configs. 
+``` + +### Use other implemented hooks + +Some hooks have been already implemented in MMCV and MMClassification, they are: + +- [EMAHook](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/ema.py) +- [SyncBuffersHook](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/sync_buffer.py) +- [EmptyCacheHook](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/memory.py) +- [ProfilerHook](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/profiler.py) +- ...... + +If the hook is already implemented in MMCV, you can directly modify the config to use the hook as below + +```python +mmcv_hooks = [ + dict(type='MMCVHook', a=a_value, b=b_value, priority='NORMAL') +] +``` + +such as using `EMAHook`, interval is 100 iters: + +```python +custom_hooks = [ + dict(type='EMAHook', interval=100, priority='HIGH') +] +``` + +## Customize self-implemented hooks + +### 1. Implement a new hook + +Here we give an example of creating a new hook in MMClassification and using it in training. + +```python +from mmcv.runner import HOOKS, Hook + + +@HOOKS.register_module() +class MyHook(Hook): + + def __init__(self, a, b): + pass + + def before_run(self, runner): + pass + + def after_run(self, runner): + pass + + def before_epoch(self, runner): + pass + + def after_epoch(self, runner): + pass + + def before_iter(self, runner): + pass + + def after_iter(self, runner): + pass +``` + +Depending on the functionality of the hook, the users need to specify what the hook will do at each stage of the training in `before_run`, `after_run`, `before_epoch`, `after_epoch`, `before_iter`, and `after_iter`. + +### 2. Register the new hook + +Then we need to make `MyHook` imported. Assuming the file is in `mmcls/core/utils/my_hook.py` there are two ways to do that: + +- Modify `mmcls/core/utils/__init__.py` to import it. + + The newly defined module should be imported in `mmcls/core/utils/__init__.py` so that the registry will + find the new module and add it: + +```python +from .my_hook import MyHook +``` + +- Use `custom_imports` in the config to manually import it + +```python +custom_imports = dict(imports=['mmcls.core.utils.my_hook'], allow_failed_imports=False) +``` + +### 3. Modify the config + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value) +] +``` + +You can also set the priority of the hook as below: + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value, priority='ABOVE_NORMAL') +] +``` + +By default, the hook's priority is set as `NORMAL` during registration. + +## FAQ + +### 1. `resume_from` and `load_from` and `init_cfg.Pretrained` + +- `load_from` : only imports model weights, which is mainly used to load pre-trained or trained models; + +- `resume_from` : not only import model weights, but also optimizer information, current epoch information, mainly used to continue training from the checkpoint. + +- `init_cfg.Pretrained` : Load weights during weight initialization, and you can specify which module to load. This is usually used when fine-tuning a model, refer to [Tutorial 2: Fine-tune Models](./finetune.md). 
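+As a quick reference, here is a minimal sketch of how these options might look in a config file. The checkpoint URL is the ResNet-50 example used elsewhere in these docs, the work directory path is only a placeholder, and normally only one of `load_from` / `resume_from` is set for a given run:
+
+```python
+# Fine-tuning: load only the pre-trained weights; the epoch counter starts from 0.
+load_from = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth'
+
+# Resuming an interrupted run: weights, optimizer states and the current epoch are all restored.
+# resume_from = 'work_dirs/my_experiment/latest.pth'
+
+# Loading weights for a specific module only, during weight initialization
+# (the `prefix` argument selects the sub-module; check your MMCV version for support).
+model = dict(
+    backbone=dict(
+        init_cfg=dict(
+            type='Pretrained',
+            checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth',
+            prefix='backbone')))
+```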
diff --git a/docs/en/tutorials/schedule.md b/docs/en/tutorials/schedule.md
new file mode 100644
index 0000000..1afc4b7
--- /dev/null
+++ b/docs/en/tutorials/schedule.md
@@ -0,0 +1,341 @@
+# Tutorial 6: Customize Schedule
+
+In this tutorial, we will introduce some methods about how to construct optimizers, customize learning rate and momentum schedules, configure parameters finely, clip and accumulate gradients, and customize self-implemented methods for the project.
+
+- [Customize optimizer supported by PyTorch](#customize-optimizer-supported-by-pytorch)
+- [Customize learning rate schedules](#customize-learning-rate-schedules)
+  - [Learning rate decay](#learning-rate-decay)
+  - [Warmup strategy](#warmup-strategy)
+- [Customize momentum schedules](#customize-momentum-schedules)
+- [Parameter-wise finely configuration](#parameter-wise-finely-configuration)
+- [Gradient clipping and gradient accumulation](#gradient-clipping-and-gradient-accumulation)
+  - [Gradient clipping](#gradient-clipping)
+  - [Gradient accumulation](#gradient-accumulation)
+- [Customize self-implemented methods](#customize-self-implemented-methods)
+  - [Customize self-implemented optimizer](#customize-self-implemented-optimizer)
+  - [Customize optimizer constructor](#customize-optimizer-constructor)
+
+## Customize optimizer supported by PyTorch
+
+We support all the optimizers implemented by PyTorch. To use and modify them, please change the `optimizer` field of config files.
+
+For example, if you want to use `SGD`, the modification could be as follows.
+
+```python
+optimizer = dict(type='SGD', lr=0.0003, weight_decay=0.0001)
+```
+
+To modify the learning rate of the model, just modify the `lr` in the config of the optimizer.
+You can also directly set other arguments according to the [API doc](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) of PyTorch.
+
+For example, if you want to use `Adam` with the setting `torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)` in PyTorch,
+the config should look like this.
+
+```python
+optimizer = dict(type='Adam', lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
+```
+
+## Customize learning rate schedules
+
+### Learning rate decay
+
+Learning rate decay is widely used to improve performance. To use learning rate decay, please set the `lr_config` field in config files.
+
+For example, we use the step policy as the default learning rate decay policy of ResNet, and the config is:
+
+```python
+lr_config = dict(policy='step', step=[100, 150])
+```
+
+Then during training, the program will call [`StepLRHook`](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L153) periodically to update the learning rate.
+
+We also support many other learning rate schedules [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py), such as the `CosineAnnealing` and `Poly` schedules. Here are some examples:
+
+- CosineAnnealing schedule:
+
+  ```python
+  lr_config = dict(
+      policy='CosineAnnealing',
+      warmup='linear',
+      warmup_iters=1000,
+      warmup_ratio=1.0 / 10,
+      min_lr_ratio=1e-5)
+  ```
+
+- Poly schedule:
+
+  ```python
+  lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+  ```
+
+### Warmup strategy
+
+In the early stages of training, the training is prone to being volatile, and warmup is a technique
+to reduce this volatility. With warmup, the learning rate increases gradually
+from a small initial value to the expected value.
+
+In MMClassification, we use `lr_config` to configure the warmup strategy. The main parameters are as follows:
+
+- `warmup`: The warmup curve type. Please choose one from 'constant', 'linear', 'exp' and `None`, where `None` disables warmup.
+- `warmup_by_epoch`: whether to warm up by epoch. Defaults to True; if set to False, warmup is counted by iteration.
+- `warmup_iters`: the number of warm-up iterations. When `warmup_by_epoch=True`, the unit is epochs; when `warmup_by_epoch=False`, the unit is iterations.
+- `warmup_ratio`: the initial warm-up learning rate is calculated as `lr = lr * warmup_ratio`.
+
+Here are some examples:
+
+1. linear & warmup by iter
+
+   ```python
+   lr_config = dict(
+       policy='CosineAnnealing',
+       by_epoch=False,
+       min_lr_ratio=1e-2,
+       warmup='linear',
+       warmup_ratio=1e-3,
+       warmup_iters=20 * 1252,
+       warmup_by_epoch=False)
+   ```
+
+2. exp & warmup by epoch
+
+   ```python
+   lr_config = dict(
+       policy='CosineAnnealing',
+       min_lr=0,
+       warmup='exp',
+       warmup_iters=5,
+       warmup_ratio=0.1,
+       warmup_by_epoch=True)
+   ```
+
+```{tip}
+After completing your configuration file, you can use the [learning rate visualization tool](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#learning-rate-schedule-visualization) to draw the corresponding learning rate adjustment curve.
+```
+
+## Customize momentum schedules
+
+We support the momentum scheduler to modify the model's momentum according to the learning rate, which could make the model converge faster.
+
+The momentum scheduler is usually used together with the LR scheduler. For example, the following config is used to accelerate convergence.
+For more details, please refer to the implementation of [CyclicLrUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327)
+and [CyclicMomentumUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130).
+
+Here is an example:
+
+```python
+lr_config = dict(
+    policy='cyclic',
+    target_ratio=(10, 1e-4),
+    cyclic_times=1,
+    step_ratio_up=0.4,
+)
+momentum_config = dict(
+    policy='cyclic',
+    target_ratio=(0.85 / 0.95, 1),
+    cyclic_times=1,
+    step_ratio_up=0.4,
+)
+```
+
+## Parameter-wise finely configuration
+
+Some models may need parameter-specific settings for optimization, for example, no weight decay for the BatchNorm layers or different learning rates for different network layers.
+To configure them finely, we can use the `paramwise_cfg` option in `optimizer`.
+
+We provide some examples here; for more usages, refer to [DefaultOptimizerConstructor](https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/optimizer/default_constructor.html#DefaultOptimizerConstructor).
+
+- Using specified options
+
+  The `DefaultOptimizerConstructor` provides options including `bias_lr_mult`, `bias_decay_mult`, `norm_decay_mult`, `dwconv_decay_mult`, `dcn_offset_lr_mult` and `bypass_duplicate` to configure special optimizer behaviors of bias, normalization, depth-wise convolution, deformable convolution and duplicated parameters. E.g.:
+
+  1. 
No weight decay to the BatchNorm layer + + ```python + optimizer = dict( + type='SGD', + lr=0.8, + weight_decay=1e-4, + paramwise_cfg=dict(norm_decay_mult=0.)) + ``` + +- Using `custom_keys` dict + + MMClassification can use `custom_keys` to specify different parameters to use different learning rates or weight decays, for example: + + 1. No weight decay for specific parameters + + ```python + paramwise_cfg = dict( + custom_keys={ + 'backbone.cls_token': dict(decay_mult=0.0), + 'backbone.pos_embed': dict(decay_mult=0.0) + }) + + optimizer = dict( + type='SGD', + lr=0.8, + weight_decay=1e-4, + paramwise_cfg=paramwise_cfg) + ``` + + 2. Using a smaller learning rate and a weight decay for the backbone layers + + ```python + optimizer = dict( + type='SGD', + lr=0.8, + weight_decay=1e-4, + # 'lr' for backbone and 'weight_decay' are 0.1 * lr and 0.9 * weight_decay + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=0.9)})) + ``` + +## Gradient clipping and gradient accumulation + +Besides the basic function of PyTorch optimizers, we also provide some enhancement functions, such as gradient clipping, gradient accumulation, etc., refer to [MMCV](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py). + +### Gradient clipping + +During the training process, the loss function may get close to a cliffy region and cause gradient explosion. And gradient clipping is helpful to stabilize the training process. More introduction can be found in [this page](https://paperswithcode.com/method/gradient-clipping). + +Currently we support `grad_clip` option in `optimizer_config`, and the arguments refer to [PyTorch Documentation](https://pytorch.org/docs/stable/generated/torch.nn.utils.clip_grad_norm_.html). + +Here is an example: + +```python +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# norm_type: type of the used p-norm, here norm_type is 2. +``` + +When inheriting from base and modifying configs, if `grad_clip=None` in base, `_delete_=True` is needed. For more details about `_delete_` you can refer to [TUTORIAL 1: LEARN ABOUT CONFIGS](https://mmclassification.readthedocs.io/en/latest/tutorials/config.html#ignore-some-fields-in-the-base-configs). For example, + +```python +_base_ = [./_base_/schedules/imagenet_bs256_coslr.py] + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2), _delete_=True, type='OptimizerHook') +# you can ignore type if type is 'OptimizerHook', otherwise you must add "type='xxxxxOptimizerHook'" here +``` + +### Gradient accumulation + +When computing resources are lacking, the batch size can only be set to a small value, which may affect the performance of models. Gradient accumulation can be used to solve this problem. + +Here is an example: + +```python +data = dict(samples_per_gpu=64) +optimizer_config = dict(type="GradientCumulativeOptimizerHook", cumulative_iters=4) +``` + +Indicates that during training, back-propagation is performed every 4 iters. And the above is equivalent to: + +```python +data = dict(samples_per_gpu=256) +optimizer_config = dict(type="OptimizerHook") +``` + +```{note} +When the optimizer hook type is not specified in `optimizer_config`, `OptimizerHook` is used by default. +``` + +## Customize self-implemented methods + +In academic research and industrial practice, it may be necessary to use optimization methods not implemented by MMClassification, and you can add them through the following methods. 
+ +```{note} +This part will modify the MMClassification source code or add code to the MMClassification framework, beginners can skip it. +``` + +### Customize self-implemented optimizer + +#### 1. Define a new optimizer + +A customized optimizer could be defined as below. + +Assume you want to add an optimizer named `MyOptimizer`, which has arguments `a`, `b`, and `c`. +You need to create a new directory named `mmcls/core/optimizer`. +And then implement the new optimizer in a file, e.g., in `mmcls/core/optimizer/my_optimizer.py`: + +```python +from mmcv.runner import OPTIMIZERS +from torch.optim import Optimizer + + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c): + +``` + +#### 2. Add the optimizer to registry + +To find the above module defined above, this module should be imported into the main namespace at first. There are two ways to achieve it. + +- Modify `mmcls/core/optimizer/__init__.py` to import it into `optimizer` package, and then modify `mmcls/core/__init__.py` to import the new `optimizer` package. + + Create the `mmcls/core/optimizer` folder and the `mmcls/core/optimizer/__init__.py` file if they don't exist. The newly defined module should be imported in `mmcls/core/optimizer/__init__.py` and `mmcls/core/__init__.py` so that the registry will find the new module and add it: + +```python +# In mmcls/core/optimizer/__init__.py +from .my_optimizer import MyOptimizer # MyOptimizer maybe other class name + +__all__ = ['MyOptimizer'] +``` + +```python +# In mmcls/core/__init__.py +... +from .optimizer import * # noqa: F401, F403 +``` + +- Use `custom_imports` in the config to manually import it + +```python +custom_imports = dict(imports=['mmcls.core.optimizer.my_optimizer'], allow_failed_imports=False) +``` + +The module `mmcls.core.optimizer.my_optimizer` will be imported at the beginning of the program and the class `MyOptimizer` is then automatically registered. +Note that only the package containing the class `MyOptimizer` should be imported. `mmcls.core.optimizer.my_optimizer.MyOptimizer` **cannot** be imported directly. + +#### 3. Specify the optimizer in the config file + +Then you can use `MyOptimizer` in `optimizer` field of config files. +In the configs, the optimizers are defined by the field `optimizer` like the following: + +```python +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +To use your own optimizer, the field can be changed to + +```python +optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) +``` + +### Customize optimizer constructor + +Some models may have some parameter-specific settings for optimization, e.g. weight decay for BatchNorm layers. + +Although our `DefaultOptimizerConstructor` is powerful, it may still not cover your need. If that, you can do those fine-grained parameter tuning through customizing optimizer constructor. + +```python +from mmcv.runner.optimizer import OPTIMIZER_BUILDERS + + +@OPTIMIZER_BUILDERS.register_module() +class MyOptimizerConstructor: + + def __init__(self, optimizer_cfg, paramwise_cfg=None): + pass + + def __call__(self, model): + ... # Construct your optimzier here. + return my_optimizer +``` + +The default optimizer constructor is implemented [here](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/optimizer/default_constructor.py#L11), which could also serve as a template for new optimizer constructor. 
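+As a minimal sketch (assuming the constructor is registered as above; the exact keys may depend on your MMCV version), the custom constructor is then selected through the `constructor` field of the optimizer config, and extra options such as `paramwise_cfg` are forwarded to it:
+
+```python
+optimizer = dict(
+    type='SGD',
+    lr=0.01,
+    momentum=0.9,
+    weight_decay=0.0001,
+    # use the constructor registered above instead of DefaultOptimizerConstructor
+    constructor='MyOptimizerConstructor',
+    # how this dict is interpreted is entirely up to MyOptimizerConstructor.__call__;
+    # the 'head' key below is only a placeholder for illustration
+    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.0)}))
+```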
diff --git a/docs/zh_CN/Makefile b/docs/zh_CN/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/docs/zh_CN/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh_CN/_static/css/readthedocs.css b/docs/zh_CN/_static/css/readthedocs.css new file mode 100644 index 0000000..577a67a --- /dev/null +++ b/docs/zh_CN/_static/css/readthedocs.css @@ -0,0 +1,27 @@ +.header-logo { + background-image: url("../image/mmcls-logo.png"); + background-size: 204px 40px; + height: 40px; + width: 204px; +} + +pre { + white-space: pre; +} + +article.pytorch-article section code { + padding: .2em .4em; + background-color: #f3f4f7; + border-radius: 5px; +} + +/* Disable the change in tables */ +article.pytorch-article section table code { + padding: unset; + background-color: unset; + border-radius: unset; +} + +table.autosummary td { + width: 50% +} diff --git a/docs/zh_CN/_static/image/mmcls-logo.png b/docs/zh_CN/_static/image/mmcls-logo.png new file mode 100644 index 0000000..6e65420 Binary files /dev/null and b/docs/zh_CN/_static/image/mmcls-logo.png differ diff --git a/docs/zh_CN/_static/image/tools/analysis/analyze_log.jpg b/docs/zh_CN/_static/image/tools/analysis/analyze_log.jpg new file mode 100644 index 0000000..8eb1a27 Binary files /dev/null and b/docs/zh_CN/_static/image/tools/analysis/analyze_log.jpg differ diff --git a/docs/zh_CN/_static/image/tools/visualization/lr_schedule1.png b/docs/zh_CN/_static/image/tools/visualization/lr_schedule1.png new file mode 100644 index 0000000..31fca35 Binary files /dev/null and b/docs/zh_CN/_static/image/tools/visualization/lr_schedule1.png differ diff --git a/docs/zh_CN/_static/image/tools/visualization/lr_schedule2.png b/docs/zh_CN/_static/image/tools/visualization/lr_schedule2.png new file mode 100644 index 0000000..8c6231d Binary files /dev/null and b/docs/zh_CN/_static/image/tools/visualization/lr_schedule2.png differ diff --git a/docs/zh_CN/_static/js/custom.js b/docs/zh_CN/_static/js/custom.js new file mode 100644 index 0000000..44a4057 --- /dev/null +++ b/docs/zh_CN/_static/js/custom.js @@ -0,0 +1 @@ +var collapsedSections = ['Model zoo']; diff --git a/docs/zh_CN/api b/docs/zh_CN/api new file mode 120000 index 0000000..0ef434a --- /dev/null +++ b/docs/zh_CN/api @@ -0,0 +1 @@ +../en/api \ No newline at end of file diff --git a/docs/zh_CN/changelog.md b/docs/zh_CN/changelog.md new file mode 120000 index 0000000..6b731cd --- /dev/null +++ b/docs/zh_CN/changelog.md @@ -0,0 +1 @@ +../en/changelog.md \ No newline at end of file diff --git a/docs/zh_CN/community/CONTRIBUTING.md b/docs/zh_CN/community/CONTRIBUTING.md new file mode 100644 index 0000000..5554800 --- /dev/null +++ b/docs/zh_CN/community/CONTRIBUTING.md @@ -0,0 +1,62 @@ +# 参与贡献 OpenMMLab + +欢迎任何类型的贡献,包括但不限于 + +- 修改拼写错误或代码错误 +- 添加文档或将文档翻译成其他语言 +- 添加新功能和新组件 + +## 工作流程 + +1. fork 并 pull 最新的 OpenMMLab 仓库 (MMClassification) +2. 签出到一个新分支(不要使用 master 分支提交 PR) +3. 
进行修改并提交至 fork 出的自己的远程仓库 +4. 在我们的仓库中创建一个 PR + +```{note} +如果你计划添加一些新的功能,并引入大量改动,请尽量首先创建一个 issue 来进行讨论。 +``` + +## 代码风格 + +### Python + +我们采用 [PEP8](https://www.python.org/dev/peps/pep-0008/) 作为统一的代码风格。 + +我们使用下列工具来进行代码风格检查与格式化: + +- [flake8](https://github.com/PyCQA/flake8): Python 官方发布的代码规范检查工具,是多个检查工具的封装 +- [isort](https://github.com/timothycrosley/isort): 自动调整模块导入顺序的工具 +- [yapf](https://github.com/google/yapf): 一个 Python 文件的格式化工具。 +- [codespell](https://github.com/codespell-project/codespell): 检查单词拼写是否有误 +- [mdformat](https://github.com/executablebooks/mdformat): 检查 markdown 文件的工具 +- [docformatter](https://github.com/myint/docformatter): 一个 docstring 格式化工具。 + +yapf 和 isort 的格式设置位于 [setup.cfg](https://github.com/open-mmlab/mmclassification/blob/master/setup.cfg) + +我们使用 [pre-commit hook](https://pre-commit.com/) 来保证每次提交时自动进行代 +码检查和格式化,启用的功能包括 `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, 修复 `end-of-files`, `double-quoted-strings`, +`python-encoding-pragma`, `mixed-line-ending`, 对 `requirments.txt`的排序等。 +pre-commit hook 的配置文件位于 [.pre-commit-config](https://github.com/open-mmlab/mmclassification/blob/master/.pre-commit-config.yaml) + +在你克隆仓库后,你需要按照如下步骤安装并初始化 pre-commit hook。 + +```shell +pip install -U pre-commit +``` + +在仓库文件夹中执行 + +```shell +pre-commit install +``` + +在此之后,每次提交,代码规范检查和格式化工具都将被强制执行。 + +```{important} +在创建 PR 之前,请确保你的代码完成了代码规范检查,并经过了 yapf 的格式化。 +``` + +### C++ 和 CUDA + +C++ 和 CUDA 的代码规范遵从 [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) diff --git a/docs/zh_CN/compatibility.md b/docs/zh_CN/compatibility.md new file mode 100644 index 0000000..178e555 --- /dev/null +++ b/docs/zh_CN/compatibility.md @@ -0,0 +1,7 @@ +# 0.x 相关兼容性问题 + +## MMClassification 0.20.1 + +### MMCV 兼容性 + +在 Twins 骨干网络中,我们使用了 MMCV 提供的 `PatchEmbed` 模块,该模块是在 MMCV 1.4.2 版本加入的,因此我们需要将 MMCV 依赖版本升至 1.4.2。 diff --git a/docs/zh_CN/conf.py b/docs/zh_CN/conf.py new file mode 100644 index 0000000..31d94e5 --- /dev/null +++ b/docs/zh_CN/conf.py @@ -0,0 +1,241 @@ +# flake8: noqa +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('../..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMClassification' +copyright = '2020, OpenMMLab' +author = 'MMClassification Authors' + +# The full version, including alpha/beta/rc tags +version_file = '../../mmcls/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_copybutton', +] + +autodoc_mock_imports = ['mmcv._ext', 'matplotlib'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +language = 'zh_CN' + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# yapf: disable +html_theme_options = { + 'logo_url': 'https://mmclassification.readthedocs.io/zh_CN/latest/', + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmclassification' + }, + { + 'name': 'Colab 教程', + 'children': [ + { + 'name': '用命令行工具训练和推理', + 'url': 'https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/MMClassification_tools_cn.ipynb', + }, + { + 'name': '用 Python API 训练和推理', + 'url': 'https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/MMClassification_python_cn.ipynb', + }, + ] + }, + { + 'name': '版本', + 'children': [ + { + 'name': 'MMClassification 0.x', + 'url': 'https://mmclassification.readthedocs.io/zh_CN/latest/', + 'description': 'master 分支' + }, + { + 'name': 'MMClassification 1.x', + 'url': 'https://mmclassification.readthedocs.io/zh_CN/dev-1.x/', + 'description': '1.x 分支' + }, + ], + } + ], + # Specify the language of shared menu + 'menu_lang': 'cn', + 'header_note': { + 'content': + '您正在阅读 MMClassification 0.x 版本的文档。MMClassification 0.x 会在 2022 年末' + '被切换为次要分支。建议您升级到 MMClassification 1.0 版本,体验更多新特性和新功能。' + '请查阅 MMClassification 1.0 的' + '安装教程、' + '迁移教程' + '以及更新日志。', + } +} +# yapf: enable + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] +html_js_files = ['js/custom.js'] + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'mmclsdoc' + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. 
List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'mmcls.tex', 'MMClassification Documentation', author, + 'manual'), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, 'mmcls', 'MMClassification Documentation', [author], + 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'mmcls', 'MMClassification Documentation', author, 'mmcls', + 'OpenMMLab image classification toolbox and benchmark.', 'Miscellaneous'), +] + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True +# Auto-generated header anchors +myst_heading_anchors = 3 +# Configuration for intersphinx +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable', None), + 'torch': ('https://pytorch.org/docs/stable/', None), + 'mmcv': ('https://mmcv.readthedocs.io/zh_CN/latest/', None), +} + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.add_config_value('no_underscore_emphasis', False, 'env') + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/zh_CN/device/npu.md b/docs/zh_CN/device/npu.md new file mode 100644 index 0000000..7adcf0e --- /dev/null +++ b/docs/zh_CN/device/npu.md @@ -0,0 +1,34 @@ +# NPU (华为昇腾) + +## 使用方法 + +首先,请参考 {external+mmcv:doc}`教程 ` 安装带有 NPU 支持的 MMCV。 + +使用如下命令,可以利用 8 个 NPU 在机器上训练模型(以 ResNet 为例): + +```shell +bash tools/dist_train.sh configs/cspnet/resnet50_8xb32_in1k.py 8 --device npu +``` + +或者,使用如下命令,在一个 NPU 上训练模型(以 ResNet 为例): + +```shell +python tools/train.py configs/cspnet/resnet50_8xb32_in1k.py --device npu +``` + +## 经过验证的模型 + +| 模型 | Top-1 (%) | Top-5 (%) | 配置文件 | 相关下载 | +| :--------------------------------------------------------: | :-------: | :-------: | :------------------------------------------------------------: | :------------------------------------------------------------: | +| [CSPResNeXt50](../papers/cspnet.md) | 77.10 | 93.55 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/cspnet/cspresnext50_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/cspresnext50_8xb32_in1k.log.json) | +| [DenseNet121](../papers/densenet.md) | 72.62 | 91.04 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/densenet/densenet121_4xb256_in1k.py) | [model](<>) \| 
[log](https://download.openmmlab.com/mmclassification/v0/device/npu/densenet121_4xb256_in1k.log.json) | +| [EfficientNet-B4(AA + AdvProp)](../papers/efficientnet.md) | 75.55 | 92.86 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/efficientnet-b4_8xb32-01norm_in1k.log.json) | +| [HRNet-W18](../papers/hrnet.md) | 77.01 | 93.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/hrnet-w18_4xb32_in1k.log.json) | +| [ResNetV1D-152](../papers/resnet.md) | 77.11 | 94.54 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d152_8xb32_in1k.py) | [model](<>) \| [log](<>) | +| [ResNet-50](../papers/resnet.md) | 76.40 | - | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb32_in1k.py) | [model](<>) \| [log](<>) | +| [ResNetXt-32x4d-50](../papers/resnext.md) | 77.55 | 93.75 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext50-32x4d_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/resnext50-32x4d_8xb32_in1k.log.json) | +| [SE-ResNet-50](../papers/seresnet.md) | 77.64 | 93.76 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet50_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/seresnet50_8xb32_in1k.log.json) | +| [VGG-11](../papers/vgg.md) | 68.92 | 88.83 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v0/device/npu/vgg11_8xb32_in1k.log.json) | +| [ShuffleNetV2 1.0x](../papers/shufflenet_v2.md) | 69.53 | 88.82 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | [model](<>) \| [log](<>) | + +**以上所有模型权重及训练日志均由华为昇腾团队提供** diff --git a/docs/zh_CN/docutils.conf b/docs/zh_CN/docutils.conf new file mode 100644 index 0000000..0c00c84 --- /dev/null +++ b/docs/zh_CN/docutils.conf @@ -0,0 +1,2 @@ +[html writers] +table_style: colwidths-auto diff --git a/docs/zh_CN/faq.md b/docs/zh_CN/faq.md new file mode 100644 index 0000000..03243a2 --- /dev/null +++ b/docs/zh_CN/faq.md @@ -0,0 +1,74 @@ +# 常见问题 + +我们在这里列出了一些常见问题及其相应的解决方案。如果您发现任何常见问题并有方法 +帮助解决,欢迎随时丰富列表。如果这里的内容没有涵盖您的问题,请按照 +[提问模板](https://github.com/open-mmlab/mmclassification/issues/new/choose) +在 GitHub 上提出问题,并补充模板中需要的信息。 + +## 安装 + +- MMCV 与 MMClassification 的兼容问题。如遇到 + "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." 
+ + 这里我们列举了各版本 MMClassification 对 MMCV 版本的依赖,请选择合适的 MMCV + 版本来避免安装和使用中的问题。 + + | MMClassification version | MMCV version | + | :----------------------: | :--------------------: | + | dev | mmcv>=1.7.0, \<1.9.0 | + | 0.25.0 (master) | mmcv>=1.4.2, \<1.9.0 | + | 0.24.1 | mmcv>=1.4.2, \<1.9.0 | + | 0.23.2 | mmcv>=1.4.2, \<1.7.0 | + | 0.22.1 | mmcv>=1.4.2, \<1.6.0 | + | 0.21.0 | mmcv>=1.4.2, \<=1.5.0 | + | 0.20.1 | mmcv>=1.4.2, \<=1.5.0 | + | 0.19.0 | mmcv>=1.3.16, \<=1.5.0 | + | 0.18.0 | mmcv>=1.3.16, \<=1.5.0 | + | 0.17.0 | mmcv>=1.3.8, \<=1.5.0 | + | 0.16.0 | mmcv>=1.3.8, \<=1.5.0 | + | 0.15.0 | mmcv>=1.3.8, \<=1.5.0 | + | 0.15.0 | mmcv>=1.3.8, \<=1.5.0 | + | 0.14.0 | mmcv>=1.3.8, \<=1.5.0 | + | 0.13.0 | mmcv>=1.3.8, \<=1.5.0 | + | 0.12.0 | mmcv>=1.3.1, \<=1.5.0 | + | 0.11.1 | mmcv>=1.3.1, \<=1.5.0 | + | 0.11.0 | mmcv>=1.3.0 | + | 0.10.0 | mmcv>=1.3.0 | + | 0.9.0 | mmcv>=1.1.4 | + | 0.8.0 | mmcv>=1.1.4 | + | 0.7.0 | mmcv>=1.1.4 | + | 0.6.0 | mmcv>=1.1.4 | + + ```{note} + 由于 `dev` 分支处于频繁开发中,MMCV 版本依赖可能不准确。如果您在使用 + `dev` 分支时遇到问题,请尝试更新 MMCV 到最新版。 + ``` + +- 使用 Albumentations + + 如果你希望使用 `albumentations` 相关的功能,我们建议使用 `pip install -r requirements/optional.txt` 或者 + `pip install -U albumentations>=0.3.2 --no-binary qudida,albumentations` 命令进行安装。 + + 如果你直接使用 `pip install albumentations>=0.3.2` 来安装,它会同时安装 `opencv-python-headless` + (即使你已经安装了 `opencv-python`)。具体细节可参阅 + [官方文档](https://albumentations.ai/docs/getting_started/installation/#note-on-opencv-dependencies)。 + +## 开发 + +- 如果我对源码进行了改动,需要重新安装以使改动生效吗? + + 如果你遵照[最佳实践](install.md)的指引,从源码安装 mmcls,那么任何本地修改都不需要重新安装即可生效。 + +- 如何在多个 MMClassification 版本下进行开发? + + 通常来说,我们推荐通过不同虚拟环境来管理多个开发目录下的 MMClassification。 + 但如果你希望在不同目录(如 mmcls-0.21, mmcls-0.23 等)使用同一个环境进行开发, + 我们提供的训练和测试 shell 脚本会自动使用当前目录的 mmcls,其他 Python 脚本 + 则可以在命令前添加 `` PYTHONPATH=`pwd` `` 来使用当前目录的代码。 + + 反过来,如果你希望 shell 脚本使用环境中安装的 MMClassification,而不是当前目录的, + 则可以去掉 shell 脚本中如下一行代码: + + ```shell + PYTHONPATH="$(dirname $0)/..":$PYTHONPATH + ``` diff --git a/docs/zh_CN/getting_started.md b/docs/zh_CN/getting_started.md new file mode 100644 index 0000000..d3e9899 --- /dev/null +++ b/docs/zh_CN/getting_started.md @@ -0,0 +1,266 @@ +# 基础教程 + +本文档提供 MMClassification 相关用法的基本教程。 + +## 准备数据集 + +MMClassification 建议用户将数据集根目录链接到 `$MMCLASSIFICATION/data` 下。 +如果用户的文件夹结构与默认结构不同,则需要在配置文件中进行对应路径的修改。 + +``` +mmclassification +├── mmcls +├── tools +├── configs +├── docs +├── data +│ ├── imagenet +│ │ ├── meta +│ │ ├── train +│ │ ├── val +│ ├── cifar +│ │ ├── cifar-10-batches-py +│ ├── mnist +│ │ ├── train-images-idx3-ubyte +│ │ ├── train-labels-idx1-ubyte +│ │ ├── t10k-images-idx3-ubyte +│ │ ├── t10k-labels-idx1-ubyte + +``` + +对于 ImageNet,其存在多个版本,但最为常用的一个是 [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/),可以通过以下步骤获取该数据集。 + +1. 注册账号并登录 [下载页面](http://www.image-net.org/download-images) +2. 获取 ILSVRC2012 下载链接并下载以下文件 + - ILSVRC2012_img_train.tar (~138GB) + - ILSVRC2012_img_val.tar (~6.3GB) +3. 解压下载的文件 +4. 
使用 [该脚本](https://github.com/BVLC/caffe/blob/master/data/ilsvrc12/get_ilsvrc_aux.sh) 获取元数据 + +对于 MNIST,CIFAR10 和 CIFAR100,程序将会在需要的时候自动下载数据集。 + +对于用户自定义数据集的准备,请参阅 [教程 3:如何自定义数据集 +](tutorials/new_dataset.md) + +## 使用预训练模型进行推理 + +MMClassification 提供了一些脚本用于进行单张图像的推理、数据集的推理和数据集的测试(如 ImageNet 等) + +### 单张图像的推理 + +```shell +python demo/image_demo.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} + +# Example +python demo/image_demo.py demo/demo.JPEG configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth +``` + +### 数据集的推理与测试 + +- 支持单 GPU +- 支持 CPU +- 支持单节点多 GPU +- 支持多节点 + +用户可使用以下命令进行数据集的推理: + +```shell +# 单 GPU +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] + +# CPU: 禁用 GPU 并运行单 GPU 测试脚本 +export CUDA_VISIBLE_DEVICES=-1 +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] + +# 多 GPU +./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--metrics ${METRICS}] [--out ${RESULT_FILE}] + +# 基于 slurm 分布式环境的多节点 +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] --launcher slurm +``` + +可选参数: + +- `RESULT_FILE`:输出结果的文件名。如果未指定,结果将不会保存到文件中。支持 json, yaml, pickle 格式。 +- `METRICS`:数据集测试指标,如准确率 (accuracy), 精确率 (precision), 召回率 (recall) 等 + +例子: + +在 ImageNet 验证集上,使用 ResNet-50 进行推理并获得预测标签及其对应的预测得分。 + +```shell +python tools/test.py configs/resnet/resnet50_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth \ + --out result.pkl +``` + +## 模型训练 + +MMClassification 使用 `MMDistributedDataParallel` 进行分布式训练,使用 `MMDataParallel` 进行非分布式训练。 + +所有的输出(日志文件和模型权重文件)会被将保存到工作目录下。工作目录通过配置文件中的参数 `work_dir` 指定。 + +默认情况下,MMClassification 在每个周期后会在验证集上评估模型,可以通过在训练配置中修改 `interval` 参数来更改评估间隔 + +```python +evaluation = dict(interval=12) # 每进行 12 轮训练后评估一次模型 +``` + +### 使用单个 GPU 进行训练 + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +如果用户想在命令中指定工作目录,则需要增加参数 `--work-dir ${YOUR_WORK_DIR}` + +### 使用 CPU 训练 + +使用 CPU 训练的流程和使用单 GPU 训练的流程一致,我们仅需要在训练流程开始前禁用 GPU。 + +```shell +export CUDA_VISIBLE_DEVICES=-1 +``` + +之后运行单 GPU 训练脚本即可。 + +```{warning} +我们不推荐用户使用 CPU 进行训练,这太过缓慢。我们支持这个功能是为了方便用户在没有 GPU 的机器上进行调试。 +``` + +### 使用单台机器多个 GPU 进行训练 + +```shell +./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments] +``` + +可选参数为: + +- `--no-validate` (**不建议**): 默认情况下,程序将会在训练期间的每 k (默认为 1) 个周期进行一次验证。要禁用这一功能,使用 `--no-validate` +- `--work-dir ${WORK_DIR}`:覆盖配置文件中指定的工作目录。 +- `--resume-from ${CHECKPOINT_FILE}`:从以前的模型权重文件恢复训练。 + +`resume-from` 和 `load-from` 的不同点: +`resume-from` 加载模型参数和优化器状态,并且保留检查点所在的周期数,常被用于恢复意外被中断的训练。 +`load-from` 只加载模型参数,但周期数从 0 开始计数,常被用于微调模型。 + +### 使用多台机器进行训练 + +如果您想使用由 ethernet 连接起来的多台机器, 您可以使用以下命令: + +在第一台机器上: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS +``` + +在第二台机器上: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS +``` + +但是,如果您不使用高速网路连接这几台机器的话,训练将会非常慢。 + +如果用户在 [slurm](https://slurm.schedmd.com/) 集群上运行 MMClassification,可使用 `slurm_train.sh` 脚本。(该脚本也支持单台机器上进行训练) + +```shell +[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} +``` + +用户可以在 [slurm_train.sh](https://github.com/open-mmlab/mmclassification/blob/master/tools/slurm_train.sh) 中检查所有的参数和环境变量 + +如果用户的多台机器通过 Ethernet 连接,则可以参考 pytorch [launch 
utility](https://pytorch.org/docs/stable/distributed.html#launch-utility)。如果用户没有高速网络,如 InfiniBand,速度将会非常慢。 + +### 使用单台机器启动多个任务 + +如果用使用单台机器启动多个任务,如在有 8 块 GPU 的单台机器上启动 2 个需要 4 块 GPU 的训练任务,则需要为每个任务指定不同端口,以避免通信冲突。 + +如果用户使用 `dist_train.sh` 脚本启动训练任务,则可以通过以下命令指定端口 + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +如果用户在 slurm 集群下启动多个训练任务,则需要修改配置文件中的 `dist_params` 变量,以设置不同的通信端口。 + +在 `config1.py` 中, + +```python +dist_params = dict(backend='nccl', port=29500) +``` + +在 `config2.py` 中, + +```python +dist_params = dict(backend='nccl', port=29501) +``` + +之后便可启动两个任务,分别对应 `config1.py` 和 `config2.py`。 + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} +CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} +``` + +## 实用工具 + +我们在 `tools/` 目录下提供的一些对训练和测试十分有用的工具 + +### 计算 FLOPs 和参数量(试验性的) + +我们根据 [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) 提供了一个脚本用于计算给定模型的 FLOPs 和参数量 + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +用户将获得如下结果: + +``` +============================== +Input shape: (3, 224, 224) +Flops: 4.12 GFLOPs +Params: 25.56 M +============================== +``` + +```{warning} +此工具仍处于试验阶段,我们不保证该数字正确无误。您最好将结果用于简单比较,但在技术报告或论文中采用该结果之前,请仔细检查。 +- FLOPs 与输入的尺寸有关,而参数量与输入尺寸无关。默认输入尺寸为 (1, 3, 224, 224) +- 一些运算不会被计入 FLOPs 的统计中,例如 GN 和自定义运算。详细信息请参考 [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) +``` + +### 模型发布 + +在发布模型之前,你也许会需要 + +1. 转换模型权重至 CPU 张量 +2. 删除优化器状态 +3. 计算模型权重文件的哈希值,并添加至文件名之后 + +```shell +python tools/convert_models/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +例如: + +```shell +python tools/convert_models/publish_model.py work_dirs/resnet50/latest.pth imagenet_resnet50.pth +``` + +最终输出的文件名将会是 `imagenet_resnet50_{date}-{hash id}.pth` + +## 详细教程 + +目前,MMClassification 提供以下几种更详细的教程: + +- [如何编写配置文件](tutorials/config.md) +- [如何微调模型](tutorials/finetune.md) +- [如何增加新数据集](tutorials/new_dataset.md) +- [如何设计数据处理流程](tutorials/data_pipeline.md) +- [如何增加新模块](tutorials/new_modules.md) +- [如何自定义优化策略](tutorials/schedule.md) +- [如何自定义运行参数](tutorials/runtime.md)。 diff --git a/docs/zh_CN/imgs/qq_group_qrcode.jpg b/docs/zh_CN/imgs/qq_group_qrcode.jpg new file mode 100644 index 0000000..7c6b04f Binary files /dev/null and b/docs/zh_CN/imgs/qq_group_qrcode.jpg differ diff --git a/docs/zh_CN/imgs/zhihu_qrcode.jpg b/docs/zh_CN/imgs/zhihu_qrcode.jpg new file mode 100644 index 0000000..c745fb0 Binary files /dev/null and b/docs/zh_CN/imgs/zhihu_qrcode.jpg differ diff --git a/docs/zh_CN/index.rst b/docs/zh_CN/index.rst new file mode 100644 index 0000000..1de4b82 --- /dev/null +++ b/docs/zh_CN/index.rst @@ -0,0 +1,99 @@ +欢迎来到 MMClassification 中文教程! +========================================== + +You can switch between Chinese and English documentation in the lower-left corner of the layout. + +您可以在页面左下角切换中英文文档。 + +.. toctree:: + :maxdepth: 1 + :caption: 开始你的第一步 + + install.md + getting_started.md + + +.. toctree:: + :maxdepth: 1 + :caption: 教程 + + tutorials/config.md + tutorials/finetune.md + tutorials/new_dataset.md + tutorials/data_pipeline.md + tutorials/new_modules.md + tutorials/schedule.md + tutorials/runtime.md + + +.. 
toctree:: + :maxdepth: 1 + :caption: 模型库 + :glob: + + modelzoo_statistics.md + model_zoo.md + papers/* + + +.. toctree:: + :maxdepth: 1 + :caption: 实用工具 + + tools/pytorch2onnx.md + tools/onnx2tensorrt.md + tools/pytorch2torchscript.md + tools/model_serving.md + tools/visualization.md + tools/analysis.md + tools/miscellaneous.md + + +.. toctree:: + :maxdepth: 1 + :caption: 社区 + + community/CONTRIBUTING.md + + +.. toctree:: + :caption: API 参考文档 + + mmcls.apis + mmcls.core + mmcls.models + mmcls.models.utils + mmcls.datasets + 数据转换 + 批数据增强 + mmcls.utils + + +.. toctree:: + :maxdepth: 1 + :caption: 其他说明 + + changelog.md + compatibility.md + faq.md + + +.. toctree:: + :maxdepth: 1 + :caption: 设备支持 + + device/npu.md + + +.. toctree:: + :caption: 语言切换 + + English + 简体中文 + + +索引与表格 +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/zh_CN/install.md b/docs/zh_CN/install.md new file mode 100644 index 0000000..e881586 --- /dev/null +++ b/docs/zh_CN/install.md @@ -0,0 +1,210 @@ +# 依赖环境 + +在本节中,我们将演示如何准备 PyTorch 相关的依赖环境。 + +MMClassification 适用于 Linux、Windows 和 macOS。它需要 Python 3.6+、CUDA 9.2+ 和 PyTorch 1.5+。 + +```{note} +如果你对配置 PyTorch 环境已经很熟悉,并且已经完成了配置,可以直接进入[下一节](#安装)。 +否则的话,请依照以下步骤完成配置。 +``` + +**第 1 步** 从[官网](https://docs.conda.io/en/latest/miniconda.html)下载并安装 Miniconda。 + +**第 2 步** 创建一个 conda 虚拟环境并激活它。 + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**第 3 步** 按照[官方指南](https://pytorch.org/get-started/locally/)安装 PyTorch。例如: + +在 GPU 平台: + +```shell +conda install pytorch torchvision -c pytorch +``` + +```{warning} +以上命令会自动安装最新版的 PyTorch 与对应的 cudatoolkit,请检查它们是否与你的环境匹配。 +``` + +在 CPU 平台: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +# 安装 + +我们推荐用户按照我们的最佳实践来安装 MMClassification。但除此之外,如果你想根据 +你的习惯完成安装流程,也可以参见[自定义安装](#自定义安装)一节来获取更多信息。 + +## 最佳实践 + +**第 1 步** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMCV](https://github.com/open-mmlab/mmcv) + +```shell +pip install -U openmim +mim install mmcv-full +``` + +**第 2 步** 安装 MMClassification + +根据具体需求,我们支持两种安装模式: + +- [从源码安装(推荐)](#从源码安装):希望基于 MMClassification 框架开发自己的图像分类任务,需要添加新的功能,比如新的模型或是数据集,或者使用我们提供的各种工具。 +- [作为 Python 包安装](#作为-python-包安装):只是希望调用 MMClassification 的 API 接口,或者在自己的项目中导入 MMClassification 中的模块。 + +### 从源码安装 + +这种情况下,从源码按如下方式安装 mmcls: + +```shell +git clone https://github.com/open-mmlab/mmclassification.git +cd mmclassification +pip install -v -e . +# "-v" 表示输出更多安装相关的信息 +# "-e" 表示以可编辑形式安装,这样可以在不重新安装的情况下,让本地修改直接生效 +``` + +另外,如果你希望向 MMClassification 贡献代码,或者使用试验中的功能,请签出到 `dev` 分支。 + +```shell +git checkout dev +``` + +### 作为 Python 包安装 + +直接使用 pip 安装即可。 + +```shell +pip install mmcls +``` + +## 验证安装 + +为了验证 MMClassification 的安装是否正确,我们提供了一些示例代码来执行模型推理。 + +**第 1 步** 我们需要下载配置文件和模型权重文件 + +```shell +mim download mmcls --config resnet50_8xb32_in1k --dest . 
+``` + +**第 2 步** 验证示例的推理流程 + +如果你是**从源码安装**的 mmcls,那么直接运行以下命令进行验证: + +```shell +python demo/image_demo.py demo/demo.JPEG resnet50_8xb32_in1k.py resnet50_8xb32_in1k_20210831-ea4938fc.pth --device cpu +``` + +你可以看到命令行中输出了结果字典,包括 `pred_label`,`pred_score` 和 `pred_class` 三个字段。另外如果你拥有图形 +界面(而不是使用远程终端),那么可以启用 `--show` 选项,将示例图像和对应的预测结果在窗口中进行显示。 + +如果你是**作为 PyThon 包安装**,那么可以打开你的 Python 解释器,并粘贴如下代码: + +```python +from mmcls.apis import init_model, inference_model + +config_file = 'resnet50_8xb32_in1k.py' +checkpoint_file = 'resnet50_8xb32_in1k_20210831-ea4938fc.pth' +model = init_model(config_file, checkpoint_file, device='cpu') # 或者 device='cuda:0' +inference_model(model, 'demo/demo.JPEG') +``` + +你会看到输出一个字典,包含预测的标签、得分及类别名。 + +## 自定义安装 + +### CUDA 版本 + +安装 PyTorch 时,需要指定 CUDA 版本。如果您不清楚选择哪个,请遵循我们的建议: + +- 对于 Ampere 架构的 NVIDIA GPU,例如 GeForce 30 series 以及 NVIDIA A100,CUDA 11 是必需的。 +- 对于更早的 NVIDIA GPU,CUDA 11 是向前兼容的,但 CUDA 10.2 能够提供更好的兼容性,也更加轻量。 + +请确保你的 GPU 驱动版本满足最低的版本需求,参阅[这张表](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions)。 + +```{note} +如果按照我们的最佳实践进行安装,CUDA 运行时库就足够了,因为我们提供相关 CUDA 代码的预编译,你不需要进行本地编译。 +但如果你希望从源码进行 MMCV 的编译,或是进行其他 CUDA 算子的开发,那么就必须安装完整的 CUDA 工具链,参见 +[NVIDIA 官网](https://developer.nvidia.com/cuda-downloads),另外还需要确保该 CUDA 工具链的版本与 PyTorch 安装时 +的配置相匹配(如用 `conda install` 安装 PyTorch 时指定的 cudatoolkit 版本)。 +``` + +### 不使用 MIM 安装 MMCV + +MMCV 包含 C++ 和 CUDA 扩展,因此其对 PyTorch 的依赖比较复杂。MIM 会自动解析这些 +依赖,选择合适的 MMCV 预编译包,使安装更简单,但它并不是必需的。 + +要使用 pip 而不是 MIM 来安装 MMCV,请遵照 [MMCV 安装指南](https://mmcv.readthedocs.io/zh_CN/latest/get_started/installation.html)。 +它需要你用指定 url 的形式手动指定对应的 PyTorch 和 CUDA 版本。 + +举个例子,如下命令将会安装基于 PyTorch 1.10.x 和 CUDA 11.3 编译的 mmcv-full。 + +```shell +pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html +``` + +### 在 CPU 环境中安装 + +MMClassification 可以仅在 CPU 环境中安装,在 CPU 模式下,你可以完成训练(需要 MMCV 版本 >= 1.4.4)、测试和模型推理等所有操作。 + +在 CPU 模式下,MMCV 的部分功能将不可用,通常是一些 GPU 编译的算子。不过不用担心, +MMClassification 中几乎所有的模型都不会依赖这些算子。 + +### 在 Google Colab 中安装 + +[Google Colab](https://research.google.com/) 通常已经包含了 PyTorch 环境,因此我们只需要安装 MMCV 和 MMClassification 即可,命令如下: + +**第 1 步** 使用 [MIM](https://github.com/open-mmlab/mim) 安装 [MMCV](https://github.com/open-mmlab/mmcv) + +```shell +!pip3 install openmim +!mim install mmcv-full +``` + +**第 2 步** 从源码安装 MMClassification + +```shell +!git clone https://github.com/open-mmlab/mmclassification.git +%cd mmclassification +!pip install -e . 
+``` + +**第 3 步** 验证 + +```python +import mmcls +print(mmcls.__version__) +# 预期输出: 0.23.0 或更新的版本号 +``` + +```{note} +在 Jupyter 中,感叹号 `!` 用于执行外部命令,而 `%cd` 是一个[魔术命令](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd),用于切换 Python 的工作路径。 +``` + +### 通过 Docker 使用 MMClassification + +MMClassification 提供 [Dockerfile](https://github.com/open-mmlab/mmclassification/blob/master/docker/Dockerfile) +用于构建镜像。请确保你的 [Docker 版本](https://docs.docker.com/engine/install/) >=19.03。 + +```shell +# 构建默认的 PyTorch 1.8.1,CUDA 10.2 版本镜像 +# 如果你希望使用其他版本,请修改 Dockerfile +docker build -t mmclassification docker/ +``` + +用以下命令运行 Docker 镜像: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmclassification/data mmclassification +``` + +## 故障解决 + +如果你在安装过程中遇到了什么问题,请先查阅[常见问题](faq.md)。如果没有找到解决方法,可以在 GitHub +上[提出 issue](https://github.com/open-mmlab/mmclassification/issues/new/choose)。 diff --git a/docs/zh_CN/model_zoo.md b/docs/zh_CN/model_zoo.md new file mode 120000 index 0000000..013a9ac --- /dev/null +++ b/docs/zh_CN/model_zoo.md @@ -0,0 +1 @@ +../en/model_zoo.md \ No newline at end of file diff --git a/docs/zh_CN/stat.py b/docs/zh_CN/stat.py new file mode 100755 index 0000000..f6d5b3a --- /dev/null +++ b/docs/zh_CN/stat.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +import functools as func +import glob +import os +import re +from pathlib import Path + +import numpy as np + +MMCLS_ROOT = Path(__file__).absolute().parents[1] +url_prefix = 'https://github.com/open-mmlab/mmclassification/blob/master/' + +papers_root = Path('papers') +papers_root.mkdir(exist_ok=True) +files = [Path(f) for f in sorted(glob.glob('../../configs/*/README.md'))] + +stats = [] +titles = [] +num_ckpts = 0 +num_configs = 0 + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # Extract checkpoints + ckpts = set(x.lower().strip() + for x in re.findall(r'\[model\]\((https?.*)\)', content)) + if len(ckpts) == 0: + continue + num_ckpts += len(ckpts) + + # Extract paper title + match_res = list(re.finditer(r'> \[(.*)\]\((.*)\)', content)) + if len(match_res) > 0: + title, paperlink = match_res[0].groups() + else: + title = content.split('\n')[0].replace('# ', '').strip() + paperlink = None + titles.append(title) + + # Replace paper link to a button + if paperlink is not None: + start = match_res[0].start() + end = match_res[0].end() + link_button = f'[{title}]({paperlink})' + content = content[:start] + link_button + content[end:] + + # Extract paper type + _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] + assert len(_papertype) > 0 + papertype = _papertype[0] + paper = set([(papertype, title)]) + + # Write a copy of README + copy = papers_root / (f.parent.name + '.md') + if copy.exists(): + os.remove(copy) + + def replace_link(matchobj): + # Replace relative link to GitHub link. 
+ name = matchobj.group(1) + link = matchobj.group(2) + if not link.startswith('http') and (f.parent / link).exists(): + rel_link = (f.parent / link).absolute().relative_to(MMCLS_ROOT) + link = url_prefix + str(rel_link) + return f'[{name}]({link})' + + content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_link, content) + + with open(copy, 'w') as copy_file: + copy_file.write(content) + + statsmsg = f""" +\t* [{papertype}] [{title}]({copy}) ({len(ckpts)} ckpts) +""" + stats.append(dict(paper=paper, ckpts=ckpts, statsmsg=statsmsg, copy=copy)) + +allpapers = func.reduce(lambda a, b: a.union(b), + [stat['paper'] for stat in stats]) +msglist = '\n'.join(stat['statsmsg'] for stat in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# 模型库统计 + +* 论文数量: {len(set(titles))} +{countstr} + +* 模型权重文件数量: {num_ckpts} +{msglist} +""" + +with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) diff --git a/docs/zh_CN/tools/analysis.md b/docs/zh_CN/tools/analysis.md new file mode 100644 index 0000000..840ff39 --- /dev/null +++ b/docs/zh_CN/tools/analysis.md @@ -0,0 +1,211 @@ +# 分析 + + + +- [日志分析](#日志分析) + - [绘制曲线图](#绘制曲线图) + - [统计训练时间](#统计训练时间) +- [结果分析](#结果分析) + - [评估结果](#查看典型结果) + - [查看典型结果](#查看典型结果) +- [模型复杂度分析](#模型复杂度分析) +- [常见问题](#常见问题) + + + +## 日志分析 + +### 绘制曲线图 + +指定一个训练日志文件,可通过 `tools/analysis_tools/analyze_logs.py` 脚本绘制指定键值的变化曲线 + +
+ +```shell +python tools/analysis_tools/analyze_logs.py plot_curve \ + ${JSON_LOGS} \ + [--keys ${KEYS}] \ + [--title ${TITLE}] \ + [--legend ${LEGEND}] \ + [--backend ${BACKEND}] \ + [--style ${STYLE}] \ + [--out ${OUT_FILE}] \ + [--window-size ${WINDOW_SIZE}] +``` + +所有参数的说明 + +- `json_logs` :模型配置文件的路径(可同时传入多个,使用空格分开)。 +- `--keys` :分析日志的关键字段,数量为 `len(${JSON_LOGS}) * len(${KEYS})` 默认为 'loss'。 +- `--title` :分析日志的图片名称,默认使用配置文件名, 默认为空。 +- `--legend` :图例名(可同时传入多个,使用空格分开,数目与 `${JSON_LOGS} * ${KEYS}` 数目一致)。默认使用 `"${JSON_LOG}-${KEYS}"`。 +- `--backend` :matplotlib 的绘图后端,默认由 matplotlib 自动选择。 +- `--style` :绘图配色风格,默认为 `whitegrid`。 +- `--out` :保存分析图片的路径,如不指定则不保存。 +- `--window-size`: 可视化窗口大小,如果没有指定,默认为 `12*7`。如果需要指定,需按照格式 `'W*H'`。 + +```{note} +`--style` 选项依赖于第三方库 `seaborn`,需要设置绘图风格请现安装该库。 +``` + +例如: + +- 绘制某日志文件对应的损失曲线图。 + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve your_log_json --keys loss --legend loss + ``` + +- 绘制某日志文件对应的 top-1 和 top-5 准确率曲线图,并将曲线图导出为 results.jpg 文件。 + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve your_log_json --keys accuracy_top-1 accuracy_top-5 --legend top1 top5 --out results.jpg + ``` + +- 在同一图像内绘制两份日志文件对应的 top-1 准确率曲线图。 + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys accuracy_top-1 --legend run1 run2 + ``` + +```{note} +本工具会自动根据关键字段选择从日志的训练部分还是验证部分读取,因此如果你添加了 +自定义的验证指标,请把相对应的关键字段加入到本工具的 `TEST_METRICS` 变量中。 +``` + +### 统计训练时间 + +`tools/analysis_tools/analyze_logs.py` 也可以根据日志文件统计训练耗时。 + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time \ + ${JSON_LOGS} + [--include-outliers] +``` + +**所有参数的说明**: + +- `json_logs` :模型配置文件的路径(可同时传入多个,使用空格分开)。 +- `--include-outliers` :如果指定,将不会排除每个轮次中第一轮迭代的记录(有时第一轮迭代会耗时较长) + +**示例**: + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time work_dirs/some_exp/20200422_153324.log.json +``` + +预计输出结果如下所示: + +```text +-----Analyze train time of work_dirs/some_exp/20200422_153324.log.json----- +slowest epoch 68, average time is 0.3818 +fastest epoch 1, average time is 0.3694 +time std over epochs is 0.0020 +average iter time: 0.3777 s/iter +``` + +## 结果分析 + +利用 `tools/test.py` 的 `--out` 参数,我们可以将所有的样本的推理结果保存到输出 +文件中。利用这一文件,我们可以进行进一步的分析。 + +### 评估结果 + +`tools/analysis_tools/eval_metric.py` 可以用来再次计算评估结果。 + +```shell +python tools/analysis_tools/eval_metric.py \ + ${CONFIG} \ + ${RESULT} \ + [--metrics ${METRICS}] \ + [--cfg-options ${CFG_OPTIONS}] \ + [--metric-options ${METRIC_OPTIONS}] +``` + +**所有参数说明**: + +- `config` :配置文件的路径。 +- `result` : `tools/test.py` 的输出结果文件。 +- `metrics` : 评估的衡量指标,可接受的值取决于数据集类。 +- `--cfg-options`: 额外的配置选项,会被合入配置文件,参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)。 +- `--metric-options`: 如果指定了,这些选项将被传递给数据集 `evaluate` 函数的 `metric_options` 参数。 + +```{note} +在 `tools/test.py` 中,我们支持使用 `--out-items` 选项来选择保存哪些结果。为了使用本工具,请确保结果文件中包含 "class_scores"。 +``` + +**示例**: + +```shell +python tools/analysis_tools/eval_metric.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py ./result.pkl --metrics accuracy --metric-options "topk=(1,5)" +``` + +### 查看典型结果 + +`tools/analysis_tools/analyze_results.py` 可以保存预测成功/失败,同时得分最高的 k 个图像。 + +```shell +python tools/analysis_tools/analyze_results.py \ + ${CONFIG} \ + ${RESULT} \ + [--out-dir ${OUT_DIR}] \ + [--topk ${TOPK}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +**所有参数说明**: + +- `config` :配置文件的路径。 +- `result` : `tools/test.py` 的输出结果文件。 +- `--out-dir` :保存结果分析的文件夹路径。 +- `--topk` :分别保存多少张预测成功/失败的图像。如果不指定,默认为 `20`。 +- `--cfg-options`: 
额外的配置选项,会被合入配置文件,参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)。 + +```{note} +在 `tools/test.py` 中,我们支持使用 `--out-items` 选项来选择保存哪些结果。为了使用本工具,请确保结果文件中包含 "pred_score"、"pred_label" 和 "pred_class"。 +``` + +**示例**: + +```shell +python tools/analysis_tools/analyze_results.py \ + configs/resnet/resnet50_xxxx.py \ + result.pkl \ + --out-dir results \ + --topk 50 +``` + +## 模型复杂度分析 + +### 计算 FLOPs 和参数量(试验性的) + +我们根据 [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) 提供了一个脚本用于计算给定模型的 FLOPs 和参数量。 + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +**所有参数说明**: + +- `config` :配置文件的路径。 +- `--shape`: 输入尺寸,支持单值或者双值, 如: `--shape 256`、`--shape 224 256`。默认为`224 224`。 + +用户将获得如下结果: + +```text +============================== +Input shape: (3, 224, 224) +Flops: 4.12 GFLOPs +Params: 25.56 M +============================== +``` + +```{warning} +此工具仍处于试验阶段,我们不保证该数字正确无误。您最好将结果用于简单比较,但在技术报告或论文中采用该结果之前,请仔细检查。 +- FLOPs 与输入的尺寸有关,而参数量与输入尺寸无关。默认输入尺寸为 (1, 3, 224, 224) +- 一些运算不会被计入 FLOPs 的统计中,例如 GN 和自定义运算。详细信息请参考 [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) +``` + +## 常见问题 + +- 无 diff --git a/docs/zh_CN/tools/miscellaneous.md b/docs/zh_CN/tools/miscellaneous.md new file mode 100644 index 0000000..a2cb625 --- /dev/null +++ b/docs/zh_CN/tools/miscellaneous.md @@ -0,0 +1,59 @@ +# 其他工具 + + + +- [打印完整配置](#打印完整配置) +- [检查数据集](#检查数据集) +- [常见问题](#常见问题) + + + +## 打印完整配置 + +`tools/misc/print_config.py` 脚本会解析所有输入变量,并打印完整配置信息。 + +```shell +python tools/misc/print_config.py ${CONFIG} [--cfg-options ${CFG_OPTIONS}] +``` + +**所有参数说明**: + +- `config` :配置文件的路径。 +- `--cfg-options`: 额外的配置选项,会被合入配置文件,参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)。 + +**示例**: + +```shell +python tools/misc/print_config.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py +``` + +## 检查数据集 + +`tools/misc/verify_dataset.py` 脚本会检查数据集的所有图片,查看是否有已经损坏的图片。 + +```shell +python tools/print_config.py \ + ${CONFIG} \ + [--out-path ${OUT-PATH}] \ + [--phase ${PHASE}] \ + [--num-process ${NUM-PROCESS}] + [--cfg-options ${CFG_OPTIONS}] +``` + +**所有参数说明**: + +- `config` : 配置文件的路径。 +- `--out-path` : 输出结果路径,默认为 'brokenfiles.log'。 +- `--phase` : 检查哪个阶段的数据集,可用值为 "train" 、"test" 或者 "val", 默认为 "train"。 +- `--num-process` : 指定的进程数,默认为1。 +- `--cfg-options`: 额外的配置选项,会被合入配置文件,参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)。 + +**示例**: + +```shell +python tools/misc/verify_dataset.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py --out-path broken_imgs.log --phase val --num-process 8 +``` + +## 常见问题 + +- 无 diff --git a/docs/zh_CN/tools/model_serving.md b/docs/zh_CN/tools/model_serving.md new file mode 100644 index 0000000..4eed488 --- /dev/null +++ b/docs/zh_CN/tools/model_serving.md @@ -0,0 +1,87 @@ +# 模型部署至 TorchServe + +为了使用 [`TorchServe`](https://pytorch.org/serve/) 部署一个 `MMClassification` 模型,需要进行以下几步: + +## 1. 转换 MMClassification 模型至 TorchServe + +```shell +python tools/deployment/mmcls2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +```{note} +${MODEL_STORE} 需要是一个文件夹的绝对路径。 +``` + +示例: + +```shell +python tools/deployment/mmcls2torchserve.py \ + configs/resnet/resnet18_8xb32_in1k.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + --output-folder ./checkpoints \ + --model-name resnet18_in1k +``` + +## 2. 
构建 `mmcls-serve` docker 镜像 + +```shell +docker build -t mmcls-serve:latest docker/serve/ +``` + +## 3. 运行 `mmcls-serve` 镜像 + +请参考官方文档 [基于 docker 运行 TorchServe](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +为了使镜像能够使用 GPU 资源,需要安装 [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)。之后可以传递 `--gpus` 参数以在 GPU 上运。 + +示例: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=`realpath ./checkpoints`,target=/home/model-server/model-store \ +mmcls-serve:latest +``` + +```{note} +`realpath ./checkpoints` 是 "./checkpoints" 的绝对路径,你可以将其替换为你保存 TorchServe 模型的目录的绝对路径。 +``` + +参考 [该文档](https://github.com/pytorch/serve/blob/master/docs/rest_api.md) 了解关于推理 (8080),管理 (8081) 和指标 (8082) 等 API 的信息。 + +## 4. 测试部署 + +```shell +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T demo/demo.JPEG +``` + +您应该获得类似于以下内容的响应: + +```json +{ + "pred_label": 58, + "pred_score": 0.38102269172668457, + "pred_class": "water snake" +} +``` + +另外,你也可以使用 `test_torchserver.py` 来比较 TorchServe 和 PyTorch 的结果,并进行可视化。 + +```shell +python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] +``` + +示例: + +```shell +python tools/deployment/test_torchserver.py \ + demo/demo.JPEG \ + configs/resnet/resnet18_8xb32_in1k.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + resnet18_in1k +``` diff --git a/docs/zh_CN/tools/onnx2tensorrt.md b/docs/zh_CN/tools/onnx2tensorrt.md new file mode 100644 index 0000000..f6a25fa --- /dev/null +++ b/docs/zh_CN/tools/onnx2tensorrt.md @@ -0,0 +1,75 @@ +# ONNX 转 TensorRT(试验性的) + + + +- [如何将模型从 ONNX 转换到 TensorRT](#如何将模型从-onnx-转换到-tensorrt) + - [准备工作](#准备工作) + - [使用方法](#使用方法) +- [支持转换至 TensorRT 的模型列表](#支持转换至-tensorrt-的模型列表) +- [提示](#提示) +- [常见问题](#常见问题) + + + +## 如何将模型从 ONNX 转换到 TensorRT + +### 准备工作 + +1. 请参照 [安装指南](https://mmclassification.readthedocs.io/zh_CN/latest/install.html#mmclassification) 从源码安装 MMClassification。 +2. 
使用我们的工具 [pytorch2onnx.md](./pytorch2onnx.md) 将 PyTorch 模型转换至 ONNX。 + +### 使用方法 + +```bash +python tools/deployment/onnx2tensorrt.py \ + ${MODEL} \ + --trt-file ${TRT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --workspace-size {WORKSPACE_SIZE} \ + --show \ + --verify \ +``` + +所有参数的说明: + +- `model` : ONNX 模型的路径。 +- `--trt-file`: TensorRT 引擎文件的输出路径。如果没有指定,默认为当前脚本执行路径下的 `tmp.trt`。 +- `--shape`: 模型输入的高度和宽度。如果没有指定,默认为 `224 224`。 +- `--workspace-size` : 构建 TensorRT 引擎所需要的 GPU 空间大小,单位为 GiB。如果没有指定,默认为 `1` GiB。 +- `--show`: 是否展示模型的输出。如果没有指定,默认为 `False`。 +- `--verify`: 是否使用 ONNXRuntime 和 TensorRT 验证模型转换的正确性。如果没有指定,默认为`False`。 + +示例: + +```bash +python tools/deployment/onnx2tensorrt.py \ + checkpoints/resnet/resnet18_b16x8_cifar10.onnx \ + --trt-file checkpoints/resnet/resnet18_b16x8_cifar10.trt \ + --shape 224 224 \ + --show \ + --verify \ +``` + +## 支持转换至 TensorRT 的模型列表 + +下表列出了保证可转换为 TensorRT 的模型。 + +| 模型 | 配置文件 | 状态 | +| :----------: | :-----------------------------------------------------: | :--: | +| MobileNetV2 | `configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py` | Y | +| ResNet | `configs/resnet/resnet18_8xb16_cifar10.py` | Y | +| ResNeXt | `configs/resnext/resnext50-32x4d_8xb32_in1k.py` | Y | +| ShuffleNetV1 | `configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py` | Y | +| ShuffleNetV2 | `configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py` | Y | + +注: + +- *以上所有模型转换测试基于 Pytorch==1.6.0 和 TensorRT-7.2.1.6.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0 进行* + +## 提示 + +- 如果你在上述模型的转换中遇到问题,请在 GitHub 中创建一个 issue,我们会尽快处理。未在上表中列出的模型,由于资源限制,我们可能无法提供很多帮助,如果遇到问题,请尝试自行解决。 + +## 常见问题 + +- 无 diff --git a/docs/zh_CN/tools/pytorch2onnx.md b/docs/zh_CN/tools/pytorch2onnx.md new file mode 100644 index 0000000..c66991a --- /dev/null +++ b/docs/zh_CN/tools/pytorch2onnx.md @@ -0,0 +1,88 @@ +# Pytorch 转 ONNX (试验性的) + + + +- [如何将模型从 PyTorch 转换到 ONNX](#如何将模型从-pytorch-转换到-onnx) + - [准备工作](#准备工作) + - [使用方法](#使用方法) +- [支持导出至 ONNX 的模型列表](#支持导出至-onnx-的模型列表) +- [提示](#提示) +- [常见问题](#常见问题) + + + +## 如何将模型从 PyTorch 转换到 ONNX + +### 准备工作 + +1. 请参照 [安装指南](https://mmclassification.readthedocs.io/zh_CN/latest/install.html#mmclassification) 从源码安装 MMClassification。 +2. 
安装 onnx 和 onnxruntime。 + +```shell +pip install onnx onnxruntime==1.5.1 +``` + +### 使用方法 + +```bash +python tools/deployment/pytorch2onnx.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --opset-version ${OPSET_VERSION} \ + --dynamic-shape \ + --show \ + --simplify \ + --verify \ +``` + +所有参数的说明: + +- `config` : 模型配置文件的路径。 +- `--checkpoint` : 模型权重文件的路径。 +- `--output-file`: ONNX 模型的输出路径。如果没有指定,默认为当前脚本执行路径下的 `tmp.onnx`。 +- `--shape`: 模型输入的高度和宽度。如果没有指定,默认为 `224 224`。 +- `--opset-version` : ONNX 的 opset 版本。如果没有指定,默认为 `11`。 +- `--dynamic-shape` : 是否以动态输入尺寸导出 ONNX。 如果没有指定,默认为 `False`。 +- `--show`: 是否打印导出模型的架构。如果没有指定,默认为 `False`。 +- `--simplify`: 是否精简导出的 ONNX 模型。如果没有指定,默认为 `False`。 +- `--verify`: 是否验证导出模型的正确性。如果没有指定,默认为`False`。 + +示例: + +```bash +python tools/deployment/pytorch2onnx.py \ + configs/resnet/resnet18_8xb16_cifar10.py \ + --checkpoint checkpoints/resnet/resnet18_b16x8_cifar10.pth \ + --output-file checkpoints/resnet/resnet18_b16x8_cifar10.onnx \ + --dynamic-shape \ + --show \ + --simplify \ + --verify \ +``` + +## 支持导出至 ONNX 的模型列表 + +下表列出了保证可导出至 ONNX,并在 ONNX Runtime 中运行的模型。 + +| 模型 | 配置文件 | 批推理 | 动态输入尺寸 | 备注 | +| :----------: | :-----------------------------------------------------: | :----: | :----------: | ---- | +| MobileNetV2 | `configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py` | Y | Y | | +| ResNet | `configs/resnet/resnet18_8xb16_cifar10.py` | Y | Y | | +| ResNeXt | `configs/resnext/resnext50-32x4d_8xb32_in1k.py` | Y | Y | | +| SE-ResNet | `configs/seresnet/seresnet50_8xb32_in1k.py` | Y | Y | | +| ShuffleNetV1 | `configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py` | Y | Y | | +| ShuffleNetV2 | `configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py` | Y | Y | | + +注: + +- *以上所有模型转换测试基于 Pytorch==1.6.0 进行* + +## 提示 + +- 如果你在上述模型的转换中遇到问题,请在 GitHub 中创建一个 issue,我们会尽快处理。未在上表中列出的模型,由于资源限制,我们可能无法提供很多帮助,如果遇到问题,请尝试自行解决。 + +## 常见问题 + +- 无 diff --git a/docs/zh_CN/tools/pytorch2torchscript.md b/docs/zh_CN/tools/pytorch2torchscript.md new file mode 100644 index 0000000..9ef846f --- /dev/null +++ b/docs/zh_CN/tools/pytorch2torchscript.md @@ -0,0 +1,54 @@ +# Pytorch 转 TorchScript (试验性的) + + + +- [如何将 PyTorch 模型转换至 TorchScript](#如何将-pytorch-模型转换至-torchscript) + - [使用方法](#使用方法) +- [提示](#提示) +- [常见问题](#常见问题) + + + +## 如何将 PyTorch 模型转换至 TorchScript + +### 使用方法 + +```bash +python tools/deployment/pytorch2torchscript.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --verify \ +``` + +所有参数的说明: + +- `config` : 模型配置文件的路径。 +- `--checkpoint` : 模型权重文件的路径。 +- `--output-file`: TorchScript 模型的输出路径。如果没有指定,默认为当前脚本执行路径下的 `tmp.pt`。 +- `--shape`: 模型输入的高度和宽度。如果没有指定,默认为 `224 224`。 +- `--verify`: 是否验证导出模型的正确性。如果没有指定,默认为`False`。 + +示例: + +```bash +python tools/deployment/pytorch2torchscript.py \ + configs/resnet/resnet18_8xb16_cifar10.py \ + --checkpoint checkpoints/resnet/resnet18_b16x8_cifar10.pth \ + --output-file checkpoints/resnet/resnet18_b16x8_cifar10.pt \ + --verify \ +``` + +注: + +- *所有模型基于 Pytorch==1.8.1 通过了转换测试* + +## 提示 + +- 由于 `torch.jit.is_tracing()` 只在 PyTorch 1.6 之后的版本中得到支持,对于 PyTorch 1.3-1.5 的用户,我们建议手动提前返回结果。 +- 如果你在本仓库的模型转换中遇到问题,请在 GitHub 中创建一个 issue,我们会尽快处理。 + +## 常见问题 + +- 无 diff --git a/docs/zh_CN/tools/visualization.md b/docs/zh_CN/tools/visualization.md new file mode 100644 index 0000000..75d8142 --- /dev/null +++ b/docs/zh_CN/tools/visualization.md @@ -0,0 +1,302 @@ +# 可视化 + + + +- [数据流水线可视化](#数据流水线可视化) +- [学习率策略可视化](#学习率策略可视化) +- 
[类别激活图可视化](#类别激活图可视化)
+- [常见问题](#常见问题)
+
+
+
+## 数据流水线可视化
+
+```bash
+python tools/visualizations/vis_pipeline.py \
+    ${CONFIG_FILE} \
+    [--output-dir ${OUTPUT_DIR}] \
+    [--phase ${DATASET_PHASE}] \
+    [--number ${NUMBER_IMAGES_DISPLAY}] \
+    [--skip-type ${SKIP_TRANSFORM_TYPE}] \
+    [--mode ${DISPLAY_MODE}] \
+    [--show] \
+    [--adaptive] \
+    [--min-edge-length ${MIN_EDGE_LENGTH}] \
+    [--max-edge-length ${MAX_EDGE_LENGTH}] \
+    [--bgr2rgb] \
+    [--window-size ${WINDOW_SIZE}] \
+    [--cfg-options ${CFG_OPTIONS}]
+```
+
+**所有参数的说明**:
+
+- `config` : 模型配置文件的路径。
+- `--output-dir`: 保存图片文件夹,如果没有指定,默认为 `''`,表示不保存。
+- `--phase`: 可视化数据集的阶段,只能为 `[train, val, test]` 之一,默认为 `train`。
+- `--number`: 可视化样本数量。如果没有指定,默认展示数据集的所有图片。
+- `--skip-type`: 预设跳过的数据流水线过程。如果没有指定,默认为 `['ToTensor', 'Normalize', 'ImageToTensor', 'Collect']`。
+- `--mode`: 可视化的模式,只能为 `[original, transformed, concat, pipeline]` 之一,如果没有指定,默认为 `concat`。
+- `--show`: 将可视化图片以弹窗形式展示。
+- `--adaptive`: 自动调节可视化图片的大小。
+- `--min-edge-length`: 最短边长度,当使用了 `--adaptive` 时有效。当图片任意边小于 `${MIN_EDGE_LENGTH}` 时,会保持长宽比不变放大图片,短边对齐至 `${MIN_EDGE_LENGTH}`,默认为 200。
+- `--max-edge-length`: 最长边长度,当使用了 `--adaptive` 时有效。当图片任意边大于 `${MAX_EDGE_LENGTH}` 时,会保持长宽比不变缩小图片,长边对齐至 `${MAX_EDGE_LENGTH}`,默认为 1000。
+- `--bgr2rgb`: 将图片的颜色通道翻转。
+- `--window-size`: 可视化窗口大小,如果没有指定,默认为 `12*7`。如果需要指定,按照格式 `'W*H'`。
+- `--cfg-options` : 对配置文件的修改,参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)。
+
+```{note}
+
+1. 如果不指定 `--mode`,默认设置为 `concat`,获取原始图片和预处理后图片拼接的图片;如果 `--mode` 设置为 `original`,则获取原始图片;如果 `--mode` 设置为 `transformed`,则获取预处理后的图片;如果 `--mode` 设置为 `pipeline`,则获得数据流水线所有中间过程图片。
+
+2. 当指定了 `--adaptive` 选项时,会自动地调整尺寸过大和过小的图片,你可以通过设定 `--min-edge-length` 与 `--max-edge-length` 来指定自动调整的图片尺寸。
+```
+
+**示例**:
+
+1. **'original'** 模式,可视化 `CIFAR100` 验证集中的 100 张原始图片,显示并保存在 `./tmp` 文件夹下:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py configs/resnet/resnet50_8xb16_cifar100.py --phase val --output-dir tmp --mode original --number 100 --show --adaptive --bgr2rgb
+```
+
+ +2. **'transformed'** 模式,可视化 `ImageNet` 训练集的所有经过预处理的图片,并以弹窗形式显示: + +```shell +python ./tools/visualizations/vis_pipeline.py ./configs/resnet/resnet50_8xb32_in1k.py --show --mode transformed +``` + +
+ +3. **'concat'** 模式,可视化 `ImageNet` 训练集的10张原始图片与预处理后图片对比图,保存在 `./tmp` 文件夹下: + +```shell +python ./tools/visualizations/vis_pipeline.py configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py --phase train --output-dir tmp --number 10 --adaptive +``` + +
+ +4. **'pipeline'** 模式,可视化 `ImageNet` 训练集经过数据流水线的过程图像: + +```shell +python ./tools/visualizations/vis_pipeline.py configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py --phase train --adaptive --mode pipeline --show +``` + +
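+在命令行工具之外,也可以在自己的 Python 脚本中直接构建数据集,检查经过数据流水线处理后的图片。下面是一个简单的示意(假设已经按照教程准备好配置文件对应的数据集;其中跳过的变换列表、输出目录 `tmp` 与取样数量均为示例设定,可按需调整):
+
+```python
+import os
+
+import mmcv
+from mmcls.datasets import build_dataset
+
+cfg = mmcv.Config.fromfile('configs/resnet/resnet50_8xb32_in1k.py')
+
+# 与 `--skip-type` 的默认值保持一致,跳过这些变换后 'img' 字段仍是可直接保存的 ndarray
+skip_types = {'ToTensor', 'Normalize', 'ImageToTensor', 'Collect'}
+cfg.data.train.pipeline = [
+    t for t in cfg.data.train.pipeline if t['type'] not in skip_types
+]
+
+dataset = build_dataset(cfg.data.train)
+
+os.makedirs('tmp', exist_ok=True)
+for i in range(3):  # 仅取前 3 个样本用于检查
+    img = dataset[i]['img']
+    mmcv.imwrite(img, os.path.join('tmp', f'transformed_{i}.jpg'))
+```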
+ +## 学习率策略可视化 + +```bash +python tools/visualizations/vis_lr.py \ + ${CONFIG_FILE} \ + [--dataset-size ${Dataset_Size}] \ + [--ngpus ${NUM_GPUs}] \ + [--save-path ${SAVE_PATH}] \ + [--title ${TITLE}] \ + [--style ${STYLE}] \ + [--window-size ${WINDOW_SIZE}] \ + [--cfg-options ${CFG_OPTIONS}] \ +``` + +**所有参数的说明**: + +- `config` : 模型配置文件的路径。 +- `--dataset-size` : 数据集的大小。如果指定,`build_dataset` 将被跳过并使用这个大小作为数据集大小,默认使用 `build_dataset` 所得数据集的大小。 +- `--ngpus` : 使用 GPU 的数量。 +- `--save-path` : 保存的可视化图片的路径,默认不保存。 +- `--title` : 可视化图片的标题,默认为配置文件名。 +- `--style` : 可视化图片的风格,默认为 `whitegrid`。 +- `--window-size`: 可视化窗口大小,如果没有指定,默认为 `12*7`。如果需要指定,按照格式 `'W*H'`。 +- `--cfg-options` : 对配置文件的修改,参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)。 + +```{note} + +部分数据集在解析标注阶段比较耗时,可直接将 `dataset-size` 指定数据集的大小,以节约时间。 + +``` + +**示例**: + +```bash +python tools/visualizations/vis_lr.py configs/resnet/resnet50_b16x8_cifar100.py +``` + +
+ +当数据集为 ImageNet 时,通过直接指定数据集大小来节约时间,并保存图片: + +```bash +python tools/visualizations/vis_lr.py configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py --dataset-size 1281167 --ngpus 4 --save-path ./repvgg-B3g4_4xb64-lr.jpg +``` + +
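+如果只是想直观了解常见学习率策略的形状,也可以不依赖配置文件,直接用 numpy 和 matplotlib 手工绘制曲线。下面的示意画出「线性预热 + 余弦退火」的学习率变化,其中基础学习率、预热迭代数、总迭代数等数值均为示例假设:
+
+```python
+import matplotlib.pyplot as plt
+import numpy as np
+
+base_lr = 0.1         # 示例假设:基础学习率
+warmup_ratio = 0.25   # 预热起始学习率 = base_lr * warmup_ratio
+warmup_iters = 500    # 示例假设:线性预热的迭代数
+total_iters = 10000   # 示例假设:总迭代数
+min_lr = 0.0
+
+lrs = np.empty(total_iters)
+
+# 线性预热:从 base_lr * warmup_ratio 线性增长到 base_lr
+lrs[:warmup_iters] = np.linspace(base_lr * warmup_ratio, base_lr, warmup_iters)
+
+# 余弦退火:从 base_lr 平滑下降到 min_lr
+t = np.arange(total_iters - warmup_iters) / (total_iters - warmup_iters)
+lrs[warmup_iters:] = min_lr + 0.5 * (base_lr - min_lr) * (1 + np.cos(np.pi * t))
+
+plt.plot(np.arange(total_iters), lrs)
+plt.xlabel('iteration')
+plt.ylabel('learning rate')
+plt.title('linear warmup + cosine annealing (sketch)')
+plt.savefig('lr_sketch.jpg')  # 也可改用 plt.show() 直接显示
+```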
+ +## 类别激活图可视化 + +MMClassification 提供 `tools\visualizations\vis_cam.py` 工具来可视化类别激活图。请使用 `pip install "grad-cam>=1.3.6"` 安装依赖的 [pytorch-grad-cam](https://github.com/jacobgil/pytorch-grad-cam)。 + +目前支持的方法有: + +| Method | What it does | +| :----------: | :-----------------------------------------------------------------------------------------------: | +| GradCAM | 使用平均梯度对 2D 激活进行加权 | +| GradCAM++ | 类似 GradCAM,但使用了二阶梯度 | +| XGradCAM | 类似 GradCAM,但通过归一化的激活对梯度进行了加权 | +| EigenCAM | 使用 2D 激活的第一主成分(无法区分类别,但效果似乎不错) | +| EigenGradCAM | 类似 EigenCAM,但支持类别区分,使用了激活 * 梯度的第一主成分,看起来和 GradCAM 差不多,但是更干净 | +| LayerCAM | 使用正梯度对激活进行空间加权,对于浅层有更好的效果 | + +**命令行**: + +```bash +python tools/visualizations/vis_cam.py \ + ${IMG} \ + ${CONFIG_FILE} \ + ${CHECKPOINT} \ + [--target-layers ${TARGET-LAYERS}] \ + [--preview-model] \ + [--method ${METHOD}] \ + [--target-category ${TARGET-CATEGORY}] \ + [--save-path ${SAVE_PATH}] \ + [--vit-like] \ + [--num-extra-tokens ${NUM-EXTRA-TOKENS}] + [--aug_smooth] \ + [--eigen_smooth] \ + [--device ${DEVICE}] \ + [--cfg-options ${CFG-OPTIONS}] +``` + +**所有参数的说明**: + +- `img`:目标图片路径。 +- `config`:模型配置文件的路径。 +- `checkpoint`:权重路径。 +- `--target-layers`:所查看的网络层名称,可输入一个或者多个网络层, 如果不设置,将使用最后一个`block`中的`norm`层。 +- `--preview-model`:是否查看模型所有网络层。 +- `--method`:类别激活图图可视化的方法,目前支持 `GradCAM`, `GradCAM++`, `XGradCAM`, `EigenCAM`, `EigenGradCAM`, `LayerCAM`,不区分大小写。如果不设置,默认为 `GradCAM`。 +- `--target-category`:查看的目标类别,如果不设置,使用模型检测出来的类别做为目标类别。 +- `--save-path`:保存的可视化图片的路径,默认不保存。 +- `--eigen-smooth`:是否使用主成分降低噪音,默认不开启。 +- `--vit-like`: 是否为 `ViT` 类似的 Transformer-based 网络 +- `--num-extra-tokens`: `ViT` 类网络的额外的 tokens 通道数,默认使用主干网络的 `num_extra_tokens`。 +- `--aug-smooth`:是否使用测试时增强 +- `--device`:使用的计算设备,如果不设置,默认为'cpu'。 +- `--cfg-options`:对配置文件的修改,参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html)。 + +```{note} +在指定 `--target-layers` 时,如果不知道模型有哪些网络层,可使用命令行添加 `--preview-model` 查看所有网络层名称; +``` + +**示例(CNN)**: + +`--target-layers` 在 `Resnet-50` 中的一些示例如下: + +- `'backbone.layer4'`,表示第四个 `ResLayer` 层的输出。 +- `'backbone.layer4.2'` 表示第四个 `ResLayer` 层中第三个 `BottleNeck` 块的输出。 +- `'backbone.layer4.2.conv1'` 表示上述 `BottleNeck` 块中 `conv1` 层的输出。 + +```{note} +对于 `ModuleList` 或者 `Sequential` 类型的网络层,可以直接使用索引的方式指定子模块。比如 `backbone.layer4[-1]` 和 `backbone.layer4.2` 是相同的,因为 `layer4` 是一个拥有三个子模块的 `Sequential`。 +``` + +1. 使用不同方法可视化 `ResNet50`,默认 `target-category` 为模型检测的结果,使用默认推导的 `target-layers`。 + + ```shell + python tools/visualizations/vis_cam.py \ + demo/bird.JPEG \ + configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth \ + --method GradCAM + # GradCAM++, XGradCAM, EigenCAM, EigenGradCAM, LayerCAM + ``` + + | Image | GradCAM | GradCAM++ | EigenGradCAM | LayerCAM | + | ------------------------------------ | --------------------------------------- | ----------------------------------------- | -------------------------------------------- | ---------------------------------------- | + |
|
|
|
|
| + +2. 同一张图不同类别的激活图效果图,在 `ImageNet` 数据集中,类别238为 'Greater Swiss Mountain dog',类别281为 'tabby, tabby cat'。 + + ```shell + python tools/visualizations/vis_cam.py \ + demo/cat-dog.png configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth \ + --target-layers 'backbone.layer4.2' \ + --method GradCAM \ + --target-category 238 + # --target-category 281 + ``` + + | Category | Image | GradCAM | XGradCAM | LayerCAM | + | -------- | ---------------------------------------------- | ------------------------------------------------ | ------------------------------------------------- | ------------------------------------------------- | + | Dog |
|
|
|
| + | Cat |
|
|
|
| + +3. 使用 `--eigen-smooth` 以及 `--aug-smooth` 获取更好的可视化效果。 + + ```shell + python tools/visualizations/vis_cam.py \ + demo/dog.jpg \ + configs/mobilenet_v3/mobilenet-v3-large_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth \ + --target-layers 'backbone.layer16' \ + --method LayerCAM \ + --eigen-smooth --aug-smooth + ``` + + | Image | LayerCAM | eigen-smooth | aug-smooth | eigen&aug | + | ------------------------------------ | --------------------------------------- | ------------------------------------------- | ----------------------------------------- | ----------------------------------------- | + |
|
|
|
|
| + +**示例(Transformer)**: + +`--target-layers` 在 Transformer-based 网络中的一些示例如下: + +- Swin-Transformer 中:`'backbone.norm3'` +- ViT 中:`'backbone.layers[-1].ln1'` + +对于 Transformer-based 的网络,比如 ViT、T2T-ViT 和 Swin-Transformer,特征是被展平的。为了绘制 CAM 图,我们需要指定 `--vit-like` 选项,从而让被展平的特征恢复方形的特征图。 + +除了特征被展平之外,一些类 ViT 的网络还会添加额外的 tokens。比如 ViT 和 T2T-ViT 中添加了分类 token,DeiT 中还添加了蒸馏 token。在这些网络中,分类计算在最后一个注意力模块之后就已经完成了,分类得分也只和这些额外的 tokens 有关,与特征图无关,也就是说,分类得分对这些特征图的导数为 0。因此,我们不能使用最后一个注意力模块的输出作为 CAM 绘制的目标层。 + +另外,为了去除这些额外的 toekns 以获得特征图,我们需要知道这些额外 tokens 的数量。MMClassification 中几乎所有 Transformer-based 的网络都拥有 `num_extra_tokens` 属性。而如果你希望将此工具应用于新的,或者第三方的网络,而且该网络没有指定 `num_extra_tokens` 属性,那么可以使用 `--num-extra-tokens` 参数手动指定其数量。 + +1. 对 `Swin Transformer` 使用默认 `target-layers` 进行 CAM 可视化: + + ```shell + python tools/visualizations/vis_cam.py \ + demo/bird.JPEG \ + configs/swin_transformer/swin-tiny_16xb64_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth \ + --vit-like + ``` + +2. 对 `Vision Transformer(ViT)` 进行 CAM 可视化: + + ```shell + python tools/visualizations/vis_cam.py \ + demo/bird.JPEG \ + configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py \ + https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth \ + --vit-like \ + --target-layers 'backbone.layers[-1].ln1' + ``` + +3. 对 `T2T-ViT` 进行 CAM 可视化: + + ```shell + python tools/visualizations/vis_cam.py \ + demo/bird.JPEG \ + configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-b7c09b62.pth \ + --vit-like \ + --target-layers 'backbone.encoder[-1].ln1' + ``` + +| Image | ResNet50 | ViT | Swin | T2T-ViT | +| --------------------------------------- | ------------------------------------------ | -------------------------------------- | --------------------------------------- | ------------------------------------------ | +|
|
|
|
|
| + +## 常见问题 + +- 无 diff --git a/docs/zh_CN/tutorials/MMClassification_python_cn.ipynb b/docs/zh_CN/tutorials/MMClassification_python_cn.ipynb new file mode 100755 index 0000000..b81bf87 --- /dev/null +++ b/docs/zh_CN/tutorials/MMClassification_python_cn.ipynb @@ -0,0 +1,2041 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "XjQxmm04iTx4" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UdMfIsMpiODD" + }, + "source": [ + "# MMClassification Python API 教程\n", + "\n", + "在本教程中会介绍如下内容:\n", + "\n", + "* 如何安装 MMClassification\n", + "* 使用 Python API 进行模型推理\n", + "* 使用 Python API 进行模型微调" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iOl0X9UEiRvE" + }, + "source": [ + "## 安装 MMClassification\n", + "\n", + "在使用 MMClassification 之前,我们需要配置环境,步骤如下:\n", + "\n", + "- 安装 Python, CUDA, C/C++ compiler 和 git\n", + "- 安装 PyTorch (CUDA 版)\n", + "- 安装 mmcv\n", + "- 克隆 mmcls github 代码库然后安装\n", + "\n", + "因为我们在 Google Colab 进行实验,Colab 已经帮我们完成了基本的配置,我们可以直接跳过前面两个步骤 。" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_i7cjqS_LtoP" + }, + "source": [ + "### 检查环境" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "c6MbAw10iUJI", + "outputId": "dd37cdf5-7bcf-4a03-f5b5-4b17c3ca16de" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content\n" + ] + } + ], + "source": [ + "%cd /content" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4IyFL3MaiYRu", + "outputId": "5008efdf-0356-4d93-ba9d-e51787036213" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content\n" + ] + } + ], + "source": [ + "!pwd" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "DMw7QwvpiiUO", + "outputId": "33fa5eb8-d083-4a1f-d094-ab0f59e2818e" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2020 NVIDIA Corporation\n", + "Built on Mon_Oct_12_20:09:46_PDT_2020\n", + "Cuda compilation tools, release 11.1, V11.1.105\n", + "Build cuda_11.1.TC455_06.29190527_0\n" + ] + } + ], + "source": [ + "# 检查 nvcc 版本\n", + "!nvcc -V" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4VIBU7Fain4D", + "outputId": "ec20652d-ca24-4b82-b407-e90354d728f8" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0\n", + "Copyright (C) 2017 Free Software Foundation, Inc.\n", + "This is free software; see the source for copying conditions. 
There is NO\n", + "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", + "\n" + ] + } + ], + "source": [ + "# 检查 GCC 版本\n", + "!gcc --version" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "24lDLCqFisZ9", + "outputId": "30ec9a1c-cdb3-436c-cdc8-f2a22afe254f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1.9.0+cu111\n", + "True\n" + ] + } + ], + "source": [ + "# 检查 PyTorch 的安装情况\n", + "import torch, torchvision\n", + "print(torch.__version__)\n", + "print(torch.cuda.is_available())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R2aZNLUwizBs" + }, + "source": [ + "### 安装 MMCV\n", + "\n", + "MMCV 是 OpenMMLab 代码库的基础库。Linux 环境的安装 whl 包已经提前打包好,大家可以直接下载安装。\n", + "\n", + "需要注意 PyTorch 和 CUDA 版本,确保能够正常安装。\n", + "\n", + "在前面的步骤中,我们输出了环境中 CUDA 和 PyTorch 的版本,分别是 11.1 和 1.9.0,我们需要选择相应的 MMCV 版本。\n", + "\n", + "另外,也可以安装完整版的 MMCV-full,它包含所有的特性以及丰富的开箱即用的 CUDA 算子。需要注意的是完整版本可能需要更长时间来编译。" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nla40LrLi7oo", + "outputId": "162bf14d-0d3e-4540-e85e-a46084a786b1" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html\n", + "Collecting mmcv\n", + " Downloading mmcv-1.3.15.tar.gz (352 kB)\n", + "\u001b[K |████████████████████████████████| 352 kB 5.2 MB/s \n", + "\u001b[?25hCollecting addict\n", + " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from mmcv) (1.19.5)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from mmcv) (21.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from mmcv) (7.1.2)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from mmcv) (3.13)\n", + "Collecting yapf\n", + " Downloading yapf-0.31.0-py2.py3-none-any.whl (185 kB)\n", + "\u001b[K |████████████████████████████████| 185 kB 49.9 MB/s \n", + "\u001b[?25hRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->mmcv) (2.4.7)\n", + "Building wheels for collected packages: mmcv\n", + " Building wheel for mmcv (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for mmcv: filename=mmcv-1.3.15-py2.py3-none-any.whl size=509835 sha256=793fe3796421336ca7a7740a1397a54016ba71ce95fd80cb80a116644adb4070\n", + " Stored in directory: /root/.cache/pip/wheels/b2/f4/4e/8f6d2dd2bef6b7eb8c89aa0e5d61acd7bff60aaf3d4d4b29b0\n", + "Successfully built mmcv\n", + "Installing collected packages: yapf, addict, mmcv\n", + "Successfully installed addict-2.4.0 mmcv-1.3.15 yapf-0.31.0\n" + ] + } + ], + "source": [ + "# 安装 mmcv\n", + "!pip install mmcv -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html\n", + "# !pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu110/torch1.9.0/index.html" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GDTUrYvXjlRb" + }, + "source": [ + "### 克隆并安装 MMClassification\n", + "\n", + "接着,我们从 github 上克隆下 mmcls 最新代码库并进行安装。" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Bwme6tWHjl5s", + "outputId": "eae20624-4695-4cd9-c3e5-9c59596d150a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cloning into 'mmclassification'...\n", + "remote: Enumerating objects: 4152, done.\u001b[K\n", + "remote: Counting objects: 100% (994/994), done.\u001b[K\n", + "remote: Compressing objects: 100% (576/576), done.\u001b[K\n", + "remote: Total 4152 (delta 476), reused 765 (delta 401), pack-reused 3158\u001b[K\n", + "Receiving objects: 100% (4152/4152), 8.20 MiB | 21.00 MiB/s, done.\n", + "Resolving deltas: 100% (2524/2524), done.\n" + ] + } + ], + "source": [ + "# 下载 mmcls 代码库\n", + "!git clone https://github.com/open-mmlab/mmclassification.git\n", + "%cd mmclassification/\n", + "\n", + "# 从源码安装 MMClassification\n", + "!pip install -e . " + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "hFg_oSG4j3zB", + "outputId": "05a91f9b-d41c-4ae7-d4fe-c30a30d3f639" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.16.0\n" + ] + } + ], + "source": [ + "# 检查 MMClassification 的安装情况\n", + "import mmcls\n", + "print(mmcls.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4Mi3g6yzj96L" + }, + "source": [ + "## 使用 Python API 进行模型推理\n", + "\n", + "MMClassification 提供很多预训练好的模型,可以访问链接查看[模型库](https://mmclassification.readthedocs.io/zh_CN/latest/model_zoo.html)。\n", + "绝大部分模型都能够复现原始论文的精度,或者达到更高的精度。\n", + "我们能够直接使用这些模型进行推理计算。\n", + "\n", + "在使用预训练模型之前,我们需要进行如下操作:\n", + "\n", + "- 准备模型\n", + " - 准备 config 配置文件 \n", + " - 准备模型权重参数文件\n", + "- 构建模型\n", + "- 进行推理计算" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nDQchz8CkJaT", + "outputId": "9805bd7d-cc2a-4269-b43d-257412f1df93" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2021-10-21 03:52:36-- https://www.dropbox.com/s/k5fsqi6qha09l1v/banana.png?dl=0\n", + "Resolving www.dropbox.com (www.dropbox.com)... 162.125.3.18, 2620:100:601b:18::a27d:812\n", + "Connecting to www.dropbox.com (www.dropbox.com)|162.125.3.18|:443... connected.\n", + "HTTP request sent, awaiting response... 
301 Moved Permanently\n", + "Location: /s/raw/k5fsqi6qha09l1v/banana.png [following]\n", + "--2021-10-21 03:52:36-- https://www.dropbox.com/s/raw/k5fsqi6qha09l1v/banana.png\n", + "Reusing existing connection to www.dropbox.com:443.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: https://uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com/cd/0/inline/BYYklQk6LNPXNm7o5xE_fxE2GA9reePyNajQgoe9roPlSrtsJd4WN6RVww7zrtNZWFq8iZv349MNQJlm7vVaqRBxTcd0ufxkqbcJYJvOrORpxOPV7mHmhMjKYUncez8YNqELGwDd-aeZqLGKBC8spSnx/file# [following]\n", + "--2021-10-21 03:52:36-- https://uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com/cd/0/inline/BYYklQk6LNPXNm7o5xE_fxE2GA9reePyNajQgoe9roPlSrtsJd4WN6RVww7zrtNZWFq8iZv349MNQJlm7vVaqRBxTcd0ufxkqbcJYJvOrORpxOPV7mHmhMjKYUncez8YNqELGwDd-aeZqLGKBC8spSnx/file\n", + "Resolving uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com (uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com)... 162.125.3.15, 2620:100:601b:15::a27d:80f\n", + "Connecting to uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com (uc10f85c3c33c4b5233bac4d074e.dl.dropboxusercontent.com)|162.125.3.15|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 297299 (290K) [image/png]\n", + "Saving to: ‘demo/banana.png’\n", + "\n", + "demo/banana.png 100%[===================>] 290.33K --.-KB/s in 0.08s \n", + "\n", + "2021-10-21 03:52:36 (3.47 MB/s) - ‘demo/banana.png’ saved [297299/297299]\n", + "\n" + ] + } + ], + "source": [ + "# 获取示例图片\n", + "!wget https://www.dropbox.com/s/k5fsqi6qha09l1v/banana.png?dl=0 -O demo/banana.png" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 420 + }, + "id": "o2eiitWnkQq_", + "outputId": "192b3ebb-202b-4d6e-e178-561223024318" + }, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYkAAAGTCAYAAADdkO5AAAABd2lDQ1BJQ0MgUHJvZmlsZQAAeJx1kc0rRFEYxn8zaDA0CxaTqLsYsqAmSpaMhc0kDcpgM3PdmVF3Zm733kmTrbKxUBZi42vhP2CrbCmlSEkW/gJfG+l6j6tG4tzOfX895zxv5zwHgklTLzr1cSiWXDs1ntBm03Na6JEwUZroojmjO9bo5GSSf8fbNQFVr/pVr//3/TnCi4ajQ6BReEi3bFd4RDi57FqK14Xb9UJmUXhPuM+WAwqfKz3r84PivM8viu3p1BgEVU8t/4OzP1gv2EXhXuFY0azo3+dRN2kxSjNTUjtkduKQYpwEGlkqLGHi0i+1JJn97Yt/+SYoi0eXv0UVWxx5CuLtE7UiXQ2pOdEN+UyqKvffeTq5wQG/e0sCGu4977kbQpvwseF57/ue93EAdXdwWqr5y5LT8KvoGzUttguRVTg+q2nZLThZg+itlbEzX1KdzGAuB09H0JqGtktonvez+l7n8AamV+SJLmB7B3pkf2ThE7z6Z+tvc+SlAAEAAElEQVR4nLz9S6ws25aeh31jPiIiM9dae+/zuHWqWHy4YViCDD1sqCfZXUvuGLDbBqG2AUN9wbYoWDbctiULBGSxRcA2BJMQIIsPyZRokiWbpkULIGmKZJGsB+s+zjl7r7UyI2I+hhtjzMi1q27de9lxAvfuc85eKzMyYs4xx/jH//9D/jf/xr+hqkpKiQCoKACqQMf+WUDU/mOIgS7Cy7YSEHJKQEPVfi+lhKrSWiOmBEDvnVYrMQZEAhICqoqqMsWJLopqp/d+/PecM/teuF2fmeYTKUYQEASAnDO9d2rvpBgRkeMaemt24SLUUvFvgYggQYgx0fZKRzlfztRaj2vf953q12r3QUHv/zzPma56/HsIgVorH58/Mk0zT49P3K43WqucTjMxZlQ727bbJSHEmBEBbY2YJ0IM7PuKANu2Mc9nUk60WgkxECSw7zv7euPp/XtEOkqA1thrI4To96OiQK2V1hopJfseChICQkCCECTY8xUopYAqIoHeO3my+xpCIIRIErvq0hoShPM8232UYD8TE7V3tHVOy4k4TYjA+Xzi6emJx8cHPnz5FX/gV/8AMUZ+kdfLyws//OEP+fjtt3z/7Sdu2yu9d24vK6VvtNYQVVTEn2xn3wt9b4QYEHv09CC0pkgMBIRt3UDU/q53YhRyTrTW6N3eqew7L88vnE4nnh4utNrRCEFhb50kgZgSVZSgQkqzr/8GCAElxsS+7dz2wnle2PaNT8/P/PKv/AohCrVX1utKq0rKiXmaeb4+Q+vMy4k8Z6RDKZWQo312Kcwpk1Km9krviiSh10qtnXma7P6KEHx9AlRRtm1HBKaY6QLSlZQzYl8aiYkQ7vtnmiZCsDXfWiP6/sohoCH4v0NtHREhpYiqEkNkmjJVO1o7RHvPJJE8TYgIKASUbz9+ZF1XHh8fOJ8eUNrxmfu2IyIsp4uvNaH3SmudEKB3ECBPEzEkgtj6jjEiQWi+n0O0ddpRtFZASFMmIKgIOWdCgADUrrTWOU0TIWcC2PX4GhP/c7yCx7DeGhJsL4x7FWOk+71trR2/O2KUqvp36mhrNBS6stdCKYWy7fz4xz+m0ZnzBAp9L7R+fxbX2xVBOJ1OlFKP+LauKwpHTOwiKIB2QIgx0Hujq1LKTlCIOUFXeu+klECh9QaqpJhI+HpSVZq/mQQBxA4HEQsi/iWbKoIwe5DOObGu5biZtd0ftmCbcUSk1pQYleSBtTelR+zvVQkiEGyT11oRgdP5kRiE1hVRqN0OgJSz/emHx7j5IxD11izYRVs8tTZUO9SOhk6jMeXpeHghhCM4igRyTpTS6K2Rc0aCHThiEcYCRa120E0TX3/1tR8oSi0bwb9vSoLfPcS/ateGNqXWjTkKMU7HwpvnxT67FnpT2/QRTqcz0zSjKvSuxASSM4lAFLGg6QtYxD6rtUYIgiBI78QY0K4Q/XF1u167Z4Jqp1U7DLR3tCuaMzEEcgwExTdDp/dCnjKIIBIJMYAnByEEAtC126NHWdeVy+Xy+x4Mb1/zPJOmTMoTec6UNrOuK1vbLUV4s05UlUAgp0xtSggRkW5rTUFipGoHDwqt24bKORGzBTchEIOfmjGxLDMxRvbWqa0yS7aNJ4GYImoLGySi2u3AR2z9KpRW+fj8id6V05Rp2ii90nslhUQvtra72MZWIBDpMTDlcU8tNwlqz3PKmTBltlJopZCiHSSEyDRFJAWkC006SMBCXyeMRdGgS7cgE4M9X18nQe4JzzRN94TPA5+qkv0AEt/bdj+TPWsRamsQLQaklOihUWtjSokQoq89SzgrcDmfuTxejmRDgiAKEoR0udi+ShEloCgpzah2SinkHEgpEmMiSLDEIIgnNoERzlOKdJSMIPNClICI0rvSaYjfj94VEWXOCUmRI3r5ATViytsgf+yzEMgeiwA6SgxCwOLJ+J0RY1qzmDIS6AYEAk06np8TUuR0PlNKsaghEKbE9z/8no+fPpJy5osPX7CcFkiJFAPURmsdBcq2sXVlOp8s6LdGo5FiomsjihDBf14JmmiiMBKDbrEzxUjrjYRXCdKVLrahA4Hu2SVd7QEKiAqtFWKyLxzxgBmD3TAFDQLaUcQybhFbaKidbiHQ+ng4FvBH4vP2xA3BFnKIdi0528kYWgDtnu1Hu/G9WUDzhzEWPP6nVUqRsnfLpFq3xRWCZR1iQbG2RoqWDanagdE9EARJLMtC1+pZOV5FQfPDJYZIa42cZ6aUaKgdhl1JMdimUyXEiPaOyIygKN2Cut8XO1ACKVmWFrBDwLI9UKzyWm83luVCTMk2/V4s4IRAmCZSjHRt9m4x2e+OHeQxMQW7v6qdrkqyiGEb5E01NaVkvwBe8XkV0hp5ORE1oNIptxshRup5prXOuq2s68qnlxeWZfmFq4kUoh1wggcEYZkXRJR9345n7Q8A1YaqBdXeGnutlgSIoq2xdft3W/gV6bboguLr3tZvj5GH0wUJQhdlSQmNYs/AK5KO0iVTS6XrTkqJ2pU5+d/XwjTPjFiTc+KyLERVmidMp2lBZCdmC3Q5JcSfd0OprSAdJNoKERG0Nsq2U1qzA4lAium4B10gqPgeVjqdqp0lT4gq120jq11Po5M12GGrSsz5ntyJJSIShRCjrXM/mHNKxx7r3RLKFCIpRCQmJDia0DM59iOIxxgtadRKa/D4/j2N7vtlonvFP00JkWjLUwK9dVpvVi1IYmTMy3Lye2CVW0qRkKPVcqlZYA2Rpt0qJrH9CZ3elYb4+hFLgPLs+0g9CbOKQN/EkLfVxPjvIwapWsWaQ7DqXSCKUN/8zqiKYkpH8hxHzOuWbEwp00OEp0e2dePj80dondO0MJ9O9O+/Y982QxhipvRqh4uv52VeALhdb0zR9tBeO1EC6/XK7bbyxRcfrIqaJiJehXUhJqErdI9HXazCSSEE1m0jiHA6LfSuHoTUHogHHXonADkm9t6JKYJ2WnW4B8sCqnZqV6IHectqhR
ADeZqOzAQRew8gBL84r1haa3S1Babw5jSGoMpWu5W4Djd1L6VSjJ+Vd+PU772RUmaaJ3qzDMc+0wLv2yqiqxIk0NWuK6ZALQZvpCURiKgotTXaXphO9lD2fSPniSSB0zwRQ2TvhVobMQRSytAbI08JDsuV0uhajmxERCyb9w1gSaEvQvW9KoHrdeO7n3zLV19HnvIDm9o15ZT84LRyN4gQgm1yBVopiESmnGxdxUjbi8cz8ViQoFtFYRvbNlaM90xKVdn2SoiRNC8QFNFITEKKgmL3rU/Ktq7k65Wy7cTz6ecfENEO5NNy4iW9UEolxkzOndY2C8p1VHXNn2MgCKz7yt52kMgpBnqzZCZKQAKUplxfrrxcX/ilH3xFSonmGzQS7CBuDUKi1U5R5ZwSEgy2sIDQvXy3QJZiIkhHgx0mOcBTzlZpdlub87IQUoLxXEJgipMd3L2ybiuP+QGRCLWgfr/lgFjF4JYUueRpFO50UQIGdFntYBUC2gkhEVs9Mt7JD4LeOzkYbCxA6Z0k932WQiAkgRjIqszz7BVTcCgqojT228Y0zUdWPIqMECIxRxhZt1dGy3IyeLYUJEVLMkWskol2AASHSSwNC/TQkCqAVfQp2T45LzNIIHgShUNDqkqYAoFgCQbYwdybVRGj4vN92HtDYyREO5xitHuasiVrY0+mYIlwcYRCRJgGtK5qVbMIIcgR0wiRqYfjZ94msaOCxO9rCImYLeluTTiFGVFo7cEOhRD54sN7Hh8fqMWSwarVYCLtBI+zMUYWvGoKgmo7Kr5aNl6uL1wuJ4ttMu4FhBTo1XC8HDK1FyIBjUqqpXB7feZ0efAtahh2SpFe+5F59m6ZpgSDNyzbhBgiXaycF/vO9lch3P9lHDQWuUE7pTRiTEiw95Sux8ZDBC0dmQdWagF/3yu1Fuq+kx8eCDFSym6YoNh1xxD8vxdbbCKU2lEt1neJkQSWTbRGKUpKhukqoP493/ZYUGUrBfZ43PDWGqU3Umsotve1dwqdFCKlVvZSqLXy8PBAQFj3yrTM1FZR7xncq4fR/7j3OUTswEzcD1Ar960KujycOV1Odpt7I4dAjpEeggW63tFwPyzVv3OwB+yboDNny2xUOLJ39fK/velR9W7XMOVx2Nv9CdrpPdJD47ScyTmRg22Y0ivrujHNmwfMn/8SEYecJuurpPuGHuV/792zsXHf/FnZfzqCYaVbIiCZ3gybrnWH3undAsi+WzWQpkSojYKQuvccciTESG/NqrUkaG+ELqTJqgXxZyb4vvB6IwTL1BOQzmfL0GNkFktGOh2RRMyZuVcQ2/giQpwSSaJlc63TRd/8ncNkDjeNwwECMQTW9cbr7cZ5WZjydGDNOUaaZ68qQn8DDZatcDp5gPZqQSSg2bLf6bSQQj76ir110uN8oAjBM2RBUa9WrHq1NT0OqZQiJRoMFFOyICVCcJhOBKKMiiYgkln3nRwNhj2uy9GG5FV0V4XoyV7TI1hLsN5d9aAcJdBohI4fUJEo8cjyQ7T7KcESrO4Zuvj3zsEODLVsjd4s9sUQ7ZD0RK4riCd5QkBKPeDgEVvsn20PjiSlRTF4h0iblQdgS4lSiiVd3SClrRSHwTHYs7Tjs3NO3NYb+213WDVRe4UY+eabXyKExF4NEdGuqIBWq9iSGJQnRCREunZSE+V0ufD4cLZyneCnnEDwikCtCdV7t8Uv0BS7SdipiogfIkJyPK73Tm92KuecraEshtWXfQd2YkxcLraBRsas3TZFUGVbCylF8pStwijVm1QDk7asI4aAvb3QarfDIliTPE/5gK9G1imOByPdMwE9fgaBnO4N3JgseIjo0f/IKaHz7LCckFOE3imt0aIHMAnMy3LAQXursFvVlLycXbwR3Honhng08EeFE0SovVP2nSlntFuJPuXMw/lrw8CxzZUGxKFq2LZ1rO0o8GavOJ7uy4A9FLrafUr+swNuatI9g4yePacDP48pEJPjrr2z6RXJsx2Yas3SGDNkC7ABKLXy8+sIQGBZFqYpH/cp5EjYE1vZkdoc/rSAFIJQHV+tQQkhkaM1/hWFZLj2vu2U3nn37on3H54QojWmu9Kb0ppt5Bytgg05MCcjKoyDR1WptbHVxqNDDejI5i3wiDXziIED1+5YFaytoTFRW/UDG1QS8zyh1Rq+PUAekK9nobU1ylb9uYI2g/xEO9agAggksYrj9dOLPed3iRDtugnBoJdgcIgkIXRQPzRQmOYJkUBrlek8M+XZ+ndxZnpDWlDttGaJWM4TMSeDN5sdfskDfQiGIKQUDb4Odi/avntVInZ4q5L0nkQ0VWKAlCPnMFt/NFhAD8GCuTWMxdZvs30bnEgzDqaRnOY3GTxdaMGIBuIxIvh7iUPuMSXL8H2/xRAJUVANSPakqXU0KAQhBEtMrbKLoA26oSv9aOPIcT2DpDPy7fFKiCV5qkhXgxa979F6h97Z6+5wl1dER2/rXuXv206I0Ei2T7q1FdI0HfA6CLtWJzp4TxKgG4yGKq+vL6Tzsjie75miw09TyqQ82EnqG8C+YK2NEOWAMMRPRxll7ujuK2grhDkflUjv/cgoxyYZmfvYZCklW+i3m512tdN6Z8qZIvbz67ozLzPRM4au40oM20/JN1mrRM8qx7MYgX2aJgZdorZGrZXr9Xo/uEI4mFIhWqAZ2fxokneHz3rvlHWltEYKgdPDA4I12NU/+3w+27X4qhjQWGuNIJ6hOcsghHAE3BQj5My+74QYWW9XNoX3X35pC0siGo1ooKqErvQQrEfUoQdjNIgKOWYIRlwTUTITt+2Vqta3STl7o8oyIBzuKL15tmx9lQFPgFBrNShyiUiEvTb2fSVOiVgTeyho71yvV87nM2m6N/p+v9c4QFXxewOosq+7B4FA2VZv5nfLsrRBM2bc2AQpeFZGp7ZCr5VdApMkuozGojXrWhdySBbMwx2y7KgnDxZo9loP7HoElYHj23LqR0CwnlQ3OHFg/r7egx8y3dlhju1ZhRBtu2mz75+SIJPdl15sjWkQrtuN3BtTnq0n1IVlnvnqq68OzH8wYoxZZNskSDDoNIxr9XWIJTzx4cyyzICtncvpRIjZiQGW/cKCKsTRcI+2fgMe/Lp6cA2U3okoSayiCctyMGmInVYLrfXj2ccoft3RkiyxIDd6BaMCGgE2J2PZAajHn+N7w8GEExGoFbodNsch7s8sJgFJR9VSq/UpoyTvZVvFBtZ8ziGhYveoNIgSPR4o2Q/FgKDxDr+9va7m6xSPTm2UA77WXl9fOJ/O9ll+sMx9ggyESFlXQJlSZrCtFEvMY57IS0Zro2k70AEVS4p7qwbqxUjtBk927HAKCtu+8Xp9JUkXz+67n07GXqqt+QkZjv7EnUEUrEkWBh9N0FZBHL8PnZwiGgOtW+9iPEztdhOW0+loZo1sedwYgFo2vv/4iV/5lV9h34stxpSQnHn59luWU2VK8WBbaL8fEjFYf2LbN1pTLueTMXK0H80o8QfUS/HKwk7SEK0RPlhd6lnkwZDqG9M0ee/CKGaMrCAlYu+G83sGU
EuxDN4bgOpZ54C2DJYzVljO4aDdNe+bWHZj5fy+7zy9e0fZi7W7/cDtfv2JADFQuxJ6h2gHbCCQstEy991KyhDjEQiv2856faH2ztPTOx4fLsySkSWyroVGJ2C9qEkM52zqkFspTFNmztkxVHvOp9NC8sDZamPdC9O2Ukr5hQ4J8MZjN4qgJSGdecrUHdZ15SfffcuHp3cs59OxjohWJY5g07VRtdG7sdB6MvaTiNgB6usq+j2049zgq7H2W7dGaIwBrY2H08myYg+gloU1S448WZFgScVtL2zryod374n+LFElh4jFRLEGtdzfK/halJGCeg9hrEERIeVMqZVeO2tpxJyIGqiqRAmcl5l13djWnSTWI1BsjQ4aOaiTFcKxBzpKfrhwmpcjgZnyhOTMnCeSRAjWYLe9Zz3IFAzKQpxG6dBbBLoI0aGyEISUJnIU77VaDzCmmaBWURs19V4xjAR0HLx3XN8+f3K4tjkaEKJwu66Gu8dIK8bcHL/bWnN6rByBNcVIygEJ3gQPd+ZaFEFptK7kmAle+YPBTN0roCkZYrHvzkpylmhXX1VvvsfBeDoSTe9BqvehRFguF77wJKX37msUtnUj5MgkVs0LQrNsmxpsLYVkkHDcxp6we7W3SiJ5cqJoq0iMiOrRqFbtNE/Wf/Dl16Te+9H86ePi5plt349D4bOXY8KibzImEfamBGm2kVTZSyWnRFwWyr6TspeGAqgcVLWDAuhle/XsOqWJy/lMb40ppwPmuTg9rtXCVgrLshzZgPaxscWDMNSyU9vENE9HVtdaG2Ga1jzjCIFt3YkhkdP9sAohUJs1n3volFqZ59ngBV+0XZV5Muy3O8e8FGsGl1rZ1pV5njmdTgwNScrJMnU17UnzhTAyDCslFQnxWGDzYht3nqfjgFBVaim2gFIituAHTiZJoGfL/lWNyQSQnQEh3eCkOWWYTmytEETY9sLz9sqjPqF0gkRELDmwzDCDVmO0CAf7pamibafWxum0sLeGlpU8XSjF9Cf3eu7nv86XMz/4+mt++KMfcXu5Yedvgmjsi4fLA8vZmnRMmX3fXN8ix6EiDGamYckldIPBmmVUg5F2u61c5gUm78WM3kKwzwxq+pIafKPja+xI/GyzjnWjXhFOMZNOVhmLWI8miUMlop4Kd3vWg6EXgwfMdlA7x1pUGjHYZ+eUmXLj+fkTAKflRNXGHCIyZZbzibbCx0+fmOeZd09PR4Ay2Mx7HDG6TkoorRJximmwClZCIIVkxIxpclgaVDqBTKIanBYE7XcqKs2g3JwS3fU+A+I9EiThoA4nJrLg72UQy1tdwcFaxAI3QRAiMdgVJOmEEE1vMs/WjxGrynPXo9JI3nMZdNSUM6dlAcFYiDFbfw5jbVlSKUg3VEJRZ0p55S4mtsgOK9r6sGerYJXhQFr8d8ZeH6QCg4Q7te5GdMmZqMpyPtFr47atlFqppR19GFolJaMsN20QAlOI7NvOtq2GVJzPzDmxbSvX1yspBx4eHi0O5IgmO6SaVxVlK4jCcj6hpaLRemr02ggpMomw+uGQp+xwhfUgxpdPIjQ1HDckR2JFSE5VHSCbdmXbNvI0gQu1Suks80QYpSAOE+nbKiUeYrand+8wFs2GSOR0shPwcrmwrptlXmPRd6PCqZ1jni0Epnk+WE+j5xBDoLbOXqzzL+EerI2UYAsgiVDeLMxpmphyfpONRFKMVP8eBhPZ7+Zk13J7vfLy8on3794fGzJP2Wh/rTn9NRCSCelatyw2RhOzjYXWdbBMPt80YFTU+WRof3WmA34fomON9I6kRNZOb9U2cgoEDSwxsTw8oGrsqOfnZ7btxlJm5sWYELV6U5/mWYlhotO82Gks4nwRWxLaoJfO8+1q8OFyYlvX4/D8RV4pJR6f3vHy8sp2MTHhuhd7rsvM6bwYfu4Mp3XbySkdTDH1rL43tT5a6CRr23vGbYya9Xbj+nplXhZOMdoGb+pVZ0ebNUWrtuNUsObtvdIMTku26+kEsQqk1so8T9zWjVqtitEpWnbq7LHmiENMyTZscWjADx7rBdoznb0CVGc6PZxOBvO4gCzGaHRWsQx3niek8xnEEWJgva3c1psJIF0AZnCT9fCmPFnPIEVyiKiKJTbWibXv6mywpoGgltBEF/UZKc8q6PEsjqRTHcdPFtAtWFq7P6T4Rq8kx4HQWjMYOQTSlI/+BmAMx95Jeba9L8I0KOfVnrs2+13UhITVEyx1yrut4SEQdU0I3lju3SG/uzgY8X15kCccTcD6pYqxGa2qhKRQBmzvz+ltZSEi9NCptaAeb4aGQoISEGot7G1HW2M6zRgl2H4yKGg3mnbOE2cnSsQpOXtRWE4z8zSBCoqJVox9FpinhQCkkIwB2RqfXl9poiRr90FSW5Qp2Mly0NByQkUJtZJECJL4eH1lW2+keeHx8cHhGWPUhGQshxA6ezEsep5MPW2BzzZpc673wPfhzoA6egjjxku0Q6j3Qx19WuZDuNdb8yDtWKjfuiF0k+OBW8C3nkX0stFKveQPbS+VcnvmdDpzOp9s8cdotDM//Uutd7YW0JupQYfCeQBfXTuXhwvzshyNu2mayDEdfZge7KCVbtTTKNYDUh3NdHGWhanWU84HldJUx7DMM92DtMFTTs0zigWgaARp9juW+tp/dzkNGUGjZVg5RE7z6WjeG0XPmsPSBaEZ+613wLJdeqchJFWSZFSMLrhtm6nF952pTNxuK09Pd/bJz3x51RS9gd0dRurNnh/q2h6HLc7LfMB122bVYvR1k3NCW6NqJYdIEoHYCQ3XbwhTtqDVdTQxbQMN2irN2HptHAZOh7R7lEkxIZ6NWifH1PAxJlKsCMbvb310t0w1Li7kGwdBVSVLPKDqwSZSuolInYnW/Nmc55mWEnurTkiwbLdVO5DneTLlrXIkflGE87zc9RweuB4v1jdDcbZbRIN9L7hDJUP7MJ5TlEBwcaKqa08cZRh6hjvIhWlP5K7HCk4NuvdqDKox6DiTUCbrBJOT9Sq6r15Vu3dW2QaidG//GPSkVdGUTHPS7JDKclw6IP4cXITYlZD9kFJxkTF3jdHoMyguPDb6t8USO4wqto4ihhIoSqIZPA2fwU4jVokI58vFCT+mbWimpuR8vgBCfn3huq5GdZehLjdtTQmmF2o067dGMV1NqcwxI8tCadY/RMOxb5PH5V6rISI5U0rh4eFCB1IX8SxAqHVHxaTee6tUGqma/F9iQNShpH3nuu08Omd5NL5NJNbog5oGBxsHlOTNleDQRGumyLQMFdv0R6bm9DKE8/l0NPgAWi3sLocfi7v3Qq3FP8MWyaCqjswf7BpHXDHoZ/DRrSIq643btrEsJ0zU04xO+qZUHBDbeI9SGj/+4e/w7t07vvj6K/Ztp/ZG2SuXy5lpcgl8tABWHXsEq+KsErKDM/s9jTGiLvyRiDfz2wEzjewvSqB2u0Z1YRZYD6iPDSSjYdUNTxZTeAaNILaxREwfsO2VmCdO2Q6yXgoi04GTjo3cu0Wcrp2g4TgUt2pK1rZ3cg6cTiagK90Oitv1
xsvLK4+PlyNT/FmvkBLZA9mgJI6mZNN24OiqJggbOKMx8gJrKfRSmNOD0V23QovK6WQwZcqRBxGuYVQD4sHQsshRHaC4PqPYgS0W9HI2PUWQCCi16rH2UkqkbIlIzumAOUTfUncHpCsW2ERIKRw9kNHjCDG4a4BDGKpo8P6ZCrd19UNopgBl36BXCCYYnWL05qmvfg8MyXU11luakClxva3kaWIZ1UdvdlhGjr1pZ/EbaIk7gQXFdAdeNGj0JFCC63Zci0OnihgJRo3wkFNAgt3L2A2zTxIsyfHMWSR6I9v2Tc6JpsGtKAKlWkUfvacVAv58oIXqNjIQ/dCptVp1HxKtKQRlfpNtDoErzjwcFGL8u+poY417i0ODokaFbS5Ow6BPO7DvgsSRvL6Fmu2WG11/9MgeHw0WnJL1D32loq2zq0ItTtUFiUJp3Xtxhgr12hwxMaaWBzQTT7fG3it0Yz2KAMEqxBRQWu+UVlm33U5ub0IO7UFpDdFOCFbmPT09kWcT1Rz6CcExf4MsJFuDB1cX1tYIrRGHyMiDwKDFjhvU3UrCmjVKSv4Q/X1arcZv7405JVo1Gl5MCWn3UtCqDznsOVo3LUQ7msWjGeQHkkNdT+/f8+AVw1CED0hslKCj6R0cg0w5QYzWE3C8uWw7ilJcGV5qJ3TzScqj8R2CWY40y/innDzQFLoTCkIQL78BDY5NeC9HhKLOypgmK9cB1UCnOZyFfzcjFaBKyAYBNjWBpKQI3ZgVrVVSNuWp+IJtqkR1+wO3BRHpIEaPRewzgt/vFIIlFhKNlWMniOHg+8627VzOp0NM+bNeMUamZOK6h8dHWmus/sxTjHQR2yDjUAlC78a2ad2sM0oyb7LelCUbfCYdO+TENynBoRGBNlDT7n5BgwJrf59SRD37jCESpjutkUEh90NAvboVZwVZPNFDW9G9ATqe53AQGLipqB5iv+R7YFfbF/RK26tpILI134m24bfban2sKXGeZiQ6scChQenG4HoLedRa2dcNFhOgRocVtavpmQb99Ahod7uJOxowMmUPkq4zSP79hyeYCJZpx8qw7ZBQ3QlB/L6oPw49LCuis/MEYV2NjDJlE8oO5lVInRCTZfyCOQc4bASBFHBRXUCk8nq7mrNDVmrrTMt0PM9xb1D1/pZXHaoHaxEdVFYL5hKcPBEEulfo2ghqgky5L9fPNFL3pCkgoY+8FLPisUQ7pIjEaKiNkxkanb7t7Fs59DUaTHsyLROhKbUY5dqQm8ZeICVT229lNdYYgdt6QwSW5UwPHaypn9y3BqYYicnsMlqtEBw6UgCh1B1VIeTM+XyhNbOcQO/4bfdDBjgyfWtEJwbVbKgwYZTMQ9lrN2rfh+GeZd4jyLdm/iQpJ7JYL2HdCyJwWqaDWithYOZuctWs/Eo507ftoKAOM0Lt/a5qZjCZ6iGyesuqsOZRsZ6DQ1Q5Jb755htSSiYIwhlV3bxmeu+cTjOofcfmwqwwWZ4QHQJTZ0FIMN3HYJf15sEqRqJCCkJVUzKP37+cT8QQ2PZq/lZvKp1g8ZAIB6vJgRtah9Bc05EyYL4uRbo3s4Q5W4amOhgiVqr6bXOIobPX5o3lyOTfbfydJVlCce8h86/5+YfEkamGQAfm8wJYZUbrB7tO/L2MtttJ04TUikgjqrGOijYTG3arIA1VsUagBN6sH6PHgjUdjdpqmPABE3gCoypmfTIybPewuh8w3r/onWEaKYzKNfj9e/OFFdatkGM31XIIiFuOIEZzzt7r7r0dnkpJTK8wBJPLMtFbZ5qNoqm9oMEqTzOFDKQ38NH1euX55Zlv5Bu+uXwD6uZ+IVnS4N8tffbMPLHrw+jPVLtjzVVHO1N4myWHo2+n2tm7kV1SMlM9BltR7dn0bnoJkWTBPtwx/GmaqK2608Ew+bSGvrNi/LT3prqOaxh2FTuqgdP5bCZ3yLG+7VFYPKi1gTq7yKucUdGOmGUJRqfUdpiAhuA+WbXTesUdi4x0EINX+xwoB9xhqJzzm/9uqUWHu20OFqeSBF72ld46p/PZDgNnKdHufk4jflU6dKsuhWQtBDdCbNL49OkTl/OZdAn0DpoCScXk2d0z7AP/9KZnaY2qhRTspOmifjgE8Cw2RNi21Rq7KVH2nbLvRwMxiDfpPONPwah6dqH329CcRTRPRu9rHaOF+kKOIRAmyxCtWciRdXQdknJTQ7ZeEW8ugwXS9fXV4JY8Hb2NUe4bf/x+cA2LC3XMmRCOhvDwoOq9OVVOj7+rZWfdyhG0d1X22w2dZubTcmQkVs3YtRs+bu9T/Z6IQwDDX2pdNx7OZz+ALUMam7O1xlaqPehaTE2MElMmwiGWCtkM04wLXa3k96wOx4jLeuN53zhfFub5RGvVLByaspZCnObD20oEGqZNkBgprRPj6Cnd3VZVgabsZaPWhb3sZtQ2T/y818DpBzRSigm+eq1srbCN/pA646Yb5/4tKy96/6LsGyEnpmk+nDdBPMOygNIV0OYWKHEYANuzrY15nmlqFWKKE3FAgjJgAqNTuiuZVVpYQLT3dgxa5CAQjKBjMCacnR7etVP24nRToQVlkmgnF9AlEByGlBCdBu4sNMXWlAREO7s2skZ6UKD5Ur+7la7bZvonTwZqa+z7zilGmijZ7//vxtHHP1uPL6G5uX+UkLyhKw7FWbKVjn6DVSbxSNbE+wO2rg3iCiJOXXcIW0e0EFLKpjr2w2GcC4wK6c2elhAY/kYAGuz3RSqkYQUSSFM4MvgYouuLumXv3Hub4769XaNWKXoPT5s3oMVdC6yyGvRrxLJ4OSqwNwuNO5T3lgkVVGkxcn54YNo3R2g80csjSRBK2Swj9MR6ULynmKFUggpxnhEXLKPWc5pS4t3jI8vpfFwj2knqZnxmn2vl6J0903l+fuG2vjJNiS8/fE3o4g2cfhzU23XlN//Rb/P+3RNffvX13UobjgqhNSVicFGeJmLvhy3AaNIeeL9nj4PlgB8o0T10BpVzKJNh/GwkpOyB3YUqYhTG2hpbKfSuXILZB3SHjMYXCXBULDGaSrz6aa7d6HxBzLZAe6dshZD1MPqLIfB6Wym3K7iKfCiqa61M3CGDsYDtAP1danDupevwKaplp/aZJU4YOmXOoAMSaK1Ry8YyZTQlo7vVxnlZzOy1N+ND+3cJGkiSiY5NajMLj9fbjdv1hXmeeHqY2DdxkzX8nlsTTkIwAVgzLcB8Dq6fubPBRlN33906JVgw2NfV7Y1//kuCsCwLy3JiSq9cR5DwhCY6ESKKJSOldfPLivcAeByowZqQSjfKt3ajtwbr/xzJR0heNYzGt5u+RWvo1WAeZritRkp4v8Az4GQpoyn+xWE7UykfVUQ3NwJDQrxJDhaI3rRqilPJh3jMOyeWbIQ4mmsH2WPg1yIGPRlcBBPJ4VJjv4gaE048AL1/9453T08s00SpO/un1RKYKSO9kePJIEO5wyIHDdX3hCEsxtyyPpdl7HpssaHW1oO0kvPbvexsMt8Hg020joo5xuNg0qM6GIFfxoI57uXbnpeqkRxyikcGj9jYgN7
NKl6lERWrxkYC7BXsuHYjjODN63AII0Wt5xrjOMjupqcpBJpYVyISPPFQaGb9EiV8Rg0fse+uyrbKvzfbw+fTiT0It9tKDEbikd2ucZqNEvvd9z8xn69puR/kIdGCU+nEKNgRT678c/JpYSi3gyhNIY3SGb/xEu+Zey0bIQUuD49kLwd7GA/J4IWHy5mb2BfI0XyOQgj2PqOx7MFwXY27+/D4YP0OXyja3Lvcm93Da+ZtpjJKptEb0G6aBdTpon6oaTf77pgS+7ZRN6OHBomY5b99t20rzHMmpvwZS2WoYwcL63CaHeK4Wg8hXdVuKkhfcJ+en83XZ5qopRyZ1+QeRLUafjyqjre6iO6w1/gsVT2smkWwHlA0P396BQ3EaDbenY7um0EBfhDMKdrhEwJhNN7FNoCtE2OMDZiltIo05f3lzMNp5jSdqdWOeUUIQVnidLCKrDRPhAAvtyu9mmCr1srL6wsPl0dUI63tlNLJWam1U+qOzQaoDP45b4LiT3udlxMvp/l+H5sdPD/57lsupxNxMjaVdv1dDcuI6IA34ewN8FEZer/RAvFkvl5DTGdJktkxvO1PIdGqM7+2VgvP643H09lddzsegj6DUYepnoEYuH2Nd7jcoNKSpX7oCXrtJkrMbtDYB+zgFYMfEEE5RHYWUBRUSBJpA7axzINkvIbPhFN0x9lFKL0ztcanTx9BhafHR1977rfEyHLNiiKo4ftDaZyC+H50ybjI+MP38mAxec/zqBRHxSXAbu/tENLpdEbld1O/FY4nIWZmJyOr/9yxdTyLGBMaut8zeybt+FmDtmgN0YbEZP2CAY8N2NQhqEBkEA7Qu2kpYkLY6Kyn1ivGfLM4V4PZ+wz4UIKptMUFvHpHk45E5/gu3ZKbkfClFNn3zrQsnxkPBpTb9cbj5eJWPnqs1+waEOvz2Loe6xTt5JAodQfJtiyNBCTHDRzVRHDP/Jwnnh4feXx4YJ6nQ5UN1ogadM+Hy4U/8kf+CE/v3zMG4LRmlYJVf0OBaq9t3U3t7Jleyu4v0+0BjoPi7WMeGfZdj3AfMjJ6HbjNx3CSlBAozXoL4pAXYtjqaZ7Yi1E0g2+QON7zuGm2MILcPeWt12GrPud8WHMI+GClzOV8Pr7DEOwwtoPfPwl39ae6YtM+365hyvmg5dbayWnifL54Kf/5S4djqx8qAGlZyCnRaqNLpEs0TxmJaDDTulJtY3RVmliDNKfMw/nC1iu3bT3gEIMHBl48AqFtmvP5RMwTEGml8Pr6erBAkpMdLFhXtnXzwSob6/X6cw8IsAPyvJyIOXFeTtYo7x1pelSHIURKN4+rQRsWlMHcMlVvQoP1vLT1z+YAHPeyOVQXA9ftxvcvr3c81+dAxBgPv6fX9canb7/zZ/pGE9BtENJ126ilGq5tmNRxL0O04F+HHcVhGHfvXR2Dug7G1b36VWcs7XvldrNs25qs9/0jfg51+pGEWLC39UuHzQknI1krpTDNp4OOmd2OvLXGertZEheNrdXFBKPm0Wb+TbZLBr36noVbLyC4u7Me1/r2ZT/rzL141y2MhPNzurz/sv8hP2UxfQ7b3IejHdYeb3okIMYN8YBrFbzdmz7k+eP5eqUxTLvGAS2MpNWeWQjpfmDoPeaNOS+OzB39Fnv23ezLw10JP2LCMDyNEsnJ4k8Q4bQsLLNRwEPOnM6n+/Axj68jFoMcz0SQI+aWarDeNA2PLqtI0zHFLYA4z9myZtCmXkKPrENtETuLImo4MuZlWZy3bw9zv5qP0bwsXC4XUjC1dG/NKoDxUFQIIRN8ENDb13GKjrLvzQIZi+/tQJDb7cpt301IguGk58uFXipl23m4PFjDUyHOMyNEjMqAEKyp2g2DrLWaUV0Qszmv1RXaBjcJo9pX6J3lZLoKO818etxYPGLNrQGtJQZeb/CA9g5T9qV+b8C3spNiOIR0XTuo6VmqmjJYCQwHXSurrTJs3YDE7llmdby598GcEMRjVxJBQ7Ln6/e51YLMJ0KwASWtVVAnN7gNi80qSY7hC2FeOJ+sz1FaQcTZbR2iY/n7vrPVwl4rp4FJ/oxXCIHT6cTjwyPbtrqISnl4erSg5FqIXq1CGRtrrB2Oj1AznnNbkFoL6H2qYWcwWIxhdLuuxBTY5xPDUTZhsIUG6O6k+fT+/ZEEiZimJsZIija9rWsnRfE+1qgWbNaB1mrJxZThjbpXtTPl5FToASF5pRACrm50KKy9mfPQjwCXUmQthdfrjfk0MTucM0RlUQIrjb0WgswmFrzdUDWK8LatlGVhr+UQhzbvHYRoVFaD4yBPni178FQxrt1Igu+Zvficis8x+Lcv6xVgJASEOKqGn7ZO1CtkfXvafP7ew2frSPC4wytTSJTuDEknEoQQjTlYdjrlOEhiSt6Q78d3064H5DauIHjVLghRldG+yDEda1O1IjGYCnzcM1W02vPpXr2O7zVi3YjFIhBy4iTCtm9ugChHXHy4PLBtK/Hkc2TQu6NztypOusUOo1X7vBr/ffUYEWMgfcbVjSY+6b3ZhZhdKREbgzjFSO1yt5DwG15KYV6sKdubYYuSswVcP81idGGNy/zBWT210Utxap59gxQdAnKsdcBfnz141btK1KuGIJHX12ezmfZsL0jkddv4+P1H/uCv/gFrWrZC6Hd78EENa9Vk94gcn9n2jZ4SKWeen5+ptfA+fnGMBxwqbsPF7aCKbh98TjZlrLdm1h5+6h/F8vj9ENBaKet6jI7U1pEY2F5fqb0flhwGVQSU4eRowc+CiWGmrQ67lLsGwnx+zEojyBuGCYaJ1ybUspvjLsKcJ/ohprOs4uV6ZZpnLunE67qSfe7DgGJyPhHELUcEa8ppgW7jJ1UT+7by3XfK48MD23KiP3aHHX72y4SKkeg9pvP5jGY74YrajJDz6UzLlb14Y1ctkA6IEXG7h2A00bUUQoA5mlI3BVPPW4YoXB4fjEXnh0LsgSDJNr0b752mBLNl6qNh3rsa4SJGpNqzz9Nky9uhxDFiU+AQM4UQmLyZH5BjUJQBRhBVII4ZLIFOIEjj9HA5MkywYBFyoqrS1SrV02S6kN7vNiUiwsmHU6maSZyImPK/VVShXCqtFMo0k1PkspyPiY6W4EWWZXK3aKzvFWzt3Xfsz8gEftd/dhTGq4z7wfL7JhJy/8WjOhn/yQ8CCeLqeg8xR2VsYOrofdDvIs/Wqjtiu9jP9yp9wErBzVQ8mVOsvzOMIYe2xG/CgXAgrlcwi5VB3hn0asn3PhPcD9fxXQaEDSCtUZMwOSxfFTRawvv49I5TPSGoTb3zQ0u8r2SMQ6OR79uOBDMSteVmIt3WzG0jHQwZv5Dof+KOkL014jSR3ZpYtVkJExKEwLZtn52iBrF0zqcTp+WNMbSfTkNAFDxDqq2bEChEa+KhTNPMvu+WhR8P3FgSNjSo3oOpv19rjdP5ZJvM8UFbaGYwV0vh0+urqVO1kYKg2Tx/h6IZv2m12XecvXwbjbaBiQt3C4WjucSYB5w5X85HRjX6HH7zjsMIQHxxamt8/Pgd19vGF+8/8Pj0aJ
opjOYD9LiKvWf+uF2I2ZFzBhZTP4U1OZnIsMPp3DG+W9t2aW4oglucGtvZHXYnRmxZN6p0wTcNYNBv7088/55osv+P3f/31e3d9RW+OLr75mWWY+fPMWP430K9Qm/dRlvvDzzz/nMI68fv1aBJeuteUNqtRcOeN7VGC2YIvAbpgYQmI/Thx2E0PQ7VfNG184n6OshaVUllx4Op2kU8AxDKKIbVF7kl/Ua8UEjlwyyzLTqnQNtVbOq16874KXqlN7tKlMCdCT0VetWlcp5AneM/hExxNdZCmVECLTqGHiXAq/+vwLfvXLX/HtN+9ppXHJK5d5vf5OA3c5r6J4LiVzXlYb1jv2hx0e2SUEJ35yqxpi372647jb00oj2+DVd8/D4zOlNC45E23g9/T0yLv3Dyy5mMmbQR2lcF4XOXhiB2q352GLJjjP+4dHcJrflNpZcyV3Deadc5S18nSetQF6u34uHz2H2yO73Y5eu8E4ygDxHc7zyqtXr/nhb/4W+91Ey+XqVhlHYdfNLi8xAmR30O1Q3qzFjctA65ai5mVS4BoKcmpSHStESf9uCI4hJFzvLKbf2CiFKSVub+/Z7Xa8ff2aaZx4fHpkWReKfYZq3vrbaPQv9eUwhpCgvi3oSp2N2cdHT3BRlhsxWuANPD0/M6TEp598ImuSebG1rZnNtN8RXKCslfPzRXM7r2c1WO5J6Y04JPb7SWw8x5WSHW1GMQ4jydghrVTRS61i3mizGzxXStYzRYXG+XLi/fv3LHllNbx8+3yXy0Jel6s6vNpMovV+/e+DMXmGmBiPB8LmdKyXg6PzvDzys5/9ER3LdkBQXLPnm8yzCqSJkOW3IB4/Kp+mlMwf/sHf5u0HH4oJ1Br/m3/9f8vPf/mn3N3eMh4GzqcLb159yP/4v/8/YrfbmZDVnFC9+cH9ZwZRKtLqdUVsF0W1/SRzPmhdRIP59MCXn/+EOJqSGh3AsuAxhbvZsScvMkEumafTs0w4TTfhDSrMubHMmefnZzub3bUy972zLAuNFwtv+qYREjOrNiEorYvNF0JgnCbFyBrrSQmDjle3N7z96COL31W++nEvKHhZZkvBe3Hi1RyoctjvRLtOiWWeLWyrbZ6fRnToxJtpQk2XM6w2ST1rtEO6GDJbjGU33H4+X64D6t4bpUNErqbdNOmtd3xU4HwwBWeMAT/twTC5EGQgFn3U4NlEegHj7G6iOdQyxrj9/H79Pb0LMwzW9STD+Lde+uHhPe++/ZaQEq+jrClKrngvqKptFLO6+eV34cY4a8PFZsnIFrgWie/208QYB0AsBocWUKmd6I0QYHYPa5AW43hzyzKfVcm6QM4L337zjsfnJ3bTjrdvP1A3xsZ6EhOrucar1/fc1rZFHlOrxF+t6XOvtdGcjonSmsrJLpijO29DZAdNw8h1XTnsD8pqeHwC4KMP3qooAHYpEq0r8t6xrqsiFJtZqPTGEIbrJVFKwdlcqZd6rV16t03cZAMSYqA6J+GVk7tlCoGlrHjg+fmZx8cn3r59w83NrfjaZmt+YzkjGzyyBaO01swd+C97R7wMrGXXkQhV8NiyKCcC39GcuNF7Zbfb83w+k2smRQ35nPPUkslOFWsA9uNEsPWyUceTlyj0OnPpYqv58J1Iz9aI48SWXNdx1pV3i73V3DB4R3ONtmYLtOc6TK2lUGOkrIV3jw8spbDbTby6vbselusy45jwQcrvKQ2Kgw2By1worXJ7OIBT+BW2Btdl0bs1y/Jf/uwnfPH1L7mOgly9/rOz88Q72AJspLl2eq4GBw3DxO/+jT/UfCAE/p1/+//Ef/Tv/wOm/V7v9jJzOV34r/83/1v88Ld/h9dvPuDrr7/Y5BaaiZVMaYVhkvVH64XQnHQl4YW5aQ/+ei647axyncF1fvaT/5SPP/shz0tjvz8yjklBYq3ab1NXpFmBSBdTai8XlrlDFLPT36jUzlLqajVBpK8M3f6Obd6rg0xnr3cihhgbsodmKYLe/KD0Hmvv9Jz54O0HfPD2Azbb/s3y5+7ujnlZeffNN7x++4EubGu2xiHx+vaOEAPrfLmem3kVzRkjMfkQ8MEWGZ3rsJarOrCZG6Igh2g3aG2dOE3spz3Re/b7A0PwFrbSjX2jDxG9l4Npq/ynP/oRP/7pj8XXH0SXpIqJNAw2kEYQFvbQneFjvovSGpE6lNqhaMFvYmHlS+hvz7Wy1EwIkTdv3vIbP/gBv/7973P76hXjOHBzuOF4UI6z92ag16xydtHM6xZOlwvrWmm1M8aIi4KJDLiyxaKDVAFI2iJLFf+9dNlrJBeN/TAyjHu73JRdUR0MKYDn+tk351xVf7JZH4aRlJKxq7BFJ2ryMl9YlgvRbywJYY9byIwMAoU5+ihoIpkjJV3P7vT8zGVdyMZEWmvjcrqI1tstqN7cbl3jSqPVASc31Gw0QZ/in+syNq1KuQbC6B0pdyOK9QI4H0jTSBoGQtz8huqV4x+GiPfyAZO3vwgAl8vMZb68sJz+4lvCfMFkW3FjvkdaDzIerBuzLelZbQfBftxdleHjMOBM5LeJANVCqagZxwHfxbrzdsg4q9Q2qKfZhb6Wyrpm+/scOa+seSZGb4edKZSt+3NeAVcOsQxrFzxZgfF44Obujv0wKLb2SgF37PZ7clbnNMZBB7/97dM4sBsVZpOzPMZqLszzzFx0KW3C1T/6R/8JoM7asc0Iq8089J/GS3HZvvNynIPS4Dd/43f43q/9Opd54Sc/+RH/h3/r3wQXqLXzdDozTUf+pX/5v8o/97f/Lrl2fuM3fqhLxzI9tr1SWiNci0bILSsczF72i+vsS8dxzbjo4GLj55//mH/j3/hfc3p+Yr+byGumA5fLmT/76kvm5UJKkRS1px4fHyxvIhhVWL87WpG6zdhah+fTiVqL1leXqNWxxY46JvPC680cH7rwuY3c4Id07QitJREbrcn9dllXcyGocj8wIkWMge4c6zLrPRhhIsZEGAeenk/88Y9+zLvHB0vIS/Ln6p1oM8FICBLEqDRlzrK/AGt/eqf371g/e8/Om6y/miMhLxhy95C6Y62dx4dHwt09u/3E4CPrMjNE+bjk1R5YisKZLbwjhGBRkWLLeDCGhzoGb6sgRCG8KUrsU7ra7YA2Tu1bqlRnPwzsp4Gcq1VoQC/03MneQVfk41oLQ4iWK21VYIxSwIbAgOi3+I4PE42tg1IGtARgidwKy/nEWgo756nOX6uqbkNpB/RWScHz5u4V9f7VdSNtg6xqrKFW9JniFgZkdg2tFrzfcdgnyjbsR74+IQR8lKOkNArYBd4ISWljBP3dtVS8MUJ6t8MzONbTidP5xD2vGVNg7UoOdHL9oDtHbnqPQrUCy3wSnz5Fub56rmrm4DytydAxjsmMBlVRhxBk3Jdn9uOOu1d3Sjekc35+AiqvX7/CdSfvLK9ktFwaoRQG5OfU9u2ahvYXffkom+QtwKgUkTZy8JTV4bwd+lV0h4y6Itc7D0/P3OxlXb4fZTuzlkwxn6nmJNh03eOoVzWtB8tz39hbK8tZzLOjMdaqHQJxTLo
81oXog3kidLbcFxAlVv9LMyjQU7vo3rvDjoCXcGw7Ir1cdcMwXUVhmifV62WeUpRjQGu6xAdROYeqXGbXG+/efcnPfvFPcE7eAM45ejHEwTXolv1hRZDgDgc2kKcBxfFX/tpfIzjPN4+P/B//7X+Lr7/8hmEYcb3yL/wL/yL/yr/y3+HDDz/gsqyMrvP6ww8IPtJY1N6rtGC9LJRpEFUdzSSUCLl1AC+zLh3cWidbVe6dZ7mc+X//o3/AF+8W/rX/4f+E/XggpUAaBtypyVTyqHvm+fmJP/nRj9jtdvyV3/4tahBeuBWctXXzexNFu9VGM5PE0lVQ4UQV762DExpSLX44RQUozWvGu84QAvNlZjGx5xYfjEMOxnZZhOA5Hm919jnwK2Bz4H3Y0xtWLKp7vzke+OjDt9ze3FyLyRgjRHmmOQexGX3Lt866VqI3O4QsClkuOmhjDKy9apO44frAQ4qikzZhfqF3fErk+Yl3337D8ebA0AdSjPzu7/4N/n+U/WevZlmanoldy233uuMiIiNNZflmu2E3m5whNCMBkjDzQRhpBEEC9Aso6Pfpm0aABkM2yTZT7ck2VZVZJjP8iWNes/deTh+eZ7+RBQjo6kOwG50m8pz37L3WY+77un3rcUViTcWqbjA6AjKlaoxf+VDdU8/Lr1KKeiBgSgVjKkGt59VaJbUUgvXYYEhZbuL9OOEwtH0raIG64BzE53CYJCMjl4JbbTAUWi8ZCdM4SvKcKkgCllOM4l4MgZIjM4bONlotV4K37LY7OTCQZWTJGWMKbW1w2mnNKVGotH1Lg5Gc5CoKK+M/pHRZb8+ft3eOw3gCYxlW6/NLYn2g6j4jzhNTFQnqNE08ffIEFxwuOEpaZscC/ksxyjx1GLjRQyJX0Zys+g4fGhpnmJNouhvvBfWuVa1ZvlfNkBhWazFexkxxmdZ9MOUZZwlOOtTGBKwpgiJGqmrnHL3rxRiUK9EUXCm0vfwuxnFSTpM7K6BC2wqOJWWa5p+wk0AURo0PZ+lq4zzZeZHYNoILSXkk5UKwwu7ph4GCYWfk/17CgQwie8XqOKAuiANhLZELphTyr1wQUV5wa/CNl0LImfOIylsvij/Q5ssq8wnI5Yzfh0w1DqsOOlEaStdTNDzqcDoxNA22aQT01rVSiOn3IfBK6faEBCvdkSlGx3sz6MHrGs+XX/xn9vs7fBAOGtrxogfN8i3rOF6/quSUYKjVstlueP7sY06nI7UWHh725AyN7/m//d//r/y3/+1/R4qR0zhx2D+yB9b9mtA2xNNJ1GIVjIO5qKhW8+OX3Zj9lfnjh04DRd8vC2WMI1RYhZ7/+G//Hb/9W/+c/+P/6X/g8f6Rvmvpnj+Xn1/HSU0IfPzRRzRtI1DAKhch2kE5nbrIPsGy2WwoOUv+RhaGWcrp3FEa9UnYoiO6UqkFOk1OzLlinCMY6VJLqvpMWmKVSUOvWBvnRIlqjaPvVyLDV9P0uUiWu4MmtHzyyaeUXCSdDynGD48jx/HIqhvw45zoGgne6axYyLM6qxftelSZa9V5t8zM5cWXw95gnMzcspFfS9MEdhc7oSXmQi5F3NDIGCPVTKuz7loKSk/XrGYZbVErNRed29XzKCc4T66ZWi2nNHM6HNmsVrL1r5VY4tno5ZxnPN5TK/SduInlKFeFlBH2yuDl5e8aiWE1wdDVDmsdznuBraXMqUTGeSYoLM2HBof54CgvlWIE8LfMzIMR41KqH1Di1VSSkcW3z+rCrpmaZCRTjQTZ+BAwrqgCRUiTj8cDcZz4+LPPyLEA0mVUKzJeHxqsNcQ5crHbYa0jTkn2Q7nIqMgHusFxPBxk5mwlQGmaJlDpZSnqGzANbePpwyA6+CzZEXmK6uVQ6ByGTllZIUgHmIs45k/TxDxGttsdXePIFVHKmQ9O5qWSLbUwNINgMYrsOOZ5JseZ5CxZO7vQted5finSjf5KmM0/9mUQ9ZY154NN5rkygpryiHEWq3+2KNAEpU4jiA2W3ycQS5EcYu0GD4eR3XqHBcY0qb8GfY7BF4NpAm3TSP4IIg4wRnESVQ6LKUXSnFmvexlrAFMVHIc3jsM8sgQ32SrqMW8EoV1Txuhi0hkUv4KqwCSi9ZtST9khLReciExsXjwFXjv/yI+/+DG2ihMbvRSlAFmuh+WGWE7lDwcTRXZm3/vBd/n2t76Lt56r7YUe7oZ/8//4N/zr/9W/ZhxHplkWr6v1mlIqNzdPGfqB9+MjVjljC2q85ATB62L9w/jLucCCyVi6ivNYsFb9XxK2Y72l6Vq+/PKnTLMwtCR6oDKnhEnSafXDwKeffnp20C/LfoPEPS8iO/G7LMvixMJ5K7WqKk/ou9UI8rvWqt4OhSBqXnwpmXmWnZCzjmJlKb8YaUF+t7UWUlTXfhays6lQcpRzDIkodcaQUyQ7h8dRqkBSrTGM4yxGypTpugbbtg3OepbnfYqL0kh+kTln4aM4K+Yza7+h5JAfJuXKFJPozq1V5QxcXd9wOp0kpF0vF5BDo3Ee1DB0nGZev3rFNMXz7mMJVAHBUMy5UqJ8LAVxe3tjmMeRn/zkp9w/PlCr6MljTKQaEbRuZbfZcX15eaYtFsoZJZxzwRkdgcVZuiJrsBXGSV6+NWNLKQABAABJREFUoW3xPohSy8hCqlfDn1M65LL8l73csjRfgkukuvDGnh8OU4Q3I39u5TDP1GqwQfTy+/2B4/GId/K9+GpIk1Abby5vePLkmQTUO4+znuk4nf0KQ9/p/FE6j1/+/Gf8/Oc/4/HuTi7+NmC9LK+2lztcaHj5+jUvX71iAaHlFDHeUnJmihMvXr1iP44450hG1DXGafVpFzhZ4XA8iotbVS3jPMrFHoIYF7PEqJpa2e8fmOMEZQFF6iGvL8uCsBYnslcPQyFnGXUtKWalyss2j9PZjPTrfrVNy9D1eqZ9mPeel+6IKicX6eqyVpIxy4I36/cOsF6tCF78L0Mr2OY0Tzwe9uI/0J/bV4OvhhgTp5MYl+I4iXomy8GQdGQktNmZGCcWwKF0GZpeqCbCEIL8mRoRWxVt7zTtcbvZEryMXJclf9EdRoXzDuY8S9eLwmlim4hYBDnx5vY1t29+ifcZZ2S5egau6hu2XBKmLmeFkJStKu0qjj/4g3/Nbrej1soUZ776xVf8H/77/57/5r/5r8kxYqg0vhXys3U0IXB584TVZisXlPvGRQTM08yvCF1rFQNkFnuwHLb1fGfVb3y3y1+LMeGd4eXrV8zjUcavzpOSKJIq5ZwbEXVhnpWcMM2zjLSl1WOaTpyOB1k2q/zbGqvBRJyVdTjtIqyoJsFwOh5k56EjzFILQ78ieBlB1pyJ88zpdBRhyaKM0vOylErSqYoc07oLKxWjXXjOmWkaOZ0O7B/2vHv3lnmeWQ89282GJ88/knAwi9xw1sq81VvoQit5CGfppTk/cCAVruSuSoh4ofL4uJcZmTPc391z9/4Waw0X2y2rvsdUWcylORHHqAeBVDM5Zd68fcv+uD8/oGfFR60K2rMYLw5SsrTUp3lm1Q/88IffY71ZkZ
PkOpxOJ5ogWQm1JooR443xTjqQJDGU6AvWhIZUDc5KUEdO4h04no7M8ywLyVqZ00TrPOt+EDdtaLDOM+XKlGRUNEU5IL23SE6SPBxGPz/vHRkJV4o6n21DI6HzRtzE3tvzbDBnlTsuDllVAg1tiykyoqsxs398ZDwccUVekyYENqsVxgdizbx6+YKf/uQLkeQBNUul75yj61quLi/46PlzhvUKUw2zzs83w0DfdnjfUKlMSWTDS063MeJlMcYITqQL4gvJkXGceP3qBafphCmIgsYICXdOidZLUl5KEVMKZBmNWDw1y14mhEaCe04TcY6Uamn6lhBaDocTUY10i6Jjke392l9G5vpt29J3PcNqOOvEvXfSJSU5FIwXs5pU5bKYTimpxFuzrMOiqS+s+xUELyNcUYmoWkhVgrnoQl5AmtXKpWCUKpqRIqpfDcJaMvrf04PZWysdiNNISmslT74WGZ/GBNWIqirJ95c0UyUo9jw4rwIAznN74LwXbPRAG8eZ4zThPfzDj/+a/ekeYyGZKmrBms6XwyJyQA/f5ddhDBjnqMlyfXHFD3/4m8xzIubCj/7sz/A28H/5H/7PzKcjc0yyJ0wzx8NeLqsqDudnT59L91a0+kLcw+MUhalml78ue7apStLg2edRyocFsNFIZGScPU4TVHj9+mtevXnD4XhgHKezRNpgmVRltkAesZa0LOcVwyEzJ4NVy0CtyleC88WMkS5lPAolwSDdjDGV0zTKs2Ila7oJLT5IIZaNBBfJriPLOaCEjJIzh4c9r16/ZP+wl7PHKPbFoDtJWSr+7Be/4KuvvmaaIqvNmtVK+WdBnt/xcBIhkSQsyaFF1dCTKi1u37bU+oGfI4A6WcRap2qVkulCYHhyI0vG46RhNQFbJR0upcSc03nz7pqGVWh0XCW8oI8/+YS+Uy+Cum+XEYsQJaUKzwXarqVtmzPrp2l2xJgEyFdhGNbYari9f6ALDav1IIHzKVNUFVMS8jM7mY9672mcoKJjLXhnubm8IQSn7aOhDWrqy3B7+w7fNvTrAes9IJVYRsRgRZoKjtMIxrBaCRr88XDk/u6WeRrp+jVPrq4+APBQvId1XGy38lkXkdBihLiZDSIz1hGH8yICkCCmhoKAv4wJ4mSvhe9/9/tc764pKuPMOopj2fOUxOk00g29zKeRnzFnWWR7LE+uLynOSoXnHDlmghduzuHxIAXD0EPJPOwP7A8HVsPA0PWUkskkcetbR/CBmEe6phEJss5JFx16NcIe6sJKFUAZMx2ZponQ9HjvefPmHSVlNt/7nJTS2Qg3nmRvsShL/rGvRUUmi34nXWiRttognVhOSbwxwGGeOB6PXF1dSEWcsvgT9PdWkIN8aUVaH4itLFC9bSiaXphypgkenOZfW3WtKxKjqK695oJxRrAwQDSVHCOtb3ChIedIsJ6cE1mEaqSSdVdi9FKOGBckSMY0HE8n3v3yPf2q58nu4tyxex8o6UOqo9H5t7eO3XZLionXb1/wt3//l1gP2ZYzirvUqgt38yufrzHq9dG+zFrDTOJ7P/hNLi+vOR4m5sMj/+Hf/zH/z3/zb7i8vJTYXr1lfvGLr3h8eOAP/sXvi7jBN3zy6af82Z8gKgrNxzVWR44p4nzg7AtIlXiaaFeNXrxZ9xQZg/yMsriWzrSmRDaGtulY9S1v3ryh8Q1t1xK8ZY4z43ii2V2ecTyNFnHtMDCNIzkXGi+GSEGcCIallkqxRrxhRi4FUyXLZZzkuS3zTHU9V1c3SoC155FVSXK5WCzWWdrtVooiw68If9q24f7+Tvw9N9d6Gcr32oaWQmU8Ttzf3mKdnDXGbOi7lmmeOO33pCxJmAbwRR8o2TOIounheOC0P+K2VoJWdLaezy+UFXmWBUrlFCO9s5hSuX3zks3ukpsnT+QDz1niTkODa4RtElpZ3pZcOJxOhMbz5PqaOScxmakscAntqOoyddbimwaj38fyEghSWWeq1uKt4Rc//wVv3r7hanfB59/+rqhYKOcqzTnHmAQaGAKCVfCO4A02i6rKBlGkLCqXYsCUql1MZN4/8q3VIAd1lIETOXKaIoPz8uB+Y/aIsbx985oXL14C8K3PV3RDLy9mks5qUTdVNdF4JyyslNS0OCeG9cBmvdYW1soc1kmU45KtUVNiSolRUcub7QaMoemCjNiwYm4qMgbb7S6kPZ+msxvXOU+ME6cY2fgNwbfKApK3X+JrLY+Pe37+85/x7c+/zWa7pVa4vroitA0LPjn4RruCiZO28V3XLsJXnLEMQ38e86SUyIwSu2mR/JJcyGmkVnEA/+Ll1+wuL3jy5IpxPOKslSzmNEiwxa/xZYyRsVBK7FU54maRwHrrKN7hkjwHU8o467i4uEAQ41VS6oxo5quBlObz3gvtLMbxyP4wMvQt6MVf9F2rhnPrHxTUVtQI8+71G+4e7littnz09AmN685+lJRlrDSeJlGrOSd52dZiqqENve7e8nn+fAIlh6q4YRxpQ8vQ98Sc8AR8K2j7mCWLYZ5HVpsNuRhinvjbv/0RD/evwEzUkrEusOQ9LxfE+eBFcp6FXlzOC31H4Pd/719KsWIqLx/e8/t/8Ht89/vf4/bhTkCKWulvdhtxvxswGPKceXL9Ea4N5CgBW8tOggpzKnRVoH65ysUt+8KMN5J1bbQjWL5lU8EZRyWB87S28vHTGy4vLrG25fb2jlQrq9WK8XDg9u6e1WZLjTPWaBiYjrIrUlzNiyncyIQm5cTheCAET9v3eCv0itCv6I2Y56hIrHKpOK9eiXkE70hZ9sXeiez7m2M2W/WCM6J6a7uWSwWKllKYponX796Q58RHH32EdZ71dsO//C//SznT54miuSU5S2fYhUDX9qSc8DEnQmipGkxhgqVxgdQGTjmy7RplFotXIWuLZpaqwULnBdMbWs84RV7/9KfsLraYogYga/HeqSbYMo8zKHpgPfSMMZ61/w5ZJi5u7UWWaqxqz61I9jCqE89ZmPlVTC/FGWqSuL+LiwtSLrx7f8fHHz0jlQQZkpFfaAiemGR8lWqVitwvUl9RPcy5EqyQZeM84kOLx3Fzc8PxeODh4UGS9XTWH2o4V8UmGFZ9L0EhOatGfcVuc0nbBZ5dP2U8zjjANY5cxPFojSHlzMPxwKrr6bqGisd5L4e0UxdukTwCq4ocSqGo5NRh6UJzjvbs+06ot0rPrWSMERlsRWbzeY7U2opRzMkF3HQtx/3+3CobY0mzjLdWa7nkLq8vORwPQMU3jk27kapYncpxihyzGHa8ehPWq0GNZPoiUc/Lfvm/xfVekO7Wgo6ZGkwxXF5uyeVjxvFETFFiNJuGXAp3Dw9c2C1N84/HmhpjaJpGO4pC07YMtQiG3KiXITjKVDAaeO+DV7WaJThRAhojJsuUPpiuZGwFPrS0reRBON0VVFU5WW/Z3z/wxZc/4/PPv8WVVn6mwHqzI6XC/vDAu3eWi+srSkq0fQex8HA6QimEdqVjAQfVcDzJDrANUglaK0jqFCcILcMwcLFeC9bfO8Zx4u7+gVolN8VrwNjDf
s/D3R2hHzieIq9f/Yw/+5s/pZQZF2SEUkumFMOylDgDXlk8Cl6FEfLPx2TYXV5zc3XD8SSjy+9863O+9dknvL1/R+MaQtOQovy9J9dPuLm6kWhPD6YaPvv4E4Z+xX3eiwpWP2/vgJT1ktY43rO66ay1Ekd9NSiyWv+aJdfKZjXw6efP+eIXP+Wv//ov+d3f+X2MkfCr4D3NxY5B4Zun45Hj6YR3WzC6sLYGQyDOk7ynzkkUr4GhE8NizUlyqudZ3qfgabzXsKiiJjnthBrxCn391S95/fYdv/vbvyXcN4fuE0baJtB2PQZ3ljsPqxU5J46Hgzzbc2J/PPLzr74ipcj1xSUfPf8IXyrZWFKWdMPxdOTy8hIfgmZyJ3zOlZrn84Mxp5kaDJ0d5KCzTsYd1ipQyoKVyooCU4znyM1qLR9/9hnxpz9hHiNNIyEZRpeRFhmTnKYZ33Y8nPY0TcAZIBd8CExWgturHuJOzWtt05BSOcu4UpLvpebMnLOoeqxlHmVRur285GloyYgqYxxnqhEH+NJen9HlWMgSquIQ7XgqEjEZgizrnClY24vGO8mhZqzhl199RYqRZx89Y2d3GKzA4XQbdooTVauNUgq73ZaLyyse7u64u7+j6VqcsXRWzDLTNAqVtxSJocyZOfes+47NZo3f6tgtRTHXVENTZa7tvQgQcgTfyFIs5aQBT5ZTlIdQEOJ8WNKWSrFFsRGeEjPWyMWPkfyFZXzjncAFT6cD291OzYeW73z726SSFD0czkC1koSAWpMYEptVi/MyfonzeM7ktVUQ0LVUvAv0vQTdLP6FYbWi1cTCUgouO55e3VCtdKSn08R2k0TiV+v5Iv11vmoVoNpuu5U9VBJY4H6/5/B4oNZC1zbgPG9v38m4s5EdTUF2R8sObWGdOS/y7lrEyVraRkaiup+ySfwPnelYr9caDevPIyacFF2ffPKcx/2WN2/fkN68pe9aSoyyu6hFfzfy3yxUKJXQOnKcycZKYVESaYqaVSGeilFHlMse5eryUh3IorhzTnwbXSeYne1QeZEOHB7f4xSPY9AsmlKISR3VVVDXYpkUdM5CTTPWUVLhN37zt1ntLimpkHLGB0l8u7q44Az0tpYSI0ZHj+cdJZV+u6EdVtgHMaAKAl38JTHls/N+uSBSLviS8VaqcFMzxVhcReX1UpRUZ4hlptbEeuj48z/7E37vn/8LhSV2OOexTiStj/ePTJNIc4fVir7vzzuCKSVMELPcdDpB24oqTjuO43GkayvjONH33XlcVYsUgIV6Hp8uz+cYE/3QYbwwp0iyUDfG8NVXL/j4k+e0Tbc80Nze3TEejtzcXBF8YLfbEdSwl7P4LKSoSRivmT/WsLu4+CCucQ7nLL4LQaIAFb2RZgnssRXG/YncDXLZ6hI7IyoPjFH0tvxac87UaeJis6H5/g/lMK2FoNgDgz783rJdrbDG8OMf/4RpHvn+D3/I0HlSEf4TTsYl1fhzUp7EjmpgeFocpvILifMs88Ym8P7uXqrRlPDXVxK40jVS8dR6Tspbgudz1qU8Fdv1jKcTtULftsSaCEl02FhD0BXXIY0cjycuLi/49JNPgSI4E9kjnl3kc5aFfq0F3+6Yc5RZX9fTdh2Pj49sm61I2krCVUvX9dIitxZjduQUdRSIjJDUn1CyjHEc4ryuSJqdc1Y8G9YyTzOn6YBZrZFfYdZvUKo9bzhTPZe5ekmZu/d34kq/2BGso+laOfRLxngh2z558ozQir/AWEueZ9AZ/tItzKP4SfqhxzYBV+X3FlzDFGce7x9Yr9ZsNhtiynh1v9fqzyMX77WLrIsJsgKieErVUWf5uZrGcjwcaNtOOohcF8XjP/plvvHfcTree/fuHeNxYhz3IlENXg58NTSiuRql6DinVr1IBWpnjaj8fPDk/AFV45BxU8wSEZoaYQRdXd1IAmGpPB7FOZ9SJDethBbFxCnNOGPx3hJcIGngrUlSkUqhEAh4Xrx5x267Y7PbCEzPSW7xYTyyCh2hkWJojvGcxmeM5EDEGNkMgyoMlXrgIn/7d3+GsRGvShqrVfhUixaCqixaVJzfWFgvv7ph2PJf/av/NevhgsPxJL9j50hJqm2qkcCknOUZqJCizOatE5GN84GL3SVv3vxSPm/95xDvr8h1raQ8Lgq1nDPVLaNzJBnP6/drjFwMbUPbGPb7Wy43F/z93/4NX//i56wubnC5kvJMqJ5RY0JrKaxWa3Gs66K/loItBecDzlmmOjJNI8OwUrFOVluBoF2ck0yIWgEnSHOD4P3THGnUBPzJs6e0/aBLcPE0LNnvP/7JP3D38MBv/OD7dL1Qt4e2ZdV1soNUU64x8PHzT+Qy0zGwMUYYT1bIwouy0KsjHGOxNoh1WxhDGV+h94HOOa6vLvFWliRGW2fHB28DQBOW7X09B+q0rTo5s2iLHx/3nEax9JtSNWUuc/P0BqwlKq/ealVmMcxzIlexod8/7nn//j2pFknqclY1vzJr9c6x22xI08T93R1vXr8CJFKzbzsWg5hDyZsL4K4WwfUGGeXUXPBBKkRDlTmljv+Kvkwow+jh/p5SKrvLHRe7nQTXG5kjplpFUVULgypxFhqrzKAnulXH9fWFVMokQtfQb3uaxvN4PHA8HfDe0K17+qEnlUysVSSMLEYqg/cGnMDqUplJaZZQFgq4ShPas2Kl69qzUg1gnhOHxz3H0yi7JrQDsvL5mlIZo7Bg5BriGxGtAhOUP2c+q3v6phHsQFUKqRUJpgQtJc06F+nwZreh6RtO80iuiVIXV7gYOEUkUrAWpbyqP0NhfsE5qikcDgfAM8aZ0+l07j5+nQsCPdTapqVTKWxW/tR63XNzc8PV1TU5Feacuby4YL0WOjJFdSp6uTn3wR+wuN9TTPL9mnp+wRcj5rAWPlExlb4VJPjhcKIsZk3f4JDl97e/9S0++/hbXO8EJtmGRokERj8zicoVIUJm6DtCq/DIxWHrAxbJ2nbOgVXsd5YC4HQaGU9HVl13RsJInoHjZz//Ma9f/hK/pFHq/8wxS1zteUYut4NRR/559IanZHj28Wd8/MnnTFPGOWgaf1b9GSMeB+eUaZWEwzTnyGk8UvKy/LZcXl5jkkJAjaJsinzuOUWcMfqeiGR++b3IFEG9Ilm5bKYK1ToYrq8vmE8nGiCdTvzhH/1bVkOjcnAZt59OB6qBq8srdtudFptJD98qaqcsxXbTdQzDmqYRUUxWFE3TBJquw3/jz7WqTCs10zTtByGJtbi2JSVFb1A/5JI7y0fPnn9IRNSANescTd/p52PYrjc8e/pMTzaoVuTSy9ienDmejjJOVbFRqvKzeFM1c0DNNWlO+CDmOnJmGidsGzDWMsVMTBNt24nCq6ISK/MBiaFtjLTA4q40XlAXtUh2QM6ZX774mhwj3/78c7a73XkEVGrl9du3xBh5+vQJ3si/l3NijLNIPxE3onGWzWotCF3V6l9fXbJarej6XrwYephl9WRURLIZY6ZdlDajmNe6rsMaw32cmUuSF7cILjdjCCox3K5WNKGhaeTDlMWd
Low1cMiZgLVyIzeNuMJd8PhSzgdgLnA4PDCNJzKVz771CdXA3f0tfdefjYgSgpNxzogBLUPjghqtdF9kJWAEIJRA0zTISDScw4McXqSFWQ7hFCNvXr/mydOnDLZnnEbiPHF9c6WKJFFUWHXtFuDh8YFaK8MwENqGeZ65v31PBbYXO2rjVQdu2PaiToo5CxCywjSO+KdCG45O2+k8Y5oG487rxA9yzep+5dmw+nIYBLznnGO1Gsh5plYJ3pmmkRAcbe3O/+4/9mWdpW0blcM2bNdrEVW4pVOT8ZDOJgiN5+FwoG3FsX04nCTFcWFlIfG78zxzPBxlPq7PqHMirV3+mima8mcMp9OeYbXW7GYZ8B9OI10TqBbGNNPYRl3i+l4Zg6kC43NG8OFusyHqpex09OaMk0MzS9jNIkYRb0QQZzmN7GJUTegsnOYj/8uf/RGpTpp7oBiQKpklRlaE+vcAHBIWkKnGS8dhKqlY/ovf+n2c7Znm0/k9MLaSouA9pAMR5tXSGcRpIqeKbzSYzHkuthfgG6jTuQgyRkZMoqIURY9RW3rBClK9Lqq+RMnCF/tgtyhcbje8e7fn4XFPt9nwox/9Kf/7/91/R9tv1JtjWK/XXGy2xDkSYyKejvSapBmL7ENTiszTSGhF3p5SZh5HWaI7w2F/1DPDsNps6EIglypVvxOydZpncir41hOniXmONEEyfNq+o1pH27b8s9/+TfI807SdGJ+RcycYR1J+VdJur8wzPsgO8xzAlCuH45HtZiPZ21l+BpQObGupKsszpJgEU6tV336aGReEt7UC5XOC2GidZ86FedYsWGe/8c8FXVZLhb7qW1n2qRyuWtFL3z084NpGM6xlsfT+/p75dKLvOq16DcNqhTGG8bBnPBwIBihZQoGCZAHknNlttjy7ecJus8FZx6x00DlLvkE1MOcMWTKDi+qGS0mMGl5fa8H4QLDh/Es01khuQa26uHRs1oMspKK4pH21eGFCCwpZL8+l63JOKmpzDqYXN7i1ENPEfv/AdJpwxnFzfcP24kIuxv1RxkhWJLIBJ//Pe2xw0m10coBIDvISMaoxqVNciiiqhbZvdO4tFfCTp09Zr9ekLOMi1zSMx/Gcg933HUEvW6nWZF4Zo3QP43FkPJ047B+Zx5l0DnMRiBlqimwbCQdyznIcT8xVEtpKyTI/No6YMlETwGqR/Ib9/kEOP52bmuWCzfLcees0DEbkfyklUVCdTmfvxD/6pWfbstfpuo6m6yhlloUpmheRMvM08erVS+5vb7m6vKTvV5rE6BV0Z/WjqudnBycmVECQMxi9IAw5xvO3Idyd9dmgab3nMI7c3r1nXrDu1oKTl9t4x/54xFrH4XDg9at3HA4jsSR517wSFHLSTPgoRiwrO7dYqnZv0nm1Tct63alIxGM0x+InP/lrXrz4CT5UjPH6nIsKJmdRClktIxYkuJjXnEpZ5TJa9Wu+891/Rp5HClHMukopmONIySOlZqZ5ppRM18gOIYQgh+g0EZVE/PHzj8WrpDsMIbDK8nlx+hvF35RqzmpJlNYsV1iGmvQRkA7It4Wb6zV3h0fiHLl9+4o//fM/wmQ5TE3RPD29bJpGgp1STqRcmI5H5mkSNWfbaGEqoohhLaOpNrTsdltqSdw9PBBHuTCjdu0WMQG7EPBe1GxB/TYVlO8kIykKBBfo+xUPD498+eUXvPjqa+ZpYsqJ0+kksnVdQqf0AZ2SshQRKQvRuut78d1YzcR2cs544x2tc9RiEBaSOfNcToc9m8327JHouh7fZOocGVPieDxKJdR6grFUJyyfEpPST2XUcTpOYKpor61UKJ998glPnzzBesfD4x5nBUtwf3fLxeU115eX7I8n2uAZ2lYWLaUwn0ahwBqIJksspLEEa3UxU3k8HgXO1XiV2k6McWS1Gmgaz5wyjZWAI5AOwoWgudmw7gcqkaQgvTGKYartWkiVU5w57A9stAIIyIdu4JyAV2olW6g6W616+LWhkTZYO+B+WLHZrilFZZ41a6CPk4O8k0O2cTLCM04ugJKzPjhSLZhiBFseAgUoWbqtWDKuql2oVMZRXsAlKyB0w9mh6YLDG08p+TxKAjHoWJxmO2y1lZYLousazM0N3jr6rgNjmOJMY2V0gFkcwpbry0ve39/z93//D/zO7/wWE4b9/QPDMOC959WLV6w2a7rmRqoZ43gc90yTCANKlSVpUmNY08iFezpONL3ItMZxFECdjr/+SV+lqvfGa+HSUlPkcNjjjWOz2ZBrZbfbKaMf4iQ54F0nh2vRkcOyTO27DlsNp1kko7PmU0sHIfyoolLkAvRtJxeSkdlwN3QyJgyeHCPWONnNlYS3HksVr0TwbNZCd7WjjDmgyvzQySFqJIiFBSMRglyuZ6yEA4yX8UsByDw+3vGjH/0h1qr60EouCqCGOZWnCYVI/zxVeNUPfy0n+OzTz9iuVoxxogmBaNJ5Z9A1PXNZls5SRC2fY9O0mGCZS8TOE/Oc6NarM8blw6xLZbZ5UVuJpNwVQ8mQbSU5YR65KmpCWwNGDahYQYJf7lrevoW729fUavnRf/hj/uB3/yWbiyfMkwh1cs26l82yszFyBqzWK4lGsEJ7rUhhJd2tx1oR+bRNw263pes6QtexYOHjPOOtjGetd8IKKwVnPW7tefP6NW3Xy+h1nqlNQ1KBi3eWy6sr2Qt6T9Yu0luL8w0ln6RDQDq0h4cH3rx+Q62VzWbN1fUV/bBizhJ2ZICmaVkYGxiFlUlQiRiqutWKtm149eYNr9+8wSAYhP00UQqsdxsuLi/k8KqV02HkNE48Pj5yOByx6keIJfH29pbjNBJz4u72juPpJK1YCOdftms8Hz3/hIuNOP/aVtOwjMG7wKpd0ba9PAhGeEegjkljKCmxPx548/IVpxQxOB5PRxonS0dj0Jvc65xUUtT2pxPzPOtuRC7L/XEkzRFjDI8PD9w/PMgLjASPD30vYwNdj+VlsWrU8ZoTOc76vSqosAoSZHkpoSjWG5quAWS2PMWs5FTHqusEkFcEEuiMElOtjCGiVqJN20jWwSxmofqNF22MSYMVZbafU9bvC0pZxk/mDHUrSQ9LY4k5cnf/QNL/TghBHZuQla0UnMU30tlUnUejy/U4z/LMHA4Ka4RPP37Oql9hqwDzhtUKi8ylp+PIPMez/ny7uaAdOqFcTiN3t3dnFLL4JipNH/BWLrdlIQdStf9Tvrqhp21bQUdYK8hs73j3/j37gzjV53ni8vKS68sdSbk6ORdK+kY3ihjqBMFgddxRCKGlC80HBLyGONVUSFEMgVHn6DI6kYPHKX+iKKkYqiiDSiK0vXLVDKGVEahB/BpimtRuDVGvZcR0VapAEWedeYMgKUaVQQppOPK3//l/4f37XxBsxlb1yCCu8ZyiXEJGzKPZLDN+qzL5hUCbIcFv//Pfx3cdlYoLFq9xrTllppzkUjFIJEGVQscaPaKsCDe6bpALe9gxDHIWfDBKoOcFiuJYLo6iSp56joaFD7G5S/iViHAK1sPz59d4G0nzyM+//BlffvljgoV
cE6dJOVxO3uvFwS2ZMlafxUI8e9DqeXmfa1E1pjwPUKkp4ayhbTv69QrrHftxZK8jKWMcs6Ldry4vWa16SVVcZLM6qWiahouLC3a7C1wTxOVvrY4xj4xTFJ+ZtVIYGhmdb9drYsxMc/rgZTGG0LRkwJaczyRCCfBIjMcjY8oMfU8qhf3jo6ggVIvdaRtUaz0z9B9HGTk4Z9lPI/ePD0yK7G7awHq1whjLOI68evOa93f31CoyTzmQxOiy6nucC1ANwQaME7NME5oPjHRFHzRNoHENVfpw9uPMPCX61ZrgLPM8UbJgDLqh18Q8eZAf9nseHh85zTOxJLKpJJMV8TCT4nwGYe0uLlmvVtRSJKAl6CWBmHecEUv80h6kmPnlL37OF198STWyYLXWYry05TUVKIJWMAYd8Ul34Y2og8BSi7TJy2F3rpxqZZwj4zjKIaIqhVzEz5FigWzwXt3MVZzuNWWGfmC7XuOdpTGOrFC1xnmIMsLLSaqIFCMlyiIwJpEml5J1dCLmt5oqb96+4+H9HfNplDjPEMg58bh/5OtXr/jiyy/kInOe7cWO588/wWFZrdYYZzkejwTvefrkGRfbjXQEaWY8HLHOsFmtMdZye3/H2/s7ccfq/qskicsc55HT/oitleN+Ly/jPJ/n1b/OlzGG1Wqlju1AN/Q0TcNqtQIrs//QtBLUkgv7xwdAEuy8l66mfGMmPc2TjHFTFlyKE1fzeDypgcuqS7+QEIREmqMsg7WiDs5gvBzoMU4aIyqjqMVj4pw9fw/GyOxdsj9k/yWHJyym2GqrzKpr/YBVN1CMHF45F7xN3N1+xd//7Z/RhoxDolBrVeFElShVVNYukMT/f4h2GZF0qwt+43u/LRL2GtWcKOOl129est8/EHyLdwLprKUSS2bOBVN1D6XeEuc8u6srqahzETwHHy6FopduUUluYVklSZpkpghWxsrIaemwloPcuspuHfj+957ThcLx9MDf/qe/YL+/Fdx9zhjjqNZhnRej4unIfv8oXpjls9ALHWPo2p7gA8F5jC6np/HEm9u7M5oFFNOPZd33eC/j09PxwOk4UnPWEVSDVeNlzRl0rO9DoCBYcG8daZ55eHg4nxtN8Dgr0nJTDeu+5+ajZ3z2+Wd88unHWGsYxxPOO/rVIFk0qeBrrfJgmohvW4b1GrAEJ6MSZyzf+fzbgizIAn3rbMPLN2/YPzzw8fOPNd/Z0W22eO+52IlqJ8coaWmlslmv9QB1fP7553q7aqpUTByrOAVDgThnWY5bCRJx3jOpNrxVCWIu4uS2VQ/OUulCy6qXIKFTipBnVquOOCW6tsE5y8P+yOPDAzHO7HY7ALqu0apZbvScK5cXF5LylQp9E1j3nQSB6F5iydbwVhrm4zjivcNT8cGzWu9Ym8r97R39ek3XtQQn4TroAV0qxDmqPl0e0mmeCV4IozLmk3HNmR1fxWXdeI/TkcKcBNNwc3nFOE461pMD4WKz5TSOkhmhSABrDA7p/pZuT1OnyaUw6AK/1Ir3QYQFzpJKFpCc9SxxwoeYBMvSDXogLyMeQxNagh+5vnkijm/nIAkwEN8oCsVpmp58AKHrtRjxJF/0Mkhn4OPldiuOewo1RVwrXU0XGnKBd2/ecfPsCaZCXPwzv6b7Wg58f5YAtm1DSi2b7VZNoUbyw1NWDtVOLoQ8U4G+H5jHkTHNvLt9z+PjA08/ekbrW6r/4Az2Xrpn7ILiNpgGStIZ+uL1aAP74xHvRYgAooZBmU7OyXLzOM9YKzgG3zQYDQtaLgaj3iAJPbKMxxPWe1xj5PLTd9FaoLE0OKyv/OVf/nvG6Y3gt7OoiGpNYGfyrFng35CQFYq8j7Wo5Ff+XkyJH/zg+1xfPpGfJ3jmOTIdT9ih5/LygrZZ6U4tM0eZu6dZsDsxCD9OjJTw+PDAnGe2u0u+/uoLLVqW1TPUksi1pVW1kGj9Kq7ac1gUKsuXDi3iaiMvYDFYm6m2cnHp+d53n/Hjn7zk5Yuf8vb2Jd/65AccTknGjaIhZ5wi7+/v6dvA5cWFcNqo1CqUAFOlg3DGaqErAVYuBJ5eX9F1nRrx3Hlf5YM/izSaEAidGCPjPIsIxsp0pXovHT0iHLDGiVKwVJphOOfzrAY5F8dp4vb2Hc5Y1sMK1whqKM6z/vuyH6wV7u/vsdYJqt4YyUimZE28qqRUBROr5p9ixMlrDJINnQtd3zOnSKPqH6sStFXXERGYX60ikzXG8P7uTrDcm7XiLeQgXO82eCOwsXmOsoT1cNyPZCNz/pxkNJNKPi9RjTomRTL34aEsOeNNJVY58DP1jFhOUTC4u4sLLrZbstrR4QOWOai5RYxG0gVklZUtCOWiFapFkNcS+7eSihx4+vSGcZp58/YNbUqQxUdhjFEaZyQmcZq3NkhozSiV7zzvefnqFRbDxcUlT66uMNZy3B85TicutztyzRynE11pJPdXcQ2ZzKrrxYCTk8ianWRhxFToG6eeAFGNNVpdihROXIB1kb7py9dYeQ6WzykVmZcuKII5ZmJOrMOgYz2DCZ6m9TwbxPV8nEZSToJcUA9EKZVhWIlfIInKxixEXWN1nAjjOOFc4PriUnTl1mpVm4BIzAbvO9J04tXrNxRXuLy+5EN1+et/LW17SlEWstZgvWe7XmN9UPaPxIQKlNGSy4eRrWsCvbdc7C5krJTLmRs0zzPGwno1kKnnUWFRj4D1glGw1X0YYaRCKhNN42lDI91xkeCoJdVuOk0yrvABZyU7e472LGvt+xZjHPcPd6y2m/N7IkvdJeGwCr3YgPeZv/mbP+WLX/5njM+CsLZgykyuBrJUzxbOxUitGiBl1LNwjgk1kDy//Zu/I9WvC+Q8c39/x+FwZJ0zN1fXOMVcz3NknCeJFcgZ7wKNFUMgRnxaNjg2qy3Pnj3nP/+NyM3t4qBGMt9dSeQStDsQRVrNhZiMoHeMGCGh6o4waTEhMQJYgWc+ebpivf0OrTd8+Xd/wueffpvtekWU2xyLEUNcCHRdj7XSkWeWzwHmkilxhqbBVWViVWGUNU17HlWVHH/lPUNJA6UU4jhyiLLsX/c9IfRibrYWvJBehZAcBSSpju+26wSCCbgiP1/fDEBlP0oUr7VShPd9x2GcKfsDKUV+/JOf8tknn+GrYm5zreSaiVSNOoQGQVbUWinB6gcZGDpBOOAdRuNC2zbomEQenHGcOR2PrNdr+rbjNI2c5omVzn1NEmVDtHI4TOPMXCKh8bTB8+72ltA2tK4VMJwRjru4GnVxVpDLoQoSAVPPagJbqn4/FYIXPDgCKhtWA63e5mdTXRILe86FoM7IJ9fX8hjVD2HlxjpSnLFGTFEgu5zdZg0K44qqkgltEKWVmngEKSyH+fF04Hg4AZDblpweZT7f97x6dS/5032vHBtRaCVTaLuOwzTRNA7I3O8ftYrUXcg883g4sFqvhBBbxS8Sp5nbN69Zbbc8vb4B1c5b585uT2NFxfP4+MjD/T2+abi4vKDtu3NE53KweX1hi0GyNqqEHC1L/KgLWpwlThO3b95ydX1DE1ox7iF+h1
KdVqCikZlzwhuDbyxWUeB3d3e8e/eeJzdPWK/X8nL6gG90hGdEnuvbwMXlTp+TpBd/Oev9f52vZY9grVwADkfftkwYYpwE5a1zd5H1G4gWW2VMI2eVZ1j1hPAM69SRrRQBUeJUyIXgxCxqvRxWtX7wEC2z4e12c4YGGiMEz3QaqbN8BmBYbzcE5wSdn4QKOo0SV2m/QaXoB/GB+EbIrxV5Z1KGnB3OJfre8urVz/jzv/hDDFHGUAjvSNIVHTHLSEeenQ/eEBScpwJgvYgq3bDl88+/T1rEBBX60OK2kgFdS1UyajnvrYqzrFZbnBXcRTESJGSArmlo+pary2uhJqCYHv0dZyMJb6lKlsvywZeaVfgAJng5gPUcKZ7zWEq2SuiSubJeS07Hm9c/5x/+/s/ZXX4XfEvTS/Jf1/bYQS7JUqL+9wyxZvb3j3hvWa83+jHJ+Gwcj5QiIyA9WuQus4aisanzPNEPKwzo7ws6a/FtKyh/uyhKRVVXHJgiOdq4D6wsa2VEdjqdWA0DFxc7fPC8f/+OGBPt0ND3KzKFUApTSey2O37/n/9zEZUsElZ5eASgJ2aOhnGa6d0HVICpQlltgiAM3ty+Y7NagzHUFKkh6Nxf5X96aZSSabwjuMBpnLlIsoStVlQ7UxIuepoiNniaRirtthtEUmssTbP8uU4fRDmQa5EWLkWRSdpvjGSCl/l6sIY4zbhWuDk+i6bcKvG2lEwxwq0JVhzEyyH3QYMtVZGQQRtqBeeMtuCi9kp5pg2BU5zo+wHjJE/Y6PdUdf+TkQoiZ0l7897x4uULqPDd73+Xi8tL1pvNB+lfrZQoiolgHfvTid1mIKXI3fuXZ9lmrgLzmuZJF1qVfmiZ0kzTBq6ePhXzjr43SyVavtElmVI5jSNv373l6vICZ6/ln0sR74Qjk2sh6iXjvePjT57T+OYD5MzI796oI99guLy8Zr0aSElcyqlKdrJvGhm9mSxwRr1oZewWZOn9uGeaJq4ur0RqGBPbXYP1TkN1PGma6IcetzG8f7jncDgyDLIHs5hz9Os/+mVgaHviHJn9LMojI4wjgdSas4TZFDRIKpGMZLnXnDnsHzmPWmIme/lcmkaSDJ0LlDJLNol2zzln2qWStYbq1FmrVaao5BD0eNNIEmAVmaTRZe+SrWEQ1Ma5yldlkxBFhUoq1atUkalWySaxifuHt/zpn/1P5LQneEtMVqxnVqTPNltsrlKcGauxuBr+o5JTjEQL1ypS288/+ZSm78Wbgbi7/drSKNctppmu7ajF4IwjLCw4dXxnmyFNGCVH1+rIqfLsyXPx15RJL6lvvKtVLmJpjqy+dUo3TknySSyy90OBshmcrxhb1NtRoFqMnTF4knnkF7/8Tzz/1g/IdEwp4Zxj8O4cPgQKNTAG5sxq6AhNd15gU+SyGkeJ9MUoDLFq9rQJlCBcpryoyKwhzUlzsCukdPZSSNNmyDXhjSM5R5onQtuRa8YihUe1lhZh1JmSOT7suXv/nmG1oXWN5qIIwHK3u6TqiJdq8IsjmVwoWGl/jLheZQ4qC8xa5UWzOsmLcZa5uHd45ziOEynNdL05o5f7tczExpO0yJdXF/JDOiNSS0Sz3ZkWVpXcFVrvsMZysdlgnWMaRWG0BKVYZ88L6FKFJ+WtP1c8Dn6lavStsG5e3b3n6c0TWu95mEaMsfR9J+EdJUslFhq5vEqhcYIJEYKpl6Qm/WMF3ZApRuRspzGddcUi2ZPQnzxlpWomxv2ebrWSSzdJ/uzQ96TQUnLi4+fPWVyzFqG/1lo4HkdckMstx4prPDs1ejlj+fiTTzFFKnxb4emTJxxOI+/evcb4QLd6JvNT5xjWgxxCy8isSGLgIj6YUqRtGp4/e8b1zQ137+84Ho80fSN+mFY8MK5WCWg6zUD5cMhXUdlYhMrrTGBOE94H+n7gcf/IP/zDP9C0ge98+zty4XqHMQ50SW/lLcdh1Ci34tNPP+Xh8aBjGUun0Zs1F2zwpFl+dmsdxla6TkZc8xwJ4UMR9Ot+hbahmRtOpxPOLsFR4gROKZKMoc5RwpOoWCumNqxVCqyha4KkuJUikZUY9X7IkrOkTDcM5wIszTM5ShJYzZU4zjgvsbRoVrtvRA1jTMU4L8ttBOnw+PjIPM1YZ7h9/56Pnj/XKExLniMmePGj1IopaqQq4rrughgvx+k9f/zH/yMPDy/wbdXUQyNZ1XqYVnWZGyu/54IV4ch5SFIxRoN3jGcaI9/9/m/QtmtiypLEaD0Vi/Myn08pk7ImGB4nnBcywpyiXhqBXBOpFJHaa2F3cXVN2wwi4ND/ulOJPcg9YTUGUL11OINcvFrRg+D3TSy4GiXVD1E4yuhJjJvWZgyO23cveHj/ko8//R1evL3n/v6etBqkU5ApFcc4nfE+m80W76264mWXWpCRo1OUOKVgvMMZyzSN6k8a6Dq5UETdxjngq6pYpVLFk9FIbGlMiZwS+9OJIWd8CFgf1B+ik4IUef/wgLWG/ekoo6m8FrVoVUOkOsMdkLyX3HADsvzyMpaRvOQq0j01nyx+fIMeLrlgQzjfUqtOnHoLumIhaErlbTkVYQtZlU8aY865qo3ztE1zXqqKkiPikX1DjLKIaZtlxihKBaP6/tPhxBhHLnabc+BHQQx2KCX26upKsgtS0sWhKAMyguuYYiJXmYUvKbBCk6w4HNW789w1pZn96XRevM5xVI8DzKUw9CKVndNI0MNljBHGk0a4FkxWX0XXcBor6/UgnVFKTGrOylEqVZcF9kauZ88KCIws2EZeosWPUSveW66un+gBbGj7/tzN5JzEDFQyv/jlV4Sm4eNnTzHWEZwnGsBbrCk4JzGeaY7nSqgm4T/NoybGNSLdLVUW6DhLQLDuphbSlFipfyKlrC29oxZYb9ciC64iI7TWUoyhpkRxUrF671mvt7RtzxIRuUijSxFcvDUW30gnNI0nVqsNjT4rzjl+3VHTN78WT4oPQcaZOXHYH8ilsFoN7GMizqMuBjPWdEzHE8fjSHAW1zWqBpODupTCOE6UmuibltrJ0nrBdzgX2D8+cjzu2V1eSsCRrewf7nncP/LRs4/w9gNxtuas0lZz3qO0bUfMkX5YKdxOE+6soNdDK8iGmRFfHCH0lBzJ/gTM/Nlf/H+5vfsFxiRMSRrVKh4DwcLLSJoqzmYZxxmq03vsPGoy6vNwNKHjN37w2zgXiGkSZViURTsq7Rb3tyiX7t+/5/LmCmrm/uFA1zRstjsa37Lf34OeOTZbGt8QfM+xVF1TFlIBaz+olWQyndULseDxHTkvgo1lNyE8qpoLxRqV3ipzDkGXOOcgn/j7v/sjNrunkA2nw4GcZ5q2kRGsgRcvXvDu/T3rjTDqttudNBbzTFIkS1WKgjEGp4DGBern24Y5Z5wBZ73InlWuO08Tt/d33Fxdi8eiaZQanc77xKFrtdi1VPz5z856bj8eDrTB8/VXXxG+/W2cdrPi8hdfR+M9NaphtRiHpaippzJOMytlsH8zk9AUzZk24mBsu47W6M2dhE+zKENQL0Ct4n6c9YcPrpPln
JzyorYp0qJ6vS2XvF1jw7k4aZog5hTnWO73HDN3D/fUXGh6OUCO48x6vZLvtxbe3b6j7XrA4I2hGOmGViEQ4yxUTqrmAci4YKHbhuAYp8T93S1N07C9vCAn4QjVUvDe4JyMHtarlXgPSsJYWRKD4LmtNfgQuLm5JuuCl2yEhKpjgj40UA2zpv0ZzeswJpNjZMyZru/IFL5++RJq5ebqCowk1y2p7zEXXKiSdBdkYV/KoskWtUSMkePxII70eeL93XuMMTx/9lT+fpEHI8fMerfFGCOuY6eIa7k9Ca0n4Gm9kCWNsTSIgdBoClaak4wMrGXOkc1uw2/91m+KUqXtZD9TslzETjrUxlvmKql0y3MjAVTyIpeUSN5jjUg4M5G27bFWLo7NZiNquEkc4W3bUobhn3xBOOfP7lxjJR50tb0ANGin7umHQU1u6ezoXa0HzoRU1eifk9AMdEEu7Gmazjst3wRsEtTHaZyAKgTlms+X3TxNso+YZ0LbCE6/FlwIBBeUT2QIBEFaYMlxVlm2YZ4nvE4GYpyp1jDHREoH3HTiiy/+nHfvvsCbJMo647FWMA6mipGVIhJYjFEOlSiJ5Mue1U+1SvVNTewubwhtz+F4FJVVlcKtCR2JojJcKUBiipJDnzImOPq+JbQdxsguZBg2+udnxjiL6bFr4MF8Q3WoYU2xUJqM94acLM7ojB6rO0jhNZVF7KI7S2dAmT/UaqWjqMIMMylifebduy948cu/5Ae/9b/h+sk1h/FImtJ50rJZr3h43NPaQL9ZMyfJEX/z9i39smeM4ldyXoKnQPajXp37cn5K3r2phaD+mmmeCc6rEkliE5ZLDW/PLmlKPSsSF/Nmzpm+a+nbG0rN/M7v/i69Ls6Nc9hqyCUxnUa9fDRN8+7+Du89V7stcZxYd5L8ha1kST+Ul6aYs0HLnwmqWW+4zBQzHV4NPxLBJweE0YPdyq/BOKGBKtep5Ir10uZMKZ5bnrNKxohuGSdLVVOk1U05Mk0Tw2qgazxdtxXXZCnEHBnHSEmFzTDw1Ve/5OtXr/n+975H1/cMwyDy2eAkoa4WbJAPdzxE8jTStI1W+zKCa60nu0zU9hRtexckiUcWp7bCXMt5nzGrIa918gDNaabvGvFFUInzzHGKDG2gcZ796Ujbd/KzGIl/XWRsP//yS169fs16s+Xm6prVasM8z+cLeomxLDlTbCWnRakkM81vylNzLnz2+bdlSVoqyYgBC8RwOGXxGQzqbEX9FEbLs9Y34ESx462lmspxnCglM6xF5VWco2kb+e+qgWwIDcnIzDW0A9VIYltFxAM1G8nGAJ23Ow6HPeM04b2lbXvWxuDbRhVO8j3UKTKsOhEpZCjTCPSKnMjo+fhrXxLeOc0BkQ7ZOC/omCzvxMXFTp9HSNZIVGg34JyRqEvEiRuaRggEplCOkYfjgfVqi3Eek5MY3GJimmeGYaBf9eeFcynSOT598hRwGjvaysWTMlYvz2qqUoFlSR5jZDweaBqv76uHVvhO1UKJkDAEFzkdX/Hi5d+wP3zNqrXEaKklUFMllkIxRXYh1Z7n/tVWdV3L6KmqgKSeU9gM2Mw0Zy4ub4g18+7uHR89fSKL7hB4fLzHtAK/rJqPYIHVEh9bDaEVKbdgWVSiWz21Ot69fUPXWjZ9z9uzsVtHcShmXvekxkCqBluK4DWcyKNdlkKPqjiRCqYGpDjWPYuRjh8vXbipAZ8n/u7v/5BhteLjT39PYJZz4mGOrDrPdrdhaHtCP2AxTFlwNMEHyah2Dkqk7QLzLKTgQhGHtOa/OC2mS4ySMKdG1/Vqhd9udREvF5itjoR0nbnC9cWljNC1y55rxnhP1wSJtzUGVzJtyowx4udJRvhWOpdu6JnGCR8CNSX8q7evuby44Gq3xXpHAKZRsk2tlQUY1sg3WxFaqhWUN7rMtc7hvbyMIQRCNfIL1Qr3Yr1lHOUAyikxTiNdEMzt4vY0VhrkZZySFWxnbOX+URKdhrYT45leVE9vrsHoDqQqJ6cKKbWmyEeffEwXAlfXT3j7/j0xJdJ+L2Yp56DxzHmSGWUq4GHdd5ywEhoSDDeXFzRKxVy+rNXMAGT+WWPGBqGmqtlYOD5GgIkoibJQaXzAhYZCpIyRuUZhqjiHDdB3/bkCSLnQ6mEVj0emGLm4vOTj5x/LYTDPys8pYMuZIumdE9if+iJkUS0jnaZpNNPYMk0zh/09Xd8TlL8zIwefbwLGWXXEO0qSdnXBgPONSn+KgkeJujgVsq18XraKv8MhLk/jPK3+jsV45Iglyk7MVGYVR4DBIYvrmGaaIPC+tmlwTaMV/geonPcywnq8u6dZ9Wx3lwxDRyoCqez7Xx/2B0LVXa3WWCtKlFqLeG5iIufI4ZCY54kQPI1viEXFCdryj5PE+C4Kk5zFLWycxXmLq4YpFEVPyJhz+fcBSjHsD3uC/v6Njs1qNRrmlbQ6VGWMjoFTUj6RkWfRusKcR0rSLPcscZvWFx4efsGL13/NNL3EuSxBP8ZjnVFEfcIikmeQLtKqRj8b8YopohFrKtktY6gPaq2Pnj3n8fHA27fv6ZuO3W5NrYW5JMoJgnOCuymSi120my/qki6632zQACkjo+Z+6FmvGtabteAvSpHsm7pc9Jx3NjKLB6ohW9lJhCwyfyEmi6x+eVYBcok436qs12Cs+A+SjoHMfM9f/eX/hxQj6/X3aNs1tu1wpkLKkkGdIkW9B8EFPv74uYxXqeAHYs7Mc+L+/hXPnj6j6VYgKmRyFWaYa1phuJ1OwghrW/nhtAg1VvIlapIED4o8n1SYszjDf/bllzw83PPsyVOeP39+dpR0XUNTpCtZfs5MwRsnaJuceTid8LvdjovNVu5OA5TMcTxx++6Wy6sLrre7c6xoyontZi0uV2DRblkrwTzVGKwNGL2F0yxOX9uJ0SeXzIsXL8AaPnn2kS5/5S7J9YN8FSSEBGd58/IlL16+oGkDH3/yKbv1Wg6jUuTCqvWch13UguqtIXnP6XSA0tEEzw+//0NWg2iLg/cC+CraQVDOhjLrHW1bNFS90K9X4oqtBW8Xy1nVZauGzOgLbFTKaEvlcDyJtl3DhkQ5JL+cHKOM76yh9w3b1Vqq4VLAVGqSzqltGlytnA4HrIHvf/+HGAOrvpcxUEq0xjHlifv7Bx7u7ri6ueFyt8MWQ8oV46ssghfAoB5mLjiO+0d+/vOf8dGzj7harWn6ARNnkhr5rLUcDgfG44lu6Omb7mzAy7r/aFwgWeH9b7fbs9chadJhrrI3mOIk3H19g01BJMs4TMk4r05iI2O4WBLH44F3796zXq+4vNgyzzNd0+Cd1T0GjPOB42Hio4+e47yTLPNpFMduTMDIfJpo2iDO6V/zy1gZJTrniDEzjqPEqxqZKe/3j7x8+ZKUEp9/9m2Rqhoxplpn6fteKkArL7QzIjowrpMqMKM7uEKkCoOMivXaDcbENB7xw0pm61HGOqHtwFYaK3LWRd5blDMG4pOYphM1J6xvKaXwcHdHPI6EztENcP/+K75++Vfk/BbnKiQrahiTkXqo
4Axkk7F1uYksigGjVtl7e4ygq41mRmPxVvYRzlS+9fm3+fTTz7i5eaYRApbgW64uW8aYVTkniqwlltWHcFYoVcRg6ryXs0bpBUE7565bUZx4tLTmZ8mFLrnKUsLJtEMW7x5TDKlGBiMXtnGy0Ae9bFz+lYOzmIpFGoBcZhIenx11euDH/+nf8tFne773G/81put5+/YdcRQvk1HRg7MNsWTSLHaD4B3VGLxxOGcIXce797dsdlvaoJdhlWw9W0WOPUfZQXnv5QLS5yGrtynnLOrBrJGyaT67uJ/c3NA3LRe7C5xzTNMkeRRBY4p9oDonyqdaSVZl87YQ5hn//OlTUFxGyYnVRvWyTcOqHajWQwZvCxVPKcIsMdYS44lMofGCS05RDSEOvTwMOI/Vpc3Xr17w9u0bthdbCRiydnHzSVvjREoWc8a5ShxHvn79klwyT3ZPWXUDGAdG3K/WCJJ4qcCWByuVSue9RD/mkb5t2OreIjin2uiiqyorubFGxgRNK85LFyyNVmcgSzXdF0suhZV5c62C1xC1lYyPihFFGMaw9o1IyYw8gKdxpvfCOOqbwDRFUhwZVj3zNDHGSKuwQest+3FkPp3oVisGlbm+v78XtIa1zDHyeDhQamWz3dJ1LbMyqDrnMU6jSAu/IuktpdB1A9/61udcX16SqdScJJwmfki6i1Pk8eEBa51A7KpyeeBceZSUsaaKwcga+n6QoqEuuAFD41pAHvjxNPLw+MhqkIAgH4T2mUoiTYmhFR8LdimaZNnZDx04o3p9wViMJ5mv1ypT5e1up/scOJ2OzPPMxU7yfv8pfonlIDRuyeIuenl4cspstjve373n9us7Xr9+zWazkcu9illyVJ+Cc5JzkVLBWQ3WwZx3MMUYutAwGemelzFuTDPXV9d6+Bf1fRgaIxXwN13ktYpxNSWZi4cQaPtBZbIGbwKmFt7evmA1wHH/yJvbn2LZY50hFtlmGJtEhVMEyS0BDUWyC7AYEzBkkQll6TKqNXgNGxKxiuyOquaADL34pGTxG5i1AzJVhCPFWl3A68Wkz6YxIpowSfZhRke88umB1UmEUxk0i11jMaMg04lcJCMmpXx+9o3zMBtGwHUej0cs5YgKqGp2Ss06brKyDCdD0E63ihm1lDtevvpTQuP5zvf/NS2Rd4+3bNdr2tDjrEQRx3EkpcRqvWachYqAgfVmS7m/583dHY+Pj9xcX7Pd7bSiV/OgqgCtDQpuFLNqzvk80QmNGDa9QSJ9YxKFnbWst1sudjuRxaNdN4rdp5JLIk5SxBlrISVs2zLtD4zjiJeNfpYqjMwYJy52O66vnkCx2JIkyrLxdCqFElx0FvlajYC8HLUWzZEwxJQx3uByEU18zpRcePLRM549uaFRRc7ZieotznghyeYCxlIKdE3Lk08+4cn1zRlLXnJlfzxI0FBKNK08MHGeMNYxaFJUaMz5YFwOe2qVqsEYaf1EuiGVs9PkPWtoTFAPRTk/mFVlrmi+AMvFYQ3OekwuRMR05b10EcLxySwqspwyj6cDrWvxXeAwnpjGmbZraLtO3L456YNpZYEJZ6OiiheJc8QPvVzSbSuOYGuZ00yJCR+a84VYMVRTqFaWYaYY0hQJbeCqv8I6DxViFsR0sF5w06UwrAdWG0EmOHR+W4R5n5K4VHOuFFNEemolqlQYQfL5LmE8cZrYjxL7WEth8g7rLF3Xyfw9i4tX6QxMOUuLjuFw3GONuPxFkmglRrSVkU4ILWmamKYTw3Z1Dvax1kmUqOKil2yFf+zLWTHRlZJlGe4sx+MJZ72EBHUD3/n293n29DkOS4pJwodqIWVhc2UtHJwT4vBxmqQbVdWZPBPSSrsqo5KcE29ev+F0OvKdb3/rLApwrsE3gtSXQ1SeyrvbW7ph0EsbMb1pV26sqntq5snTC9r9gbu7L5iOr3FmouChiEow+ErNjpRnStTRQwWQotDiKE4YZaUUvDFEIwmWWHOGSi4j31IBK5X6YoZONVNjJHQdj48HTtPE0HX0q7WSHpzE6Krs1GBxfkHgf1BETvOknaLM3Jd3UV9wqtxWMiIPomScpvn8u/VGDs95jvhTixnkdxCqtHi1QHWZZRnvnPLR6LEuUUhS5QPFFnK545df/0fGw9fsnv6Azz7+Dn3fy2Wp/Kyf/eIXvHv7hn/5B/8S5yGmgndBeV6O7Wp1TuqbpomUIn5YYXWB3S4k7JQwQf1KpZ4X/yVLt+uc5XQaWW822BCI0ySFkZd8kuos1omMHQRk6q2n7z6MY40W633X0TYNPiv3xQWrfoMKVhQbHpEkOi+Gnpzzue3LQqMjA+MsQKmiaODFneytl66ggm89n33rU31JPCXJgT+nqJXxhsYtiqbKaTpSc+Fbn3+mXJ5CyhFjCs7Dbr3Sk7ucHbdjjHSNldkkktAVdH8gjlWDC0GqBaNwLKO5D2ZZyMucNdclwVg0+/r8YZPoIYw+iFC5v7ujOsduJQHpxsKUxOyEc1itpjHQtR1v797DYBhWA9e7HWMbP7wE2p46DfxoQ3tuQVOJnE4Tm9VKFDzjSKmV3XpNLpK/kGLGB0cbAgUJV4k5ESeJeG2C7GOyrZSUSRRs5yQvOyWslyVYUVWEMUDRRaoGlxgnVNKu6844kKAJhaXUs9Gucy3FKzn44ZHjSUiWz5/c0HQ9j/sH9scRZ73QV4OnwZGzAOgkwElUSyE0lGI4HUcwVUJevPyerXOUIsDD/X4PxtA3kkf++PjI+/d3bNZrejeI5vzXaSYMuOBpS8PhJGHy4zhK3rGzzFEgeiEEvPGyV0lF6LRzBAetlaW1wdC0Alsc44TXjvdwPGIrdF0vWeXGMZEpaabvOwEDhhYfHCZLwTJNwjPy6t1p246uE3HAsjfzxpFNBhsgVqwfOY1f8/jwY0p+j0Pc+zUVwKFrU0pJCqXT55WK9wZTBcNujYD3qoEaIEQx6omMV1VKsm6VfHQKTdNRYsbZwPF4YB5PtE1H2/eCs4kZezwSulZ2e87iQyNFfc3CEVKxiuTBSMbKnGae3FypmqngXCPjpQrVy+gtmUKoWT8b7QXnSgiQRhiPkf3xyHrVsBkstuccRSvvLnhbNVYArHpThIMlWfbWVazxeDPyuP+C+8eXbC9+xmef/QGbi8/kHFD8/v3dHY/7e549+4gpzqQcKVOibTr6YcWlIvjfvX2L854YhZ799Pr67FmzznI8Hnjx+hUfXd/Q9r1cHlUEQ02VHfE8T/TGiPq0FKiymzFV9k0pifemcUG4dprTro++xAEY8CHgg/eCtK0SrH4aZ+L0yHq9kaCXou2flfZNHgEhP1aKsEqQmdgyQ7TWY6y4iedUGLqA14O41EqJUTT1iFuz6zuNUM2inzbastZCMIGYCs5qpOiS7IXMvduV8IJyjCyZrblI5a5N8Ln6LXC+mLyVF22OE94LPnfZsXhjyEYYLiq5liWrXijzPGkVYFTvbb/xS5ADrC1CTy1qwislKz6j5ebykliEVOlDYLWRvUWuYqxZLr1Fny8+i0lmxabqoVjUQ2qI2q5P04xxVhHIuvy
rHzwILnh5wb3DOyMk0FS1TbU8xoRLi5Pa4xr57xiqVjRV9wmCrLbBYUqlbVvmcSLWSGibX/EmmCJVf9/3bNZrGie0ypwWblFknE6afR20IrZ4a6CRljx4R9PISPN4PFKKeGCOhwOgDH9TcM5KxsfQnXOEjTHsdltiKjQ58U+ROS3uatlLRMk4Dq3sY6xkV9RUqFaWq6J9F0e1tZKVLc88kqkCeOulKzRAzhzmmaZpZNQH5BQ5nEaurq7PlwFwliBPpyPBiYz2dBjJeeZ0qPQrubAXbEiaK20DU3pgPH3Fw92Pmed7jE3UMpPTCIjSUOsAjMk4l9UVXCXkpho1oUmGgkA15e3K1pALhGTJpWJl+Iu3llikqNgfDtxUQxpPBGexXUcxwh8aNOf9eDwxzbMm+wWcXQyfUp7NRWbzJRUxIpbEajXQ961+36L+q1XwHiZLp2uW94hK4x1jglIt02y4u33keJgx5sDlRUfsHeNgWG09bc30ncPZluANvnF0rYw/LTJGpaQPHY8x2FKV+JyYDl/ws5/dcrn/Lk+u/xmuecIPv/cZH390zXqzI6VKnmdOpxHvAl2/FlkrhlM60TRBfUGVRs+6Rb4eXGAyE8fDieki0VYBiuaU2a3XYvcuhdNxVFe2wyrBOwMmziq9lrNjGIYzx22RuFc9J1OM4AreWItH5sn748j7N28Yx4nNDyTTIZYKy8hgmekacQ6mKDRMlgq8yMXQNK083LWQamGK8/lQXrqMWgt905FdISIVGc5hqhBAqQnaTn7FZmnNrCir4JzoddwfMM7ShoZe5WMLZgLdAVSReAsqpBSCdec2TZyQszDddRQhH6EurdCllco/Y0okZA7sVU203mwU9JfOHYt17kycXMZd1jhp++LEPIkLvTecv59ai3RkxojHIcv3lEuCXCWBrkoFuT+eWPe9oCl0ib9eLUEsehlOER8kp8E6Sa+Ti1ks/IsWu1ap3LvQyMWiL5kcTg40Ba4itEjjxFNCNWRNohMeCmrUg2oFM2CMBC1JtyFdTEGq59D1PB4fefvmHeFxz831tYScqHpj+fxkZCimq77vqVkWi/vjSNvJBeuMoet66f4qOvJr6TcDq2HF7fu3NM1T+n44vyC/zpezXtz43nF1ec08J8bxJKoc33CMR4wVqSlO2E8BS8oyEnTekeeZ42kvXakLtK3neNwTc2E19BRgikn3WvDx849EuGGt/twqrLAW34jb/HQ4cn/3nsPpiDOWjz75lL4fBGdeAZexdWI6/ITH+y+pZY9zwsvKVdLPlJ2gBsdK4ww1WJG4xmVAKnibag0ui2S9lEp1VY1eBePkwgNRPpkacLFSy8zbd2/5wQ88pXTM88hqWJ89MNVYumFF6Fru7x85jBMb70nVKbVBxjW1ZlLK+NDQNC37ecYYS6mG0zhRsKS0jKkqOKPGWPV15Iq1gmWJc2Y+PQoJoIGbqwtam3Fupuk6Qm807bGh9R7nDE3Qs8ss0QgVbzsq8q4UI8oib+WadM7i0x23L/6Ex7t/4MmT32K9+yFXuytq6JjnwnHK5Jxou4aUZ2KSqYS1RoywTorU7XbDNJ1ISTrIpOfudz7/FjkX7h4fuLm6hsZSShXpQGjJTWa/PxKCF/y3E/9MaFvqNDFPkdVqUOVXwiwTolpFx6bFg8ZBGIqxuFpY9x3dx88JTmzeiwzWOVksShA9UGUfgdE5pB7CcnjLB5aSHFDOWF3c6tK0iuxPOC/yZ2aQb1LT5yyOaZwJTSA0AVsMqsWTx1pHHqYU5pRobNCDROeyqrMXMF6ibRpa60Q1oYdkVoVU32plmLNWQR+q4JzFzl5VpuusxI8G7SAK5Vy9Rb0cFlMgVVpzoTJKu55qoSmVOdezMqumjGmDtOnOCN66CvCuWhn3vL29xXvH2q9ovLB24jzDeiVVu/6Cl4hMqsy6Wy8vRi3aYlbJwwDIXhZjycoobjpJHq+xViV7YsiRWFRZAEoWsub1GiThy1pyFtOasGnUzDRnatvo2AEhlJoPL69GEZDmrKM+L1kNehjKsyKX9TxH9QxA21lsE6hU1puVyKT12ZqjVOuhCcS50LYC1hvHo+YkfIgK/XW/jBUMTNcNzPPMNMUzF8t7ia70riGWWUJ2jGFOQgCVyFVRMXXdIBGnTnZdLjSsVhWvY5jXr15wcXVJaDt6258VcfM4yuiv6/WzEQmlNY7t5QW+7QhWDjXvnX5OhcYm3rz+Mfd3PyHnRyBTM6J+0XcVA6ZkYpHK2BjJ/DBmyVyQ0emCqMBZUaJlR0kSXeqNSEfVQgLGCLSwgmss7+/ectg/ME3CVku1QJbxECxdrGMYOu4fH5jncC5GhmElXhTvcYD3hrAeiONInOVSff/+Vrptb3FVCkq5rSvGy7QjIf/sPE4QoelaLnc915cXXFz2eDtibaYLnm4TaIIEaQUfMKbQegsm45tA4xwO6W6rHqhZkRfG2DM9OBhH4w25PPLm9V9xeHjL9c33GdafEcyazdAzzYLfSFHk7s4Z5nnZ8Yr4wVnLPEVyrWzWEoiVS6YLUkx9+bOf0frA5cUlp/nE2/2e7UrOBe/kApDO1pwvGmslc2SeZxrdgbTILsLUyvF4kn2qE1O1r8ZIFWDBhkCj8rM0J8lZxVG92OyNc6SYmcaZJjTYoBI85EVIZGKt1Jhou8BCPnVWE8swHI4zxhtZXKsaY6n6UZJlTIlq5KUOiG671iqHg+ZWLxfPdrX5AKczcDodsEYw1bmK5rdrVPGjB53xgTrOPBwOxDhztbvAd6IuKlW8HinLzG75UHNKmlxnCQoa2x8kktA7h3GStwEoU78ihE0ZK6364XyYDV1LrY12MpJCZxQ1kdWtXQRCj7FCw2ydJVeYonRCT66vyTkzpxEwqjyq55HfIgcGWWZ673GlnruOWsWteTqd2Pj1GYDnnDvnXtQkMDYJlakU0UQKJhxVQJlGfFbL8p/K/njk9t07bp5cs95dyoJt4evp3HOeJ3COm5trLi8uiEUQ7csFURUnblR3H5wl5kycZzoFM0pgu+R627rskeQzTKeTZKPbTAgdu8sV1nqm6SRxmL+myskYQ+Mb6VIqWDvycP9Ammeur67o+xVxOhFHMUy5xil+fslujhJU1cl/sxrD4TiJF8W3zDnS9B1Pnn98/l1lNdFxXrQb5nHm+HggNA3d0BO6lnbo2G53ElQVHFMUdPqcHrl9/ffcv/spmEmc01lx3kaBcgt+xlj1hoOWeVgkgyJrF2109ydAP6/5HwZnOMcDmGJw1ZCQZ7+EQtNavvrqp7x4/QLvez66eSrgxHlm6Cs2tNKBF5nLP7kSU+XpdFBAoRQcjZPl+ThKVv14GrHec3f3jtt3L3BIVgRGdgTGVkSrK+OyaY6c4gljK+2m42IzUG1ku4N1O4tQwltCMLQBjDUqfZ3pvKda8Rf13st75C2Nk+LZKUW5FI1iLUHy4quhFIc3DusqOf6SN6/esTm94eL6n+H9BRAw6P40zljb0DYNNnhRugF4Txc80/Go61rDbrvVFYDls08+JtXKFGesc8Rp5M008u
zmCd57Hh8fGYZBvFYpcX9/z263wxnHy7dvqaUwDAOXTnIjSs74BSKpRbyfTieO0wzOsN6sZL6dZEndeLFY1VrO45fGebIXsJtDYkFzEdt4cJZaJoIX41MpCVMdsQj1cNW1MlsfJbRjtV7zZBjEhyCDZuFBhcBF2zLHqIeB+DI635O8IbgPwKusfP1YxHySCnSNbPu994RGDhTfNGeFUcnCnJ+nibdv37IZVvL9Vnkpqr4YwvyfWHWDjtKS5A07UV416psQjX/hNM2YWglOZrrjrLuXkimlFQWU+cBf8t7jFcCVkFGN8cqsrOrFwLDVZLYpjvAN8UDVM7dpNI51uYxLwTpLjFl4LKqkqaae5YXoUss5x2maWQ+DaNVrkhQ6HfHYLHJnrFHpoSfPiXmccaHgW3eWH5ZScMiBjjHEKDRX3zS4JFhxkLCU2/dvSKXwyccfAbKPEJyLfHY5qefF67jOQAjSfU2jQM2Wy8bYFlCzZ/DY6rAhSIFShTJaUqc6/JlQG37NO4KF4STvq9B2Ly8vOZ1O4heyBtO0+JSpVNI80/e9FDo6nk0xQZW9XrUGwUmb88jPOi9jtCou9zwlnBOlmEiRhfC67KKMSjULhporycPj/Z7OGVI98vrV33N6+CnGHhTAt3QOFedUvZORixWpvuXSdFjdrxQcrkqCm6mLdFqKPmMqNlSyLbiMsJBMIZdKMChFudAPjrv7lxQz8+3PfsBpTuzv96xXA9Y5cpwxXoCWVnd61cBqtZFnoRQsMoufoviLqm+4efqU4D1/9hf/ltN4T9M5DdFS34ApYAq1OCoZUxOboYeVo2lbuibQOEPXVlwjcnfvRd1kqyZJesPQNLhGitRVI3tT5zx9I05w5xzOOrwLlJr0nCzE3AgOJyZKhmDF5Vztkf39XxHjgY8++Rc07bVczsWQSyRH2Q2F4IhGcTS1sn888P7hnqc3lqHvz3uDnBIXux1zSqJ2DJ7VsDp7IZqm4WK308tFDvyFDxW84/mTJ4xxpg0i+797fJQd3nolZUMVNab33tOmpAA7SONMTJnVZiWzZaMLDxbssBh+zsY3fYGX3N6m70QbPp2IMdLoQmY99Fjvub685Hg48u74hmkcud/v2a3XcjDkLEog82F5EudJNMBoGl0p58OmJuG4OOcpU6btAoPtWXygjfNUVdq4ZYRh5BDuupbWCxitaUR5YpxkGMjy2nI8jjIH1zmwD0KGHe8f6fqOduhVcTJKzGXXapqbKAWGtieOM4+HE9Y2bNdeZvfLclrNVsZabK0K/tP9TIxyOFpRKInCyDGlRIozvc73KQsiQQ7pcRyFTmpgaL/hMnayZJbMYZiS5G9vNhudzguyYToKKMw3nhwzY5KHtelaxvFEF1qp2DQZbUoTjRNjU85VQqKGFZ8+97x885a7u/c8f/4RbddRYsQhMLLteo2xIjioRnKcZaRlWFhOi7Pd+4acI6UWmhBY4h6tEVf73fs7MIahX9FbS7GJaoXzJEvyyjieZK7tLyiNCiT+sS9dtzRNQ9d1zCnKfojCdj0wjhNxnqlZcqJzSZSiyH0rQEaH4TBJUVVEE6yftVS5XegQRA2MMXF/ewcG2n4Q1HoB4wxd2xGcl4x15yWeskSsl8u89QHrJu5f/h3z45c4M0vvXuPZAyCTP08hkas8X6YsyiRLxeBMI5kpNVGKlVYBA4oXt8VQjSXVGRBEB1k0gKEUYKYkOWxlwX3kj/7o/81HH32Myx0X6zW+9WcZfa3L7zmfvQcVkWFXI6gKH1qCsZRgSMapuezAn/4v/w6DIju0vPN4ZMsJxqHMN08j4w7aJtP6Sjs42jbQdB5nKt4J38l6g/UQvIXg1MslBrxSINhyHvs5L6iWWq2OdAqGwmACuUjIV0pV2GdGJbVtosy/5Osvb7m8/i/YXv4edrXCx0rOEzYF5mkm1pHgGhlh5cTpeCTGC2qPnmnyDFcd1QK8efOGUgrPP/lEYk8PB1brtcb9QgAuLy9lRGwtXd/TDytSjKQc6ZtGCmTrcNYzxZG721u8C4FglHVYpVL0VEqUoA9rBSFrjGAVKCKbqhbSJBfKsB5ETlkFrW2KVP7B2g/bfyNoD73U2F1dsx0GCuhoR9VTpYjGWQofYsq0rT3LWFNKzCmx6XuKEdBgTgmcsPXrPAvSQ2mzWV3docrIqFqLDQGf4JAFKOZ8gFrOWdGAkG37FkPH/jjy9s1rrm5u1K1dhVm0GJpKYZwnGg0yolZBHDiHb1vW1tIPElGYa6Zve1JSB60NHE5HTuPIsF7RhSAVlI4mlhyKcZ4Zuk72CFVUJLmKqTDpy56zeEYa4DRNpBx5fDiyWa1omkDMgmzw+jM2vYxAjsejXObOE4JjOs3UbFkovk0IeCuo8qnO9KuBdQjc3d1z/3DPzfU1q9WKYiqHxwd26y2h63DWcpgmjsejyDx1SZ9TkkxeZ8/jylI5L93jLHutJXN3GAagMJ9GjM5b52nGWEPfdcQSaXxH27UYZym5UMZEVXmgDUGVc1kUa7/OBQFnT5CMnCSf2FpZkJtaiXEvIgDfYMlELKGU88jQGElCfHh8IITAercl58JqGGQvh2RZlyIjC28N3aonuCDdwymRS8QbLzuwkmRZPE4UhdsFbeOm9I6XL/4z+/svMOaEMQlDpSzvlhZeNi93n470StYwnyLGWYUo+eyoNpFkHy/+CGMpiwiwCKW2VtmROFOxJpCqIdhMTuAr9KuGr1/9lB/96A/5Zz/4V1ztrkXabA1WuUpL1+4VTbKMoCyG+/0jQxfZrNdUPAGHt5Uf/dV/5OXXPyW0gvE42+xqxRl3HsEZwAWjf1/4T4uAsukCfadGOld0n1g0YtQJyBGR/lIL3gaaRp7b4Ay+adWwl4UaK/+07gYLNlQam0nWMiaoVXwmwRtyPvH6zV8wz0eub/4Fq/aS0+Q45ANpilDANAkTLG3b8uzpE/VCnHRfBU1jmKdZxqdUrq+uZDycEn//d3/Li69f8Du//dt8/MnHZzOhO5sXRRJc4yzIj1Lp+07OCGvOarqmafDj4YDRzf0Zx9C2nPYnrDO0vaht9OfGOJGxLhb4BSHtgscXwWCD5NHGMp5HK0LBFHXEOJ4YxwmQRLeUPmQYG2M4xQmfZD/h3ZLtjKKLJZmpWAtqxns8nZinEYdnTJGLzUY6HK04F7ctRtDEJSXQWafVsYqMBaJw3ZElz7pbkSiM7295/UZctc+ur5nPGcIyOnIaAlPLh0DzWCqNN6xWA3MWOmye9ZI0Ukl6J59tBcH+hoCx/szvMVXCULCWUA11CVbS0Vg1BusUA00gKS/fekfrOqaTAPdscDwcjjS63KzGYIuhpExB2mvn/JnDH0JDHxr240ScTmAqQ9iw3q5ISbTcx+OR169e0PaD7jMMaY6iAvGizGiGFj8HnLfn56JqlnI1ApBDd1XByghtGk8cT9qZDRs2q5VozSnYoNwpvVCaxoOzXF9e432Qtr+IFFb2DjKmwRiCD7Q60okx0ra/XidhdMu75IxbY2nbhjjNokBC/573IohUM1bOMn7COcmBs
FUySVBFTpEoV3GVS473nCTXZJpGYpwwRn4XgOx1DHgjCI9qPaEKFOPN+6+4ff23lPwKx4g10m2nHEGRMyJlFtrpmdBcs2QF4FTw4VX0JYiZmr0SETRetRisxJ/JqEspqVlHmNaAKUrsNYVTNfgKjTvx0y//kt/73f9KDZaOxhnSNFO1UPBGMjlqtdqxFKwz7DaSzJdLJmbDMHTUfOBv/+aPgRMuFEFyqKS44PDGarJBlYW2KuFdgNY7QmtprCFYQW1YLwIMiaw2NF5gn9Kpyu/HB09opYuRblwuFCoi7bVW6cBifgxOxnzFWHKWAK1SCzULaNFbi3WJx7v/RJxO3Dz7V8S6YRwzzhnyOIkZs4jC8eJixxc/+wL76Hn+7AnDsKJWwzw9ijS2a2XHC5jg2W52dN8buLq60hhgUXxaY/ChpcmFlBIPj49UY875Pe9evaKbI6tBIpE311fYqNkNx2mmtR6HlYSjWnh8kG8gTTN1TtRUOD4eZeZdjbyIzlBiVv3yB3dtqoVZl8xgKEXa65wrh9NEqvIiLRTRRatLleAd5xwmBLqhF9eq/jMuBFGNZPmzU0r0ytI/jidWbacSW1HG2CUTAKil8Hg6Mc6T+i00Fzpn2cG0Ld5Y3r6/5XA4UijEaWI9rPjWZ5+zWa/OwR6w0GqLzIfVDWl1zt+3rQLYhFxbUqJvW/phkBclK7pYu4ZGaaFxFoeksSIBLVbyNKaaVKoomOnbN285PO45nibevbtjCSwyzp6d7P2q5+mzp3jfsFmvxKyILMVtEJ6/tbLALHpRtj7IXLNtuVivxINQ4Hg8sCCHHZaUhPoKAmlLJfHzn/2cmCvBCeZaFDdSBbe6YB6Pe/ld10LNUVRmum8Y48xxmqgVfNOeu0pnBeexqDWWbPTTaVZTk/0g2bVO5/6ZBZWSoihGLJb94XCOYP1Hw4jMh//tgnz/S8a2846ubWkUNigWFtHpL18xZk6Hg/zem4bD6Qg5M7Tio6gpc//+jtN+D0AXGozzCmAUA2HMWQ84QWMkBS0Gayhl5vHuK25f/wNpfIVnwlrJSMlZqlqDgaohTsj6qOhoOCCBT0bHhxYJ7RJPkz2/I9J5iYPbOKRosQYnGw8aJ2MYawveL/+e8qpcwTeG169+ys9/8de0avQ0RkY2i6tbxlsybZ/GI3NOqprr2e62xBi5e/eemma+/OKv+frlj/FNwlEIFhoDjXN0HsHwW4cJ4L09QxUbVzXAyxBaRz+0hEaS6DAF4+SCQFWCNngalUA3TdBCRtR+zqBpfxCcIThJraQIuw01njpjaawUo+Jcj+LCrhZbM513TNNPefX6P9CYB1atgwTVWbyTBbZwpyo3lzc8u7lhtdrq783StKJ4IgtWfp4mnLF89tknfP7pJ4QmMJ6O8t5r8Zlz4c37W+YY6duWVd/LDnGeMFRRQuZEUVGTbfqOxnv6rj1LPYXHk2g7eVGDSqPQwJpxnNGUEXIsMt/O4okwVSqDaRolbs/yQX5lLev1wMcfPWOzXrPqO8VhiC4663JccLZLwIn4GbCG0+nIeDqdVQUhiKwSXSKth4HgPeeYRqymeaFqGehCoPMCGptSYtIUJmHMCwdlvd6w22xErhkCm82W5x8/p+tkTPSBgS9jH2H0WNpeMOuy4qsi9ZNCjEZ/qa5a8jhyPBy4e/9eNP8YcsoE58/7DDDneeR+PLLfHzjOI/uTHMztIDPxi/WKj589xTmDr1KdHR5FtSWIBiPKCJXIOpX4lpgEqVAq8xjPwL7TKPCvEqPKP+Uw7Pte5LrGkGrm3e1bhq7ls299qpUKPPvoGdvtmlIq+/2e9+/ec7Hd0g0dc854Y2hWgtUYx4nDaWQ/noRjn+VQD8Gz3e5Eqqt+D8FrWJZbUv45mSl7BeIdH/fknAmNRF9aYE4SJTpNHzramnXMAr+2wgmEPZarpN4FjRkVVzqqplF9kHaXInSYRbEFzHPCGc84i9u1X0YtWYB8pWTmPOO9o+la3cPICGTOkZqr8oOMqgkrlYlX737CNP0C3EiskYx8jrVK9yUrNv1ruYoKJ+czTkMuYAHzLfHEkl6XsbXgdV4frFwewTmpnJ3Hug88quX/YzKQsN7StBbrDd56ugb++E//J8b5UTAbVTI7TCnEOONUpFEKPO4PzOPI6XTg8fGe6TByOkaa0FDKzJ/86N9R66i+ETF9yjxJ3hnnKsaJZNo7j7MQXMUEi3dOZP2t5LU4b/FGfu7z8h6L9Q6vSFbn7Pm5y0WKRGNFvWhLJc0TKc5S9RvOAhljqvCfLBgjplRzBpIted6Vxjrq/DVvXv0hef6azabj4uoG6wyv374hxoy3jvVuQ7fqUR0JxgjBoWlbjBF8yZIZHnxD2w3sH/Y8Hg7ie9Cpxd37W+5vbwm6a0wx8vLFC756+YpplHOfiuyHa8XnWcw08jSJLR6dT3kjUDexxYvpzvkkXHVjOcWJGIWJsjhic83MaVZNvSamqeY7xcRRK6vr7QbXBInzWypUdVhmlszcqrJD2W2c5ol5nDEGMUUZXfoqvncYVlpBylLMeckcrtZyHGceHu55cn2NC55SC/M0yz7CyiE1zpGha8/spwWeNX+jczDL7AuYxoh1hvenB0LTsttssc7KknSKDMNC6iy40FJS5v3jI4f9AWMN680G7zzzJOym0zQKb6mIx8I5jeNsOi53Ow7jxIuXX/HZJ5/RdB0Xq7V8/0aUXHOV1LgUJ3IOmMYyzkdevn3Nk+sbhmHFnOWFlMO+UFOm7VtlMAnyuyDVt7OWxnlO8wTzsjOQqNSL3QWrzYDFnZVvGw0pylW8KduLCyxwOhxofUu/WuEV0+G9PXeEqSbSKdP4BrdgVVSSczweWPUrhd5FrHdSAQ0r2taKhNpaHWcqzgMJ7ZFdgCc0HfMc2e8fuby+BiR+1TunYfL/+GURQmDoeo4j1DpK4I91xPkoB4y15CjhW8YYptPIfn9kM/TY4Al9S0mCSElzwntZqm53O3mW5glRLRcZ/QXPmzfv2G0G2kYYUqY4Gek1DbUeefPq7zjsf0HTjBijF4AWQ+d8h7Iwy8r5MzUIWFDuHCsRrXBe1GOko6hGxkkeSADBYbIMrbwzUIP+41IwFopyliw1Q7aVzhtKseTgub39ii++/E/83u/8bzllidSlylzcGCmsrPNsLy9JcWbWC7WpSmVwji+//Cu++OIv8EFwIcVUmfp4yE6J0gYMGWc8wcqo3ARz3ikZUwhWPBC16H7OOxm3lSKLbhdwFdzyodSskm8R7NQKlEy2RWRiCtCU6CKDqXoJVNkLlSh8BHLFkERRTBVeqZFdXM5veHyf5D1xz6lNw+XugmGQGGiTRQyQYyGmKCq/IBGoD/sHHu734Czf+da3RFQyjqSSWa9WklsyRYz3bC4u2O523L59S9cLOfbxcMQY+OjZUzbrrYowFuqu90sPKUu0KOOntulogmzX5WCUm94ZuY3ByIvfLfp/ySd4/+5Wuw9/vtlr4ewtGOPM4zRig5JTdUdwdmKfX0uLrZrpqovzFBNv377m9dvbs/qpqPu271fE
FMl6uPpGeCmLIa4Lnt1mQwiB4+FErZXddsuq78RN2jX0fXvOkV1u3cW7UGtVBVbiYf+hajXGcDpNPNw/MMWJApxOIzGncxA9RivR/CGDYDUM9I3QZ/u+p5C5v7/jNIlqZH86EkumaTpWqxXWB1xwrPqNSNaU0nn3cM9Pf/IT3t7eYvX3M6xW9H1LrvLzb9drXHDc39/z+tVbgZ05wYY772mcPy9Al8u+V2EAaqiJJZGqPBu1VrbbDX0jC+Wk6WQWyDnKARwCTROYovCaDqeDtMVFWvBhWLNZrwnDIO553eeI1nyBLhqOp5HTPDNNE3fv3wPo4jdJzO04k1IkNIKHefvqFe/fvQUkw7zrBkoukvONYTwemeckAUu/fiMh8tempQ1C25xS0rGEyBtl5PQBsX73cMft+3cCFyxZUuKUaXQ6HZjnyKjZ8bXKMrwLgVF/Hmslcte5ICOmc6cViNMDX/7sR7x7+58I9gg1UmvUBasUJbJwl0O+5kotwtVapMWCm5GRkLciLDFWTbFy5slfR7INJHjMgStnoJ+3XnZoxlGNkwG/qWJEswZbC7ZmGpNxGBqf+Ku/+I/s9/eyZ8iZcTxyOI2ULHnruWZ847GhMqwGtrsdITQ0IfDw8IL/+X/+f2FMOit7MGL4xBmct+A8wTq8la7HhoDxTg3CBusMTfCar+21whaUkDMG471cVqIb1bNArsBlpzOnwjRHIfPWKjna3+hKq+ZTGCPInJIKXrtGoTcgiika2V/VQimCJi/5BV/97N/z/t1PwMBueyGij5yxVXDq+4cH0jQRrCEEhw+GN69f89d//VdM40nG1EVoEL2SXUtSLHstxGmEWjmo8Giz3rDb7NhuNlxcXOK9dJcpRVV6GkPbeLI35xjSEALzOHE6TlxfXqq5RBeP3mOyIeaEtZ6maWVpkgvvXr/BmMrFxmtLqSHizpIB6xy7i504nnWZJMoWdekao8lsa1rviHlmHCXTdzX0fPT0GX3osI0jeMmIrcbgmkDNhbfvbrHWcnN1JTGcCBisoPr+aZYXTTX2y6K85EiehZEkfBQ5HFkQG1UqMzFDWULTystkJIzk+UfPdLkpuJBh6IX/pJpycSknQttwudtyHEeGrgULJUfmGJnTzOV2J1jqCrvdluAC8zyxoJWD8zx99gSs5fF4UAKq4f3dHW/e3/LR8495+uwJQz8IHsSJgCAEAZPFeWI19HRth8ewzxN1LoIf6Fd4ldA2XUPjWmKSpa/cFe6D9NRa5iTfc9+vxHBVKliHr5ZZq9f9wz3ONex2VxjtJk9RfwehoWa51Gzb6WWcaF1DiQuXCpF4lsJmsxEHvu6rHh4eWa9XghEpBe8sKRdJ2soCIDRGDnFjIcbIaRoZVgNTnGly+CeNm0BhZ1G8LZ3GbK7XK6Z5EhgfVivbwvX1Uy53F5RSGU8z9w/v2e52tG1D0/U450k60061YK2jaRyd6uuxlRAstRriZKlVlt5TfGA+fc10/Io2JMmil5NJD6yqnR1ahFU52nSZLleDqBlVekJVCbWIQz4ooRZvhEUPZAQPjpXOYvEUZY11E+aY/FnGykI3IobWpgJ0vHz5M37y5d/wwx/+HjFB3/e0TSaWRGPNOfHOYs5jTHIh5RN/8qf/I+9uf05rGyIVazOlyE5Fpn1W9iJGCtTgJbTMW8lcd14uMBsqwVYab0lFphiLknCRVVv9EAR9Loe4NYLtpmRKtmRTcVb8IrbInqIiBFxTzbmTEGMwZ8uiCBUqzhrBHumAGlMJFmJ+w8Ptn7MedrTNjRzsdcbTAFJcOxdkZ+I8+MDNxSWX/+oP+PTTb519EX03SHGndAkDZ2qCMfDx06dnjtOTpzdQBYw4x8w0nZjntAhCggZsiNwNLw5jkXRxXoLWIiOI0AROGrrtvVjXc5xx1vL05gmmsVqTVuSKr0wpSYD8MOiMzgoDyFhZYhXFT+t4o6ZENd80MDUSoD6NvHn/jovdjrKp52hVi7i9d5udqHeskUhFhbx5J5b5eZ7YGyPdwzKj0wsq1oS1gZik+h66XqS5Vh42r/kGuVRaU4kqPQOdP2q0qzGGMk2MpeK8oWkC/z/K/qtZlu7M88N+y6Qps92xrwEawNtodM8MzUgUNSGGvoBC+ni60YUidCWGbhTBUFBBiaKoGVE0M+Rw2gzQaJhGA689fpuqyszldPF/svbBMMSGdkc3Gufss3dVVuZa6/nb46TpYZkmYuzYjiO0iidKedNFcyDrZF+aNSTb4te8RQ57a8kCbp7cSGnhHD/6yY/5+quvWKaJ48MD/ZMnqtkskhPOs6oxh2dPaQbrQWOZNAE173j5/DnjMOKC4/hwoHZZuVxA36kdq9lUh1VKLtMCm4b38qP41mixU9ZPa4zjjnk+cZwObPrxcRVqUQ+1N77JNPhdHMRX2Ka93e7Yby7Ybrd477jodtrsUuL9+3dM04knT27oItweHgjBc7Xfs7u8YJknam30Fx1pmfnZz35Ozpl/8k/+53z++eecjhP73d42kj/8y+MIXVQHRBU/lJJerzcRx/sPH8i1cLHbs5wmhqHj+vqGlBO1Frv/KiEM5FzMtdzItbIdtuSQOM4T0/GEknvlx+lD4d2brzg9/ALvjlAXW8SQctDuxWb68ZoU8tiqiGxXdTJbVWzODJI27OpLQc44ZzJ0586bge4/bxAwEhXUx83I2cax9rpUDzRPYaHzjuwKJd3z3Zc/5x/+5N/mkGYtYEArhfHJE1bzYYyDxacrtfUv/vK/5hd/89/SD0W1wm01739k9qXgDBXxztONg7KMvEeSYC34uEIIowQj1nuykszV5OTOYmAwuqk5qeukhlUeVAsqtnJVXoXmsZ9R1SJnajtlnxb7t+umro3b2SYi019HoGPnYC5veX/7L3j5/N/D1SeQHS1o89/sdAj0pXDKBRc9Lz79hOCUui0PTWWelJj8uEapTjp4pXv3Y29d8IXgHD5Eg/kWFjMO55Tw5AQli7SqxqTXSt8PPLm5puTCq9evuL3/wLHMHOYZ7x1DP0huVZVjXmvWDpWtL9Z55tPE+9tb7m7vON0/UJPG6OilQfa0xw5Zu7HG7faMI8oTkKle2fYPpxPLdGJ/sdMu33RrF+u2uLjYPeKr8PvyV+Dy6oKhi7rDg6P4RovqM4ircSsEOh9wwQnjx4H3Z0il5MS3X33Hq9dvBR3FyMNp5uF4OquznMMK7D3zlM5hcOI2pArbbHZyq+dMTlIvnOaJXC1Jt60PvzeRQDuby3LRw7AG+0Yf+P73fsAXX3xhhTeqn61YVEVDapLQkaaJaTkBjuuLSy6vLtkOI6HrqK7x+tUb/uLP/4rb+3sFN+qOxjU4HI6SCWf1CXRDpxRU16xnwTGniePDUVjyOOgkYidCB8J6q4rlvfPUJo9E33eE4JQLtcwcjgfmrBtYEJfC1IKDro88f/ZcNa2zEezes93siJ1au1JOeC+pYSuV5XTk6eWlHUp0orSn9Q/eIJpNj0PX69o4eXxyzjgj0JWZo6jm1R/UDz2b/ZYQOhbrGk+LpLIhSFcvtLWIJ/K
w2YzUWvjt777iNB2o7cjD/W+o01eEdqLzae3gsa5jwRmtymDalizYo+mzc/weIiIJJl7SYnc2zetAiwkvwOSeeqYe/1yGQu8qzhecUx4UFBHgTsR29I7ozaAWEjE4hiHz1dd/w/F4L9w8JU6HB2VwIUisC4Py20pirid+89Vf89/8N/83PBlXNPk46UtNdqrT+ApW55oIodF5J+WSTUDijixHbAjq9fD+nPVWLXNKir9iMt/HEMyaZKasTRBrS/q9jkpOM3VR6vNaopSK7ltvn2/OmVqKCtfcmmVn918uZtTULTmGxnL7N7z65p+TylsUNy6+rzXbjJzn4XhiOkw457i/v5UxrlZOxwP393cWs6PDpP5TGwUlncVEq1+ltDXA1en+MxVhbNaq1uCjXBZ9CLU1ur7j5vqScehZSmVJ5dxz7H2jtkKMTuFtJYEXPNBK5ZQT3gcud4PSJxf1DlAr9w/3jL2gldB19vsq1YqFXKumRGuUvNC6wNXFnos/+ROGYWS2aHJnHMKKxa44ceh0Qmi5agoAwjBCM2IcT8kza6GI9x7foI+R0HU68W+hVsWCqB0rkOeF48MdtTVuY+Dm8poherJlItVSmHNlcJVxGM6yMx+ieJsp0XXQBun9D0uyTdIz9j2eZmOpbqJSLdvJQR9kPtTRTtdLp3EVJs3zTG4NlnKO7YZGP45UCuTKvFgQoUWnXF9dk2omRnFKt7e3bHcbxQ+Xwt39PQHY73bWwCf1znGeCZbZ5Gqlecfh9EBJlaGPhC5QUqIfheGLEAzqRzZSrYt7puWkRX0cSWlhM2zwPrDf79mNG51ozWNzrgL1ke1upw4K+7w3Nm2suTuhi/gQOM0zm82Gf/zv/c9EAm4G7h7uTMHRfn/l/AO+hqGXgc4i6/u1A3xdRGPk5skTekswHsZRC3DNDENHK8K455Jo88R2v4dWyTVpMcrJPCWNftzy/NlzYOZ0eMPx7m/x7ZYuihR1zpvUVcqbZtyIy80UTYAV08jV3Utl06DZIaThbNH5eCK2/cJpIm/NqU+hGpTlBa2spsFg/TB1nTDQs+uo4JV8UMtM7wdaV3h3+zXv37/i+ac/Zr/JnJwKpLBmPkXAVPoAp+mef/b//r+Q8yu830D2NBZaUwpso6iS13a50gqeQjf2+KD1yVlmlWJoBCF1yK9w1ou5x2nKE+R0p+GjiGpXBRe5ZvdhLSTnqckTqln5fKO1fA5QbF6+ilVZV2qlLdpEcqecstIaLTUanlYz2aDt1gobNzAdv+Zw/3dcXz8nMxCcJ81Ktg4NLnZ7qtPmmiuUeSZ4z2azZdxshFgET+x7HUiXBR+Hc+J2wym7z+D1GqMdyEyoEiOxeY1T3715RYyRi8srhYoFjzf3qO86WuhI6SiOe3V5FcjLQnDaJNaTK03l6ftx1AkhhPMNmPMCOMZeeHqtRRlQXgFux3k2V2PAO9jv9+Sm78GjrKNWKfOMD4EpLQScGsuqTki5KoYgukjpzOV7zgVSrHTLhfm4MHSVuNlAg1/+6peclpl/8Kd/qsROIgrt1kIcaBznWSOb1W3VajElaX1woe8VkufNLBeco+RKIFhdZGJ6mLnY7dkFMzKFaMUqmqiaZcHc3r3ncNCp9PNPBzovGKCURPMOCmx3O5ZlJpVM30eWJWmzC4Fi8eU5a4HbbgX7lJIMyy3knMhLYhx7/vSPv8B3+sxKy8Im54XtdmRvMSTOgY+wzAuduZwP9/d8+913UOGTTz9l4wI+muBgmWWaqo+9HJrkm9znXTnHl8cQJVsMEe8xElfw2P3dPR/u7wgE9psNFyZTbjRiUGTK6uReF8oPt2/xL16yGbacTkdTSmpiyfOC7/moiOh/fMNY40JAME6thdB3bPc7+WoatJJwVRDlaqjLRSmeIQSaeWeC78gdIt9L4nSaFALpoVE5HSamknjy7JLl/hX3H35DaB9wQcbU0HQYc82UgEVKPLdC3A1c9Ra/4cUrtFWu6sGtJUFSVIlW8OeTrTjhJs9Jq8SqOYLWCNVTQ7XsqSKPiFd0hRW5a7NygoC8h+h7Sqf+krYsfPfd3/L97/8ZuRvoWiO3zKnA6aDK2W7YcLUb+PnP/gXvvvor6AK1KERPLvWsza7Y5L2S6rUSTK5LrYQmTq15bT4BZx0aDShnyHntjdGCfrZ+imuKmrKdWGYd/FzE10atnfjGEChBbmZx+BLeNNuM1nSGOatOITLj+hFMVGG7E5VK5wLOdbZ2TBzf/Y6by39A8UG+kpqt28fRhYElnch4+sHUpF7hi60+8k7rhFBKIc0z0zQzjj05CW3pgzxo8zSRc2UcTIpfK7E1VdotS6akyu4is0wz+0EGi4gML+phiCrR6aTJbw66vjfiR73OtVrhuxE/zjtqyThvuuPWOC0zwziYHM2fcb7YHBsL1dIpz9iN1lCLA8QqnbPCzgp938tjYael4Bx5WSgOwjBQloWvb2+ZThN4ePL0CfvtlpQTm01PH1TxF4Ln5fMXLDkzbrbKISqKT/ZO00Yuhe3Q8/n3JDEbuk7qhaZ8I6okgNvQCUpxVafs2OFc4eJSTvBcEqd5Zp5OMh7FjtNRyZelSuM+DMqrOZ0Sb1+/ogIvXz6n9yogag02vaI+umiYqNO1EtSlALlujCzLco7sxhRX83GCDXg0wRTr03Dbrb6vKsLi+skNsVZiCJymE33f6yQWAn4z6oCAZ7ff8cPh+2RTJaWSCAaJ+a6n894cwOKZpmU+E7ieQj9smdPMw+mB7bADl8lZo34/qLR1GAaexWekeTpDiGusR6Naubx1h9j7HTe78+Jf7dBzeXkJwHGZYZlVsBUCZwLxf+RrlXP3fc+79x8EKwVPjJ7jMZOmmX6Q4bM6nRJ97AmtSlmVMv3QEaOjJc9xViTKfjPqVO8cx/sjr779luvnV5Tynjfvf4krD1QPsQm/butKbgpBnIk0ilY78ZGN2jyuLjjXn6GT9VZY0epaq8ld5SDWQuzwLuPNebx+d6uV4hS8J8GjKlvxOknXVR5lr8dVbRY0LdzBaeP55svfcPy3DjQX6fueJc189+23XO4vaK4jhJ5v337JP//v/hm+6818qumo1Aal2ucu70GhyKJBJXYday+2W/PRUE2oBAG9DhLVSop0MYUsG5GtsEm99pKTTt7e4GQaNep6hCp/VzAyaKmZXdwo6cBiS7xTW+BSiw5fpTItR3ZowgjR0VqwDupKKSKXa4PoA1N5zZtX/4rd1b+F6y4Jw0DOC2k6yjMRA8mSEELQVFeaOOTNfs9Pf/ZTGvCP/uzP7FDg2O+3lFLposd1SitItdi1Ezzlu45WCpHgiAS+9+mn0t6al2E37ojrrWFW9Ijjdl7YhkDt9WaxXyoYItDIOtEbCcQ6AiN4JKyYv3PQFHIXvflBna6od4FlmugGJa7GKt6hmupIY3BlmRPdKI5jrSKtTkRWNELqeDpw+/4tmDdgPp642O7ZbJR6WluloxG7nqdPu/ODM+cZamPoB5yvZ7xut7vAeU8XA0vKkl/6DpkeM69uP5DmxJOrK7a7HZVGmk50fU
amv+iD471mLdit1XEwdBM38W/1nVZvMQoGhx7Ywtk1qWgpZCqSt5qTT3sQkhPkuCAVqzpNCMfVRKIeezBYoQyUv2ZGAOrqlZe1spSKtU6Z1Bc+qtvzl6sO9x1f35UfoSIPueHtCTP0yGjfTr2WGlTXnmpXvTywzDXr0jCsmTZriYAdK/vxCITmHty4JsGGuBvcMqOBx16Y7EX3N7788SxEX4t8Wgy/t6HlgBxZfz2Kmw6rIIrRbKYsK4NZuTa8lC1WSsFZTb+5d8/fVnjPEDu+mJ6x0MoRCHSMDOLD7AvVwqoakQtaHDhK4LhORFnC0A6wWM/UwgK2YF7jBJT6oxBFrL9AU7TdWGpX4ga1OmEW6vrsyDLdpMJiBW0Gk/t2ydlZEG/JyrkKJstF8VNlLL5q/V2hbcjSRoeE5SixuNZsaCcTL4yH6IPh/oRZYiHtztjMZo6w42kgcY1OTJJIgjAc2hJzCnbH/WS59z9SIkXDr/vpOnT9RVYF5skdcwjHz79i2H444UIj/+8mtSiiylbM94f99XV0d3L2jUdaWumf2LPafzmW+++5747i1fffkFSUsjx8IYbceDPSeGp601U2phlzbHPASl5srPfvJTvv7qK/7+7/+Op5MQ0uQBXYllBgZjIKgNMDUH1DxnKWIVsLRKbND95kUbNWFeNthuaanVsq5EkjXsfiMSZSvWHPcuK6dz5fSYKeXMstgDNqTBeNCqkALTmExMshuZdnuGIAxj4LgfGafG4TAw7iK7oVinEhq73Z4QIImiuiIxElMwZpJe7I3F3+EQjKIKnWvP9sQrwSrUDATzZGrNxGFdxakqVFd9mqvwpZNQb/XFjIFsQK9i98hZRP2q1O7jE+xwlRiMC24ltzni4px+F3/ZoBlKLtTziaVk6lrJLdOqsvrsQFUZwgRBbI5AYyKSm8EfGiJaCkszG/hajXek1QgGaKWQ6Ru+1J8a7QZ8/jtkezovlxHwYeKz4LmdBO+ivKp8PuTVTpH1s9N8YBlCQjFX2KomULyI2AwaMmJDRLR4ZyIU9TG/Cz7BX89hAJELzdWG2njndrmXl27pOe310iX9MCkAeGEiDrdJsdkMybUkyrI0zudMXiJLsQRRVFi0QRPu7l/xyWdvCPGJ68OJq+uZpIkkk+1z0eYBypJEX4CkNKSq3Zem5DQiapqoLMkCcgi+C8ICbIjGZFKviMVnRJ3k0IsTIypwmaMp7HeBaT/ZjEAxVwNMIyFut2ELjPomODEKfbRd9fY8uQ2Jn4kQuhjUnKlLyVaIIf47oyU1TNNwmWVdiCCKbcfsi4ysg7lAWb3L65v0VHuClK3v63vSQwhmjNhseliCEUk0GGTVi6NOH7aY0OdgwpBG1vUtV3evOBwPpMeFv/zlj3z65nOmcSLnQlmWTe8lMVCLWagH/M+l2YKhIfH6/p7jfs/7Dx94fHy07e3G1glUMShhXVZolg0j5jA6zzM5Z27vb6gls0sDX3/9Jf/9v/3vqG1ifTgzysA4wqyVsUUG7QfLNZfaRUNdJWiy9OBzBYBQPcPSBZuCUTzDhVbnwbM/NuYhL6gkxp0Q08jDaTZKpgbW2tDVhqN1hocm1AIxLBAeTX8RTMF4fTNxc5/Y70bge3bDyDAEro5HpjExjQpRDKtLo22UcwZCDDD4Fr86JmcHYHsTnlk1o+ZFZcNrYalG45XYaXjikIhZaxDU1MGa6XYFTSEMccODQdGSNyxf3GtGgIIJklSbCw4bazW4KM/zFkQbjqE2m8M8nh5N69CM2bYui/vQjE6ZPEF7YppuWOsKCrM0Hya2Z9CQ2WbYw+MVc09ezUSLIs76kGSBr3WVt/HFpf8Z6xLsdHTKq1VU21pRNUXulnwuMRakPYvJ9jOtW35Qzerl8tTb92o3CLSVsp2nLv7e1Bf69BlB58hb0qimr1fbVeHLNNgEaZs4SrZksNGGdUGY/K16d+HDcJwybcEnUVtA20qtkXlR5nNjXZVzaZADuUWyJrRWXr56xcefveJqeOR4OHF3OxFDJGpwA0bH7pMFKxjNK83PeBNPIOLJ3+dcQ1JqtU5DU6TWYsHcz6c5z1pSFMywj2qGctUTt2Kdbh8x3Ny8wBQQntw98W7urY7tB1fXW1uREY0MaTDLe6rNRTahnlXiMQ6UljFGDG5Nb++hoYxpsAQRIsF1L0K/z9adi/azAqbQHvDD4h1ARwGcZq2mPFcJFFWfbTYrJD3wiT+/zymuW6HhgsDts2K6nQ+PZ15/cktrym4/Ucqtn6/GEAM6mlZoSNEISTlTFosnwzCgKTCEiZILORdCSLx4+RoRJcXBhrAEJXhH/JdvvyXGyKeffEbNtoFsrXXbvGa3q/HtN98SgxAlkmvm6WFmvBt9v7RAbag7J1aHq+TyccHHsuI3jc55p224nIToN0e3i2MPRsdumz+ENmwdUiANAqNwmCqtGZ3N9hsvhn0XnO1R3TcmMq/KvGQeTivfvzPLjETEdIPC8frA9SGwmzAfq2BagBB3jLuRIdo2tRhhGIVdMvvfhi0hmVLweUBw73m77n0+ELX5YMndT4cBowYazEOwoWxoisTkxojLDwJowOBCrc2ZIGLH3UVJpSljitRSWbPtz52X01bJ2pnqi2eyrfssVrVWmsn+FULw1pVAcwdWrdZBWaC3IGoskK6LKA51dduEbovhIjAJQNoqMOkV2rOgeWGPVU8AlyDRpzyWLII/XM8gHoWmGWHwhy/SyO451cwGw2cPAds2t5Uq3fYEw8zprX4Qn11cbLsvWpTwLOh3y8M+ixDvsMtl/4H22Y4xs0RxYdVKhzDs9d2PLFhkbgS0BGpdKLmxZtsgt7i9hlZlaYnWbPnTy5ef8tmn9xz2H7i+OnOVMtGdnCUqQazjatWea6It+xK/DupVvMbqgmuv1PvzHoLZuhQooaMJAQk2M5NmqMG2L1ztz61eCgow9G2fxLt16z66KM04VR1i3PBH6wjVIFdzIr7srQBziMADvaIOB7n5gTsg9FmRiK086FRynuUCsKrecBH7ezuaz4wVlUvH4Of3ea3SsAKjadvYWXbuLUupRH7wA96t1ho3SFXEvNjWtVBL4tWrz5mzCZZvro7M88JZlOP+YEmn9iJLGWOE0RYSaQiM42AFTjX25m9/91tqUz799BOS7V4eqVUJKfHw/j2/+c1v+MmPf8IQI+u6Ehrc39+ZCZ9asogh2pIaX8kX1JTRVZVd7H427jkqEHxfhWgzpkjwTVs+5LOM3G+80Bko/THsVgd4Jqe3b37Tkfh8hS1IQo+jsTOqt6EEIFJqYZkry5xpKK1FcoZhFpoWo+8V5ZTNeTIGeJrf87gX9jth/3BGxCohRRhDZHc4EAfl+rhjGBIpmWVHCnbgZBBSHAiJDaOMKVk1Gwz/FMlewCpjWOgLVVrLJNdXxGSc9eYDsK0iBe80vANTO1gND9wWwXiisZbqCumA5mwoe6cgViWXYvMTFdb1CdVGDDuKKjh7BTGWWSOx5MUT/+BQkQ9XQ/fvt/tZxeBK38u26RMMcnm2lc2PQN998AO4ReFCbb0
UVBcVM1sy2eYXTWmaYbN1wWmd0TUpz1hJcilhjCprSvDOUgvRzeOCeFfQf85CQtNK0PjD4KCGL9vebEsXvatW30qG6zjEQVUzqPPP3ky01hNlwXa90AxCXUumrJV5KSwr5CLkHCnFvINqs2Ty5uPP+eijI9eHR67vzhwH2xGSJBKDnSt12/PgA9wApAi1BWLtJGTD9lvwDW8t2b72GDZUMEVhjMLczENKmrq+xK5l2NZ9ZnALDzyw4g3W9e2e0Y06Vc1zzATCyWFE2e7xNmCParBps87UNG52rWMIqBjRw0qXDM0K2BhG1IkTRMxm3OHKvny17/+wLrkngA0Y9LPQLT28i+jhSDqNtb/hjqg8Lyx8eNwvgrUgF5gSh/Ojs9ObuQWYd9qRYbhiOa8M08S8zNAqcRzJeTXPt3FEXEd2Op2RGBnTSAp2pmtpnNaZ43TgzauPKO5SkaTBMi8s68Ldi3sLBCjDkPjw+Mjp8ZHr62tGbCDXsGUuREGzXfxAYBy9YmyFGG35eQhm+Zxicgz2ktdVzVHT91gBZkInvij9krWddxAcRmC7dtupsmrMueVN0eDujx1pTArt8pAljbYI/GjL3mtdgESQHbVklIzISCmFVmzHRYhKiBDSuGGhRHMsKg0en05MQ0SrMqZAY6FRaFUZgpDGyTbMpYGUPBBFb4lT2qAU+sELkSCNmAIhJkI7bf8eo8nrN3aM9Vgbnt5U0Zahr9SUXs3Z9Wm14/dhM7+zu97tMQq1Vntw2wDa7FyoPQi1GU+9dwNC8mGl3ZVON90c3jYU1x076cG80emPG2G3BZuLVSUF5/3X3kkBOCatDj9FHGLr1Z79potAKm1QTV+OtHknbf2pUYRr7xroA29jg6DPtAktEEOjNTvTIhC0bkZ13jf302mf7xnkpeqbx/qcBDeLwz5bp8lCo2als1cuiTI43GbXcM2w5syyNEqJzKVSc3AVtVBrIqTE64/f8OnHB17eP3G7F2I4ksITNGF09X/1udCw8ffx+Yyvx42jiRbx8iNc2GOJZF1BLNQqhNKQKEw6sBSHFINvpWsYzLcuLGtlzaZXUi4d1ZDg+vqKMIjPB5y1pwFnvnh08BjQC8xaQRZSOvjWNivyCL5vIjSCryEWgQElVoyl5RYdEoo5IjxjFaovTb+cVp8lNcCfZ54VNM/4T17I2VsOjqxou+zXEBmcFGHFbOjq7V7P6LOfDxddhbgl+/lckDBxfXMPYgPz3TSh42izDjdPPS0Lu92Ob7//nn/4h3/gxcuX/Pjrn6K1cppnRIKtsV5Xpt3IJJGcV1KIwYRzEljXlZuba37+s5+y5sw//M3fsCwrn3zxGT/94mtyK3zzl2+JUXj9+hX/4t//l/z617+itEyqymEX2B9HUrBd1CG5CR+R4APd1gOa2GCrSQ8blmTi9jg+/4qYCOkSBPoHl66A7g/yBkcl3x7W23xxvFsxH/VGGM3x1ayzXayjCRV7CKOM9LWSm0dM8N3NMm68+dIuzDClkZuCmBnekk/kAIMWYhlZWDZKXoyRlIZLoBCrzEx4ExwPjEhyT6ogHjB6u6nOwzZFa8fxuz+9aIXm1U/n08uAkH3YbnqMwGVLm31StZmBU2tFxu3aouvGxqDzpTY2Sxf2QSc/XAa6rkv1Z0dDfEbhBBisOpIMGGRhD40HFOXSeYpQXEiGGxra93YxW19AkzaIwoLrYp3NM3ZLEHMVrVq9Yu/299atRjH2Vu9YrLBXzBRMbA91Z9bApRpuFZuhZlR7EWDuqU2blcrBvs/wDrt+qmz7nvta1W6psjG3mt3PtSp5LSyz0YnnEmhZaESyKhQlToE3H33EZ58O3L/4lttjYQhXDNgOCKuwbbFUiub1pMHcklVdRNjsM4YopGCfuRBJAqrROtGYCNVW+iYR6yqq3fsUjJGY/Tnvhoi1OASlvdO3oDw35f46cLia3EIlW3nhbtG9HjFUsXnHVbfuOcUDQxisg/DCNBJowfRFRhHvsKbTsJvB1VFXE9FGdcaIbBrNTltHm+/rvhyMTk3u1PBNOS+waXA25p13Iaap3lClS3ccLn9Hz4ed6WdzzUigOrFgnhdOs/CHP/yBT778KSLmdxYl8Mdv/sz9/T3jMPL7P/yezz7+mJxXo+lWs+jfTzumYSRX724Rci7I4OXThw8P3Nxcc7w/cl5mqMrd3QuW05lPP/0EQuDq6poWlJZhnmfzX1pW/sf/7J9xddjzH/2f/kPy+oFpl2y3wWCfWJ4tsY/OGmgY1Q2Hl8KWcS3gPF9luTmJigXG0IG+H8AShnHbQ6/eTjpWK/2mYhvSUEpz22zpRwcUc8oUOtpgQbdi+L+lGqexbeyGYq6yJNK2n7aiAZrTLVMc2B9eILq46AET7WmwikmVJectIdiXPRTqlU8S/PXtv42h4JWpWtoVX4G5EQSDD1opiAwWSGVwb6ZsD1ZnXJHo6gRLVnVLKPaZHYsXs+lA+qYva1Et8RoI0QP78y95DuVgjqhmfO+22A43FR8ad7pl1eaa3LjZhmzeBU176vcnqasakj+A2c3tOpVUvNMM2yrb6Pegblmrv0dzKbbH12Ag2Z7Wdnlw/X/2jgza5cHuNFW13sJ0AWL3Fa+ErYLxjqUnw+CdDK48fh4gLvYmrTRKE9bVWEy5BtYF1mI+xhVF18b+sOPLH33ER28CH706k2IlIQyyOvNO0bhDKwQx5ksTNsNNex7sfbfWE69pY4zO7kJF3/SoMlA68y1CCo1CMjfTaGe1enGS85lcKqVWN0a0TrX6GOn6/pooCrJazycCwWjN4s9uLwENmgQNgpZmz1rwJ1bYXF3d+6C3rdgQ1vy9GqDREAebo6i5G4AP6BN9MxwSfbC84Rxb8fZDhXz/d6Oc1nbRSHkP5N/zQyZd35t+mWkF0EiTZskbizHNi8LT05lxekWLAWphXhulrI5amAuFbNvmlN048aMffcHN1RW9bovR5AuXpK2E1ohDIj0+PKC1sf9khzRYa2EII+Nhz0eHPURrP+fTiRgjP/nqK9biC2rays9+8mP+1b/6D/j//L//H1QWb5sGWvOBLIJKcTjCKWn90nqiUCl0G+1GH9g0ItEHipZATEbTE0QPau6Z7vQw9QczdijBHSFrYzOZc1CEbn1gTyOoPO9jenC7eB+JH82NRdOqLZSnY4cB5HKQiOrCmh3i3ZoZ6lm2ts+mLkgLsHHtuweMhxUJW8DTVraAYopn0JqJkuyYSQEdLXBrX/giNB829/3R3akpCpi1h3UzRo91BgXQB+NWfnlSoGBVcXRhmH2rdYc9h18Shurluvd/EzE4qy9QCT1D+5OzdRIi/mBcuiTV1R037eGsmjHPJq8qnUor/T4DaHWmTtkW+th7y35d2MgSP4TFHC7zD3CRAPQZAj537hHF2Uf+gCeJFKl9orpdKPVZjaqgBd+I7LswvJIMOnjCjjQiTcXYh/NKLiO1qDm3rhNGYDMigNbI8Zj4q59/ypefLezGM+PO5z9FvPu2PJXnhVIK+/2RGJIv/3LZm89T0AsOnpxC3CSgTag+8A8uJk
zBiqCkKzUJFCXraq4FAORNoW+UartUQU3Hk5tytQscr44UWQhaiWGy4k9kUyVbUdlvRvLZwEpLMA4RpBK9E9wKrGZQpYYON9rPt1ChQWi9a/SVoHYCvJvK5lbbCyX6AFn6t9EH2GYmmC7FbsdK1JT6pb+uxK373eZnHfbTTp/2XeCoH1If2auiTalFOc2F/8X/8n/OJz/6CY9LISWb9O7Gkd2LV0iyLud6nNCcmaYdoRbCMFByZm2V83y29+jsNqWhVWgxkF6/eoliLcswpI6SWLsRjeeuQXmaT9Raef3iFSkE5rWwNqUsM19+9QW//93n/OUvvzKDvLqQpqO5lDqWp1oJoRmU4sMx/O1c+Et1qyT7Q2rXs1cL6qnBGBgRw+sQS0gbJ115hvF59dUqq7dZKfpDTfQqVNm4Ch6og7fAFiPU/YH0maxftt9/aQUtEfYb3lCSqrEqtjPnKSr4YFKKv+b2Yc3rx/fz2gBY/HvXbYAFrur0hKHq7o4ybVV160wNIDQF6RqK3uU5aUDCs/c9WrekXTNuXYRo8N4iIHRjxEsL3b8uOLtXqtv765eg0iv+XpmD31vt1xGfQQSklS0UVG1QV7/2VnUbjDg8f1b9D9XhEhPANQ94faD+3Ka7i+G2axXY3EmfQ0ndUsO6EraOsLXF3sPl9pp3T1PwJTmbE2GzpKkOjfRFQsVtFfEwJRq96IlAoupAK5l1qZwX3z9cC63A0ipVzayvhcKruxt+/OMbPvtk5erQSMPkkFylhW5u2Qhh5DG/Iy9ndscrq7odmuxGk6E7C6t1X05isqo2WrA0exorZFIcKK1YUI9KbZezb0JRoWqhls7ysnKlcOGn3b16ZRUwzWabySHr7XDo9mcVu9ad7ZScnq7VyTW6sjkW475lbry5nT2cYSW9ELmUklHE+QmeOB3pUF+8JYFLMlB1ttclNnS1/HY4xV+5FUu0/A+/1OeKF6+pRmcLioiZN/oCrnnJjOOBaXc0A84g5DmzrAv7aeK7d29ptfLm44/ZX115oSaMaWRKkeS/MwWzX7HePboyW0gSSWYta4Z+OWfO68y0M252AIZhYq2FMQ6cls7NVk7zyVS8uXJ//4r/6b/6n/Ff/D/fseYHapsNt3YBHmpSHNFAbcX2P/jJiVw6DHtNX0SidksNVnB9hVjQ6R6K7VngtWrebm4XvICJwqo69imB1mBtPnQN9dnhcu77psQVUzlqMzpk7G1gMxENgeYul5evTs8DqGiFmsx+5AJndGy5Wdu64eVtC5DNVaM9YTXMvE/U/JcMgy02jNuCr78n2ga3BfdraoorsK3CNlPKuL3nqpdtBxDs57V3R5fGOLSeNJ3G6Pe2JxpV3GffrxkWWAOg0gVWHUKxBND/3Fv3fj/MM6oZq0uVznDpGhuVBq3ZA4zjPhhHvQ+kO321bXCOXRfxB0C9DRA8oPdmoHkR4/MefTY70daH38JlgX1CW0axeVVtCq3avWhgHUNBWnKI4HlouBQIBtuJX4+EubiqqdZrZV2VeSnkGmnZNAmrJtpq4syQlC8/f8HPf/yCm+PMcV8Zx5EhRExXEZHgVEddESp3N6/cKiderNLVu/0+mI6XDk86i6j1rtC2CCoFQiREdxMoYv9RbNgvtt9ctdJyMEv56qIxvxJFld0E93c7hmAaAhvEqjnA+veJM1dULIjXgAXdFhgFovuHgSW94FmuiTGc7MRYJS4h2hI0h3LrpnC2vdxGDgn+HNiZTw6TN3GyjBcXiPBDUaWRRVpjU1/TNqRxO5Tamj3j6vYx0m1DLt14X3oliCWsZr8r18a4OzDs9jQJlHVmrcZeujpecXtzY+y3UhjHiVoKcRhoKE9PZzdzLfz5L3/i+vqO+/t7d9cwCL+WlaRq1gnB3Un3afKz6S6N2PB5SCM3N9FELwpXuyPzPBPHARDG6cg//R/9+/zt3/3X1PrBhD0lE4e4VbvQoJhXO9HUjC14Z4AFrl5NtI7SiRDVqrHWGQ3qFfsG3EAXq/TOo7kqYdtiFZJBQa3vDRBTGmpFYqKKpacgl5ArLTgFM18OGlA6rRWHbdQCgUhfot69YOzANWf5dC8m63Qs6aFWuTr40Y/E5r/UsJlIh9cuATVZtSoda73AIxWvdMWgCmnWwdHqxevepvQOKzwbCAJNkgVOxz8N2hHz2HfRXX+75mzb5xKNKsnfeANpVhGLIs3KgibBF0SZQhjt0JJsD1fbhs1Qfam0irXAHfmPQItig1/pHWGh+/AYPnx5j9a9dOdWZ6nUrsuRjaDgjaO18ly60ediN22dAdV/d0+wjdACoXXxYPNziJ+FvD30z9lsttNCXDnMs4ICSq3kAusamNfKWht1beQGtSYoEZWV673wy19+zmc/itwcVgZV9kOy62be+s5GLuBEVqXawNXPsimjLYkZ4cSLt9ahPbvmIaitLW3d7M8+Q6MSg9ntqLPEUn8mfclUy3ZOTEjqCcbbzNbg1euRaTJUwJyNo7MjfSWLw4/9qtoyIetwUrIurG+XC8GAZ+3zQKAXUoAroa3TJEYagRTbs+442LPZE4Q6fVt6UWdFkg0nLz/zg/PS0Wzx8+JTiW70F/zh77MNFMRjEv6uu+IfjV7I9jOkzMvC/Ysvub69I+dKGq3jHFJCUYZxJMbE49ODwUwhbIPrNA4MQ0JXmHbmiyVuZChx4Ltv/0IaBtNJPD2euLu9YZomlmj8/9oKqSYKZhexzCem4xEtjSWvjHEgTYnqi8R2uxvuXn7CX0+RX/27vyHP31H1bFkIC1gBSMNAa4X5dGJ3PDB4C9svkPZW8lnL1kSQDlPpPwqn3sJt8EGHBPCDjR0qCcGqxWAe+K0URK2rkFaJpdLGwR4WbdQQ/bAVO+g1IZh9dOtCMd1eCVC3yOZSthpAafS5ptTgoa/JJpp5/u2XL0seLVjgEw2b3CZhvqYbjOMp9mLn0d9RhG4lLdFmN7ivjjZvi11I1ju2/vOexPqbMhivJ++O8ftnF4C2/S57+83vSbBKqXd30vFsv17+7U0EW7bij7EfCEuN9XKfO8wkdePbb1DZBtVc2vyLU2jY5lVbPeqdQ6Vswd9+1jD2ignsuptu+0ffc6miK8/JF9XtSS5wpNmMIJck2CmMm0gL0+fg3XCtpi/JDfIaKFWZ10zN5ss0N2hF0AqjZD79JPFX/95nfPp6h8T37MMOYQfB9BRSbG6UvApofo98nZfd1zAYZh5sx3n1wBYwmKziFHV/39Whib66Vjfo0BJekOg2MYkUEkvOzMsTOQfyWqn9UfbrWoFhhPtXtwwpuwVKcghyIErvwNQLIzuNVbvQko16b/tZhOCEEtyhwdKYnfXLMiybX0hwWG1jtsmWpHqhERFnH/oZ6HTm7lLpXUGIBm11hTUY9Vwun6CzyV1WYC7LVS8nIjYjpNSmJJ/DbANvvdQReVU++fRzKmL74NeV0/lsu23EVj3EGJnSaDhhjGbNHwO7EJhPZwT47M0nl73YEmjryt3NDSEF0uPTCRkikiIZUz5u4q5oTIa1ZgqNXWvG7R8nu1E1UYOVKCaMm
vjiy18yHQ78m//m/2VWECxWb3vAsLuZmI7CvMzoNDAF35K2BSKvsrFq1/zeI0G79N7EYdbiPbNZcLhgi7eqz/7ODrwKRI3b4p6RyFpnilbSaj9WA0AGlOoc8ibFFw5l0EB3fjWKnnc11Qbp/b3gLaNWu/kesmwJEX3uYoyP6NVHn4VZ8WBsKfGtWl3P0NWqQen9llUYoVtgg6p/GPLl+vTKB9tLjC9z7wycTjVuzkoLTgaw+8H29dymWiSgLfn7vkBn9KFnV9zy7Evxh713JM9sKvy61D4f0maiqO5hw8U7By8M/Lsvn5NGUKuiO4IiasIp9blMUx9Ob5mxJ6K+d1kNg99e6/kgW+36NoMftYnb0qs5C+BKCellqNMjtX94TyDNNwd2RzIZqNUKr7IWznllLbCWSKuR2ho5K7kFIo2PXkZ+8bPP+Oyzif1uJsYnhrgjxUBrq/lUScfJn0FGaq7G4ronFGSzULcCwwga4o1vwK2T/BEeSBRWQIMtxgktIq261YRQi71Y9p0O63qmZvV1pIapbx0E0Ap89mZgN7pYLbiBpoCp9eUHzxBAle4o1RXQCnVF6TPPXnBuNDSCOiTrFVXVTCmZYZi4wJbG3JJ40TJY0LelZUFtwA4VSWHzYPLg5HGhFy4CIV58quTi2xWAXiH3RUreOKNOGAle6ILTucWhK08w61r46PWPTMslwBAZY+L6cCSGQM5u6Bej7eZZZkKM5FzIsTGmxHldmR8+sJsmpjRyPp3JxQb1N7sr0uvXr8ziwV0Aw9ANqyzbRmCJgcREt0KorSApEcfEOp9YTmeWZeVw2FMKfPrxj6Eu/Nu/+f96sFipcbhcGBFEE0GyB4hL9FDt1022RKE07+ZGtkssXTXpD2NrFqM2jLzfABPjbIWoBzbBB1KO3VMbpVU7lE7BNzaHPcIijUImub8SQ7dikN60eLXlME5bSTJsn0ykURFqs+pDgu0hcLsYMnb44hYB+3l1CrHqJSH4lexe9X1uIB3ieB6R9TKABX92ekW1QT3NFa++fKU70nagxKuwbWT/7PdvCF+H3foh9zTYA1WH/eDyPTz7rDz7o/a/bzaYDqE7qRpDpLkIT0hu6vZ8FuT/29+X9DZV+3u1pNUN0noMaf7+qvYKsHvmpo3Saq9pMKZJUJrHH+NeVbWBv82UvEPzZ8Yq024h7rOwjQqdzBuJxppXSlbWtXJaYS1uV56DE3oit9fw9dcf8ctf3HN9yCQphLRHBPMaohHDQCCy1OwB9jLs7Tu9L1YklgF8+meyjW4dLeIzi4BW8YG23e9B7CRpsH7UzFYrqDkDlEWgFNa6kNdGLgrNljH5Km47r03YjcqrV6/N8sWve+yzOwJseiq/r0GJ3tVcQnn1LsEttVswF4YexNVhMDGYTIDQKoPT6EX7LA8LyD4La94NJ6kIiaIusk39GrXtfkq0mYIbzrhWpjqVuBv5Pfsc4u/PId2ofYM5lwKn+e9sRoSxDticX+/vXxDjyMPTA6LCOIwcb2+oTi9elsVM/1JinEaWOnI6PXE87BnH0dxha7G11GLR8Nd/+ANDFG52e3a7yfTtpTUbXNfCFIcLfqa2ACQNA2M0Lm3L9VkCCeRc+fD4gZurWw77HU/zmaEmPv/0r8inmX/41b9G2wcblInh0MGZM+O4B+kDJNku2IbPbV+DBZ1NJ2FXtxG2tkFCNAKJ4A9f8+/zxOOMqs3Rk2BVAUqsgUzZhrnqFSxaqYLTAu2BzrVZtc3IkKC1BCEYS0DVLK+xi62Ok4cYLXAVCxBdrCYWBz0IKkUXajCzNUJzCKbTf40IaXqMjkt2nNO565ZqjMLag9Oz4NnPI914MeiGxds3GERVO0TY4SAuXcwWbLuKGTZYxzoIx7KpPxAtaRO6AM2xLJx+cPnvZ/MCGxBDX/OI6rMKWB2yqx7ULgM+h4FBi8/VjK77gy/BxYTqSbhY5+FMmw4tGXXXAkrH8Wv/PKL28Hr1aXHIgor9uVssKEr8gf1Gp7+22udqAa0wr5l1bcwVaoF1gVIxjUJTDvvAz378ip//8pr72yOqD7YXxc9nw5CPqJcZzxAmEk4Yac7l993UwWds/YBUxTDpaO/LjOjsrIVoyV49YDaHeMSv97YrIgpRQVJgWQpLWVjmEzmvNB+0tpqpz57xlpWXb64Y93tg9iVZOET3bF+0F2DSLXbEf4v25Ci+88ZWEFt3WonitufIdvzEE6CE2GmSFqR7PHE4KnR/Lp+buefEBjFte67p999+l/rr2HzQ4pTJj2y+Jhr6qMfp13ZW7HSly6/zJKRqA/alqFmmqLKuyo++/AUSIn/685/Y7Y68uL9jP0yMY+S8LqxrZr9TdoeJNWfyupqz622Emnj3/p0llqtrUrS48PThPe/fvuWvfvlXNucobssQUmIKgVwyKSZzDpRgE/AxEQd3N7R0w5QG1po5Hvfsdp+SMAOp28M1j+cTj+vKi9df0qTx61/9DaW+M9OtTjMj2YPkOH/wgGbRz6rS5sOhENg6jj5EsoqXH1AwbQ+yNcsds+98eQvyphFo/lqBQGll25KGNBPCVbcs9p/sLBhjqxTD2ktGJBGTiU6qBx2JfV4w+GEz9kDYXB0tYdVWHDILF58Y9xaqqDGJYtsCu/hB6nXGNtj306e4SM9NzYJe7CfMw8b75HbB70Ov8gElYZb/Nv+xwbPae6peTYkJ3GzPgw/9elWkFyime91v9tzbNwW6GVwIl+7Giv3LsNY+Vk/0/rCq/XxV9bNjxneikUq5fD94clDQ7jTUB9qBrmZ9PoewV7dVqfZvP2QzGZvaLUsUupdOcKiwv18zq7P7222h2e45/p6aU2eDm05G1rJa+5/NlK9lsYF9M9rC/ih8+cVrfvlXL3n5UhjDI7W+Q2QkSAIxSDGFRAidAWaFXOqBTHGmjZ1P4/y7zoV+3WxgqpqRmKxYEYUQXGPkYjF/BonBIUnbq6HNXIZLVOqqrMtCXU6sufjZ9sVC6GZDXhV2O3jx8pYQ7Lnpj7QVSb1D9WfRP4fNBMCcfNXFgWHTHNk79dmDqLehBg2JU0+bVtsO6We+US8Lt8DOqHApkHpUCCOB6q61W/96ed9iPKiiysWJuMtwoa+97acjONW9hf4wCNI1KP59MSitwOm0sJvse4omPvroc463t0wfHvnw/ntKXvnszaekceAPf/gD//APf8/9q4/4p3/914SUiDFyc7zisDsQUuTqeOS4P/gZbYQw8M/+yT/l93/4A/v9ntoqqZVCSsk9090qoYkv37CHeZwmW2JS1XDH0phFeHx4oNbKbrdDS6M0o9Z9//Z7slYiyv3LH4Emfv3r/4qcH0mxqzrdCFz6LbEuQbxKYess+gNm/4a3mDj3pFMmO2bvj6IPmNrGLgCrEI0LbTuZqxZKsS7KhGnd18Udpfy1SoMQfDGIVwlpGBBxvxqquaAKjBK9nSn+/pzC6e+huMhNm+9eCxaw+mGLYg9AAwe2bSAuYmpUdwqwJIpRTlVx7Yn/o4t+1OEvC4yBTvF93gXYxbHgXZqva/TdkVXxas0q+bB5
C+k2XrrYVffrrxvWbHOL8qwjgI6Fbcvi/e73fcb9L8z10zsIu3vOerJ7v9lyB1ta88O0Wbjsh4aqhUCiqDnUihrcaW17FyeyPSgbmwQz4mte0W09qNr3XYwU7aHd3DTEXYc9uGnrOw8sEYHYbu9qHlzrWphX6yp1DRRtpEG5u4t8/fUnfP2ze26uK/uh2WKmlpCQiaEYpbVekQJIzDQiyTtgUDfWdKhSBdW82Zj0e1hrv+Z2ncVtYWJyby+cMeYXKQbbw95UKSoW9ELvHKFV+0xrObPWszkQF8gZ9NnzCJbHP/nsntubHXhQtDxkrEYrFMW7v/6m7RwEhRA90bVGGIT+KcxC50KysEZP/XMbPI3vuIi+2AyNbI7UsDGy+jnsBWzJZwvukhCHGk1T0EWS3arFz7QY2aU/N52BiNjWum3+omHrsLr+y1hml/eTQrLYVSvTcMvtyzeU1nj1+pVZtSyzBfYifP/ue5vltcbbd++4u79jmkbGlKhaGcPAt99+x9v0ns8+/dT8pGpj2u/57NNPTQAZI2ldV1JKBiMFIaRoqkmwChKDnDoTQUJkiomn85l3372loRw+mlhKtr3H0aikHVv/cCrcvviUz/PMv/m7/wrVhSEUNAaXryWzefBb2xfcm3i2U8n82QsdArBnuM/C+xN+2aDVLoyp58NOEVOxXpoPUhBTqoZI8m1SQ/de0Q4PKFs9HCKDt+FVOn6qWyKoQV2xrCCD/2u/8XWDioY0oN1cT43eJpI2OKeLDdXb3eY4sNn5uouuWBLrq+Cls0+0z2vsffs5pcsGedbqXp5W4/93fQO9s9OAundOp7tad1M2rN1sQkyQaHqU7urL9rtaz9YidE3FRahW+cdf/aFWqls+N7e2ELYu3K0JNmEVlYv+wyCunghVLgZqaNiWBfVqri+/6VYHHWtu2E7t0L/Hh4uidp6037/N+M6Egh1zD8GSlAVHe92mQlmFNRfW1azrizlhM0yNj+4jv/irr/n8Rzuur0B1pdWZwJ4Q9jTxPRPilXzIXlFbQmitE6XtAbFKFKN/u/MBQEyFWqziVoDkynitKBlI23a2IFal+1Nj97ZkaAsXowkjCrSysuaVVjJ5zWi12UorUNoFxqwK+wO8fnNHTNahb9V2jIbqeOdycUrFdSuXYqEnOGk9wfRq3ouYZ2ddtzNoxZ0ttLKu35AJmy8IW5O9dRMAtWYrNqsSh+A26mquER2zCLK52l7eu0FMVfu59sIxiHX9fYrRCzn/tD03NbVdEDoF1pw5L5W7+3t2w4G3Tyvz6cxhv+f29pZpHHn7/h23t/cEAte3N9ze3XGaF4v1pfLw9MSyW3j7/Xd89/33oI0f/egL1jVD7p20TYDTcyOq5mV79iAkQcwufFkY04AMkVaVVk0kdHd37zin3wSB3GzD0fLwSKmNFAM5jdx/9DUffzjxl2//DuoToVVKCpfpvRbvKl2h2gehDjlsvGPtraJV/UZOCUbvU/VEcnkYjOlgr5Fc0BWx4Ktu9TyMiShhc36UGP0GKZDMiLA1YmukcbILjbnFqh8uJHoF1g+NvbfqmG7vbgaBpslfp7dKharFDin2e8TXn/ZOAXpBE4wxhDqdr1g152WhBf8tPWyUP/VlQAJOv70IuwwCuvwZsK5BsAdRFZFiv6f1rif4KzSkmk+W3ae+50HNv14uHaFBR8Ht5S87dX/wJbDRmS0ibGI2Baclj57DTMOi0mchFqCrDx+rr1C1OjhtkaLz5Jt3TKh3rz5Ebo6yS4W+PtIq8+BwZZ8zePetiyd4deFU9WMbqa2a5XKJVMxMc8kmjKsZShNocH8V+fjjI1//7BVvXk0cDgGtMyDYbnjbCRAotueEvjbWlLnqqspE7EQsiyy9mPJEHqKYi7NHMAlsorkYBtM7qHkVdYX16Peipf6zK2IFMDIMUBd3VmjkcmYtZ8rayGVl9eF7WX3u3gOvHXs+/uqGYchQoos8LdkFBI3VthhGhaL+GayTDT6zM0TsojHpNaPBThZsrSOpmOGfFTubM3DXUIj0UssghGAQpCm9TaEfevfa1fTiHbG9IH3uYYnAnv+gQhafK6lLg6WCJnPB1guTrxdzfY79wzLOYfKQiUGY18DrV5+jLfD27Vvevv0Ln7z5hOv9EUR4/+4905gYXr9kHEdEAmNy/zYRlvXM/rDn/vUrlnXh6enJknpe0RTR4MujWiPtjgdUYcmZlJK5PIq5GpZqVtoh2OEoi5naPT0+0RSO04GFwuPjI4pymp9IaWAaJuTGLMhTjCzrwvn/T9afdklyLOt62GPuHhGZWVMPQDeAPZ5L8opcpChS+iLp//8CcZFci+K94h3PsIG9ge6uyowIdzd9eM2jaku9zj4AuqurMiN9MHvtHbzz9P4Hmjeef/0nbvuv5F4pi9xPY5mCF+RrH+1h+nsYYwR95OPwlX6ionjT4+Bxi0P3TXfx5rknoJvJdiDnI3Mh54mDmx8HluXMlDPeKlaCWodhNsf3iqQ512a1pENqNzFKBvpoUTWMxLo4lyAOXysWFW2OwewrNKRZSxxUsWjkEAqi3WpxpmGXEXXN+L2B+Q+ICPY3r2FcmDC0B93qqxkemuDEU3sDzahNbrHrzZqgrq4q/GBEDQ4B4dgbbJKURlfwegkcl+IRcBMXI9vxHob53DH0DqaTNq4GyrsbbwfWh91yXHAtOpsxXG6j+wrMSLOG9toFthB/9h7UXW1abxvepziqN8XFBmmo4bCJq99bo7XGbYfbTedQmeDTxzP/6s9/4E9/vOPhXaakG71vep55Yi7D7iP0BzmHGHGsB6ICB+tVA+Y+zG6GINLAw0nZoNgrEaKm12dkSQmNQ3F8jFTG592lNnbv3NpKCsepXia87fQmN1oVo41anb2re3AXrNXHv+/w+GR8+PgUOibTXu/D/AUFUeUuUWvSwZxMYVEAOZv0T3SRGQZ5zlTs6aR3hl/acXs2wVUa4elwF/MsKf8FPw55DuhI5njK5IY8KbVO1vuVXGY6Cm8aTYJ7CDJ9dLC6vIQWBNFlFGWWw/I+4kwZBdSAc9/8d5eH2ruP31Nr5en+nnf3d8zLHEaYzk8//aT9lDRb7vAqCsT58P4jl9OJ9Xbm97//I+8eH+k9qMSW+fL8la/fvnA+XSiZxG/fvvBv/+2/4bvvvuOHHz6pYqv1SDLqAbd8fX7WhdE7Xju/9S/0rbG3XUE9e2Xdd9Z9Z86F3p3bduXbyzVWWubp4UdSXvj1l8TL+gtzX7mcZ2QlHnMKiIO6knuJiluHyTAuEEYqNo4w+qbs7FDa5lcnNkb8ZI8PBsRXzgEppRjYpdgUyYuSv6KiTIQ4yMbQF95896MVTSmhtLOEpah2jtfAcaABUbH1OMB1eLdeZdsc4fDmryIlc6iWsDQu1CS4KoEfVto6YLVYB90y7DDi8D2sF44DZhzQPeYFYfQXGyQhPrqNriGGbB6eMq+RoVGltaETCdaRSv74mgEWgbuMHS3Yba+v1RkD3+5voCQXt72P1h0dBpjJ6p4hYQv2TRex4PWN9njvMUz3uDh6ZJ2
F6K0NLHlcIGQFNMUKHeygGuIovZ4N4rm0BjS9Glk8VNpubLvLyK/A73848bvffeb3Pz3w/fcz5wXc16OLIxVdPN6D+RZFxvDIVlt6qI/NwIIurMt6VpaCvWHjhIZFoTlO6hvkidzSAckNSEAU0PH/YlaXE7mP0ByZflaPCFIXvXXfd1pTh0Q7U/cbtEqr/N0swh3yBD/99D3FGnmZRKJImhkpc6HKQ6j3A5oxNABO0S5Y0uzEx2Xi41CMgXEM8V7dVMfbfE2FGIy64bjspk7iuJRTCsqwclbkhpuYswoDGwzvsNixMDq06GZr6gH/af4x0inxV2uiodVxxmf9plgaRZXrQstWuPXG48NH3r//nr/89VfAeHp6pHVn71UX12CC4vKqayGDzcaSC9++fOXLtvH+/Qeu1xvPL1/JU2GvskBaloV1U55L2dvOvq5cLnfc3z/gTdDKHgO859uVvjfuTid6LIYpZVrufP3yFW+VeZlpnrCSqdtG6p2v12emnNhCWqmcZWOtTs4XHj78jvYL7Nef6UuhxbA3xy2eUCaC+MEhCBkHamx68xZcb46DtIcbqMWq7McZpcU8BC4HZzzgqGEnPLJms4kmm1zYv2EH86O7H2KcAY81BpVV9snVOXDteHUHg1eL0l4rGybZARzwS6RqJXDs6GYHW6l1Iyf5+zcXE8oQzHH8tOShb0A26b28qbg0wByvw4NtgQ1KLfHe1AuoBX59/RrepujoeuDy8dkMsk/AM3pH6ThQD2uVMbcgdDBxCKcQHZmp+nYb1NHBSuoHRsuwVHBotsUMQviK2YwgTCUmdu/0OmAXCzZlpkcXYT70Fy6aYrT83uWGaVZUkfqwth7VYpQSrktAy90Zc4neYC7O54+Zn378kR9//8Tn7x+ZJ0Gs9D1otfPxXGJBiipuUXx0eS9FWUpKuuBIoQpOCXd5rok+LKqwrv1EQyrbRCdl7ZNc5DJbew3WWHSJeQoXZWLIHbqigYUgOry5s/nGuq1s68667uzVWGOftzaxbjdqDdjHoAaV8/OPd0yzc739jcvdZ0jDyiZz2Kx4UJJHNY3y3zswlUJJS8yCNDMi67I37IASj7lC/LPHvDJ5RLZaYuSPaI6h8+GQ1/grxbpaeI2ZfNtyoACakR8DUgYZR9npDbMpKLF+rPkWDLiE5oPKuUFzlVFMWXxP72Kxof2zNecPf/6Dzpna+NvffsYyPD488vyystXKaT6Rwu+qVhOU1GR0+Xh3z3xaxJ5LiZSU9+21c7mcVBRZ57uP39PdKX2XCdjHjx/JZkoocljrhoz8bodQ9+V2o1iG4txuN16+fGNaMvM841XCjVZ3cpHR17rpIKoRWqI2WArMki98ePodf6vO1y+/8PCghCwMHImRRhaLBT/679LbGDdtZ4TwiEHWoDXhkce8RRUwwSTS0LHF3xnsKNUWyQdtTpRZwbHTEfhDCHXi9A0MviJlsyrrPoDRENVkUnCN9OGbtVB+zxyMo6hyPWUxpnpVBVdytKeqmr3FeKwbPWWsNbzYsRmGD5b3Rkeq0eM51WjraxO99oCmVFF57/TkjByQEbg1Npku5Kj44+c0T1K62jho1S2lYT0RXk3dCr3X4wIZVdJ41bIuiWqYTu/7AYWBHzMaHR6jG4shecBAIyZZ85AddRzy25E4MCi7Hr78wVahJzHUxjwUHfApgXQU+uytBfOsc9Bho0kV/N91KKXiPNwb33/4jo/f3fP5+wsfP9xTSqb7htTw8drjAtzjRk82cgs6lqYoXlpw48E9kXLnsE736VjjKalKHfCOEJwOlGBzVRI6sKYsSM3jMldg2shdUMFzDFnjsNVBr8/HXNWptx18p3aFK/VueG1sN7htjb0lvLXDRqL3xrzApx/ek/PKaXnHELe+ms/oB+6tHbqUNAhJOKXk8CAbBz6IXpuPzjbF/h8XxmDfmRs1ReyvZUq2A3pyU/dgZkeHP2i0675GwFQc4K1BuDsfiZgGA44ddt/q+ARnkaLQej3GAAubdOXcY/LHy1FgHZdcH4WHyDIf33+Pp8L95Z62r7w8v/B4/8hyPsN1xUzBQefpRG/ypkrN2faVbV359be/8XD/yMvLC1MucD5TcuaXX/5GKYmnp3fUKpp+aTGE8ercthuVRkYq49ort+erHtJNWRGeOnuvbOuN+XIiAdu2cV1v3LadKRnZFaazb9uBgZvDbd8wZOeLQ5pPPH74kedfM7eXv5EuHfIWUnpXpWed7jvFRWlL6PCJOv+4IICgSIbIjv6K8WFgYTQYGyq/uWR6fJVw8lcnHg+Ts2RjViC4x+PTO7IfxqEXFfmYlzST4liMiYgjCc+mFFbeHZmI6cCcKGbsISQjqt2/w4ZHBxMMDIZ6nFcoZ8RKpuahARseMv1Qy4ZCIyY1NgK+sOaMOE1pJtJxEL+CvqMzULVWXcwNvU4NReXKUPVZGWGXEAeAg0UYTVzNUqLS45LrOpwDMzagtpjvmI4x7yJB9xah9z0d70UD+eHJVAg9OwTv/qCn4micqv9O/qbifHMBuEtt7G+6CMGDcXgavHuAn354x+Wh8N277/nup09cTp1cwCxR951+0zMoc8GRDX+PA3cyp0ZXnEzDakGjNT7zIHkYwqQhbFiaum4GC1BVIT1EqgGdGYmcZprXIzBHC1+HrnsJOxatg2E+N+BV3Z8N76Ke9964bcqJkKW79kfzztYa+96oW1ez1PWsWjzT3/3pHU+PZywHgcM69MxIhFSuiUNqKkqR0aNo6ENVbcd617p7te4WbDnckOMQV2U1ViBmWUzMCFqK4zqKweick/aJ4XiNcyKMUA3TzCTmt+N19L7D6PhwkhWhCrHuX4kfdlgVtbiBu0tzYqa0v3TAtVkeVcC+VlK6cPfwHvdEWSa++/QDlo3WnSkV8jnTWmW93cgnnTy1qQu6O58hZ+7PF3KW/QpZ+eUpJdbbC//hL//Cf//f/Z8PoWT59vIifm9v7HWnm7ZNb43ujWmWAntvFaqzbjeyZWrv9K2y9oZlDXXmoipla1UOq7zy/3vvfP3tCx3n/cO9GEJupLzw7uMf2Nf3fH35F3J/Zp4Dqw6eyrau+JSYy4nexbRIcLBeDgjBB/74yo6xOAtC5BqHzJvFOBaR9+gq9Hc9huWiPrZxNgrKoOFpeL4Pv5uJPMQ63sIYbBJdkkjVA5KH6ZhrhNR9xZlIOVOQb00mK37RoRJK5NFVJdPhf1T5FplMGnYlDxOGblhWGpbFieDsssnIhHHdoDx2fRbDttkRTNMTiueyI8Rey1/4vSItg2XTdwhOtj5vi3lJlzK57QGXDKZaQ1oIsTzcd4a6QytXLrdiZ+lzrTgpDsBBVW10evWDodN6FYMt2FaiGedXNkoPGvKxGBrVBZMJNnp9tG0gmV5lw9GFpV9O8O7pex7f3XF//475BJ8/3fPhLlFKD9w6jP663lVJE/2UMTbmJCPJ3bMYWEYATnqWw1A0GVQfCuq4nEih9wgagQsXT7GORYXVTK277K97CDdJRuZE79eYhxGJa9KDYGL96xKMgyyeRUfq825Obz
trXWMQPdPbFesL3hJtq/QVtqrLorpsO6rcOnj3vvD580dyjss5Dm0SJJ910BIdvI2s7RZ6H331PE2CrqIY0E7NkLJgZl7xfnPBoRqEC6IW1XXSGRNwkh9IRdbFEdXCsHGJlzhOM8jzcQFzzLBGhGrA5PH1oh5znIejqxipiAfDiRBuZqNE1PIwOQTCVt1IqbCvnbV9BYPTcsIxvn37yu228eHdO3LKPD4+Mqx12q755TIrhbPc3fPzX3/hbjmTe2cqYuf98P1nllToBMzeu1xg3ZKoerWyj57bna1tcThm+rbpmLlt/PLtG9++fKHhvP/wgZnpEGINnxtLJsvk1nSQ9MA7d4nxSgnGSJnBE8tlIk8Lv/ztP0F95jwHZc0b82k+fEz+zn3VEhxBIWPgOSq90dOl44AfoTNqB5uYCLyZqBHf483fHcK15MrY1WzAXnUALhtuSwPeiIS8Lv/6lPyAMMYy8YDUMHnYTAP/POYrKYzTJlod1D1dEJhjk2y/1YWMw7sf/GsPF1H3qHR9kAHFEsnuYIWeJ6j7MaytMajNNoa2jVT14lrASDUU91bDsKwN+Oc1+0Jv5rWTa0c++fh89L17b/r83nRDakBGJRbPehgHdqe9yWMYlOhOE5Tmk6rhY/CuL229H/qbo1voweJq0hDkFDBLViFoCdIEpwmW0z2X5YlUCvd393z88MDjw5llViVfzlUtfWu0pmeckkRmRuD6KZO6kfoMofKdejmKk1q3WN+va8XMGD69uuECVj0gQel83I9lLsbeYJalTKNJgJXkJwZyF039FToxg5Iz1Rvdx5AUzKu+XxQq1Z3adnrb6FV7pdXGvlWum3Pds0KR2k6rTt3bcbniUGb4w59+R5kDck1ZhQKESr4fIrIcCWmW5NQ6XFRLnkW9jY596IcCSwZULJm/brqDPRVV+QAfxh72qPT/zgXguCiVBT1YYm5OEioUX29xbjRZfFiLbkHfe3hyha776MxydBYN6bRSvBBzdfOEVuLV6mZsK+Pz737PtJzpaY46xzGF7pGKyB+3b9+YzyeWeaabs24ryRLLMi41OM+LxIhw+MClkpnOC23fIHKvi+N8eX5mqxslvWLDv/32N67boJplylRIZry8vPD15RupJC65MGdxq+u+YSkzl0AWzfGswI8MMZiWWG9KYqTs2watUJaF1hM2Xfjw3R/Zb3+h9i+YqdpMAT2JAmfHzd2D5aIK8XV3je5BqNXrhw6qp9Pxe8Ks3waUB5aERqCvVtFDpCTtTtQUI4NgZB/7YJNo8aslFqYMxx7X9zcgG6dyVrB8i2AhfaXgk9zllxUMEg9a7EjdS0H7s+q06GKIKj2+jaoWlzvsAA7UcYBVo7tmIMRdFvaD8b5SPIVdz9MTqWe818DkDe8jI0Hc70HVGwwS+ewMm5TR5Y1f4WNkiyo6Xk/2t7jtwI+7t4A1BninFh23qNBG8p7mPEaPChGKQS6iRU8LpDJxf35kLmemaWJeFh4uJ07nE95X0jmTceZFVWvOUwxrFSaUbZNPUFb/OVlmD/aVMnq0VvM0SXjZghqdnMFCs5xYAvRLaaFWBSulsK3ukVuRXLDkwNVTdJz9uJy1t+Tf1eJyMmHg8bwL0NOwvHBaZGjTpFbGlEhmHtOzN/kaTTmz7Ps1LDYQ06cp36D3zG27cV0727axrZXbGzvw3vS/P/zpkffvZ3LR55IGFbrrnKguSx1L6myyuQqYgF7MiqI5XRfaUEdbSurYfAQrRdcYl4aCw4hnBfguiCg2nMSCY43m6HKJIsdprtx3E3aI5Uk6n0Mr0VVdgBhl8YwPqC8KTQlKRwiSirJBLhjEDMl5LDoLwXfqZhNt32kt8XT3kZwLde9K4jNj23X5318mSjHOdxe8NWrvTDnx+PSEu7PtO8sycds2Lnd3MesYGKqML5f5xLfrC2XbmeeZsq4rv337G3/9yy98+vSJaZlZX6789W9/02R7Klwu99yuV+Z5YSoTD/ePEMHadGf3nbXu1Ns37h/vyFl0sLa+4CmR5oUZ43w5s6RMngt13yMQQwdYyrqxU5q43H+i1gv7/lfq/ky2FVkajHs4YAELsVy3v+skJLsPq2uhJa8flo4msLchL2LQHK0k4oI7Hi2qPnwfALu9aWithS5Ah6D1hPuGXHc83FvHqw61cYrZQBs8/fGqYliF0fMIXeqH0dooewaX/7jcTBefM2Yy/bWDOBqvFkQJ8cFb+BoNH3tc3PghMtL0J0LRTdBPig6McfSYIhRH2lyiBUlB78mCRZXReng91HbGdYhNYWBndCvxWQz20ui2+ut7ssJhXuCqPHPWtkpFrJdpSkz5wjJPnM8X8rRwupw5LQvzbOqU2LmcZ3IpCtUKG5hSXLBYDtFfQGHFdtkwHJ2k+OQlZR2iaSIVQQaK6TVI6kQL4WTrhBW0Tg1LRrJBdR1alxwmc3rGcvbV5SzmXSHhJA/K9qAQWBIZI0X5SsK8UiwHJt8pMUzto0ghitRjL3XcGiPvAlOnsDVBzG3vBPCuOYRzUKJ77/TNue4uB1K5z1BdncSHj4kffvyApRuWE6kvJGs0M4hZT0qFYm/gZEMEki6RbEmdxCz4rAuOJrpeZ6AeCcZB70CqYS4Zg2vAbA7xJyHqHGs+lmUUnDlLMLm1FZLmGjmNLv0VzRAdekzEtG93I1oVXQx9APzeYcxeAt/0gGCThz+YpdBUIbjO1RnVXkk+M5U7fvvrV369vvD5+w9M0wWjc3d3Zr/deH5+xrszTRPnktnrEA4IiVm7prZVyk9yOGxI5GpBo21svXGaZ0qrlZwLHz5/R+2d29evakVOF5o7JVR6OHjrB2eaiCncu8J7TtOsTR6tn1NpyVimwOot8XB/r5VZd2kayhTYsPQNz88v3Lb1GICWpI1d8o26fQWux6GullOwxKAcwuCMpzjs41COm92DlfPG6Z2hpNSFF9Wzj0N/DP2MkYMsx8ZxgHn4ycRGi2vMKJol9DCVS8SsQZRDb2OBRVd0CG50QSRL4pWnNig2sWmDZBBlv0eoEGYRWKLWdDwP9x6VmBhGsgqIA8rFJKruB247VGopcG9Vsc7A+t508LSo0HE7IDnDY+FHpkIdn4HjvZGz6aBzC+sSdRkKuJmwVqkesKVHhkBvGvYj2mBOcDffczrdkXLDUud8ucdm593DIzkZS84sJZOmzDTJGmU+RTJhbEb3otTE+DwtaCfJRjvn9CxIw9tJGyyhw4KKuWFZ+e8Jk/jQtH5SVMOe1TXQ/FXl3+Twq+cVpAVPVGRZnyKTgYA7LFlc4qEjH064puGyxTrIqcSf62A9XE5t+P8Q8EcnRxfIG1adFrwfRAXlLDda0Etb20NtPbK3NfvY286+V3pL7N6gvYrmAoGjzPCv/uHPnC9JB70VXI6NFM+i59IoZToOaIarKx2yk11UUnzQULUlXhmMY3CfjgOcQ2Py6jbgr1+OTjCjxd8ZB/nYg3tTp7t7hS5tiMecZxR1PQ5c769Q81hThyTJ39qGj3Mn/tDUBVnUnx509uRIaNd0cvW20VpjOt9xOt2x7
sb1+sL1+cT9hztKyry8POPu/M//y//E3f0j/8U//JcY8PztG+TM+XLRuqyDIj2SOO2A5L7dbnKOPV9YppnaVVwI6/PMdbspM7VkTucL15crt/VGyVJip1axHp6aZvSkVlDry5nnCLhv4iDfne8Y2a5OV1fhCYpcZPfWDhO9jlNRMtLeG8USu2XOlyfuHk68fPtnntd/YqGSqfq5Il+q9je1TNaDEXAshjeCNnr8DYsLYUAqJbDNqLbxWFLp/+fvc2CdesUhLgub4c7AD2Vb0q2Ea7lojZ5fOZYHNyNa4kxQKtEQS5+d4BzRX6OK84Yf7qaAl6NVVpW40/t8HOgWNMfuEUnag6kVdMfh4Ck7c/1cT10lIKGBGP3yuA3Hf3mToLHFu4nXpz9Pol2GwXmj0msSPGTauAO4bb0ilb2YUefLA3eXRy6XO7xWis0SVCbnfJ5ZlsJyKpznGXKjlIylRk4TS5nAb9EVlOAvNKyMrA5dyhaXui7pznC4TWHtUdNwJJImQGltFeJSSKHitSh5Mx7sO+JzDL1PXAaDHl2SSZwYgrHhBGGGiqlmWOuqVlG+89gfObqYYR6nkJ/Ovt24Oz9EvKZejxqZgbEL+lBWhkoZdSuJXgIm6U5tldcLw+hVh2atY0gcQ2Wg+0yvnVo39mrszWm3nXVXF9FDM2IG/8Wf7rncd1LtpKUIKgzjSEfve/gnWZbjgIwxO9lrwEK6nI+4YPOjWPMRUWzj2bzhbqNhk3eZH+riifAyNJMiBv9as64LyhK5d/beD6KE4+RgfHVHhJkWlFf8KGZAc7DUMnFCHtD1gK9fKy79vBZq897DvQGx2Wo2tSVtp7fG+w9/5PLwHaU2/nw+sddGj4Lt+fnKh/fvuZzv+V//l/+VKWf+m3/9X2tPTTNLTmJSIfbYIB6DhRWRR5fUaHuF5cwyFcq6r/Rd6XPNVXVsVYvUcmKxSQcdHsrDuFVsfECj5XRqbYe97gAjk+VQ0boGaC66G+ZMcRBfby8kjI8fPlAfGt++Rh5ryUxTpqWMzR/Jm9Nsxfsz2W6YbSTCosMBK68eLlFh1K4KLJnu/lcDLTs20zGmdqmYNYA3VfJwuDwmjub+77oMece0qCDCL8gmUfA6UZkRcNhrRIoUpKPKG0qKMYGMA0gh4HhXJsFR97menRuiO3oY1HkS0ygyO2RhHWykPjqp40cwKsMhMnCEk+K8igb1hhn3AD6M1XKUP40K6jqsh5V4hz7H0ztR94l1vakazTBNmcvFuJweyHGC56yQmIf7hZQaywR353fklLhVAdzn08RcMmVKTLORpgkzxYamVME601SYyiSLjOzAFDDRwNn1cxTiguzhsUO8qCKiH/TbXCSqNCuk7OCJHkaOltMxBB+CqTE3SwFLjdnZq7+QiAmpm+BMG9dvQA1mgiRMQ+rcGy2p2s3I3t6i4yrdmM932ofdwTJ5aGMY9hRR+R8UUMOtYEHBpodNRRyEdd/BMq1vbK3R2ip4l1eLEK83rrcXbldn2xNtN/bW2W5O1QiLkVn9/Y+fmLNjWTO6lGK+ZyCWwGui4jB/TPHvzbKAWxsMry4Yr0WuDQMZC88kGys7ipxhnJkTuOi6NbLOZX+xSe2dA4pK0XMkOHJE9CMZyuoxixhkjXHYq+g7rhuOHeXRPCZigP72jIqWo8fBnexg6tmYvRnBaDvx/Xc/qqDImdqcL7/9wvmsDJqP79/jDn/+wx95/vJVoujemKdFc54W84+kHuo4x47n27lcLtyd71jXVeFEdxdKXTVP0CGoYG0N9YMKZom+7eSc8CQu7sCW4xkcdrYe0E/AcHF59MBnxGMvlkL70OmWyGac5jlUopnzaWJaZkZA+O7Ovq+0dObu3R+xvtG2r9T1V7p/pdiL2D8J3IeOIuYJ9ANG0EYcmKQGZm46G8c0QtzkwI69knsOe47YGG8cVV6ZOiHoYbSS8r5JxykfLJJgYuh1RKuKqlipr6Xe1qGiCyjXIZwLKCleB/6qXMYTjRTsE4s7vB2bZwzftC5HVxewg6lr8dArDIvxpL43PrNhlTLs08Pm2HVoGzp8TOcTy+mMMVPmmWwz1+s3MoXujUcW5tMTy7IwLwuX04W672xrY1t3cilMBaaky2CanNNJOoEyJRSrq9SHnBFnvzqpaD4wMPrXLA/ikiTgm1cmh7uRcyGc+GINjAIgkUsMRQM/7sMcDoXUpFbxoyC1UdzGZapLowVMm2Po/mo7k6KZjY6xe8AvcUlbwKQxtHRLb6jHShNURGaOz0gak6PijrU+WGRmOYRlb8sDfe4jeGiY3m11EzEhqLted/W5oag1T7Ra2RrszVhrYl2N7bZx22Gv+h8d7i7w5z99YpkFZeQUAxAbe7AdORCxq4DQKplFUmOWk26rDIYeXbOBFmvbjvc8CrExNA4kISBkC1ppa1VdcFtDlSzvLyySGpPRI0Gyth4miJM0EqaO1oKG1i0g1oMZMi7lkbXBUUzr5cclcTTlpvXT7fBdC6AjEunkjuuemMsdeb6wrht5mrndrjzfVva9jhwx3OD8cMf/8H/7v9J6Z123A57beo0LUDfe6JQ9oHOAl5cXci6knDjZzO22Umq0o+5ObaqGW1NjVmvjdruy98ayFO4u9/TaXqsdH+rpcnQUBLbVe1M851T0Yecso7c4MI0c+gZlMzTPeBWG3tlpDdbaKLlgDktSAoX3mXn+wHJ65PbyC9vtF+Z8o6eNbBL7eRtPOT64sK8YD3FY8Ro6RMYmUyuYtVFTonslDWaQx4JwsUj60GjEZmxYXBxx6BP02fgonAFt/f0Fiw2DOrQBpZbCaXSX8+rAOMdB37FDKOTjwCYftEggZih24NadjnvR4eVO94F1G8cO0wSUmpJYFwMopWExAExvTAdLgoe7zLKceP/4I+WyMOeF2ja2KmZWsQ/kbEzTBN5oe6PHxVt32cuv+29cr7/x7t1nTndnzlPicpkoE6Tsyls3aLWq2q8rfc+kqUCLDsSI9jkuxuzYgIwM6TVSoZiYRsViHaaCEZDGAKwT5KGyB7C3sGilMGmAHkwlPSHZ6udxAXQ/ig+IosKGgXsw1UYHHB2ulOpGTyG8dFOhFoeh/m/ARyUuABU2KQqPg7vP6GATYuu8WqhX90PIOAqHve7qILpMPcV+itXqw98K9q3Re6XtTt3k9bNtcNuddu20ncMh9h/+8InLZSEXe4WlR3GT4tIJ6wnjdYCsGcKrIE0wQFKxx8EdgTyKUMDD+Te2/euYRVohFbINeo2heEb+Yzk83KL4zWMg73EJgfAqCyKG4yk40v01FsBiL2YfBeUIoDpOGv1q/Zij6HwIEaCpWJEzhEUxU9V5d1Hl3336kefnjZf1Nz5++Mi63jjNhXXd2HMVQ2yvPD4+MF8Kf/3r3/DeWU7nyAPqTMuE9cZtr5znmdZEjy1lwl3ixdHJTNOM204Z2oE9PJtaG7hlSF1SLJA9UriycL1RpeWICZSDI2z7ymQTlrXwZvewnvXjAx4CJ70gaSm8tuMDEPxpnCe9udY6KU/cnp+5tp37uxOnvLDcfaYs
F+rtN7x+o6cryVZIHbdNwrU0uEVHL6r/CmhE2dODSpuDJSToSXkRcUHQSJ6PTaN1pWHXkXrm4EQbf1QK6pB0Ecvea7wKXazRRtMk349NMl5yD5zZj99VWdBj0CyyXAqlpwEFKXClm+iaxMWlNgbbVSIsF1NKFhBhJ+JGagQLCj2PPNLZYJkL58t33J0v3J2M909nznMhzYlGhr3jwyqCsTmjo8KoCbZNMFvJji2Jd49n3r87syxnLsvEnAxnJ7ljTMdFLOM36U88uQSKOVES6grigh40Ya031f+jWLKUKHk6KicdVmL/qGpVda7LwcBl2CiHdkEVI273tQN9hSDHMYfpkCm82oi4yztHNg5xL+HkZNSRR2FgvYvSPD7vbNCChx8GipZVreajI3nVEoyD6S0D53Xdd/lR6SVSWxcbaa9s7ab33jwEeGHZHR3MtqnxqjtsW2fbKvuaWW/Kj9id8CyDHz6feffhkSl5MIi0gDJaZ6/hV6OyPp4cSonrIhjQ9HoDqsWCDIAswD3qQdU6duS8W54ORKFEp6FnWeKcuh3arVSmgGvDC83FcGvt1d9shLKp04sXmsYP59iZ/bDcL9F1j52gwjEOGn0fR3O4gBYPpOOo8HVW0sWOm6Y77u8/UJZC8875fObh8oDHnGloiJo3zAtfvvzKdV35/Okzl9MD1+sXxcvmQkmJL1++cj6dj/WhDqWwhEC4emffd0o7Lgd55UgEpIN1nibuzhf2tgMxczDXYh0Qhg+83ckpk9MkFkfKnJYFJ+Tn3Uk5awgU1XGKG3NJhX2CtmmC76ZBoadgfJTMvu+8XFfKpJ/RcIXvcGK5nLB2z+3lV3q/4XaNSyl4yEOxHPOCFp8vqYSx2RhnBxsnBHKJEbAzPmnD0mAxvLkcomPRrCAzDAYP+umBWx6f/LGqhl+MDwm+a2wuJs6kuNSUj042hbOLsg6FsY6CS7YKGiZL06EDQb+f4sUo3/iouD3EPv2V1dRMOoUcGyGXifuHe949vufh4YHTvHCaO96eOU1wWgiqZ4vZdRdmPBVKliWAVzG1bNI6cSCXkw4KX7T2sowLJ5MRXM7OW+ddwQCd+XzHlEUqSGV8QFVYPBadlWCidMyKnGGQqANWHZ2sEYSVpZTCO8njx5kGqq2J2gyUNNTUAyaJCtY9zOdi8wc41WJG8cqYIzrRqnXurzOxsZbk7uLhKuxxj+l7d9MhlcZliS4BVe9xIcUF1poOTLfRtYQK2KGyyYm4KwZgb/pvj4AlGc8Vuct6w72A72zN6A32NbGucKsbrfXImdGjf/gAnz5/pOSNUk7yMYislRSq6AGp6vfkrZWS+p3uYZOSMo6KROU6DMPIcfAGePfmGR4Qb8zeHAvyyDi8DXKmvsgfS5oUpxQNys1jKG2ie/veYES5+nEf4L0eP+u4NA6q43BGHhfC+A+L/R6dnMXF43ExGoLliA4/GazQ2CjLR5a79/z27SsXu7BM0mp8fX7hfLowl8Tl/l7rp8qW/qeffs/15YUyaT76eHdP2yvbfmOZF6ZJOROn00THucsXXtaVdVea5PPzN07LTOlxUFxfXsg5Mc8n9q50Bit2eH50F8vjsFCOittM1XrfGjmosa2uEX5iWGukUsKWWRtFjolvPulsTGnSAq77kc2A77pzHUpJPD49kIrwsuSNjUozV+1cHpnOC95Xtu03OaV2mZ6V6BYIbDJHpKaPgzognrfLz4Ix0IFkri7CasAO0U2R43DW7CMxMbygXlfU+EE9cNFR1UTF5zIKS52oCMIQL2S/fUhwB9RkE0Hkwm1icF9eX30JeKLQ2hDV6UAbswflOwSUNNTM0uYCMzV1Wp/Ibnx8fOJ0nznPzrIkzBpmN4yOFP49vGfkB51I9FCjWnJIYg15RMJaN0oRppxzxi2FMCl0HlZUhfV6YMDKcGhMedYGy8oSUPiQK540BrspjQH0a89mXdV6CU3OmE8d7r0mMgMWWhgLY+ex583prcnvKxd0jde4aELxflTroU3hGEBJUeuurqXr0N9uN07zBbPw6kLwHKbX07vTkzLZfazdJl+1ASklolNyzZh6rBES9B4214wGWsWgt05jk4Fk7dASdR8IQhR/ZM35WgjpXNky+97xltl3Y9+d2jL7tnOtlW036u48XODPv/sd53OinMobdfTY8prEZR9eXPrdjB9HQrJRhO2vl4XDYEQpMiHIEa61q0FxCtimMMSAan3GpQxTLqy3Z5E7bOTI2LE3e0CwAu9gZ6eEQWLvUoArgTgdotwee7kTs64xDyG2WB9dnUoH0ri8wpeqh5B2DA+IwB9LNF/Y2jfuP3zkw3e/43mr0TWqK57neK954uvXryzLQsmFf/nnf+H+4Z7L/YW6Na7XK08PD/y2buRknJaJeVrYopsqmJTy28bd46PM/RD5oLTArk/ziSNdbNyEcdCNi8HiLHWv0daLPZG6HmprwvbztKgSchc8ZX7I0G3cpm5yVcQPoVGxxF4m5uysu/IsPAuzLMlEMWuNvu/YlFmmk6wQHJobzTI2XUhmTDyw377R+8rar+Rc9XOTBpDJjdZXjHbQ0tyJKq4clZ+GTgbWwxo4M1hSw+8lW8jzD7NADnaDYKkR1DOGaW+6k3gurzYSTVCS66AZUY1HjKtr+cr86zDBOD4vXap+HGCjmpEBQFwwIU6T4FRVWu8LqSysNbHdZrab6qFlAdLGXDItgaeGpaZ8gZyHTOMwELQogFIAYWO3SDVrcpaNQ9mTNjdesaCWYlndaikMvUcpQ22sAyMNWCiqs7C747Wq07PBCopqrSo8opoWqy0w7oM1UyHWuNT143N3EoWcBhH14INBk2ZIKls/sHt91OrKsk4EugmuTC618eVyL6eC1onAgWO9EYentDL67171zAdRwuJStLggQK/7gDIOuFP/6E057Ldtjffbua1fwRex+nzwuoYSO5LQGBdEo/bCvkq1e/xva/Sts2+dkuH3f/yR5aEwn2bZ2durcE9nC2IaDWjszfo95hVwoBljX77dO4fwz4Oi7lGtx7zP4mc5pqFz71jOeJKg8/byjVpXcl4op7vYz6F/6CPxMQqKcZCLMRDwrRAEQc/Rt3ggAqSYAwpnyJHR0i295pmMWYyj72Ue8K6rUIh1XfcaHfAd//iff+X77595fHp/MLF++ctfOC0X7h4utN74T//4n/n47gNPT094NvZ9Z9oLuRROKbHXxmmeWNcVetCaa6NGxvdcCtPTk5w3eufu7oE5J8WXtt5Js/KVvXdKybTWaLWRpyLxV7TQ+hwnbeCmW82yUaaZuu/kksixySxJzVtrpddKKpnzfOaw1DA7WvFf//oLuRTePT6psxACcHz0ZkDb2NcbnqCUkyxE0qB/yg63e4dypu47O84y31Hbyq2uFBrZdtx2YMetQmRWKLzE3izckWOgV/CaVDWq0GAjjQM9ZhMpBaT2plt4HSa+Yo3aCEFDA14nD/rHqCAPLDb+TDx5vebxszm+c+Otz8shp0VnZu/j/QUN110XIhe2NvP1m/PtW+fn335lygvn0jmdEg9P08H
m0BC5YyZr5tFJjTJQRnBR1THIHDqMZd+cSFkiuSGG7MOmPUGiiklyUDfr0ZIL0lQ2MS060niurXZKES1XTpranCk2rtSk+wFh/t0vRzoMH1YiAy7wEVdByo51redhBW9jbUJQCj38r/Qh5xhuSnAliG1w6XtXnnYKOCgFpNFr/bsMdgtvqoQIJRaQkqjXrwcpKeG9BoQb5ULsM29NKZNbpe6rXmcH7yXWjN5763on3uWvJgxeGojaJ/a10Vqh1s5tX7mtO3tztgbbDb7/PHO660y2M5WJ2hplnhhEDelFBkusHXvj7a9ByRwme8deiULjwOltFJuibh6QnavwJL6H3l7iNYxo5vT4jpfffuZy945cigqELjshdWiFut9oTf5uDaA3JkqcCq8Q43j+hjoCFT8RntXfbEd/cwl6wIDk0C3ByGd3hzqILZE1cnn4ifXXwq+//o337z/SWqXkiXeP73GDeZl5/vaNn374UXTx9caP33+i9UqtYn72Lvjo/v6RSynsXbB+Krq8koVBqInBmnIm1cZeK6W5x2AoPjD7+4OsbvVog4eQrAeMKwvgnZSXMJ+Ddd+Y8xJ0vMS+Xdm2ldY7c58o0/zaVnXntq2cTice3j3qIWXDg2WVDltt8aqnaSKVjMdHZSZVq1S+Pfjqem0tZ6bTmeadl+tO6ifKaaK2DesrnRuqCRdhyQYyC2yYryisJtSPyCjtsAGJrkEKZx0ig4ky2Baj03jF1eJ49iQskrAQD0jLo6PTQgmIISql4+/HsBMb7Xf8HC84K05VReMyTfTegl4XGCc60Ef9rQCWhS/Xws9/7VzXgvvMdV259Y18n0nTmZQGnIY6t6LNKhFcp5lRLIelgIVWQH44aueRcZ7FLMqTGE4W7Jmg/hrRhkTbruU4WC/657bvTNNEpSFBui5lKYktoLQY7Ae0N2YELcSbpBo0y7ikvEWXqAG/GFFG7shN1Ry6XDLNTWSILhGiBTRwWLw0qeK3vpOd6J4yqcyC5tTHUvv+Wm2mV6aT7F/6URb0FoPf1A5zy1QHdqP52dvOdLDuEi4Yq+/ctg1vG60Z3gutNzpNRYO3sHswmlfleHStr96GxXzB3ajNuO6Jda283GTqt95g22G5wPv3T8wz5EVzuWl6a3Wjf+qwb7rIU0QbWX89VCzcB0ZvENoB3HnZnqlrZV4uTNPMaxoKoRsinu/rGSZNjFG7TBinCS7LmeXDT+ScxSByU2had4rN7L3FHGSwpVQQthiEp+jqx0UR7wyziAkda+gwFXy97EZH5EeBM+x09N8WBJMeM0K3xufPf+b/9F/9A80SrQvWrW2Hogv9+fkZsyR3DG9ctxvcjMuygMN6XXVZuHO7Xum9kuaJ83xiP14/7OvKdDpxwMa9cdt3yUuzGXtt7F5ZlrO6iSkgl7g86lZpdOblRN9j6p8S2VUVtqZDPPcUNh4ZijBAFphSxsInZ/x6WZ/ZrisYXC539CbbZxiCJLEialUFVXvDXGKpkd0gVxLHvOigDdqjpQbJ+fbyzLqt3N3d0VKme6FQqHUCu9dB2XeSVyztWKoYRf47g7veq95nDDs9zPucuFMDHx3cduCNStkZFuYWsIRecUQoju7kuJw1WFZv0d/YCFgcqlElxWGs76Ot0TCIjucQ6SE9w0CdbEAxUf/ue+I//Icv/PJ8ipyFFU8y2/v2Uvnb31744dOCm37Ky/rCwzSRs5LfUhxuNarYPgCX+P0UzI1uHMK8g43jFoK1RA5IyXsc2kmJf6MjGKFQGpJWHOUop1TEcvIehngJzxmvG8yhcUD0xnRYy0eV7ftR4Q/bFUF64/B5FTU5Ch3qpckbKREMIICJsOALmLTR26rFkQv5PKuCtlH9B16epeodJItOV2dc/fVy/7tfHhd/0QGlOite33i9QVH1xBbV8F6fNcvv4tsLQ0+Y76QuFp+MElNU+abwoOjeWtupW1Z+9fWZ221n35y2wrrB0xP88fd/4unpjOU9BGGD1fT6GR7vIvbC4XcUzr0cRUHwxRxKVvzu1laev/6McuUTre0s8/nNOo/5TEcuuP46VDZ36JXb9Zkpf9CBmsMzIQR55rIWqSVR6yryRRCbc1ooGVH6Uwy0fRgrOmPJDCgQz7rzDjubHkVPnAmmy731UQhZoBP2d89or3A6v+P9h98zzU+KBg733uqNfV8p80w3ojMNNlfJx3mSsvHt+SqYNmX+P//mf+fXX3/lT3/6PZ9/+glLhb1WQU3LCdxpe6N6ZZ5ncsoUA3oSZ6bXhk8S/Hh/hYO6Gc+3K8ukampgg8RANFuConZ4rTslSxHrOL1MzDYdf2+vO3Mq8ksKvcW+rfTTCbdK9sEld10azXl5vnI6n5jyxO57nDiqtOUwK4xy3XamaJf7gCtq5e7unmU50VvF8ol9v+Gej2FjsxPdd1rb8LoyJcD28LwXNbKnhvU9WtywOD5aSA+MNTMS84aHvQe7pkfokRMvPwbUo9c4AlfGKjHiMFeV5dGZSfyUB9R/wEwamkXwTrTjmhoWUWu9jpoLZwKUY/DbC/zLrztrK8yzIMLalVte985//sff+Ic/fuLpITNnJ9mJ3FTJCKdXl2WDOXQ0LcYetFK38MU1AnbSn6ehW6Ch+MqsxLymCn5oatxHxoGex23v0Cu9uWjSOVHbrgtkclJ12V73F/I86fPxRoQYS1mcpVVokRIoTZ22afMt3kOi106J19wdPKzTh3NuM4XhWFcWNNsmwWnv9JRIfWeeG6lktr5zzBxAnU5QXVtrB6QHyOzyuKTks6WISyOlSHrrwwMr1pk3tm1j3TcSxrZttLrFYH06qtpEZnclT1ac1Odw9fVDpSv+0Zl1h/UK61657p3rtnGrK/vu7A3u7uAf/vhH7h5m8iThVk75jXlmivmA1uprEJD2rDyOGhgULwxG6YDSPEnwl1rmfP6e3l7Yt2f2DWqHZQlfLYzsKTrGiHd1jq4sp4mH+w/xezW6tOi03eU91VemYDi11LHqbwgWexzeCYJNJnq9sY+ayyq4YHsL7dFrJPH49zgvvB8ec8nGY8oKvMPJTdENy+UnyukdmXxY+mPQ15VlnvEuMalYiIWXr1/ZauX+dIkLxXi6f2DvnX/56y/8u//4H3BvfNo+c32+cXd3x75vZINUskgVTefJZIVG10zCvTNPM+SJ7oQjaxUensU8KWbMi1qRktS+mZnw5XFmt8ZpOsVtKr1F6nIKHW6vEeiKJTifzky5UErRbdpgbRut7pRpprXObV3lwRN52b020qLLofrOtq7Kqs2JaZoB4+uXX9nayt35wuX+QXGdBEcbaS4GWOh4VDsTrVb2zejzgHBEwzTrzMnIycG3WBz70QU4TTGhYzH0Gu6qWlAex/hgMKRQUBKOnz4qwgHVvEGYRp96+NR4BKTEpTIM/xqR5jVeQ1TbxoRygEcXNF6jnCb7gL/SGH5mTsuJbXVBFdeVn3++8d139/S2UnKnTYXmlVYrc6kD+dCizwatU22PziL2hBXBZfm18tOZPaqteI6A7D7iovv/67gkytx3Y1s36toEQ6
ZMT5263njZvnB/d0daFqztQbAIJT3Cny24+8dZxoAVYn36awfSkgf5LWl28QY+SICNgKU6NCcOXmh1p5tgLnUGAQW53ncyFV09dBcjh2AUaCBSBkP5H/5drXVRM5UiQq+Nfb+x7+thL37dN7wNBL2jUCd1PL02etsCiivUfoNYk94ztI1kJ27bjX03amu0ZtStUbfGtjXqDucFfv/nz5zvS+wTWaErNz6YaRZ08vGcjQgQgurp6BaNxOZ7ONjqIsliRGDJKEVWK3s9M5+DZtxl9TPmFGKBZSy8lnDkSRYMHm9NDhjDhSEOfXfZqxQv1IgHLTZTy4Z4V4NFpSXZu2iyyiaJhdk7mgPF+fK6qjR7GFv6tQ4gpUFT1koaOdfa547NZz5+/CO3286UXp+v187Pv/zMD59/ZPeON2fHmS1z//gO6zuDBbbXyrLMlJ759vUbpRSW0yNPD0+URZnoy7KE3brTc+bXL3+TOPjdO7ZtpzhoQJ0TacqBqXa2Ve3y6XzBEBzkMRuobafk6RiIFJPwrtJZor27Xa+UUignVZ5Hi5lkV9AQm2VeFqzqNVRvbOsGydjjcrAhoApZ/jSVKIg0/W+14sXJNfKcW2eaC30XG6RXpeRN84TnArsqQ6ZJHYfXoFganjP5dBGdMPZkc4e2s7qGtSWfsNJIaWIK/NxM7Bzxm+uBTdph6aFK+rWSED3UmnoB8xS4skqSPrBWT3R2humcll0Dz6QY/Hkcowc04WNoKbigs8azlw8t/VXJanaikkl5mAQO0Znsg6c0kSfjn/7yjT/9+YnLJdFThZ6prZPsxtYykxlTnjSf8hKQX2QmuIfaeY7XA/RQ9puYZW7Ss7Rg9aQ3ueUD8hBRQIPPYVHSeuOXv/w7Uip8ePcTp/NF0bq3F+oyM81z2H0L721jroLR2xvPpAEFjizkeEJpuO42BeRIpk3M4/zY6E5X8IuD2cyEhujXdmMUYfTGyNUeGRd1q+SS/66DUHcZsFscMa3vEZ+pA2HMEzRwdfZeud2+yEAuARR4UzDUrteWUqXuyg8finwPOLS1SrITta3szeh9Z9uMuu/UJqbMXmWP0XYoBX7/hx9597RQcidN4UJrYdwURUkKcoVgFT/S1XSgv5kF4cprP9T80VkfxYvOgWnKYgzF94zmiD32UTKLpMMQ7/Z+dC3qGDuD7m5Z++o1sjSEeiHMTD7BYCHx9wXOYBv2o6Ufr+m1GIgfEovYBvzCYdHDq6NsolPdDrRhd7h7+syH7z7x9aXx81/+hYfzPff3D7g5Hz9+z6Bq55I5uSQGbhpWT5Ps/Zcsfck1CogfPn/iu++/5/7piaUU1ipb8H3dOJ3OZHdOpwu39UayzN2SKMs0UZIwLIV3azHPy+nVRTIexHCglK9+vGECXtg7U5ownL3tisuzxAmj5TFiUk2tMiIskcM0rO47e6+03jhNZ3pv9N6ZUmZvFavCKW3KWG3sWQ97OZ0B2PaNHOEl58uZk+uWbK2Rcigat07rMiFM7rxsK9O0yOwNKJH05A41dbXBreM5gtC3RAnlZcrO17ZhDufLmZQ0D0l9wmkkgyURw2mPAZ0WQItqqluNZxiCxIC/zJouY29kO7HHKnVGOEo/2nJFHAqSATGY8LHoR9wRh2mfgotEF17XE//5n35l60ZmofeVlKCG5iVlRTv+9qXxTz/fWE5Gund832nuym7o0JIu0mxZQ2HXUD65uhUjsdVdRIWvv7HtV54eP1FO5zf4GnEpqErTUSnILo0jx0PJmjqgkKvbvmJ95Xp5xq0zTQvLfI+3Rqs7Kc14k7U8uwu+GB48FgctrxTQOHPGi9EB3jPXfePUGj5PR+7y62Ub3QAp8qKj47E7UuKgCo+f0PvrpZRq0GeTLOUlqI4Pj8YQeO57xXunthazI3H/iQ7Seg9arHzHXnU/r4fsVtsrQjAKDxcttDn0touWWxutSz+x7ZXr7lxfNtoO+94oBX76/WfevbtQZlVTKWZtKWUJKePiS2MoTw8sPthHlklubL3hvTLlMWBO5PCNc+/hO/lq5Idz2LkPGrA6ihyfAXIP2Ne4oDqelRd+hDiNDAmXG66lIl1JiYrfRqcoSHwQ249C981+E318kAcCihyt9fErHfOWHqQc/Z7mqmOWmREZxLtYesvyjm9fbqTTA5fTHIN2mHImlyIniiQpQqHhObFtG9Wky8km1XTfVywn/vjHP4NJVzNnzZyXMrHv8GVbWc4XenfuL3eKeMiJr1+/UswgT+XIJq477L0yDevezsG1iO6fVHQ7GsZeV5blxNokgktpIafC5WmmDCpYkjpaH2gNZauGNs1FL80l07b9VXQVt2T3RilCZwW5xGHYOr3uCjAhxwC1QylBYQx/JVNWqyoibahM57pe2Vdht2VZmBNsbnoOJl+iTYoVaLpA2hxaEa9s3nh59phXRA5t39WDWmFJiWvRIM1SI5UUNMkuY9e4OCxV2QDY0DS0sNOWcMnThMbzQ8MbYqfXQjEugmhX3/4BftgUwARuwYaa+ctfd/75X37l22YkJrzv4I3WnPN8UndkjqXE2jv//v/4hXf3HyjWOd/FBkoZyx38hhTOMfjtjptTikgM1/WZbX3htDxQW+Xx6TuWWZe4WyhKwi9KB8ugoLaDliy4wsg5OjFbMDPePf5AmSbuH95hvZKnWZdVHuuNCA3zoJqm4wAYg+74YbFWRgfzKqYz4JwLW6/sX688PD7GRtdhklggaLdK+spxOYwsh/g6V8e9rytzvP+97ZRUqK1KrGevOpfOJouFnvAuHVKrnda72Dq9QtieK0+7BfRijE9diW0a9mtwHZsY2bO4v/L6ncpWVUjVG2y7s+7Odruyr411l8L9h58+8OHdmTxLI2R5oru0LzklconYXR/Da4+DWBRxi5lnB6aUaF4010KDVosuGghhra5LZR6EyLT3Y60PPVcK9pw1dQwj+GnAvzHlZxBJ8IbN82FQmnPGkg7PdbuKzRjmexYuqbrgB7kERkSvxJivHcZh0Ni1btWxviW3BC386CJFjkgps7edeX7k0w9/5tonzpb5/rvvmcpCbepA9lbZa6OkRN03/t2/+z/485/+xLTM5N5kmULn+fmF87ywLAvn5cTz8ze+XF/48P4DROdce+Xx4VFnbE5s1xt5Vud/mmfKFPqG2kXtNDNKnslTfq1UW8AvZlQaJWwu9F3VJSyn81HhDq+dysB4tTC9iveb3MLHPmirSQ97H3bVbRwWgrM8wRgAtT2gmCT4ciaz7hu//vorvQli+vBBpnJmRkGupSkX5sDuy5TY1ivTPFHpzAlSKUxRdXhvNHfmVmg4m+2kOZO7qjSzQqmZ5aLLAwyvTmVi2Cl/252+aoZSilHM8VSxroWQAbIzpYmUpRUoYRN8WDoHPqkwp6hajwugj3MtFmM9Zhnddfl6D/hiQCQUqid+/Vr5N//+K2V+wg2a76TemUqOqqmT84Szs7bGPBV++etX/vnnM/ePM35dOXvg/MnxqMLUZWm50zutah5wmi/cnR/I04l0WBYYhHaiW6bVXdVU0qD9G
Mf0Hn5HGgju+6tgrxTju+8/RbRoCUils3dnKjkU27JzFpMKik1iJjV1fuMy0nMVfp1ssK70XmQaXJgBz862VSwPYgKkUt/AKhbOpiHUi27c3ILxwnFBgA6Tve/Uusd8IkXWg2Zu4/Oum6r83jo7TSEP2iVCvsdsKWjatRv0piTBpgtgdBB9ZEOERTju0aEIZtq3yt7GTPCFl7WyVe3HP/zxdzzen8jLpgsi1L9YJacJy2HLPiWoQ5QmqngGBpNHZpkcs6Faq2zBk0Ukiwq0txYp6saGRXiYWsZesHH5uLhjvQ/jlEw3OS+o+1CyXM4ZDwKNCh4DK+Qsto/EnLEGBznlzTDaTcSH8WciHpToNqKzPDoFwfSETmR0GQe8G11I745HN/f+4488PH1ibjNOp9WN3jf2vVKKupdeKzVnSknch6DOm7FtlZf1mYfTPefzgnfpYmRdkylZOjgLGHa7bdzfS/2/Xq98uV35dD4zRNSlTIW9Vb7++pXz3T2nZdFBP/BjnGYNa51t28NITV0EtcvzZBxaCHccF0VCrey23TBzCWuasbdKieGVt06tqkjmoml9G/m6HubcfbCJO75XvKNZhmW22rh+u/Ly7RkrCb913j09kfMS+LVeSykFt8pSZpo3SpmZJrEwrBQ6RuuJeU7suywhMEgtLrXuWDFyi2GjOcuki7F6Jc0J6qgstKgTiVpkGFd9zDg6fe1Hu2rDayfp2WXUups5Uypx0I1DJzjj6ELpjTDz0gE3Lg8GZu5hFBeLFjKbFf762294Uqf3fH1myQuUHA7C2gA5T9AzN7+xLGe+rQv//I/PfPc58XgPftNw0muhtW88XCaYOyU1TEHE8RyKoIdstLbTLRTqoWylE9W/qI41wgimPNFaVednEjw11ybK+RJ00cJUGiNlTYNmP+AIZYIY7oJyzBTZGKNcUkNeOSmGnTSyT9gwoUO01t7FW+upMJ+yBqB1Z22iPmcrTDoBxYCK88RxpoDPGpXcMq3vB6wbvGD2trJvz4LGupHSBEZoFozu+wH1um44QBK7PGAlV9XWoxvEobU4jAjr726YbQe3n3BK2GtHNi4aTq/rxm3b6Tv0W6WucFngu+8/8HA/USa5E6uolN13Noc04l4V4+HDZqQXvFeGcaiq8BmzG+6b0IYxN+qNyZaYQcXMTqdQvGQVlzYcYMewt2vKiamnvXbR5nEoNqubKJOKkJhqlZxj7RiyEdeQXtDma464htXKY7GYP4yPDyImOZV4X5oHDtgaPJCBmIeYoWzveG+muUY3IQXWK4l7Pv70r7k1x/sqV/7uXK/feHl55sOHj2zbevhNtQqff/wd19uVl28vrNvKeV5UHKcSzL8d9wIp83j/EH5ZWpu/ff2Vy+UixGaa+C6KmGRGKkUOdiUXHh/fMS+TqHcBuSR3anDZm8G0ZOrW2OvKPM2CgsL6QIreRkmF1qvsv3tjXa/0Ks1Cc2cuk8yyogV3kx2HR6xeH4Z33o/Mg2H5Qd01lCkxp/COR3ubizqAy+nMVGa8Be/aVU+0fcOa02YpzKd5lgcUHl8jSXw32TdMSZGSZp3ZNOgU1XwD7GDbkCbSZqRiLBNstcWFKb3AhEfsY2gVqmE5k4KVMSwVmuvSrURmdO8R1lohafhIDJS9h6G562AM26E41KLltRQXRw96qrNqmg75QspXNq+k1MhZdMLam1xFwypiVMC5FKZc+PrtGz//cyaz4Eui3Taev/6C9Y0//vEsGt1UqM0pOR8zGO/12JwWtN/W5e7Zk8lemlFROXWt1CzhodhcmVqbYCwSvd008DNofSi4d1WYY6NWJ5Xx1FN4/uvgal6hN3rWbKo2yG9yQwjRFNnozaBVquXQV4QkrixYX3m53ZhTYe2JuWQZ5uEMwdgYIvcul1XpSTK1y0126F1Em11xL7Rd+S4lGbWt1OpRnB2r6PBp6oG1i523SmYajB28H2KxFAVca1mwKKLs7lVK7N4zrRq1Od4XvO5cbzvbDqcz/OFPf+TurAKxJM2axnwHIjNjKOPjdQ4SgA3UIQ5UN7AmnUBvLoEYQcYY3UIa1vej8n4LC8IQmroZuTkkG7lZtKB8Z6aArJ1MVreJHUWEXrc+qXEBN++vcx0z4RCHpXns+eguhsGnuodxQeg5q3t9M+jGjplIT2G/EV/vQQFsVLrD04cfKdM7zRe7s9WNKRVyTjw9PjHPM/u2sdfKVCZ+/fYb73MiWVbg1iQ92r7vTDmHn5nF+pTIsl2vkIzTvPDTDz9xu636mnmWuLhLt/Nwd0exJux4WR5odad2p8UU37qTgx3w/PyFqUxMc6GkU/jgjOGLIKOcghOf1F1s2426VqZlmKJx0PuG3fJRXQSGaF3OKYN63NBgqq0bO2rr11rJDnkS573Mmcd3T/jeON+dqV7pTV1ObV2YdE6HOM4w+UTtGmimqRwL1xymZHhOh1eTlUQzZQNXlvDOEeTRHfop5jfuzCWG3XEBJsBT4VZ3+r4rGL7BbMaU5O3iJotlzWrSwVkv5lE9a7ituYvJhr1V9ibhVfXQCIQamQMKUaXb4uLqDXoSFTInHZbJCp6NaUps1w1MkMv1ttJ8p0yaPyVz9r1z/dZ4uXP6dmPdfqWtld/9+IlWdRGv3chToTVBPsnjMo+qprkGrznLnrntnVxg3+MQ68O+3OPABsKWwnPWBeZOKjsj9TCnRi5O6o1STmJxGUx1HKTQk7qrCSdbkCDcBKu6nmU2Gfd1yzK8a0Fr9rCt7sqNMMu0sLe4v5x5qRu0yi0Ln9elsYsKHAaHe+9s12d5CNlG3XdRtpPsp3M6cduv+H5T95hgi0PPTOwiHbrlgM881kkK358OKoT6Nv5iHL4pKuGhHZjZ20pvgs7aBm5G3bsyZPbK9bqx7XC+gx9+9wMPdyfMVURYyTokTZfDAbN5lEeuOaYdYNDA62N24E6Nyrq7iCWDMq6j10neGCmHJDsotAP37yj1Um0LeM9k60qi9GG4KDpyHpeZe8ykXmFbnc+vlb5DGGhGEdadoeMeXQIx8bFRVBAQ67jGDzscFSh0iU1feXMx6+QAnvAmM47dTnz6/X/JXB64Nc0FT9MMZqzfVu7u5PR6vruoiCuFd3kUzX68HjG1VPCWPGNBnTYT1DpMXL9dNbP4y8//zOXujvvzPWkW3fivf/sbl/OZcrm7p1Z5u+Q8QUrkpjChlpxU9SG3TYEUaS70KSh98aZ1E/YYTAW84J2pnOCkDzWbxq5Cj5Q+1cOmPBcNt+id6gqXabVSe1WgvYuRsARYu++VXKBvjS/XZ6Yy83BZsEum75UaKWcQmg/vB/zjwVhpZpRcQrbhEs5MM953rntl9pliRkVVSMoZvNH6Ltx0bAwcusvpM2dlDpjT103wTXgdLQ67ZVKrI2yZNE143TGDEm67loIDnhLURjERBTy6tdo7zRLzPFO2ja04tieKwbUa5MRUUox7XB1KKdp4xWJiYpxyYm9S2yYS1+tVvProk08l6f1YYt1X9n6jGMzzHddvz7zwhcu58+7dEznDdf1KTidSqSzN8amwbS/C
qU1VTCq61CxZuExKQZ3rxLY1WlXGefcgCvRBNQ1qbBK7JpckfjwbkLg+P1PryuXugceHTMnikyunuov+bKoo29Y0X0oGFowg1Dn1XmkRvkJ3muWDYLE3HTi97zKoa5217ZFVkOl1pdfEUpw6OW3fxawLHci+ftOaacj63TvrbSWXIvM9DWBivuAR/Bf6CefQw6Qk3RFh/DjuUflZJbrvsoZ3aQbcY8DtHOriTqPvK7XPtJbZvdG2Susr21pZr3Dd4OEB/vCHPzMvTs67DmsyKZ3w6JQGrJajpDsMMO2VjeaoKu+tsdPI6YS3SvWADC2R8kTqjW0XBf4oQlMUWqhYwEfBOQwewYb+xxKJdlhuD2bdNGWGf9oYQI+utYeVT++aKxCdBaYCdHDEigma8QCw8eG4MI55AkLOxwzCEFFnj458MKNSwK1vxXVGp7tx9/QTeXrPbd1xl3cSGNO0YGU6Lq9WxSDbbusrzTdJoGseIs+RX94qnl5p0Tkn5j6JUZcncpk4PzxwWs5Mp4V93WhuTKVEcBhwPi3sNXFdr8JmXYynIwi8di4Pj6y3F3INKwLnUDht+5ViRUlhx22qy3KKN9ZaVSuXjMkHP9vZ1g3fjdM8haGXxD7rtvHy/AXu7jnN8/F9rRtTytDguq5Y7eQJdQrux8S/ZA06W1dl38zwYf3gIT2vnaUkmDIjeNzyRCmNlI1OJo3BaWwF5SiLClvDaI1s+N6P7ih3Vb2WNBBzy0y5YPt2aAEsFlielPRdc2SAvxl4qfNRhZhdbWddG6eL5jGVhN+uumyKMZ8WKkbyTu1O6pUupE+q4XGx5cL5ZKwvV7atMSM7FDPou/BeK68LeUqZjZ1lKdTauN6+8uEpcbl7R5md3l/o5rxcb+Rk7NON6TTjbcf8BaOTk2mGY5Na/KaOspSF3hu1Orf1xu3b33AS295YpjusnCOPoEZanGIr8zTHpnS+/vrCz//8C8v5r/zuxz+Kqpuc0ymTcqUsF0Zu2Z4TZYM0zUw5sa1fgUqZdD9OroFw9YT17aiSPTayJcguppPXzg7kHBkpyVm3jb1uunBaCA0xwSNxadtxWLl0PmMu6hZFjQbprQvOHXqYYb4mhl5EilboyaFvdN8ZrLPaO70qJaiF/5MlmdDV1oN6KqZUq53bvkFtXK+KH/34ET798FOoxRUbUMiozNDPzkQHPobRAbMJWcoRsav3klPnr7/+Rdz7S4pZE5gJerU47Et+zfMLoPCojEUb1T9dqeqMXwO2S2Ui1R1LEdQzz3FZaIA+ui8nqOxJ30vwtKDCoaEBMHl9i4QT1iWMzzT6t8N36XAUFkOrtQiZitnEyCwR6STx2ke0KA/OPL37Hc/XypyeuVzOmr1ieMq8e3igA3/9689c1xvfffqBdd3C2FJF0JRkrOjeIGYfHajbps7VwoxyFkPMEDvo3d0Dp5OG1TYL3nzImdu2UgY/eyqFfSvUvGtAXUzurmSqO62urwHzLtGLPHpgmU+s242pGylnetuPRQ1So9JdQrdUQtzTYnjSaXtg2O743mg0pnnmcXpHGsZ0lkgNhtqkO8zLiXmemXJm5GJMy8yw8W2tsa/y/U8YqWWmJdTdLg430XEMSqNmMYZX01DYCDzSGSxX9wrT6djsyWDtG83F0885BxTSA/bKYJ29w94q59M52t52dDdzOlHCEiOlrDmOAy6nztaa8rinzHk6s+67lOjBKsNiKFs3ruuOuVGmE8sceQZm7HtT1Yozn43TVviWG3sb1hTyx7pfThTL7F3rIGWnVefx6Z75lHmcHrm7M2q/aUHmSUSH6nhO1DZRa41tvmE4yzTT+guZRw2Y2xX8FQ7oFGiJrWfqdqM65NKZ+kbPBZjYHaxpnlBr1Ja9cr3tVJthNb7eNp5vO2WBS5up/sJSbpyXs+izCUqGsu2000Ki03yn9bDHKEmeTJbxJuPAlBJzyhCMnNZ2egsXLTfqHtofnNr3IDoLXlM05pgnhD1KrD8dNbJ50d+PQax7wHIBtw5mX29433Av9GzQROxIPVHDrC8l2OoNQm09RH/NRYeutdLaSl0rTmLfnH2X4HTbYW/w8AQ//vgHypLJJTHNsvTIvHqmvRpRBhQ4qvo+XHIjDtikum7uXE4PwTwKyEUnKiPLGyyyq4fyvsc1kQ/40Y3ozvIbOBKcGIzHuGGapJ8qB6Vej3YkA46nn5IG9o6EiQnNIxwOg8NEwmKu0+PM6+GiOhxth2W1u+Ht1eVWPzsfXcTxow/oqQE7vRsfPv+RT5//FXsV+lCmzO22UaYioaTJCWOtO+smgWWtOymdmMpExvjrX/8ZDJ4eP+p5NEH0c86s2yqabynUddV8jASnEySZcV7XjbbvPD4+kVLmbIny8PBA23f2vjOfZ8puzEVOrXvd8e7Mp5k77pms0JPz7aXStpX5dNEgNcJUNF9QRWpdG8yQBiL6QeoQnoAGyFMhV7X75s7eOyMuMpusnruP+1AHqgI5nLmUgKXk1zM41e6NnCbqXqlVqlZV46+LOc3lja0GxwJQJyUWUQ7edY/FmAKPnWwGk6Wwx/BwmedgBsm/SLGZWQwqAanMp4X5tKhqcqdS1FqH+hUIYaPcq6zMarld0M8Sg8g0ZXKwLnJKVBcjKHXjZQXfO6lkpqxLevJQjns9LsLTaeHLt6qKa8AS1lnmiWU5qRpuOw+P72nrxmmeOJ0SuWzMJ3CrTAY5zSifWLhtDWO4vek9kjJzNtZeBS0kWYETqX9itAgE82xcLk/46Z4UYh91VnJQtdCPdMt465hnumc8T1zuxMhYbx7mbU2XSbuyL5XanLvTmVQEV9S+BZwDUKm6Z5lr0AORhkDW5JVbrlgOONUT0siGk8BxWDnEjEn0TeUICOfWfKCP+UYcVlqvw5VWs4MeF4Qui/56OMX3F37+OvDde6X7jqEui6Z5hYq1SahMStTa2fcaLgSzusKt0ppTb7B2uH+AP/zxTyznJUSAr1Atnl6Fw12ECovCAIt/Z1T/aQAzwcTr5GkiNVXdWD4OTLGURP/W/ok9Gkh/7/txsL4aK4uU4VaPLmTAhlYmXg3KX3/GiGqGJBpofA7ePFiJOcw+idlhDQqz3nM3kyWMD9Cpx+uIQjY83vsQ2tnQfMQtlTzmhIPaK/cA7x0rj3z+6b9hmu+YZvW9+155fn5mWRZeXm48PD6QU+H5+Znn52de3r3ndL5jKiVIG427uye2/cY//uf/wKfPPwaUr2F9bTutN52bLs3KMs0xl+nUrsF1L+WgyLbW1ElcY7B4nhdqKooz7E0Og4Q1QYeWu1rOvTFNJ05lonpnvV1prTGfTliXGZghmqqiUcMjJ6bmg6HQe2OEbvfWyWh2sVnQ3eKDHvqLNIZaaIC77YpdVbU/qfETq5JWK4VEz4kpFVK2mAWEbN6g1ybzyRTW0gV6c+ZcqC4nXMFVHhe//kzVhoGpws9JA/zhhCAKHWoBzWB4BKWQ3XsIu9701W3b8DkzZdGEU1m02UxLsdfOPBVy0IRLES7ek5G6RIsdDUjd1J5KaBLVi3fIE2Wyo8Nzd7x
WLHjXOWWWMuEd1m3nfFqgJJ6/vPDddyfSfKOYQ540HM062La2k22SV5WPVj3jLu76bsq9Sy6YsBQn2Rm3ld4h57+ncab82tabKbVLmqQYhwbTZG/Otke0ZgpsvxteZtb1hWwT57M0MyWLWV8YYjpYB5MPOzj8t+DnZzMcg8i1RnEl5BzEC5PBIJbIHlBdHBqGic7qe7iJShfjXcFGNY7SHOIqDFlye0SF1sGkEYd/xGr6yJYIt9ZRkUsnI1Fi76K0Zss4RYFc+4YHBOnVWXcpt2tFCXMb5Am+f4LvP/0Dy6mEENAwm0Q8CNGjLEM2MeHc5GAQy7ibuoL8ZkKR3KndqW0LHazIFFIqDx1VFG9B9XlVNesytLCMySnL4DOetx3lHYJkDEG8Xboox45OigEjJeXGtPiZtcmiRGfTdjDNdAETcAx4Bqr22+h6XqNHR3dlIRrm2LcQxWWsssNpOP7UOrQOj+9/x/3lR26tU1In2UzKxtPjO9w7f/nLz5gZ9/f3/Pj9j6QfEr0568sVlonz+cLenZSgNa2B1jpWdFZ2y5znEyMq9jSfNGvGZew6mKTWKEl56VjjH//5nyjZkpLeDLa+qUq1TF0jfSxlmjeWedZAmc7l4Z6UFfSSu4YqeepMZWavGqQAXK9SNZs5p8uF7boOjzIwo3pjSuW4j1soRluVTmHK6WA5HY6Y4drYum7gkgLnc10iHrd3AnwuLBQdkPH3hwLyMBabhm8S7NebcPvTQto188imTqi3/uosGx+yWQ4M1Q4nS3Mw7698bpy1Naas9IGOVKkeL6a7htRpWUju3F5u7HXn/v4+RDNRsViimXDOhElpmSBtwdRJBt1Ypoz1xGmeYkEMB1U4TyWq3zBP8x1Dles0FbI5tVe2qwadl8uFl+evnBbj6d2JaXnGZlFqk1vQZDVLetkb2WRLLsV0J6WJ5At9X9lJmE/M00a3G5kTySZKSUJGAs8eUCJHHRo5FfE0E07rxm270Wtm3TprBa+jUtoou8lHqpxIG5yWTIqMhLU1HerpDXyQkI9WCNGmDM0S1hJ76lif8NTouyAeC3M+C3rmLsUCNiCHGIrjiYYqPAW6aPZlmkqH7089DgsfJn8uGvjISHASjf2Y43TC46iPzlhfV7vHoaqLdN9volj7xN6c9aqB/O5SU9fWaTuc7+F3P/6J+U6fh2DQ15Q9MfhD1uFS0+cQZB0pbJEfYm+ciHsL+5i+H4NdZUpLfT0q+2SZkQA31uq4HIbuYjwnPUMgiMZ6dRZznOgmVF0cUFayRAtbn947k2nGuIe6fIjaetdrlzdWeH51oRoWCIYotKIY/70eQrqRAEy0D2INy0jTsTYiGIJuHWQE0iO/+91/zW1zvrw8czlNvNQbvVaWi6Dp77/7njxNkX1TNYyfMvu6crveuFwurNcbv375hTLNJDO2fQ+o28PmyJlyplZB1eu6sa0rH57ecVtXvjx/5eX5yvfff09JmTIvPH/7Rll3td3LsgS7xo6q0hNMqfDl+TdKzlwuZ8ntTYtxa5sGeOacp4XeNVs4TTP/8svPvHz7RgceHx/CKFAfaraEZ5j6GMo5DMGaIaYDYQLokcbVJTobQyKQZUQ/HkBjTjMBHdMTtE1taglMlWGz4ao2WlS9rXVu28rLl29MJfO0zGJ8tJ2ekiogs1gIBOQlaxGJwBrrLqFhIYtZ4FFZGizjUgjPKPDozrTZciyq56/PrPtKa519lfhnQAseOQMpJb3HFrCbG+45YL7GspzI03zYbJuB+R5jV2cE3YNxmQt/NUErJecwMAuaLbCtLzS/8en7O6azErDdOiBXUxsVMAmr4oGlLAPHlDrmGoj+y798gZQwP/GHP8ykvpJIVEKo2UMkGPOfw5cfXcZDFGauMei+NbYN6q7hZW/GbW1cry9czjOlFFlm337m0/t3lJypxSjVadahRIrEgBK60UwaDgn4SrDABN1gEvQRa6WURPNRper7yHJbZaN1D/ZfMF3i0IQWxUMsJ5flw1jruSfE3tlFW/XGMB2sPjKHw68osHI5JSSSeWgOblg/4dZoTTYbNGerLk1E1axuC0PCdx9PPD2+53QulDIR7AbMpmOuONIjj/vIDGtBde2dZnJYHuZ0GRPM5sLbqwcbCVM3FUrnYX9uaeyrHp5N0T2M58Tr3rNR5Xo4NjB8zMYQ+bgzo+vUf+iyVHm4OeEeOwKOoLqrwxlQkkmEiTXcMkFcInULR94Blw0G0SD563v0eCEjmMq983K98vik0LRBD647fPfjP3B//x1/u16preJdFIFfvv0KX36llMI8n3g6n7FkfPv2jdu6cnd/z3R3YiFp1uQ6/N8/vufD03fquEqOXHNpJiBFl6mL88tvX7m/f6TkwtwraxbElJJMhH748UfKNE08PDxyOs2Bz3WaN2pWrm2aE6f9DMlYbxsd3UZfv33l6/NXEpm7u4tomrv8Z/Iy8e7pifPlTsZd4fxapkk3cq2IbaYHlQL6CBgWH5tphJkHhJR95KmJuZDTWLxaaAOG6ggfjSOP3qt8XZzoILIONHesObfrym29Mk2Zh7t76JLtl+kivFBFYQwrx6xCLdHWesxAioaFOTaDyXSs0bmt4q2fllMsZ3nStEGjA7YWfI08kadEWQT7iPaY6L1A2+VHgy5w20WbLZbY66bvHc/XAt4iZVEiITafWF+dzsO7B37Iif3WeP525bKc2au+R/XG3m58+HDicpay1nD0IhM5yUKi7xokugmnF0Cyx+cgvPd8mdh3qd6tp6CWyt6ELuZQQ3Cjsg8CWHTwNir2HJVyYm8Z3xu9KS5077DVinnhdt243GWm6YzXF269UraNlBULNERpwu7jIvA0YHJG1vCadLAlk+dWaw23RjZjrT0qVkFhuQs9Dx7aa8YDIXTUTzl0AAPqG780p3uTwWCwt43exxpOoR0RQ8ubLMw7KXBwZ6sr7mvQRSvrehXO3yu3rbHtTqsWDghOzvDw/p7vPn7ifJkVWYwuSktFc1g3qd0Hw89rWLunY40cw/zDQUx9fI+DtlU7aMSCmF69Ad5y+geMOLqLRiMzMtQFP6qYFETXfUOakeEjJ/ianGNuIbgwHW4DA8LTs9/bhrvRkyIIkosWH8OH+NIOFLx1PFL0Rvzs6//GmTXmESDzwvgsSxSDKfPweB8dj4ty7TAt9/z+d/+a57Wxbd94evqenDKXZWK5v/Af/9N/5MvzC3eeuGw7OUsTNgpbEMtyToXT+cxyOukC83AbaDu/fv3C6XThsixi07lwlbvzhW/LC+t643I6U5YTH6aZZT7Reuf5+RuneaFoiCHGCyZOflt33KFME96d03LiZV1pUam2feN0d2K5P2l85DpASc6+iyL4cP/AKVgg+77DVrFpUiVaJkprbwLcLSojiU40DN4FW4Tieiwc0U4zp3nCm/Oy3xTwfT6FuR9QxcHORUrd9WXn/u6sTU0OpXimxqDs7nzmNC3YNAZZxtYrJRHW5q4oSoxhhNRbV+hO27FcmEthq3qNUy7Se/R6DORTmQ7OO2MmEV733RWiVu4vGk
GdwwEvE2T4tJ0DZmc21z2gaQKauzT/lOl1JvNFcJJBpmduoVVwVLdMS5tN7ppYHXlqHuBfFYrnb7/zvqat40tJgIoO3ksR2UtSp6v9kku9eCCpaRRFa/iV0UTnzGDThr4lCalfzISxHlp8HR+2YRFvvft7h19pffPDh9h8K+Es7gbyqi/d/pefdQTTDRxTlIGNDtIg56ew0K7vALb4WeON2pxifdSG4U1tfFSTXA9URrgesWyH1g2SKd4SatD4eR4MGHSAiJGCIhRpz13afgSTHKm1QUAd67AWHB4kOqItrlM3Js24r32npBKELyik3RRdz59W9+c5vNBPMKjchNEOmUBqiNbZUaaDocCE2gZNt5imShlJZVl0vmcllZz2c+t0bpmW8+fMfxeHcbbLzT56CaX23bKiF00pA4rwvLcuVtuTKn4QbheweOSOjw5csLf/2Xv7AuGx0Yfj0wDom//4e/57e//o1ENwaLykxYyLUo9bpWiusa6GPg/niku27BhYoeis57YpQBbCv6kEe0shzv7jkd71i2hXmccE5yse6FfYagUu1xHCi5qmR7y7y9vXC6f8AFM7RcF9JhJoZgvQqRui3ElHBxvx4s5fW6ct108N/d3eNCp+bONA9g3oSYhFV2B+OUTBLYjY3XrV5aZWAgxGQPT+Dx/sHgpcphUCTI5XxhHCtumrisKylnDjZZr+tCK5FxHMi1aGrxkXy+8vLyRS70IOiNpt7v4+lAXKShn8aROgy0bt3UTi7grpJhXEuE1DnNM7Q9ilkQRQrCBPesfde8cPAqXX7vEfVDiBB1xlE4h7aaJlGAY28JdPgU6EXBfr6r+2OIVh4THK50lu1qUewjpcHgAzVUgkv4IG12pXM8HfmGD7y8vpBi5FrO9LrRW6IUtSDjHTV7vK926BRWYHQefGPY/Qk9CIGwq+C2RPVOIRN80ia0n3M2qd209QY9NK+E3WZXhD79OshKz9xMiV3xL+KyGntDnZ3wOszNRe/sa7WW8XuxksPiQfYMVQ++mxeiU8qGXOHxdtgqQ2oP9Qv4W55Ssw3BeCoLM2zYhkPDEXBd+DoNy73CHOXWyeKCcpH2WaLa70/fNShflxy3v77YFuFl9Oz7NqLnyt9+HlM3AXuwYrfXyXfBTLux9msZURNEhqNTLGsqUXrA9YE1R3KJLNWRS2CMgTBI+TPEhDNl4B79E6IjRE9AW7yzbLSff/gb9fGRu5N8DMnEJ82UW641QhKMWGsRx2Wb2/5jtmDG3BD57e9+S69NMfzO40Pg/OWFAMTHJ+6fnjjmTdemi5Sm3uoYk3K27PnrWZyu7451uXJdr6zLosy368r494n5cGKfTl0ItLWy1dX4GxV8zePA6f6B+8uV+9OJ+XAQ2kLABzmlT8cTv/v9H/jy8iLOsFZ+9atf45O31Ghuqd7RRXGkTsN+7VVS4JaZhknPTuu4aL6M1kRcDzGxXBe2kklpEFlXpc4IMTL5A3nbRMCYxCxEqyU0Lfe6XhU/HZIZXDB5ZrUavH7Dl533HOfpdrCH7iEEQmu85ZXruuB94NQqJXecK0zDZNyFJRh6bQKDH6wQ3mK0EaxyGkZiFDwjDFIk7WE+EFwkl0q/XPjrX//KH//4B759943C/ULgcJjZSiGFwOPTE9fzmX/7/gce7+/14TB1SIoqdhmGEe/ObDnTnHDIXAtjTLSS1SRlqbIhStKqKsFq6bqI5HMNfGSKkxneGoNXmiih0UojjAODc18Pdqf12gX5AryXI7s75TiFFK19rRFdwA0KH8N5xlFrfa8V14N6ElLk/v6OlEb2EMEWzEmeV+G7MeIcHOc7hjCxlgu9Zz5/+ZlaK7HLYb7lDskb0auco97ly3Clk1EseHCNViM9dJNg72a7Tu8yQOn/s65l+o5+3KZnTcxN5GZztzm9h2Joh9WUIhzZdbmkXat4F2nmlgBwreH87nHZu8fU1teDv5GD2ga8OGzZm1U/64Km2V7ofq8atcC9X0qHENnrDE4UpNpuf94Rbr9fbt2ErsYSdE/FE2+XoN2I7H4VDDoSdNW96k/Dvmkg45q2VakNvWU01d7NGY44H7fTIf5WWrXrC8p+63j9LjLq2eXjJDNtZCBQa2JZE3kbyHi26khhUPmPD5zGQa+vRUx0O4jTaeAwH8V70rWRoiHx06cXvE/cne4VteI0S3XvVUXrTABhMPIQByrGd2FS3yLe1Bu+X5raGF3ojMPEN+/fczlfFDjpPYSBvneUNITpYyS/ZUStuTAMCh4dUuLny0VDTYMvL1/42/ff84ffzxpELOkijQMvX77gveM4nLgu8mD99te/41fffitJfZSHpTWvDCcUQfI8jTw/Pd3Q2ZILg08034m+iZd02npyFRJRaVJKZW0gMYrDjF7Pt/caJKOzmyOkxOwDw2EihsDrl8/UnHl8fAKw1TcSgFwlT/U+UUqm984wzyQfTaf8wHW54qNc1Q3H29sLj4/P7OQfMdG3FdcdWysk5Nb1PvDr735FDJHrcuF6XZgPJ5EoqyRmMSTreUjqPLEHU3Cu9NMKZ+PWueuCJ1oio3Mi3a/bxvlyIVgbm/OOISVeLxdePn8ipIHnd8+03tjWhVyPNGCeZ8ZxZFkWLtfzTamlVMmv+f65Kben9UZ00WS4/lZU0p0UT8KIPeW64mOQRqk1s8/vNaJyVPuOacIdaZwsjE193N1w1m4f0uN01MFbPW4r5FqZhiRTkXE/y7IwDoEUEj0OfGMqktrKjSCOPprb12SXhnu7jtb2KgNOD57Xlx+oVQPClqvgpgQjNp0HQUWNLly+6msG75QHZhhw2/cKU93oSt1BJL0uatNT5WlAnoa2M/reYCg7zSQtrQy22mPKIHr8hZRUG4mglKD/y1m4xn6210JzCUkjdJpXIOzeBuMwOp3cYC/L7Du4dDvMjY9wu9kOerUsMvt9la/UUUCev8WQCGLsBsDZIW2bQms7Sa5Lla4LtlVrqpNN1r6uRb53j3ojpHZy9v27VjQzle0y1/0399ajYXvbTsdgUA0WJ949tU3UFsklcb14lrURh4HaCsn6UmqvEOUVCEGKw2qa5GkYmQf5AIIp44q9z7/7/W85TEcNqPYaV7jByfrHE7xMqOits88N+GDGRAvepIGLltSaC9FHhjiS7pN65Y2niPZMhBCpLUtW75D03g0QPEveOMWRh/tHlnWzSA3Jd58eH2+/R89ZkFrwnE4nZdfZkPjl5RXnI4fDbK16lZo1ZLoOITmdHa1wXa7EkHg4Hbm0lXVbGceR2m0gDHpvvRUSBSdVnYuRMSRylvKyxCTD3TgxzhOx5IIPA9M4aj1zjm3d8MB0PMmE1grjeIDerIVMb4BuNDNq5UyfdUCFqKjsmAaSD2yrurEPx4UYB+iV6CLDMIlP8IGyLUZaJZO9SSs+DBMx6GDd8ioyL3STwVqpt9P0M7honxd3i1cITiac0irzdDAOQQ9PGgd+/atfs5aV9bpxmEZCDCTnqK3T88a2bIzjyO9+81s2k7q6Hmk0kUdVUF2aJs6fP7Jse1aMorFz8dx81ON
Adw23wjxOYGTntm0M4yyiqhYLGnR2vnmLn9CaLLliJ6aItwDFaZ7kj8ibKaKgl6qk3tJYlovMgl4lSTFFfBef01ulF0c6aQOEkVoKFMWb6+JRrEFiwLVNZsFcoKt72+M4He+UGtkyl+WN2jaSNzYVSXlr0/rqvVNtZdN7GL3CIrtPEjjQoTX2NCvnDYtCYogG0AQf9NZxrdDiQGwmLXVeOUqtm+/FmVRUkOZev0PfYSDPnkLanUhaZ1lae3y5+IliZrWdoNRxS68UU4N1p9RZ8d67akich/foQLIDvhn82HrFt8CuRtq5CY3wRjCDeR6Mc0Zbh+s7R9PtOLTgQ4PKFE2yE8je/DL7z63v5fteQdq/fi+7GPRRkXMek9U6Z6DKDcLSNEwHjwIXtalEti1wXSOVqMbAMOBSo3vHEFRHHIZBeUhh4HQ44UO4eavWos09t6pk07sJamfbFryPfHj/jd7zJk5u90LlLeODDa1dGWgNEcH7ZyEEf4v5aS5Qt035Tw78OGsAds66QPbtVXBtzg28miu3sjEy6Oq03pvoAqU3Ss2kYeTbb94TUlKaBAi6NFPO5XxmOhwYnUrBrpcLvcE4Djw/v8MHz7pu+JxpXSKE9fJGa/D4+Ej3nm1ZOb++8nD/SBxGxq7K3Gp8mkcJD8554hCJMdl/1/PoouP1y5naGsfDgR8/feTD+284MBN/+v4n3n374aar760Sk2cIJ4KV8GxbYRjtQXOOIQSWslLtaV2WC8uycKRxOOogvjuc2OtQUxr57le/IqbI68sXWrNij6YO5tY7pXWmeSJGJY0G7/ApQndKZl0y26WQZi+HITvRY/WZDYr3N/nbDYrYMb/9yW+mqmnWD02jrBvrdmWeJ8WgB8fvf/cbrkZaBfSzxFZxjOSmAMPuPddtZc2Z4zzyw/eZcUhq0wuBumWil3LMJUE0KY5MUcmp1XXWZeHl5cw3HybGWY123mnSCV3FJ757K1sSZr70yrJklssXhmnQ9BEq3utCXfNGSNr6Pl/fWNeVIUbBF8FTtsy6ZqZh4NsP3yqTRjkZ+uz7SI+Wt29SSD3YnjCOBpt1eunkqmykwUWOhzv45g98/vQTb5ePOLdhCK3eieJpZjwrYH2+nmzQfnKNWAMtyJlN1UTqaweSTavNSEGFGmJ/39VGi8GmLSHtOsDUFCdNu8Vs4CXbtGS2Rre8KA+9aMi4Haw2rd84BH0GunMEu7g73jYTHdqdqt+TLoVT18nempG+3f5724uvlBF0+/rdKBEsEt0C9PZAid2rIYtdNzJ/994YDAImce23bar2jquCtW7dGGAEqpEZ7L4Lq9g1qEYXCjhnpUP2x1uH2IWlNYv0a82z1ImaB5Y1sFWHjyOlZkL0+AAxRlJIknAPgclHjrPCNb3BHdF5XIBpkBxziEmCl+AZ5wPzON4i/HcFYO9SSV63lTFawnRwRG/prxWS13PSSqFFb4GWHYZkJLKUeMG6r+l2+djv7b22PPE9TWrEHQbHRBXe3y4QQfDKpHOWVbXlzC5nH6aZXhvn5YX5dNLumPTnw7RzPPDx40deXl748M0HQho5v37hzqE05nFgXEe6c+R14XJ+ZRylmmq9sQE//vA3ti3zxz/8EWK4ff9S9w0JylYoQ+G3v/4NY5IQJIYhGC/QrRbTYsBvL75jGJIIMqWx0b0nhsSyXO22Fo64LAsxha91nX1HkfUwtdqJKUkJ4JzUMx5olWEaNXmvhZiCIBVnD6j3jAaTeK/JrbbOtmWGYcA7b12vlcuycZxn+RAw5YxhfqVVlpw5jjN4x+FwYBxH7qaR4/Go1ZFKIuJ9ZJomci68rVdoMB9FvmPEzjwNPJxOOOD1qtyV8TCTt4xrcM1F2uNamYaR/fMG0nyP0XN5e2PX7g8p2jot6K2PkeCCRZEbYh6lHz9//ogfIo9PT3JoZp1o3gdSGmX0c17qKJsa4xBJaeTL9RNrXm4YyhAHTeVd8tFeFD3gvBHtfMXUg3lcQM5wAzvsjPGMcea7D7/l5e3A6+uPOLdS+4bvjoT8Jhn1CXfLPJKzQZlS1XrQQ3dEBDlobs3iALo3Nr/RXcV1j7fyqtYqvYCLewqtoD66FCRCHgXhBNe/GrusWAnfxBF448oI9Fu+s+H8Cs0SrBEkW3XOSPcur4FGbA/OIAwXkBLLNp1m4XqucQswt80X4/xa3y8M23ssZgQUzVE9t80A+5x1Z1tyd8YxcJPEVrcXhNo988vhaf9ee4SHbAz07ixBWAaOsP8IDmjJ7pTOZoNALZ7eD1xzYCmJsgUTVii563xZmY+R0+FEcIGUNMmHkBiHiE8eH/e9WxfwPE/cn44stXJ5O0PvDMeDeh7sd2ut6hk22ZaPnuM8sy4btEx03pRhMCX93J7Op5dXQoqMhxmFw7qbabGWIoVhM1ixNplkQyB6j08DtWZKybfY84CjWpWypM1Yw6ba45xTDpzznmXN9JK5f3xg8p5tU1KB956Hxyfyllm3lcPhQClq5pymmZ8/fuR6ufDNd9+qf9pCPvfLYB4HCEGZUKEymaQ14AjDSLKhZ8uKlaH3m8x9y4Xz+RXnURW1E3IRn56fCUE5SV//cbSmuNhofdLX68I0D8QYqKioZogDPgZqtZu1e0FVzt8K7H3vlNopRcqTIQxkJ+dq9YUdSXTOs22bQjBRxHfzFllRNN2lPWDNB7zrpNRpJeOCqijx6ote11XmsK3QvCO6ePvNWi2cL2eGNHC6OzEOifFwMKhFa6J3Du9hiNah7KDmTCsNN0dChut6ucWFhOuVFCNPz8+A48KF8/VCa41l3bg7HdVQhXDG2pVT780BO48jQwj4lBgHx7auuBBJXook52yqtoEv+sQ3H77F20NdctYqHAIhDrhaSUHyv+fnd+SSab2qycoHPn3+KG6jF9XRTmrocy7oA+UFuXQzSQnq8vRccNEIYDdAD7fgt9oiIep186Hz8PCOVgprOdP7GVzRJFx1IU7D3msdFP9tH57uHK4pUjrrUcSZAMJhfoCuTCEXIlSLLDFTZyGT8kTzwbq0dyLV9E7GX2FTn6AradRd37OQhOUXg2ZulonOV/GFHWKaw40rsu8TnZRSKrQ3OIbdEW7bgHP6YLu9me+X8JFNqc1MeU3S9FtWk6gSevdf48K9Y29a8t24lS54qNvPcIvZdLo89k4OUAOgNgFV4XaDk6oddLhAwxPNILZRoQYgUmrAtcRaGtc6sG0RmJQ7lqUgq7UxTiPzNBGSSalDUK5aFN+SYmBIM7tqrEfHkAaac4wxUccBZ0R0p7OVYvXKG8PDoIZTW3n2Ui4fjC/rziqOdRneWha3TdWgcc/q0kDVXKeuK4BVJzurSnCsrZKvr0SD3RV3rz8jcZpSr0spVgHgmCbMnd2lHu2d87Jw1+4oWZ3d8+HIuq5E66RPw8RtC++dw2HmV999yzjNpBgJdyfF0NiQGaM2Gt+F1MQYb1KH0poSMp4eocur5u338l6elOAdcUzkbeO6bYLEgTgEuUYd4gGccyQvyehO+6iiM9N7UgpsB7yjuMroEjF5khMMUW
sXjmtrfPCe1BtxHKi1GfnobZEVQdk6DFEF4TusUUpleTtzd//AtmZaq9Sha+II2gzCOMg93BrFyRwSjpHPL68013j58kppld/86tfsRo3jdOTHTz9zcJ2jO0rr3ZWvtLWVJToOYdamEwJ+gHmayDkrNK8LT+0dWlFN6vl84e16YRxHrvZgpRAZDonTSX0UpRbhrjjp+L3qV6f5wOHoiVHTWnMeFyMTEONA9FIz+KQ3fK91VcquLq/eGmFQwYp3js0isIMPxMkzOanLQtQwEJ1nczCkgTQk82ogFQPdBADCcxvNUnLDDaPwaMtzyVywN+F9l3oLyOtGKZ6H0wdKO3NdPlPaIgOaSxRrYCu+Ewzr9fa1e2+S6YJa5nqnBVNx2aHm+u7Aj1KC2cFHKxSna0U9Fl7bhGvg+u1i7jTLEfImC935ALgZyuw5/WV8eWseF/pt2r2BaeZVoDeKD9xo067Cm+73BFl3u+wVb9FpzQxbLtllo58veEev3eK3Rep7p5TeYpd6t8uJKm9MRBvd7cP7ddej20XxdSHqdvlCddpY9lTWW0u6qRW0THlK8WytkZuj1wiM5Bapm6c0BcsFP1IJeMssCylIvTSMxDgSvTDxwfKX8rZRKxynwDgkSg3knJlGnTd125gOR5uqsx3o4nlqzoxptIY/64Sx7mff9+KzwjBYNhJ7bpVT09ssiIbayL0Qe9DF0mEthTENUjS2LuFL8PRtZb1cCHcnevfErpTsOAXamuVa74K1rmUj5zdrdoz6XqZSGqfJkhMaPVeIjVz1P6U15iBuVM+rIKz7h0d5G4ok7sEO+CFFjsd3OK84/R2xIehCLiWThsi2rBTvmcwPlotBu131DiFEnh+fuT+dKL3RSyPusFBwzvKNEu3W2SATXEpJWSW+s25i0GOIXF8vhDtHTCOOroyTOKo/tmWSHwljsNpGC6pr+01v6Z0gyVXvt0gMrW1SWuRSNW2E0VIs3U1B1Ju3F0kDXrb89OM8kku+9VA0Wx1blSnm8e7eJjSROpfLhYfHe3xzCvayKlPnAzVnnKusxdqfumeMA+1wJIeB8/XCDz/9xHW58uHbb6m1sm6b5K/ImFW7wga3JROGKLennavRe03y3iYQYEyJraGiEnNfOyDFRN5WrtvK8TTog29O+SFGnJNhKQQz9Bhu6p2jW2lK7J7H52fazz9xerhnmAbydWEI8iJUg2KmQet7aIHmGqUV7qaZ2hvLqnpSLLlWGLU5wYO04a0CRMbxwDHd00ms189Un4mhAJ3arzKWOckVW7MK0Q5qRGtUb5N83bsidjhI0EtwNm40wVQuBHqr1CauS/CB1GOqRd3b/jytihehe8FKdhjiFHmOXQPdefEJ3GB7dp+3Jamb0khQoYxxOzaDff+GAEP/Fd/vYk/2P6VZ3tvK4ix4U7+bfJSdZkS3Pp+mbXKwZz4VJ45gV2Pp1ZE/QynAznwi+vs4k6q2eoMM2R2/Vi9Kg9IjrUVylbv5ukWcG+hhoFSoVaokVVE76OILu1cY3RATrWv4DCkyDhPJ1H6ZTMRzmI/4lEiuUFshxoF/++u/cH93xzfW1dBLp/lGnCS+GMcBonrJm3XCOxw+KhjQdcdyudB6Z5wGy2vSa7aVgsQjjV4r63XF9c7j+/cEU1XtUvRqHSshqLtimhrrRQU9j09PlGXR85sioTpTfB6J88z59ZVbkVqXx8QHbw2ZjpgSuXdyqSTbNgbzpan0S/hfCtZa6D1bLqzrxuEwazjZB7z9Uumd89uZu9MB5wdLDFDYqQsKV00hsF5XbXLAP/3lz4Irq0JahxDZghVs7RHA8zBLe9/tEd9VNV5qg7UVxmHa0Uzmw2yqhGq2b82Cra605imusV3OzPPhZtRR8qSm3BACtWjCL6XSS8clW8mGRBoGM8Do50tBqqLtutCdZ4hRBK0PFBqTYe/yRjl+/atfMaSBLW9StmyZnoTBpZg4v70xPD4ShkjPjflwYBxGWitKZc2ZrTamKJNPcp7qddMH5+gx0EpliIG7D98yjSN/+etfAblGXS3kstk6p49sCIES6y26fDpMDGlQokRv5Ks6xefjTLL6z0qlGrvrgz4Q2k4qKSnWFx8oPZOvV+bDgb0cR/go5CpsNqTAfbyjl8owaLW8hcdhLt2ig+PzywtjjEzzrBpNZ5JIVJiUohQi3VbqjleghoNtXejdSlGAw3jHnCZKudDqhc6mtZeCZwGXCChXP+6uOaeDrGLOZxK7yqY7kYaWyGRReE1hcgY7doMN98P+qwtBB3Xotr1FrDfapu7uLIlCyj3XmxnpbuO5XQHNJv89N0lbiy3aXw/q28X0lRfYvenOfS3m+frl642E7XYLdHNfe9++/judO5Zaq6/fkPRWwriA657im4kPzBVuv0C1DKA9EVaPqPEeTT6MWh1rDrTiKC2xVWguUmrk5eVMd5nD3YOwcWcy3f0SckH1vsbDjUOUb2CYdKaEgXFKTAZBpTRBr7QYOPgDwTi3FOPtfVlqYfv4xrfffUtwgeLl6cit0otiZZyZdp0zlaN3tLxRY6DWwrXIAJdz4Xy98vTwYDW8igv3vbO0omRWpzSA5pVr5ZviMqb5AGElFMlVx2lmWa5c140xROaDIKG8raQ0SPFp0G1D+P92vdJmaKVQqczzjA+CwEpT+7t66zXlx2PCt2aiCUcK8l6lYVDkeJUC7DQfqL0xH2ZcUHBpt7qCmCSsoXdCGri7l7flh48/07bMsq3893/6b1yuV373m9/z8HBP3EqxD4Dgjx0nvU2gXdhfiNHCpDpY1s00H8jLKlllDNSilNE0zkTv+f5vf+Nv//ZX/vCnP3K6P2nSoN8upb5HnBQ9qKUUQhwkTXNayVJKN19BGJylwTY1x3U5rFNKN+NcbpVmmUMuKA4gWcLrPM/4GIg+fCXs8KRBJsFcK65WjpNMLj5FnMuG7UWVeXS4vl1Z15XjUevvn/70d7iU+Of/8Wd678zzQQR0UHR1LRWXAg+mjR7HSdEdOCkjvFzJniAy1AXGYRZ84iHUIEdv16R9f3+HD3Ygl64PnLNQNh9IPrGWlS1njscTJa/ULcvXYtBFiOFW3jSM6r/W1NjYaiUFkerueGSaZ9as3KlhTBzGiZdylgGwO5pzu2uA2jvR60MTR8Up9N6Y40BukegixScqK21LtHqmmWKuskFfVQzliiAvp+sHWwjAfVUMIcy87JyJTfndYjno3bYFYy3xCkWzQ77ZQe5Kx0dnX1sXRajQLXXUO8hdG9YuFK0UQo82mesSq10tzR7oVQe27rrA7j+Q90MXUPV2IRkEhBPE0/ouPdVUrF81aervsDuZW+/sJT664rUNeJO9evsdfbfCpQ44q7RtVVOjNyGGHV7eOVr1bDmQiy6KS+7kLQKJGAZ7rwvrVrnmSm1nPnx4Z36lQHP1Fv8SUyLY6z+Ns+DIoN9qnJS+HCbxbk7SOsYY2LaVkgsfvv0AwHK9spXM4DznbePl0yt3j/JPSfGija5bRJCPguKWdeFwOlJyFfzSHclHyT+bzoZKp9fCeDyRgsQAQwriE
EpnWVfezm8457l/eNDv5j1TGlnboogZB8FnqAvXmiEGhnGidA0sjUY0X4xr8o+9bTo7p3liHMbbEDHEQZxl0LPavGOeR5sEuobioqBO3zrDOLLlzOV8odXMOAy3NOPr5UqM2lpiUmePs62d1uh1z2vTAJq8TNL/+te/cT0v/Pt/+PdfNwlnEridgQc5inMp5CLX6t4Y53zgfHkjhoSPkV4Lb2+vtNp4fHy6BdV1L8zrv//zn3n//h33dyeent4pnz9X1qzazXVbmA8HQouomCYCHu9356cjJa1awXse7k64ECjVDmDnNC1Y9n90kx76UsBMNs38Ha7DXm36/PjIbOmqW4H704nDKIzSeR2kQwj6oNVOK4WcM2OKkjGWxnw8ELfMJa8MKXE6nW5EF7UwjjMlyHC4q6xqrVqD80qN5gj24kCmeaZhzlGnW36tm8qRaBSle+B9NO1/leIiRHz1hMNAo3A9n6m1cJgV7S05XGW9LPRW8Un+EWcwV/NabUtW21mujad3z5YC3G7wmZrNwCHxQgOoSiHt3rJ/HJxOJ5oZ0TSZe4KHpci8NcZHtiFRykDoK01gDcFvqsgs3aIutJ06B6EZWW1Be6AgSGc8hhRI+veNnWYw6MXGe+eaIiecNz+BztiaGz066R8CZDqpYaF1AdfV+LCbzJyT2kuBeTchKaYGtX92Xs/SCfC3Kd7hcbtyaVeQ3Tg6u3j8TozbyuDM/OTtWulfv6/MdvpuzWkt2PvH2w7h4fDNfEWW1yV5sabV0Bu5B5bFsSyRykDxkU6AFHAkip0R3QWOd3f48yLeqgNeKhnXAzEFxmlgGAaS8Vk+eK7XC4fjHWlIECN+j653QfAUDnxgmIKpkLR7revG2/mN+7sH7h+eaXUzCXQjEugxEKpMZZ3dD1F5+fzC/d2JOIysdqn6FAm9MQ6jOLk9NLNWcmsM43BLHN56IZfMy+sruWTCGLk73UPviiLvCBLCEWNgPsj7NU9H6HCYRoYoM2ruisZYclHM/jBQi4zJtWW2RWSxM+ShtyrxBuJpfdv73StjjPTDTK9V9brbRsuZFhzbtvHl82da79zdP+BjoOTCtm1Qu3o1tpXaG08PT5ymCR8D8zhJDo+g7s9fvvDTp5+JwbJ7mjgOOjqsWm83GWzJK7msHE93RBc4X8789PMnfv3ddzjveX0940AYW1Sm+3K58vz0xDgkfv75Z758/sS2XLk7PUBM7CU8MUWmPslxeAjUNRuOKk1zuGXDVz7+8AOtVB7fPfNwOjEOAz99+ahY2+YtKkMM/5IXBeG5QRknVTIyTWiKG2nojdiWqoDAu6b8d+t+wOuhrTlT1g0XFRMw+ImtVC7X601eviw6fK+vrzy8e3+DD3xwuOqhKXhwnAbOb19IQ6JeC+tyxXVHnBLfPL/He11+OVdy12t/OV85nWbuTneEUFm29YY9piQpYG8mPxCoLk6BZHpsbxeLDvzcKsGkoK7rMnegqPCS5WSvhSkEmvOs20Z3MCRLJS2d+7u7mzhhx1i7Ya7NYJ3YHaXrVpNJzRFD0vfskRCO+DBC0QRWfTMJaSAmlfr0tldeihBONOiBZimmzn3lGGqXYmz3BHf7vbsZ8RrNRHCO2vbcJRvEUXYVeMEwtZKjIh720p2+R4OjSV7KJx3kN/ezs9eyK1/H2Z/11sSof8xFbQRqwCSviBwWbQ2+6U864VL2mXFGaJiazy7A3SyoO9m2dVP4uLrLY81Y16SAKXSTqFZadWx5oBTHWiJbG8HP9B7JTvDqfvH2JiL3dJyZ00C1nzPiFOvvHD45hjQSUjR/QWOOiUttlJz1LHulCyixJ9NRdH9taqiMqPa3bCu9Nxl+jft6eHrC0VkWRcakccQFKItCF6/XKz4GHh4frNJAv0PtKj9KKbGyUXLWaxqcCr96E3KQIlve6LUTh4GYohWfJbzToe6DZ4zqhFBlsqfVSsmFw+FODFqTj8G5pEqC3pUDFQOnu3tq2XBO/jFd7N0k/MZ59a7L0iBh7xy5N5LzTCnxcr1yLVWdE4dZz4LFaby9vRGjUJnLduXPf/4nLucL27LSnePx4ZHDfOIBz+PTI703Xl4/s1yuyFckKD+2JnIae1B7U1DfXiNZa5ZMzGotL9cL3//wPYdxpvXOED3zYSb4eEtQzbnw6eNHjqcjDw9PhDSwblfuDgfBHMtKrYXJwqpC3PuNIU5yG9faqDmDFZiFGLi/u+ev//pXXv75jedv3vHtN9/w9PhgHyBxG93roEouaJMuFUJkHidNjzXz+nbmV999S82FEgJxlNPx88sXEdDP73DjaHHRwn63kpnCyBgHStG6OYRID5Bb0wXpnbwUpkbw00C+rmxlZUgjrYuEj8PA8XRiGIVjNjqn+cQwqp82knj5/IV1WZhPB4YpsW4bT4bNjj6Y2auZ21wKHPremewY7x7I61Vel4BWSzxhCLx9fmWKB4aQuK5XRh/VHoZC1Hrv7E7j3lQIH5wC1dSxULSNOc2xQ0qUriTMaOtq3GWHTbk4wXVadcQgx31rciaLZAyUkKAGCgO4M96Bp+K8olhqF7moqAdnNNUunxZ3J7e5tgW9b90uErsG7F6Qd+c27ivN1fhi5wQ/dRwuB1p0mrjDV8ms29VD3VSgHcvysbRbuA0P3eI/dhir3xL2pN7yOzmM3ODi1H4RCohBTzec392CDV3btzrxRN5p0vT7NuMcfb9MbVvBeCfxqFHBgNWR88A1B3JzVDeAj+aPMBmw/9pEZy5ISaanqMr4Cj450pBM2qrNIcZkNZuF7gP390/ULLhlmmbWdSENg2CsKjhGg0iTCa6p5z6EwOFwwnvYrpuym7rlpJXMeDwQnGdEaQA+7UOE5/x2JsTE8TjjnHxdW9nIy4ZPiXW5UnLh+emJgWDbtDgNnzzenM3DMNz8GbUU1m1lPh5pVa+pD4HpeIRWVYHcHe7m+WryPGEd6CjKJSOhwMdPP3M6njTApJE0uJtPLHkJdFqz4cuLa8y1WrqthCrH052Ujs4xPD9zPJ3oXXCZEJbItmzkWnh++obf//73nI4Haw0d+dWvf8W77Zm385laOofjgbwV4p6ZnlJkWTbU0+xv6+1ocRHDNJKXlc8fP9Jb5/X8yrat/OGPf+R4uLPcIk3LdStMh5laG0uW6/b983vBTKXydn5juVz5ME1EL7VEb50QEzFopd4jF5qv1OZM/nXP+fzGjz/+yE8//MAYB37z29+K6Nky12VlPkwc5wNL2UhWX5prYfQys63rxs8//8Tz0zPDOBBTYh4nNnNOH6aZkBLXy5VxGIhjYhoGYgr87V//xt3DCfAMgw7MshPALYkr8EpdXdeNrVTWvCpq/DDjkLvxeDgyThMxFvEpVnD0+vpKSpGYJo53J5prPDzck7eVL59e5cJOgZINT2yNHqP6ObykdMu66EBZBW0NoxoH11xslfSkYdIH0ltHgdcUhblzW5e6pbVCrU2hj71jNRwkb1JCF4iu033g5dMntnXh6d076qbgOClMPaFJ+uytf3rAkVuntIJzTTk8RFy8o9WIJ5FbIyoG
DucLwWValRQa409usSxeuP7OvrrudamxH67yWQDsgYjNoMc9+mQ31rHbFBBp7kswQt/Rgg750LFtF3klunkS3P4FkI9jn3xwt01BX1cfr9r3noi+J27oH9dp/pdxLBj0gpHZUrz0HfrZv03fU626HOAWV81tBBPx0JGUt7nEsgS2LRLcxGobjgx7gldjMOi0dxH9COrsQdyTcwGfdJENQRP3nggbkz4/QxopqfDp02fe3z0Sp0kXQwo4P984yKUu+KaBsDfrcR480/FAraYuBOLgWJbMVjblP00zJWf8MFOKNmWPBs0UPWke+fLzR+7ujrTS2LL9pjEwjapFXlZFVrgYqXnVIe8c3iVK3+wV1PPWa741z1Hl55LvSBeBT+mr8gy0vZeNXippGvF48pbZuj5b3RV+/atfMx+PUnU6boS190qUaEEciavmmfHyWkyTcuRAUDy71NpqDGqtN5Tn7//9P/D0/MR123i8f+Dp8Un86Lax1copTKQQeTidBPmFgTpWYkxJ04rzeL+R4mAEctTt1TvzKCL2y3rmermybAuX68Lz05N0+8jQVWydAsV8V+2hzMcHXIdty9S6yRF5d9IkVspNB0xvtKopJaV0U+Z45+lVtYz3j0+s60ouFee7EVXiTuIQiV7wme+NZVv59Okjd6cH3AmGYWQcJx7fvaPUgl/hJRfVnU6zLstR1v9/+du/8t279zyMz5rW12JR58Iq1yL99RgjMU4seSX4aNNTZMkFHyXJi8MgjfImU2AYdPPrAyjcupbGzz9/VkHS4cDD0yNPz4/kvHI+n3k5f2F8GXn37huiz3QX1FDWG8Fw3doD3m1KK226gARbdFNIZBGG46CDqDVSFISylUowItc7uThL7UiBVMXFuJHkhWf7Ha7pmjZ//vlH1i3z+PxkcfCaNHVK7VQxgh6iPlSxerZtoXaIHhwR7ycdyCbP9K3Q2Aj9QgwjnUZpG55m2TPNSlwMv++yOeyCU9cl3fY9Ucl2wejAU9GOA5OumorQeAabum8Hu2Nfuetu0nOO0DrdVVM5NVpz1oGRvv7dm5z2F9vK7RUx65gzD7RN/th7Vv3+OssLsvvP/Z611HfWwqBAACdhba8iXAR9OXofKM3RSmQpkdoDn79cab1xd0oGU+2JtE0Ng1E/qSo+Ct4F8xzoawfvGaZRlwUOHxIpadAbp5FkMORyueK7YzzMTONkUmUNOs5JwTelQK6KBnKm3tEW5agVZZwNA9OQrEuiEdIIrfP9Dz/zcH+vz5ePdCQN73iO8z3pm6ictVx4eX3h8eFBqka7lE7xQKmVoSohN+diPRZVF1qMvL68EFNiuV45HA9Mh5kvL18IPnB3fyepcNkYpkkx4rWqs9pgz+y6gkxbZ1mufPr8wh/+8FtCnLg76rPaBwcWS7TlDWJkWxZGhwWE6t0upXC9CCkIBxmcJUSwwaDL1tDswS69M8TIb3/9W0ot6vEumRgkAKqWkeW94EEVWYkOiHSorhM9jNNEKRuX85n5eCCMI+u6aMKPgZpXnGuqATze8c23H4SNB5Fs0UUcgpvAqRikd0oR0ai13zGlkTTNLNcLb2+vvHv/nhAiaynQNg6H8SaZlavYzE2tM08j3/7qV/Tab6QsdA7jxN6yBY1hGCnLwuFwZJxU0VdKIcXIN8/v1ekaI9e3N7ZtY5pnhmFkWzIXf+Xp4Ul1m63RvDM1k8ioUpRfE4Ky7a/b8lVX7x1bzbroYuJ4POlnyZVWKofDZCocTSK9FSVYzipFWq4LS1l5wDGEpEt2OnA8PrBcNtZTZhiMW6lQ1pVaMiEO4D3jONHrZhNfotUiZUuQ+uy6Ltw7TxrUASwVjGX+Y07hLmgq+gAmO3ZBpCB4ko9Up5nVu0ihUktjiIpXyMF8Lh6sz1WGx9agVUWNE3ChwJaJQVp+HWSO6gciIpwLGe9GYdPIFBf9SHeF6Ff6ttJQMdOO/rte1HlgxLE3TD64ZgS58RNdhOkuicVB78Huh3zD0vWfGl8ZYEE9DmTI63Y4968bw86FODu0sUto54x+SXjvK8xXJdPOT3SLhbBIHMwZ3eMt4BL7O3sMYKP+QsXUoQV6C6xtZFtHtqxU5oYHF+kUdQpgIuYufD6R8OPX3C1xWk5ySzu8nQ2EyUq1QkhM48QQE2stDDHdYnO4v+PdNxOH8XALoay18OnjT4QwcP94YhwnGlkbhME0O2mfhsRylSKwOB26wzQyjxNvb2cub2daLdzfPzAPJ5TdJVgt9EpKI+t65fx2lkHOOSvLa+QibjBvhdNhZpxmJQm0Rl0bKYH3gWVbOVmVwrZu5Fq5XC7mVzhQ6ZxfX3hOkeo9edusdlnFSNRN79C6gXPUstGqmuqqTRA3EY7TM1tzRR8bNWmmpAK1nDflNTmp3CbjUEFpx9F7cu+kLnXU+e1MSZHTdOCyXIhpAGDJK702sxeAa43leuXl7Y3H+3umw0HqJgc3otr7wOF0lMUbOByFa63rxvF0L2WBPdzRKv66kVrBR2LQL7hHEXvk4O3NsawXvINxPpJi5NPHq/iNw4HH50dNC9UCoVu1aNtA9JIm7loO7wKH06RI665sInXzYlJQcCFyd3fHcT5oLcdxXq/cBavq7J3ZwePTE2OSYuYwqzC91srT+yeSj/QQCa1xOOjhzjnTvaNsmWXR5eAcrNcrwzBwNJzYuwvee10srXNZLjiioLXeSOPATz/+xOfPL/zpj3/gMB14907vhneeaRwoFoD4/PTE48Mjl+tqZLiiMWILvH75wpfPnzmcDkzTQXHYwTgLq0nd1VwxBVJJcu8WKUckSW7mcIDWpGYLcbC+DDuyQlest63Xws01gffaFfHsdDkE58ilQVQE/a7oUmR7RN3A4Elys5cqmKRVnE+4vYeh7hCRF9LfKiFUqpNqJ7qRNKw4Kv529CkGzxv8s8/w3Vzh3YxlnmDtbWamAsOeMqoQjRb/YeuV6xS7THwD3+STsTmbvbPE1Q6Wp7NDXL1be6FtzK114i5ndb+4Fm7rxe6+t0YJw5uaM/ksUqB99XHY9tOhukQz3qb2SN5GlqWBH9gqODfq0nN6Xaf5wNvbG70UfIg0+v/p7tr71HfpdvJB9z7tFvkUozZ4uYhHXBMUFYdERfH1Tw8Pv3gfAodpYKuOx4enm+BgWVYTgVw5xBPR4v6T22XW2h5P8yzFY/ByqzvHw8M9wzjjnafVbDHrAerGtnfDd8d8PDDME9TGsiwM40h0nmtuzONI8BZVEcIt4gdUA/z++b14x3mibpnzcmE10y7OUa6rJDetK5zQ6UxsBj2dl4XD4cBwOBBy4f7p0c7OTjKfQ72Zdh2Xtwu9w9PzI947Sm7KtWudvBXmeTaF4VcnfamVvK7E45G6rizLynx34nh31BjiHcM4gvN8+vyRMU08PDzo0t4sIdr4nJe3N8Hf9mzRinCzaRQ54/3ubHZ2eDSGYQDvCJaF1F2/Re362mm90Cy3PbeGK1llQnYwrMvK/f09Kcow11qTljomaDCOIykpwpZuMQzeSkY6uOhZrisff/6J8O23nA5yBbf
aaLWoCQ9ozRGCohsUDSLj3mGvJjWDRq6NcdQ6PIRIHBOHgypagw/gbcr24mxyqxbQhta1ZWGaJk4nKX3O5zdCHIkxEDbP5XzleDjI7h8j//rXf+X19Y0//d0fOKYDL29vFoqog3SaphvAUU1mGVzQBO8VWlb63lfRyWWjlcKXlxeulwu/++Of1I1RFpI3x2y1Ep8uRceUBkWM7K9Xs1A3+i2hMsagVb07w1ar1Tp0XPT6fdAFUlE65bZlnp+fqd2ZaU9qmv2W8T7gfbXXv95m5jBMlHalm5RZvA2kgJy0ffcXCIuvWNyGkxIFtxEoVDa8W3U4u93hLNHpHui2/5tqkI9O56/kL1+pWTPseZQK+1UV540wbr0TDM5xXREfe57SjV+wn8M1bbclBFyVx6M4r0C3XdW0bxldfEbfyXn7esFJy++9U+9EMD6mezpRai8XKM1Tc1JgYU1ci2NZM9PhQPdmeuzmh6ATY+R4POrfO3Sw2uvknHpLBufVlOiVmJpqNxlxF+9guWfJ4N7mOyNyWedN4pPeGp9fX3m8e1DFKIqt8PNIsNKe8/mK945hGBQtT7j9nM45Hu7uxHl2R9lWWkrUqs/A6XQnLrV11m3lelk4nY7swZ7RB/yYiDhc7Yqc8DIAuxAhePKyMU+zImiAUuVy9jZ0SBmm88RPM1OtTGngWpQ9FofE81EX2HVbeHl55d3TwDiO5E2bVa6NIXmWtvHhm2e2RTL+4JsN1rpkPEoLDjFaKi7EQb6J+osnFqD1TnTgfCBfrxQ6vRSGOLBFhYwe7056bku1n6MQu2oaQP0US9m4G06MYeDd0xNrUVVz9JYc2YIjdkVGXC4XuSBDlAEFJcGCoJZuW7e05p0YPERZ5ntHhTW1m8sXXJAb8+HuAZ88b6+vdoDDfDgSQmDdsr5Hd1ze3hjHGTcp4KrsrkI8MQWOxxO7u9R3R3Eoq926oNf1KrVOGoXrotTRcRw1UTjIOUsiGwJrWZk/fKvckyIs3/VO2QqXcrUIjcaQRg7HA21upKvCvsZhpLkuhZfz/PTlE87NRBe5LlfFn7uIS47XtzM//fwT/8s//mdSSPzpD3+g1c40jDeMe28VK1Uqs4os971VevdSH+26bhx394/8MUaWnMllY1s3fv74E4P3TNPMdDD8t+rCz6WyFWmmj/PR3sNqR6RIrr2YCTo1N9Kg+bxXm4J91BTcGt68NHd3d3z77QcZMB3EnmQu26WY9vu53d6LqUd8oKUoGtgm1mFnaVslOlMjhUTD04tKfxzmn3YJeoW+UUuk9o0hNlM/FYph+vW2aexeid39HGj2f8v/EH5xoXRdEPuZD/J+tB2n3/+Lt5QCu2CKkcSoJ1s8nbfOBy9IyxJs98IjH/YD3FnIH9AFArq6q6MUQV+6tshKADdRt0hu+ozlikFM+umbhzD5W56TNigLA+mmsgnR8qus6MgFbYwt41wijknprM4T0ihVm4lL0jTzdjlTS+bu/l7S7m5x4k5GzXmeuCwLvXfzWjl6LVzWq/iBu0fm44E0REpujPNEK5W8LAzzQWnEpmCUObPDJj9AtdSB3jtlywzTyORGrlylPpoPJLvk5ZuSetL3wGEM+DjgeuPp7p7z5Wx+K93W3cE4TqozQGrLrWTujnfgHNPpxHfjyLZJvah7P7JtC3/585/5+aePzP/LxHx4oqNO6eAiznemIRF8wrQ11KpNrteNLRcOh5lpniyB2x6HWqlBIZyn05HaJNmPv4Dkcy6s60rZCo8PDxzGkUZnWRa8UQAdmW4f371TOm2HmNDP5MwMCUxplKKrtj0CWdNVa51xGG+FLT5YZaGT6mW/ZfcIZ993uMHjoqc0TfZ7CUpMSe1PueAsenddM84XDtPINA6s68LhcAIcPnQOx5MeXCfCsNWGqw1ioFd4vH8kpkjOhRC+atars0MgREIwF2evXC5n1suVx8dHYpTn4tOnz7TeFDI2REouXMqF1S88PT2xkqE1rucrETje3cuZHQJ0xWQv20YIgS+fX/HeM8wjvChV0YfAh/ffaPoyruR0fyTn9QYHnQ5HZfwHL+y+a2OLPlBrI3rpxdvOKTRFa4vQF4Y8DJEYHxm3lVozpMhyufLz5UIaEt9884H7O2XU19I1pXg1hpVaSUOkFZmSeu0Kd3Pgq4g/bzW1yQfeyoIr4JKkOcErT+nd8zO5yoW/lqxQwt7p1QDCoGM3BKlvtlKovRF9otEIPinunIqEO0EFPX0/nAXwexzV788FOB/ZapOem4HXt8L5vPD+wz1pdLiW8bXifTViOAuytK+rOtHKDgyFm+lNOG83poTd32CwpZRL1im9hzcZG92dtX95VUzu4WnNQt+c9WS3pufIB324ZQwDiIJzm8Vm90GkMQHvPFuPbDWpryTD1kzm2iPOR0E3XkPbfrvt7Squt9s22Jt+Ftc0Ve/mvf01qL3gOowpEpNBdD4yxkgaBsZRAXKlVA11w6Bh0cN2EWQyT4MaAWtlHkfmaSR69YJ8eXnhL3/+Jz5+/MTf/d3f8ae7PzKliT50rtcrL19eOZ8vPLx75u505JoL23VhGBR9XVxneT3jYySlQRte0HvhomqHS85cLmeCj+rHDslEBZ2SM8Gbe6U30jwSt42as6XGqq9mGAa6CyTvyMDlTY1t0zAJYh8GXWIW862a2Mr17cJ3333DmALbsuFc53Q8iDeqyrAieOZpotZq5T9KqI4uwGFimkYr3JJasLhOapUeh103QM5ZHS+tkVzgeDxQq+DbWgvVtry6ZaqTXD04L6Ou9xZ+mOjFomfolCaDnkuJbduI67qqvzolDgf7ZpbYuv+jeAmLcm6Yc9hI5aA4i2hToG+A3UbDOBPNyJbzhq860O/uH0SamkPa2029LFeGYVT+uXc3CZgbREC+vH1hWTbu7k+EwVbCLbMaQRRjvFX/zbMm+23b+PHjT9zf3fHEIwGY5okv48CXz5/pwUsDbTlQx+mgYD0zBa3LyuFw4nQ60op4iFIKb5ezkl174+PLF+b5QOmFYZz48vbCelkZxsQ8z1RFzfK73/yG7969t4hrnUc+eUlNS2FIA2XbKMFxOhyMeJTuXiRmNyXIrRdQeH1TIqSIxcTj8zO5Zm0bJkve29HO1wvOReZptrAx+RccggnkHg0seSP0wDyMuKZdo1nsiicKpumNsik0cQwGP+WKH4MdensyqW0StsXFEGnrQqkbveqCdB68EakdbpWZ0v87m3AcIWiyNnZZdbUm+xvGIyGNuEG8U3OZ6jKtFoJreCdXNr7jzVfjvTyt7kbeajoDxVhUJAff9y1923DzL/g982mnDuwzkzuC75wuUppFblT0GjcvQpKKjxHfoxJUQwA30JsjtKAUVDzez+Ai21apPUmvTzMfhhdP5DvLm3wH02BkqG43GiarpRO6QidxwvNDUJZayZkaJGX1g8Qee1po9EpRGMfEPA2mhnQc706kMXJ3vCeNUhD54LheLuI2fcDOZQ2PSTDz9//2b/z1X/9GGiIhJVp3UrmFQK+dz58+8bosvJ3P/Onv/h3buvLp8yc+fP
j2JrN3IXA8zIKBvDNoteKKVIUepeimgzmoS2VdVpa80XLheDySEITWqgQ8e4lSCILLHFJ45ib56t3jPc4rYr117DOpKoQQpBQ9HO/4n//xP/Py+kItjZfLJ9bLwjfffoDg2daVYRwZk3lP7AxcFnGO8SjeqNRKcvIn9dYZ48hWNwbbiHOpjCkJtagXvHeMadCWgba2AfkpwuDJW8G6gsX/uc66LkQfdIbklWkcdUkNI9MwUFojjqYKyrXgmRSlkQutVsVAmyqm1mJuWW4YWW1dhHfJODOCXS5n0jBqeqJxuWz0jnDH3rle3hiGgZ9+/kQvhcfnJ7UxlUozDGyaJimiGqTI7RJ5fXlhy4WHh3t9/1r44fvvqa3y7Yfv6LVyvS483t+bCqTzl7/+C//03/47/+E//Sf1P6REc45f/+pbvnl+FgmVC8FLehqdDriQYFs3xmnmcDgoF6pWlvOVcRqUJHt3z08/f1TKbHDcDXf885//B3/+8/+g9M7z4yPfffhwOziOxzvC/YM8Dp6b/PXl5ZW//uu/8R///t/TXWeKSVfAtpmMzwjo2ileUrWSixHywhmxyae2xjyNfHj/gfP5jcN8UMxF6+I5miMk08F7NOEGeL1cOEyTAvkc9D6YwUyQhNbhQApRQoFWTZ2zY+ae3Cx1t0mJ4b0mFsE5Oilc08Pb+8i6ZCs7gtg8DFiYpPK5QDI+F61JrmXWTVhrMEx+9oNh8w1/usNgfbW2+Yj3A9SsLmevKd13R2kbMURVvgZT3/XNOIgOvemA7x2Hekn2bNdgEd0gVeytMW5X4TlHK+Z6LoLpsoPQA7mpJwIPLXtqK7dehd4gjgMpzjjnuTrY8kZpMEyT4NTUabkZVDfSXd5ZaGppjMNA7bCVppykHapoUiPV1nBB4FV0UY5sVC7UoziKcRaBuzfBOeeYRoXIxZQIaWSzLXqeZ053J7z3LNervAkhsgWFUMYkOLBieSDA9Xzl8nZhHmfuH+/55t17alZcfa2V492J3/3hD/z0+Qs//u1vcjCf7gSBm6AGr+9tUgRoCgmNKd3ELG6MHOIdY5JR71pWVQ5bqq6gtT1+XllMtUlgUXqhx0AMyFvS5BMaJ5lycdBy0fbuOst14XA8AoP4UDzH6cAwTfz0wxvT8WDPQ1WNwO49kiZB6V728/QsWev5fFHKc8Jgy6p+iiCnvouR0rsZoJU0sVUJR4agBOkvb69cXl54enxmWRbmSec9dokejyK0g4uUsnG9LNzfnRitH2OMSVHh4zgyu4E1b/qAt2bThVbliJroSlWGfS1NL7DvUDulZgan6sxlU5/C/eMjed1Y88YQB1wMjC6wbQtvlwvTkJge7jge7yhWgOGH4Wb+yLVCU0zE5e3M/f0d7775ht46Q0zUWlmumupTCNpoSmVbV/kDnA7Pl89feH565u504nq9crle8b3z8PzMw92diGgHW22EdcOlQbc3gs8GArmsChIcR+4fhW/GGHl7e8M5x+9/+zvelis4/fuHx0ee372TqofOPE7kvIkI7CKmtloZoqPkynZZWN5eKb3y9PCEA14vb7xcLjw83OthbNroFO0bRDi3DlUF6MHBukoOuuXCdFTrHkHql7oVrutKmkaGoA9/6EixUwt//ctf+O1vfsPD/SPdQ4ymxjEXfvAB3wutdCpZQX4W9eyplLypZMWb+sd7QZINYrfIDuORaKqiZPa8nS9yakdPdJ4WO8uqS5cg6MOHgE+O9XXh7fWNw/HI3ekkotj3mwop9AANMpVAuFF7Pgy4oIk8OHUc5+ZoBEreGMKkDygHXKsseaOW5VaclaIzPsMk1t3pstW+gbMNxePIdHyFQqU1p8m/6SDu3alXwskj4pIu3bxttDCS5oRzkYJgC98FZ0RLV5YktBKCmul8L1TTw7tuBUV+oCwrmOT8Bq3ZaxGCyGD5MEwd5Tw+RY6TzKU0wZsxBSKRGAIxydNzmEdKrVYeFG+kLgY70xrTYWaeD0ZAd3JvBOOmeu+4oLDL092Jx+cnfIxsmwaMMA3QOsf5RIgDh1lbYc2ZOEwWTlfpW6aleBPY5Jz58vkzd3dqVKuhaFPritwJlhMVSiavhRDUdNl3cQWVNA6kLrjFIhfVj2OXawrBNgugVq7XC703jrMMbd5FTeuDN+hGEOHz4wdCkkrT1UoYRxmLLQVBplAJS4ZhUgV072qZA4soB1clAFE9gPiFsqkdL4XIcln54Ye/cf8oo5xrjU8//si2Fb779leEIbEsK8475nFi9wNV85qdrAVPWhOR4811Iugm6l6NVqVpGvEh2HRqJeBYR7Bz5G0hjQPeil+ii5Yd1KF0aqzEkOixaUJy4FrHD555mhnHyT40gg1ECq1yIVtB+DgJP7y8Xnh9eaH3xvv0DaVUSqpW6+n5/R/+gOvw8fNnhmHgw7cfbomwAP/Tf/6fGQeRzH/729/4+NNP5Fx4fnrmP/6n/8j93Z0+4Dv0EppxM5pKFJ2tZNzWKqXKVCelh/D2XArny4XkA+8/fODp6ZlaG8XkbLt/YwiJSiGFgZYt/K517p6e+I8P9zwcNQmvecV7SV997+TurNBpY1lWdVrnzPnyxpQm8nWhpEAaoxHaUpM0L26ha7y9VaP2pgMPlI9TQ+D+cGTNmdwKoVlsRZBD/nw+czoetRnEwFYLvVVezm+c7o701nh7eZVv5DjTkOKi1ypyzYQP3nn78KlnOiVJYJdlhagLRPCJPqAhDqZOEqaeyyYj0vXKmCameZD73Bm009V/7WwL8T5SkZTWOZHFrXaTJsoRHucTLcg/PTi58tt6Zms6mFMKxCQTXyWrFnv/LGC8w80QpvMm+67DOwZSVFBgbiLF1Ueg9Fx2wtGtJqqKisPoBg8CKYx4JAJxruEN/xaItMu0tLWDxCTjpMRQpb+aSo4gjhFT9OzQm+hGUyZFvCkPhxQtyTUxzTLLDSkRUmLdzhyOo2n6TaYaAsM8KRLDB8Y04H2Uj2C7ikAO+jwdxpk//v6P+ASEQHKBHAQN91LISAHnXeTucOS6LMpUalIkdeBaMpND0DjaQL/57lvWNXN+eyGGSO4VamccJ3xTVEzzkeLV356CSsuKTBvm32gEHLkVapMBrZo9oNeKj2amXa/2jEZCUuKzPDWCTmlYQCPgBYunaYRhoNvzsOaNcRyJ7mvu125gvtqw643TcXi697RaKLlziJKyZxvgSi/89a//wn/7p//O3//p73j3/I5spUlxHOkdDsPMula862xlJaXRODb9mOu6EpI4v9okUArNa0hpBgX4lPQAe+WUdzC+YrjpqvGeMAxc1pXRsslDSIAml9PDSQ9nkVZ5J7qdZY+4bv4KY+VxUlfUJvVQjJE98hjgcH/ku+HXnF9fuS6L5acn4hg5HHSDX69X6pZx46hyE7rMXNEzh1HqATPANaeQr59+/onPnz9zfzrhOqzbxjzPjOMeLdCgCj93UVj7tm74GNhqxbWF0qsI9FZ5enxgywV/vXItmeACwzDYzSwS1yWF/Q3DQPCV3tHvEr6SsS/nV67XhU5jtsDEYbAa1bpPO
J3z5Y0vX14Z342keaSVSq1S6Gylcn57IwTP4XDAdR1qwyi4cHfV6vYQ9v7w+Ewu6ubeLKY4da3VaRiVQFAr6/pG6ZW3tzeW85W70xFCJA3RoBYvOK03ldQXwWItRjvPpBpppWpTG6z7d9lY+8oQEzENtG6Te+cWzOa6x7ugbbYWYLROZMmlS3XQFAW9u/X77r3wjpaLxVmDa45pnHXJW3WvN3hpnEdiH0xNM0JQBELrkcu6MiQZF7vlVxWTVDsvGMk5TwKc0/BxyVdK6cTY8K7fNiSBEo5o0SnOeI79Pd5KZRzU7cA+VFVp3ZvDPDfqr44+WG6H5Nu9d0X+NyRZtxBHb+nGwbak2gopDeywc4rhFuewLpmSOg8PjyrxMc4lxV0YYkCNHYZjHEw9s1P9egZAiqFm0E6tmyZiF/CtqTudQGudt3VhHEa9Pr6Dj4zDLGd+24hupLXKcRjp0VNzoZYqHtEnDnMkWRBlLcXiNnbVoKNHz8hgsJPBhE35bt45SllpUVN/7x1CIqIq0m3bIGd9fuPAMMkXRq94Y60EyXcNXHmzylPlWDXnLGlWzvmyZm0bB2/qpK7hWysfAKVVXl7fuJsmhoMCEJs5prsNnvIhFT5//sTz4xMPz480i0J/ev8N8zjeLqa7gyoI3q5vTMndxBjeC0LuWyeM4pFTh+t2JZac9SYbC76nDbb2dQIlmKKmKu6i9cbrl8+EhwdSOpJztV7owDjO1FLYcmZIWumojXVdVUiCI9SmS2QcCd0pHrtW4bOmr+3NWYIlemhOMpEVcwSO40DeMjUXXHA8f/NOh/0qOCsNiYfjvX4Pr797nA/85Kym00lKqmrWQmqDeis6pO6txa7beufxUW/sMIxc14VeCiFEjnd3bMtCKZUxjVzOF/7pn/5Z8rPTkWVbGUelYW7bRqWTuhRf4jvtI+XUa/v2+ibHZFB/9K7cAtiKEi97LkxpIj1J79zQwbNaO9aPP37P+Xzm/bv30MTNYJti7vKUJGuvK1VZXcfDjPNHtfE1xV0kJ1JuTAPJR671yo8//MCW1Rx2f3cHsm6pgGrJLNcLPniGmMxDbFMh3By6Ak2qEcNVA0jyDC0oKK83tusVphFvAwQOjseTZUw5hl02bNN7a93MYPrA9g7dKc4hJf0kN+jAeXwc8KM+0NENhk8rUXYej3bItK/KE+0VeIoMni5QXFGekUFrwQX63mrXTfhEM229Dm/voy66Dg6ZQeldvfAumj9AWHVZV1yDeRwhJS7nN6noxlmXScB+/kApUm6p21k9JR5v4YVOg4j3Xy+JGGm1MceZcUx4r0N/H/q898zzIkHIqC2wmFHyeHfS69v3NkEn5ZTBfr13M7pJHRl7sk1LU/paKslLAeYa+Bi4Xq+koOw2S6TUhRc6YYx0ogVZVqEWTgkPS7twXVeepyd6z6xrYauK3I4pcRomPE38aats10Ub7DzfMsqoMvPVKOPg6CN7vCJUXl5eKTkzzTrbYtAQ2XPB9cE8QDKn7gGFraqgTQI6qaf2fp5SC84FxuN0q4mWL0IQknOOnjPbdaXRefvymbJNPKV3DLvct6oZ8jgfJU5xjX/4D/+Bwzxxmg98OZ+JQQkIzjnWujC4EeeDMvZqJRvflPPG7Gfujid71qXQ+vLlC3nLxNfzqwLn4j4JijyJKekFGQfJPpsePuFhnnePT7ghgrMD38lso6pMiX97rZpgHCzbAsDpcDJzk4Om4nXX3U2aWFu1ciOUFRMiDpinkWVZbaLTh2jnUKZJ2u23tzdiitRcuDueKF0Be6fpiA+B0/2Ru/lEnSrH+cjD6UTZMi6o5a4WlXC0oAkx+sD1/AbeczgeGLwRfathdTFRW1aaZV65XK9M08Tj3Yn705HmPTlnogtctyvTMJFC4Hw+M9gGUasuhNIqn758ZiuFFIUBBwv+yxY/nPNG8ImlrPjowTvq9aLWKuv1fXl94fPHz3z73bfc3z3qsLOMeNcV81Brl+u5FlrNxKCQs+5UfzmNI8u2kVvRVlkrISD1Rky0ZWWIgcfHB5xTQONlXShbobVySwPGO5KLdDM17jlbpTXysjEeJ8lInSdFYd3ew7oWuimhnOu0pg0gDgOPj4/aRLpMSLu8urVGCP0W9ldatYlUpqgxJkL0ZJP6jWZg1IpvmIs52naJcUhKEu6rNfYFzzwf9GyjQ2rfMnYoTWnENjVb5ek4H26bRnTBvCYodbkbxGZSbnzEtUx02qjOb69KHB5Gehc567xkmiHsRffyL13WjVoy0zByeJp1OQ3Ctb1N/t5HmT2d5/9H1X82SZakV5rgo+wyI84iImkRNNDTO9M9Ivv/f8V+2p2Zlp5GA0UyK4O4u5FLlO2Ho2bZA0EJgEJWhLvZvaovOec5iUw/aPyRc2Z/ODQTKXR9r9Abo8vNNyl05yXFTk023Q8DKcPr+0nL6N3IbpwQkUVgFN/GXzEKdTJ0QlBnJJhp8eNgLYN3TVnZcuFLVda8kToqplV+AtPUlNUytvF4WlXwXk9XOh8IXUu6o8r4ayRPz0XLcYoRu6kfWLcVlwzBBnXtRfpBqiVtibfzCec9u2mvd2JbNZJvBr7UFITqsKAfJHdd4sb5/E639XRdYH840jslWsp/gTpvp9G9M56tVraYCC106OXDB4ZBBOolJXwLWasVYsuaCdbz8fmJglL6utCRY2GtUbsNQ7vQ9T2mFmdo23grlUyN9Z7q9/5+4vXbV56eX/C7cdKU+E49tOTm0uX24KKZmW+SuVoK3OZlsbBuCzl7hlH/7K2K03jJskUdpBij/Ow2cjAFtiSMbRcUen7b9l/ma1umeFIqrDmztGjOdV25zEvLdVZl+vr6yvl84sOHj3S9xgFpU2a2sYYtRXof+PGnH/Ft4eM6uY8DIrHi7D2y0BhH3BKvr6+8fPqAt56UEyRNgL2XGcb07WBYV7brSh86/vk//ieWdeF0PtH3PaVot3O9zvRjz2W+sGeH7RvWIWW2pPl9H0IDo+kQM7l1GtYwDKNQ4kncpVoyS5MJHvZ7+i5wOV+0lD8+3kc+10Xa7pQzvQ+E0AxJGIzvKE0JZo0MYtKJF3KOjP0IXpeY6z0fP36U4iEoAtaUwrpFrpe5tfyudQzt0qdqFlwS8zwLdugcOFWhSsZycuvniLOBrvPS4dMCg1pFV3JuBYv4SwmpvLa0qRKvtjnFxZHSgS0tPA5Gv6NrmRnah6mG4zaCM9x/dj3I6h+ctzLfVdFjb0JXYzThSdT25xnWuAllYDxb2igl04cBYw3LtgjuaGiub4OzPc4VjbxKafTc5sAPA6lPvJ3PvDzKZ5CaFN0242jFYo3gldfrVYqUviN0GmH9PiBvgMuuux+W1jmsdcS4cr7MjNPAOE13fEPcNlyvwib4QOcD1UK6LBhTmecN7zs65znFxJevX3lIWhxPdtQ+Mislct024hLJORKGgXHw7WOUSCT0gTVtOBPAVppCl23dwHli3nANcFeLUB8GheNg5NnQUt6yHyec08I9JVkcjdEzsD8cNDZqk4qUKx7Iq7p8u98RjAEv71Q14PqO
76fvMd7f93Q+9Hp3S+W6LlCrYoMNd1xJSYVf/v2vvJ9OVCpPj0+M4ySFWKcMditPAQaNeUuslJy5Xs94Ywmh10FuDEtUFHLXpgA3T05tbLzcZOnCggg7k3Kkmk4dU6soNIYeGirGyhCcEl/e3tntduz3O4IPTLsdw9DhndfSeVlWUk4c9ocW0qHbN5aEM52Y8cYQc2LbVs3tqxQrneuIObKtsQWYB0lnkXV/XWZcM9OklMkpSXppPWEQilzwt6LAl5KpWeMczU4TKUa6vmO3n+TMNnA6X+g66YJDFxinkf1hx7dvr439LrlmzhptxJLwXS+3aImQM1sufPv2jXVZ+OMf/wjG8P7+zsNhT997Pn36ROj61ppaYtzIS2J8GEUh3fSzXS4XhqEnpkRJEdM6hMfDgfM8c/rHidfXbwTfMYwTD0fhCWI0zMsCpnLYH4DauFnaG2w5kkvFYcmm7SSs4Xx+l37dew57EXW3lHg4PBD6XrLXdkh44ylNeaKxk4Zcy10Zppmo5KP67hX5aoltobbFDdOC5j9+/EihsK4L1XvJOdvOytt6b7nlm1GIvMFIXqsxOT70rOsmtcckxHO1jQ11lyXeDu8GxKsVI2eZgH2+klftErxzhE5GrdRc4nGLzYhWMQm6QRnUREMi01V5DYyUCpIU3xzSukP1HrR7I7VgH9sOeO11Gp/JVHLRHk4/fL3v4HIz0uWUqV2b0VvB+jRyM8QtYWpbstobNdew3x24Xq+UKtOhMfLFl0Zczs08NvmOw+MRb8M9qjKlSE6FaRzbmEod97KtrVqX8u75+YUQAuAIoaHBMXJQB6/vqY2PDruJOlYul8sdWbGlyP5w4M9dz3W+cn0/0XU9IcDlOoPVGWF2huusIlLGUHMftyQiNSWyoZnSNG1wXYcxDnIkpSLptTMcdgcFouXCMPaQqoxsbXzsc8TQtQLLKrkPjfrHYbhLYH2By/XM+/nEOAz4bSO0c+c6X+mCKnjamNpU05I6q5bWQNxUrYfQFKDc7z+2rCJmHCemhgm5XWg5CddjjXZqwWrXNnQd04ePpFJ4f3vHJIk6luuVnDMfv/9eiq0Gfcw5quu1VlHQpbA0Flfn++YgL7qAgeuysG2RZDOh77HV8vn1jfPpnfl6YRr+SD90DP0TIXSiupWb1rYpFSTd1EIq0FQpXrI5bx3J/g5AoyJZpRlI60a2Dmcb36b98gUwObW5ayE7S+eHxo2X3MrUpghp7fc07fUAVemuzc42GabkW4XK6gXoui3dpmnC+8Bxf+CWkW2MZLnWOnoTiHGl7zseH56xPhDnd94vF+K2MU4Tz09PfPv6leM04UKHa0vRUtVt5RixgDeS3Zay8fr6SkqZ6eHA+i7WfGmzYaqj9x0vzy/kUpivV67zmbfXd7rnZ0i3CtISvGNeV26BOKVhQhTQY3S6VkhxY1k3rJN5xlmngyQX9rvpnhS2xowPEEYt740J6oYqGG8InTABtornc8OcqEEXgyk3BZpt5ybONcRDaRWxFDPBOGwVwM9Z+/sl1ZbjBS3pjZHPIKdE3Da8c2wxkdaVflTs7O8Roa2LbQ7QG9b8BjkyFbo+QBbB8gZAC10gxdSMaXJH+6B8gZKEiryZ4RQHapuiTYW3bbknoKX0zVhtELkzN/GFd6apqyqmaPzU7ndijHLPB40VqeAmVbrynlShv6sq3T507RCzdxl6ypE+9Oym3f0/A20XgSVF7Uv2w4gfep78jhtw0jnxympXmcZRFWTLNd+WyBoXxdJ2PTZ4nl9eNOtuuzYoYh/V25hC3eC2Na/RON1T8eKyaQ819Lyf3og54QzMl6t+l5jxXp+zC+ric/FUGxTEZR05a2dRN3ml+rEHlFdiXWNHBceXv/0d1wUe9o+8nd748vUz//Tnf8bapuiLseEspLKTuEDqQo3SPaWqC8cErJcRzjdT6zJf6foBkyXOcPbCy4ePEt5s+j1D1zf1lZbL434iuE7ClhsBAnlO/vmf/4VlvuCcV6dxL3poQUyWvnUK3rkmfrBYH+irxnnbtmjv6zuwUZfAttEFcaHWlMm10lm5kTKGlCKD7+iDb7ktIh9/+fKZv//732QxMJU///nPPD09icvXnsdcC7smPS4l65JIccNYRwgBRyXVqiCboGVXihvrvEjm1lC69yS427KqaCTi2kNl9K2xrpGvX77QTRNd37f5sRbENgTxlJpSRC+1XshtXTV3tSPF1LtKCqcqxNTKh+dncq68v73x9vbOxw8v2GqYponzfKGrkjpmJH/DGFIt7EJQO14qu/0eYyCu4rNvMbHf73DBQZMtFiCuC9fLQvCOYRra61r/p6qvcno/8fXbK/vjkY3IZZ7pQwcOnl6e6YaB5TpjjLAEa3NedtY2+uXC+Xzi8fggtEIuzbyoB6+YQq4ateyb/b5WuUFrpSEHTBvn2abC0cdmWsWX4oYLHuJNBVNYY6QfBmLJwob4HmMqSSN7mQ0BQid3eNXlUNvvbr3yjI0BHxr8sepnvy3Hamn5GaA2uM1H623k4FyTeXq809owp9L2MJaYsn7uqtm0abPgagVzvF5m3t/f+f6HT2glLJ9LIAD2rpYyQbuJnACEj5EEUDuQkm+OgkqNCg5yzlJyIRhDNcoq0MXuWodjNA5xDoeqwbUdGO5/+gJM1cLeNOez/m39T281Z9dospBa4BPNO0ar7g03k2lP7w3W0STeIg7U1tF1QbN925bR2ikoKW4cJ1LesMbJOOVFGKhVEwXr2iIdo6xqBkyBJS387S9/5cOnj7w8P7PFRFw3hk5ZCCF4Pn3/fZMXO95O7xhjJbdMeo+C9cJtVfGhUozY3mCcMj+CkzIrxoQPDe5XBTQ0Fb779JFUsjxR28bb2xtvb9/48OGFZVnYYmTqd4pirtqtYgzxeqHv+/tFqE+z3FMxrbXsDkc6L6lviivX+cK8rDjvOR4PGj0ZwzAIi5GLchiUQKcEva4bsO1n9Z069P3xQUo0bpq2lsPRsrQV6qSiyBujd6MZ5Pb7HWkLrElZNH0vVzcuk3Lk0O24zhc+f/7K09MjwzhKWdkry6OgRXmtlZgS//2//Xc+f/ksyGfMHA97Pr584NOHD4TQ0XVK8cy3n8cGvOijI+smY5pv/ghnle41z3r5lm0jbivfffrE4eGJWw9ujFHmQr3Nv8CapmaIivH77sfvW3xnQzlEhYu7tNF3HRRIKerh7npyWsgp0k/7lnhn1LF4R01wupzlFciFt9OZ09s727ZhbQvxMYJT3R6IEIQxKMBht29z2ERFRsIffvyJuKzEXFiXhYfjAeNaN0Uhbxvz+cp1vjINI+O0o5RMjKlF/ymi8bffPvP6+o1xt5Nqo+ogckhJlWOklsJht2M/TffRhjGaAYcQeHp6vo/Kl2Ul14Q3oe0QNI6xnacvA+uysG4rNWrua8rtIVR6WsG0hZoOGWMNfTeQS2GbL/TDxGW+SIrbB2JSdvFNLmnaIRabos3oZIWm2KEJCDCV0If2PDRnKILLUTQqMvb3YJ9assJX0PLdWHBdIG4bxkSyFeiRFPUc7EZ
cq7QzunwcN9w4VOP4+vUz5/OF777/JA9IsKRYaPBSTNGYpglBqTlKB+VuMMNGB6apdrLaCouYVmSJLDrvyIk26sl3NdFtHl6MPDUhiHEkvtWtpGh5De3/FqDQthFcaUqwQjH5vvjVrsjcOzLxrrKWz03K6ltAmG3qJe/8nW2mkYaKIk0KanvPjsLKhw7XRCvWOcZp0BjM3y7t20iskHPhcjkTXgNPz88yMPYdeYtcT2e5kZ1hGPf3AgcnVEysFVf1LNzc5SC/AgjJEqy9d0JznAldj3dS2znjWdaZ/fFAXDe+vX7Be8thf+Dbt288PT9Rs4qKlAZqRWNu7yUlDx210audEyBS93dmmkbez1lqzGHU91Erp/d3LvOVqe/EXbLyiLlpwNjQCicj4U6F3uuMC82ndQtv8y2ILMWIrQXvOj0v7azVudimB0bvrwKLEn3L3nGNTJBS5LLOWGMkojDynnXe8f76SvAB23t8y2O3GXp/GzPN/PDdJ7rQcTq/g4Gnlw8MjR/1/PA7Lom2uK/O4m+uy64bJAnNkiTadqNbapufRXLOdH1D++bfK9RiDLZUqT2QK9CUSq6Z4HvGaVAlVwx2bG7EVQdKqW1RWmoDp4lYmLMMQubGvGkP3OVy5R+//YNppyXRNs+s68Lx4YHdNFGqmPNSYql1kqImqYoxGiVYa8C0eD9jefn4UXnDpWp+317lmBMpJoZRrHrRWV3jIVXiujCvG8M4cDgeyCgX3HoZz3799Tc+fnyhFhjHkWVZmbeN54cH8epTUqtsxN9njS3bRmMeAfnUtZk2ly8NKFeRCqILXgeZt5h0W36aVr3oqrOApzF+SsH5cI9sHfueaoRDp1E6TaFFWArfkE1pVXZtG1vRTKky4aSUtOTyGl0ZIxnxTVFhTKW2HG4wQsh3YJzBNUmsDzdMNdSasJ1j8CPgwBly3MhJ6V7lfuBXKJV1jcKn5HYcN6Vc3rLMZUXRsTc/h5aLumb08wq8Zp1eUmNvPgA9LzFGGU2LXvCYI94GWsmqkVJ7aJxzwmPXer8QbgUVzZ2sZz7dR4vGtLrLiAeVbcY72zoZr3vZVELvsW5o+x+5op1rB2xLh3HOMfQDfQelauTljKEbRPxcllW7mtISINuJ2XntB2otzMtKSpmw1xI9Jb1Hn777nuA71m2V+a22p8vI5FcLrPOCBeZ1wXsvIUnV3spWfQSlyb7NffmEFtZVz5j3jQ+WM8v5yjgObW+in+90uVCbQshiiIuCtlRUCq3yj99+I6fEH//0J4Lv2mTAtqLFUGuixMo07hj6SYVNzBIcAJ9++J7eB6b9jr6fKCVpJ1IEO82aVzY8Prg+QBL00LQv1RqddxI46Pu5iWNu562DJt6Q+tDUtl8phbfTmy7vYeC6LqzXC19++8puNzGEjpITwzDy/fc/cD6fGYbxTieuNyWosThr6HzHP/2Hf+EPf4wsm2KVbTvLbtEEpgEKjWtIHcBfzmdsu22Ntfc/3FQRQcdpj3VB8+lc2E+Tvk/rWpqSFiWpzahvN0euldBJG3yDBRqLdOV9wLrfLfXWGLpBeOAtRVzXMzU34E1tQ6vunPV8/PBJlXPrTLou8Pz0IBVjLGzLgu9aKlZtNMRUMUEBJtaaxpXxdJ1nXWb2o7Kthz7Qe0nbnDEyqaWIDYIfjtPIbakoA4zFOcO4G7Q83triLATeX9/4y7/9Kx9eXjAYpq7HPD5xPb+zxK05n01LR1NhE7pATplCobMBwk1xIwOVDoxMTImu69k3ZzdWMuJiwHrbzDbiPVknY9a8XNjt9gyhJ2IFVHSe0A/6fkqhuFbRlHK/EDIZqmCOYNpcv4okaSpbjGxNnmymG/kUHIbSOEe3Rd4tsObGcKIpg2qh5frmdmjrUXJGhq9aKjE2hZN1uGxwXjLSklMTGARMLS2yUeO3SgMjopdPWh/tqUotPByOrYOwGKsRlw8ee5sSVahO/h/Wre3+5WnQZdouAfN7ilq9dQW3HqJyv+RzVRd9U69VU9ps2+Bcw0AY8HiSU7RvNwSN2IzHB6nTvLWtKrdNKGLvqjQouIb2vy3gS2oZFc4RBqBW+tBrAeokRy3N7b5uiV///g9ePr5gjWdNq/I+rMe7jmma2JaNeV4IXUPglEJwgXm7UIrGWw+PD8RNE4Jb12m8uud5XXk47jHIHGecxSIYYAW6zrQutPDl7Rvf+Q/sj0cAQj/wxz/8gd9+U2Ru1wc9t8VQsnZL/dDRdT12JzVRqQUausQCxla2JWGrEvVyG58bo73gfhz4+YefgN+9MtbA2A9SRQnZdB8RudYml+aKd5i76VBsvExctBsdB2Sqi1FdnBHiJd4UEtaKFN0MqDULC9I5T9gf+fZ64tvbO/vDEWcd4wRd6NjtBCH17axSOqShu4VedaIue+/Z3ywPSIwki4Ml9B3r+wlbHG6QxNqnWiiLIkpDJ0nj5Xqh73qhao2VWsILfFcK1NqW3Dn+znFCubjWOeIWFUQfQlve6qB2PsiRWi1dkOxya47I3nuWOFOTIGegXUdJuc1VO1Uz1nPcd3qpjeXp6RHvdCGQMilGPHJ/5rbYK0ahIKPT71cQ+logNyl3LstC8B13ZssW5d/IhbFX0Lozht1u3xQxUiEN49hmyW3BWgrGOUpKDGPPp+++xzdlSK6Vse9ZVi25zucTh8OBse9J9aakqVznK2vcWNeFcZoaZkR4kFSSDhHvoVSic9BywvXhlnsV44NXIl6S0XFbIt7MhP2B4D12mthyovOSZbYJANVxR4pYRFottyW0gXlbcUDXD1hjCM6RvFVuRa0t27y20YFK5FpVJHhjtJhuB0tpnZ72FlDR91JbzrbghIkt64KorUvQB+6wVgE4h4cDpbm7Ta04bynOt5jRyhJ1abatCMY4xtBjMSRlOt2NfL+rnHKrBitbLmxxU+FjoVYVTbpMbh6J5j9prbppv0MsiWBltEqlsi2R7DNd3xHo7heSawdozJJuK5Crwf+MqmvnJVsNXhJP40TBdb4dgFbyaNfGOsZZvNWiT51labsoI0Xi9cpunPDOMW+RPnS8v1/4v//bfyNukeN/etQ+oDh6Y+kfDvpsWqqf856uCwJJZsEMLYVUzD3/2jXfSa1yf9fadlw4vK8KSGoKttJ2WbVWtrgAlU/PH+j7vhkGW5dhDceHhxYzoIjiaB2+m7U0dp6P331iCL2Cy4ppu0ztTi2GZV6I28Zh1wjX1hE6y2G3V7HSLg5rA6Vs/PVvv/D0+Mjj06OeoYZIcd61PRP3KALb8CGlVOXr1MputyfFyOvbK74pPffec7lchB8fOoKTdDsVfVeHaU/MmZoSNTjmbeWnH39gXdU5XeeZEDqGUXk6l7iyrLFl59z62HrH9eRSMA01UlJmmEZM0vQlhACl8Pn1lbJu/PTzTxrVj+MOYyA4yVa3mPj82z84THt4eqDvZR7yPiAMzu+Lvfuc1DjwVtP3XIlVhhe1l6qMSimKb2n7hWIa0/0GAUsRgyezKSXOBal6rGsjosTpdKJWUWIfHw5UWxnHHQ7J+mrT+9quw3nH17
c3MJWn44Naf+dIVYlixUpJEWPkuN+r6m2Hm3AHUMvKr//4B99/+Mh0PDDe2tRyW8kUKYTCjtNJSo7SYlfBMI0TP/7wI7cQn5ozNTh2g0JYbq72DKzbinee8/nCL7/+IqwyBarh8eEBa+BtmSUF9lIYJTLBO9Imt7xp1uwtpwaFU+dh28z64fGBlGMzeqmOCC5QjMUXNDppUmSlx/nWBdyqZVU167yw3+91+DcXsWuvva1WS+8qh+tdcEBpn1kVrdN7qNqBiNhqf0cnCOBELlHMm+YWNkUMG+daZ8kttLFQEtLE24azsB3OinSrf19VYm0k3GmQTryA8pRpzCfjdFEYoZV9EFZBZGSNMqAHU+8AwNKECwaN3kKnVMBafo9SLcbijWFbF7Zt4TgcG0tL3ZkPXuOa2hz5DbzXdTJOuSATXtd1bYek7OjgFQBlvWvZ5/JAGOfE3rH6ZzJgjA6GXAoueGJKAkyGRClVqIqYsBQ+fvrE+/nCui6EoZcirQpaGJqy63pdSEk8K9cIwSFo32eNEbrHd/hB+yRvdPiv68qwn/SscZMZ6L9S0aI1JYWCTdNE9do1lBWWdWV33GHwWO9wsSFIsoq74/GhfdeFqR9akaHCNhWRBmqBYmG/n3j/snK9Xtg/PBKsZ1uXpj6Sd+nhcMQAKSX+r//r/+Cf//lfeHh6vI8HTQvIKqUItEjrxlKWizx0dEOPd4ok3ixNJq9LKJfCl89feP7wwmG3Ux5Pgy+Czttv374Blb7v2E07Ykz0w9CgiRCC1IKu8+zdgffTCVtVbFdqi0G2d/m5MYZt3Xh7f+fD7Uxr05/L+wln4LotvJ/PfHh+xlsnFyZtxlmcIGNzTuRvb3z82NG5oKo/bnJUW8O2RZnZmnpAWALRK4d+0q4hF6rXzFYyMy/sdyqYqHlZZ8SgmWNmNw6E4sFb5mVWWMm0J5fC6fTK9XwhJY03/D//Bx4e5Om4LeMqBbza55Qyx/1erV9zNJciE5wx8mWkhjsuB0kaU0qs80Kxld2gOf31OnOaZ8bDDoO9e0jWRtqkgu0CvpljXNcpvjVGaN6DNca7HpyawIlKudurKpuXmS/fvvD09MyWFr0I007wr6BbHyBui9zPpmOLWioqcU5jkiXnBuMqVGebKUquTYxh6Huozf1qhEevRqHu2Vp670ltzhOGoTm+1G0VUyCr0hv68Y5+d0Ji4p3c9rn+Ph6koRpM/T2kygBbjFRTWba17YmaaNBo3pyK8A/rpoRD7y29h+QVx2rQ6Esz3CadBkKv0VZAu4VSa3PzahRqreJtrdMuIseCzTdchH6fTCHgKVviuiyMBfogI5Lkqq5dDFqeZ9qfT5Emft3o+j1QFdjivdhmbeQ0jTsK0PdKYqu18ZSCSMsYiROcNzjfK6Fs6NvY0AjKZmg7EIUZuTa2M8046rsB75WMqCq3XXzVgpVyaegGSpRnyTlBBeMayTayOxz5T//pKFm1qYTqKM5Qk8F1VsmEDYCZt8Q8rw0x3gkjnprkIxXmolwVqZwS63ri/XTisT5gdntM1my+OO2naswarRX1lLkW/vVf/xVT4acff+R8vTJvCx8/fFCOQmnkZy/sDRambt8EGDdzrgooX7STNOid8b7j0/ffq0OpOodK0WdfMvz6t7+z/+cdLqiT/w//4Z/56cef7t+ZXqva9pga8Rkr6bg18H5Z1bEagzWO67xgrWEcRoopbJs6zJcPLxzGHVuKrTvV6HCLTQ7czJF919N5xT2XUiAVrLeKVTCG3lqwmcfDUeqlJLItFUwDO8okLsRKnFf+9f1feX75wMdnSaBd56XubOuAbVvxqVRcUY5xbRJK1yz4uWimXYMhbYXT6yt9m/EZY9Sem+Y4TpnUDkDvlEmdjcGm1Mw7QjS7G/2goTBS3CilMoQgF6nVIXA5nzidL/z0k1Q88/lKbIC74KXjvi2DHAbTML7e25vw6j4PVPZtJha5GYOzDbQlf8L57Z3BD+ymwNvlhAUe9opI/fmHHxh3I9sW9We3isV5wzKrY3JFD8q2CkBoQsOuVxluctKcfd1WQg1SHeUkTbUzfP36SkURmtMw8eHlA9N+0sK4SkhQqTw/PbKlzHK5tGAkx2oMrmqBXUqiG3dt+Spl041Cq6pZM3NnDAGNmKhGv4NRR2OrDFry8jZCMA6HJ5UNcmUYJ3IWoth0WlLfnomSN6xV9oBAyJBKYlkWjFO17LuWPe4CxlWc9e1B1kHsbKDWIuVOrwGRIHxSq+EcHgtZ+BS1+i10xpr7oUmuZFRZVwPZybudUtG8uAvihRWjpW9B0lkj7MrQDTjTwl+8pwxdy8LIrduQCzu3TivFKK1/U37d4I0pt7/PO3zv5U3wzVxo9L3fksp6ekxPk6qOisstmb5XSljNEZzHd6Fdvlk7gi2TozqdhcqHpwcMlpgrrlNa5K1Dtl5dgQuOrnZCmSTY7yYp55z8C8M0Mp9nhqcej1zythrO1zPbsuCcZ5p2xLhxvc463HxgmCZyyuz27bCukoeqQ2ga/Ao1tsx4I5OaKRVTFR9rvFDkn7/9g1//9gvjbsc//VPPxw8vbFuk5EroLM8vzxq9tJF3igk7tjCmUpijSKimeSJK0W6BalpGSsB1HXlLLNuMteBdT9cPgFEGg90RQs+//PO/kNDFap29d70Gg+18y/PRc1jQJYrViN6agi1VqYw5c9jv6CZPzJXjTjL8krPeRSzOON7nd4ah57g/tPEtUkGpLVTRkkTfzhS6PmCzJVudh7b5rNYYmaZRExwqFAW8rXFtuxZ1stu2KQPDBmLMDF2P9R4fnETpcdvQfSPNh5MEQ5vyahqFsRC8cMGlFFUh3rMsV+LWUAfVMDZPhKkKj7lerryf3nkphYfHJ1UwrX3Sf7XDtOQmfqn0Q4/zglnFTYtZ7zuOuz3Tbs84Dk191ZRPtVCMDHcKskmkKt3/Y1AriYXz+QpDQxPUqvyAbeP17ZVpHKQSCk2yVrNCzFOm66R8yLkQBunSx2lqP/3v0t5glcMckw6163LFGae/nxv219z10c449tNO/CnvqXXjw4v8H1+/fWPstYAz1pBrJDhL7jvB1pxn6PQgTPuJiZ12P+1y1KhhEJelxaNWtKAyrnlcEA45VQUX6TuPmFyotrJtEVyWXI/altftBZznpolXYE2pwNYOXJqsz5SmhhKoTyFCjlwaubN1gPWuejLNHS+TXs7qeGwx1GoJ1oBrHsRa7qqi2l58o/0kOZVGiNWLlYvkgNn+7qA2GEqs0rPjqUTJsVsBYnwQN6qNn8Z+ai/qTVtlmrtcrt4bIcC0hXnfaMcpZ6ahBxe0M+o0PvXGyrBZikazWIqzHA8D424n6W5DdN8AfIqb7OiHvo2kYOh6+l5KtPf3V75+/kzJWe9I1rirAtcYoVambiI2afg8L7y/nvDOcHx4UldaMxXb4JeReV2UP5Aza974x6+fuV7OjLsd33//Ha7rCKUIa2GFktHzZ5SJbStxeyN0fWNQoY7IKcL1JmoAi+vAJsg54n1gmVdC8HTO3uf/tVZC1zXiiHLmqQW7G
xWQFpPS2LxGfPUW6VphXRYlw3Ud+/2kgKBqcd6wLjOlVol1MHz//fd67g0NP5MwppBSVuCRZjd33pHeFb1G3nU4E/F+YEuivXrrKWmTGrFqNHm5nPjw4ZMa79ty35omqjjoM3JOgU4ls5t+z0g0Rl20D17qvXVTRHSSIjV0gffLmV9/+TsfP33HbhwpuXDZFkUn58y02/GwF6pEZ73ozM+PT02gYfC2tcFd16MUSENnHLFmuqoXsZbMdZlJObOsM4ehZ42rogmN2sp7Epmz3KEKrYrfH/ZMR2mnv3z9rKSpcdRyfBxIufy+62iyWGc9rtNheng8MqwD/dBz2O3u80AF4vze1mO0BM21ELdE3wWKFfgLo9Agf8Mcg0yDFbxttvdlZRh6QvCkWtiWjd1hBzHp8rLmPnIyRodmKWqTv375yul84uXTd6zrSt8F/V4xU1yrNFMSy6kt325I5v1+rxezSUxzhde3V16/fYXjA6fTmeenJxl0jNrGm4P4dtnmnLUz2KTfd9ZpSWugxkr1bSGfsi6clDlf5zYrHQhNV11LJhhLdZU1KdQm3MZH2ihCVVp05wNLTFKMOS8GTrCNrSPZbWo46y5YVAQ7jHeErKovWkmK71LQKhWXt0ptK9UKeW1psuqKS0Yky9uuJP+eOSHgtg7PG7dHazFD8VUy7M5C1fw/lYJHSYBGZnR5MbDYlukeU9uVmKaMMcLZq0s1lKIDKHQdDksI2ofgDHXLSnXzvbIurDhDxRp81zW1lRbOAu95ht1IF9SBxBipXmyzwfX0w0DnPZ2Ty5wiPlZKiRgzT4+PeO/5+vpFleDopM6BxmWL8hPVgjdB3ofribGfKEXsLIejoD1F6DtqFn7eGMuXr1/59vpN/hxr7s9H1wlJ4Z1ljpXT6Z0udByOWswfDw/gLNYFdRc3P8At5CZ4XfJIfl+aTPbT8zMueKa+ows92xaJKdKbAWcD6nM1V3DGYL3l7e2NdV0Z+57944OUO+Y2i195fXsjWKH8x7GT4sfocnetCKq1cDgcm/CmQt+zbguduynJHLEUqaPQn92FwLKpcBqn4S5FtVUmwpskvOsHShaSY7fTbkpXpM6+0ozJGEuwkpKfTyedpeOod6PeDMvCu2BFyA7VKeCrFHKWO77vB95eX1kWpXY6p7yPB/vAcXeQVLzFPitptDbXfeV0veBjznS28earXuwuBEoW40QRhpXgHL9+/abc10cjjnlrX/qxl3oiJaoxzbxhWt4t+NDRhY7z5cTbt3f2hwP7w0FSsqSFoHFOrtzbBgtJP60xkucOEza4hgqudy0/xuA630gNwohXaxl74Z8727HmjW9fX/n5xx+ZdlrEUwod4ELAW8tuGHFOrtPz+czhuCduG++v6Y5FsE1XTFM3lQLX84kwDFgMp9OJ3fGBEDqNBqhM+12rbFIL9LjlgP8e8+mCU5VrDGvaKFnE02m3Z5xGfvv1V659YBgEY3Te443h7fWtVelWih7rqBprN5e2Lt1aGm6heSBCCLzO7/zlL3+hGwI//fCT5p4AxZFyJhfH0DlSkDIjt7ln4nZQGvzQUbaGmqeybokYo+SH7uY/kEw2bYlYkxLjisZjMSaldd08Bu1gMG15YVzr9jL3rAZ3w4ZYR2esAqFo71QRWqH62+hKgUdO8zpcBhskPLiNiHzbmYEuJ4zDV0uq6jDF4tdnq32czHO2SMVXmmqLWnE+tN1IUDjMtrHFxG4c6YaAdaFp0YV18d4zzwvWD3jaCwAAUdJJREFUwW6/b6ZMeyd82qZiK0UU227QEj03ObBGto68JX755Ve+fP7C9z/8wE8//sQwTZJDusCW4+9elSYQkE9BwoZpd+Rxt9duso0kz6crMUXGUZkbxVacs4zTSD8MVCr74/H+OccYdVl3Ciza7w86+KzUZtb7+7MICJfR5unv5zMfn18ojakWfGCbZ0x27B+OjLudlFhxxTl5uiTVqNjb79IuW9Oo06Vktip5uME0kqxj2k14625JqlBk6Ot8x/HhAdCyP6Wm+quV6hzeGCw9WNvy3CskXfY364D1DrOJGZa6TNcFapKx8nZZTNOoBM+iZ2A8jOry28+9Rv28XRdad+05XU4sceXD8wvWS4G0xYTHYUxhTZuyqduzYrWAI2eBRY8PR759eyXlyDAMTNZqZFcKqeY7wry20a1vK4dlWXj99oo3pWhQW9XygqEbB3zymp2n2DoNfYj7pwfdOk2hZEohhAHvAuu6kKKqT9v8DYZKjBu3mNQ//elPTTrmpZqpv7dr0ZTm2FZlVaqInSxaPndUqtUvoyQ5I8hbTqSqF/7t9E4InunpAzGtYKBzoRngahsV3GbAjs45fN8rp7cdvHoZDWuKnF/P7FprhjX4YDG1haNvkW9v7/jLmf3jgT/+4Y8UaygpkVJm2yJfvn7BVHh+fla1l+Nd5mbbw0iV/NEDnfOYzvMYOsWLxkg/TryfLuynPWvcOM+iuhpjGMaJuG1ctyvZ3tAMGucYKxyJDzo1b5RfY9qFnhPbtTBfL+ynqTHmY9PgZ833kyPWqAfWWsLNAKlznDAIUTKfr8S43S/3elt2V9VI1WjJfL3ODKFjGHuCt5L+FmEXCrld/NxHl9ZYis3QOjYXLBRLihGMDnuhGyQxdc5j2lgs4FhTq5yNJRNx1RCzxkVdcBRMw3uogvVO9GBnBTe8mT2tdfq7/yegX2oz6ePuoNhSa6lRLKKYIilnSirN9TtCA+JZY1QwVIln53lm6DvBJ41klaZUjDeYZHDOctsd9l4V8KVIHlor/PLrZ/7tf/wby7LydjoxjCO7nXw/xknC6YwH47AxkmJk2xKmN/T9yH6yeO9aNamvLMbYjHg3zAqQ9RmZanh4fORhfyDFjHXN5+L1zuYG3ZMEWJ3xffTYeqdajH5/53BGu6iUNs6nM7v9ntAHcixcr1dC6NrPHJmmAecs87bRD7JhltI6EKtLYphGXCdvVy2mjVILoZeEfbcTELPW1HZ5yq0Yhr6NjnWRSZV4E2TQCtkmxjBgOycaccPj9F2Ps4brPPP2/srD8ZnDbic+2RYxVELoqMYw9IMydHImpo2+H6UAc64VkBrB336Wl4dHjbeLdkt5W8EqkOz8+t6UeDI5+9C1C0NKtsvlyvn0zg8//KDOtKr4mPqRmovwO65hbkqmNqHP++nUnPrO3dsig4xIve8ozvPt2ze5Gntx0D9+/52cvCmpgq9NkVKE7u2HEWtv6IYmC6xNFVIEWttyYp1nHh6fdMoYmqGsmbNaglXfy9X5/vYNg+Hx6QMYWOar5oHekEsiXSLn04Wnl0e871m3mePDJ3KO5FrYDzsqlT/8+LPUK2vCBkvXqb1dtwjOtrlrj/GWp+ODfAyXC/3YUzsLUZdTjumOKTYGHg4HrsvCl1+/ME4DKVeuacVUUVIv1wun81nZC63cybbcZXXWAk5jFCmVFGjzdnoXhAt4OB54e3vDO8fbeeVyOjNOE8M4NgT4bfylz3pNG947sfGtI4SuKXtqQ4XA4bDn4eFB7WTQUjWlzLpFkrfEuDJNe4qt
xCW25WbGBFXJ67YpYL6o8k9kbJAc1rQdR2luducNg1VqYIoJ45scz1hCO+S3bSH41gbR1E7tkKZV36adYNaAD0EejuwoKVJcEYjMy4fBje+ERonBA1USaH13DTuQS0sibMqwasA5+uAVH4mhOoszks6mZszLxdE57Ueql4fAe4fr29jMWZY1sT/uZHpqL3+1RpGgXdB7F4KYaV1PCO4+1fPBczkpInPaTVIIrglnZfTrnCVXmp5engvQmPVyPpG2yLQfmbqdTFweOXRNkGmVjS1vd95Xqq0lqppLPzwcRGaut9c5N7u0dg2Px0dFh7bx5e0yEQ7YtkM4q1gopV18uihq1bhU+7DA8/MHrLNYW+mnXuOQfuCaLpyvM48PPUM/YcpVl1UtzJcLfQhKY6T5WazEK0M/UMuMD1L21KTkyVJpSYg3NH7zONRCjMqqGEdFK998Jrogm0gmpXskacUoxdNYLm/vxJhwDw+UVPn822f+8dtnQvgLf/6nf+LPf/qTitSm9tMUWIl8xohv5lrxYa0lx0gqlc6PFGMYpx3W0OjZDUVTJAjZTQceDkfmddVlaY34a8FjrOO3L19IKfHd9z+Qa+W//49/xVnHf/inf1K3sC2Srfsm7olycKc18vnzZ477A7cVI3CL+GvdhAu8vHyg1ETKmbhFrIUtbZpXVWmNSZJbmlK5c4javKC0xehtUxhLhFyYt41dSi1cvf1zsUhWSSVvwtzWUhoWo9LNM0PJnE4nXOjojCHFzNevX1iXlcPxgJtumQKW6zZLh26NrO9GbJ21ZnrjGkNHTZSplX/841c+vXzg6emJab9na/mzQyeVg+/ULl8vyrJ2ncxs3lr2w6DwDxdY0yKDWeiIpbBdV7Z5IW7SMbvqtBy3rUqxTrd5wwY75wTFqzB0gcFLfTGMcrp7Hzg8HOl1w1OK8oVDCCKYOkeK4vFIxVKwwYGxbNcLYRyhKpLS/JPj25evPOwFFFyTkNApZlKUCmzbCvO88OH5ue0XMtsapTBrbaqx5s7Euj1NcduouWKCpIzF3qhJ7i6VFdix3HMnbiw7CRIk56RIWyUh/e3EkkQySxxG9bYtwCFtmW5o+wNj74Yz2xzRtRrWEnXYGoftbOM0iTbqrKO7xZ1mjZl0AMG2LaQ1KYCqKtjIWtNw680eZhXo03UdadsIITAM4z0n5ZY5XdD83PvAONCEAYWMCiHfdfzj11/ph4FhGNhNA6ZJZmPctNeyjmXZ+PjxIw+HB5Z54e39jQx8+fob1n/Hbj/dpb3UQHCwVSHCY4zCobQMkLtvBNd2dTdvRGlKHslfv/vheyGmjcV2Rh6VLJRFvQUptcvXe8cWtUzGGhUCbe5urJY8N/GBMSI41yr8tXGezrUxHxU/jHdUST8oQ/u8nint76IttY2V+MHdlsrWqhg1ijag+YRyk7vaYtlylH9j7JTOmAomxSaZ1w4gl9p8X4YQXDOc9vh+gLpwC8ryXlke58uFbV11+DvtbqtrqH5rGYL2m/SCY1rU5dL2EBkgRRk+iyKJg+94PBw1DWjjJD/0jM7dF803RWeulcNR+R6Pxwf+8fkLf//73/n5Z8l4axNLxJJw0dANGqd5r+f5p59/Vkqm3kpzG8tKeuhv0lGZgpblxGg1jd62ja4TwTDnzNYS53yQVLHminEduCKXoDV3XbW1Dtt5Pn2YWq1oidvKti6/zzhvt+GmA+v5+MSyRrXfxvDh4ydKyaRYWLetLXEVgAPw3U8/6INK6m5iTtLAI7Kks7qYUi067IPX+KEdMs56/d3W8PT4pKVqoTmDRQQ16M9PObFmuZ2fX15Y54V41kWYq/Y43//wPT/7n5sZjOa6TS21rDY6KQRnNYuNG+fXV43JQsC0hDrnHG/v70zTSJc7/ezWsqXC2GnRWUshVxl1bjkAtuolzC6TUiTYvS7eHHk4PnA87FrGR6bmyjQM2MmS6w68pQ+ex8OeLnQEn9myZxylgpqvV3ZNnleK3NDG6MDxfY/3+dYqYnPzKjR5NU1dlqyFnOWMNTSEhmmLauVTxJT0PTTcscVRnELujXOKm6xtT+J+z1THGkY/Cl1eEjmZJtXVqDEEqeuWy1W4ihCaPlyIDqk99PRIsdVBToROUbej8VSrpXK1le26yvzpNdv/4ccfG+Qy0nUDfdex5dyos5WSioyf1kpjXyolbmxrZL4KcHc8HkgpMV9mDocHrDPM66y5unWEYAkhcNjtcc6zbgtUw7/95d9xDaUTnKN3gZwjayqk1N4bHyhGKptSKr7zBBeozrSFf9WYitiECMrTtkYpiNWWVtSsYvxYS6mJdVm5LgvH3Z79ca8JnWk4Fx9UIKB9ozOOHEU09V6FTqmZUjR6sdP+/t1Sqqo66wAl5H35+pnr+cqf//xP3DLKKdCNgvzdxoV9K6IASlYhq0GG/AgpboKQZss6z/zjl1/xQT6Kvu/I1VBLJBtH3oS8uF4v9EOm7wLj8UjOUkP+9Mc/cN0WQvB8//PPbDlhnSMbeXi2Kn/OTaShd0aWgq4LUgv2VeOwnJljbNy3jeQLL4+PDP0gU16t2jfEyPPTEzYIjhmzEEfTMIIVhubxcOB/+1//Nx6ORxVcWZ9NsOrKT6d3lnnhxx9+wqCzYFlmGW1vc1bT7PrKDtZHWEoWm8Q7zpcTcWv27da29/2gQJ5q2zJBLZMAWNJud6EjxcQ4dg0caLGmEtPG6f2dlArjbrhrqm9OVO89xYm+aY29p7yJgdPCcTo5xWU6o+nOtUPJVUucVDOsia+vb1hjeXoQAbO2ZLViBLfzXUcuiXnWoUQIBB9YLjNb3Dg8PtC5gHWGdd3IMUJDWN9UGt5avp1O5JgZ2t5AHZZtbaZ2JClpLLdFGfp2fiBtG7/++gvzvPDwcERZw65BywrztjIQWJcG5+oU2J5K0WgpwHWRNyIEtdWS0+nAnXZa/nsL82WDuooQ6j3BVx6fajtkLSlv2kOEjgcfZMQ2hlAi1Xod1tYo46FUUlyJUXGYwQW8geLlBK2l5Uw082Ep7VKxAo8VgxbOhhZXqWfPeL1IaYnUahh618Yj4KrHWVWFNwOSOpMkDWwVvdV433AzDbRmpD6yxt7HrLuDUCvFGBm/EJLDG0s1mrM7d8t80M/mncOGgCvl7ooeu4FtXvD+9z/bOcsyZ4bRyblcajNuZmFtWjJf53S5bDkSt4VUC7YBLTsf+TrPGOc47mTALA2gaZyjYJnXlc6rCAo+8MNPP5JTIqeibgvYUuF8PilS1zqeXp5FAbBGI44lkbzUZiH06gpLucueSzHyiRhFgGZ+p4V2Tt1qcKF1ODO//uNXeUOap2e5KKDrBuozreu/LBfmeWa33zEMk/7MmqEKTVKaJPmmaLDGCLiYNvqu519/+x98/8P37A4jqZl0q9EZY4qyTJxVV72tm/Knx/EuwLgRBayXC31ZFj6/v5K2xMPjc8PGWKb9gXVZySkp5KyM2qk4T3B61pOB3WHHf/nf/wvVGKbQkWkjeVXk2ArLfOG9GWJ9U3Z1wdOFAdeW1rVaet+xbZGaMvvDnqG9i13
f0w2DfBcp0w8jvuE2bl0rRZ1GsJZl3eiGnpcPLyqIc2aJG6fzO9M4sdvtsPNCjCu1FrZt43K5KDcFaltY65J2bdx0Y+44U7VraC/nNA6E0N0PLhDSoyBX9b2Ky1rS+DaCCqHTD10z1kGpVpm0zjMECNbdw7nJBd/3mJqZrxfeX8/sDw/sj3usKdRiW7arY7/f3fkoPtvfQzuskemkQo7bHQVhmmS2lkx/fCCXwjRNxJZed4Ou1Vr59u0buyQOSt91OKu4w2XemqMaapPr1SaTtN7z5ctXUiocHvas6yojC9wPjlJvuviW6ZAK17zy7dtXvn39CqDfq+pimIYB6xzPj49akIW+VUTNjNRMgZ2x+Ar9OOKs432Lbd5exd/Z7RsDJ3O5nKmp4q3HBB18UnFpYOQyxC2xRi3/a0OIdJ1gaL2XHNMZy1ojMWW8l/JEl0DBWxnEcPVOl7TGYkrSd28k+6ylknNTupWE68a7Zpy2jA7BNdmkvqOCciRMkg+hH7TDyrGALcQigmiZZz62yM5i2wXhAlTlNDsjRd111Q5lfHi6q5ayEdhw7DoqUqAM/SjGTgOkLdsmr4O19L0CdRRta5mXha7rOByP9H2Ps47QqdMRy0ih9DFCDJ5p6FoIUKArlV3fcZmvzOvCdb4CRiPbUrDBY7GkmvRi50w2HcZ4sZ+spTQIJsY0oxaMu5EQO/kHciFtQlXYwZC2jdRMkgIQWv1nq20TAttYXpZUNE7M1mGqFZp93QjdRLWKufXdG9sWGacRg2FovCJrhTmpNZKSPo8QvEaRVgbZXGhyehV+SlhsEw5j2OaFLUd2w8SHDx90cSUFndlaNbRvP3870jBFiYtbKWzXhXE/Ya1lGgfK0GMRMsPtD/z8w48sy4ZrzDo/eElsnSEEyZDDvnXF6O/1IXC+XLDeUluWRwlGy+DaFuKl3jHhy+nUUupsG/hr//Dy9HxXe6aSCUF5EkM/MnadIJbl9yC4p8fHhvuWUtQZ4XbMYH6XtlJZrjOH/QFr4evpnbfXd2JcieuMqZndbrxnbthq7imLDS5sGtGwdXRGpq/ceODOS4lx2B3u6ocaMzFHnHfk6n5fQts2QoobQz9qbq4VJFvSbiDnQtd39OPYoHCFbhCQKl4v5Jx56IUiGIaJ7rsdfRfkoLJGOAM6vPMMfXc3guUqOqp3WgAqGczhbCCMPdNompZbF9K6rs39uL8nM21RWbpbk416Yxi7nuwK67qxxZW//u0XtnXhTz//CWe0J9hyIq/qDqZparhgwzxfscZIjdQ6s1wbH8kIs4ATv+Xb16/QFlX7aSKEjrfLBYrgYC5YctzuHYu+fDCh4b2tZZpG7nStthMKvXLCTZEnxIfAbtqrxe9UOada7rkFpmpkuBF5P594Pj7S7UZVncYwBKjOtIICSJLshaDL9DovxG0ljDtCuOlZtHjznadk7RAwopZWahuLaNyTUiIVYV5CF3g4PkrJlOWNcEb7Aosl2o1+7Aljx/lylrKugima/adNXUg/dPik/ASdm66Z6HSoOJPwgzTw611UKcJnN3TaZ2MYJ0k6sXK0Pgzte23guGGa7svRQ98ROo+RSYGaKzZokbslVYeuSaFj0pK39wNPD88s6ywTp++pNeJ9p92GccqbqLAV7YacMWTr1OHVRIqFL99emfqRofMUJ/RLP4w46ynWsJtUhVbTDv9iCEE59rFRELw3gMNaAedKXFlKwfsOFwKpATXfL+8cd0cZxJaFrh9IJfHy9NKUjUJin95PMr71A5fLCWsdwzAQuoHgJa6oWbsErOoLECpD54tc+ljHWjLkjOk7/vDjzzp0V6GCcEoKlBmxYjFkY3DBSZ2XFeh0G016K2WcsfLOrLnw+PgsSqvTfqszmpT0veb2oijoMjLGYFFI0OA75tNF77b3+CCRj2n/WlMmNLXobr8XO2ub73vbm8gkZXHggvP4zrG2c4kGCzUI3y4kUTuv2+i63YmklMgxSlrdPGyXZWEYOmJMnM/veO9YN7hGRawaZ2T+a6TsbY342ma8N+mXMzJclSouTPABUw3zLFKhD+5u5wbownBfbDmcFsSNcX9LTXNtoWJam22QOxlg2zacbTsRC/tpT6ayLDOh6xj6oY1pKvO2NsQ49MHLuVszrs24b8C3GBOX60UmtdTGIhiy0ZyztoOmWmnSt5LpcVQjF3mxht04Mg3PojpWzb+/fPmGdXK4Wmv4/O0LIXR8/PiRvETe3t+VbDdN9N3A6fzOscWwmlKpRlVQsJ7X0zfNqXtJSEvODMPAMIx8+PACTar6sD+yxU3gQIyMXDXpMXAWU7Twz6WQEHTOtu9y93DQSM45qFF5E81E9vT4KExCaelvqem8a2WLmVJVgUy7PYeHB0opLFX0VzuOcrVTOJ0vLMuVl+cXaRCxTOPIEPqWACfkuLHKb3bOCufiPZnK5Xzm/f2MA/b7EdPovn2nPUPXOERxEx+oFtPiRtWq97bH9U6YERyVlVq9goBw0Bv6rqNzPaYTYfZ0PXPoB1xTW9ku8DQNv7trrcVTKY3B3/sO6z2ZJMZ+Azl643Be3KO6VSiF/X4vAOFNPouheC3EY1ReuXFNrJATeGWBzNeFXT/hgufx+YmSj7y9vXPLe7nJnUvNpOKgGNZ1VSRqteS8iUbsHMZ4Rt9TDbzPM701QrQvC74fiXFmvhZcJyTH2A/kkomb9pDS2TeYodPuItWFeW7Z2Fm/t7WZWDPzZaH3HeM48vnrN14eH0UbtbaZ2Byv377y9n7ieNjR9wP7nVSHoevIsWC8a2ayTHUoq6MlGK7rjDWW/eGgMXABXyFWS46Zab+n63vp/nPGFdP2PvouC7DNV3lQQmikB+Gzq9U50GRTFITYGYaOuK3kXDlMPcY1/0PJ9N5TmuDmNs5NKbNzHbvdRDxluiCsu3LWM+MgmnPdJEG2zjH1A4yGdQ2S+nb9vcBMTW13Y+KFoDGeKkAD1lLjpjHdNCnbonlGghVyXXy11rnmih0CNQpqejjsyemF17dXFTSDCvY4r3ROXfn1ciXlhFfQTmGJm1zOSHXi2xzOWWnsb7F8N0hebQeyDpWVYRg1emnzPW8dqSYxWYwInMZavNGicl1naDpq29yjAQvB40tiq7e2tunOc5aLund8/frKfr9ntCNY00KPDFi5P6PJLTdh0OFqfKOSttmjazfytjGvC3Geefn0seELKuu2qV0L6kaME+U250jwPZ+++0QphS9fvnI8HonbxrpsfPntM1/e39ntdhyPltf397uRqOaFZVsYx4lcKq/v7/zx58c7zuTlwwcxr5yDkjldrlpYhyASZhsbWFvvn3/wjpQKW2yGmFIxQQ98Kpo7p9JS4rJ+f1NVfeSq2NGhyjBEMHTOtZnrQoqRSuXl+Zlu6JnnmeU6048d+/FAqqJ0/t//9f/DnCKfPn0nA5+1lCiGkuIPtRgsuagbtMJxb2skNYTI4+OBXb+n1HSPfN3t9wL9NSns+7zgvWM8HBTxmGx7CYTVrsYw7Sccu2YadO0y9XShI+cqtEoqDKGTQStFghEiX+quhK
2G3TRS226s60Lz8ThctTSl8T3EXpJchx1s05/rH9hioiR1yN5KCZZKhlg5DEdyn3jPiZzr3bey5I2h+RWcdxyPBznnbzHCtVJiy/YIDl8CSkqSC7+0+bPzjt3DgbhGluuV3A/0fWjy1Mr1fOZSCx9eXuQVyTd6cWBZF97e3piGkUxlmnb4vsdFjYRMGx3WllfQ9yMfP36Q/t7AeFvqDxMxJtoqjt00NdVfAGtacZluEhslOTrJl/19oWsgqaspSCBSym0sWcg5UpIh9IX97jbH1zjZGEO2lX70d0VlSonH44Eu9CJXG+FKTu8nrtcLwzgwTTudD83fk9KKc0dKqfz1b38lb4k//8t/0Ggqxvs/G7dI7XochsM0EawTCLUU3t5ngnX0w0Dx7j7uvs4z47RjGgb6XoFQoAtOewWN/Ev9Pe+9Iil+aLs80VsLrqhbDy5oV1Uqwfl7mJG1Bm8V3xBzZOwGfvj+B7quw1nDfn+QudXr+ylUpv2eHBO+GjidTvz7X/5CqfC4P/DHP/8JYx3kKN6OU9tPrZrTItmgtU01tCqHONwXe3Jim2xkkiqFUsQxMW1+Z6xv/grN6FyR9Isinn83jiIhLhtm7Mml0vW9FkVYti1BnhnHUSYnoOn2iOuqlLMqJYa1jnme5SDdTwQbWhxlQwBYS4wb52Vm6jopl9ImtYYt1G1lciNPLx80t02q5D+8fABnSZuS6XwXCMG2lk9jhJozaVupVaMsjMUGz8dPH/XFO12czuhCwiiTexoGxYbmQinbnXOUMPiuZUM7Bzljvbgwt6V/SpGaM/9z/nbXB6a2MCwpcjlfFXTvrSqrIsVJzkXQxSqyZQ0C7fVDz/64b9p+RyiWlCKn84lP333H0IvJY61hbcoJ12jBMSVhq4Gu7UjWy0zcVp4eHhv+OpNWJygatOr55t8xuEb3nTot87PT752LF3LaWp4fHshUhq5viqbC6e2ED460tn2Ig2k34pzjcjrT992dABxahnbfZMK2QSNrzRgnJYxt1alvKPyb6sYZgQlNm9tuW+SyXCm1SjDgAtO4Y5mvAPTjQL+tAkc6Szdq5JKbE7rWCnfQZtWuxjlln5wvTPsdtRph0fuAC45tnvFNLOGdZyMy7vYcpuk+vpA3QFJxGf89uY3XSspcz2f+z//f/8Ef//SnFjqkDok2R98PA7VaRXYiodG0G6FOulgfmpvZgAsWazQKHXcT/TBSSVKDUbHWk1IkblHerKJFeWnycGN00IXdgVxFBL7OFwoymW7XFWMND804ELMmAXqeKjllXTjOczwcqFkRy2tZ8cOEwfDt/Y1ffvl7w3r3/PM//5OUXY23JSCf6L5vr9+4BTsZKz5XvqXjWdNkvQ5vrM64WulCx/PLY3uOLY7CNAyauGAIXhGto3dc55ktb4zdCF6O7GKaNLmIjtx1Ha7K1+WcYz9OrRCWYqtW0Q1qgM4HHJl12zjPF43UNbVj3mY634mz5y1tvYy3vnU1HQ/7AzFF/Lat/PLLryzbend7WiPY1eV0IvSBcTroDisRHzpyzYxNt6/Q8sh1Wfjw8kGLSDQTTG3emtLGfDlj7Ew/yIk5DpNufqsDoVZaNGigltxUE4aUMl1uM/imd989HMktZpDNMfmR1DC7vbH3X3LoBoL1rHHj67dvyr4eB4prS9hxYDATIYQWmfrO+PzCYX/QwrdWcpQiY5kXpgb0u6lpQFCteZ4xDvb7Pbv9Aeccy7zSeU8NgbxciVlyYJxhN+rvvF5n1rhx3O11kVoFxNhq2WrG4chWL6l0z6oQrPU4r5GG7To6VE34YtuLkgVZQx6DWDJjGNrPrsxs51zba9jblAhvPN5vlKTZfzf0TXZpmS9yEJsKNlfw0rz/4U9/5vHpiS2LS2UxLN/eGHcDwQfiumo23MZgru2+xrHn6fGoTIOSZPQqlcF2YOFyucqtHBS4czNvWevamEZ67s5IXn2rmNZ5wbVxZ74uGKMM4NobpaK16FphqMS+ciHo8wudqvYqk5ouH2n/TdusFGqja2ZMKYzjjq1EUky4oWU4W8/x+ECMSh8zyBtjMYRuICZhMvb7g7qp4OhcR6ZikiznN8XNVmVIvF7P7HfHdkCpOg8htBAnLaZLloseq649DB0hB8CSsxRxUHl4eGrjC7TcbSj66tT17h+O2M5xOB4pucgj5awk5zhMmy4Yp0uA5j3YlkUwyqbo885zY5zN1wv9MBBc3zaUEqtkk5SRvWVV084Rc8WYpIiCktuIyet9ixvOB7p+YJkFHdWtCiVuXE4nHp+fm1Nfv5cHZVz7Qt0Sy3lhCDIvWmPoTMBN6sTW65Xj8VHxtN4rAM1YrHN8+PARWjEn340h0faLRbuTnCPGQLDiYmFaprbRRSbfjUK9zG0yYPQ8eufJ20oNWu7fdjE3YGfJGnsXW3l7e2e33zWWlG/UAXUaJRftAq0hV0c/jhyxfHt75bg/EPqemhLztmgM7jpi3rBVBIiv317lXB+0U/Y5ZQ6HPU/PT0zTxGG3J5fM+f2k9ts5Xr99aWCqQPAb52Xh+4+f9KBUHbxsGzlrRBGbicg5vRzB98xc2y2mhefNbm5zRUYEtUs0Y5lpH04fAl+/fMaHnsPuQDc0lk5oLXyKpCijWx/UZTw9i2ZpgGL053WhYzyMhEawNSh9bmvqq77vGZq3oOv6Jnt0aicxXC5nrDOMw9RQGpWtFH777TOvr195fz9zfDjy8vKBbdsESBy1r6l1wOfM0pZIzjlS1IJ9y6Z1a46aE7Va1m0llSxUNfJc5Kz871xLq9RMkyHrInZGo55gDZWO7X6YGGzWwj44p8UhYIMnbVuz4hcp0ZzGK7eLzvdyBtcqE+XldFZ1GTSi8qHjn/78R17fTpAlN7bWcbqcmPYToR/oc5E02bWuxlqGEKSiMJZyvUo10ubXad3wvVQ+c5nxgyeviX4aFJ6TMzfcw3KZGaaeru8080+JabdTsEyFfpwIvqcfBy13qZheljVTYX846OegSGqbhOOoVuBFezPIOS1AU1LO92AD23yVwsnpkMnOEePGOq9UZzm0PJDdTuMLg3YpnZHKLaXIljLTpNjbkjMpakc09IGcDGtciGuScmteCb2wJ4f9kVQiOYLtQ4P2yROxzAsGmUit9OgYY5TBYiylsdI6b8hVm5xUbzZFy/H4wH/+z/+FkhPLfNWl13m8dbw8PHGeFy5vrzw9P4GxrPOGdZZh6Im5YFNhGKSMujGdas2cThfe3k48vzwzDEOTtKoViTFyfrtweDzwcHwimEpsopPaip7Q/EKH47GN+CzbuIhf1SnvBiyHw4NMa01oQJViyVo5L2uBbhhk3rTw+PAAJfF+OhO3RCqVeb4yzzMfXl5UXrddx8ePn6hUBWFNbbKypTvn639mJ7nOU71hnVfGybFer6RaedodWOIm5WhjdGWkzuz7TkbbIhl7Ntqy1lp0nlrXEOUw9h22Vi7XK9M4tN2PlSAjtPFpXPWzBM84jlyXmVwrg5HyzafMebmwxo3dOGK6Bh/tPMt1Yb5c2e1G/DhpMbjNK7txkuRrk
6Hu49MDwXfM64XX93ec91y3DVMrychJm4rmkkM3sKbIL7/8nRhXfvrpRy1sW2fSj4NmgUEUzHpn4Jj7zRpCJ0mhqfTjSPCOdau8n8/0YWMYejrTKYuhaCbovSR/NRfcNCpLwWiul1rQ+RAC/fOjDuLSwsFBY6leI4LQ5r4Z+O3zF66XC7vdxMdPHxXD2PXCSOfUXJ3w/n7it9/+wflylhPbOeLxAazl7fWtMZMMoesZgycsi0ictdzNhWPoWyiK5tkFWuhO4+kkOK0LRx8k721Viwt6ES+XEzkVqZpyZOg6vO+pJd8hf9YqmN0527wIwhgscaP3QZV2gw2O3UB46SimUnO9K9MOxwec8+x3B0Ln2WLEJrDOizQZpJLyPvD9Dz/do2/7vlNyFvK29P7WDUTN28cRrKdS8KXlKIeASRtL8gyhIzp7l5z6dliA4Zdf/kG4Wn7+WWZFcYRuVCnxv0LftRyQ5uUBghWraZxGCob5qlb8eFQC4PlyYVlmpmlH6GSuVKZER3aikA5Dr2VpypLSOkMthtN15nw68xp6nj88aRFcwQaLwxCRbNcGjVi3dWFdN4IXCqQk7Y7isipbulRy6TTPjpHf/vEb3//4ozK/04YLjopwIuOw146iZmptkUjW3Meuwm/JzVxKR+etKuG2N4wpAoWu79lWyClSKaQVtmVjv98Jr20lv6wZluXK++XMEJQG2QePdZIwlyQwZM6ZvikAt3XTJWEbinxNWO+xnSc1/0yqDYpnIebEFqN8T01tGZov6fnlg4B/QQ7n0i4d17I+hmHgvMx8/fLlXnReLjO1Qe8OxyPDMPDw8oILPZfrmcNhrxVd2ohFuejOSVoeEIyyOCcjmvPEeOF6uai7HJtcddvoYsS2gDOMYV63RqhueeCtWMyNZgEw9CPFzOQ1SmBjoImAKSVJBFSAXNv7Xnk7fwFbRXOthVKThCyp8PnrV3wX+PT8gUTh8fhArvXevf/bX/+d//Gv/4r3nv/8X/53vv/+iK2G/W4vooN3Gtlta2Q5X9ly4Xh0reIxDLup2eULvuv58PSkhVPf4XzQrI+2UDGyuP/tr3/j2+s3/vjzH+n7kZS2ln9t6Vx3Xyi13oubMSavka0pl1wjVS7zjNtPeO/4w89/kJHLar+RUxJR3Ul3/jvSQpA4W83vjJNqpJiw+qIrUGJs8DPbfibJclX9dSynM1+/fuV8OpFS5vnlkcPhKIDdJvPLvGysm7j0Ye3o+sTD8yPWW5ZlI8YN67W7mZeZnd9r9lsrQ/UY5yQ1K1UuVyMToCmFoRvuDuOtbOzGqdEtTQubMW3UV/j8+WtDDu+IcWXoAhbL1KvtT02ffTebVXkjaq0cpn3bLTnO15laiqqZ26igAVK3pJfUtO/fYhh6y2YN07Tj8fFJxpvgmtTXsm1XOfaNpxuGNtXQEKXUgsNL7dHC3nMqZJ/xfcvJrp6XwwE39PgY774JZ0Nb4Fv6PnC5vGtk5gLjBNTmlTGa6QYnUuYlrhp1VP2Ol2UhLiu7hwOP/aN2cE6z/K4LpNirUzNCsjvrpaKqivj13tH3PTkXtuWMwTINA/n4gK2GJa58+e0LH7/7jmnnlWQYtERNZaPzI13fsVwXvHN4r1FL6As5SprqnSdM/m5GrKVyXWbWZW5L3AoN83EzT3WtGmz/jauS554vJ2qGcRjlIbKGJS503UCyqWFQKmmr+KAkt27oSCnxt7/+jRwTf/jjHzkcj/hewTjFVjnujeHrt1f2u1HFFJV5XliXWReUs2xVktNp2hG3RNcFtnWlZHWfH56fJYWxDoc6LesdXR/o+tDiA+S4z61L7kNHLqWN4GRG25YV5z1YkU5/++VX3k/v6gjaGCylxLwuvJ/OHPY7Hh4f5Sg3Uj855xi6wLquWuY61/584dANYL3RvrSooO6GgW7ohccJjpgLXa1KoSuFx6cnnV1VKkZ9dxXvHdd5k4rUGOIWeXt/ZV0XbOu4+9CxpY11WemHXp0GHX3f8d3LB40n9WKpm3fyka3ryn63l02hgUS9sWBhjZm//+1vrMvK7sOe/TSRthWs4bjbS8gUGpm7VOjHHQ+DZsG56CYarWbEqeX6hq6XbrcLGnE0qdgtQnNZZrZ15eef/8jj0xP3LNthFGajJGLjIRmj29D4ZtqzhnGcmqcC3l6/8tvn3/jDzz8x9tKGG9SCkrTAK0VwsBA0knIhyOTRnDOiberBv15njVJ2Qbr5BulKKTNOI2BYrjOmLcd3xyP781UYg3lhPs/sdwcxbXIhbnNbWIpme9zveX554unhiWIt5/MFh+W6zFKJVAX0eCP1UjH1jlYutbCuKz4JmifGvXDmKWW+vb7Rd4GH41GLfmMbVygzz1IhPR2PzPPML7/+Svn0kcPhIKVcbUqW5crj4aFJlVX9x1oIfU9c1pYdXVpHl1oVrkVxbKTKzimbW5+bQGPOelWFznI8HOVhCV7dnDOQEt4ryjFXmrO0Uqvqo2otJUbWJfJ+emXsB/bDSM4o5AagREzfkUtt45hI5we2bWMcJnLcAMl8TW2zXqsoXgxUI/XP+e2dp+cPWC93dBc04x6GgYI+p9FMFN9MoCGyxY1u6NgdD5K+ZsHshm4gFlWAyzJTKHR+IFWFuOx2O5Z54fX1K6HzOOu4zFdCVDe0zgscFLizxYVpGnFBh2Uhk1BWxy2K93K+YK1UhZ8+ftLzbQMhVKxs6YJtBtNGk2LjOq/sbipiKbUx3LJtXJdV+4knSU9rexet17O1LAvjMPD69ZV1WXn59JHQ94JEoguqomhN646E0DGM3c1bxraupJjpOpkIe99xna9Y5zm9vYHZsa2x7Rd6xtDfcw1qKzpDrW38G7guF4ZhahBG6DoZ89YYGfuArQr2WpBMd9u2e2CSdU2NWaEajW5yEghxvp6ZTxc+/vSD9hztIPZN9ZmzTJfXy4nhpxGo2imZlh8+CjG036v6vnmrjNPCPtfC+XTS8nvZCMPAdx8/Uksk1lYk14JHl13X91gb+PrtlZQijy8vHA9H+mGgDwO1ZkpJbNuKy6VdyvJJ1Ap3zj6Gp+dn9sfjfbSvEdvNSQFDP7D78cD/8i//IkNxjJgsukbnfEMbWfw4TtgKuRRiXHBOPBdTcuPr1/sczDYTCZS7DyKXhDWO4Ht+/PEntTpZ8/yuybpq1Z8xjHIe11R4e39jGAYOxweMF8q5mOakXDamfmBe14YJXtRKt9bN1ooPDutkKKm1MHrZ9311YKoQwbYSmvuX1jV8e33l44cXUspc5wtd3/H19Stfv3zDe8//u//f+fTxE/t/2fH12zdp0btey2Anuuh1k/KpH3qeG/2yC4E1Rpb5yrdvr7jOE4r0zPtpR04ahWQKZivEbaXreoINXOYLb6eFZVnY7ffs9ztJI4HOwHK9sut7buE0wyBdtHOWl5cPyupYLhSEBd7v9xrJWctWkvI9rOXf//3feXl+5vn5GVdF5G2rDYyxpKy2dxwnUlUEJKVQYmLOiuXsBy2kc8m8vb+TS+Hp6RGCOgVrpT+/rkpB6/rA6XLm/XLi04dPDIMq
9GoM27JgquH1/Rv//f/6b/wv/6//iA/PeG+oVfLQWAvBelWRVg5hY6DrPMPQMV81erBef6/JGYvlFt0YGoix78dmchNcL/QD095LfbWtnN/PhI8dvnrWdcFYtd3OCdhXq+jEDkWbLsvC189f+Pb2jU8fXrSo3hIlJWKKLOvCukS2ZSH1PcE65su1JbNV5suVfpwYx13bJ6DfOTVeUmexRT6A0Fy2CvA6NI9RbqKDpn6y6qJSuu0FI6MPgvAFy6cfvsNVx/U68/75ndP7K9/98KP2IbcDtGrpbqw6qmqM9pSHI8M0aeRX5KXItULOVCdDp/cWqvhjlcowDHTd0M6VDRs8UxnIMbXUSuRUd56hjRKdC6S0ErwlOC+11fVKKQOX01lL8/F3+WrOUVialBr2XfLm1CYWIfS8PD5yma+kLHx8SS3moEdJkt7x8PTMfhhZm4yXlLhuG+fzha4LHA4HXJCqSay40HwrA32pfFmumEakMEbFZwhBz0oprPPCsi5s28pj1+l8dZ44z3TBc2j8syVpL/HDD9/zenrn119/IafYVHd6Z0zVpRLXSN/Au9bKKhBTZAiDiNrOcdwfsFWRqQ4twmOJd+/Fx0+fpLIblV6JNZL/m3BXg3Yu4NV+5eZm9MoAAIqz5DXShY7fEzogplWSRNekiqgCccHhi2tZugmMMqtzybj2v5fmBU5FevjrMtOPIznFFiRisFSeP3y4p0zlrDl1oRC8upnSHLclrpS4MW8z3336TnnCNkFpRiqh2djv9CJerxfe377y6eNHTBU+gVwoqXI8HKRtLkUqEQzPT4/EmHRR5YqjEKuMeLlWtlwoDT5XDVwuZy7zlb//41f++Ic/ErqO60UobtuckvO2MF9njseDTEoptkAgkUN7L1WEaSM5Gzp657m0TGFqVLVhHOMwMPQD27YydSPmg6UPPUuMbccgF/N0GDmdL3z5/JkPL88yMDbZZ4wbfpiw1uGdZtbWOVxGOALnmPqe6ypkg90kravA6+s3jHV8fHm5O/RrNpyvM7EkdsPA6XTh3//6F67zhanv8f6ZYjQq2GKiH3o+ffhInFf2+4f2jMXW8Rg8Gi85W/G2pw5ydntr2Ja5RTcqH8I7ixt6UlOfee8Yuo55i/gQqEaI8XVb8UFFzroom/nwcLzLiMXjCTivYKHqm/8h17b8rJxP76QUeTge2R8Ocqpby/l8Zr5c2NZIrpktRta0sT8eWvyvJ6bIZZmbNLa1/5cF412rQtWlGmfIqRLavP429ig5sq6JaZIhzRiawsvTdW0CsCWu7ycOj9oluXaAFSqddzw+PbMbRzAt1KtW3udZVaQPTLsdxljFjEqTK7UOog5TirrJaptoQsbEmdr2GzK5XU9naqnsp4kYJGEPskLTdVquy5DuMG3c2bXvaNsSh8OhSZQbaiRloqt0xrOukXHsG1vJ6NntLK6isewtJOl8bp4MKZP6cVJkb05s88rzw4PAmynh+sCXL1/x1tF1PV2QKMI2MOjdx2DVIRvv+fThI7VJYMFwOV8kL7ce5z1937OtK9Y6np4f6Vwglk0dSefvAV21ZJYklddht6N++iRBzTAqo6aIFtH1g1zc1rGuC33X32WyLohwXZtU97ptSsULpimqpJLsOu3MaiPJklUEdT5wOp85vb7fuzpvjCHWQs1SAJmWEeGqrOzVcFfQKIaybwC/1JzQWhQ6oPMddtCM2bV5eCkKIKoVHE3KZbRE7X3H6fyu28pmAfXunKjSgnMctSRyzHSDnINx21jmC6VU+nFgN+yYlxVjNzof6IYABYK1rfmQZrkU+O67n1QZdj19LMRS+OH77/ANEeC94/PXr3f3s2166FoypVpiTtojhKBqu7YIzCIC5Dwv+rtuUaQUzJ2fgi6lVh1bqzCl/bQn58ylaHSxbVtb/CXOpxPXWeOy0lysvuvlWG17IQ3gHQd/ACeKpXGe63ImLUpXO5/feXwQNnierwz7HeTM9TLThZ6hoSSqleu1VM1+c5KywxhhBqosEMo4nnbsW9eBNQTjWdPGul4ZhwnnHJ8//4NaEx+eP+B8xy9/+xvd0PPp03eM4yAVlff8/Kc/qoDImS9fv+AMfPf991hjSDlinBQb9YbbKJKuPj49M+6POO8JxhJT4np5Y3h4aLrwijeWy/Uqd/HLC8m1wwawNtD34R5sv62buqssKsB1vrIuK49PjwQfSFGJXofjA0/Pz1Qj2XIxlRQ3tutVjmsHh2nH8XiQL6CB5ow1TOPINI13I1aJVXucdaU4x9gQNdZYQueI2yac+WEnL4wLdN3vpNqbiumWnuicY9xNvL2+kqpyNqq1UtRZw76NBq0XWqMYGlcq6BKs+nlovoObkpA2wky357BNLnrvmVPjiFUJeJfrpe0gxEUy1uG55UtUBRVVSFW59tPgJa2t8mWt64oLHbuuI5XCw8MTVL07OWeMDzw8HO9FqnWay9dcwDuuyyI/THA8Pj5AKcqdb4Y+RyUmRzBOMnxgN40SjqTEMAWeHp/khaA2igEKz6qNEQUYCtYHcqM8GKvkwBwzprOYnDBOkEnnXYsxTaxx066rwRqN1di+1kJJGkEfjg+6eM9XjDW8X955fHgSLt2quCj19nNp32AqxJI0oq+/74xNaTJ5Z4lLwhTofU80kRw3ileOfCqV129v2iebtgeVS9RTs2k3emkVuDgiJWWsVcWVamHsBxlrSst+aKMlqjDVCq/P4LwumxZlqhc7CXyFEMHWWYLVB1iNXKzOB3FwjOTbpSR8aCarJtec56taOt8xhI5sG+xuzSS/MY0fwGSWecZ7h2/jEdeIjTGmm+ZPclrnBeMqhViM5pjWEnPmcDxSc+J6vWKBZAUrrElu1i4ETIa1Fl0aFp6fX/AGtpRaDnjLN6h6QI9OWv+0/n74Wu94DFpgO2MaHrvj+fmJYRYKfFnXhudoOR1GbKJlW1nWlf1hpDd9k/9VteUhMG8rfT+w++HA2Pcs28o8z/S+43g83Em41eglKEV+AO89cd2akcvSN0TKNi/8+o/fOD48MB32ko46jdakEjoQml7+8eWJfdzx4dN3vL2+8tuXLxwfHnh6EjhRipMLNNNZSllomKqgGuccJjUAmrHK8WheCxcGHsed4jStJRjDvCxcrlf2xyPeWtamjvHB83Y60+8mxq5jTSs5ZaZpvDtaSy68vr+Tto24bRzbRZOaKW/a70gpsS7aI5Tb0nQc8VgZkJ4fMNXhLyd2Oy2n70EztXC5XNgdD3jjmsInUopMp1M3NYZam2c3MJw1DtfLv1EbRVbBTTq49b0V0pK1A/HqhI7HBz2fpVCq4Xy5EJersB/6T977bWskY12WhdN1xlYl4DkH2yJNfzcqVe1m+ryp53LrouIy0+2me+ecUma339+pAsY4KvpeKaWlzW3333ccx2YerRwOB+3dkjAStRV7Ifjmq6ptCXsz3gkddDpdCcEJpx7kps+lNISGnqN1WRQq5R2T7+5IG4Oirp5ePghkaSoOi21jpnlbBLV0Ch2TjNdSc+KyXO+XVIor9rBnGkZqLeyNUee
SkjJtFiFUwtBrJ5EzvQ2YLuCzWHQFSzBeU5dtofOev/7lb5xPJ37++Y+YhuLpuh6CJRRD9VVqsG1j7Aau64xzTgDWWhpOyPG+vrPZjYf9ASLgvLqbqHCxfuhgpSE+PP5mZceK4FQqrNuM8m6BKlzG5XJVHGMp7L3MJSa1zAmKBtu3qt0o4tIYBWxocCamUkoy4j1/eGFt4wI5oq9axjW1gHdBf0czC9VS2JaVbuiFtsiZfui1sE5JFcgwMPSdMrOBb6+v7KaJvs8s68rQD5zetAvZ7cTJsU2HnFsOQiiFvh+IcePyvujBtJYvv30Ga9kdtV/IOfHlt8/89ONP2GBJccOUineBDy/PrFvEkdv8NuOxLNsiCNwwsKXI27c3Hh+OeuFLaXJhmZu8V+Sq8zu6XnP8fhh0wTXFzQ3q571j54RYTjlr+WyrllHrxm+ff2OadhjgMs9sccX3PbtBOyIah8tUeTas1tjklsudose5FhBFYWns+sPxyDxfyaWw3+9uX7MotVWLvR+//4FUC50LzNeZjx8+aKeRVZFVYBhH4rrqd+x7fvjxxztkzhiZxlITLIDUUSBDnlhD2qltVvLIT58+qespAuGdz2e+vb1zvZx4+fBEbnsKY6Q+yzXjbEcsiS+/fWk04wrO8d3HTzweNQYTejtRa2EYJ7Z1oxaoKZO8EQZh2sv1G4c2es2a78NdcOFaRyFSqKcFnKvrbc7m1BDrYmipApWLXQmDNRZyM2emLao7NqVh+2/cq16LzvYeGvSd2CalNsYqsxuNJt7eXvn67ZWyiShw2O95fHrGej3H3lrWGMlJLC4XQsOFKGb08z9+w3x64cPLx9at3Ob0kgf7YKi5Lb2tpOhLXMlb4nq98On77xi8uj+ZyTKmimqajQK+bFsKb9vG2yLiwn6/xxvL+/nMl8+fmwIIfqu/8fDwyOV8xjrP49MjtkIkYbAtH7ve5nCA8DbjqPczrZHdMCoPJ3S8n090fa+xL62jiELcd8FDaFt7cyB4T1qjwKXNv0XXg6mEzuGK1FIWCQksQpWLM+fxk56D3WFHWDQa3e/2/Nf/47/iQuAPP/xMKYXLMtPbAayYcjcZe9tdt5FRIypbFevHw0GXSRKLypTK0CkLPRiYxqntlgwlRvwyX3FeEYq5FK7XCzlmfMjsd4e2LFHlEHMkX6ELIrjmmpXClCVbDU5y01wy3nhyTTijeT0VQhgIgfvBSz/ivUxIaUukvNAPbcHYN01yqZRbGh7KC+6be7LvVImuubC0jseFTkuYnPj08QXvO86X9/uS/bdvv3HYHxh3TQpIZdsifW91KDotN6tzWrK23INpv+N8vlCqIW0rl8sV64VmCBhSbp0EImZa5yT3pOBLS8xqc8RUCq+vb3z5/BvGwsemEMopU61iXIMVciFlyXuXGMlR1bc4TKVJSqXGkQIys21bS6wr979T+QnwfnpnnhceHx942B+132lB8DVpHixYIjgjVMm6LazrSui6exhVGDoeHh90kCHq7boIn37Y78itK/RA8EHL3JJ5enpmt9vJMY0kvDRpoxmGNuagaeKFNKFdDNapepy3VbGi1qrKQ16b3LhL3inAJbeCRqZOy3a98nh8Zr8/yKPiVJys28b5dGKcBvbTHh8C1+sF5xzn05lPHz4yNuXH9SK+WTcNeGOJRlLIWqFsCeNpFFJ5RuZtoe8nqlNew35/bB1ybTC6guscaVOOvHdBBF7jWBdhS5wPLPOJEAK7w4FhCMynN/76t194fHzkw4cP5KqDqncaa6VNaWa2dabW1ObvyTy/fNAOUvNfcqxEr1l1zBVvA/4wsh/3dJ0n5yR3dugoDmqsYISf2JqpVB3iZ+bLhf1xRzKSfe/H/d2fYEwW76i51p21OC8fTPGBGJV9YQJ3Sbs1jnHUTurmLTLG4jG8riuX04Xz6Uwp8Pj4wDSMfMVorBY879/e2e0k0Z2vF/a7CWcs06hdTokZawu1IUBEH2mQx/YOxy3K2d95DsdDe/aUCvft9ZX304nDbmK/1/8Po9hTj6TSdd0k/mndEtWQchIGpAuE0BGcRCShydy3FqZmjEb03ktl+PLhI3/95W/89V//je9ePmGccqpzzorVDeZuYahG6qUcN9FyndDypWicfTqfWK5XcIHnlydqhSH0pJKwRgbpsR8UhWydlQ77Jh90CoJZlsg0NridtTweD5izWsyuH+4VgtzRBlMqqWa89WzLRu0raStitbe4zlslWGtly7UF7gx437E7OKEvhol5vapj2e+pFta44Uti3Ilqmk0lxUzMVw6HI77rGJpk0xtDRESFVDRL9KEDq1HQ99/9oBczJ8URGoM3heTbyCkmcK4paCT5M1mRlTkL6ZCyKtxpmqg5E63a7bwl5mXTZ5YztakILstFhFUnKJ2pkNaV19c3rLV8//E7qVNu+59aebte+fb2jWXd+P7TJ7ZmPrPW8vZ+4nkYqDUxb83FbZsZb9uw3iuVbF0xPvD9999LrdDGSjQneU5RKIwQWIt+bkttAUpCNGyXK6HvtNxsc2k5Sytb1uFmjOH99KYlf9fReRkTS62kUklRqinf+UYPvcWeylRXs6qkUjI5yy3bt4vCG6MRR5aUOW8RPxl811GcLuecYqPsujvVNqdCN4hV04Wen//0Rynp2s5FmcoOH/Tdnd5PGOt5fHjQTsc6XXCNabOuUbsnr4XoZVn49uULruvYjcr+GNpCsxqoFDrncc3h/346s9spr6OUTNf31Jr+H54eoaDVvndjT9cFYspgkPzTSVixbQrGuXU7WEEzjbc6LARqJUDDQFQwlWno2nK1JQgaK5R4kaP8w+Mz20GO5jEETtcLb2+vfP/dD4DS1FxLxLNGhq1lvnB6f6fmyvFw5On5CVcMv/z9Fx4fHvj5x5/UGTjh+Uut1BzJVbyyYZzuGSfOKrd+i5FME0g0a7bEMi3mNFdy3ORUXjc+f/mN3bTj6fjIt91XTpeLul5T6bqgzmCLbFsSZ+5R6qllVYqf6zuKreSctMMJnr6XzPrr21f2+wO7caIPHZfrBec8X16/8l//v/8np8uJH3/+mX/68z/x9PQo6bUVbbmUwrotTRnq8c7CLUZ53di2yMvzoKV3G71ll+97xtvgX/h7jar/1//0n0XAAGzOuK6nVgskvHEkUtvTWIK1VKtcjVoFBvzy7St/+8tf7xTvUirTODD08niE6qE3dypE5wK+83L8FqSE6UMHGKahMC8r87LweNwzDCPPfS+Ojbll1LoWqNGyWevvlanDYnpPNY55eZfJygpPABDXWYym9n+XWtkd9gTfae7cLhOHvAiuBcar8nRkExs8TulUzitPN2fJQEst9MVrhOM93ioEx44iqm5xY1kXjbtCr1Ada5jjxi7stKBO4hHpZ1FuBAZ2u0mO7U3MKhttO9Q0Wqu50S2Batt8vVhy0sLIecfTxw8cnx7pQ0eqCmIPVjPW0+srnz9/5f39nUrl4eGB3TiK924qMa1czydC6Di9v1NK5ofvvsNhGHZ7Yoq4CqkanLPEokPx+HAkWN/Q4zLR1R
qpRj9/uX3mjRVjS9F+wSlEZ8sirlIKl+VCsJ7DtMMYw/HwoMOtUz60p2VntOhG8bzAYYkoAEqGQIkg5nXB+8D7RcoQ5xzrulJ9wHvLuqycTu8Kf/JefoBtY77MjLtRoDzv74lxKS9454kxEvqO54ZYEOJb+7SSs16ODx/5y1/+wvV85eXDB0n9mwmx6wSidMawGya6Qal287yw5UxYN962jffXV/74pz+3alEHW0aLdhqcbb5qTp5ilKy1LZlv1NktRcZhgKILxgZHV4pEBW10k2oFY3l5eeF4eFDFi5aVrnYaefWuKZI0Pqy2skXJcXfj7e9tihrbmEUx0fUD3gVKFpRxuc7M11mXCoaN0gCMFbLBj0HIjqHHGhUpw9DzfnpnWxb+sa18+PjpLuHEKMlwSQlDVeyw8/fPoZQiM6Sx5Bix93TJqoIOjcVxFuND+1wL6/nKeb7w9PDAbr/nfJ0xztK7XnsAp5wVV+Hi9ffMaVFEgPfE2Ax4bWw5Wkc1hvPpwtvriYf9g9I1U+TL6yu9d/z9r39jiQshBF6enpimURdTO0fI2tONw8i6LeQcyRnW08rT0xNPT4+kmDBOOw0XOoIVAtC3tEeQSKDmrN9z2Xg8Hnh4ONDdRAel4L3FIBLFmjZFDVcryXajXqRtoxC5nC/KBqmayjinfJm+JVneoKAxRl5PbxyPR3zM2/9DznobUVjruVy/sc0L5aDIxNqMVtU6SoWaVN0WMpaiB7gWpt2Id514TCVTcpu1VmmVl2Xhuqjdqgh9fD2dMM7x8eWDWD80GLm1TJOqKNt4MLQXZZp2uqgA33TAtt6kglLNuIYbv5n2bsu/WyKUbQjzbCqXZcYUGKdJhNHY5GEGXIV/fPnKfj/x/PjAfhx52yLrtlFyxh808pChTwfkbe58mPYsy8IyLxwOO5yzTGbANLx0SYXQFtZvr2/88uuvxC3SjR3juGO3m+g6LZzXeSanzPly4enRczju7yqpWEoz0yDkRt9LadX2Qt60w9p7KSnQ6BDTlGwl33dJueFHFDdbmqzUKYejjUyq059bqPSjmFrGGEnwDKr8iioYBbPIiVxLJSNwW0qJr6/f2I87pmkkbQvG7Di9n/i3//FvDGPPhw8f6IaBaRLuwbfqvKZ0Lx7WeWUrG36amJzH9lVS5lrpQqAkVafeeazzkDOXVeH1wTvthqyMmd//8FOL1n2Taikl+rG/e4NqKYSh49OHTyzLlRgjp8uZNa10JbDFCDXTB0WfxpI57vZyEztL9Kqqc/tX3/fULM5RKcImAHSDdmYEXXZxS+x2I5fLmZITQ99hmrpl3SLWW/owKNSqKXHWORI6MZDev53ow8huGPUdk+9hO/JAJYU5WSOVYNdz2O/luB8GQmnO+JL0WVW1v97Zu3+gFFjOi0Z9LqjiLVUehKxOdtsUcNM18N2WhKFQOqV+d3UNeqeLqZze3zHWctzvMDdXvKlYbxmHiaHroFYO0w4+FWjqQe9c25tWtlrpO+UyrJs4WC4EvDVcF42FKm0C4SzTbqTrvqMfeqW+1cLz4QjO8YefDd/98AMlJb77/juGfmwdMngDscLlegFgHIQ+yrnydnrjr3//O999+siSIyxoT9kN7fkq9wx0aqWmrMx6Y7GDLvFaS/NMGXJtJrq2I+l9B1UycmeF/fBNWVmr4fDwAAbe396FAx8nfNDF6VxLNcRyvl5Zlo2no0WAFKM5Xh9ck1VKHeOMbXAvGd1qe/mF0i1crzOh93d+UnCB0LUPC4hpIzgvzTC0iEGlcgUnpG5NyjUYdxPruhHbDZprJXgdosFqP+Gc4XKa2daNw0HkVIpmldkBRbLBrg9yZjaJ3k1DboqAf5flKgu+dVqS+yAHY1PnlKzlufFOKYhVMrj30xt9cErYc8IydFXQvdRMKzkXXPAKM2l7gWqANtu9ERvvrPhWKQiaVzhdLuSU6frA99/9yOOjnNJvr2+8nd45n0+ELsi42DhG98vTQI56yGzwmnPSlrRO33OhtPSx+Ht+BZX1etXsvCnWeicMxzYv+E6VRm1u3nmeOb2f2R93mK7JHs3vQS+F+rtaLEUpebyQ1DVr3JFLQ2s7HWA+BILzPD08MviOX8+/8Pr2lTD3pJj56Y8/8/T0fK/2AIL32sXkzDCNxC2yriv9vqPve2KMdF6z4GXbRObtA9MwqYhw2rtsm0B7FXVdxgjkt2vPbS4t2br8PxPrfC9qbNpW/uN/+k/spolqDd4GulHZApKH13tEbCmFkjJ0OhBSQ564Bld8e/vG6e3E49MTU9+TQCNR5zCdOpzD4ahscbQAD10gRK+Ap9FQtqw435bHbrH4oG7KWmVv1IaIMe2SVG50JsXtjs+fxgEbAlvK2KaxX7eN5XphHA8tZzrdPS/ys1hijvTDxHefPjJ2gZIy27Jyww1P49Rw7m0S1pbVipoxXC/zXd3oxw7fcBnzfOG6OA67PeM48nQ4Uo3h4fGRsevv0vTdoam6bkqsdh7VEjHe433H9Tpzen8npczT4yNj33FdV7Z1pZbCNO3YTztckw7nIhruOAzgHX3fsS6LSABbpIQObwOpZtakhTsoVK3vlIbog+Pp40c+//ILS87aeaIEuMs8M47qIvO6qMBrcxapsozICMZoxFsrWMflcsX7ICRMk8JqUpLIZeF8PhO85/H4IP/POPJ4OHB6vDBfrwzD0KwIJ/YNPVRqZepHOq89pDfAOl/lJnTTzdQM1rE/TMRNS8KSs4J6FEbMLUbUYkVdLMqasCVjjMYywXmKEU/JIPrrul2pMeOCFkTLthBqIHS97PNN9ne5nO+UTutFq9xi4uvnz8zrwjD0gr15LZ1q1eETa2qkx/aQIB35tY2gQgicXt95/viiQ9u4BthTClkxlXm54rpw1x1bKwnt9x+/owvSRNfmu8DZ5ma8xRjqS0rNT+GbczTXyjRqLJKpDK29W9eV3gdsraRVecDDODAOA7vdjhA818usxeAy04XAYf+gQ3CN5FrZt3HFvG3tsmsZt40Ce7qcGceRbvTUrSihz3spgrZFlXBb1j4cjqSqea6z7n4RpZJ5fX1lXVcu5zPbugmQ5/XZBxewxmjhnRNV6e9aIpMhCSy4tfD3rpnZpn7g48ePDKFv0krLLdZ02u3VVpfE++sr3nl80NHrjGFOic+fP3M4HDk+HKhFhcu8rm13lKlRIzAhKjK//fbK8fjA49Nj87VchfvwQTPjqjwT1zqokjPtPic1yahGNEkNba7YLnAc+rspSclncvQ6o6yBWJQYdkNbOPj/F3Umy20cQRB9vc2CjQwRFCVLVDjCYf//H0nhsLUQlgQQmL0XH6oI3XECMD3VWZkvsVWlQELoxoHvhwOXvschdvQlJ4VhCqakjz1d3+uLWBPl6jBar9fy8kxFE9xWnTiWIlc5bNtSOcF8l1KuATCTrU7dXnD7XtAvzjhWztENA925A2NpmiDYGANxTkSVyCp1WeE9+/1r7vb3wpHKWXqfjWW32XI4nShGbncOkUSKuox8ETt4VQdOxyPzPHBfPWCDBDq9MUyLNLtVV
eBuf69yklebayaWxPl8oVmJzm5eHFGI+6cYwzLLf7cAfX9mijNN1bDGaIPgzDD05LqmdhUxR7zq/CkXmIQr5auKlXNEHZ4W6xiHQaZy66QTJ3hOfcd2A61vaauG12/eXgc0i6GbJ75++ZcP7z9gvVeirJxfyXB9jo0VdDoGjA4/Xp1qgJ7TIm0WJSYcDodrFsuYTKgrggvcbLfsdlvIhX4aOB5PVNoc6YwTpaaqRKWRa1TB+/pqYy2pUIjUoaautZQ+Z/nBTydwjpvNhqI6HgaCrYTgOC8UZ/BOpn+bpT60qAQjU+0vj/XL4rsO9dW7HWPk6emJ7z9+8vj+kd0usJTM4XDg0nWkFHk6HPj98VG1YxD7moSxxmGimMK6aeWWQeZweNJrrGMaRjKFNw+viSRcFgSB845x6CXIst0SiywTkyuA+LilTnGBaSGbQk1FQnYx8yxIBJeFcDnNEze7W4buwrevX7m/v2NJ4viZ1IkzTxOVSkXd0HOz3VFXNbHId7ksgre42+/lD1gKm+2aEjPH5yPWOLarliVnlnHipbmqqPd/Gkf+/vSJN7+95aF+4HnsCVVg5VvGsedy6eS25MRdIwUqogtTMikrZwr4+PGTINOt5eHhQXVaWZRNWeStjLjIYkYtvI7j8SSsreAJtQwWc1wwMWEbS+XrKxEzLgt121DyS02s5FWmeZbRMslLvThHyoXa+6sLxVorS8plZhoHhR4uhEqaD5u6wW7F/TFNsrNoWoUpWo1G6U1WwrMShnTaWHh+vtDq4TNOC01dMWvX8sBLaBOsS9dJNqrsGHTh+/KZRetqhQTQ8/PHT54vF5FpVi3BOaZxph97dusN5+7C538+k4H3796y3WxYNORIzkyzJv+xLHEmp6KqrHAMUpJnzwYr+xxkR56s+O29cxSn3CcjFk+ZFguhCswp6Q3iFXUr8LtcEqt1K13Q2u8RnLCqnO59lqVIz7Ii0//78o1zd+H+/o4P797JIaQL+ZQyKWfWq5amrhjGEWERCuPIqOsuqpQS40LXSS5is9mKaSElYpqp/eZXAFAHuKzPhfdyQIcQ9LaogTMDlbNYZOeVU6K4TDAOCgq/hHmZOF8uQjr2nlotsUnl2qAav7WG6J2wm7LA/GLOrOpGh0zZBZUUub19Rd02gtzfbMTAkcUIFEsUY4N2nRhjZAHvHFinEqj8XsZb4kXOpnXb8tcff15lpO7SscoN6F5WAi4QrOduvxfJKqUrZXdOCecD/wO6KoanIvmT/gAAAABJRU5ErkJggg==", + "text/plain": [ + "" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from PIL import Image\n", + "Image.open('demo/banana.png')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sRfAui8EkTDX" + }, + "source": [ + "### 准备配置文件和模型权重文件\n", + "\n", + "预训练模型通过配置文件和模型权重文件来定义。配置文件定义了模型结构,模型权重文件保存了训练好的模型参数。\n", + "\n", + "在 GitHub 上 MMClassification 通过不同的页面来提供预训练模型。\n", + "比如, MobileNetV2 的配置文件和模型权重文件就在这个[链接](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2)下。\n", + "\n", + "我们在 MMClassification 库中已经内置了大量模型训练所需要的配置文件,可以直接读取。而模型权重文件则需要下载,方便的是,我们的 API 提供了读取模型权重 url 的功能,因此可以直接以 url 的方式指定模型权重文件。" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "VvRoZpBGkgpC", + "outputId": "68282782-015e-4f5c-cef2-79be3bf6a9b7" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py\n" + ] + } + ], + "source": [ + "# 检查确保配置文件存在\n", + "!ls configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py\n", + "\n", + "# 指明配置文件和权重参数文件的路径\n", + "# 其中,权重参数文件的路径可以是一个 url,会在加载权重时自动下载。\n", + "config_file = 'configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py'\n", + "checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth'" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eiYdsHoIkpD1" + }, + "source": [ + "### 进行模型推理\n", + "\n", + "MMClassification 提供了 high level 的 Python API 用来进行推理计算. 
\n", + "\n", + "首先,我们构建模型。" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 323, + "referenced_widgets": [ + "badf240bbb7d442fbd214e837edbffe2", + "520112917e0f4844995d418c5041d23a", + "9f3f6b72b4d14e2a96b9185331c8081b", + "a275bef3584b49ab9b680b528420d461", + "c4b2c6914a05497b8d2b691bd6dda6da", + "863d2a8cc4074f2e890ba6aea7c54384", + "be55ab36267d4dcab1d83dfaa8540270", + "31475aa888da4c8d844ba99a0b3397f5", + "e310c50e610248dd897fbbf5dd09dd7a", + "8a8ab7c27e404459951cffe7a32b8faa", + "e1a3dce90c1a4804a9ef0c687a9c0703" + ] + }, + "id": "KwJWlR2QkpiV", + "outputId": "982b365e-d3be-4e3d-dee7-c507a8020292" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/transformer.py:28: UserWarning: Fail to import ``MultiScaleDeformableAttention`` from ``mmcv.ops.multi_scale_deform_attn``, You should install ``mmcv-full`` if you need this module. \n", + " warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/local/lib/python3.7/dist-packages/yaml/constructor.py:126: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n", + " if not isinstance(key, collections.Hashable):\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Use load_from_http loader\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading: \"https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\" to /root/.cache/torch/hub/checkpoints/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "badf240bbb7d442fbd214e837edbffe2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0.00/13.5M [00:00" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "%matplotlib inline\n", + "# 可视化分类结果\n", + "show_result_pyplot(model, img, result)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oDMr3Bx_lESy" + }, + "source": [ + "## 使用 Python API 进行模型微调\n", + "\n", + "模型微调是将预训练好的模型在特定的数据集上对模型参数进行非常精细调整的过程,最终让预训练的模型能够适配新的数据集及对应的任务。相比于模型的训练过程,模型微调大大降低了训练的时间,并减少了数据量很小的数据集在训练过程中会出现的过拟合问题。\n", + "\n", + "模型微调的基本步骤如下:\n", + "\n", + "1. 准备新数据集并满足 MMClassification 的要求\n", + "2. 根据数据集修改训练配置 \n", + "3. 进行训练和验证\n", + "\n", + "更多细节可以查看 [文档](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/finetune.html)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TJtKKwAvlHX_" + }, + "source": [ + "### 准备数据集并满足 MMClassification 的要求\n", + "\n", + "这里我们下载猫狗分类数据集,关于数据集格式的详细介绍参考 [tools 教程](https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs_zh-CN/tutorials/MMClassification_tools_cn.ipynb)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "3vBfU8GGlFPS", + "outputId": "b12dadb4-ccbc-45b4-bb08-3d24977ed93c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2021-10-21 03:57:58-- https://www.dropbox.com/s/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip?dl=0\n", + "Resolving www.dropbox.com (www.dropbox.com)... 162.125.80.18, 2620:100:6018:18::a27d:312\n", + "Connecting to www.dropbox.com (www.dropbox.com)|162.125.80.18|:443... connected.\n", + "HTTP request sent, awaiting response... 301 Moved Permanently\n", + "Location: /s/raw/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip [following]\n", + "--2021-10-21 03:57:58-- https://www.dropbox.com/s/raw/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip\n", + "Reusing existing connection to www.dropbox.com:443.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: https://ucfd8157272a6270e100392293da.dl.dropboxusercontent.com/cd/0/inline/BYbFG6Zo1S3l2kJtqLrJIne9lTLgQn-uoJxmUjhLSkp36V7AoiwlyR2gP0XVoUQt9WzF2ZsmeERagMy7rpsNoIYG4MjsYA90i_JsarFDs9PHhXHw9qwHpHqBvgd4YU_mwDQHuouJ_oCU1kft04QgCVRg/file# [following]\n", + "--2021-10-21 03:57:59-- https://ucfd8157272a6270e100392293da.dl.dropboxusercontent.com/cd/0/inline/BYbFG6Zo1S3l2kJtqLrJIne9lTLgQn-uoJxmUjhLSkp36V7AoiwlyR2gP0XVoUQt9WzF2ZsmeERagMy7rpsNoIYG4MjsYA90i_JsarFDs9PHhXHw9qwHpHqBvgd4YU_mwDQHuouJ_oCU1kft04QgCVRg/file\n", + "Resolving ucfd8157272a6270e100392293da.dl.dropboxusercontent.com (ucfd8157272a6270e100392293da.dl.dropboxusercontent.com)... 162.125.3.15, 2620:100:6018:15::a27d:30f\n", + "Connecting to ucfd8157272a6270e100392293da.dl.dropboxusercontent.com (ucfd8157272a6270e100392293da.dl.dropboxusercontent.com)|162.125.3.15|:443... connected.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: /cd/0/inline2/BYYSXb-0kWS7Lpk-cdrgBGzcOBfsvy7KjhqWEgjI5L9xfcaXohKlVeFMNFVyqvCwZLym2kWCD0nwURRpQ2mnHICrNsrvTvavbn24hk1Bd3_lXX08LBBe3C6YvD2U_iP8UMXROqm-B3JtnBjeMpk1R4YZ0O6aVLgKu0eET9RXsRaNCczD2lTK_i72zmbYhGmBvlRWmf_yQnnS5WKpGhSAobznIqKzw78yPzo5FsgGiEj5VXb91AElrKVAW8HFC9EhdUs7RrL3q9f0mQ9TbQpauoAp32TL3YQcuAp891Rv-EmDVxzfMwKVTGU8hxR2SiIWkse4u2QGhliqhdha7qBu7sIPcIoeI5-DdSoc6XG77vTYTRhrs_cf7rQuTPH2gTIUwTY/file [following]\n", + "--2021-10-21 03:57:59-- https://ucfd8157272a6270e100392293da.dl.dropboxusercontent.com/cd/0/inline2/BYYSXb-0kWS7Lpk-cdrgBGzcOBfsvy7KjhqWEgjI5L9xfcaXohKlVeFMNFVyqvCwZLym2kWCD0nwURRpQ2mnHICrNsrvTvavbn24hk1Bd3_lXX08LBBe3C6YvD2U_iP8UMXROqm-B3JtnBjeMpk1R4YZ0O6aVLgKu0eET9RXsRaNCczD2lTK_i72zmbYhGmBvlRWmf_yQnnS5WKpGhSAobznIqKzw78yPzo5FsgGiEj5VXb91AElrKVAW8HFC9EhdUs7RrL3q9f0mQ9TbQpauoAp32TL3YQcuAp891Rv-EmDVxzfMwKVTGU8hxR2SiIWkse4u2QGhliqhdha7qBu7sIPcIoeI5-DdSoc6XG77vTYTRhrs_cf7rQuTPH2gTIUwTY/file\n", + "Reusing existing connection to ucfd8157272a6270e100392293da.dl.dropboxusercontent.com:443.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 228802825 (218M) [application/zip]\n", + "Saving to: ‘cats_dogs_dataset.zip’\n", + "\n", + "cats_dogs_dataset.z 100%[===================>] 218.20M 86.3MB/s in 2.5s \n", + "\n", + "2021-10-21 03:58:02 (86.3 MB/s) - ‘cats_dogs_dataset.zip’ saved [228802825/228802825]\n", + "\n" + ] + } + ], + "source": [ + "# 下载分类数据集文件\n", + "!wget https://www.dropbox.com/s/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip?dl=0 -O cats_dogs_dataset.zip\n", + "!mkdir -p data\n", + "!unzip -qo cats_dogs_dataset.zip -d ./data/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15iKNG0SlV9y" + }, + "source": [ + "### 读取配置文件并进行修改\n", + "\n", + "在 [tools 教程](https://colab.research.google.com/github/open-mmlab/mmclassification/blob/master/docs_zh-CN/tutorials/MMClassification_tools_cn.ipynb) 中,我们详细介绍了模型微调所需要修改的各部分配置文件,这里我们可以以 Python 代码的方式修改基础配置文件如下:" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "id": "WCfnDavFlWrK" + }, + "outputs": [], + "source": [ + "# 载入已经存在的配置文件\n", + "from mmcv import Config\n", + "from mmcls.utils import auto_select_device\n", + "\n", + "cfg = Config.fromfile('configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py')\n", + "cfg.device = auto_select_device()\n", + "\n", + "# 修改模型分类头中的类别数目\n", + "cfg.model.head.num_classes = 2\n", + "cfg.model.head.topk = (1, )\n", + "\n", + "# 加载预训练权重\n", + "cfg.model.backbone.init_cfg = dict(type='Pretrained', checkpoint=checkpoint_file, prefix='backbone')\n", + "\n", + "# 根据你的电脑情况设置 sample size 和 workers \n", + "cfg.data.samples_per_gpu = 32\n", + "cfg.data.workers_per_gpu = 2\n", + "\n", + "# 指定训练集路径\n", + "cfg.data.train.data_prefix = 'data/cats_dogs_dataset/training_set/training_set'\n", + "cfg.data.train.classes = 'data/cats_dogs_dataset/classes.txt'\n", + "\n", + "# 指定验证集路径\n", + "cfg.data.val.data_prefix = 'data/cats_dogs_dataset/val_set/val_set'\n", + "cfg.data.val.ann_file = 'data/cats_dogs_dataset/val.txt'\n", + "cfg.data.val.classes = 'data/cats_dogs_dataset/classes.txt'\n", + "\n", + "# 指定测试集路径\n", + "cfg.data.test.data_prefix = 'data/cats_dogs_dataset/test_set/test_set'\n", + "cfg.data.test.ann_file = 'data/cats_dogs_dataset/test.txt'\n", + "cfg.data.test.classes = 'data/cats_dogs_dataset/classes.txt'\n", + "\n", + "# 设定数据集归一化参数\n", + "normalize_cfg = dict(type='Normalize', mean=[124.508, 116.050, 106.438], std=[58.577, 57.310, 57.437], to_rgb=True)\n", + "cfg.data.train.pipeline[3] = normalize_cfg\n", + "cfg.data.val.pipeline[3] = normalize_cfg\n", + "cfg.data.test.pipeline[3] = normalize_cfg\n", + "\n", + "# 修改评价指标选项\n", + "cfg.evaluation['metric_options']={'topk': (1, )}\n", + "\n", + "# 设置优化器\n", + "cfg.optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)\n", + "cfg.optimizer_config = dict(grad_clip=None)\n", + "\n", + "# 设置学习率策略\n", + "cfg.lr_config = dict(policy='step', step=1, gamma=0.1)\n", + "cfg.runner = dict(type='EpochBasedRunner', max_epochs=2)\n", + "\n", + "# 设置工作目录以保存模型和日志\n", + "cfg.work_dir = './work_dirs/cats_dogs_dataset'\n", + "\n", + "# 设置每 10 个训练批次输出一次日志\n", + "cfg.log_config.interval = 10\n", + "\n", + "# 设置随机种子,并启用 cudnn 确定性选项以保证结果的可重复性\n", + "from mmcls.apis import set_random_seed\n", + "cfg.seed = 0\n", + "set_random_seed(0, deterministic=True)\n", + "\n", + "cfg.gpu_ids = range(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HDerVUPFmNR0" + }, + "source": [ + "### 模型微调\n", + "\n", + "基于我们修改的训练配置,开始对我们的数据集进行模型微调计算。 我们调用 `train_model` API 进行计算. 
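For reference, a minimal sketch of the fine-tuning call this step performs with the MMClassification 0.x Python API (`build_dataset`, `build_classifier`, `train_model`), reusing the `cfg` object prepared in the previous cell; the exact cell contents and arguments are assumptions, not verbatim notebook code.

```python
# Minimal sketch of fine-tuning with the modified `cfg` (MMClassification 0.x API).
import time
import os.path as osp

import mmcv
from mmcls.apis import train_model
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier

# Create the work directory declared in cfg.work_dir.
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))

# Build the classifier; init_weights() loads the pretrained backbone
# configured in cfg.model.backbone.init_cfg.
model = build_classifier(cfg.model)
model.init_weights()

# Build the training dataset and expose its class names on the model so that
# evaluation and visualization can label predictions.
datasets = [build_dataset(cfg.data.train)]
model.CLASSES = datasets[0].CLASSES

# Run training with periodic validation according to cfg.runner / cfg.evaluation.
train_model(
    model,
    datasets,
    cfg,
    distributed=False,
    validate=True,
    timestamp=time.strftime('%Y%m%d_%H%M%S', time.localtime()),
    meta=dict())
```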
" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "P7unq5cNmN8G", + "outputId": "bf32711b-7bdf-45ee-8db5-e8699d3eff91" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-10-21 04:04:12,758 - mmcv - INFO - initialize MobileNetV2 with init_cfg {'type': 'Pretrained', 'checkpoint': 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth', 'prefix': 'backbone'}\n", + "2021-10-21 04:04:12,759 - mmcv - INFO - load backbone in model from: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\n", + "2021-10-21 04:04:12,815 - mmcv - INFO - initialize LinearClsHead with init_cfg {'type': 'Normal', 'layer': 'Linear', 'std': 0.01}\n", + "2021-10-21 04:04:12,818 - mmcv - INFO - \n", + "backbone.conv1.conv.weight - torch.Size([32, 3, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,821 - mmcv - INFO - \n", + "backbone.conv1.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,823 - mmcv - INFO - \n", + "backbone.conv1.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,824 - mmcv - INFO - \n", + "backbone.layer1.0.conv.0.conv.weight - torch.Size([32, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,826 - mmcv - INFO - \n", + "backbone.layer1.0.conv.0.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,827 - mmcv - INFO - \n", + "backbone.layer1.0.conv.0.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,829 - mmcv - INFO - \n", + "backbone.layer1.0.conv.1.conv.weight - torch.Size([16, 32, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,830 - mmcv - INFO - \n", + "backbone.layer1.0.conv.1.bn.weight - torch.Size([16]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,832 - mmcv - INFO - \n", + "backbone.layer1.0.conv.1.bn.bias - torch.Size([16]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,833 - mmcv - INFO - \n", + "backbone.layer2.0.conv.0.conv.weight - torch.Size([96, 16, 1, 1]): \n", + "PretrainedInit: load from 
https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,835 - mmcv - INFO - \n", + "backbone.layer2.0.conv.0.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,836 - mmcv - INFO - \n", + "backbone.layer2.0.conv.0.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,838 - mmcv - INFO - \n", + "backbone.layer2.0.conv.1.conv.weight - torch.Size([96, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,839 - mmcv - INFO - \n", + "backbone.layer2.0.conv.1.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,841 - mmcv - INFO - \n", + "backbone.layer2.0.conv.1.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,842 - mmcv - INFO - \n", + "backbone.layer2.0.conv.2.conv.weight - torch.Size([24, 96, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,844 - mmcv - INFO - \n", + "backbone.layer2.0.conv.2.bn.weight - torch.Size([24]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,845 - mmcv - INFO - \n", + "backbone.layer2.0.conv.2.bn.bias - torch.Size([24]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,847 - mmcv - INFO - \n", + "backbone.layer2.1.conv.0.conv.weight - torch.Size([144, 24, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,848 - mmcv - INFO - \n", + "backbone.layer2.1.conv.0.bn.weight - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,850 - mmcv - INFO - \n", + "backbone.layer2.1.conv.0.bn.bias - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,851 - mmcv - INFO - \n", + "backbone.layer2.1.conv.1.conv.weight - torch.Size([144, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,853 - mmcv - INFO - \n", + "backbone.layer2.1.conv.1.bn.weight - torch.Size([144]): \n", + 
"PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,854 - mmcv - INFO - \n", + "backbone.layer2.1.conv.1.bn.bias - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,856 - mmcv - INFO - \n", + "backbone.layer2.1.conv.2.conv.weight - torch.Size([24, 144, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,857 - mmcv - INFO - \n", + "backbone.layer2.1.conv.2.bn.weight - torch.Size([24]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,858 - mmcv - INFO - \n", + "backbone.layer2.1.conv.2.bn.bias - torch.Size([24]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,860 - mmcv - INFO - \n", + "backbone.layer3.0.conv.0.conv.weight - torch.Size([144, 24, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,861 - mmcv - INFO - \n", + "backbone.layer3.0.conv.0.bn.weight - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,863 - mmcv - INFO - \n", + "backbone.layer3.0.conv.0.bn.bias - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,864 - mmcv - INFO - \n", + "backbone.layer3.0.conv.1.conv.weight - torch.Size([144, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,866 - mmcv - INFO - \n", + "backbone.layer3.0.conv.1.bn.weight - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,867 - mmcv - INFO - \n", + "backbone.layer3.0.conv.1.bn.bias - torch.Size([144]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,869 - mmcv - INFO - \n", + "backbone.layer3.0.conv.2.conv.weight - torch.Size([32, 144, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,870 - mmcv - INFO - \n", + "backbone.layer3.0.conv.2.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,872 - mmcv - INFO - \n", + "backbone.layer3.0.conv.2.bn.bias - 
torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,873 - mmcv - INFO - \n", + "backbone.layer3.1.conv.0.conv.weight - torch.Size([192, 32, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,875 - mmcv - INFO - \n", + "backbone.layer3.1.conv.0.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,876 - mmcv - INFO - \n", + "backbone.layer3.1.conv.0.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,878 - mmcv - INFO - \n", + "backbone.layer3.1.conv.1.conv.weight - torch.Size([192, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,879 - mmcv - INFO - \n", + "backbone.layer3.1.conv.1.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,882 - mmcv - INFO - \n", + "backbone.layer3.1.conv.1.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,883 - mmcv - INFO - \n", + "backbone.layer3.1.conv.2.conv.weight - torch.Size([32, 192, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,885 - mmcv - INFO - \n", + "backbone.layer3.1.conv.2.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,886 - mmcv - INFO - \n", + "backbone.layer3.1.conv.2.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,887 - mmcv - INFO - \n", + "backbone.layer3.2.conv.0.conv.weight - torch.Size([192, 32, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,889 - mmcv - INFO - \n", + "backbone.layer3.2.conv.0.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,890 - mmcv - INFO - \n", + "backbone.layer3.2.conv.0.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,892 - mmcv - INFO - \n", + 
"backbone.layer3.2.conv.1.conv.weight - torch.Size([192, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,894 - mmcv - INFO - \n", + "backbone.layer3.2.conv.1.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,895 - mmcv - INFO - \n", + "backbone.layer3.2.conv.1.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,896 - mmcv - INFO - \n", + "backbone.layer3.2.conv.2.conv.weight - torch.Size([32, 192, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,898 - mmcv - INFO - \n", + "backbone.layer3.2.conv.2.bn.weight - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,899 - mmcv - INFO - \n", + "backbone.layer3.2.conv.2.bn.bias - torch.Size([32]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,901 - mmcv - INFO - \n", + "backbone.layer4.0.conv.0.conv.weight - torch.Size([192, 32, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,903 - mmcv - INFO - \n", + "backbone.layer4.0.conv.0.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,907 - mmcv - INFO - \n", + "backbone.layer4.0.conv.0.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,908 - mmcv - INFO - \n", + "backbone.layer4.0.conv.1.conv.weight - torch.Size([192, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,910 - mmcv - INFO - \n", + "backbone.layer4.0.conv.1.bn.weight - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,911 - mmcv - INFO - \n", + "backbone.layer4.0.conv.1.bn.bias - torch.Size([192]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,913 - mmcv - INFO - \n", + "backbone.layer4.0.conv.2.conv.weight - torch.Size([64, 192, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 
04:04:12,914 - mmcv - INFO - \n", + "backbone.layer4.0.conv.2.bn.weight - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,915 - mmcv - INFO - \n", + "backbone.layer4.0.conv.2.bn.bias - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,917 - mmcv - INFO - \n", + "backbone.layer4.1.conv.0.conv.weight - torch.Size([384, 64, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,918 - mmcv - INFO - \n", + "backbone.layer4.1.conv.0.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,920 - mmcv - INFO - \n", + "backbone.layer4.1.conv.0.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,921 - mmcv - INFO - \n", + "backbone.layer4.1.conv.1.conv.weight - torch.Size([384, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,923 - mmcv - INFO - \n", + "backbone.layer4.1.conv.1.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,924 - mmcv - INFO - \n", + "backbone.layer4.1.conv.1.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,925 - mmcv - INFO - \n", + "backbone.layer4.1.conv.2.conv.weight - torch.Size([64, 384, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,927 - mmcv - INFO - \n", + "backbone.layer4.1.conv.2.bn.weight - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,928 - mmcv - INFO - \n", + "backbone.layer4.1.conv.2.bn.bias - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,930 - mmcv - INFO - \n", + "backbone.layer4.2.conv.0.conv.weight - torch.Size([384, 64, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,932 - mmcv - INFO - \n", + "backbone.layer4.2.conv.0.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " 
\n", + "2021-10-21 04:04:12,933 - mmcv - INFO - \n", + "backbone.layer4.2.conv.0.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,935 - mmcv - INFO - \n", + "backbone.layer4.2.conv.1.conv.weight - torch.Size([384, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,936 - mmcv - INFO - \n", + "backbone.layer4.2.conv.1.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,938 - mmcv - INFO - \n", + "backbone.layer4.2.conv.1.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,939 - mmcv - INFO - \n", + "backbone.layer4.2.conv.2.conv.weight - torch.Size([64, 384, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,941 - mmcv - INFO - \n", + "backbone.layer4.2.conv.2.bn.weight - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,942 - mmcv - INFO - \n", + "backbone.layer4.2.conv.2.bn.bias - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,944 - mmcv - INFO - \n", + "backbone.layer4.3.conv.0.conv.weight - torch.Size([384, 64, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,945 - mmcv - INFO - \n", + "backbone.layer4.3.conv.0.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,946 - mmcv - INFO - \n", + "backbone.layer4.3.conv.0.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,948 - mmcv - INFO - \n", + "backbone.layer4.3.conv.1.conv.weight - torch.Size([384, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,949 - mmcv - INFO - \n", + "backbone.layer4.3.conv.1.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,951 - mmcv - INFO - \n", + "backbone.layer4.3.conv.1.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from 
https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,952 - mmcv - INFO - \n", + "backbone.layer4.3.conv.2.conv.weight - torch.Size([64, 384, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,954 - mmcv - INFO - \n", + "backbone.layer4.3.conv.2.bn.weight - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,955 - mmcv - INFO - \n", + "backbone.layer4.3.conv.2.bn.bias - torch.Size([64]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,957 - mmcv - INFO - \n", + "backbone.layer5.0.conv.0.conv.weight - torch.Size([384, 64, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,958 - mmcv - INFO - \n", + "backbone.layer5.0.conv.0.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,959 - mmcv - INFO - \n", + "backbone.layer5.0.conv.0.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,961 - mmcv - INFO - \n", + "backbone.layer5.0.conv.1.conv.weight - torch.Size([384, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,963 - mmcv - INFO - \n", + "backbone.layer5.0.conv.1.bn.weight - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,964 - mmcv - INFO - \n", + "backbone.layer5.0.conv.1.bn.bias - torch.Size([384]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Use load_from_http loader\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-10-21 04:04:12,965 - mmcv - INFO - \n", + "backbone.layer5.0.conv.2.conv.weight - torch.Size([96, 384, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,967 - mmcv - INFO - \n", + "backbone.layer5.0.conv.2.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,969 - mmcv - INFO - \n", + "backbone.layer5.0.conv.2.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from 
https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,970 - mmcv - INFO - \n", + "backbone.layer5.1.conv.0.conv.weight - torch.Size([576, 96, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,972 - mmcv - INFO - \n", + "backbone.layer5.1.conv.0.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,973 - mmcv - INFO - \n", + "backbone.layer5.1.conv.0.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,975 - mmcv - INFO - \n", + "backbone.layer5.1.conv.1.conv.weight - torch.Size([576, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,976 - mmcv - INFO - \n", + "backbone.layer5.1.conv.1.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,978 - mmcv - INFO - \n", + "backbone.layer5.1.conv.1.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,979 - mmcv - INFO - \n", + "backbone.layer5.1.conv.2.conv.weight - torch.Size([96, 576, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,981 - mmcv - INFO - \n", + "backbone.layer5.1.conv.2.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,982 - mmcv - INFO - \n", + "backbone.layer5.1.conv.2.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,984 - mmcv - INFO - \n", + "backbone.layer5.2.conv.0.conv.weight - torch.Size([576, 96, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,985 - mmcv - INFO - \n", + "backbone.layer5.2.conv.0.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,986 - mmcv - INFO - \n", + "backbone.layer5.2.conv.0.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,988 - mmcv - INFO - \n", + "backbone.layer5.2.conv.1.conv.weight - torch.Size([576, 1, 3, 3]): \n", 
+ "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,989 - mmcv - INFO - \n", + "backbone.layer5.2.conv.1.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,991 - mmcv - INFO - \n", + "backbone.layer5.2.conv.1.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,992 - mmcv - INFO - \n", + "backbone.layer5.2.conv.2.conv.weight - torch.Size([96, 576, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,994 - mmcv - INFO - \n", + "backbone.layer5.2.conv.2.bn.weight - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,995 - mmcv - INFO - \n", + "backbone.layer5.2.conv.2.bn.bias - torch.Size([96]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,997 - mmcv - INFO - \n", + "backbone.layer6.0.conv.0.conv.weight - torch.Size([576, 96, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,998 - mmcv - INFO - \n", + "backbone.layer6.0.conv.0.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:12,999 - mmcv - INFO - \n", + "backbone.layer6.0.conv.0.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,001 - mmcv - INFO - \n", + "backbone.layer6.0.conv.1.conv.weight - torch.Size([576, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,002 - mmcv - INFO - \n", + "backbone.layer6.0.conv.1.bn.weight - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,004 - mmcv - INFO - \n", + "backbone.layer6.0.conv.1.bn.bias - torch.Size([576]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,005 - mmcv - INFO - \n", + "backbone.layer6.0.conv.2.conv.weight - torch.Size([160, 576, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,007 - mmcv - INFO - \n", + "backbone.layer6.0.conv.2.bn.weight - 
torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,008 - mmcv - INFO - \n", + "backbone.layer6.0.conv.2.bn.bias - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,010 - mmcv - INFO - \n", + "backbone.layer6.1.conv.0.conv.weight - torch.Size([960, 160, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,011 - mmcv - INFO - \n", + "backbone.layer6.1.conv.0.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,013 - mmcv - INFO - \n", + "backbone.layer6.1.conv.0.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,014 - mmcv - INFO - \n", + "backbone.layer6.1.conv.1.conv.weight - torch.Size([960, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,015 - mmcv - INFO - \n", + "backbone.layer6.1.conv.1.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,017 - mmcv - INFO - \n", + "backbone.layer6.1.conv.1.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,018 - mmcv - INFO - \n", + "backbone.layer6.1.conv.2.conv.weight - torch.Size([160, 960, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,021 - mmcv - INFO - \n", + "backbone.layer6.1.conv.2.bn.weight - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,022 - mmcv - INFO - \n", + "backbone.layer6.1.conv.2.bn.bias - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,024 - mmcv - INFO - \n", + "backbone.layer6.2.conv.0.conv.weight - torch.Size([960, 160, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,025 - mmcv - INFO - \n", + "backbone.layer6.2.conv.0.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,027 - mmcv - INFO - \n", + 
"backbone.layer6.2.conv.0.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,028 - mmcv - INFO - \n", + "backbone.layer6.2.conv.1.conv.weight - torch.Size([960, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,030 - mmcv - INFO - \n", + "backbone.layer6.2.conv.1.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,031 - mmcv - INFO - \n", + "backbone.layer6.2.conv.1.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,033 - mmcv - INFO - \n", + "backbone.layer6.2.conv.2.conv.weight - torch.Size([160, 960, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,034 - mmcv - INFO - \n", + "backbone.layer6.2.conv.2.bn.weight - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,036 - mmcv - INFO - \n", + "backbone.layer6.2.conv.2.bn.bias - torch.Size([160]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,037 - mmcv - INFO - \n", + "backbone.layer7.0.conv.0.conv.weight - torch.Size([960, 160, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,039 - mmcv - INFO - \n", + "backbone.layer7.0.conv.0.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,040 - mmcv - INFO - \n", + "backbone.layer7.0.conv.0.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,041 - mmcv - INFO - \n", + "backbone.layer7.0.conv.1.conv.weight - torch.Size([960, 1, 3, 3]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,043 - mmcv - INFO - \n", + "backbone.layer7.0.conv.1.bn.weight - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,045 - mmcv - INFO - \n", + "backbone.layer7.0.conv.1.bn.bias - torch.Size([960]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,046 - 
mmcv - INFO - \n", + "backbone.layer7.0.conv.2.conv.weight - torch.Size([320, 960, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,048 - mmcv - INFO - \n", + "backbone.layer7.0.conv.2.bn.weight - torch.Size([320]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,049 - mmcv - INFO - \n", + "backbone.layer7.0.conv.2.bn.bias - torch.Size([320]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,051 - mmcv - INFO - \n", + "backbone.conv2.conv.weight - torch.Size([1280, 320, 1, 1]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,052 - mmcv - INFO - \n", + "backbone.conv2.bn.weight - torch.Size([1280]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,054 - mmcv - INFO - \n", + "backbone.conv2.bn.bias - torch.Size([1280]): \n", + "PretrainedInit: load from https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth \n", + " \n", + "2021-10-21 04:04:13,055 - mmcv - INFO - \n", + "head.fc.weight - torch.Size([2, 1280]): \n", + "NormalInit: mean=0, std=0.01, bias=0 \n", + " \n", + "2021-10-21 04:04:13,057 - mmcv - INFO - \n", + "head.fc.bias - torch.Size([2]): \n", + "NormalInit: mean=0, std=0.01, bias=0 \n", + " \n", + "2021-10-21 04:04:13,408 - mmcls - INFO - Start running, host: root@cc5b42005207, work_dir: /content/mmclassification/work_dirs/cats_dogs_dataset\n", + "2021-10-21 04:04:13,412 - mmcls - INFO - Hooks will be executed in the following order:\n", + "before_run:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_train_epoch:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_train_iter:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + " -------------------- \n", + "after_train_iter:\n", + "(ABOVE_NORMAL) OptimizerHook \n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "after_train_epoch:\n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_val_epoch:\n", + "(LOW ) IterTimerHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_val_iter:\n", + "(LOW ) IterTimerHook \n", + " -------------------- \n", + "after_val_iter:\n", + "(LOW ) IterTimerHook \n", + " -------------------- \n", + "after_val_epoch:\n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "2021-10-21 04:04:13,417 - mmcls - INFO - workflow: [('train', 1)], max: 2 epochs\n", + "2021-10-21 04:04:18,924 - mmcls - INFO - Epoch [1][10/201]\tlr: 5.000e-03, eta: 
0:03:29, time: 0.535, data_time: 0.259, memory: 1709, loss: 0.3917\n", + "2021-10-21 04:04:21,743 - mmcls - INFO - Epoch [1][20/201]\tlr: 5.000e-03, eta: 0:02:35, time: 0.281, data_time: 0.019, memory: 1709, loss: 0.3508\n", + "2021-10-21 04:04:24,552 - mmcls - INFO - Epoch [1][30/201]\tlr: 5.000e-03, eta: 0:02:15, time: 0.280, data_time: 0.020, memory: 1709, loss: 0.3955\n", + "2021-10-21 04:04:27,371 - mmcls - INFO - Epoch [1][40/201]\tlr: 5.000e-03, eta: 0:02:04, time: 0.282, data_time: 0.021, memory: 1709, loss: 0.2485\n", + "2021-10-21 04:04:30,202 - mmcls - INFO - Epoch [1][50/201]\tlr: 5.000e-03, eta: 0:01:56, time: 0.283, data_time: 0.021, memory: 1709, loss: 0.4196\n", + "2021-10-21 04:04:33,021 - mmcls - INFO - Epoch [1][60/201]\tlr: 5.000e-03, eta: 0:01:50, time: 0.282, data_time: 0.023, memory: 1709, loss: 0.4994\n", + "2021-10-21 04:04:35,800 - mmcls - INFO - Epoch [1][70/201]\tlr: 5.000e-03, eta: 0:01:45, time: 0.278, data_time: 0.020, memory: 1709, loss: 0.4372\n", + "2021-10-21 04:04:38,595 - mmcls - INFO - Epoch [1][80/201]\tlr: 5.000e-03, eta: 0:01:40, time: 0.280, data_time: 0.019, memory: 1709, loss: 0.3179\n", + "2021-10-21 04:04:41,351 - mmcls - INFO - Epoch [1][90/201]\tlr: 5.000e-03, eta: 0:01:36, time: 0.276, data_time: 0.018, memory: 1709, loss: 0.3175\n", + "2021-10-21 04:04:44,157 - mmcls - INFO - Epoch [1][100/201]\tlr: 5.000e-03, eta: 0:01:32, time: 0.280, data_time: 0.021, memory: 1709, loss: 0.3412\n", + "2021-10-21 04:04:46,974 - mmcls - INFO - Epoch [1][110/201]\tlr: 5.000e-03, eta: 0:01:28, time: 0.282, data_time: 0.019, memory: 1709, loss: 0.2985\n", + "2021-10-21 04:04:49,767 - mmcls - INFO - Epoch [1][120/201]\tlr: 5.000e-03, eta: 0:01:25, time: 0.280, data_time: 0.021, memory: 1709, loss: 0.2778\n", + "2021-10-21 04:04:52,553 - mmcls - INFO - Epoch [1][130/201]\tlr: 5.000e-03, eta: 0:01:21, time: 0.278, data_time: 0.021, memory: 1709, loss: 0.2229\n", + "2021-10-21 04:04:55,356 - mmcls - INFO - Epoch [1][140/201]\tlr: 5.000e-03, eta: 0:01:18, time: 0.280, data_time: 0.021, memory: 1709, loss: 0.2318\n", + "2021-10-21 04:04:58,177 - mmcls - INFO - Epoch [1][150/201]\tlr: 5.000e-03, eta: 0:01:14, time: 0.282, data_time: 0.022, memory: 1709, loss: 0.2333\n", + "2021-10-21 04:05:01,025 - mmcls - INFO - Epoch [1][160/201]\tlr: 5.000e-03, eta: 0:01:11, time: 0.285, data_time: 0.020, memory: 1709, loss: 0.2783\n", + "2021-10-21 04:05:03,833 - mmcls - INFO - Epoch [1][170/201]\tlr: 5.000e-03, eta: 0:01:08, time: 0.281, data_time: 0.022, memory: 1709, loss: 0.2132\n", + "2021-10-21 04:05:06,648 - mmcls - INFO - Epoch [1][180/201]\tlr: 5.000e-03, eta: 0:01:05, time: 0.281, data_time: 0.019, memory: 1709, loss: 0.2096\n", + "2021-10-21 04:05:09,472 - mmcls - INFO - Epoch [1][190/201]\tlr: 5.000e-03, eta: 0:01:02, time: 0.282, data_time: 0.020, memory: 1709, loss: 0.1729\n", + "2021-10-21 04:05:12,229 - mmcls - INFO - Epoch [1][200/201]\tlr: 5.000e-03, eta: 0:00:59, time: 0.275, data_time: 0.018, memory: 1709, loss: 0.1969\n", + "2021-10-21 04:05:12,275 - mmcls - INFO - Saving checkpoint at 1 epochs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[>>>>>>>>>>>>>>>>>>>>>>>>>>] 1601/1601, 104.1 task/s, elapsed: 15s, ETA: 0s" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-10-21 04:05:27,767 - mmcls - INFO - Epoch(val) [1][51]\taccuracy_top-1: 95.6277\n", + "2021-10-21 04:05:32,987 - mmcls - INFO - Epoch [2][10/201]\tlr: 5.000e-04, eta: 0:00:57, time: 0.505, data_time: 0.238, memory: 1709, loss: 0.1764\n", 
+ "2021-10-21 04:05:35,779 - mmcls - INFO - Epoch [2][20/201]\tlr: 5.000e-04, eta: 0:00:54, time: 0.278, data_time: 0.020, memory: 1709, loss: 0.1514\n", + "2021-10-21 04:05:38,537 - mmcls - INFO - Epoch [2][30/201]\tlr: 5.000e-04, eta: 0:00:51, time: 0.276, data_time: 0.020, memory: 1709, loss: 0.1395\n", + "2021-10-21 04:05:41,283 - mmcls - INFO - Epoch [2][40/201]\tlr: 5.000e-04, eta: 0:00:48, time: 0.275, data_time: 0.020, memory: 1709, loss: 0.1508\n", + "2021-10-21 04:05:44,017 - mmcls - INFO - Epoch [2][50/201]\tlr: 5.000e-04, eta: 0:00:44, time: 0.274, data_time: 0.021, memory: 1709, loss: 0.1771\n", + "2021-10-21 04:05:46,800 - mmcls - INFO - Epoch [2][60/201]\tlr: 5.000e-04, eta: 0:00:41, time: 0.278, data_time: 0.020, memory: 1709, loss: 0.1438\n", + "2021-10-21 04:05:49,570 - mmcls - INFO - Epoch [2][70/201]\tlr: 5.000e-04, eta: 0:00:38, time: 0.277, data_time: 0.020, memory: 1709, loss: 0.1321\n", + "2021-10-21 04:05:52,314 - mmcls - INFO - Epoch [2][80/201]\tlr: 5.000e-04, eta: 0:00:35, time: 0.275, data_time: 0.021, memory: 1709, loss: 0.1629\n", + "2021-10-21 04:05:55,052 - mmcls - INFO - Epoch [2][90/201]\tlr: 5.000e-04, eta: 0:00:32, time: 0.273, data_time: 0.021, memory: 1709, loss: 0.1574\n", + "2021-10-21 04:05:57,791 - mmcls - INFO - Epoch [2][100/201]\tlr: 5.000e-04, eta: 0:00:29, time: 0.274, data_time: 0.019, memory: 1709, loss: 0.1220\n", + "2021-10-21 04:06:00,534 - mmcls - INFO - Epoch [2][110/201]\tlr: 5.000e-04, eta: 0:00:26, time: 0.274, data_time: 0.021, memory: 1709, loss: 0.2550\n", + "2021-10-21 04:06:03,295 - mmcls - INFO - Epoch [2][120/201]\tlr: 5.000e-04, eta: 0:00:23, time: 0.276, data_time: 0.019, memory: 1709, loss: 0.1528\n", + "2021-10-21 04:06:06,048 - mmcls - INFO - Epoch [2][130/201]\tlr: 5.000e-04, eta: 0:00:20, time: 0.275, data_time: 0.022, memory: 1709, loss: 0.1223\n", + "2021-10-21 04:06:08,811 - mmcls - INFO - Epoch [2][140/201]\tlr: 5.000e-04, eta: 0:00:17, time: 0.276, data_time: 0.021, memory: 1709, loss: 0.1734\n", + "2021-10-21 04:06:11,576 - mmcls - INFO - Epoch [2][150/201]\tlr: 5.000e-04, eta: 0:00:14, time: 0.277, data_time: 0.020, memory: 1709, loss: 0.1527\n", + "2021-10-21 04:06:14,330 - mmcls - INFO - Epoch [2][160/201]\tlr: 5.000e-04, eta: 0:00:11, time: 0.276, data_time: 0.020, memory: 1709, loss: 0.1910\n", + "2021-10-21 04:06:17,106 - mmcls - INFO - Epoch [2][170/201]\tlr: 5.000e-04, eta: 0:00:09, time: 0.277, data_time: 0.019, memory: 1709, loss: 0.1922\n", + "2021-10-21 04:06:19,855 - mmcls - INFO - Epoch [2][180/201]\tlr: 5.000e-04, eta: 0:00:06, time: 0.274, data_time: 0.023, memory: 1709, loss: 0.1760\n", + "2021-10-21 04:06:22,638 - mmcls - INFO - Epoch [2][190/201]\tlr: 5.000e-04, eta: 0:00:03, time: 0.278, data_time: 0.019, memory: 1709, loss: 0.1739\n", + "2021-10-21 04:06:25,367 - mmcls - INFO - Epoch [2][200/201]\tlr: 5.000e-04, eta: 0:00:00, time: 0.272, data_time: 0.020, memory: 1709, loss: 0.1654\n", + "2021-10-21 04:06:25,410 - mmcls - INFO - Saving checkpoint at 2 epochs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[>>>>>>>>>>>>>>>>>>>>>>>>>>] 1601/1601, 105.5 task/s, elapsed: 15s, ETA: 0s" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-10-21 04:06:40,694 - mmcls - INFO - Epoch(val) [2][51]\taccuracy_top-1: 97.5016\n" + ] + } + ], + "source": [ + "import time\n", + "import mmcv\n", + "import os.path as osp\n", + "\n", + "from mmcls.datasets import build_dataset\n", + "from mmcls.models import build_classifier\n", + "from mmcls.apis import 
train_model\n",
+ "\n",
+ "# Create the work directory\n",
+ "mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))\n",
+ "# Build the classifier\n",
+ "model = build_classifier(cfg.model)\n",
+ "model.init_weights()\n",
+ "# Build the dataset\n",
+ "datasets = [build_dataset(cfg.data.train)]\n",
+ "# Add the CLASSES attribute to the model for easier visualization\n",
+ "model.CLASSES = datasets[0].CLASSES\n",
+ "# Start fine-tuning\n",
+ "train_model(\n",
+ "    model,\n",
+ "    datasets,\n",
+ "    cfg,\n",
+ "    distributed=False,\n",
+ "    validate=True,\n",
+ "    timestamp=time.strftime('%Y%m%d_%H%M%S', time.localtime()),\n",
+ "    meta=dict())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 304
+ },
+ "id": "HsoGBZA3miui",
+ "outputId": "eb2e09f5-55ce-4165-b754-3b75dbc829ab"
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAATMAAAEfCAYAAAAtNiETAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAFiQAABYkBbWid+gAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJ
VJYTdKYpwwgQGNfgnMG6CLYKiRAKlWUR/FC0DgQCrTVFURCcwEmNCGClAxGBs9/MudYoKUFoECCVQDmJVwopI/AqpYZ1eeHChYENuQSA/b7s13iWRRAoyzLeD9MN2chkMolBTGdYa2nbeL5KKfIiY3Nzk9VqRQiOyWRCpvUQwIoiR2fx/RhjcM6xsbHBZDJhNpuR5zmXLl2iqiqcc2itGY/HA3jleY6U5UBI+syq3/vT6ZSzs7P4nrtu2H8hnJOYQKDrLMEL8olGKTEQmuDAdBapAloplDqHsE8IZv0m7QmNQA1gJkQxRIq2beNiTSgrRMbZYsHm5ianp6dMp9Pha9u2lGU5nJz3nrZtqapquAGnRIreX4SeHUC8gQK5xrLOGVHPztaPdRA6BzyPFz5RZJ/+xkNww3kgzoEwCAhSJOot0SqLP5MxjVLI+FwChJSgAjrLcM4xmW3QdRaVZ9jgKfKCvCzjhZeRmR6fnXC6mCO0QipFY1rE/Iy6jinD6ekpXbUkGIt1FmcsnemYlCXWOlarmGZMp1O2t7dZrCrK8Zj9C3vcuHGN27dvs1jMkQoeeeRhFovFEG3jNQvkuUrnLdI9hM60zLIZ+/t7jMdjTk5POLzj4mYZj1hWSzqbc+nSx7hw4QLVYpE20pi2qmjbDq01o6LEGEtbNwQh4/VQGQKBEgFrWpQS7O5sY9qGWzeuMS4Likxx7fpVHnzwQR588H4uXX6Guq4oxznPX73C5uYmD95/kSvPPcfu9ibee65fe56tjSkaiXdgnaOua7quQSnBdHMTaw137tyJKVznsDYwnW6QFxlCgjAB42pQUVYJCIIQCCmQWiNUhhQZMkhs5rB5SeYFVigcAmsdbVXhnYib01qCM5FVSYX3ntPTUwgBQSDTkXlnmUZIUEqytbMT1zRuCOxxPcZgHFmMo2k8CLCuw1XxNay1TMpRCngBaztsZxA+UGSaXMdAn+caKTK0UnhrMG2DRDAejzi4cB/z+ZyTkxO892xtbTGdTrHWcnJywvHxMWVZkmUZSqkhy4pEp0TKmNb2GZhzLuFEoEzB0XtP08S0v3+enomaxPIE0Booch1lLhczO2tBhxDviTgnMx8XzO4GtsiA+rwzAlJOlmVr1NLHjSyiVtS2LXke08G9vT3m8zl5nt+VtvTUdbVacXp6OqC1MSZSTGA8HqOUSulrTDOjttO/v7vfb9TW/IvSzPXzMcZAECgVo4AQMWUlnasSDMB4Hg3P/91fcKHPI6mUEqnPI0xjOwSQ6YJSaRrTQdvROcvZYo7Wmq7tsM4hlOLP3vdeDm/dZlXXVF3Dxfvv5/Lly9TLBTs7O4SNKdV8ztmxwbUGax07OztoEfCdYWO2QVEWXDi4wL4PbGxtUdUrVqsFt2/fHlKb6XTKo48+ytWrV7l+/SaTyYTxeAOlNMvliuBjOtyDfgiBoihiUEjX1rmo+xU6AwR1VXGUwME5R6GzgWUYY8iyHICmbVA6w3QxiI3GY4SApqk4Oj6kKHPyXDMeleAMnWlYVUuWqwWjccl0XJIpiQie1WqBS0xrc2NCXTcxIARPpjXCB0BgTIe1HudtWr0lo9EopuyrFaZrqOvIQspyBCKglQSVI53FSRfTeRzGW5RThCAwwVHbQNMFWhtovcAATmgIdwdRay3OeowxaKkQMq0n7xEECDFRMsbgQ7w2Ryen1M2KpmmGAK21QusRWkuckxgTgTrL1cC6JpNJSkPBGItzJqarbQxemYqBv2mqqFcBp6eneGsYjUYAVHVNkWvatmW1Wg3gI4RgPp9z69YtvPfMZrO79lfcowbnLCH4IesZ9lz6HYiyU13XAxPrATGu0xG+qRBS4V3M4Lqsfz7AQXCBkK6js+fY9SIw68FjEM3oQeGcJfWCX0/VhYhA0i/4uDhK9vf3GY1Gg+g3m824dOnSkCqOx2NCCJydnXF8fIz3nv2DAw5v32aRIv1oNEroHs5T4DVw6i9QvCByyNtfCGQvPF6cQkbxO/6JjxdLeAIg++cRAmMcQoMWEinjJldKoVVMqVzhuX10h83NTequ5eGHHubq888jtaLpOvKyoOlakAIhFEVW8PyN65R5wWx7k7CIqYPtou4xKkoefvBlHN26yYeeqqjTwji4cEAG3Lpxg9lsxvxsznS6QdsZgpSsVgvm8zlt26K1pmkann76afb29tBao5Vie3uHra0dhFAcH52wXK5w3jHZiCx6uVwCDCmBMxZ8jMCj0SiClVTUq6jFSWB+eoaSkkzHDeGdQ0hJpiPb3t7exhpHVdcIKZESqsWcm9chLzKkiovfmo7NjSmHt25yenLEhXvuoSwLnLMc7O3SNA03r19jujGlXi1xtmN3e4uuaZBCoITAmBadKTItqOuaE2vZ2JiglaTIckSIaWvbVOADWmcILQbQAXDB4bxHWEsIEotl1TlWJrBoHEtjqS10CDzxuhY6ixtagfTgjMd7hxcm7Y0CYzq89XSmI3gzMBvvPZubm4zygjLLcc4O178J1cDyhJbYLlBmOTJACI5CK3wnaOtq0NRcZ5CEIUgDZFrjncO0DaZtYvqZ5wRn8c5xeHjIcrkkz3N2dnbY2NiIr5/AVWs96GmLxSK+n/R6URYKKC2QSg/pZ9STBU1bMZ1OadqAD5bONGDOC36j0YjOWZQW2MbSdQ3GSJTKyLK4/5VUKAESkUTrTwBmgyYle+DoBfK7q3f9I7KTpGcpRV23Q/Xw/vvvp6oqNjY2yLKMe+65h2eeeQaIhYSyLNFaU9c1bRsrKg9dvB+t1JBmCSESFY1ipknVzhAYmELPkIT4850mUdeJFLh/ZFkWt2FQeC+QBFAS6eV5xJASqRRBRI1M9owtBGQQeA/exwpiXo5p2pa9PMcYw/0PPsjx6SlFMWK5XKKU4vi5E7a2tiHA6ekZOs958lWv4pnLzxC04uT0lE95+CVIAs9dfpYHHniAQik+9pGPUBO1ssl4ghYBaw3BB65du4YxhqOTUx546CG2tjcjuLUtzlmaqqVtG5599lnm8/mwIJXS5HnBZDLh7GxOAMqypGka6roeInIv1AohyHVGcD6yc+dx3lEWJd65QTLo76vtOibTKeVoTFmMeOSRR1jVNR/56NOE4NnYmEU22yxpW0HX1Ggh2NnZIoTAzZs3UVJQLResqoqDgwMmkzG5lsydZXF6QqEzys0Zy/mSuqqYTqaITIPwqZqasaoWLBZLhPBIqdCZBJFhjcPaNrFPUEHikxh/99pJMoTUZHmG9halPMIKAj1YCZyDxjq0SjpYAnnnLFGOjffMmviQUqBENsg2zrkBSIoiJ08aLelvlZK0bX1+L/IM5wxNY6nrmvl8zjgRgBACUgkkek2XMsy2d7lz5zarxZLRaMRkOsG6Dtt2ZFnGYrFASsn29jYXL15Ea81yuaRt24Eg9Olj1NmK4fvOGRCBQo+HlLgnOOtyUK+R9Yy+z9SUTi6GXOFNQ9danEvf0yCES2lpQCvBOpp9XDCLua5MeWyMlL0m1oOC1prJZDqUYts2MBqPKYqcrM04OTkZBNLxeMyNGzcGQbj
XbHr6ubu7y0c+8hG2t7eHHLrP0fuqmlLxRkuhkp4mhijR63DOmUHzEQLyPB9y93NWJ4coqJUjy+I5OxtTgq3tTQ4Pb7G9vYXOM4pixNnpGXleIjON95FVBUCrjLOzOZtbW1jn2d7ZZL46Y2dvl4N7LvDcc8/xrj95F7u7exwfnVLXNdPplKIs8SHEEnUZmavKNBfvu4+LUrJYLHj++Ws8eP9Fjo6OuHLlCquz0wTcsRDz7LPP8pL77yPPc46PjxmPSo6Pj5ltbrG9vc0DDz2AD46zs9OhUJBpzeHhIUVRcHh4yGg05lM+5QJ13bCzs8PR0TGn8xO01+SFJuCo6iVZrmL6gAPhcT4yjLaJOkte5JiuhgCT8QgRoGtbSNVmawy1X7GYzynKjHsv3sf+/i537hyhJGR5wY0bxzR1xXhUEoTk9LTFh4DWsTq3WlmyLGe5PKOpl0N1zlmLdxalZNJWcup6Cb5EEOjqmuAM46KgzBQSj7eJzSOROkMgo5gsAuDx3kZ2oQQCibee1niarkblBZtbezhR04UOspyJKuiCpGoNpuvQQiadF7JcowQ0TdzUUsYNrjOFUiIGRynRWg1V+iKL1ozgHEVR4GyXUugMfGQkEijzjHq1ikK70rRNzWQyiiDiLd4km1OWISV0bUdVVdxsO05PjxmPR+zt7yADXL9+HeccW7u77M62WVUNy+USay3zeawuj8fjuG+05urVq1y4cIGLFy9yeno6YIJznsm0JASH9wEhYjXVmHOSk2WKPI/Brq5XA6mxtmNn5x5Ozha0yxotGIpGITisdRzs7TKdFmTKgXcY231iMPPeY41lMY/Csnek1E0gZRTb6mqetBVP27WYrqOuAqvFCiEUznuOqorf/93fxaQLenxyzNnRMXVdR9BUcQF57+mMYXFyyumdI+aHR5yendLUDSFpCkpJpNQpvVVoFYsRPkXPahVR3zub8sRY9VlPl6XsNS6FNZaiHOHsYaS8TUemc6yzHB1plssF8/k0RRCSGBv1QOscxtiUao3xwXN2cgMlFXduP8f+wS7d4oSPvP+Euq644wPdxfs4OYlgdPO5FpEqgnmWo3VG0za87Vf/Tw7299nd2+P6jRvcuXWL5y/NOLx9m7ed3uKJxz8tgVnUtU5PT6l2tplONzi5c4hLVV+hYkp58+aNoXwOsLGxMQSA3ps0n89ZrZa0raGTZoia/Ub8eIFuYO7eE5wnSEnw0bIhE9D2qX6mM0LK0K01OOc5Pjomz3PqaokxDePxPkVRcM89ezjnWC0WNHV9bmcgCr0Q8M7gTACv4mtbG887QNAKgSCTgnwUq2nRJ5jFQCR13OBJeCb0mjAEIisPQUIQBOfwBAgCke5/nimkEnghqVcVwUZ9DqExMsNbH1k9otcq4oZO9gutFEL3oBVlGqTECZHWtxwynZ7FOGex7vx69hJI/LrGHNNL9synaRpEArz+fQgRCw1FkbGcL5hMJuzubUdXQmeSjrWiaRtG1rK5uRkLVMfHPPvss+zu7nJ2dsbu7i7GGG7fvj1kNycnJ2xtbXFwcMAzz3yM8aREJw25107XdbPeb7qeWvZp6jIFz9DF4CuIdhTnLV4EMi1pmgonPM53dF3zicGs6zpu37wd9ZGkI/WUW4h4wdsmvgkh46L23g3A4n1AaUXbdlzpOoKP5tmurjFdLHkDqKSxhRDtBAjomobVfI6zbgAyCEilkEJinR3S3V7Li++rXzwBrVUq4fthM6zLfzJFzbZtkmGyoesMOssghGhmDB7rovZijENphXM+LbgE+NbRNstY0W0iQIXjQNecUa1WdMYghYjakO2i4OkdUkiMNRCgAYyN0ffq6R1sM+f0+CZ3Du+wWMxZno1xznJ6dBNvGrZne0gZvWPL1ZK6rrl4zwW6pqJaLMiznNVqxa3bt1E6MuvxeExdx9TPWjPoL1Ip5vM5R0dHOBfT96ZpoigbItPpwSkkkVopibdRZ/I+4E0HUkY7i1JI1ZutPSgYjUfoJPo3bYvWgaapuHLlWWazGUWmuXH9Go89/jjTjQnXr18nyzTWSILzURzvaXYIsermLCLPkUISDTUhsmQp0DqLUoTtNRpL01iE9GgtyfO4mZUSOBfo2o6usxjT0SJRbYPMFCqXA/ApIVEqR6GiwRePMx0iSGSQBOcwIdBZh7cO4X1KO+PfOwIieJCgpIrB1JpYWAsxCAy2BG8IRGuKCPH5gpIIH1CEQd6QaV+EEJ87hnaBDCC8w3UxQ+nRzDtHUFFnKrKcSkrKUdQMjw/v0DZVtE5ojUpBv65r9vf3eec738k999xD0zRDaqmUYmtrawiWfTUSiJpX1wA6ZXIepaIU1QdJrWUiFhllWTKbzXAuVm67riPPNV3nY3DKFEpLnLMYbwnBkcmYcovgwZ8H3heBWV3V2M7Ey3OXfn53VdB7f25XEwKRgKPPf52zmM7gg0/Ljagx9XlzCMjg8cn82dtWe9Qefs+f/6wPfCHEovnHa5HvndyIcA58glStDPjknYsmUTkUEPro55OhVAiZ3P8RkIWWaRGmMn2KpnmWY41FKU3d1KxWVRSgVfxZXde0XRvTamvRuUQHjU+VwrCqCN6TFwVdZ8iybgBoawxZpiHPuXnzGpvTneRpsyhgsVxQ5A9ycHCBG21HlueoEDg5OeHhRx5iuVyglKKqKmazCRsbG+cXKgjatuPo6BgpFfOzBR4fr09KDyMjSpczifp9lA2pEtVvFgEIrZFSYG28M0opirxAICOQJ4f4fDEf1lbbdZyeHeOc4+T4iFGRUxZZSh/P9RDvfGT5wSOCR2cagkcS7QlaQplr2rajaWryItodjG2Zz0/w3g1FqJ4F+HAOOiBjISvIBJwRTJASvESKgA8gREaea6yNXrlVY1h0ltZDQEe25fygFUFIjA1cAO/tILMkwonn3LEvELDGXqBPtVI12UfzaTRNn2t7kTFH54HWmjzPECJWSYeUV0kgRAkhBOqqivczsZs8z1E6SjBNG+9z13UcHBxw584dvPfUdU1Zljz44INcvnyZpml44IEHWCwWHB4e8pKXvITnr12mrs1Q0ex173Nbybns09uwAJz32DY+vw+Wznq0cDEjDI7O1jEwb0/QBCwSb8918hcp5tG09okPkaJkX1aNZebzLoFzt31MA53zCXEN3qcTib9NTF1jRfBrvvqr+cD73peEWDFoQwFiqpGqjkop/tE//AfcvHmd+dkJ3/mdrxsKAH10CMEPjDH4EEXY9PP/6q/8Ff7oD/8QHzxaK6yzSBXZmjWpvSTL4+aTEiUVzjuyTPN9/+Pf5+qVj3F48yqv+45vH1ziOssoR7GEbdouCb8iCsZCYNqO0WhEpjSmjYUPEWKU7KNSkeUs5nOqqh7oe11XgzcPQmKbnuVqxWQ6oUq2lo2NaaxCOxcjY9umTd0MxuRVKsSMx+O4sKQkz3OWyyVN01BVFTYxmp7RhkSA1y0ouc6i/UKqWLWUCi0kWsRr1S9e7xy2jWlA29UY02Jti1Qwm004Ob7DfHHKfRfv4daNa1y+9DRSBoxpYpuPi1
GY4FI1LqAk4B22a3Fdi7Md1rSYrsW0NW1dYU0X2ZwPZEqxMZmwNdukzAusMSzmZxwf3WF+dkJbp0CSZUxGY2bTKdPxiEIpCq3JlUYhIttyjmAiwHlnY5rrPM4YbBcfzpiUTru0VkUS/UXayBEcrLUYG786b7GuozNNCg5hzQ0fdbvBx+W6oa3Jexf/3ll8YiwhhHQ+ikxHOUYKEauUqXUqOkYF3likCGxvzdjb26MsS4K1dG3NYrFgNBrx3ve+lyeffJKzszOyLGO5XLK9HVNTpdRQKJhOp0m/jq164/EEIcRgju/dDUVRDMG1rmtWqxXL5XJ41FU1MECCxzmDw0ZWXUbdr+1WdKbG2DYWG8J5wPvzy39r5tHzb/UsKfk81tK5CBpiSPekEBEw0iaJqWkCJanIsywaSfM8+tToSVTvb0t8sK+cyghwP/CD/4R7L97PH/zBH74AaGEyGTMaxfSmNxqee2xFAsZ08qkCWpYlRV7Ev0mtFV0XUzLrbGrFkPzw//zPuP/BR/mjP3pXtGlYS9vE9pg8L5hOp2R530vqaJo2XaqQ9Ip8sL5kWUZd18Nmmk7H/PzP/TTv/4/v4p2/+TZe+yVfTIBkas1RSiO1pixHCAG7O7tkec58scA7T5bHvtT5fEHbdVy/fp2NjQ329/fZ29sd/Hx99cgYkwRbO3j/dGJW4gVeu/4cetYdK0uaPM+HIouUEi0lZZYzKkfn0TZtTADTGZq6AgJb25vkecbJ6THeO7Z3tsmLjKaJonDT1LRtk7xiXVpTkizXaC1jgUr2elPs/mjbGvCMR0XcCN4wm23wwAMPcO+99yZLRGQcPeuTsg+oMWwKIFOKMssYlSVFlqOEjCZfKREBTNPgbIcSgjIvGOUludYxO7EuMY5zQ/fgRex1MclwTfv91P+eTq79nrX0GmWvNUWQO78/3ofh7yFShP5e9Taq884XEDIku5O8i5D0+7PvKe7d/b2Z9ezsbLBpTCYTDg8PEUKwv7/PfD4nhMDFi/dxcnKcAG08BLZ1fawv/PXrwhgzgFu/L1xwyEwiM0WUmWA0KshzTQiOrmuwtsMHx7p2+OLezP6R7BhhQJj0c9nrUz3SpKP/uZDRGa80wXuU1oTgh54ya100DIqYqimpht7G4al89HeJwd1/Dlah1wpS+rhuUAw+oFN7w7rRFWL6mNTe4eY765BKJp+MoutahIO2a4GQStExSbA2Atrd4C7ojCHLc6K7eRQZiDFD72HwsUWqqRtAkCVBRPkAACAASURBVOcFdVNTFmWM0J1BKc3/8s9+mOVyycsef5Iv+7Iv5X/9ydfz7ne/m6M7x7FRXSm0lORKolWsEhcqGkOPjo/Is5xlWOGsoSwKbty4wUMPPcBjjz3GRz8Kz1+7yuHh4fDOrXExBRQi6RR51AZ91HOs7fA+vhZSEhA4G2UCQQBvEd4hZOxHDCFglERnOVqp6GUKsSdylBcoIaia2BlgrImN0U3NtevXuPfivezubHF0dBTZQzgXuZ33BB+1Tq1UrI7hktQQtTKhdGKDMUXu0xn8muE5rZvIdHKUiuwSorWm6zq61iCVjMxmWJsxfRMBkBIpNN54hI+paOhf0xNT70Gj1fQN5UoweLH6h/duAJ4+WPdVPmxY+z0/gJKQYSgWrLfUrbfmAUMrlXMGZ7tURIvDCELqac7zDO8cZ6enQ9W0KEtQCpUqmA8//DCXLl0a7Bn33nsvx8fHFKmvt//atzHlecZzzz3LhXt2h/RxvajUVzx7538PoOvm2fFkRFUvY+DSBW1VpUporAxHK0s00Eoh0GrNNP8iMBNqqMYgBB946v384A/8AB/64FM8e/kS3//935fAJ17sV7/q1Vx97jm+4Ru+gQ889QFuXr/OD/7jf4yUgunGBj/++tfz0Q99mKfe9z7+9t/6W0NUcd7xvX/3e3j/+97Hf/zTP+VzPvtzBjBMsSaBx3mzOWkBiz7qEStoMjmbSYL7t37rt/Lud/0xN65f44MfeIrvfN3rojYRRQ+kUvz4G97AlSuX+I1f/zUeeeQRQrrg2zvb/OiP/As+8uEP8P73voe/87f/B5RUGGMHVtdfPqXkwFCNMSAi+3jHb/46f/1bvjmZiGNbzXw+x9jkkE5tXLPZjKKMEefLv+y1/PN/8aM0Tcuv/dpvcOmZy3zlV3wFbdtydjaPgjuSzno662iqmkxmOGM4vH0zis7Bsr+/x70X72G5XHDnziFbW5sAg/drWFj0moWgbRuEjNqXs3FzBufPN6sPcaMag+06TGcGLcZaO3RttG1DXa+o6iVtWw8RVKnYigYBJaMeenx0hDWG/b092rrh5vUb2M6QZ5pRWTAqCwqdJY+WS+lUrHyvViuqqqJpW6x3KR0WeGIgqqolISpRLJdLbty4wa2bt1IlXSad1g9rLBYWBYiAcx2rakljOoy3uGBx3uBCrJRnWUY5HoGSdF3HKvU0Nm07tOF0bUvXVLT1iqZa0TY11iQmIQKktLVnRX1rHWltI2OKGbNlG1PXpJX13kcp42ZWMhbTtIrMUa2BnE+TSlwa3tClIs/R0RFZlrG1tcVoPCbLckajEVJKVqtVKjZ4tBR0bc1qOefzPvezURKeu3KZ27ducPHeC2gtOT09ZjabMh6XHB8fsVqtWC1W2M4hySAInPHYziGCJFM5prXgBUpqRJDx58aAF2RCs5wvEECpM4L3dE2Ndy72sKYAppUiU3rIAGL4eCGYSTVEDJFYzWtf+1pe/Zr/hPFkwjt+8+28933v421ve2sCh+jk/7Iv/TI+9ws+n7ZtecUrXoH3np/61/8HTdPy5Cs/g+lkypt/9Ve5dOkZfuEXfpGv/Ut/mf/ym76Jr/mLf4nDw0Pe8qZfjVHF+8EC4tcomfchqWwRkHyw6Cw2tkd9Li4G7xzL5Yr/7L/4Jp5++mN85is/g9/49X/HH/3xH/OeP/szlFK84uUv5yd/4l/xXd/93/P3/9738mM/9iN80Rd9KQA/8eOvx3SGz//8V5PnOf/23/4il565zC//0q+k6lqswEohY2TIc0ajktUyRpO8yHnpSz+F7e3tWAVUMtk6CqyN+qLzUI5K2qTRfcZnPImUkqc/9jF+8effyI+9/if5yEef5uWPP47KcrwQWO8R+Yix1GzPZnSNobIOrWBcljTtCqkC124+x+7uAbPZlGeffYa9vR0efPBBTk9PyXRBpouhg6MH5MlkhJRRAG4bh+1iuuW9iyblNK1BCUmmNFIInNIDiPWmSO89q2qJznPGk/Fgg+hM1EEyJcAnj14qt/fGaIFM/6X97pIAnyZeiGT5kFJSjseDVusJWO8QwhNEIAiPS8wutu40Q5DUKnrKYt9iz/jP2buUgSBiFddg8cYhhEIohUBhCVT1kpNVgxGaLggcAS9JhQIBzpNrEYsTMnY1CCFS5S1WX513CFIf8Jr5u+8VLvOcYhxN4q3tQMSg462J8x5yjcAjRUgs2cWJEr7D+4DzCfSEZlSOCTbgjKHDISTsbO2CF7gutp+hHKvFatDLZ5tTbly/xWw24/FHH2V7e5vbz
z/P889eZndzRms6nn3mUmxfxaMkbG/NuHbjOmWesZw37O3t4azn+o1rADzwwP1MJmPm8zkLVdE1sWk8y3ofKHgRmJ/M2Rxv4mxHPa9QBmbbGygPzapmezxDCmLlOPkCPyGY2b5CEv0NAPzUT/0Ud47uwPERv/wrv8JXfsWX8+a3vCkK/8mc+v3/4B+wWCwgBP7sPe/hwsEBX/kVX8lLX/apLJdLVquKN/7cz/H1X/d1/NIv/TJf9VVfxS/9yq9w/fp1VqsVP/XTP8N3vu5vpiJCwrAEZH1K2wv5vebjfaxq7e7uslgsWK2iwfanf+Znk8/M8e4/+VPe/9RTPPnkp/Mnf/oepJTcuXOHn/nZn0MIyY//xE/yvd/zd7jv/vuoqhVf+tov4VWv/iKqquL09JSf/ze/wF/+i1/Dm9/01qE1SKRK73g8om27WGwInqaJ/Wb79z6Ac33ETWNOVNRUbLApFfdIIRhNJ2xMp3EihdY8/vhj7Oxsc3Z6xnQ6IctyjDVY59nd20O3UcuQJJHdxQ4AIaOfKMsUVbVgOp1yeHjIlStXki3DJtNmBLGheNOn7HisaaM5U0bnuu0DiA+p3zGCBWsTHqLZMVbLXIiVxVixa+i7Q7yPzvNeuA4h4EWUE3A+3mPh8MKnFic1+MCg11DDXcykBwCXvg4TUES0dbxI6xWKEOSgS51rTokRiVR0UiK+j3PBJaaaQiKFYjwaM9074HTZUFrBpi4wMgIbKsM7w43nLiPww/NHK9B5VTHP875adldlr2egdR0nTEjVM6w0Lkn1aZlJYB4tTNHGERLjBKGKc3nF+0EGABC+nzQjY+1TKDyBpmsHK0pbN3hnyHPN3s4WZVlwcnZGtVyg84yuqRHbM/b2tglA29as6iVd19A5S2gsq2UVZZGsQMu4h00Te69HeUHnOrwNOCmRJJeBtwiRx+AlIxgLpdFCIjzRepIquTZYPB78efvii8CsP9b9Wbdu3x5Y0e3bt3n5y1/Omg0sUvmbNyMIpUX0wAMPAvDuP37X8JxKKT74wQ8l4XCPp556KlZ+pLxLzzkHtF4cXXtfqYlVazX4vQarSNqo3/gNX8d3fdfruP/++1FSMZ1O+MVf/KVhYxwe3gEiTa/rhqqquXDhIJpugbe99U30Jx/f84eHjdRXqkxamFHoj20/xpi7Np2A5NWKvxeNxmF4305H3apJPajWWj7tyc/Ce3jtl3wx88UiLmgTy+VKKopSoXVssSKkipb3qCz69jKl6Iwdel2vXLnCK1/5Sra3tzk8PLyrtaQHszi2JTr1CdHg2dsz+s3+8Rr4+yrbYHVIoNq3qfSCL5wbcdcBZL0drX9OUiX4hZpnf+/WwWddkzn/2bnw/qI1nc6pv4/9e4A1Q/CgFfcg41EEks2M4D3zszOev3HIybLDZwVtkCxaAzpjczZlf3/v3I6RAm/v7+uvw7koLwaxfjB1dy1ZplFKR+fAEDhUtPOEVC0V/fDHPjAlLVGk74XovrdpHJIS0VLvCXTWgEwsOwiaLmpa4zIjIJlubKJ0zsnZAndySmcMKsvJipLT+ZK66djc2iEIhgLAaDRBSs3R4R2qds6oKCknkdG3pqJp42iqYqJpl5bOupRpRV+f8YGcHET0EWqhwWegJNY7jItZm1JxXBUOPH8OmGWJeayD2cH+fhSyheDg4IDDw9uDdyuK+r1XJwxC/7Vrz2Ot5dFPfSlNXcPg+YqL7Pbt2+zv79OmRtSDg4PhJg8LW6xVT/sFSXSWR2NuO7RbVFWV+j/v5V/95I/zjf/5N/Hbv/07GNPx+7/374fX9T6wv783VE8n4zHjcWwpivYEyyue+PS0IeOwxzzP6fpJID52PWQ6i5MXrGUyGtPUdWycF+ebSqkoILdNizF2KE1LFUfDNE2cJnHlynN473nsZZ/K+977FEIqHnv8Zbzxjf8mit/JuX90dMTBxgyR6aSpxEUeRGR5UivqroMgKMuSnZ0dnnvuOa5cuUJZlozH46F5vDc69kGgDwR98ae/B+sA0rO5vsrVM7OeLffAZL3DpsCwPoXjhaC0Do59Rc0GhsGT60C2Loj3ldhYIY7Pf94IHSiK8nwNrb1mf6x7ndZ/Hr/P0JccPBBi94cKgiAF2ajk3nsOCLqkPFsxbyy0Njaay9g3+cylQ6QIA6j2AN6L3P2Mv/6cepBdZ7t98UtpTVEwMLPeMN6vrRACwa1PdjnveolpWP/1vKratG2SkjJ0keOilQ6JRGQ5q65hc2sTpTV3jo6oqorRZEJelngCF+69Bxc8q7rCe8/Z2RnlZMxsMoujrsqMgMELRVHE4ljdLGjbWCUdT7aRMuCVjcUYDa4LeGFwviXa+ySZjjJNr5W2psPYKNME32upf04BYFjRfSUT+JZv/mZ2d3d58IH7+fqv/3re8ta3DZR2eKK7KpKCW7du81vveAc//D/9EBuzGVJKnnjiCV71BV9ICIG3vOWtfOM3fgMXDg7Y2pzx1/7qf33XW0g4+aLFXORx8QYf+NCHPsyrXvWFa5pDYJx6027fuo2Ukq/72q/liVe8IvnO4o3e29vjW/7aX0VpxXd8x3/D+9//FM9ducr169f53d/99/zTf/IDbG1tkWWaJ554BZ//+Z8XN0FaMM9evsIXfsHnxdcbjWi7dug5DT7wrj/8D/yNv/7NOOvQUlIUOQRP2zRp9Es8QWM6FvMFy+WS33rHO/nu/+51jEYjvvqrvpyXfsoj/LtffztCEJvcg+fO4Z0438rFalW0C4g0+C9en0cfeYSt7W2899y+fZvd3V1OTuIE26qqWK1WAzs4bw6OzICw7gG8+1hPmXrxfzA4y/Om/EEICnHFBe8I3kUNSYBOlgoIw/9rlQykREYRnAXvortdRLPn+kMSsF2LaRuc6YbfiZMUouYW5ZQQHy6ms8FGd7xtu+izCnH6iSJqXc4YnLEIDyIIZPJkrYNiU9Vcff4qTV3TdS2np8ecnh5j0lQR23XMphOm4xGTUclkVDKbTtjanLG3s83WbIO2ruiaGtM22K4dHv35ZFkG4e6pzH3xJtpVzgsw/b/7h3MOiUMJjxIBLUFpgc4kQgtQYH3UFY0LWBdAKFRWgM6wBOq2pek6bAhsbm8z29oahop6Ag89/BKK8YhFtWK+WkZpijg+6OgojnNCggsm5obS44LBC4PMAo4OlafqpBYIFVBZtI20pqFzbfQmWotNGnNnLHXTUXeGk7MlJ6dLTs5WnJzOPzGY9Wlb77kB+K13vIM/+L3f4/f+w+/xxp99I29685tfvODF+Truf/Zt3/5tZJnmPe/+E65dvcpPvOENwxykt/7a23jjG3+W33nnO/jd3/5tfv3tb4cXsLBkxLj7/SXfl3OOH/2x17Ozs8PVK5f5oz/4fYQQfOTDH+VH/uWP8eY3/V9cevrDvOY1r+Zd7343PTP0PvDBD36Qz/7sz+Lypaf54v/0i/j2//Z1IARKaf7m676bsix55zvezsee/hA/+i//OZPJmOl0Orz+//a//2u2d7Z57srTvP033orW2TBJlxB45OGH2dnZHlLo
zdkmk8kk3hwbq5mxGgtNE8cj/93v/T5msxlPf/Qp/tE//H7+xre9jlu3DwmQ0kJJ0zXJyCs57z2NV8kn8+Xm5oz9vb1hhnxZlmkCQ8HW1haz2WwY4rc+zbdt23PP4MdhROuPdR/TXSOXUpuJlOcTgNc35PpDrrGQnuX1d3/9PfSv2TOXflwNMAz2bJqG3suXZdEtTm958OfaVc8I131O668REivsiwda9dNVzicc++DxzpEXGeWoiOO8taAcxRn+UophjTZNbNZeLBYs5vP4dbG423Ae7l7f6+9xGL2UtLSu66jryG6Cf2F6zXBto63G0ptutV677krFvmitQYo4Dl5AXhZIlWGMY2dvl6ZruXN8RDEq2dic0VmD1Iq9vT3qLmpf/UyycZrp33UdxlmUlgjRt2h1BBxKi+FzDqIEocjyWARyLv5c6r6HN6Xm9ly28Q6s9XTGR1BrO5rO0Z4vP8QLL+Y9Dz0Yzo6OYiVOCD70gQ/wPX/ve3nLW96aqooenbQSH0JiBSlNgCF9e+Gi7MFESol3HiHjYoltJWEQ+3ux8jzVgd7fJcQ62LI2EcNHX1uqivSbt28badt6YJG9QbQftxtC7JnTWZy62dQN040JbdMmxhff69bWFk3TxJnvxrC1tR1H46hYgDg+OcElBqLTgMKqriiLks3ZJqtqNXwOQv9eIijE6ZxZntG1MUXsDcLGWqQSTKZTcJ4HH34FTz76Ug42pmhn0cEiMfhgCMKlYsyIg4MLsXqZ5xwfHbFYLNjb3+exl70May23bt3i+Pj4rqpmDzgCgZaxotcDZg8kL2xFWWdlQzqVBnX2ulSfeq6ntT2r6MEshHOpQqWeS7HGiF6of71QvF/XyPruk6SD3PV+18Fj3cgaxbD0nL1ul4KGkrGS6YNgqEHqgi4oTlYNN07OWNQGrzOC1HjX4U0zeMde+ACSf42h06M3j/Zg7V1KQdMHo6ikIVoT12Sep3FMqRIanElgFtuVVlXsGe610JiP9KbagOkcWZEDUR7IdIHKM9qmYbFccmF/l6pacnx8zO7ubhwLVK3Y2dlBKcXt27cBBv/jdDpFCBEBT0qEM0MQy7J+zmC69sKn6TFjgPgBJiGwsTFBCEld14xGU5zpKHLFqFDs70z5zE9/gtOj6zx0/wVGZY7wJp2P4yu+/jsFfIIRQCLRrPPevJhyihTpgpTnjEmkuo+Qa0J9v6BefPSRXXA+HYBkLu13ef87/WtDD25imPnUf3hJf8Q2iToxJMdoFDdj/KCG1fB3vWgdgkdJHUv71g7tPaI3uCaDrnMOEWKJX+v44RjZODZ0KxXboZq2YXd3h+Pjk1QMiM+lVfxEpjqxr34z9+luf1R1xXaxHVMJ55FSI5WiNS3BxVHGudbcc+Ee9vb22BoVuGpJMAGcg34GF9GkeXx8RFXV1HXNxsYGBwcHNE3D5cuXeTSV2k9PTweXdw9ESipkCixRKz7XcdZZWZ9uroND/JnHA5k+B6l+znufKvVRd309DP8mjrdRLwKn88DYA+poNBo+SKNvUO6LG0rpHp/uep4XguN64UgIEcedh0BrbbJjJNbWZ6sOLNA0HafLmpNVy7IzNDbQNQKhc5QUdHUT24mSrreuC/YN1evHOrsCcNYPH14ihIhdBcO5yWHtx+GSceR7fA2NTA3dsYoc11Gv2fSWq67rUFlsArfWkRc5k+kILSTVasHVq1e4ePEeyrJktVowm21xz4UDyqLkgx/+IDjY3t4kkwpjWkzTxuZypbn33gssz+bIBJTBxZ5WEAQXokTSeZSI0NM1kYlNxxOyXCNCbANL+JzMtzlZliOlBjKUzKO5FzHogvAJPp1JyOQMvUuzOv9/kz5cQ2uNs5YszxmXJXW1wiVQWk8Ph7I352+wr1jB+YeCKB39MzevPf/Ct5WiJ7z+DW/gn/7QDw2z1fofmdSU3buu+w2jEqUWQgyzmNJf4YNHIIcWpGq1umvD9tM7pJC0bUfXmTiRc20TuzQSSIj0MWbOxTantmV7e5uTk5M05JC7evas7UcBa4RzdKl/M6Z/ccErGRlC13aUecFjj72Mq08/jbpwgQtbM3wHpnG0pkUKqOoWQWyxivpiFvsWuzh//ez0hJs3ricWHUdEhzTxJIQQ3etC9G6v1Jq29kExUibLiRvA7u6Wm9jH6I3BE+InJOUxiNi2ARcBTniHDFHDMs4ilUJHCxo+WMJa6rAOZv31DiEMk0qVUmS5Qqp8sCH0wa+vxt6dUpKYvFoDNZ8ykciasrRetFRoGSdJOATSQSYk4yync4F53SKJAxeUlHE4Z5axPdlHirsZY39Ya5PNJR3JRpLJtfcjJWVZxIAaAlme03Utzrr0uRtNHD/EeWfLAHzBD+76oUKqNQJiA7x3bMwmw1rc3JgyGo3SlFlDkWkIOavlAq1id4SzHV1bUy0WFJmmaiucNYxGYwSxjSzL4nzDYCynR6cDMxRC4owb1lAmC7JxAV7Fj0bMSsiAoLCdJ7jA/PQMnSlE5yi1QAlFV3c8/tgTTMc5XV1jXUNw7i7O9HHHZnNez+TxJ54YNn/P3vMsHy5GBL5Y3o99d5/gCGFIR33wyDQkMSSrQiAuchBcuPe+2MOZ3oWSam22eZosIHlBgzsDODjnBzCr64Z+IN46UEWkV+mmxxJx/IAEOegsvT4VN2pPk+OCt8YwGo9wzrNYzOPH6qXoXtVVtE+IOC2gqqq0eRhSiqLI6SeG6CzD+RghY/9eh0utJ9YaFssFUkBdV/zfnL15vCVXdd/73XvXcM65Q/dtdatHtWYhBMLY2PhhbAQYMKMtIgY7xgwGnAQ8JM5g7JhgMFOCiWMbiOA5GDAeAggbEDjEYnDMGCSMQJI1oqFbLfV4+957pqraw/tj7V1V93ZD3nvnQ9Pqe89Qp2rX2mv91m/9fjt37gQChw8fojSB5cWBZJsg/qUegu9Kx355FUJoh4PTte6XjAk38sFvCgZbs6N+oOn/TIKCIwTdNgf6uE7/OPqfmy5gv/O3NYil/+5TQvqzolonk5ywaRPeCqP0H5srABDajGxOIUCNZJW5ztHG4NFYJHNPA/pNbSWcGUDJFMZ01sh2sCUTTH8nQ482yEZYIeGMjRPVmrZMJrRrJXU+tdYg7IT2e3YdYik3CTFh8D0FDhS2btrvXTNDJZu7+Rzn6ggzBPErcE5ksubiOBVsQ54p6dZ6S6EVupBZ3RwPdUNpcjKdk6sMrSQ4e0zkksWu9Uw22FKLjuB8XaqnUTlgcWUREN7ZqMgZlkOGwwWWF7axtDhktT4Rv7PDh+8jAVQOhwyGQ+bTaQfuxO5mys6SjVza0VyUgPG95ytUHxzq9wW6C6vkmCX97QengArye590rfTmebRY9wq3qQWnpbxrGtFNCy2fKTY0eos34TgQRz7iziEs6BTIRGgycbwGgyTTk7MQ2+spSCbZl5Yr1FjWTq+xtLyEjWBuFtnOJpXyiamOzEo2dSPKos4xr+J8qhL+2rad+7jlllt43COvYMf2bay7Buw8lkKxjIiBVikVibwhZpbx1CvFZH2tBd1
B2OiGqBsW1UaIjZKkypuwyBBScOrwKqKqmJxn3xKIHX5TcFJK4UjyS4ItprUhwUyjiFr1qrdUetcsQAvGW2ujrpiIIoYQsHUdy/RUWikSU96HTmBARBGSkEGaLJEOplKawuQ4D3iPDrq9plqL5+fc1gKua0VmJMA5wFsr86guYEOXDcLmLFagEMkiXXLkCLGjqxMhuBs5Szw9Z5tNZX86P4QtG42WeeiA3FPGJQhBzojz0qiS8tlRWenCe9egnKjsqpChvEf7Bl9bnPJikK2hCo4wn9LYOmZbGuPBzS2NC+TEz1SOLKpCOxrqWhoZkmkKZjrIcyrvGc/HGO8oR8ucOnaUPDcsDIboosA3nmMPH2d6+jQqePnc0MTz1PCTPyNf+6xl5sq5u5hNplFfXhZ3ylLEC3OJ06dPi5lp3aC0Ys95BzCDgqYRQ4MQVAuC+vZmCBidobVIggzKksbaVp5XcK0gKhB5hlLi3ix69UsRVO7mAW0sA402kWSYc+65u1ldPU1RFJw8eZxdu3Zx8uTx1hEmcZSGw0UO7D+P6VQGwDc2xljXsFAMqJsapaVkmM5ERmdpaRsHDx5kPJkQgAsvvJD7Dz3A4UMPct55B5nN5zR1TVNJsKvrmvF4g8uveAyz6ZRTqyfbYFoUBetr6zTWMRiMomMUDIcjDp53gMl0wrETJ1CZgLdea4aL2zh+4rgMZq8ss3fvHiZrp6jnG7jgyDIlFBE0OhrW9m+kFPD71mUp0KTgthXg934zk35rJpZ+tolYG0vQ9LKtWVb6XGIAaXErug0uHWv/Nf3ssd+QSETU/vF2zZ1u49J687/bQJoCmeqw2aLoglnKBLXRoA1GG5aLgsZDEwy+ipQMJ6KWznkyZc4MMPEzEt6YmiN9VV/nxHMTr9pqJXEW07pPJOTQw/TonS9HQClh+CfJKAjo4KLIqby/EGvjzK11EriVolQwryuU8WKcHBwqgPaGIi8o8wEaB97iqgajNEVZyobiBIMzVstQvBKlk8IYYfo3MA+WTEht5Cagtahi2EyDbZiPxyhl0XlGnmmG5QCNYmN1gxOzDeaTMUvLC1E1RehS6XFWqzmlFMOF0abWe1rwS1nGZDJh+zk7ADCFFW1w7zlw8LzotWeR7LYjC6YNOM9E4z/5ZDaNxeRFZwiqsjbgJLmQoihaakRil8/nc7ItZYZSBnTOaGEbu87dSTkccc45O7Des7S0xJK1rK2tsbi4yMbGjJ2797J6ao0rr3wM373nfo6fOMYVlz2Chx9+kOPHj8txLp3D4tJMVBayIZddfhF33XUXKhuybfu5aD1iz569rK6dZlCU4GFjTcpO5xpWT26wfWWZH37cZZw8eYyTJ0+yY8dOtm+bs762EcvdnMwUNE3FdOpYWNzBBcvn4I1mbSJmsMaIWe6JE8d58MFl9u1YiRlOEKfwXlfSt/aAm6+pUqo9V30wfSu4vzV4nFmOnRkYtt64/QDX/9lWDKk95vS7IJtZks3ufxZI9p8c01XM0FJwljWadak+KWeUBpbviYjJWgAAIABJREFUH3N8Ths4Qzp3DtAixJgKC+exPmC9RWUZZjAAJbOcWtGqwOrQzg5ICeT72ZkEIckqO79WrTsc0HmLUoayHGIi+dw5J1St3obUupfTla/pjw8KOSIAK6Wmk7F7o0PKoQVWwQmFw9v2M7TWqFzHUlNhghFtOGAYpe5Hy8tUM2m6GG0odDfgXmiFDQFlFGWhKIeGYVHglcfmMG+0cOnKQqZVXIPOcxZH25isb3Bq7SQ79+/BBVElsXWNrYSfptOg+txDocRHoYfFn90E2HsmGxvYaDwrs3+dQsV8NmNhYTFmYQrfNKweP862bctMJlOSmcFm/COeeMTFeT3Po1S1qI2mHUorAZmT1IngIob11ZykLJDmHmOt2ZUPQXHsoQcZDUesnz7KcDjkruNH8N6xvnqCoiiZTMbMJxusr4+59dtQVw2Zspw8tcra6VVuWj/OfD6LHSNZhNLUgBPHH+C++26nrmsOH7or8qlyjj70XbZv386R8UQGfp2nLHPKQcnxY0c5ckRz7OhOVrZv49SJhzhx/DA7d+5mNl3jodOnGQ5HNLUlywu0hnJQ0jhH1dRUVvS81kyO8paFvGCyfpzSKAZx96rrmeBcRoieBN3Wad1YjuwmxmRxokDkZC7at5/d55zzPXGqfvDpd/7OFuC6529+/db3PVu2BwKia8Qm7ns90nGmoJyA/CQAqJSmmncmF2lx9LO9/phW/5HwQsG+ehr6hDb41XVNPa9Y35gwnkyZWY9N6ErK4hSgzKbvvrUr3Je8St8rqSoXRREhEGk+qRCwjYkO6D0cU6k2O2v/BAloggzlhOAAGepGgTLE4X2R+NaEGJjBaNCZYpQtoKIJsnOG4MTlqYhldZZlGGfJg8SFjABB7tNBlkNBNDMqGAzKliLiXNFm06OFQZus6EhBWR1l+FBx6sQJPCJgMSpKwjAadzce5wLTjQ0hAmeieJIeZxqaOMepY8ewTZe6d0KJshiyLGM+ndI0UjP7mObX0VcvYWmdDn98rVItLtbqLynBtLxzkW8mn9GZOrj2+bLgNks6i9qrBJuEXdl6FrGJvDX0kK5Qjm0sU7+BtZaHjxwCFLdVIh9cVxVHY6e27YR5v2nxWSt0Bt+7CUOA2eS0AMKNJTcZ3otWGkG4ZOtrJzjnnB1MpzOapqaupjSNjGKtr0lwSbN3SivmlTD9Y33EJIjywXRQMhlLWp9pLW3saLzaYpXtWUwgchc0dOQFpt9+98EHedqPPJ6VpaVN5Wb/sTWb6geGs/0usflDwqlU+nxiRhB16gjSTY3llUIs/YxOrvUp66O9hpCGs2MDSgV0JOumLmv33M3HuvX7pOe0gSUIpGK0BieYkgJUJAFnOkM7z2S8QdNU1HVF4zxNENwsaEPQCp1U+ZVuv7vcXPKGWey6hyDySipExyWjUTrbFPxTcuFDJxXUBsZ4T/VLWuchKA0RfI8SJN2m5gO5FtEDraUkFoXgOAlhNNmgjDOPGhCZHvBkKiMzimADg8xQKvEXdbXHuhqDpizETzbPNYNBRp4rjJHMOTeglEEtiHBjlmuUEgJ00zR4W4A+l7sPH8UiJebywiJLi4voADMrkwSDssR7i2ss/ZV6RjCrplPx80PhVcI15P9C7BiOFhaiRnsCvBWZEZcho7Uksb4jZ/Rre3p8l3anJ91+yXS36+xIqt4tyrakiLiIDTZJn0v5FCkfWZ5RRyJi09QUeY5Cdn/nHEYbvPMkGd8QRHcsHafzafg3iczRknDn1ZzBYIhWCuscdV1Fr8OcQVnQ1FG3LBYCaWdfW1vv2PzzOXlekEXF2Tx2NBvbtN9d9wK6QuRSmloImiKdksxgxPQltLt7BNRV4lalwNBlaErJhtBYyz0PHuKxl1xKnschbxWiIkq6IRP3TIJTe+/H4JSCo1bEmcROUHPzIwWYdil0v/Ey+5sy7u/Vzez7S/YpF/Lv6Cep85aI7QGTNPJUXHsoQsqaFKIEAhD/9mGzAmuhCkyuMRkUecYolEybir
LSNL7BuQbnwCtDcIZKCdG2f9z9zSQ1eRJ+1nZptQatYnND8GoXG1k2asi12V/MYn2sGrquMZH0K/eSpJdxaiMEDGCKHO1kdZZGURgTR8pCVNKQLqkCsiwn08JJy1SGyRTVrCYzBZmOFnazmulUFFuGRU7dTDGqkN5voLP2ixMICqjnM4LLGQ4HOOeZjddwdcXywogLzjvA+niOd55qLjLehTF4PFlZkOdDqmpGNZts2njPLDPFfoeQcIX+ikNKE2cboUsYI8CnFRayQhj8PnbFQm9nSy47wSQUo9eVBJ73vOfy9re9jSsefWXkgxkJGFqMN/q7qLD4NSbL8LWP3JtKOjZGJgxmc6FkaKRTlxb+oBRXbGe9cHlQzOZVJFoKZmMjwbUsynah6TZoxwDTNPhYNog7thiN+Miza8vkGGB09NrUWlQtvK9BiRJClmm8t/J9gsc1XSczUr/iqKNgRSF2DZO+fKK8CIAdz3kKYKELIB2OlqSxoxmrbciNJjiLNhm5MUI1iLc+QRa+ybP4Ty8E6piBQyqTHJi+GfPmtSPXL8EC8t46OrtD2rCSckUMZKkcVKoNTnXqAKr2i3WfEiWwk5y0bG/pjMioUWrwEDzeNxgf8ZdcNh/rLBhFXibxP8/adJWTp06xtrFB3TiC0kznFePpnJOr62R5yd59+3GUVF6yGwVtZp8y3qZpGE8l6KINJldRLFMyKg2CUWlDORwxr+ZMJhsYrVgYjhDDmEZEM+PMpXOm9RMQNKwmOKmajBKgnJicNADWYgjkPjCzlnmYob1H4VEqRLKvQeuMKjhqJV6XdRA7x4XRItPZjEkzpigyynLI0vZFrPVUtoLgmc0n1M08cs1Um7ik5pg4oltWVx/CGJGNHw4XGM9qdiyN8I1nPqsj5BBYn8koWNNUZMawuLwAClbH3WzmWSSAQvxfD/dI/wpgG7F700ZIby6ahoSWj2XbV2nV3T9tdpd+lG5EItAauhtHsinL4lIkWcZMRgDKCF1GLK3FQYyO5iIdwN2+Z7p1Il3BO4dL2uk6SXPLkRkTO4ghyMBzkCxDaRXpGYNNHamW/wORAAn09d9ReJ9SEflbjs+LFlmM+M985k/xa7/yWh5z5ZV885vf4mf+yYvi80EFcc2GrgzvP87ZuZO3/Kd38CM/+qMcO3qUt/7O7/D3f/fF9pkvePGLec2v/RpLS0t8/oYbeP1v/DvqusOVUrdPTuVWLCmuh4jHyLXsoAet+5iU8JvSbtkvRVMGv7XEO/N5tL9PuND3fJzld877aGeoUCojhXUdr58LnuAVC6MFlA5Mp+s0TSVD0Xis82hdUgyGTGdzHrznMEeOHAEV2L1nD8vbllnf2GB5eZnRaA+ZKThxYpXjJ04xGZ/kxHrN8p4LIxbU8e2gUx3Zipf1my8hSDJlm0aqnChQCUnXrJ+xdnpo8kax3I6OVSjJspEwhQkhqr7UlFlck97i6ynYJqq3amxdEcqSPE/XOGCDj/zNmvHGRtsVT1MOqaGE8uDENi8Fr1RdJGipaRqWIqyRutGpGpnXlu0r5zJeH+MIsXOpKMsca0tMLiNQehY5oN8PM+sCQLers6kkkIMW5jGbbi5jRCaaXoeou2IpQ/JbFuHmaQFtDI2VbuZ8Nmd5eZmNjQ2yPIsllY7elw5jOkC6KAp579iVCTFoJZUKwWjibGc85rppECUVEd/zTcOgHBKyTvwxZTAqiIpsWzIQA17EWVImobRqTVsSZpJGLkSkMHKufKCpm9aybn19nf967Xt5zJU/wOMf/8NA8jlQ7fkMIUjG3Dt3AG9481sYjzd4wuN+kCc96cn8/rvfzU89+SpOnTjJIx/9KH7zDW/g1S99KXfecTvv+eP/xq/+63/DO976FjlGOvBYjtGfEXC2AvYdOXMzmN2NOXXBrJUWOstz+zdif5KgX6Ke7Ri+7yNiTEZnoiUXEAnwKKWUm4ws8sWaWvwaMmMYjUpEehxMXvDQQ0c4fuI4aMXll1/E8val1vBj9+5H0zjPbFbjg2JhsWTvvp00Fr57ZJUjp22XUfa+R9txjOd4a7dYfp48DCxNIwGsXc9pKNsYUJ4Q0iYsZOUWUFDpjlKtGq/UoqE9P0plURNPCUPfNgLDKJjF8i1p/KdjTMPvYhjUCQS0gYweEZszR9L6nfLkGzAcDlt2grVCQD5x4gSzuSXPDa4RDwUTfTYyK0HPZDKhURTL7Tk+qwRQSIsC+Mdbb+PNv/tm7rj9dh64/37e8IbficCt3LhXXXUVhw89wAtf+AK+ffPNPPzQw/zum96EVjKc/e53vYu77ryT22+7jdf9xm9s+oK/9Vu/yV133sEt3/kOj//Rx0vWojrFhaSGoLUYa6QhdYAXveiF3PSNr3Po/vu46Rtf5wX/5JqYglt8cDzj6T/JFz93A4fuv49v/cM3ufrqq9tZvp/72Z/l5m/dxN133c5f/sWfcsEFF8S0X1x/Pn7dR3jdb/xb/uzDH+T+++7m5ptvbE0drnryT/DZz17PnXd8h+s/9XEuvfSSXsYoO+uX//6LvOIVL2sZAul8eedomlgyB1o8RCvNV77yVT75yes5ceJE7+ZNGUuXzXZXSBbIaGGBJz/1qVz77ncxm03528/+Dffdey8/+fRnEIBnPee5fOnv/o5/uPFGxhtj3v/e9/Lcq6/uLrbaHHTStTkbW/9sPLP02rTrpkyt/17pTz8Ifu/H2X93ts7pWV+txAw4y4oeMdi2Lk8gvKbxZIPJZEKe5YxGMvQ8Ho85cuQQt9/+bdY3TrJ9+wIXnr+XC87fy94957DznGWGo5zTayepZusYbVlaKti5c5ld525n2/YRg2G2qcPWJ3snGlHil/UH5ftEWMnoNqthFEXeTZHgzn49VJcnKJXOfRzxiqNM3rtYQnYqJVprgS8ily11U/vHna5vmi/ur5mkEJLESdP16uOaW9fANHonbJ3MwQeBWaJRTLCO4ByZgiI3lEXGznNWKGO3tyi7fOzMzCwFm/b/kgfAkxgOh3z+c5/jpptu4vrrP912FJMHwFVPeQqT8ZgrHnUFSmne9973Mp/P+YHHPpalpSU+8dd/zd1338NHPvZRfvq5z+MlP//zPOmqJ1PN51z/6esFY3COwWDIZDphOBi2vLDxeBzxsobhcIH3XftfeeGLf5bPfe7z7N27l/MOHJBy1zt+8Ad+iD9+3/t42StewQ1/ewPnHTyPR1/xKIqi5JGPvJj//J9/j595/gv45je/xZve9Abed+27eOZzrmY0HLYn/6UvfQmvec2v8PKXv5KLL76Y8XiDRz/qCt537Xt4yS+8jK985Wu85CX/lD/90/fzEz/x1Fbtw4fApZdewo4dO2KGkdJ82RVdYyMh+Cz0hnZNqriReqnBQ7fBtOSn+Dg/BuJ777mH93/ow7z33e/i7rvu5OJLLwUCF15yCXfcdhvPePazedpP/RR/8Hvv4Nxzz2V5aTma8Z4ZPL4Xp+z/3ePswaxbXmd2F/vP627CcNbj+D9laAoVicPx1AXBGuU/FCo4gg3gHIMiZzgsmM+nPPTwEU6cP
EbjG5a3LbJjxworKyvkZcZ8PmY6PY3JMlxwbGxMWFhcYjhcFAULdORwOVQ0LWmJwWe5zv3ftWB+LEkT+9+5NEQdWxbpvKmukZO8MvvCBQpxNvexwkpvIRuo8PcybcQd3jvhmGlNMEa8SJuafQf2t8GsxYzjfzdN096L6Zg7lVzdCgroOo4NZp3bORAliixVXTGbTAUfLHJhAFgpo3NtCFqcwHwM/Pkgi/OnomZczaeMx+vRU1YeZ6pmtKBF97MPfOADnDx5khAC1113HT/9vOfxqU9d31IviqLg3//2b3PyxAlCCHzzppvYvXsPz3rWs3jE5Ze3Jp8f/NCHeP7zn89HP/YxnvPc5/Cxj13HAw8cQmvNhz74p7z2tf9CWtDBx26ltNslegu/TOtOfeHiiy7k619f5PDhw5w4cby9AX7hJT/Pxz52HZ/73Odw3nPfvfdx5MGHRPjwuc/h81/4Il//+v8my3Le+Z//kHvvvo29e/cyGY+ZbKwRQuB//I/P8sUvfhGF4o47bkehePGLX8Bf/fUn+MpXvor3nj/5kw/w+tf/Fo+8/BHccuttYriqYOfuAyRTlj7u1d60hGhzFsvUeLKTm3harMmkRfBy35b1/dt5NBKVW6U0l172CHZEP4SUbYyGQ8bjDfbu28vFl17KeEOUZkcLCyJ3zP85UG0NQFu5Y+nm7ALS5rK0/3cqR84WrLoS98zX9z/3+wW0EESZQWuhgECf35U6sYoiammdOnWKk6dOsL5+mkE54MJ9BzlwwT5Or60ymayjZ4rhaEhR5nEuOHDeeftxzjMezzg+n6N1xtLSthgEK7579CiOTm+uT07uZ2HpHPRvdjn+BmtVL5MNbfmm1OZOb/88pmZQtyn2/lYhjmtF2hOiBpMZmZ5R3mCVwjrp/KdOej8LCyHEaZzFNhC3M8wxiC2OBuSZYT6Xe1Q05jrlkJSdJh06rTVlUbK4sEhVVcxmFXUVIJOuP97F5uKQTMsgWqLjoALOd9jvWdyZdP98AHD82PGYsCmOHTvG5Y98JH1IbDwe8/DDD9M1vhX79+8D4Otf+1r7PsYYbr3tNkII7Nq1i1tjAAA4evRhOaBMuh15lmOtoxyUTKYTMpMRokrCeDzmZS//RV71ql/k9a9/PXfffTe/8brf5MYbb0Ipxf59+1qD4MQ9a5oGpTW7du3i6MNHUVra9Rsb68xmM3bt3MHG2uk2mNxzzz2Cj/boJ/v27eOJT/wxnvvcZ7ffqSgK9uzZw6233ioGwv0FplIjRTpsBNksQqSXy2IBkC7lpqy4fwUithV65WbiWM2mUwbDIbapuer/Eru+pzz1aaytr7Xp/MLCIu/6L7/PB/74jzkQvRmmkwkBWeBbg8oZGWPv55tKAjaXpKJa4TbhRS24n37g+xwxSAupFRvU4mXw/zszSze0F0kksZLrhvxdzCKcq1lbO83q6kmcs5y7cxe7d++kGBWcPHEKa0WlYtZUTGdThsMBi8tLlGXJbDIjqDh4jVBiZtMZ6xtTjh87Kb4NLk1hdOemw3ezHmnXt+dDmi/9gXyHtWJ0G0Igyw2ZSnPRMtAfQgc5dPdtZPqn06fiJqiFMOKcFcu2THxYS6UhaEqtcb5gY32tLWv7mKb3IuSZRB9sXAu2nuOaCrxlriFEfmffjrB//RLc471veaApQwXItUFHzbYs4p7YhkZ56npOMYySRbkihO9DzVDt3dItpN17drenate553Ls2NG0uuRL2bjbet/iFIcOiwfApZddJs7dQUiT0ggQA5M9u89tF+6uXbsIIZDHIfY0q5nIuw7X7gTWWj7/hS/w6b/5DEVe8Pa3voX/9Pa38ZSffDpaaw4fPswFF5zf7R5OyK/OOh4+epRHP+pRZMbQWMu2bcsMh0OOHz3OdDolDSA7KyCu9zJkrpXmwcOHee+17+Wtb3krgW6oW+gYvuuZpBsxpfdbApP3Hp86e1vS+LQoleoyMK1VnKdT3cKMz7z/vvvw3nPJZY/grjtuB+CSyy7jI3/x5wB89567ueSyy9qezqWXXcbxY8dY31iX41NnL+m2ZmL9P+lnW7EQ7z0+qu22z29fvzkbgc7sJc0gtsD41kX5/+Eh+KppTZjli/vYfLHUdRX162A6nWCtY8eOFfbv34fWiuPHT3D81FGGwwGj0ZCFhW1kmWBWeVZASEa7gboW/C3PM7Qq8K7BWTGi7gbUu8Cc1mNytNqapaY/OpKh5bxFnqWWOUelNE1T0YkAbG40KKVai8h4yds1o2XXYDyeMMwzMQLW4LxFB7EtzEzG+ngD78tW0Tc9mqahrmuWlpba406f2Q3Sp/JYHlvVUgBms1mrHJJ0/qAvVqnJlQKjMZkSF/mmobI102qD3JZkufhcyLmI1/5sC6LthcSF+9KXvpTtK9s5ePAgL7jmGlGdTb8Pm5+bDvjow0e54YYbeNtb39qaI1x55aO56ieehDGGz3zmM1xzzTVccMFBdu3cyctf/jI5IGMYjUbkhZBIQwjCpKcTt9u1cyfPfOZPsby0LIHOe9Y3Ntob6MN/9ue88AUv4KlPeSpaa86/4Hye/vSnUdU1n/rU9Tz1KU/mcY/7IYaDAf/u3/w63/72dzh06H5mkzFrp1eFuuEarBXrteAdTT3nQx/6ID/3T3+OH/7hH0IrWF5a5PnPv1oWGUJWzYzixv/9ZV79ypeL/n2I3VQSjSHJ3nQ7VVqUZVm21JKiKFrNqlTO/9IvvYqvfuOrm14zmYz5u89/nn/+2tcyHAx42jOeyQUXXcQNn/0sIQT+5lPX8+NXXcVjf+iHWFhc5OWvfjWf/uQnJRzGrTwFo36wSQu0331Ku2wqN8qypCzLVkgw3ahZZPF3kEWkw8RAp2OjZ+vPTawKEoWhvTk33Sy6LVX6wpIpCGitqRvLZDrDBU9eFFhvGU83UEaxsLTAydUT3Hb7rayunWLfgb3sOGcHh48c4ebv3MLDDx9nUCyj9YgQSjQD8myEyUYoBihVUg6WGZRLDAZLBJ/jrWE6tZw4uUETdeqKIqMoMrJMU5ZCDs1zg3NN1PGvEPs98X+U8tHFTrBvy89ykGMy6SROJtOYGCRcLY4AppteS+A2OhJ/vcV6S8LdQsTjiixnUORopWjmFfPZNFYScq2H5QCtYD6bMt5Yp6krMqPFmLnIRbfQNrExJbp4RR6dnqJeW65N+8egWq8FDQzygjLLKUyGQbXeDOl3menkp2azGXUlHhvJEGg8HrO6dprVtdNMerJjZ/pmWtehMjEwff7zn+fLf/8lRqMRf/KBD/Cp6z/VLfR0olKCG7xQHwK86tWv5s1vfjPf+Pr/ZnFxkbvvvpu3vf0/EkLg05/+DI95zGP43A03MJvN+dh1H+MF11zT6ocpp7pRJdKgbey8ZBm/9KpX8a4//AMC8O2bv82//Fe/3gbSG2+6kV981av5D6//bT74J+/n1KlTvPF330xZltxyy638q1//N1z7nj9i586d3Hzzt3n5y19BU1cYrdi+vBRVNQbs2L5dgmi8mW+/7VZe92//LW//j/+RCy68kNl0ype+9CX+
6rqPysCx0VgvHgAr27dJbZ9Mx4IMUKu4S4YQ2qwFFC960Qt51x/9QXsdHjz0Xf7iL/87v/Kr/yphvqysrHDhhRfSlhVx633jb/973vKOd/Dlb36L48eO8a9/+Zc5Gbuit91yC2974xv5/fe8h8WlJb5www384TvfKa9HEXq76FYs5vs9+jhQP6M0xqBbOfPv3w09G+ajUym75fPSc/pBLv28y+gEFMuKkm2DAdV8xmQ+ZWE0IC8zHjx8mEOHH0BrzZ59+9i5YweD4RBjDNt37AClGE/mKF2QZaUonBYD8qIkKwx5loOCprFYp/A+E6qQLvFBEXyGc4p5VdPQURVS+ZTOWSKSbsXOUgMEJSWhwK6m1/20OJfksdN5SRMAfXgjBfh+ma+iR4MMw9PLtLWOjRExM0XpdDV7o15b1odPlKeIl/Whh36X9owGSEDk5WGTmETyjc3zHFt7yaCDZzAqcVhOr61iCSItZSLlSW+pILYu2j0HdofxetSq94Hbbr2N3/yt3+T66z/djlOId6WooIakJ6ZVm0H0NuT2d8J5SWz4bkC4j7kEJM2lt1C7hdvXLgPVK1HSvCfQBr+UZBdFQRGZ/E1dx/IjLjSlW2JvZgx5ZlgYDciM3qRCISWqFWXM1jpNgOYmjpmIFIlCZVnksakOr0HhYou8hTHSzddei7QYu2sRiIx5rVA+EJwjN5phUZJnGh0S9oYQJUmBMnWvus9Kn7M1gybAJQf282NXPrr9fQpS/YXYL4H62VCfaxScF42v6OfYLzF8mtdtS0wVr1doA7s2RqadlRT7ZwuA7cLdclwg0xVlMcRkORvjdQgyHTKbTzh27CinTp0gBMeuXbvYvXs3JstYX1sn2dN5F5hXDcVggeFoxMJoSDEopIzMYvnqA+PxhNpZ6lo4i0VWYB0cO77K4ZNrbARDTbf5buXWJUB80/r3vYTAQVmUFGUBBOq6am0VlSIGQ1JnKL5Hp9KhdNYq0CqlyDPh3Il+pCPYhmFuGCgFvsEES6EQiCVCRSF6sqbjzaNxcV3X1HXd+myY2KhrDXJCII9wUHroiE8LWd3T1DXLy8sYY1hfF8x6MBiwfft2BuWIWSXjgR5Y2L5MVhZMqxmVt+RlRlZkeNVtau97/18qOGs3s1d5pqAUM6Q0ShSixIzplQBKKZxKxiSxDZ5EA3130tsbtY3yvQ8PMI8pZSpblIqD6D5seU1HzGxxPhLepNtFYq1DKdELs1F9NjPx5vMWYzIWRwsURQ5BJFlCcCK056WUk+8vi8PaGhHq05QmJzeaJpOxrgA0NhKKiTtgAJSKLs2x/Z5uah13TNXNMiYhxdRap9+9TNlcbLmHGDb7Hcktp7P9ryR2eUbGpeiG9nubR7/c3Aq692cKU8mZAmYIARdcBPp7wSb04vbmj5fvHTc4D1jnCVuevBXDS8ebAiYQCckZvmk4dWqVLDdoA/c/cB9Hjz7Egf17ecTlj0Ipxfr6RnQ6kg12Op9TliMGoxEmlwmAYrhAOYh+AlrhlMLhaILHug7jb3ygqhoms5logQ2WZGi7973SfyeopL+Zb7LtU4KL+dAp0W4u+2PGE7GpBAEkXloIAa26QNZuTj3oSOlEwAYT12YIwufy3pFluktE0qazRXwgnfc8T5MftJhpYvSnTa9v9JzmTfuGNimzS94GWhtMLvejiybMWZFTVY7prEJb166PPmJ4pmx2lrUnL5V84jpEi480TdMKLirVfem2m9LeL/FW691AfXLgpkd8YZ5lPPzQg2f+Pj7+6F3v5i1vfdumG6/L1uRWb5nk8QSnIOY8Fgk8AAAgAElEQVRD1MEiDkVnGcOyZFCKGqbI90AgzpYSgzIyQI2mJRZufeRGRry8d+14ViDgbSy7VXfR26/rJZB0RVqblMrsIyk7Ue37bXoSxPS3/3fvvfrXo3/Kt/w7lb0p84IzSwXaYzlzU+oEAzqF2/7HpRsxPb//2v7v+02Qs722/7o+htdZtwXW1sdY76mtZePUGmtrp3Desv+8A5x3/j5MYTi1usrq6VWyrGBl+w5CEE9RjMaUkjUFE+TuMAplZGg/lX+mMDKorsEHjfMiaDivZ9S2wtuSJnYaOzKx3nT++lnZphveCA8sBOncpWuhlNrETzRGbnofm1QJZ0tlZLeOEMKsCoQIe2ilCUFG+jJN7GhHSevQneP+fbo1sPYrqhSs+z/vm0P3x5XSe6YqT2vdcsWapqFuGorhgLzMcU3NdD6FTDPatoTVitXjx/BVUlzZ3HU/I5hlWeSzIBfx0Vc+OnYWC+no5AWz2YwqShT36/T25kuF1tbdVcghZyxm+SVtENyz74AMkpts0wnqX9ju9RJUQ8yCQrypda975mJ9r2O56qwlN4bF0ZDhcCBD4nWNDNjLeyrY5BKUMkMV9dZShup7EkQyOJ5F2ofI+ljrZFYwJOWGPlMsbLrxVfp9SOTR9L16VIf0akUrKUMMfG3208NKNudqvUcvoA0ihiMfv5lDtjW4dCVdF3jawKfUWYPY1vfeiq20nxGvW/L+Sp/f11pLHbDUWUsBzVpLYy3z2jKdV8zrio2NNfJcc8GF53Pw4D58cDxw+DDj8VgaCBo2put4BwRNVgq52WSKoDweizI5eZHJRkYguEAxLDG5x1iZ5ZzNaypf4YKVTbGffdNtuP3zufW799e0ZDQdKRaSJ6acS+tqdMg7vKtXIaWLmzZ2YtIRQiDTCnRs9FhhCHilySBCNdKc2QorpM2jDzMA7SbSNxdK/hnpmPpquimD77MSkgpK+ndjLUEripHgYvPZHJVnDFkCo7EuMK9qPPG9e6nZGcGsKAsGwwF1VKoIESyUg/cYE+tyaG+4eBV69026U7rMrM3cQoiKHFs5Q1KmJOlgpRRZ1n3xrYt6KzCZjjUEJMjqLmMTeRnaDpqc2DgiomL56S15luHxHf4VF0vwoQ0ggm9IB0poAKqlffggQUvRyYxrpdFaROV8aFln7VlqA1Es01R7X8foHnpnU3VaZGc80oYSIClmtOf37LUnBNi2uMAjDp4nn7nlpjvbUHSfJClrojOz1XHzkc7umZtWv2zt39Ttbu8DIZbeSYKp796dFnzCbc4IZo2lso5xNWde1+zYsZ2LLjqfXbt2MG9qxpMNateQl4IBWefYWJsAmsXFbeRFQTkoKUeD2MgQY1qTK9BEXwmHMtGNXWvxbnAVtZtjsSgT12zo2POtyXHaXDe5RalNY0KyPjfLIKm4Q6UAmEo0YzZnJmmus1WibS9dt5lorciUigIR4m4UdLofBaPtbzjpGvXxs3SsKVClYCbYaAyCsXx11mFrkddWcZ4ymCxuDFK1KKXauVGREBOjEpFlU9TOsj7ewALlaMhs3eICWK+w7v8gm33unl3UVVwsjWBBRS68kxBEv2EwGMWuikzGG62Z1fO2JayUGIIkop3WhjzatCl682ptvS0DsVmWUTXC6hU37rKtswFm8zmFMRRlSRZPamPFKMF5j8kNjXWoqDwKUM3m4B0Dk6GdZWlYsntlhe1LI4KtsNWcstAsLS1gCiNDr84JxcA
hsNluWSxWwzE1ieGjH/0E84M5bWdpGwdBs15tuH37o9y9ey8Wi8Dh4Zyi+AT379/n6dOnzOdzbpyekKQJu+2O0WhE13UYo5hMxozHI+rddui+qqri4uICYwy3b9/m0aNH1HVNWZZMp9PhazYbyTFNkoTgBFQeT8YcHM6GdKB+Y7fZbCT53Fru37/P4eEBxydHlGXB+dMn1NWOUSGYmiew2qwjP02zWCw4OT5mNp8znU7xwVOMSj78kQ/z9Pycd++/S1O3lOWU3a5hW7U0rZwvhJ6KYQTPjUB6CC66oorgPET8eL+QXW/PGfiG7/cIe//zXIHrF0IvFJD37aC4pqPGXeTeABufuy+ZEQ3qf54PIVr1fOvHt+zI4j+pflDa//rh+14omkGMIzWKRCmK1DAvc2bTGT60jMqU6WTEdrMA7RmPxcK967ooopfHy2Mm195dvRay/zCslpHQpAmDdAmxAtFBR7xDDy9Sa42O4i37/3L2nl2SJNmZ3mOuPbTIiBSVpbpmgJmG4GDJQ4Bf+R/4j3d5Dnd5sMRCNGZalMjKShnaw7Wb8YOZe3hGZfWA9D7VmREZrizMrl/x3vdVBxqfSlVN/2fddGGZfERT8VGHakoNp9CD9NRlPx7Y2i0XHGSwHMfB9306QagZRNOU/X6vEftCK/I4ls3JyZQ0KtiuU5I40mBOR7dPCKHod7rcXH9B2Jr65fHxkbAb8tvv/oLXrwr8wKc/GPLxwyeyLGM8HjVeR1GU7HY7xuMhrutqbytNsW1BlmVaYxOFbfQ0QVCVEt+2CIIAIbQ2pOdpL6usJN1eiLA1I2pRFKa1J8B1XM2IWpRsd3uiKEJg0el0EZZ27fO84u7ujjiOjaZiiSVskjgl9jVXWFVV3NzcMZvNuLy8ZDQakecFSkmSVLcaOYZX7OzsDFlV3F5/ZrfbopRkMpmQJAn7/Z6iKIjjmCAImnt3HId+v0+/39ehodQFAd/3Gl0I3XJWADUrrc1oNMX3fXq9LsPRkLIs+PLlGs9zOTmZMhmNuL+/Y7VaNWGwlLJpcxoMBoSdENAhDeY8YaeD4/hEUcxml5BkJaVOVWMbbi4LYfBhB7rsSpkEh6pDOvlkEdeGrW7zec4IKaUa4yMMtOh4nh+vi/r1oYuAgwX5lp0xBQNl0j/1+drXUUvLPdlX1WGhfHIPX20G7HzYT2o8rTB5PKHhR5pNQ7c6IjVzB5aFqCQdX9BzbF6eziirlPmgT9cTBI72xrIipaoK8twm+zUNAFVWGnFfx+aVKR+b5GZRldhS0zlLjYqgUrJBEFelbGJfq2XpK1k1IiWVeVxo6SnNH970h4mvc2bfet0e5GYsa9dWHSZIbYwdx2HQ77Ndaf6woih0Namq2Gw2OHZJtF6SxnuqqkBWNpbSZWY30ISBvV6f8WRKVhQ8rh4Jux2m8ymVrFhtNGHiaDpiNjshyzKur7+w2q7JHjPeffcdWVFiex6VUniBT15oTF5RaT72TreDZQmDki8R0gZDtlKUOf1Bj90uotjH+L6DsDtU2xIpBWHYAyyiXcx+vzUYJxvfCzRmTUK32+Pz5y8IIYj2e1arDUVREnZC4jhhMpnQ7Th8fP8Lj4+PZHmOYzkM+l1OJmMeHh71U19Jot0G3/d1a5rnst9rJoQk0TihPM/pdrtIKYlN9VUpRRRF3N7eMp1O8TyvYblVSrFdb1CV0nxoZYnvG8FkAanhd5NK8uHjLyyXK8bjET2TJxsONUX2er1msVihlOLVK5der0+SJIxGE5IkxvMCgkCjyq+vr7m9vePm5o7AD0jTjGiXEiUZEgcn6OK6DoWskFJoqimpWih7PZn1TNbGzLYs3SljPLBjcGtbS7P++cRACB3RNHmwliFrFxDaAsJ6N62ojpRfe2T1+kZfe8OmZiAh5kTAIdV0bGzrcx3j3J6uvQMfm1RG61UIjaAwPeWWZWngvLk2/SfNSitQOGWFlWecDfpUKuCk2+PiZMTFdEReJHz6/NGoo2lHo96e1c2sR6FGKtcDjNBsDo6UDSsllgZkWibBX5mEKZhyr7SaClBV1SrQze0/ER+pm3O/9QR6bnvOmNXXq/E8B7VopVQTAtWLTns0HqtdxIcPD4hqjU1FGATYtkuVa0ClpSzKsuD29oE002K/j48Lirtbbu5vtepQvKc/GnB3f9eoAiVpgh/4rNYrPl1dMZlMmM3nFF5Of9Dn8fEREOxjDV14cXmB4zgkSUKepwZ2YhvGjy29nhYa2cd7I1dnN2PQ7XbYbvZav8B26PV6CMs2zBIlvV6P6eSExXLJar1msVo1EzdNdHvVarXCc0cEgfZ8Jt6E7XbHn/74Rx7u7wmCkF63i6wki8VC48CShM1mzXq9oSwLRqMRg8FATzDH4eTkhLOzM1arFT/++KMO9aUWmJ3NZvR6Pe05dzp8+fyF7doYSU/v2+v1UEpxf3/HZrshL1KiKKLTCZnNZrx4cU6e59r4ZhmnJzPe+AFpmjKZTJFScXd3z6tXrxBCMBwOieO4IQ2wbf2gcl2Pjx8/s49ykrzC9QN6QVeLclSa1lwrlhtBGDOH4eD12EJX8ETL0BznfJ8zBu2HtRCCY0mTtgfWNobt49dFh9qQ1UdoVog4rDPF8+svoB5SAAAgAElEQVSpPv+3Ujrt839rv9qz0x5shVJodSihe7uV0fCgVrKyhCZAtW0tpVhmWElK39YdF15ZMPJ6DH2HNPOQkymdbpfReNzMM3jGmDm2Zv+sygpZV3JkHQ4qMH1yZY22N6wWldK+dl3ENnevga4mWLdsi7wo9aCbXsjat66TnlqNSF9WLTdfu+jtATv24Oqyt2PI7jDIbWWekHWSOjfHr4+rpGSz2ZBlGVG0xSVmNPToBF1Nm11kOK6NMiIdGgZRMJ9OGJ1M2CcR692W3X6rGVj7PZSQbHcbsjxjPB3heZ5u9kbQMYnn0IQ+6mFBVSmWywVplmA5FufnZ03/YhRrNg3bJJ2llJyentI3ikJ1TkeHbhnCspjNZtzd3VOWFePxAN/3mc81xXSvP6CsKgOI1UbUdVxNSQ0MB31OJmOur6+xjR7kZrWkzHPWyxWvXw8pRUUc7ynyDN8P6HV72sAZaut6PMuyZDrVFcyaqWQwGLDdblkuVw31keu6zGYzI+mmOD2dNyBbgO12y2azptvtcvniBUkaN90e6/WSKNJGXgidoF+vN3Q6XTq9PpWEaL/DdjyifcIuivl4dc3Z2SmPixXb3ZY4yViutAGVCA13qSp829YizJbDbp/ovmHHwxJuzV/TtNwJNKmhQguE0DIAtYd2nPdtt+c1DkDLUB0/1I8LCHXE0TYuUkps0+gvpTTU3LZZZ8aEGW+sNGumPm9d9ayqqsn3teEk9ZprX287lG7WoqgaCcQnm9TaAa7raFW9SmMh6vyeg+737IcdZv0+2WrNYNAjXay432zA0q2KxXpHmuVEeYHcx83hnykAHPrEvrUppaEVtm1rWFhTRq5BsWawZcu9Nl5YXecUZlCPS8LlUQWo7d7WugDtJ9jx1rjrou45O3wBdbjjCKsxcEmSGN
iBFuL1Agth2ZSm728fpSB1uFZVOgy0HZt9EoOt+cfOB+f4S48PVx/56ZefAOgP+ibXpUPGwWDA+fkF79+/1x6VMdCe79MfDEjSlOnJlH0ccXd314SZ4/EY1/Vwfb+p8qEwykUa71SH0KB1KnVv5FQn6MuqmYhRFCFRujqZxCgp8RyH4XDIZDKhP+hTFLn+XFUxGY9ZrVZNk31Zlnz8+KkxMsPhkMAPyNKUxeMjVVUxHo/xPM9ALgT9fp+TkxN+/vln9vs97969Y7Va8f79B758+cJsNmM2mzV50cViwWQ04vz8nMXikdvbW82oYDwPz3cpSk0SmWWa634+n9Ptdvj48QNZmvP2zXekWUa0iMiyjBcvXuD5Pj/9/HPDGxdFEYPBgMfHRxzHYTyZcHX1Gcfz6A9dvFDS6fbo9bso4VBIhZXkFBLSvEQIl5oc4/D4FrQbn9t5ptqjaT+M67+1mTPaRrAdkTzx2loFsqcPdk3iaTumY6EN7RCGYkfXMp5WTM3YYo5TlmXjUBx7ld9K77QNtYax1EK/raS/IcEXaHowbWcqVCmo7FLj5GSBKAuqXcTj1Wei0EMiKcoUJSqErVXGhKPb3lzX5X//P/QZvi4A2JZGJqPzIjWCWJj8E8okECs9VMZvbVVE6q9VND91s2mNvam/CE0pUveNKQAlqUz1qm3M6nwKfM2PfrwdKkJPzGZzHA3H6ICALMvYx7F+qpvdpIQsL8nzim20J9rF2DgEQYjjePT7HfwgoJQl0X6P3CqCjk9eFXihhxu6rLdrJJKgoxP3cZwQJXs+XX9iMB6iBHhhyGQyYbfdkmUZHSPWIZB4nsN6vaYsFX4QkKQpu+2ezWZDr9fDth3ifWIeIRZVpfB9j/G4wz66ARTdbkAYhtw/PFCWJefn5yYXl1LJkiLXgimaX0xQVgVJokPd/+fjT3Q7IbPZHEsoumHATRqTphndTp8X5+ecnMyI9hGLxyWLxwfiaAdCsNtFrDfay6kT/99//z2j0YjFYkFdWfY8V7fBAHsjt1d7y8OhDh00XVM9PySVLInjmM1mQ7/fN16fTVkWbDZrttstVSVZrlcsFkvCMOR3f/k7kizl//wv/wWlFCezGUEQsI/39AZ9gjAEFHf399iOw269RQgHpYTGEiYR2J5RH88oCt1vrK/L0SrhyhgKoYVfdCvP1wWq9rw9hhm1K37HD/Jjo1Fv7Z7e4yJBayXUzX9NKNxOpukCgJn75s+NQBAHY9Z+fWzYvlqDdduhMNmp1t/q4wuMDSkrLUhsWZSVxJI5dmBBUZJst2R7yKucqioMlZwWZKmE+srxewY024qXa4uNNgp1+7bVmHjVQB9qi2+1Sbmh8UAcMwFkdUggNjF9K6gXol0JeToRavf3W1td5pVSmsrSYRjrSiomvJVSK1XXSlCu7VCWEsuS5JXAsR2EZdp5pKlYKQ112MV7HM/Dci2yNOHhyz2O5zKajFBCkZUZg+GA6ckUS9gNQ+xmsyFJUjzPZ7vV1NK9Xo8kTbm7vefkZEoS7xkNB2y3e+azObt4j+/7FGXF5eVLlssVWZbgulr8NstzIAMEnU6P8/NzVus1p2enbDc7omhLp9vj6vNHlNIh0HK5IMtTbNui0wk1XESgacVvrnm8u+Yf/v5/5W//9m/I85z//J//M1WpPbXJeMrl5aVB0W+1RmiSkmdZ0yBeFAVBEOD7PldXV7iuy8uXLxmNRvz0008IIbi4uAA0C2wdaq7Xa87mc2zb5sOH9/T7fYIwINptiZMY27Y4OZliWcLsY+O6vg7Ho0QXUDohw+EQ0GmBh0dtzH/z299i2zb/+I//yHw+Zzweo5QiTmIWiwWe5yEyPV8qJXE8H8fRfS22pfA9m9QVGtSsezLAkppx2ViBxigo9WQBt+dnO9l/7KU9N++fe/84v3Z4n+Yhrlr7tO3XAWB+yPM9OUfr9XGurP37saFtX0dTCdVNqhxWu/YeawHlmomjqiqqQlBZmnq8sG1yKt2qKCSadEziOBaWcEiTxERbT6/hV42ZTiaq5lJQCse2sYXuercUOqGnLRwSrZFXi9ADRu/YYLKEhWPLJkdGdQDSHapC+qYPFZqneJ3jwTwuEKj6S1BP70WaQavVncqioCrLBmuGq8iKHNtRKKE783tDD8vxyBNNpVOkCfs4Jor3uIHH2fkp/fEAt+dRqRI3cHh8eKDX6/Lb3/6WMAy5vb3FsuDly5f87ne/Y7Xa8Pj4iOf5WigkydisdeVxs9mB0hTS8T5hF0R8ub0lCDqkecZoNKYoKvK80JVMbCxLtzlVpdTkglnKcNgny1LSLGZ+OqMoStbrlW4jERa2LRiPh3iej+fr3jeNnaqYzy549/qS9XrF+/fv6fd7SCn5q7/6PWEYst/HrDcrsjTT4igCsixlsYjodnu8ev0ax9UGqt/v4zhOYyxqTvfa46qxZcPhkCAIjB5mypcvX9jvI05OJmRZwma7IQwD8jwjjmPCMOD+/o4k2dMf6HPIqqIThigl+PTpM57r8erVG8288PEKpdDMGqcX2I7NcrlmNJzwhz/8J+7v7vjjH//IP/z9/8bt7S3/8sO/4/kBvu/hei6u7+FKRVmV5GVBWkoEJXWTc8NQbPoXiyKv/aBmDrd/Pw43nxgG0bKKf2ZrVzKb98z/FXX+Wqsdaarp1ufE4fP1+5axevX11on8r2zCkXE73qSoWs5Pq5DQVBZVUxCoTHAnZYlEqz5VUpBKaRrOFVVVolSJU0CWm+qrUtpDbh3/WWPWHqzGYJikpm4e1/6pjWbC0J1eGn0sTK9/bTW1myl05RMjP98yZE3VQyqomWm/4Up/qwp0fL21MWvvU+cC+r2eziHVXOlKNTgzx3GJko2mCcfGd32CsIvApkoSqqpkNBnTGXRJ85QoifD6PvPzGY5nmwKBFsMty5KHhwfu7ozgRn+I53ns9/sGsLtcLvHcgLu7ex4fHxkOh1xeXFCWEiF0tVQpQ+RoOyZnZlOVin0ZQ6xVcHwvJE40QWGWZ5Slq7n9txsm0xPOzkakacpsNmM4HrHd7thGe6pKKz+lacx0NqPXDUmzFMfS+cTr68/Nd+B5Ho+Pj+R5wWAwNMn7zOD1wgYLt9lsifY6JE7TlLdv32LbmmnBdd1GyEV3FcT4vg/oUDMMQ52ULjRmLwgDhCVMJ0GPx8cHlssF5+dnTKdTLSqz3VJWBcPhkPnpKbvtnvv7BednF/i+z5nheLu6uuLu7g7P6HL2upoZ+NPHT9i2zfff/5VWg4oiNEo9o1IVTlnilAVKCfI8QclSsz9YShMx1rkgW5MfWApU9bS3uD0H2z+PjYZ500RETw3I8THa8/14/dYeGhwMSJMNUgp1vI4ETzw0YR1yZMdEkMcFi+e8swagjd36G8ZrNPfe8kxBavbI+q+2hbRBubYmbZQCKcWBcKIqTU5cPHlofA3NMFiPGqfVWH+lY1xhQLFCSg0yE+AIYUjrNC6+agWzz3lSGMoQpTlWENI4oUJg2aJpum274seD961cwnPGTCmlK64ttRth6LvrPJrnuHS7Pda39
6SZyQ8FulxfFZKq0iyjQRgy7U/JZcl6uyItEipVMuj28AOPMAj48vmGH374N4oix7L0EC+XazbrDYvFyngkLlUptQGNIoIg1Ohyy8FzfU6mM9brNZ5vkaQJtuORZ3pRaS9AUBQ6G9Lp+mRZwT7b47g2p6dz4niPbfie4mRPp+vjB57ORwndKVFJxX6/w/Nc3rx+xXQ6wZIFy/sbiqLg8+fPWJbFH/7wd6RpytXVlcF8CQbDAWmacX9/z4sXL5hOT8iynOsvN9oDdhyiKMJ13SaklFJqLxjdSXF+fo5Sqiky9Pt9Qt9nvVxSyZL7+3uD7etRmkJGLQ1Xe3cAlSzpdrv4vse62uK4Dp1el09XV8YTEvhBgB8GBEHAeDRmcjIlK3Jubm85mZ1QKUmcJOyiHd1ehzTVOcWiKLBKrSgkK6VVv6SgEoqafUJia74+28IxeWRLfO15tQtRvxZK1hTV7ejkeB3U49ne9HiIBoherx/NWaw4PqWqc2b16/r31rmecySeCz+fHJc2bs566mgqXTyUAhx1yKHpbgQLKRTbLMaT4CkH19asPIpKs9IeTqIZSVrb155ZK5l/nJjUr/XvUmk2DS3VrisnUum4VimohEDKSmsJGk1NywZZ6ieavh7zZVggsBG2ZqW1n/mi4JCM/Grw2gPaCjdbe+pEuaqI9juoSjpBCMJlH0dYa4FjW4ynQzwvIMsjtpuYOMqxHc1PVeQFRZnxsHhgcjLm5GzGZDbBC118w3rqOA7SUSbB7eH7AVJKvnz5wuJxSSfsNMBP23ZIs5TQEiYv4+A4Nnd3twSBDm+GoyGrzRqlFIVpoYrjGCEE4/GULMvJigLL0nQ9nhcQJxFJkuL7umWoKktydH9pvNc6np5jMx4N6Q8GdMKA/X7PZDzUmopScn7xgtVqzWAw0s3exmudTCYMhyOiaM96vcJxbAOKVdze3pFluVFx1/c9GAyYTqdahcr3cV2X+/t7bm9vGQwGuK6W9asqzZyhpGS/j1BKMegPWG9W5HnOycmUssw5Pz83OTrdtTCZTOj3e+yiXdM/GoQBUgnW6xVRtOfx8YE3b97y+9//jn/8x//eqGVVsmzEWRxXS5dNJlM6PZ0aqAqdgihlgShKhOVgOS6e7RhFJs2wrLPmetFo2KWldTXr56kxXJXBbNbVw9pg1BXEOnQSTQhqkvZCG0YlQCj9Wpi5rPn0teYlFljYhnvtCEfWtljttc7z7yMVyvo6P3e83p54ifXaE2b/egDqCoM65BGrSlHZmlNNKgz1NtRdlnr8FFQlpWzl2mQJSuE7br2sn2xfGTPXspGmf1JVT9syLMc2NB7oJ1JVohyTb5GCQsoD2ZoUWIah1Ta02VJVDQ2Ilm5HMw/UpHFoapM2R9FXg9Z6usHTkrVlkNfYICSG16vSTyXdrKkpvB2LTGa6hQYfO7PYpT492aHIC5K4wLYsLYqxy/FdLWZSSslqu8bvBaSfrxgMely+vqQsSjZLDRgdDiakac54fEISx+z3MXlWMByOuLu7w3FcptOJlrQzxn847NPt9hiPxux2GwaDDjc3d1xfXzOaTBiNhuyTGCFshOgAwtDqlDieR5bm2JaLwCGJMqocKiHJkhwQfPebS8qy4NOnD9zdfNaN3pMR0+mEwLX453/+F/71n/67zselOePBiDhJePnyBX/5l39BlqUkZUqvp3Nx+/3O0PXohbdYLNDOu2LseZyfnfLx40fNoqtdcGRZ0Bn0mYxGXLsOyIqrjx/p9rpMxiPyLGO73uM6Lq9+c8loOGj6MJM04cv1F8qioNfrsNtuCeYzwsBnu93y5fM1WZ6S5wUns1POzi5QEjzX4eL8lDRN+enHP5LnCUWREfgeV1cfKYsSz/eI44i/+qu/4uRkzM8//YQNzCYjPNvm6uoazwuZTIe4XsBqu6cf9tinGWWu4SSWI1BlRaEyKmFhodv3hMFnIrXRQRneMeMRSWPYaoyXafjUwFEUNJ6fpm7QJKMVvhcgjIiKRe0FopM9Zl3ooE00ECW9hsxaOVQDjOFqLTZhjIeUhs7ogC9Tx45CO++mI2/9Wh7Wo6YI0xGCIQXCsi0tBCMrKttF+YJSQanAVgW2UjiqQinNIo0ywjHS6HRUNVf3UyP7Nc6sRu+24vomXrc0U6tCN40jS1Sp8wW6ETrHauJqpVsr0KyRNWOTQmpeJktpQ2YZrnHjrVlHVv+4glOD+GpjdlwcOBg3fbZ6oEXtB1pa/9N3tVyWbTskecLtwxfSPEaWFbZwiZOELE20l+FAlGgesW6vh+d6SPR1xJHeJ00TPnz4wN/8T3/H+18+8OLFJauVDikvLl5oehwjCjyfz7m5uTXA0My8b/PwmBFHOyxRARbz+QmlguXqET/QVbovX24pi5I4yagqycV4Qq83oCgq1usNjuNT5BW7bUS0ixmNBgy6fcKOTyf0CHyfXbQj3m3ZLBeAYNjvcH/3wHq9o9cds90m2JZNtEtJ4owsT/F8n2z5yM8//4SUFUEQMp3OyNKC9XprsGemxzEMTTJesdlsAFgul7x8+ZL1esXtzY0GDochL15c0DNA28FggO95BL7L4+Mj0+mUIPB5+PGebqeDMlTgk8mEeB/zyy+/kOc5Yegzm01ZrzdUZcF6tWQwGCJlSZ6ndDqhIaZUrFZLzs7mvH79iiiKKAotIH1/d6erpvs9QlZcX3/BcXzO53PKErbLDbN5yPnslNUuxlYWjgBhWVRYWi2skvohbRagVIfiljpa/LJO59Rz1MxWpSS2aSNUSrPRIizN22eBUFbD4ScNU0cjBmUMXJOXEkITN4qaufiQo2u3+9XGyRQfD3kvnuacG4PW2JBDtwFoKkILsG0H2zJwrGYsjJVD4diWGRtDuy2sZk3LykLW92065pXSob1SWrvAtmqz9RTZ8JUxaxuw54F5qrnhpjigd0IYY3cwNhbSkroJvR61J6PHk8FpwtiWETv+/dcSkPW1m8up9z6aSDqsCDsa0KoqyW4Xs44jNus1/e6A0/mcMt4TxTFBJySvKqJox3gy1IrKQcB3797Q63b4+eef+dd/+4Fut0NRSv7bf/2/+fTpipcvXzEYDBpsVWzaleI4ZjAYGIbVUHcMKEm322WxWOC6Ho+PuivgxcUl/TDUXjKQpgmb9ZqOUUWP4wQh6r5XTX00m80ALUnXCTvMZnPDn55gCa1Q/vjwQJpmbLdbTk9PGY8mrBYbsjSjyNdUBUxPpkyn03pZEAS+IXl0qCphdClT8qxs5kdRlLiuFruoVdsnkwmOo0Vy7u7u+PLlSwNcnU6nzOdzDY1xXU5OTlivlqw3epwc19G0QllGVRb07B6u57M3xIul1J7VYDBAodhF1xRlhe8HDcatxrTpPJRoQt40TVgsHg2Y+Zztdst6vWbQ73N5fobv+2w3e3bbvTawYY9OJ2SfZsiWjoSwa89B6xXoPJTVzLWn1Xozh+ucc2s5HnK9mpG4LrjVc/m4ENY2NOZkZv+DMXuyPlvL7smaaTsr6Py15tbT2NLj7oLjTbTXmjls+1rbn/wq4mr9k63fDzqi
0hhd/Z6S9WxsH+GwPWvM6hs+rrq0vZ922bZGoAshSOP4q2O0E/ntAXkukfjcgP257Tgx+vwNmS+rvp/ay2uKAtrbjPYxk1ISBCFJkJIXJUWlqUY8P2C1WvP5+gvCErz7zXe4XkAUJyglePnqJe9/eU+v1+OXX35pkrlVVTGfz+n3+/zbv/1bwxFWM6XWHsxyueS3794hhODjB83V9f1f/zV+EHBzc0+WZfi+z2g0MsDegijak6SZKQ7IxoORssLz9bE/ffpEmsV0u1pH8vz8nDTNDOreavi+BoMhWVZhCRiNBrx8+ZKyyjWerPIZDIbMZjMeHh64urri4eEe0MKymlVWNT2WsZkH0+m0WRCfP3/m5uaWJNG4uCzTBrXeiqIgTlIcM2ZXnz/zm9/8hr/4i7/g7u5Oy8e5HtPpCY5j8/D4oA1bFGnSxk4Xv1JkWUGW6Z7b6XRqGv6vsW27MZ7r9RrQCy/LMqSUWnNzMEAphe8HhKEiSwsQuj/W81xKBUlRIqpKRxl1jCZ1zhilELb7ZH4/mYZHD+bG4KiDj1Ovr+O10xQ8vhGdfGPaP/lZX8Nzy0Upra5RVzwP4enTaxfW8+eqz1RVugRYtyDWLVdCPD3ecXGhNmSa5t7QgFMrE9ShM1CVfFWC5RvGrJ2XapeP4dC3VX+mMg3k7VLuc1/icXn52STiN6ojzx3nubI1aLZSXW01eT5xiOkFLcOLmUyGvLHTCXEdn6LQyP/A9/GCkGgXaYbL0GcXxbx+8x39QY8sS3hcrowBTFgt15ydX9Dp9Hj37jdYlsVyuWQ+n/Pw8ECW6XzZ73//vUG+CzN2Et/XlNHz+RnL5Zr5bM5sNifLMj5//kycJET7lG6312hG3t7e43o+QujJkmZbsjwhyzNOplM2mzWzkxnvP2ij2ul0ePnykv0+otfrMZmcIM3jcLlaE4ZdfL/DbhuTpgWTyVizVgTaOCG0yG5Vlez3e05PT9luI9I0J0szhBD0+4OGtNF1XW5vb3n79i3z+RwpJa7rMhwOGjiHlJLr62smkwlBEPD4+Ei8j+l2OgwHI85Oz+mEXX0vsznj8YirK91ONZudEPg69O52uti2bRSxtNBwGIY4ju6kqPtChRANOBdoWr4A0xql5/DHj1cIocPZTqfLYrFmvY3o9gZaoT7XRKWyzI0+q2F+qSqqyuLXgN/H7z19rYwXZjcG7dhwCSGaRv1jQ9esg7bB4OuA6JtVyCPnpb3m2kb310xZ7YEpDt0CtcNT5+JqD612LJ5ctwLXsvCEwhWHqq4tNN0+iEZ85dgz+wpOf1yCbdOMKKUa8GNNmlf3TNYSX23l4+e2b325bc+sTct7PPC1lW/nytpb/eW3qzmWid/rfXXooatrdeXRcd2G3TROYtabLUmSsk8Tk0i1eVwsCTtdzs4viPYJ//qvP5AXJe/e/ZbT8xcoYfGXv/sdYdhhu93xww//DghOTmamcqYn6t3dHYvFUisTxUlzLS9evGDQH2DbNpeXl7x+/Zr9fs92qw2QfnhIU2XTfZt1w3o9/vtoT57n+L6HxksVdDod3r75jnfvfoNtO/zpTz/q8BcL3w95++Y7Xry4JAxDEzJorcuffv4TSRLz+vVrTk9PTRfDFt/3OTk54eRkwmjUJwh9ExKGnJ7OjbGcIITgy5cv5HneeKmj0YjT0zmvX7/WuqHGyCiluL+/RwhBt9tDSvANJc9ms8P3AtarDff3D4bUMWWz2ZImOVJqYz2ZTBvGjpqJ49OnT+z3e9OxsGvmW5qm3NzcEEURl5eX9PuawWS73TEcjfjurZbeK4sC33d5dXlBpxuy3qywHa0DWXNyKVmBrAzjbI1yV8/O4ece3s8ZlrpzpoaiPJu7+sa6fWLcjn/S0tR4Zl0+ZyC/+vet/4w3V6/P9vXUNqRstSYeX7O5gEavQ8PCBS4WtgBXGPJGx8J3BIGtf9bbs7TZtUVtd/vXm+/7WJbV0FM/h3VpD3zTRtS64G99uc+9bg/Gt3J49b/2tdSDqyeFpcG+rXtqmrMto1xOnXAUWLZDkqZkaQoCLMdF2DZht8vVly+cnM1x/YDH5Zqs+NFQclvs9wnuuUdVVliWTZKk/PTTzwwGAwOXCIy3q+Ertu0Y7nwH1y2QUhGEHR4eHrm708yuVakZGWQleXh4oBNq7Nt0MqHbC1lvdg3C3vc90jjl4eGe8XjI4vGRstXl8OnjJ4bDEXEcs1ysSRLdm/n69RvOzs5RCubzM9IkJctSPn36SJrG/O3f/jWe52LbDo7j0ut1jajJ1iD4e5SlxPNd5vMZcZwacsoxu92OOI7pdrv8/PPPXF1d47oaUzZoQrpDH2ecxKRZhmO7rDc74mTPcrkgivcUec70ZM7bt9+xjyPyoqAoC/KiQFg2F5eXfLn+wj7aN/Ohbupvg3Y1KabW3ayNbD03bNum1+uTFyVJvCPLU/q9AYPhiEJKFsvKdJIofN+BSpJXOgfm2DbCcjRrhvjKT/hqAT/zV1DSMLvU5AE0ha/ncsfHD3R1tIYaj0yp1jWJQz7uaJ9fM5LPXr/SHlHjKz2TTnrycVX7iYfjPT2mMt1FtZaBibDqfYXCtb0nUJV6+5oCyNA1f8u7qge1qT6Yf83njwxZ+4J//Yt83v39ysX9xhPq8OXWCdKDIatFhC3LIs81P3/pOYZfTU8aWwSgLFbbHeORTnxHUUS3G2I7No7r8vr1O5bLR9brDXGSopRgv0/pdDqErsdyueaf/umfefXyBZ7n8Yc//KG579PTUxaLBePxmPl83lD8FIWh7hG6vacyjKybzQbLspiezFlvNigh+O6776hKyWKxxPU8FosF0T4hTRNev36FbdssHhb4nt8sXiklYdjRLU0bTWZYlpLtZoEQFkJk3Nzc8fvff8+gP8+IZIwAACAASURBVOLkZMJ6o0Oz1XpFWRbc3NxwcjLl7XdvdGLeKGI/PDxwcnJi+OEqyjJnvVkhsI1Gpu62CIJAy8jFMXmecnb2mjdv3jTe3s3NDTc3NyiliOOUx8WSbqdLFEVYlqDb7ZOmmgZpNpuTZjl5XtLp6KT8bD7TD9ksZ73eEPg+l5eXjZF/eHhgvV4znU55fHxkt9uhlOLy8pI8zxvPrWbv2Gy2DPp9/TAx5KS6kgqeZ7Pf7LBtmzBwsEpFlWiVLNd2sV2POMv+rOf03CaMjUnTFN/3sG278cra+9SGt02/3TZGTx7qQgsV6/VTv98ytEeOAaqWrRNPjmteHK6/XdQ0R9R2xWDqzDW082a1GLdS3yZ41OepiwCmoGHYqlGqqbjW52rZsmdwZq5LmqbNoNWhpW3bDYNoOzFZh5rHTeDHHlpN8XLcKH78JT/Hjl7v17b4x537B9dWNgNSnxf0lyOERq57ntfck1ZaklhCYQsdgu6iLULAxYtzfN8DoKoKPn78yHw+4/bmljiO6XV1I/RwqD0MxxKsFg/c3NzS7+tQ6/7+nv1+z3Q6Jc9z3r9/T7fbJQiCJkle521WqxWhH+rStu2yXK6IYk0fNBwN9X1YmkjQDwLef/jAqzdvzD25KKVpeZR
U7PcFjmMzGo24uHjBfH7C9fVnNps1AovRSOtXfrm+JcsqwqBn4CGPjMdDM24Sx7F59+4d3W6H5WIFSj/wwjDk5cuXeJ5LmmY4jo3vu+R5hm15VFWF53lcXl6y2WwacOx0OuVv//ZvePv2rTFeegyCICBJErT+q02aF0gEo9GETjfUtN62YDgeG4GXHN9xyMuKDx+vePv2DXGScnp6ysX5OZ6ntRU6nQ5ZpuEvuhFezwvb1oBfx3EaY9btdtlud+Rpzs8/v2c0GgKKMi/o9fqaYMDVEj7dbk8LZO9iBBKhpKb3Frbxgp6ug3Zo1V4X9XZIyms9DaVUE/0cG7Nvva47der1cPjcAZqhc25P1ZXqy9CfwRg0HS4LakX0w9q1DaOtLWwN6FWKSmr+w1IqbKvuCHrq9HB0vzXD7VMomB5fS+iuCmXaIi0hkMZDPaYCq7dnPbN20q6dB6tP3E6+t5OTysS77UE+/tKee93e/iPe26/tV5ambG4c1MoMlG08tclkYlhFXZTKdOJWatMvkVRVwePjgvn8lNFoQBTt6fU6BMGQn3/+yeRbEsbjEa9evSJNU75c3yBVxW/evqHf7wPKYJh0Ra3X6+G6LldXV/T7/UbkQxuaiyZJfXFxQbyLiYiYzbS3cf+4IAw7jEcjfvj3f0dWivv7B8aTMXmuYQKvX2sG1YeHR9PkLLi8vKTT8fnxpx/ZbNZ0OloMV7PQ2gbToyhLie/pPkvLCrEtm+l0wmLxyKtXL4miiD/+8Y+EYUi329Wycp7Py5cvdUXx4YFev0tZlqyWK2RlMZ0ODGWS07C53tzcsFqtGI/HjMdjQGPP1us1y+WSzUYn74Owg1KQ5TmdbpfBcMh4PCaKdiilxZGvrj7R7XU5PztDKt1fuotiVsslVVnhOC673Y7RaMR8Pm/mtVaKsuh2u2YsUrZbTao5GAyYz+coCff7B/zA5+z8nH20o8gzXNfGzgR5UdLrhXihB2mB41h0Ay2zl5WmWm5Zxus9CgFbBu04yjCf0N6HOBizOup5LkfcLs613y+LqgmfhRERNnt8ZUDr9/9jyX19Tttcj2sb3UqlKEtBaQyPbN3jkwhKHSq28HU+UR9fGzTL1vqjNcmjsMCSWkhY1Q+LpxHr86DZ47xUm9Oo7ZG1c1X1ZwLD8/6trW34jnNnv7ZfvbXP+WvJU8dQESlZNb18vu83IVJR5E2YabkWstSiIFWRYwlJmWesl48kScZ0MuTFxTkWksfFgmi74fbmmulkStgJybO0oXYuy4rJeESWZcT7hMFwyOnpGQIY9O94eHig2+sx6Pcoy5Jot8f3Anwv0Pxqe4307/UGnJycICyH0WgEQguvhGFXQyZkhet5DEdDbMcmDAOurhKWD0uGwyG+71FWpWk3qri+vm5C281mx3a7A6WPqduChozHQ/qDEIVkNj+hkiX/9f/6b3z8+JHz8wv+7u/+wLt37/A8h7v7W4IgYDIZc3d/y93dDWVZMZ3MCQKfTqfXTP7xeExdhev1eo2oSWbCsVqdKcsysrwkTUv8IMS2c+J9wunpGUGgc3/b7V4LxmJR66+GQUie5QRhhzTZc3NzS5YlWJbF7e1tU319+fIlaZpyf39PFEWEYUhRFEgpm7kRBCHD4YhbU6RZrR7pdQJ8f0gYuOzjHYHvolSl5woS33ORAsqqMAzMdhO+/bmH8zdmOUodilm1Y9EuBBwqhDx1PmRFVuTNkXTuuO0lPbUAtSf23HU2a7N1LNSB8cKuvTWpNPCVgydYX2N9HE3Lpe+rDqfrauZTA6t7BaTQRTeFwbrpIW0iKIXJo7UCva8FTUxoVhuLqqrIsgzHyM3VT4njZGSTuzLG4jm3+skAHVttDpWUY3e0fcPH+7VD3vqLtW1NN6TfEHiuS7ergaY1Q6tj6WvTRHqKvCyJIq0ufnF+RhKnLBePzGdzqjLnwy8/a6yRbTMaDSmLnCLPwLjYFoLddsdysWAyHqOkYrlckSQpspJ0Oh06nS5VdYdjO/h+gOtKk1TXoc5isURViuFgSFmUZGnGeDLBsizujACIEDYX5xfs9hH39/ecns55XCwARX/QwxICx9Y9p6vlEs/XSe+Hh3uSJNaN81VFluWUhWYHqUrJbqsXt+PC4+KGFy/OOTmZ8rvf/SWfP1+TZTnL5Qrb/sAu2hIEHsPhkCTZ60b1N6/YbiMs4bBardhsdozHY7bbrenvtIyxCJrURKejRUWWy2WDt9tut/h+yUg45NmGfn9AEqdYwiYMO+T5mtev35iChI1SOUmekySxZsndbdnvtkynY3q9Hsvlkv1+33jAu92O9XrNarVisVgAGKjHDMuymZ+e0un0uLu/Z7fbMh6P6YY+vV6HzWZFWeR0fJ+8qpCyACURQoKskFWhW+gsH2U9z5hRz9Ff3wy84ShkBL7KodWORf0ZS1oNE8lza7s2lL8WLf2aYQM0vEII0zZo1nk7lyWeYsgaYyYPHQptZ+m4gFFUkrJyKKzGLBrFdZBYaM5EDVD+Vc/s+Ea0F1M86wU9d9PfYgQ4tsC/Nli/9rdveXPtn2WpuaaU0q0T9SLqdru6L05KI5mmGmOmJdEiTk9njEdDkiiiqgqGgx4CxXq95OLFC6LtBoFiPjthenJCVZbcPzwQ+C6vX74kjWOUVOx2keH7slitNkSRlpx78+ateRjoZvFer2sqfhqi4QiH4XDMbrelKEq6YZ+iKImiWAuh5BmL5SMnsxmPjw9kWYrnOSwWmkLodHbK1afPlGVJEAZst5sGEpFlGUmSkmU5nbBDZhXYtsd+nyBljOe5VNJlMu2z2awZjyd89+47bNvh/fuP/I//8c+cnEz5/vvf0e/38DyH0WjIl5vP+L5mrE2SBFnmRFFsKoM98lxTXAshGhqkGt4jpWzC/jRNTQjjEEUxnW6H0WRs0PWS8WhEp9tlMplg25buGU1jov2e5XrDzZcvuLag19VV45ubm6a7YLfb8enTJ5RS9E1yv4bpPDw8EMcxP/74J4RwKAuJJSzG4xEvLs4o8kQLLMuSfq9Dd9ClKBVKCWynJC8VaZYbwKcmd1Tqzxmsb2xCmDwUT6KP43ler6m2sawf5J1O5wCXkk+Noj7m8bp6anDrdy3zot1RgInQLCGQpRbj1d3iWrC6drmOHZjnjFl9D+1NKSgk5JUEWzUFWMvElRUYlbdadPlXqpm15W8sfYuKpO3mtm/+OSt8/Lf68/9f3O7/SNh5vNUudaUqTQZpNDNrfJxQWlE9LwqKPG+S/o7r0u/16YYhq+UDVZUznYywLHBswfnZnPFwwKvLC95/+MhquWIf7RAIup0OeZaxWCxQSmk2iyzXua7xRGuOllqp3HU9fN8niiK+fPmC67omp+ZRVal+MhUFk8kU13UpDFzg4uIFSim645CHhwekrDg9nbNarwznfsbd3R1n83MNERCCwWBgyAx1H2QcJwaBn+B7IWWpFXK22y3j8ZTReMxw5PGb375is17z8PCIEJqdoiiqpnNhNpsznoz4+PEX03pVEccZldRkgP1BH90Ur0PM2pgBjZpOlmXNuA
SBpvhWgOf7gKAoS4IgJM8Lcq/Q1UrbptvrURQlZalYbzYslwseHx804DaJdb+kqrAtge/7jeHSvG1ayXw+nz+pbM9mswYy4zgO3U7I5YtLPNdhF62JtlvuH25IEg0xGfT7lBKE5eK4GdE+Zx+lpi/RpsJCPZPj+o/OZ72P+GrB12urfu+4sAAHyFEdVdW4L30A/aMN86jDzKcXcPis0tas+UOd+5JVpXURhECYSiMm51aHj/U11aGyVRtlcTCMT9AK6J7VSkGFoOTAV1Y/GqrDGXTV89c8s/ZAtZVfjg1bG0wLB1f42DM7drO/VRT4te3PJf2fuvA22gPWaO9ORy/kupiRpwlKSs00W1UISyeHO0EXezhkt1kS7zeMhmNOz84AKIoMVVpcX33k9OyMLIkJA5/7+zvCIOTNm9dae/H6s+7nNKFUt9ttME5KqQZ97nke2+0WpVQzxjV4FCDLcs5OzyiriqyIkEoxmU5ZLpfkedGEZt1ul916iWVrL+Knn37Bd31evnzF1lBaTyZjTk5O+Pz5s1ET1zCJJNYGd5fvyfOSk5NTfM9nNpsRRTuGwwGe52sIRNhltVrT6XTo9wf86U8/0ut3ODubMT+dcXv3mTTNyIuCZF8S+NqA1A8/19XtPUEQNKK8SZIghGhCoslkwsPjgjjOyfKMMOyglOLz52suX7xgOByS5wVxrHsqPU+TT15dXbHfR40wDarU2EAzH/M8Z7PZIKXk1atXlGXJdrtlMBgYuu0IgJubGwB8r4Nt+7x69YptmbPZPBIGri6sCIVlC4oyo5I66HEcLUxs25YW57EshCG///+bM3suRdNea3qeP9XJaP9ucdDNsG0b4WjVqLI8aNc+XTfPr1H95uFn08CuP0wljYxcqxqq0zuqMTJPCwDykC8TXxtrlPZ2K6UZNCyFJnFFYVOnwCS20yJ9bMWZX+fMkEhZIpXOA7U9LSE0VsScl1rOqrkRy0KW5fEhn92++SWr50PSb3l6z72nlFZVdh1HQzAsi6ooicuSbL/H9Rxc09lvWzaubWmQoi1wPYfL6UtsYREnEZ2g03B0TadTPn74wPv3P3Nxfk6v3+X1y9f8/vvf68qirPCC8CtvpJ5sWs4uahDq4/G4qfhp4wau7eLaWvj2cbHEci2WqxVZnmtPrShwPZ/AD3hYLsjygm5XG5nxeMx+v2/yjtqIHLRCHdeliPZIpUiyFMvWidRKSWzXJUkznVTPCpyhx8uXc66vvwBQVSVxHBlVpJj5fIZtQxC4vHr1mn/9139hu4lw7JD1Zqs7CoKAOE5wHJvZbMZ4PDaGvs4JScIwMA3gPbrdkH2cYtmaaiov8kb9XZYVXuiCUgz6fUCyLAoWj4/s4z2D4YAXLy4QqsT3bGYnUzQZZMpmszY9mQUfPnxgOBzgug6r1Zqq0gwgYdjFQjCaTNltI0ASRTu6nS5h4NIJe2R5wvRkxnK1RipBXiokHrbj4PoeXl5SCgeVVSYgEo0kXS0UIOuE9pOuSW0Ya5eoLdBbLzbdLiWbHFEtpCLrEM54OwpFVeTNnLNtG7tRWjpUE5VqKairxtepo8RnF6as/1Y7Na3w1zLMHpo63IDkD4tSO391Olw0hzS/t6qtQCYlqbQ04we6NVEorehUSollOnxqvrd6+8qYlUWKZSlUVVBUBZYA17ZRNc11qXEw3U4Hy4I4jjUbJ+iw7Si5WQ9qG6fy56qR39p+LWd3yAvUmoH6tSNsXGxUJanygn7YIcsSqrKi3+9pYKcj0MrLiiD0mUwnmjFXaQ+uPxpgWxa5GZNXb1/zcH+v8wdIHpYPLJYLsjJnHyVMpidstjrR3Ov12O/3nExPWG80Yv5xseTi4oI0y7mczTXItazwPJ/RQIdEX+5usGybTqfLdhfx/v0HXr56SVVVhE6HUuo0hW15COExGs84iTLWiyVJkjCdTon2EZuN1hwoq4qw1yXOUvbRnqwqyasSZVsIz6USUKDY7RPSZIOsBLatiRk9z2F6MiLseERRxNjuMxrrPkzLEqRJSZ4BysN1u1i2j7B8ihIWyzWWEYo+mc/xA79R2smzmDzbG/BwjG1JXNdGGqm/xABvhcF6Cc/DRbDPC/IiRciKLNnTCXwuzk85Pz/F9x1evbzk4eGO/W6HVVkUVU5epMzmE77//i/x/YCb21u2mw2zk1PKsmKz3NEJQzzH5+HhPTdfrnn16pIXL17zxz/+gFKwjzOur+/JixLPD7E9TfBZFBlZURGlKXmVYjmBsV2WYR40WC8sLGE8FGolM+2NgBH6ELIxc5bSgNeaALU2PrYwFDpKE3cLpel3UJKqgpof0LKENgIG4ynQQj1KliihxVqqGsRqmpgFYNmHsFQ1a8sYYgWFlMYQHaQipahbmkyzvTJ90QITLLYLhvpnJQ4GVGfBLITlUArJXgqKQiIt1fCkCSWosFFFacal7tfU21fGTFYFta6dlKrpaZSV5kZCged6jEdjPM9luVyw3++RqmzAbE+OJ79G+z5nwBovq/Xe12XbP19YsCzb0CYpqPRTxzKPBKkqQs9DlTm+53E6m+H5HqvVkjRP6XYDXrx4QbfXwXNdKin54YcfyE1/Y17kTGczlqslf/8P/0BRFlxdXRFnMb1hj4Flk/Rz9vuY29s7k+jXd7WLIpQCx3EZjzvEccJ83ufqSpMlPjw8cHp6ys+//Mx4PKbT63J1dcX9wwPDoWaFXa5WeH6AsF22j8umTzPapzw+rHAcX4NMww5CaJqePNeA1k63i+25CKEV5u3NjjLNdVXKslAWuIGP6+uc4VpssWyLXq9LGAacnZ3y6tVLfvjhB4SgycNFuz2r5Y6yACECbDugqjQwsz8ZkWcpd/cPOjQ0IaXneziORVkUlJUGmnY6PoNBlygtSHa6TcnOTYuRpQkPKTXq3bdtJsMZvTDgxx+HYNn8L//zf2IwGurqZRyTZjkn8xmWBdPpGFDE8b45/2DQpxv2GPRGbFZbPMdnt425v3vg4uJc48c6OiQfjSbE8Z7HxXv6hcS2NZ+dQ0VeVWz2MWlZYHsenrIoK4OI5+mc1iyxAK2EvjYXzU/q/yvDAKinsTYstftkH9hplXlfCNGIbdfGUyoNXaEW9QVsy0JaNtLSD+86ZBRKNOc+Yupp7qA+R9UKKxVtp8IwxJp7r1sKqUG76qiYx+E9XTDRjNAFFpWUFOb+bfSc1cZdIXCb9inZMpJfGzOp2S0PFQmNU6l/L0vtPmvci969He49V3lsJ/5rbEz9+ePP2S0b/lwF59fyZ0q1vnElG/QwSrv2jmFW6HZDut0OWZpSlJrqudvvYDuCm5sbpidTyqLEdmy6HU0L/fHjJzzPpdftYwmbwXDIarni8sUl+zjm/v6B4WDEarXCcwMuLrSgxnq95vT0lE6nw3w+N3kvjWkTQkuuRVFEp9Nht9uxXDxSVSWD4cB0IgiKMidJU6SU7JNEC/gqTSypUOzjPX/6058IgwDfdSmyDMdzWC4X3N/f8+rVS6pKJ9qHwyHbaIdl2ea70Bz5iMN3o71omzjWVbyiyEjTRGPXyoKqK
pum/LVRlnJdj7zIcD2POM5YrlZcvLggDHwWi0fyPGexWJC/fsVkPCTLdL6p2+vg+y69bkcLrKSSTbJGVJrc0zUsv7ZtU1YVnTDAsQWLh0csx+K7t2+xHP295kXB3e0tQghOz+a8ev0WgWIfbaiqku1mw26300SSCqPdqXUJAt8nDDu6GONZdHohVVWSpCmj0cgwrEBVSt0Z4DhILAoj6hJFeyoEjhM0eZxD8HSIqFq59HpFP/3A8d+f2Q7HEE8S4HWeDo5R9YfYtA3laEOsnuatnr73VW6r/qgxnO3q5KEQoJpbeg6C8WyaSRsarQwvBFLZ2rnVrJRUylCsCoESRtlAHCzvM8bseBCOBgxdiVqv11iWxXq9Js9zPN/5qsLSPmb7Jtr/9P0fgLCW4Nlj1NuvhaVfFQTMsSzbxrUsrW7uVKYsr6mnXV+zVXiBy3qzJAw7DPoDQ3Ht8O7db5BSUhT/L23v+SRJll35/d5zLUKnrCzVjRlMYxeKi13akmb8w/ltaYs1A0jjADsCMz0tqrt0pQrlWrzHD8/dMzIysroXWD6zrKiMDOHqXr/i3HNqptMpb9684dmzZ5SF6V56noclDfbr48fL7ndjJMvlkqZp+OKLL3Bdl0+fPgEMoz2e53F+fk6SJNzc3PDp0yfCMKAsc25uq04f02W5TA1tjZCEcURd+xwdnfDx40equibNDNe9JSWL6Zxf/tmfYVkWZ2dn9DUz3w8IwoCyKOnJ621LIqXBgDV1TVFk5JnpLJq6XkBVl/Ro9n7Gcrs1nPtxx7p7cXGB74dcXt2AsGjbhu12Q5alQyrvug5Vh7rvGyK7uoee5zEZTxiP1kTbClvWeK5hNJGWKR0UVUWRZybtbBre/fiWvMz5y7/+K2zXoa0bHMcjikIc2+P1j+8YjyPapiSOQl6+/JJ3796htSbZbNluU7Lkhu16y2J+jO+HlGXB1c0K2zETGoEfEPiB2Ubfp6prQ6LpB3hBaIg+fZ+8qGlaw+Hfto8Wnv6nrsfwYPtNg4HogbsOq5QPRwvvgofD33eozLPbfLjbnMczqM81RETnzIS0kcJMiJoc0+rSb/P3dgD+9j9m/STObH+5rtuhyA0dcp7nQ4F5V/1ot/uy+9xjXn84WDuO7FBEts/93/9/35HJrrhvWaYR4DkOEkXo+bSt6WTOZjP80KDR02VCWRUsZvMBxDmZTHj79i2e5w16j+PxmI8fP/LmzRtevHgxkAv+6le/Io5HgMX11Q2r1XpIu7/77jvm8zk3NzfEcTzgq3qKmp6R1jCiOkhhQLyeZ+Ye4zikaSuyrMD3PfwgYD6fcnV9iWhMjWS7NTOFbVUzGY/5In4xQBKCIGC7XVOUOVVlmhKWNE0PYbT9aOuaMi9Yb1aIDgyKUIShz3w+pSwDbm9vB/HcHptnWDMCpLRJs4L3Hz4ZObqOw9/cnMzwNALW6zWz2Zg4CtHKgGQvL/OhdhJHMZNxzYYEoTVNU5tItmkQQFWUWLIjcswyjk9PODo6BgFVXXF0dIq0LGazKWmWMIpCri4/kucVz58dIYXFarXi7et3LG9NFN1Dd1zPxhEOeZlxc3vDs2fPCcKQ5Y2hF5/N5rx69QNBqJBVRaA0theabVctRVGBbLFs/7M29G9dn2t87T6/mxWZmtldgNK/7lBQIZD3bHC/tt1H8PcdmRic4E/58UNd3iEQkmJIn4UpIRon2ZXpUD0F904npFsHx5l25zDNY5/TmlZvnyb1fEu9FmE/TL67g/1n7E7475+Afaf1uRrbfg3uofPtU0o5cEHZtt0xY5j3GyOUHXOpAVRWdUkYBeR5znJ5w3g8Rko5OKCLiwscx+mIBUOqqhrGcPqa1/HxMR8/XpF2CkqLxWJghXAch/F4TNjxj52dnVFVFUVRDIPQJ6fHqLZCqRbbkpyfn7Farcy0gGsTBCGN1liWTRgF/M3f/DW3yyVpmvLmjYFeqKo1qa7rorTGcezhZlNVBrgaRSGjOIIko65bBApLmlER27JBmPG09WqF6x4BYoBV9HOVWpsh8bKoaRoDQl6v16zXK0ajiRna3m6wLEEQmMjm6srgwS6enFM5DWVZUJUVdVV3DsUMf0/HRoylyHPapkEIA+vwHIfKdUmSDU3bcnFxwfnTC2zbYbVemaH1TcLR8SlV2TCfLRiPYxzbZr1e8eH9J5RuaVuN5wcotaSua47mR3zxxQuyouCqoy4/OTmlqVs+vP+IFALX9fA8nzCKaZUmTzOKqsULayNeIgS+5yFsl6p+cKn/T12H6tD9Oekf9yEbSt1x+it13yHu2qzpiO7b/33w7u5EwoPITNNF8g8Dkd1AY9+BDqvT1dXa6AeInQisJwS6myC9fwwOsmbcKbKYTodxZvfnMe9ybvOBbdsOtDP9Qdo/WP2O7B/w3Z3cLY4eOhC7fzvo3ekm+m3TkpayPziG1tixHUajMUIYgr6yNgyjQRgAwojJnhyzXm+68R0bISTbbcLz5887JL/GcVxAcHHxlPF4zNXVFXX9gR9/fEOR14zH42Hy4OjoyChBKTUYds9+GkVmSNt1zZhRU1dYlgGeIgR1XeG6Ln7o4/sBZd101D4Nk8mUm9trRuOYly+fY1k2H999JE/zblRHEcUR4/GYkT1CoweSyLIokdyy2aYmDbctcwkrhdvh8/Ii7WpMJWD41PoRICPg6yOFjVIQx2OCwGc8Hpl0tiwotwWjOOLli+c8f/6My6tPbDab7li0VGWO7xsacHODaHC3Ja5rdARk58TGkwnjybgrLmt8FeK4bgfrsPj40ZQEomhEkmVUZUVV1WbY3XKYTGa0TUuaJLiei1YwikccHR3huS6hH7JNNnz89IkkywnjEUHgk2Y5WZYyn82YTaaGp61qOlX7HIWxE1U3XepssVMS/v9l7Tqp/TTx0MTA3fv6LqexZaV018G/X/bpJwQ+V5vut+OhQwMTTHT8gAe2W2s98LQd/mxpusBSI7RESA26I87W4g5GKxRG4eUzNbNdoKwZTO1bqXfgWfMa4437+gfC/myta3eDd9PQB87pEVK33YPy8ADcPVqWYSywLQtrAPKaGovQiriDZgihGY2mzBYzHMehrIuuSH3NkydPiCLDp5UkKZvNluXSREibjSEb3G63fPXVOV999RVv3rxhPl9QFCWO7SFDuxMcyWia7cqXuwAAIABJREFUhpOTE16/fj0MvINhjPA8jyiKBi4tz3OYTs9pmorJbEZZlkwmBgJRVsZAXcdhNp93bBRGZ9IPAqazCWVZMxlPkJhJg7woEFIYvYHQJ45jIEGpgMViTtu0XUNHYgtBU5ZkicCZGlZbfxjOt9huN1xdXbHdblFKcX19jeO4HB2d4tguQRAwmSg224SmqdlsN4BRBg/DgJcvX/Lu/Tta1ZIkCdPppEu3IU1zQGBJm7ZpKNKMqiiQQhDHMdPpFMdzWd4aR3p6dsZmszZ6m+MRQRRiOy5CCoI4Zjye4Lg2220CypAm2rZLEIQo1dI0ijwryPMCARSZIa9sleLs/ALHD8mLkropkNImz0uSJGW92ZAkCVme
o5UgiAL8KCLJctKsoK7b7vr9V44y/cy1e2PftblDAcKdbdwNr0M/1vSQ7FQIQwS6/x27Nra7Hfef24n2uB957dr7g9r27hIGxgO7DZROWU2beUxpWcbhacEuCebBNLOqqgGRbjBbYMk70YU+gtplou2pSvYpgg6diEOOrF+HCB13D+xAX6PUwCTQR5I9p77jWGhNB1y1CV3PjDAVGZ5tdwpCaqBzjuMYp7L5lH5is0mwrEvG4zHbbWI6hrohimKSxHStFosjisIQAYKgLGscx2Y8nvDVVyOWy+VALdOL9jrdsHuvatTvZ57ngIlAJtMpQjRDShBFEdPplO12S92YY73eGGqh6+trVqvVIO0WRRFZeg0wkCKGkc9iMTc001lGVRY4rkPgeVjzGU1VkSYJVdVQlRnWZISQpkbhOA5+4BIEHp7n4jg2s9mM09PToelj1JWO0Uowm03x/IDb5YrN1qikG4bdnE+fPjKdTvnlL3/J9eUly+WS46Mj/Dgmz82NoW1bw/e/WnFzfYVEc3JywosXzwyfmWoRtiF6FJ7Ntixom5bJ8TG21uRFgR/4xGODG6zrEq1bClvg2DZKtRwdHQ3zmEWes1qtzfxsd10tFnNG4zGfrpdstgllkVNXJWHgU9UVSZIihSQMIoqiIs8yFAYM3TYNlrQ5ZJ9AJ+RrTNzoxZpMYTfd69OvQxCn3fVoikafRt4PCHqbNHWou5rZbl37Lsh43GntbuuhrMrYK90Ylbo3PbT72JejDu2XNliPToqvP1g9jMMU/oUwKWffhe/XwXhvoB7hITtG7333GWbvDtjDAv+uU3qMTnf3wB06ALsHbd/L7x6Y3gkLAbpp8FwbIQWu7YBqh7m12WxKmqZDhLTeGOVw3/cBA02Q0uLo6Ji6romiiDwvuLq6Rik91NIcxwBJl8slz549YzKeDWysnucNA+695FpfZ2tbsy09SWT/A4YZ1unIBbfbLdfX14RhxHyxwHY9bm4ME0TTtJycnnJ6eorv+7SNoq01t9eG3cH1jlitVgSBZ4bI2wZRm5pb4PuMRzFxGJCqjLoqadsarZ1uhtQZjnOe59ze3gwjWr0S12Qy4cn5E6S0KQoDvWiamjiOmM6mzOdz3r9/z9dff82TiydmX22LzWZD1o0fua5h34ijiDCIuL5echOuaFrN0WLO2dkpYRxT1hXKgLdQluTJyxddqcPi48cPpGnGiXsMQvP+/RtWq1tcx2U+nzOfThFCkmcZYRgaphLHQQhh+N0E+L5Hq1qWqxXvP3wysJS2oi4LqsjQNVVVPWQrCGibhqaqaOsWrTTCgqauEHbQVa13rv8+JRSHG1ufrwP/vPVYxKO17sxW3DkMHjYNhjTQ8R5Ee7vNvMeK90ZrQ9NWtQEHc58hZL82d6gR0BX2TGTXdYV0V4vrBULbDsNmTsO/oZt5KKoyz+l79bBD3vtfsz73fq3vGGz77bqL0hqUaii7ArPvuPi+181KlsOAd6tbg5K/uSYIA05PT8myjJubG7IsGyTiqqpiuVwOw8mGUrnl17/+NdfX10ynBmO2XK5xHZfz83O22+2AwPc8b5A96424n93sI1wTMRSdHQia2giUGPYNgwnrCRVd18P3I5TWnbBHaQrXp6dY0iFNtswXc66vrmiaijiOaNvadEN9geP7xFHIZDSix2vWVUngLxiPRoxGI5qmHGqnSZJSFAVZljGbzTg7O++G541U28ePn6ib1qT50kTJRVHgOjbr1ZJPnz51s5MwGo+ouqaHgQ40RrCkXBP4AS+ePaNuWs7Ozjg+WuAEPlXbMJpNzYUsTT10mxi2EW80wh+P0RLef3rHp7c/kGcJIPj46S2BGxAGEVmad0IsR9zc3BinmmU4ljRizGkClk9WKsq6QbeKoqhRzYaiyCmLAsuSeH6AYzuGKEIpqrrqaHEUTatxHPHAmfX/33cgu8Y+DGT/BEXQTwUEjzlGcQ+kehhdcL9+zb3ndtEK/Wvu19zocB33HeGhJsMhxztsSxdxmfJV95k7QZjq7yY/Bc04tHY9+edfd9/L779nv6Z26HWf3ckDJ6nH0fTOLPCNuG/b1Kb+UuSkWYpjWTiWQHtwdHTEer1mMpng2i7L5RLf97m5uWExnw8DyFVVsdlsCMMQKSWj0Wg4KScnJ0wmE7Is48mTJ3z11Vd89913FHmJZdkURcF2ux0oaIzwrAFg+r6hqOm7qb0TzrL07vRoMehEti1YtkVZVDRNSxCETKczPM9ntVpzdX1FXTWEYYRju5x3QNwg9Igio8ReViVRFJi5RNvGcz3aqmE8js3satMS+D6jOB7omm3HYbtdM5mMOT83dSqtDYXOl1/+meEFu12zXm8JwwA/iKjbhqIsAG04xppmgPKEYYglRCddZ+iCBuUmIVGtoWQ6ms1ASk6OjwmCgEo3KDReFIBl0QpBEIUUQnFzdcn7Dx8MyFlqmnSFbStOTmeoVvH+3Qe+//ZbxqMJZV51IGBzXpfLJVma4jkmWs+LglY62N7I0D8LqOoGrYyWaK8P2tOWrzcJ621KkeU0bYsrDc2zELvDO3dGZMz8jjlCcEfe8HMK5L0t/JyAY9++DtWzd6Ou3c/cpbjfr3HvihPt8qj1KSr6fgS3H4HtpsH76SsMASRguqPmNfvb/DOd2X73cf8A3b1mDz2801XY9b6HDuBjKeTwed1r98G2++/vv2e3dterZQvV3qlFKwNacRyHo8WCyWTEn/50NagDrddrnjw9x3Eclss1aI3reMSxSUOKvDTDxLbNdDIz8I3bFefnF/zqz79im2x58+YdWWoodk5OToYB8iRJyLKMJEkGTvq+EeB0qY7WJqr13ADbgqLISJMMjRGjtW0zWrXZpNiui+v6BH5IFEe4XkAQhl09L2GTJcymc2azGVJqTk5PaeqStq04Pl4wjmPqpkZogWoaFvMZ88kU1Socz8Af1psNjuvgeQYga4bAI9q2HWp/UkrCMOTrr7+lyEtevvySsqrxPQ8/8PB8z0Ar2prpdDIYQtO2bDZmgFu1iroyNT8pLI5PF0zCEWEQUpQlfhwzGkXkTY2qKsq2pqxKbjdrPl5d8frtG96+e88m2TIZT/izL17wi7/4JSeRRWAbtaw4DojDiDCISbYZ8/kMz/OGayXLcvC9LqI0o3tIyxhlZ0x1rXBthdcN+GutTRc0CKnqlqppsJsWaTmUjXHOPSp+1xbUznUuEIOYbu/QdqEQn1ufc2a7kdMDx9fjwMT9Gen9aLAnYd23s12ndihyM+ms6lK/zwcj+/7hnn8Zjr0p/mu9s/GDw/wfcGb7B+jucf/vnWNiJzTcO6iP7cju84/dNfrX7O/8/vv67a6qyjgLS+J6Dq7nYTsOTgeTCMNwGAB3HIe8NNHBhw8fODs7YxSNTUG3VcyieKgfGlrnasCZbTZbvv/ue2SHjn/31ihmG2UdQ8E8HptOZBiGg5Raz+MFBoAcBIFRd+oe67pEStuMiglwHJfAjxCWxLbNTJrZJk2W5li2zcnJGScncHtzy/XVLY7j8OzZM2azMZvVkuvrS8OC2kV
38K3awcYesx9zKaeunLiqJ4JbsqIWC95KaJdvRV21xpSN8U0FcqAUPVSglOZUFT7fkkSk3TNPGSchu4rsOgL0RYVZMwhTx/5dPWaDaE5mFL7FyaJqzXHmEYEMWRZBFOx5ydneL7Pm+9dQvTMul02kynUxaLFVf3rnHnzl0mkwk//elPmUwmzGaz2k9e8gRswkASw7/++mscW4KIXx7sszUaMptd4K2W9cVbFDmuK0JxTdPqLMh+vy+pRHnOyvMksbooCAMfRVUF/VI1kjQjKwoUTRN0TdexbRcUlbUfkuWSrDOdzYniuEZl00wstcfjKUvPJ81yQTKjCN8PSTM5raMkISty0iTBUMExNVolDabZaIjKIEkJ/IDlYlWH+cr7qdQ8rdHmqMwszWvZkb8OaTY6vH3nHZ4/38cwTJrNdomezdnd3SYvEpIkwA+EJ2bqFq7TQFFguVjWBaVavG9ubtY+e0VRcHh4yGw24+zsjDiOuH7jGlmWsVxKrsDaCzAMk7OzcwnqbXcwDLPs0EPm8xnL5Zw8z9B0jYbbZG9vD8uy8LyKxJ2SxDFZmpKWcr7hcMhgMKDf74skab1mvljQ7XbFwDCOcRoOtmmW3ZAlzrxb20RJzHodoJo6puXg+Wu8lY+iKLSbDoNeD00T7aSqiGZalBxKTRp3XQcFsXVSy4nK81YYpo2q6iWYF11CzSUYuNoXTqdTDg8PSRIZ84uiKCcnBcuypbkpY+s6nTZN1xFTgzgBpaDpNmm3W/z6v/63341mGobxLe/9y0jlZSeM7y5W//+KWV1pLv3bqhOrpFV5nkP+7bT0SqsJl3hu+evBw/JtFC63iJflVKpSmjkWrwexVHvA1WpV6ihLbk75Rtm2zc72Nu1mgyjyOT45YTqb4rouW1s7tQ2403AZl75mjUaLp/e/JI5Tbt9+G8d2mY5FkvSXf/mXfP755wwGgxpcqYi2mqYxmYi/W7vd5s6dO+R5zpUruxSZOHsOh0PSNC1VBCNc1+Xg4ADbtmuuUJUo75dxebouXMFQN/A8YYkrmkqaZ2RJguu6KCioqoJlO6BorDwZpRRVJUxilAKiJOP8YoLn+6AoJFnBcrliOp2LRbghMje9FEyT5WiqguXaNB2DTtMuw4a1b71vsnMS/pJR7oCiMC5/lhRFVYmjmGkyxXFc7n/5DefnU+7cvktR5Pz+97/nJz/5Ee++d4//+B//L2bzC/7Vf/FLXr58Qb8/pMjBarTodvq1W8RisWBzc5OrV69yfHzMs2fPUFWV3V1BMj3Pqw8Gz/P427/9W+7de4eNjW22t3fw1yFHR8csFgs6nU5pdmABVj2FXJb1pOkrUwXTFKCkOrTTNGW5XNaSoizL2Nra4uTkhLXv1xmbIA40INdsnudsbAzp9fsMh0OW+55cy02XdqtPq9ViMpaCapQopVK6vGiXCOxyf8lkpmgqhmXhljs5VVWJ04zxZEEYJTV94/LP6DgOvu/z9OnT18CNVbmP29raIsl1Hj15ymQyEYcSx2E+nxMYGoNBrw41FpfjV9GU3w0AlEv+qni9+f/9KXpF5bv/vQXrz3VmuoZyicpRUzre+Pq8JPNefn2vXpvyWjG7LDp/8+d5HU0trbcVYSpXn69m+jRJUQrZd5HnzOdz8jx/dSGvPbzVAtPQaHfa9Ad9VFV4VXlRMOh0ODh8SafbxXEcHj58iK7rLBbCgN7c2OLk9JTNzW0+//xzjo+Pefr0qehV45j1es3u7i6dTqeWsty8eZPT01MePXpEp9Nib2dIu9Umy1Pm83l9+GRZVrsUVGnak8lE8hrLi78owDTFr2w8HkvYRSFdl+8HaJqObhiEYQSKj6YZWLYt4RzlbijLclhJaMZ67aHrBkmSsV6HRHGMYegoiqQNlXYFqOQUGTQbDXrtBoNBGxUIglCcHYoC13Vw3aYs0xerEgiQMF7HcQQN67RZLFekWcZ65ZEkBZ1Oh/l8xWeffcrPf/6fs1jMefnygHb7Lm+//TaHx/vcv3+fX/zylzx69A2npxe1GaXnefU9cH5+LmTo3V3a7XbNLVMUhf39fTzPK4tNVPPP/vjHz2m12rx95x3u3btHt9sDKt0r6LpS2shr6IZKEATMZ3MiP+L09IzZbFYu6u167XLZdkv802IGg0Fd8NrttnAw04Q4EiAt8tclCbeFWpo7vhkQVBXOoLSa8jyPYa/L7t4ejm2zv7/PfDZnOBywublFGAY1D7TIJaYxjmPG0wmqagLUB2dFsfJ9yTGoPue6bs3Pq6yJ4jhmUf7eG40Glm0yn085OTmh3XTp97uMNkTKNZlMiOKwvpe/VcyqJ76cY1k9asThTxQk5dvuQa898j/xtVBOq4ryWgcoDGFq0fh3vQZ5rZS0kNf5blLUvu3TdrmQVcUsyzKU0n++Yl6rqopaqPVNFQQBwVoukH6JsIjT7QJDVdjYGDLcGNWnVZIkeGthvetle+04jkiwNBVN12m325yfn/MXP/uLmiZQFAVRJHB2pyO8sopsOx6PyfMc13U5PDyk0WgwGo0EVTJ0Judj0jRlc3OT9XqN53ns7u4ym81Yr9f1Dma9XteZmKKsEPmWZhhopomhquS5TPWW7aDrBouFx8oLaLc7KIrGOpDvEwRS0Bw3od1qoxlOyVVbslx6uA2Xbqdb7hFkr5nnOVTuxeSYuriKGqYI2T3PK7uQouwMROheFbM8L+h2hAOGojCdzUiShOVyhYJGvz/ENCxOTy/46uuvuHHjBp9//hmttsuVK3tkecxkPMFxLN55510m49+hqXLDn52does6u7u7LBYLZrOZLODLDvHs7IzhcAhIYRmNhiwWU07P5vzsZz9jONzk6dNnzOdzxuOJpLKXfMgwFONQw9DK7qdX25L3u2Ls6XleXVhM0yTPhTRaedlV4SAVjali6FuWxcnZKZPJRAJbHBvHcVBVleViIftRXcjmSZIyK9PW01TQ7Ybp4LgOdqOJZrwyi1DLw8x2XJarFd5qJQRWx6HRbmOGIfZiSaczKMOyXZrNJovFop72fF8yPjc2NlAURdDaKKpXH48ePWK29Ll152263S4HB/vMZjMsy6Tb7ZCWsXvy+4tqUjp8RzGrHGIvO8Je9ie7XAS+6/HnGP9/7nGZXHtZnF65ZdTPXRQo1V6sthrKhSpxacysAIPve7xpQVQgyK1WyrMsy5KbPJcbT4S3CoNej+FwSByJ7U4YhmU3NqDT6ZIkCcfHJ9i2jePICbVcLsuuZ8LR0RHNZov1SvyiNE3j6tVrqKrCkydPmM3mZd5hUSJsJ1y/fgPXdTk9PcXzPG7evMnx8TEHB/tsbW0zn8+ZTU7Y3h7WF3jV6gMlAVm6rzAMKApBh3u9rkjREOLuarFA1VR8P8CxHeIkpciLMjszI0lT/CDAshySLGE6mTGZTEjSDNN2iVKFIBKOWRj4RKGQktXqQ4WgjOYzDR0NA9PQsXSdPM9YLJf0+33s0rJZUZQyRxS63V4t3k+SpO42Z7OZhMlcXICicnZ+wdWrN2k2m+Q5dPsdHjz8ih9/+BN293ZEFdHvMBqN
uLg45fj4lNFoxN2773Cwf1TLh6o1wmUfL8MwaDabjMfj0kpHro3KDcJxXPI8Q1HEzFHhgiRJ2djYrHdbIg+LUFUwLRNdV2q502i0Ubu5Pnr0iOl0KgaflvjhVc9fjaBAaTfkC5JYWqWHgY/jOFiWWVugJ0nCcrWi0+9hmibztcdq6TP3lnWoTJZldWr64eEhlm6g6XoJRBScnZ1zfn5G6AdYtlF3eKZl0W63sGyb1UpUHpVUqtfr1QL16XQqpNuyePvleFx1ndX4fnh4yMXFOaZp0mo1GJaE58PDQ/k9m4aYTpaPbxWzNE3LG9Cp9WFBENRvanVjfG8x4v/7mAnivHE5EKX+fFHGtr8RllJJReQXkRPEEUXxKqegWvRfflrhu303182yLFDy+gKtHDzIhRCbZ4WAAY2GXAyzRXlyWAyHA0nDcYTTYxri5+6tfIIowjRtikLhzp23OT/vkWUZ9zbeJQhDbMtle3uHT//4mRgYmjqKUjAYVO4ZQyzLYDK5YD6fMhgMuH37VtkdDHBduwQbIgI/wLIdVqsV5+dj2XUpsmeTAq3LfklRMM0URdEoCoFefD8gihNarTZ5DlGclgaJOf46oACSJEPXTUE/Q1nGC5fMQNdNfD9kuVxjWgZZmmLqGu12m2bDxXUcsiQhQtBt27RQLZE7mYZOGAQQys2kqVp9A8clsCA7obWEuvT7DAayG5xOp3WHEcVJKSlb0mg28f0QioLbb71FGPncu3eXP/zh92IJtF7guC5bWxucnJxz5cpVvJXPfC6HzHq9Zj6fc+3atXKflbJYLOpOdrVa0e12abfbpclnE2+94OJijG03xPE1Ezt4KcjyPSq+nO97TCcTslzE6I7tcMIxtu1QFEU9qlmWuBZX3U3lGmMYRi1wVxRxiVFVsdZpt9tiw1VGwVWfD6odY5yU64uwtJLKJIw6S4mTFNaBrFzKnzXPZVQsyvtC1TWSNGd8McX3A0zTIPAjWu1+XWQr+VOz2aRZcuQuLi44Pz+vZYi6rtdjerfbpTca8uzFAScnJ0BBq9Wm2XTrUJdHDx/gui6bw0FNwIXvUQAIQ9uqK2V14lzeY33fQykKiss7Li7NnVVT9Z1fKZ/NLqGUl6VS1S9Q11636q7a7aqYSUcm02hF0q3oJa+Nrd/RXRaAZZqg5OQZtU+XoKfSIe7tXKHRaJBnGefn53ieR6vVklMvTtl7+yrL5Zyj432MkvpQdQ+qqrExGPD111+zvbXN5sYm09kM3dB56623+Od//hem4ymnp2dsb4vrwsnJKZ1OB9u2GY1GrFYrtre3aLXa7O/vc3BwQLfbJU1T0S1uvE0QeIj6QRWfrDgpL3yzTu9uNBokSUq3a5adrVbKnUwaTVWUEaYpiKYisXNJmpGkCWmW45SBL3muUKBg2TaO26BAJ13L15imRZCKGqPZaLKzs0Wr0WA2naAoAuo4to2ilHmqiUKUBFiuWfqkZbWpQPXzV5SVyoiw0WgQ+CGLxYIwDGk0mqBIR2JaBs+ePcHQLU5OjggCMSfc3t7igw9+yD//8z9x9doV3rp1mxcvngknLMnZ2trm8FAW9hWCWQWJVKz62gllNqvHqfl8Tr/f4caNG7x48ZwkSdje3mYxX7FcLjk/P68Px7293bIgihVSXhazXrdHFMQcHBzU1/h8Pufo6KhOQh+W9j/VxFTty5rNJlZpW1/pl4uiwC5zFtI0rbuluAz9zXNxj64W/51OB285Y7XyaLouw+GQTrctHWUcA9JYtNtdFBUs0yIor6dur00cpbil1LBqhqpONAgC8V1LxOJ9WGZtRJG8LwDT+Zzp8im6abO9vVVa0FMv/eU9btTAwOUd+LeKmcRlBfWIUqW9FEVRtux53aVdHgmrHZWmaWiqVhePLKvMDoXcWgZq1kXl1d8pi9kr4KEafapRt/pT13Vhk5eJ1pXYPUni8gJ49f3z6psXrzpGoWy8csGtnAdUBVm4OmJcV+2sFEWh0xW5BfnrWQatVhtdF0PK9XrNl19+SbfTYdAflSLchPl8SRjFQgBcB/S6slPIsoLlyuPnP/85H3/8TzJOuA3u3n27XpBub2+xWCzY3d0hjmO+/vortre3uXr1Cp999hlFkXP9+jWuXLnCo8cPePHiBbZlsLe3x3CwgYLGarWiyCXlKE1yFDRsy4VCLvZer1cucgs2t3Y4Pz9j7Qf0LAdNNVitPOaLJYPhENO0SFJ5XxdLjzCScJg4StH1TKzELYM4jAl9H10RrpJt23UArQJYhgj4TVOAAFs3UNQCGwO35VLZBFUdiYx8Qk6+du1avYQ/OT5GVaXgXblyBcd1OD07FxuZsntpt5u4rgNltuPZ2Sn37t1D03Rcp0GWFfh+yHA4ZDyesF77vPvuu5yfn7O9vY3rujx69EikPf0+juPUAvNer1fy9GJu3ryJouQEwZo8F7PG1WqFtwo4Pj5mPJ5Kwer1yPOCyWSCZRlsbmyyuTXk+fPn7O7ucnZyzvHxcWmPPuDZs2c1gGMYRpkIJZpIEYyLIubmrVsknsfjx4+xbIvt7W0mkwlnZ2cMBgMcx2J7Z0cS2dOEPFfwInE+dhwHTTXl3i/ADyM53FZLkky6OsOyUMr7t93p8OWXX/KTn/yYLEvZ2RNg6vPPP+diIpND1RCNRqO6mPV6PSaTSb17nEwmjEtplGEYWKV5ZZq+ssY3TRPdUOvCJSJ9n9M4YTKZfX8xq9CuypXiskbzcuLSm5SIqrjVf5a7qrz6ekUR/6vSt6nqhKqeqPq7bdv1zq4aabMsq2PYL/sXFUVRz9zic/bd1tyXH29SLV7TjyoKDbdBkkZMp9O6I2i1WnWkmrdcEwQBSlHgOA6u00DTNIIgoCgKtkYjwjBkMBwSxVG9wN/Z2ZFAjNId4NatWyiKwi9/+UsODg6YzWYcHx+Txgl3774NKIzH4/KGVDg+Pubw8GVtga2XC9y9vV2yLOPw8CWBH9Scu+pCqA6HKIpq8XOlj2s2m4CIlReLBdeuXau7NMuS2LLZYo4fhKWrwgxVE/twVdMpCgVvvS7De4X8mucZuqqiOyamYcrIbBjomkIQrAnyjDSRLkRVIEtTnEYD27ZY+x6aYXHlyhXyosBbeZesZFQcx8I0LXRd2PXn5+e1Zc5oNGJra5M4kTEwzXI6HV3GE9uk2+3W4NN0OuXRo0dsbW3JOZcrjEab7O/v03BdTk5OKQr49a9/zb/8y7/geR6dTodut4vvS9dX/Y593685VGmakKQxjiN7ysViBigl9SWuO4kwDJlOJ9y8eYM4DonjiPPz8xoZNXWz3gdW/MLJZEK/3+fq1auMx2PG4zG2bbO3t8eVK1eYTqc8efqU0zNRlJyPL+oCMRqNSt6awWq5ZO2HGLaNXlI9qnE2ClOWywVXr+ySJTGaopAkokIQOlSBqim0Gg2SVNLSp9MZt27e4vzijPPzcxynwf6LR5jX7boJ2tjYAKDT6fD+++/zySeflPuwixp8qhqnIPCRRAi1pqjohlqDLlVxq+79Chn+zmJWtdWVAWJlulgVtArWrVC+NwXpqq7
VBaLIpR1SNLUuFnXbdOlRXFq0VcWygmmVEmG8POJe7ggvc7Dq8vinlna8XsTe/BAfLfklVlZHcRwzDsdkaUaaZDTcBp12uyxwRo0ONhoNwkgSqeM0odlsimFi+br9krW9vb0NSGxZnuc8f/4cz/NoNpv0uh10QwqJZTdwXNnh5XlOp9um3+/z1u2bgvKZGqZlECchQZixWC5o2Ca6bnB+flF32ABxnNBoqKzXPlmWo+uKZAwoSkkItUvETbJEdcNguVpycTEmTTKKQmW58sTnX9MwLZs0zVh6HmtftJt6KmJg29QwdKvkxKmYuoZt6qSRWOLINfWK6Ow2G+i6hh/5qJpKlRkpaHG/JKCuarpMUR4kjUZDtJ3ITmo+F9H61tYWz1/s8/z5c168eEGn06XT6XLr1g3yrODo6IjpdMLdu3exLPESazbaBL4AFdPZgvPxlL29XXq9Ho8fP8ZxHJ4+fVoXtOFwyIsXLwC4cUOi5R4+fMTm5pDd3bdKswOwLJu5tSxpHtIQiOGAX6KJYnd+cnJCq9ViMV+wtbElNtXlQVN1mNX1WB1Opmny4sULjo6OSjQ5YDQaoSgKOzs7nBwf4a/XvP/uPSzL4uDgRQ1OOU3pMhMKDl+eMF8sUBWDbqdT339JnpPmOUZJEBeSuU6UpMwWS3Z3rzAenzOdzXj58gjbsnDdBkEQ1LpskEmoSq5yHKeuM81ms75/6wlLU0XdUK6MbNsW1L9c1YRhyGgglkVpeWB/bzGrxN2XHSsu766qJWIVmVZBulXRU/XXXS6qru1yIam+15vOG/Aq3anqKGR8TFDy11PUq4Jaj7CquHiG5aj5pwoZvBK2v1nMgsBHN7X6FK081cnkNTfdpvysZWfTbLRQePUznpTI5vnFBe/ce4cwDNnbu0LgC4hiu0Ic3N3dpdvt8tt/+G39Ru7t7bFaLnj48CHD4bAe623b5vT0lOl0yocffkie5zx69Ihut0uj0eDBgwf4vk+n0y73WEJpiMtIOsuy6g6i6rjX63W9hB0OhyyXy5Kr1qHVaqHEMfPZnNlsIYaLukEUxSyXK3TDoIVCHCesvTVxkmDbDqZh4lgmpq5gGTq2bZVEXKFTRGGEWhpeghTRVquJoop7qaZr4mSbyu9j7a0piqLeSQZByHh8UQIWcg20Wi0M3Sydgo8xTIMgCOssTNl1SRGN45AoSnBdR4T2WUan02a5XLJaeWxsbHJxcU671SDLUj777DPeffddHMcRj/x+n+VyiaIoDAaD2ozRdd3aIHQ6nfHJJ38ACjwv5Nq1PoZusVp55HlRGyQcH5/w8OFD+v0ueSGuqo1GozbC3NwU5PP58+d1IcuyjMViUeZ4vloF2bZdA3eu6zKfz8kK4X6FUVTvqaQRkYzKte+jqSlueT0enp5g6Baj0ZAsSclfW2yLlFG6fomFW61W3Ln1Fufn59y/fx/TNBkOBuXeUCIKXdetUUzXdZnNZjx//pygNITc3NzE8zzm83kd5KKUcrskS2u1ius6pS25AGrz+bwqFq/Xrje2O7hTAAAgAElEQVRv9gqFuEyLuEzNcByHVqtVv4HVh6IIEpjUnvCvOrY3C1j1ue8CFFTtFaO3+rdSFHltrFV4PT1dxts/J6b6c49CzOKUvE5/qkZN25DRoeE066VmHMd0diQ2LI5jgeoXS3q9HoYl49BotCEX2GKB4zgUKAyHIyzLlqzJNOPw8IgPP/yQs7MzLMvi7t27tFqt2qf+zp07pGlaZxdMJhPm8zndbpeTk5M6tXq9XpNGEZ12q058397eRlEULi4uME2TOI5fk5A4jsPJyUlt+2w7Lul8SRzHLJdL0TyioKoaWZoTRbGo7xUNRStAUVEUDcu2abVbtB0TXUnRNQXbkVWBaZpYto3rSKJTAXUiumkL01+StQuyPKbVForLi+cvGI/HFEVRSnZgMpkKelpec81Gs15ug3Q+k+kUKBiUvlnSuYt0aLFYYhgmSZJycXFRLqBjFosVw+GG2CYVCc1mB4CDgwM2NjZ45513ODqSPExBic/rsOsvvviiBGwGnF+ckq1kegkCOTRURS+7KqXemdm2Jb9v26TZataSpCROcC23poWYplm/b9U9OZvNqNQo1bhVqRSq9/WTT/9IryvcxJcvX3L79m3eeeedmit3Nr5gPJ5jOBZZJtSOJBai9aDXR9PFtEHusarByF9lNhQKk/kM3TIxMwmkjqIARVV5++23OT4+rg+iKgV9MBgwnU7r/V9lje84jiSmr9ecjy/QNBUNsYlqNNy6caqUBBV7wbq0ioLv2ZldtqWGV+NW9bm8ZPxeJthW5NC0DKm4PBqqpb6T4tsM/MsII5TazDfQ0oo0W+3SKgDi8usRXlUmAMOfGDPr5+PV8776ELJsFAtzuoKCbdsmSzLpHlRd3DzKtne1Ev5TdYqkcYJlWWz0uqxWKxzH4fHjx6iaxnA4ZLQlUPTFxcVrp1JRiBnjarHgvR+8W5MDr1+/DsCLFy/KVKCl2Kc0pJ0/PDyk1WrR7XZ59uwJoaLSbDj1XqHiQVVdTMUqrzqyMAw5OjpiPB7TarXI85zFQqyLKtNF0cuJKiK/9H5VII2miUOD0AFsDCVFJafRdDFNq0ziNlBVrYyQg3a7Q15AGEW4jQbtToesKFjOIzzPw3EcNjY22NzclGXv6Rl5LgdktQB3HEdsiMxyNZKmNNst9vb2mJUW5dXk4Dguu7u7uG6D4+MTokgKzcuXh3Q6PRzHIcsKBoMRL/afkKYJe3t7dVL53bt368JxdnaGqqr0+/16/1gFmoyGQyxbVB1xPBNdraKXIJNeI583b97g6OhQbtDy0Dk8PGS9XtfC8izLGI1GtSytypAsioIrV64wm81otVqcnJwwHo+5du0anV4Pz/Not9v1dd3pdHBd2Zfqus58PpciaRhMpjOSWKYw3TGJM9mRFWSlgYOBOMXIf4OAZIqq1BGHb91+C9dx+eMfnzIcDgXtL2tEGIasVqs6THq9XjMajeoCVa0abNuuuWaGaaDn8v5WFJgwDMUg1HUpsrzeDV+uU98qZo7j1F3VZVSx+u+ojBar9lXViVghDIZlkuU5WdmVaWUhUypC6iVqRXlX1H8qQJ7l3ypwVaGpqBl5SUnX9Fc7hFdqhdczN18TqF/6n+JbhawQ+kFJVK0AiGqPFvkiFE9iIT9WxotnZ2ci89F1tre3mVs2y8UCVdeYz+e12eS1a9cwTZNOp8OTJ09otVrcv3+f07NTRhsb/OY3vykDd0U1cHxygm1ZzOdzjk9Oyje2xWq55ODgoCb0jkYjPM9jMpmIJXjg15whoV6s6XTaNfG0slKWhPK0BjpGo1FtXez7IVGSoKjScRl5TpJmFAXYmnxO1cSlRFFFfN5ouDQaLs2mjanEaBR0e21MS5Kuk0yixVTNBORQipNEjDKjBEXTxaVAk9c5m80YDobs7OwwHo85Pj6pD41OpyORd5ZJs9EkTeXiXnkeliOOHXlR1Be7gCEJqioOu4KSy/U7m83Yu3KVKIp58WKf4bBb7hAN0jRjd3eX5fL/pezNli
S50vy+n+/u4bFHRq5VKKDQ6Gk2ZjhmMpFjQw5NDyCZbB5w+BBzoxvZSMYLmdHYpFqcrRd0A6gt18hYfV958Z1zMipRAKkwK1QhMxYPdz/f+Zb/sudXv/oVf/mXf2lKc+34vlqtWC6Xarq657NXr7m+fqfOY0ZdNwyiIb7vYVmy4V1fX9O2MqgYjYSX2LSycep7UbMzbFs06LQckP5ZmqbmHj07O8OyLO7v73n34QMvX77k5cuX3N5ck6Ypl6+/oK5rHh7uWC5PWa3vxVVpPqdoax7SDU3fMRxNiOIB+8eNuCopmXdbi616Hr0Fu8NeSmL1+VlRkKupf9u2ppeoM2bNy9RZ6W63M4FsOpvR1LVS6RWmgxeFuArzFkWRoUsFri9VYRjJ5qUSqh8NZqGyk9IIciPFo06w47jU9ZPJr/7Csmu2H72XxROw1bKsp/780b91nmSpzO0Y+a//dhwHOvld23WqHGmxLRfLdXAsC1tprcsx9MjKaLGw6WkRFYwOepmqYlkSILGU0octlKeux3V9XM+laTo2271ACYKQ+fKUrm6Uf1+tRCptRiOZagaBR9f33K8e2Fx/UMfT8/OvvuKXf/o1juvy4foDlmXxm9/+hs1mzatXr/hPv/oVtmVRBSG557BPUtoOxpMZH65vub6+4d/+m3/L999/z+PqEduGaBKxXq/NEMayLPEDOCQc9mvpY7ou09kcL4jIsoR31x8YRBG267BaC4C2amqm0wlt27Hebajbjr6Tc+8rypBtWzSNbFx+4CvLNE/UKpwYy4YwDESdNPLxbDmn8XCIHwY0fUedF7R9K5kaFk1bYdngeDZFlVHVBV3fCVsilB7T3d09eVHhuR6TyZTNZkvTtFycX+L5AUWeU9fS03Q9X9HPPCx6okCcs2zHIVfDgSQRV/JoMMBza6X4IfaIlm1ze3fLIA6k/+e7YnQMKuiM+Pv/+l85O1vStuIyFQQuRZnw/kOKZb+kbRv6TkxBwiBiPNaekjJUSdODqHssF1RVYcqoJE24ub1hNptADw8PK4MRbNuW+4cHArUOxStC+mKxUqAIgoCT5ZL7uzusvmd/2HHYbQl8H7sXB6R6OMR1fXrEYGR/SKmqmsAfMJ4Kz7gqS4aDAUHoiakPlvjAokQdOqmQmlbiwsur1xRFzrd//I5BHPHixWdst1uydM98JqDw27s7A3SP45jLF1fc3t+x3Ykw6fnZGX4UUu+2JHlGZ8HAG+H1HpEf4DsuVVtB04ELgerXlUVBnmXSz/6xYFZVDU3T4bpPLHfdeJTmqYfjeNi2a7ImobmAbbtUZaPAqsqgt1WkcNXzqor6I0OUvj1C2SOgVZ3W1nWrAppNR0ejXJMcZUnVdj1t1SiCuCqBLCVb1Eufx+otwJUEsOuUdlpHp8KbZTtPYoNdT1f30GmvARfX9dUU1lbI+BHvb26YTWYsZnMmszl1UTOMR8znU3bpgcvXn4kB7z/+I8vFgl/8y1/S9B3/8T/+P/iez+PjI+vVo0w7Vyum4zH7/Y54EIHjkuYlg2jAf/n137PbCSjz/fU1u/2enUKcN01Pngl2Z7/fczKfsXp4oCxr2q5nPB6SZRk39w+8u74hDCUordYfSJNEHMYVsPZxu6LrJFv1HZu2b2mrms62iGPBfDmWiOqFiufXNBWu04mdmG0TDyOqsiTLcy7OT2Vw0rZ0TUvT98TjEVme87hZEQ+GRNFA9T4bosBhMIh5fFyDZWM7Hk3bUdaNwRF1XUeSZgRBxGgyYTgcGQDr43prsuP5dI5lddw/3JMeEqJBxGQ6ZRTHDMdjBoOW9x9u2Ox2FFVFVdf8wz//E19++SWfvX7Fdr9jEsdYdDiWyyAMOJnNub+/Ix5GRIHPYjYmz118r+dsOZEJeJNSlgVvvv+Gquqoy4a2kjZMU1b0PYZorWWllsslbV3TVjWDIMSzPbzAwwsEnlSqzCNUqhGXl5d4QUCnNoqiLHlUG5qgB+QaufQErkPgOjwqH9hS4eA6y6aoxX1+v09oe+lDxfGQvChJk5TA9yirlLwqcR3J4l1PHKCqQgCu4+EEYY1YzOcn1HXJdp/g+gHn5xcGoxmo6eXj4yO333/Pmw+yyfeWRQvcr9cA1G2jDIQ7qkLI83EcY/UWvitOYnVVs1PcYh0vdPn8yWCmMyMNjtX9HNu2DQZMP44hEvq1z6ef+ka0bfujPtpH6P6+/+i5xwTx4/fVVBBDZdIBse9plCqGZ1vaYlEkco/rTNuW3cVyVNaobgJVmlrYBGEo5a56pT7xddtS1jWP6w3D4Yg4HrF+3NA3PV98/jlnp2fsk4Noc7Ud682GX379NS+urri+vaUoCt5fX+M7LmHoq5F0QVFmYhDr2lKCO470r8ZjHh5XbDcbXn32kt9/8w11VeHYNrvtFt9zqWqBkfiBy3a3ZbPdUBQVQRjiehLsH1aijBFGPsPhgDzPKCrJgqCjVFM427aZTqc4rmS3QSiOVEHoKzSNhx+4RFGoskGZVgrVxsf1HPb7lqqqafueqm5xfQEt264IBtplhef66joKjkq7dU+nNX0v3Nf97kBdt4ZP2PeWmtbVildakuclD/crBXVYEsdD+r4nLwqsviVWnMCyqri5uaHrYbDdMp8vhPMZBkTDmJOTJW0n2fTF1QW/3W746quv2O+20iPsana7LePJiCxLefPmO15/+ZqqimnbmrOzUw6JOEC9uLogLypub1eEQWz6QE/ORr4yQ+64uLgwrAIZFuSqVzggiCLTF9V9I8FwRWYdaIcry7LUQEE+q2tq41Tf9h23t9c0jfSfslI4rL1Slanblrbt6ZDrIeu0ZxD6BI5N00SGWTAajUwfdrvdmmlqkiTsdjsAg3LQk3M9tDC0yKLAbRrSPFNKM5GZ1Ea2KMoetjuiIOTl1RULNR29v78nPRxkM0gSY333vLf+k+5M+iLoYKJ7Z8/hFMfQBqMuqx7Hr3/+fHhC/OuH/n/9Ov274+lo3/cms3seENv2Y50z/XnPH89/IpSpHsdTMt1db6SALAtaFUhlMQl9ZhDIwl6v14RegONKhuE0LpPRhM8/+5yLy3P+/u//nuv371jOZ+RZhuvYLE/nQE/oy67jWlBUNZVSh/iv/9//i+u4hIHPy5cv+PDuHZvNmtl0wj5N6HtR/uyahsAPBF6QHHA9n6apeHi4RWhIYmhclmL+EEUhw+FISiIlie15gtGbTMZYHbiOje8FDAahgb+4nsNwqLiGXUuWpeY66YnlaCiqK/Qy4WybHqniHOUQLxJBvm8TRTGj0ZCiqJQrUU0QhOR5QVFUOLarhkoNTdMalodeyGApIj6KonQJWDR1weP6gbu7eyxsFicn/Pznf8J8sWCz3TKIh5yoauGff/sboffE4tb0r//Vv+Zf/OIXZFlK33c8Pq6oa6FNff75K9Uf7phMxpRVbmwCy7KkArregt7GsV0DBNXEch0UbNvm5ubmqG0jPcDD4UDbtiwWJ/zh22+NvJRu5AMmsdAIA61TpgNJ3/e4tkXfNYi8tQOWY0RLNQQoiqTnFBaC77IcGWC4Totty
WDL1T64YHqsGgKi4T56HUorQuSHhsMhnpra6hiSpim1GqjN53O8/U5EJzsx9Wnbluvra+q6ZjGd8fDwwJ1ys3p8fKSqKmO4MhqNzM/yPP/pnpkOYPoE6pvoeNEfB6Xnwex5VnYcbD710AtCv4/hQj57j5963fOfHx+H/TyQmWN5At3KC+XnZVXJUMIWDwCZkMru4jgOo+GY29s7Ke1ezQnCgM1mQ5mVLJZzgtGAJE/4q7/6KyzL4h//6R/wPI8vX7+mrAriQUhVFSxPLsiyjIvzM77//juwWixLJsO+79FZ4IcuX37xBU1bMRgG7A89YeRhWRFVJQDctm2wrI4gdJi5IxwvoKwq2rKh7WqlhfVknGzbFp7vUlctZdUQBgFRNJUFWRREYcBkMiSKQoIgpO87mqZWSg0DhsOB4vxFBpogk6sGsIjjEX3fEYYeXSdDA8uyqapa2aq12LZI9IShaJRJcG05qD6ObTlG/6ooSmVj97SgBFIyIAyECL5arYy8jg7AZ6dn2LZDmmVcX1+T5zleENJ3Pe/fv+Pu7tbg7hxXBElXqxW/+JOvuPvwnul0zGp1R1nlTGcTMyh5+fIF+/0WrJ6zszPevv2etoXxaEaSpIShZFQabH1cWuqFrzOpNE3Z7/doVQ7btqmbmtPTUzUNrcxmoXvZbdsaJQqd0ekFHUURw8mYMkt5/+4OyxKOtZ6Oaoqc+Hz3NJ2sF2wBuGZphu95lGWOq9oAeuinBTzFeEU4km0rE3490dUZflMJNUwHPzHBll7lxcUF8UgyzbTIeXx8JEkSJYgQspjOWCwWlAofp/XcNAj3SSGnf8KAqscPDU26J6L3cSD6qYD0PEv7qUBkMjiTSX0s7XMMuTjOzI6zLf2642PVfzqFc9PP658dS9+20jfjCcMmv5BAVyujXJ3VVFVNb2FGwXleiDpmJPV9mqSMRyN8VyaP5eM9/9O/+p85P7/kP/2n/0iZF3z28gV5ntF1DU4UMIgDppMhttXhuD2ebxE0yFDCcmi6jj/7+iu22w0nJ1N+/7vfEccDzs6WLOZT4edtt9D35HlGVZdMRjN6LIqqwcls5rOJSPhEEVUtNmPz+dQ4azvjJ/liP/BIUwHZ+oFLHAtQ0fN8NbG2Dck5SRIDDxmPxwZCIhxcsG2Huu4V/9XCshw818O2OwK/pw5ayqLi9uae7WZPNIgI/Ii2a8mzHV3b44fuUTtB3agKl9a2Hbe3t8xmc+KByOI8Pj5yf3/PcrnE86W8/uzzz4njmG+++Ybr6xtubu84PTsnLwrevX/Ht999x9WLl3ieR55lxIMBs+mE+WSC3TUkhx2z+ZRhPTDcTC0LtF4/cnq6RI1fCYKQ2XSB6wb0ncVo1JlAKQMAT0FeZIqnJ8m6zKzrmi+++ALP8/j1r3/NZDY3UBpd3u12O+P2pYUUbds2pagESp94MKBXYgBNU6ljDnDd/gkEXpTmuHRQbKqStm7wfA/ftYkHsQHqaqSCJqofV1o6S9RT4yzLsOmMhJg4twt/1VLA+izLuLy8JMkzvv32WzabDfP5lMlkQp7nvLi8wlEwrMlkguM4ShNuZaSJ9LT3J+lMx6KIOoDoLOg40BwHp0+BXz9Z2j3L4I5L2uMyVQemT70enkpOnVU9L3v1z/q+/4EYZNeKK5NjPbETLEuMUXSPTOPiuq6j7Xpxj1I/d2xPjYcHDIKITPEHbdvCtlxOFlOCIOBXv/oVm82G6WTKzc01YeSzOJmz322YTsf0bU008DgctlxenpMcdiRpRmd5rHfibj4chpRlyqvPX5ClKadnp/RtK9mH0+PYNus1JElLELrUTcsinhAEHo7r4bjC62vblvF4RBwPSNMDo9FIqTxslGKD/JnNpgS+i+vYBKEr4MXOwg9cgiAkCHyyLP/omgV+iO04DAY+nuuTJClJkZmsoGtRDtkuYq/ZQW8zHI5pmp7tRuzmLAt83zcEZMlQBcMmFKxWqbf0ijgPtmICjEYj9XlS1iVpYqALp6enDIcjHtcbul7oQVdXV5Sq+S/qG5KZjccjmaZG5/znX70lCEQ7H+Dk5ITT01NFXevw/ZDd9kDgD2gbKePHoxk3Nzdm4WtsVRzHZhKpndzX6zUvXrzg66+/NkFOB5h3794ZSWmN7NcYrMlkYhazzrQGAxFFyLKcIi8oC2GbNG1LWVYsFh0TJVMkRHope9veUhuFZlSA59gyhfZ902LQYhOistF9hPXU5PfjhGc8GZt+mua0dl3Hzd2daPXtRKvPDXzjOarpTYMgVEbEar0peMf+cKDIc9O37yXl/2jdf3IAcBwo9As1OFYf+POA9amAcvycT5WizwOPyaZ+Iqt7XpYev8fzsvOTpa4eLvC8h2dj9T2uct1u6loFN0XHUSKNriNNUHqx2yrSjEOSMI5HXFxccHZ1xj/902+w+o5f/Pzn/Pa3v2W7XfNv/vIv6OlwrY4oCtjtMuJhSNz7TCcT7qyKjgbHG9ACeZrx8uUL0vTA2dk5q4cHoiikyDI8x+ZkIT23Is+IByG+L6oS48kMP/Epioow9FkuT3CVDV/XNQxHA6JBIPxTqzdmvCDCh4PIo2vF+xIEviI3rBiwxkPxQ9QKFq7rSzblCZVGRPlE7siyHAXp6BTwsSRJUmzb4eTk1DT8y7Imy1I8T4jklbIPcxzp25RlpUjIObPZnC+++IK2kUWlxQpt26Eocg7JHtt1aHvY7g/YtkNWVKLJ1jSUVcnF5SVnp6dcX18TDyKCIGK/33N7/QGLhi+/eMVgEBolFEGphybDkknqnv1eI/AbVQpnyuvg0Sx2ERYcGZHPOI558eIFURRxdnbGYiESUVoe6FgdQ2cxmty+XC65uroymdJ+vwcwpaAeJmz2O3ZJajKv3rbEtazt8VxPZbgtbVFQFTW1ZVGXOVbfmuxTZ1s6c9OlrC6h0zQ16rHHyQkIvEszSLSK7Gq14r3qiw2GMR8+fGAwGjKdTk2JGkURv/j5n7BdbyjLks1mw2a3k0Cd50r4YMd4PFb2hdD/VDDTJ0crZuhphA5mx2Joz6eNwA/Kuqdg8cOe2vGfTwXC5+8BfBRknw8B2rbFsaxPN/eP+nCW9UR4NyUqT0qzbdvStA3YogEWBAG+anJu1hvG4wlx+KTLP5tOmc/FJec//+f/ws/+5CteXF7y+29+w3w+56uvXrPb7bm8OiP0HTabB5kY+j5np0v2hy1VJW7Q2A1B6DGMT4jjAYvFlPfv3zMZT0jThEEkVveTyYi8yEmSPSeLE1Bgk6KsadqIMIwMpalpGrq+JQh8Xr16BWAUORaLxUd0GM+FPNubYYpG/oM0oOM4IBoOqKpG4bECEtXrKsut+AMoVyt9z5RlZTIrLWTZtq1qCC+Yzebc3t4qaeodTXPks9g9mU5rVHkQBPT0pkQD5Ng9X0QzQ6G43N7eM53NzMQvyzLqpqbIC7H2syxsx+ZhdccXn38BwO9/+zvW97dMJmPTApFNFq6vr81xfPfdd8q8tyTwQ+I45v5uxXg8YrE4oSwLo6Sitdc0Lent27d8/vnneJ5nuIqa0dG2
rclkJpOJoa41TcN4PDb9s2N9tSRJxFPV95nP5/i+0LVcJcduqwl523SEUchisSDLnrJdHUSllSLXWveuj4VS9XRZOykdB18NrtfKxhq9oMnhuqUxnU7Jy8IoCD8+PiqO7ITT01NmJwvjbaAzwtFoxMnyhPXjmv1+b7Tcnk80P9kzs23bkM11tqN3iufQC32jGd6k/bGDuSnPFB3qeLDwnPZkWSKiqH//qQB4nBl+ckL6XAbIsj4aAuiFaR8NAORzpcFYlYXg6/yYpm1pmpYsz0lV6TCbLuTEeS7XHz7w+PjIX/7FX/CLn/8L/ubf/3tOzk4os4L14yOzyZSqqiiLktD3qfOCokgZD0fY9NRVxffffUeS7BmPR4xGE1brhDgUqzPPcbGwGA2HzKcTRoMBrqJ0aDmX0AuEQ+e64sPYd+y2GwEknp7gui5Z1uA4Ho7tEHoyZRrHY1zLJfIjpqMpruVC2yuvQNEAE+6cxkgVCKRFJoy1gqDUVaMkhPZ0Xc/V1QsuL19w2Iv7j86mHx4eePv2rZQUfsTd7QOWZZmxfxgM6EY9h8Me27ZZLE5wHIfV6hF4Ckae54qEdQ/v318bruFutyWOR8TDGNf3KKuK4XgCFhRVaZrt93d3BP4tURRxcXFJEPjMJlMGUUgcD2hqIXd/+eWXnJ6eMRpNFCxiILQ0laXE8RDX8XEdgfokScbhkBLHAxzHVhnckIuLCzOpu7i44M2bNwa/qbXxkyThZz/7GWmaCqn9/olHqxf1cDhkNpuxWq3Y7+UcbbdbU3qmaaqYBS29BaPxWGAhdUOeFwRhRFlXdH2H53oUWU4UBriOgHNff/E5y+WS1cMj799fm3JWb3qnp6dsNht+85vfcXp68pEQhB5saJ6kph3paS2IR0Ko2C1eIP2+qtVDnsKgAm5ubvj222+5ubkRYciZDEaiKGL1+Ejbd/L6tsV7FgM+WWY+//t5APuxHhU8EdV1gNNfTvPX9O91Wfg8Q3s+fTz+vB8rRT81gPixR6M4Ztq63rAT1GdNZzNaNSWpW0GsD6IIT42pLaSJ+eb7NwyCkL/+67+mrWv+5m/+hhcvXmC5NkVZkKYutt3TtR15kZM2FVWVM4wjwUitZHpzeXHGxfkFaZbwu99/Q9M7hIMRZSFuUOPxiMloxtnZJfQd282aphGz2slkJj0ryxLJ7LzAth3OTk9Zbzb84Q9/MNr1dV1zdnamHKJzw2esypqmbkkT2altp8fzbNq2V8Ro0Y0PA03I3iuZF9k1Z9M5GugyGAxI04ztZm+yBz3xalvx6dQ0Fq3dnue5ESIMw4CrqytsG+q6UjZ9YieXZeLpqEGSaSpaWePxmOVyyXQ6o6xq7leP1E2Nq9Qs7u8f6dqGzXZtMhXXEeiE9q10XYfxWExopfrwWa83fPbZK7bbHbvdnrIUY1qtMnE4JOy2exzHYb8/mHNgqSb7brdjsViYvtNwOOT09JT7+3u6ruPt27cGO6Y9ODW9SvcB7+5kan52dsbJyYnpLWn5H615pku1MAzxfJ/V44owFKXXjeL+niyXWF3H43rNZr3hxYsrJZkUKMXXkOl0yps3b3j16iV5XhpV2q4TVY/pdKomuG/N+tZUJfHVlPV0OBzM7+M4VnjCjH2SiFeGMiapm9p4HSyXS87OzhgOhXSvnbF0vDhOrLbbLXEcc7I8UVWJPH4ymGnw7Kea/p96nWVZBlh3nD0d19bHPz9+nn79sczPp8rDT8Exnh2I4Xv2vcwsj4vetmmwFev+OFvsFaWqKAr6TlzMPRU4szwnAsHQuAFv377FsRz+91y5KXYAACAASURBVP/1fyMKI/6Pv/s7+h4836dqK9LDnq6tCcMAB0h2e7LswCCOSA4+vi8mHlZvU5UttuXTVj1t3eP5AYEXYVs2+/2OupCJlGsLAT4KAilHO1Hz8EciMZ20Ca7jU7cN+90ex7KZLU4A+Q7RcMB+e8BzfJJEejCjeEyWFeLS3fQUacVwMiAMQoq8xrZ8caNKK5HGDiI2mz15XpOlGZvNhqbuCcOIoijx/YBWcQgBJTsjShVpmhIEgVHqMJAYde2121IQ+FQqk5LSxjfOPXqiWpYlSSIL5kkOOmez3XG3eqTteuEezmR6q2EHYeATBCFd20Lf0VRPZerelU0q8HwjOng4JLiOS5rkotTrutDbuI6P74ms0nAYA0Ll8wOXthXjZ8sSuWstJaWhH/r+vry85PPPP6dpGt6+fct6vVauRWf83//hPxi6YN/37JTiioZibDYbgTjEMfv9nsfHRzNwELhCSRDZYLvYjofjBUSDIaNYWg/0HdPZFE9tNiJ66ZMd9iznC27u7lVP1Db9LO2SNR6PzTQWMNAPPVHt+57temWuy/G1tVV10/eiTjMYDU3lluc5b9684d27dwyHQ3rbou5aWqTUdQOfyXyGFwbimtX4NF0nuPgfC2bHgeUYnqH/fp7J6If+udboPwa4wg8hH8efoz/rGENyjHX7//OQ5vwPy9Dj45Ap58c4NdtW4oxqJ3B9oe6AZcqUqqqgt/nTP/1Tfv6zn/PHP/6R/XbH6ekpH95fs1rdM1/MsGwJkK7j4Ni6tBUgap5leO6I8Xgi4oa7jKbusXqbxeyUrGzxPXElKooS1wsIfB9ZfzauG5DnJU1d0Hc2jmNTFBV9bzObTtkneyxrx2g05vz8jEIBI/u+5+7uAdt22e0OjIYy9j/sxUgiimKGw5jZfEZVF6TpI5ZVYeFRlrIRTacjAn/Pfr9XvZaOwyEhSVLTv7Jtken2NpI5OK6U75PpmNl0RtM21FUtYFTXxbbB9z3GkxFRGGHbjrnZdUYQhtIs14Oo9XrNYZ8oGaUnE+OiqinyHNcP1PM2CnBbYtuOmeqOR2MsegJfWhZZmtC2LbPpjPBkwPt3H9j2Cd9/914JOAogVTe0tQT0fr9jOBoQDyMeHx9FSTiRocCLFy949+4dh8OBV69eGfjIbDbj+vqa77//3sArbm9vqapKOSmJTLgmsPu+zz//8z8zmUx4/fq1Uey4vLwkCAL++Mc/8vDwQF3XeL6P5/ucqmy66xpGo5FRnYgC6Vn5nsvN9Q3j4ZC7uztGI1H6ffPmLf/u3/0v3Nw9GKlyDUzWXNE3b94YkQWdqOgkQyc/GmyrhzZaGUMT6bUm2/gIjyfKJpKlfv/99xwOBzMIgSeV60Yp3HZdx/39vWEfwE9kZscE5uPA8jyYHT/ftm0qI7L2lGnp1+iDOf4c/bf+wloC6KeGCMfB6QdZ4rNA9jy7M4C7uqFrn2p+y3ENSruqKtq+Uxr0oTl5XdexWCz5+uuv+e4P34myhe/zuFrhez5JknB2viRQWB3fFXiD53lEYcR4OKIfxDJ76KHvOsJwwMlsSV3X5EVP2xf47oDA9ekbi8bqsenJ+gLbsrCRrK7rOpq6py4riryibx2iaEg0iDlZCujy4eER3xey9GazxXU8wmDAupHSqa5bttudcpY6YbE4paMjSUpsKyDZl2w3qWSknk/T9IxGU8AmDAfMZguTKUDPdltgWUqF2LGwOyGGaQyUH7g
0WUXdlPiBr0QSa/Iio+9bBnGoyp2RsR/Lc9lcNE2maQT5vtvvGQ3luup7ahANCMIUx/cZjkayIJQ0T9e2jGczgiBkOIwpC8k2ag2StmC7WePsU9rWZjE/Zb3ekmUF0+mcm5sPqlUii6uuhShvWZgMMI7HZNnT4tXBSt//VVXh+z6LxYIPHz4YAUNNHczznO1uhx+IGKfObHSpqgdwh8OBJElMxrfZbEwwaPuOopS+Wd8LFAkaM43Vr5dj7wGRTsqzlqYu8TyH16+/oCgqDocDVVWZnpgOUjr50MemKxy91oeD8KPKTq87XZKmeWZ04Y4DoIZn+L7InOuhhFbn0YOFs7MzM6D8SXem572y55nVcWD5VAP+U1Z0z8vJ4/fXr9Mnhf5j3Jl+6JN3jIP7VOZ2/L6f+r3mc+oyRwu96fetlbkDtkWa52w2awI1Fp9Op7x88Rl/+7d/y/nynPl8zh9+93tcx2G8GLOYzwQVn1TYQFVW+IGM14ssxVHBsms10BS6zqJrLYq8pm0tHFsmUU3V8LjaSmkWSZN/MIiYTW18f4AwGARR33ei+FEXLafnJxyyvbleRVHgez7z2YKmac100rJswiBmPBKV1ywteVxteFyvabtWSTvvWa/XvH49xBuEEhBdl/F4os6d6LLt93vTf9JTt9lMzoXufXRdayRehkPhUep/S8M4MhtGFImpsr4+qSppy7I0sjAic/TUAmmaBltxOHvbZaBc4DUmrShKBoMBYRhwdnZO01S8e/M9eZZxfnbGZDJmtXrgw4dbIODi4oqyrHn/7oPSkUt59eozgsCjbSt6OiXnbBHHEYNByPn5KcLnFDzbixcvcByHh4cHg9zXLvK675XnOQ8PD3Rdx+vXr5k4Du+vb8yk07Zt4/r93XffGU8KjVfTwoY6mGE7bPe3LE4WtG0rDXzHNv27zWZDVRZ8/fXXbFYPXF1d4ToOm82G16+/5PbmhuFoymq1lp6aMiXRE0YdEPXUUvfLjte9Dmwa5nG8tsqyNDJXGuqltc3u7++Vn+xQDa4yww/X4o62bRuxx8FgYAYV8CNl5nFw+hSO5DhAPQ9S+ksdA231rqJv4E8NGUzwsiys/mMy+vMMT7//RxlcL38s28Y+Oj6e990U0r/vnxqYnudhIYDdspKbTAw7ejzPN+J2bdvy61//mtPTU7I046CsyEZxzGeffUZTV1RNQd2USryyYhQPaaqawyFlvz9wenJKT8cwiqnrijzJKPKSuqoQJRKbIivVOeuZjKZMpiOqqpIxu+VB3+FYFmVRUZU1tu1Bb5NlBXlRcnd7T3JIGY3GFKrnNpvOOBwSkcG2hOEQRzGDMBa12jSnruXmH45GFHlNXYnl3n6fSnm6PVDXFcNRTBD4lKU4YjmObGJxPFBikyVtW+M4NsOh3HCTycRo4clzKvpevCNdV1RFm6Yiy2Qiq6docTw0QVnfQ1EUcbJYEATSP9tut5JhRiL54/quul/FAGe5XGIxJ1D4p5uba3xPBBR3+x2TyZgw9Nhutjw8POB5I5bLc4bDEbe3N4zHU8IgVOVlSlUVpJkO0iIAidXhuDZnp2fSEuh7Zecn08owDI0lYBAE7PZ7qURs2wByZ7MZWZ5zfn6OZYm5yX4vysXj8di4eBdFYUrTsiyNFth4PGY0mfLq81eMxlPyTMDDtRKibCpBzceDSJmqKKkoG7IsIQoWApXobTNs0S0f3T/T9CQNNdEcTC3T3ratENWPDIsdxzF4uTzPsVV/UrNIgiAw50nzMeM4NqKOWqzz5OQEyxKDGAtlbnSUr/x3M7PnGdJPPfpeyLOWwmzJy8Qd/Aek7/4oAP3I5PTp76cy9bj8PQ5klvq347j0dFg99KgsrxeQK1aP3fW0nXg26lLAdQUk2DYN8WhM2VRkuz1hFPGzr77k8sUVq9WKf/qHf+TP/uzP+f7771mvNlxdXPDV659RFQXv3r/l9PSE4TCmqm2yTJDV0hi1yLKUw2GPZVtkqUJqVwW+EitsmpbAD5gtT0Sho6gYRAGDOMRRY/g0Sbi8vFR6YC2HvfR6ZCdzaJqO9eMWy3KpqobVas1kPMWxXe7vH7AsVfJGMUmScEikwZ3nuSkjBoMBvi9IfgkcAzbrjYJiiBhluAkYDge0nZZIF9rSZDxjPJ4qkK6t0OsOFxeXzGYz7u7u+fDhPff3Dyoj86iqTJy0m1aVs55ZOL3iEGoTDL2zN01jrP92u70SL6yZhlNGkwm4NuLB0RKGPpcXZywXCx5W97RNw3r9yEh91ng8xvWEpoYtjlfbbc7hsCcMtQFxwcnJguubD5RloUqjBseV65amCYd9yv3diigcsl6LTM16vTELvO970jRjOp2w2x+wbJvBaETXtWx3e5I8Y7s/GE9Tx3G4uroCZHq3WIib95s3b4xnpKY6dV0nXFfVc8vy3ChXRFHAIAzxPJc4luBRlQW/+93vqPKCqiqIwgjfDynrmvOzS96+f89oIvi8t2/fUlUVs9nMYNu0GOV0OjUQksPhYDLOohA+5eFwoOk6qqYhUVlWR89A8Wv7XhQ4tC9vEAhyoEgzhtEAWim/26rG7sHBwsHiX379pyYwHleM/0PTzOe9seNA8px25NpPN6ERY7RtnGdZVdeLUKKFJYKJLTSK09VZHXZvoV2JBM8mMAfPsX6grCF9Nmh74bu1HGeOqGxMnjMcjlmv13RNwyAegeWQqb7MSBGe66bl9c++4pe//CXr9Zq/+z//LwD+4l//Bd99+y02Fq9eXEm5c9hiWRaT+QTXdUizhCzb0zUd9C3rzYquawhCn+HonOl8AnbL42qNZVu4gUNeZQRBiD9wubu75uz8FC/2GA0jPNcCWpLDHsd2ePniBXVdc319Tdd1fPbZK8kYsowsS4mygEEcUVbSX7lfPZqyxHU8kiQ108bs5oOUOqWYuDiOxWIxM5zM/W7Dw8OD9GwUODIKBuRZBh0q4PT4fsR8coKNA3ZPUeR4nq8UNjpFR+oVaHJGXbdUVUldN4zHU/reUuKJGRdXF+ySlO1BdvpIod8B4+6+PewYtAMs18aPfF5+/hLLsoyreFkL6LaZDCjzHevVDV2TU+SlBIBhxPX1NWVRcXp6ymevviA57Pnjd28oqhI/cEjSLftDh2V33N3fqj5Nyfn5uUHkr1b3JIeM5XLJbptyf7fm5OScwz5l9bgiSzOC0Gc0HGMrV6PVekvVtNiuy8nyjDQ90PHA/pBQN9eMRyOur29FCywIyDLBPWZZoXw3QyW7c5A10QlYOo6H1FXD9fU1ru+TZiJPHUfiuhUPJYB8eP9B/Eubhvlshm1PzTouy5KsKOiwTQao1Y9172o+n5umvS73dM9Pr+ueniRPsRyLrMiomgrLtrA9B98JSIuMopbq5fLy0njCdl3H+uGRrmnJDhl921PmFa7tUVcV3/7hO87Pz6nLBtf2CH1p5fxoMNOPH+tHPf//H0wMe+sHzz0uI5/33p4/2ralo/+oRAWkDOSJYG6YAD0GzW9ZcpJ762MmQt9LJkaPyApfXRGHckNrv0jXdXnz5g3n5+f8yS
9+SRSG/PrXv2a/33N5eckginj39i1FmpmLW1UVtmUZnE9WZESeKy7PbUZViZGqVnqQmzOh73sm07HpKwC0fcs+2eF7LmUp6fZme49ltcoxKcRxXLJ8T9M29DQUVcFqdc/+cKAsSsoyZzQZ4ofnBoekJ3CafqKnQvqzbUd8CR3Xom5yLIQWJNO6DU1TMRiEWFZP19XkeULbdkTRVPXJhHok1J+EwTAiiqSpvV6LsGJZljw8PABir3dycmIwUlobazKZ8P79e3b7PUEkMku6H6Pvm6IoKIpCGvdq0KQpRr7vkxcZWKLsuloJKXs+m5ieU5qmrB4fWC6XXJxfAAIlKquK1XpDUVW4nktW5hwOW1HwsG1cd8r9/S1RJMOUxWKhZJMwmv5RFFNVDWmSE8dDmqbF9wIF+BSDEvEV7RlPJlRNI9Lkjoevhht9UZDlOWWWGxPd5XLJfr/n+vqaFy9eMJvNeP/+vXFI1430vu+NjPxkFIsNoSq3Pd+lyHM2bKibWpQxlISOzoQtq6NpBEC+WCzoe5Gi0vgxfQ0cxyFJErOujm3iJBimTGdTLMc2PTPTf28arL7jz//8zzkcDgaSoilPp6enjOMh3/3xDXQ9dD1xGNG4Htuyoi5KqrwgmM7kmMOILv6JzOz48Skc2POS8AfP44dB7L/3eCo9FQe07+jbp56YZQnNyLJENvsjzTTbOsoOVbB79h3UW2MBLy4vubm5oW1brs4vjDN213W8ePHCXMjVamU8LyUz6ZTbeWTMPhaLBbbjcH9/z2Aw4Orqgsh3yfOAzQZzk2nEt9Z9EmK0RZIkZhI1Ho+Zz2eMxhF1XVJUPbbbc8g2WG5D1WY0ZUtzX+K6HnmZsN1tyfK9mgrJYKNqhfMpN19NXRdqUPCkqzWdTs33sm0IwgDXtUhTcS7K85w8z8BqmM4GLJczWvX9Z3PJ8rq+ZDFbEkVLDoc9TZMzHA3oVL/LsixDU7Ft20zytJyMLmdvbm7YbDZcXUmm+/C44pAeFGVLXqsXT9d1psQBDAFbN9ObpmG1Wpnm8mAgw4K6bikKcZTfbrbcXN8aHa3D4WDQ89vtlnggNnMiPBlycXHBfD7n/v4eiLFtpMRWhP8oCrCsnjD0KQqPsixM+aeb5hoF3yKQINd/mirqhrp+HJKEgR+YXtjV1RXn5+eUpWSFrusymUzYbDY8Pj4aUOlkMiEexFiObaa7RVEYilMURUxHY7744gtW9w8fSW0BhmlQliXYrsGs6b6YDlie5/Hy5UszxNGbkRi61KR58mRMcuSepHFzjuPw7bffGt9T3Zfb7XY8Pj6yW284W16YJEZTpHTQBFGt1b3un3RnOn78GNL+UxmZwZFZPy6I+KmBwUe/R2VmvYgjfvQZqpzk2fEcj75tCxzHorM+IV0kIk6s12viOCYOBYS42+1wHIezszMuLy9J05S3b99SKCt5Qcxn5FmOp/0OjjiqyeGpz3F2dk6ZHmjb+qOTrJudYRgamRO9QLWonnDTlgSBTZLtcL0pWKLNnheJ8nyspMcRRdRNRVll9LT0tISR7KB1XVGUimTswXQ2Eu/BOGRURPS9ZBYy1ZQAIZmmy3q9AhrarsBxYb4YMxqNWCwWJMkBx+1YLE5omobDYY8f2ISRQ914ZFmN7fS0tShw9D0ma9Cbms4QLUsUJHS/RxOXoyiSMroVSIIGYOvG893dnThcKceivu/J88I0pNu2JU1Tg6gXD1GZovZ9z3J5SlGUlIWUOPf39yawrlYrNpsNdVUTeIGZ4mlkvnbG8jzPuA7poKFbIYOBaIDpJrlWg9VyP47vKRl3WJ6ckCpmhKbrDAYDmqbh8uzc9KC096k2BdZqIMfgct3rHAwG9BbEcUTZ1KLHr/TAkiShzHJevnxpfDd9BSHp+97g57IsIy9r8710sNWKGXrKCJJx39/fG5K44ziUVW08D3SvUK9jHeD0FFIDofX56boOB9n4Bsp/QT9nuVwaXq7OFHXA1Y//4TLz+UTzGO+l/3RdJ0HH+qHK66fgFM8DmnmvTxyPfqZl29A9GQw7yujVtm06hCfXWU9aaaZUVW/QNAIkdFyXqqqYTqd8/vnnOI7Dhw8fjEnuMa5Gjkv6AmmSsFwucV2X9XpNpXbMi4sL9vsd6W5L29XmJtPja+01qikrYto74dWrV4ZAXdcNTSsBaziMsawpTSPihN5kqDKcRknrBLiu0EWKosB1xU3I9yfCMWw00VhnCQVtOyTPC4bDiK6DXrlGT6cT1SerqOoSy5bzq01lw8jDsmMGcaDkg2KSJOXu7o7bu2uiKMLzba6v31GWLUE4MHvO4+Mjti0qpEmSGMzSzc0N+/2e+XyuguWT2UjX9uz3O5pagsjlxRV93/PHP/6RLM0pR5VZbPSIechoIuTlc+F7GvxV3XK6PDPQn8APeKl0zHRfSDeTBdeW49qugSF88803eJ7Hcrk0g4k0Tc2E75isrf8/UbQdXf5GUSRCBa6j+LO9lKpJwlpp4OuApMvrIAg4Pz9ns9lwf3/PdDo1juAab6apXXod1U1D2zUCQPZcQ3TXCPtdIyR2z5GeGUdWbSZ7bDsTtJ73x8MwZDwemw2ormsD09AsgI6e3X5DpYQ7dUYmbZYQ3/dZrR5pmtZkrVrkU6vXtm1nNjA9nNK0vK7ruLq6oigKM8XVj08Gs0+Wj8+C0vPnPwWNJ+zPMSbsf+ihJptqCGoeAtk4/vczxQudmWHR030ExzDvIW/P2dmZoLVth6+++oqrqyuyLOP9+/ccDgcjd+wrQnuWybStawQnRCcp8XEZpbO33W6D71h4nmN6Dfr86JRf44106QmYxRHHEfS9GXt3arwfBK3B1bRtJxPbTgLzcCjTsyyTbO/s7EzhsIT4HUWRujGkHxIEPZ4XABxJDlvmGoZhwHw+UZmbpXbMhjgefCSVPBhEjEZDbNsyCHPXdaibHt8PTAmgcUSff/658QnV5YvOBKbTqQn+kvUdDJRBss2GXrlvOY5rrn3bdibjHQ6HxPHQ6I/lecF2u8e2Wy4uLrBt2/Tb9ORMy1VvNhsTcFu1yADmc1Hz8H3feKDulCSNzhD0wtc+srovKefDZTQaiZ6ZbVFWFUVZsnpc0SPS3bpMO15nSZIYmpCWiNam28ecTJ2hPbmV25R1SVWVxOORut7BkzJtL5nO6v7BwBt0EBZTk5ggCEAlCLpHqUGxolrsm+CvFSx61R7q+x7P97At28gUaV06fR/K/RAxmUxM1q1l6NtWIB7DwRNPtqrFt1U/1/M8CnXfHA4HwwOFH/EAOP77o4DwiQHA8eu6rqPnSWb7eV/tU6/96DmKhyWt/u6jC2w998Pkh1i3np6ulwECRxmiZJXynP1+z8uXLzk7WdJ1Hb///e9JksTsdJoBoFNY4QEmOJbCpNkOj4+PuK7L+fk5o5G4BG23W8bjIaPBiCBwjbqH7kNopQPANLX14tZyL57n4vgubedg2S62I/pkTSO0nrazGI4makReYTetOPk4DtgOfhDhuL7SBQtVo7zF80LCcKAE/UQRVhD9shBlKtZQ16LRP5sNz
ObUdx2HJGU0tLFt6fOEQaEAr2NcRyZnXdtzcXGFZXtE0dDgjIbD4UcYRF2iOY6YImtApkZ9dz24nmjFTyYTRVl5VAOGHMfxFH2rV47gIUEQsdsdyHNxx9aL3nFctcgsNpudKvk63rx5w2g0Yr/fm4xspIQZ149r6HqGowGLkxmzuYAyP3z4QNNWPKykP4rVUZRyT7muSxB6YHX0vWPkrXWQDcOQJE0/Em189/YtoUL/+77PdrtlvV4zm06xOwGYaiCsJmsHQfCRSYiuPgQPKeWb24hprx6M5HluNpC+kQATeL4p+XSJp7M8PwhIso1p+Ovvofu9esqqYRF609blqiQw1kf+D74XEvgRju3Rdxaz6ZzAl41hm+9NKS5GM1Kq75KDkejuLdglB8NUubm7NRm13ng+Gcx+LKh9ClH/Y1na8xL0+eN5EHz+OwtAlZPm5/p1z4jmH/XGQHliPh2H/uMoyZ9/+ad/RpIkfPjwwfQfNIlXlwij0YjGtmV8X4q1lu96rNdrIl8oF1pbSu/0Wr4YZGKlp4V9/3RjAiYt1mlyrAC3g8GA9fqRyPbBcqlrsPCYTCQbq6oHyqJhPPLpWotcK3+2Dmla03cOw+GEMm+xqehah7Josa2O8SggDIZYJOx3OYc2Zzh06TsH3xuYjGcQjVlvHkgOOSLOKN9lv0to6g42ex4eVlxdXXJxMeZwSLi9vTclWRR5lGUF5KYvpssX7fGpg02SJMaBSC9GmSzWRvtdduvWYKz0otRQAI0m9zzPZAmWZZMkKZ7nCxi5qSmK0kzOhLQu5cvhcKBU5rXTyZR4KLATz/EUHCLjxYsX3N/fG2qRlsLWaiAamS7BU1ko9k9KuTqD0y2PwWDAy1efcXNzQ6Qa4LLZHXh83EnW7gcmCz49PSXLMh4eHoxMuZ6m6w3HdV2jNtEhaiehajUcN+h9xzUVjOu6jIdD9vu9yXpATLu1JI++XrOZOL4fcy31Pa17alpM4hiJoIdMeiqtoV7v379nPp//QGHHtsVuuOla9lvxQDg9PTU9VZ29awNirYunHz85ADiGUejgoBf9MXFcj1+lh/UxreFTPbLnQU5/EceR5l//7PnyGvWfZ6XvMRm+R8bTTd/RqNS7V6XacnnKfD7n5uZGGrNKQuYYpKl3o+12S1PXRjq4bVsqVbJ1dWNEBx3HYaDSZ2kYuzR1Q983SkPfM6nw/f0933zzDaenp+YiH/fRZHdqaVtoesiKEt8LaLuepmmJ4xnu1CPLc2zbJR7NubiasNvuCAJp9tJDMApwrAjbqsjSmsB3qMqesqhIkpKy6EiSPYd9SRwPuLkWgvTl5QXDeEZdteRZo3BgtRILFA3329tbsqzg7nZNU1uC2j6UqnflsX7cEanv8/DwYKg2RVEYVPdXX31Fmqb8wz/8A20rtCmN8t7tdjiuTMt0BqcVVU9OTj4qTX3f5+LiQpy7b29ZrVa8fv0llxcvuLu7F9HEIMJxXA6HDM8NODlZkueZyiykZ9n1HUWR07S1abbXZUVR5GRZCki2MRhEqqyPDdi2bRvVwK7JczVIeNhQVVL+azrTdDrl4eFBDSxyUZ4YDJSje6My2Jj5fMx2s2E8iE22KnSulNlsxmg0MiKWL168MKj7k5MTwza4e7hnOh1TqSHKcCgMCiHZj0nTlGR/II5jc2/rQdhsNmO9XuP7PpvNRlgt6tqkacp8Puf6+tp4E+gApU3Dm6bBJ1ACjKXJInWPDQQArNECmoephwvb7VZaOAoB4IcB4+kEPwywHJv5YkFW5Li+x15lbmEY/nQw+1Q29inc2PHPj3tXz5//PCA+/4znn/tTD8sWeevnmWPf97R9R5kVtMhOo2kgvu/TNy339/fc393Jbtz1ZmKls6hj8N9x8AaZ0lqWxXg6Nb2DpmlEUkhdUM9zqPIU28YADrUBhG6QauyX7mVYlmWcf2zHYeLNKNKUvKhxhgF9b5EcSvN9utYmzwuSQ0KWNiTJgbKsCYMhk8kUByiKmiwraBvoWqirTgWenKpsaBtIDjmBH9G14AQe9A7a9DhN97RtT5aV+J5PEAzwPckI9/s9aZby/t2NG+ZBVQAAIABJREFUTFCdgNZq6TpL9fqES6onZJowrS3XdPNa3y/7vcgDJYmwGYIwoscypZW+B3VWNRwO+frrr9Hj/Pfv3xNFEZeXl1xfX/P27XsW86UpNYtCwLlRFGPbT2Rv13XM5olqb4ShsC1q1/uonMqyzByLViBpmobFYmHoSavVijRNSbOUqmwMAj+OYzOpdQOfqq5Zr9cMh0MBJqsmtu4jRureWiwWghFTwSxNU1N2Pj4+8v79e66vrw2sR5eLWi5J9+E0Qr/rRD26Kisz3YzVedMcTj1hdryAOI6ZTCb4vs+7d+9M9qVZCLr81FmS7/v0KvBHUWR6e1EUGcpSmqbKTFxAzYCRE+r73ohRjtS1L9X/z2azj6bY+nyt1+uPYsaP+mYeB67n/z7uRen/N6lq/0Mjkp/qw+mfS6b338ek2Qpz9oN+2dGY2vaeJixaxmS7WbPb7owfoGs7RArMalmWwbEcK4R8FLT5GPirsxbnSKbEth3hT9pyUTUfTafiGliojSgAs1gAPD+kKK6pVEnTtZKMZlmmpnHSyC/LiuSQ47oyfZPzD0VR4dmWOItnGX1v0XWiEitlVaos31r1XcWFvutgtztgOxZlmbPfC3q8LGtc16dtevBsxuMp4/FUcSG3eK5vApM+z31nmeuo1V1FYigyx7pYiMS4XnhPvRbZQPK8xPMCNenqlLaWKFYIf3TNcrnks88+58OHD3z4cKPMM6aMhiOSJFNNc8xUcTg8x3GEYC8l8QjPc0mSA01bK+pZgOu4dH5gBjXb7ZbHx0dDcj4uoRaLhVGR1RmWRslrPJ9u3M8XC4pKwMO3t4JzSxXJXHMsXddlqnisWqAwiiIzydOKETc3N6zXa9I0ZzSKGQ6HYgYSD4hHQ+7ubqjrypThuhTTgoie41KqNsfp6akZKBwDlZumUXptQ1PK6paK3Fu9qTA0TEIGgBau4zMaTZgoSlSe5zSNCHZeXFzx5s0biqKkaQ6m//kENbEEEeE4dH1P2/eEgwGL5ZLheMzDwwOn5+fkZclecTn140et5nSA+FSvTJ/Y4yCiA1zffZxpfSqD0o8f4NU6/e8noKt5rvr/rv/Y0em4N2ZbNlEU4kehIa9ut1sxWK1kcqeDiKsa+se4IL0bfxoDJybBpeLaGbCj5+Gri+q6Dll6wLI6wxDouo7RaGQyxDdv3hi1g/1+b6YxMqVZUdUd8VDgFNfX11RVRRwL+FP6TkISr8onrSrHcTgkKbvrDVHo03cNdVljWVCWGWUl/pNNWxIEHpbd4XoWnmfhuiFlVfK4fqCpK5L0QBQF1HXFYjHn/v4Ox7HJsgTPEwDmdDrGdR0DbRDoyZBDsqcsc+q2pCwLM3EKgoDxeGya/avVygj2acOQruvo2h7bcvG83gQGHYyWy6UBo263W9I05fXr1wazpP0TBU7ikRxSurZVkj0Nh0Oipo2tCWi+7xkVFd1qcB2X
tm/NRqgBoHIulQiBbYnJje+z3+9p29ZkYLZtUZQZ89kJV1cCKXl4eCDPM8q6Zr3ZYDvCfNjuNeDZNj22MIp4eJBp42q1+khZQqtvLJdLbm5umM0mLBYLc6wWFmEok2pdout1qSeSGoRsq/tTDx50j66ua+LRxMgM6R7VcYmvh1vHgU2Xq13X8ebtW1zfU34EgdrUn3iUw+GQoijNcKzreuWEFRhsoYZF6RaWLkO/++47BoMBd3d36rz+hG+m1sT/aCEfBaLjbOUjfFn/wwD0qQHAj5WXfd9rJtSTc9LzGNqLs3h/VAb2xyR09Z56NK7/dF1HpAJIXcpO0PFxINa7jPmez49NgW4926GzjnTZgFyVHa5r05YFtiOCg8dltc7mBEIQMxqNDLVIXzQsi6bPqLuCIs3ZrDeKnNvS0Ro9ft2ILpuMMIgYDkc0bUGSbkmSFvoGx3KIhzFZ0ZjjCCOX8XhIkeeyMSAlV1YUVHWmeKkpXVZQVkOqOqPra4oywe99LLvjcX1neoZlWZLlGVWdYTtqc6ElTTNDsH4+FcvzXNzgjT1aZjKzum+EqtVD38kkLAwEVhAPRji2h225jEdCcLYtlzw7EAYDFvOlUGOKlp99+XOKvGSz2ROEvmEaJMmB6XRCWQZoF/dWfZemkeNs+oa+a8UwJs+xbVgsJMMoy9xkmKLzLw7nWZbw1Vdf8vCwEtnw7Z7FXFoLGkf239p7zyZHjjTP8+ceOgIqgVRVRVHd22zu7M5M28zHX7sPMDZntmt382aaZJMsxZTQQOgIvxcuMhIl+t7uGsMsLasyAWQgEP74I/6iKitmi7m5Xs0z2IPFWimlfSHW67XTiVuv14Y+FbhgYrOh6XTqMifbW5S+5+AhNmO0MCKboQ1NSCzY2BL47TDMIv/LUjMabGCz+C/7mT0bspn30ncdu11FGMacnS2YzxdO6ujm5sZULx6gM2WlWtq2x+LMgyBwWEGruLFcLh0n+ezsjL7Xxsnj8fjvB7NTkKz9firOOLxI+odPZeanJpWnfbPh37Ael0oYzuVJIANcv+xT01akvqDH8kk1wH4oUukg59lyRuF6ZLY5aaePNvjYnUQphRmSUpQFnnzSaurNeFun6oLFbEwYWrUMz42v7Xu2yGe7y2vdfI0zG41TetFTViV+4PPH//KN65kcDjld35FlY6SQlOWRzWZJ3+PQ4Yieps5BtcgoRHqRwx9pPJZHFAu6HoOW39HFMW2XE4SK2AtIM11OITrqpmY01u7hFmJRloWmlHUNfiBIs4j9fs/NrS5Nozim7VraVjqwpBURHB52wdkMzRqkdG0HCMbjiWte28/FkpvH4zGXl5eOzmQXv73ev/76hu12x2az5uxsxnQ6McTsWvNb85Kua+m6kVuUXde7xWOz9mFWeTweHV7KZo16OKKdlBaLBV3XO8cquxgfHx91BltpkvtLXvL2/btnEAubwSul2Bk8m22y236UbaJXVcX79+9ZLpfM53O3Tq1B83ADt6oww2GKhUAcDgfOplMXGD58+ADgcGTW79L6gD48PDhVEwuWdbxiI57Ydh1N2zIeT+kPeyfrYzc2S8Wy6rS292z7kHoYFzCbzZ3kEeCA7Lbv9+OPP7pJ8dCh7bPBbBh1h8HjNMDZN+IyHJ6sqU4DmYtLX8rOLLRiEMDAdGEUTn3jGSTDnl//VJ4AT8yAvqdtdCl5NtUATdE/fw+n59abia292T1hlHSLkobGDQwEDK4VZidUZJl03EJbkmqDjXuHGLeuzBZnU1YFwoco9phMx1xdaeT6w8MDXV9R1z1BoHmAvRq53bhp9SKbxSmSEGmAy3Ec064LPE8QRgIhW6r6oMtMCdLrkF5HNgqfTaVfvHjB3d0dWTY3WB+fKNYBKYwkYRRT1xpVHoQ+beeb8yiIk9jRo06R6nb3t5uFUsqZ/up+jKKqtIXdZDJxaHMb/O3mlKYpDw8Pz6gsdV0zGo0RaFs1zXjQ9CY/0L6hk8mcIAgoypy61osnSSMHO2jbFt/zXc/I0mfspjabzVwbwva57BRRg6IDlIKq0s7ef/3rX3n37p3LkOpWL+L1Zu1ezzrETyYTzR3dbPARzkzk8fHR9Vu32y37/Z7lUhv5LhbaKUw7Tb1guVxyc3dDbQjz4/GYs7MzTWs65m6Dn44nSFOJ5HnuSlDd92w45IUbKtSDTLGua9fWsPf8cI0ABprx5OW5XC7p2hYhpXOY2mw2rsVjoSue5xn6YOLek4NrKOX+vt0Uh+XzZ4PZMEuS2AzKfimkfIowNnvpuqc3JIRRuVAfTzx1sHre5n/WwLdIMZuFuccMniAEmImmvXid6vVjFCy3G4dBsTd513UE0nM7klIK3wSnoeyvPQ8LFh3CT+xXmGgdJpt/9krR9T2i6/CUIE5SfKmboAJPW7FtDxqU2urGv/U23O+3KNVxdqb11tfbNX7oM5pkpGanbaqatu+YTWd0fc/93T2qV2RpQppo81/rUB5HIXHkAS11VSGA4/GA50l8X5rsp3GI8CgKng0n9Iak3dm/++4754Lz888/u77JaqU10oQQ7HdaujmOErq016yEpiMI9YI/HnWA1FlL5+AZFpM1nU75+uuvAQwequT29oHe8DXsdCvPc9fTmU6nvHz5kjzPubm5oW608GBRFEbsb2RKwhqEMt4NJUGgHZi6vqNtG5Tq6PuWKNIQi6Zp9UDE84nj1EwPe0O8LwHpGtqj0YR3795RljUgkNLn4WHpSue6blC99iK1oNy26/jpp590loXSbkQXFyil2KxWzM/OuDy/oDOUqoPhsC7XK+bzOQrdytgftdBjp3q94K3Q4ShjvV5zOB7pUCQmo5zNtN3harXSa6Hv6VRPnCQgJb++fauz5yAgMwDwttWPeXh4oKj0oGAynbIzzBebbdu1YkG3aZriBz5VXdOqmtALuXpxyTiboKSiKRv2+Z5xmlCWB/qmR8qeQAqSLOLF5QXnV5e8++UNUvrEgSRKY1TbURQVVVGQH49cX10hfZ/AE8jgC7LZCM/QhQTKOFoPv7oe8uIJo6WUsBxuLdPjsq5BFFIfl6mWNO5KWk/ige5DDIcCJyVo3drM0UwW6am7Goy6pwWu2mxxuGs0TUPXmF6X9DjNOu0hhQDvuaBkj+ZnuqxRSPD0l/C1e0wUhCgh8YKIqu7odzor0OcMl1cvmEx0z8wPJKN9SpToJnivdA9ASokfhCRRRpJmSGCz23F/d8vjckUSR0zGE+ZncxCCVy9gvlgQhyG/3d5okw46yrIjPx6JE01rETJgNB7rzCOKWD486tIzyPR1aGseH+85W8wRQg8UDkaHbDKdMxlPSZKU+/sNP/70hiDQopKBHxJFGVmWEkYBTdtwe/+WP7x+zcuX19zd3dG2NXXdcH2tLdPevHnDfq8pO+uVlto5HA588803eF7Ij3/7G0LoSxyGIdNw7LLctm2pmxLPFyzOz3j79i1xErLbdzw83tF2tSulqqpiNI5IkpjttiOMJFk25vp6wWiUsVwuOey1ge50MtX6a13Hz3/7G03TcGlcz+u6Zjqdslq
tefnyFXVdMR5PmE6nPDxo0cswjIiimDwvycY6y8qrkna/Yzo/c4DT+XxOVVXstzvyyZSr8wvaquJ4OHCItEHv3d0d0vc5lgWe7/Pzr7/QK0VdVdRty9XFBbPzBUmWEqcpwtNB6f72lsMx53xxye3tLV2rOJstSOKMqmwo8orXr19T1bW+f6R28KqqCs/vSEdjzhbn3N7ecnN7S1PXPK5WHPY7erT9nR2UBUFAID1QZpjm+cRBiBd6LJf3hIGiawuOxzVZEpghW814FCL6mkkT0VQtcRqRRClK9ByOK6p3R7pWcNyWBL7PanXLcX9A+pK6bFB9z3x2RpLFeIYi9tlgdtozswFl2Oy3ZZmdKNoFb6cy9jmnmLJhgDp97ud6dOoTgU0PCz6NZxPDxwx7azZQyeceAk+wkEEgs691kq09ew9CDyw81esenycRvqQoKrqmJ/C1BHYQBEjh0XYNeV48GagqQ3j3PLpOg1QnkxlSPmmcPd4/sFqt2G63horUkCUpXdNyMFPQtm0RwIsXL/jDt695WD5or8QWgigjyXzdZzRcyziMSeKExeJ549nzIl6+/Ja2q1FKEIYRnpdTFjlCSGN+ckcUJbx+fYbqoSgql9FUVUPXKcJIkKUJTathAaNxxna7YTyeoJSmd2lZHa2n9cMPPzhisZSSyXTG+fm5YwFYnqp9r9boxP7b9lKeJoIB43GqzX3PvnZletumTKc6KOr+kFa5sNmTvc5Xl1dk6Zj7+3vWqy1hECOFT9voDET14MmA9WpLkWtQ9sX5FfP53ExLH+nB8XAtCv/bb79lu906WtJ4PKbve25vbnh80LJFZVGQF4We2hpTXQVPzmDo1snucCDJMgLTa/xwc+OGXl3X8de//pWvv9bv/d///d9p29YZhPznf/4nf/rTn/BMt72sazbGjPjqxQtm8znL9ZrUTCwrg4ss65qZp6FHlqcc+YGDZPi+r+0aleL777+j71vyvERj+CR5XlAUJUo1pGnC3d0N7979hlJweblgsTjTMuhFiSDmuM8dPbJtWw6bPU1bMR1PuL29ZTzO8AOPsvwCNOO0P/apQDIMdPYC22nGp/pPp68PTz0TG2BOBwuf+v65n52ez6f+Zo96xu60C+B0mCDkpyWMTnsD9jrYRrC9cSU6y6mwkim2hBUO8KeUQnraCEOPoSP3+n3f0fc+RV7w/t1v3NzcIKU2tbg4H/NP//RPdF3nRO2k8KnKhvu7R90gbWuKsqIbUKz6tjMiigIvDFFCECUpdVWxNJLYcRxzdXUFUnI8luxvHwz1SpCmGU3TGllk3USPopixH7iyo64brelVHQlDqTmURU0Spxz2RwMelrx584bZ7Mz1VM7Ozthud47j13atw2nZAULXPZG/7f1iNeytzdwQGqBJ9S1hGBsoyMqonGzw/ZAwjFku1066+/XrP7Ldbk3J5LtGu9Ws8zzvmbadFShs2/YZS2E+n5OmKb/d3rrP2WqSWWrO4+Mjs9mMy8tLyrJkvV47eEnTNBwNnGVhFCrs9ND2isIw5PLy0vGCrcGyBejaprol79uenpWpHhkKkw1ClpYHaBXfzcb5CYxGI87Pz12rxr7fOIzculdKOb6pqARtC/fLB9I0xvMCpIS2VVRVQ123ZlIqmU5n9L1ASri6esFiMTfUuT2PDzuE0Dzg7Xatp+xNjaIjyzTQdzabECfRl6EZwwDxucAwRGYPF7kNEKfBZvh6w6bhMLDZ34mPxpj//48h8PKjAQFPkAsNs3jKLF3zcgD5OH0Pp0F+mEna7KCSHmmYIBTa3KSuDDctMlxHwcPDiiyLCaOAx8ectm0clUv7QWpeYV23xqFIcH19zevXf2Q2m7Beb4whiMaRCeGhVEFRVOwOe0bjMR2KtukRsqXrSupK34xJkrBa79muN3iWi7rTdmx5XhPFGecXc7bbNX/72y9OFQSECyaWT2l12Oy1KcoDRakbl+fnGhD72283jEYjLi4uWC5XJEnK+fk5IJxVmhYc1NzK1WpF3bSMp9Nn+l4aIqGR5cN7bFgF2DF9lqUURe42GovNevXqFcfj0TkO2amfpZTZIUBoDGy++eYbiqLg6uqKsiwdHtCyAeI4dhpnZVk6A5bYSPTc3d0xGo24urri8vISKyZghz6aOH90KhwW7hIEgfET1eyEYeN9NBo5FY62bZ1jlQW9alFNXcpayIsNeMvlEqsWYvmlSuns1GqK2RLSMgIsj9ip/ZrNuIt08Doaj1MrA6TbR5I0yTg7mxH4IW3X0LU9ucwpih2Hw5HVcstonHF9/VLzSCOts1dXjRleCC7OL/A8uL+/wfM8Xr66NtP0JzaGJ32SJHVr9KNgZqcDH/WRzA1k9YmGWcmwbLRR/kuL/zTgPc+s9FRwGJROoRzDctRhzMxOjlLPnvsUjHCBbBg8rXqADWadmZB9LvMbgg9tYLYpvup6Sr8gMnzPOI7MeDxC0ZkpkJ0I9waeoGWntcvQlsfHDWEQEYYRWTbm6mrMq1cvGY8nFEXFmzfvCMOQq6sr+r5juVwZRsGILB3TdIpjoSdX7eNS72oGArE4m7Narnjz6y/4njZlnU2njLORhp0UlSGVd6RpZq6Pz/GYu6xEQzSg61p3L+hpY4mUgvF4BGgnck1V6RiPp+R5RZKkvHr1SjvCez6+F/Dh/Q1ZpjMk2+KwIoyWlmM5eEIIl4nZhWmFAi4vLw1UAd68+dVpk9lgYR9vs7kwDJ0w588///zE4hCCyUiXo1aOyE6cLXDUBljrzr5YLFx2tj8cHGK/LEv+/Oc/s9ls+POf/0xVVcznc2cObAOODYaWjvTixQty87O7uzvqumE+P2M+nzthxeGUd4jwt9NHO4yw3pcWUrHZbLi4uHBa/lYsoWkarq6umM1m/Prrr44naTfqyWTCxEwj21rznnMjK26ZDnaD88KA0XhE1ynKQ0XXK4T08HzdU81GI8Iooldw3B9Yb/Z4nkApPcUVCra7NUV5xA80EH4+nzn4yXQ6NdJH3bPk55PB7LSUG2ZVFveiJ0aV0zOyjzkt3YbHMCjYgHYaOECZjONj/8tPZYvDEa1mIPTPgpV7nPnCvLbVThsGs67r6Lvuo/MfBi8LWByeu02z+7ZD9oLUlAiLxYLFYmE4mRUIhe9LUzbUSE+f1Xq9ctLdk/GYNB2TxClBGBh56ZjHhxU3tzdcnJ8zX8yJo4TD8eBoP3GUkJda7mV72LPb7TkcNF1ESsFkPKGpO5qqpm466kbLSmejCUGgpWV2h4J3798zP5vw9VevEQKnVhqGPhjoiZXf7rqWyWTMeJwRRQFpluJJj/fv33M8HvE8z3HxgkBPTH/99Vfu7+9ZzM8Bi6qPXIbUdh3748EFEnt/1XXNdrvl7u7OqcMOnbXt4+1jLei0qir2+z2r1cqVVEOfx6H1YZ7nHA/amm1iqDNWWaMoS8ZGzkcM8GFW7NPKSCugrCs3sNDvv3L3i1V+tYDZxWLhsr7RaEQ6yp4JFlqSu82oVqsVi8XCab1ZalnTNK401EMJ4QQQoihy0tO2TLZrxHKEbXZnp8dCCK
fOYfmjsQHNhqn2gm2NmYj9O1JKyqbm4XbJIR875RN7zbMsI8syXr165T6Xug7oO12C2swyCkI+fPhgSupzd75DcxUrTPBFCaDTzOf05xb2YLMSm5Gdlp3Dr2FZOuyP2ZvI7jJPQfE5pu3jLOvj/p17PfHkKvUs+JnnWHwYRqZnKNUzfI49d/u6tkFtM1en92XeUxAE4AeMkhFnswXT2cThq+xN7/nS7aRCQGeyOt1E16CVy6sXxEmGUnr6ujvkrDZ6AFDWNfu8ZLn5xagqBIzHE+I0pahrfru91zpQTc3+UHA4FKZPkuEHEVXd4PsB8/MLlILp7Iw4GbFerxyebLVc40nJxcW1OSeN0o7iGCEgSeLB0EKZ6yJou5qiUKA0LGK93ppBhuB4XJsyrmK/OzAajZ2ixVdfTUy2VNC2Pdk446uvvuL29smhyJaEw0AlxBNQVCnlSiMLFI1jQVU1xvouNEGlIUkyfD+kKCp8P8TzAgd8VUpnlHGaIAMf4Xs0fYcMfLzOp2xq/CgkCSPOLy/MhHNF1Wrc4dyYkPzHf/yHK+EeHx85Pz/n7u5OT29NgLXkczuhHYJjt9ut68lZ6psF8VrQ8HBdWIu3IeZqu90+K/9sz82CtocaYvZnSimD0dPX12L9LKjZnvfZdPZEzDfrx/b1jmVJHKeMsglS+Oy2B+qqJfCVgwHVVUtTd3gyIAoT2qanKgu6Vos/XF9e0LY1X331Fd9++y339/cIIVy5PhqNjKCm/2yj+GQw+1y/C3AoaVte2FLDBpDTQDYMPsOg6DKpwYeidwv1PAgNjs8NFE5ff/h3npWjJph1XQdCPQP/DR93ev42MxvKBA2Dmf1APeEzGk1JsxQpPFeeQG92uoS2rZlMx8RxxG63pSxqR3Ha74+EUQxIjkdtdrLb7QAYj8fM5+eOs6eU4urqijCMOB41VeT9hw8oz0MEPgi9MKMoYrFYkKUpdVXTdx1hqG/CKElQwHq9oes6xuMRF+eXJHHEygwGwDhRl9qd3ZZ4GEyglKDoyfMjbduB0ooXh8OR29tbXr/+g6PbWNS453kmwzh3O3ZRaOjCdDZ1O3SSJEZJd+UkcdI0fSZyaGWV7ed+NCKIdvEvFgu++uorrq+vnUv6brfTgowOV9UanFpmCPaFQ6nb881z7apuaT1xHDvyuc0at8YU2oJ5pZR89913+L7vfB8sHcqJCxiakoOdlAXBwIty6EJl7z+r4jp0qmqaxl0Xz/O0uq3JHrWZcuYCnx0sWEUX25M8Pz93maL9sv1RTQWs3MTU930iP3iG6bRtk7womc5mTljSBk0raWQ15Cx20OrWnZ+f8823X3HYbXn11Uu+//57JpMJ9/f37u9YCS9NXm+fBfCPgtmwTPxcdmbTX3vRhhmZTfuG2ZINBENYxJAuNJxsnh6fCq7DrOkU9IrQzuS2/HDBSFh/TYPoH7g6DTFp9pzsa9rzt6WMBQnaoGhhAp7nEYQBnic57A+UfkkY+qRZ6lQNkiTCDzzu7+/48OE9Qjzh4rSm2EvKsmazWxkepsI3qPzAyGivNxtyI+/y/sMH3r5753anKIlRUrDZ7ygLXRbpckSiVMfZfMpus+VwKMiyGWkas99vSZLIAVn7VpOhJ11r6EJaMqbve46HA2mWGIpYgJbMDtyGBjrDlFKyWCyIjWnMdDrFk76j+0wmU9fnWa83VFVNGEZOtFH60kkmWTOLDx8+uB16Npu5hXh1dUXXdS5jSdOMt2/eEUURo9GE9+9+I000Ub9rlaEx9Xz91bd8++23/PDDD4ZgHvLq1Stm8wn/73/8P6zXWm11PJ1wc3ND1dSMpxPyPGcxnXB7f0deagrVy5cvqduGh8dHJuPxs57ydrvl9evXxHHsNMPsfWUDkg3a9jmlcTufTqdOlNCWfb7vM5vNuLu7czaCw/6hzdLsfW9dw22gs1minR5bIcYkSTT7wPcd88JmhOu1nigGppIRQnB+fk4SRs54JQgCXrx4QZQm/PLmHUWtYStRmpCaLA5wctuWHhYEAePZlKbv+HB7QzZKOO53vHh5zWq1cvppSinTa/Vc9t116hlN7rM9s9OJow0Wtkk7LB+HwWv4fBdIBo97Prl8juPSgeJJQuZTgez0b3qDC/y5DE1KiTTAjDDQrHyheBYE7TlYjNfwdYbvYVhm2iDkykygLCtkmJAkAWEYGR/Kmq7bURQBcRySJlqkUO+esN/vWK4euL2/Q4iAoixp6g6EIQV7gr7vaLsGBERxCAqqumS33VOUBXEUE0TaD7HvGi3dHXp0fUNXN7RtZYjR2leyrkv2W+24LT2IkxApJHXfotHxHZ4n8bwYz9OczKLMnXepEDAej0j6RT+RAAAc2klEQVSS1Khl6A0uy1KSJMX3ddBwn2vsmwa95PHx0U3vDocDQnhu5y6Kgk7pxrbFgGVZ5qzn7OK+urpym4gQWo5nOp1SlVr/Sp9L5iAVtnyyChdlWWq1V6N8m6YpZVWyXnUkScbj4yOPjxpYbPXnrCyQVs6Qrry1clO73Y626wj9wIkftm3Lbrfj8fHRwUxGo5HTx7eBZDQa0XYdRaV5le2g6rEEb7vx3t3dOW24YcloM7eXL1+y2WxYr9dOB83en7YPOQSE29e15tC26rLDF+0n0ROZIcJsNmO/37OpdR/yH/7hH1gsFtw/PPDDDz/QtD3no8xxWu/v710LIE1Tvv/+e3766SfatnUltQ262/0e+o44Spxyx2g04ng88vDw4NgzT22pp7X62czMllOnGdppL+p0MnlaOp4GltOM7bQ3NQxyw+d+KZh9Clox/LA8z3PBzJHJ++dTVbvo/EGAOz1v++8hH3WYGfaqx5OS0XjEZDyh6/T0qOtbLi8XXJpeQDZKiSItw/3hg55spWnKeDZlt8s5lgVt04KApm51f031yFziBRLRS3bbHXl+REjJdD5DIDgc91R1RTCQFlZmwYS+TxyFqK6HvqPrWva7HQJdwqZxTNe3hH6KEC1NU7Hfd09QCKHchFabXejdv+u092RZaBzVYnFOUZRstzuzY+vg3dQtq9XGlT/aMMWjaxVC9GY8rzgWB8q6NGWvlk6yO/nLly85Pz931Bzb2wHcVDEItPuPLcVGo5GTelZKOQiG/RyTJHHlnBB6g5jNZjwsH1BCa2vNjTO7HRZFRkraNriLqqJqGuqmoWs7vES6qafNGh8fH3n9+jWPj48ADvBrg/RsNtNZy17LfxcDyIXlL9r7dDabOe9QO9wA3MZugcVTo432ZGqjhyT2nrVZjjVGsXi+0cg6gdXuZ0IIPOPWtVwutfLtmQYKv3nzhtvbWyJj5Lve72h77aGphMIPPeptxWa3RqwFf/3xP1mv11xfX2tISFPx3dd/0kOXu3tmk3N++eWN8dUc8+HDDfP5nL/85S8OBzes7D4bzOzxqd7XMMCcZlqfe9zpaw7Lt9Pf65/ByY8/+5qnQNu+712j3/7cfmg2mLnzVKfSRU9BdfgeTzPIoRKt3Znt730pieInIrIOeBAGEZ4X0vdWwFJQ1y0gOD/XAaAs9fStbju6t
qYx0i1t12juYFujZZoTuq6hqkt61THKUqZTTQp+fDwymWQsLhaMspFDyUshGI1SxpmWYw78GMx7CU3ZkqYJRV7QtDpT65uGumnd7m+13MuywtrubbdPJiVCCMpS05Y00ttgv0YT4jjh/v6Bw+HIeDzm/Pyc4zE3Tt8hZVEymUwMvejI/rjH86XbrW3AvLm54fLy8plyg5WUiaKI29tbFvMFDw8PrvyyQwKbDVmak+/7pldXOF2tyXRC3ysO+4PLoJRSXF9fa9qUea5V0rANc5tBKqDteye4aDFhcRxzfX3N2dmZI4oP2zH23m3MBNMqitjycai7Z+EY9p4byvzAE5k/iiK+/vprptOpg4D4vrbJsy0fu4ZsMLODONvzs+cBTxPLLMtIY43322w2rt+nM8sWJTSI+lgczETbnndjMuOG7XbP1dUF33zzDdvtGiE85vMZTfOkwHM8Fvh+SJJkBEHkbA8tMd+ubd9/QkV8ks70qankMDicBqJhQ3wIufhUI3/4s0812eFjWMWnDhtITgcIDq0mniAgtvmPmRDqMlOB96QAMOyZfSrQDt+r7RcOP3zP8/B8j76H/e7AUR4NqfxMSxc3JT/++ANJEvHq1SsuLs/xPMF6bQUIdaDv6kqXk0AY+kZ3S6vwVlVpXMcDZjNNkNY4N21rpv+WR5qExKHHsS1pqpy2aajLI7u11rpKopgkjpEopPDo2pLtpmB/2LtSLAxDJpMn53WlFE1Ts16v6HvlemxJkjixvoeHR5aPa9pWg21n0zmeF7DbHdhstuZz0ni2qqwJ/MipmZRlRVFU1E2DH2ihy9vbW25uboxBsuYb/vWvf9WNYqNPf3t760b2P/zwA0msm84W6FvXemB1d3dvCNIe4/EEKSXj8YSm0SodWrLJJwp8lssHwjB00koW8mCxW3azGlKrbH+q7zomWcZ+v3co/Lu7O77++mt+/fVXNwixrvZCCFf6WVCvXRPWcckKFtogYwdASikXtK3MuB1s2MxztVo5KSNbng0leez9bGWxLRTGKoHYrMze67ZUtOcshHAOVFVVsTseGJ2NHVi2rFo26y3rzQrVw2ishQb++S//hO8F3N3/RppkPDzes3xcMRmPWS03/Pm7/8rbt2+5vbnnX//1X7l/uOV//I//i6++emkya2OF+KVgZi/YpzItGyROs5Xh44dKE/b7KRTDBqJhwHyeCX0+mNkL+6mSUinluJXDIKeUotcUAJdZSUOMH6LI4Um4Dp4Hbvt6dhezfQW7c0kp8Y21WRLEJMZh/HjULkDSA98PAM9Jo6xWK+7ubumNGOB0OqVqGzwpCUPfZRy2X2IDjT1P+2/bt+v6lqYuUF1N24Ine7IspO98VN/TNDWCjiCAMBRIoaWig0AaH8WWIMDc+BFxHCLEkzyMHb9bGEnXKrpWMR6lbhCgp9E6QK1WKwODKAZeo70WbzRNf6vUqp2cfJq6phc9UaSDpV3ggBs22cmiZRDYa6NVaLecnS2cLZsVGLSfn52Ibrdb51A+Go24vLxkuVqSpAlVXT3TsLdyOxahP5vNHILfrhkruX44HLhcnDOfz5nP52w2GwfL2O12jlZk+2B2cmtlx+u6JjCNcZuNSCldA9+ev83ArF6fhZfYTMkGMqWUA/cOMWSnOE+7KQ8TBLtp22DadVrHfbPZEIWaKXE4HLi7u3N9y5cvrynaiqI6sj8caZoaBEwmI5TSDJ88byiKI1XVUJY5i8WCsszZbNd89eoVWTRF4HM8lnRdw+Pjks165zYO3Xts6NVzLcUvymZ/KoANf3/a/zr9vQ1kw2A2DHr2wj0PHGBRvadZ3LDPNsS4DYPhMJscPtZKe7jf83Ep3fdaw/xLpeawtB2et4amdHR0yEhbt9V1bZqnLWdnU6N7Hxoxw9+QUvLVV6+ckufN3Q1RFDFfnOH5nut3WBClH2ijWtsc19nF2OlS7XZbxqMZvoS+74hCn8V8RhLF+GYwIxAkcewyWCtFbRvSxzyn7XpqszgFkjjW6b5evPq97ncHw7XTHgJKCaIoYTabIATc398bCaGRyeQi6rphuVzpMquqkdIzZSvs90cWizl1U7HZbxiNMicXdDgc2O/3vHjxguvra47HI+/fv3ecRjttm81mVFWtVSekloeez+fUde3wVjazsFZz1iZOCEFTN9SNhlmE8ZMgouVdDqXVn7icHuv1mrIsjaCgtrWbz+cu85lOtdv6y5cv6fue29vbZ8KGNruy95FlodhNyn7e9v6zZbFNPOzfyfNcezOYgGvvW+u3YDM6G6DstHgoSmo3aDvJtNlh13V4QhDFMZ7QRjR7YwdnMWdWaig9y0AqkAo/1N4Ro1FKXbccDjv+2z/+d9q2Bin4L9/9kYuLK+7vb5G+oG4avn11wf/9b/+T/FgynY15+/Yto1HGP/7jP9K2tdlMKuqmouu+MM18dugN+OMel1LPGltSCBgEDvuhDJH5pxnOMKI+7QJmaOD+zPPppA1Qp3264Ws/O/3B+dBruW1pA696/jiXiZ4Ex9O+3BAgOHyvfd9rORJfcL9+RC0VSRwzHhuyd9/x/ua95juORsxmU6Qn2G51DyVJMl69eIUMfPbFAU8IsvGYKPBp+05PJJuaqihRKKbjMUmaEng+hyKnLiukEkRBTFkVrJYb+q5xgnhZlhH4PmezGUkUcTw+B2Xa5naaF3Sdom5MBis9uk5r1t3f615QmmR4XsBolOD7IU3bsd0ukVJyPOb4vue8F8JwR103CCGdEw/YQZI0i7U2OKwj09mUjp5edQ74eXFx4VRFhdBy0XVVMclGrB6eXIWOYg+9ctpa1m3+4uKCsiyd2KW1irPii1EUGRf1iKoqmYw1DjBNM4SEkTGH7pXip7/9jfv7e0dw9jyP1XpFVRZcX1/rZruUzCaaOB4EAReXlzRdSxhHLJePTGb6vDzPo6wrkAKkoEfLwjdth/Q8egRt33MscucVEIQeeXF4ggh5oVsXwzUyn88B7dNaNw1Vo69xEIWaqSKfpLE9oUG1fdvSmuzN93ykkuBBahR97f2/XW+eKSVLKWk77SdQNiXz5pzJbMx0PHOfd5amnC8SpHzBH//4R/7t3/6NIj9wNltArxBKakWYtuPtr290iduUSKn165SxBPR935DfS6q6pBkIdH48zRxokulGud7l3e/t4u6tmKIJCBZAKnWcC3xNntZxoKfvnqYPljYkBQhpA4ai73qdlamBQKMLKgIhJI1prHdNb2Kqdmvq+6edZXi455t/+0HgHiOljyZqa8J2GPpEof+suX86ybXPFeIJpvFUkvccqwYhJX7o03o1xxbKvDQ9u57ZZIIfBBpNrrQQYBBMiEONE3pYLUEKwigkCVPm8ynL1QN3mzVSKMriiOdLfC+gaxp8JKJThF7AeD6m7dBUpn1nms9zylLwuLynLktmswlpEtOaEsr3E9abg5GYntM2PV2nNLzC09I7io68OBD4EeOx9kQ8GKybBT7mVc5qucaXIRfnFzRNy/FQkYuKttPTUb1hSeq6oaobpNDwjaqq6TrFbn8grwuQWj0k
8H38IOCY586yDKXwECxmc46Hg55KIzju9tRlhUKw2x04bHeMx2NeXF7poIOgKStEqBBRjOgVdVFy95smMk8mE+5v7tgf9lxeX1JXLZIaz9fleOAFdH3HzCiw9m3Lzf2NaVs0JGnM337+kcV8wYvLFzw86omrBuzGvPvtA/tjgRdEBFFCpwS7/YFdXqCCgN4057fHnKJomZ9NKcqa7f6AFIqzxZymqdjsNnqwMM6QCNq2o29rjvstTdORxjF9B/vtgThN6Ht4eFxqvUDp4YcR2+MG4ZUEQYQvfHLja1rlejoc+hF0IISEXlHmT59daR4TxzF5fmBypqfNURpzNDCa68UlYRgTBLoaaNqarmgo6wNBEPDbL2/xOwh6j3KXk6+PWpG26Hn47Y4kPFAWW/78pz8gpeTl9RU///wTkzMtY1SUNe8/vNdUKQPY/WQwO+2Bfe73pz2zj4LdJ373qezp+dG7oGQpTUp9GrLx8WFxYM//nv1ug5nFwQn1MU5O/+5j/bIhzONTZfjwPdWqwRN6WtTJnrqtoEWXucBus9G9hesXWie+qtnv9uzVTgNje+P/WVYcDjs+vIeyzGnamixLBpleT1U1jjdXVTVx3bHZHNjsjtRNT7k+cP+4oWlqojDgbDbl7m6JlIIo9MnS0r2fw2HP43IDvSBLRkxnyikz7Pd7OmVYDKMGhe6rVVXF3ozJlVII40NpgbCHwx4jpIzv6z4gCKIodDpvRVHRtr37XIq8oKUhjDStKE4TJLr8aJoGKQR1U3HY7zkejtSmRIujCIHAjyIW83MKg72yGY3t+1kGhTWUARxtR5fyPruNLulmX0/p+44iz/ltt+dh9UAcx7z+4x94eHzAX/mmxNKlmh2UCCMcWpalJop3Wm0iThOEJ9kdtNN7Xhbsjwc6pak+ZV3TKQiigN0hZ7PfEccR89mEOAlothV53tI0W3xfsjjTDAoPaeSlNNxjOjnT16muqVsz8AIQ2v91fr5gMp4wG09oq5q721s2yxWhHzCeTqiqBoFkMp5otydh+8Xes4pLCJ2pRXHohiMoQSAD6qJmt86p6oK+7/B9rTQShB7v3741n2WICAVN3dPVHXTQq57fHt8biMnBKB3/xN3dHevdVrt8+Vr2ars/Ig5f0DP7XCD60gIelmme90TE/nv9t88FO6WGjABx8pxPB8mnDM4fVsBPva7Bv/U5P2ceuNend8HsNHDb8vhTfx9g+FP7OIm+gQJPlzcvL684OztjOpkihaAz9LDAQB+kF9B0jQGn1pRVwXQ64+JiwWQ6Ic8PBgPUkOcrR+1ompZeab7ibH5B2zXGjOLI5eUrzudzuq5B9Z2WXQkD02TX5cZ0OtHlYA+jbEIY675QVReUlZZdieKYssxR6Elr29YUxZG6qU3fSdE0FduqRkupd0CPkBDHIZ4v6NrWGH6YkXteGhmaGD8M6FQHnsL3PZIoNhlATpHnRGFIaiR2rFilbYJb8cHD4ehQ7Zb8bMtTO8WzgVgpLZFjBQ37vmdxvkAIHIQB9P202+/YbDbMzrRuv1Wu0G0UPViwtDRtn6cngw+PD1RNQ9M2WsRTCDf9rKqKttMlpe2PKSXNICFHtQrf8wzbQprpsa4glHpi29Ap95rz+Zyryxc0bUte6lLY9338MHCVRBzH1I3ub6lW36dZliHNOYF08JU0zSgrDWmZzXRpbsnjlrMJmrA/Ho/pmt5Qykr2+y1llZvfe0beSL//JEkIg8QNUuz5SymZzSZmI9BQHI349zke91RVQZxGVHWhxRsGxxdVM4YTw2eLdvCz02P4mOHzP5VZnTbYbaB6VuaiXHamnyM/+Xy7+0r5GUiI+XKlYqf7Z6fTUCE+PofTyeiXjjiK6XkaoweeT5REZGlKEidOGnu72ejFgOaohYH2cNQZok8YhUCCHwSMxwmLxTmgDNRAummw73tG9aClaRVxklCUJcuV7mF9//33fPPN1/hS8vh4T5alCBRxpA1nlcm4fF8rXBy2B4oiZ7Ves9lqf8og0By8XinyoqTtWtd0thM0bQAMyiiLBkFIHEfoj6tnMhnR9frGTdOYs9kZcZyS5yXacVyboLSqI4gC/MBnZHpfy+XS9akkQpeanm7kV6b3s1gsEFLS/XZDWTWMDfWnN2R+uxFZus+QU2yZB9PplPPFgv1eT1gfHh6YTifMZlMm0wnSlxzzI7/88ovLvgAnS2Qb5ofdESl0k71pWwJw0AeLj7PwBwYNeSm1q/x2uyNNUjw/IwwDqqqmLBt8z+P6+oowDOhaPcmrq9pl/XGckGUj5vO5zqiPB7emHd5SavzearkCegKpKYmz8QShYLPbMh5PORq3q67vzBCrYzRKnR5bb6bjeZ6DUE8uXG1DvtdWhr7vk3qpuU801EOhteXSNEP1kuVy6YJpmo7MlFc+42BaiIu9hlaJZMi8gU8EM+c6dJKhfSqb+lzTfRjITpvpp8OAZwFnEDS/FDTs1HP4QUlpkftPAelTh3p6kZMg+NyJ/TQgfg6mcvrd8wJU1+phg5SEfkQYxHgyoO/h9vaewA8YW6G9JMX3tNdg07SkWUqnejxfmgxVN2u32y2Py0fattI8w06fjzbX9dnt9mx3Obvtjl/fvqcoc/7lX/6Ff/7nfyYIfHbbDS9fviCKIjbrFUHgOxzV4bCnKHI2my33N3fkx5KiLLXlnQHVxnFMa6addnpoWQaW4weQJilZnLoAKYTC8wXT2YiuawkCj9FoxGw2IYpiPF9QFloiCCEpC33Dx5HW0b+6vCSOInLjjr6pN7qBLSRxFNH6PgqQnkeWar20INQLa7fb8f79eweWtdZ0m82G+XzOfr/nxx9/dKqtURSR57qRb1VhNaFZTzmbtmGz3qBQBF7g+I1NUxtGQqkB0tLT5xfH+F1HYgLfztivWUB1XhrHoe7JTayqGvM6IVLqhaqzn5ZkFBsgr6bdRYGGXPjCM+DfmuPx4MyXj/kRhHBqHE/ZXcJ0NiUJIvqm5f7ujiCKuTy/YHFxThDGrAbqHJqStUcILQllhRxBK3wEoS63oyiiKmsDacnIsgnSw2DWjrSd9hl1MJAWl1lrF/dzwjDkw4d3jpO7Xq+xvqfJKKPve/e5AE5SHT5DZxpOHe1CPs2sTgOPDYCnpeXpgj/93WlmpjPDIWD39HUkUoKST/QpHdCsokX70bk/e3+mN6aNesNnelae59F3zbPANrwep5nq6XsAaOoaJcQTTcQQ0i0KexSnzKYZi8VC35hdT1PXCKXldUajEW3fUzcaSJtmmvpU15UpqzqEKA0dSE8INRYsp2u1WON8Puebb//C69ev3W4W+IEmnPc9RZGz25SUBv1+OOyRUpdhZVUhBGSjjDAaTMqkJDQjeLvhxXGsR/X+U5k0SjKyRJcPVa0nr74fEoQeodB+nH4gQfQIoctVzQPFeJqGCANIfnx8pDMLx6q1lHnBZr0mS1Pk4HPZ7/cGI/jkoWCnzEN7siRJ3HR0qHu23++N03bJt6+/cTZuZVmy2axYb9bEaUyvesaGldCh4SBZppHp1qg2SiLddxKCsqq
QvodnpuC25G0t4LlpqI0CiA5cvSlFO7RVdUCWxKRJRBxFeJ5w8tNgqidzj9ogY+lfXdeRpKn+HM39bQP648MjvpB46NLy6vyC+WLBZruhrp94nhbd33WtM2Wxw7Eg0CBsP/CcHl9dNYjeGCq3um3hStFAtxp2u52m6+Gj1HPhV7sO7d+tqoqLiwseHh6MwsuaMNZDp8BMou3xRWjGKQ7rUxnMcNE/Pe5jPuPfb+DzLKg9L+mGz3uabto3/pxs3j0LZC4wm1exr2mNfO2Nf9ofOwXMWpDql4KzAuq6xQsCvEDjr8qyBqUIg4AwCEniFCl98mNJfixpTO8mjiKy0YhmuabtW8pKgxvnC61a2jQ1YRgghJ7cNnWH7wujHKBvsrZr6ZViPp9zfn7Bbrfjw4f3SCnwpCA/HpACqqqka2tub29Yr9f0fcf5+cLhuoRCN1k9YQJoTVn29EpnWRcXT36NbddRNz6ep7O0OAyZjEYGCAu+L8lGCUkSmaCly2gpFX4AqYwMod3XPM4+xQ8i2laXeaUBCldlRRJFBCOPqizxfB8ltFKIEIL98cBmtwUkvh8Sxhrxnowy8KTR2ipojcLvaqu9G8ezKUopjmXBbDHHC3zevHnjsG22jO9VT5Jpc97tfuf6ZUmS6CY5ODkiBAMp65zWPLaoSpfN9qrX5XvT0J1UMlIGhEGAkBYFpafb2iNB05wskPWwPyARz9ZBURT0SpHET2oceVm4Pu7j4yNv3r4hwGM2mZDEehOVQrBer2majta0D/Rno3ttVrLaqsUEgecgPbbH2HcK0Qsj912gMVBaKioIAgd96voOKXS/sW46I6wpHKRG0dF2NaNxShQHXF6ds1yv6bqGKNIu7tIX9OoL0Ay7sIfl4vBnfz8oPS9LT79Og8zpoR83lCH6+Pe6h6b/fwrMfd7/GmSYJqB1re419YMAOXyfchAo7TnbG+2UrvWJk3ffBFqUsS50MzqenWkpHIv6zgsC3yczWlGB56N6xbE8goSm0TtfaJRk+x4CP3I9IBtYq6pyWmAA19dXHI9H/tf/+p+MDV7qeDxw2O9A9URhwGym6UFS6GxQN461we1us6cqNJzC9leSJEFISV7kdEppfFsQ0Pc9y9XSuYnbjUFTnHyiyCdJY9I0Qbv15EYKKTG8w9iUy4XLypumohUtaZrpm9qUOV2rDXotadq+f52Z6kW4Wq2Qnk+Sjihr7SCuyfFafsb29qxKhpXlsYRzC6ydTqdOn0wPSAK22y3vPrzTjXJf97gCs7kcDroEruuaUTZ+hpwvy5KmKh3n8un+eVKZETxVOErpIJImqXYfchgzpTmRqkUr/E5IIt3DUmYarAUCGofUt5u1bbLb9Wc12a4Xl0zHY47WCd31Ej3oe3dt4kSXqcfj3vWubCVmKxC32RsxT/3/Hs+3rBnQUvG9C46n60sIQZpqWpTtLSul6WAWWPzixQuCSF/Xoq4cTxNA/L2G9u/H78fvx+/H/w7HF9KM34/fj9+P34//fY7fg9nvx+/H78f/Ecfvwez34/fj9+P/iOP/A2MP1qPnCw9LAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "%matplotlib inline\n", + "# 验证训练好的模型\n", + "\n", + "img = mmcv.imread('data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg')\n", + "\n", + "model.cfg = cfg\n", + "result = inference_model(model, img)\n", + "\n", + "show_result_pyplot(model, img, result)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "MMClassification_python_cn.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.11" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "31475aa888da4c8d844ba99a0b3397f5": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "520112917e0f4844995d418c5041d23a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "863d2a8cc4074f2e890ba6aea7c54384": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "8a8ab7c27e404459951cffe7a32b8faa": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": 
"@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9f3f6b72b4d14e2a96b9185331c8081b": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_be55ab36267d4dcab1d83dfaa8540270", + "placeholder": "​", + "style": "IPY_MODEL_863d2a8cc4074f2e890ba6aea7c54384", + "value": "100%" + } + }, + "a275bef3584b49ab9b680b528420d461": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e310c50e610248dd897fbbf5dd09dd7a", + "max": 14206911, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_31475aa888da4c8d844ba99a0b3397f5", + "value": 14206911 + } + }, + "badf240bbb7d442fbd214e837edbffe2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_9f3f6b72b4d14e2a96b9185331c8081b", + "IPY_MODEL_a275bef3584b49ab9b680b528420d461", + "IPY_MODEL_c4b2c6914a05497b8d2b691bd6dda6da" + ], + "layout": "IPY_MODEL_520112917e0f4844995d418c5041d23a" + } + }, + "be55ab36267d4dcab1d83dfaa8540270": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c4b2c6914a05497b8d2b691bd6dda6da": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": 
"1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e1a3dce90c1a4804a9ef0c687a9c0703", + "placeholder": "​", + "style": "IPY_MODEL_8a8ab7c27e404459951cffe7a32b8faa", + "value": " 13.5M/13.5M [00:01<00:00, 9.60MB/s]" + } + }, + "e1a3dce90c1a4804a9ef0c687a9c0703": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e310c50e610248dd897fbbf5dd09dd7a": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/docs/zh_CN/tutorials/MMClassification_tools_cn.ipynb b/docs/zh_CN/tutorials/MMClassification_tools_cn.ipynb new file mode 100755 index 0000000..1914956 --- /dev/null +++ b/docs/zh_CN/tutorials/MMClassification_tools_cn.ipynb @@ -0,0 +1,1247 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "MMClassification_tools_cn.ipynb", + "provenance": [], + "collapsed_sections": 
[], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.8" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "XjQxmm04iTx4", + "tags": [] + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4z0JDgisPRr-" + }, + "source": [ + "# MMClassification 命令行工具教程\n", + "\n", + "在本教程中会介绍如下内容:\n", + "\n", + "* 如何安装 MMClassification\n", + "* 准备数据\n", + "* 准备配置文件\n", + "* 使用 shell 命令进行模型训练和测试" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "inm7Ciy5PXrU" + }, + "source": [ + "## 安装 MMClassification\n", + "\n", + "在使用 MMClassification 之前,我们需要配置环境,步骤如下:\n", + "\n", + "- 安装 Python, CUDA, C/C++ compiler 和 git\n", + "- 安装 PyTorch (CUDA 版)\n", + "- 安装 mmcv\n", + "- 克隆 mmcls github 代码库然后安装\n", + "\n", + "因为我们在 Google Colab 进行实验,Colab 已经帮我们完成了基本的配置,我们可以直接跳过前面两个步骤 。" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TDOxbcDvPbNk" + }, + "source": [ + "### 检查环境" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "c6MbAw10iUJI", + "outputId": "5f95ad09-7b96-4d27-dfa8-17f31caba50d" + }, + "source": [ + "%cd /content" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4IyFL3MaiYRu", + "outputId": "b0ab6848-12ea-49a1-98ec-691e2c9814e1" + }, + "source": [ + "!pwd" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "DMw7QwvpiiUO", + "outputId": "d699b9d2-22e5-431c-83d8-9317a694cb0e" + }, + "source": [ + "# 检查 nvcc 版本\n", + "!nvcc -V" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2020 NVIDIA Corporation\n", + "Built on Mon_Oct_12_20:09:46_PDT_2020\n", + "Cuda compilation tools, release 11.1, V11.1.105\n", + "Build cuda_11.1.TC455_06.29190527_0\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4VIBU7Fain4D", + "outputId": "7eb1d91f-86c7-43cf-d335-3d37ae014060" + }, + "source": [ + "# 检查 GCC 版本\n", + "!gcc --version" + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0\n", + "Copyright (C) 2017 Free Software Foundation, Inc.\n", + "This is free software; see the source for copying conditions. 
There is NO\n", + "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", + "\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "24lDLCqFisZ9", + "outputId": "3c553c42-e7ac-4c6a-863e-13ad158bac22" + }, + "source": [ + "# 检查 PyTorch 的安装情况\n", + "import torch, torchvision\n", + "print(torch.__version__)\n", + "print(torch.cuda.is_available())" + ], + "execution_count": 5, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "1.9.0+cu111\n", + "True\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R2aZNLUwizBs" + }, + "source": [ + "### 安装 MMCV\n", + "\n", + "MMCV 是 OpenMMLab 代码库的基础库。Linux 环境的安装 whl 包已经提前打包好,大家可以直接下载安装。\n", + "\n", + "需要注意 PyTorch 和 CUDA 版本,确保能够正常安装。\n", + "\n", + "在前面的步骤中,我们输出了环境中 CUDA 和 PyTorch 的版本,分别是 11.1 和 1.9.0,我们需要选择相应的 MMCV 版本。\n", + "\n", + "另外,也可以安装完整版的 MMCV-full,它包含所有的特性以及丰富的开箱即用的 CUDA 算子。需要注意的是完整版本可能需要更长时间来编译。" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nla40LrLi7oo", + "outputId": "475dcd11-0b58-45d3-ad81-a3b7772d3132" + }, + "source": [ + "# 安装 mmcv\n", + "!pip install mmcv -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html\n", + "# !pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html" + ], + "execution_count": 6, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html\n", + "Collecting mmcv\n", + " Downloading mmcv-1.3.15.tar.gz (352 kB)\n", + "\u001b[K |████████████████████████████████| 352 kB 5.2 MB/s \n", + "\u001b[?25hCollecting addict\n", + " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from mmcv) (1.19.5)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from mmcv) (21.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from mmcv) (7.1.2)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from mmcv) (3.13)\n", + "Collecting yapf\n", + " Downloading yapf-0.31.0-py2.py3-none-any.whl (185 kB)\n", + "\u001b[K |████████████████████████████████| 185 kB 45.4 MB/s \n", + "\u001b[?25hRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->mmcv) (2.4.7)\n", + "Building wheels for collected packages: mmcv\n", + " Building wheel for mmcv (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for mmcv: filename=mmcv-1.3.15-py2.py3-none-any.whl size=509835 sha256=0296cfd1e3e858ece30623050be2953941a442daf0575389030aa25603e5c205\n", + " Stored in directory: /root/.cache/pip/wheels/b2/f4/4e/8f6d2dd2bef6b7eb8c89aa0e5d61acd7bff60aaf3d4d4b29b0\n", + "Successfully built mmcv\n", + "Installing collected packages: yapf, addict, mmcv\n", + "Successfully installed addict-2.4.0 mmcv-1.3.15 yapf-0.31.0\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GDTUrYvXjlRb" + }, + "source": [ + "### 克隆并安装 MMClassification\n", + "\n", + "接着,我们从 github 上克隆下 mmcls 最新代码库并进行安装。" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Bwme6tWHjl5s", + "outputId": "07c0ca6f-8a10-4ac3-a6bc-afabff6aba51" + }, + "source": [ + "# 下载 mmcls 代码库\n", + "!git clone https://github.com/open-mmlab/mmclassification.git\n", + "%cd mmclassification/\n", + "\n", + "# 从源码安装 MMClassification\n", + "!pip install -e . " + ], + "execution_count": 7, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Cloning into 'mmclassification'...\n", + "remote: Enumerating objects: 4152, done.\u001b[K\n", + "remote: Counting objects: 100% (994/994), done.\u001b[K\n", + "remote: Compressing objects: 100% (574/574), done.\u001b[K\n", + "remote: Total 4152 (delta 476), reused 764 (delta 403), pack-reused 3158\u001b[K\n", + "Receiving objects: 100% (4152/4152), 8.20 MiB | 20.90 MiB/s, done.\n", + "Resolving deltas: 100% (2525/2525), done.\n", + "/content/mmclassification\n", + "Obtaining file:///content/mmclassification\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from mmcls==0.16.0) (3.2.2)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from mmcls==0.16.0) (1.19.5)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from mmcls==0.16.0) (21.0)\n", + "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mmcls==0.16.0) (2.8.2)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mmcls==0.16.0) (1.3.2)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mmcls==0.16.0) (0.10.0)\n", + "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mmcls==0.16.0) (2.4.7)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from cycler>=0.10->matplotlib->mmcls==0.16.0) (1.15.0)\n", + "Installing collected packages: mmcls\n", + " Running setup.py develop for mmcls\n", + "Successfully installed mmcls-0.16.0\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "hFg_oSG4j3zB", + "outputId": "521a6a75-2dbb-4ff2-ab9f-4fbe785b4400" + }, + "source": [ + "# 检查 MMClassification 的安装情况\n", + "import mmcls\n", + "print(mmcls.__version__)" + ], + "execution_count": 8, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "0.16.0\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "arpM46CZOPtR" + }, + "source": [ + "## 准备数据" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XHCHnKb_Qd3P", + "outputId": 
"4f6eaa3f-7b96-46e4-e75b-aae28c8ec42d" + }, + "source": [ + "# 下载分类数据集文件 (猫狗数据集)\n", + "!wget https://www.dropbox.com/s/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip?dl=0 -O cats_dogs_dataset.zip\n", + "!mkdir data\n", + "!unzip -q cats_dogs_dataset.zip -d ./data/" + ], + "execution_count": 9, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2021-10-21 02:53:27-- https://www.dropbox.com/s/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip?dl=0\n", + "Resolving www.dropbox.com (www.dropbox.com)... 162.125.3.18, 2620:100:6018:18::a27d:312\n", + "Connecting to www.dropbox.com (www.dropbox.com)|162.125.3.18|:443... connected.\n", + "HTTP request sent, awaiting response... 301 Moved Permanently\n", + "Location: /s/raw/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip [following]\n", + "--2021-10-21 02:53:27-- https://www.dropbox.com/s/raw/wml49yrtdo53mie/cats_dogs_dataset_reorg.zip\n", + "Reusing existing connection to www.dropbox.com:443.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: https://uc2e142222b11f678e96f89b0223.dl.dropboxusercontent.com/cd/0/inline/BYaBa5-WWfPf_jhSt9A5JMet_BB55MzZhB2D3RXLo53VGHSIYbVMnFTdccihcsD-kwc9FxBG8qOwqA50z7XD6-3yUXWK9iA0x4L8IV5wegYKilKuDauDKWiNAsbgZoEBg4nC1UWR5pLSiH3j0Dn68b2V/file# [following]\n", + "--2021-10-21 02:53:27-- https://uc2e142222b11f678e96f89b0223.dl.dropboxusercontent.com/cd/0/inline/BYaBa5-WWfPf_jhSt9A5JMet_BB55MzZhB2D3RXLo53VGHSIYbVMnFTdccihcsD-kwc9FxBG8qOwqA50z7XD6-3yUXWK9iA0x4L8IV5wegYKilKuDauDKWiNAsbgZoEBg4nC1UWR5pLSiH3j0Dn68b2V/file\n", + "Resolving uc2e142222b11f678e96f89b0223.dl.dropboxusercontent.com (uc2e142222b11f678e96f89b0223.dl.dropboxusercontent.com)... 162.125.3.15, 2620:100:6018:15::a27d:30f\n", + "Connecting to uc2e142222b11f678e96f89b0223.dl.dropboxusercontent.com (uc2e142222b11f678e96f89b0223.dl.dropboxusercontent.com)|162.125.3.15|:443... connected.\n", + "HTTP request sent, awaiting response... 302 Found\n", + "Location: /cd/0/inline2/BYZCXE2D0HPaLzwKVyTyfirCsVVcpsp0-D9eMfo9OFpQdWubKX08yUdUJz2CZ7dn6Vm4ZF22V2hf_4XTw41KZRj5m3Dm_1Z8gH9h_kawyi4bsKn5EYJ6b89lfhXhoxgBa0Fa8h7V39gPRaIfaWDiUE0tzYAM_aEVwT30FVU4uWisNXBvjz5-yS6_XYzJIiMZ1CUrFU8DwqBis4RwPmLA7rzdCsVV7a6VV0NiTcNgOKMwLP0lMYx4bYpDDmnOtF-m-GBVArV_2Xd0akIDKSXy4LY-4ovbTNI13uvUX5U3UcjpR0UPjGtBcgm3LR4Iqcvw5D6Wt14g3PCmBMIPgdTp_IN9RnLl9AK_mfl4v1kmJ_C-BfoEr43qQP-6uqBavD3Xhz8/file [following]\n", + "--2021-10-21 02:53:27-- https://uc2e142222b11f678e96f89b0223.dl.dropboxusercontent.com/cd/0/inline2/BYZCXE2D0HPaLzwKVyTyfirCsVVcpsp0-D9eMfo9OFpQdWubKX08yUdUJz2CZ7dn6Vm4ZF22V2hf_4XTw41KZRj5m3Dm_1Z8gH9h_kawyi4bsKn5EYJ6b89lfhXhoxgBa0Fa8h7V39gPRaIfaWDiUE0tzYAM_aEVwT30FVU4uWisNXBvjz5-yS6_XYzJIiMZ1CUrFU8DwqBis4RwPmLA7rzdCsVV7a6VV0NiTcNgOKMwLP0lMYx4bYpDDmnOtF-m-GBVArV_2Xd0akIDKSXy4LY-4ovbTNI13uvUX5U3UcjpR0UPjGtBcgm3LR4Iqcvw5D6Wt14g3PCmBMIPgdTp_IN9RnLl9AK_mfl4v1kmJ_C-BfoEr43qQP-6uqBavD3Xhz8/file\n", + "Reusing existing connection to uc2e142222b11f678e96f89b0223.dl.dropboxusercontent.com:443.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 228802825 (218M) [application/zip]\n", + "Saving to: ‘cats_dogs_dataset.zip’\n", + "\n", + "cats_dogs_dataset.z 100%[===================>] 218.20M 73.2MB/s in 3.0s \n", + "\n", + "2021-10-21 02:53:31 (73.2 MB/s) - ‘cats_dogs_dataset.zip’ saved [228802825/228802825]\n", + "\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e4t2P2aTQokX" + }, + "source": [ + "完成下载和解压之后, \"Cats and Dogs Dataset\" 文件夹下的文件结构如下:\n", + "```\n", + "data/cats_dogs_dataset\n", + "├── classes.txt\n", + "├── test.txt\n", + "├── val.txt\n", + "├── training_set\n", + "│ ├── training_set\n", + "│ │ ├── cats\n", + "│ │ │ ├── cat.1.jpg\n", + "│ │ │ ├── cat.2.jpg\n", + "│ │ │ ├── ...\n", + "│ │ ├── dogs\n", + "│ │ │ ├── dog.2.jpg\n", + "│ │ │ ├── dog.3.jpg\n", + "│ │ │ ├── ...\n", + "├── val_set\n", + "│ ├── val_set\n", + "│ │ ├── cats\n", + "│ │ │ ├── cat.3.jpg\n", + "│ │ │ ├── cat.5.jpg\n", + "│ │ │ ├── ...\n", + "│ │ ├── dogs\n", + "│ │ │ ├── dog.1.jpg\n", + "│ │ │ ├── dog.6.jpg\n", + "│ │ │ ├── ...\n", + "├── test_set\n", + "│ ├── test_set\n", + "│ │ ├── cats\n", + "│ │ │ ├── cat.4001.jpg\n", + "│ │ │ ├── cat.4002.jpg\n", + "│ │ │ ├── ...\n", + "│ │ ├── dogs\n", + "│ │ │ ├── dog.4001.jpg\n", + "│ │ │ ├── dog.4002.jpg\n", + "│ │ │ ├── ...\n", + "```\n", + "\n", + "可以通过 shell 命令 `tree data/cats_dogs_dataset` 查看文件结构。" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 297 + }, + "id": "46tyHTdtQy_Z", + "outputId": "a6e89ddb-431e-4ba0-f1f5-3581a702fd2a" + }, + "source": [ + "# 获取一张图像可视化\n", + "from PIL import Image\n", + "Image.open('data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg')" + ], + "execution_count": 10, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAASwAAAEYCAIAAABp9FyZAAEAAElEQVR4nJT96ZMkWXIfCKrqe88uv+KOvCuzju7qrmo00A00QZAcoZAznPmwIkP+p/NhZWVnZTAHSBALAmig+u6ursrKMzIyDj/teofqflB3S89qckTWpCTKM8Lc3ezZ0+unP1XF737+KQAgGEQDQCAoIgD04MGDul7f3Nx0fTsalbPZJCa/XC5dOQ4hGGMAoGka55y1tu/7LMu89865zWbz+PHj+Xy+2WxGo1G3rokIAJgZEa3N9L3C4Jwzxngfu64TkbIsq6pqu40xJqXU922MkYiMMSKS56Uh61zubGmtM5QjGkTMDZRl2fd9CKGoSkTx3gthSgkRBCGllFIKnAAAEbGXPM/1q7vOM3OM0ZBLKcWYUkrM/PHH33n58mWe5yklnlW3t7e5M6fHJ7PRaLOY3769zAxNsvz06PDJwwe5ofnV1eNHD2+uru7du3O7aeu6Nha97/7n//l/fvHimQ8dIi4Wi/V6vVk34/H0+voa0Xzy8XdfvXrVtt39+/c//vjjt2/fPn32TZ7nDBKZY4x37997+/btn/zpj9++fftX//E//vCHP1yv1yWS98FaK2i+//3vv3nztutDluWcQMjEwNfXN/cfPHrz5o0IVlVlQvfpp5+mlH72s5+NZ9MY48nZ6Xy5XK1Wn37/e3fu3Hnx6uVvf/vbPoQ7d+6cn5/317ezg8lqteq6phqVV1eXm83q0aNHTdOklFKSlASBrM2yLHM2ny9eiAgB5nleFBWhjZG7kMAUybibTXdxs1yHaMspOtf7aAiMMdZaRERhRHTOOUNd13FKIfQxRokJUay1xpibxTzGKCLWWiJCkizL8jy/vb1JKT169Kiu133fHx8f397etm07nU67rqubdex9nueTycQY5JjeXF4cTmfHx8fG4Pz2drVaTSaTO3furNb1YrHw3mdZllKq6xoRx+NxWZbM7JyLMW42G+dcVVUppc1mlee5916/6/T0tGmaZ8+e3bt3DxHLsgSAvu+dcyIyn8/J8PHxIVFs13Mi/z/+9/9qOrWzcX7n7lEKTQi9QLKICAAg8K2j7/uUEiKKSAih67rEIYRQ94uiKEII0+n06urq5OQkhAAAMUa9iJQSAFRVZa1dr9ciwszGGN36RDal5L1HIGYGABHRaxARXe5vXwrAVoaBmVl2B4CIiE/RGOO9DyGQNYMQhhCICA3q240xiGj0f4jW2rqui6Lquq4sS9/HoiiOjo5fvXo1mUx+/OMf379//2/+5m8ePnz4y9fPfvSjH/3t3/y1M/aPP//8F/+0qKpqeXuTCSBi0zTZeDSbzbquy/P86uqqmB5WVeUyU9dS1/XV1VVMvqqqs7Oz5XK5XC6rajydTlerze3tbUppPB4bY4qi0KUmIu9749xqtdKlOz8/J6I/+7M/CyE450Lb6foj4dOnT7OsCCGEEIu8stYSwsHBASBba5m5qoo337yq6/WdO3em0/Hd+3frun79+uXZ2Vni8PTpVwcH0zx3JydHTdN88+zrsspzgL7vjTHT6bQo87atQ+ibpgkhWGuzLBPBGFIIqWkakM7lGcfEzMzsvUdIPiYfOJ9UQGSMIWsNCxElkARCAqr4AACFRcR7j8LMzCnF6FNKFinLbFEUWZYdnZ40TdN1XQghhOBD1/e9bjYVifV6HUIYjUZElBeubVuBlGWZI6OLEH3o+x4AyrIUkfl8EUI4Pj4moouLCxa01qrCreu6russy0RkPB7Xda17O8syIkopxRiZue97773esu7YsixVjJ1zRDRsY2stS4wxWgPWWmt1JwMiJh9i4pQSS6Rhl8v7h/ceEVVy9r9YHwYijkYjABiNRnoPw14XEbWBjx8/zvNcf6NXP0ia3ttwV2ru9CnuiyUi6tbUW8L3D/3GuDtCivsHMydhERARRgAAIkLEBNLH4FNcbtYhJUHMigKtuV0uNm2zbuou+Murq+9//rnNsuvb2zzPz8/PP//88+l0aoxxzumNq6LRVTo6Ouq67ujoaL1e3717dzwep5SyLBuNRrPZjJlXq9VqtXLO6T7I81y1qd71crns+17dAedc13UoYoy5vLw8OTl5/vz5D37wg/Ozs6PDwzzPmVkfZmbp5cuXKSUU0MtAxLars8xJ4uOTQ/VN7t69+0//9E+67Z49e6ZKJ8Z4cHDw4ZMnP//5z5tNrdvx0f0Hq/miqde31zd92xhCjoEADdJ6ueqaNoVokMo8K/LMEkYfurZGREEQER9DCMHHwMyM0HVt27ad72OMMaXOt13X6T3GGL33IXh1Urz3XddlWWYt7R7rdqsw87Nnz54/f/7ixYu3b99u6lVKyVqbZS7LsizLdFPpNiADRJQ4OOemo/F0Oi2KAoD188fjMQC0bRtDyLJMDd1yuby+vo4xZllmjCGi8Xh8dnZ2584dAPDe13UdY9RtrIrDWqsbTLdTCAERp9Op6ik9Qfe8un4JpO9DCMHaLM9LvVRh8N7HGEUQwZCezcxpdwwibq0djUZlWaoa0C/QbeScCyGMx2MRybJMnQoR6ft+vV5fXV0h4ocffnh6ejoajVSRqIx1Xafr+L6PalWwU0qDJRzEbBBLRCQilVh98U4g3700QoiI+pCstcZYEBSGFDlF7r0XgN57Y20fvMszm7nRZFxUZe/94dFRluf/5e//7uPvfPKTP/9n63pzfHz8/Pnzf/2v//WDBw9+9atf6dqpsiyKIqWkznnTNKenp9PpVNVn0zSbzeby8lK90BDCb37zG2vt2dlZ3/d1XRdFoU+rbdurq6vlcpmEp9Pp0dGRcw4AiqLo2857//vf/q5erfURTMbjsiy7rh3WIYTgMiPCSGJAdspbPnz85OT0qOub2XTcdvXzF98kDin0PnQI7H2XGTo7OyGU2/n1Yn5jCI4OZ02z6ft+uVyq8VksFl3XuMxYR71v62a92azUThZFUZalc86nkFIMKaaUYkqIaDNXlCUQCoIxJitcVhY2z4xzLs8QkZm93+p0IrKWdD/oHTnnNMbRM+/cuXN0dKSO1aCRdds4ZxCxKAqXGR+6EEJd11VejMtK/UlriQCNMS6zR7ODvu/bth6NRqPRqGmaul7nuauqSpcxxjgej+/fv3/37t3pdHp9fb1er9u2TSk553RzWmuLotDtp0ZPhVDV7mCo9F7UzAij97Hvg6AGVsAhMbP3UZIYpNwVpHel4qcirnKoFl9XRKVFhTDP89VqhYjz+Xwymcznc0TUB6OGmJnrum6aRgVYTZBzTkVCpUtvYJCbQa50lQch3JdD3h3f0hr6sIczhRCAEI2xmTHGGAN7QktEYMgVeQKZHh4kEJtnPsWsKNQe5lXJCJfXV6t644ocDLVtu1gs1Dm/vr6u63qz2YiIOkjOOdU7RHRwcPDJJ58Q0ZMnTz755JOyLL/88stvvvkGAL773e/OZjO9AFVn6l8YY5yzIty2zXg8un//3tnZ6cHBjAhR+PDo4JunX2eZ+9//978cFcXLZ8/KLJtOp23b6uecHh2v18uUUg
[... base64-encoded PNG image data for the inline preview of cat.1.jpg omitted ...]
DSDjJhw8fzszMzExPR1G0u7t9cnKScNy1tbV6vR75PiGk0WiUivl0Og0AWmAUUWmS/Hq9nswk0+nMxYsXP//yFqEskfJcJ+lHca/bt22XGYZFiRJy4PmaYdEwTQLACHVMK+m4ANButnq9HiqwDDPiscGsOI6z2Wy33bFtt9frJZPJOIp0wc515fTUbKfTieP45KSulEo4icFg0G4HhomWZSSTySjyarVaOp3EkTSdaWr4LjJmFtKF8+fPr62tPXnyRA9/6c5hrVZLJBJWKkUpLZfLmUym22p/+OGH2ZXFRCJhGUYYhqZpJnNpg7Eoio6Pj9OJpGVZmVS2XC5rNcwgCAKLaeYLABgMBvl8gdbbBGkQRVIpRKqGfmz4X4bni5y6SgkAZGJGaSxnT5GMZzImLfC3GKFSQik54vBHpYYrmzJGQKEY9hC4BKQKFYQ8VERx9SwnFIoIoCIetQFHLBhcgBAqVoRzGcnYZGpYVAPJJUpigC7voAQKAFSTOiqFUgIgNQyLIOMxD4Mojnm301cwZGpkSdNWSikhCSiGYFFqokEVsZmlFAqpdzBUiqBCQhiyRCRp4Md+LKnhWGbCTqUN2wJGy9PTFy5dnJ2dNh2mEAaeF/GYkSFoeHjrhwPBkiGJR4mxbuzAqMxFCEopJShKh0odQ5E2BZLKOI6jMGZRxKiBjCYd2+uRiIBuJCJIQMIYOXdmPZvPBUHQajTS6XS5XGy320+fbv7gBz84OTlpt1pKqWKx6DhO4Pn9fp8QMjc3p2El3W5bCKEzTw/CZrOplHBd1/f9jY2NQin/+uuvH59Um612v98HRYhhas1TAGBIMslUwIyE7QAQXS2zTavT6+ZzGaWwWasLIc6cOZNMph8/fpzO57qdTuhHOo20UmkAKTifmZlq1Oq+H87PzLdanV7fC8MwnUoN5MDz+mHom5wZRtJ2TCFCz4+DIEA9jKbQdV3TNB0nQSlN2YlCoVCv103TrFaruVyu2WzqLcx13W6v19rdXVlZef3116MounXrllMobGxsDAaD5eXlYrHY67ZN05yamrp47vxgMDg+Pq7VqtlsemqqpP8v3W5Nc4L1BgNEFAqR0SAKIy4kEsKQUoODIkgQqe6gDf/XI0scVwSelekmquV6avZUGea3HIxIQRQqlKik5FIolIhKSlBEylAoTRLMpGQKKWLPC41YjqeohsWz+Jm6DpWoWfc5V5xLrlApCSAYU6aJBtVeIR7SAYNSQkmJAEAZUARBoNfqxDGfm11wEqmtvad+GCWT6YHf1+UkUIo51FBKEQYGoZbJEqbpGMzSwH6FwgKBRHGToDIoEkBFjTCGMOaxok4ykcwUrERaUQMYXVhaPHv+HGO48ejB4eFuFAdRFHmep5mFUI8x6lm4KNbJLoJUBJXmvudCFxKUrtngEFWolAqCQHAFQ2VEzfYtFCMI0mJoWsyKKUUQSr8LJYTMzs42Ws1er0eQ2YbV7w84l/Pzi/V6vdvt6mE/3x9wzpNuolgsZjIZxzQPDw+bzSajWCqV9DRWKpXs9/tRFCTTSQDodDq2a9m2c+XK1Y1Hj/YPj+I4zqTSqWRaSpCKM8B8OhMlEoSQKIq6nb4fBq1+3TQtygwASDquk3BXV1Yopft7O8lEQgoRhmEqlRl0e+l8odfrJRKJ2dl5iqzbbXf7g06nWyoVCwVLSlk9qudyuUIhF8WB5/XDyLdtpgNmXR7QwNQo4oaBSqluv1c5qQaeH0VRo9HQaFLXdeeXFuMgjKIomUrpcDSfz8/OztYD7/z58/1+v9vt9ru95eVlx3H6ne7BwV46nZ6bnXYcZ25+xrKN6nHl5ORkupQMw9CyjV5vwEyzXq+3O71u3yeGLaRCyhRBJbQPHJU0dLCmRwdx7P1QKQVCAR3NhIIidEhCObZA9Tyn44uHVLEeNwcEBVKzAgLBOJKcc+CCcE6FYqCYBnYKZSiFOCY71/YGenwPh6OquqKmhADK2BBfDcCJpIJIqYcohhAcJaRUHACYZJSiohDGXCpkpmOYvpIoOCogrpMcMosrxUTgEUIMZjgGS9pmyrFtw2REqShCYiqCymAUkFE0mUEpPWkNhJIcKDJqOik7mWG2LQGtRPK4Wvnww1/Fkh8e7XZbTSHjMAy6YXc42zacJgHFh3PlUghERQjgsN4rpRIMNWuY0HUmHbjqMyhiUkoZM7UlA0cEajCwLENKHkcKhKQUGaOUkP39/WQyOT8/32zWd3arOu9ijP3VX/w555xRmkqlUqkUIoqY1+t1IUQcBJq0f252emZmplqt9vv9lGsDyFYrHAwG6Uwym80KoR4/3rQtN5XKWFYjjngQBGiTOI6DKFRh7Lou59yyXRAyDn0Z8X63Vy47rU7HsqxyMZ/JFWzTEkKkkynGWNJxlVKmabYbTSEUj+JEIvHk8VYi6czMzPm+n87mL1+9ls/nNx486LXDZNK1bIP3AkTUPX3PCzXjE2PM933DsMIwAIVCiFKhYBhGebl89+7dVCrVaDSKxaJtWu12GwByudzy8nKn09nf26tWq8VicYDq6tWr8/Pzjzce6QAkDv1K9SiXy6VTCUTa63X2duJkMplMJq9cuVSr7GoYXRCFScPs9gfdwaDVG1gOBBEnBuFCxlxIQNDyZlrFTvP8g8JRf5yMBiMoosRhFYA9rzM1jpV+i1eMooAQApSgREUVIZQxhoxKKSSlCrkEImE4fScJAQJSgyU0WQ4xAEAB0+y4AECkolJbPpWAgpgaECIQASjXieuQj0+3yJRUDBG1oAKAzKRzyWQ6CKKe5xumQw0SRLqDioiKIjCXAaKymHIYWKhQxEJEQgkDEQQHJQxUjFDLoJbBGGPESlBEpjBWwIH6YUgUcFCDOKw+qnqhZ1qUMWIZKATvdDoh5XEYCc6RGdrudRnGGOm/0RGOnlKKSmMH9C0XiAgoBedaxckGUK6rgy4hpIwFKkkFt0ytpTEQIjaZyUyGqNKZrFJKUzxlMpmlpaUg8H/yk590Op2LFy+ur615nre19aTZbKYSyWIp7/v+/MxMLpcDgNpJ5eDgoN/v53K5fqNvmMx2LM/3hRDUMAI/Ojw8nJqZKZenTctpNpteGAEq0zIIowyoyYw4jFAKg7JUIpkoJ3K5XBiGWgZIciF5xJCk08nFuflBLGr9gWb+z+VyjFDTNMMwbLfbQmQXF5Yty6OUJhKpmZm5Wq0x6Aau63IRSSkt23AcKwz9TqeTz2flkFQfDcMIg4gxphSm0+np6elkMqkZbNPptGVZvX5f92l1hJJIJM6dP4+IjLHPH9z/8V/+ValUunjxvBDi1uc3oyhaXFy0TSubzZRKJUqI7td3um09AMl5NPA9wzD8KFaUeWFEmOnHsQREMuQyH/sxAc/q9/C8Hqic1KUR+tUAEx35ye9kQpJl8lAyVshGAS4qpSQCQ2SWJWPOla6pSKEAKdUCMYpSqRTX0qiEDQ3SpHLEXCiR0hE5Oh+GyQYiSpAEkDKDUiJijogU1RDshgoJRUoJCKRATcsGyxr0FRKuAAAHXoCIlAAhwKaKGaUERa3MLhQP/S
iWUegYDJSkSkokwAgFk2CMwAwnb5qmAtIPwiCKgl4fiKcQeoO+H/mOYxNC+v1uX3LbthzH6QUdOTpASAUElAKldEeIETQMhqhAClRKCFRiSESiS9aIKJWK43gwGCSobqkpIYTkClCCJIxxy7IAjL6SGkVhECoActn0/uGBZeeXFhcPDvaOj4/q9fr+/v7Zs2fjOL5582a326UU8/l8KpHknF+5cqXbaj19+tR1XdsydD+g2+026r1sNmuaJhIihGg2mwohnU43m+2pmelsLmea5u7BYRRFlmU5ju0q0zGtdrs9XC5SJpNJwzCazXa5WBRCdFttKeW1K1eXlpYYxf1K7WBvl4AKgmBxcREAENXR0dHly5drtZrOmqNYPHq8mcsXi6Wpva3ddCZpmowQ1em2dIMxl8sNBv04jhkz4zjW+RZjplJxrdFYjqKjzc3BYNBoNC5evFirnuzv7y8uLvba3TAMbdsOw7BWq8VxnE6nOedLSwvFYpFSOj87e+XSpXa7tb+7l0i4fjA4Pgp1n1aJGKRAZboJJxa8XTmxE26t3kJCewPfspNSoSTUZAYhDKlEBIKUiOcsatKhqdEBz0oDZPI5OGJAkxOq0i8ejDHCKBAiQQkllFCKDxuJMZcR19xiQBANpQhRAFQoopSKJQKAQRARhURm2Aq51AyLGotECAB4XmAySoEioFJAkCAalNHQj4aDxEM/D5QqCsoistvs9gf+3OJCpMTG5uMwjrP5DGMGDikMgQne15+RAJGSICgETgj3A89mVF+mUkwqFfpBL4p61E5nbSDoBYEXBMyxlCADvz8IfEQQMmIeEAIGIyMAK2o1L4bEdMwo4iKWOpWnSAijACCiOAg0wZH+B3DGGIDyfZ8ApQpCHu7v71vz88V0OooiwtA2HUQS8ZiYpNvtSylt2zUNNxZSCMGY2W6352fnfH9wfHxICPZ7nWajdunieTeRiOM44bq+77uuvba21m62tp4+yWaztmHMzMxowLuu3ff7/cXFeanH5qWKAiElSFDtdncQ+IZpnz1/LpFI9bzBo0ePUqnU+sKZ6tbhYNBjmgQIFIA6PNiTCqempnZ399fPnjk+Pl5bXX/48OHCwoKUUshYKh5GPmPG/v5+JpPRsOzj42POuU7VBoOB53m9Xu+f//N//sufvLdkLezubvf7Pce1bJvZjqVAmibzPE9Hhr1ejxDGOU+lMlJGWkjQtO2FhYUoiphpaEbmmbnZdDJ1dHTU6/XKhWKhULBte+n8mVKpdHBwEHjezs5Ov9+bmZpOpRPJlJvPZpRStm2LOAy5KBbzhJBBv6fp83jX84OIS4yFpABomI5BFRkKJAohIhEJKU3TmbQ3HOG2fc8zDMO0rCHIUbudOB4r4E6+amyKMFHXGXoqGUMUSyBAEDSXklScc6mQS8GlUkjBAIJMAAgA27SH56cAABwIJZQyM+ZcEcaFxqYRykwuZRiGhDFgTGgpQUREGkkVDnykLIpjgso0TQTgYaBAUmYEQYiUmo49NT0tQCTTWRfEIBjosTVKiUGRKRlLKWPJoxBMxgxKTEKpRTlSQslI4UVKyaWUkYgCwc0oBIJ+FPhhQCSXSvW9PuccGBhEa8eAjFUUAgXkJJKcy2FSCGPAge7SCCEoDrkJdBkbUQ/djxJxEEoRKdWYN0mNaD8oEEpZMPCEEEoCUN2QUswwGSPBoI+oDMNIJtyB35eSMwaIYBjG5ubm3OzsjRs3Wq1Gr9dDxGKx2Gw2s6lUGIbFYpFRTCQSnU4HEU2Dca61uJg0QICSgvNY5LL5/f39ge+trKwsLi56ntdsNg8P9w0cj9gIBQpRK5+SSqVy5sza1tOnP/rRjx4/fpxMJra2tmq1mmUaBiWuYzuO0+v1gtAjhOQL2UqlQik1LdbpdXd3d3//93//5s2bf/Pe37766suEEMsyFxbPISohYs8f6MjTshylAh6LKI4BYkoNznkUBc1m0/f9ZrNpmmbSdZVSGuVTKBSWl5fn5ub0Teh2u41WKyYKpMym05zzRMKdLpdPahXP86LAV2I+m0unk66UVq1Wq9dr3W53ZnoaEahhIKGU0kgoRRk1TDQsociQmkGjkyklE9DQsQPUKZ+eKZ9M/+AU0P/5Y9IIJz0qIQxGAEl4xmzKQElESoimUBzOQehdQPvlIfGPlFwIHZnrkwz/NPLdzBjOfEqpWw+EAlFIheJKKc15QBCRGQpAIjFMI/a5pro0HduwzFjGFtoAGu0qpFRsjKlVUnHOlQCgjFGUSlGtdqPZhygBBQoJECVBaAAgoNDEmjEPCSEU0WQGMwhFApKLmMeCBxgM+7aoGIHhdMioUUFAciAaDQRSaSIPADKsjqHSA1YAONItkXHEKWNxLMCgiiACoYQCAaUw4jHnQg+CZjKZMAqoYyRsJ4r7yZSTzizZtj0YiK2trSgMv/71rxsG9X3f6w8454Zh9Pt9RIzjOOGm6vV6u90Ow9DKOUJwIUbcwcqIueCc7+zszC3Mu26iVqt5oee6NkC27w1EGFFKkTIFoKRSiiM1CWIQBPVmo1jMNxoNpPTe7dvdbjeZTF44t/Z0+8lg0A9Cv9frcc5jLmzbbrfbqVSKC5VMJl3XtizDde1bd27/j//4v//www8qlcrA6/X7XUIgl88iomHYmsswDKIgCKQEBIMSw06wMI78cCjNGUQRATBNs9VqPXr0qFarpVIpRiilNJ1OT01NPdne0swgX355Swl5/aWr8/PzP/zd3zmpVWzbDj2v3WkIIXgcJpOuaWQ8r0+ZCbpkLZQXRlKBQMIIlRI5KC64lFKNJC75hLo1Uc/MTJNZDJe6HOq9kd9skzqbnTS/oVFRJqXU8w0AgIoQoIQQPxa6+KdGMaN+iRiRf8oJqpvJY7zp40g3e/wqgKHyCKAUXOgWHIwmpzTdkUTlR0G31wvjiDEGFPTcj+bOAK6k5CyMhGEYJjN0uVLDMogCRjAmhChAkCbVVV/KlZIghIwVokIJFAgqQE5QUAKMMtOgtmEySkUUBrGQQgY8EEIqISUCl8NcdNiNFVLvHkqOlcqVkEIqLpDoyS39yQmgazsEWRRFvu8jReYYRCghOAMNmDaVUiYFzbloW9bZc2d2d7f10HQYeqDidCrtum42m3rnnXdMw6hUKo8fbyilVpdXXnvttc3NzXajkUgkKpVKMuHs7e3phTXwepxzHksglKAWBkEhVD5fVAqPj4/jOEplU/l8ljJstOoYEwMMgyAo1Kp0RApCjYWl+cPD4z/8b/7h559/ns8VT05OYsG//vWvnz23fuvLz5v1EyFEIpUqlRYHg0Gz1SEEgygYDLz1M6ulmdLP3//l9PR0q9X4kz/5nyuVyrnzZ1KphO8Pjo6OXNd98uTJ6uoqY4wSRqnmQFSIGEUxs4aex0m4tmkFQeAFgSPEK6+8cnh4WKvVTNNUplk5qZaLpXQ6ffnyZc/zlJCvvfJqIpEAlLdvffHZzY+vXbtWzOUMk+bzWQLo+d1Wu1av1wv5KcO0FbJuv+eHQa8fhLEwhRJccAlCgQaO6dE+JEoOSXcBETWSW
FsL0xetFIjhikdEVCAmFDjGdqi+ijNbP6LtV3N3a4dJlAKBMuaSc8G5EAIpIYoQZADDnVI+ryc3aYSjuGZonFJwRERQhCAAUgJIlJKAcsjPj6BluVApikoSShRAEPmRiAyLUUo5j2CksSRkLLlgUUyRGIyaoBSPleCUAiOEcsGlQJAKJMRUCV2lFdgXXc4jRVQQhZxzhcilUErEMQcpBEFJUCmQXIlQhH4kkOs6gZQSUBqUoa53CSm0rLEiI1C3ktobAwhUcjh7hwSBAEkmk4QQ3/cpAjNZcliwIoNWn1JqWJJSQylkyBg1GWMbDx7uH+5ZNi2WcgpixkABD8Lep59+kU6nGaXb29vJZDKbzSql2u22lJIx1u/3bdv2PE8Py+ZyubAfyBEp+LCfKaWUkEtnKyfHCuXMzLRS6uHDh4ByemaqXekhIh/q/gmlRzYJC+KIWWx/fz+ZSu0d7Nqu9fTp05WVlf297W63FQQDzsXUVOnCxTOBH20+3TJMWq2dKCUT6SSltNtruwkbKXEcZ3p6+vr16z/96XutVqPX73734nd93x8MBjpYIIS4rkuJQSkDgEajoSsKtm2PQcOxEIyxqampZDK5srKiyVpiwR9vPtHBOedxPp8HlNXjyrlz54qlfDAYKBCDgWdQYlrMMIxk0nUc6/CgZpi25ST7/X7EhR8GElAoFccCCAUAwhgq1E1UNam1OKxfIpkcMR1GeaAFTwGfCzsnQ9Nx2eaUwWgNU83YggIkAckVEI4KUA19rJIKQCmqzQYB9GzueIBDW5kkCJSgQD1NKEEBKBkGoZZIYhQJISbVjG0xGYrpAUihkV5aaMO0DTdhIyLnUcJJGhaTIAgQAVKBkCiBKMYFgwjjiHPORRQzStLJpJNIRH6AACKKYxEQgVIRooBzCNGTKlaIseAAEhklqAyGQkgCSnIuCRcERcxFHIuQc8Y1q6KGnSmiEIGo4UATQQV0dCul0qTjCoiiw5heKaEm/kmc8yjiccx1OZ4xSixHSskjFcogiiKpeBzHUvHDw4MgHhQKWSlTrmvH3Ds5OfKDQavVtm17EEWI+PLLLxcKhY8//OiTTz5ZXV0tFAp7e3tra2sn1eO5uTltpchBCCm4EgoEf5bPHBwc2K6VL+YYY4eVw4OD/WTKzeQy6XQ6DMPBYBBEoZSSMUNTJtdqNcMy//LH/+v5cxcrtZNOv7d6ZrXZae7t7SDIYiHXbLfa7ValchzHvF4/iaK41+tkMrlOpwVAvv+7379///65c+dq+5V0JqnBPQDged7+/v6DB/ey2TwhRArNo8mIOUyNQh4TLReNmEgk3GTCcRzXdZ/u7MzNzCilarWabdvpdLpUKmlITRiGT548VkrNzk2bJut0W1tPHuXzuUIx51imSgshVLfbFnHUaDQALSGBWk4QRZRSjZhRoAMZQgihBJFQEEQqopTy4yHdPZKRsqAa5nhCKzMoRYeGqTEbz6FGxynlqdrp2FtybYFAQCuHSVRcKSJMw0A9ykS4ZgDSuFXDGColD7dZeKYGqcc8KdUAHjFCTMYGQYKMIBAikRCQCpUyhkBzIblUCITBqOihCEMv9Kq1k6yMhgxGFA2DAiqiCEFgng+MAVEqigSPhcXANNF2DMMyQUglIn2XlGICQCEjZCAlF6CU5Eh1JkgoNVCaSkoKKIUQAiXnPBIy5gKHeFGpZwjp0OUN4+whTzdSQKGUlEPeSDWBYhNKcAljympEDMPQ9307mbBMo5CfGgwG/X4/9KMwDgDAMDjnfGFhznaY6TDLogp4t6d6/U4YBr/zO7/TaDSajcZgMNjY2BBCDHr9tbW1MAz1jFmz2RRC+L6vUeaMMUqVoDLmIAVXEglhpgnEYKZltZqd/qBjOua582fa7davf/3rly69HMVcIWgaC4mgZckyubxpmgpxEPjpdJoQ8s43v9FsNs+eXUdU/X5/Z2+3Vmvs7++GYdxsNcIgSqUSs7PTlUrFNM2FpXnDMFKpVI3v1uvBxsYD0zSz2Wwi6Uop8/m8lGBZFgLxvCAIAh5L23YYG85Yj1vPhmEAVQCQTqeTyWSn0xn4XqfTUUotLi5qnHc2mzVNo9vtHh8eSSmYwRYXF5aWFwiBZqNWq9U0L2CpVMrn81tPD3SoJoSghgkAgJQrSRGlUhIUKgSlKCGUGADgcT6emyNIYFSCGyL4R434ob1JOZJrPd2dHz+iRlNy+lepNO0ZQUQKqAVbpATLoEAkJwACpVIwhqQqAUozbymAZwYvhSDaw1DNNKN5rrlBkVAALWUmhh6ZgKSMooKYx0IKJpGAoUCikn4YCSE63dbu7vYgKHe73TAMqElNO4F6mgEo8wahbaNhGACUi1gKafQjAoNcJgtaf4LaRAEzTAIoiFC8K+UQ8IogKEWlh8gZihilkLGIpRRxGMVBGIahZHJsTqg0EkKpZ6ovEwEAgFKKMqoQpB7i0gLVCnSlWIs6EUDO+WAwcFJJalFmuQAwisSY41j5fDaTz1SrR6m0I6UIoyibTS2vLM7OTaXTyY8+3P6Lv/iLOIoWFxcRlZ4toJQ2Gg2/308mk59//vn1a1f29/dnZ2dBC+5M7AgAQ8lyg2DMuVLKsiypZLvd9n1fQ1IIIaZpJxIJYrAgCJq9VqvTzuTypm1/7fr1drt7/+GDVCbdbrcJIQuLc8eVQ8/v61Q+8LyIc8YY2LC2fiaTL3zyyScJN/Xnf/7njuM8fMi+8/bbX3755e3bt/VgRzqTOjo6Yoxls/lUKgUKa7VGFDU5F9rkZCTHA+ae52ksnoi5ZVm9TieKoitXrvgDb2Njo16vr6+vb25uJhKJdDpVq9WymdTa2srR8QEPw9u3b+cyaSQqk0kxZsRh0Ov1NDxFjlwZ0qGn4rGEEdGRQKREKYrmaDoWxuNIBHXYqUbsvSAlRZ16IygllRqDuV+0Q3iBZkYppWmvh05M5zmgtf+U0hSGUkoQIIfg1UhGL4a1iKgZ93BEezN+L4sZlFClJBccERkhehyeaVuNQQkuKdHESFobiRCIoqjVahmOEfFYaRJ8AogUNT1Tww0MEluGySwaM8GDMFJCgFIxWkgJVWBIVKAgirmI49hESwhBpFQEqaJEUJSoADSZhSIgpezLIJRhSENuSzUs2VAlUAghFScAkkhiAiqCiDFILlAxAwAVZ0P2MaUMBQwJGfI3QsxDI2lxk2ZS6ZNKZffexqtOaqY4MwgqMfcUDEwmHCeRdC2I/Ppep1mrYugDU0HkH1LpR8FJvdoddNdnX3rj6kthGFqWEYahi4QQIv1gZXbG9/1z587UasuWbTgW7XRajDEnXdIZVxAEIY8VoOmYhmlGUdw8qYdBnMlkTMceDAbKdNNJM4o7UspYhhgrEUnfD6mJ03NTnX57Zn768y9vrq6snT27qlR878Htc+fO3bn/pDi1sLVzlEjl+GGVGvbmxj3LMouFstf1ZCSDjmcqtjq7kMlktra29maevvXWa/v7+7nc2ffff991Enu7+7lcDoEc7B9ms1nP6wPw2blpKaVtk8qBX3ByKJWbzkZR1Ot2FhYWhBB6
0LnRaDze2Dg4OFhcXDxzZv3BgwdvvHI5DEPGWDHr7O3tdVv1TqMxMzNTyOdN09zf3+9141TKHnjQ6/cBIGW7hmmFvkg66d1KM5LScGxghkA1gnAKANDVQAWYNp1hECT0fAGICYV6ZFQhxiBRKUBAg47y8FEtlKBCIqUMeDzkaEXUJU/tXB0eAAAqBDFkk0dESUFJ1N4PJRJFgRJGGaWUD6ujihCkhOKQz1tSZgqphgzoQJQCRGKYTMSEK0REYAoQQ83ARYArSUChRSg1CUWpqzLIlOkKEfcDLE5lGLUdaszM5I6PD00CBBVIJSVnRI3GiHAYZGvahZCGQBlVILlgCoExRACCfFgAVqjhsXq70o4ClJIqHivLgtIDY6fKWZPpNbwAkNdFAiklyOHQPQISRNu2O50ORTI/O2cyFgXhYDDodDqFrG0yI47j6lG11Wim01mTMs/zas2aAhGLuD1ouUnHck0pIQ7iVqsVRZFpMsZYtVq1LGN9fX3g9S3L0FQOepoJAHTXvuedSClBEcMwtHQHYyYzjMHAY4xFJBoMBoPA17z3ngdCeolEwrZtpVQYxFLKRCKRzuYfP358clJ7urU9VZ4ulUr5fPGoUm23u4jqz//8z4Mg+M53vhOG4f7+fiaTTiaTURjt7Ow4jgMAmUxGTw8JIZ4+fbqwsJDP5/WU4BdffJHL5TRoZqzFa5qmZkPsdDr5fF6HCdq0MpmM5tGYn5/d2trKZNJhGL7zzju6ZmOYtNvt6t5MPp+/ePGi4ziJRKLVaiWTyX6/L4QwDOPp06d62LLf79vUVEBj+az3oBSAUoKPWmugBAqFIBEIKGoZ45EaHA3R6wx/vDYmazDP6WHqFHJysP2Zgq86ta5e9Jn6h1Nn+MpXTSafX+V8n6vHjuJ8hkoqMYyNpRAoRRzHxDEJMaTig8EgnXE5571BP45DiRaj1LQppa52/MMLopQCpVIpL/ANQgVlRAEKSQAtUAyJRD3aqIbOXLfxcWiBQiohRDysC3KlFBAE8WwsGl+oL0/eDngB/SClxFH2rpeRrq0VCgXJhWEYjUbDpMyyLIoQR4E3CEzGhGEMvL5jWVEURTwSsTw+rFRODhvtFjNZzQ2+9rU3TdPM5/Nf+9rXKpWjo6Ojp0+fLi7OE0Lq9bplWb3uIJNN6aKo53mGYTCGoBSPoijiQnSFkgSZ49iI2Ov1okDLmqsgiAgF3w/jWDDGTNtKJtJKqVarxZhx69YtZljZbP7mzc/jOHZdd2tru1Y5OHfu3Jkz5/b29m7duqWLlvV6PZvNtnZblNJCIaflPnU7u91ub25uuq57/vz5hYWF//gf/3J1dS6RSEgpXdcFgH6/r0ag0CiKHMNtt9uGYQCA41oas55IOHEcnz9/fmFhXncLhRD9QbdYLEZhjEAs087nCvNzC1EUxRH3Bv7hwREhJJvJFYvFzSdbiUQik85aps3DiAv0hYpCTVmNKJXUdJ2jf7dUoMFDkgCdaK/jaEJicq2f+lnh6U18WMWbYIWZNIZnrx1jw0eHHKNtEEZzR89Fs6fW3lcaIQ6fpvSQudJRMwiltMqT7n4rRYYam0HgOa4lJQz8vmnOJNJJLoJkJikUF0hjUEJJxiiFMRUfpcQweBQHUWQZkdQkcFygAg7KoFQpJRVyQD1nBEgkICjgursghRAi5oJzTZyjG7Ry8iZqKxrDYU8ZIQDoWFxKqYQkCpDptUd7vd5UseS6brfbtQxjdXVVKVWr1aKgL3kgeZxKJW3DNCiTXKCCfC4fC14qlWbmZir16vsf/NKwnRs3blT2W81m03XdXq/3H//jf7As67vf+07MI9/3DYNKCXEcm6aJQDOZDCKGnFuWOaRwRilEHARBGHPHcdxEghDieQMQyjCZkhCGoSYy1/zblBiIpN/vNdqtXLZAiTE1M21Zzu3bd13XLZSKqVTqpWsXFhcX6/X6zZufuq5brZ5wzpeXV3K5XBjE2Ww2nU53u/3BYJDNZovFot9ra97RTqdzfHy8uDilDe/o6Gh6erpQKBiGsbOzU6lUtHShpnzWk5PJlJtMJrWHZ4xNTZU/+uijdrvd6bZ++MMf+r7vuu7du3dc152bmwOA3d1d27ZbrdbMzIxhGLlcjjFWKpX29vbCMKzX6/V6fX5mMYy4F8ZBEPKR96OAxDSlnqlVAqQkIBEVUaMOxATv49gNvmiHv8UjfeWD4/KMmrA/OS60vhCCfeXJxyY9Xpww6T9RV1Oe1YqUUgpUHId6uAIkB0IZAjJGAPt8YLKEYVIE7rhmKu10Or5tm1JFiMAl5xFnupA8pMgkqBhDISTnoeBIiSb5UkLGsaKCAIBBUKtAoxadGkEKtPgv5zyWQg9c6Q80NjwYhR9f6QnH38VI5FEpBYCEENM0HdOKwlD/yfO8MJl0XVdzqDE47rTaMQ/TSVdYdrfTC/2IEmLbtvC9KOS9bn/Q8/xB0O13T45OEM1MJnNycuK69je+8Y16vb63t6er9gBSCNFs1h3H8X2/kC8FQcBVqOnPhDDthEtKRAgRhHG1WiUUDKCWZQgQqDV1UCmJmXQ2mUz6vl9vNDwvYIxZphOG8fe//7u9QX/76W4+V+ScV45P/u7f/bvZrCWEcF338uXLmUzmZz/7hZRydnbWNM1sLm0apmmaUvJEwikWi4h46+Yn2Ww2juNWq/XZZ5+9/vrrt2/fbjabcRzrMFVKWa1WdcGTc86U4bpuvpBljOmAQjN8P3782DDY/fv3F5fmV1dXLctqt9sbGxuri3NBECAlQRTWarWFhYWt7adra2u26+QK+W63G/H44uVL9Xr90aNHzXZrtrzo+0FvEAQR17V9RKTUoIRqIxwiREGhkkCGGrfjiFRXvMlIqfeUHSqlJt3Zi1v2i5HUUAVswgKHdDQTbnDMNqaeT4VOLcWvPLQPRNSjvajGqqAIBNRIcEwAGNqxJSyWSdjZXJrzMGkbQcIZdJQEnk6nhIijKJKSM30j2LCUq6cYKackFpwqxgiVBLiUsYypIkopm1Ht2obTgRp5M2QclAJ0p0oB6h3omd7VJFsMvBCaT+5Do/uCDIfcaq7tpFMpPWmacFxKqe/7QohyudxvVYLA8we9hJ1GRCUFYySdzj68/8B2HS1wmS/m3nrrbSEEEMwVih9//LFhGMlMcmtnmxk0mUzuHx0uLi4iEiF4z/MTSPq+V56ZBkqIZemxa87RMaxEIgEAnh9IyYWScSwy2RTzWRBEQsSJhGMwWwjVbnd93+92+57nWaaTzBiUklqt4QfB9vaOUkAIZcwIw+j48GRpaWV5cbHX6dTr9QvnzqwsrymlqtUTxUWjXY3DkBGyvLhYKBQoqjiO9/f3NfeEBqweHx+7rouIvu+3Wi3DMIrFolJKY9+ogY6bUEpFUSSECENfCNFoNFZWlqenp7/7vW/rucqbN2/Ozs5OT5cvXry4sbFxcnKiuQX0Z9/e3kbEVCplGEaz2eScN5tNx3GWlpY6/X670xv4MScUkVFKpUICugg3ZtYTQxEuAAXPEDB6sZGRAsxXrn5EeM7njEK+cX6oU0o1noG
aOIEcmxZqYMyzQaoxKHRc/Jw0wvEqVS+EwRpPrWsiSikgCkEiISajqKQAKcWwrA9EKiESFiYoLk2VOQ/KuUw2YaUdFovY8/pcxpHBAkqY4oJQpusxSkiiaW0IiQVnUhBGBUUpUSklQCpQko/kYxUhisCowzMckFdSIShyOsqf9OnjD3bK4+vjGRGGJj0ekeFls9lapRoEgSYRlFI2m01E7DUroe8pJYWMURLDMKjFXNd97bU3ylNTfW9wcHyQyqSn56b9MKhWq9Vq9cyZMysrK5XqcbfX0fNNb7/9dqVSKRbzQohMJhOGISEsCCLfD/PFnBBCy8ozgDhmnMtur1ssFaonNc4jx7GQ0jBsGAZNpVJhAJqs2rbdZCJNiQapqHK5/OEHH6WzmXq93un0CCGFYvHosPIP/uvvPXr06Cc/+dtWq5NIJBhjCkSpNLW9vZ1IJNrtdrvdTCRSmkV7MBg4jqMT1zAM19bWdnd3c7mclFKPIwVBMDc3l8/nNRY0lUo1TxrJZDIIAsdxEglHr7mTk+r58+e+uPXZ7u6ulPLatWtB4BkGTSRy29u79Xqz1WpxLl966aW9vYPV1fWDg4N8Pm8Y1uzs7ObmZqVysrHxeHp6ut/3+u2o2x/ECu1ExrAMU0rOhZRcCiLgWb1EgKSAEsAwhkp4MAr8yGiQYtIRjfdoTa82GR/pHV9nuXKC83d4ktEz5cgf6tB0bIE6QRq/1+T7jteknNDoPuUeNP+FVEJKhagIaOJ3hQSI0MMHEpFQVBQpEGVEPvp+OZlQyio6iUQpv1DORlGwtb1JDMYYAaRMfzxUoIQcf1SdCxlSMgCFQyQaaDj5KO7XDNNqJIY+Cut1QqrUBK/2ZAj6lY7+xahgeH/lSOxOSkTUG3O/39cI42az+fjxY4c1TWradooAiQOp6Xx83//yyztLy8tA8LByHIvIuu/EIvJ9f2554f6Dewpkr9cbDAaFQmFra+uzzz6bm5srFEqUGrOz80+fPiWENJvNTqdTni5JKTnnYegLpQzDCIKg0+kkEgnPG/R6/UwmAyg1gXQqlapWDoVQmUwul8tFcSyEQsR8qVgslI6PKp1O5/iktjg3ryPGfD7/8ccfr62tcc41ZuXLL+/85V/+5Y0bNxBxaqocBEG73fa8frfbjmNRrVYdx5mfn7dtW88EahP6kz/5E7036fTPNM10Or2wsFAul282f51MJoPAy+ezU1NThJAwDI6PjzYePWi1Wrlcbm5u5uzZ9ZdffunRo0c7OzuXL166cu1as9m0LKs0NfWXP/7xH/zBH+wfHi4sLdWbTTeZ9MPQcpx8sTgzN/dkayvoq74XUMOw3LTFjEjEhEshuVIUFMhngSFq1D+ZoJ2ftMYXw8JxxDR2euMXjgOr8XnGT9NObXyucXlGDhEycGr9jU8++f0rF+TwDSgOC01KKAVIFAClSGTMFQwLGZSCQRmjlFFqBT0a+ElABYQFfirpJm3XR1goFFOpVL5YzBXyzGQGSBXL+NmUECihJGGUSxGLoS6v1MpVBMUQ1QcKlVBC89cZ1NAyQ2PwK4x2qTAMNMxlTETNRjP1pzY/bWymYegXMsYUF7pfIiw7CALGWBiGhVxeKVWpVHTfGVmQKKQSdiL0QwnCMAzBYTAYJJNJy7LmFubPXbzQ6bUr9Vq313YTiUIpf9G40Gw3opCvrq0RQoIwNE0zm8sxw8jmcpzzMIrCiB8dHSmltra2lpaWNDtg7fg4lUppcSjd5u50uoyx7qAvuGKMHR0d5XI5x3Fu3bqlgammab/88st7hwfz8/Obm5ux4EnH3Ts8KGRzuXy+2+1+42tvSCkf3n+Qy+Webm51262pUrl6XHnzza/dvHmz1+3EYTQ3N5fP5hBR8rjf7dRqtWQyWSwWC4WCdo+zs7OtVuvkpAYAtVrt8PDw8uXL+/v7zWZzZWUpnU5vb29r4M7GxkPHcV5//fVur23bdrvdHAwG77333vT0tG2b8/Ozu7u7hUJBA1Dv3Lmzurp6eHjIGPvlL39548aNzc1N7Yf1uKNpmiHjEhQjJJlMGo7VHQyiwEfDAqJ1FIfsLABACCWEcB6ON2K9u+kKzTieHKc2oxXyjKl6aAUjLnN9Er2c1KigYE5KHbKhsLbQmB4lY8GFbvNRggQVQBzHmtVBozJ0GVCfajJMG+8IABq7+Ez0WykhJDimRZXk2olqaCqAAignk8Vc0avWssX0oFrvVY8VUYRCp9v27Vb3pH7i6hbFRGMAEWEiVdP3YrwJAUAshh+eTmwPoAC0HAs+M0KtPCh4NDknppTiL8g+njrGD46dp5Sy3++blElmBEHQ7/cNOmwnmrYDQPwo7HZ6/b5HwKCESQmpdFohHFaOJYhUNrO4NH9cYQ8ebex+tGua5szMjOM4A68XRVEunzl/7uJ7772nlAqCwLKsfD4fxzwMo9nZ2e2dxzs7O77vSwnnzp3jnLtustVpM8a63V61WjUMQ8/Laf/T6wa+P7h27YplOSe1WiqVOjo62NvbY4x1Op3eoI+ocunM9PR0Kp3O5XJPnz7NZDKOa62sLh0eHk5NTW1vb6fT2R//+C8RaTabLZUs3ZozTTOO47Nnz56cnOhKxvz8PCHkV7/6lW3bP/zhD//Df/gPnufdvn377bffbjabFy5caDQat27d+r3f+735+fmDg/16vaYj3jDyCSFRFLRaLULI9HT50qULT548effdd7/zre/HsVCK/+3f/uT69euLi8uffvop5zyKol/96sNr1661Wh0p5RtvvPXBBx8YhpHJJk3LMUwrlU4wK8EVUMsTinb6fVBICRUAOKSSHaoyTq6u8RobpyHjHG/8tMnlNz5OvWq86Ws6DAnPXJkcucSxb4SJ7sUYJ6BPKCfULOD5MHXiykeQN5QwbHaAklyqcayIksRc24vf7ymoxGGjwoSIQh4qEEjBdR2khBiMPVMjmVBoGQ4R6TMKPeSnUD1ru+unCS06P7oR4/muYYgPwyvWbzH+AOMRaT00Of54k592+OCERI4uvJqJJCIGQeB5XiaV1ucMY8GCUErZHXj9vmdS03UThmml02lmmhGPu91+q9+2XZsrmc1np2YLx8fHSolEIqs33Vqt9knrk2vXrsVxPD09PT09s7+/L6XMZrNCiNdff9M0Te0NkBqtVmt3d7fZas/NzcWCx7FApBqaYBjW9HTq+OhOqTRFKTVN1ut3hIwvX768f3TIOWcGDcMgCMJUyoiioNuTVtOYLk7/u3/37374wx8+fvy4VC406q1arUYIyWazr7zyCiHs6dOnOslMJBKBH1YqlV6vt7q6ure3FwTBW2+9VSqVPM978uTJzMzM1tbT1dUVfbWPHj06Pj5++eWX4zjiPC6Xy1EUOY5Tq1cNY35j40GhULh06ZLjWJ1OZ3t7u9lsGobhBf7Tp0/7/f73vve9hYWFf/kv/6WUcmVlJZ/Pc865FLbrlMvljz/+uFgu7e7uEoiEkhxUq9M0LD+MZBgM+kGExCTAFQIhRElUChVIPVMytsBTRjWZhk16ocnvL+7dk1atzW/MKayUUs
8HlS+2LtTIbZ4ywnHYDM8fQxFCAKDaLCUgEqWpuBVolI1SHBGFBJC2aVAlY9+LQ+A8kpKjoRhjzW5PgoZGaxaA8dfEgYg6FEClKCLTGk6EEEZ1PKDrJVqM3jAMRqkeb3wmWCO0fvdzNRj8DVpzkzd3sg8LEzID2tQ9zxuSrykVx3HMVawAqWVajuW4yJhQGAtRrdX2Dvbbva6TsIUS2wc7zXatPFu0E66dcPOlYnmmXJouLa+uLiwtWa5zcHwElGzv7X1x+0tmmZWT+mdffBnG4t7dB81Ge3dnP5vJVyrVYrGUyWTffPNrrVYnDONMOpdJ5xgzlVJSQjKZvnjxolLi4sXztVp1MOjlcrm/fe9dpcSjRw/b7SYXUSLhZjJp0zQIwSgKH9y9C0K8/sqrv/e7P3jr9Tc2H2/MzcwsLSysLC3kM1mv1/X7A5Rq0O15vT4BVS6XlVJTU1O5XO6TTz7Z3d1dXFyklH7xxRfnzp2bmZm+fPlyu93OZDL9fr/f76+sLH/55Ze7u7vpdDoIgkazVq1Wq9XjqampXq/XaNQ453qoP4yCZCqRTqevXbu2sLCwvb398ccf37hx45/9s3/26NGjR48eaSkFwzB+9rOfTU1N6XBUobRt23EtxqhhUDdhJl3boIDAAQVBhagQFRAgFBV5NkQ76eLGv45Tu8mN+5TVvWiHX/mgUkqOSjIS1Fda4IsvP+WBX3xQ52uKDLWHQNdTRpaJRKECxQXnXPBIcO6Hnhf4/cGg3+/7vh9FkYwlCBnHMY9iHnEZxWycmD5ngUoZlFIkQ93tkUKbAhUCII4/7bArTYDoiUSlNwPts4etwmcfY3IG7Ld/VP1C/TydOTi2rZSKoogD6gY0pTSKIsclgNSyXWY4zPSCQRBFkR8G7V631+vZrrW4tlyem0qXM7GIFMqjo4NSqXT9+vU4jh8/fsyoeebMmWKxvLe3t7OzUy6X0+l05fik1+tNTU0dHx8nEknGjCAIT05q29vbYRC3Wi2pqFIYBjGjNueSEGbbbuCHnXb3pFZdXll+uPGAMnL9+rVKpcp5FIQegMzlMrlCNplMAwDn3LCMKA5+57vfHAwG/9P/9H/9gz/4g2r1pFwuf+tb39ra2vY878GDBxpAp3Owk5OT2dnZl199rdVqdbtdDVe4devW6upqKpX6+te/ns1mL168WK1WX3311W63++qrr968efPhw4eVSuXq1auIeHi0Pzc3p2n8X3755Vqt2u22wzBMphJKKc3e/2jjSbVa/Z3f+R0p5c2bN4+Pjzvt3o2XXgGA2knDYNY3v/nN7333d/7Fv/gXf/RHf8Q5/9M/+wvbti3LMkzDtE0kVEgZxGbPjxA4IAUAJIgKFCGEYORF8FyKNTx0G3OyoADDwd/nGlovms2p3sZw7WmdUKXUhGOctMCxzx1XMSayUJh0g6feV4JC1GzVz10YSoGEEiSCgE7DJFBUwKlCjexUSohYKo6xIv5wbSMiAmUSQZ2yfl0XIVQTglIClKBWMJVK4uiTK6VQKkEBFQBRRM+FaaiMeBbWa7nsyQ/82+/pZAygE1SlFOdcgzxQAUUCAHEcE0Icx2l26iFXsUTXdCzblQq5kkqK5ZWVmIe9Qa/dbZkJY2Fplhqs3W4tLy8jYqVy1Gi0jo6OTNMcDAZhGO7u7jqOW61WNx4+LpfLu7u7tVpteXn1zNpKv+/Ztru3d1AqToVhnM8XgyBwnWSz3YrjjpTSdt1UMl2v1yuVE6XE0dGB4zjb20ez8zMvzVz/27/92zfeeD2K4lqt1un2pZQHBweEkHOL55XCL7/8Us/pPnjwoFarr66uPn78+MmTrenp6Ww2u5Zd29jY8H3/zJk1zwsMw/jkk0/CMPz88887nc6rr766s7ODqIdg4M6dO7qo67ru/fv3HcfR25brusmUi0TNzc2VSiVK8enTTc0ymkwmDw4O9H04f/58IpH44P2by8vLvu9nMplvf/vbBwcHn3/+udYDTSaTV69e/fzzz3u93j/5J//kww8/DIJAKREEXhj6hBmG71FKvTAOQ58RVIRJkJqJHgklFJEimTC2SV832bQY/wkm5hjG1nLK3U3+ipqzdIRYBt0h1M8ZmZ18Hs42aXjjtx6Xdk6tRqWUBKEpEIdXq7mShk+SOhHTrwSpCKAwKTEoMSmlhEkqJVeSA0AUhwQoAhClmPbaI2zZkDVRCYlIpZIUUQkJSCghUilQQCYUP8c2o/eaYf9FKqUlowARUVI6uaPgC63CF7+f/tgjVSpBqMkMRlkcx/1+nzGWSqWqTT8II2/gu27SNkzJVcwlF4oxli/mcrJwUj/2Q88LvFyykCvm0m7m4cOHW1tbSql0OhuGYeX4SbvdPjw8brc7uVxOcHVwcCClXF8/q6PuZDK5ML9Ur9dNx261WoSwQd+jlBFCbdvu9zwpwbZdQlgcB5ls4sKFCw8ePEink2EY1mqbC4tzrVZramqqVqsxRhgzCYFCoXDjxg3f9xMk+uyzzwghn3/++fz8/DvvvPOf/tN/ImQIZC+Xy/fv3xdCvPTSS/1+PwzjT359M5PJcM47nc7s7KxSam5u7vHjx7rDWSgU3n777SdPnly6dMl13StXrlQPDxNJ5+DgwDCM5eXlWq3mOFY2m7VtEwBs287mMtlchnNuWZbneUEULi4v7ezs6Lgxl8tlclnGmCYpD6Lw4OhwfX395uefuclE5aRaLBUGfW/gexAJLiUhRAJathHFQqDej4lmTyAEKGPMNMcWOF5y6oXyzOQaGNvk5AuHFjWRWOrXak4mNS40nPZjz2/6XxWUjs37RSMHAAXjEsazqgwAcM4ZstGAMpdINOlTs983DGpajDFKUAFIPbyOgERJUASVGDYJxDODenbgyKehVCA1Rb3USSCllGl6rSEGcOS+pQINHRzJzY1F5ya3txfd4Fc6xsm/djqdfr8fhmEURb1er9FoaAEJN5VExN7Aq9fr1WrtpF47qdfqzcYHH334+Ze3Or32zPzcysqSbdtRFCAqKWUqlZqdnV1aWtLdtoODg2QyKaXM5/MahmLbNgDxfZ8xdv/ewzCIEbFUKnU6HUSqaQVbrZZpmrMz8/l8XodSjJnJZLpSqVQqFe1ker2Onk6o108AwLLM+YW5a9eura6uFot5w6CVylGn0/n617++sbHxwx/+EBGbzWYymVxdXb148eLu7m6tXi0Wi6lUamdn51e/+tXu7vb58+ebzebU1NTa2prulywuLq6srGxsbBwcHPi+3+v1Dg8PLcuKoqjZbO7t7ywuLtq2/fTpUyllr9dJp9NvvfWWlFKL/i4uLmoqxGazGUXRtWvXHj16hIjb29ubm5s//OEPdXr59OnTSqXy/vvva2EcXRZ+4403MpkMM5kQPAxDPxj4vs85Z0z/xxWgUjDKl7T+5BjeGMdRFIVhGARBEATy+UM935Z48RhXccZeC34zGelXL6pRxngq/nrxh8lfXzRLbdrDduXzua5SAkymGOGAoRBBzP0w6Id+3/O4FLEUkeCxkMymBudccK6egV+RmEaslGb4QBnHSGzLk
BLCMFaxP747hFJCEVDEikuUwEBRNXkHAcAQz+BpMOEPx7cbJrAIiCgkR4YoFZexjuwpoFRRIulQmwZy4McqwVxUYMcsibbXEUEgTNOMlWh7bde1E4lEPOhGKjYs48mTJ5mT1PnzZw3DCCM/DMOZ2ZVmo7u+vl6pVMIgRqBnzpy5devW1NSURqjk8knAeGY2XywWc7lc9fiwPJ358MMP9/cOX3rlZUpZo94ybbNcLinEhw8fUmok0qlOe+A6yZOTWtBH1yycnFQ6DS9fyLz26svbO1t37ny5Z9FsNjtTSGdT5pnF6ffee69VOajVaiDS5XIxm17kkflf/70/+vjjD4rFYr1e29i4T4h68OAeALEt9969hzPTc/VaY25u4dqVyx988MH8N75RP6lm06mdp1tn19cSjl0oFOon1YO93Rs3brRarU6r2Ww233n769lsNplMzs/OJBNuu9n68IP333777f29vVKpaDCadBN/9Vf/ayaTefTwQaFQuHDx2o2Xrjx48ODv/uiHtVrt//F//79ZltXvtdbX1w8Odvr9fiaT+fLW/ptvvnn50rlf/OIX8aA/l0/P5jI3f30LgU3PLpbKM082d6amptt9r+cHiGgaVHAZiyAOQwPpcysflCKodIRFkIMiqCdkiZJSSGmBBCWEiJWUusmhzdh1Xc0nQ4Y+UIESinNAXajQ4ocjrkAc6q2hUkNaaQCt/kkQlZRIiA7pYcipARp6CQDyuYKlIhyJ5iZVFCUSJTVxBDVYpFSoFJiOZFY0MoG5uK2UORQ+UlRKoutFQUyHjlQR9uIeA8/3EqSUOgETQoRhyMgQdjR2gJNbxW9ycS9+nzTC8QamtPa30uwWMMpch7NWpmnapouIFInv+/v7/Xa7baOpQZK+P8jlcoi00WgwgxSLRcZYOp1OJNxeb2CarN/v375z6wc/nL5z545pmk+fPg2C4M0339RYMMMwFhcX7969q0d46/U6Iu7s7IT+IAiiQqEwP7dYrdfq9cbc7EKxXP71p5+5yWQcx5zLM7PnKDEGg8HOzm4ul6vVahpLoMmplpaWSqVCPp+7ffv2wcHBzZs3y+WpK1eubW5uIuL6mTOVylEun3306FGxlB8MButnlj/44INer5tMJnO5/Orq+p3b9wBgd2+bUfPu3buGYVy9enV7e/vs2bMbGxv9fj+dTj969CibzepPsby8XKlUzp8/3+l0CoXCxsaGUuqb3/zmn/3Z/1IulxljzWbz+vXr1Wrl7t27lNJMJnflyqVCoRDH8dbWlg527t27Z1nWt771rVqt1u/3Hcf53d/93VarpVPoSqXy13/915zz6WLh448/Nk17dXXVYPaDjU0u4Oq1yzv7x6DLLZQKRKEkgCTEGOsNvuh/vvIYgiKV0msDR7yDp+Bm4zPIySI8PkOujQc1EBEmIjIppSbBUBNwGTlRySHquVKqhptP4nX0NZjGs/M/l7gqPUfCQLNFSQVKKoWUapsngMAmDeZUsD52WZMoWzGUCnzWToHny1zwvENXLzyifzjVkx1f9yj6/4pggDGmiw2SiyAIBr1Ou90uZEoLCwsS214Y5Sjxo7DTH8zPz9Zq1bnFhWvXrlqW8Ytf/mxjY2N+fr5QmvrTP/3TTqejad5v377d6/WOj48Hg0G3202lUmEYFgoFvQqnpqZardbc3EKlUuGcX7l8bWFhAQDjOK5Wq91ud2pmxrbcdrsbBAGPvTiOGaNra2vVanVubu74+HB1dfXOnTvFUl6IeGdnWzcAq9WTpaVl23ajKBr0/bt3bwdBUCjmX331VR1nci6FEOfPn9/f32+1WicnJ2EYplLJdruDiIuLi0dHR1NTU7OzszpPS6fTH3zwgeu6UkqNQygUCr1eL5vNRlF0++6dUqn0+PHjge8hZe1uR0r55ptvbm9vOY6TzmYQ0XVdpKxyUj04OLhw/qpuAuVyOb271et1TUizv79///5913VfffVVPZUPABcuXEilUo1G68njbduKzp5bz+aKBwcHQgBoqRWKCMi5Ekqh4AoovGA8z+zheYtSE90LeH4A6tSSm1hvEysKQBdG1W/OgHR/W02IW0yWiPRBX3ihmoiTx4985fUMWd50tUgB6tEjbZCjy2OT55WjyS4cVWxhDB4f9QYHcXzKnE6Z/viejj3eixc3ebyQLo7pZ8a3FWDEpazTibECrmEYvX4/jKJkKhMLPvCDMPQJpaZt+WF0+869k1rt+kvXCsVp09rlQp05e6ZR76VSmZ/85GdKqXw+L4S6ceOVTqfz/vvvV6s13w8tK6CU9vve1tb2yUn96uXLiUTi1he3Dw8P3/7GNwihW5vbJqHlcrlQKBweHBsGPTk5AQDfC7UQfCqVIgQWFxd3d3c3Nzc3t2SxmM/n89///vefPNlUCsIw3tzcLORLCTcVeCSby8zOTi8uLr7/q59HUSBk/Morr6yvr//4xz8OguDBgwe5bEEIUSwWNAHy7OzsnTt3vvvd7yYSienpaULIX/3VX/X7ng7q6vX65uamnsbqdrsEmWMnms3mzZs3v/3tb29sbHQ6rSiKpqZmFhfnnz59Wq/Xa7WaZVmpVObq1WK3MwCAZrN58eJFpZTOD7WAx+HhYT6fTyaTzWaz0WjMzMwUi8VardZotKSAbDarUbupNC8W8/VmN5ZCciEAFKEAmmRTmuw5rOKpZTPOdMZP0FTOkwtJP2EsAwzPV93hq4zhK+1k0gGMf3j2oEabvGDpGu817pCPsQdj2t9RZ214fjb6IoAKlVA4HPDi2o6Ifs5wExpbnd4ShBBDwPgEu5GcoCHAiSLyqf3gOU/4G27KZA49aYSaoQilAgCCY8eI+gPjCNNgmqZB0bKs0JPNZtt1XdOwm80moZBKJaqV2ptvfC2bS1cqR416k3NeqzWq1dr8/GIqlXrppZeEEAcHB+vr6x9//LHmz/3BD35Qq9VgNFWsFSBefvnlzz777OqV6+vr6/1+/6OPPmo22pTSkpsoFoue5+3sPj2zfk4Auq7baGwLrvr9/vz8/MbGg5dffun/9T//29dee6Xba7/zzts///nPDcOcm5uLY+G6rmMnXDcZBEFD9DmPZmZm7t2/c/Xq1Tt3vkylUoVC7uDgYGpqilJ6eHgMANVKLZ8vFosFXSy5d+/erVu3zp07Rynd29u7cuWKHmLK5/PdbvfBgwfLy8vVarVSqWTTmVar9d3vfL9YLO7vHbbb7W9969ubm48//fRTjYY/f/68RsN1u93Dw0PTcPSM0t7enpaIOTk5oZRWq9U4jsvlsmVZ6XRaA3empqbu37svpVxdWc/ni/Vao/pkM5VKlaZmB37ox5HkoQCiKANAJTiXihFj0ga+cp1M/jze/cdQfr0etBGOPRKMYyiYXJajX/Grl+I4xD3lCSbOdvqQExeMiFrQdtJXP+9RlEkoI9QgI/MhIKUCpCPnotf5RAg6WVNxHEerz47xrPrQd2TS6sbH5B2c9IST8IgXb+5kADzeDsY3YgzNIXq613G0gK5pmvofk85m+t6g1qh3ev0gCgk1FJBqrSaUsp3E9s7e
hx/9mhrmN7/13StXX1KAX//612u12q1bt/7sz/7M9/1Lly4RQjKZzPHx8e3bt3d3d3V0qgd55+bmFheWEXF9ff3ixUue55mmqQX9giDo9XqpVGpubs4w6GAwUEoBylar1e22S6XSyclJKpWyLOvq1av5fLHf77/77rudTi8MQ9dJvv32O9lsFgA5jwBgd3f7s88+syzrtddec103CAK9RyDi/Py8aZqFYo4QyOUyL7/8cqfTeeWVV05OTnZ3dzOZjBaEymaza2trZ86c0ZIy2k6azeba2roQsl5vcM63t7fzuWKz2fzlL3+1tnbGMIxGo7W3d3B0dNTtdmemZ+dm5xcWFhBxdnZ2b2/v+Pi4VCodHw+zOwB49OiRhgecO3euVqvt7u7Ozs6+8fpbKysrJycnnj+4fv3q6ury1uYTQiUjoNkBFQiQHAAYQfVVx3jRv7hOxj5AE3xMgkX/i44X7fxFy3nRGvGFr7FmBk7AueI4PmXh44/Dhl/IFBpAGICBaCA4Bht/Md2PmrQrbRLjeeexkxy+8cTN0vHhqTsIL2xyk1c2dqGT/nPcGH1WLAVERErJsBdCIIoiOaLGQAKEEFRCKRVxbrtur9fzPM+yDWoayOjUzNztu3cLpVJ5ZvbguPL+Bx8xxpLJJDWsmXLfdd1yuZzNZm/evGnbtu/7nudls1kNwjRNM5PJCCFM02y326Zp3b17T0q5sLBAkAkVN5vt3d3dleU1LsXq6ioXUa/Xq9Vqi4uLcRzXq82Dg4OZmanNrce6BWKY9MmTJ9/73u98+OGH9Xq9WqlFobx69drq6hqP1Te/8e2NjQ0/GDx+/PiXv/zlxYvnw8jP5Qqu687Pz3/00UeImMsVNIupkPHZMxfff/993Vd4+PDh3/k7f8dxnI8++ujoqFoqlXRLXZdegiBwXffWl3fOnF1rNBrbu7vNdjvi/O79e7NzCzdeflkIsbTsD7xeIplutNqLyytCwe72juM4rVarUCgAQLPZTCQS+Xw+iiLDMObm5nZ2drRKlGEYlmWls/njkyoI6fuDXK6QTCVs28zlU34QIBGubVEpg1hyyRllhmEE8VdLfL74oH5EN4S0M8DRJBSMREVfLOw9t/YAlJI4JM6dOO0Ly1WdjkVPH2R8VvLc/PEp//niIyPrHY24j/i/2bAYiwAw7OZNOmV9oiiKoijSRF3jCtWkacnRyMmk7zp1nDLLU99PeT/9LuPLOAVP1VA1fUlKKS25nkwm2+22TlSklJpVxTTt69evFwqlarVaO6kzaliWk83mE4lUpXLyk5/8NIpiRPKDH/xwdnYum81dvnzl+LiSSCTn5ubjmHue3+32Go0mpaxeb3S7XcZYv+8BkLNnzyYSCcMwfvSjH+Xzec/zhBCPHj3y/QGAvHz5crlcnpoqLS0t1ev1lZWVZrN59uzZTrs3GAz29/c9z9vbPbAsp9vtPXm8NT+3KCUsLs2Hkf/aa6/atlUqlW7fvp3LFtbX1zmXlA7p1bLZdLNZZ4w0m/VHjx5p73ThwgXHccrlsl6pc3PTFy5cuHLlyhtvvPH666+fnJzoIZ1Go1GvNU+q9adbO/Nzi8VikVJ27tz5RqO1tbVtGIZtudev3Ui4qXQqe3h4eHJy8s4775imefbsWT1xn0ql9Kz95uZmuVxeXl7Wqvdnz54lhOzs7Ekp55cW19fX0+lkHIQxj2zb9LwuKuXYLGFbjCBIwRBcyz71r//KZTO5oPUY93j/1UBlc3SM/dL4tF/Zb/zKWEwfQ8D3ZIo4+pmoZ184+hpbhLbb8fqc9OfPfQSJ4y+QGsk++SVRSWaapu/7ajTnPx6cxVH2NeYg1NOAMLIQGHk2bTnPgDzPQ++EeDa4NLYuHE1djKec4NmMswAYQsBjKaSUQBkwMjMz47puIuHGcez1B0EQMAKU0kwms7+/77jWxUvn9e44GPT+5m/+5uWXX3769KlSSvfNs7lMEASlUulw58nW1lapVELEw8ND/T8GgA8++KBYLJbL5b29PQ0Z09JL8zPz3W5fi6483txcXz9bKpW2trZSyUy5XE6nU7du3fpv/tv/9vbt241GLY5DnSvq0cdz587NzMycO3fu019/XDk+mZmesyynWj15tLFhGu7Pf/7LVCrzs5/99Fvf+ub9+/f10Mo//sf/eGNj4xc/f//MmTONRmN9fX15eTmKwlQqyRgtFPL9nqeFEwuFwvT09L/+1/+6VCqFYfjtb3/76tWrpmnu7OxwzqenpweDAec8m82bpu374blz5xBVoVBAoIVCARFN06pWK2trZ/7Nv/njH/3o93d3d/O54ltvvKlXtsb3HR0d2bady+WazWY+n1dK5XK5Xq8npczn87pO+9lnn+3s7OXz2Watvrq6+uTJo0KhBEpkc8lYkUazA0oggSgMCepxgGfH2CrIV024A4DeWOM4HnfFJt3AZGFmHEaN0xwBI5AnwTgWw/MrpZ5TCFZKKQGSjOqd+i10FqfhKEopzrmIYyEEY0yHh8P0bCRPgqPGnhztA8MPJUH7OTkxi6yxCmN7GcJfTkENcKI6qibKxFJK9lUjHpPbzH/mPqeeP8aP654kQ/0uEoa1Ncjn845lE/Ksk6GUjON44IUntcra2oplGZVKZWpqampqtdVqbWw80DrP8/PzlUrl448+sSzr1VdfzWbylNJGvTWw/bnZBV3Hf3B/QyNUHMdZXzu7uLh47949wzDeevPtJ48egSJrq2ds237waKNcLqdSqV/+6v1ioXzn3t2rV6/atrWzu/3Sjevdbvfk5CRUMaX4ta99Lebh3/zNu3fv3p2ZmWk22oyxIIja7a5SYNvu/Pxit9u1TDtbSObzWc/r/+Ef/uHNmze1AMbly5c554lE4o033vj0009937vx8vXt7e3dve1SYVGb0MnJybVr13K53K1bt2CUY+uuhud5W1tbOsw+c/bq5tOdfLEc8nh9fZ0QEu7u2G7y3XffNQx648YNapjf+u53ugNPSvnw8ZP1tZUHDx7k8/lsNqttT0fpxWKx3W7rjSmVSsVxPDMzg4g7Wwf5fP7b3/52pXKUz6YZY3MzU/VmM5dPu7Y5CCPDoGnimpbwgphSRuG5mGu8GH7LOhkvPzI6xvHaGPY5PmccBuPtHhFBzwSLUywyvy2rVEppV0MAKRK94GE4y6DEb5hAOGU+zz4doUgZUkaI0iT/qKQatk+GA5Zs8l5IKfWIkM70JvFockSTTC3rK2/ZOFA+ZY2TofbIeJ79dbKyNLxmQhhjhpYnlYIx5tqOY5saKsBRaX4RwzB4FARBADJKJSx/0NvfHXDOk+7S/Ow0gZf29/efPn363t/8TalUKpVKxXy+VCqKOOr3+6urq+12W5dVNA5zfn7+5s2b58+fz+Vyh4eHug0gpXz48GEwCHQvpFQqzfa6iUSi3++7rru4uLiytlqr1XK53Pz8fL1en5qaqp58rGKyuDjf7/ePjg/OnDkDAA8fPkwmk41Go9/3qtWTTDoHALOzs4LLc+cu9P2D48rht7/zzQ8++GBra2t6ejqfz6+urk5PT3t+/8M
PfzU9PRVG/k9/+tMw9PU0vRBicXFRx8nz8/OffvppoVDIZDKWZWkumTiOe70eYyyKoq2tp3qJbj/dXVs9I6Sam1vY3Hw6MzMTxzEi1fLAukV54fzFo6Oj/f39tbW1hw8fdjodwzAuXbokhDg6OiqXy8lkcmlpSatWcc6DIFg/e6byQfWLL744ONjLpBILC3OWZXARmaYlZRz6nuAhY44BjEDEowCo+1sM4DdZxbgwcSp70vHXZPc8iEJ8VnQApUBKKZQEeAYsUc/X9odvgUBG2kPjtUq1nUulkAgcmc5EW04+G8Ianhafb9dJQKlQAVGgEToEQJCh5x9eBhundkOfO3K4zyEMJmb8XizIjt8PXnCD6oUcVz0fb0we41CWDJG4ilKqyaRTSdfzPIpEh6AmM6SUmn/FMoyz6+uVSqXTap07d67X6fz8pz/N5/MEYGVpKZVImKYphOh3u65t8yiuVCpLS0tBEBweHvZ6vXa7nUwmE4mEZsUFgLm5Oa2sVK/Xu91u0knrUqdp2jMzc73eYGtr69rVl1rtxrVz1wmB3d3dpaWFv/7rv06lEufOnSHSiONYVw7X19dM06zX60EQJBIpzwsdOxEEkWFYJ9Xa3t7BwsJSp3/cbLa++c1vXrx4MZPOPX78+Oiocu/eg1/84hf1ev2b33pHiOjjTz58++23Egnn888/55x/8cUXCwsLpVLp4cOHUkqtRm7bdrfbdV1XZ0qFQqHZbBJC9vb2Ll26tLu7pwmCK5XKpUuXfn3zk9XVVcdxoigol6ebzSYAefDgASHk0427c3NzQRAopXZ3dy9fvryxsaGVNmq12oMHD2ZnZx8+fPj9738/juPl5WXHzhYKDzudzo0bN0BGs7Mz9+/f9/v9QtkRUiqQBCSAVFzwKJZSCniW9r8Yjr544ETlHF844Pn4EwA0nGO8BE/5i7EbHL+vtr1T61MpUEJqL6qE1COykxY4sTuM3+u0Yesj4AqFJuAf5pmAyBUwJMPWIoFnQbZ+5TDQff5Ek8Y2tsYXTevU41/5tFPH+MHxD5pURtdgdBZqmqZt247jaCIQz/P6/b4u0Pm+X57KWzaN4oAyTKZcypAZZHZuOoz8bq+dL2QvXb6wtr6iGV++9rWv5fNFIVSj0QrD2LKcIIhqtUa/77322hvpdDaORbFYTqeznMswjKenZ5PJZCqV0ngUSmkYhkqpZDJpGMbNmzeXl5ejKKpWq/Pz8w8ePBhDxtyEvbKy8vjx4yiKLl26JIRqNBq+75fLZdM0C/ni4eFxFPFWq+N53ve//733338/l8sVi8WpqSlE/Pf//t8rhf/oH/2jdrudy+VWV1d/8YufdXvtVrvh+z4hRLfjpZSa996yLMbYzs4OY0ynT4VCodVqcc6npmc9P2y0mmfOnbUcJ4iiIAovXb5anppZWV3N5Yul8jRSEvH40ZPNh48eLyws5HK5vb29jY0NAIii6NGjR9vb267rnjt37urVq6+++uq5c+cIIb/+9a9/8pOfvPvuu4OBXyqV5ufn0+l0r9fjIpqaLuTz2WIxXy7k8/lswnUoAYpK9zn+i45TIejE6n+OPWycECYSCcuyJlf1ZEB3alkO/Qo++1VNNOqEECLmIuZxHCsu9JA6PB+OTlZ9XjQWpVSsVCRlJGWsMAaIAWIFHNCPIz/mARd+HA8TQv1dC2jhaD5t8spwVFA59fnHzzm1sb14/KY/jT/P6ANoVCtoPiXHcXR5Wn/aMAzDMGSEOo6TSqUcxzEYebr1hKBaXJiJAt80zXNn1l3b+vt/77+6efPm4eHxg3v3TdPMpJMn1eOPPvwV57zf7wNAuVyenp7W1q5r+rlc7vj4+NNPPzVNc2VlJZVKdbtd27Bbrdbq6qqUst3rJpPJGzduVE9OVldX792/PxgMrl+//vDhw0KhQCn95JNPrl58SYMwdaw7NTUlpcxkMnt7e3HMwzDsdvsGs5XCs2fPz88tommvri1fODz/2We/tm334sWLnEs9S6Vjvw8/fH/gddbW1iqVytRUOQzDa9eu3b17lzF25syZ27dvdzqdpaWlsevzfV9XTXSTybCsZrM5VZ7xvXBzc3NhYaFeb6ytrepNwfcHu7u7nudVqkda62J/f7/b7RaLRdd1L1y4sLOzQyl1XVeTcGvU6NzcnBBienradV1GUlPTpUTCeXD/Do+8J5sbQsSU0pn5OUBDIUFq9vyo2/MAQE/eTPqx3742xstj8vt4Bb7YV6OUGsYzODQhiECklEoNSRO1J1QTWdJz16AAAKieldeoaRkLQDJUiFXk+e6alBJHKDmNByCEjONSbTgcUCAROOTdp1rpGRA1EgglIn22M403DP0e+qInyQj0E+REr+KU/ZyyzNGHe26E99RrJ1+uf9VhMENiWVYi4TqOg4hRFPX7fQIoRzVVy7JSCYcxdrC9oThfWFiYmZkdDAaDQa/fbfd6vaODg3q9XigU2s16IpO6du3Kndv32o1md9DX6RMhpN/v60omIaTZbPZ6vWazmUqlCCG9Xk/nfsCRxyKdylSqx0opz/NKpdJxpXJ8fJxKpZ48eTI1NdVut4WIL148/2d/9mfn1i5+7Wtfu/Xl5wCwtLSUyWQ++OCDMAxnZ+fiOD6pNqWUBwcH+Xz5ymWn3+9fubH8F3/xFz/43d978mQznc5KAfv7+8vLy4lE4l/9q3917vyZP/zDP3zvJ3/9y1/+fHllYXd3N2HzYrE4Oztbq9XOnDkTBEEikXBdV0fUUkrHcXQSqzPVdqejlEqn03t7e6Zprq+vSykbjeadO3cMw8hkMvfu3UPEoWnxMJVK5XI5SulgMLh//363233jjTeklBqFI6Ws1+uZTGZ7e1srGTbr/rXrVwyDVo4Pzp9dTafTntdNJpODbk8h9cNYSDAINShhBAlD+I3m9tWHnJizgVEFcrxyyAjUNV70WqJchy3EYKCQc87HpdEJOxxb7+kFqxQqoBS1aJLGUOKzOtBzNR6p5IuLf/KcQgGXigxh61Iq1KB0ZtDhmD6oZznh+NBp4fgqJ9uAOCJO/crj1O7yWxzjZCBx6i4MadIp0/5ZKRUEgS+57/uu7WiSC4pkvE3oFdnpdJ4+3dIlmHQ6c/bs2XfffXdz87FlXZmZmbly5cqFCxe2t7cNk85a8zMzM61WS8d1k9ZYKBSy2ezc3JxSynXdw8NDSqnNHC2yeXh4OLsw/+DBg8FgkEqn/WCQTCYXFhY++eSTXLGQyaRN0zxz5sze3p6ueUxPTx8c7CPi9PR0s9nc3d1LJtKtVqtQKJ5UG8lk0jTNRqMRx5lcLieEWF1dff/9Dy5euCyl3Nvb63a78/OzhJA//dM/PXtu9dvf+ea/+3f//vz55UEX9/b2Xn/99YODg263a5rmG2+8kU6nB4MBpbTX62mfHARBPp8/PDx0nIwQsW4wZLPZyvHJzOyUUuratWtCCN/3AeD4+FgjgZLJZKO6c/HixV6vl0gkbt26NT8/H8fxu+++WygUVlZWjo+Pdb6wurraarVeffXVW59vrK+vP3nyaHV11XWdTCbTatVeff
XV+/fvS2BRLNFwKXUsy7JtGykTg/+ynPDUHg0TIFIcUdpOeDmIo1Abp27fgcLxEMZvWrdf8Y4KCCFKyJEVAn3GUPwcEdszp/obCq4xl5RLJFLz0BAplFJCxGGsADRtKZA46pqGUDII/R4jYJsmSpQxyBhRsnymOF2aTdgJRggBCDxvMpaQE5AimLDYF2ue8LzrG0cU8ELB1zRchqYShAi0wHTRMAQQXywWpmgYBe1O2rbnp8vFbFozRKLJ0GSl2ekLVy+jyRLZtBcHH938JAbxO7//g53Dnb3jvYPqwa8++dXm7maz1/zy9t16o7W5tX3/wUat3tx49EQBqVRr2Vzhzt37SNj9BxupdDbmkjKzUCwvr68EPPzi7peLqytuIlkslW/duUOZ1en4Cu39w3o2PwNg+z5dXr7suuVCPt/tdF575VUd7208eSxAOalkfmYqUEKY9KTbVo4ZMezwAJP28WHv0cO9Rxs7J9Xm4uKi7bBLl9cTSWY7mM44c3MzjFkEEj977/Nz6y/Vq6o4taSII9HeP6pVa+3Np7tcqtnZWd8fGCZaJpyc7B0ebnW7FX9QTyVBmBgz9Hg0CHyCiinJ+x4LedzpfvnRhwlGTg73Svn07Gzx7LmVRufkf/jf/I9WIjkIg67fNR1ycPyUWfKP/ru//0f/3d/vdRsH+7ulQjlhpB7d3dq8f9A4GlRre//n/8v/6enWxtWrl3/x/ocDjwtpv/ezjxsd7seGmSpZifxAQDsITwa9p0dHJkhDCUMJJjmTnCrBQDKQqLj+UsClivV3qeIYZAwyUkJ/xSAFAUEADMpR6QcFAUmRowol1/QWAlQkeBiGcRwjom1aJjMMpFQhSoWxIFwSLqlQTFGmdMZKlEKtdxQjDoTwQAWMhAYNLDYwSI9ClyiqJJGCKmkgmAQZpQSRIEqpxxuBS+QShSISqEI2sJNNNE4E1pXRRKuh7CY4HZquS7cu3RpP1HiCTSa1zw9bgW3b2WzWsiylRH8AnHPdwDiV48oJYYBTlga/oSOjXmhdwAuec2zP4z9ZlpVOF+YXF6QU+/v7YRzl89nLly/rJDYMw62tLc0qrcsht27d+qf/9J/W6/Uf//jH09PTL7/8MiKeWSNHR0e1Wm1qaspxnGKxWK1Ws9msUurVV1/d29t75ZVXfvKTn5w7d+7hw4cvv/zyL37+829+85uXstl33303lUpduHDx0qVLd+/dnp6a7XRae3t7qVSmenKSyWS+/PKLbDbtMJrL5bRchC5dLi8vd3s9ANREt1IBoEJE0zSTyaROIOM43tzcPHtuHQDOnDmzu7t7//79a9eucc7PnDnz8MFGoVCoVCrZbE5H5lLK69ev1+v1RqPR6bRmpsprayumxUAKyzYBIIqCQa8vZNw6juM4FlLpd9cgeCFEFITvvPNOPp///ne/92/+5H/5P/7v//nV61cR8Ve/+nBvb+fSpQuZbGp5YZEy3N3dvn/33vT0bCGff+utt9KpfLPWXFlZ2ds7uHXr1oXLF37v937v6GD/6OjozTffBICbN28uLC0KIYIgEApiSToDPwzDdDKVzxU73fA/0yP99mPSHY0fmdzNdXN/zP8yLmoMR1Wfr+hMvvwrUqqJdXsqdpsM6L7yhZHgiEjU6drSJLkhG+PRxkaoa6kanqNpiXU55CsvaNzTP5Vwj211nAScMjn5VUi3UzasxvP7hnF0dDQzXTZNc29vjzE6MzNju04chz9576fr6+ual/7C+YtBENy9cy+fz6dTmVKx/HRre3t7+7vf+d7Ozs4Hv/pwfX393t3Hq6ur3/jGN3Qb8OLFi6lUan19/csvv+z1eouLi9Vq9R/8g3/w2WefXb9+/fDwMJtNP368kc5mbMfMZrP37t31wyCdzuwf7Nqum8lkGGPZXNr3gi+//NK27aRl7u3tZbPpo6OjarWyuLK8u7u7tLzc6/UopQQUNUwA1E1ZAOh2u5ZlWZbl+/7+/r6UPAwDRNTRMiJms9l2uz09Pb2/v6+USiYTe3u7u7u7b775+v1790ql0t7eTqPRuHjxvB9wPwhMyygWi4wRg7I4jveaNSEECmnbpk4XpZRRGN145eU//uN/owd//w//u//tF5991m639/Z3pVALi/MLC0uua5+oI0Q4f/5iPltAxHarG4YRo3atVgsDXiqVlpeXj+p7m5ubUkrGOvqC8/l8s9k+f+ESECoBu4Mwijrtdjvi0rJdJM5vWuL/XxwvLv1xjDa0llEbf4yChOez0skh4MkFPF6HL3qXU+/1m4xWn4oaxvgCYMLTTK7/8TTU0AjHl6jrbHpSrtNpScUdx5m05smkEV6gMxz/iZLTO9aLHwZeMD8yum5KqUEpYUYy6Rby2TgOB4NBIuG6rouUVKvNV199tVwub25uptPplZUV7asXFhZ+/vOff+Mb31BKxXF8+/btTCbzox/96P3333/ttde63e6jR4+63W4ul7ty5Uqj0fjTP/3TpaWldrstpZyfn6eU6uwuDMN8Put5XrNZDwKv0235weCkVqvX6zMzM2EYLiwsvPfee4VSsVqtaVrOs8ur77zzTqfX/s53vnPnzu04js+cWTNMO4oiEcUAYJkMkIFS/sDrW53ATS0sLCQSiStXrrTajWw2fXh4UC6XhRC7u7v5fKFSqayvr6dSGQDodLqDwQARG41at9u1LCOfz29vbwFAs9m0HRMAeCw6nQ4iUkqTyWQqFTAkgkd67oRLLuI49AfvvvvuK6+8tre384tfvD87O/vWG28KJQkAEDY3N/fe3/5seWXJYjTm/jvvvNPr9A3DMJldOT558uTJ/s7++XMXUqnM/v5+rX0Sx4elciGRSMwOG6omYca9e/csx83k8nYiU8jmJKAfxoyZvQGH/38cL+aK8HytYZgrqmfSDCNjeL4iODH6BJMR3MjAXrTDF4+vtMahLSADRBjqdg85tAFAPA9QIafeXh+6DaU7clJKnVjrf+SYvuk3vXbypkzGrpM/fGXqOP60Y1zsuEmYz+c1IGt9fV1XBfb29gAgjuNPPvlEKZVOp//6r//6888/L5VK/X7/H/7Df/jrX//6z//8z8+ePcsY63a7U1NT3/rWtxYXF3UhsVwuSyn/+I//+MmTJ7lcDhFzuZzWCfvFL37R7/cfPXo0Pz9PKJSnismkm8vlwtBfXV2eX5jN5TKFQi6Xz5anStMzU9lsNpNJeV6/1+vU6/VPPvkklczoYk8unfnoo48OD/YAgDFmGQYhBJRUggsehZFfrVaPjo7u3r27v79v23a5XC4Wi4ZhrK+v625kv99PJBKaTK3dbm9tbeXz2Ww2u729rZTq97u1Wq3X61UqFcMwEomUlHJ7e/vevXudTs80zWI+m0g6OGIuD8MQCBq2pQemHj58+Ps/+OHS0ko2nalVT1658XIymb5w4dI3v/ntt978Wqk01Wr2OJcXL1xZWV7d3zu8f/++ZVnLy8uGYSgl5udnL125XK2dzM0urK6uPnz4qN3uXrx4eW9vz7UdKaU3CHQ7d9DrN2v14+PD32pZ/wXHi0vu1AJTEx2/U
wUIMnFMwjbVuD34nEjmc0CU33Qxp2LAsc1zgTGH8VcUK/1dSDL+YmM2NCE0Y4VSapgWju3Btm3TYoSQMAzHVaxJKC28MA/2lRsVTDhAOTG5rx8Z/zBErhnGeOaQAnY6nSiKbNuM45jz2PM8REylUp1OB0aVaz1wqAljPvrooxs3bnQ6nTt37gwGg7W1tV//+teXL1++/fhhtVrVyz2VSt29e1drSDiO0+/3X3nllY2Njampqd3dXT1SqJTsdNrMNC5cOLe/v28YtFgsahZ6J5FCVH/v7/3Bk82n6+trX375peu6x3tVQumnn3565eqlmZmZIAiuXLnS6/XSqYSfz/V6vTCMfS8kjmOajBFaLucZo8VisVarLizO1Wq1TCaTSqXS6XQmk6lWq4Swg/0jRKpRAaZp53I5ANjb33ntlVc1E/b9+/eXlxejKGo3WwDSdRKO42i5mGLRDf2g2+4IyQnBZDKZTqYQZNtoV4+OX331dWaxZDL55MkT23b/7b/9f77y+tc+/eTm1HQpCKLFxSXbtr+8dUdJaZpseXnZ98NarZZyU6++9koYhre/vNMJBm+99bUgin76k59PT0/nsumnT5+eO3uh3W4PPK/T9RJBSA3bNM1SqeQ4Tq0d/Bcb3Fcd4/UzubTURINuHJeOVuZYHez5EVZAnFBHnDzGS3rSE04iV+EFz3nqQESlkWfaGU5c53PhqG7pwnAbEFKCUkgIjoY1dDNAjmnnTdMcv8Gkyx6DEiaNcLjrP98knPw+PtX4B0IoHYnj6LvDOVcKUqlUsZATIu71eoRgKpVCSnzfO7N+NpfLPX78eHt71zRtzuX+/uGNGzfm5xd3d/eiKJqfn5+bm0PEzc3NL7+883RrL5FIZLPZIAgIIcvLyycnJ4i4tLRECHn48KHWkCoUCoPBQJMapdPpYrnUbreZQROJhOXYvV4nm806iUSn0zIM4/GTjQsXLpw9t57P57kvd3d3FxYWHj16lMtl5+dnj6r+0tJSrVabKpfDIGg2WjyWtmEalEgedTodPZWjuyD1+onj2I8fP37y5Ekcx61We3FxOQxix0msrKycnNSIaUVx0GjWhBC2bV6/fv3zLz472j+IosgbBK1WK5VKZTJpPWVfrZ4QACUFKOladrFYzBeLFLHXaZuWNbMwbxhGMpn84tZtyvDi6ophGEopy7JmpucMg0khHMc1TWduZqZer3f8ZiKRWFlZQakePXr45MmWEKI4vxALhSCpYbTb7cXFxYXFZakwlUp3et2IAxqmH4Rerx/EkWe7QBP/v9je+JAThNnjR04trbEdSikBhgRlOKEdNo7G4HlfMml+MGGHAEDoc4IZpyz2xXdHYpw6J2gsJ04Y4Rj8qi9VSkCgAKB9kZ7eD4KACeI4zrh/OPl+k1c/6fr1g5pd68Xj1LWOf9Dx8ThCkFJy3eNErFarhkFX1lbn5+eUUt1+L47DDz/8UHMNEUJu3boVx7Ee+avVaqZp6r7Z+fPnf/rTn66srHiex2PQ0XW32w2C4NKlS/v7+ysrK7pDvbOzk8/n9cWsrq7atr22dqNSqUzPzrRanTNnztTrdT8IPM8HgJmZmUQioUkxwjDMZrP7+/vlcjmTyZgmk4rHcby3t5fNZm2ToVSJhFMoFASXYRhbJlOSD3qhbchisaBRL+lMslDIPX261e12k8mk53m1Wo0xc3FheTDwi8WilFJy3uv1PK+fy+UajcaZM2fW19cNQvf39+fm5rSsfL3e9DxP19l6rXav3eFhZNtmNp9xE3a9Xt8+2Hv77bf7ne7jx49TmfRrb7yue8Wm5czOL5oWe7q9mclkirlsFPJCodBstsIwHAz84+Nqt9vVknUA8pVXbqCbabdavV7HsRNB6B0dHW9tbbZarU6nC0gz+UIxn7YdHoQx+tS2nX70n2Vj/x+P8dp7MQQ75ZrGbmfkCUcrcyieeRp8MunT1Aup4FfWRH5TmIqIoVQwhJU+F9NOIvgYIcTzvCiKCKGccymBUSaEFiIdyneYpknokHVGf8fnDzVBkXjKSjXGZfyS8XNM09R4cUTUBq+U0nNWzLKEEP1+nwI6lsGF8Lo9QqBYLAoRHxwchGGwsLCg0cmpVEoLOGslCaXU7OysnrRYX19/8ODBgwcPdK1F45tff/31vb29vb29dDqt1Y7+36z9V7AkWZoeiB3l2sM9tLj65k2dpUVXy6kW0z0zmCUGoC3Apa0ZZ7kGGtb4vEvjE1/JB7yAfADNCBJY0NpADBc7GEwPMHpaVFd1VXdVV6WoyryZV8vQruURfDj3Rkbem1XTDdCtLCtuhIeHe8T5/Vff/32O42iatre3J0fRMcYSdVmr1Vqt1kcffYQxxgpRFK1SqaRp2mg2CVEODg4kVf5kMpHU19J1F0WhqiohqNlqr6wse97k6OioP0Ddbrcsy+WFXhrFEwQELaej4fXrN7M8khUUXVcRQpqmrq6uTqfTer2+v79fq9W++93vnp4Mut2FSqVy69ath08e27bZaNTW19ffeeed23dura+vO5b92WcPRqPRjevX4zgsy3IymdCSFzl99PBTXdebjdrXvvpVx3WTNIUIbVy9WnB2PB5OkygTLMjT3d39ZrNZ73bzIh6OglqtBqFgjHmet7K6dOfW7X6/f+ocS9gahFAOGWq68slnjzzP46wMw7BiGghAhLDrVhcWFoMwPu4PipKVjE+nnqabeZ4CaM0WKIQQn9fz+PlgHhVndT6ZKF0uy8ttFr7Nm9AssATnGYosziOE5BQFnxMXku/FWLlgvfK9EigyM7AZfrUontHhnGE5ZR8BPOtUZse5bNJ8bhLyGUCtmMOYz+jEhQSji6d3lHnv/J+/yY+Y2XBZUgwhApBAwDknhBiaZqqa49i+76+vr46nE8kjdnRy3Gq1To5Lw7AqFbfXW9Q0LY7jPC9939/c3BwOx3JOvNtd+NM//XPP8958800ggjzPpceo1WrtdlvKm5ZlGQSBEAIhJNlydV1njF25chUhVHEdKbV9fHSyvLqyuLjY7S6wkxNKabPZrrhOq9XinOd5fnrobW0/BoDrhvoXf/EXGxvr7U5THlzBxDRN17HdSoUxTss8Cn2AhK7rnHPf9xvN2sHBwZMnjyuVShiGb7zxxo9+9OOiKK5evQoA+qu/+qsoivM8v3nzhm3brVbrXVb+8R//seu6tmFKzhvp/23buX7dWV+/4jhOf+AnSRInicwSBYITb5pl2dif3nn11Ve+9CUhxOPHj70kspibsnKwt7m1tZWmebPZvLK6Vq3WPvroow/ZLzY2NnwvCMMwCAICgbwvk1M0nnpRFGEIsrRAAnh+mMShEGI4HBJFkwBDzIWu6wjjLMugYc8WK4RQXB4VeFYS74uRNJe3iyUGMYvRnoad8w/mSxtiDh8Gzxtvs1M9C80uAGXmPuuC2zz7FAjALBA9w9ZAeRx564EAPDNPeOEi5/2seFbI+8L1XDih+S/oaSg898YL39QMdsQ5N00TQ0iLsijLJEniOMYQYgCyLJNTFI7jMEYnk0kURYyVt27d2t/ff/DggZxk
73Q6vu8Ph8NGo7G0tCQJiP7pP/2ntm2//fbb9+/fZ1TIafetra2dnZ2Dg4Pl5eWrV69+8MEHvV5PUjPFcSxTNdu2t7YeyHYIoxxBWRDGEOLBYBBFiarqiqIM+qO93QOpEb+xseG4dpZlN25cG42GnPPj4+MiyxcXexhDXdcd26q6lSwrhBBlnt24c8uyrFqtJqnya7WaBFL3+/1vfvObf//v//3RaOJUqh999DGEcGNj4+6n9z3POzg4SNN0ZWVFOoqTk5OlpaWFhYWjo6O1tbW7d+/euHHjvffeE0IstDocAsrZldUV1TIKynXbNCp2EEeKqh8eHnqBr1n6a1/5MoRwa2vrcPshgEA3UJxM//wvPr2yfvXo6KTTahdFkaeZnGnSFVKW5WQ62t/fh5Ue5wBiWDLqhVlZlkkcYozr9bpl2zhKiiyPkiRLUt1ArKTEfGqB8OmA3lOMKEZ4Zg8zM7i8PbfPfHm7sLw552cZ2fMqmTOvw8+nZ+ed3gXruPzGC0sdPC+rvOAJ5UZm38j80Wfl3HPEC4Twojk99/MuHOfyns+1z/naFEIInPvGLMviOFYw1jCxLGNpceXJk83l1RVVNQ4ODhYWFh4+/HRxYcEwjFqtFgSB5GKwbfvmzZuDwWAwGKytrb311lue50nZsKIoIMBJkty/f1+mi2tra1mW3b179+rVq+PxWCIwW62WEKIoiv39/V5vgXOua4bS0LtdRVV1RPBoOKnYrlWpSfmKvf2Dk5MTQghCOAiChd7SyemRhI8rilKU2UsvvUAIYZRqKtE0rV51OAOU8qpTURRla2vrtddeK8vytH/81ltvZln6l3/5l9evX8cY37hxI0k+efz4MSHktddeOzo6RhhoulJvLIeRT2lxdHSAEMAAGoZTFFTXzNFwoqlGmuSKommaBqnQNK0UXFGUJEnCNIGahi3DS6KNhYXbneYnd+/9+Mc/Pjw8tCzLsqyuCXu9HsbKZBx88N57mqbs7x/bpjWdTvI0H4/H4/HY1HRCUBQHeZ6TRCnL0jL1sqQInBGOSAxDrVZDiIwmXp7njDEExXysePbg0vqR5Amz4sLnGeEXOA/wjCeYfcLMxp6prEomh1kVcMYqKF3ObGh4Zp/oeRYl0zTwrNWdfeoZH6l8aW79CwbOWaDIvOVcvs4zjbdzpOnsOufd8bzhzYfFZ7udv2vey4NnXeK8Hfq+TxBCACoYywl6TdNs3VhbW1MVLKPWPM89z2t3O91u9+S4jxCCALtOLYqiMIgDP1JVtdFo6Vr85PF2p937ype/9uDBg3/5L/6Vruv379//2te+Jif3fv7zn3uel+e5pJp2XVf26Hzfz/O82WwuLS0F/jjwgzQryrJ0nGoQRIqqcg5s2y5ZGQZRpVLpdDq1WmM0Gu3v7xcpsyxreXkZQlGvVuI4rNdcSUiRxBFC2K1YhqZpmlakpWmaaZpKNc80TSV+TRaiNU2TCsHdbvff/k9/+Lu/+7+Qx3/ppZem06kUeFnsLchwenByypjY2dl57ZVX33333TfffPPg4OC1V1/VNO360npW5MeDfqtZLwColPkoDIfeJCny/8f/+C8++uTj0XgCCe52u9evbrz55pu3LF9VdM8LojBVCVpeXt16stuotW3LGY9GxyeIUsoIU1XiVKqoCrjRyLJEVRSaF3mWJElGCHIBCMOwYruSpFRAmGclVpU0zeeNRC4jcY4gma/+P9dj/OpGOGchswDtqRz3fC4373bAnIu+YBczI7wccF5Y/5f8zVNqtmdf4s8Y4YVrkKf0bMQIwbNgnwuObv7GcNlhXth/fof5T5eXTQWFEKqKqitEahLKud6yLHe2nywtLem6PpmMAQD3799/443XmvXWZDIZjUb1er3dbkMIT09PkyT58MMPV1dXGWPvv//+4eGhZIw3DEOGrJ7nLS4uvvjii91u9+TkZDqdGoaRZdnp6Wmj0eh0OlJGoixLjBXHqVbrNc65ZVZURbcqtqKoSZLkZcmY8LxA1bVuZ2FpcWVxYfn48IQQ8vrrr1cq1mcP7g4GQNeUzz777M7tWwghS9fVBlGJYpp2HMcK0WLGGo267FIsLS1tb29//PEvFxYWqtWqYRij0bgsmZQBjeN4dXU1pzljbHFx8eTkBCFkWVaj0ag57vb2ruu6YRjWarXT01PTNPf3969cuTIdDpGq5ll2cnTsZXGOoJ/GkyT59NHmz+9+nJel3aojhLws/WznSXtlcblTtFqt3kJzPPJqdfvRo89sq1qvVzudnm1ZEMKVlRVT04sik1FDobdCP+CcJklURCUNSwyFrmrNVl2CrvI0lWJehmkWRRFfWNyXVshs4aA57unL2+flihcynVlO+FxnMx9tzm8SYQuetUl5evjpeV5c4fNWMNtHiAu293TlP/24p+f3bKJ7vuvFZy4ccXau8833+X3QXKg9u/LLXnT2vGmaCsYqUVSMIIRSQKtUy93d3Sj0Fxa60+k0DMNWqzX1PYTQhx9+KE2UMSYVMw3D2NjYkL3+9fV1Xdffe++90WhUrVYxxrVabTAYfPbZZ/1+X9M0ORk8nU7zPJdxlIRxyuJNo9GIIwIAqNcaEpJ/dHQcp0me565Tk0xbnhdEcdzpdBYXFxFCuq7HcfzZZ581m/XDw8NGo1arOoqCMcaGqlmWxSjFiJimiQE0DIMnmaap/X4/z3NVI8PhsCiKer3+xhtvSNF5RdH+0T/6R3lenpycMMaBwj1vsr+/a1lWp92R+hz3Dg51XW80GlLBdzKZvPHGm48fbXba3fh02F7oOZYtiWdU16k4DtNUP4kAQablCATTggIAwjQZTidDHDSa7sLiGkKo223/hz/5s29/+3v1etVxHMAhhFBRFBUTqbmNMd6bZEmWyuEyTdMwhAqGtm23W92iKDzPi6KEcoEVYnJTURQsnh9eAjZTin+6QAEA5bnswoXtgg3Mthl88uxfMbNJNHfUi8eZDSGAZ7PQy0t3/tYw840zw7vsJwV/DvgGnLFuyx4+eD7dwOyDz1NBIQSfN5sLbvC5j794E8/mkPIxQmiuKPuU2hQAkGWZ1Ljc3t1RFPLSSy+9+vprURRI/XrGmOM4Evgix8CHw6HjOMfHx5PJRAKgZWlnf3/fsqw4jnd2diQc/MqVK/J2GEVRURSS4VvTtDAMi6JYX1sLgkDq/pUlHY1G/eFgNJxomlYwLnF8aZYdHBy9++7PptPp22+/vbK0vLW1hTHsdrsbG+vHRwerq6tJHDFS6rqexHFRFApK5aoFSTadTqXWYlEUq6urGxtXDg4O3n333c8++8wwzN/6rd+ZTqf7+4ebm5tLS8ubOw8BAJ1OZ2lpZTqeHB4e/vZv/3an2XrnnXfDMOy02kdHR6+99ppUtknTtNPpWIZ5kB2VBZR0VVGZH03Gx6enOS0rFTtJUwGh67pMiKP+qbFcOz4+KgvGGLx567pbrVSr1dPT09XVjdCPJENkgXAURUKISqXS00zdUzllRZ4mYZSkcZnnUIBarSbp3hBCpq5xIMqyTNMUW/aFdTC/HjjnApx5p3lzeu76ee7
z8/BmaYTnK/ai/cHzouDMhcBnyzBCUhg+K5OIVMznZoh/lXU+/+CpEc5hrQlEDEIBoayccggBwgAhPLsDibNRYikezqFA880TCKDgZ3VUxsCMqRsAAIXAUPINX2AiPbs3yCuE51g+eRBTVRSsAEaDuMAYI9wouRLlotlZHftR4MWdzqpC8KA/zZJiMhm9euuaFEK6d+/e2mL7ypWrg9HwF+//1LAsADkmsN1t6abmxf76+kYURZXmchRFBTTDqV/SwND0NNuqWHYYeLwsrqytMz0DhglNff94U1XV7bt7jHPN1qGOBpOjadCvtbQXX9ugNOp224phT2N25BUP90eHB+PQ0D/2Dh6l48Od/esnfVDQ5fanL968rhtOo95u1Jw/+P/+fzauXL/9wmu+nx8eDUZhXnG1ovAcxwVAJGnAGKO0LGm++fihaentdvODD96L42R1ddXzR0QBb73y5Xffffev/+PffO1rX1tcXGSOePDxZ4ZhfPMbv/mzn/0sjuhCb/3e3c0XXnjh5q1X9vb2kKs9PHo8CYZra2uL9SZSTNNs3v20/8f//hc/+vmH/6f/y/9ZrSuNpv3w4ce9Tv10MhmewKsbNxE3LMPByNhYf2l/d7S8tE6palh1t8a4KBSFVEGe8mgcjcbTMwknQytME6iqNp0mUEWD6TCOU4EgxkgIXhZFliQYoSbKfd/XDUvR1IkXKIqqmZafxDrGDMJSACY4g4goaiF4URRVeO6XhFx/QIqoMM6FlLEFApwPCkEIgaCcPxNznZcYz9pgMoxlrGQMYIwxIjOPBM+A1tJYJLSFIygABAid2WdSzPUtz5NBcN7qkO+VL8mTgORchercp8r/YYzA2ZS+OCP2nXOvQPqgz7PsGSgWzqXU/NKE7vnRBL6kIzf/6lzofPYgTdOyYAAAwABCiFIqmSa2trbWVpYBAK1Wa2lxYWvrcVEUb7311ns/+qs0zSGEDMDxeKqbJ7Va7c23vvKnf/qndhAWZalpmh/FeZ5LtqWFpRsSylwUBYCiLMvxeDwZjS1Tp3k2GAwEK23TarVaAgJN07Ks+OUnHxFdfeVLLwdB8JWvfOXll68ppGA8UzUSpQWaJiEItGFsOhXFUaN4gLFi2laz3bJV3VKU3YP9z+5/dPvGxtpq7+2338ZIPT09dd1O1aktLa+leZ9SurK6dHx8VJwmhmHs7e1CCI+Pj2/fvv3yyy+//7OfD4fD733ve3fuvPjRRx8dHBx885vfLIoCIbS3t5ckme/7Jycn/+v/6r+u1+vD4QgAsLKyAiE8ONh3HCcryvF0ktMSYmBVTKxaJ9OIw/Lf/MH3d0cTiBjCOEziJEujKFpe6RGixnEchsnGFUfGF+NRIPmRdV1XFGU4GuV5lqSBEMAwDDWmqqpKnvwojGXPWla2JWk6hFBisBRFkQUw3/ctRm3oQiQEYAIwVVXiPOcICybkxCoXgnNelJTjc7qjp0b4nO1CKDh7EpznRzPm7NlME4QQYyxX+uX1OctF5x3j7NVfK/q7fHA4ByoAM8pDxhiEZ10Uzrn4vGu9ZDxgzoTmP/LCPs89lQvPz79F5h4SQi0Jj998801WFluPH2EMFxd6N27cODza/+u//uuX7tzgnGdpQZk4PjjojyfXrl3r9LpxmmQlZYwRTT0rPFI6HA7D+GxUEgqmEEVVVU0hBGHL1IGh27ZtaMrCwsLaympRFJSV/iS1bWvojUxTv3Pnzne+851Wx84yryhDiFG4fzTyp35SKJYFtcwbBy7mKcu5ggtIia7Vm4121TYJfPcnf/Vk67MvfelLluUcH41cN4vC0nXq2/ubvV5vd3fXMHRGxUcffdTptA8ODr797W/X63WZsrbbbUqpZVWuXbv20c/vjkajOI6vXLmysrJyejqQrI2MMamvWK83X331VUldRQj+bPdxWZRX1tfcei0p07JIh9NJtWH+wR/9a8WpqSZOioQIUK1WqWCMsSTOYiPpdpYQQoEf7e7ucobRKqKU2rad5+5gcOr7fpZHElAYx7FlWfM2INHF4/GYMQEhlBQ+CCFZfDKwIgTXDMO2TaSQOC8YKwnGqkIEQogAwAXCClYVDoRSMp7O4dwE5BBIsXg4K6UCB215mAABAABJREFUAKAA4pmexOXU6UJ2d2EFznsO+YDP6688b60+jfieV4mcP7pM5y5YFD+PB4GcJzxvTYLZSV6GqM82eGm2av5cL1z2F5zchXfNDkgwQRBDCME5dhSpqorU4+PjimV+9RtfR4L/0R/9Eef0lVdfwgqp1lqnp6cZY4ZdqVRrRFHCKBl/9khR9ThLi7xEWV7QkhDCudA0fTQamaZpmjqGkHNOCK5WqzXXMQ1DUKYouOq4Gxsb6+vraZwMh0PE0At3bvz0/en+/t5v/Rffwxg/ePiZYytE4UEa+2mqmFYyip/s7wYFvnL7BoiOB6dDVtICCrddc6uuW3O7Nfs7xnf/3f/0B59++qnr1peXNpr19uPNXyz21izLWlxcFIBVqy5jpedPFheXiqKwbEPX9U8+vgchfOWVV/K8/OCDD770pS9Vq26WpRjjoijSNL179+Nms/29731P1cje7gnnvNVqyN5XtVrd29s7PBkuLnRXr15hZer7U4FRkI4Vw4iSISKMKHo0ndqO1e22R8PTLMsYU5Mka7U6gkMIcOBHjUZbUkhJqFq9Xs+LhE6KsT9N09SwaoqipGkqqfoAAIqiyJ1loxIhlGdlHMfT6VQIsdBqckGLIs+LTGoWUM6IbmiWLRAWlJWM0TJlvGRCUEqJmCtbiFn1UK5pLteOEAKcq2DPFub8DR2eswbPIr5ZWw8h8txVOmuZyG1mwxdM47I5/K3bZaN9qk94Phr//MrvbJvhqmdZ7HlR6xnDm8Wo8FKzdXbGzzVCyhhCEAFYlqxIs0KnhJCKa7uu++DBvTgJf/Nb3/zG279xcnRYr9c1TTkdjTJK7YrjVmumZZeMjkajew8e2Lbt+wHEZ9T/mmkAWmJVWVnpyrYH4JwxppwT5CyvrGCEaJ5pisoEjOPYtu0OwYjziW9UHPP45NAwDD8KNdXQbaOk6XB6RAFeXr/yeBQ/3N5url3/vf/qH6w2lH/5//of7//iEy8Lsa1RAsbBBIN84+qV/8P/8X/4b/43v//b3/svNjY23n3no7/7u7/34MHDRs+5d+/etesbn376YDIZdbvdd955p9Np05K3l7t5VvZ6C4ZhaJqRJMnhwTE6J6e6d++ubVcIIdevX5XC4IZh9Pv9yWSiaZptO47jCCGKkmu6yTiY+FMBcrda1QIeJpOr1xe2T0ZJmgKeIa7GkZ+GQXNjw7Z1VdF9P6jX2lE0vX37NqWiUqnoui5XW6VSSbPqdDr2/XA6HX/rO6/leT6dTqXu1Tl5NC+KgpCz6VNFAXIinFJ6OjihJSeEACSIqiMEEACaplQdmyOcFwVI0rQoGC/OCFuwMrd0ZjO4s6xPyBUr7ZALgcHTkgnnF0mZnpsWzfYRz7KufF78eWG3L7bDC5Hg7LPmWyxPBd/OjQpjjOd95XO3C37sQjg6yzOFOE
+EL4XOs1sLvFRoEkJwCDgXZVkKAUu7hBD6YXjz5s0sjbd2d9rNhmlbnud1Oq3JNDQtlxDk+/5w6mVZNplMsqzgII7TrNlsAoSyLEOQFDmVWk5lWQrGTNOoVqu2aRGEGCt93+92Oo7jVCs2QiiM02q92ak1/MGJPx22m/UCMkXFiqKsrVwL4lEeZrphjcfTx4NHx4NBc6HbaLf60/GdazeBSqChelE4CTyaJW3bchoLJaBxFPzDf/gP/+gPfyCY+spLX7l7916vu2w5jBCCIDEMs9PpKCrudruu69Tr9SiK2u12peJ8+OEvIYSNeivPc4nelLTi9XqtVqvdunXr4ODA87yl5YUgCCCEq6urEq1aluVgMLl9g5RlycuCgyyLPdOATU2/cXXxdDIIJoGtq4iV/aO+AtHtjQ2FTxYXV8qCIUR++tP3bKs6HEx0XbcsK8sy3/cluD8M4zTJTdOWA9O+75um2W4bsr6f56V0MpLQrSzOiCoRQpZONE0TAGiaYVZsh9WKsjRte+wHACPGBOQFFoxghIhGFY6edihmqxYCAATkQMilyyF8imgBc9DTeTuRswGSThqdb/J+8dxgbWYtF1zi5f7kvHU9d7t8/AtRLpmdEDzTSJKn+LlN0vnYev6eMSNcE+c6TfCckQ48z2vPF4TmD6UrhHMAITxjcRUwL4soicu8uPrWm6enx5988kmjVpUaRv3hKReqbdtpmg5GwyxLAAAY47X1jdFopJug1ekIITzPAwCkaWoYxmQ6NjW9Wq02m816vW4ZJoRQsHJw2lcUxTSMarWqa3qWJQUtSVlAxE1Ly/txiVAcx05ePT0ZJHlAQa4ZlbwcPHryeOSlrYU2V+CPfvLDn/3ojz755ce9WttwLAp4lIQaoF44BUxrVCpf/upbP/jBn967d29p4ZpttTgH0+l0fX398eNHlUpFVev7B9s3btwYjUae5z169Gh9/YplBZTSx5tbb7zxRqVSQRioGmk1W2tra48ePYqi6ODggHO+tbXV6/VUjTSbTV3XKaVpGt+9+7E/DlnBYi80VK0s0lH/ULXNTr0WZu4LGyuc7k29WEfqSq3RqNc7lqukiW1VVVXf2d7/7NNH6+sbhmHpuo4QCENfckxlWTYejyGEVzeu9fv98Xg8nU7b7bZTceWvXJas2WwmSTYcDqMoCsNY/sqUUsizaq2R53lWFpVKBUCc5BnR9HqrhTCCUBAEORYQAogFAZCx+UX/1A6Z4BCdiS6dCQc9uzIvL9HnesLnGszMU8086sxG5LjPcw3h+Rt/jtohAM8QoJFnEANn7Zm/3QjnW/PS0mQ6PjPCp/oWZQmeV7yakZ3K52fXTAVnXBCIsKIQQCjlSZJAATRFPTw5btVrnXZzOp1wWtTr1TDy949GWVH4vh9FIcY4iKMsy6pVx3YdkqnVajXPc7l0ZLw0HfmSQtNxbFqWcRLKNvrrb74RBeHp6WmSpq7rIoXoli0gXF1brLj2J589OJ1OOAOKouzvH968c3U0HaRlVKs2bt++UzzZe+/+I6aGt15uZ97IMAzHcQqac0HtimmamhCMENRo1LzheG1tzR9njx9v/db37uzvHXVWdM/zTNOMk5BzpqnGaf9YCKaq6pUrG6urq7u7+1/60pcG/VGa5kFw1G63R6PR1BvfNK5Pp9NutxsE/t7e/srKymAwcF13cak3Gg9URZcyiXWzWoTZwdbexnqz5bpZPIK0YHFY15U379yOvPgXB6eVhvPCyy+0ay0eRLV2y/fiTqfy859/WK3WVVW7c+eOpmkyBWWMqarCqIiimBBlZWVtc+tJURQQwmq1WqvWx+OxEAJCvLu7WxQ0DMM8z+UUtWmaQoiNK4vdbjdNcj8K3VpV07Q4zYmqYKJyCJIkHU8mXhjkeU6ZYIydnkbPLkB0vn7kWubnz3AAuLgk/ALOfRQhTznNZhV+earzOz81krkYdb6nhxCet4JfxbAvH/bCxxGpHyKtglEOISYEUPqUgu38bOTbhGTBgOcwvNkdQhI8AgAopUVRSA5fCKFyLm8ozjm85RtnsofgvMEqznuPEjQkmBAQqKqqqxpRFUzI6elpMJ1YtlGxLKKpJycnU2+cUZzmOUJIM8yyLDudjqZpZZlLrlvD0FhZtho1AECe50mSdFvdXq8n2RxHw2EQBAsLC2mafvjhh9VqtdlulWV5dHxsmrZuBJK2rNqovvTSS8d//cPxeHxTu7OyfOXRp4+RwoECNVVturWVRdYP0n6YRv6kaTu9ZjsK/bXOEuAMAaASJABzHPvDj37RqjZef/317/+//y1CzuMn206lUa06hmHJryUvEhky5Xkqv42yLI+Pj994/Uvr6+sLC0vvvvvu4kJ3aWmJcz4YDG7dvoEgQQg1m804ToSIkyTyfR8hJKsgo/HArOij40HrxmLqxXmYmETRFB2rRrVi+Rn71htfXm2shF7UMCvXF5fWlpZZcHLr1p1/+S/+1Q/++D/8t//tP3r/Z7/43/2jf7yzs3d6etputz3P++lPf7q5+bBarVYqlYcPHym6urKy8vjx448++qhRb8p46ujopFKpEAJee+01QggtOedcesvj45293YOJ743H45JRy7J008YYE1VhHCRJsriyXBSFomqHuztxHGtW+3zCG3HOqaAAACAQwgicEWmflWfmSy+zuHFmJDOhMXGu6TBbhxdMbj6aA+dB3Aw0oigYPGve8BzZMxtBlGtYsiRr+tM5+Hm3KQ3kGSNECHF+ZmZfbNyXM0B4PtQ478HRpSneCzWl2V1E7iZzCTFHbM6ogEIICFIAGGN6tVaWZY6hUuBRniVxSGmhKJKNDY/Hw6IoGo0GpXQymRiqstDpagq2TR1Cg1KKIJBAtsFwQhTkuLamaRDCPM8fP37s+76u64ZViePU8wLXdevNFlbIcDx68+byLz+5ZzmV2y/c8f3w3t0HVzZW6tVWyZM4DSM/CLMUFpkOBcjjcHiKPMLzrGraTcdxdN1QiKVqFdNgtKzXq4KJ034/itMkzSeer6qO5OGXGUueF3lecgYgxJqmHR4eUsoVRRmPx5ubT8qSdbu9Ws2RpKkff3x3fX1d1+FPf/reCy+8ILWZLMtyHFv+lJJh6fT4sa3h9W5NdetIEF6UhGITmWFcsIx13PrG166CEsReoCJsUNpcvf6z937x/e//6//+v/8ffvjDH373u9/9/vf/9T/4B/9gc3NTCLG1tbW9va2qapqk3jS4fv26bmt7e3vT6fTVV191ner7778fx3G1Wh8MBmEY+75fliWCRELVfd+nLJtf4rTkvDxjTqlWqxXLrujmydRPo1jDuNbtTtMZ0b2AEMwGnQAAQvDzsI4DIOaC1V97uxypfcGTF8xBPpCsKNLZzkA2GGNB2cU3CgEAwHMBKZGKKzPACgDPcGNf+NTZ85ftUHZp5+808t+ZdsWFs39aPj3Pd88OiyCngjN2JuWNiDymqmu0yKUvjaJ4MplgDFuthmCo3z9ZWFhYXV3lgrl2pVZzw8ifjieclnmWWpalKDhJkmA6Kcuy2V2SFfw8SygtVFWdT
[base64-encoded data omitted: this span of the source contains only an encoded blob with no human-readable content.]
Op/P54vbuTqfb96M4ZujXv/UP73vfD9398Lnd3k5WVAlJWMRjgo+sHFlbW2U5rKtCNiMHvsJgt1SSZTlIkj4hvdXVF0msRn4UB3EYhnEQMgzIU8xuuwYkBE3iwPV0RRc5kec1XpQ4jvP39nCozXZ7bm5OlOWYkFQmE4ZhbzBA+3i7IIoiSiRMsMT3HTOJWdcLlg4d5nl+p9n1YzQc2wRhQRJTuuF4rj0eqxzruq4XBFA6chyH4sSx7Ewmk0llMWYZzJeK1TAMR6bDi0q1OkUpDYJAVlWEUKfXa66vb+7uMhybsAxFtNbvIoTCOOR0FSGUuFE6nVYEmYmIyolUVuM4tkdjYG4xQpqu7OGckBkB6wcABh1wfDoIS9LXTkJAEZvsOzLhA7Js+B4oMidfwfujSUDO4H0xDd2XrfLMa55iklTRa1mKyddlWZ6IeBiGoRjFcRwGcafTyWWzhmHU6/UkileWlqanpzutdhTFCKF2u/PAAw/OTE/duHEDlteCsAsg3NFoNDMz0263ESIcx+3u7pZKpWarHgVhKnX4O995MQoClmU7nfbM1HQURQgTVZUbjdpw2I8o1jTN9z0g/W3bjOJAURTTHCX7y8M933ddV5KkQiFHwphlsSAKsiIKgsCyDMOIkiSJEo8QyeVy6XQKqkGGRaIoRmGCKGNbriQqnhsUCoU4jkVBxhgH/t5ugl53IIkKy7ICL0H5BH0XxwkwkwFLOJrN5nA4hLmHQqGgaUYURZ1OR1LkdDaTxHRjazMMo51afX5+EZACmWOXl88Konjx4sV77r77U3/yx49+8MOdTocQkkqlbt26kUrrQeA///zz5+4+I8ui67rlUlVVVde1l5ZWEEKrt+qg8gWwHthUkFLMzMyAswHGOIoiMIPVjD2eSdf1dDqtKAqoT/HeykoPIQRN5h6CGLqpTJphmGazGUZJThCCIGi1Wmh/QgAAkCgOQMsFgQcVH3RuwP3AvNJEVUcphSlE2+ahHICUA1VkGIaO58IgIsGIZVkY2en3+yk9tbCwkMvlYCEHIKIw5k/2bSu+q9sGIBXvr1Iib3CO2Ss4Dyi86WuB0El4TNLg5D488oTln/wsVLMHgxBsoPABpdtr4vAAjQgPwnFcRBJCCIMxyzIUoyiKoigRRRE0fnCgdLtdSZBJnICkUJKklaXlmdnpb33rW51OZ2lpaTDoqYpUKhc4jknpGV2Xo8iI4xijxPO8x594NW2kaMIcPXboiSeeYBHO57P1Wl0UuG63LbCcKIqtVst1XVlP+763vb0JA3u8wI5GI9s2NU2jlMRxjBmaJCLLYlEU8/n8sDPSdBlaeYxpkrCSJBkprVKpdDqtyRuoaVoURcVCmVIKkkXbtqF6BHAYYABA7W/evLmzswNTGlCb7fXP+709GGfBABFM/c7MzDAMF8ex6w42NjZs266Up6ampk6dumt9c1NVdVlV0qnst598Yjy2Tp46dfny5aNHjz765h8YDvscxzSbbccZO45z/MRRSollVUvFShB6rmsLAg+UPUJ7py2QCjAMAQWqqqqrq6sYY4C1QBzDcdzCwsKVa9dBKANsIUJoz7MvSUCmDzpnVVVBN8LQvYEEhBjgMwBukSSJECS6bhQlDKCGLIJtaoDQwk9BEwHCVLCTDIIAEBfY1SNwDEheoygCHkIUxXQ6zZljWMFdKJcopbVaLQzDdDrdaDTS6TQEpO/74Get63q/3wfIKooiDmJvgpccTEEH895Bag4dWOv7xqw1yX7wU/H+GsOD7eLr8uTrfnbyLAx+fcjBC9kT1hAKAFKCKORIAgMFGMOg0PbWFou5w8srkiA2m03X9iqlciqd2dnZev/7fiiKoueeey6TyRSyGd/3eQFHQXj06Eq/3z+0vPTKK69wPBPFYRSxsiy5flCtiMdPHBkO++Vitlar6caMZfLjcT+K/SRCmCGKKooSJ6qqZVkJiUgYa5rGcVBoJAyDOZ6P7CCOIsMwJEnwPG88HqqaBAwSLJyK41jVRMPQUil9OOxLkjQej8MwIoSCMaksa47jVKszzWZnenp6a2urUqlYlitJqmk6sqyVSqVMJh9FkesGk7FXGFcfDsdwYQEWWigUkphOT0/funWrWCyGYQyCzJSRKeRLXuC32+2NjQ3X88rl6k5tt15rwkVpjseGob/88nd+8Affe/XlF+fn5zudVqvdmJqumKZpmuMTJ05Y1ng0Hhw5cmRra8tx3HJpCiaGAckAJg3ADCBICoWCoihwbiKEANj0fb9arcLFDSPae6OVUZROpwHlt20b1JSyLKfT6SR0tre3GYYTRNEwjChKgiDQVJ1ihDGOknSSJAzD7YnpRc5znAl9B04zEBuCIMCAL8wlA8zOcZwVeCBPg/9yXVdV1ampqVQm3Wg09mYgHafRaMmyCB65vu8DNi7LMsxeAJ4EhLnv+1xyYICQ7pN19A1q7O8ZhAcjCh/w6p5Up5Oget134v11vwelp1AYJ2EM/wsxt/dEdE9qAzKcPYkcgdM9Yfg9/yiQvQPLAnyApuhhGEZBmMvleFZot9t2y3vzm99crU5/7WtfiaNk4dBcp9MKI19TZKqI6YweeDbLUZZLWBbxCRYQxjh55KFzzWZzZrby9a989dixo9VqKZdNGbrU6/Wyad20RoQks3NVz/P8gJJYmqqUu91uIZ/p9/vzc7OKIq2treVLZUUSx+OxoasIoQFNKCGKIsqyrKqwi5fyPKMosixLu7u7lmXBpQZiJkIoy2KW4eOIiCl5FJs8Jwq8xHOiJCo8JzbqrWajvbKywrGCqujj8dixPVg6e+AUQ5IkZTKZq1evchzHsQIA+js7O7Kslstlc2wXSsVjx45du3ZtY2PzuRdeyGazsqz2er2nnnrq6NGjkqr0+31FlvO5nCxLS8sLhJBcPkNotLCwcOnSJZCh1uu7iqKwDE8JZhkxmykSgmDPJEQyNMOQzWCeExhtz/OGw+FDDz2UzWaffPLJytQ0HKmGYciybJrm9vb2aDQ66GEBVQBsNfYdFCc08t29/MYSjDFJiB8GlFJJEBiOgyGjJIp5nhdUFUga4NtgYA0OL7gBfIgxDoLAcRxEYphLBE1yQikMyHd63dnZWcdxVtfXhsNhNptOpVK2bS9OzeD9AUUgfvr9Pmg5AZjxfZ+baD4nMQOB8T1T4sHK8/uFKFSVk4RG91esvS5c0f44xYTtwG/Ie9+dzKB7gc3iPRRr0hdO2MiEkCQhFKMJY5lOpWRRkQXRGps8zzOIQQgVCiVFUR577DHbtqcq5bW1VUkWpirldqdVrU4FnqPpUrvdOHpkpd/v+76YYtVut5vP5wSB73Wap8+cdBzn1Mmj4/G4WMrSJNB1PYrdMAhkiQ98p1wpt9tt3VD8QJmdnRZFvlIpMQwSRb5SqcRJ2G4LkiTB+E82m0UkkCReEBmEEGYEhmEUVeJ53vP2WiNJkhiGY1m+WCwLgtBsdCnFo5GZTmeHw3GpVEkSks8X4zhOp7OQ7rrdPlzf5XIVPl4Yb6V0z7oSnDI2NjYWF5Z7vR7IhqrV6TiOR+ZYVtXx2N
Q0/dTp09vb24IgbW1tzc7PHTl6iGGYbqtdKJeazfr995+nhJw5c/prX/saw+BcLscwjKIohw8fZhjGcbzp6dlOpyeKsqrqpmmXS9O1Wg3IGzhWgIGwLGs8HmuaVqvVZmdnT506BaPYkLFBZAs6FZiEgDxJCIFhImDhEELAuCaRDxew5wWZPEmn05lMptftA6tBKRWQhDFGiDAM4hisp9PAHwIpALOCwBTE+z6A7D79RgjJpFKUUhiCyefziGHa7fba2lpv0IcNNrBSBQTiPM+7vg98NScIcRyPTNPzPEGSEEIJpQQhijEHsMpE0gk9A7yIN8bM98VLDiRJdKDr20to+3cmPwhp8I2Pf7A0nXw/hONeikbfPQWYPbUHm1AaRRFFCGNGEAVBEHhOgEYIIZTJZGzTGo1GumrMzc1Vlme/9a3HZVm87577nn7mSde13v9D7+1229lsWpVFa9xPZ/TQ5yqVih/YLEdZOR1G7mjcO3H02Pb29r333n3l1Uuarlj2iGeYSqWMGSqJfCGf5TjOMLRyOR+GLsa0VCoUi3lVlSVJ9H0/k02xHHa9EM5soD0qlYrrdDmOYzBLKWVZDGq7hETFYjGKYkIIy/IIJXFEctkCxth1dwDq4nnBth2O4z3PSxLS7w8qlYphpDDGpmmBlnVqatrzbADPgMICkg0hDFMOMEoCLwk6GdcPJEVpdTqAkfh+yLJ8r9crlktHjhwBsqGYyzeN1Kg/uHLp4nz1raoqE0Isy+p223Dx9fv9fD7veb7juCzLBkEYBCEh1Bw77U57coGBJAtS1uLiIkKoWCwuLCyIothut0EDDM2FIAhQ+4FZ4OzsLOQrqF0RQmEYAlgyHI1Gls3zPGIZvG+8AMkKwsz3XSZkQt/HlAocN1kxAHwDNJzgAQNT/JNrG65hoAEhg8my3On1oP3TDH1nZ0fTtHQuKwgCkIRHjhyxBiPQS8DqX3hMMDqZpBAO7atV6D5bckDl/QbS77Xhhw6AMcxrNwpOMuf3S55wpOF9/hDt94fca2vXvRimrzkC9jId2ntJe6o6llUUFSAsBnOtVqtYKCiSCv4uiiRlMpmpqalv/P++8cADDxSL+e985zuzs7OVcmFjY2Pl0KI6Erq9JkJUlZWlhflWuxEEXhgGkhBpspLPZAWBO3r08LWrry4uLtTr9WIur6hSJpMZD0eGoR09egQUxmPTK+QyGONUKkWTiGdxEgUpXc0cO04pHfb6uqJOV6c4zLiuOzc9U28G8BHEEdk/nmLfT/K5IsaM4zj5fD4M4uFwGIbhcDhmMOe4Vqqc6XUHqqrWa02e54fDYafT1VQjiqJ8Pn/k8DHbtm/evNnvDcPIgcwAOw/hpIvjGGY7RqORoiitVmtxcVHTDN/3VUPN54sXLlyYm5sDQzRZlkF5t7a2Zprm/Pz8hVe+c/jwYUWVdnd3vvjFLwJvBnlDUZSdnZ0oinTduHLlSqVScWyPYZhSqXRndT2XKxQKBbiIofWChlDX9atXry4tLaXT6dXVVcuyNE0D2QogosVicWK+JkkSKJ+AexwOh81m0/M8QRCKxaIsy8Bk5rJ5gRfbrU4cJbIsFwoFsCYALCQMfUIIwyAoNwghE5UYnA6T9ZtJksDlBiprkFhDyMApwHFcOp12fU9RFIwxWGal0+lqtVoqlUgQEUJAb2AYRqFQGA6HMMQMyY9Syk2OyQlGAo3j5KKn+1bce4Xfa123mQMzfnsXUxwn390hwUymePEBZzS6b6t6MG73ytc4+u4/J3KcPWkfwhShfQAJMyzDMK7rKLqm63oUx1EU27ADIEyAWZZleWNjo9FovOdd7zq8cuTXfuVXT993zrIsjGmlUomjwLZtTdM827Ftu1QoDoZ9MP81x8NqpUII2W1bMJ7PMAxGKJVKpVIpEsWGYaiKxPN83bY1TYvCUJYkeKNqtVqlUikUChMVMshlgiDIZrOAts3MzDSbTdd1YcXsVHXGdd0giDiO6/UGmXQuiiKGYaMoMU2TJKjX67VanSRJ7r/vIXCaAhwCBuRv3759+vRpkDgPBoNcLifL8tTUFAwKViqVTCYzGo1qtcbhw4cFQbBtR5blbDbbbLRXV1dlWR4MBmEYp1IpNa15gZ8vFlieI2EwHo/7w0Gz3QIkc3F+IZPJnDpxkud5nuUW5uZfefn5EydOFAqFo0eP3rhxa2Fh4cUXX+Q4vt/vq4rOsQLLhjzPO443Gpm6nuJ4zvM8sCmo1Wqj0ejkyZOrq6scx2WzWfDC6Ha7hw4dAsxjt94AaX4URZZlgTdxv98HZrzf7zMMA6PS4M3F8lwqnQmCIEpi1/cUTQ2ikCAqy7JtWqqs8Cw3GAyWFheKxeJwONxY34aJwU6nw/N8pVIZDAYXL16emqpM+jIIEKgNAUEFNQtCSPI8mF3mRQFaTdt0QUQOczPrd+40m81isZjJZUHR1h8OEkpEWZqoPrmD2QwfgENfUw0eiJZJz8rsD0BABTXZWU8P+HbT15KKBzPkpFJ9Y2pFb7jBccWyLMdyUDZMXmShUPCj0HGcOEkkSVY0led5kiCMsSLLt27dknjh53/+523T/PVf//UHHnjAJYHneRy7t2zD9VyUxEkU6JpsWU6r2S7kM7MzM3R62rLGV69e5dVCFEW+68VhlMlkioXy3PRsxkj3e90wjEVRLuTyoW6AvVoURYIgwTaLCxculMtlx3EGg8GhQ4d0PdXpdFRVJwSZpk0ptW3XspyxOc5kcq7rc5yAMR+GYcrIQMWYzeYHg4FtueVymWX5MPSKxeLNmzeTJIFuCopA0IuEYWgYBvhwN5tNjuPAz+Lc4jmQXGezWUlSJjsFwPEJlD2gaEWIGY1GyRjZtr2wsLC5uanreqvVAvkYgzBYg3qeQ2mSzRZUVW42m7lcYXt7d3n5UBTFjuPs7tbS6Qyg9o7t9ft9WK6CELPfX6GJ0hI6uunp6UajEYbh1tYWyI9g4pFSCm9ppVLpdDrdbrdarYK4FOb34VxDCGUymYkquN3tybKKMTscDsHzBiHU6/V6ne7c/OxgMJBluVDMAz+0ubm5vLwMCieQlUHufeSRhzY3N+Eih1IWFqIwDAM6GFCTMwxjuy6UpnrKmHAhuVwO7PwAHYUxRXD6msxIgGo3n8/ncjluEn7Qp33PGDh4m5BOUEBCKsf7HOAkSU6+n933U5mgNXRftobeANVM7h98BLTvZcjsewTvFbqIYow9z4spYRiGZxiouVVV1VSD5/lbN28KnPhPf/pn4jD6+7//+2w6JwiCbbvmeBiFviQJlJLxYOj7rudr1ljgBZZnBUJw4Cc8z8YBiQOipSWRlyml3e4wDGOGop3txmDYy6YztmVxDKdpKUqJ53kkIiInekHY7/Y0RU2lUlEQirwgavLudm1+nuu2e7OzsyzmXNdlGCbyiTVyZSUl8MpwMK5WqzyHBv1xJpOJIrK9vc1xQqPewhjD0btnGToaRFEEck2QhmUyGZZldV1nGGaCrQPiBxUUDK1PGGRZlg0jxTBMr9cDYTpwGHFMNjc3WUXp9TqZfK7RbmU8z3MDe
DrXdiilYehHURQFwXg4TKKIxFGpVL506VK/PyiVSo7jYsTxPI8RyzJ8KiVsbGyUSiWMcRQFkiRgTEejMcMwpmnW63Xo2ZrNJhR4pVJpdnaW4zjwdMrn88vLy5/7/Bfife8vKJ7B9wmsQ8DWudvtQo3tOI5p26qqIoZBDMdxgqqqQPQnUTwxblMlmedZz7ZK+Vyr0wfBAAwWg0g1k8koigLPC9tjwZ8OITTodSbkPqgOgNXA+6N/iGVABrSxsbG5uZlNpYH/oPuegFCntFotCCWWZV8ThAfz1esgTbRvYMFz3CTXTUpQdMAl8WAjCzf6P2DidjDe0GvzMEII0dc4ke6/Howxdl2XMhhOQUKoF/imafpeSAi59957Txw7+cILLziWffTo0bXV9U6no2ZVOCw4jsOEgg+ayEuj0bBULGSz+SiK+r2RpkgYceXSlEcYSVQ5jrFMW+JlhmGiKMGE4znZc/oMclVVRYgEbsiyQqlU3tzdIoTAmFy73YVf//btO/l8sVFv5bKFOI6HAzOTyWQy2Xy+kMqqg8HA9yPPCxmGCQMShbRYKK/d2RgNzSiKGIbrdHqwGqHRaFiWPR6Pg9Cr1Wrz8/OixJcrRYRQGIZh5AehJysiZqhuqJIkwUQvAOJhGEbRuFqtEkIYhh0Oh51ORxIVnucBAlFV3XVdFmNF0ba2dliW7w0HYG3GZtjl5eUkiSRJEnneskzf9xkGHzl0uFbvZDPFSxev/vAPL4uCyvPieDwEWoXn+f6gm82lM9lUq9USJd6yx45jr6ys3L59u9FonDx5stPpbGxsgGfk1tYWz/OKorTbbZD4yLJcKpVgTl9V1e985zuqqq6srHQ6HYQQqDTBkBKQSVEUp2bkKAhBM81xnOcFgFjKorRb20mn083arq7rqqpeuHDhrW95S73ZS6VSYDkHF61lWRsbG6COoJTClFyyv1MMIgda0Em5QSkdDofgUMzwHBAwMPWyubVlmibQZjzi/SicjHqGSdxot/qjITeJwEmCoq81R5uUlBBRYRRNIu3gOG+0//XJ33vTum+wEj4Ipb4xJv/vwt6zSY4syw5877lWoUVGSiQSKABV1VVd09NLLm2bazPkrPiD/BHD5bddcse4NNoOh5zp7mlZ1SUgMpEytHTt/sR+OBGOAFDDDSuDZWVGRkRG+H333nPPOXc/CN/9qwheIeabeGqqbemzSZ6hF7VtRzcNzrng6uDg4J//83/+y7//1fDhoVlvoM6ZzWbd4y5jxDZ02zAJVZZl6Yw26i3LsnRNJ0pKwT0/6HX7URTFUS5yahquZWiyJLwkjJE4TAnRNM1k1JCCFgUXRZkkBaNmLWgdH/NHjx5PJpOrq+t6vZ5lxXg8bjbaumbmeTGbzYVQk8lM181Wq31wcDhfzuKocJ36/d1E07RGo8W5VIoeHh4LIY6OTgzDAIXK85yrqzf1egtHMmMM6lIooeGf32w2MXCL4xiiHsNgtVpts9lYlvX27Q0E7JRS1Mm1QAZBkKYpAI96vb7Oi5OTY2wLFsJYhZtWvWEYhu/7eZ5mSbIpcimlKPl6uaJErpbp4/Onv/nNb8bj6fHx6dXVFdzzNE1LsxhOH4PBwdu3V4PBACYmEKGT3QJ2qIGOj4+vr6+TJAEzBjSM+XzeaLUfHh4wi68k7eAw4I8qyxLsE6T0lO/Sg2CEcGVZwDjCMJRSMrUt35I4ztLYMLQnT57AcBH5EP0nak62076ynQKBMea7NgJV7pw7gafEaYJfVIwiVtG04wiQUlbkUFgfDQYD8GTey4T7RebHubEKwn0Xtv3f+qCrpDsmhNpx1vaxzSrsP45D+v5MsvoXVx6yrr5zy0FYQnISxXEYRo7n9vv9Rr31+PHjf/Nv/s2nzz87PDz8w+9+H3ie67rddieNQkJIQmlZlrqhpXFSFMVisXIdC6cmEYQojSgtS0sldVNjZVbkiRyPZkRpusYUViApZtu+bdtE8LwsiNKklFlaHh+fYjEQISwMY8t0BgdHZcmjKLYsRwhVCxpxlMZR6tjlaDj7/uUPrVaj2+2+/OFS07TTk0ebzebm+r7d7sAKDRNexojjOPP5XAje63WbzaYQvCwLTWNhuJFS9vu9+Xzuug5jrNfr2rbtOPZ6vXry5DF2DD158kQIAYn98fGJ7/sAEjAGwKW5O+Otbrf33Xffdtrty8vX56dnjNFer2dZxu319fX12267c3xyuF6vb25u5jPe7w9OTx/94fff/OxnX/23//bfnj9/7jgWYyRO4DIqazXfsoz+QVupFqPmarWCdeJwOCyKAk0jIFPLsvI8H4/HWZY9e/as1Wq9vbl1HOfNmzeEkE6nszWeCYJmswl8FXJN1NW6rk+u7nr9jlJqOp1qlLXbbctyVqtVFG5++tOfLmfT09NTQ9cXi9mLFy/u7+/9em8+ny8WC0AvWP8MXQj6QDSEpmnuUhFFAoR0Tu2szDzPw7Fo2BbCcjQa3d7eVgbTkASCZ88YgzjT87xGo6Hvx8+7Cfs/XTSCZYfeD8cS6l1AumqP5oZgo7scu59OqzS7X9Zug20X+WrP/JeSvXHFDqqiCudlZjo23g4otaAF+du//dtPPvlkuVzOihJ/6tOnTzerdVFm5e7mB15ZltiedXgwKIqiWasnSZqlPE3KKIosU5eShWGqlCKEtdtd27TSLO73+2UhNWZoSouStOTCMKyiKDarMGjaV2/emqbZ7/am07ltWZ129/r6erMKPcdVgrSbHSXI7e1tHMab1WYynjYbrShMlWSlFLPZSimBq9DzHSF2K98MA7vchsNhq9XIssT3XSnlyckJpRQ80vv7W0qbZZl3u+2yLB3Hur6+evz4ERwA+v0+pm1gY+FqgAsBxuWaZkgpTdvCQR5F0dHh4cXFhW3blKnr6+ter7MlfNgO53yz2QzvH5LEXi7Ck5Ozv/u7v/v888891+ech2FRlnkYrYXgjJEwDJlGlFKHhwe2VXv9+jVUrW/evDFNs9PpYKgwn89RtmFXdq/XS9O01+udnJxcXl5Op9NWq4W1MHme53kOOAevBNt+arXa+cXjdrOVZnEcx1mSxnEsynKzWTm2VZblYrHgnJcajaKo2+1OxxNBLEoppgtkJ2rHEBLmVJjjQxcuhHDtLdcCjHZML9M0pRqD3jJo1G3bBpYLD3iYdHDOwVwDGIudGWDzvpcJ9/NVFRsffIFNRtVN29ud9EHNSd6HW/eDs+oVq8Rb/W7FTkD/VwUhbnSnvzQMQ5QcHMI0y8IwtB3ns88+O310dnd39+tf/eYv/uIvfv+7362Xm4tH55+9+DRcr7///vuzk1PHqYEuZJi67/tUESysB3WD5wUWGGqaFkcR8f3u4HATroqicBzP8wJR8ulkXhbi5PhQSpXnBerAVssjkuZ5OZnMfN+fTqd5noPIcnV1ZVmOZakgqK9WK6yCieOk2+3puu55dcb08XhaqzWKInvz5k2v15OSfPPNNxdPzsuylPKdA93x8bHrugcHB5hJ6rr+/PlzrEa5vr5eLBZPnz4FYDOdTrEUAcWP4zhJkgCTbDQa
SIme5/V7A0rpdDrFBKLX6zlCWI4dh5HjOIZp/tVf/a9v37zWdDqdTk1TZ4w26w0gq2Ve9Hq9V6+Ws9ms1WoJIWaz2ePHj2/vbsJwbRiakKUQXNO08WQYx/H9/W273by/exNFEezlcRwsl0s4AhNC0MgBb4uiaLlcekGNMYadSrPZrN1uHx8fv3r1qpJrSilvbm5Wq5Vpmp7nbfL8YTSUXKAlBnmlXg/yLP3222+zOCry3HEs0zSzJD05OXl9PcRKXXhbVTJiLC+B3wKYt3BIytMY7DmkH3CPpJS+7zmOQymFpBs1s+d5wJ/Vzj20Cpkvv/wS7HnMxt91aHJPOaHtrF/IXn9ICNGZodSWUbYtO3dw5XbSKGHGxpQgQknNMqiUGmF0B5wKriSVmkbVbljJyFY6KKUUrrZT0BNFKN3iMqRRb6Bf94M604wwSiiljWYrz/M0589ffP7ll18+PDz8H3/971zb+d/+6n/57W9/65l297zlOPZ8MSWEtPpNachws4zjWCmla2o2Lcqy9AO71a7VW36UrheLhWmazKbrdGV7tuEbV5d/fPr0aRzzZt3UWS5IuViMGOOue7FaLe/v723bef78+cPDg1JqvV63eMM0vSgaep53eXl3dHS0WsetljUaTwGpfffDt57nFTwTqrRt+/io57mWFM7V1RUMLxazOS9Eu9m7fTvq9/tJnNu27Tudg+6jxWLRbvcuL6+fPn2KtHZ3N2SM9fuH63V0dmbf3Nw/fvx4s9nUavy3v/3D8fHxbLmazBdCiG+++/6TTz755rtvYZAzW85N01yGC8/zjh8dwVC03W7HcUgpdxpmr+WEy4fbt1YUhY7jXDx5dHV1tV5tbNt+9OSJ67qvLq/DMHZ9pWjy8vUfWm3vT9/+/he/+MXvfv+Pz58/H4/HR72Tu7u70XD5ySeftJvp/e3o4pyEm2Q6XazXa9j1Usvkko4m86wUpaLdwVFeluXdwzrNv7982263R6MJEO/1OrQsqyzFmzdXpmmHYbjZRJSOKaVK0SCoC6Fub+91x9hsNv1+P/Dt0WgUOL0g8K6vr03dKMuy1WrpjOGSzrIsSkulFMbu6OKAZAJHLcsSNpYIIaQBzlSeRkqn6yyOeU4ZpaZm6vYmDpM85ZwfHR2dnp6A+zadTmku83XChOJJbhOjzMrrHy4PDg5kUtrE0HSS57t12eTH5gQfNH67MpXu32H/nj/6nYqlUZEBKNm616gde1vt5UmAUVUyZFujDHX59u2Tx48ty7q5eksphb0vFOtffvmlZZr/9b/+1zAMP/nkE8eyv//++6IovJqDAaa7VwP4jgllDQabOC8x5WOMtVotVCB43xeLheOam3C5XC6Xy3m9Hriu22rVgsBbrqYlzylTSRqORsP7+zuUhZqp+v2+41pZniyWM9PSh6P7+WIahiFEVZZlFWXmerYiIssTSrTZbBLHcRiuKSW2bZZlrpRcLueu6/q+m+epEKVp6kkSTSajg6PPANyNx+N6vf6P//iPYRjCNRTGnoyxy8vLp0+f5nn+u9/97nmRYRlLvV5XShmGgcMb5RDZ8Y3QH87n8263PRqNgKm22+16vZmm+Xg8tiyr2+32ewfoIWezGWrgNMpRa8VxfH5+/vLly1qthkwSBEG1GhEjk8Vi4bouMg+KQMO2CCFQ/SA9sp2zPdwrw+Wq2+2apnl4eLhcLi8vL4+PjweDwc3NzXq9Bq4LA2w8Jnow6BVN00zTdDabZVlmBSZEEtbOVRATiG63ixeAtIlhia7rMDKEZAmFKH693qmz3VZpuqN5Sil/9rOfgVyOp4OXXKPRuPruDcIHbBsYGsHTiDEGexS9ipaqINwvSvf7wyqE9uOtusn3FUnVYwrYtwn5LpIJoZAj7TCb6oGUUlyW+79eVbjnZ2dXV1dCqfPjk1qtBu/08/PzTqcjpZxMJnB5qSp4WAzAuZkxhrUQx8fHpsYty1osFmBvwrgFvwVcDsYzSqlms9lqtYKamSQJ1STR+GI94arBVbEKZ6XIKaVZEa1W64KnsI0pimI0Kut13zBYUaRFkWZZnOdJHG9M06zVvDRNXdcSgjcagRAFjIxm8yEhxLRovV4/GLSn0ylT6vCou1qtuEgfX5wQQkajUZaHp2eD+/v7zWZDCAHxGrpYuBhiYZBt27e3t48ePcLU+9WrV5RSz/Ngb6OUSpKkXq8/e/YMC70wtQPgvlqt0jQGCbPRaOm6Ph6PHceBveejs3MU7cPhcD6fT6fTfr9fFJnnOUKo8/OzJ08e397etloNzstGo0cIcV3bsixCpGUZvu8mSeR5XczWtr5eGiOE5Hnu+h5MFl3fxyGoadpyubR1A+NQWGmt1+ujoyNYVKxWK1zxIOUBGsGkAaS8drvt2U69Xn/8+PF0PCl2y94RdSgvqWYAnYI/Nb7Wdf3s7Ay8cKxwYrBKSZKUp+CyAqGsxtevX7+GpxMGQnhti8XipHuEPITjHtQfpdRsNsMzGoah70fgx3G4D5kgzBhl++H3cQB/8MU2j4t3kw9V9Yq7e7L9lS/vr0yjuy0U0+m0Xq/DNwkd/OHh4cnJyWq1evPmTZ5lEKRtNpssSWG0yvMCHycMPxqNxsnJyXxyC9QLD4tGAkqTOI6hcENx32w2B4MB0xOp8v5BS5EiDOMoXpY8SRJeFInruiVP02xj2azSE+Z5GidrqQrT0jrdRrMVcNHGWOX45GCxWHielyRJr9fNsiyMlorkRRnV63XXa3a73VarESdLxhg4K5QK22GMMdthQqaO6y1WOWbBwN/wlq5Wq3q9jvXu0L+u12sp5enpKZeyLEvTsGzLWS5WvhfMpvPNOvz000/hHJMkSZpkRFGN6VEYF4WBOQekepPJ5Pnz50KI1XJNKR2Px+PxGKo/4HuiFEDCBoNBkiT9fh9eZsA20AXN53POOcAYvMIkScD1ofo2a5Vl2e/3cTICe4N70tnpWRRFeDQImpCmcGrgsgSVbLvvqeaCUApaQhJGSZKcnZ2B2qbrumUYSikEYZIkab71pEYFhLGBrus4sJCN8VCoqjRbp5QJIZUq1dagUIciCnvXwGjVdQPzWCiw8RTYeAFhJPI20ux7mbCqS6sESN8PCSkl0Bu2J3v/OIA/eMx9wKa6A2OM7IwS93kwTN8ybLZSQ7VNsMBgNE1Lk7Tdbl9cXFBKsfAM8HRF5VFKodw6PDhgjI3HY16Wp6eng8FgOp2uFgs8IBjAsFfFuGk0GsVxjIXGOLHSNFVsxUXWbDal7JRlzhhtNuuAxfzA0w1NN2i71XEcq1YL4jjudpumqXNeeJ5Tq/m2bSslkGSCwCuKzHFsTaOdTivLsjgOLVtvtmr1es0wjFrNMy2t3WlYluX59ld/9sVoNBqN7gkhjWZjuVz+/g+/se02qizTNL/++mus4xwOh0+fPp3NZm/fvr25ucHsC06HXhBg1TYGVo8ePfrDH/6wWCwAmuOUxMS52WzCQlfTtMVisVwu2+22bTmtZns8mjx79mwwGIBvBT2ulHKz2Xi2h9HC69ev8zw/Pz9XSnmeB94sGipgUfg
aK4crv6Mt0E1pWZYgi8+XSxyOIMegigNfdDgcttvtbfRSCu5Yde1xzouiCFo1dMsYr2dpgimIbVqoM/chD1CoMUureJTQJSIU8VPbtuFsr5RahEs8EXKgZVnYWzidTkFbw0GD+lYpxbnwfb9izMPzW0p5fHxciX3fy4T7QfJB5FR9HaGyiqL9+3xwf/JBq/kRGQ5VKHl/aKGUEuTHVcWHh4fYtvX5i0+Pj483mw1WDiA9WqYJel4URUQqePvDgpYQ0mq1sCBtOp06pgKUXNHhweXnnNdqNSyvAniIQBVSosdwXAvFm67rQVCHSQznknNeMTmVUhcXF2AJ4ngmhOCJMMXCB2NZFhoSIQSeER85qql+v49xWb1ex3Cl+kRN0xRCgKHW6XTm8/njx4+BEyLMTNMcjUZYKA9e1WQyAf3ScRwEAwbfOPtxt16vB27XYDBAZwHk/dNPP6WUWpaFMx5ygVartVwukW9RvIVh+NOf/vS7777D4pdarYZJAKxx8GfiAgUiCoC92WzWajWqa8gGs9EckzcEuRACDhebzQbTlNlsBufSJEnw+VJKTdPMdzfM/W9ubur1OiEE4KSp6ajgHh4eoGNQQqCeRPAouiUMgDNAKcXRgLJos9lAYITGD68QoGhlgYEmCFQHQggeDW1qGIY1J5CUlFJkZZHzUhClG7pGaVrkcZqsozAMw3fAzH4m/CAg94NQEbE/fvg4YD6I4S13lLxTD6IF3C90q6dWO8UGqSaNO73SYrGAL60oym+++Qaek7AAQtmDiiWKIkPTYcuNhanHx8eB78NUE4d91TpiC5W2cx8PgoAQAhCGUroFaXSfc6mIrul2s91RSsVRKiRpd/pKqTRNNd0yLVdRRjVmux5jpmVpppmnaWrb1LIs163lec6YyTmxbR/igCQpGGN5LvK87Pf7i8XCNLdiv9PTWpJky+VS102laL8/WC6XWVYwpn/55VfTafTkyZPZbGYYxvn5Od5e7AbGZjz0eGhxDw8PuZTNZvPZs2fwAr++vp7P5+jKHMdZrVaoJEGb7Pf7cN02TVsIZRjWZrOZTGau67969QOuKgx4TNPs9jrT6TQJ46Dm+YH76WfPm83mN998o+n05vbedV2mkZLnuHaxRACnTFmWkC/5vh8mMfyUCCFXV1e+7wul0By+ffu21Wppiui6PhwOUbngOMOYju3tBcMoQtf1QpXgciRJslwuRVFCxGybFmaA6I/AKbVtO0qWbLeTE2HJGIPSBd0mwkzX9SrxIH/iCEBhqZQCI3S1WgF6QXWGF4w3DY8AHm+73R4Oh6iHi6L4kUz4cQTuJzelZJW4Pw6/DxAdpRRjKC/fEXEQhFJuTdLUBzf6jiVHCNG07QP+7Ks/Wy6Xb9++zeIExxgaCciu8yy7vb3dYdDaeDxGjY7VnCBegoaLfLXVzgqBK0DTNDjSwlqr1Wo9fvxY07TxeNzsOESVZSE1Znfa9bIsw/V9EpetphvHaRQWnHON2UlcBkHQbPjz2cZ1XaKMJN74HqnXAkN31+v1bLq2rcxxHKIM32smSeI6ruc2RsNpq9lN4tzQ7Tzj08miXmuNx+PZbHZ0eOrY2mw2u357V6vVNGZqzIThClySWq3WYrHA5Yj1w0VRnJ2dIc5x1pSlmM3GpmnHcex5wcXFU8fxxHYdHU2SjDHmOF6aput1WK83oyiJ49T3/STJlKKTyeyHH77DzobJZAJZY1mWrXbz4OBgs9lQSUDv/OKLL/Da8PIAuSGn9ft9IMO2bTOqAzuxLAv1Ht7/zz///PLy0nEc1/dxuk0mS8/zbN2AAzpQqLu7u0ajUZYlRAwY1kFXAeMMpW9Nn5BRDcdFCapRBpp7tNkgaHEZAHRBAsSyKogqdF3HX4qhPN+thxBcSUEo0XRdd2zPtlyNGVKoq5trrAPYNorMYFSnRENKL4oC+g+U8Shizd2KoXdBWMGbdNf1iT0DGBTulFKNvaOtbaNmj3Czn8QwlEzLnBBC1XtJdRvAO6iG7bFkdEOXUqJvVkrV/aDf77fbbRALkyTB0ILthFRSyvl8zssSLMqyLCUVkIRW6mTPdcFXZIyVZY6AhLESYO5vv/325OQEJGBs+cCFIoQQXCtynme5bdt5JotCdNoDwzDms8i27U570G53b29va0FnuVw2Gg3XcjRmM1qulvHhwImjQggRbjJe0rdX9+122zCMX//q951O5+TE63YOpzM5n22WiyiOijRNn1y8GA1Ho9GMEPLyh6sgCGazZZGrOCps2758c3t4eIhF8P1+v9FoYCvQxcXFs2fPvv/++1//+tfPnz/HdoSiKLAQt9/vYy53fX2NYtgwDCz6Mgzj5z//uRDij3/8IwyzdW24XC4p0TrtXriJ4zg+P7+Yz6etVgs7QDVNcz1HKQU8NlxtcBS+fv0aBx/nHP/2er3b21u0bZizTyaTPCsNw3j69Cnn/Pz8fPTLCVI3fDHKsuRSQkF/eNidTqd1z4/jmBDiOM5kMmm1Wu12+/7+/urqCppDuMWhiXh4eKi168irqGtEUTYajTiO18sVrnv0csvlEvW8ZVmz2ezRo0eYsoBJc3d3B12Y4zhIjABslFLAC+hOdYTeEjLI2Wzm+z5aCcTeer0mUjqOYzl2vdkwbYtqrNVux2mim0YYRxBkvQNm6Pvyv/3MRveat/3v/P/efjTB4sYYo3tVKNlR4fI4xRHVbrdrtZptmEIIbGvYWg9YdqVjlDunRrXTEFNKGWWU0na77ZgW6Py8LDGcNQwjixdgFcODAH0a8LrFYhEEASp7bE3Tdd20g+U64ZwbZlByuVqlrusdHB6G8ds0l0kSpbmcTNeEMM9vtdr9IkmTuIijnChdcKqkzLI8CjPBqeB0uQgPDw+LXNqWz0timZ6u2etVrDFrPlt1Oh3TcDvtAyW1JEke7ieWtXZd17EDJZXgVNdsYBJRFAFuAevi9evXp6eneZ5jbzF26y6Xy3q9LpTGGOt2u8PhsNFojUaTbrcbx/Mvv/zKtu3lcvny5et2u/3s2YskSf7tv/13n336xWq11jSdMe3+/l7XzE6nXREdTVOXUgrJlVKo08x2K4oiz3OlFPf3d6j0iiKXUobh5uCgH8dxrRas16s4juI4yjPOOUfSE0JUBvtAvwkh+KNAswQU1+l00GHGcRyGoRDC87zxeHx3dwfTGtiWojFDV4IMWRSFLDkOdMwe1+v14eGhUmoymaCLZroJ5o1t29fX1wBIa7Xa27dv8fJwVFW+UpbjGYal66XjuLVaw7bdOI5Xq81u/sEcZzvZLwqe52XgOWznsIoFYd1uF4U9qOdKKX2/qvwgwOieFp7u1vcQ9d6yiA+KUrWnCZb/XXUi0FHyUTmKjwRWJaZpllmOJVKmbpimqWmaZ291A8BFP0jgVbmLzAxqvLbL5HDIwk/BVkP3WA1kUaUAQEfyTC5vUNAzuuGch2FclnIymZWF4rwMN6kULE1z27Y5F0lSEM7RNYFyAXMNFCGcczwUcuxkMjFNM8/L9Xr9+eef397eP3nyyXodNptNIdTRkXN5eSmEME1b10
0URZgQ4l3FNhLMpkejUavVsm370aNHzWYTmm7UbOPpEo0QCj+M2nVdn81mlmV99tlnl5eX8/n89va21WodHx9DamxZDiFsPJ4MBgPX8RljlGq1Ws333SzLsjxVSrmuyxhxTAfjnzRNHx4eut2u3FlCZFmGRYX4mKAh7rT7Ukq0r1j/hFeFfSFFUYxGI4QN2vJ6vQ5vC4gScFifnJzc3d3NZrPNJnJd29/dgiC4G99jsy/+ZCK2rtbMMOH1cnhwgF1RWIxr2w6AFjDgwOFWSoFWRXb7nrePRohm2bVaDa8EPtG2bR8eHoL1ho9YKVXJaNnOJ0pKCZ+OWq0GgTI460mS6B8P2fcrzP0acssRVXsE638i46k90OWfCkIpJdkT4LMdmch2HcBTIA3FmxCzJkyBGGOWYeJNwbB1axb+EYaU57lkHNcuNYyqfA03S0xE8GahQcewFbDNdDqFcQiQxkLQZrPJmHZ9fS2EAFY5mUwM3cLHgJmkaZoYnTUDF0CRrmtRtAEYqJRwXUeI0rIMSlWjUQvDNT7mxXI6GAzCMHzx4sWf/vSnn/zkJ69evTo6OrJtu9fr4XLMsqzRaKDvD8MQi9rhHo8KEAEA6g98hNBH0d3Wx+FwqGkalntiDkEp/earxkMAADSASURBVPbbb3u9nq7r2P4nhDBN03ebgquyEIwxw7DKsry7u8uz0vMUUk1lDgRLVUIIIBagWbiIISMACo2cjKGfYRhxHPu+f3p6aprmw8MD/Lan06lQcr1e76jkGorGWq2GdYVYxgpd8u3tbZ7n/X5/OBw2GjUEKmBYlAl5nmPeyxjT6VaOFIahpmmY3MxmM6DNRVH4tQasa6o+DdaGMIACUKx2mnK8EsMw2u32zggrA0gDO2PEv9qtBDVNk0vpmqZNiGYYhDEu5WK1enN1BdnkaDJJ01QXe4a/+4BnlVcQb/LHVLn731E78LOKq+33K/bnu197LwgZY4puxU34Oa48/D1ESDi3F1m+fVW7lTI40fdfzPZJ5a64JXR7xey0+ZqmlVmEAMNprZRC3QJvvCAIqnkx7lOkWV5meZ5PZ1NKqW7qpSjjOGZMI4QkcUY1ommaYTUKnq43YZ7MMQSr1WrrzYwQQhn1fLPdbkiVe54XRotG07u9vcXBX5TJJly0O/XFcuG4xmw+0nSVpJtNuNhqi7I4juPFkjVUw3GNakaHTw2RVqvVxuNxJagROxfJPM8p0aQgUhDLNAO/XgsalmkrSV3PO+gf+l5tPJr6vn90eJKm6Tdff/uXf/G/C6Fub681TcPIYTi8DwK/KNws02zbrDyEgIjEcdjptHSdFUXW73dd1767u8MvYl5qmvr19egnP/lsNBo5jvVwPwU8OJ/PdV1fzFcnJyfdbrcUHFc/AIhqhIOFVpAgdzoddFzAxgkhGN8DyNlsNug+it3ePsaYVFtHIvwvIhkcNOhuAZkCjwGpqHLmrdAKXGk4u4VQaRqZpt1stlutDhakPTw81Ot1xnRCSkqZELwsha4rQhikmBiTwoe7LMuHhwesfMOcZhuEVbxV4cT2RL2oZHYX+4/vpt/vJ98LTvYu8PaDUO2USlW4boEfTQMAAxDJdk2EJQZxZLdRAHMFBKHaU/orpQACJUliMG07jTAMvttz2mvXtZ1BCIp+PHu73ea7ZWb4SPAeFWRSlLFlm59+9pQxbbPZxPFSERrUPF7KzWY5md4LIYQ8KnnGNJFkK8aYwRzN4KvN3Pd923FsWzNtZVhSM/h4PD48PCxF1Gg0TG74tQPGWJpt8iI6GPTRzcZxLJUgvBSCO65OqDmZ3sfJ6uTkZBMmhmFgLAZeVXVq4DQpiqLT6eBszrKMUl0IcX5+nqYpBsSGYQyHQ9u2sTsByvrZbOa6brPZvLx8m2XJ3d2DrrN6/VNoZ7vdTp7nnBeUKlyLZZmj1OecY/EtmjdcbUiMKOxRJ8NkLc9zKTSMT0H7xDr7R48eXV2/xbJUdGVoDWAJ43nexcUFsMRqhn57ezuZTNBcoDaGFBD7xfTdSm2MAaB+BrCEq//u7g7jB+zcBtiTpulkMoFfKJrSqnfA0YNhMnx6wG7FcwGIIoSgGMHpCdS60ajXajUQlbG1Al3Sy5cvqwnHNggRcvsZbD8mxc7QjlJKiUb+u64w1f9uUyi+uReE+Bq9m/roBmSJEILhtZQSosl2s7UFYNS7cWL1CuW+kzdlIHYJul23yPb+wHLnbwcYwNyt3QOYAbwL7y/OV2ZI29Db7eZgMBBCkHtRcsziqes6RennebFabUoe2w7z/EBTOt5PzzNn88KymWlRRYo4WVk2o4xruiS0bLZ837fxgp8/f/6f//N//pf/8l/+8pe//PLLL1erlWVrhDLPD8A7qTfcOFlF8ZKyAV75er0uyxKzKYyq0QOj5Ts+Pp7P5xhgWLYLClhV5KMoaLfbUsp//Md/rK57gGGj4Qy1XByn6OK63a7neUkaxXGm63qr1fA8LwwxjOAw8Ib+ixCS5zmWH4LFBgVdp9NBLcoYs+1cKfXdd9+9efOGc76Jt4UJnAKbzWaSZdh81mw25/O5pkiSJOijgAVEUfT27dvpdLrZhK1WixACpeVisRiNRjiesCt3vV6DtsY5bzWalNIKjcNvlWW5DuNarYYzGgw1jDH1nXB8H/ZTSimxbXaw4RSHHQgMEP6CIAr0wfM8QjggVvw6WpVyZ0OBzKxXhc3+nKBq0sjOkxsVCKbn5CPg9Edve0nuRzLhvlRK7Q1I5tMF6ml8ohkXwGmASaDKxxtEd/IrKWWl36/4N47jUKn274M/AQvu8OtorNEb1Ov1wWAALwkMADRNm0wmrmcFQWDb1mw+zrOCMdXtNoVQNzc3R0cn9UaglGKaajYb0MK4ZolBGQa8lqUxJrMsk9IAaH542HNdp1ZzkZkJYZwXf/EX//Pd3d2f/dlP/+Zv/uYv//IvX79+zTk/PT0VopxOJ91ut9GobTabNI2VssE+JYQ0m816vQ6EBmoA27b7/f7FxcUPP/zguu7bt29X6ywIgtVq5fs+xt/oyhBa8G5DtKB/xu7hZrN+d3cHpOH4+BDvHg5y0zQJ9bIswTdrtRr6Xsdx0EFgoHd2dvbNN9+AV+R53v39PYg+yJ/T6bRKC69evbJtu+Blu90+ODiYzudSykajAQu5Ikkx4J5Opxi1qZ2boOPYqGgw+gcFDNyAer0OQs9iOkN1KoTANP/t27fw9oVZVl5wQgj0n+v1utForFYrSikip6q6wcw2DCMtJQ7xwWDg+37VgsIAju/cij3POzg46PV6b9++3jKuHAf0LFBE0I1rMOemTN8mJUIxusN/QpIkTZBVFWFCEqUIZQwe2Eh2iKh35evu+5RSyii6JiJKQray3G1kEkIIKbiglFKNUko5kVwWSihCCAAYHNhVrGLMQLeuNu97CmNvzM4IhFIqiVJKKkqURhljSmNSZ8TUNSwJlaZhGGmqhMg0jRal0jTt+OTM8xw/cBljtmtacyPLEqF4t9+kVLeo7ZuB23Axk8RGsW6jX3frGByfDk5hoPTmzRu33YuiqEjWq
zBpt09gsdVstDRNc23nzZs3FxcXUkqDabyM7h8eXvz0RZbzMkqTlM/mm6dPf6LrtfPzz/7jf/x/wg0pCm6a9eWCHB6e5+nEd08vr/84HN3863/9r//2//27NI0ajWCzWf7iF//Tcrn84x+/Nk0TC7fvbh9OT0//2f/wP/6f/+H/3oQrz/Nsx2x3mo5jnz8+e3h4MC19Mh09f/HJr3/9a8Mw6o3g/uHW853lctpoNOJ0ZNp5p+fEqa1I1u32nFj7Z//sZ5eXl9gV4Thev98PguCHb38Yj8effvrp7e3tfD4/PDy8vRk9efIkz6Tr1A/6x8PhkChD1xzfa87nc8t1er1eccUn81m73QZi2Wq1yrIM15vlfNFvd7KT0ySKV7P5oNu7ebh3a8EqCm3fe/32CkdzmqYHwcHJ+SPLcx3X1W3r8uZ6PB4vNuvBwdHt7W1ZiE6753u1rOCbKHn69GmSps1WW1BW7/byPJe6YXj+aac7HA7vRsOiKBaLxTrcSKJQPaJu1zSNKaJTZmq6bZimac7ml5Zech4nIfXsrpCyzLK6b1MpeMCKgjiObts+ISQJRw/5QuVespK6ro2miyjeaBrlqSBcdZsN13MYI0WRvdcTvldGSokyd39KQfesCj+oQquEtn9n+j49bf8L9f7aw+pHbO9lVJFeJerqR9XL0N7fpbGfVPefC4+gaRr2EKLWNQwLXU0cxwcHvSxPtN2ChIeHO6VUu90WYrso7u7ubj6fQ/NWlmWn0wEHEt2CZVmDweCzzz67vrtJkkxK4vs1TdOIkEopypjreLZtHwyODMNar9fKskzHffLs+Xq9Pjw8pjRnjCVxBl+J29vbp0+f+l4tiqKi4CjI0yxJ0hin+Hw+Pzo6Wq1Wd3d3jx8/uby8fPLkyaNHjzqdznfffXd/N9R1/c2bN2i3lssl1Iz4q0Evhgk0pq/41IBG9Pt9xhg8MxeLBdjtjx49+v3vf39/f48ar3o3Hj9+3Gg0xuPxaDQCmFmVu0hNQDLhVaHrOp4OboKMsYeHh88//xyvStd1rOZ9eHgYj8foxDabjb9bz0gprdoEYNHghQkh4FUD8ucf//jHR48eWZb1y1/+siiKVqslpfz6668/+eQTYDlgaGRZNhgMgA/h+9AlwmijXq+LkmNVliy3lA+UVM+ffwrIlBCi63oYhvi4PS+YTGY3N/dKkV6vDfg0TXNGnCjagO/JebFahWVZ1uv14XAY1Hxd17NsN6L4IAj3o6jCIZGIYLf6QfjtR291xbM9Pf5+MOzfuQqe/X/fv9uHs0dF2f5zsY/CuGoOqzgXuxvnnCtdiIKQjBBiGBqjmiACnI8sywxTw5wRHGsppRRMSbpZR1eX15PJxHXdTqfjed6/+Bf/YjgcrtdroqTGWBylD/ejPM9zWaRZTil1Pd82rSzLiqIwNM2wLKFUvd5I0nQ8mTqWbVnW+fl5PIlXyw1wAkpps9mO4zjLcl3XhVO6rmvbUtd1368pJQ4OequX96enp9PJHIzhly9fnp2dL5fLP/zhD0dHxy9fvuy0e+v1er1eP3nyZDwed/oH6NmwTQXgId5hXddh0wIUfgcJepvNptc7sCzn6uracZwwjO/uHrrd/tu3b4+OjuI4ffTocbO5hi0NlZxS+vDwMBgMANIWRfHw8IA+EMxJQJf9fr9er89Xy++++84wjPF4fHZ2tl6vj4+PUaweHR1JKV+/fg34VCkVRdFqter1erAtw+tHfdTr9Q4ODprNJoYEUkoQ2SmlQGtA3MMEC1u1q/oZzGFMRDabDcgxsLqRnKNotM2tHxyVquLicc4nb4bYn4GDAycyCs56vX50JBhj/X4fMtfNZjMdrymlYRhiS2xR5IoIz3POz88bjZrtWGka/4ioFze526akdtgjmlT2vgXbxwHw0Y8+TIDk/Zt6fyz5rmrdaymVUrB1opRKKqsuWe75cXxwHLCdpxuyN/AeTdOoCeZkXpalaeoVD/jhYRQEHqFyNBoLwXXdlFKORqNwU2Bv3nK5Ngzr5OTs/Pzcdd2XL19vNhuxE3THcRqG8WKx6J0c5aVSShoZL/LtkVkL7LuHyXqxDIIA1vS+KylNO92s0+nd3Nx8/fWfTk5OTNM0jEgIifYDJ3FRFIZhFEUShuvZbEIUOzk+u7y8nM1mL168mE7nd3d3R0dHWZaDqXz+6KLb7f72t79N0/T+/j5KM9u2W60WRqCAfB3HQT5J0xRYa61Wa7VarVaLUrVcLh3HGY/Hy+Xy8ePHaZoOh8Pj42NCCFw8oC2CfIxoOhzHB4MB8Eyc1MvlUt85iwF1NAyj2+3qun59fY3dwEdHR9DBgGwNdBpMSwCBuq4zZmiaBlkZ0hSG8tXyerClwQ2IoqjX6wFah8B/Pp8LIcDGVruJOXjVUBULIaDPIITYtq1RyjmPokhYHF9QqTRNA6mDEOK6frPZhABFCJEkWZrmUZTM50vf9weDI9xZKVoUvCi4rrNer6frdDIZapp2eHRAKc2yBFemrhmu6+v7AAluCDMUDFUaqbLKvmyPvF927sdnFRhVEqPve+xXd64eYYsg7U3w9x9N7qAXqlPMdvCeit0S0g8OBcQGngVBiKkDNzLUP4QQXTctyxSSa5pByNZHvCiKsixw6SyXy+kkAiul0Wg1m83T0zPLcjab6OXL14PB4PBwEMfxZDLhXPq+X683ozgNoyRN09lsgbQTBIEQ6ocfXt7d3MKpodtum7ZTFMU6jOxSZ9So1xu+HyRJslqtCSFgSyF5Z1mqaSxNk5JnRZli8fJgMBiPx0Ko4+Pjm5ubo6Njzvnf//3ff/GTn37//feDweD4+Pj+fnh0dPL9q5cHB/jgs/l8jkEiIWQ43J7osPfu9XrdbrfRaDw83KVpqpRChQnC12azubq6ajQat7e39XodyYoxNp/PfcdttVpwFgMh3nEcVBa46bp+cnICet319bXv+ycnJ9Pp9Oc///nbt2+/+uqrLMsODg4WiwVKfaVUURTT6RSzkEfnj2BAOhwOkyRtNhvNZrPb7YLqgLEH4BDkydlsBgtM1JbQvy8Wi263C6kkCn78yVBOr1YrcN+FEI1aDWgnL8qiKKIo0inDUhqcXJodBLWaECKPIiEkZRrT9KLkQeCbli0ViTbhYrkC9SrLMqr09WaRZrFuMMex2+22bZs4elB6UEp1bc/djOyVhXBWpJSCvVrFD/Lh/hVP90xE2UfOa1s54ftuolXcVk/HdoRvuWd8Wv06Ie/hsZj+KchYsIN893RVGYwivsrkqCiEELnU4NeANQCGoeV5zjSq6yzPM0II3prJZILTsdnuQPOCqaui7Pb+4f7+/umz52DrJlluWLbjeYZlx2k2WazBUUziGEms3SwFJ3kp80JEcdZ1g3qzk+e5UGw6X91ev/7iiy++/OJnQpR5NiSEuJ6NIieOI8uykiRqt5uaVnMcCz4rX3/99RdffGFZzuXlZa1Wq9Xqs9ns9vaWl9KyrCzL8rwkhJydndVqtQspCCGgYqH9gw/feDxGQQtaAiGk8sYsigLJQSn1m9/8BoM+KSWiC1wT
4Jyz2Sx0Xdtz11F4dXONqToMrW3bprqW87Ioinqr6fieJR0cbZiqwTYC6Q5L2tBjdzod5BmkO13X4YVBCHFdB+a8GNxjBcV6vQagHQRBEATD4ZDueELAq1Eu4joHQwOnDxIsShV8Z71eY1hvGIbpGtvnpQRTRxBuJsNxHKdJkqxWK7iSel7gecHx8TGkLZpWSkmKgqdpmiSJZZh3dzemafZ6HU3ThCgtK8DT4TopiuKdoHj/WkfGQwrGT6uwqQZ0VetVpcHqd8lusKGUAk/74zjcj1X6/o3tbcWgO1gVH4xS22ExBoDV3aqCGeHH9gh71Y/gXxA49VazU6v7ux0MinNuMD3LCiEEY0RImec559uRzOnpaTWuhJsgLtYwDK+vr4uiqLT5YRje3t4mQuV5HkUJVvMFvgtvON+v9QaHtm03m23dsC6vrh3HSdJ8NlvEcRoE9eUyMQyrLPPAr0u5bDYbm81G11mWMdPSizIryiyMllGUcC6//fZ7vJM3N7dHR0cvX74scn5+fj6bzZ49e6Hr+tdff/306dOrq6s/+/mfAx2BIhmwBJrA2WzW7XZXqxVAdlTX9Xq93x8sl2vX9U3TnkwmcZzatlur1ZbLtW27nEuMCjabTavV0XWGQR/Ga47jAFdUSsF/5P7+Ho1co9EYDAb/8A//ALwEHSbAHlhoA2vB+I4xBifbyiQSgQ1b/jiOoYTG1QvcH4c4YwysQ9ScOHcajQZqXcijq9k69ltRStEWWpbFiwKUzlajicISlA8ACmma2pbrezVG9fUqLHJu6BI4X5HzshCM6rblCh4nWcZLaZnOQb/NeXF8fHx2djadTgkhvV5PKQU+g6YZWZa96wk/uGHgCDCtovBUmGTV++3DoWxvTZrYmawR8iNL7ck/AdLsp+L9tIkEhTtru2lEdf+9BnI7VK2IDghCsnNcZ4wFfuC6LlFss9kkSaKUYIz4vp9mSbfbNgxtPp/ned5sNoWorVZL23azLNtsInhDYQh7eHg8nc7AgW63u6ZpL5frh4eH+/uh3WpKJYnGYOkHVDCJYi4EnJdMx4aVxtHRkWmaF4+fZGk+HA7DMHRdO0kylCio5TzPUUToul6W+WazEqKMo/Lp06f/6T/9p6Ojo08+efbLX/5yMBgsF+snT55Mp1Pbds/OzoQQnU7v/n6ICQqiCxplAIkAaWDVA55KRUi6ubmBI3273ca2pru7O0JIlmWwloIrJFLrkydP1utlURR4h5GmhsMhSsTz83NMvdFcgVuLqMiy7MWLF8jb8N3BpY8PFIxcYCeW51awGWaA+m6zJ5b+Yh0qIgRJDyU3RrWYeEGSbxgGki02i0HQAONguLA6jsMcB50LWFmu61q6gak1MOosy+qtpuU6buCXZWk6NgwN0iKnlJqOLSnJ16tC8Have3p6moSLI3L4/MWzWq0Gyr5tm8PhOIqiNM23xAD2Tyh0q7lq9Zcg8KrSFLfq4q5iUu7WZf9oq7Zf+lYISjV8V7t9GBVBZ7++rQbxamc3UCVkRF1VGFdKQmNHWEPZY5omY3rFzzYMo1Zr+r5r26ams7u7u9lsYpo63EQsy3r06HyxXEKryjRNRzI1zZLz6WxWlmXJ+ZvLS1wBmqa1O52UKGAMvXYHezl1XYcIUEnZ7XYtyxoNR0dHRzjLeZE+PDycnB6jPoEUkGnEcWy4d2n6tlPFNFzXbaXUJ598outGkiTPnj0zdGswGPR6vWZzuw5+NJr4vo+MPR6PNU07Pj7mnB8eHqLRHY/HFxcXEA21Wq0gCO7u7hhjZ2dnYRh+++23X3311a9+9Stwu9FxgVr5i1/84r/8l/+CGq/dbj99+vQffvX3P7x6KYQ4OBzc3t4u16v+4GA4HD5+/Hi+XHz7/XcwtJdE3dzccCkqwAxLCBuNxtdffw1COUrlKIrg6YLrYbVaYWZQXWOO4ziO0+12R6MR0jigF+BMkF/gbrDrLori4OAABHfDMDDJABseFF/M8S3LWi6XkvPqaux0OrZtx5sQ8NXBwYHneS+vr4HxgIQAnhC2hYL2be6WTzw8PHiel4QL7DyE3x+OOayHkZLgk9Wrsm0fayG7RZxqbz1TFXXVpU/3tE4fcFCrUlDX31lC7d8qygvbGT0h0ioLtu2P6DYIcfjt/6LaOW1VQV7FIdmtcEPxjKkreuU0Te3dqleiWJIknBeGoUFU1u93fd9XSiyXy9l8slgsmOGnWbwFZhWXigpZckGYRhzDUkqlaQJALwgCz3eSTagRZdmWptGiyJSUghdECccGPT8URV4UmWVZjmMVRZGXSiohhLAsQ9cZITLPc6m4aRqEENe1a0EDiqeiKG3btq26aVr1egMDZSWpruvn5+dxnMKJdDKZIUv4vg/8EHDFeDx2XReNVr/fv7+/f/LkCdIFpdR1Xfg49XuDNMk77V6R80a9dXf7UBYCUn3H9r75+ttGvRVFUeDXKdHubh8ANn7//feAW3BSwEgbgQSSDQR7nHOdaVtfiSjK8/zy8tLzvDzPgyDYLzvhkorsJ3d0PHNnKC6lHA6HcGEDlQekeSklDPhQtgghcH/gLmzPnQxtBQorPEKe55vNRgkBRBqyj8ViUfeDFy9edDqd6XT6/fffp1JCuQJADj4jjuM8f/4cPCdMPtCkbDYbKqRju2EYhpvIHwRxHE8ms1arZZqmECUyjY46hO7tY8INLWyVFfczpNqDOvfjs4rJ/TaSvg+T7j8O28mXqiJWSkn2EiBjTGdbK0Q0gWoPVsXHY+jvrdPY/wJ1PNktkMHfqBlarVYLgiDP09lsRpk6PDzsdvtRtGk2m7quj8fD29trSPXare5wugR+QCkFXoLXg4sGY1/MiMCnSXgOL1NdI4LnmqbZlm5bupIlkazI4jTeaJT5Xt2xDcHzdrtZlnkUbRgjnHPTNKXihmE4jpskcRjGyOqr5cZxnIP+sWW5o9EI3iT1WhOoBucSAzHOpWVZ4/H45PjUMu0oipIixmu+uLiADljTtE8//fSHH36QUqLAhr5OKfXzn//8P/z7vwmC4Pr6+uTkZDabHRwcYFMSfnE4HGLzKex97+/vj04PwjBEhun3+5jZ4AM9ODjAe4L3DR+H57iAQFEn393dffHFF1jHjSUTuHzb7Taq0DLcgBzLGAPfAL9br9fR5UZRBLkTKjKgKZD8wamNcw6QtkIT0MVBSwEjRry8oigYIYwxz/NmsxmMZ0VRXl9fj0YjUAKn0ZrLMk4jRZVuaqUo1uGKUvry9Q+LxeLg4IBznpf505MnMKFsB+2rq2vP8zwvuLt7aLVaoAdjc6iUklLyDkKsqj7y0U3uqW/3I6qKq/1Wbf/R9mLwwyCs/pftyTXkznimyrraLgirmlnuiarU+zPG/adA74H7VHItTdMcxyWE5Hme5yWl1DQsnIWmaUqhcpEzxgaDI0pVHMej0aikWiFKYAA5L1Spcl6UZen7fpqlWZlLqryaDxXMcrM6OOxiKJfGCXzB6nXfcxwlcs/yAPB4ttPtNDRN40XCGNE0WpSJpmlZlqVZjEQNvB4qONM04zilVMuygjE
jiiLH9uq1JhSx0+ncNNNut0uplqbpxcXFv/+//sPp6enDwwNyICEExAAU+dPpFPzm2WzW6XRev36dpulXX3318uXLn/3sZ9hDdnt7OxgMLi8vUaRh9LxYLD7//HNM4WazGei1myhah2G7210ul5999tnvfve7TqeTJEkpxJNaTQjBpVyu10VRCKUYY8vlErAKOrfDw0O4GIK9SfaafDB70Inx3RK4CqTAnQG9oItG1gUj6uTkBCxZpRREjNVcChAdzvTqmK44DLphQFjo2g6K4SLNMJxEVeV4VpIkcRzqus55IUSZprEQYrXa9Pvds7OT9XpNCGm3m2VZClESwqIo0TTDcTzDsGzb9TwvjmNwoTWN6jgP9svRqiLVdosH99OO2iN279/UbnXZfqzuEto7ff0HcYi7VUNIPAXbC6r9+Kx4HkwjVeZUO6Lcx48ssfltZyXEOQdIIIRYrdaaxjzPA2oSx/E334xqtdrZ2Umr3aSULhaz7Uei63GWCQGwDuXi9lxaLGa+73e77aIopORZlpimfnDQcx3DdUzDYJEs8jQqsrjI4jmlWZa16g1d1xkRTFN5FqORKHgphPB9H0pzsEySJLm9vYVSybIspYjjuFlaXl3eCCHKsgRhoCiKxWIVx3G3203THOif4BISHpRYEEys1+vf//73Uspnz55Np9M//elP8GI7PT29urqSUvZ6vT/84Q9//dd/Xa83wjAMgtpyuQqC2suXr0zTyrK80+k+PDw0Gs3FYkEpOzgYcC7a7c5o/sAYg6k7shN8lnADalJuV6b4EONiv5Jt22/evHn69Onr16/xV4MPTSlF8GCwCcEbgAnozvjOhx+5AT0hCmBkNnz6i8VivV6juEWWQ39eYaoIPLzCrRs3pYhMPN3Wlp9S2J/iTHS1gItSCJEX2Wq1WiwXSik/8A1T/+LLnxiGMRoPXdedziaz2SwIgvls9eyTT29urocP45/97GeT6ehv/uY/npwcpWlKqaKU6oam890u4v3g2W8FP7iy99PX/lhivw/c7y1xl4/jtoq6Kly3ma36YrcTuzoXEW8aofs6pqps3u8VKaUVQRQHKt2hpgCpHcfRNLbZRFJyTaemaaNdnE75cDicTEaaprVajU6ns7m/1TRq21ajUUdvqfa8DxDe1ahNCFHka1GmjHDLoLXAwTdFWWpUGCY1NGqZZuDbVAleprqmJGGe55imaVkGIbamab7vr9cb7EKIo1RoSgrSaraB7DHGCSFY/cMYm80WSZJMp/Plcn1/f1+vNW5v7uI4/u1vf1u5qkkpMXBP0xTerZBKdLvdy8tLmI7e3t4eHBy8evXq6PC0Xq/f3d1Bjlh13d1uN4qi4XAI1OHTTz/F2gnginCnHw6Hg8FAKeV5HkibWFmFwg+DuLrnNxqNXq8npYQrB8brKEaAl8IgGPQxw7ExpvM8D9TfsixBJ8IRg5as3W4TQvCeYFwppbQsCzxVDAP3QT6xkwgi9tATmaZJdzjCcrmEV28SRuCyttvto6OjSMZZlsAhBqsXESxxXGRZsl7nWZa0280sS1arxdHRIDC6lLI4zjgvZrM5ti9j3CpEKZXgvNCrUrOKjf2Scr/LYjsK2C66WDUZp7uWshoP0t0KKOxV2o9Dtev65O5W9Zn7ob6NaiU/yNJq74by9YOKVO3kF/tfsz32D3qDLEvX6zUhstvttlotXWdYcmJaxtOnTz3PWywW9/cPzVbdtHQwv+D9WhQFZQqe0GEY2natVqtRSqF/PRz0MBqp1zyn33EtG8+ra5rneWVZapShjISt7XSxwI4h09Q1TfM8Wq830zQ9OztjVC/ykWnaUsKOhwVBrdWqgRht23at1iCE4MxOkuTu7i5uJLBCiqKk0+mAbw0/MiBbr1696vf7x8fH4BALIU5PTzVN+/bbbx8/fnx6evr61dXJycloNOp2u5PJBLp1nDIQKDYajXq9Dihhs9ngcgfoP51Oge4sFgsArQB1N5sN+iVCyOvXr1ut1mq1siyr1Woxxo6Pjw3DuLq6QrQj9hD8mqYhSeD4wyxRSokDAkGodoRKCDJ0XUdaVkrBOA85E3g4alqMMQnZgpOgByL7Sc4p7BiZttls1ut1meUYGBZFMZ/P7Y5LGFWUaIberNWwgCQMwxeffSqEIIxePH3S6/UmkwnTtYKX3UH/V7/6VRyl9UZwfX3t+95PPv+y5HmjQUteFEUhRKmrvSZwP7lVViIfRKDck+3t94E4gcT71r2apknJP+4G9zPtByXlfqRJKSnZPqn2fhn8QVjul69qx06uCqFqyJnnucHMyWSilHJdeGyZnPO3b99KyZvNervdlkpMJhOUT48fPxaGBPcCcjgkZCjoGGNYU053JCld1y3LmM3WWKnVbjZZs2nbtqGzfr/HCF0ul5pGHceiSmlUBUEgKfU8z3EsZGNeSinJbDYbDA6VpIZhtVqtNM2jKJpN52XBV6sZDAsty1ou16vVqtlsUqoVRQEkxvO8yWTiuj6qvk6ng9IUVOl6vV6r1ZbL5U9+8pObm5uzs7PVaoXtOghXaO1fvHjBGPvzP//zt2/fws+bEALwSQhxcnIyHo/hcQZ+DMpLLFGB5ngymUDdZ9v2er0ej8fYREC46Ha7eK9OTk4g2AUbDgwkDKWra6bYbelCqKOVgk9hdZ6iV6w+esQ29O+YBoOOj6MEJylGi+ghAfYg0kRZSikNw1gvV2C9ebaDswAfcU3W6vX68fEh4C7Hsdrt5uHhwcXFxT/8wz/EcdhsNpUShEjXtYUor69vs6woioJR/fDwiBCZpqlusG63W5R5nqecFzoj78AVolS1kpIxRhQhUqndltzthIARxoihM01jlJKdA7LEGSaFYJQwDcinFFwwppSs0uzWk5BSvchzzrkopVKEMqoIQfwKUe7uLCmlbHcQGKYuJaRVOqWalIpSzbIMy9yipsjA1UvFdVNhsFXQztVCM5iu66lJJRdaqlFFiFKddotoWpJnBtN8r24aBo7M2+HIcWxDmb4VNBq1m5u3i8nE85xwOTdNkxIpi8IwDCaUo9u1Vi1M1GK5iSKv223rVmuxTDebKef8aCEoVWkad9ttL+Cz2SwMw4uL8/WsEJlp2y0iCSFEqnK1Xraa/X7v6OWb10meTRdzTdc55ylPf/PH3xz3HjebTV4aSZxYVhmGcRynus4opVmeUkrjWClF1+sw8Os31w/CzAkhPI5qtdomDLE4xTTN2XjyyflFFEUqLx3NiONsdvuA6mtYlFLKFy9eOKbVbjTLLBdCUKk0QmXJN8vV6P7h7Oxstlq/ffv24snjMisF5SUvDMls3aKCdJutgpc13725ueFKJkl8MOj+8PLber3++PTpfLECNfzw9Ozq6mqxCYlp6q4rDH2RxIs44qbBTDNK00WWbjZZu9VKBVmPp4zIZrfNebFYz4LA8wOPMb0sSyXKOAw5557j8FKu16HneZLQyWxONaZpmm7bm8WCJKlhWAYzsjxdTGdpnDBKbc2hnOlUI4LkccYY0zQ9S1JCiG2bcRzXW/X1ZmV5ZpSFTmAfN05N0z
RNXZmiLAuRlkUeG6Y2vrqxOLOFxtf5fJktZ0uSsEW2SuwsTRbPPnlEKT0adF5fXWJJa5rxu7vher02bOs9G/wPUg39CDVVSmm69kHKIu/jqx/c5HvqPlkVjR/fk+4ATLKfgfe+L/eIcrhpO2M5+b4/AH1/Vdv+ayt4YRoGChK05kQqqsibN28Gg8HR4NC1bJD62Y45lWVZksRhtGaMRNFGKkEIwQ7XsizzvIiiaLMOy1JQSq/v5rA5gTGmKHPPc9vt9uvXr13X1nVGlZJSYCqdJFEey16v5/n+ZrOZzWZ5nhNGCSGz2YxSCvQlC0Nt56mz2Wy4KBaLhRClplEuSs9zOJeO43ieo+umFCrLCkIk0wjnfJ2sALe6rqvT7e4NSilXPNqEYRhmaco51y1TpyxNU6y2XywWIGSjkNN1/U9/+hMMPuDF5HkeiuowDFer1fPnz9EXzefz0WhUFNlP/+yrzWal6zolClgoHg0FC+d8Op0KotbrNYhEURTphYGGsBRC7LyYXMdJkiSM1qZptJt113XDsExTURQbnWmdTs+2bZ1qQqg4jrMsq9eabGe2r5Siu4uz1WrVg1qz3hBFORmP5/O5qRuNej3KC0IIVsphz4phGAAlgcNVHanc0ULyPF+vl3mRCsF1ndm2bVr67e1tkXPGGCGsLDlKMCHE/f09hkCe511eXo7Go/V6XavVNF0H0qPiSK8u2Y+jaL9Pq26apn8cXeTHVLy7R94vGj8Ecj74dUJI9fjboPqon9wHfiilSr3jzexHb1UY/+gNVSVThDFmaLppGKfHx41Gox7UiNgqXNHWu0wry5xSWpZFlke9Xq9/0PN9dz6fw1xos4miMM6yjHMZx3Gz2Tw9PY3j+Pr6Stf1i/OzIPDjOA58F4WoZRiEkHa73el0kiRq1dxGo0EYXa7KNIuLogBFcj6faqbhunaWJZtwBT69ptMo3mR5VJa5rjMuSl1nvu+GYShE6bq279eEQENueJ6nFA2oAyJ+u9EsyxJiBc/zDEODwQTwCeibZuMJDAthG0F3usosy+r1Okhb0CL88MMP6/X69PQ0Lyz4poEvJimZz+eWZaBN5ZxrpkHIdgkPBjloW2azWc7LrQu9xsqy5HIbeJQxfbevU0k9iqKSSwAn6HEcRzMMjRACyC0XinOJk/FwcFwIjh5V13XdNHCsO45T5sVisSBCSik9z9Moy7KMUJZlWavV8jwP86Fms1mWOfg0OGcJISAhQv+dJEkYrtMsJkRBDacbrCxLx/YsyypLAfQI12ezWU/TdDR66Pf76FaiaJNlie1aeZEWZSYJeaeikHvaPPpjZOvqKlfVhqb3udcfRCP5qHOrMuGPRu8O5Hx/pr/7AueKlFLf80fEUfcBZrOfqPcjEzfH8ZSQZSEIEZZu2Jbte55j2Y16Swo5ny+JkIwxx/ZM06REwwXturYQXEtou93EygF4eOMlwZU4z0shRK1Zm8/n0KQ/efLk5GiQpsnDw8PpydFms2o2641aLU0TsPLv729FTlar1SYKF4uFEMLzPKZpgM5BQFU7U0ac7ppmMEZqdQ8+vJZldLqtkueGodfrQbvV4Vyapm6atu8FRVFIs+k4DlOk3+8XRcHLMssypogeMEkUdPpJHNdqNThDj6fbZbpKKczicNmBsYCxPiZ7Sqmjo6PLqzcwaA2CoN1u2p776aefFkX26tUry9oaLidJjGbBdd3VbOuqzDnXiUKGzNJE13WhJCZD2m6Up2nachHZtu35jmkaZVnmeapptN/v27Ypyh28KYlSFNm+3W7PV0tQW7XdjVLqed48SZWQpqa7rlv3A0boerVq1htYCgScSQgB1CeOY6UEZBaKSJgjcs43m1jtRiZSCil5nudpxo+Ojjw3UErN50sEuev6pmkahgZyDzgPGlGw1apU/PhL33k9qT2zQ7mnJ9q/oNXeHELtaWc/ntTv56UPHqHSN+GLHccCheg/qf2tIrZ60h8Nsx99AdW/uq6XosAnVLXpQojb21vLsmpBUPcDx3E0uh0KB41GnuemaQjBhLQJYbPp/P7hDodFnpe6rteCOiFsPp9HUTKbzf70pz+ZpvlXf/WvPv/882izyrL0k08+MQ1ts1mBdbFabVf8LZfL5XRTlmWUxOBV27bNNA0Tv8V6ZVlWrVbbLj/NMkJIo1lHv+04FtOk73uNRhBFHmh3zVatKErOC00zdEPjgkZh6Fq2tUN316sVCDegpzRqddM0geADAe72DyGJwOZnOBTZtv369WssbPj+++8B8FiWNZ/PgyD46quv7u7uiqJYrVbpZMwYg3oY9bzjeXmelWUZx/FyudTY1qW3LEuLUThE5GWBTAjyJytLzJMQA4ZhGAw7DAspuWW5QRAQInWmGYbluq7BdIxJYQ8HcaOiW6ojHgrvj2PZiotRkuimddDrd9ptzXaw/oBSCm6NpmmrFbSgGSFktVoZpg7YJs/zohCQL2k6LcsiyxLOuSJC7NkimqZZrzVbrY5lWff3t6Zpnp+fg/h+d/0Af7rZbIZ0bQIWr/LeB6lp/ztV37VfuO5f3PvX/X4mrNo2pKvqV7YVKduOK8AoIoRIyX80m+E+lFJEDtkxCgR/xwncZ7Ttv7zq9VBK8zynioCJjbMt5lwJWfeDIAi63a5nO2VZlnmBzifw67oeoyINgsC2zSSJyrJUSt+y4DXD87yyFErRsiyHk9nTp08vLi5OTk5gvwXyVFnmURTd3ZVpHE8m4zzPYeFcUSsxSatgOtu26YZSSn3fN20Lf5RhGI3ANwxjE65KnmkaMQzNMLVa3WeMUaaUErpOLVsnhOg6sW29VA74N3e3t0EQVPF2dXWFckujjDEWJ8l8sciLoh7UFSHM0AVROS8Vo4Iow7YGx0eW60hKxEyto3ATR77vx1l6fDg4Pz9Hoo6izc3dLfDYVqcNnga86vI8x96IVs1HUk3TlGisMghkjIlSglEkd7Y3ZVlSaggh8lwqJT3Hct26bZu6ruV5qtFttlBU4SQFkTCKIs6547mWZUmiIO/gnM9mM40ynbIsy3rtTrvdXq9WScnBnmWMBUGA2nu3zaLcSrQMDZm/KAoiNSkl51wqUu78RXXD2mw2vJR0T91eOa+BhwBHvNF0QghZLBb4TMEl+JFNvep9+UKVx9ie8pB+dCM/dkPe2itK6Xs/2aKldL9sALxRvQz6PjIEEgww5eqVkL1JoNwZNLI9tu7+Lc9zyzAxscBRZJmmqRtYHhSGYbhaZ1lGFcGmWOygTrPYMLRefytTwN4VpEqN6egGcW1JKQ8PD1ut1u3tbRRFrm0qJVerlefaq9VyvSZ3NzeTydhxnMPDQzQJtm0pSgjBDpOMy7Isy3ojIBqBkGIdbhzHwqTLNs1ms0moKIrMD1yAPY5jWpYlpSKU27bjc1dKAtM+O/dbrdZ1koxGI2h2EBjQlUopDcuoNepCiIeHB875YhMppeB1DbMWMJIZY3d3d47jgJSH+rPZbN7d3ZmmuVgsIOFrNpuu615cXMyXC6UEpdSyLNu2MF7HS8LUIcuynJdZloVh6AU+AgaHI
9/NnKWUGqOmaeIiIoRYlmUYuuCllNKyTEJIFEVMUV03cW0gfmzbBrM0ThMM2+bz+fX1tUZZs1a3TBO7D5bLZcpFxSVA9wiZhVKqLEs0GsCNQBAnAh6+GWVKKckYMQwDLw8jE8ZYWRZhGCpFXdetNwKpOBeFH7iWbfT73eVyKURpWb6UEhY2Otnr3HD8VHXmfrrbuyn60W0/sD6oJBnb92WqQrBafL/NYFVB+67Zw6Gye0AB26UdAKMq55vdC2B73HH5Y7todq+NUaoRwrIsL9JM13XP8VutNiEsjtMkSm3TdF3XsdADyDhPUBcZhmZbrlRcCGVbTpLGnHPBpdBFlmVRlOR5rjHj/Pz85ubm+vq63W7quj56uIvjSNd12zK63Xa3286SRNc1ZF2lxFTMCCEFL/Xd4vWCl2EYmqbZ7XZt28bmUwzWpJSM0UajLmUpZNnptCglJc8NUzs6HsRxbJmOZdlKqTTJGaO6rsWLzcHBQb1eL9LMcZwsTjCIOzo6wjkCSU4YhqPRKMuyVkeDDblSCqRwpRS0WoZhtFotCGHBWU/T9OjoCL5SUsp6PSiK4rvvvqvVfMKo41igXEdRiHIagcEYg+kLdPd5ntca9eoz0jQNkyV87kmcOo5j2UaSxGEYahr1PEdJURR5zQ9c18+yTJZC103GWDW+r1RskEowxsAp7Xd7zVo93GygQSnLkmk6ukHOueNaUso4jqXcajLozsZB7HbpaNVSRJ0ahg4DTugYqxCQUirJKaWe52V5hM2thBCsKpBSDgYD04YHQr5er/8/mJNilez1C2gAAAAASUVORK5CYII=\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "execution_count": 10 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "My5Z6p7pQ3UC" + }, + "source": [ + "### 支持新的数据集\n", + "\n", + "MMClassification 要求数据集必须将图像和标签放在同级目录下。有两种方式可以支持自定义数据集。\n", + "\n", + "最简单的方式就是将数据集转换成现有的数据集格式(比如 ImageNet)。另一种方式就是新建一个新的数据集类。细节可以查看 [文档](https://github.com/open-mmlab/mmclassification/blob/master/docs_zh-CN/tutorials/new_dataset.md).\n", + "\n", + "在这个教程中,为了方便学习,我们已经将 “猫狗分类数据集” 按照 ImageNet 的数据集格式进行了整理。" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P335gKt9Q5U-" + }, + "source": [ + "除了图片文件外,数据集还包括以下文件:\n", + "\n", + "1. 类别列表。每行代表一个类别。\n", + " ```\n", + " cats\n", + " dogs\n", + " ```\n", + "2. 训练/验证/测试标签。\n", + "每行包括一个文件名和其相对应的标签。\n", + " ```\n", + " ...\n", + " cats/cat.3769.jpg 0\n", + " cats/cat.882.jpg 0\n", + " ...\n", + " dogs/dog.3881.jpg 1\n", + " dogs/dog.3377.jpg 1\n", + " ...\n", + " ```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BafQ7ijBQ8N_" + }, + "source": [ + "## 使用 shell 命令进行模型训练和测试\n", + "\n", + "MMCls 同样提供了命令行工具,提供如下功能:\n", + "\n", + "1. 模型训练\n", + "2. 模型微调\n", + "3. 模型测试\n", + "4. 推理计算\n", + "\n", + "模型训练的过程与模型微调的过程一致,我们已经看到 Python API 的推理和模型微调过程。接下来我们将会看到如何使用命令行工具完成这些任务。更过细节可以参考 [文档](https://github.com/open-mmlab/mmclassification/blob/master/docs_zh-CN/getting_started.md)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Aj5cGMihURrZ" + }, + "source": [ + "### 模型微调\n", + "\n", + "通过命令行进行模型微调步骤如下:\n", + "\n", + "1. 准备自定义数据集\n", + "2. 在 py 脚本中修改配置文件\n", + "3. 使用命令行工具进行模型微调\n", + "\n", + "第 1 步与之前的介绍一致,我们将会介绍后面两个步骤的内容。" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wl-FNFP8O0dh" + }, + "source": [ + "#### 创建一个新的配置文件\n", + "\n", + "为了能够复用不同配置文件中常用的部分,我们支持多配置文件继承。比如模型微调 MobileNetV2 ,新的配置文件可以通过继承 `configs/_base_/models/mobilenet_v2_1x.py` 来创建模型的基本结构。\n", + "\n", + "根据以往的实践,我们通常把完整的配置拆分成四个部分:模型、数据集、优化器、运行设置。每个部分的配置单独保存到一个文件,并放在 `config/_base_` 的对应目录下。\n", + "\n", + "这样一来,在创建新的配置文件时,我们就可以选择继承若干个需要的配置文件,然后覆盖其中需要修改的部分内容。\n", + "\n", + "我们的新配置文件开头的继承部分为:\n", + "\n", + "```python\n", + "_base_ = [\n", + " '../_base_/models/mobilenet_v2_1x.py',\n", + " '../_base_/schedules/imagenet_bs256_epochstep.py',\n", + " '../_base_/default_runtime.py'\n", + "]\n", + "```\n", + "\n", + "这里,因为我们使用了一个新的数据集,所以没有继承任何数据集相关的配置。\n", + "\n", + "此外,也可以不使用这种继承的方式,而直接构建完整的配置文件,比如 `configs/mnist/lenet5.py`." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_UV3oBhLRG8B" + }, + "source": [ + "之后,我们只需要设定配置文件中我们希望修改的部分,其他部分的设置会自动从继承的配置文件中读取。" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "8QfM4qBeWIQh", + "outputId": "0e658dca-722e-4bed-dd0b-601731b00457" + }, + "source": [ + "%%writefile configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py\n", + "_base_ = [\n", + " '../_base_/models/mobilenet_v2_1x.py',\n", + " '../_base_/schedules/imagenet_bs256_epochstep.py',\n", + " '../_base_/default_runtime.py'\n", + "]\n", + "\n", + "# ---- 模型配置 ----\n", + "# 这里使用 init_cfg 来加载预训练模型,通过这种方式,只有主干网络的权重会被加载。\n", + "# 另外还修改了分类头部的 num_classes 来匹配我们的数据集。\n", + "\n", + "model = dict(\n", + " backbone=dict(\n", + " init_cfg = dict(\n", + " type='Pretrained', \n", + " checkpoint='https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth', \n", + " prefix='backbone')\n", + " ),\n", + " head=dict(\n", + " num_classes=2,\n", + " topk = (1, )\n", + " ))\n", + "\n", + "# ---- 数据集配置 ----\n", + "# 我们已经将数据集重新组织为 ImageNet 格式\n", + "dataset_type = 'ImageNet'\n", + "img_norm_cfg = dict(\n", + " mean=[124.508, 116.050, 106.438],\n", + " std=[58.577, 57.310, 57.437],\n", + " to_rgb=True)\n", + "train_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='RandomResizedCrop', size=224, backend='pillow'),\n", + " dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),\n", + " dict(type='Normalize', **img_norm_cfg),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='ToTensor', keys=['gt_label']),\n", + " dict(type='Collect', keys=['img', 'gt_label'])\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', size=(256, -1), backend='pillow'),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(type='Normalize', **img_norm_cfg),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='Collect', keys=['img'])\n", + "]\n", + "data = dict(\n", + " # 设置每个 GPU 上的 batch size 和 workers 数, 根据你的硬件来修改这些选项。\n", + " samples_per_gpu=32,\n", + " workers_per_gpu=2,\n", + " # 指定训练集类型和路径\n", + " train=dict(\n", + " type=dataset_type,\n", + " data_prefix='data/cats_dogs_dataset/training_set/training_set',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=train_pipeline),\n", + " # 指定验证集类型和路径\n", + " val=dict(\n", + " type=dataset_type,\n", + " data_prefix='data/cats_dogs_dataset/val_set/val_set',\n", + " ann_file='data/cats_dogs_dataset/val.txt',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=test_pipeline),\n", + " # 指定测试集类型和路径\n", + " test=dict(\n", + " type=dataset_type,\n", + " data_prefix='data/cats_dogs_dataset/test_set/test_set',\n", + " ann_file='data/cats_dogs_dataset/test.txt',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=test_pipeline))\n", + "\n", + "# 设置验证指标\n", + "evaluation = dict(metric='accuracy', metric_options={'topk': (1, )})\n", + "\n", + "# ---- 优化器设置 ----\n", + "# 通常在微调任务中,我们需要一个较小的学习率,训练轮次可以较短。\n", + "# 设置学习率\n", + "optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)\n", + "optimizer_config = dict(grad_clip=None)\n", + "# 设置学习率调度器\n", + "lr_config = dict(policy='step', step=1, gamma=0.1)\n", + "runner = dict(type='EpochBasedRunner', max_epochs=2)\n", + "\n", + "# ---- 运行设置 ----\n", + "# 每 10 个训练批次输出一次日志\n", + "log_config = dict(interval=10)" + ], + "execution_count": 11, + "outputs": [ + { + 
"output_type": "stream", + "name": "stdout", + "text": [ + "Writing configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "chLX7bL3RP2F" + }, + "source": [ + "#### 使用命令行进行模型微调\n", + "\n", + "我们使用 `tools/train.py` 进行模型微调:\n", + "\n", + "```\n", + "python tools/train.py ${CONFIG_FILE} [optional arguments]\n", + "```\n", + "\n", + "如果你希望指定训练过程中相关文件的保存位置,可以增加一个参数 `--work_dir ${YOUR_WORK_DIR}`.\n", + "\n", + "通过增加参数 `--seed ${SEED}`,设置随机种子以保证结果的可重复性,而参数 `--deterministic`则会启用 cudnn 的确定性选项,进一步保证可重复性,但可能降低些许效率。\n", + "\n", + "这里我们使用 `MobileNetV2` 和数据集 `CatsDogsDataset` 作为示例" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "gbFGR4SBRUYN", + "outputId": "66019f0f-2ded-4fae-9a5f-ece9729a7c2d" + }, + "source": [ + "!python tools/train.py \\\n", + " configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py \\\n", + " --work-dir work_dirs/mobilenet_v2_1x_cats_dogs \\\n", + " --seed 0 \\\n", + " --deterministic" + ], + "execution_count": 12, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/transformer.py:28: UserWarning: Fail to import ``MultiScaleDeformableAttention`` from ``mmcv.ops.multi_scale_deform_attn``, You should install ``mmcv-full`` if you need this module. \n", + " warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/local/lib/python3.7/dist-packages/yaml/constructor.py:126: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n", + " if not isinstance(key, collections.Hashable):\n", + "2021-10-21 02:53:42,465 - mmcls - INFO - Environment info:\n", + "------------------------------------------------------------\n", + "sys.platform: linux\n", + "Python: 3.7.12 (default, Sep 10 2021, 00:21:48) [GCC 7.5.0]\n", + "CUDA available: True\n", + "GPU 0: Tesla K80\n", + "CUDA_HOME: /usr/local/cuda\n", + "NVCC: Build cuda_11.1.TC455_06.29190527_0\n", + "GCC: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0\n", + "PyTorch: 1.9.0+cu111\n", + "PyTorch compiling details: PyTorch built with:\n", + " - GCC 7.3\n", + " - C++ Version: 201402\n", + " - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n", + " - Intel(R) MKL-DNN v2.1.2 (Git Hash 98be7e8afa711dc9b66c8ff3504129cb82013cdb)\n", + " - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\n", + " - NNPACK is enabled\n", + " - CPU capability usage: AVX2\n", + " - CUDA Runtime 11.1\n", + " - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n", + " - CuDNN 8.0.5\n", + " - Magma 2.5.2\n", + " - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.9.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \n", + "\n", + "TorchVision: 0.10.0+cu111\n", + "OpenCV: 4.1.2\n", + "MMCV: 1.3.15\n", + "MMCV Compiler: n/a\n", + "MMCV CUDA Compiler: n/a\n", + "MMClassification: 0.16.0+77a3834\n", + "------------------------------------------------------------\n", + "\n", + "2021-10-21 02:53:42,465 - mmcls - INFO - Distributed training: False\n", + "2021-10-21 02:53:43,086 - mmcls - INFO - Config:\n", + "model = dict(\n", + " type='ImageClassifier',\n", + " backbone=dict(\n", + " type='MobileNetV2',\n", + " widen_factor=1.0,\n", + " init_cfg=dict(\n", + " type='Pretrained',\n", + " checkpoint=\n", + " 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth',\n", + " prefix='backbone')),\n", + " neck=dict(type='GlobalAveragePooling'),\n", + " head=dict(\n", + " type='LinearClsHead',\n", + " num_classes=2,\n", + " in_channels=1280,\n", + " loss=dict(type='CrossEntropyLoss', loss_weight=1.0),\n", + " topk=(1, )))\n", + "optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)\n", + "optimizer_config = dict(grad_clip=None)\n", + "lr_config = dict(policy='step', gamma=0.1, step=1)\n", + "runner = dict(type='EpochBasedRunner', max_epochs=2)\n", + "checkpoint_config = dict(interval=1)\n", + "log_config = dict(interval=10, hooks=[dict(type='TextLoggerHook')])\n", + "dist_params = dict(backend='nccl')\n", + "log_level = 'INFO'\n", + "load_from = None\n", + "resume_from = None\n", + "workflow = [('train', 1)]\n", + "dataset_type = 'ImageNet'\n", + "img_norm_cfg = dict(\n", + " mean=[124.508, 116.05, 106.438], std=[58.577, 57.31, 57.437], to_rgb=True)\n", + "train_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='RandomResizedCrop', size=224, backend='pillow'),\n", + " dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),\n", + " dict(\n", + " type='Normalize',\n", 
+ " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='ToTensor', keys=['gt_label']),\n", + " dict(type='Collect', keys=['img', 'gt_label'])\n", + "]\n", + "test_pipeline = [\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', size=(256, -1), backend='pillow'),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='Collect', keys=['img'])\n", + "]\n", + "data = dict(\n", + " samples_per_gpu=32,\n", + " workers_per_gpu=2,\n", + " train=dict(\n", + " type='ImageNet',\n", + " data_prefix='data/cats_dogs_dataset/training_set/training_set',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=[\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='RandomResizedCrop', size=224, backend='pillow'),\n", + " dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='ToTensor', keys=['gt_label']),\n", + " dict(type='Collect', keys=['img', 'gt_label'])\n", + " ]),\n", + " val=dict(\n", + " type='ImageNet',\n", + " data_prefix='data/cats_dogs_dataset/val_set/val_set',\n", + " ann_file='data/cats_dogs_dataset/val.txt',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=[\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', size=(256, -1), backend='pillow'),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='Collect', keys=['img'])\n", + " ]),\n", + " test=dict(\n", + " type='ImageNet',\n", + " data_prefix='data/cats_dogs_dataset/test_set/test_set',\n", + " ann_file='data/cats_dogs_dataset/test.txt',\n", + " classes='data/cats_dogs_dataset/classes.txt',\n", + " pipeline=[\n", + " dict(type='LoadImageFromFile'),\n", + " dict(type='Resize', size=(256, -1), backend='pillow'),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(\n", + " type='Normalize',\n", + " mean=[124.508, 116.05, 106.438],\n", + " std=[58.577, 57.31, 57.437],\n", + " to_rgb=True),\n", + " dict(type='ImageToTensor', keys=['img']),\n", + " dict(type='Collect', keys=['img'])\n", + " ]))\n", + "evaluation = dict(metric='accuracy', metric_options=dict(topk=(1, )))\n", + "work_dir = 'work_dirs/mobilenet_v2_1x_cats_dogs'\n", + "gpu_ids = range(0, 1)\n", + "\n", + "2021-10-21 02:53:43,086 - mmcls - INFO - Set random seed to 0, deterministic: True\n", + "2021-10-21 02:53:43,251 - mmcls - INFO - initialize MobileNetV2 with init_cfg {'type': 'Pretrained', 'checkpoint': 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth', 'prefix': 'backbone'}\n", + "2021-10-21 02:53:43,252 - mmcv - INFO - load backbone in model from: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\n", + "Use load_from_http loader\n", + "Downloading: 
\"https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\" to /root/.cache/torch/hub/checkpoints/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth\n", + "100% 13.5M/13.5M [00:01<00:00, 9.62MB/s]\n", + "2021-10-21 02:53:46,164 - mmcls - INFO - initialize LinearClsHead with init_cfg {'type': 'Normal', 'layer': 'Linear', 'std': 0.01}\n", + "2021-10-21 02:54:01,365 - mmcls - INFO - Start running, host: root@3a8df14fab46, work_dir: /content/mmclassification/work_dirs/mobilenet_v2_1x_cats_dogs\n", + "2021-10-21 02:54:01,365 - mmcls - INFO - Hooks will be executed in the following order:\n", + "before_run:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_train_epoch:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_train_iter:\n", + "(VERY_HIGH ) StepLrUpdaterHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + " -------------------- \n", + "after_train_iter:\n", + "(ABOVE_NORMAL) OptimizerHook \n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) IterTimerHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "after_train_epoch:\n", + "(NORMAL ) CheckpointHook \n", + "(LOW ) EvalHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_val_epoch:\n", + "(LOW ) IterTimerHook \n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "before_val_iter:\n", + "(LOW ) IterTimerHook \n", + " -------------------- \n", + "after_val_iter:\n", + "(LOW ) IterTimerHook \n", + " -------------------- \n", + "after_val_epoch:\n", + "(VERY_LOW ) TextLoggerHook \n", + " -------------------- \n", + "2021-10-21 02:54:01,365 - mmcls - INFO - workflow: [('train', 1)], max: 2 epochs\n", + "2021-10-21 02:54:07,010 - mmcls - INFO - Epoch [1][10/201]\tlr: 5.000e-03, eta: 0:03:34, time: 0.548, data_time: 0.260, memory: 1709, loss: 0.3917\n", + "2021-10-21 02:54:09,888 - mmcls - INFO - Epoch [1][20/201]\tlr: 5.000e-03, eta: 0:02:39, time: 0.288, data_time: 0.021, memory: 1709, loss: 0.3508\n", + "2021-10-21 02:54:12,795 - mmcls - INFO - Epoch [1][30/201]\tlr: 5.000e-03, eta: 0:02:19, time: 0.291, data_time: 0.020, memory: 1709, loss: 0.3955\n", + "2021-10-21 02:54:15,744 - mmcls - INFO - Epoch [1][40/201]\tlr: 5.000e-03, eta: 0:02:08, time: 0.295, data_time: 0.019, memory: 1709, loss: 0.2485\n", + "2021-10-21 02:54:18,667 - mmcls - INFO - Epoch [1][50/201]\tlr: 5.000e-03, eta: 0:02:00, time: 0.292, data_time: 0.021, memory: 1709, loss: 0.4196\n", + "2021-10-21 02:54:21,590 - mmcls - INFO - Epoch [1][60/201]\tlr: 5.000e-03, eta: 0:01:54, time: 0.293, data_time: 0.022, memory: 1709, loss: 0.4994\n", + "2021-10-21 02:54:24,496 - mmcls - INFO - Epoch [1][70/201]\tlr: 5.000e-03, eta: 0:01:48, time: 0.291, data_time: 0.021, memory: 1709, loss: 0.4372\n", + "2021-10-21 02:54:27,400 - mmcls - INFO - Epoch [1][80/201]\tlr: 5.000e-03, eta: 0:01:44, time: 0.290, data_time: 0.020, memory: 1709, loss: 0.3179\n", + "2021-10-21 02:54:30,313 - mmcls - INFO - Epoch [1][90/201]\tlr: 5.000e-03, eta: 0:01:39, time: 0.292, data_time: 0.020, memory: 1709, loss: 0.3175\n", + "2021-10-21 02:54:33,208 - mmcls - INFO - Epoch [1][100/201]\tlr: 5.000e-03, eta: 0:01:35, time: 0.289, data_time: 0.020, memory: 1709, loss: 0.3412\n", + "2021-10-21 
02:54:36,129 - mmcls - INFO - Epoch [1][110/201]\tlr: 5.000e-03, eta: 0:01:31, time: 0.292, data_time: 0.021, memory: 1709, loss: 0.2985\n", + "2021-10-21 02:54:39,067 - mmcls - INFO - Epoch [1][120/201]\tlr: 5.000e-03, eta: 0:01:28, time: 0.294, data_time: 0.021, memory: 1709, loss: 0.2778\n", + "2021-10-21 02:54:41,963 - mmcls - INFO - Epoch [1][130/201]\tlr: 5.000e-03, eta: 0:01:24, time: 0.289, data_time: 0.020, memory: 1709, loss: 0.2229\n", + "2021-10-21 02:54:44,861 - mmcls - INFO - Epoch [1][140/201]\tlr: 5.000e-03, eta: 0:01:21, time: 0.290, data_time: 0.021, memory: 1709, loss: 0.2318\n", + "2021-10-21 02:54:47,782 - mmcls - INFO - Epoch [1][150/201]\tlr: 5.000e-03, eta: 0:01:17, time: 0.293, data_time: 0.020, memory: 1709, loss: 0.2333\n", + "2021-10-21 02:54:50,682 - mmcls - INFO - Epoch [1][160/201]\tlr: 5.000e-03, eta: 0:01:14, time: 0.290, data_time: 0.020, memory: 1709, loss: 0.2783\n", + "2021-10-21 02:54:53,595 - mmcls - INFO - Epoch [1][170/201]\tlr: 5.000e-03, eta: 0:01:11, time: 0.291, data_time: 0.019, memory: 1709, loss: 0.2132\n", + "2021-10-21 02:54:56,499 - mmcls - INFO - Epoch [1][180/201]\tlr: 5.000e-03, eta: 0:01:07, time: 0.290, data_time: 0.021, memory: 1709, loss: 0.2096\n", + "2021-10-21 02:54:59,381 - mmcls - INFO - Epoch [1][190/201]\tlr: 5.000e-03, eta: 0:01:04, time: 0.288, data_time: 0.023, memory: 1709, loss: 0.1729\n", + "2021-10-21 02:55:02,270 - mmcls - INFO - Epoch [1][200/201]\tlr: 5.000e-03, eta: 0:01:01, time: 0.288, data_time: 0.020, memory: 1709, loss: 0.1969\n", + "2021-10-21 02:55:02,313 - mmcls - INFO - Saving checkpoint at 1 epochs\n", + "[ ] 0/1601, elapsed: 0s, ETA:[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n", + "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. 
(function pthreadpool)\n", + "[>>] 1601/1601, 171.8 task/s, elapsed: 9s, ETA: 0s2021-10-21 02:55:11,743 - mmcls - INFO - Epoch(val) [1][51]\taccuracy_top-1: 95.6277\n", + "2021-10-21 02:55:16,920 - mmcls - INFO - Epoch [2][10/201]\tlr: 5.000e-04, eta: 0:00:59, time: 0.501, data_time: 0.237, memory: 1709, loss: 0.1764\n", + "2021-10-21 02:55:19,776 - mmcls - INFO - Epoch [2][20/201]\tlr: 5.000e-04, eta: 0:00:56, time: 0.286, data_time: 0.021, memory: 1709, loss: 0.1514\n", + "2021-10-21 02:55:22,637 - mmcls - INFO - Epoch [2][30/201]\tlr: 5.000e-04, eta: 0:00:52, time: 0.286, data_time: 0.019, memory: 1709, loss: 0.1395\n", + "2021-10-21 02:55:25,497 - mmcls - INFO - Epoch [2][40/201]\tlr: 5.000e-04, eta: 0:00:49, time: 0.286, data_time: 0.020, memory: 1709, loss: 0.1508\n", + "2021-10-21 02:55:28,338 - mmcls - INFO - Epoch [2][50/201]\tlr: 5.000e-04, eta: 0:00:46, time: 0.284, data_time: 0.018, memory: 1709, loss: 0.1771\n", + "2021-10-21 02:55:31,214 - mmcls - INFO - Epoch [2][60/201]\tlr: 5.000e-04, eta: 0:00:43, time: 0.287, data_time: 0.019, memory: 1709, loss: 0.1438\n", + "2021-10-21 02:55:34,075 - mmcls - INFO - Epoch [2][70/201]\tlr: 5.000e-04, eta: 0:00:40, time: 0.286, data_time: 0.020, memory: 1709, loss: 0.1321\n", + "2021-10-21 02:55:36,921 - mmcls - INFO - Epoch [2][80/201]\tlr: 5.000e-04, eta: 0:00:36, time: 0.285, data_time: 0.023, memory: 1709, loss: 0.1629\n", + "2021-10-21 02:55:39,770 - mmcls - INFO - Epoch [2][90/201]\tlr: 5.000e-04, eta: 0:00:33, time: 0.285, data_time: 0.018, memory: 1709, loss: 0.1574\n", + "2021-10-21 02:55:42,606 - mmcls - INFO - Epoch [2][100/201]\tlr: 5.000e-04, eta: 0:00:30, time: 0.284, data_time: 0.019, memory: 1709, loss: 0.1220\n", + "2021-10-21 02:55:45,430 - mmcls - INFO - Epoch [2][110/201]\tlr: 5.000e-04, eta: 0:00:27, time: 0.282, data_time: 0.021, memory: 1709, loss: 0.2550\n", + "2021-10-21 02:55:48,280 - mmcls - INFO - Epoch [2][120/201]\tlr: 5.000e-04, eta: 0:00:24, time: 0.285, data_time: 0.021, memory: 1709, loss: 0.1528\n", + "2021-10-21 02:55:51,131 - mmcls - INFO - Epoch [2][130/201]\tlr: 5.000e-04, eta: 0:00:21, time: 0.285, data_time: 0.020, memory: 1709, loss: 0.1223\n", + "2021-10-21 02:55:53,983 - mmcls - INFO - Epoch [2][140/201]\tlr: 5.000e-04, eta: 0:00:18, time: 0.285, data_time: 0.019, memory: 1709, loss: 0.1734\n", + "2021-10-21 02:55:56,823 - mmcls - INFO - Epoch [2][150/201]\tlr: 5.000e-04, eta: 0:00:15, time: 0.284, data_time: 0.022, memory: 1709, loss: 0.1527\n", + "2021-10-21 02:55:59,645 - mmcls - INFO - Epoch [2][160/201]\tlr: 5.000e-04, eta: 0:00:12, time: 0.283, data_time: 0.021, memory: 1709, loss: 0.1910\n", + "2021-10-21 02:56:02,514 - mmcls - INFO - Epoch [2][170/201]\tlr: 5.000e-04, eta: 0:00:09, time: 0.287, data_time: 0.019, memory: 1709, loss: 0.1922\n", + "2021-10-21 02:56:05,375 - mmcls - INFO - Epoch [2][180/201]\tlr: 5.000e-04, eta: 0:00:06, time: 0.286, data_time: 0.018, memory: 1709, loss: 0.1760\n", + "2021-10-21 02:56:08,241 - mmcls - INFO - Epoch [2][190/201]\tlr: 5.000e-04, eta: 0:00:03, time: 0.287, data_time: 0.019, memory: 1709, loss: 0.1739\n", + "2021-10-21 02:56:11,081 - mmcls - INFO - Epoch [2][200/201]\tlr: 5.000e-04, eta: 0:00:00, time: 0.282, data_time: 0.019, memory: 1709, loss: 0.1654\n", + "2021-10-21 02:56:11,125 - mmcls - INFO - Saving checkpoint at 2 epochs\n", + "[>>] 1601/1601, 170.9 task/s, elapsed: 9s, ETA: 0s2021-10-21 02:56:20,592 - mmcls - INFO - Epoch(val) [2][51]\taccuracy_top-1: 97.5016\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"m_ZSkwB5Rflb" + }, + "source": [ + "### 测试模型\n", + "\n", + "使用 `tools/test.py` 对模型进行测试:\n", + "\n", + "```\n", + "python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]\n", + "```\n", + "\n", + "这里有一些可选参数可以进行配置:\n", + "\n", + "- `--metrics`: 评价指标。可以在数据集类中找到所有可用的选择,一般对单标签分类任务,我们都可以使用 \"accuracy\" 进行评价。\n", + "- `--metric-options`: 传递给评价指标的自定义参数。比如指定了 \"topk=1\",那么就会计算 \"top-1 accuracy\"。\n", + "\n", + "更多细节请参看 `tools/test.py` 的帮助文档。\n", + "\n", + "这里使用我们微调好的 `MobileNetV2` 模型进行测试" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Zd4EM00QRtyc", + "outputId": "e0be9ba6-47f5-45d9-cca2-d2c5a38b1407" + }, + "source": [ + "!python tools/test.py configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py work_dirs/mobilenet_v2_1x_cats_dogs/latest.pth --metrics accuracy --metric-options topk=1" + ], + "execution_count": 13, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/transformer.py:28: UserWarning: Fail to import ``MultiScaleDeformableAttention`` from ``mmcv.ops.multi_scale_deform_attn``, You should install ``mmcv-full`` if you need this module. \n", + " warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/local/lib/python3.7/dist-packages/yaml/constructor.py:126: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n", + " if not isinstance(key, collections.Hashable):\n", + "Use load_from_local loader\n", + "[>>] 2023/2023, 169.7 task/s, elapsed: 12s, ETA: 0s\n", + "accuracy : 97.38\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IwThQkjaRwF7" + }, + "source": [ + "### Inference\n", + "\n", + "Sometimes we want to save the model's inference results on a dataset. This can be done with the following command:\n", + "\n", + "```shell\n", + "python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}]\n", + "```\n", + "\n", + "Arguments:\n", + "\n", + "- `--out`: The filename of the output results. If not specified, the results will not be saved. Supported formats include json, pkl and yml.\n", + "- `--out-items`: Which inference results to save. Choose any subset of \"class_scores\", \"pred_score\", \"pred_label\" and \"pred_class\", or use \"all\" to save all inference results.\n", + "\n", + "The meaning of each item:\n", + "- `class_scores`: The classification scores of every sample over all classes.\n", + "- `pred_score`: The classification score of every sample on its predicted class.\n", + "- `pred_label`: The label text of every sample's predicted class. The label text is read from the checkpoint file; if the checkpoint contains no label text, the ImageNet labels are used.\n", + "- `pred_class`: The class id of every sample's predicted class, as a list of integers.\n", + "- `all`: Save all of the above items.\n", + "- `none`: Save none of the above items. Besides the inference results, the output file also stores the evaluation metrics, so if you only need the overall metrics, saving no items can greatly reduce the output file size." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "6GVKloPHR0Fn", + "outputId": "1efde0e4-97cd-4e62-ce98-1cbc79da3a6c" + }, + "source": [ + "!python tools/test.py configs/mobilenet_v2/mobilenet_v2_1x_cats_dogs.py work_dirs/mobilenet_v2_1x_cats_dogs/latest.pth --out results.json --out-items all" + ], + "execution_count": 14, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/transformer.py:28: UserWarning: Fail to import ``MultiScaleDeformableAttention`` from ``mmcv.ops.multi_scale_deform_attn``, You should install ``mmcv-full`` if you need this module. \n", + " warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from '\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/usr/local/lib/python3.7/dist-packages/yaml/constructor.py:126: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n", + " if not isinstance(key, collections.Hashable):\n", + "Use load_from_local loader\n", + "[>>] 2023/2023, 170.3 task/s, elapsed: 12s, ETA: 0s\n", + "dumping results to results.json\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "G0NJI1s6e3FD" + }, + "source": [ + "导出的json 文件中保存了所有样本的推理结果、分类结果和分类得分" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 370 + }, + "id": "HJdJeLUafFhX", + "outputId": "486c0652-2124-419a-ec7d-fd3583baedb1" + }, + "source": [ + "import json\n", + "\n", + "with open(\"./results.json\", 'r') as f:\n", + " results = json.load(f)\n", + "\n", + "# 展示第一张图片的结果信息\n", + "print('class_scores:', results['class_scores'][0])\n", + "print('pred_class:', results['pred_class'][0])\n", + "print('pred_label:', results['pred_label'][0])\n", + "print('pred_score:', results['pred_score'][0])\n", + "Image.open('data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg')" + ], + "execution_count": 15, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "class_scores: [1.0, 5.184615757547473e-13]\n", + "pred_class: cats\n", + "pred_label: 0\n", + "pred_score: 1.0\n" + ] + }, + { + "output_type": "execute_result", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAASwAAAEYCAIAAABp9FyZAAEAAElEQVR4nJT96ZMkWXIfCKrqe88uv+KOvCuzju7qrmo00A00QZAcoZAznPmwIkP+p/NhZWVnZTAHSBALAmig+u6ursrKMzIyDj/teofqflB3S89qckTWpCTKM8Lc3ezZ0+unP1XF737+KQAgGEQDQCAoIgD04MGDul7f3Nx0fTsalbPZJCa/XC5dOQ4hGGMAoGka55y1tu/7LMu89865zWbz+PHj+Xy+2WxGo1G3rokIAJgZEa3N9L3C4Jwzxngfu64TkbIsq6pqu40xJqXU922MkYiMMSKS56Uh61zubGmtM5QjGkTMDZRl2fd9CKGoSkTx3gthSgkRBCGllFIKnAAAEbGXPM/1q7vOM3OM0ZBLKcWYUkrM/PHH33n58mWe5yklnlW3t7e5M6fHJ7PRaLOY3769zAxNsvz06PDJwwe5ofnV1eNHD2+uru7du3O7aeu6Nha97/7n//l/fvHimQ8dIi4Wi/V6vVk34/H0+voa0Xzy8XdfvXrVtt39+/c//vjjt2/fPn32TZ7nDBKZY4x37997+/btn/zpj9++fftX//E//vCHP1yv1yWS98FaK2i+//3vv3nztutDluWcQMjEwNfXN/cfPHrz5o0IVlVlQvfpp5+mlH72s5+NZ9MY48nZ6Xy5XK1Wn37/e3fu3Hnx6uVvf/vbPoQ7d+6cn5/317ezg8lqteq6phqVV1eXm83q0aNHTdOklFKSlASBrM2yLHM2ny9eiAgB5nleFBWhjZG7kMAUybibTXdxs1yHaMspOtf7aAiMMdZaRERhRHTOOUNd13FKIfQxRokJUay1xpibxTzGKCLWWiJCkizL8jy/vb1JKT169Kiu133fHx8f397etm07nU67rqubdex9nueTycQY5JjeXF4cTmfHx8fG4Pz2drVaTSaTO3furNb1YrHw3mdZllKq6xoRx+NxWZbM7JyLMW42G+dcVVUppc1mlee5916/6/T0tGmaZ8+e3bt3DxHLsgSAvu+dcyIyn8/J8PHxIVFs13Mi/z/+9/9qOrWzcX7n7lEKTQi9QLKICAAg8K2j7/uUEiKKSAih67rEIYRQ94uiKEII0+n06urq5OQkhAAAMUa9iJQSAFRVZa1dr9ciwszGGN36RDal5L1HIGYGABHRaxARXe5vXwrAVoaBmVl2B4CIiE/RGOO9DyGQNYMQhhCICA3q240xiGj0f4jW2rqui6Lquq4sS9/HoiiOjo5fvXo1mUx+/OMf379//2/+5m8ePnz4y9fPfvSjH/3t3/y1M/aPP//8F/+0qKpqeXuTCSBi0zTZeDSbzbquy/P86uqqmB5WVeUyU9dS1/XV1VVMvqqqs7Oz5XK5XC6rajydTlerze3tbUppPB4bY4qi0KUmIu9749xqtdKlOz8/J6I/+7M/CyE450Lb6foj4dOnT7OsCCGEEIu8stYSwsHBASBba5m5qoo337yq6/WdO3em0/Hd+3frun79+uXZ2Vni8PTpVwcH0zx3JydHTdN88+zrsspzgL7vjTHT6bQo87atQ+ibpgkhWGuzLBPBGFIIqWkakM7lGcfEzMzsvUdIPiYfOJ9UQGSMIWsNCxElkARCAqr4AACFRcR7j8LMzCnF6FNKFinLbFEUWZYdnZ40TdN1XQghhOBD1/e9bjYVifV6HUIYjUZElBeubVuBlGWZI6OLEH3o+x4AyrIUkfl8EUI4Pj4moouLCxa01qrCreu6russy0RkPB7Xda17O8syIkopxRiZue97773esu7YsixVjJ1zRDRsY2stS4wxWgPWWmt1JwMiJh9i4pQSS6Rhl8v7h/ceEVVy9r9YHwYijkYjABiNRnoPw14XEbWBjx8/zvNcf6NXP0ia3ttwV2ru9CnuiyUi6tbUW8L3D/3GuDtCivs
HMydhERARRgAAIkLEBNLH4FNcbtYhJUHMigKtuV0uNm2zbuou+Murq+9//rnNsuvb2zzPz8/PP//88+l0aoxxzumNq6LRVTo6Ouq67ujoaL1e3717dzwep5SyLBuNRrPZjJlXq9VqtXLO6T7I81y1qd71crns+17dAedc13UoYoy5vLw8OTl5/vz5D37wg/Ozs6PDwzzPmVkfZmbp5cuXKSUU0MtAxLars8xJ4uOTQ/VN7t69+0//9E+67Z49e6ZKJ8Z4cHDw4ZMnP//5z5tNrdvx0f0Hq/miqde31zd92xhCjoEADdJ6ueqaNoVokMo8K/LMEkYfurZGREEQER9DCMHHwMyM0HVt27ad72OMMaXOt13X6T3GGL33IXh1Urz3XddlWWYt7R7rdqsw87Nnz54/f/7ixYu3b99u6lVKyVqbZS7LsizLdFPpNiADRJQ4OOemo/F0Oi2KAoD188fjMQC0bRtDyLJMDd1yuby+vo4xZllmjCGi8Xh8dnZ2584dAPDe13UdY9RtrIrDWqsbTLdTCAERp9Op6ik9Qfe8un4JpO9DCMHaLM9LvVRh8N7HGEUQwZCezcxpdwwibq0djUZlWaoa0C/QbeScCyGMx2MRybJMnQoR6ft+vV5fXV0h4ocffnh6ejoajVSRqIx1Xafr+L6PalWwU0qDJRzEbBBLRCQilVh98U4g3700QoiI+pCstcZYEBSGFDlF7r0XgN57Y20fvMszm7nRZFxUZe/94dFRluf/5e//7uPvfPKTP/9n63pzfHz8/Pnzf/2v//WDBw9+9atf6dqpsiyKIqWkznnTNKenp9PpVNVn0zSbzeby8lK90BDCb37zG2vt2dlZ3/d1XRdFoU+rbdurq6vlcpmEp9Pp0dGRcw4AiqLo2857//vf/q5erfURTMbjsiy7rh3WIYTgMiPCSGJAdspbPnz85OT0qOub2XTcdvXzF98kDin0PnQI7H2XGTo7OyGU2/n1Yn5jCI4OZ02z6ft+uVyq8VksFl3XuMxYR71v62a92azUThZFUZalc86nkFIMKaaUYkqIaDNXlCUQCoIxJitcVhY2z4xzLs8QkZm93+p0IrKWdD/oHTnnNMbRM+/cuXN0dKSO1aCRdds4ZxCxKAqXGR+6EEJd11VejMtK/UlriQCNMS6zR7ODvu/bth6NRqPRqGmaul7nuauqSpcxxjgej+/fv3/37t3pdHp9fb1er9u2TSk553RzWmuLotDtp0ZPhVDV7mCo9F7UzAij97Hvg6AGVsAhMbP3UZIYpNwVpHel4qcirnKoFl9XRKVFhTDP89VqhYjz+Xwymcznc0TUB6OGmJnrum6aRgVYTZBzTkVCpUtvYJCbQa50lQch3JdD3h3f0hr6sIczhRCAEI2xmTHGGAN7QktEYMgVeQKZHh4kEJtnPsWsKNQe5lXJCJfXV6t644ocDLVtu1gs1Dm/vr6u63qz2YiIOkjOOdU7RHRwcPDJJ58Q0ZMnTz755JOyLL/88stvvvkGAL773e/OZjO9AFVn6l8YY5yzIty2zXg8un//3tnZ6cHBjAhR+PDo4JunX2eZ+9//978cFcXLZ8/KLJtOp23b6uecHh2v18uUUgpxvV7H5HNrEofet5PJ+N6dO9Px+M2b13/ywz/65puvr64u//zPf3JyeHB5eVGV+Xqz/Onf/5c//qPPLSGBfP797/3iZ19Yg9aStSTAXdfU9TrGOBqNzs/PsyyLMa5Wq+Vy7n3nMluWeVnmiCiEYBAIhRhIyFrnXDmqqqoqRuVkNj04OJhOp7OD6eHxgbWGCNWl6vs+hF631mJ5u9lsvPcAbK3VHQIAR0dHBwcHk+moKDPdIQCit8/MbdsmDjHGuq5VIZZlqVYhRh99UIMxLisAcM6URWGtTSl436FAmRcff/zxeDxWpVMUxWQySSldXl7e3Nzoe3UXqQEnIjUY+vtB8FRpMrPamGG3ExGA+rEphMBJhJETxMgxcIoCQIho33dHh+BQ+r4vikz3SkoJgMkYRHRZpjAMEZ2cnCyXyyzLxuNxjHEw6HVdLxYL9bJ0rVXwjDEpSQhBRPKsGELBwdskIpb0LXdUzwkhgKAxKaWESAxRBEXEUhRJzFFEEggyi0hiFo0JrUE0REQWt2qCpe07RCRryqpq2lZEYpLFallVo7quRaQoy//rr/7qiy++QKLb29vvfOc7f//3f99s6ocPH8auXc9vl32neuqDDx719abv+7OzMyL66KOPlq2/d+/exZtXb968bttWndLxePxv/s2/+cd//Mem3pycnIzHY2a4vVmUZbl14Jv65OREg73cZcvlEgBIoCzL4L3v+8s3bzil9WpV2swgoUDXdbPZbLFYdFknkDarFRG5zPjer7vm9cXLsiwn01Gql865er3Jsmw+nxPR2fGJc261Wi2Xy6urq+l4TACb1SqFUGU5JHCZTSm1bcucEocYXVHMRqNqs9m0bde2YG2mqJgxxpmMUiINNwQCJ0keAGKSNvVNkN5zx+LFJiRBHFRvSomjD4GMMYaAmSUxAItYA5g46Mbb1Ku2bdu2Vf1rDDkqHRm2FCO1bZ1lGSQGxEk1MiDq76UUuqbt+56IMmvAmKbZHB4cENF8fpNCrKoKcthsNhoX6Is8zxFxtVpdXl4y83Q6HZS7ao0YI2AkA9aRCIfYAwBLBOTRuGyaJnFIHIwxxiKSLas8Nd6iYW77nr2DIfISZiGQJAn4nQnat0tqZwerqLpqeL96I0+ePDk+Pp5Op6PR6IMPPhAR3U+j0aiqqsVicXt7KyJnZ2dFUXjv+77Xc4hoELwhStRYVj/hW+7oPlTzh2Hh8JOR9YUAIKL33qeYkjADoDHkrMmczV2eXd/ekDVN137w5HHne5u5pmur8ajre2MtGVONR19+9fvJbDo7PCiKQt3OqqpGo9Gf//mfP3z4UL0mZn78+PHx8XGM8fT09MWLFymlm5ubzWYzn8/VCy2KYrFYfPHFF8fHx/pPdXjOz8+n0ykR5WUxnk4Wi0XbtqvV6sWLF8wceq+e6t2z83q1PpzOXnzz7PToeDVfKKwXY4g+1HVdliVymk6nHzx+GPo+BC+QMkvPn399c3PVdc3sYHzx5uXJ6WGemf/8N/+xblZHx7PQN5NR8dGTR7/77a+Cbwn59avnTx4/9H3TtQ0Bhr7zfZtnVlK6vb5ZLm4NQe6yIssIxfdtW29C7xHEWLSOjLVElERSSiHGEEJdr9fr9XK5nK/m8/n8dn59e3t7u1wMDpuxuIsPO0U1jNmhgH2j/vxqtQKALMum0+l0Os3zTLXDpl4horWkf82yDFGcM9bavm36tvFdrxGawW1smTnHzE2z8V2v77KOBNLbt2/X67VzTjezWjPd5JPJJMuyrus05NZ4SiN5NbaDX9Z1nbqsek7btgMU5JxzRW7IKXZojHMuR0ZDzhpjAFFkcNNokMB9+6PH4DFaa7uuUxX44MEDNd/OuTt37uhbFJO01up1VFX1ve99786dOxrOqgevTrmKlt6DSvtgAP/wUIR6ODRm0AMM6X/qNuDO+UyAaipZUA
SZgRlEsKiqdV0XVeVj/Og7n9g8Ozo9sXk2OZjdLhfj2TQri6vbm1W9+dN/9hO05uTk5NWrV9/5znfOz88vLi5++MMfnpyc6FV57w8PD4uiUP/wt7/97cuXL//xH//x9va2KIrZbKaPqmma29vb3/72t4vFYjabVVUFAOPxWCMTl5lqVLRd3fXNerO8ur5EEmMRgcsia5vN4cF0ubh1lgyBcJzP5xqBiPBmtT44nOaFOz06/MlP/jQvXAy9c/b09CT6vl4v1svbm5urosgeP35EBASSO/v0q99H3x/OpkXmzk+OFzfX7Wb9wYP7yffL25sQeiIQSEQwHo+so029WixuQ+jJQF44a01KofdtjB4GvBoSAACKSNI9swvIt3uJmVmiCLddHZMnAxrpEMHOSw9bwCb2KqXOuTzPVZdZa8qyqKoqz3PryFrbdY2i9+r3xRjbtl0ul7rXU0pkMLPbXAincHBwsFotrt9eEdF4UnH0vu0Kl63Xa0Q8OTm5d+/eeDweHDcAUIezaRqVKI3xUkoqhAPerkZF10FxpqZpFNExxhhnsywja3ST6+5VxzAz1lpryWzFTz9XDaC6ixqeWWvH43Ge53pxAKB46Xw+1/iwqqrXr1+/ePFCjaR+AgAcHx//5je/2Ww2RVE45/RD6rper9d93zdNo8pG10ghx6Io9BtVC2RZpiC1Yll6q3onapb1nxpDF0VRVVVZVU3TZFmWlZVzzthMAej5fEHGhpgOj45Diud375zdOe98/3/8X//ng0cPL6/edn0vAOWoCilOZtO79++9fP1qNBk/evzBZ599Nh6Pf/Ob30wmkxcvXnzzzTfq1Olz+sUvfqH48KtXr87Pz3//+99/9NFHZVl++umnZ2dnuhSImOf5V199dXx8/Mtf/hIATk5OVqvVJ598olFQ3/ez2Wy9XnddN5lM1DXS9wLAcrlURHS5XFprD2cH6+UKRRWT2azWfdt99fWX8+ubTz75KMssp74s86LIvv76933fssS2q1+9fhGTn87GL14+G0+q3rdX15fXN2/bzVpiiH3XNzUJHx/M2ratmzUixhgXiwURHR0dKDaY0hb922LxBgZrYMlYS5mxItJ3zaZeT6fTssrzLBuX1fHJ0Z07Z4eHh7l1VVXps3PO5blTxA6AQwjWUVFuN7e1xhgS4aoo+rZt64Zjij6AJGcssBAgsFRF3tZN9CF3WdvU41FlLLEkH3oOERGJIEXfNM3Fxaury7cicnJ6NCrK29vb+Xyu9uPo6EizoLe3t8+ePVPtUJbl8+fPnXP3799X4+acSykVRQbAKQVEEUld1zTNxvuu79s8dyH08/lNCH1KIUY/m02Yeb1eA7CGuCIphP7wcDYZV+PxuMwLYwwNDqe8n6AbPEZFhBQUUo9xOp2GEN68eXNxcaEJrsvLS5UuPV/zM+pfPX369ObmRmGMIRuxb131ngeveECi1B/eoRdOA9+iKPq+L4qirmt9Wl3X9n2fhPvgF8vbLMu64K21LitijHXdANJ0dti2PaLp+/CTn/zk5ORkvV5XVaUZBSIajUbX19eHh4eLxaJpGiIqiuJv//Zv792790d/9EfMfHNzs1gsUkp/+Zd/qeqQiMqyfP36dYzx+Ph4sVhsNpu+71erVQjh6dOnimDleX5wcFCWZdd1mqW4uLjQ/G9d18457/2ACetT2OpXQGCRxJIYWHTbWTIaKRFRkeW5y1KKIXiO6eunv0+hn89v8txNJ2VV5p9+7zsHh9OUYowhxpBSJAACkJRSCNH7FAJw4hii70PfpeCB08F07AhD16bgncFxWYyrYlTmKAklISfgCJwgReSEkjgGjoFDhMQAkFlbuKxwWb1ZJR+MMWWRFVnujEUBBN45WSmloNspy22e58agKmUVv8E7G2DzEPvEYReA6OfwLkoRpG1g0tVN6HqJiZlFEiE658oy36zWVVWdnh1PR+Msy0ajMsts1zUhhNlsNh6P5/P5L3/5y+l0ul6vDw4OFK3RdMAQWCkmR0RqG3Qn617VZ0dEapk0mdQ0jXPGWHSZzXNnLaWUhKOztm3btt40m3W9WtKQlhjQkSESVbOjO7Isy0G01Ka9ePHi9evXCl71fT9cxAByKtz6/PlzTdnroQs9JItUzDTqHdB/VbT6NFTX6mld16niVJtZluVoNGq6thyVWZbNZjMRKUYVACmQBYAhxjwv79+/H0JC6y6vb44ODo8PjzimB/furxbL3GUoIMxnp6dd007Hk+jDqxcv7925+4uf/fzDx0/UNKl38fjx46dPn+olaf7m+vo6hPD48WO1FXme39zcvHnzRm9EobajoyOVHMXfXr9+rVn7V69eGWM630YOAIpJJJFEBJk1qm45eP1PYkBOFoEAUMQ5d3BwMJvNcpdZa0ej6sWL519//dXHHz55ffHq9cWrH/7x5yAxc+QyQwYA2RAYi2SAJYa+TaFHSQRCIChMIJk1ZZ4hCiICSgh9Xa839SqEYC2Nx1VVVdYRAHvfNW29Xq+Xy7mkFL3vu8b3vXAkAINkkKIPwGxAJHHsve86jh5YmCNzHJQsIA/wIzOL8ADux+RZorUWkGPyQ8CieQqVQGYW4P19q/Kw3cJbdQm5y4iorHIDeH19/ebNa9/3usOIqGmau3fv/sM//IPqSn2UxpjDw8MQgtJllOalSXl1d9VfVauufqb6hppn0qxy3/dZZgUZgF1mjaXEIQTPHB0hGQRg4bgVwn2kZIgJY4xd130LUGmaRhHh29vbm5sbvRoF69WHUYd20O6avdAIFXYZc13NQc4HRYCIm81G8564lz9UKUVEBWY1Lq3rWgHihw8fZpl7/Pjx4fGxc246nW7qtu/7LM+Losrz8vT0vKjGs+nhcrH+zW9+o67vvXv3UkqvXr2azWZ1XSuGmef5d7/7Xc2YP3r06OnTp4vFQnXe27dvz87OTk9P27a11qqKQcTb29uDg4O7d++KSFVVRVG8fPny/Px8Pp+rlFZVNZ1O9QllWdb3/cXFRdd1mr1Q/bKfZbFksiwDAEgcYxRmYU4xgojR3AVHg5LlNi+cdZQ5MxqXhHJ9/db7DlJcr5fX129fv37ZdU1RZFlmM2s092AMIqcYve/bGHqWSMAEbFAyS6M829RrlpRl1lrbNM3bt29vb68VnFQIepcqiokjS0IBSZxCTMGLjxxTip6jz52xRBxiW9eL+c1yfts1LSRWy7/NMO0g0BB67733vUqmYjPqPaEkSIwsJGCRLAIkTqFXNS8pShxOYORkLWWZzTNLiIr6pBBFJMstsrRtu1ot6s1Gcwm6/k3TXF9f931/fn6uN6jpQWVfXlxcPHr0SDfAkydPUgptW282q75vEUUZkESwg+gTEThnjEHm2PctOWKOPnTMUdWLD13bbVyGuTWZM84Z2reBg/iptCgSpYGpopcas+nmUxF9/fr17e2tAkqDN6Uepoh47zUS1bSP7NKsKpYauw/uqMZ+aiSVQaIGTe07Ik6n077vJ5PJcrmsqmo+n6uLeHx8vKo3eVmWZbnZbE5OT0XE+1CWozzP27btfDw+P
n748IOTk7OLV68OZzMCCH1fuOzy9cX3v/tpmeVvXr3+3ne+29XNw3v3/+xHP754+eqjx0/+z7/839+8eXP37t2yLH/5y1/e3t5+97vfnc/neZ5r0uz09FRd8fPzc9XB0+l0uVxqQL9arbqu22w29+7dIyIlH45Go4uLCyLSuPpb6m/AyQqXGWMMbQEni2QAM2N1GUMIoetVB6nWK8vy5Pj4//u3//nxk0enx4f/7//X/7Oqiq5rUgocg+4PADYoZMAQeN8l33PwMfQx9MF3vm+7trZkJDEKVGV+eDCdjCoQburNzfXVYn7bbNYpemuwKsrpeDKbTDNrCmty6xwSsEBMEhPHxDFJYkicQuA+BO+TD7zTXMbgAMkwb32iAZBPvHWLBiWle8aYrW3w3ouoalJ2VFRASFFQ986HikqpA2SLFGME4MODg5OTk7IsU4p93y2Xy/F4/Pd///d/8id/ogp3vV4fHh6ORiNjjKbEx+OxhkWz2Uz5cX3fa/6jKArl0zRNo3jMZrNZr9dqS7quMxYBJKXA4q2DIndE0Pdt7zsfuph64R13VB//gMfssgUppcSSiIAIALd025TSeDy21k4mk/V6rRen1qksy4ODA2WNrlarGCNmOHgIuOPEqEwOmY99yVe4YrFYNM1GBVt3YUrddFKuVpuiKEKIjx49IrLe+zos1019fX395s2bpmnmy8VHWVZV1XK1NsYgmtvFyl1cHB+fnp+fA8DbN78rsnyzWndNm1kXen/3/M6ds/Pr6+u2ae6cn//i5z8/P7t7dnL6i5/93JL52c9+dufOnclkotv90aNHX//ut6EzKlH3799/8fTr29vbs5PjLMvatu2bPqX0+9///uOPP95sNnnuXr16dXh4qNB23/cnJ7OLi4vRaKJ2nnbMISAgwMFByLIMAaJzFqlwmbr6lkxZZoMHgbx1zrqmjpyOjo7unt9Rnsfx8eHRwfTNmzdNu2XbEpElEiKDRFlGAsYYQjTGAAsKhN4H8LPZtG3brm8Oq8N79+50XfPm8kKNkjHGbqM1gyTqP1siY50lI1HUxAEZFOPbLoJDkCLLghj2sWNIwYNRdU8iggCIAkAxBjIIrNe5TVYrQKraX3dgSluUVcVywCQHDHbA22XHQ1a7YkBZ/q1qAQBAhMzYLMvmu/S6eqF1XaszCQBv375FxPPz89VqxcwPHjy4vb2dzWa6jXU/a3ilnBtVqcqXUt2R5zlzsg5ALKRABGWZIzuQFPoOLSBHRKFB++qLAZgZMgHqDQ8KW/Njmp0fjUaKT04mk4GHpXmL6XSqkOY2Zef9AMmoXA3wxi7Pg+rgTafT8Xisfp0ybJQ+PgikgoeHh4f379+fTCaj0ej169dEdHFxoV7TYrHQ98YYwRAiXl/fImII4YPHTxDx+vq667r1eq3a9+uvvy7L8vz8/He/+50CZUpMf/v27dHR0fPnzxeLhTHm6OjIe399fX16eppS0rsry1KBzaurq4ODA72A8Xj8q1/96t69ez/84Q+rqmLmZ8+e6bPR3aO+9MHBQYxR3csQ+hi9ujQx+tC3gKzhk+71FH3wXYg9wZb2oKsxLqsss+o7LJfLzz///uXlxYsXz773ve9tF00iICMJS4zRh9CrXySQhKNIMghZZrPMWgLgqMrxnXkB9W4kz12e51nunDOIol7cer1u6ybGaACJiABRwAA65wgQZWsVt+moFHkXCirYrlDC4IKJvMtwqBnIc6ffrn/VN+6CoyGdtkUitgQpgF3A6bfMctmGPGqylsvl7e0NJFbc3lo7n88//vjjL7/8Urfuo0ePbm5u6rpWL0xdQt2lr1+/HtIn+gjirsJDYYI8z/W1ZjJGo1Fdr40xZZkLQggeCVxmicAYdNZkWVbm7r+ZIRygywEgUeHUr8mybLPZxBjX67X3frVaDUkVBQkHVTF4XCrVGsUOf8rzXNOGKjZE9PTp04uLi9VqS9VVudUiKb2qt2/f6jemlBaLxQBsKmxTluXNzY3q0YH+qr7N9c1tlmVv31w+/+aZJVNkue/7tmn+y9/+re/6zWqdQnz14uX/8G//+8l4/PLFiz/70z99+eLFwcGBItQi8uzZs7/5m79R1RO3Kem6LMumaV69ejUajbqu+/DDD8/Ozl6+fDmfzx8/frxcLu/cuaPMJg2PVe8qPNu2rZq+fXhsx2/eHvpdmsno+36z2SinVF0yDd5ijFVVdV33/Pnz8/Pzo6OjL7/8crPZqE4cXKZ3JlREKwbatt1ua9waYV1YY4z3/vLy8uXLl8vlUlXk4M4MWhsAmvXGt12MUdKOSAhgCcejkTNWVU+7qX3bcYiay+77XsOcATMfLNiA1WvegraS/c5BUIB0gFDxD+jEg2cxwA0q8FdXV87aw8PDqqo0A6EKHXcpQe/9YrH4i7/4C2vts2fPLi4u7t27Z629vb1VRsrt7a3m2BSe2HeMrbUDCUF/PyiL9WYJwM455qgR1iBuxqB15JwhMVkQamPsUoiYxAlbjuSpgOqgooK8BDboRRabdr5ummYlEvq+nc0mGpuqsW7bNkWxJntz8fbe3YfrVdPUPYKtu5SVU3JV6yWKZbStT00f123XxXS7Wv/5v/wX1XSSEPoUPafFKgiVqzoKlU0PJhv1EUfT4/HsKAh5Rsoyz/Ly8uLt7Q1YO6tm/+Zf/ZsMs9RHi1lh8na9aTZ1ledXby6OpiNH8cGd48cPzn/9i39Y3l7c//BhE/2ybT2Qq6bgRid3PnpzU883sfZ47/GHXz57Fk367/6Hf/XXf/9Xk9MiXne0kcrOTk4fZrOztbjrjt3ouKyOTmYn9fUybVqHPJ1k8/XbYmp/9et/StyenR39zd/8ddNsfvSjH725eHtyfJZn5aiajMfjEPqiyMbjIsR6dlCaLhYJ8wjY+LhusAuZYIamcnmZ5dPR2DnXtm1iNtb23i831ybj6awwNq3W14vltaR+XGXs20mZZYSxaTAEJ2KZnYgVxMjiI4SkJREi0ocwnk5NniWEgOKB6xRaSdFS4l4gWEshhOVyzQmLfMTJ5NnYmlyYmJXDxGQYKdE485br1NXcBytSuGig4fD87cVNvWlBAoInis5GlwUwI8s5+ALDNMdRBlYCpd5RwuQJIgGjJOaYUlBvxSdfjArKsIstYDKGUvIxdCCJJBpIBsRwgNClrvb1qmtDDGLAVcUkyyqJEtoUfTo5OkXG2MfC5IXJmk2zXqySj8eZbS4v4vzmBx9+8IMPP7h5+c2r3//mbDbC2D7/6jeWosFAGA4Pqq5dFjm2c5hmp7P8rJ1zO+fj0b2HZx9O8+OSJqnB1CB6wy1QsOjN+ro+KU6y4Py8cR6PyknOGNdNJuBAJETugwS2A2VMHVy1IUR0e3ur8WiMsSzLoihU38TYISLAFkce3Nc8zxG2/rEa4sGNfN/f2Lq1CqLc3Ny8evXq/v37P/jBD7744ouvv/7amhIARqNR33eIqKFmCOHg4ECkSUkyl2/LF3xKKV1dXX322We///3v9bSu6+7du/f02YuBF/bJJ588f/ZSWazffPPNarUqyxKA
6roGgNlsVlWFiKzX6yyzbduen59+9PHj29vru3fvapS7XC6b6GcnR6enp/Pb67quHz9+3Ly90uWKMcQUWZBRdB1ub2/v3bv3u9/97ptvvlkul8aYuq5VTW6x0B1BbyC+D5CVrs8+WqZRB+8IGQMdAnaFY/qZqvXVYvCW7gsiYmkbTQwxhT4LDcsHaFofunNOJO7jc8O71BPZ2aitdTLGgGEAQAEEQjSIhGCI7EcffXSzap1P0+MiUN4LMJksL5//7p9gR8dXbE+vX9NO+5tEz9EsubFb+5ZSMgadtYPjoC7sYELRGBCWrX8RgBkAGMUYs80qEjKIOmtE1PZdSNFm7ujoqKjK1Wq1rmtF4Bnh5OSERbZMtOBDiqFfbWpnjMkLhwgCse+7ELo8dyGEmDoyGSKyBBZBsogGjLHGQbJExCAAQGQSgzAjRBHZAjODd7vLsbxb/WF/6J/UNVVGGO6x24hIQzje5ihdlmVN02h+dvAThvM15z6dTp8/f350dHR0dNT3fZ7nIEbLL66uLvXzFa/fppUAlElDZBEiIq1WN2/fvh3SNVojOx6Pl8vl4eHR119//fjx46Zpnj9/PhqN3r5969lnWUZkEXpni5RSXdd3794PIfR9cXt7O5tNHj38YLmc3717d7Vaee99DHXoJ0cHZVm+DWE+nz84Poq5HYprYgwCQM4YY8hkSjguiuKnP/3pv/t3/+7ly5dd16l2U19F10oXITdGwzCidxUtvKtiGVZVAa2UEjrUDbTrS+B1M/GuUlbPHNBsZ+ywWYf119M0IlA3VU/Q/Bj8QR2Zao0BAJe9hPCe2LBBQdziS5vN5vXry8v5WrLCk1s2XUI7Gk8fnZ0MW04dRfW9YVfXpqpBP1m5oAMwkbYLYlRiCWSLJsp+ols0fbiVSRHljTNICGn7+YJtUMl3LDiZHliXL5bruFiGEIzLsqK8Xa271h8cHgPAYrUUkbIcgbHzNzd1tymKLK8sInahafyq67p8lPt1E2IkAUMUJQBLgozQEJIxVtiBoRQ5cBIN01iAhVOyWhk5RCOqQTUStdaqa6vPVTl1k0kJu+rgfb3lvdfiWY0lFIQYlPc+8DNsCAV8r6+vLy8v27Z9+vTpyclJvfFaLFtV1XK5HI8rxRWbpjHGGrPVx0WRgVDf+9PT01//+tdKBEPEk5OTb7755t6DR+v1ej6fHx4efvHFF/P54t69e7//4vc//vGPw3rjW49opqMxkX3+/OXyZvnR4yeTquybenk7f2Xo8vVFmZUWLYqs1+vJbCrONE1jcqsG/M2bN2OirUEgIiEAJgJj3HrTKOnx7Ozspz/96TfffFNVlYhoQkLdBNxh0SIylBUMcRHs2hToybQrXdumUolUCDWEVuMwRE2D/Mgu9k4hiAiIbEHHnVfSd53G/EPFatu2XddV5RgRQQB3ZWXMggASE4hovg6UzSOJkRModiPILGCMSSAIQOf3HyRy2Xi16tO6iz1DBIMkX3311QBAyC4OFJGDg4Mh4Bx0BO5VvRljtP59wEJpKAZIMmgHRAEQFXNEERJNhrR9TwAMeZ7nQEaQ0JDN8rprZ7OZyTOtUytHo7wsg/Cde3cjp01Ti8h8tayqajYeReG8IsaeEfMiF+G2W3ddl1KoxidoBCSSY2MpQWJIUQxGQyTWIBExQxd870NIHJPWuKaU0rtSpm8dbdsOyk+XRsml+td9G4i4LTVC2C7K27dvNbi31qb4zhHaX2U1fUqUcc5tdvnTtm1VkouiePPmzWQy0hJYpdID0Gq5Xq/X1mYx8nq9/vjJ9xSeKctSAIeLF5G/+Iu/+Puf/gMzf/rpp0+fPj06Onr58uVkWoTQp+Q9hXv3Hty9e/err756+fIlACCJiFxdXf3DP/zD5z/4bL1et02vd0E2C6AsmfL27eXr1+nju3eVTumIDCBuZVJ+9KMf/fKXv1Sq0Gefffazn/3sRz/60d/93d9pC5NBcoYQQP5rbEG1ErqxhjTGcAoIg4BwEhAQJgRD6KzZuSrvaboYw/DI9qtljCHlsgFIWZZa5qdyKvLeT70MVcGD+hiEB40dPh9xdy8sX331e6E8pbRY3C7qPtkMbVbX4Xg82jv/3aG0Kg15UkoxBACQZIuigF1hgKokDRcRty1qmFlSGKIqJ1vpFQOAuNOTGAMzAHJCTgmBMsfMEaT1fRY8WDM7PDR51jQNgwjCJx8+eXnxet3UzJyYBaBp26vr60nlUkpJApADkQSeMZDDBN5kAMaQBTRinGGJfWwFMhRGJGROzD6ktvNtn+bLlSTPKcQY7a7MXoabBAC1PIOVG57/sGm0bcT+7jHGcAIi0mhQ/as8z5vYyc6hVX2mLzabzWazSSlpNkKTE4N3VBRFSkYLYfWXTdOMx1Nrs5RSSn0IwZpMKX8ff/zxixcvrLU+pM1m8yd/8ie//fKrzWajqY6UkoLOeVn87ne/m84ejcdT7/311c3HH1ff/e53U0qvXr84PDxMKRVF1nXd119//cHjRylJ13WT0WHTbKjIEkEm+Xg8Xt5c13Wd5zkipJSM0VgupQQMcOfs5Ory6Je//GVVVZNRObc0KvOzkyNmRpTQt2qXfBdUnwuqchIkQRAkAREkAWQWiUkTzRYQkIQAkohyjwaO0YAhx105GO6BmcLvMTEGVFOLPwa+oTo+RVFs6yH2NYIwAIS4rY9BJERAEBBmThYdAFi0iIigKDomgOhDORtVRG5hyfSuyMjl0IeUwhC1yh5Tcthp72ucXTG3JGOMEsS998H7PM+1TGlwH7YheuqNUdwR0zb1bZAImYksEIYUBSjLyz6GNsTj05OmaTZNff/hg2lmF6vlKM8Ojo+avgshtG1LxoxGozzPlbJiTMbMiUNM26xbnmvTsFYh28RBIBlrkE0IwWAQjiZYSwKEzBAj+5CQQTiyMCSm4c6H4qDBBRpeD5pvoHoO1n9YO+34gIiHh4dKUsM9LFt2xfuDy6QtrtRgdl23XC5ns1kIQX3R4+NjRPzoo49SSrPZTJdAO7jpJlND/eDBg7dv3ypTnJm7rqvr+uTk5Pj4+OTk5H/9X//XP/7jP14ul7/5zW/+/b//903TPH78uO9DURTnZ3estX3fzmaz4+NDIlByCRE5Z+q6/s2vf3t0dORcruvgnFssFpt69fHHH3/22Wd5kZ2eHk8mEzVTuyAnppT++q//ejabnZ+ff/LJJ1999ZX3/vnz5//8n//zDz74QNnnGrnxjoe97yAM/KH9/g6Kd6f0LoGm7cZSSpp93SviBtiVfeKOG2h2x951bg/nXFVV+uBUF9PgY79fUzYo3OHRDxec4raHDxFtjakIS5zOxkSEzFlmtVhWhK0j/aLhdoYb3F8Hs3foJoGdgz1ETMPGG96orrX3XYxRgJFgl0vcfhEaAIAueCGsJmNN4drMuTyr20aJX8WoOjg+MsY8e/ZMCZsiorlxETk9PdXLAyEtjSciLXHue2+MtdZ1ra83rTAacpwASZJsOdjO5WQzQStC5ArryiyvsmL0Tvb22Rv6gIfFGvaHBpDfEr/BEurJVVUpJU1
XbT/a2cfWtBJPmT6qj6uqurq6stZqn6imac7OzpqmUS7bkJ3Teo7VaqUdNKy1T58+VR9PE18vXrw4OTnJ8/zTTz/96U9/enp6ent7+/Lly//wH/4DIq4Wq67pZtPpdDKZ394+++br9XJ1MJ2hAMcUfKfX+ptf//r0+HRcjYno9PT09PR0tVp0XffDH/7gz//8J7PZTDvKqBYkA7Rl8XNK6eLiou/7L7/88u7du2dnZ1dXV7/+9a8fPnw4Go1UDQ3aTUEX2KGgeuw/jkF5DSoMOeXWZIaQEwdvQHJrHKGBLXnSgOjrLZfSoP5HKIQCkkCScOza2hBMJ6OD2STPLKfg+zb4ThPuKECA+lp/WnpX2KEMG2ds7jKOkRXy3WX4tK9E37Rv31y8fv16tVr4rms2q3qzijH0bRN9r5WNVZGPyqIq8qrIo++j71PwWpYhKep/cddeSaEs9bC0h1W/O/b3qsYIzPE9cw5pvz8akozGpXaXefbsaZbZqirqep1SuHPnbDodf/PN13W9zjJbljkC+75dLee+bw8PptbkzhbW5JwwJRE2nMj3MQYgskS272PbemYxxgKgMQYFNOfpnHMuI7KAzlBGrnC2zPKRVX9mXzzMrtRotVqNx2Mt7jg+Pv7oo4+ePn2q4ZbITvkZY4zDHYxJRH3fi4AGeOPxWBibpjG7ekWtu48xLpdLEdFWUSrDNzc3GigulvPb21vNvE8mE2PMYrE4Pj72PhJR13XWZsfHx1mW/frXvy6cVVWq1ZnW2ouLC6CrO3fuNE2j4UTTNIvFIqT45MkTjun2ZlEUb/6n/+l/+l/+l//l6dOnxuJqvsrzvKzy1Wo1Go20edazb1589zvfe/P8ddNubt6+Oj09Lcry6dOnn3/63R//+E/++q//+l/86Iez2axvlr0PKXljzXK9zh12bWMIq7LYrFciQgiXby4OZtOubTJnASDFkBTJjASipQNGy8ri1usz3nsRVl2uh4hyuaXve+U5DICKcgn1KWgdwNZMpRT3ukjum46Ukmp6RT4066Nu4uD+hBCHOFOZDztojfWTVDWDVk4Y48gxgomCDLPJgQdadp4AkIQEwaAlODg7GxzjfUs7pLv0RoawBRGLokACZlbZ8z7kRdF1nbW77MsuctZABgBiZMRIWu+fOMY4nY1jYEA+mIyrqoLEHH2RWeB8vV4agylxjD50bbNeFc5uNhv2/aiqjHDXNrlz4/FYgp9fLZ1zWW4NmeQ1IpDM5FmVYzQppdKOwDGxjR1DpOX81jqDPhUWDdq27b//vc/HVebbNqaOY0B6v8fMoJX1cZ6enmrZnppjbb+3c+Jxf/kQseu6ELbdINOOLN/3PcC2y5PZFoamAePBHe2Ad2VQzrnRwUHT1tqv7dmzZ1o66JwLIXgfjo9PM5cb4xaLFaF1zhWF0dimKAqXFQDQtq2PbK3Nsvz29vbTTz/drJv/8l/+y2c/+DzGeH529vsvv7x7544z9uH9By9evDg8PDCIoe+vr68//PCj5XKJAqfHZ4v5/L/7V/+qX7c8F9c1Mcj19dt//Md/JE5N0zx58sFms7m8eTut8sm0jLFHIwcHBxzfsfz2j9vbW6VfDjHMgBVraPeH79rT5O8dg084OBf7zuq+J4mIKcRviZ++yF0GAByTkLHOWWsiYOAgIPp0RYR2SIs2z3r31He/BO39wxIALFKwmTGGgQBwuZyvN+uurb2PLARkARmAm2az20Lv/czzbTcjZhZR/U5E1Ie4vw78fo5kf/du/4RGTURktjsKCwFGH5gZEgSQThgAfNdx6KzRloPCMabQ+Q5TSpJ8ZtECE8eMgDJrrbHAEvrC5tZYh4aQxFhGZo4ShQyEtheR3NgYY7tqnHOjvBofFQAMHKvMlWU5LsfT0WwyLuf+GgAEDHN8J4S8l4RVRTiZTHTrpF1nYg0bREQJuMO7hueiSlQ16DaGTDDkKrTtpHL2dDvKLv8xCOHp6en1DTRNE6NXOyYimizRMOb4+DiENJ8vu64zxmo0qBWPdbMlxT969Oji4uLJkw8/++yzGKP6t1p+VdhiOjm4urr6T//pP//Zn/345ub6+vr67PxE/V7FWqzNmHkxX11f3T558mTT1G3bUm6stW8uX//TF6m+vvl//Ot/nXOKm+UWXiFBRAEevKZ9SQOAq6urIevDu654zAx/IIT7Dv8AaA1SJxw5SQIWEU5JRPSfnKIwIwhoty4RBCAUNIql6ecP2hY1uZxS8J4RxRgTow/BE2X7KmAQ6W9FicPWz2zOW27A1jkiskAm9pF2smQAkwinlCjwnr7YnU9q7lJKnJJoxk+2tObth7MwM/KWPmr3QlzFZId1I+N0WzFLSgyAJICEMUYEABYW6aMXkRgjpoQYSEBEiAP7GJERsSRASalv+uQJ0REZgdRFn5ITR0lrhQ0RJhGfJCbvKO99MERllvcM63ppUirK8vryjXOuKnN0Lvp0efF2M58jp5gCSmCOKUUre3Ed77IRiKgFjog4VDG+efNmUD/b9i3bnbEtjLDO7ZoFy1A9mOK2eGxnJLc0drNr9pjeb1cBAN772Wz2/Pk35+fnV1eX2iKp7/s8L+q6fvjgUdsu7t27d311G0I4mEzU7RQRbZwxnU5PTk7UT/vTn/zZf/7P/znG+Omnn15cvhGR5XJ9enq6Xq+/+OKL73zn45OTE+0sjoiPPnjw4vkrAJzNRqvlpiiKv/u7f/jxDz8PoTcGq9HI5FlTr+fz+fLq8vnz53/83Y+rR49Wt5dt2wqIIbPZbCq7SwCqAdmlBdbrJjOEYCQGFMmc1bWFbUF9Gnb5nrliFRnmbTKDmWVPYvchCt7rjcB7CcPBr5FdRmT4Pe11lYYdFlAUTvEXAEAUIv18VkxH3qv8Vli1ZGaJCREtGTKGyCSk6WzsAVph6byEGCNHjhykIDNc8L54D1TPwX9O24p1YWZAdbi22MyWijCkWHeOq4gAGr1XSQIgJAIGDRICATIgCAeJzBwNoiVsfEAkQ6SNok3iLMuwqqxq0r5HJJvnRi/ABxOFiAwmZ8GREySK3Idkk1datqVEhtkRJ9+vF2KjBVtYVxWlA1ovlldN3debyXRkichAkmhlh3QNClhFsaqq29tbhfiJqKqqi4uLg4OD21st5YJhB6gKxL3ksnqwupTaUk6ldyiY0EUcdsCAqqeULi8vQwhaJzHgE0P6/vr6On2cmqb5sz/7Z79Mv765uf3kk0++/vrr6+trtYez2SzG+PLly3v37n3xxc8Ojg5DCKenp/rkZrMZFDCfz++cnR3OJv/H//aX53fOPvv0e8+ePd3Um+ODw8PZbL3acIiT0cjZvN3UP/3pT0eT8cnJiVjazOsQwmw0Oj8///1XX96ZjWd5posQUyT3bhm/pdcQceDjD+yF4a+yo5IN0jJsTXmfljR4lcPD0m9Je/zB4YTtY43v3LnhJwAoOc1pjLBzN/KiUCuq56g7qo2ctsqXRa8SEYUFQGKMqG6gAANLEC8xCIyzDAA0sDOgrSgQgEXe9ZseLph2hD7YFbKKOroANq+stYACAG
YXIOsWSgj7cK6+TmAFBCAKCidmYENEgAJMIAiJOUpMBtgQGTJgyRkgAuNIEjvk3EBmTWXHSpQ3iBmBIUAkJudRjAHnqCxNnjsA8Bl7TyGEvNhG6ZVz03K6Xq8Xi+uDR/dFUgo+9H1wOTprwYgtQ8eQYUbWorHqB6oIfUsVad8hremoqkpLrXY8xndjJBDf8dZ3rd23n5lSgoEastNVKg/69oFBMnhcl5eXxydHbds+fPjw7du3h4eHfd8rUmJ2pZMppcPDw4ODg6Zp5/M5ACgpHtAolKqklrIsf/WrXznn8ry8vr7+5JNPnj17Bv0W0z88vHNx8WqxuL1z5+QnP/nJX/3VX3Vd8+d//pMv/ukXFxeX9+8/vLmeHx4eC3ej0Wher7VqhAhTCr7rzo6PF4tFw+FoOqqqar6ouy5ZS5hwX2x2QY4ob+adbOyA/uFM2mOBKYI6iOX+MUSS+4L6LYu37wkP5++bQdkVpuj66++13qLetPsfPlwS7eo/9w+VJWSBrQuscs6SZLFYLNfr9bpuQgxAyIAIZCzBUE84FJGqMZOBCQKACkGJiPY6ARQV5gFD7vte5B11ZDiECZCRHKQEAMwCCdACsBAkbeQBJAaBCCxBUY220puMehkZgTNorTMpOmBEtCha5GWdHbltC2wlkJCBlHS8Sqc9rELoVVPM55ZS+/bqrYiMq2rscimZkdknDly3tbGoUNe2JTbtVfQOUO/Z2VlZltrNQWuC6rq228r6fRSHiCiEwCnsdNK29CmEEGHbutvspk0MmeU8z4cpNgNRq+887AiQfd9rq3OteAohFEWxWq2I6NWrVwo6f/nll7PZbDKZaAcATbIx0OvXr4+PTwRB/dKUUlmWIQRfx8PD2e3tbQj9/Qf3NpvVN9988/3PPj05PWqb/uDg4OGjB5eXV1ptmFK6f+8ug6zfrNvYlVVpDfV9v1ksvvvggXMOeu1Mp2wpZGbkd1zZfediu18RtQWQuvqKaorsLyZ+a2N9S9gMwZ7carsVARAEJNWGwsJRREAIwRhjB29lPyZEVLsESrhDBJGkcfhwPYOd+dY10C73KyKOTJJ3aXdjjDXOAc3XdfTBex9iCkBREMiISSgWAREJcPdFAsJsrdsqd5ZtgoS22c7hfvn9lRzUFsi7SqiIQEgGELYXGEVEkmTGkggIWyDrjDO0raatCjXjAA4AaIf0SkyFNTnliJhCDCEQ2sJmZNg5Kgp0jo0JiJg5wMxgNTZW21Jti2AlZvb+yepiISJVUU4mk+lkQgJtEsZY5hVLTCFEZDssK+3R+UQkz/OPP/5Yy7TVvTw4OLi4uChLx8wAabBvtBuKMFg55959GoBkWab9KbSoQs/vum40Gk0mE96lJTWNIZXVFoBv374tiuLm5kr7fFZVdX19qz2wqmr8u9/9jhNog0O1kMvl0mXF0dFRjPHi8mo8Hj979uzj73wiIutVfXFx8bOf/ezw8NCM3eXlZe9bY1AgqgH/j//xP5ZlaUz86quvjo5ODg4OLi4uHz54slwuq9JkRU5E4/EYne3axhlzenr6zTff3PvjH47H465rfEjOubwq1vVKb0Rxf7VmqlO08GxoM6OnDUOs9j3YP3RQh9/DLnje/82eS/Jt1rXswuxv+aIiot3r0l57Ic25OVsOH7j/Ff+tQ3YJdxQYTKs1bjSCJsa8D530feSUYmIBlC7G/c8c9JROaOFdUy/ctfTcpvKBY4wkoDxH2v11q+n2yhHFCIAB0HZViKyOGGSZwSQgkBvKHVmDBCiSIhILE24vHgWIyJHpu84ao01Bfds10lhriyzvwwIBQEgYtEPX1psz0LXBOVdVZUqp3iyD7yfj8tGjab3eSExd067sKicLzLnLXWV737ZtijFa4h5iFDHIbCRaSAwMSSZFUS9WVVFMy9F6XberJrYhQ+ebNoQQ+sgpGSJrjTOOiGxeKo5NgBjZCFQuL4zrYtJ844MHD6y1V1dXcdcFeLVaaS3z1dXV8fHx69evi6KgSblpm/rVsyKzjrBw1K1uoa8f3H/ULRZN2374/c+7yE+/eTk7OBTnkPnm5mYymdy9ezcGdsZm1pUum1YjOpb55VWe5xnRw/O70IV2vuq7W0tQzAprsGmS75OzJFDkxcGL118FeUtFNjsfz9u38+7F5Hy26dvYbLRCP8VYWMddl3p/UFTduq4mJSFaaxPKstmgpcwCIhNEFEZAAnZGDIL2ESOIvvMpxtwhEUTfZNbsQBwBiAhoDGbWisRdCzbRdg+KHLJx+4KmLxCBCDVGQARrDcC2GALJ6LnMPETvCNArpxS3BEvZ/gPEcoqRWQNCIrSMLkHsO6/uInOPws4Zm5El8cEzsikwyzJjsWmXb19dLxaLruusy9u2596v3t6mKE+efGTYvKHDIs9VuhAxt5aZ+75f1w0RIRnjUBR3QQIylFKGWFWTvu/X66UBmY7GRNuuakhiHSWyMfrESUASrhMPlcfIiAKYQFJiC2gFYwhtH4iZhBHFWnLOCVHfbNsjiUid0ng8rpt6E4JW7UwORjHGNjYA0nRtH/zQhFtRfe/9eDxmz7fzt8aYyWRSVrOmae5Wo6uu71LKDCDyslutNss+Bj0HTVqsVnZQZoNl0ye8XC6JrHY3bNtewRJEbLtaHU6zK7cfyBzGuK1Fhb1OaizGmPl8/oMf/EBd0GfPng2emJIwlTiiI7smh0dlnqXgtcslAgAnrTGv26btUxJWVpcirjqobOjemVJyzo3H4w8++GC1Wi2XKy0eTykxWwCwWY6IeVYaY0JiFyIYUldA/VVt02StjTH0TQtBABEzq/xnIlK0feuw7aRh61wJ7E1b2AHoIog6tBQHt21A2AdD9IcWZvjTkOfAve4m8H4stO/9Dif/V79C3s8lfuvYdZt+l4IzFomydlUfHEzzPNvUi7bbiKQYIcQ+chyPxyL8zdMvv3r6+67r7tw9v//o4ZuLy4PD8b37d8ajg5ub29cXl9533zx7WTz5M82Xpr3iD94l9IYrl11kKwmGdpiyQ01T2kISsHM6hze+t4AAoD63sO/7wmrOI6a+hRgdaeqLNfWl66yyrdMptNbxWx0hQPodB80Nj1Ur67Ub/T4B0Hs/Pbq/Wq0SyGiigyizyJULYbVa9FtbDnZ42O9DxsjM2h9aZFe1hViWZdstjTGGnOwBANbapmkQeZf94303Q9u/P3v27Ec/+tEXX3xxeHg4FEbUda39aYhIh54Cc5ZZJuzbOnhvAKwxArhcr4EskVzd3BblpCgrtK7btKcPzxTI1baLIfbW2rt3z3VMD/OWf2etJQLN5xhjjMvQGPQRjGWGvu+bts+ybLNZ31xd51VWlqXWEkIAmzmLxhhjEcHaRJR2vd9FRDNy260g/xXiJe4Ym/sCM/iHw7J/SzZ0a8oOsBlcL+97+IOIEfcqgPSj9uKL9wR7P7T7rx5DvICImnwi2gLUyuTUcorRaGQMth0eHJy+vHj98uXzlNInn3xwfHzo8izG+IP/8b8TxvW6RrJFaY+OZtZm9+6efHH17ZpV3UL76zMIp4gQGM1VK
LPXbJt0JGMMIO/QnS3KJdtSJlDhZBESYAEBDSMtEYEgx5RCT9Y4Mpu2Tikpdq3fq/kw7YKxTx4EAGutvCPEvbuFIcdjjKmqSvt3aCBwc33ddZ11LoVoEI0ydV0PMDUWLZkin1nciwNhANNYk6eiXM0sM8ysA4ZCnCBiirIbcGGKwunAakTULwZB3BFqxVgFBl+8ePGDH/xAu6NqCTwRDYMju67TvvHL5XwyGjuDHFPf92WWj0ajqhwtFqvxZOayeHl1nRfd9ODYutzaTmlK67VhjsrzFkiz2eznP/+5cy4lSSkaQ3meAWDTNJ4zS2AASIQFEzOHVDddiFdFmanqcdksd5khAkmKyLldffpWGFAAeAdOsQgCAgnIH+TlcC9sG3wN2iHGgxb/Q6nYV/D6pX9owYb34q4rwrcO3BUZ/qEc/rcOZ60h7YUHacsyT8aYMs/apu66xloajSfWbGdUfPX110gwnRQHB9Pj48NyPAKAkOLt9YVzOQtOqrwsRmFSJIbrGxos3rDpdavoNhgiveFqdzIQAECjrxD6lHCr9N5fPcR31VQqhCAALAicOWcI1e8mIiBi5hCCoq8DRE97fN1BHIY5MESEe43S95WpiGjhkfakh53aTYElMViRmDiGzOaZM0i2Kg+bpo7JF1n+bvDi8LmIyABEpKCl5up41yTb7Goq425ksYqic455V/8uqIOmiqJ4/fbq4cOHX3755QcffPD3f//3n3/++c9//vM8z3Wmmq6+RgUHBwchhL5ryiLLbEEGRCSBlNXo9OxO04YuRLCYfIwsPgbK8icff7RcvlXKpQhro38AbtqNy4xzhqLE3TRibQhPMZGxrGNDBQQpcep8X9f1eXaa57lOJmFRgJsdkUFi5tB1AIIAkN7LcaEAIpIAoiC82z3f2tw0tDbcQxe/dfL/jYF693XvS/Xwe2XqDP/8loTvn/x/L4ckoN3yQUkCwigKmsfk+8zSZDIS4VcvXlxcvGrbdnSYHx8fn54eF1UeQttcL40xNssuLy8ns1melVVhgSwSoDCIR3S8x80aFoTfJ2zpb5iZU0xp4M/B0PcJcJtyHIbS7BZHbzltH44IQCIRp+MEQzQpkEFkE30ffffg0aOhKe4gVzFGnTlJu+JPHMpcCAlQPdhB8Wl6sO/7tm6AJcsyZyxTSill1gJnhMQpcYwAzlpCsoDYdnW9XhVlZoe7hf2c0p47FEJANPpivV4nbq21hrbtfZWkol4l7Lx54XdNfjWc03Y1Oo+BiNq21czeeDwe2hlqIgESh67tYevUNk2z2jTTg1hNptcvXorgdHZcVKNN3Qv2f/qnH//8P33JzCjJEqTQaVP3xe3VpCpTSol70NGFQRCxzE2P6JAYQQQYAY0zwAAQOaWU8rzQftAsgUC8D5kTZubAffICkBEyR0hsUVEodUC1heYWFv/WXtefQ58Y3qOzDHI17KFvOZmyxw3U9xr4tvRuf2rlu+wq3wUgsbDA+wTL4ev+W3Ko+SlELQ7czuchQiLI85wlrtfrm5vrq+tLY+jBgwc//LPvzec38/m8blaj0ags8xhjs1l9+NFjBLNYLK+uLkVwMpkdHZ7cu3Mmr1aaYgCwzMi7kkjtlyiS5H2Xvu+3TEnmLb0OAJCE+Z2wicjW+3r/nkh4C0Vtqw3FMFprM2PR2YjoA2SWrEFEUJDVEokAGpyOK93AzGy3mUNbVZWzOEwC1bAQ9iIy9e/yPB+Px+qU+s6AdUja8D4hgCUSIESxRpBYYrT7fs47Db2rbBhyMgqE9H0PGEW2LfJUVaj16/vemG0VXIoMAErS12TD8fHxer2+f/++pvWU4anW7/Ly0lqroAgAOEtt0yQdI+5svWqur6+tzWyWA4B1+RaPstTVm2a9Ikwh9taAiHRda60tclOvV7vJctGQqrdEaLIia2JKwpET8DYHjYCAnFnXdZ11VebcZtOSkVEx4hA5Re3EM3h9aAwbRERLGomhJsWRUQnO+9H18EZr3/XUgvexlj8UiWEXDqF1Gub27DCh4S24o33ty/AQYpBz/39ZQoO0deO2zxdEmFOKAgjSbNZvry7btj2cHdy7d+fw8PD1ywvmCIy99zqnZHowOTo6atYNWRcjg5Ag9l24vr559uyZMSeD4dqJFoqIpk/3HezhancrEPSFy4wj1/e9SNrl6/c8CFQlhMiyKzVAQokxGkg6OqogQINioIzZarUaRrvAziZr5/jh6WhKSTEYzswwpFnhHL1arfIbCNLj8Vgly5GxGQCRIwRgiSFG7kOXldloNLLWoIT3aBm4A9mIcGjppc9eCym895NpzrueQrJjXROR9z7PjZJRIiWN+rz3J3fuzufz+Xx+fHw8n8/VuI1GI21We+fOnYEMoFWFRZGv15sQwmg0Go+mXes7H1abGqEdjSZZll3fzIloOpldX19//btfZyhd6Lf+QL1y42lGxWqzzMwUUp9brMoxETVNE2OPmGLElGwKKIIpBZRtTwxjsOu6IrdIkFLKXVYUed+3yYslAmssMBFkWSbMYq0ytpGEGBGBEAnf8dr3PUN9kKrRBhH91rIPz/IPj8F4bsk3u5Kl/ZgKERWyG97yTgj/W9L23zi0DUeMmiAB2XUequt1URRNW7dtOx5XT548Kcvy9nb+9MWLoijGk6qqptZa50zmcmGLIJzI9zoP1BgqQCgGGjwCtWm81zzlW8W+sq0eBO2Zv9VWJDvGTLv1muUdTxARaadqgDR3AwhCIpvVqspcljtAjswkbIlcTovVUvGOYQUUmJnNZvtCMQSrOo8gCUBioB3VHmDdtHmeI0vyIZKhEVhjc+tYCGg7sx0FQgh99E1Xu2SsNUWRhSjbTl4AYK1NiZumQcSyGGVZttk0tCtLSynpqmm3D2bRFzokNaWkkyQ0Ka+sF4Uly7LUpL/S35RWNjCttaC+bdt3A1CNVFUZQqzrOs/K4+Pjum4vLy+Pjo5EBFiOZtPMke/WBOm3v/5Fe/nryWSiTdYcudRvbppl2/bEARGzrODQJBGIyYgYwYPJRFhS6JgBGAxuya5d22ora4NSZnkIvu87YxwYk1ICBIvAAL5vIXiDpIN+OCbmmBkQ3A7rMiZLu8YT+7EN7MovB6EaktG0O4YQYDhZa754x4D/Nq6o+1VEH98OVNum4AmRjAnbru/vObrD3sJd8b7sKITMuFrXZGA2mzHzfH7jnLtz53z+5c1vf/7bxEFHL15dX79584aZZ8dHxhgRC5xbk1nnCK0w5WUlDOORrZtOUuZ7WS4XvhdEyTILAEQ6FVN3V+z7ranfqRJgTlqbO1ybD526XTrTN6XIvJUQ3LWqMwZj3HZ8tGQAEZhjilmW5YUziH3fBo5Vnim9qCoLAOjaRvNnVVU5awihqTdbxhyANQTOikiKQex2Wq7Z9YLhHbklyzIVCgVKtr9hA8Axhab1NrfFuEBLXeo361ooaXNxq7tfRdEYN51OU0q+D1VVFcUWrQ4hKdUQERWG4fROcxuznRmg4R+9T2LW7nQqk4PhNsZoenD3CWbgyoHwsEcnk8l4PO37fj5fAkvT
NBG76XjifbdY3EYfRlX1Z//inw1CKIJauLTZNNbaruu6tu/qOoRAZEej0XRatn1iYWBwZMlaYQzBhzYaY5BFUmLablDVsgQsKQoy4xadEyLZoqMMYLboKChSAPtCsi8z74MH2xMGs7lvOQcFvJ+i2LdUQ6C472fyXqpDD33XPmq6b6WH7T6YHdzt/dPTUx20WFXF/fv33rx587/95f+n7/t79+7duXNH1ejR0RERXV9fI2TWZC4rsjzPstxZZzJjjPE+JuGUDEKBJgdyLF1i2/luX8vAu+qNAne1vO/fguCOcWB4O6s0pW0/KwAQSCwMu4o5JJKUgBnRAACBUpsNCKMAkhChAUJJzNtWUYNKgj0XRvYycPuqc3go+6uq8cLQZ0Sb02iCMfap8z5KLPMqQZwvbiNIgkTWCCq1Ba2GmLDthyk6I7betJvNJoSkGTzEd22aWLRv+buGJeoVqBCqM73Ph3z69KmurCI3tIOGBwap3r+6u2ppNUvp++1IDSKq15uubogTSAgNO0PHk3w2Pjk6OPzux8daPyU67rzzmeGM5IMPPggh9H1Yr9c3Nzc3Nzf16qbdzO3hnZQSImUG87wAwYa4TRFAOMUArNMqCS0gJwm4I50DonVkjAXC+C7bzpoVlsQi2u3vnRgMUoe7STiwxzuT9ysJ9yVQdvn9ISAf7NguE/seYAN7Xu6+SA/vkvePQS98S1mIyGx2dDu/9t5XVdHF8Ox3v7m8vOhif3R6dHLntBiXN7e3bduPRiMim1Uj68q8KMvRqCxzZa2RRWNMv+kDp95DAiIwiclH6QN/ywnXK/wWODl4BIgIyID7Dvz2T9YqLLHN16ekk8kEaRh0lVAMEBMSQRoSSoaIIAGApJQ4kXtXSER7lMBBx+n6w676PMWIAIa2FF7Q2hDm4L3oOBaiPsa+68qicNYai6kNgkyORGRTNz1Hl1ubOQYEBkTZNurCXTSo3Ss0hRDj1iJpabxKl3K+hXEQJ5EteWfQavsR9sXFxXg81lntuBuolnblhd573PEAt7cagoYHbeeX65W1mfbCsCCzUSEhhHYzPTr8/qefnB4eeO8dhNj6br0drxVjCiEQx8XVhXOuKkeT08ODUX40KZumAaDny42RXdUZApDNiEzp2s4z67RZq85/gsSccmcjs7AXABFLIAwASaEZ1PZnIEPi/tv5Bt7V76Y9QGU/bBu07H4QOMjGALeYXU9eiTu2pwiKwBYOBQRE0Yp4UIDIkjFkmvRecnn4in1XindV14jYB399c5NSQnPw+vWLX/zyZwcH07/4iz/XgbUvXj1nBmbo5rd5Vo7HY6HcFkVRlnlRkAElsANgAI5JkjALJAHfh7rp6qYz06kumAZ7qGE1Q+JgjAEEEY4p6MUMgpG2JRFxZ7KEaNtqUrYlB2lno5JaCEQUSMg0GDqRBEy715xS5JicMyxsQAiBhCUGANAOPZAipN0kKkOQIHCKvI0jhhhhsIQxRrNrESQiurfROJvbJJxSMmhdnvmem7ZHvSMAALDaOcIYUxSF99si9Lt37x4eHs7nyy1DD2XwgHc7YEi+JZGtKTPmXcf14cyjoyO1hIr06HUrFroDALYToLYZSCKXZS4rEm/6PszncxTg6F2eEbKzcng4efLg7P7pzIH0i3lEUTdYRFDIgpCRjGy7mW9SmgMNnsPBuCzL8rrZIBgRDLFrVm0SYygzrsAtSxM5JjZGSPW0yYwRFuMxgkjihNpoNhKhQVXDiRlFy7Pl27jLvmYdfrm/PrjjbeFewn3fZH1LRFN8Vzo4/HL4/H2TqDtYOxjum53h7UPlmiIiehmXt2tErJv66Yuvmqa+++DukycfFONysVxe3rxNkc/Pz7OsWC7WImjzLDAKgRgBC2CRwGhzKOscYEwExCQgXeia0HapD3uTpb8VVg3XP+xvVSUi4n033Jpzdmc/cQfgx2FBCDQjvzMJIAKIBIYIOSUI1hgSBE779h92ePXgsOCeo067ahLZVZlpMK9SN6yqloYT0dArNYTgysrlLgXfdI04Gh1MI+HN25X0yCDCKAq+D1w4haacc9qX/vnzl5eXlwrIqkpWl09ECN+NTCMy6tDuXfe7/TFYAJ27pC6rao7Bs1KLui2rZ0+0lcmUYtd1lrDIHIqs57cns9H3Pvn4g3tn3Dfr1cLElPrAkR1RURZqq30fUkqz0VTHNbZ+O+RIRKy1ZydnxjhhU7d+uWrqto2QmJnIERBvnU/GbaWkRUSLFIh01ntCIP52Da6IZvATsRnEabBp+1L3LaM0/NR12A//BudC9oKT/bfsX8OwzvJ+Q+FBsId1HmRbt4iujO4nVYvz2qeUFsvbvq8fffDws8+/m2X2xcuXV9eXzuZFUWyamjd1ilKWo8RCFsGkJFEw2iwnIgCOwlnlTDSUOEXxPvbcBe5xx5L8Q4gY9jzAQSSYmSzIXpf7YfhSiD2JttV8j7gn70Zq75hrBlGIDEpSi7ol+oqIMpf2JW3AqAaZVCLx8ASHWHowIYqVKOqrYqmx1bZxhMGyLAW57RvIbCkTMBRYfO8ZMKUUGWzadddSdajPrGmaoigGyZa9Y/tE4V3DBUV41CXY3oy8W5R10wxokgbfQ1cL1Rm4NzdbRFigaZo8z53Ls4x81yeBPHPsO2fkYDI+Ppw65Pnqln1/dDhbxcgpEYo14iwAAyEDSdusfd8agumkJBq1bbtZN33fQ+iJKMttnlWFy5Z1X7fBR06SCBHQ6HQRECFrDRnmLX0J49ZWGMQhSwuwNYHvjBez2auy2d9YfyiHg1wNdmz/YQ+PU4VE/Zw/tKWw52TCnuUcXgyfMHQqYWZtf64/dT/pc1lHXC4XZZV/+tn3P3j8IEl6c3XZ9U1WFEVRANBivQo+jUaTSZ7ZPCsnFREah2TBWkKLKYGEBJTQgjEklELX+9BG9mjA7LVRU6ALd3nOwTXY4bS8j2ztqycRUVRZZ/furSeEMDQuEI3XEC0RWKLILJIYAczA9SPZC4kHoBjfr3G39h27E1i0GSTHFH0wSGjBkhFjRUQSi0YGzNonUiRFiUREzvoUV5t1BKiqql+tE0NkiilZZtZEn6a2EVGHSVxcXNR1S7s04LB22/IF2AJrMUbmbVofBwWDZlBmx8fHOuTg9vZWa9jUqOr4F8VUNDjU3TAuy9b3jJDnuYikECVFAhGC89Oz87OTvqlf31zGZjmucodyOJ1sNpsQAqQoSCgps0SU5c4RcN9tk6qOCFksweWbl1U5ns6Oq/FsMq6AsiRN2HTAghYNQhLYGnMjiBhDxAGIE0ER5ZHQtiRINdI2Rh+YxIN07RulQU5x79jfQ/siKrt2r2mv5/Jwzh/+1EzXvrnbZjXUgoegY2q6rtNRFnE3qTPuzfELIdw04ejo8PMffP+zz74rEJ8//6bruvF4rOSHGPn4+DhzRZYVo9FkPJrkoyKlgIjkCJ0gQgyhD10IXsiAEHMKyXe+jckj8f4iDC9g17eW9+jjaa9nx55DuAUdYorGGOZ35cUADLCNvfUz6V2967YrsbCS3QBA64Zp/xpgzxoPOnQ
f4BDZUaMAZFf+PiiU4QENiJpzLlgQETJYVVWfuK5rdG40Hq/qRhBYkAQsGmRgESFril242dY1CkBMAmAI8zyjCBDT1JXXkBAzQIwA6CjLjDD3HIpi20MlxpRlWOYFMzdN0/k1GnDW9mHjY304neWZvb6+nlQlokhsY2gBwIoYEkHYdGvnLJi07JeJGY1khiRxFvCDo/sHWLroesB1CMYdzu2Me4/ZyBSEBAlALKNI4pgwJPKRWEBSClGSLbPZqDg/P3v58uXtm2/idDaZHpyOZgelu856L3C7Xq67hK40tgiMEcRY22IGkgCIrKEo0ncpJeAYebxsVliVxjruehAuyKFAKqMzCJB8vy1pQWs0lRclAYDNHOzCG2utj7hcb2KMVVWhlb7vACDPs6ZpEnCm7eUdEEkS38VQGmTmwDFFVtsYY2z6zqTtwOembQGoKArLKbUNJdpsNiFJjHFdbzwnBlm3Xd21XfA+BCYDhIHTum/btv3OlH78Rw8+/6MPEndfP3t+fXNlrAsR13UDQNFzntOonFow/YYNBxqfkhMEhsjcobMmiz71YtHVdW+yPImpN2HVpkBFdE5iop27DtpoUGTrAao9T0kArHNIlEIQ7t/BM3HLq0YAwyA+KkxigABsTJB8mhRjBawTSBTmxH1sNpKmVYEZYuA21Chm7BywD21D+di5HLaYolhyAACMEsWAscZmYCmCCJuEObqeODA4Z/JqlAATi48hz3NIrOADQSLaNUKLMWbImKx1gMYaIUDGKLGd5M772MeYhO0gygCIhCKoVfMuy5iBmWNKFAICEZFB3OIPahZY0q5HXdt3uSusddaStRYIY0wxRi9bzaR6XafGxhhDrw2jWEmAg/YKIQiCMUS7Rg64y0keHByMrK3Xi8jh4OBgMpm0bTseF9sP3zY810dgRCwBZ1kWow/9dviWMcbm2Wg6cb7YMveB8moym01uFptRVYLl1kvnvRBLhLbeGAskQCgICCDaBQCjz3KbZZnNnEmJOXFSshc3bZsza4SQQAREQtAaSGZOOl1ExKctIrVarNfrZUpp1PdK3UKS3Od932VZlnNuzHvueqJ3LBZjjMnylFLf+5Ral2UiqMWfQzHO7XJRb1pmjpzW9SYyC0LdtDbPYtsg4nhU1k2nI8EfP3z0b378nclkcnl5efn2mpHu3r1XN82zZ89Oz+7ArkWA9z4rq9FofHJysgF9vqAt5y1u5+uGtM2zxyB9CNtG/vzOO9j3CGhHWhgcbLMb2MZ7qbx952I/lh72j5qBbXkTbkNiBCQgUTKmvDtwz6Xfd0n0i1RLGsC0ayKRUpLELnfMyfvtQGwkAOUMAgCAQRTZYxQKpJTAkCFnMpdYQuKQxIe0c0AkJbHDRRAZ7e6onm1pDFIS5iTsUyQgi1aHrWl3Ld6dLIwikFgSJSKKBBCYk7qHVnZjD9XR1yY54/F4GRbqFEncBl1DJCA78ioRgTAkBgH1iFCEiDKblWWW5a5PnhkQAVGSiAFBJCVmx5gyV2RZlpKO9UD12ep1jUhlWbLgarXa1O0osHVFVRQ5ZbaLPqyT74Qy6wCMNeTIgEFjkAVsTFGAWSSl5LeCseOhKqpnMAoIS2TxIWnHjz74siy74AeI2Mft2Oe2hrquvfebpiUi7zvF38oyz7IsLzLFuzWoY47ljuafUjLbelNUfkbb+T74EIIhR8Y1bQ9t33a+7Tt1SjvvGUQIIwe/CZAYCNtN3XbtqMjvPbj/0UcfffDBvYuLi5vbhTGGBd6+fQtoHjz8wLncGNP3Xu+1Cz4LQfcmOGMMWUJryRpKbKy1ne91k4SQutb7PjLS4G7vQ01q6OKO2bPvYyMip3ftZAZHXXasN9nLtQ54iWYoYPcuQjIoKXoSpm0XDEhE2p5+P0AgQG3tiEMCc1eerhK19Wt2v7FkdH5OCtG590rJEBEFgFADycxmZVm1IUJMSXwInYYA6u5aEUFBAiJAwXcKxseYQIRQCBmBgQEZQXLrtpC6XqsQE4sIIXICzzrgKVhrHdk8L8TIUGisSUIiUshHVzC9j0flmSXlRyRmBNqS5UUbrgXm6WSSFa5uVyGEsix3HTs1ZjDacAnRel9bS7k1iJhlKcaYQkwp9T7GJLm1jsjarO26erUMsrr/8EkfORiZFC543/QNEY4q5zlSQiJRIM2KCBm2KYTgU98G4xAERNEoFiRbMFDwqW37rut8itq7JSua7dQEYQ3ANGbg6JS1p3bAh06Xazodq85S9G8ITgraRh0pimJduoPvPXywWS5Xq5W19vBw5IpC5+TkWdmnXYNJTiLCvB22Xo1GrQ8319d5UXz3O9/58MMPR6PR119/s1qvjTHVdFY3jV+vJ9ODR48evXp1kecFkVHPpm1b9uycy04mFo2z5Cw5g0SgfVmEEYA4Qdv7vg8xCjqjlSZDlMW7JOqQ4pIdf2OwbHGv6H4/iv6vGsb9SHKQQ8JtRgK3USEnkESEtGXADJI/fObWt8KdVLOICOmov/e+MXFiEbFbT1rj0l2zEKOqwSCDc3lRVH2oEdnaDLhBNERICGCSJdgHvklAe9NhHzwzEBE4BDTMnBCIhBT90ZQMKKEBRYi23GXU1STasr+MmAE9DyFo4aP2UPiWR6EvxmUVhQNLSL0IEqBhVA5NSokkIlZ57pIUZG2C5FyOiFuaC2k9H8vO/ds0Xehb5TqJCAc+ODndLFcA4JybzqgoyxBSaFqOXd94g+b8aFoV+eX13KfOcZ6SERHRShdOBIzOkMnAAAAETglYUiQAnV4SIjNz23XDxMU+buswO9+HEBi2WXjded0GYowC7yZAAIAALFeN7LqYkXnXiWujpaIpKdWpTEnZQk3b+RDJWJflAti0bV3XddMEgbpvVYabrmVmIGzbNssyTglSPJiM7969+8mTx7PppGmay1dvjk6OyZrb29uirD77/g+S8IsXL9br2vtARAQmcwUAiNliUWTYOnSWLKlHshUDYfACfR98zwzWYf4H5Lx3x+AH4Q7O1Rf7juK+qHzrGM4hIiCEXWEFM4NyDBHhHXosKalfL8wRUQxZRGSdTrd9Aru6YU4sUZSSuOvUaMkgiffsvbdkjMtEBCSJCAoIyja+Q7QmY+YsKzJXxLhOgbMsI7TWEkAERqZt3lMLzQd9YBAlCoM2ijQGtn3VGQg5RLXX246uAsIiSRgAALPMTSaTIq+0ikIzs1vvGVFEdDvGGM22AGg7OHowjBllMXGKPih7BW0SEpa+rauqsJzqek1WxtMx5fbN1ZtqZIkIGBFZBLeeoSQBYo5d55umNSg6BdEY44ppSNzWTRQgorIsi5wRcT2f+5iq0exgXExHFUm8vpmHzU1enQKqk8CIWrwkBq33fiPcdW2MPoaemTnEGL1PkFLquq5uGpW0JBxCsNb2MWyZH7vUjmPqg+ogirBt4AkAkIIxJsawTVvZd6rNsFL/gBlBKCeHrjBEN4ultbaaTK21rQ+NfrvLWt+3fjviS31gFNQs1nK5nExG3//edx8/+iDLMt82BuTw+CjLchbJs8K5vPN914e27cvxyJ
BNIQBSlmXO2jzPq2kFJMZQbo2zhMASZeetUeTUe982wUdBsEg60utdr/EhGQh7Gc6BeqL4pNnmG95J4JDVwPdR6MGxlD2JBQAWBmCrBBsmQB3XyYzbTomwI83hNtcPAMAh6mVt/T5tJ8OCZQksQqwpAI4pGQbIUJJoIIcCu17JBtCgBeIir7Ks8H3wMbisRERLBgjBqkiTfd8iEaKgNRnaJIwEIsKg8TEBCSniZwjRCFIU7ll0k6bEkNiaLM9zRNP3ISUv8G7teI8Cq5lGay3gO8J3Sin0fYghxsAsYChBSIw+bqdYgt+RPCRlxtrMKUmOSJiREEWSSEKRtu2cswp1MEdD23GLddf1IbEgpBT6AMDOWmtwuV7neY7Jt6t5WY3Pj6YQ+1evLrL8YFC3hKzWKUq6rZfGGBHu+ib2fpeKi8wEu1Q47wrefQxlWcbIUcQYIyBBGAGEBY01O8Jk3JXMMcc8p5BSVOeKDTEAQmJOQAYBkISQyZB1Li+std53USAmMdvCbQMoaLDrW5+2Dp4YskhElKXgvS/z7O75nQ/uPRiX1Xxxq1hOVY3Wm41z+fHx6aZrv/rqqQCc3rlbFAUn6TpfuOzwaOZsDlsoQawR68gQYsLA2+SHiMTAbeubrg9BwGQINkWvvsqQcoC9rADuEV+HWJE4DUIoeyTPtGtkTjteGw8jHxEGUQQt6dUWISBIxLtIklHofWhHY0IRIYG4az+s7zWAIJA4lZkJTc+BVZA0USkpCYEkHUIvoA0KjEo5EphRURV5mVLyPpbMzO/lriyhRe08AGrhtkuQZVnkxMwC2whSa2UMiEGwRMbYKGwEQSRItNbVofNd37dt7pzmP40xWnugYPqg81QIt0uD24SMon++awNHFRNCHUnLEMPR0VH0PrTNqMizzK7X64p4Oh13rRJNlPEgKCiCIknb5ufjsXNuuVwu1qubRUop2XLW9T5zziK2bRv7jp2TxIagLLKU/M3lcnZweHb3XpyMLyH6zRJ2BCsRSaxMg+BjIMKUUtc1uudYYkgJowEABUKMMcAcUup7b4siioA+ZtAAUoC5770GfiICZDSGIXFdCImFAXXbJUSFVS2RJpcAUBCZCKwlayfl4XK5XK5XU5xOp9OsyNfLVdM0jQ99H8hs8cZk0JAg4ng8fnD/3uOHj6wzby/fzOc3ItLXG4MgCIK8WteBU1mMQorL5VILtZnBTfLJdGrJaS+jHMkSGQTa9uDdTYNNECP7PvZdjImNNQyUmIDSt3xIeR9cUZFTJiMisu/3fdFBPtOuB8zwOe9SqSqE9K26LVQhlL2ErQgIJBFEMQSoHWkIdKC9oqJskZzVMd0ppe3An+iDM9uaJo4xRp9Zt20nxYKonTc027ntiZhlGQlxTKLde0CYBVlEwPKWhC1qKBInffdms6mqKsvyzvcxBCIS5q5tpzGVWZ65LMbku9b76EPsvC+KKiND1qbgu6ZFQwbQIkUkLTrue51jTDtmzDbrqvxj2SF+ZW4cGBbxiX1sDdoiyybF1FkKoS+yPEbfNDw6mCCi9z4mgpSIyBp0zjhnsyyzBvM8P5xNYozz+fzt22uRdHp6enh4+PPfPs1yW5aj8biajMfJ98JRUry9ZkvALM5SDL6v10cH43/24z/+T3/3m6qq8sxpqts5h4Rd15I1b99etb4vy5xBdDjUaDSCd8OPrSBFQAY0LgM048mImVf1RuuyAaHpehKnII1WxO58LY4xWpMxKTEqL8ttr84UkYwBgKLMTo6OFcxwed40tTEGskKT8syszXK0brMsy2dPvzLGTEbjFy9eHB0clmXxL//lv3Rk/tNf/V/GmO9+8slicfuLX/zi7OyOdh5JSZq28zFMDw/OTs+/+PnPsix7/OjJZ599dnxwuFwudd9nFlMK3ps8c87aFCMz+z72PgHA9e18uawnk6MmyGq1mc6O27CQPZIK7UYwDCJEuyrHbVlcfEdAlx2cM+xYld6dD6LMUkfWZFlm8wwQvfdNvW7aBnNnEZx6IhFjjAbRWScMW4OlwpkSKCMlRBFBFsZtAE+AlozvWo5hQMskBREmsmQQmEhAKGk0qMQag7Sp6+Vi8ejJY20lUde1mkoAYSIRtrtoEAC0/eY2SwNaIBeTA0BjAMABOGNOp+PpdJqVRdf5xZI22GSElXNgbIicUpIQJQuIGUhKHBjfURBU9WxVEG51vArhAN4k30cWhm1HdMaoEK4iaXnhCKzNyTkDBMCQZYX+NTFDRCJOiRHN3bv3r64uL169atv6yYcfTqfTzXr9+vUbk7nlYlUVpUi5WCxKZ8ej8vrt5ePHjxeLRXc97/tWhyWdnZnDw6NPP/7wzZs3r14+Ozk5+eTjJ6/fXNzc3Nx/9PDizRu0iEGSsHG2Go9CSmLICDGzziVDQ1u/SMtHgldhE9z2ZcuKHMM2JNaqU9qWhPY2c1mWqTSKSOTkDNnM+eBB8OTkJPownk6Wy+XBdKLEmsy5osySD13XpBQz58aj0fV6vVgstMckIjZNM5vN7tw5/7f/9t+G0H/51ddNuymK4sWLZ1VV/It/8c/rTRhNJ3meb7ouAnDXtk33+s3FRx995H2s2+bnP//5wcHB0exgPBk55wLXzlhCJEFh5iTIW1b6el2v12sfxIKklHxIre/fG3G4J10Dc1r2yJy0BcphL1x6j2/9LfO4r82l70TNY0rGmBACEFoSQ6RET2vRWpti0GopAgbgpKMBmELotaaGiBBQXRTQ4lGl+CBqfIuIhMDMkFhprjo8BhABIYRQ5oX3vsxyBFbGUt+1LssQwZDANnggFBbeVnMYInDWFVnuuy7FkDtXWBdjNGRGeXZcFAcH07IsV5s69R1EF41jIgbs+tCGKJyi95ZFOGFkcPsJUmEZOjRu6020cxHvampC9FFYkNTLkJiYEjnWTGNRFMagdWgzl4iBPVGmFBZmFmHEhBhE+KuvnlZVcXx6upzb5XLVtt1oVD14+LDY9KOPPmrWm03dnpycBN8tl8vDw+NN07RNj9YcHBwpivv27VViOTs76bpG5yePqqIsMuZYVdXp6anJ3PXtTd02ITIYQoS+D9mu1dU2ttnB630MUTgrcpdnGEk7RGZloelWJDF2W9KlQC4ROWeIHDN3fZOYRMQ5V4zGOpo3xqjjeqy1y/V6Mpls1svlqrEIVVWNR2XbtvP5zXaydNMeHBxs1kvvuz/6/PNPPvpouVy+vbxoNquPv/PJ8eHB1dVVu6lD9F8/e+6cK8pyNJ2cn58fnRwbl4cU58tllmWZDskqysJl683m9ubmk+88BmBOECFK4r5pvdeaDFiu1pvNRkwFAFE4CseUjPmvVE4O0jXAE0Og6Oy7+knYA2D2iemDWCqWo50afYqsWWVnMudSHwGQiAwYEmtSwl37Q9y6hTt5VlKbgDquQ05/QDRo+1h3k60JcVtilkREYRuRXXFCiOPDg/V6rbig9x3H0PftQKHVwVAW2SFHEBIRBDRgCOBoOruNIXCqbGYM9iKGqDTOSjTRYyDLMUeIBiMBI4J1zpALzidOwftdh4XB+x9+7
uszRER493u9T2RBw4Ysx5g47vSNNnsnY4CMIYcMhBG3DwuRyOqixSQisayqEMJ6vVpvNpnNRqORMabr+izLvvrq6fHBzFr7xRc/m00n9++cP3vxLPTeWjsaTfKi6PvQ+dVytal7f36Gp2dHH3/y5Pmrl7/5za+q8ejo+ODq6vLo5BitScKd77vWW8qMMUmYZNvHdptU2AlhTFvKi7ZP7/s+gZTWtrFGRBRKIfodO8Qa1M4sAEwoBncfxDw7nHVdN5/PtRxsPB5772+vr/K7513XNetVkbuqzEGga5vlYk7FdFxW9WYTQ+i6bjaZ/OAHPzg7OfmnL34aQg9Em83KOSpHJRlcN/WP/uRP1+v1arNumu5Xv/rNum0EIc/z73z66e3tbd/3uXNnJ6d37tw5PT09Pz+X5GOMiZOghBCaTdM0nfeRWbTlkSscIDKzMegyk+I7IRxEbvBC6Q9my+n+GcTyWwL8h0IYY1TMTwOo4UOMMdYaa8GqwVO6LIu1dnDWAEAjQ2TZVpnv1/gmrdwHRNEWHCJiCEXIAMYdOgrIoIkGFkRABGvNcj4HlKoqueEMnettSkpAN4RgrXUppaD9a4UEOKVEKFVR9nnRM5TOWjIUGQUyQG5rT8i+jyFYiaUlz+JZMmOKLK8A2t43bd+FiIjWZmk3RWgLKMN7oqgrtL+y2VBkbLSrR9RhD9p+N6WECMYZIiJIiBhjQkTnMiI0qN4fg8jp6fF8frtaLazNxtPxaDxeL5dv3rx5dnnz8OHDzWbTtfWd+/fY+5cvX84Ojha38/F4bG1WN33XdSbLKaMY4/X88v69h5999r0udL/+9W8fPnp0en7+1dOnaKgcjU5OTgRhvaoZQS8mttsGWZr4gl2ePQrrMx7kE0ALoOKOrxxSAmN1RNmYSIOfRISjUaXNZrQWwlrquu7u3fPr6+vj48N1V1tLF69fl5k7OT4kQt93m3UnMR1OJ1ervigK7z2HOB2NP//8s/Pz85fPn4vIZDKJyS+Xi7przk6OjDXz5XJx82VIsSgKk2e9X61Wm/O7Z9/77LOrqysdv3P3/Pzk7DTPc2PMaDRq1qsEzJxEMPnUdb7v+5TE+xhCIGvzPI8cIkfjLFmM4V1ufZClQVT2LR7sWOzv9PUfvEv+4AghoCFjjDNkt9Are+8tvPuW3Wdtn8XWZDHT3hB49T4HydxOQBZJuzJrEQEZulduc/RqUrd1ppiYJcsLSaHrm75tjg6mQOJTJILOt7oNLJEV3BYeASECYhIB4SgQU06WXGYEjUBBhgBsEkoeghEQEsgBjLOGxbAAgXMWXaaFEa7tAydEQmTaNTnfFzbeVZQleOeKEJElDJQUUVZo2FpblaVzjhBTCsZYYzHLMuCACJo32nUsRxRhjszx+vp6PB4/+fDjxfxmPp+/fv3aew9kHz165L3/+MMPJ+Pq+bOno+Pj6WTy9PdfHhwdTyYTYfC8Ikbn8igc6hYxzRdXjz/8+NNPv/P6zcWmXk26KQDU9Xo0mZweH00mk+ub+XK53LRtSh609MkaIiAC1p6vMRoEMATIMXoR2TU4ClohSqTwOGZZNhqVZVl67/u+875PKWW5LcpMRBKH1WpljHEuOzk5+fJ3v5mORwbw/8fafwVJlqXpgdj/n3OudC1D60gtK0tXV1frnumeQcNmsRhgbWcNtnhZmHEfCBiMLyQf8cAXgHzh8oGEGWwAcgcYLGZmZ3pqplpWl+ysqqzUkZkRGTrCPVyrK4/gw3H39IysbgIkr4WFRXi4X79+4/znV9//fdlsttNuUoaUEkbRQMOgikexEIIRIuKYKHBc5/Kli9evXgs8b2Njw7SYTzGXy8zOz/mDfhBFhGp2Quz3+z1vUCyVSqWSm0path1F/JVXXjk4ONBCHf1+H6TKZDLT09P7QSxjKQiCEqCIrosiEk10QBnCkF5IMNPUn/2URelHdG9wPGI6dk1jB3jKPeoawfhP43QGxvPTbCipHUdxFIcWRQpKIJEg6XClMYNQQlAIDSuVGjZGlMZmiDF2kqKuFaGUOIhjwzC0eKoc4UuHmwVwlDqJAqWEjkhNk4Wh7zhWGPn5fDaIA8KJ7bD+gOJIt4TFeqoa1Gg2EkEpVGowGICUBhDgAlAZenhSKAPBZtS0TA0NUUJalDJGQiERpEHRchKu63pB2B/4YRgOSVieDSKOlCWfL9GMjRBAaLAoGKBvgWmarmtreigdVziO4ziOisaOdDj8MYw+pFBKuk4iDEPPG/iep4UQe72O53nJjDs9PR0EQafdTCQSDEmtVqOm5Vq2BARC3EQKjTgK4yiKJKBpms1OO9k4KU+VX3nlxubWdrvdpmyYfhBA23IdJ+j1+iCezTExypg5FAzR4OVxkyOOIsqYZVkaDG0aVEqpJDcMw3Vd17W1k4mjEJRSkmuGIgIolQApGDN5GNkZBxEtyxKSI6pBvzc7Xfb6/TDwJBLbMU2k/qDf7XZNlh0MBulkanZ2dmVlBRGfPNkElIVC7uSk0u1iOpPqewMp+dTU1NT09KDBAGm91Ww0Wplc2jRNz/Pu3btXq9UMgxYKhdLUVC6TYYQGQbC7t82kCUBAciSoFAiuhBBAqQClhhAKLoSyLIuYpucHDJ+TKz0VWI7tc2xg/Hlpt0lXeeoMurbnOE7E9Qy6bqRRSankE20MkCglGXfnCSGj9s/IrSnQ/Ktk+MNkVzOOY0YoMJCjUWOdBTJCNY3NMCIdKdVoUuDpuVkh4nQmeXyilIiTmbTWltRnYJrbYIxYRaQAAhH8/sBilKJSsRAomWEwJFJKXf13HcsLIi+UQnBC0KBmzCPFBUhlMiNp26Yd69hAied2tbF2I6gRqQE8SxoJIYJHQojx1BchRGO+dTNNt1y0pmwkY333uRBSoi70AyiiZUlTjPNI4zZt29Tk0I7jSGY2Go3I98LAAyXmpmcymUwQBG4q2W51+p6PQCMhB56vgCSSSaH6cRydnJwUisULFy54fvjw4UMBKITo9XoAhJkGj2JCiG3bjuOEg6Gco2EYSMmIaIREUURHlESUUk3vGYYhIZpBTHNGWslkEhG5iA3DkJJzbgkhGCOUIqWGlDKVyB3t7wnhDvrdbDZLKY2i8ODgYKpUSCQSBiOB12/WG74/EFGsQColAs8vFmZLxSIhZHt7+8mTJ+tnVjX4wTCZrtZyKSIeD3zPdacvXbrU972Do8OB7znJRKFYFKCYaSSTrms7mog9k0pblqH3PsGVlEAVSCkF53Eck2cUjMM6p+VYwMxOt0ufzUM/d5ARgh8ngLIAwONo8mmT/nDCbGDcZ04kEuB7Y4C7/l+A5AaB4TYohBACheCgkCiDWc9dhgLdzbdMc7g+1dDxDucNxHPjhUSBntMnhIAiUmoU57MrJCA5jwqFnN40OedRFDI2HNceRkNggGEYCFKKKOQxRbANajBK0ANEStBMAiqJyhM8lko6juPYaFlGECsujTBmiC5jqUHkZdL5dD4nQAipyqVSKpkJw0fFLGk0Wopzkxp6rsIwzCgIkSAIqUBSRgjKOI6E4AAykozZyVhBJJAL
5ftxwo8JtSSSMBIGEgIUJPJQEMR0OjNoj/lFqByOnQAimlai2eq46aykuL3ztBjmC6X8Xu0AY5pIJLKZDGazeuqq1R0MwvCzn/40m0rNzc0pEMsL0wcHB77v9Zq1lDmdYSXRj092TtbWV65fOCfDwaNHj1yWQNELej6CEQlJubARFChuYq1WLRaLCpVBTcMy6s2Gm0qGPK4326Wp8szslO+HQRQRYgAxJaURjyWQGNVhrXZ1ZqbTagSeBwAEoVTInZycpJNJilAsFsMw7HfYfHEBQA5aPQDZjTxC5OrakuWaALLrd4hrd1oNK5Hsx23GLOTe8tqcZVnpbOLoYD+KIsdyMMbNp09npqeZYvWDtm2n1ubOhmG493DPnBLJKKmUAgsybrZcLudyOYL4k/d+lnCcbDY7Pz/v5u36cdM0zbm5OcP0wshLJ5M7OzuO5QouAahrpsNwAMLkgW8Y1KZG6EUCedKyMAp0vYoorXqiuJJKSlBKU8XHnKNWzlMyiiKLTjAbAZCJFjyA5GJoopQhZQwAvD4KaTJCJUoAJRQoUMSgIoqEjONIMURLMgMojSRR0BFd0zQN05RSxoorVJGIwjBMpVL6TRGRoF50SlJUxI4NhpRQpJKgjoYIxZ7gqAAIEUopgaAkBSSIQRiFlAnLvPd0q+N1uypyStmuCjiNJEoBsULBlAAkChUSZIxRg5GEZTkGIVIYCqgWu5QghUCJKDGKRSwk4TLkIop4LMEmjBpGrpB3k0nbtoGCk3AWFuYME4ul/Kdbt30/9DxPSR0wEFTPxqhhJF2hYRYAkiqqw1ZtS2QELxzVdSiORxPRBUrEcFRcMcYIVTxWSilK6WAwqFardsKemi5Kudjvd9utlmO76VwmCILBYKCU0nt5s944qVWuX7+eTiRs2z46Omq1Wp7nMcay2WzUE4VCYTDo1ev1bC49uzA9MzOzvb2tlDIYSyRTBnMGQTiAgFIjkUxS0xiqiIVcckG0MoHjMsa8QaCENE0zCKJBr0cpNSjV8xPT09PddkepeH9nlzIUMUeEhGsrpISQbrebSaU1JYxUaNu2aTJAPhj0giCwLEop7XQ6ALLf71NK41gQEmt/4vf9qamp9fX1brdrMqNWq6XTaSnlwsKCVo9Lp9PNZvOjjz4ql8uXL1++tbMlpdQBfxRFR0dHnXY7mUx+7e03DWLoRlEcx5zHpmkAqFQq5TiONwhs2263O4yxQbOF1NI54RALBUIIEKiEksZwxEfXu4c4ax0L6aoaGfYIhkUsJb8iIXyusPf8EfNQSikUBwqIlBCChIGCKI4kgAQUSsZKEY20BkUNkxomZYyMY11CFRI55NNHDbIBAAlKapQSoNDsUWQ444IgRcyJFjgBlFr1VaFSGIahYVuI9Lhy1Op2qEGYZXQa3Vw+g6h0h44RhShAZx2IaAAzgBpg2BZBwYFTGYeCc8WFkkoJqVwWxahQBCEPhQA0kVFmmsx2mGmYtl2aKi6vLK2tLXERJRLORn2n3/dEFEeR5rEbMl8ITRen2zI4zq2FpsyTSkqgSIY8RRoIrltq2okDAGPMMRyZJDGP4jhGNBCJUpFSSCnd3NxcWFhYXl7sDrrHx8dRxA3DSDjJhw8fzszMzExPR1G0u7t9cnKScNy1tbV6vR75PiGk0WiUivl0Og0AWmAUUWmS/Hq9nswk0+nMxYsXP//yFqEskfJcJ+lHca/bt22XGYZFiRJy4PmaYdEwTQLACHVMK+m4ANButnq9HiqwDDPiscGsOI6z2Wy33bFtt9frJZPJOIp0wc515fTUbKfTieP45KSulEo4icFg0G4HhomWZSSTySjyarVaOp3EkTSdaWr4LjJmFtKF8+fPr62tPXnyRA9/6c5hrVZLJBJWKkUpLZfLmUym22p/+OGH2ZXFRCJhGUYYhqZpJnNpg7Eoio6Pj9OJpGVZmVS2XC5rNcwgCAKLaeYLABgMBvl8gdbbBGkQRVIpRKqGfmz4X4bni5y6SgkAZGJGaSxnT5GMZzImLfC3GKFSQik54vBHpYYrmzJGQKEY9hC4BKQKFYQ8VERx9SwnFIoIoCIetQFHLBhcgBAqVoRzGcnYZGpYVAPJJUpigC7voAQKAFSTOiqFUgIgNQyLIOMxD4Mojnm301cwZGpkSdNWSikhCSiGYFFqokEVsZmlFAqpdzBUiqBCQhiyRCRp4Md+LKnhWGbCTqUN2wJGy9PTFy5dnJ2dNh2mEAaeF/GYkSFoeHjrhwPBkiGJR4mxbuzAqMxFCEopJShKh0odQ5E2BZLKOI6jMGZRxKiBjCYd2+uRiIBuJCJIQMIYOXdmPZvPBUHQajTS6XS5XGy320+fbv7gBz84OTlpt1pKqWKx6DhO4Pn9fp8QMjc3p2El3W5bCKEzTw/CZrOplHBd1/f9jY2NQin/+uuvH59Um612v98HRYhhas1TAGBIMslUwIyE7QAQXS2zTavT6+ZzGaWwWasLIc6cOZNMph8/fpzO57qdTuhHOo20UmkAKTifmZlq1Oq+H87PzLdanV7fC8MwnUoN5MDz+mHom5wZRtJ2TCFCz4+DIEA9jKbQdV3TNB0nQSlN2YlCoVCv103TrFaruVyu2WzqLcx13W6v19rdXVlZef3116MounXrllMobGxsDAaD5eXlYrHY67ZN05yamrp47vxgMDg+Pq7VqtlsemqqpP8v3W5Nc4L1BgNEFAqR0SAKIy4kEsKQUoODIkgQqe6gDf/XI0scVwSelekmquV6avZUGea3HIxIQRQqlKik5FIolIhKSlBEylAoTRLMpGQKKWLPC41YjqeohsWz+Jm6DpWoWfc5V5xLrlApCSAYU6aJBtVeIR7SAYNSQkmJAEAZUARBoNfqxDGfm11wEqmtvad+GCWT6YHf1+UkUIo51FBKEQYGoZbJEqbpGMzSwH6FwgKBRHGToDIoEkBFjTCGMOaxok4ykcwUrERaUQMYXVhaPHv+HGO48ejB4eFuFAdRFHmep5mFUI8x6lm4KNbJLoJUBJXmvudCFxKUrtngEFWolAqCQHAFQ2VEzfYtFCMI0mJoWsyKKUUQSr8LJYTMzs42Ws1er0eQ2YbV7w84l/Pzi/V6vdvt6mE/3x9wzpNuolgsZjIZxzQPDw+bzSajWCqV9DRWKpXs9/tRFCTTSQDodDq2a9m2c+XK1Y1Hj/YPj+I4zqTSqWRaSpCKM8B8OhMlEoSQKIq6nb4fBq1+3TQtygwASDquk3BXV1Yopft7O8lEQgoRhmEqlRl0e+l8odfrJRKJ2dl5iqzbbXf7g06nWyoVCwVLSlk9qudyuUIhF8WB5/XDyLdtpgNmXR7QwNQo4oaBSqluv1c5qQaeH0VRo9HQaFLXdeeXFuMgjKIomUrpcDSfz8/OztYD7/z58/1+v9vt9ru95eVlx3H6ne7BwV46nZ6bnXYcZ25+xrKN6nHl5ORkupQMw9CyjV5vwEyzXq+3O71u3yeGLaRCyhRBJbQPHJU0dLCmRwdx7P1QKQVCAR3NhIIidEhCObZA9Tyn44uHVLEeNwcEBVKzAgLBOJKcc+CCcE6
FRNs22Hcx6nGRdCIuMNS1cUxfdC3w/TJLcrzvLyqqroYRjV681rL93Z3d0Npt5xeQghlDYWhuGrr748GY6EED//4Geu647H44WFhW63K3XRIISWZTHGJhMvjmNJS2WaZqfTURTVcRzJf/HSSy9t7+xNJhNJ9atpmqxIG4ZRFIX0jRIkHEURhNC2bYxMCU7AGDuOY9v2DOE0W7cSRyXNksKnY98AAIlcUVVVgpyeqjOcz+Bahi2n7WhOsyxjJccIE6JwVvKzXAxAwJFAHHIAgRDsfCxYLmBZImHgIreNOGfmR/PLdWYX83A2Mn8xYC7NE3NjR/PmLj6fnnH2AZdfuuwJL9ve31qwAXNWLea8Hzh31BDAsiyldkUY+QAAXdecqttuN5FCIAaEEFamw+FwMpnqmtlstq5cueJ7AcZKvV6ltBCAp2nMWFmW5WQyGY0Gn3762dWrVyHAf/VXf8UYW1lZsazK4eHhm2+++eqrr8ZxfHR0tLu722w2r127trGxsXu4U6vVOOfv/vRnnU7n1q0buq7//Oc/z/MUAAEhvHPnVhgFlmUkSeS6bhkixliSJEmSSD9QFEWcJgghBARnQJSAl1hBUEEYIoAARwhRAbgQTIC0yPM8LyhTFEUAxIAs0UBKOeOcQ3Ttxq2spP3+cGlpYWFhZepPIBS27TzZ3tV1vVarIKw4lWoUxScnfUXRFteXLd2QtyrP8+Ioarfba2trWZwkSeK6rqSikgNWQghN06T8E8ZYdtiLopBI0S996UsPHjzQdePo6OjJkycSKGOYdlmWcnJC/qaappmm+eDBA8aYoiiWZQEApKsUQlQqFUqphIAbhiG1biTDvPRm0nmcAZWFKMtMGjw6n1YFAMi5sBkj68wIMcYZ55ADIQCCGCkaA5xzwCkjQDAgmEyJABdQcC4EBBg+053/VdbtZT90wX+Seej3BSOZf/N8mDp/eHDJE86OAP+2pudlT/sF0enls589I2bdTwGYYJAzCHi16srbSlnmeY5VBA3NNEwtDfNbt264bm17e2d3d9uyKoSojuPIoDGOQ9+fFmVWFLmmKaurq9VqYzQa5VnZ7XZl7V7y7Z+cnAwGg3v37g0Gg+WllRdffFFRlOFwqCjwwYNP6vXm8spiveH+8qNPqtXqq6++cnBwYFdM01Idx4mTIMsSiESzVQfZZBa4YowNgwAAgyBwKw4+v60xJiAAEHIkEKWlwlQIASQKBBwRzHNRMlqUDGAkIMKKAijPaBqlWRTHULHzzGCMaZqhaCTL8yiKJl7EmHDdumlVgiDK8wJCbFdct1r/+OOP5QxUq9UaD0fHx8eWZa0uLW9uPmw0GnEUvf322w8fPnzxxRefbD6WnL9SGVsIked5EAQAAEppv9/f2dk5OjqqVBxCyO3bt4UQuq5L6if52+V5Lu0kiqJGoyGBaZRSQogMK4qiGI+mYRjKvh8AQNK9yUbibEHOLLAoipIXjluhlMZJlKRxq9XSdJUL1h+MZrEepVSCOhBCll2RfGiEEJOoHIE4TYu8UFQNQ1QKToFAApSccSAEFwAjCAAEQMKpZ4Y4W5fw/En54LnmJy7oE15AeV+Am80A1uB8oWPl+TnhBTf4jJE8j0T1/3+eEHMhtcgBhJKKWQjBh8O+aZq2bcuVQQV3q9VGo+HaqsRblWWxura8ceWazEBOTw/b7XazWXerlhCcUqppSqfTOTg4Ho/He7sHe3t77Xb7pZde2tnZ+4u/+AtFUer1Zq1WW15eXl+7IiVsr1y58md//cfNVmOht7Czs5Pn6Qsv3nZd1zRNLuhw2E+S6LR/dPv2TcbKZqvuOI7a6Y69qYxPCCEQYCGEnKkjEBEsR+MEAAAhAAUUEDIBABcAQVXRhRCUA4xxGIYIKgACJkTBWZKlQRxFScpAsLy8WrHd8cQTgOm6XnHraZq2Wz3TcgWH00kQhnmr1VlYWFhcXD69eyIR6rqqLSwsUFYSiA4PDxcWFhCEw8Hg008/Pdjbl7P2lmVJi9V1Xf4cknXbMIzr127KqvLu7h6E8MaNG+dM3mMZOkp7m9UdsixzHAcAIDlmarWaoihxHEOgKgq2bVMIQWkRRaWmaYZhlGXOWAkAVxQs5VSKosjzFCBBIKKMp1nMitI2zLN2SEnP2xhkZoSEkCiImBAAAA0T2zAhhARhBWYCIy5kKCoAEwwIweWk7Rdh0L5g0V4wivn9ybyZgfNolT/LOjH/GZdjTgBm+qnPGQyZBbfzz1z2tLOjffHFXC7MyB+Rcy4gQhgRhDSVaKqiaqhSser1mqKqaZqHSSx/aUMV7XZ7cXFRJjbDUT/LMqKgvOAlzcsoB0A4joMQSNN0OBzKW9Irr7zy6quv9vt9KQb45S9/eTwej8dTCGG1Wm2325L4aHt7+/r1K9vbu8cnB0TBYehPp36WZbdu3RoO+/V6fWNjPQj9drt5cnLkeV6e523TcCmtVqv1ai1K0iKnaVHAgo6nnkoUQ1V0RREEQagQAQHEmqFDDLM854JhjCEmRNUQQrplcw5KxgtO85JmeVkwziEwbUvVtYKyaOIRQjTDMnSdUnDt+vXJZOp5Xkl5lie6HmZZsbOz0213hBBlXjDG2u12r9PO83x/f79im3GWLSws7Gw9uXLlShAE3W7X930OZrQDQZqmEuTAGNvf39/f33/llVfiOJbd1CAIMMZS/LQsSzltOJtR6vf7EuokRx/lsqxWq5p6xuUhg/aiKOS4xs7OjhyskZhnVVUl2kkQKKsyssMk1VSlk5SDCzLgl64bAKAizASDACsAISAIViqGrhGlYLQUgHAGGYWQCnYOGQP8jH7wrEzDn00Rn67NL84J53c9Y8WZDRPNnB6ao/ieRdK/lnmAOQv8Yk/4q2yX95/dIM4BdBwhrKhY0xRdVw1DK8tyPB5iogIA8pKGUTSZeFndOKOrgVASfuV5xjlrNOqe521tbUEIX3zxTr1el1xPk8nENM1r165JXJVlWZrGZWNa6goeHh6envRfe+2169evR1FkuGxpaQFjfHh47PnTWq2W56ZhGEKIWq2WZUm1Wk2SZHFxceqN6/V65g0IIfV6vdUKseePpz7PspyXcRxzTScQKYQIiBFRsKIqqqoZhqTDK/NMwhgoY0gIx6mmRc7STGSQUUEFwIiomm6apu/7qqq2Wq1qo06wOp0E4+lkbf3qyUk/zTJdN1VVrbi1oiiCKMon4cLCAiHk5OQkjWOVYNd1DU1/+PBhrVZjJW02m4qiLPZ6QRDEYVhrNQkhsgGbZVm1Wpe54mAwiOP45OSk1WrVarXpdHr9+nUJe5B4QFnykaNMskVUqVQqlUq1Wh2Px1EUAQAcxxn0p3J9SsxqkiSmacqcULYHZJf4XE9FMEbzPCWESBQUQiAIQkppterIZVMURZYlYRhwzhVF0YEMa1nBhWBcVUuMFVXBEEIEOGACACIEZ5IsigPIxVnvQgAAzqA3kAt0Po1xoUXxq6xnIhG00mXLe4n02hjj2YgUAEByHIhnqzpP3SMAiqKIWUsDPHVx7JIW79m5ztHgg2cd9DMe8rwVOfvc
WbQsj1CWTCEEQoyBwBhrmoIJzPNkMhkyxoqiZIJrquHWG/VGy7btyB94XvDZZ48cxzFNHZxjfZKEm6b+0ksv5HkeBFEUJVJlZWNjAwBwenqaZZlhGJzzIIiiKGo2mxDilZWVv/t3/65pWE+ePEmSRFEUVcXj8bjIabVahRBnaa7ruu/7zWYziiKEgKZpcRyapl6vNSGEEAFaMLkQJ55/Nl3OmRdEpX42jcY5l+SZRUGJpaVpDhAyLJszltMCAUSIGmdpxXbygiFSBlEc+GGz3ZlMJr7vX716tdPp5CXNs2IcTQ6OjoMg6LQXS8qTJPM8j6i6W2s8ePAgz/MrG0tCCMuyNtbXhRCtVrPMC9kq4JTFcaxpim2alUql2WxeuXIFEJymabPZrFQqnueVJZOFTV3Xm82mLJnqun7r1q0wDBuNxvb2dp7ncq5XrhbP86bT6euvv46QbLFO4viMnkPTtIpj27ataVocx5ggRSVCiDiJIAK6qkkPgQkyTJ1xO8vTeqseBIFt20WeK4qSpWkUhoqihEHQ7/dd122326WmlbpOKQ2CIGNnxNMqJipxTV3nAGRZtrS8Mva94Xjke76fRJQxxdQty6IUnS+8clbdkTWk5xobf1boYba8nwlHJRLlPK7js5fn6Srms8R5LPVTuxKAUipnmTDGEIpZLRigzw0vn7td8Kiz5ubnhakYY4wwIUBXSM216zXXMlQI+GQy2d/fp7RstttOpZqVNAgChBAAMI6TarW2sbFRluWDB/fCMJQYjnq93mw2G41WWZZlSQkhUuekLMuyYAiher1er9fjOI2i6MGDB7/xG9+8fv16nud7u/uU0kajwRir180oilRFr9ebSZKlSSa/BzmPl6ZpWVCpXZckWZ6nNUVACHWFGIYhWQYZFXme2bYDCaZMZAXFGKsqAAgjTLJM1gWZQAxwUZZUSqJEScIZTOKMM6CqqqIoRVEUOW133c5Cp6Ss3WkdHhxhRX28uZVlmR/86euvv9nrLa5dubqzuzWeDFfXlgEAjq0Dzv3ptN/vU0rTOC7L0vf90bBvmqbjOIamFEUxHA7jOE6SRDUN2fdLkoRzLlHjhBCMlNFoVJalpukYYzlfJpm5JaeRZMeSTg9jfHJyIsejJLuMbdtCiN3d3W53SXLwJEkiR5zkjx4EgbRSybiFEHIcxzCMaTANgkDGHZL2u9/vM8YWFhYcx+l0OpJpSlZoIYQcCICgPI7jOoamZVlRluXHH3+k6ppZsddWl5M8mwZ+nCZ5mii6fZZOAQYghxBAJCASYmY7MzgK5AByhM6CzflA9EIk+NQIZ8YG5/oTGM9rMn5RAMkYA+eUFrKJfFY+fl5z/1cxxfkYevZgdjGzHQhSAOCUUoakNgBASHpm7LoV0zQNQy+KIi9Kt2q1Wm1As6tXr4Zh+Gd/9udFkb/88stS1aTdbhuGITlXwjBK4gxCqKpqliemaRKsRlF0xu0HsWEYvV5PFiHkMMFgMAAANBqNw8Ptfr9fqzYopScnJ6cn/Xa722i0IMQIEc5BmuZ5VmCkViqaadowPhGcFUVBWSG7ZIyCgjImOMsKmtMyLwC3CcIK4QhxGiaUFZALBgUQjJcUQM45BwykaZ7npYDY0E1VTfKkoJRubKxXKtZwOAYAuLWqlNzwAv/FF19eXFwsKHVcuyzL49OjSqUyGAy+9Nqrsj5p23YUhHEcc841VV1fXwcAEInkFQIKYBmmoel2/YyNQs7ixHEsw6V6rWlZFsZYtl6kqUyn0zjJKKW6rsvSyGQyybJMdiYIIbJvYVmW67ryOJVKRaoOW5ZlWZbs5TDG1tfXj46OKKW1Wg1jvLW1BQBYWlpy3cpkMpL65K5b0XU1DH1K6cnJkeM4kv9O1nIUBVuW4XsJJgRgBBBkgheMFqxgvGy26hBCQDBnOS8zAoRjGFhVgpQBAYAAGCKIAAAAI4whOpMREwIAIdm75QixuDTld9mUyOUpeHFOWjo/DzE7BJ8rtCB0bq7iqVcUQsBz+5GjGL/WdsHm5zEK4HMeCy4Y4wUoSppTVjIOAWcQilrNDYJoOp1ipLZ7C72FJdu2G+7iX/7lD9M0/upXv+xWK3fvfhJFwbVr16RYqjykqujIViGUNKy8Wq1WbFdO2QghdN1sNBqyLHF6enrz5s2VlZUoiiR1hWEYMnxijAEB6/Vmp9MzDXs4GFmWZRq2aellyQzD0jSFc753+CnnXK7Ler0uIFaVMMnyMIgppQXnQghVpQZlCmUAUEOFBBKAORRAcAEQQhBjiLGuFAUtioJyIBgQVKRprqtGo1kjCuoudMqyWFlZznP69d/4BmPs8PD43/7hHx4dHd154dZoMtQ0rSiyLEvefffdpaUlQ9OhAIwxzwuAEJVKpdHopFGcJElepARhwzBkuJ7xM55oyeobxyk8n0va2NhQVXVrazvLsoWFhUqlsru7W6vVdF2XTUXOeb1en/3usnHvuq6iKFEUKYrS7XYHg1NZ15G8dVmWpGmcJImEZCiKEgSexMRK0CTgAnIRB+FxXgjKXNd1LJtS2m40K5WKlLJREEYCpGnGOYeKQoGgZZ56uR8EukwviYxRIVEViDEhmLIyCMMyKqlSkUY1I3MBc3wuF5bxM4nVpZeeGqH8BueD1HlzuuA9GWNYUWc7Sy1VIQSUOd7Ml3JpogghVPJfieXqbzVCedu5HJcKIRCEiqLoOpaCZzXXIhh4njfoD6MoAkD0FjqLyyuUga2trYdJkcSpAODJk+1er7O0tMw5E4JtbW0ZhuW61apbd90aISpnglKu6VC2NDDGrVbLNM0sKySxRaPRStP08PBQIWq32x2NRtvb23deWRccFgUNgkhV1VqtBQTa2to2TTPLckoppazIqRAwirLpdBqGvqJomm7olm3ZVUS0JC38MDJNsywpKynEmANRMiHJIEzVkLBDwUoKEEGKvFfmeR6Eke8FaZZTAUI/KEq2sr6uaIpTdTRVj6KEUhrGgaZp3W6XqPq7P/tZXqTD8eD4+FDTtKOj/eWVxSJlg9O+PCBjjJ6XCSbDkSRAURUi6wVRFDHGpnHIGJtOpxLpImfEy7K0TF227E3TlNQn0mijKKpUKhhjSdsjYeKe51mWpaqqLOokSdLv9/GZTBCZFWaks5UeQg4fS1mLarW6uroqhDg+PlbVMwJBic6XY2hhGEoZ49kksTiXs/YlZoCLssh5STVFbdRqNbcqBM9zWhQFURUAoUawoRAgWJxn6FzCCM7J2c/Swrl6jADgc0eZnskJ5ZvlPX5WzJTOcNarkJmrRCqgeUDMJccq33hWOPrCMccv2J4pqIqngSi4lDECeV8giBBMiCzhMggFIQohCGHYbNZr9bbj1HzfP+2PwjDcfby3sNC9fv3q6tqKpilJGoRhUJal41QBAEmS5BnD2IMAcQ44B71Fh3OO4NmXwDlXFE3yjrZanWq1Gsfx5uam/BqDIPjpT3+6uLiMMR70x1lW6HoCBE6SrN3unp6e3rt3rygKyzIMwzRNI/CjhYWFsmQlZcX5byn7YEAgCFEuBKMsS3MEIGNMV1Q
dCkVRIOdZljFaYHjG8+X7fhgnWZJFScIAKQqqavqt6zdWVxerVWdra6fd6o1GE4zVD97/QDXMRr1FWbG4tPTyyy+/+urLtVrVC70XXry9fX8rSZKjo6MkSeSZZFnGSypXv0oUOQifpmmZ5WWWc3JWPJf6BVmWSROVReCiKGSrVmoYKooCQCYh1JIqSgJiOOej0Wh5eVnTNM/z0jSV2fhwOHzllddkLTqO4zRNEULNZpMQ0mw2kyQBAMg8YjgcysZGmsaUUtM0Za1LDoVmWSbFTPv9fr/fl7UuCfSJiwxCiACUfCGc8yTLTLOoVh2JZQWcQQRVRanYtsXNeJpKA5F3Fmk7su9/wRDOndjzPeH8M0TMlWTAnKuZTXzNyqfwfF5pdixwHoXC87nfs8IM/1Wb75e3uTuKAADg88H8eea1eVOklBEEAECU0iSJPA9jyHSNFEVRr9fVjoawNpn4h0f9OMlVRX/hzktc0DCMt7d2KMuLImu26isry61WqyxpFCbTqR/4cVkyjBWFaHfv3lUUxXVqruvKz63VGp1OZ0ZfqyjKZDyNosh1XQDAwcGR69YgxHEcm6ajKjqEqNdbpCWPwuTw4HgwGFi2cePGjbW1NVXVqlUSRUkZRvI+KLteecYmnlfQUgqG5SgXQqiqCg2c5zlgnHMeR1GZZ4qi6KqEPUFVVXXDVvWUCUCI6rjVt978Ur1VyfN8Oh3bVgVhXJbl7u7uxA++9a1v3bp1i6j4+vWrjlNRDXU0HigKls06aRhFludFnuc5BvBsqQHIGGGMZXEi+bW4RiRRmBywlHpmcvWnaTqZTHTdgBDK6ovUDJVJnWziSbI/WUlijIVhmGVnViqEIITIuFQattxTjixgjKXSweLiommae3t7CKFr165t72xOpqO8SG3b5oIGoRfFgaLi3b1t27Y9fxKEnm3buq7XiKsoSjQc5Xle5gUQnArOGIuSWMGQFrnjOK1Wq1qtljQfDIeDwSBJknZrMc/zGdYcAIDPWWXOPIecKDhHzMxXMcDndMLJbA7wQiFEfimze7Ps6lBK6fxQ75wRCiHweWEGwqeaZ7+u6M2FgUg0m+rnz+/jl2WpEYLQDFwgpCWbpmkYRpoUB4dHnheqqq7pVp6VrGTj8QQT2Ou1iIKTJJlMgOM4YRiZpmnoVrVaV4heFJxgVdf1Vlf3fZ9RzjmvVCqyspdl2cbGRhynsgAohGi1WpZlDYfDtbU1SunW1mPOxFe/eqNWbUwmvmnYh4eHmqatra2ZppnlCaVnbBqD07GASFVVy3EFIHg0HU+8GdpYToRICjzTtKrVKkpiRcGspBhjhhCBiGAVIWTbGsaKXXGCJC0oM7ygVm/cunXr0cldINDS0lJRFKpqPHnyUNf1OiaO47z8yitTb9zr9aIoPD4+pqzw/enJycm1a9dkc+9gb78/OCWESNyJpqi1Wq1WdymlIzIIwxAAIPGcMjaTLmuG6pReKE1TKTAqwaira1fkejMMQ1q7LALZtn18fDwYDOQkZ5ZlEMJarTYcDmUWAACQuG3Jzy37DTIclUVX+a7XXnsty7LpdCozbUkz1Ww2Hzx4IBmlZPUVnAt7tbud8Xg8ybIiz3heAi6Q4BBwWV5OkoQLmud5EsemadZqNVZpSx2UOI5loC5tUMbql23sgtuYZXbP5IRsjixx/mU54Cw51yThPITQMAyp4HF23BlOTkKQhBDn8SoXXEAg0Fw/Y+5fcA68lrgDcS7XxoXAkuJQmrYQQsyIwM+usAAAMCTQ2fSKiQudkIquWaapaZpODAJNgswoDOOpBwFvOU5d1+I4LmmCLDiMioojABBFEeW5KPI8oGKH7ne7XV2pVBpVmbyNx2PP84bDUW9hQSE08MeKUnS7blEUp8d9XVeXl5fjOBa0jPxJFEW0MEJ/VObR9kH21ltvLTL48/c/+L/93/+vv/3d7xmafnBw0Gv3KpZz5/oV19QsrdJzOya36q36ONVomWuEaIoCeF5zyMaKZQD19HgQ+lFRZEbdbrh1t1LXNBWXhauHSADFQpqjYahhca5koGhBkkW5h0Ta9/2GbrzywhorxrfX1v743/8AEWV1/VrBkEqUarW6vLauEG2hu9Rudp9sPkrjyLR0moCdzZ1J4D3eemKa5iuvvPx3/5e/ByH8xS8++NnPfjYZeLW6u967igkpswRVtCSaXL16VQ7j6qamGep4OomS0LQNoigIIcrLrMgVRUEMUc5sWLEqdp6k0+l0Y2Pj4OBgOBzKEd6z9l2WVyvOYDBQMeGct1qtMssnw4GgZRCFL770ku3aDzcf1Wo1igUjQABm6kTRFUGZppJOs2FZ1mDY1zQVABGGAWMMEVJxXYiVVm+BAVJrdjvLa2EY9/v9PM/1Sfj6izcCQg4oHzGQYS1J85xjU3O8HAhViCQ3SmaqpF6t8SJP0ySYnnjTKeCiZmiUIsYYLbMoiiqmJUHnBtEBYPI2SghJsSwmnxc+uQAAIclgJgAACApAZuv7spOZ7xzKZFRuT73cHNoGzJnKcxzcnO198Q7PVGWevWfIz5JLbnbXqJhmlmVRFBm62mq1HMtUdU3T9DiK0jQtyszQdALP4XgQIoRqtRo6E2aDtm0jhDgD+/v7w+FwOp0uLi7KwEnX9UajAdCZ4M50Or179y5jTFe1drsp7zuS29P3p1Iiqtls5go+Ojj8xS8+ePDg0xfv3HRdlzN6enq69fhxu9l5/bW3bt++XeRUJjBurSoEVBStyPMsDU0dq6parzUNRQWcaopXmnnFchrVlmU6mmKqqmqUSCVIU1RDIRhBwCkUAEDMIYxLmgRRnCTNduv2K69du3lHcPjhhx/WarV2Z/Hje/eJZn/1q191q48mfrC4uPjjH//44ODgK299aTrOTk6P4jC6fv36p5sPp9PpdDp95513/vAP/1AI8fWvf/Uf/+N/LIT47OGDP/mTPymKotVqfPOb38yybG9v7/bt23Ieem9vjxC10+kkSbK4sCznlSSoRQjBGJdTFEEZLC4uyo4oIUTX9cXFxf39fUmuJRnWkiSp1Woy+NzY2BiNRmmaPnz40HIqGGPP87I8F0IUZRlFUZkXrCiLPGclJYQAIqrVqmEYYRiGYZSXpRACQry+vr6zvXd4eGi7jhCQc+44zsrKypMnTzAmEMJqtUo59KM4z0skQJZlUHBGC2YaqmPrmgLkkuMcnNcdFUXRNQ1CqGmagrAkH5NjSQAALpl4FSixbkJgzrnEugkhIH8qz0aeeqpLmM+ZHQIA8Dlh8PlE6Zl9yqR8hn64bHKzg8O51t8X2OH8cS7vNisNn2OUmEwzbNt2HLvZaFQqFUltYBhGMJ1QSpGBFE1BWVYWVABOiFGtugghz/OLolAULJnmZBcrTVPGSk0z5FCcZZmDwZBS6jgO53w6nSqY9Hq9paWFo6MjuYZ83//kk3s7O1umaTabzQyw69ev/29///c3Nx9alkXLoigKKDgh5PDoqN3ai+N489FW6Ed37ry4tnGFx9ypOoFfhmGsKRYxdKTrTsVs1Nzd7Z2j/aMio9NgGoWxZVWqTq
1qaQrGEIg0L2iRQyB0RcUqTNK8P54UDCyurK5fv/Xi629g3fj00aaqaMNk2hKiWmuYtiuEiKJoeXn53/ybf7O2tn779u00Tbd3dwAXZZH9+Mfv/L3/8u/pur6zs5OmqcSRHR4e/5N/8k9ardZv/873/vk//+cHBwf9fv8//IcfSJu5f//+0tJSr9djTMgxwkePHslIPssKmdepqiab7Lqu66pRa9SDIIjTJC8LytnC0mKcJlmRU86iJK7VakVRmLZVFIWAQFpys9ksKRVCuK47GAyEEIZhFBBqmlZ1XNeutFutquNSSg9PD1RVlTqtGGPbcWRG+uDBA4VonU5H0bXxeCqLOnEcB0FgmhbGCiYqhyjNizhOoyio1WpAgDzPC4I4N4UQlLGyLKGqAsGEELTMAQCQEBUTFSOCkUIQxlhXVCEEQWfCEBixubDuHFjDZ7UZhMScEV7e0Dml/rxNypwNnStjz3vCC43IeWObFTwvZJ6Xt8vlVvlAFmzm01d5nNF00m21a7WaYRh5ng/GIyEEbiBd13XLVBSl0ahhDNMolikWMJjEMcjsVQhOaVEUmeM4EMKizAbDUwiwnLjhnFoVxzCMVqslGC+yPE1Tz5skSSTLgEdHRycnJ0mSvP76m6+99lq9Xj8cHDPG+ifH4+Go025dv/5K6E0nw9G9e/dMS3/hpTtra1fCKN7aeXf67k8m3viNt16p1+uEKEAgoupYIYATw9DddgcAoCBlOp6Ox9PTUR8NBrbt0JpVr1erFQdAQBnkjArAVMwnQeiFaXtp8dU33uwsrxLdiLKcCq5pxkJvCSmqbdscgP39/TAM8Wj01a9+9f333ycEJ1G0sbEBBXj5lReTKH7v5+85jvPWW289fvz4Zz97bzgcrqysmKbZbrf/+N//yR/8wR90Op3f//3f/+3f/ju9XmdnZ+cv//IvDw8Pj49P4zi+cePG+vr6yvJavV6XJOWyhCidw9nMrm5J3icZC2RZJgnwp9OpTPlUVZUSogCAPM/zNJPLwDCMWq1mOxWE0OlggDEusiwMQ8E4BtDQ9TiMRqNRvV2ToO1+vx+GUavTOVOeUZWlxRVd15/sbO/vH0rBxiAIFhcXsaIWBWUcQKIAiCBCcmSFiVIwAIAhVzvSNGZbumlyWpZ5DuS8Py04wmVZClVVEEYIQsAxRkhTGEFCCJ2Vz67nC9VRDsDzjFBayEzmeobenrUxwLMDUfKlC23GCx2FmQX+iuHo5SNIGi95d5lx9SOEVlfW5FSb7/tFntq2XXNcVVWDwFdVXbfsar3GSko0VQbSCEPOKda0Wt0FACCEyoJVq24QBJZlCSEk/EpRlLxI+4MTJ00BAGWeMcZGo0FZllmWJEly584dz/M8z6OUdjqdWr2+f3Dw45/8pCizmzdvLvZ6t2/eSNMUcsY5f/PN169fv+p5gVt3j06P9g73GWCdTqPeaUiaas4BxAQhAgQCiGi6GUVRrdZo1ls0L0+OTjc3nxwfnnjR9P3Hn16/fv3G9WuWbnBAMlrkrIA59+K81Vu8fvvFdm+pACCeekhRm91eMBkWZfT48Van2xtPw/d+9kGl1rDK8v6Du/3h4Lvf/S7GOEuid378k62tLcPUsiLd2dmRTK0vvPDiZDKWTmwwGFy9enVzc3N1dfVf/at/ZVmWaZpXr1797/67/32e5z/5yU92dna2t7e3nuy8/fbbQohKxZ1OfcuydN2wLEvXdYnJPj09DYJgZWVF/qaGYezu7uJzjSBJZiGpKOTYhFtxCCGPHm+61apZsVGCu92uFwSu62IIoygydUPCvmlRyuUqZTB2dnYOD08o5wsLC9Vq1Q8DCfWsVquKogEARqPR/fuffvs33srLJIjikgnLqgAAMAJlWY68CeDM0DRD0yhnEELF0FVVVWsuhjBL0rIsEYCUUgwgBxAJgCBEEMqYU8rbc84NmEMIIXhqLHwWlZ5v5MKin23SCM9haGCWB+I5OkMxt80m9Oet7kII+kwD8HO2+VcvPBYznhsA8LmS6cLCQlEUEgOlaoYso3mBHwUhwQgphDN5LZhoKoRQswyJZZPHJIRwDsxcl7FlURSTyTTLMoQAY2UUBSfHfXCOX4cQNptNia15/PhxURTNZvPWrTuO41BKR6NRGMYbV1am48ng9OT2C7fzPC/yNM2Tbq+LMdZMw7SMJEuXV5c6vW6z2VpZWbE1Q1E0WuaaahRFKRjHiBcFFUJUHcexbU65pmkAQTnZEE5DAYgfxJNpIOVvVV3DhBwORm+/+MrGzduckDjNKUKM0ZKyshSj0WTryXa7szD1vcdbW29/a63Zai0t+cvLy0mWxmGUxuF/+Q//wc7W9tbWVr1ZV4h2fHy8s7MjhLBtu1KxJaZMwmgP9o9UVV1aWjo6Ovrxj3/8/e9//+tf//rGxsbf+3t/b3tr9969e6urq7u7+5LZkVIWBAFjTNfMIj/D1mCMbdv+/5H232GWp1d5KPp9v5x3jpVT5+7pMDM9WWKUJSSEQBIS0TZwbLiC4+NrAT72se8FTLA54CNfc4UIAgQCC0kocqWRJkdNT3dP566uXDvn/cvx+84fq2qrZkbisZ+7n3762V1dtfeuvX/rW2u977ve1Wq1QM7W7XZhbWgYhrquwyHreR4hxHEcFjMLCwvValXVNNd1G63mkSNH4P23xmNCSKRqmqxQSlVVzefzURR2u93xeEwISaV0SVHgyD569Kjr+LVarTvoW5aDEFJV9fjxo8PxCCFk2264P9s4Ho9c12MxE8UhIonnO67rcizmGIwQyhsqi5EgcDyLSSyGIWYQZhnEcRzPcgghEkeI7plrkDgWaDwBSClGFFECxSiDEUIUH8iE3y886Gu3mgFeOuHxD7KLB3/kjSGN3lCR/uO3g2nwdaEIumrYE0oI2dzegm6YFziO48IoanV7/X4/n8sgjKMoGQwGYeQTQgRBCigmhIgiz7IYhkfT6SxC1HXtbC4NL09VFYbZs8oTBIHFnCzLsCZaFGXAxyH+fT9sNFqm7UA3mMvlTmRPDloNSmmSRK9evmzbNiewpVKB5/mhOWQYzg8ChmEWlhd4XhRFuVyp8DETxyHLCalUmpIgSgKWZcOYpI20JCsxoZ7rIo4tV6teEFieXTx8VNf1kOLRyHQcj+M4HsUEx0auVKjOYlF2w1BSNTPwu91uQtGw1jAyueMnTzle0Gp2Tpw4cfbs2d167Z577tnbKkPi1Tu3EEJrq3emp6dfeumlpaUlSmkYxNMz1Wq1OhwOgZELw/D06dNXrlxJp9MvvfQS7HuhlK6vr1+8ePHa1RvHjh1bWVnJZHLpdPapp55SFJUQ0ul0RkNT0zTQx2iaFifJcDSCQ9S0LFGSEMYJIRzPR3FsGIbreQhjPwjwvkva1NSUrCj90dC0rVqt5vo+kBwMwyBCe72ewPNpI+V5XkTDzc3NWq1m27YoSkBL2rZru46q6LquswJPSKvb7QKuWyqkZFnlRRkWuULDFRlR6Pm2bSOaoIQkSQTEYBT4jMBalgUAIYuxyPMMw7AMTpKEIoIJjcMQISRQnmEYS
hIGXHs5WA2ECKYIU0IJphjhPej/u+jo68IGhBEHWzLo/SZOO/A70AME+vesbP8Ho+514Tf5WXSgHUX7e3yB24WXNx5ZmUxG0zSe5+MkimMiCoyqKqIscQwbR6HjWEkYMeweA9kf9NKZFKV0OBzCYATLCkDyAMcF4Q14j6Iord1WNpUGWpnneUri4dCFwXDTtG3XkWX59F1nzpw+q+janTt36ruNM2fuqkxVv/3tx+rthqYpR48eVlX1W48/rihacThADMdxPPVc1226vjOdnXIcS1HlbCYTx0GSuIrESxIXRJFCEcYcwwsZ3WARtl1vc3tnZ72WcnxBEMIwSjAbJtjpjnqj4S987Jdy5anOyI5IUqhmEs+3/aBQKIhVrtVqFYvV6blZ03K26/UwDgqFQq1ZazRaqiTXajWO43q9Qa3RtF3v7rvvNgwDnM6iMGk0GqIoPvTQw/V6rd1u7+zUoLYEtbc2ZbAMXylPzc9xYRhub2+3Wp1ut3vmzJl3v/s9sNHl0sVXd3d3YaQ9k8mA+ygMWCVJUq/XGYaBDhzML1RVdRxHFEXLsiRJymeyjUZDlCXdMDL5nGboN27caLRaGGNFkniedz1nPBiSJImLEcZ4dnEGvBQ4jqMUQe2WyaRN2xJFET5ZQRCq1arjOK1Wi+NQmk6EL5EoivlMmuf5Wq3mOjROYoypKIqiJBFCfIpgdlkURZ7d64k4lmUwHo1GwKwEQcBiBnY0YIwJZTDCDGL298bHkKMoQyfD8Difz74xANCB+hMhdFDJDa6sE+nMJEOC8TPGeDKXOAmkSSwdLF+Z/eVN+IAWllLKfZ9VMRPtjiRJYKYAT5TRjX3EKWEYJpMy5udnZ6anFVHoddvD4YASgkkSBEFCIo7jRE20LCuOklQqlc1mOY7z/TAIgjCMdV23bbvT7hmGMTc3Z9v2zZs3jx86miRJs9m0LIeXRNM02+0uRYhSmspkDcPI5wqHjh4xTdNxnB/+4R9ev3r9nnvO9YeDxx77xiuXX/nRD/5IdbqyvrFx/dbNr33tsamZ4g++7/2pVPrxJ566du3G/ffff8/xu3u9XqtdP3LkULmUW1qed50xpjHL4nwuo8lSGAUkThBC1tjsdrvPf/PF9fX1VCqVxKTdbntesLSy/JM//VOCosqqmmAaIyqocpwktucSRKOBOxgOr1+/furM6SCJrly7tluvlavT6+vr1WpVkpRRfzDoDSmlHMM4jjMY9AzD+OAHP0hp8sUvfjGdTi8sLFy7fuXRRx8lJAGraMsaZ7NZXdcHg8H8/DwhZDwe9/v9u+++98knn+R5PpvNtVotRdYOHTr0Az/wAxsbG48//uTFixez2ayuq7dv356dnZ2ZmUmn0zs7O7DLiVIKFSlUKLA623EcXVFTqZSRTm1tbyOWOX7yhOd5t1ZXB4MBgxDLstPVqXwm26jXPcddXl4WFB7ETN1ud3t7p9luI4Q0zdBTBkZsEARBHIVhDAvestksw9FMJjM/OwdHT+B5Yeibo3Gv1+v3+2k9nc/np6enYbUGy7KuZ8VxXCgUDE2HopfnOMuyhsMhhxlw9Tc0HY5pSZIkp78XXAyFC53lGMrgMNzzuaKYcP9IRjoYP5P4BLXOwViaZFEIS8Bv9g6JA8sGJg/L7E/xf/90+D1u3P4NcDa8zxkORyOO42RJ4nmexShM4vHYEoVOEgZh5OuqVq2UGYbpNBuDwQAhVMgXU0YaftxxnPHY4nk+m83OzhYdxynkS4dWjty6detrX/uaqmrnzp3b2dwqFIpT1erO7m6z2TQy2XPnznh+yDDcxtbm9tYOywvVapVh+eMnTjVbneWVQ41mZ319bafeePDBhxFmeE48cuTI5s728vJsJpsnURIGcSaVPnr4yOnTp48cOtZttUWRV1UdM1wYJJbp+b67tDBHqWC7set6DEWKomhGjhPlueXBwHK63f5wPNK11NnThxdXltVsMaEoxnyUhAmDSRDbrjsyRxhjqzEybWtoWnfWN+5/4IEwjvvDwclTxwuFQrPZZFnsRyHLc/lsrtVqkYS++c1v3tzc/M3f/M33vve9/+pf/etvfeubly9fXlxcvHXr1mAweNObHoEfzOVyzzzzVDqdXl9f932/WCzzvJjJZHzfVxT19u3bSUxLRyuNRuMP//CTnufdf//9jz76KMuyq+urnCg4jtPqdgbjEcaYEwWBJI7jjPs9hFA2nwOSsNls9kfDSrHUarWiJM7lcpbrXLlyxbZtUZbDMGQxTpI9WlwURd/1hsNhVa9A5QyAKmRUx3HS2YwkKoIgcGHAsiHYTCZJwgi84zjtbsfzvCD0aJxgRBGmi/NzAsfGcTIaD8BTw/E9RdFWlmbBQzEmCULI87xREPieV6lUwjAkPeJ5XkwSnGBKaRRFHCvuBRFDMcYE05ggRAjFAqyWQIhyb2znXheErwubiR09eq0GZ3IHQnTCqgNgc7B7nOTVg4/zPe8fvIGAkGGYiV4W/EgiipOEhlFECCEkdl03CSPbNleWl0G4ZFoWRiiIo1QqlU6n+2ZvMBghhMrl8tTUTLmcuK6fJMnq7bV8Pt/v1TY3twRBePvb3wGLZpeWlhiGeeWVVxzHfcc73lEolF58+TvNZrPRaOkp474HH3j00Uc/81efnZqampqa2draGUn9ZrNer9dnZuf/l3/+C1EcXLn2aqvRSsLk1KnTlXK1XK0oiha4AYe3IjeM41jRldnZ2Uw2bRhapVLSNM1xbMzKFPMMpixHoygw7SAIAtd1lUxuanEl4kUfc6WpmSOnzxqpzHaji1iGYZiYJJhFLIsH45HjWKIszU/NkVrtIz/2kwmNB6P+cGyWqhWGYbL5zK3Vm2kjA0pIjuNYljMy6WeffT6Tybz3ve995ZVXNjc3//k//+fz8/N/+7d/G8XBT/7kT966dYtSks1mNzY2yuXq6dOnwY+wVqstLCy88MILhIBDD0MJzufzrus7jjcYDC5dukQpjuO4VC08/PDDCwsLURQ9/vjjGxsb/X4fzlaO41RV9X3/xRdfNE3z5MmTH/3oR9u7ddM0eZ43DMP23GazOR6Py9WqZVlzMzMgolAleWZmppDLg85J07TRaDTp3IIgpBSbpomNPRMtmINxXc/3/enUdBAElmXxHEPjhCQxhxkW0ZnpKk2I7TqmaUVx4Eccz4vpdDpKCKVoZFocw8iynJVlkRfS6XQYhrZpea7vhxFGDKwjJRQFCDP4u3JOEHRSiliWxRO7mkIh9/2CcNLyHQRIDzaBB4OQvGb6/rtl6mQs+OD3oAPl7sFylBDC0+8dhIIg4AMS2ImsnEXsHneCCEJI5Ll0JpVLZ3LZtCzwsiIyCPue67ouQkQURT/0WJZVFA0caX3fb7e6wIal0xmWZU3TDoIAWpQkSdauvVoqlR568JFStXL58qtXr11r9/rNVufo0eNvffvbLl5+9fNf+PtHH330Iz/xkz//8z9/9913tzZrS0tLXuBOTVVKpYKR0m/dusGy+Oy504IgDMeW67qFQonBnGma6XTa9wMw+ZR4IU4ihmEcy3Rdt1It
8SyHMd2bIbBM0zRt2w4pTpLEspxut2/oqZWVw4IgWZZVKBRYFlyCUETiwbAXx2E+n0/x6eu3rp88eZITuM6w2+o0wyS88MorlUplc3OzUCh5jt9utA0jpSlqLlcY9NpXrlxBCK2sLD3//POZTObjH//4cNj/0pe+1O403/ve97Isu729BZEGkxZBELTb7YcffvjatevAvK3d2WBZFobKIXN6XhBFUaVSabRrcKTed9991Wp1YWFhAmZeunRpY2MDFlcEQQAr71fmFizL0lOG5/uO70mKHARBlCS2bWNKBUFIGylD1TBCEAwDs5/JZMbj8Wg04ji+Pxx2Oh1BkBRNzeeKSZL0hoMgiOBEI4TouZTvuqmUXikWOIzjKGARJkmk63qvN6CU2o4XRTEnSAzLl8tlP3Cz2SzPciC1H4/HvuPm83nDMGzL2t3dHY/HkiACmCwIAo08juNEnuc4joVxe0IJgakDghFiKMGgwJrE0uRv5oDR0yRCDk48TWpU+gZmDyIN+kZQeE5IxYOw6vfsCb9fELL7G7xhGwGIg+M4FgSJxgnGWBRFTZUNQyvmC/lsptNu5vNZiRf6vY5pjnluT+yfyhgzMzOIMlevXl1bW1NVdWlpec84jKBUKqVpWrfT39rakiRpeXn5Zz7yI6ZpfvWrX33p5VckSbJshxXEBx96ZGFh6a8/+zeXr1750Q9+eGlp5T/8+v/7oQcf2djYmCnPTE1NCSIvivxubTOXyziW6ThWsVhkWdxqdRBChw8fLRQK1tjqdDpjx15eXj58+LAoikkYweyPLMssw4T7t8DzgPhGCK23ahhjx3bDMFZVvVSsFAolhmEYiiRZQCSJ4iD03NF4yLK4XC6uX98pT1V7g24Q+YVK/tbqzSAOnnjyyepURZFVjhPSRmZ3tz7o9o8dPaHIsjnqx3G8vb09Go3e/e531mq1xx9/4uMf/1fvfOc7//N//s+ixN+4cQOExPPzcxzHnT9//tq1a9lsThTF6enpp5566sTxU1/72tcEQTCMNKBZzWZbFEVAXyzfBF1EoVBotVqg7b7rrrviOIYJCQg/y7JAHu30R1EURUl8Z20tpuTw0SOe521sbaVSqcMrK5lMptfpupZdLBQMTbcsa2D2U6kU2HAlCbEcxzRNhuHS2Uw2k4+iqDccxDEY9oQMwwQ0iaIgl0mViwWRZVkGqaKAMbUty7E9RdPDKHH9IIhiy3T0dIbjkK7rNEng4zBNk2e5paUlQgiNk36/7zrOZEZXkiQaeSzLChzPC6zAciyDGIQYRBBNGEowQpgS7nXMwcHy8nWQ6SSbHWwUJ/ch2PCexwwGTgnodfzaMWS8v1L7dT3nPw6iwkPRfS93juNA0JQkFGEk8byqqpqmyLKKMQ7DMCGEEBSGoe06rutpmgqLtbudPiUYYxxF8czM7NTUVC5XQAjNzs4nMe12u83GerFY/tCHfqxYLAZB8Bef/rOdndpgNGQo1jTtzT/w6NTsXK8//OQnP4kZ9rd+63deuXjxDz7xf8Vx3Gg1wzjq9kZ31jeWlxcLhfyx43dREpJ8/tqVV9/5znffvn1ze7Pmum672a4UyudO321ZVmfcLRQKMzMztm333EG91kqlUplMRpG1IIgpQQwjMCyNk9APCMMwA8cOw9Aa25lMjhGVkesxlslQxrWddErnMI5CF9OEJoQiHLiBG4Qsy9u2m9Dw5e+8srmzdvTEUVmRXN/LF4qdVhtm5Fv1Zr/fr3vee971drTvQv/MM8+Nx8OHHnrwxo1bN27ceOihhy5fvux53iOPPLK+vm4YqUKhoOvG7Owcx3G1Wq3T6dR2G7qWWlhYePXVq4QgnucnRQd0VvlKDo7gUqk0MzNTr9dv3ry5vr4OLwMhZFkWxrhcLlcqFUmSzt91tt1ujy2T43k38MEvqxIEMDxRKBRInNA4SafTuqqBjRCMSgFcFEQRIWQ0GsQkiSNCKXU9N0koaCR4nmcUUZIksPAKkyRjKLlcRuD5HssKgpBKZwllXT8YmZbrh1EUxQQlIxMgD0opJ0j5bK5YqqyvrcmyrKczPC8CRkopYTmBZWOEEMUkSUiMEow4jsEsgzCBaKQYUVwqFQ5G3cF6clKLQvzsDfUemLfAB5ScE7AUggS+AplzMpUzybEY42ivi/sfRUcnywPANR0mreI4FgSFEMJxjK6okiTwHCdwLC9wgeuUisVqqcgLrGPZo9Ew8H2EEC+IkiSl0+lsNmsYac/zNjY2Nta3dF2fn58/c+bcysoKpXRjY3N1dbXb7W7fujg9PZ1QZBjGmXP3MCx35fqNXn947733yYp6c/XOyxcuXLt56/Cho2sb65Ik+S6ulIsrK0t+4BZzOcwkR48cKhbzKV3b2drudrvFfBFjrEpqtVrd3anbxGEYZmpqGiEGIbS9tZtL5zKZnGmavh9A6+v7Xrvdtk0LY9xMhpRSy3IqlSlV0k3TlnjJtR2JF3LZNEYk9l1VFjmW+q4bJ2Fte9juNBut+vKhpd3GjpKS773vnmvXr25sbS4sLLQbbVXVDS1T294NgzhJaOTZv/prv6Kq6qc+9SlYQL+zs3Xs2DHfdwuFwtve9rannn7ixRdfBGMewzByudzCwoKqqoZhbGxsiILcbrfPnj178eJl0zRd1y+XyyBhwxgvLS5nSilo2NLp9JkzZ4bD4auvvrq9vY0Qgmxp2zbM7ILXUzVbaDQaDMdatm17riCJcRwzHMey7M7W1uzs7Pl77rVG4zurq6qsLCws+LFn2za45Xe7PYbjCCFraxssz6WMjCRJmGMJQb1eD1pNIrDZbLpaLCCS+I6ZSxmlQp5jGd/3h8ORKMgRoUFELMfdrTcxZgmlgiDMzc1NT0+7rttutWAVR6VYyuVyHMOapjkej4fDIaY0nU6nNRrHcRyFJIoRSTgWiywjcIzIcSwhDCLMZIrijbfX5aVJfEIPPWkO9z0IGMuyJs0eQghKC6DaoKuBxVTotZK3/6kbxCrGGAJyIm0lhIRh7DMsQoTwPCIcwnR2dp4kked5DCtHUWSaFsvgQqHw0MOPAA6+sbH1jW88Ztv2XafO/NiP/djRo0dN0+52u88//3yv13ddVxCEYrHIx8uyLBtGmuHYa9eumZZdLlff8fa3y5r+xFNP37x5O53OLi4u1hp1gHBu3dg+cfIuyx7Xa80rV16tlAq3b946ddeJuempfq9naPr09HSr3qrVaojgeq2GVMY0zSAIZVlNGZnhcGzbfqfTL5VKju0BXxeGoWWPMca6phUVQZKU8cgyjDSJCWYZWVN5XrTNcRjGgW97luXLPIdJo1HvtBqmSfOFLHRusBdla2vrAx/4wH/87d8CYbrnBaIoFsvl+k69UilvrN783H//ux//iY9+/OMf/6u/+qs7d+6cO3eu2+1WKiXLsj772c8+/MiDnU6n3+8rinLs2DFIgIANnj59GiMWdPCNRgNcemF5PSHEc30E/suynCTJxsYGxng4HIJJ1OnTp03T7Ha7IL6HCUOMMcyyKJo6HI382q5t22EYSooCMxC6rt++fduznWw2K4v
SzZs3C5V8p9MBLTHGGHRwlFJIITzPaykjSSg0nBhjPw4KhZwsy4Hn+r4/ogSRJI7CE0ePeZ4fBpHj+X5EXM/3fZ9hOF4S683GxAAuiiJd0yY6FhhlhqOEUsqL4nQ5FwaBS+MgTJIkRAnlODZKGIVjWUwZShlM8VS1iA5oryc3yHjkwBz9BFYBDgT6PfhVwzCE2daDuRQeVlRkTdMy6TQQjKZp9rs9y7I0TSNRvGfhelCVxpLXPQg8jizLE+4x2V/ymk6no8Egk8lJomJZznhk86yQzRYNPcUz/NLSEk3I2tqapitvectbThw7FsfxhevPbG1tWZazsnz47NlzqVSmXmtubW01Gg3XdcfmMEmiTCady2cwpq7rpnghn88/+9xzU1NTKysrzWbzgYcf0nX9rz771xzH3bhxI18sgLVeOp2+cOHCqZN3J0mCWfbSpcuEEITZ8lR1MBhRhHQ9dequM0mSlKemPM9Tdc0wjMR0KaWQ3uM4BoGloijD4RCUg6qqAvEFZYiSkZl9O3cGYT9wbdNyXddQNVEQwtDvd7qj0SiOIvCZdoMRz/NJTAVJEwRxMBjpWur8+fONVv3bjz/25je/6Zlnnzhz5tSPfvhH//d/+2uiyM/MnPvGN/7hRz7w/n/2z/7J9evXVu/c8l1na3tjenq6vrOrKEoYxrdv3vrAB350d3eXJEhNG/V6PZvNJkmyvn7n2OEjd911125t++knn1peXmRZtr67myRxNpvFGNu2HYTm7OzseDxeWFgwTWtra2dpaanV7Ph+yHG8KIqIMq7rg7V+GIYwrrm0sowY3B8ObdfZ3NwMk7hQKIgcLwjC4ZVDjmVhihzLzmazgsiura1FSTw1NSUK8ng8Hlum63qdTiedzWqaIcsyw7Htdrtea4ZhyIrorhMnBUHI53KB622sreeyWU3TdnZ2dF3P5nKaoXth0Ov3x+NxEEeuJ/i+H0UBy7I8z/IcIwi8KPGYklK5wPOcY4193w9DH2wdccQzDJPEcRT4CFNFFCSRZxFFScKzmOcYhhI8N1s9mPoOXvoHg2FyH0hziASWZcGZJwgCdKBpPBg8XhhgjAWeh6I/lUqldEOW5Xa77dnOeDy2bTuOIvCQ5Hne8pyDMTy5ua7reSEhSFUF2He3xxuGgeM4SZgoim7oaVGUWSxgxBTz+dHQXFhYeNMjDwmCcPXqFdd15xdmlTSTz+cVRWvUW5cuXa7Xm8VCeXl5+ebNm3EcJyQSRT6Xy+qG6vvucDi0O12GZU+ePMlx3MbGxgc/+MFOv/eZz3wmV8j3+33DMMaWOTc3NzMz8/nPf/7UqVOt5uDOnTvpbLbX6/d6vff84Ptqtdqdjc3ZubkgiDDL2Jb7zve8e2dnN5fL5fN5bzCGIw88pKHelmW53+8DvGYYBijUoe4gfAI1vCAIIi/EcWyZI9eyHcfJZlKiKA57/e2drWF/AP0zYiOGYQI/4nlREtX+aEwSNDc39453vfOLX/z8tetXfuFj/+Lf/O8fX1lZ/q3f+s1f+uX/x9raiNLkB978iKrK/9v/9i8vXX4lpWuf+uNPBkFweHnliSeeMIx0p9V+85sf/fCHP/yLv/Cxs+fvmZ6e3tzcvHHjhijyLMK6rp89d3ppYfHatSvdblfkeVmWqtUqyESNlFir1TBmjh8/Xi6X//zP/xIj1nVdjFnAioeDcRzHKysri4vLo9Go06zVarVMLivKEsNxDMdub2+bjp1KpXiGVRRlujrFYkziJPQDWZZTaW17e9vxXFmW44iMx+OROR6NxtVq1XKcJKG5XE5PGcPhsNPuEUIYgR5aWu71er1u9/iRoxzD9nu9Q4cOgRQEYSwpMuZYx3XH47Hje0Eoj0aD0WgUhiFCRBJ5VVVUTY4CP5fPFLLZdMbAGLdajeFwKAmioeQwxkkc+r6fxBEH2y3iSJElgWdlgedYFq8sz9MDwmi037Ml+47Xb+wVXyd2AaBpIuCmrwVLWYEnhJB9WJXneYHjWZadmZlJ60YmkxEEwbasWq22tbXV7XYVQ6P7yOpBSgO2hUD1CxQtRH7ZSFFKdT2la4Zleb12T5G16emZo4ePPProo/ls9ubNGyzLnj17OgiCF55/tjOqdTqdXm+AKKPrhqrqAi/tvTBB4HjYNUwSEoG13nypeuTIkcuXL2ua9sEPfvC/f/7vkiRRFGVrZ9vzPEmSKELHjx8H+2qe5zOZQhAE37lwgeO4fn9w733nL1+6stuo//N/8YtbWzuvXr3iOv5HP/rRJ55+KpPJaJo+V6yAwhjKCvDGpZQ2m00ISAjCSe/tJh6005LAi6JIkmQ47FujcX/QTaI4jqMkilzXdix7b7sYChHCvh8ymFM03XPDwcjEGH/kxz9ar+/+5V//5Y995MO3Vm+urt744Q+8//Tp07/x658gJE7icDweHj9+7Md/4iO+62Rz6du3bz/z5FMcx43HViGXt203CIL/49/9h1euXH722Wd7vV4+n7es8c7mFsMwhWKOY9iHH34QY7x+5w7DYLDcTqfTa+s3MpmMbbuDweDcuXOapm2sb7344ou6ntI0bTQaR1HEcVw2mz118vRgMOh32tvb27woyKrC8jzDscPh0AsDjuMkXiiXy2kjlTYMx7JJnMRx7AeOJEkE0eFwyGBuZmYmJsmtW7cxxs12O45JtVpVdW04HI5HFsMwksaDG2KjXl+YnSsXSzdv3ICLLY5jihDDsWESm5ZlmqYfhVEsQ8GJMeVYFiHCcazAsyyLWQ7rilKpljRNa7eb4+FIVVVDybEMQ2kShiElCYcZeHuTMBAFXhYlQeQ4dt/fHgrCyYc9SUSvowehoJ94gKP9EWNIiZO0OUE+Hd8DyTU8EXiPQ2G5E+/R7rqmlcvlRx99NJfL9UZD2DFo2zZoHQCdh/KM7lshpdMZWAPiDUZLS0sMwzYarerU1I99+KO5XKG+Uzt58mS73b5y9dV+v9vptP775/9G4JhMJlOayk5PT8/NLVim0+l0TdPMpPlCoaDruqIoHM+Y5qjTaZvWCESDLMtevnz53LlzU1NTf/hHn5ydnd3d3d2t11KplCzLlm2fPXt2c3Nzc3Pz0be99dlnn52amitVKtu7u/1+/95774UFQB/84AdHw3632+62W0srhwbDnm2OZEH0PI9NKMzOKhzLiYIgS0mSmKZVKJfgjYXXAJ9CQhKRl1jMERIzDBOFoeM45nA0Gg8cx+l1OmNzKLCcJAksx6Y0VVGU2u4Og1lEKMEEEaqqchjG/eHwS1/60i/+0seu3rj56T//zO/8zm/98Z/+8X/87U98+s/+8H3ve9/f/M1fZzMpyxr/8A//8Gc+85mP/eIvPPX0E+9617u21jcuXrzIMJxlWaIoq6p64cKFwyeOWZb12GOP9fv9xcX5yA/6/f7Kykqv0/3Sl7508uTJ+++/v9Gob2xsIIR6vd799z9s2/bFixcr5SnbcquV6a2trdnZ2X5/CFbZi4uLmUym2WzazjgI3SRJDMOQFJnluf5waDk2QoiXRMdxBJ0zDAMYY1BswlxLr9dDDC6VSiRB4/
E4Jkkmk9nc3CSEpFIpsCGGN9bzPNsOrNH4wQcfPHbs2NrtVdu0oMuANVv5QoEXhf5oOByNoGuwbD9JEoZBgiByLCYkASIglysKHMMwOEkSRGhKN0ReUFU18SnGOEkwy7IMx4LmmUShGccRxUwcJzBF8T3LzklKxAcocoZhNE2DccyDIphJBL6ukkQITTgT6CElSeKMFMuyw+EQnNTiOO52u61W69KlSwghLZ3h92vXbDY/N7cAjpRgMs+yLABxDMPADgMJ4+FgPDU1dfbs3ebI+upXv37lyuckQfy7L34+m82mdBUhwrK4UM6qsswwDMxlY8zKkloqlXQ9xTI8dJiUUkCPYOkXQoRl2Tt37tx1110zMzN/98UvaJq2vr4+Go38MDAMY2yas7OzHMddfPXyAw8+sL29vby8HCXJhQsXwPR+bm725u3bhWLu/vvv//KXv9zrthmGOXHs6NVXL/f7fXCh7jXa6XQ6l8tls1myvwzLdd1cLgcZEqqSZH8tpqzKDMIMw4ehb9njYa/f63UsexyHIWZoSlMJjYPQYxiczRgzM9PDbpdlWYyYiFCBFzTDUFWdE4R6s/Unf/xnP/+//IuLr17+4z/58/f/yAcb7fZv/c7/+Tu/+XsXLnxnY/0Ox3G/+Zu/eeLksU996lMPPHjfr//6r//e7/6nz372s2trG71Od3NzO5VKlUvVzdqOJEmnTp2CYQswOwQTpPPnz9u2/corr9x//33Hjx+/ceNGv9+/fPmKpmlxTLrdvucFhw4d4XlREESMaRB4mqbJslguFwmJDUMjJLZHlq7rqq7JqhIlieXYgiBwPA+BxPM8IhTW0RiaLstyFEXVahUxuN/v+16YzWa9wN/a2gZuE8oogecqlYqmOqurq7OzU77jWpZVLBSCIGg5rTOnTzMM02q1wCRGUZQwiW1wFkc0l1Mty3Idx3UshBDGSBQ4XuQty5qdrhqGznOMLMuSJJimGQSByMrgBAn4sCiKHMdSQlhRoZQ6cURCn0MJwZTCchmoPjGilCSIUlCYUYoIoZQQhmUhbCa1KNqHOrl9A/M3tnOpVArKUagwwzCMKAK8IQ5C2H+CDgwfdpoduA8JGQgZjPHEHzaKInhDYZbih3/ofcVSeWw7f/CJ/7p6YxUmXADv8UNPQ5Kuq7zAcBjFNMAEgwubIEgYsRzH8zyvqYYgCDCjBJvlJUnkBdayxpZlPfjgg6VS6dN/+ReLi4tbW1uj0ahYLsG4TTabnZ2dffHl75w6dQowzIWFheHAAjXGu9/9bsdx2u3WRz/60Vs3rokir6rqzHR1qlr+2te/QimNwyAOA0Q4sC1jGEbXdRj2KRaLQA9AkQ9vHQyvJEmCMaKU+q49HPZ7vd5o3As8T9VkkeUZkSUk4XmGEOKFdqO9yzEsSQjDMighTmRxgsDxIsdxo9Fo/Ykn3v+BH6lWZm/dWTt85MT584+sr6//+Z//+fvf//7P/OWfy7L4wgvP12q1JApFif/xH//xX/7lX3744Yc//vGP/+kf/8mb3vQDt27devzxx7VMyjTNH/iBH3j44YdfeOG5fqfLsuzNmzfPnTl78eLFqampM3fdBVble8RgrXX16lWW5cIwlCT5c5/7u52d7cXFRWDwS+VCp9tSVEmUeMwkQegghMIwjEYjWVUMwzBti2EYN/BFUeQ4Lo5jgeNN03RdVxalQqHQatdFUWR5DiZjVFVleY7juFQqNRiNYLgxXyyUy+WUEYDAJZtK37hxo5FK8Rw3tuxerwe7ljHGvV7PcuwgjhzH6ff7Y9tSlJzveXEcYowYhmHw/ph7EnEcx7FsEPiQe6CeBN9Kx3MBnZY5RZAkjDEOAoAzwzDmDraC9ADHAKfypBwl+0uJgWyYzEzAxYFfy/XjAxT/eDxmGJj73ytuKdkDWqMgCMMQYywKAsMwYEEpicqkR40TEoV7rSmDuSSmXhwghHLZwtGjR0+dOjUzM/P0M0/evL26s10bDgY8J0J9EsfxwtK8IokUJZZjKkQoFvO8wDqO47t+kiQsG3CsYBipyVQUzDFNynIARXie5yX5xZcvzM7O3759R5blhcVlhmF2d2pLK8szMzNbuzuEoFOnTn/pS186d/fdnW5/Z3ubYdF4bB89evSv//qvFxYWTp488fTTTzWbzdFgqChKo1FLorBSqRi6bOjyaOjHcWRZJvgXh2GQzWY1TbMsC+ptoEMppbCqTeR4iEbPd3zXcT07Dvck9WDihRDhBRYh1vOcwaCnY81yHZbl4oTanh8nVJIVyx4fO3bsiaee+sQnPvELv/iL/6/f+I1/+S//n5/+9Kd/6X/9X8fj8Z07d970pjdtbKw9+uijmWzqwndeunPnDs/z73nPe55++ul2u/vjH/mo43jPP//83NycF4elUunFF19cW1t785sfETn+lVdeOXTo0Pb2drlctm37pZdeOnPmdCaTqdfrg8FAVdLT07OZTGptba1cqubz+Xvvvafb7bquTSldWlq6dOkV2zZFUWy3Q9d1EEJRFLmWl8qk4QqBqQCWZcFhqFQoxmEI7AvDMCsrK88//3xCyalTp6IweemllxzPLZcrcRybtg3uEp7n+b4vSXI+nw+Jl81mDcNAlCqiVC6WOJaVJKlcLnueF4RhEAR+FELAxHE8Gg2hIpMkWRRg2QvlGGZ2dlbXVLpPInCYSetpscDXdtuQpQD01lMZ8BYbDocEB2yCMWG4g2GDD0wbQRhMiIrJfcdxgiCA+vhgxzhh21+HahqGEUVRGASwe4DjOIHjwV8ZnojZ13MDnagoGXTAffQgVBtFka7rhw8fPnnypCRJly5d+qM/+iNVk0mCWJbN5nIs5jDGrMCn02nbtnu9Ds+hcrlUqhRVRaKUcJxhJZYoihwnUIKBEgj8vU3xoigaKQ0h4jg2pbRSqVSr1a3VjeXl5W984xv5QkE3DNd1y+UyxcgwDNXQv/Od7/zoj/7o7dVVTdehQvY8z3Xdu++5u97Y/dKXv/iff+/3/uqv/mowGABhpWmaNR5VSsViPgf9sKYpjuNQmoShPxz2HceCYSKAf6DbAUd6MBCYKlU9z3EcJwx9zFCMKcaIZ/Go3ytXirquWPZ4OBwmSSQIvKoqfMQTQhCKEcIIkSSJCE0IIZub6/fdd98rly4/88yzH/rQh37n9/7zE48/9Za3vO1T/5//srOz9bP/7J8cO3as3W59/R++urK0uLm1vr6+HvnBD/3QD62urv3sz/7shz70Y7/xG7/xXz/x39a2N2dmZrrd7u3bt5eXFx944AFVVb/z8ovHjhxtt5uwIA38s0HPOT21cPPmzSiK3vGOdwWB98yzT9XrdVVVVFX2fKdQKMzOziqKPB6Px+OxYRg0jGRZhpEFEKOBsMZxHEWUYKoo9H3Hss3R2PM8z7fL5XJMkl6vhxF76NChIAp7vT402IqiybJsu06j0SiXqtlsdmB2R6PRvffe2+t2V2/eKuYLlFJw6WYYhuU4gihEoCRJoiIjLNi27bte4LtJzAiCoMqyLIs0IaCAFTkeCxQYfJY10ul0EASYZcMwF
ESZ53nMMoRgQZRjxMYUEYbFhxfnXsdAkH0btdcBLVAfJvh7L7uYNIEHQxohxIlCFEVRGAKmAkU8wBUswlBtIkrhCvN9X9dyk4edlKMMw9x3330LCwuSJL366qvPPffcYDSYrk7Pzc21Os04jpMY9tLsQfyqLDIMo6qiIouKImeyeiZtUEps21QFfXNzM0noVHVG1w2OExRZE0WRUrq1tSWIXCaTkmVpfmHW85xvf/vbR5YOE0J836/Vau94xzu2drYtyzIM4z3ve++HPvShf/fv/p3tOE8//fReafrii77vX79+/bOf/ey//tf/+p577rnr1Knf+I3fOHLkSBAE+XyeZdlms3nPPfcMBoPNzc1isbiz04KtfWfOnGk0GmD4lyQJEGVQ5U5EDnEcK4IMjoCua0uSUCoXLWssSUKv38nnc5lMqtNt+b7L87znuYZhqIwex7Fp2cPh0PEDzHAMxyaUMS2nPxqLkuK4/n/9wz/8iz//zNrG+r/7d//+r/7iU//wD187d/b0f/gP/8edO6t/8ZefHg36iipls1l7bJ4/f/6uu858+7Fvra1t3H333W9+06N9c/SFL3wBYmM47BeyuRMnTmi6cvXVK0kSJUnSajQOHz6UzWbjOL506dIjD79lfX0dVr4QEucL2TAMJUk0zZHnO9PT06Y5EkVhNBqB5aFvEVEURVmyHLvb7/OioKqqH4Xdbndhdg4htLK0bGjarRs3VVmZmZkZjfvNZtMPA0EQwiAOwxAxOI6TMAyDKCIEsSyLYJERYhFCfuyU8oVcLuc6Tq/dETheVRTf94vF4mAwsB1H0dRCuSSI4mAwaHU7ju0HQQC+OwzDsHg/h1GSSqWqpXK1Ws1m0xzHhUHg+/7q+pbrumEY6ykDlLS5bKHRbi0uLouSZNv2cDjiDiac18UVs6/bpvu21hzHIfpd7fVBIHTys/i1KlAoEuBigt4GUwTVbBLFe054oqgoSjqd5jhuNHRBtcyyrGEYU1NT09PT2Wx2e3v761//+tbWFsMws7Ozx48f7/f7r776arlawJgyLGIxphRBWmBZNpXSSRJBDaApKqUUEjjP89PT0wzD8ZyI9u1MwB5b13XXsxFCMzMztVrt1q0b2WwWIWZ7e3txcfHUqdOb2zuO47qu/673vPf//L0/+PCHPjI9M/dHf/RH7Xb3nvvu/8Y3vjEcDlVJ/Lf/9t9+/etfb7fbR48e/YM/+IO5uZlMJjUajdbWVt/97ncHrsMxxDYHDz94vt1uF87e5fv+eNS3rVEYuDynciySJSkKvSj0JsUCu7/FlbAMolES+yQJkxj7rhP4LqGh6zpxnKKUAl6VK+bCKOptb88XFsIwtFzHDfwoiRhKMGUShGdmq6VqSZL1wWD0hc997r57726321/4u8+99a1vXV29tbm5+cQTT9x//32/+qu/+pm/+PNsLp3JZJ558qkXX3yx3x/ec889mmZsbW397ku/e/8jD/3SL/3Sl7/85a9+9atvetPDKU1fXV29+56zHMeZ5iiXy/3UT/0Uz3M3btyo1+vz8/Pf/OY3K5XKysoSITEsh0mS2DTNH/mRH7lz587YHG5tbYA6n+NYjDHIvnVdF2XJC4Juv9dqtSzXgTbStm3XdXVVBdEIIaTdbluWJSmypmk+G7Isq2iqqmobGxuNVqtSmUqn01s726IoplOpzc3NVE6DSXGWYeIgJPGe0NI0zZWVlWwuNxyPGu2WZdtw5Vy5chVhJCsiz7D7sAhhMM5kMjzDRlHUarWGgx7w2AzD8KLExYnrB1EUJRS7lsNwAsMwtuNQaHejkJsEzyTYDgYhfq21hCiKNI4m3/m6cnFy52BPSBFVFIVlGMuyAFBhGQZwJ1YQ9zjJOAavAT+IU3qqWCzOz8+Xy2WO4waDwZ07t/v9fqfTkWU5m00jhNrtZqNRS6VShw4tW/aQwZRhEceyHOYYhpFlRZWlbDozGPYCz4/CMI4JTRISJwIrXL58eXZ2dm5uAVHGNC3f913Hbzab4J1z/r57VlaWbt++tb2zqapqHMe3bt0C/cri8hL2mFdfffVXfu3X/uzP/mw0Gv3qv/m1X/7lX75x48aP/9RPfvWrX93Y2Hj3u999+8b1+fn5T3/60+fOndvZ3ZIkYXd3d3FxUeTZfDbNs3hufrpRq0mCkESRyPMch9zYn5kqMQyjyhVFUQaDAZA6cOKCjGEShBIvRZEsy6IkMizLihJLkcwwmGPY0WggSQKl1LRtThCSJBkMRmzSgMehlCIGY2BsKTUtvpAvmbZZyKdfufDi/ffd+5Y3P/zE409yj9z3Uz/1U7/9W7+5trb2pjc98uWvfP1nfuZn/uIvP33z5s2zZ8/WajWE0Obm5unTp6Mo4ljhzp07Tz/99L//9//+7rvvfvLJx+M4Ho/Hnud9+MMffv75Z2/dutXr9aIoBG4T7GHD0B+NBwiTOAnr9TrPc5Ikffvb36aUzsxOHTlyTFVV0zTr9d1Go5lPVaCLI4iCOkMQhIwoxHEMHxmlFLr3+m5tNBpVqkUoBXVdVxVUr9fX1tZGozF4Z8D5DlZdkx8EmYQgCKlUisQJRgg++vF43B8MLMfmREHTtG63u7q+pigKz+xJNaHZA5eLlKbDXVjcAA8oimLfDlU9ZTleGBOGYcBDQJZlczzEDI2TmKIEH56boa+9TUpKGNgDGADwTEVRzMADqAD2DaADdOIba1GEEMGoUCgIPD8YDGzbFgSBY1hIdJEfwK4fSRThwUVRXFlY8TwP+gfTNGFumlJaKBQ6nY7neZlMJp/PY4xt2/Z9X1Y5SimDOUEQeVbgOE6RNVVRcrlcp9MyhyNB5PL5TDpjyKLEsnhnp4YQ0jRDFGSEsCTtLQB1HOfYsWOYoY5jqarS63eGw/709HQcEFhCMB6PL1++/KEf+7Enn3zy6tWrf/B//ZdPfvKTq6urWsqYnZ29ceNGqVTK5XKYJOfOnfu1X/u1T/zX//L5z39+2B8oiuS7bi6XKxYLKd1wXMs0TUWSTdPM5XKDwci27XK5PFmE0u12wQ0FdJLAwQLAjBDKptLwGQVBQDCCxSkI47E53K3VpqamOI7b3NmGHp4Q4nuJ67pxHEqSJMkilPc8z6czmWPHjj/99LOHDx1vNluIMj/4g+/7yle+5pP4Pe951xOPfyudNiglb3nrD+xub1Wqpa9//esz1SmGYZ544qlTJ05SihVFabe6V2/dqFQq7Xb7rW996wc+8P5mrX7hwoWLly6wmHnzmx9RFGVzfb3RqB87diyO452dnVwu12g0ANuAYSW4oKvVsu/7kiTdvHlT07R8Pp/P5yVJGvdNx3HGlhmTxA/DVqc9GAwIRhzH3X3mbBRFiND52dlOq7166zbP8w8+dN+VK1fGllkoFDhWGA6Hpm2FYTQej6dmZhzHi+N4enYG5khTqRQrIvAQSuKYxglNCM9xMHefTqdZjrNdx49CzDBhGI4sM/RdIAYButMUVZZlDnOu6xqGAQaNsJI5jiJKacjJuVyu2aq7rsuy7Hg8xhgLHK8oUqFQ4DguDvcz4UFtCn7D
lODBgvOgkAVuB7vBN96AUaCwdDqKKKUJwwLMBa4NsiynDMMwDGAgXr1yCUAIKF/321Hc7bUZFucLWU3TgtCDYfB8IRv4Y0IIxglGCaUxiWkcBXHAJWGkyVroeqY5YjHWVYMV2cAN5ubmbty4YZr2kcPHisVSFCW25WKMz58/Twi5/OrFdru5tLQoiBxCaDwee1YoSQ5J6PXr16dn52RZsSz7t3/3d1944cVr166fvOsuSZJqtdrszJykyN/69uO/9R9//RN/8F9+/Cc+sr6+HgSBLIuzs7O9TjuTSUdBmJrRa7vbKytL/X6/UimFvq8pAiICixNz1APPvyiKUrpMKeUYQlkqcIgmge95hBBJkhx7xLKsF/jwZjoOJ4pyQgnDMJSQOCYMg3hehN4hjmOCY8vzvShGfMQTXpSFbDadz+fN0bhYyBq6oqn8+XNn/u7vvtDc3Xrg3jN/+5Wvdzqd97///V/60hcJSY4fP/7tx7551+mTsizX6/XhcHj+/PmLF14RBCmfz7/7XT+YMKjdbouiOB6P/+Iv/kKTlVOnTrEcfuqJJ69cuXLq1Kn5+fn5+bnt7W3LsjzP293dVlUVLgxV1avVahzHV69ezWQyQRAyDJckNIqSfn/Y6w08zyvnS77v266jGfp0scjynGmaY9vK5/OgyR72BzzLMggDvf70008zDLO8sry4uDjoj2D/WRzHMzMzpUql2+0Ph0O4/PaG41i0Z5kZxzzDBp5PkgQW0QyHQ0mWwzjqDQeO6yqKksnnNE2D6zNJElmUYNdiEkb5fNbQdU01CCFhEDi2DU8tFqtVWdL0lB9ECGPYHmvbJqVJJqULrMRg8l2y/nWaT7o/OjgpLyd88Rtjj752MOJgMmRZ1vO8MAgADCSEJBRhjBVFUUQJBlgxQqZpbm5uWpbFYTwB5aEABrrMtu0oihzHRoiCcWgcx5ZlKhJKEKaUkDhKaIwSFAVB6PmqrCiKkktnaJywiMEEJQmNgpjhQ0mSVFUHSTGA3aVSybKsmzdvcjyzuLjYarUYFuVyGdM056eXUqnU+vr6/Pz8oaNHfv3Xf/13f/d3X3jhhb/5m785f//9u7u7Kysr6XS61WmPt8bvete7ut3uxVcv/9q/+ZVPfvKTqqpm06lOp3PmrrvM0bjZqnuOXSoVWq1WLpPieX61vptPp9VcBiGU1pROxyahzzMMSxNJkmKf5TFVVTlJEhL6BGNV5IfmUFG0MHAxxoTQJIh4no/CMIyTdDotCEK73bEtZ2ZmhlLcbrdZSSEMSiiJkyQiSUxjWRYLhdytm9dr9a181qBJyLH0xPHDG2u33/Oe95w9e3Y4HPI8XyqVHMf+7d/+7Y98+EN/+7d/+7GPfexLX/hivV5vt9vnz5+/fftOFEWf+cxn3vne93z729+em5u7fPny0tJCtVT+1Kc+9c53vf1jH/vY5csXm80mxzBzc7PwmQZBEIQWwxKeF1VNYRjGsseypKZSKc/zer2eKEqu66VSaZblzLHd6w1iPxIEwQt8lueKgpDNZovFopYyQNcO00PQ20OFefLkyeFw6DjOtWvXet2B4zjpdFrXDUVReoOBruuyLDfbLU3TFhYWarUamyBNVnieN3Q9Y6RGg6E5HiuKcvTo0WazORqNFE2dnp4OwtDzvDAMMylVFEUWM1DNCoIUer4VRpBCEN4rj7PZbDqdZll2c2DanhfEcYJoEsBeACZJkvGwn9JkFIdRHOwBM5PbRLAG7enBoILAIOg1JmsHRW0Hv3lyR1GUIAjCPWpFiuOYQZjjOMMwQs9vtVq+79P9spbjOA5j0LjhvRmlEE4UVVWhSCOEuK4L5F6SJAgjhChDMaIxJSiOEE0og9g4DKkoCoJkaDqoBVjMptMZwsZzc3NJQre2tizLzueLiwvLhmE89thji4uLCYksa5zL5Xr9DgRYv9+/fft2qVS66+yZb3/72x/96Ee7/d7LL7/8jne84+Lly3DSf/mrX0mn06IofuADH/hPv/tbP/MzP/XNb36zUCiUy8VbN26uHFq2bdu0RqlUajQaGYaxvblezGe3t7eDwDdHw5mZGdd1F+ZmMSUwOhCHQYioY5lxHIs8Bx5WSRKTOCJRKHKYSJwsq5RSzw8NTRlbNIwTVdHjOB4MBlFCoyhxHM+2XZqQmCBVN3K5lCiwCYm8MPB8t1wuDHs9TZF9z9naXOM53Gk0n3/m6XJl/oknvl0s5E6ePPn5z/9dLp8hhExPT//xH//x3WfOMgxz69bqxYsXK5WpbrdLCP3GN77x8MMP7+zswOAS7Db71re+tbm+cf/95zHGTz7+eKvVvOeeeziOe+6550rlNFjf67oahkGn08lm8gzDuK5LKc1ms5pqVCqVKIpUxTKMNCZROp3u9nsjc7y5uel4rmmaoiLD0NDU1BTPctZ4TBMCXYxmyePxmGKQauF0Os0JPDisjsdjUZShtmdZFuQ1Ed3zf0ilUuVSGVMUBgFsHzMMI5VOIwZ7YUAI4Xkec2wQBJqiQtOYJEkc78GN9VoNNr8KnAj5Ay5jQYyarc54PGQw9mw7Cv1sOqVIgu/FPIMFnkV0n6Cf1KKvQ0oPBhiEBN1XtLH7twl+M8mNB3lCGIEHQ2Vuf5sSwzAHdaF0X3bD8zzHM5ihCYnCyI/iAGEiiJymK1EcEBpHceAHbkIiXmB5gaUoQUmMSExRAgoGntszmCKEeJ4XBSHsWw6DWOSkarkCm150XRdFcWpqCsiDL3/5y/fccw+suXRd99q1a7qunzp1Coqora2te+65Z21tDWP89re//fd///fT6TQh5O6775Ykqd/vq6pqWdbZs2e/8pWv6Lr+4IMPHvTtS+vGcDis1+uyLKqqev3G1ZWVlcFgMBz006kURYkkC45rCSKHGcpymOOZKA54gUWYUJQIIqcbaiqta7oiK2IQehQlPMPK8t5wM5BJoDhxXY9SynHccDhutVoJIbbjMgxTKJUWV5bnFuZTqVScRKY5qlQq/UF3NB56ru3aZqfZmJufvfLqpWw2Wy6Xoyja3t7+4Ac/OD09ffXq1be//e2wY1gURYi34XDY7/fhE/z617/O8/xb3/rWTqfz0ksvaZp28uRJSulzzz337LPPHj9+fHl52TTNjY2NQ4cOKYoyMzOTy2WjKCSE5HI5SRY2NjbS6XShUCwUCjzPdzq9jY2t8Xis63q73Y7jGEDsbrfb7XZd1wX1WafTGY1GruuOx3uyb5CbEUJ0Xed5HuRslmXB+B8YpQP9SAjZ3t6GGQBCCIicbNseDAatVqvdbo9GI8hpEAggXRqPx6PBEC7d0WjUarXazeZw1Pd9H65hSZIgW4Dn4tbWlqjI3W630+mEYeg4DhTDiqLADE2xmK+Uy0zCMQnHUIGjAkd4lvAsfIXwbMziENMAkQCRENOIQTGLGUJxQmgUkzCiUYwTwiEsMCxLEUMo/MEJwQlBcYLixB6bPMPqqsZiJoliliCGosD1Yj+ghHAsK7IchxkWYZ5hBZYLYHKO7qnm9oKcIpEXcEIZgmRelDBH/Yj6kYhYL0AxYQnlowSFcUx
QQnESEz/BfqGcNXLa2O6b9lhSRDd0X7rw8kyphKMoCdyZal4R0TNP/sPVq8/ff/+JwWCbF8PhqLGxeXN+aaoyVVzbuJPNZ9Y2t+4+f9/G9k6/P/yJn/ipr33tH04eP9VqtDfWNhmCZ6szu5s7b37gkdPHTp07ecaQtIfuObl69RXP7OUMqdfcVUTG9yxKwkIh1+12O71uOpPT0pkgIVomRzmhPzY3dnaxKL568+bAcvumY0XJyI8GXuhSxkP8eqt3u9bijIJamL6921EzRSdC6fKUG9EAseu7tZgTepbjJKQ1Ho5832dYKZ3q2BZWFYckVR1hpy0EY2yNarduFVU9r2TmKwuqoB9aOqFrWdsJsoUKLyt3NjcKU2UcD+49fajfqrmjcVrO3Lh4Z+Nm+7GvvPiLP/sr//UPPk0i7ud/9ucwE4+tBmYtxfADx4x9p9usqSL3yAP3v+mRh7Y213e3d2q1miBIa2sbrU6PE2TL8adnF+46c7cgqbyoveVt7z589PSNm5uN5jCdmVpaOdHpjR0varQ6mMNOYPqxlWCfEaITp08FSXRnfW08HuuqqooSTggTJdOFkshyta1tczQaDAYjc5wvFwuVUqZcwZLcGQwJxUZalxVeVQSS+IHviDyTzuilSllPZQaW2xmNsaKM2v3GzrY9GsaB7zgWy1FJ4cfWgONJEFq+MwzsAfKtoi4tlLIlRcgZeUNOCVg05NRUcTqlZWnMYMpUK9OnTp5OGZl6o2HZtqbrumGomhYMzJlC+dDsAvUjTGgxk2MxRgidO3cuncs6Qdgd9PfGQ6GPh7MEvdbX8P//2wTOId/Lbe1gXp2Yi05y7Pd8wNclXrTPi8CAhWEY6XQ6n8/D9+RyOdgRa5qmruvXrl2jlML/ApZt2/aNGzeuXLny0ksv1Wq1hx9+OJPJpFKpwWCwvr5+7ty5U6dOXbp06ed+7ueeeeaZIAhyudzZs2cXFhaAec/n81/4whcIIU888cSFCxeKxWKtVoPqALbt7cFompZOp+M4XlhYgMkA3/dhMHI4HhFCLNPRdR3QM4ZhoijSNAPG0scjazAYgG9CGMWCKHGcEMaJaZrd/rDV6kRR1O9044j0+31D07vdriRw1mjMIux5Hix5NwwDHt913evXrzcaje3tbVD8QqewuLi4uLi4trZWKpVYFkuS9MrFlz/+8Y97vguOLOfOnXv++ecLhdLU1MzKygrYb0IVmiTJ9evX19fXVVU9e/ZsEATHjh3L5/MnT54UBOH27dsbGxvPPPPMP/zDP5w/f75QKKytrYVhCPuAV1dXt7a2AKLjOA4akP7+LYqilZWVu+++O5PJOI4Dq4fG43GtVnMcR1XVXC535MgRSZJGo1Gz2YS+APKSpmkgSQNDCowxTPGDHykoLnO5TLFY5Hke3H4lSSkUCrBPW1VV6CEppa7rWrYNfVCSJJ7nWZZlWVYYhuCW0u/3x+PxYDCo1Wr1eh0KYIB/YEINdokXi8VMJoMx3tzcHA6HoihWq9MMv7+PHto8dn8D2T8SUf9TtzfGIcTG6/45ibrJoTCZ4Tj4UG8U5UyK4ck/IaRBcgEVJgjiZFleWlp6+OE3ZTK59fXNK1eu+X64tLSysnzY0NPn771f11NzcwvZbP7y5SutVuftb3/nO9/5blmWO53Oxz/+8T/90z89duwYOCAxDAMi1VarxTBMKpV605veND8//xM/8RP9fp9SOjc35zhOs9mE7fY3b96cyBXgjuM4AG/KqhLH8aA/goY5TGLLsjmO8/0QJnolUQnDEFjpKIoYVnTccHNrZ2e75gUJQszOzs729k69Xu/3+57jJlFI4oQmRBS4lKEpijI9PQ2zLDzP+74vCAKcDuC8RAgZDoe9Xi9Jkm63m8vlLly4cPTo0a2treeff351dTWdTj/+xLefe+65H/zBdxNCPvaxj/3Tf/pP6/V6uVxlGEaW5enp6Vwu1+/3t7e3G43GysrK3NzctWvXnn32Wdd1fd/vdDqqqsL+90984hO7u7vZbPb8+fOnT5/GGK+vryuKAhcxqIXgUAB2lFIqSRLoaSeXShiG/X4fLnRoWTHGsO8efKJgLBOMsTHG+Xx+amoKIQTDuJqmQWTCgyPEQGxjsAhLEp7n44gQQjDHyrICw+i5XK5YKkGkOY4DGQV0eaVSiVJ68+bNzc1NyAEgYJyenl5aWur3u2traxQRTdNM0xwOh4qiQPMFnRE3wWDI/u5B9AbpzGuD8PsG5/f8OsMyEzT1IH4z6Tkxwgef96BMZxK6iPxj6CtCCNE9rWkcxz71wzAkSZIyQkLIYDBIkqRYLILPuWMNWJYtFouVSgW4OJ7ni8Xi9evXwSTmySefzOVyOzs7cRzPzs6+8sorP/3TP/2Vr3xlOBx+5StfyWaz0JacOnUKHDc2NjbOnz///PPPv/jii8eOHauW5ampKUEQBoPB5JeNogjEk+CGlMlk2u02kFEpI8MyfLPZvOuuu9Y2N7PZXLfbLRTLAPExLA9eYFEURYQyvGA5PqU0CILhcCwpOqK42xsGQaDrumvbmUzGGo+zht7v97P5oizJU4WsYRgsMx4MBkDEgSAOXEwn0FqhUFhcXNzd3YWxqdXVVYZhjhw58hu/8Rv/7b/9f+v1+tbWRq934tixY3ES/P7v//5P/9Q/ee75pxuNmihrYBEfx7Gqqjs7O4VCYWpqKp1Ow+RELpe7ffv2aDQChvDo0aO2bX/2s581jPTJE6fuvffeVCoDLDbGmOdZjmM4nmVZ7Hlev993TB/YAlC0wwaEidceqJ2q1Sos/cxms+B1FIUBJtSyxiSJDMOA758Q2gzDwEPZnhsmcTAeZVPpubkFTVO2traGg0EulylXK1Hox3EchEESx0EQ2LbdarUIEsHwG7AZgB5gqVu/3xcEoVwuC4LQ7XZhsMF1bdhrAsvhGrs7iiqlUnqpVIrjsNcbRIHHTAgTuCX7K+y/d6ghRL/PH0Lp9/yDD7R28DfcJmzkwQIVfqvveXtjBMJt4roPNi1wfCKEUqmU4zjdbpdSCjO7nU7n0qVL169f7/V6oihCoQJzIYPBQJKkRqNh2zaUsul0OpVKXb9+/ad/+qfX1tba7bau667rttttcOOzbfvOnTv1en08Hs/MzKyvry8sLFQqFUopOJRIkgQguO/7d999N/TuSZL0er12u82ybDqdhuzEcRzPC6KiqKoqy7IsqXCJ9AejbreLMcvwQm84ajabjuPU6m3Xi2TF4AWl2xv1+mPbcnPZQiaVVmVF4HgWMzQhSRSjOGIpAc8ymI0ihMCGKVmWJ9lGVVVVVZeXl2dmZgByJITUajUwtD169PCf/ukfv+1tb+n1ek8//fTi4uLKygpJ6JNPPv2Od7wzjgkgFjs7O71er1KpaJoGwjE4cLe3twEygfWPCKGXX35Z1/W3ve1toij6vr+6ugrAI/ArcAUqilIoFFKp1B4Jwe0tWoGFH2CYnclk4L+8fVNWjLGqqtlsdk87NhzCp8yy7GAwaDQaLMtC5QxpKtm30Od5XtE1SVVYlqeUIox5nk+lUpRgkH
lxomgYKUBcyuUyDGHDUCtAsvV6HRLy7OwsXCQgW7t27dorr7xSKpXuvuecJEm9XgdgWHCXRoiBF8+BehMsFSZw6D+S2f5nMyE94OGNEMTa3tTFngnifkkJqXIiVqb7S03f+JiTCEQIkYRAeMMlJXAcSRAhxDRNSnAmkykVCoEfbm5uBkFgGEa5XISTu9PpQJsUJ8lwOCyXyzdXb9dqtVKptLq6WiwWFxcXl5aWMKYvvPCcJEkTXfVTTz1x//33y7J44cIqpfTYsWMvvfTC5ub6Aw88sLu73etsnjhxwvMCUZQNI2VZDiGIUkwpVhRtPLYEQarVGocOHWIYjmHQcDgaDod6OlWv1w09Xa/XFU11HI/lBNd1CUGypnueV6814IrRUsWg00+Hie0GQehTiglBDMYIMYIgDLq9YrE4Hg3K+RwlSUpVhsOhIAi52TxMuIVhOBh04K2gFMOCIZBGbGxsrK+vT83PXrt2LZPJVavV1dU7b3rzw7/9n353cXHxyJFDtmPevn17YWFhNOrduXNne+v4I4+86cJ3rgNHBzRgsVicNEL5fB5yIEIICkXLsmB4GhZrt/iW43hTUzNhGAKVHwQewyBFlQWBkySJ45hqtQq29teuXYPyARbI8DwP+6HgzFUUxbbtOI71lOE4jm1psixLkiSKPMyXDofDbC4ny7JpuVa7PbYd6OiSmDMKBsuyOzs7PM+zLJ9KpQjFzWaz3W67tiPJAsdxDM/pvJHL5TlRdl03CAIgKrPZrCzLEIqwvNnzPMdxQBMWx3F1qkJo0mw2u902x3HpbLpYLCqKVK/XQbyZz1QZcCKBKhlk+5CRvm8m/Ed7vzfeXmc3ivadvA+GEwip4Jbsj/9OforZX839PeMQhjsnTSbk8yiKTNPEGKdSKUVRLMtqt9twVjEMp2lGqVjJZvKU4OFg7Puhquq+Hz766FsXF5cNI/2D73nfwvyS74Xnzt7z2GOPLSwsAEp+7Ngxy7IWFxfDMNzc3BwMBtPT0/fee28QBIcOHWIYBnYh2FAWWlaj0dB1nRCytbWVSqXgHQD6aHLk9YfjMCaypFqmE8exF4SEoMFgYNsuQgwvShgxruPZth3GEcPxHC+32v16s2Watut4hpFKp9OSqIgcjwmlhPAcoymqoWuKLPEc2+l04jieXLXj8RievdfrNZtNGBQE6nVra4sQsr21kyTJ5uampiuLi4tXrlz5uX/yc6+88vJHPvKR7e3tqakpURTb7e6jj7716tXrU9W5SqUSBIGmaalUqlarwR1BEFqtFs/zsLnJMIwTJ07kcrnl5eVqtdput5vNJrARSZLYtg1XsCAIuq5jjLvd7vb2Nqyd2N3drdfrvu8DTGIYRqVSWVxcjPcduyH8IJ9D6wtoja7rULczDKPrOrAacNzYtg2QgaZpURLHCXEdDySvlMEIMa7rmqYdhiHFyPdCeKPWtzZb3Q7LstBZQKSJopjL5cD2oVar9ft9WHkCg3v5fH5+fr7f7167dmU8HqdSKeDGUqnU2tpaq9UKgoBlWQ7oO9gSDqAcTK8BG/7GG/k+AjXy/TJhQiaZEGNM99drcyxLD1COE76LvMGZG2OM9nVy+A36OEVRkiRJYhoEQRRFiJA4InEcrywvi4IcBMHu7q5pmlB8+r7/4P33bmxs3F5d1XW9VCoRQgLL5Hl+Zmbmm9/85rlz5yRJarValNK3vOUtn/rUpxBhbty4cfbsWUmS7ty5Ax//9PR0v9+fmZnRNO3VV1+9cePG0tLSaDSqVqsnj8+3Wi1A+brd7vT0NELIMIxSqbS2tpbL5VzXnZmZcRwnlUpBEZXPF0VR5gTRcV3DMKIwSSjxPIfheMMwOE6IEsLygq4osiw7XgijzxiRbtc2NK1YLC0tLASe06gH5XI59IOpSrnVasmybA4HwKr1ej04bR3HWV4+BGjecDj2fT+fz1um4/s+vGxZ17e3t48dO/H8888vLa4MBj2IgW8//tjHPvax//7f/yafz73jHe9YX78zHJjf/Oa33vLo27/61a+2222e503TBPeT8Xi8vb2tqirLsuvr6yzLZjKZWq2WSqXOnTt3/frN0Wi0srJiW47r+uAr5zjO1NRUsZiXZTFOIteNZVlOpw0aU9gZjDF2HKder8MehGKxCP1YEATgVgqOWITBgIRTRZVlURQ4QgjYMsRx7PoBZIUoiliWzeVy3rAD3WahUMimM45rm6bJUCQIRNdTsiwihDzPkwQRbNQ0LdtsNuEFm6ZpWRYcIkmSpNNpaHwIIaIoIoQ6nU775nVRFGdnZ23bxBiJosjzrOu6umaompJKZSRJYqANAGAKMilwnRASE5UdSPhBb4335TITRAcdMKR5HfQCPwVSXShB9+3rE2iH4CvwzaAURwfQ1NdlxYPPC18EWhxsmuCVZLPZlZUVhBDP8/BSM5kMaDJt2waaAboX27b7/T5YfZqmOTc3B93a5ubmO97xjj/6oz/ieb5YLNx333lCkkIhPz8/5/ve2bNnwjB4/PFvLyzMLy0tvvTSi7qu3XXXqfvvv6/b7Vy7eiPwo0p56tjRE/lcMfAjluE5VnAdX5ZURdYowQzmlhZXzLHNMvyRY8eH45HtOppqYMQ2W51mu53N5hiOkyTFcbz+cIARSwm2LRdRBsqWsTl0XVdRJI7j0oaBSAzvjyLteQtompbSNZEXqtUqy7KmaVJKOY47c+YMCHemp6ePHj1arVZBLQgrZY4dO5bL5ebn51VVDYJA05VUykiSaGFh7uLFC3NzMwAtqqq+traxtLTCMsLnPve5H/3RH22324VC4cEHH7x48eLq6qogCDBr1mg0isViNpvt9/vHjh2bm5uDdjEIgq2trd3d3Wazee3atbW1NcuyRFGs1+vgr/O2t72N47jDhw9nMhlonIrFIqQv+E0Hg0Gv17t16xYE4UsvvQTX1UsvvTQYDBYWFhBCOzs7/X7fsqyFhQXYMgJ8A7AUuq4HQRDHxLZdyI2dXtf3AkVRZE1FCGVzuZu3VkHeOL+4sLxyuNcfbm1tlUql+fn5Uql06NAhOMcVRaGUguJ8MBjU63XgXTDGsHc9CDx45RPkH8qTRqNx5846BwnE8zyw44dCEYKBHoBVDpIKZH+SEL4f79u0TWLvdZkKIQQLrhFCKNkzp0n2K15CCED2EKLfO89CVkTfYwcGtLmdTgdjLMtyOpUy9LQoiqPh0PfCJElUWdZUHTp427anysuO7S2tTAFq6vvhoUNHKKW2bbdb3fvuu880zZ/4iZ966aWXgyDa2tpJovj+++8Pw3B3dxfeh5s3b169enV5eRkwgGKxePTo0Xa7PRwOWZYle+u4Atg3Au8tnItgz16pVKDesyzrkUce2djYkCVVFOThcNgbDAihURRt79YhScYJBfyAMizGOEwIolTgMc/LkiAQksiiGCfhaDQKfJfnWVmWTWscx2I2m5ZleWdr++jRI+l0uqf3wzCczO9kMhlVVWGX+Pb2tq6lYAvA3NzcPzz2mG3b5XK1Wi0DrzUej13XEQTh61//+vz8nCiKzUb77rvvvXjxI
kDzn/vc537lV37lC1/4wtbWlizLKysrjUajVCqxLBtFEYjUQNwzHo+NlCgIQr/fZxjO9wLLsmR5T4O2u7t7+PBKGPpRHNbrdVmWV1dXdSUNvRbGuFKptFqtdDoNkqNOp8MwzPz8/OHDhweDwe3bt69fvz47Owu7soHYWJifNQxjPB47jieIIsMwYFOmaZrvB61Wa2VuGt7qJKF7xENCMINAqK0oSq8/PHbsRK/X6Xa7mqbdvHEHFshSSkulEsdxpVLp1KlTL7zwQq1Wu3PnDsdxkBLgF49YDiG0F4E8g/GeFzjHcZQS3/d9x+VA1g3pCMpCiDEYxp2E3yQYGI7FGCOMKaEUI8wyCMFWi/36cxKKeC94IHHB64Awg8echBP09HuN3/cpdyEI33iDQwSOeSgCoQsKgwCuLU3TGMwAGpZKpXzfr9frLM+xLNtutyGBW5YVRdHS0pJlWXNzc5TSy5cvA845NzMLGCxUQZDSp6enC4XCqVOnYMhYFEXQl/d6vbnpAqV0Y2MDnhGKGZjDkiQJIdRoNFRVBfB2Z2fHcVxZVjiOb7bb7XZbllSCmGazSQgSJYnjBM/zhuY4iiKe58MgEnlGV0SYiEMIqZLIUDI2+yIPSkVeiiRF12SBZ1mWIApshOu6lUpFVVXgkeEKg8sUpAUwfdNutw8fPnzlypWtrS3f93d2dgwjvXJoKZ3K3rlz5/r1a2fPns3n841GY2Fh4cknntZUI47jx5549vz580eOHPnSl770lre85fnnn19cXLx9+/aJEyfu3LnTarXuuuuufr9/8eJFwzAOH7lHFOU4jiVJsS1nOBxLkmLb9uLiYq/XKxQKQeCNzdHm5mYul2s2G+qCkclkEELD4XDCT0y2VhiGsb6+XqvVXNc1TbNSqWCeW1xcbDbq5nB06tQJw9BAE+d5nqKqhUKBUGZ7tz4ajRiGTafT9MCuFJ5lESIMw/AsF4ZhfzSemZnp9/ulUmlra4fn+VRKh1UZkwJwZ2fHNE1JkuBVQUqEOQe4VFzPhlIOKGvAxnzfL5VKe85dUciA9T+cRhPyAHKgJO1NOaD9Bb2Ag4GwBgQuEwXpweyEXkshQEUKrm8QM8neZOl3d4lC3QgZ4/sG4Rt6RYwxFJPwOgEEGwwG4/EYXhuoGQBtB3hwa2trPB5fuHABpqenpqZgqQDAbouLi9ls9ktf+lIulxuNRqdPn+4Pus89/0ychKLEExpLslCr7zSatfvuv7fX71y6/MrxE0c5nrl2/cqdtdv5QpZluSQhvV7fdb1isZTL5QmhSUJs2wnDqN3u3L69Cv8VhtEzzzzrBX4Qhbbj9XvD8cgK4ihJqO+H3V7Pdf0gCMa2ZZl2EhOeEziOUyVOUwRdFQ1NSulyylBVReRZNp/PMQxmWLZUKRaLeYJREEeSqoCJo2VZzWYTAAxIRLBPAiGk6zp4TzSbzdu3b0N97vsuTCclSYQx7vU7mq7ANvlisaQo6vr6xqFDh0ejMULonjMn/v7v/17TtPe///2bm5uQKMbjMQiaofVKpVIAijz77LNra2vdbleW5XK5XCgUyuUyyNa3tnZeffXVXq9HKZ0Qj5ZlVSqVYrHYarVg5IVS2ul0SqUS2IXA3L0sy/AsIASH2srzvNFoBA0LlLWe50FdCtbvuVwujvfHnCklCGHMchzHi4LAi6PRaGp2jlJ64cIFx3V1wxgMx9DJTyQKiqJEUbS+vg7g6vT0dCaTiePYdV0QRUOkQZcI7wYMlEwwPN8POJAdQBRNRnUppaD/gnUue+N/DCNJUpTEE2QPHXBkmoQHPbBoDU3I9EkiZRj4ItnXKDAsi/aL2+8Xgf/IDU5HIFrgn7AVWVNVx/ZGo1E+m52ZnpVl+fbt291u1zGtYrFouw7PC1NT07pujOv1dDodBtHi8oogiHGcJAm5c2ftLW95y/Xr16empkqlUjabvXr16szMzF133eX7/pEjRyBv9Pv9s2fPXrp0CQ6U0WgUOAz4iGqatri4OB6PG40GnFxra2sAaguCcPPmzZs3byqK4nshALmj0WhsOwQzcRxHMfG8gFLKcDxGDKWUZVlZ1QzDSPFBkiSyxEkSzzCMqkosL7AcVjU5IVEqrXMcJylyGEcJQaJiI4SWl5dZhltdXfU8b3FxUZaF3d3dMAxd14dPCs7BJElkWd7c3HRdFxa1m6YtSVKSRLXaTiaTYznc6XQAg7l9+87Zs2c7nV4YWblczvf9mzdvVqvVBx98sNFoDIdDhNBjjz0Gk6xPPfVUuVyen5/f3d3t9oaqqg+HQ9u2VUWDaQOO4zRNO3r08Hg8FgQum8sAsRkEnsDKuq6DjAbkDdBTAXjW6XSKxSKldDgcFgoF8Mt48cUXc9nM1NTUnTt3Dh9aPnv2bLfbLRbzt27fbm9uKapBCIVytN/vl3IZlucwhQsVrDlpHMdCSkSONR6PeUn2fR/qI4TQ8ePH4akNw4CZr0KhMBgMQAsOXY+iKNls1nXdbrfLgq2TwKuqIssSJhQhBEQAFJ4izzOAhU5oBvgPSE2QqSck/uQ+2AQAYDPpEics/Ot4eXxAU3bw/p4V8X7gJUkC2fL7BdtBLuQgNcLzPFSzGGPDMDKZDMdx4F8IuRoYKqCMwXEA9GudTofjuMuXL49GI9/3M5mMKIq7u7urq6tQm4HV74ULF+C0k2UZIvDVV1+1LOuZZ54B57Vut3vr1q25ublqtbqxsQHKDwAAAO4CxgwUKrlcrlqt9vv9GzdumKYJJ3er1Wq1Wp4XhGE4Go2GgzF8ojGhECGCIPGiJEmSoij5jGaokq5KmZSWNlRZ5FVFyOdScRzm89lsLheEoeO6upHOFvKIZcBPVZKkkydPTk9Pdzqder0+kYkNh0NVVaGNgZN7cXERWp3xeOx5jm2blUrl2LFjSRKNRiOWZa9evYoQk81mLdOZnZmP43htbQ00Ky+//DL0QhjjZrO5ubkJEjPf92GmwTCMI0eO5PN52HELmC20XpIkPfDAA8ViEa6chYUFQRCAlAemvlKppFIpjLHv+2AGeezYsePHj+dyuV6vB7MdCwsLmUwG9AkQJ0BOCIIAfCnP8zDTwHGcqqqQMH3fD6JwcmH7YQg6R5bhrl+/Tgg5duxEqVTZ2dkRRTGTyST7znfdbhe6tn6/r+s6MHxQf004TCgHYMkUqA5YlgHgc1JCctCoQCKC4hCCagJpwlNCYnUchxV4iEiIWLpffzL7lANEzOTvSfV88G+gKKDEBfH2JJjx99HqQPzRN9zCIICjBarNIAhsy4UlM+VSFTJ5rVaD6jqVSvXaHdM0OYFvNBrgiXT4yBGEUKVSeeGFF5aWlp599tluv3fixIk/+ZM/KZfLLEMxxnfu3AGt2bVr16ampkCjdOPGDYBkFxYWer0ex3FLS0uBY8JusDiOYQ0YQmg4HEIPBt5EzWZT1/VKpTIcDgllbdsVRZHhOUVRMGIp8jmOS6XScBQqisZwPFw0iqKk9YTDSNOUbCEbRYnrewylvCQRSiVJ4gU2SRIvDBhekHmO5bgkSdbX11mGu+uuu1iWrdVqluXk
83lBEPL5oud55XLZ90JVVXu9HkJoJpsHqQqlFCaJYB3AhJTb2Ng8dOjI4UNHn3322ePHT8IvNRqNADP81Kc+9cu//Mvf/OY3BUEoFAoLCwsAJ05NTa2vr58/f363tgb1fxi2NFUH1jSXy21ubnIc1+/3NU3hLJZlMdTJQH9blgWBBO8q7KPfc0vhuHK5HMcxsIsLK8uHDh2q7e4Mh8MjRw5RSu/cuVOtVuv1piTLxWIxTlCz3Y0pMoyUoamjfhf6TEEQWMxQmvA8jzELrAb8V5jEw+GQEEQI2djYgKMf5EeapvE8n8lkQFQMM+hhGAKRKIoilsUkSTRNg3YRkoFhGJqi7knE4piDoVtAESBLEkIAj4JClNnfiABYKIcFfEA8DaFMD3iuTcJm75/7ghioSzGh8M1kz5U7EURuwrazLBuR8B8Jwu+ZEnme51gBTjUWY1lSK5VK4PsATrpBQFUE4nqe50kUNxqN/nAA2eDo0aNnz57FGN+4cSObzb744ouu6545c+YLX/hCtVrtdDqiwELbOTs7e/PmzVar9ZGPfORrX/savKpUKrW1taVpGihFs9nsnU4Teg/QoKVSqUajsba2Njc312g0ms0mIGPT09OtVuv69euipAFiKbCMIAgCLzmeixBSFBWzrCBIHMdRjFiWVVVV11KKbEYxr2pyNm04XuB5XpyELOIzmYzr+mEYCrKEMe72esAL67pMCGk129evXwdameOETqdTKBSq1elGowFw/9TUFHzKgADBwKSiKNvb24DrgMIWChaGYQzdWF/fnJqaATQBdF4XL148c+bMl7/85dnZ2TiOa7UabC5ZXV0FTAuwrvHYymazSULBANY0bYTQ0tISGOqk02nPd69du1YoFFiWrdfrPM+PRiOGYer1uqqqhJDhcAg6xFQqBfjq7OxsvV5HCI1GI9BwYk3r9/tR6IP+RBAE0zSDOJFlOZPJ9IajIAhUQ5+MXOwDkHvXJ8gzlheP+4F38+ZNXVEXFhYGg4E56ubzeaDmARyBTYnXrl2D6hQkPkAhIoSMtErI3sYxSGxg7gZyc8dxPNvjACjhedEwZHjLRqOR74eqqrIszzCwihC6VsQwXOhHlFIGs4gimiBCKUzThn4E6RXtV9aQ9CRRRAjQ14RhGMSwhJAoTliWZQQREeJTiqIImsMEY4bhv4u+7IcZhC6hFFOGQSxlecqyCUUkIUmEKEGUp5jhOZ5HDI4Z1o1oJl++ub5RLlZK+UKhVB4NBoVctZDLf2f88uKpo47jPPfcc8tLS+ff/FCn03nh2efy+fz2lUvj4SiXyzXr9ely2bXt6elpJ2ZGdiRpua/8w+NhEOi6vrZZ833iuNb09LTvRa7jj4bmsN9fWVm5cukKIwiBFxSL1WazoTZ73SvXS6VSwrKXrl8PEVnb2WQYhuXw4NowSRIlo8KHbdumruuGJkVRpIiI4xhNx5IkmOYgpaY4jsNMXCgYtdotU65WlpZrtVqW5Ub+iFFEXdd3dnY4jsvlclHs6hrX7/cX52cty+p6fXXq5Hg8jmLkuGF/sK2qaq8/DgKSL+jTM/NBQFOp1KVL1xHm2p3hPfcsl/LGhjloN3YRZRYWFk6dOJ5Op/P54vbuTqfb96M4ZujXv/UP73vfD9398Lnd3k5WVAlJWMRjgo+sHFlbW2U5rKtCNiMHvsJgt1SSZTlIkj4hvdXVF0msRn4UB3EYhnEQMgzIU8xuuwYkBE3iwPV0RRc5kec1XpQ4jvP39nCozXZ7bm5OlOWYkFQmE4ZhbzBA+3i7IIoiSiRMsMT3HTOJWdcLlg4d5nl+p9n1YzQc2wRhQRJTuuF4rj0eqxzruq4XBFA6chyH4sSx7Ewmk0llMWYZzJeK1TAMR6bDi0q1OkUpDYJAVlWEUKfXa66vb+7uMhybsAxFtNbvIoTCOOR0FSGUuFE6nVYEmYmIyolUVuM4tkdjYG4xQpqu7OGckBkB6wcABh1wfDoIS9LXTkJAEZvsOzLhA7Js+B4oMidfwfujSUDO4H0xDd2XrfLMa55iklTRa1mKyddlWZ6IeBiGoRjFcRwGcafTyWWzhmHU6/UkileWlqanpzutdhTFCKF2u/PAAw/OTE/duHEDlteCsAsg3NFoNDMz0263ESIcx+3u7pZKpWarHgVhKnX4O995MQoClmU7nfbM1HQURQgTVZUbjdpw2I8o1jTN9z0g/W3bjOJAURTTHCX7y8M933ddV5KkQiFHwphlsSAKsiIKgsCyDMOIkiSJEo8QyeVy6XQKqkGGRaIoRmGCKGNbriQqnhsUCoU4jkVBxhgH/t5ugl53IIkKy7ICL0H5BH0XxwkwkwFLOJrN5nA4hLmHQqGgaUYURZ1OR1LkdDaTxHRjazMMo51afX5+EZACmWOXl88Konjx4sV77r77U3/yx49+8MOdTocQkkqlbt26kUrrQeA///zz5+4+I8ui67rlUlVVVde1l5ZWEEKrt+qg8gWwHthUkFLMzMyAswHGOIoiMIPVjD2eSdf1dDqtKAqoT/HeykoPIQRN5h6CGLqpTJphmGazGUZJThCCIGi1Wmh/QgAAkCgOQMsFgQcVH3RuwP3AvNJEVUcphSlE2+ahHICUA1VkGIaO58IgIsGIZVkY2en3+yk9tbCwkMvlYCEHIKIw5k/2bSu+q9sGIBXvr1Iib3CO2Ss4Dyi86WuB0El4TNLg5D488oTln/wsVLMHgxBsoPABpdtr4vAAjQgPwnFcRBJCCIMxyzIUoyiKoigRRRE0fnCgdLtdSZBJnICkUJKklaXlmdnpb33rW51OZ2lpaTDoqYpUKhc4jknpGV2Xo8iI4xijxPO8x594NW2kaMIcPXboiSeeYBHO57P1Wl0UuG63LbCcKIqtVst1XVlP+763vb0JA3u8wI5GI9s2NU2jlMRxjBmaJCLLYlEU8/n8sDPSdBlaeYxpkrCSJBkprVKpdDqtyRuoaVoURcVCmVIKkkXbtqF6BHAYYABA7W/evLmzswNTGlCb7fXP+709GGfBABFM/c7MzDAMF8ex6w42NjZs266Up6ampk6dumt9c1NVdVlV0qnst598Yjy2Tp46dfny5aNHjz765h8YDvscxzSbbccZO45z/MRRSollVUvFShB6rmsLAg+UPUJ7py2QCjAMAQWqqqqrq6sYY4C1QBzDcdzCwsKVa9dBKANsIUJoz7MvSUCmDzpnVVVBN8LQvYEEhBjgMwBukSSJECS6bhQlDKCGLIJtaoDQwk9BEwHCVLCTDIIAEBfY1SNwDEheoygCHkIUxXQ6zZljWMFdKJcopbVaLQzDdDrdaDTS6TQEpO/74Get63q/3wfIKooiDmJvgpccTEEH895Bag4dWOv7xqw1yX7wU/H+GsOD7eLr8uTrfnbyLAx+fcjBC9kT1hAKAFKCKORIAgMFGMOg0PbWFou5w8srkiA2m03X9iqlciqd2dnZev/7fiiKoueeey6TyRSyGd/3eQFHQXj06Eq/3z+0vPTKK69wPBPFYRSxsiy5flCtiMdPHBkO++Vitlar6caMZfLjcT+K/SRCmCG
KKooSJ6qqZVkJiUgYa5rGcVBoJAyDOZ6P7CCOIsMwJEnwPG88HqqaBAwSLJyK41jVRMPQUil9OOxLkjQej8MwIoSCMaksa47jVKszzWZnenp6a2urUqlYlitJqmk6sqyVSqVMJh9FkesGk7FXGFcfDsdwYQEWWigUkphOT0/funWrWCyGYQyCzJSRKeRLXuC32+2NjQ3X88rl6k5tt15rwkVpjseGob/88nd+8Affe/XlF+fn5zudVqvdmJqumKZpmuMTJ05Y1ng0Hhw5cmRra8tx3HJpCiaGAckAJg3ADCBICoWCoihwbiKEANj0fb9arcLFDSPae6OVUZROpwHlt20b1JSyLKfT6SR0tre3GYYTRNEwjChKgiDQVJ1ihDGOknSSJAzD7YnpRc5znAl9B04zEBuCIMCAL8wlA8zOcZwVeCBPg/9yXVdV1ampqVQm3Wg09mYgHafRaMmyCB65vu8DNi7LMsxeAJ4EhLnv+1xyYICQ7pN19A1q7O8ZhAcjCh/w6p5Up5Oget134v11vwelp1AYJ2EM/wsxt/dEdE9qAzKcPYkcgdM9Yfg9/yiQvQPLAnyApuhhGEZBmMvleFZot9t2y3vzm99crU5/7WtfiaNk4dBcp9MKI19TZKqI6YweeDbLUZZLWBbxCRYQxjh55KFzzWZzZrby9a989dixo9VqKZdNGbrU6/Wyad20RoQks3NVz/P8gJJYmqqUu91uIZ/p9/vzc7OKIq2treVLZUUSx+OxoasIoQFNKCGKIsqyrKqwi5fyPKMosixLu7u7lmXBpQZiJkIoy2KW4eOIiCl5FJs8Jwq8xHOiJCo8JzbqrWajvbKywrGCqujj8dixPVg6e+AUQ5IkZTKZq1evchzHsQIA+js7O7Kslstlc2wXSsVjx45du3ZtY2PzuRdeyGazsqz2er2nnnrq6NGjkqr0+31FlvO5nCxLS8sLhJBcPkNotLCwcOnSJZCh1uu7iqKwDE8JZhkxmykSgmDPJEQyNMOQzWCeExhtz/OGw+FDDz2UzWaffPLJytQ0HKmGYciybJrm9vb2aDQ66GEBVQBsNfYdFCc08t29/MYSjDFJiB8GlFJJEBiOgyGjJIp5nhdUFUga4NtgYA0OL7gBfIgxDoLAcRxEYphLBE1yQikMyHd63dnZWcdxVtfXhsNhNptOpVK2bS9OzeD9AUUgfvr9Pmg5AZjxfZ+baD4nMQOB8T1T4sHK8/uFKFSVk4RG91esvS5c0f44xYTtwG/Ie9+dzKB7gc3iPRRr0hdO2MiEkCQhFKMJY5lOpWRRkQXRGps8zzOIQQgVCiVFUR577DHbtqcq5bW1VUkWpirldqdVrU4FnqPpUrvdOHpkpd/v+76YYtVut5vP5wSB73Wap8+cdBzn1Mmj4/G4WMrSJNB1PYrdMAhkiQ98p1wpt9tt3VD8QJmdnRZFvlIpMQwSRb5SqcRJ2G4LkiTB+E82m0UkkCReEBmEEGYEhmEUVeJ53vP2WiNJkhiGY1m+WCwLgtBsdCnFo5GZTmeHw3GpVEkSks8X4zhOp7OQ7rrdPlzf5XIVPl4Yb6V0z7oSnDI2NjYWF5Z7vR7IhqrV6TiOR+ZYVtXx2NQ0/dTp09vb24IgbW1tzc7PHTl6iGGYbqtdKJeazfr995+nhJw5c/prX/saw+BcLscwjKIohw8fZhjGcbzp6dlOpyeKsqrqpmmXS9O1Wg3IGzhWgIGwLGs8HmuaVqvVZmdnT506BaPYkLFBZAs6FZiEgDxJCIFhImDhEELAuCaRDxew5wWZPEmn05lMptftA6tBKRWQhDFGiDAM4hisp9PAHwIpALOCwBTE+z6A7D79RgjJpFKUUhiCyefziGHa7fba2lpv0IcNNrBSBQTiPM+7vg98NScIcRyPTNPzPEGSEEIJpQQhijEHsMpE0gk9A7yIN8bM98VLDiRJdKDr20to+3cmPwhp8I2Pf7A0nXw/hONeikbfPQWYPbUHm1AaRRFFCGNGEAVBEHhOgEYIIZTJZGzTGo1GumrMzc1Vlme/9a3HZVm87577nn7mSde13v9D7+1229lsWpVFa9xPZ/TQ5yqVih/YLEdZOR1G7mjcO3H02Pb29r333n3l1Uuarlj2iGeYSqWMGSqJfCGf5TjOMLRyOR+GLsa0VCoUi3lVlSVJ9H0/k02xHHa9EM5soD0qlYrrdDmOYzBLKWVZDGq7hETFYjGKYkIIy/IIJXFEctkCxth1dwDq4nnBth2O4z3PSxLS7w8qlYphpDDGpmmBlnVqatrzbADPgMICkg0hDFMOMEoCLwk6GdcPJEVpdTqAkfh+yLJ8r9crlktHjhwBsqGYyzeN1Kg/uHLp4nz1raoqE0Isy+p223Dx9fv9fD7veb7juCzLBkEYBCEh1Bw77U57coGBJAtS1uLiIkKoWCwuLCyIothut0EDDM2FIAhQ+4FZ4OzsLOQrqF0RQmEYAlgyHI1Gls3zPGIZvG+8AMkKwsz3XSZkQt/HlAocN1kxAHwDNJzgAQNT/JNrG65hoAEhg8my3On1oP3TDH1nZ0fTtHQuKwgCkIRHjhyxBiPQS8DqX3hMMDqZpBAO7atV6D5bckDl/QbS77Xhhw6AMcxrNwpOMuf3S55wpOF9/hDt94fca2vXvRimrzkC9jId2ntJe6o6llUUFSAsBnOtVqtYKCiSCv4uiiRlMpmpqalv/P++8cADDxSL+e985zuzs7OVcmFjY2Pl0KI6Erq9JkJUlZWlhflWuxEEXhgGkhBpspLPZAWBO3r08LWrry4uLtTr9WIur6hSJpMZD0eGoR09egQUxmPTK+QyGONUKkWTiGdxEgUpXc0cO04pHfb6uqJOV6c4zLiuOzc9U28G8BHEEdk/nmLfT/K5IsaM4zj5fD4M4uFwGIbhcDhmMOe4Vqqc6XUHqqrWa02e54fDYafT1VQjiqJ8Pn/k8DHbtm/evNnvDcPIgcwAOw/hpIvjGGY7RqORoiitVmtxcVHTDN/3VUPN54sXLlyYm5sDQzRZlkF5t7a2Zprm/Pz8hVe+c/jwYUWVdnd3vvjFLwJvBnlDUZSdnZ0oinTduHLlSqVScWyPYZhSqXRndT2XKxQKBbiIofWChlDX9atXry4tLaXT6dXVVcuyNE0D2QogosVicWK+JkkSKJ+AexwOh81m0/M8QRCKxaIsy8Bk5rJ5gRfbrU4cJbIsFwoFsCYALCQMfUIIwyAoNwghE5UYnA6T9ZtJksDlBiprkFhDyMApwHFcOp12fU9RFIwxWGal0+lqtVoqlUgQEUJAb2AYRqFQGA6HMMQMyY9Syk2OyQlGAo3j5KKn+1bce4Xfa123mQMzfnsXUxwn390hwUymePEBZzS6b6t6MG73ytc4+u4/J3KcPWkfwhShfQAJMyzDMK7rKLqm63oUx1EU27ADIEyAWZZleWNjo9FovOdd7zq8cuTXfuVXT993zrIsjGmlUomjwLZtTdM827Ftu1QoDoZ9MP81x8NqpUII2W1bMJ7PMAxGKJVKpVIpEsWGYaiKxPN83bY1TYvCUJYkeKNqtVqlUi
kUChMVMshlgiDIZrOAts3MzDSbTdd1YcXsVHXGdd0giDiO6/UGmXQuiiKGYaMoMU2TJKjX67VanSRJ7r/vIXCaAhwCBuRv3759+vRpkDgPBoNcLifL8tTUFAwKViqVTCYzGo1qtcbhw4cFQbBtR5blbDbbbLRXV1dlWR4MBmEYp1IpNa15gZ8vFlieI2EwHo/7w0Gz3QIkc3F+IZPJnDpxkud5nuUW5uZfefn5EydOFAqFo0eP3rhxa2Fh4cUXX+Q4vt/vq4rOsQLLhjzPO443Gpm6nuJ4zvM8sCmo1Wqj0ejkyZOrq6scx2WzWfDC6Ha7hw4dAsxjt94AaX4URZZlgTdxv98HZrzf7zMMA6PS4M3F8lwqnQmCIEpi1/cUTQ2ikCAqy7JtWqqs8Cw3GAyWFheKxeJwONxY34aJwU6nw/N8pVIZDAYXL16emqpM+jIIEKgNAUEFNQtCSPI8mF3mRQFaTdt0QUQOczPrd+40m81isZjJZUHR1h8OEkpEWZqoPrmD2QwfgENfUw0eiJZJz8rsD0BABTXZWU8P+HbT15KKBzPkpFJ9Y2pFb7jBccWyLMdyUDZMXmShUPCj0HGcOEkkSVY0led5kiCMsSLLt27dknjh53/+523T/PVf//UHHnjAJYHneRy7t2zD9VyUxEkU6JpsWU6r2S7kM7MzM3R62rLGV69e5dVCFEW+68VhlMlkioXy3PRsxkj3e90wjEVRLuTyoW6AvVoURYIgwTaLCxculMtlx3EGg8GhQ4d0PdXpdFRVJwSZpk0ptW3XspyxOc5kcq7rc5yAMR+GYcrIQMWYzeYHg4FtueVymWX5MPSKxeLNmzeTJIFuCopA0IuEYWgYBvhwN5tNjuPAz+Lc4jmQXGezWUlSJjsFwPEJlD2gaEWIGY1GyRjZtr2wsLC5uanreqvVAvkYgzBYg3qeQ2mSzRZUVW42m7lcYXt7d3n5UBTFjuPs7tbS6Qyg9o7t9ft9WK6CELPfX6GJ0hI6uunp6UajEYbh1tYWyI9g4pFSCm9ppVLpdDrdbrdarYK4FOb34VxDCGUymYkquN3tybKKMTscDsHzBiHU6/V6ne7c/OxgMJBluVDMAz+0ubm5vLwMCieQlUHufeSRhzY3N+Eih1IWFqIwDAM6GFCTMwxjuy6UpnrKmHAhuVwO7PwAHYUxRXD6msxIgGo3n8/ncjluEn7Qp33PGDh4m5BOUEBCKsf7HOAkSU6+n933U5mgNXRftobeANVM7h98BLTvZcjsewTvFbqIYow9z4spYRiGZxiouVVV1VSD5/lbN28KnPhPf/pn4jD6+7//+2w6JwiCbbvmeBiFviQJlJLxYOj7rudr1ljgBZZnBUJw4Cc8z8YBiQOipSWRlyml3e4wDGOGop3txmDYy6YztmVxDKdpKUqJ53kkIiInekHY7/Y0RU2lUlEQirwgavLudm1+nuu2e7OzsyzmXNdlGCbyiTVyZSUl8MpwMK5WqzyHBv1xJpOJIrK9vc1xQqPewhjD0btnGToaRFEEck2QhmUyGZZldV1nGGaCrQPiBxUUDK1PGGRZlg0jxTBMr9cDYTpwGHFMNjc3WUXp9TqZfK7RbmU8z3MDeDrXdiilYehHURQFwXg4TKKIxFGpVL506VK/PyiVSo7jYsTxPI8RyzJ8KiVsbGyUSiWMcRQFkiRgTEejMcMwpmnW63Xo2ZrNJhR4pVJpdnaW4zjwdMrn88vLy5/7/Bfife8vKJ7B9wmsQ8DWudvtQo3tOI5p26qqIoZBDMdxgqqqQPQnUTwxblMlmedZz7ZK+Vyr0wfBAAwWg0g1k8koigLPC9tjwZ8OITTodSbkPqgOgNXA+6N/iGVABrSxsbG5uZlNpYH/oPuegFCntFotCCWWZV8ThAfz1esgTbRvYMFz3CTXTUpQdMAl8WAjCzf6P2DidjDe0GvzMEII0dc4ke6/Howxdl2XMhhOQUKoF/imafpeSAi59957Txw7+cILLziWffTo0bXV9U6no2ZVOCw4jsOEgg+ayEuj0bBULGSz+SiK+r2RpkgYceXSlEcYSVQ5jrFMW+JlhmGiKMGE4znZc/oMclVVRYgEbsiyQqlU3tzdIoTAmFy73YVf//btO/l8sVFv5bKFOI6HAzOTyWQy2Xy+kMqqg8HA9yPPCxmGCQMShbRYKK/d2RgNzSiKGIbrdHqwGqHRaFiWPR6Pg9Cr1Wrz8/OixJcrRYRQGIZh5AehJysiZqhuqJIkwUQvAOJhGEbRuFqtEkIYhh0Oh51ORxIVnucBAlFV3XVdFmNF0ba2dliW7w0HYG3GZtjl5eUkiSRJEnneskzf9xkGHzl0uFbvZDPFSxev/vAPL4uCyvPieDwEWoXn+f6gm82lM9lUq9USJd6yx45jr6ys3L59u9FonDx5stPpbGxsgGfk1tYWz/OKorTbbZD4yLJcKpVgTl9V1e985zuqqq6srHQ6HYQQqDTBkBKQSVEUp2bkKAhBM81xnOcFgFjKorRb20mn083arq7rqqpeuHDhrW95S73ZS6VSYDkHF61lWRsbG6COoJTClFyyv1MMIgda0Em5QSkdDofgUMzwHBAwMPWyubVlmibQZjzi/SicjHqGSdxot/qjITeJwEmCoq81R5uUlBBRYRRNIu3gOG+0//XJ33vTum+wEj4Ipb4xJv/vwt6zSY4syw5877lWoUVGSiQSKABV1VVd09NLLm2bazPkrPiD/BHD5bddcse4NNoOh5zp7mlZ1SUgMpEytHTt/sR+OBGOAFDDDSuDZWVGRkRG+H333nPPOXc/CN/9qwheIeabeGqqbemzSZ6hF7VtRzcNzrng6uDg4J//83/+y7//1fDhoVlvoM6ZzWbd4y5jxDZ02zAJVZZl6Yw26i3LsnRNJ0pKwT0/6HX7URTFUS5yahquZWiyJLwkjJE4TAnRNM1k1JCCFgUXRZkkBaNmLWgdH/NHjx5PJpOrq+t6vZ5lxXg8bjbaumbmeTGbzYVQk8lM181Wq31wcDhfzuKocJ36/d1E07RGo8W5VIoeHh4LIY6OTgzDAIXK85yrqzf1egtHMmMM6lIooeGf32w2MXCL4xiiHsNgtVpts9lYlvX27Q0E7JRS1Mm1QAZBkKYpAI96vb7Oi5OTY2wLFsJYhZtWvWEYhu/7eZ5mSbIpcimlKPl6uaJErpbp4/Onv/nNb8bj6fHx6dXVFdzzNE1LsxhOH4PBwdu3V4PBACYmEKGT3QJ2qIGOj4+vr6+TJAEzBjSM+XzeaLUfHh4wi68k7eAw4I8qyxLsE6T0lO/Sg2CEcGVZwDjCMJRSMrUt35I4ztLYMLQnT57AcBH5EP0nak62076ynQKBMea7NgJV7pw7gafEaYJfVIwiVtG04wiQUlbkUFgfDQYD8GTey4T7RebHubEKwn0Xtv3f+qCrpDsmhNpx1vaxzSrsP45D+v5MsvoXVx6yrr5zy0FYQnISxXEYRo7n9vv9Rr31+PHjf/Nv/s2nzz87PDz8w+9+H3ie67rddieNQkJIQmlZlrqhpXFSFMVisXIdC6cmEYQojSgtS
0sldVNjZVbkiRyPZkRpusYUViApZtu+bdtE8LwsiNKklFlaHh+fYjEQISwMY8t0BgdHZcmjKLYsRwhVCxpxlMZR6tjlaDj7/uUPrVaj2+2+/OFS07TTk0ebzebm+r7d7sAKDRNexojjOPP5XAje63WbzaYQvCwLTWNhuJFS9vu9+Xzuug5jrNfr2rbtOPZ6vXry5DF2DD158kQIAYn98fGJ7/sAEjAGwKW5O+Otbrf33Xffdtrty8vX56dnjNFer2dZxu319fX12267c3xyuF6vb25u5jPe7w9OTx/94fff/OxnX/23//bfnj9/7jgWYyRO4DIqazXfsoz+QVupFqPmarWCdeJwOCyKAk0jIFPLsvI8H4/HWZY9e/as1Wq9vbl1HOfNmzeEkE6nszWeCYJmswl8FXJN1NW6rk+u7nr9jlJqOp1qlLXbbctyVqtVFG5++tOfLmfT09NTQ9cXi9mLFy/u7+/9em8+ny8WC0AvWP8MXQj6QDSEpmnuUhFFAoR0Tu2szDzPw7Fo2BbCcjQa3d7eVgbTkASCZ88YgzjT87xGo6Hvx8+7Cfs/XTSCZYfeD8cS6l1AumqP5oZgo7scu59OqzS7X9Zug20X+WrP/JeSvXHFDqqiCudlZjo23g4otaAF+du//dtPPvlkuVzOihJ/6tOnTzerdVFm5e7mB15ZltiedXgwKIqiWasnSZqlPE3KKIosU5eShWGqlCKEtdtd27TSLO73+2UhNWZoSouStOTCMKyiKDarMGjaV2/emqbZ7/am07ltWZ129/r6erMKPcdVgrSbHSXI7e1tHMab1WYynjYbrShMlWSlFLPZSimBq9DzHSF2K98MA7vchsNhq9XIssT3XSnlyckJpRQ80vv7W0qbZZl3u+2yLB3Hur6+evz4ERwA+v0+pm1gY+FqgAsBxuWaZkgpTdvCQR5F0dHh4cXFhW3blKnr6+ter7MlfNgO53yz2QzvH5LEXi7Ck5Ozv/u7v/v888891+ech2FRlnkYrYXgjJEwDJlGlFKHhwe2VXv9+jVUrW/evDFNs9PpYKgwn89RtmFXdq/XS9O01+udnJxcXl5Op9NWq4W1MHme53kOOAevBNt+arXa+cXjdrOVZnEcx1mSxnEsynKzWTm2VZblYrHgnJcajaKo2+1OxxNBLEoppgtkJ2rHEBLmVJjjQxcuhHDtLdcCjHZML9M0pRqD3jJo1G3bBpYLD3iYdHDOwVwDGIudGWDzvpcJ9/NVFRsffIFNRtVN29ud9EHNSd6HW/eDs+oVq8Rb/W7FTkD/VwUhbnSnvzQMQ5QcHMI0y8IwtB3ns88+O310dnd39+tf/eYv/uIvfv+7362Xm4tH55+9+DRcr7///vuzk1PHqYEuZJi67/tUESysB3WD5wUWGGqaFkcR8f3u4HATroqicBzP8wJR8ulkXhbi5PhQSpXnBerAVssjkuZ5OZnMfN+fTqd5noPIcnV1ZVmOZakgqK9WK6yCieOk2+3puu55dcb08XhaqzWKInvz5k2v15OSfPPNNxdPzsuylPKdA93x8bHrugcHB5hJ6rr+/PlzrEa5vr5eLBZPnz4FYDOdTrEUAcWP4zhJkgCTbDQaSIme5/V7A0rpdDrFBKLX6zlCWI4dh5HjOIZp/tVf/a9v37zWdDqdTk1TZ4w26w0gq2Ve9Hq9V6+Ws9ms1WoJIWaz2ePHj2/vbsJwbRiakKUQXNO08WQYx/H9/W273by/exNFEezlcRwsl0s4AhNC0MgBb4uiaLlcekGNMYadSrPZrN1uHx8fv3r1qpJrSilvbm5Wq5Vpmp7nbfL8YTSUXKAlBnmlXg/yLP3222+zOCry3HEs0zSzJD05OXl9PcRKXXhbVTJiLC+B3wKYt3BIytMY7DmkH3CPpJS+7zmOQymFpBs1s+d5wJ/Vzj20Cpkvv/wS7HnMxt91aHJPOaHtrF/IXn9ICNGZodSWUbYtO3dw5XbSKGHGxpQgQknNMqiUGmF0B5wKriSVmkbVbljJyFY6KKUUrrZT0BNFKN3iMqRRb6Bf94M604wwSiiljWYrz/M0589ffP7ll18+PDz8H3/971zb+d/+6n/57W9/65l297zlOPZ8MSWEtPpNachws4zjWCmla2o2Lcqy9AO71a7VW36UrheLhWmazKbrdGV7tuEbV5d/fPr0aRzzZt3UWS5IuViMGOOue7FaLe/v723bef78+cPDg1JqvV63eMM0vSgaep53eXl3dHS0WsetljUaTwGpfffDt57nFTwTqrRt+/io57mWFM7V1RUMLxazOS9Eu9m7fTvq9/tJnNu27Tudg+6jxWLRbvcuL6+fPn2KtHZ3N2SM9fuH63V0dmbf3Nw/fvx4s9nUavy3v/3D8fHxbLmazBdCiG+++/6TTz755rtvYZAzW85N01yGC8/zjh8dwVC03W7HcUgpdxpmr+WEy4fbt1YUhY7jXDx5dHV1tV5tbNt+9OSJ67qvLq/DMHZ9pWjy8vUfWm3vT9/+/he/+MXvfv+Pz58/H4/HR72Tu7u70XD5ySeftJvp/e3o4pyEm2Q6XazXa9j1Usvkko4m86wUpaLdwVFeluXdwzrNv7982263R6MJEO/1OrQsqyzFmzdXpmmHYbjZRJSOKaVK0SCoC6Fub+91x9hsNv1+P/Dt0WgUOL0g8K6vr03dKMuy1WrpjOGSzrIsSkulFMbu6OKAZAJHLcsSNpYIIaQBzlSeRkqn6yyOeU4ZpaZm6vYmDpM85ZwfHR2dnp6A+zadTmku83XChOJJbhOjzMrrHy4PDg5kUtrE0HSS57t12eTH5gQfNH67MpXu32H/nj/6nYqlUZEBKNm616gde1vt5UmAUVUyZFujDHX59u2Tx48ty7q5eksphb0vFOtffvmlZZr/9b/+1zAMP/nkE8eyv//++6IovJqDAaa7VwP4jgllDQabOC8x5WOMtVotVCB43xeLheOam3C5XC6Xy3m9Hriu22rVgsBbrqYlzylTSRqORsP7+zuUhZqp+v2+41pZniyWM9PSh6P7+WIahiFEVZZlFWXmerYiIssTSrTZbBLHcRiuKSW2bZZlrpRcLueu6/q+m+epEKVp6kkSTSajg6PPANyNx+N6vf6P//iPYRjCNRTGnoyxy8vLp0+f5nn+u9/97nmRYRlLvV5XShmGgcMb5RDZ8Y3QH87n8263PRqNgKm22+16vZmm+Xg8tiyr2+32ewfoIWezGWrgNMpRa8VxfH5+/vLly1qthkwSBEG1GhEjk8Vi4bouMg+KQMO2CCFQ/SA9sp2zPdwrw+Wq2+2apnl4eLhcLi8vL4+PjweDwc3NzXq9Bq4LA2w8Jnow6BVN00zTdDabZVlmBSZEEtbOVRATiG63ixeAtIlhia7rMDKEZAmFKH693qmz3VZpuqN5Sil/9rOfgVyOp4OXXKPRuPruDcIHbBsYGsHTiDEGexS9ipaqINwvSvf7wyqE9uOtusn3FUnVYwrYtwn5LpIJoZAj7TCb6oGUUlyW+79eVbjnZ2dXV1dCqfPjk1qtBu/08/PzTqcjpZxMJnB5
qSp4WAzAuZkxhrUQx8fHpsYty1osFmBvwrgFvwVcDsYzSqlms9lqtYKamSQJ1STR+GI94arBVbEKZ6XIKaVZEa1W64KnsI0pimI0Kut13zBYUaRFkWZZnOdJHG9M06zVvDRNXdcSgjcagRAFjIxm8yEhxLRovV4/GLSn0ylT6vCou1qtuEgfX5wQQkajUZaHp2eD+/v7zWZDCAHxGrpYuBhiYZBt27e3t48ePcLU+9WrV5RSz/Ngb6OUSpKkXq8/e/YMC70wtQPgvlqt0jQGCbPRaOm6Ph6PHceBveejs3MU7cPhcD6fT6fTfr9fFJnnOUKo8/OzJ08e397etloNzstGo0cIcV3bsixCpGUZvu8mSeR5XczWtr5eGiOE5Hnu+h5MFl3fxyGoadpyubR1A+NQWGmt1+ujoyNYVKxWK1zxIOUBGsGkAaS8drvt2U69Xn/8+PF0PCl2y94RdSgvqWYAnYI/Nb7Wdf3s7Ay8cKxwYrBKSZKUp+CyAqGsxtevX7+GpxMGQnhti8XipHuEPITjHtQfpdRsNsMzGoah70fgx3G4D5kgzBhl++H3cQB/8MU2j4t3kw9V9Yq7e7L9lS/vr0yjuy0U0+m0Xq/DNwkd/OHh4cnJyWq1evPmTZ5lEKRtNpssSWG0yvMCHycMPxqNxsnJyXxyC9QLD4tGAkqTOI6hcENx32w2B4MB0xOp8v5BS5EiDOMoXpY8SRJeFInruiVP02xj2azSE+Z5GidrqQrT0jrdRrMVcNHGWOX45GCxWHielyRJr9fNsiyMlorkRRnV63XXa3a73VarESdLxhg4K5QK22GMMdthQqaO6y1WOWbBwN/wlq5Wq3q9jvXu0L+u12sp5enpKZeyLEvTsGzLWS5WvhfMpvPNOvz000/hHJMkSZpkRFGN6VEYF4WBOQekepPJ5Pnz50KI1XJNKR2Px+PxGKo/4HuiFEDCBoNBkiT9fh9eZsA20AXN53POOcAYvMIkScD1ofo2a5Vl2e/3cTICe4N70tnpWRRFeDQImpCmcGrgsgSVbLvvqeaCUApaQhJGSZKcnZ2B2qbrumUYSikEYZIkab71pEYFhLGBrus4sJCN8VCoqjRbp5QJIZUq1dagUIciCnvXwGjVdQPzWCiw8RTYeAFhJPI20ux7mbCqS6sESN8PCSkl0Bu2J3v/OIA/eMx9wKa6A2OM7IwS93kwTN8ybLZSQ7VNsMBgNE1Lk7Tdbl9cXFBKsfAM8HRF5VFKodw6PDhgjI3HY16Wp6eng8FgOp2uFgs8IBjAsFfFuGk0GsVxjIXGOLHSNFVsxUXWbDal7JRlzhhtNuuAxfzA0w1NN2i71XEcq1YL4jjudpumqXNeeJ5Tq/m2bSslkGSCwCuKzHFsTaOdTivLsjgOLVtvtmr1es0wjFrNMy2t3WlYluX59ld/9sVoNBqN7gkhjWZjuVz+/g+/se02qizTNL/++mus4xwOh0+fPp3NZm/fvr25ucHsC06HXhBg1TYGVo8ePfrDH/6wWCwAmuOUxMS52WzCQlfTtMVisVwu2+22bTmtZns8mjx79mwwGIBvBT2ulHKz2Xi2h9HC69ev8zw/Pz9XSnmeB94sGipgUfgaK4crv6Mt0E1pWZYgi8+XSxyOIMegigNfdDgcttvtbfRSCu5Yde1xzouiCFo1dMsYr2dpgimIbVqoM/chD1CoMUureJTQJSIU8VPbtuFsr5RahEs8EXKgZVnYWzidTkFbw0GD+lYpxbnwfb9izMPzW0p5fHxciX3fy4T7QfJB5FR9HaGyiqL9+3xwf/JBq/kRGQ5VKHl/aKGUEuTHVcWHh4fYtvX5i0+Pj483mw1WDiA9WqYJel4URUQqePvDgpYQ0mq1sCBtOp06pgKUXNHhweXnnNdqNSyvAniIQBVSosdwXAvFm67rQVCHSQznknNeMTmVUhcXF2AJ4ngmhOCJMMXCB2NZFhoSIQSeER85qql+v49xWb1ex3Cl+kRN0xRCgKHW6XTm8/njx4+BEyLMTNMcjUZYKA9e1WQyAf3ScRwEAwbfOPtxt16vB27XYDBAZwHk/dNPP6WUWpaFMx5ygVartVwukW9RvIVh+NOf/vS7777D4pdarYZJAKxx8GfiAgUiCoC92WzWajWqa8gGs9EckzcEuRACDhebzQbTlNlsBufSJEnw+VJKTdPMdzfM/W9ubur1OiEE4KSp6ajgHh4eoGNQQqCeRPAouiUMgDNAKcXRgLJos9lAYITGD68QoGhlgYEmCFQHQggeDW1qGIY1J5CUlFJkZZHzUhClG7pGaVrkcZqsozAMw3fAzH4m/CAg94NQEbE/fvg4YD6I4S13lLxTD6IF3C90q6dWO8UGqSaNO73SYrGAL60oym+++Qaek7AAQtmDiiWKIkPTYcuNhanHx8eB78NUE4d91TpiC5W2cx8PgoAQAhCGUroFaXSfc6mIrul2s91RSsVRKiRpd/pKqTRNNd0yLVdRRjVmux5jpmVpppmnaWrb1LIs163lec6YyTmxbR/igCQpGGN5LvK87Pf7i8XCNLdiv9PTWpJky+VS102laL8/WC6XWVYwpn/55VfTafTkyZPZbGYYxvn5Od5e7AbGZjz0eGhxDw8PuZTNZvPZs2fwAr++vp7P5+jKHMdZrVaoJEGb7Pf7cN02TVsIZRjWZrOZTGau67969QOuKgx4TNPs9jrT6TQJ46Dm+YH76WfPm83mN998o+n05vbedV2mkZLnuHaxRACnTFmWkC/5vh8mMfyUCCFXV1e+7wul0By+ffu21Wppiui6PhwOUbngOMOYju3tBcMoQtf1QpXgciRJslwuRVFCxGybFmaA6I/AKbVtO0qWbLeTE2HJGIPSBd0mwkzX9SrxIH/iCEBhqZQCI3S1WgF6QXWGF4w3DY8AHm+73R4Oh6iHi6L4kUz4cQTuJzelZJW4Pw6/DxAdpRRjKC/fEXEQhFJuTdLUBzf6jiVHCNG07QP+7Ks/Wy6Xb9++zeIExxgaCciu8yy7vb3dYdDaeDxGjY7VnCBegoaLfLXVzgqBK0DTNDjSwlqr1Wo9fvxY07TxeNzsOESVZSE1Znfa9bIsw/V9EpetphvHaRQWnHON2UlcBkHQbPjz2cZ1XaKMJN74HqnXAkN31+v1bLq2rcxxHKIM32smSeI6ruc2RsNpq9lN4tzQ7Tzj08miXmuNx+PZbHZ0eOrY2mw2u357V6vVNGZqzIThClySWq3WYrHA5Yj1w0VRnJ2dIc5x1pSlmM3GpmnHcex5wcXFU8fxxHYdHU2SjDHmOF6aput1WK83oyiJ49T3/STJlKKTyeyHH77DzobJZAJZY1mWrXbz4OBgs9lQSUDv/OKLL/Da8PIAuSGn9ft9IMO2bTOqAzuxLAv1Ht7/zz///PLy0nEc1/dxuk0mS8/zbN2AAzpQqLu7u0ajUZYlRAwY1kFXAeMMpW9Nn5BRDcdFCapRBpp7tNkgaHEZAHRBAsSyKogqdF3HX4qhPN+thxBcSUEo0XRdd2zPtlyNGVKoq5trrAPYNorMYFSnRENKL4oC+g+U8Sh
izd2KoXdBWMGbdNf1iT0DGBTulFKNvaOtbaNmj3Czn8QwlEzLnBBC1XtJdRvAO6iG7bFkdEOXUqJvVkrV/aDf77fbbRALkyTB0ILthFRSyvl8zssSLMqyLCUVkIRW6mTPdcFXZIyVZY6AhLESYO5vv/325OQEJGBs+cCFIoQQXCtynme5bdt5JotCdNoDwzDms8i27U570G53b29va0FnuVw2Gg3XcjRmM1qulvHhwImjQggRbjJe0rdX9+122zCMX//q951O5+TE63YOpzM5n22WiyiOijRNn1y8GA1Ho9GMEPLyh6sgCGazZZGrOCps2758c3t4eIhF8P1+v9FoYCvQxcXFs2fPvv/++1//+tfPnz/HdoSiKLAQt9/vYy53fX2NYtgwDCz6Mgzj5z//uRDij3/8IwyzdW24XC4p0TrtXriJ4zg+P7+Yz6etVgs7QDVNcz1HKQU8NlxtcBS+fv0aBx/nHP/2er3b21u0bZizTyaTPCsNw3j69Cnn/Pz8fPTLCVI3fDHKsuRSQkF/eNidTqd1z4/jmBDiOM5kMmm1Wu12+/7+/urqCppDuMWhiXh4eKi168irqGtEUTYajTiO18sVrnv0csvlEvW8ZVmz2ezRo0eYsoBJc3d3B12Y4zhIjABslFLAC+hOdYTeEjLI2Wzm+z5aCcTeer0mUjqOYzl2vdkwbYtqrNVux2mim0YYRxBkvQNm6Pvyv/3MRveat/3v/P/efjTB4sYYo3tVKNlR4fI4xRHVbrdrtZptmEIIbGvYWg9YdqVjlDunRrXTEFNKGWWU0na77ZgW6Py8LDGcNQwjixdgFcODAH0a8LrFYhEEASp7bE3Tdd20g+U64ZwbZlByuVqlrusdHB6G8ds0l0kSpbmcTNeEMM9vtdr9IkmTuIijnChdcKqkzLI8CjPBqeB0uQgPDw+LXNqWz0timZ6u2etVrDFrPlt1Oh3TcDvtAyW1JEke7ieWtXZd17EDJZXgVNdsYBJRFAFuAevi9evXp6eneZ5jbzF26y6Xy3q9LpTGGOt2u8PhsNFojUaTbrcbx/Mvv/zKtu3lcvny5et2u/3s2YskSf7tv/13n336xWq11jSdMe3+/l7XzE6nXREdTVOXUgrJlVKo08x2K4oiz3OlFPf3d6j0iiKXUobh5uCgH8dxrRas16s4juI4yjPOOUfSE0JUBvtAvwkh+KNAswQU1+l00GHGcRyGoRDC87zxeHx3dwfTGtiWojFDV4IMWRSFLDkOdMwe1+v14eGhUmoymaCLZroJ5o1t29fX1wBIa7Xa27dv8fJwVFW+UpbjGYal66XjuLVaw7bdOI5Xq81u/sEcZzvZLwqe52XgOWznsIoFYd1uF4U9qOdKKX2/qvwgwOieFp7u1vcQ9d6yiA+KUrWnCZb/XXUi0FHyUTmKjwRWJaZpllmOJVKmbpimqWmaZ291A8BFP0jgVbmLzAxqvLbL5HDIwk/BVkP3WA1kUaUAQEfyTC5vUNAzuuGch2FclnIymZWF4rwMN6kULE1z27Y5F0lSEM7RNYFyAXMNFCGcczwUcuxkMjFNM8/L9Xr9+eef397eP3nyyXodNptNIdTRkXN5eSmEME1b100URZgQ4l3FNhLMpkejUavVsm370aNHzWYTmm7UbOPpEo0QCj+M2nVdn81mlmV99tlnl5eX8/n89va21WodHx9DamxZDiFsPJ4MBgPX8RljlGq1Ws333SzLsjxVSrmuyxhxTAfjnzRNHx4eut2u3FlCZFmGRYX4mKAh7rT7Ukq0r1j/hFeFfSFFUYxGI4QN2vJ6vQ5vC4gScFifnJzc3d3NZrPNJnJd29/dgiC4G99jsy/+ZCK2rtbMMOH1cnhwgF1RWIxr2w6AFjDgwOFWSoFWRXb7nrePRohm2bVaDa8EPtG2bR8eHoL1ho9YKVXJaNnOJ0pKCZ+OWq0GgTI460mS6B8P2fcrzP0acssRVXsE638i46k90OWfCkIpJdkT4LMdmch2HcBTIA3FmxCzJkyBGGOWYeJNwbB1axb+EYaU57lkHNcuNYyqfA03S0xE8GahQcewFbDNdDqFcQiQxkLQZrPJmHZ9fS2EAFY5mUwM3cLHgJmkaZoYnTUDF0CRrmtRtAEYqJRwXUeI0rIMSlWjUQvDNT7mxXI6GAzCMHzx4sWf/vSnn/zkJ69evTo6OrJtu9fr4XLMsqzRaKDvD8MQi9rhHo8KEAEA6g98hNBH0d3Wx+FwqGkalntiDkEp/earxkMAADSASURBVPbbb3u9nq7r2P4nhDBN03ebgquyEIwxw7DKsry7u8uz0vMUUk1lDgRLVUIIIBagWbiIISMACo2cjKGfYRhxHPu+f3p6aprmw8MD/Lan06lQcr1e76jkGorGWq2GdYVYxgpd8u3tbZ7n/X5/OBw2GjUEKmBYlAl5nmPeyxjT6VaOFIahpmmY3MxmM6DNRVH4tQasa6o+DdaGMIACUKx2mnK8EsMw2u32zggrA0gDO2PEv9qtBDVNk0vpmqZNiGYYhDEu5WK1enN1BdnkaDJJ01QXe4a/+4BnlVcQb/LHVLn731E78LOKq+33K/bnu197LwgZY4puxU34Oa48/D1ESDi3F1m+fVW7lTI40fdfzPZJ5a64JXR7xey0+ZqmlVmEAMNprZRC3QJvvCAIqnkx7lOkWV5meZ5PZ1NKqW7qpSjjOGZMI4QkcUY1ommaYTUKnq43YZ7MMQSr1WrrzYwQQhn1fLPdbkiVe54XRotG07u9vcXBX5TJJly0O/XFcuG4xmw+0nSVpJtNuNhqi7I4juPFkjVUw3GNakaHTw2RVqvVxuNxJagROxfJPM8p0aQgUhDLNAO/XgsalmkrSV3PO+gf+l5tPJr6vn90eJKm6Tdff/uXf/G/C6Fub681TcPIYTi8DwK/KNws02zbrDyEgIjEcdjptHSdFUXW73dd1767u8MvYl5qmvr19egnP/lsNBo5jvVwPwU8OJ/PdV1fzFcnJyfdbrcUHFc/AIhqhIOFVpAgdzoddFzAxgkhGN8DyNlsNug+it3ePsaYVFtHIvwvIhkcNOhuAZkCjwGpqHLmrdAKXGk4u4VQaRqZpt1stlutDhakPTw81Ot1xnRCSkqZELwsha4rQhikmBiTwoe7LMuHhwesfMOcZhuEVbxV4cT2RL2oZHYX+4/vpt/vJ98LTvYu8PaDUO2USlW4boEfTQMAAxDJdk2EJQZxZLdRAHMFBKHaU/orpQACJUliMG07jTAMvttz2mvXtZ1BCIp+PHu73ea7ZWb4SPAeFWRSlLFlm59+9pQxbbPZxPFSERrUPF7KzWY5md4LIYQ8KnnGNJFkK8aYwRzN4KvN3Pd923FsWzNtZVhSM/h4PD48PCxF1Gg0TG74tQPGWJpt8iI6GPTRzcZxLJUgvBSCO65OqDmZ3sfJ6uTkZBMmhmFgLAZeVXVq4DQpiqLT6eBszrKMUl0IcX5+nqYpBsSGYQyHQ9u2sTsByvrZbOa6brPZvLx8m2XJ3d2DrrN6/VNoZ7vdTp7nnBeUKlyLZZmj1OecY/EtmjdcbU
iMKOxRJ8NkLc9zKTSMT0H7xDr7R48eXV2/xbJUdGVoDWAJ43nexcUFsMRqhn57ezuZTNBcoDaGFBD7xfTdSm2MAaB+BrCEq//u7g7jB+zcBtiTpulkMoFfKJrSqnfA0YNhMnx6wG7FcwGIIoSgGMHpCdS60ajXajUQlbG1Al3Sy5cvqwnHNggRcvsZbD8mxc7QjlJKiUb+u64w1f9uUyi+uReE+Bq9m/roBmSJEILhtZQSosl2s7UFYNS7cWL1CuW+kzdlIHYJul23yPb+wHLnbwcYwNyt3QOYAbwL7y/OV2ZI29Db7eZgMBBCkHtRcsziqes6RennebFabUoe2w7z/EBTOt5PzzNn88KymWlRRYo4WVk2o4xruiS0bLZ837fxgp8/f/6f//N//pf/8l/+8pe//PLLL1erlWVrhDLPD8A7qTfcOFlF8ZKyAV75er0uyxKzKYyq0QOj5Ts+Pp7P5xhgWLYLClhV5KMoaLfbUsp//Md/rK57gGGj4Qy1XByn6OK63a7neUkaxXGm63qr1fA8LwwxjOAw8Ib+ixCS5zmWH4LFBgVdp9NBLcoYs+1cKfXdd9+9efOGc76Jt4UJnAKbzWaSZdh81mw25/O5pkiSJOijgAVEUfT27dvpdLrZhK1WixACpeVisRiNRjiesCt3vV6DtsY5bzWalNIKjcNvlWW5DuNarYYzGgw1jDH1nXB8H/ZTSimxbXaw4RSHHQgMEP6CIAr0wfM8QjggVvw6WpVyZ0OBzKxXhc3+nKBq0sjOkxsVCKbn5CPg9Edve0nuRzLhvlRK7Q1I5tMF6ml8ohkXwGmASaDKxxtEd/IrKWWl36/4N47jUKn274M/AQvu8OtorNEb1Ov1wWAALwkMADRNm0wmrmcFQWDb1mw+zrOCMdXtNoVQNzc3R0cn9UaglGKaajYb0MK4ZolBGQa8lqUxJrMsk9IAaH542HNdp1ZzkZkJYZwXf/EX//Pd3d2f/dlP/+Zv/uYv//IvX79+zTk/PT0VopxOJ91ut9GobTabNI2VssE+JYQ0m816vQ6EBmoA27b7/f7FxcUPP/zguu7bt29X6ywIgtVq5fs+xt/oyhBa8G5DtKB/xu7hZrN+d3cHpOH4+BDvHg5y0zQJ9bIswTdrtRr6Xsdx0EFgoHd2dvbNN9+AV+R53v39PYg+yJ/T6bRKC69evbJtu+Blu90+ODiYzudSykajAQu5Ikkx4J5Opxi1qZ2boOPYqGgw+gcFDNyAer0OQs9iOkN1KoTANP/t27fw9oVZVl5wQgj0n+v1utForFYrSikip6q6wcw2DCMtJQ7xwWDg+37VgsIAju/cij3POzg46PV6b9++3jKuHAf0LFBE0I1rMOemTN8mJUIxusN/QpIkTZBVFWFCEqUIZQwe2Eh2iKh35evu+5RSyii6JiJKQray3G1kEkIIKbiglFKNUko5kVwWSihCCAAYHNhVrGLMQLeuNu97CmNvzM4IhFIqiVJKKkqURhljSmNSZ8TUNSwJlaZhGGmqhMg0jRal0jTt+OTM8xw/cBljtmtacyPLEqF4t9+kVLeo7ZuB23Axk8RGsW6jX3frGByfDk5hoPTmzRu33YuiqEjWqzBpt09gsdVstDRNc23nzZs3FxcXUkqDabyM7h8eXvz0RZbzMkqTlM/mm6dPf6LrtfPzz/7jf/x/wg0pCm6a9eWCHB6e5+nEd08vr/84HN3863/9r//2//27NI0ajWCzWf7iF//Tcrn84x+/Nk0TC7fvbh9OT0//2f/wP/6f/+H/3oQrz/Nsx2x3mo5jnz8+e3h4MC19Mh09f/HJr3/9a8Mw6o3g/uHW853lctpoNOJ0ZNp5p+fEqa1I1u32nFj7Z//sZ5eXl9gV4Thev98PguCHb38Yj8effvrp7e3tfD4/PDy8vRk9efIkz6Tr1A/6x8PhkChD1xzfa87nc8t1er1eccUn81m73QZi2Wq1yrIM15vlfNFvd7KT0ySKV7P5oNu7ebh3a8EqCm3fe/32CkdzmqYHwcHJ+SPLcx3X1W3r8uZ6PB4vNuvBwdHt7W1ZiE6753u1rOCbKHn69GmSps1WW1BW7/byPJe6YXj+aac7HA7vRsOiKBaLxTrcSKJQPaJu1zSNKaJTZmq6bZimac7ml5Zech4nIfXsrpCyzLK6b1MpeMCKgjiObts+ISQJRw/5QuVespK6ro2miyjeaBrlqSBcdZsN13MYI0WRvdcTvldGSokyd39KQfesCj+oQquEtn9n+j49bf8L9f7aw+pHbO9lVJFeJerqR9XL0N7fpbGfVPefC4+gaRr2EKLWNQwLXU0cxwcHvSxPtN2ChIeHO6VUu90WYrso7u7ubj6fQ/NWlmWn0wEHEt2CZVmDweCzzz67vrtJkkxK4vs1TdOIkEopypjreLZtHwyODMNar9fKskzHffLs+Xq9Pjw8pjRnjCVxBl+J29vbp0+f+l4tiqKi4CjI0yxJ0hin+Hw+Pzo6Wq1Wd3d3jx8/uby8fPLkyaNHjzqdznfffXd/N9R1/c2bN2i3lssl1Iz4q0Evhgk0pq/41IBG9Pt9xhg8MxeLBdjtjx49+v3vf39/f48ar3o3Hj9+3Gg0xuPxaDQCmFmVu0hNQDLhVaHrOp4OboKMsYeHh88//xyvStd1rOZ9eHgYj8foxDabjb9bz0gprdoEYNHghQkh4FUD8ucf//jHR48eWZb1y1/+siiKVqslpfz6668/+eQTYDlgaGRZNhgMgA/h+9AlwmijXq+LkmNVliy3lA+UVM+ffwrIlBCi63oYhvi4PS+YTGY3N/dKkV6vDfg0TXNGnCjagO/JebFahWVZ1uv14XAY1Hxd17NsN6L4IAj3o6jCIZGIYLf6QfjtR291xbM9Pf5+MOzfuQqe/X/fv9uHs0dF2f5zsY/CuGoOqzgXuxvnnCtdiIKQjBBiGBqjmiACnI8sywxTw5wRHGsppRRMSbpZR1eX15PJxHXdTqfjed6/+Bf/YjgcrtdroqTGWBylD/ejPM9zWaRZTil1Pd82rSzLiqIwNM2wLKFUvd5I0nQ8mTqWbVnW+fl5PIlXyw1wAkpps9mO4zjLcl3XhVO6rmvbUtd1368pJQ4OequX96enp9PJHIzhly9fnp2dL5fLP/zhD0dHxy9fvuy0e+v1er1eP3nyZDwed/oH6NmwTQXgId5hXddh0wIUfgcJepvNptc7sCzn6uracZwwjO/uHrrd/tu3b4+OjuI4ffTocbO5hi0NlZxS+vDwMBgMANIWRfHw8IA+EMxJQJf9fr9er89Xy++++84wjPF4fHZ2tl6vj4+PUaweHR1JKV+/fg34VCkVRdFqter1erAtw+tHfdTr9Q4ODprNJoYEUkoQ2SmlQGtA3MMEC1u1q/oZzGFMRDabDcgxsLqRnKNotM2tHxyVquLicc4nb4bYn4GDAycyCs56vX50JBhj/X4fMtfNZjMdrymlYRhiS2xR5IoIz3POz88bjZrtWGka/4ioFze526akdtgjmlT2vgXbxwHw0Y8+TIDk/Zt6fyz5rmrdaymVUrB1opRKK
qsuWe75cXxwHLCdpxuyN/AeTdOoCeZkXpalaeoVD/jhYRQEHqFyNBoLwXXdlFKORqNwU2Bv3nK5Ngzr5OTs/Pzcdd2XL19vNhuxE3THcRqG8WKx6J0c5aVSShoZL/LtkVkL7LuHyXqxDIIA1vS+KylNO92s0+nd3Nx8/fWfTk5OTNM0jEgIifYDJ3FRFIZhFEUShuvZbEIUOzk+u7y8nM1mL168mE7nd3d3R0dHWZaDqXz+6KLb7f72t79N0/T+/j5KM9u2W60WRqCAfB3HQT5J0xRYa61Wa7VarVaLUrVcLh3HGY/Hy+Xy8ePHaZoOh8Pj42NCCFw8oC2CfIxoOhzHB4MB8Eyc1MvlUt85iwF1NAyj2+3qun59fY3dwEdHR9DBgGwNdBpMSwCBuq4zZmiaBlkZ0hSG8tXyerClwQ2IoqjX6wFah8B/Pp8LIcDGVruJOXjVUBULIaDPIITYtq1RyjmPokhYHF9QqTRNA6mDEOK6frPZhABFCJEkWZrmUZTM50vf9weDI9xZKVoUvCi4rrNer6frdDIZapp2eHRAKc2yBFemrhmu6+v7AAluCDMUDFUaqbLKvmyPvF927sdnFRhVEqPve+xXd64eYYsg7U3w9x9N7qAXqlPMdvCeit0S0g8OBcQGngVBiKkDNzLUP4QQXTctyxSSa5pByNZHvCiKsixw6SyXy+kkAiul0Wg1m83T0zPLcjab6OXL14PB4PBwEMfxZDLhXPq+X683ozgNoyRN09lsgbQTBIEQ6ocfXt7d3MKpodtum7ZTFMU6jOxSZ9So1xu+HyRJslqtCSFgSyF5Z1mqaSxNk5JnRZli8fJgMBiPx0Ko4+Pjm5ubo6Njzvnf//3ff/GTn37//feDweD4+Pj+fnh0dPL9q5cHB/jgs/l8jkEiIWQ43J7osPfu9XrdbrfRaDw83KVpqpRChQnC12azubq6ajQat7e39XodyYoxNp/PfcdttVpwFgMh3nEcVBa46bp+cnICet319bXv+ycnJ9Pp9Oc///nbt2+/+uqrLMsODg4WiwVKfaVUURTT6RSzkEfnj2BAOhwOkyRtNhvNZrPb7YLqgLEH4BDkydlsBgtM1JbQvy8Wi263C6kkCn78yVBOr1YrcN+FEI1aDWgnL8qiKKIo0inDUhqcXJodBLWaECKPIiEkZRrT9KLkQeCbli0ViTbhYrkC9SrLMqr09WaRZrFuMMex2+22bZs4elB6UEp1bc/djOyVhXBWpJSCvVrFD/Lh/hVP90xE2UfOa1s54ftuolXcVk/HdoRvuWd8Wv06Ie/hsZj+KchYsIN893RVGYwivsrkqCiEELnU4NeANQCGoeV5zjSq6yzPM0II3prJZILTsdnuQPOCqaui7Pb+4f7+/umz52DrJlluWLbjeYZlx2k2WazBUUziGEms3SwFJ3kp80JEcdZ1g3qzk+e5UGw6X91ev/7iiy++/OJnQpR5NiSEuJ6NIieOI8uykiRqt5uaVnMcCz4rX3/99RdffGFZzuXlZa1Wq9Xqs9ns9vaWl9KyrCzL8rwkhJydndVqtQspCCGgYqH9gw/feDxGQQtaAiGk8sYsigLJQSn1m9/8BoM+KSWiC1wT4Jyz2Sx0Xdtz11F4dXONqToMrW3bprqW87Ioinqr6fieJR0cbZiqwTYC6Q5L2tBjdzod5BmkO13X4YVBCHFdB+a8GNxjBcV6vQagHQRBEATD4ZDueELAq1Eu4joHQwOnDxIsShV8Z71eY1hvGIbpGtvnpQRTRxBuJsNxHKdJkqxWK7iSel7gecHx8TGkLZpWSkmKgqdpmiSJZZh3dzemafZ6HU3ThCgtK8DT4TopiuKdoHj/WkfGQwrGT6uwqQZ0VetVpcHqd8lusKGUAk/74zjcj1X6/o3tbcWgO1gVH4xS22ExBoDV3aqCGeHH9gh71Y/gXxA49VazU6v7ux0MinNuMD3LCiEEY0RImec559uRzOnpaTWuhJsgLtYwDK+vr4uiqLT5YRje3t4mQuV5HkUJVvMFvgtvON+v9QaHtm03m23dsC6vrh3HSdJ8NlvEcRoE9eUyMQyrLPPAr0u5bDYbm81G11mWMdPSizIryiyMllGUcC6//fZ7vJM3N7dHR0cvX74scn5+fj6bzZ49e6Hr+tdff/306dOrq6s/+/mfAx2BIhmwBJrA2WzW7XZXqxVAdlTX9Xq93x8sl2vX9U3TnkwmcZzatlur1ZbLtW27nEuMCjabTavV0XWGQR/Ga47jAFdUSsF/5P7+Ho1co9EYDAb/8A//ALwEHSbAHlhoA2vB+I4xBifbyiQSgQ1b/jiOoYTG1QvcH4c4YwysQ9ScOHcajQZqXcijq9k69ltRStEWWpbFiwKUzlajicISlA8ACmma2pbrezVG9fUqLHJu6BI4X5HzshCM6rblCh4nWcZLaZnOQb/NeXF8fHx2djadTgkhvV5PKQU+g6YZWZa96wk/uGHgCDCtovBUmGTV++3DoWxvTZrYmawR8iNL7ck/AdLsp+L9tIkEhTtru2lEdf+9BnI7VK2IDghCsnNcZ4wFfuC6LlFss9kkSaKUYIz4vp9mSbfbNgxtPp/ned5sNoWorVZL23azLNtsInhDYQh7eHg8nc7AgW63u6ZpL5frh4eH+/uh3WpKJYnGYOkHVDCJYi4EnJdMx4aVxtHRkWmaF4+fZGk+HA7DMHRdO0kylCio5TzPUUToul6W+WazEqKMo/Lp06f/6T/9p6Ojo08+efbLX/5yMBgsF+snT55Mp1Pbds/OzoQQnU7v/n6ICQqiCxplAIkAaWDVA55KRUi6ubmBI3273ca2pru7O0JIlmWwloIrJFLrkydP1utlURR4h5GmhsMhSsTz83NMvdFcgVuLqMiy7MWLF8jb8N3BpY8PFIxcYCeW51awGWaA+m6zJ5b+Yh0qIgRJDyU3RrWYeEGSbxgGki02i0HQAONguLA6jsMcB50LWFmu61q6gak1MOosy+qtpuU6buCXZWk6NgwN0iKnlJqOLSnJ16tC8Have3p6moSLI3L4/MWzWq0Gyr5tm8PhOIqiNM23xAD2Tyh0q7lq9Zcg8KrSFLfq4q5iUu7WZf9oq7Zf+lYISjV8V7t9GBVBZ7++rQbxamc3UCVkRF1VGFdKQmNHWEPZY5omY3rFzzYMo1Zr+r5r26ams7u7u9lsYpo63EQsy3r06HyxXEKryjRNRzI1zZLz6WxWlmXJ+ZvLS1wBmqa1O52UKGAMvXYHezl1XYcIUEnZ7XYtyxoNR0dHRzjLeZE+PDycnB6jPoEUkGnEcWy4d2n6tlPFNFzXbaXUJ598outGkiTPnj0zdGswGPR6vWZzuw5+NJr4vo+MPR6PNU07Pj7mnB8eHqLRHY/HFxcXEA21Wq0gCO7u7hhjZ2dnYRh+++23X3311a9+9Stwu9FxgVr5i1/84r/8l/+CGq/dbj99+vQffvX3P7x6KYQ4OBzc3t4u16v+4GA4HD5+/Hi+XHz7/XcwtJdE3dzccCkqwAxLCBuNxtdffw1COUrlKIrg6YLrYbVaYWZQXWOO4ziO
0+12R6MR0jigF+BMkF/gbrDrLori4OAABHfDMDDJABseFF/M8S3LWi6XkvPqaux0OrZtx5sQ8NXBwYHneS+vr4HxgIQAnhC2hYL2be6WTzw8PHiel4QL7DyE3x+OOayHkZLgk9Wrsm0fayG7RZxqbz1TFXXVpU/3tE4fcFCrUlDX31lC7d8qygvbGT0h0ioLtu2P6DYIcfjt/6LaOW1VQV7FIdmtcEPxjKkreuU0Te3dqleiWJIknBeGoUFU1u93fd9XSiyXy9l8slgsmOGnWbwFZhWXigpZckGYRhzDUkqlaQJALwgCz3eSTagRZdmWptGiyJSUghdECccGPT8URV4UmWVZjmMVRZGXSiohhLAsQ9cZITLPc6m4aRqEENe1a0EDiqeiKG3btq26aVr1egMDZSWpruvn5+dxnMKJdDKZIUv4vg/8EHDFeDx2XReNVr/fv7+/f/LkCdIFpdR1Xfg49XuDNMk77V6R80a9dXf7UBYCUn3H9r75+ttGvRVFUeDXKdHubh8ANn7//feAW3BSwEgbgQSSDQR7nHOdaVtfiSjK8/zy8tLzvDzPgyDYLzvhkorsJ3d0PHNnKC6lHA6HcGEDlQekeSklDPhQtgghcH/gLmzPnQxtBQorPEKe55vNRgkBRBqyj8ViUfeDFy9edDqd6XT6/fffp1JCuQJADj4jjuM8f/4cPCdMPtCkbDYbKqRju2EYhpvIHwRxHE8ms1arZZqmECUyjY46hO7tY8INLWyVFfczpNqDOvfjs4rJ/TaSvg+T7j8O28mXqiJWSkn2EiBjTGdbK0Q0gWoPVsXHY+jvrdPY/wJ1PNktkMHfqBlarVYLgiDP09lsRpk6PDzsdvtRtGk2m7quj8fD29trSPXare5wugR+QCkFXoLXg4sGY1/MiMCnSXgOL1NdI4LnmqbZlm5bupIlkazI4jTeaJT5Xt2xDcHzdrtZlnkUbRgjnHPTNKXihmE4jpskcRjGyOqr5cZxnIP+sWW5o9EI3iT1WhOoBucSAzHOpWVZ4/H45PjUMu0oipIixmu+uLiADljTtE8//fSHH36QUqLAhr5OKfXzn//8P/z7vwmC4Pr6+uTkZDabHRwcYFMSfnE4HGLzKex97+/vj04PwjBEhun3+5jZ4AM9ODjAe4L3DR+H57iAQFEn393dffHFF1jHjSUTuHzb7Taq0DLcgBzLGAPfAL9br9fR5UZRBLkTKjKgKZD8wamNcw6QtkIT0MVBSwEjRry8oigYIYwxz/NmsxmMZ0VRXl9fj0YjUAKn0ZrLMk4jRZVuaqUo1uGKUvry9Q+LxeLg4IBznpf505MnMKFsB+2rq2vP8zwvuLt7aLVaoAdjc6iUklLyDkKsqj7y0U3uqW/3I6qKq/1Wbf/R9mLwwyCs/pftyTXkznimyrraLgirmlnuiarU+zPG/adA74H7VHItTdMcxyWE5Hme5yWl1DQsnIWmaUqhcpEzxgaDI0pVHMej0aikWiFKYAA5L1Spcl6UZen7fpqlWZlLqryaDxXMcrM6OOxiKJfGCXzB6nXfcxwlcs/yAPB4ttPtNDRN40XCGNE0WpSJpmlZlqVZjEQNvB4qONM04zilVMuygjEjiiLH9uq1JhSx0+ncNNNut0uplqbpxcXFv/+//sPp6enDwwNyICEExAAU+dPpFPzm2WzW6XRev36dpulXX3318uXLn/3sZ9hDdnt7OxgMLi8vUaRh9LxYLD7//HNM4WazGei1myhah2G7210ul5999tnvfve7TqeTJEkpxJNaTQjBpVyu10VRCKUYY8vlErAKOrfDw0O4GIK9SfaafDB70Inx3RK4CqTAnQG9oItG1gUj6uTkBCxZpRREjNVcChAdzvTqmK44DLphQFjo2g6K4SLNMJxEVeV4VpIkcRzqus55IUSZprEQYrXa9Pvds7OT9XpNCGm3m2VZClESwqIo0TTDcTzDsGzb9TwvjmNwoTWN6jgP9svRqiLVdosH99OO2iN279/UbnXZfqzuEto7ff0HcYi7VUNIPAXbC6r9+Kx4HkwjVeZUO6Lcx48ssfltZyXEOQdIIIRYrdaaxjzPA2oSx/E334xqtdrZ2Umr3aSULhaz7Uei63GWCQGwDuXi9lxaLGa+73e77aIopORZlpimfnDQcx3DdUzDYJEs8jQqsrjI4jmlWZa16g1d1xkRTFN5FqORKHgphPB9H0pzsEySJLm9vYVSybIspYjjuFlaXl3eCCHKsgRhoCiKxWIVx3G3203THOif4BISHpRYEEys1+vf//73Uspnz55Np9M//elP8GI7PT29urqSUvZ6vT/84Q9//dd/Xa83wjAMgtpyuQqC2suXr0zTyrK80+k+PDw0Gs3FYkEpOzgYcC7a7c5o/sAYg6k7shN8lnADalJuV6b4EONiv5Jt22/evHn69Onr16/xV4MPTSlF8GCwCcEbgAnozvjOhx+5AT0hCmBkNnz6i8VivV6juEWWQ39eYaoIPLzCrRs3pYhMPN3Wlp9S2J/iTHS1gItSCJEX2Wq1WiwXSik/8A1T/+LLnxiGMRoPXdedziaz2SwIgvls9eyTT29urocP45/97GeT6ehv/uY/npwcpWlKqaKU6oam890u4v3g2W8FP7iy99PX/lhivw/c7y1xl4/jtoq6Kly3ma36YrcTuzoXEW8aofs6pqps3u8VKaUVQRQHKt2hpgCpHcfRNLbZRFJyTaemaaNdnE75cDicTEaaprVajU6ns7m/1TRq21ajUUdvqfa8DxDe1ahNCFHka1GmjHDLoLXAwTdFWWpUGCY1NGqZZuDbVAleprqmJGGe55imaVkGIbamab7vr9cb7EKIo1RoSgrSaraB7DHGCSFY/cMYm80WSZJMp/Plcn1/f1+vNW5v7uI4/u1vf1u5qkkpMXBP0xTerZBKdLvdy8tLmI7e3t4eHBy8evXq6PC0Xq/f3d1Bjlh13d1uN4qi4XAI1OHTTz/F2gnginCnHw6Hg8FAKeV5HkibWFmFwg+DuLrnNxqNXq8npYQrB8brKEaAl8IgGPQxw7ExpvM8D9TfsixBJ8IRg5as3W4TQvCeYFwppbQsCzxVDAP3QT6xkwgi9tATmaZJdzjCcrmEV28SRuCyttvto6OjSMZZlsAhBqsXESxxXGRZsl7nWZa0280sS1arxdHRIDC6lLI4zjgvZrM5ti9j3CpEKZXgvNCrUrOKjf2Scr/LYjsK2C66WDUZp7uWshoP0t0KKOxV2o9Dtev65O5W9Zn7ob6NaiU/yNJq74by9YOKVO3kF/tfsz32D3qDLEvX6zUhstvttlotXWdYcmJaxtOnTz3PWywW9/cPzVbdtHQwv+D9WhQFZQqe0GEY2natVqtRSqF/PRz0MBqp1zyn33EtG8+ra5rneWVZapShjISt7XSxwI4h09Q1TfM8Wq830zQ9OztjVC/ykWnaUsKOhwVBrdWqgRht23at1iCE4MxOkuTu7i5uJLBCiqKk0+mAbw0/MiBbr1696vf7x8fH4BALIU5PTzVN+/bbbx8/fnx6evr61dXJycloNOp2u5P
JBLp1nDIQKDYajXq9Dihhs9ngcgfoP51Oge4sFgsArQB1N5sN+iVCyOvXr1ut1mq1siyr1Woxxo6Pjw3DuLq6QrQj9hD8mqYhSeD4wyxRSokDAkGodoRKCDJ0XUdaVkrBOA85E3g4alqMMQnZgpOgByL7Sc4p7BiZttls1ut1meUYGBZFMZ/P7Y5LGFWUaIberNWwgCQMwxeffSqEIIxePH3S6/UmkwnTtYKX3UH/V7/6VRyl9UZwfX3t+95PPv+y5HmjQUteFEUhRKmrvSZwP7lVViIfRKDck+3t94E4gcT71r2apknJP+4G9zPtByXlfqRJKSnZPqn2fhn8QVjul69qx06uCqFqyJnnucHMyWSilHJdeGyZnPO3b99KyZvNervdlkpMJhOUT48fPxaGBPcCcjgkZCjoGGNYU053JCld1y3LmM3WWKnVbjZZs2nbtqGzfr/HCF0ul5pGHceiSmlUBUEgKfU8z3EsZGNeSinJbDYbDA6VpIZhtVqtNM2jKJpN52XBV6sZDAsty1ou16vVqtlsUqoVRQEkxvO8yWTiuj6qvk6ng9IUVOl6vV6r1ZbL5U9+8pObm5uzs7PVaoXtOghXaO1fvHjBGPvzP//zt2/fws+bEALwSQhxcnIyHo/hcQZ+DMpLLFGB5ngymUDdZ9v2er0ej8fYREC46Ha7eK9OTk4g2AUbDgwkDKWra6bYbelCqKOVgk9hdZ6iV6w+esQ29O+YBoOOj6MEJylGi+ghAfYg0kRZSikNw1gvV2C9ebaDswAfcU3W6vX68fEh4C7Hsdrt5uHhwcXFxT/8wz/EcdhsNpUShEjXtYUor69vs6woioJR/fDwiBCZpqlusG63W5R5nqecFzoj78AVolS1kpIxRhQhUqndltzthIARxoihM01jlJKdA7LEGSaFYJQwDcinFFwwppSs0uzWk5BSvchzzrkopVKEMqoIQfwKUe7uLCmlbHcQGKYuJaRVOqWalIpSzbIMy9yipsjA1UvFdVNhsFXQztVCM5iu66lJJRdaqlFFiFKddotoWpJnBtN8r24aBo7M2+HIcWxDmb4VNBq1m5u3i8nE85xwOTdNkxIpi8IwDCaUo9u1Vi1M1GK5iSKv223rVmuxTDebKef8aCEoVWkad9ttL+Cz2SwMw4uL8/WsEJlp2y0iCSFEqnK1Xraa/X7v6OWb10meTRdzTdc55ylPf/PH3xz3HjebTV4aSZxYVhmGcRynus4opVmeUkrjWClF1+sw8Os31w/CzAkhPI5qtdomDLE4xTTN2XjyyflFFEUqLx3NiONsdvuA6mtYlFLKFy9eOKbVbjTLLBdCUKk0QmXJN8vV6P7h7Oxstlq/ffv24snjMisF5SUvDMls3aKCdJutgpc13725ueFKJkl8MOj+8PLber3++PTpfLECNfzw9Ozq6mqxCYlp6q4rDH2RxIs44qbBTDNK00WWbjZZu9VKBVmPp4zIZrfNebFYz4LA8wOPMb0sSyXKOAw5557j8FKu16HneZLQyWxONaZpmm7bm8WCJKlhWAYzsjxdTGdpnDBKbc2hnOlUI4LkccYY0zQ9S1JCiG2bcRzXW/X1ZmV5ZpSFTmAfN05N0zRNXZmiLAuRlkUeG6Y2vrqxOLOFxtf5fJktZ0uSsEW2SuwsTRbPPnlEKT0adF5fXWJJa5rxu7vher02bOs9G/wPUg39CDVVSmm69kHKIu/jqx/c5HvqPlkVjR/fk+4ATLKfgfe+L/eIcrhpO2M5+b4/AH1/Vdv+ayt4YRoGChK05kQqqsibN28Gg8HR4NC1bJD62Y45lWVZksRhtGaMRNFGKkEIwQ7XsizzvIiiaLMOy1JQSq/v5rA5gTGmKHPPc9vt9uvXr13X1nVGlZJSYCqdJFEey16v5/n+ZrOZzWZ5nhNGCSGz2YxSCvQlC0Nt56mz2Wy4KBaLhRClplEuSs9zOJeO43ieo+umFCrLCkIk0wjnfJ2sALe6rqvT7e4NSilXPNqEYRhmaco51y1TpyxNU6y2XywWIGSjkNN1/U9/+hMMPuDF5HkeiuowDFer1fPnz9EXzefz0WhUFNlP/+yrzWal6zolClgoHg0FC+d8Op0KotbrNYhEURTphYGGsBRC7LyYXMdJkiSM1qZptJt113XDsExTURQbnWmdTs+2bZ1qQqg4jrMsq9eabGe2r5Siu4uz1WrVg1qz3hBFORmP5/O5qRuNej3KC0IIVsphz4phGAAlgcNVHanc0ULyPF+vl3mRCsF1ndm2bVr67e1tkXPGGCGsLDlKMCHE/f09hkCe511eXo7Go/V6XavVNF0H0qPiSK8u2Y+jaL9Pq26apn8cXeTHVLy7R94vGj8Ecj74dUJI9fjboPqon9wHfiilSr3jzexHb1UY/+gNVSVThDFmaLppGKfHx41Gox7UiNgqXNHWu0wry5xSWpZFlke9Xq9/0PN9dz6fw1xos4miMM6yjHMZx3Gz2Tw9PY3j+Pr6Stf1i/OzIPDjOA58F4WoZRiEkHa73el0kiRq1dxGo0EYXa7KNIuLogBFcj6faqbhunaWJZtwBT69ptMo3mR5VJa5rjMuSl1nvu+GYShE6bq279eEQENueJ6nFA2oAyJ+u9EsyxJiBc/zDEODwQTwCeibZuMJDAthG0F3usosy+r1Okhb0CL88MMP6/X69PQ0Lyz4poEvJimZz+eWZaBN5ZxrpkHIdgkPBjloW2azWc7LrQu9xsqy5HIbeJQxfbevU0k9iqKSSwAn6HEcRzMMjRACyC0XinOJk/FwcFwIjh5V13XdNHCsO45T5sVisSBCSik9z9Moy7KMUJZlWavV8jwP86Fms1mWOfg0OGcJISAhQv+dJEkYrtMsJkRBDacbrCxLx/YsyypLAfQI12ezWU/TdDR66Pf76FaiaJNlie1aeZEWZSYJeaeikHvaPPpjZOvqKlfVhqb3udcfRCP5qHOrMuGPRu8O5Hx/pr/7AueKlFLf80fEUfcBZrOfqPcjEzfH8ZSQZSEIEZZu2Jbte55j2Y16Swo5ny+JkIwxx/ZM06REwwXturYQXEtou93EygF4eOMlwZU4z0shRK1Zm8/n0KQ/efLk5GiQpsnDw8PpydFms2o2641aLU0TsPLv729FTlar1SYKF4uFEMLzPKZpgM5BQFU7U0ac7ppmMEZqdQ8+vJZldLqtkueGodfrQbvV4Vyapm6atu8FRVFIs+k4DlOk3+8XRcHLMssypogeMEkUdPpJHNdqNThDj6fbZbpKKczicNmBsYCxPiZ7Sqmjo6PLqzcwaA2CoN1u2p776aefFkX26tUry9oaLidJjGbBdd3VbOuqzDnXiUKGzNJE13WhJCZD2m6Up2nachHZtu35jmkaZVnmeapptN/v27Ypyh28KYlSFNm+3W7PV0tQW7XdjVLqed48SZWQpqa7rlv3A0boerVq1htYCgScSQgB1CeOY6UEZBaKSJgjcs43m1jtRiZSCil5nudpxo+Ojjw3UErN50sEuev6pmkahgZyDzgPGlGw1apU/PhL33k9qT2zQ7mnJ9q/oNXeHE
LtaWc/ntTv56UPHqHSN+GLHccCheg/qf2tIrZ60h8Nsx99AdW/uq6XosAnVLXpQojb21vLsmpBUPcDx3E0uh0KB41GnuemaQjBhLQJYbPp/P7hDodFnpe6rteCOiFsPp9HUTKbzf70pz+ZpvlXf/WvPv/882izyrL0k08+MQ1ts1mBdbFabVf8LZfL5XRTlmWUxOBV27bNNA0Tv8V6ZVlWrVbbLj/NMkJIo1lHv+04FtOk73uNRhBFHmh3zVatKErOC00zdEPjgkZh6Fq2tUN316sVCDegpzRqddM0geADAe72DyGJwOZnOBTZtv369WssbPj+++8B8FiWNZ/PgyD46quv7u7uiqJYrVbpZMwYg3oY9bzjeXmelWUZx/FyudTY1qW3LEuLUThE5GWBTAjyJytLzJMQA4ZhGAw7DAspuWW5QRAQInWmGYbluq7BdIxJYQ8HcaOiW6ojHgrvj2PZiotRkuimddDrd9ptzXaw/oBSCm6NpmmrFbSgGSFktVoZpg7YJs/zohCQL2k6LcsiyxLOuSJC7NkimqZZrzVbrY5lWff3t6Zpnp+fg/h+d/0Af7rZbIZ0bQIWr/LeB6lp/ztV37VfuO5f3PvX/X4mrNo2pKvqV7YVKduOK8AoIoRIyX80m+E+lFJEDtkxCgR/xwncZ7Ttv7zq9VBK8zynioCJjbMt5lwJWfeDIAi63a5nO2VZlnmBzifw67oeoyINgsC2zSSJyrJUSt+y4DXD87yyFErRsiyHk9nTp08vLi5OTk5gvwXyVFnmURTd3ZVpHE8m4zzPYeFcUSsxSatgOtu26YZSSn3fN20Lf5RhGI3ANwxjE65KnmkaMQzNMLVa3WeMUaaUErpOLVsnhOg6sW29VA74N3e3t0EQVPF2dXWFckujjDEWJ8l8sciLoh7UFSHM0AVROS8Vo4Iow7YGx0eW60hKxEyto3ATR77vx1l6fDg4Pz9Hoo6izc3dLfDYVqcNnga86vI8x96IVs1HUk3TlGisMghkjIlSglEkd7Y3ZVlSaggh8lwqJT3Hct26bZu6ruV5qtFttlBU4SQFkTCKIs6547mWZUmiIO/gnM9mM40ynbIsy3rtTrvdXq9WScnBnmWMBUGA2nu3zaLcSrQMDZm/KAoiNSkl51wqUu78RXXD2mw2vJR0T91eOa+BhwBHvNF0QghZLBb4TMEl+JFNvep9+UKVx9ie8pB+dCM/dkPe2itK6Xs/2aKldL9sALxRvQz6PjIEEgww5eqVkL1JoNwZNLI9tu7+Lc9zyzAxscBRZJmmqRtYHhSGYbhaZ1lGFcGmWOygTrPYMLRefytTwN4VpEqN6egGcW1JKQ8PD1ut1u3tbRRFrm0qJVerlefaq9VyvSZ3NzeTydhxnMPDQzQJtm0pSgjBDpOMy7Isy3ojIBqBkGIdbhzHwqTLNs1ms0moKIrMD1yAPY5jWpYlpSKU27bjc1dKAtM+O/dbrdZ1koxGI2h2EBjQlUopDcuoNepCiIeHB875YhMppeB1DbMWMJIZY3d3d47jgJSH+rPZbN7d3ZmmuVgsIOFrNpuu615cXMyXC6UEpdSyLNu2MF7HS8LUIcuynJdZloVh6AU+AgaHI9/NnKWUGqOmaeIiIoRYlmUYuuCllNKyTEJIFEVMUV03cW0gfmzbBrM0ThMM2+bz+fX1tUZZs1a3TBO7D5bLZcpFxSVA9wiZhVKqLEs0GsCNQBAnAh6+GWVKKckYMQwDLw8jE8ZYWRZhGCpFXdetNwKpOBeFH7iWbfT73eVyKURpWb6UEhY2Otnr3HD8VHXmfrrbuyn60W0/sD6oJBnb92WqQrBafL/NYFVB+67Zw6Gye0AB26UdAKMq55vdC2B73HH5Y7todq+NUaoRwrIsL9JM13XP8VutNiEsjtMkSm3TdF3XsdADyDhPUBcZhmZbrlRcCGVbTpLGnHPBpdBFlmVRlOR5rjHj/Pz85ubm+vq63W7quj56uIvjSNd12zK63Xa3286SRNc1ZF2lxFTMCCEFL/Xd4vWCl2EYmqbZ7XZt28bmUwzWpJSM0UajLmUpZNnptCglJc8NUzs6HsRxbJmOZdlKqTTJGaO6rsWLzcHBQb1eL9LMcZwsTjCIOzo6wjkCSU4YhqPRKMuyVkeDDblSCqRwpRS0WoZhtFotCGHBWU/T9OjoCL5SUsp6PSiK4rvvvqvVfMKo41igXEdRiHIagcEYg+kLdPd5ntca9eoz0jQNkyV87kmcOo5j2UaSxGEYahr1PEdJURR5zQ9c18+yTJZC103GWDW+r1RskEowxsAp7Xd7zVo93GygQSnLkmk6ukHOueNaUso4jqXcajLozsZB7HbpaNVSRJ0ahg4DTugYqxCQUirJKaWe52V5hM2thBCsKpBSDgYD04YHQr5er/8/mJNilez1C2gAAAAASUVORK5CYII=\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "execution_count": 15 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1bEUwwzcVG8o" + }, + "source": [ + "也可以使用 MMClassification 提供的可视化函数 imshow_infos 更好地展示预测结果。" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BcSNyvAWRx20", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 304 + }, + "outputId": "1db811f7-9637-44c4-8aec-330f5765e20c" + }, + "source": [ + "from mmcls.core.visualization import imshow_infos\n", + "\n", + "filepath = 'data/cats_dogs_dataset/training_set/training_set/cats/cat.1.jpg'\n", + "\n", + "result = {\n", + " 'pred_class': results['pred_class'][0],\n", + " 'pred_label': results['pred_label'][0],\n", + " 'pred_score': results['pred_score'][0],\n", + "}\n", + "\n", + "img = imshow_infos(filepath, result)" + ], + "execution_count": 16, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAATMAAAEfCAYAAAAtNiETAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAFiQAABYkBbWid+gAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOy9d7Rk2VXm+TvmmjDPv8yXmZWmSt4hUDWSCiEE3QgBYnCSMMPMYgYaqUFIgxHQTTdDQ4uhEaIb0EA3vqGBxkiAJOQwLYMTKpUk5MtlVnr/TNhrjps/zr03IrMk/pq1ZvWsilqxXuWLeBFx7z3n29/+9rd3iBACj90euz12e+z2P/pN/n/9AR67PXZ77PbY7f+N22Ng9tjtsdtjt/9f3PTtv3jK5zw1AAgkIBBCA4LgBSDY3t6mrktGoxHzYkKapgyHfXywjCcTnIMkzwkhsLKywuXLl9na2sJai/ceIQQAw+GQ2WzG4cOHqeuauq6ZTCaE2gKglEJrTZIkSCnxHuq6BgRaaaTUOOeo65oQAmmakmUJxhiUlnhvqaoKay1CCJRS8YB1ipKaJMlIkz5aJyiVIESCICAD9PKUuq4xxpDlGUJKjKkJImCa1xNKIIQghEAIAaRASwVeIIMgTVPKsiRJsviZpMZah1KKtbU19vb2SZKE5zznHqy1fPjDHyYf9Lk22uf4Xac498gjiOD4J8+6m73rV9m/cYPx/h49pTh2aJudQ9tsrqwggUwr8A5nHVmvjweEEtR1yZOe9ESqqmQ02iNJE1ZXV9nd3eXChUvkWU6a5sznJVmax+scYHV1lVOnThFC4Oy5sxRlCQICUFQVw5UVhBTc+bi7OHT4MBcvXWJ/f5+qqqiLEhVACIEPgTTNyLIeztMcf0Ka9ZBSU9cWhMA6z2w6J0sznKnItOSJj388ZVlyMB7RHwzY39+ntoY0y5gVczY3N9k5eoTdvT12d3cZjUYIIbhj5ygJgkQLQnDoROO9Y3f3BlVVkmUZ3nu0jmvI+4A1Hmsdcd17pHQ4bxEBtNYoleCDpDYOJxRpb5U6SHYnBdfHEwoLMuvhUBjrUBKklEgpIQQIHojMIQSP9x7nLM4avPckUpGmKXmekmUZOkuZz+dUVYUxBmOrbp1LKanruK43NtaYTqd479ne3qKsirjPHIQQcD6+fqo0Wmu8NVRVybyYs7OzQz/LmUwmWFPR7/cRQlAUJYG4tqWUOOcYjUZMJnGvb2xscOjQIWazGd57QghYa5FSopTCWktRzBBCUNc1zjlWV1fZ3Nykqipu3rzJYDAgTVPW1tYQQjCfz7u9NJlMCDjW1gZoGXBmgtaOf/qC5zIcajbWemxvDLGuwNqaEBzP+6L/XXxGMGvBhkZKazW1VlprASXLMowtCSFQVRUBh/ceYzy9ocZay2AwAGAwGDCbzQCw1hJCQClFCIHpdMr6+jpHjx7loYcewriAcw7nXFwMzWew1mGMIdEpzjm8p7u4IQS899R13Z2U9jiEWIDO8jE+6t485qzFWolxFuMs0iqkjP8OxPdBClSI4OjjCyKlbDYw1M6Ck0zmMwb9eAxZluFFzWg8RiaayXxGr9djd3+Ppz/9GTz48MPsjvYRQrC1tUWepuzv3iBNE5IkodfrMRvH98zynOAjiK6urlIWM4b9AXt7e2yvriCkYjIdo5Si1+uRZSnT2Zj5fN6dzzzLkFKidUKWBYJfnLP5fN4tMGstSilqYwgEpBBMp1PW1tcoy5LhcMhdd96JkpLxaMRBVSFCQIkI6tPJhKKoGA5XcdYhUEgEzroGVBJ6WYIUgro25L2cYjLmwoUL3HHHHXjnuXHjBkqrLsDt7OxgnePc2XNkWUY5LzDGsLmxwXg8IhUCLQS9Xk6aJHgfmve0zEyFlArVG5AkGpkoaizeFpRljZCerK8IPuBDwBuD8gCKEMAHT1GWVEFQmQpjLcZBqCtsEHjnydIU71231mRzzoOLgbDXy6mNx7t21XkgAkNVV1y6eoWiiMckpSRJ47EnSYLWGojXynvfrWlrXQeg1tZkWUYis2Zv2C7wV1XNSn+ACIGimGFt3QBpznw+ZTQ6oDaOjY0NhsMhdV0jpWQ4HLK+vs7m5ma316qqIssy0jRtANp1RKQFsnZfGGMQQrCyssJ0Ou3WWgwqETTbfet9xBQSQaJTsqw5RhOfU9c1wTvirl0kl48Cs45pBEkI7ckSECJTs9aS5ymDwQDn4wE55xGyjWIRXLTWGGMYDocdc7LWkiQJVVVRVRWTyYT5fM7GxgZ33XUXo9GIgxu7lGXZMbkInhFMlVJLAOeBBeMC8N7FxdP8rgPmpWNrwau98IvXlBA8QQr4TGDXnvw06S4QgG8ugrOeIMEaixSS2pjI6Jwlz3N0mqASjXEW6x3D1RUSnfCp++/n+S94AXd//j/h3e99D8N+j93dPe7+vGdy/eo6ly9epJiM8dailUJ4T5okzQI2IAR1VTPcPhQXlHW4YOOGq+bs7u4SgmM8HuO9Y29vj8FgwPr6OrPZnKKYo1SCThOCD5RlyWQyYXV1lcFgQJbnZFnGdD6jKAq0lJGBlRXXr1xla2OTwUq8xkmSMOwPmE+nuIb9xKDlESIQL51HyICpasrK0O9LNjc3SdKUy1cug3NoLdnb36XXy7HOUJuKlJQABO9YGQxwwTMaHTCfz5gXc3pZRp6mjPf3cfGJESBwVFWJdYYkUdS1oywLvI8MKc8H6ESThx4gMK7EOYN1EWwVEiEUKkki+KGoHAgEWmuyLCM4gZMaEcBKByICZ7uZU61RUoLQIEAqgXISrxRSRuBVSnXrcmdnp2NDrgHAdl+2azxJIgjkeR6vh6m7bGQwGMQgphOstVRVPF6lFGmWsLa2xmw2IwTHYDAg0boLYFmWopP4eYwxOOdYWVlhMBiwurpKmqacPn2a+XyOcw6tNf1+vwOvNE2RMu8ISZtZtXt/OBwyGo3iZ67rbv+FsCAxgUBdW4IXpAONUqIjNMGBqS1SBbRSKLWAsM8KZu0mbQmNQHVgJkTWRYqqquJibVBWiITRZMLa2hoHBwcMh8PuZ1VV5HneHZz3nqqqmM/n3QU4IFL09iS07ADiBRTIJZa1YEQtO1u+LYPQAvA8XviGIvvmbzwE1x0HYgGEQUCQoqHeEq2S+JiMaZRCxtcSIKQEFdBJgnOOweoKdW1RaYINnizNSPM8nngZmeneaJ+DyRihFVIpSlMhxiOKIqYMBwcH1PMpwVisszhjqU3NIM+x1jGbxTRjOByysbHBZDYn7/c5tLPNlSuXuH79OpPJGKngcY+7i8lk0kXbeM4Caaqa4xbNNYTaVKwmqxw6tE2/32f/YJ8bN13cLP0e0/mU2qacPv0wOzs7zCeTZiP1qeZzqqpGa00vyzHGUhUlQch4PlSCQKBEwJoKpQRbmxuYquTalUv084wsUVy6fIGTJ09y8uRxTj9yhqKYk/dTLl44x9raGiePH+Pc+fNsbazhvefypYusrwzRSLwD6xxFUVDXJUoJhmtrWGu4efNmTOFqh7
WB4XCFNEsQEoQJGFeAirJKQBCEQEiB1BqhEqRIkEFiE4dNcxIvsELhEFjrqOZzvBNxc1pLcCayKqnw3nNwcAAhIAgkOjLvJNEICUpJ1jc345rGdYE9rscYjCOLcZSlBwHW1bh5fA9rLYO81wS8gLU1tjYIH8gSTapjoE9TjRQJWim8NZiqRCLo93sc3rmD8XjM/v4+3nvW19cZDodYa9nf32dvb488z0mSBKVUl2VFopMjZUxr2wzMOdfgRCBvgqP3nrKMaX/7Oi0TNQ3LE0BlIEt1lLlczOysBR1CvCZiQWY+I5jdCmyRAbV5ZwSklCRJlqiljxtZRK2oqirSNKaD29vbjMdj0jS9JW1pqetsNuPg4KBDa2NMpJhAv99HKdWkrzHNjNpO+/lu/bxRW/OPSjOXj8cYA0GgVIwCQsSUleZYlaADxkU0XPx/e8KFXkRSKSVSLyJMaWsEkOiMXGlKU0NVUzvLaDJGa01d1VjnEErxkY99lBvXrjMrCuZ1ybHjx3nkkUcophM2NzcJK0Pm4zGjPYOrDNY6Njc30SLga8PK6gpZnrFzeIdDPrCyvs68mDGbTbh+/XqX2gyHQx7/+Mdz4cIFLl++ymAwoN9fQSnNdDoj+JgOt6AfQiDLshgUmnPrXNT9Mp0AgmI+Z7cBB+ccmU46lmGMIUlSAMqqROkEU8cg1uv3EQLKcs7u3g2yPCVNNf1eDs5Qm5LZfMp0NqHXzxn2cxIlEcEzm01wDdNaWxlQFGUMCMGTaI3wARAYU2Otx3nbrN6cXq8XU/bZDFOXFEVkIXneAxHQSoJKkc7ipIvpPA7jLcopQhCY4ChsoKwDlQ1UXmAAJzSEW4OotRZnPcYYtFQI2awn7xEECDFRMsbgQzw3u/sHFOWMsiy7AK21QuseWkuckxgTgTpJVce6BoNBk4aCMRbnTExXqxi8EhUDf1nOo14FHBwc4K2h1+sBMC8KslRTVRWz2awDHyEE4/GYa9eu4b1ndXX1lv0V96jBOUsIvst6uj3XPAei7FQURcfEWkCM67SHL+cIqfAuZnB10r4e4CC4QGjOo7ML7HoUmLXg0YlmtKCwYEmt4NdSdSEikLQLPi6OnEOHDtHr9TrRb3V1ldOnT3epYr/fJ4TAaDRib28P7z2HDh/mxvXrTJpI3+v1GnQPixR4CZzaExRPiOzy9tuB7Pbbo1PIKH7HP/HxZAlPAGT7OkJgjENo0EIiZdzkSim0iimVyzzXd2+ytrZGUVfcdeouLly8iNSKsq5J84yyrkAKhFBkScbFK5fJ04zVjTXCJKYOto66Ry/Luevkk9m9dpVPf2JO0SyMwzuHSYBrV66wurrKeDRmOFyhqg1BSmazCePxmKqq0FpTliUPPfQQ29vbaK3RSrGxscn6+iZCKPZ295lOZzjvGKxEFj2dTgG6lMAZCz5G4F6vF8FKKopZ1OIkMD4YoaQk0XFDeOcQUpLoyLY3NjawxjEvCoSUSAnzyZirlyHNEqSKi9+amrWVITeuXeVgf5edI0fI8wznLIe3tyjLkquXLzFcGVLMpjhbs7WxTl2WSCFQQmBMhU4UiRYURcG+taysDNBKkiUpIsS0tSrn4ANaJwgtOtABcMHhvEdYSwgSi2VWO2YmMCkdU2MpLNQIPPG8ZjqJG1qB9OCMx3uHF6bZGxnG1HjrqU1N8KZjNt571tbW6KUZeZLinO3OfxnmHcsTWmLrQJ6kyAAhODKt8LWgKuadpuZqgyR0QRog0RrvHKYqMVUZ0880JTiLd44bN24wnU5J05TNzU1WVlbi+zfgqrXu9LTJZBI/T/N+URYKKC2QSnfpZ9STBWU1ZzgcUlYBHyy1KcEsCn69Xo/aWZQW2NJS1yXGSJRKSJK4/5VUKAES0YjWnwXMOk1KtsDRCuS3Vu/ae2QnjZ6lFEVRddXD48ePM5/PWVlZIUkSjhw5wpkzZ4BYSMjzHK01RVFQVbGicurYcbRSXZolhGioaBQzTVPtDIGOKbQMSYh/3GkSdZ1Igdt7kiRxGwaF9wJJACWRXi4ihpRIpQgiamSyZWwhIIPAe/A+VhDTvE9ZVWynKcYYjp88yd7BAVnWYzqdopRi7/w+6+sbEODgYIROU575/Odz5pEzBK3YPzjgCXfdiSRw/pGznDhxgkwpHn7gAQqiVjboD9AiYK0h+MClS5cwxrC7f8CJU6dY31iL4FZVOGcp5xVVVXL27FnG43G3IJXSpGnGYDBgNBoTgDzPKcuSoii6iNwKtUIIUp0QnI/s3Hmcd+RZjneukwza62rrmsFwSN7rk2c9Hve4xzErCh548CFC8KysrEY2W06pKkFdFmgh2NxcJ4TA1atXUVIwn06YzeccPnyYwaBPqiVjZ5kc7JPphHxtlel4SjGfMxwMEYkG4ZtqasJsPmEymSKER0qFTiSIBGsc1lYN+wQVJL4R429dO40MITVJmqC9RSmPsIJAC1YC56C0Dq0aHawBeecsUY6N18yaeJdSoETSyTbOuQ5IsiwlbTRamr9VSlJVxeJapAnOGcrSUhQF4/GYfkMAQghIJZDoJV3KsLqxxc2b15lNpvR6PQbDAdbV2KomSRImkwlSSjY2Njh27Bhaa6bTKVVVdQShTR+jzpZ1v3fOgAhkut+lxC3BWZaDWo2sZfRtpqZ042JIFd6U1JXFueZ3GoRwTVoa0EqwjGafEcxiriubPDZGylYTa0FBa81gMOxKsVUV6PX7ZFlKUiXs7+93Amm/3+fKlSudINxqNi393Nra4oEHHmBjY6PLodscva2qKRUvtBSq0dNEFyVaHc4502k+QkCapl3uvmB1souCWjmSJB6zszElWN9Y48aNa2xsrKPThCzrMToYkaY5MtF4H1lVALRKGI3GrK2vY51nY3ON8WzE5vYWh4/scP78ee697162trbZ2z2gKAqGwyFZnuNDiCXqPDJXlWiO3XEHx6RkMplw8eIlTh4/xu7uLufOnWM2OmiAOxZizp49y53H7yBNU/b29uj3cvb29lhdW2djY4MTp07gg2M0OugKBYnW3LhxgyzLuHHjBr1enyc8YYeiKNnc3GR3d4+D8T7aa9JME3DMiylJqmL6gAPhcT4yjKqMOkuapZi6gACDfg8RoK4qaKrN1hgKP2MyHpPlCUeP3cGhQ1vcvLmLkpCkGVeu7FEWc/q9nCAkBwcVPgS0jtW52cySJCnT6YiymHbVOWct3lmUko22klIUU/A5gkBdFARn6GcZeaKQeLxt2DwSqRMEMorJIgAe721kF0ogkHjrqYynrAtUmrG2vo0TBXWoIUkZqIw6SOaVwdQ1WshG54Uk1SgBZRk3tZRxg+tEoZSIwVFKtFZdlT5LojUjOEeWZThbNyl0Aj4yEgnkaUIxm0WhXWmqsmAw6EUQ8RZvGptTkiAl1FXNfD7nalVzcLBHv99j+9AmMsDly5dxzrG+tcXW6gazecl0OsVay3gcq8v9fj/uG625cOECOzs7HDt2jIODgw4TnPMMhjkhOLwPCBGrqcYsSE6SKNI0BruimHWkxtqazc0j7I8mVNMCLeiKRiE4r
HUc3t5iOMxIlAPvMLb+7GDmvaeuasajCcF7vKdJ3WTH2qaThDRNgVgmtTZSy/HBGCEUzntuTKa85y/+oruoo4MRe9evU5ZlB5ptSmuNZSQFu9eusXv5CuPxhKoqm+JDQEqFlKqJbqqpYAp8WKSYAhHL4SL+TSvwtwG2jWSx+ODI0hznAv3+gLqu0SpS+mvXNLPZlL3dAb75+yjGiiZy+phqu4aNhMDu9fMoJblyMWdre4PZ3g0+uXeVoii5FgLTnR1GozHOeS7WdWdD0IlGq4SqrnjzH/4mW1tbbGxscu36NfZu7nLmU0Nu3rzJ5XOfZtDLGR0c4Oqa8ewmF66cZWtjo2Fd885akqQpW1tbpFlGVZVcv34tWipkm1LTeKssvY/9A9vbh2JZX0jGkwlFWZClKU84dRfHjxx7VKDrmLv3BOcJUhJ8tGzIBmjbVD/RCaH5YNYanPPs7e6RpinFfIoxJf3+IbIs48iRbZxzzCYTyqJY2BmIQi8EvDM4E8Cr+N7W4uoaAgQd10AiBWkvVtOiTzCJgUjquMEb4ZnQasIQiKw8BAlBEJzDEyAIRHP900QhlcALSTGbE2zU5xAaIxO89ZHVI1qtIm7oxn6hlULoFrSiTIOUOCFQSnYkot3wkdVYrFucz1YCEc0a727NW7bMpyxLRAN47ecQIhYasixhOp4wGAzY2t6IroTaNDrWjLIq6VnL2tpaLFDt7XH27Fm2trYYjUZsbW1hjOH69etddrO/v8/6+jqHDx/mzJmH6Q9ydKMht9rpsm7W+k2XU8s2TZ02wTPUMfgKoh3FeYsXgURLynKOEx7na+q6/OxgVpUl169ex7so/C+L7ELIJtVTVDpWXyLgOXzzHO8DSiuqqubKJUvw0TxrTY2ZxZI3gGo0thCinQABtjaUsznOxvIsLZgphRQS62yX7tJt36XKYwhorZoSvu82w7L8J5uoaY3B+0DwtgGWBELAzSwueAIW2WhkSiuc882CixfEWoc1ZazolhVCSsbjQPAlxWxGbQxSCOZFgSZEwdNH0DDWQIC6AGNj9L16cYTwNeV8zM0bN5lMxlTzPs5ZZuM9ZnlOVZYE7wDHwSgwmo7Is4yyKHDWRrYa4MruDfI8Cvmz2bQpoauO4nvvm20nuLq727FWax3OWRBw9tJ5Xvi8F3Di6DFCI1IrJfE26kzeB7ypQcpoZ1EKqVqztQcFvX4P3Yj+ZVWhdaAs55w7d5bV1VWyRHPl8iWe8tSnMlwZcPnyZZJEY40kOB/F8ZZmhxCrbs4i0hQpJNFQEyJLlgKtk7g+bavRWMrSIqRHa0maxs2slMC5QF3V1LXFmJoKiapKZKJQqeyATwmJUikKFQ2+eJypEUEigyQ4hwmB2jq8dQjvm7Qz/r0jIIIHCaoJytaaWFgLMQh0tgRvCERrigjx9YKSCB9QhE7ekM2+CCG+tmqupQwgvMPVMUNp0cw7R1BRZ8qSlLmU5L2oGe7duElVzqN1QmuUjEBaFAWHDh3i3e9+N0eOHKEsyy61VEqxvr7eec3aaiQQNa+6BHSTyXmUilJUS2y0bolFQp7nrK6u4lys3NZ1TZpq6trH4JQolJY4ZzHeEoIjkTHlFsGDX1QAHgVmswZMola2/MitVUHv/cKuJgSiAY42/3XOYmoT2U2DJn7p4kW9yUdACYHWttqidvc8v3isDXwhxKL5Z2qRb53ciLAAPkFTrQwdm4smUdkVENro5xtDqRCycf/X8blaNouwKdM30TRNUqyxKKUpyoLZbB4FaBUfK4qCqq5iWm0tOpXooPFNpTDM5gTvSbOMujYkSd0BtDWGJNGQpk3UXlRwBXFxyF6PNEupfKwoS6Is0O/3Oo9SC2aRyi8uvvMeY6IJ2tqm4tcEJec9py+c48TRyM5EI+q3UTY0lah2swhAaI2UAmvjayilyNIMgYxA3jjEx5Nxt7aquuZgtIdzjv29XXpZSp4lTfq40EO88xhjIXhE8OhEQ/BIoj1BS8hTTVXVlGVBmkW7g7EV4/E+3ruuCNWyAB8WoAMyFrKCbIAzgglSgpdIEfABhEhIU4210Ss3Kw2T2lJ5COjItpzvtCIIDWMDF8B728ksDeHEs3DsCwQssRdoU62mmuyj+TSaphfaXmTMkYBorUnTBCFilbRLeZUEQpQQQqCYz+P1bNhNmqYoHSWYsorXua5rDh8+zM2bN/HeUxQFeZ5z8uRJHnnkEcqy5MSJE0wmE27cuMGdd97JxUuPUBSmy75a3XthK1nIPq0Nq11ztoqv74Olth4tHFIqCI7aFhTFnMHGAB3pBt4udPJHKebeuc8IEu1NNDuqLavGMvOiS2Dhto8+LOd8g7gG75sDic8m+sRiRRAikB3s7fFFL/iiThsKEFONpurY2jqEXJhf259tdAghlr7/6E1v5MqlS4z29/nmb/7m7sRqrfHBo7XCOotUy2zNkyRp3HxSoqTCeRdBpUlt4+9l5xLXSULeiyVsU9Vd2hvNogJT1fR6PRKlMVUsfIgQo2QblbIkZTIeM58XHX0vinnnzVtOK5xr2WJMpaJOGK+LarxMsTgSU+KuBN5olnHhi4aluq6QEmKU6FLz2tSd565NMVOdRPuFVLFqKRVaSLSI56o9x945bBXTgKouMKbC2gqpYHV1wP7eTcaTA+44doRrVy7xyOmHkDJgTBnbfFyMwgTXVOMCSgLeYesKV1c4W2NNhakrTFVQFXOsiWk8PpAoxcpgwPrqGnmaYY1hMh6xt3uT8WifqmgCSZIw6PVZHQ4Z9ntkSpFpTao0ChHZlnMEEwHOOxvTXOdxxmDreHfGNOm0a9aqaER/0Wxk08gyFmPjT+ct1tXUpmyCQ1hyw0fdrvNxubpra/LeNdfW4hvGEkJojkeRaIVWKpICZ/FN61R0jAq8sUgR2FhfZXt7mzzPCdZSVwWTyYRer8dHP/pRnvnMZzIajUiShOl0ysZGTE2VUl2hYDgcNvp1bNXr9wcIITpzfOtuyLLoT53P5xRFwWw2YzqddvdiPu8YIMHjnMFhI6vOo+5X1TNqU2BsFYsN4R8pANyKXDSbcqkVqPWcNawJ0fAmEdmClKJL96QQCK2wDe3XWscTGmKa2ZZ0rXNdVa5Fxbb7QHT/Dg3otZ6325ljzEYGg363yV/y0pcC8MlPfKJ7QggepXTHIEOI1Tsp4iJ0wTXisum6G4wx5Hmv0fhCTKXaPs2yIklT0jS2B7WVJu8dZdlWXkOjV6S0Rt0kSaJPKElIk4SimIMQzGYzsjxe9ACNqTWNbVbCdJpVmqQRMKwl0QrRCMvehkYXqsjzjE+feZj/5Ru/ifvuvbcD65ahSinxblGVFkvXsv3c7WNtKhTFat1SRBIV7QhCCHTDVAWSsio74G3Xj6kNgYo0S1nfWKOqa/YP9vDesbG5gU4009EIlgKkagKHFBKtJVJo8LFAFbzHq9Yi5KiqAqUT+r2MoiwQMrCxusXq6grj8Yi9/UV3SbTSRHtNDKgxbAogaVIyr5LYUeECPgiUjLKLKUscCiUkeZphgiZYi7GR0aEkSsUkuM1G4vmM1qG2
6B5CaHTGBeDpxrUfWYvqWn1aJrlg6Mu2pIWvrWXX7Xm31kaDbqOXChmaVLBaOO6bx7XWqDRlWpYMhq4r2BljmM/nnU0D4MaNGwghOHToEOPxmBACd9xxB/v7e03PZehAfVkfa5n9sgdtPp93tpQsz3DBIZPoMCAYpIJeL6NymhAcdV0iFA2QLbDpM4BZXNCtHSO0iNJ2Asj2wrRI09zax0W8gEppgvcorQnBdz1l1rpoGBQxVVMyahHBe2xw3UX2TdoUN9ICrAKLDbbcuwkxddRNe8Oy0XX5NeO/Gye1dUjVgqqiriuEg6qugNCUomOSYK2h9dotElxBbQxJkwbmeS8yEGO63sPgY4tUWZSAIE3jRsuzPEbo2qCU7tzQpq47prowQJqO8YglJipFZKF1bZBC4kLUGpWUVFVJvx8NxkmSgBBU9aLyE3xYaCreI9tjCwut0dkH8t0AACAASURBVPlGQ5OSgMDZuDEFAbxFeIeQsR8xhIBREp2kaKWilynEnshemqGEYF7GzgBjTWyMLgsuXb7E0WNH2dpcZ3d3twl2C5HbeU/wUeuMoBnwuEZqiFqZULphg77buM458EuG52bdRKaTolRklxCtNXVdU1cGqWRkNt3ajOmbCICMYOqNR/gIXKF9T09MvTuNVtM2lCtBF7jbu/cxAwpNUIgBL1b5sGHpeb7zdgoZumLBckvdcmse0LVSOWdwNhZIlIrDCELT05ymCd45RgcHXdU0y3NQCtVUMO+66y5Onz7d2TOOHj3K3t4eWdPX2/5s25jSNOH8+bPsHNnq0sd237UA3Xoe27QzVkAX5tn+oMe8mKK1JNEZ1XzeVEJjZThaWaKBVgqBVkum+UdBmZBdNQYheOc73s7//YY38Nd/9V4uX7rAH/z+77G1tbWojiH4xMc/zqu++1W86Y1v4sqlSzz8wAM89SlPxjrH4+66iz/8/T/g4Qce4CMf+jDf+YpXNPpALLV+8zd9Ex/+4Ac5d/Ysr/m+71vCRdGAxxJgNQtYyEVaefToUX7ll3+J+z/9Sc6dP8t//e3fIs3SRgNqOwFaPS6yu/b1n/e8e/izd72Dj330w3zqkx/h13/9l9k5crhL3VaGK/zGr/8KZ08/wAOf/jh//mdv4/GPv6tryj516gR/9Mbf5VMfv48P/v37+O3f+hX6vR7zouiY5M7OET7w/r/mm77xZRjbOKSbNq7V1VWyPKOuq07jklIiQtSIhsMhP/Haf8v73vuXfPzjH+Gtf/pmjp84gQ+BQ4cO8Su/8Wv8zb1/z/s/8kF+542/x+d83jOjPynPeMdf/hn//W/+CoD//Gu/ykc+8XHe+Zd/0bHeJNH82E+8lr+7717u+9hHeMu73sbn3f2sLnDQpCzBefAhblRjsHWNqU2nxVhru66NqiopihnzYkpVxTYia2uUEk2aHlAy6qF7u7tYYzi0vU1VlFy9fAVbG9JE08szenlGppPGo+WadMpTG8NsNmM+n1NWFda7WDEVAk8MRPP5lBCVKKbTKVeuXOHa1WsURUHUnhadLUJE466UAkTAuZrZfEppaoy3uGBx3uBCnPCRJAl5vwdKUtc1s6ansayqrg2nrirqck5VzCjnM6qywJoaH1xkvU3a2jGupvJOs7aRMcWM2bKNqWujlbXeRynjZlYysletRMdiF658mqJOaLINS1mW7O7ukiQJ6+vr9Pp9kiSl1+shpWQ2mzXFBo+WgroqmE3H3PPcZ6MknD/3CNevXeHY0R20lhwc7LG6OqTfz9nb22U2mzGbzLC1Q5JAEDjjsbVDBEmiUkxlwQuU1Igg4+PGgBckQjMdTxBArhOC99RlgXcu9rA2AUwrRaJ0p7fF8HE7mEnVRQzRrOyv/7qv40Vf/hU8cvYsv/Wb/4Wfef3r+ecv/47GRxL/7pWvfCUv/xev4KXf+A088QlPZDQ6YHV1hbe99a387n/7b/zP3/ItnDp1ine87W08+OBD/Pd3v5snPfGJ/PTrXsfXfv3X8/d//wFe91M/Fc2ZoukmWKJk3ocmuYyprQ+W7UOH+L3f+10efPAh7nneFzKbzXnRl70QrTOs84tuhkYHiq8dtbwQAjpN+cl//zo+8IEPkmU5v/Wbv8Zrf/zH+P7X/BBFUfCq7/4ujh09yrPufi7T6ZS7n/UsyqLCmIo87/Ej//qHKIuS5z7vS5hOJnzJP31B1+TunEcqRZZlPOlJT+Tw4UNYG9Mn5yHv5VRN2re2vsbo4ACpFN55vPQIIfml//QL9Ps9vvzFX8d0MubZn383WZaT6BStE9719nfymld/L1VZ8H/8wPfzc7/4C3zJ876QNE158Qu/HCHg02ce5nu/+1X8zV/9NUBkzCHw9d/wMp73hV/I13zlV7K3u8cTnvREnG0YUZPiCRcw8wKaaQ1KSBIVz6FTugOx1hTpvWc2n6LTlP6g39kgahN1kEQJ8I1Hrym3t8ZogWz+a/a7awT4ZuKFaCwfUkryfr/Taj0B6x1CeIIIBOFxDbOLrTtlF6S1ip6y2LfYMv4Fe5cyEERMaQ0WbxxCKIRSCBSWwLyYsj8rMUJTB4Ej4CVNoUCA86RaxOKEjF0NQoim8harr847BL51ES1plk3ze5qS9aNJvLI1iGit8dbEeQ+pRuCRIjQs2cWJEr7G+4DzDegJTS/vE2zAGUNNTHE317fAC1wd289Qjtkk+tUCsLo25Mrla6yurvLUxz+ejY0Nrl+8yMWzj7C1tkplas6eOR3bV/EoCRvrq1y6cpk8TZiOS7a3t3HWc/nKJQBOnDjOYNBnPB4zUXPqMjaNJ0nrAwUvAuP9MWv9NZytKcZzlIHVjRWUh3JWsNFfRQpi5bjxBX5WMHNthST6GwjAW976Vj716U+BEPzcG36et7/1T3E+9q0JFZncH77xjfzN3/4thMBDDz0IwEte8hKSJOHf/cRrEULy8OnT/PGf/Akve9lLefd73sPXfs3X8IEPfIB77/0gzjle/zM/w794xcsXwNoAWZvSRk1OdprYU57yZJ7+tKfxrf/bt1GWFWVZ8advewdKaZSKc6xoFkyMVCFOh3AOIQPvec/7YpQTknlR8qY/+mO++5XfSZLEWWStsfXOO0/xwAMP8cH7PtS0AulGa5Gsr69x/I5jfPRjH+O9731fo4lIhIrVmXPnL7B56DhpmqGUxgbbpOIeKQS94QDvHEmWxvMZoni+tb3FV734y7nnC/8Zl69eJUsSPvjBDxGsI5GCi+fP86bz5+OmEPCm3/99vv3l38Ghw9vs7e7F/s8mrbTG0Hru2uATnCfLc07deSejgxEPP/ggyxJDe/6jAB6afscIFixNeFiukLoQA0es2JW03SHeR+d5K1yHEPAi2mdwPl5j4fCiqchK1fnA4FbtrmUmLQC45mc3AUVEW8ftrWxCKEKQtGOPlm0q0GpWEpSIn4OW0YuYagqJFIp+r89w+zAH05LcCtZ0hpER2FAJ3hmunH8Ege9eP06uWFQV0zRtq2W3VPZaBloUccKEVC3DasYlqTYta7RPH2WFaOMIDeMEobLu3HnvOxkAQPh20oy
MtU+h8ATKuuqsKFVR4p0hTTXbm+vkecb+aMR8OkGnCXVZIDZW2d7eIABVVTArptR1Se0sobTMpvNGQ83QndYYe697aUbtarwNuKYC7xubhRBpDF4ygrFQGi0kwhOtJ00l1waLx4NftC9+1gLAsj/rytWrHSu6du0aWZaxubHJzd3dLro9+OCD7arpXuPkiZNsb29z+cLF7ndKKf7u/e8HYGdnh2vXrzcLWHL16tVm8XZL8PaXpG1i1Vqxc/gwN2/udoPiWg2t7dOs67gB42ssRge1F/Zzn/k5/Ni//T95xjOeHofi6YSbN29EEAjwhjf8AmmS8ou/8POcPHmCd7/7vXz/a/4lk8mEoiz5v37ydbzyu17BL/3nN7C5scE73vkufvjf/CjOu0Uri2grta5p4m41BIfTjizNqOqSNE2jlcVGu8qxo0cBOHfhPDRCbp4mXRV3fX2Df/Uj/5LnfsEXMBwOu83bGnm9qRcLWEqSJG0mF8QF8ZY/+RM2t7b4oX/9wzzhCU/gwx/+MD/+Iz/KpQsXm8roQpRuN0ULKO3mb0XczurgA0LStam0LVDgu428DCDL7Wjta9IZoG/VPOO1C7eAz7Ims3hM3KIn3bKmGytA65dqPwMsGYI7rbgFGY8i0NjMCN4zHo24eOUG+9Man2RUQTKpDOiEtdUhhw5tL+wYTeBt+yvb87BcdHFNISeCv8LWFUkSAzIh4LrAEZl+NMFG7SgWYhp9NjRaomh+F6L73jbjkJSIlnpPoLYGZMOyg6Cso6bVzxMCkuHKGkqn7I8muP0DamNQSUqS5RyMpxRlzdr6JkHQFQB6vQFSanZv3GRejellOfkgMvrKzCmrOJoqG2iqqaW2rsm0oq/P+EBKCiL6CLXQ4BNQEusdxsVWNKXiuCoceBZg9ijNTGt9q68rBI7s7DTm2MCRnSNUVcX+wX77hGgANYuStGhY1IWLF7j//vu548Rx7jhxguMnT3L0+HFe+g3fQCBw5cqVSEcbentkZ6d52za9XGyobmM1izjNMs488gjb21s4F9ue2kpqK5IuKiY1Uslmcfkm2Er+y2/8Gp/45Kd4zj3P54lPfgY/9uORQc7nM6SSzOcFr/2Jn+TLXvRi7rnni3jyk5/Eq1/1XV017Pz5C7zmB3+YF7zghbzwy76SL3vhl/INL3tJt7mUUt04oKIoO6CVSuKsjWyyioWBRW9bNBBfunwFgFMnT3WtW3VVx9RbCF7zr36AnSNHeNlXfx3P+dzP5Vte9rLFuQsgpepGrVRl1X2ediM55/nVX/plvvklL+NLX/DFaKX5ntd834KPLDnKlxlGO4G3BYO2vzWmoXGDtQ3Hi9E1t4LYMgDd/ng7hHO5CtqCVzvcoJ0gDHHKR6/X6xhiO6Cz/ZtlQG5vy16nZaBrQSUWPqKmG++W0PjOlFIcPXKUEydOcPTYEVZWVqJ4nqUkiaKqCs6cPsOZM2c4e/YsFy9e5MqVK9y8eZP9/f1mncp/9K617go9qmkJbNv8bq8Oqi5gtAzWdftSiDaQLwpmUkrKqmJelnFKDR4XrXQIJRFJyqwuGayvolLN3sE+V65eZV6WpHmOJ7Bz9AhIwayYM51OGY1G1M6S9nJUmpDlCQGDF4YsV6gUinLCaLJPUU5RiUDKAMqiVEClIFXAC4PzVRxp1MhDsqlW18ZQmRpjo0zjPNTOUZl/pADQgkjr8QL4uq/9Wp72tKfR6+W8+lWv4m1vfzvOLVqFIIqR7Ult6fmf/fmfs76+zitf+croMdGau591N89+9rMhwJvf8la+4J57eM6zPx+tFD/4gz+4tLhYrrp2FzFLo7M9+MB9932IT37yU7z+p1/H6uoqWmte9KIv66ZhysbA+On77+eLnv/8zsSolSbRmpWVIbs3d5nN5tx56hTf9m3fSlseV0rxFV/xIp785CcRWzLixhyPx6ytrSGF4Gu++qt4ypOfRFVXcbJC8zg+zoNz1nH82FH+4UPv5ztf8e2Ng7/VSiLITsZN25hz+AYgpBDs7u3yjnf+Ga/7yR/n2LFjCAF33/153HnqJCF4hsMB8/mc6TQOUvyuV78agF6v3zGWqqq4dvUaT3rKk7tNbF2MwM+557k845nPoB11LARMJ9PP6jFcBrRW/O8MznLRlN8JQSFWJYN3sWsheKQArSSqcYu2/9aqMZASGUVwFryL7nYRzZ7Ld0nA1hWmKnGm7p4TJylEzS3KKSHem9UfbHTH26qOPqsQp58ootbljMEZi/AggkA2nqxlMCznBRcuXqAsCuq64uBgj4ODPUwzVcTWNavDAcN+j0EvZ9DLWR0OWF9bZXtzg/XVFapiTl0WmKrE1lV3b48nSRIIt05lbm1NVVV29ob2WizfnXNIHEp4lAhoCUoLdCIRWoAC66OuaFzAugBCoZIMdIIlUFQVZV1jQ2BtY4PV9fVuqKgncOquO8n6PSbzGePZFNvIUfOiYHc3jnNCggsm5obS40IEN5kEHDUqbaqTWiBUQCXRNlKZktpV0ZtoLbbRmGtjKcqaojbsj6bsH0zZH83YPxh/djBzto2IodOs/uQtb+bXfvVXOXv6DEpJvv81r7kVaRr8atdj+9h0MuGrvvp/4ku++It54NOf5tyZM/z0636KPMtAwP3338/3fO/38au//MucfvghLlw4f4tDvTFi3PI21tnuogkh+KZv+RZCgI986IOcOf0Qr/yu76SuTWPglOhE8+P/7rU84+lP4/rVy/z93/1tF5Vf/T3fx7d+6//KI2ce4Bd/8ed5x9vfRSDQ68Wx1KdOnuR3fuc3eeihT/F3f/c+7r33Pv7TL/1K3Lha8Tmf83T++I/+gAvnHuKdb38L//W3f5e3vOVPu43tnCNJU+666052dg7Hi2NjNVNKiQDKMo5HjsKtb8YoBYILvPJV38/Dp8/wrre/mXOPPMCP/ui/iZ49BD/3Mz/L1tYW7//IfbzxrW/hvns/ANC4v9vR4p6f+anX8e0v/w7+8q/fx5ve+mYSHacwbGxs8u9f/3o+8A8f5i/e+x729/f5+f/wH+G2M347M172nLWG3O65TZuJlIsJwMsbcvneBr/Wy9SeD+AW1tS+Z5s6tuNqgG6wZ1mWtF6+JIlucVrLg19oVy0jvJ35LY6LplIdf6dVO11lMeHYhxh40iwh72VxnLcW5L04w19K0a3RsozN2pPJhMl4HH9OJrcazsOt63v5M3ajlxotra5riiKaa4O/Pb2mO7fWxqmzrelW66Xz3mQGsd1JxHHwAtI8Q6oEYxyb21uUdcXNvV2yXs7K2iq1NUit2N7epqij9tXOJOs3M/3rusY4i9KS1mfmfE3AobTovucgShCKJI1FoGjoFkjd9vA2qbk1CxnDgbWe2vgIalVNWTuqxfJD3H4yDx+/I0wODgghMoR3vfOdvOc97+F1P/3TTVXRo5v0zYfQjANu2pCg80jdvijbVCoaNT1CxsUSzYAL4GzFykWqAK2/S4hYam5fczERw3dVuhBoxqdkXdtIVRUdi2yHQ7bjdkOIPXM6iVM3y6JkuDKgKqvORS+kYH19nb
Is48x3Y1hf34ijcZRma2uLvf19XMNAWmY3L+bkWc7a6hqz+az7HoT2s0RQiNM5kzShrmoIorN1GGuRSjAYDsF55vMZK70eqVaI0LbJRJ9TEG1FTpGlaWSSUnQRPH7xzJDg45x5U8eUtWMdYWFjEY1Z+vjhHb702c/tgOT2VpRlVtbpZs2gzhbM26DT9u+1G7Wdiddew5bpqabnUiwxotvTxNvF+2WNrO0+oTFHLn/eZfBYTuuiGNa8ZqvbNTYHJWMl0wdBV4PUGXVQ7M9KruyPmBQGrxOC1HhX403ZecduvwONf42u06M1j7Zg7V2jpzVfjKIaDdGauCbTVC8sPN4TnGnALLYrzebTKMWkadxvdNSEQMDUseAEsZiW6AyVJlRlyWQ6ZefQFvP5lL29Pba2tuJYoPmMzc1NlFJcv34dWPgfW822ts3gUme6IJYk7ZzB5twL30yP6QPELzAJgZWVAUJIiqKg1xviTE2WKnqZ4tDmkLs/9xkc7F7m1PEdenmK8KY5HsdXvvRVAj7LCCDR0KzWIBv/iNh/6ZtJCQuXbKOxyCWhvl1Qj761kV2wmA5AYy5td3n7nOYvaE2u0E6ucLdoI/FCKubzovmuAUevF1348YsaZt3ftTpaCB4ldSztW4uQMk7DbQ2uIl545xwixBK/1vHLMZJ+Gv04KrZDlVXJ1tYme3v7lGWBEPG1tIrfyFQ07KvdzG26297mxZyNbCOmEs4jpUYqRWUqgoujjFOt8S6CUqJkk4r5CH5N90WUOH2nX/o6urizNMV5H2eN9WMaaowB77vzGho2cntq37GmJVbWppvL4NAabj2Q6AVItXPe21SpjbrL79H9P3G8jXoUOC0CYwuovV6v+yKNtkG5tYgopVt8uuV1bgfH9lja49NJgguBytrGjtGwtjZbdWCBsqw5mBbszyqmtaG0gboUCJ2ipKAuythO1OiJy7pg21C9fFtmVwDO+k6HFEIQGqP5orBVd58r7pu2iKK7hu5YRY7rqNVsWstVXdeoJDaBW+tIs5TBsIcWkvlswoUL5zh27Ah5njObTVhdXefIzmHyLOdT938KHGxsrJFIhTEVpqxic7nSHD26w3Q0RjZAGVxonD6C4EIc/V57lIjQU5dxrQ77A5JUI0JsA2vwuTHfpiRJipQaSFAyjeZeRKcLwmfymREbqVttf7HoFv82zZdraK1x1pKkKf08p5jPonAXbk0Pu7I3iw/YVqxg8aUgSkf/jHOOH/qBH+AHf+A1t36ApRT2n7/8FbztbW/vHjJNU3brum43jGootRCim8XUHqkPHoEkaQbgzWezWzZsO71DCklV1dS1iRM5lzZx3JwWIZqvMXMOY+N00I2NDfb395shh9zSs2dtOwpYI5yjbvo3oxE1LnglI0Ooq5o8zVhZWWE2HiGyjDTRDSMOXXuab76xxjd2h1Ywb2+2SV1Cw4S7boYWMGilghjM2laf7otiGg0y/vs2dgN4H/sYvTF4QvyGpDQGEVuV4CLACe+QIWpYxlmkUmgRbVo+WMJS6rAMZu35DiF0k0qVUiSpQqq0syG0wa89/ltTShomr5ZAzTeZSGRNSbNetFRoGSdJOATSQSIk/SSldoFxUSGJAxeUlHE4Z5KwMTiEFLcyxu4aWNvYXJpbYyNJ5NLnkTJOPRHxuJM0je1H1jXfu1HG8UPEwQiEsAC+4Dt3fVch1RpBLH4571hZHXRrcW1lSK/Xa6bMGrJEQ0iZTSdoFbsjnK2pq4L5ZEKWaObVHGcNvV4fQWwjS5I43zAYy8HuQccMhZA447o1lMiMpJ+BV/GrEZMcEiAobO0JLjA+GKEThagduRYooaiLmqc+5RkM+yl1UWBdSXDuFs70aGamWpIf4egrXvzibvO37D1N0kXlMvbUxO+JLOa3v9zyquzSUR88shmSGBqrQiAucogN0P/hZ3+W//izP0dsz1G3tPaE28CyKfB14ND2UwIURUk7EG8ZqCLSq+aixxJx/IIE2eksbUN53KgtTY4L3hpDr9/D/T+MvXm8pVlZ3/tda73D3vsMdWqeuqu7qpueaJV7jQEaJTESo2AjComE5BrE3GsTBBnUi/lo7jVEBQFphsSIE7nXGLEVBEG4N04YmlY0CUhDT/RU3V3dp6ZT55x99n6HNdw/nrXe992nCm9eOF1VZ+/97vdd71rPep7f83t+j/Nsb29JW724u8/mM7KsVwuYzWZx8dCFFGVZkBRDsjzHedkhpV60iTw+wRC2p9uSkYp1mgDVvEKr0H0PiGaW2PuhRx1SwrmjeKSjM2bDxwTdsy+j57PbO+rHfTF8EqPgCEF3yYEhrjOkWwxD1fQAe4rH4ncOv29ICRnWiopOncX7dLNc9vndx2IEIHfvvWxOIUCDeJW5zoXQjMYinvt0OhXYobFizgygPD5YZvNWQuVdnmD6M2WZOyMbYYWEM7auZSFMJnRzJdFLtNYg7ITuPvsMsYSbhOgw+IECBwrbtN19N8xRqc1dVeFcQ6ZFJURpSWTZtqKppONUsC15ptAqoL2l0ApdSEItx0PTUpqcTOfkKpOSO63xmMglk2uxc9lUS11IdLMl0dOkHLG8dxkQ3tmkyBmXY8bjJVaX9rCyPGajOR/v2eHD3yABNF5eZmd7ShuVQuUpyH+Sd5YKRtOO5qIEjB+8X6GG4NDQqeofrJJrjgyQgfqGkEdRxHq73gvoMR5ARYmgDpyW8K5tRTctdHymlKruJ2/CcSCWfMSdQ2giyZCJ0KS1FqNFu11kenKWYgu9ZCST7EvHFWotm5c2WVld6TyiLLKdTQrlE1Md6QnYNq0oizpHVUfNMSW413R7SnCOPSsr5Fkm3KHYTSokN1oRxy2N/cA7lpvG2rbTCFMMFn6XTRRbOClKbj11ik6NIUAIyTj5gccX2/QRDX7crBx+wTgppXAk+SXBFtPcEGOmUUSt+mG0O3hmATow3lobdcVEFDGEgG2aGKan0EqRmPKCD6apE1AkIYNUWSJjp5SmMDnOA96jg+6eqdbS87OyjYDrWpEZMXAO8NZKPaoL2LBILRl6sQKFiBfpUkeOEDO6OhGCpRxPzhHVT2zbeZzdOogRy8JGo6UeOiBryjgvWmpBRsT5WHwewAdHbUXmyrsW5URlV4UM5T3at/jG4pSXBtka6uAI1YzWNtHb0hgPrrK0LpATv1M5sqgK7WhpGklkiKcpmOkoz6m9Z1pNMd5RTla5eHadPDcsjcboosC3nrPPnGN26RIqePne0MZxavm275bbviJmtv/wIWbTnV7JYhBS2NaytLwEm9J4IwGX+w8fgixJYcf9fWEXJi7kLBapjihLwbemOzsSrua5KBR437nNbdNgsozlpSW0EZVOZyWUc1GDy6SsWJZzYP9BNi9tkhcFGxsXIjh/gcl4LPVzbUtmDKPxEkePHKOqakAx3Z7hXGQnW5GRyfOceVUxne6wvLzC8ePH2YmNdK8+cYKnnjrDmTNPc+zYcaqmom1abGPJszzW7U05eepm5vM5lzY3Os+sKAq2t6aieBtbsYFiNBpx1bFj7Mx3OH/xIirT0
lS4rcmUot6ZcvTwEa676jilVtSzKbat8AhG0lobS392J6kjqK9E4TZlG5OhSbhYIr4WecbVhw+zPB4vhKm7PbH0uyEJNsQQNK213V5WMnBEA9LhVvQb3JAblj6T/hwu5oSdJrA5Gc0+udNvXFov/ju+obsOpXpstih6Y9bhhkaDNhhtWC0KWg9tMPg6UjKc9Ct1zpMpc7mBid+R8MaUHFnQl3PScxOvumglEYGd6xsJgScMMD0G4+UIKCUM/+BTzWdABxdFTuX8QqyNNbfWieFWilJB1dQo46VxcnCoANobirygzEdoHHiLq1uM0hRlKRuKEwzOWC1F8UpkzwtjhOnfQhUsmdegITcBrQPagM002JZqOkUpi84z8kwzLkdoFNsb25yfb1PtTFlZXcIYjdFSZZOOK7aa08awtLqykHpPKfQsy9jZ2WHfwYOAYACTyQSVZVxz6mTstWcR7zbparluA84zIf+lPpltayk2N/uGoCrrCr21FuOYMnHpgdd1TVVVl4UZShny0RIrazkHDx1gaXUP+/fvA2NE7dZaNjc3WV5eZnt7zvETJ9m4uMnXfd3X88jDj3Pu/FluueFGnnnmKc6dOyfFuFq0yo3JmSzt4+T1z+ahhx5isnKAA4cN5XgvR44cZWPzEqOiBA/bmxJ2OtcynW6ztvcgN9/89Vy4cJYLFy6wb98BZrOKrc3tGO7mZKagbWucz9i3f429B6/COnkMhgAAIABJREFUG83mjjSDXRmNyIOndI7D+w9zbN9eQjvH1js0tiIrRMFWB4UKSSppYM4GYVzCuFLIXhRFJ/UyZOQn43B5OHa5Ydi9cIcGbvi73RhS+mz3WgjiXQ2+Y+G8IXQd01X00BI2JHM06119ks8onEk/vOb4ns5wRuPjg0jE+0BSQpJF6gPWW1SWYUajuMELVy6pwOrQ1Q5ICOSH3pkYIfEq+36tWvc4oPMWpQxlOcZElRnnnFC1BkmYrns5ffjaPd+gkCsCsBJqOim7NzokH1pgFZxQOLztvkNrjcp1DDUVJhjRhgPGRpRLJqur1HNJuhhtKHRf4F5ohQ0BZRRloSjHhnFR4JXH5lC1Wrh0ZUFuDI1r0XnO8mQPO1vbXNy8wIHjR3BBVEls02Br4afpVKheeSiU9FEYYPFXNGbOWqabW52USLcrxnrEupJOSm3bgoJ2Puf8M8+wurLCLAre+RTD+zTJ08DrLsSTjBexv6HrXk8LTmgc8uA28nwQNg6ln+ndgADPPPk4o9GYi+eeZDQecfbM4/jguXjuGYpCGkXsbF5kezrjC15A/bba4tKlTba2Ntk8/2QkJooCK8mTCPDMGcNDD0nZ0WOPfhmtBKN76vT9rKyuSpfnVmRhilIA0AsXzmMyxZOn97G6Z5WzZ9d5+qlH2LdP+olubW1H6SBLlokAXVEWWOdobEtjW6xruRATBZM8Z+PCUxxeW+O5t34dS6OS1lU0dS29O5WGoLs4bTdeldL1va58b7iuZDyGxmeY+buSgevfv/j53ecdnnORsiAL1FzB4A2vIUTAe0gLSQKASmnqqpc6SnNj6O3tToz01yI8MsG+Bhr6KRwPkglsqpqt7R2mOzPm1mMTupK8OAUos3DvQ48y/X33fSVV5aIoIgQiyScVArY1sQN63xtWoIJFY6aCGDRZFzlSESBF3ShEB8wHNCLxrUmRFxgNOlNMsiVUbILsnCE46fJUxLA6yzKMs+RBcOaMAEEUYUdZDgWxmVHBaFQO5lzRedOTpVHnrOhIQdmYZPhQc/H8eTzSinJSlIRxbNzdepwLzLa3hQicieJJOi4zZrZtubh+dgG47YUSI9CcZTR1TdtGekQ0WBvtxe79goElLan4WaU6XKxOgGfMyHnnIt9MvqNv6uC6CSgTTgDQTlfNxyLVQIddeddSzTXVPO8aCocQaIsc21rqucdZy4Xz64Di0Uck+9nUNRdjprbLhHm/MPnsJReVaofAN7SNkDfb1pKbDL8lWmmEQDW3zHa22D/fx2w2p20bQrC0rWU222K2s4X3oau9m88VVV1ho5YYIVCFqHwwKmnqHc5fWKdtK77jtud34VnwPgonq4E+WX+dQ24W0FUKJA8ted9DImxaiLv/vRvUHr6W2Pwh4VQqGS2iR9DrwgUfKx9UVM4womA73ADDwDD38EUqnQvoSNZNWdb+vYvXuvt+0ns6wxIkC2y0BieYkgJUDMsznaGdZ2e6TdvWNE1N6zxtENwsaEPQCp1U+ZXu7l0egJwwi1n3EEReSYXYcclolM4WjH8IocMh02c6w0jCR/v3Og9BaYjge5Qg6fC34AO5zoSnqCUkTgRzI9ktslEZax41kEVP35OpjMwogg2MMkOppL+oazzWNRg0ZSGCl3muGY0y8lxhjHjOuQGlDGppEkv9NEpJQqttW7wtQB/iq0+uY5EQc3VpmZXlZXSAuZVKglFZ4r3FtZbhTL28oclsB+8dGoVXCdeQ/4SInU2WlqJGewK8FZnJpWu11uLE+j5PNoztO7g6DDg+JEOZmu72mR1x1ftJ2YUUERexwRJXsGQCI+UjyzOaiOe1bUOR5yhk93fOibx0lAmazWaEgHQYj9fpfCr+TSJzdCTcqq4YjcZoJSq5TVPHXoc5o1I8NwFwdQ/QB9jc3IpGWlNVFXlekA0UZ513tLbt7l0PDLpC5FLaRgiaeZ7xyJOn2dq6FZNLh/CmaWirFq0cucnRmRiGxAtNC955Mbh5UeCdp25Ebz/PY5G3ChErTQsyUTnEOHVrPxqnFNFqhWS5dC+ouXgkA9NNhf4V7wkdnhbY7c2lvw/7Sw4pF/Lv2E9S5x0R2wMmCmyiUqpEEZLXpBAlEID4pw89Q18pRaEKTK4xGRR5xiSUzNqasta0vsW5FufAK0NwhloJ0XZ43cNMZEryJPysy9JqDVrF5EZLSBCN99ioIdd5f9GLTVFD73wQSb+ylsS9jFUbIWAAU+RoJ7OzNIrCmFhSFqKShmRJFZBlOZkWTlqmMkymqOfSzSzTsYXdvGE2c+S5YVzkNO0MowrJ/Qb61n6xAkEBTTUnuJzxeIRznvl0E9fUrC5NuPbqq9iaVjI3K5HxLozB48nKgjwfU9dz6vnOwsZ7eZjppGlESLjCQupeiridlXKhzBgBPq2wkBPHyUcN/jDY2VIGLZiEYgyykginSZGxceE83/ni7+Lzf/l5MRhaUdfNwi4qLH7NL7z73bzi5d/LeDzm3/zMz/IL77lTClOdZ14JJUMjlQxp4o/KMiYRPL/4ix/AWcfrXv/GSLQUzMZGgmtZlN1E053RFgNz5NAh7rnnzzBRqfbIkWvEwEeeXSp2dzF80LHXptaGprV434DSsdGIxnuLib0JXNtnMsXjJJY6ClYUYtZQnGIvXcXrSp5PptA+GpkuYyg1jsroTlhSK0RL3zlMfK7BWbTJyI0RqkHKkgaZ+CbP4j+9EKgHrfzE6xPJ6J5TuDh3uues+nPr2Nkd0oaVlCuiIUuepFKdcWpSBlDRQwzpW6IEtvcJM0vevviE1lmatomUIo/3LcZH/CWXzcc6C0aR
l0n8z7M52+DCxYtsbm/TtI6gNLOqZjqruLCxRZaXHD12HEdJ7cW7UdB59snjbduW6UyMLtpg8r6ZTIjVD/hArg3leEJVV+zsbGO0Ymk8QRrGtCKaGWsunTNdPwFBwxqCk6jJKAHKic5JC2AthkDuA3NrqcIc7T0iJxUi2degdUYdHI2SXpdNUHhnWZosM5vP2WmnFEVGWY5ZWVvGWk9tawieebVD01aRa9ZjtXkuyTHpiG7Z2HgaY3KWlyeMx0tM5w37Vib41lPNmwg5BLbmUgrWtjWZMSyvLoGCjWlfm3kFCaAQ/z/APdK/AthW2r1pI6S3pGsWOj6W7T6lY4gBA+8u/Ur17r6kiEO3MMSbsiyvRJJl9GQER4vQpXP86I/9OG/4kTfyB5/4/Qh+BrK8794cv7RbSh2W5xzOd182wDakq5InXo8TADWEGGpowWW895w7d46TJ2/g+c9/Hh/5yIcFXHZR3FAPGq2g8D65IvKnXJ/HRo2vFIp476IgpgKteOG3fAs/+zM/zdVXXcV9993HG97wZr56//2XPS75Q+7XkIr+6cI9rUT4TgNSKpzMAtHTUt3fh2J3C/Mh4jHyLHvoof9sIkaHy/C4HidazFJ282vhfXSvJ1zoax5XeM15H9sZKpSSJjQhXqfRRvT6vGJpsoTSgdlsSxIvoQU81nm0LilGY2bziqcefpIzZ86AChw+coTVPatsbUtx/2RyhMwUnD+/wbnzF9mZXuD8VsPqkZMRC+r5dkDnje3Gy9L889EB0AjcY7TuBCoh6ZoNPdZFGIHkfceOVSjxshEzhQlS/1s3DWUW56S3+GYGto3qrRrb1ISyJM/TMw7Y4CN/s2G6vd33UohVDommhPLgpG1eMl4pukgYZ9u2rKysdJp7qcQQoGosa3sPMd2a4ggxc6koyxxrS0wuJVB6HjmgfxNmFjpvTPVgxUJIIBctzOOBh0Xsqxj91C5j1D2x5CH5XZNwSICNDy3IjVfzitXVVba3t8nyLPbX1F1XIWPixFBgUjFwzMqEaLSSSoVgNLG2kwHo7aWWEKXxbcuoHBMyF4u/xTiJJ6IpyqIPGWABN0uehNKq0xxLmEkquRCRwsi58oG2abuWdUOsUSnFwQMH+Q+//kF+6v98G3fd9bu8+Y2v5zd+40Pc9twXLIxdmtjKp3FcZJ1HZ3PRiAQJJzqD22FUixnHrwXY9+TMRTC7L3PqjVl67UrvHS7EYSXBMES90jX8jUfEmIzOYjIEgpNm1loracAS+WJtI/0aMmOYTEoCHh/A5AVPP32Gc+fPgVbcdNMpVtdWuiL+w4dvpXWe+bzBB8XScsnRYwdoLTxyZoMzl2zvUQ7uY5hRXgDtB8YsyfVIFYsYsG4+p6JsY0B5QkibsJCV0zJVKq0o1anxSiwauvFRSmSGRNba4W0rMIyCeQzfksZ/usZU/J4kl1K42xky6MsFB/Nw99xK7y8KketO7ARrhYB8/vx55pUlzw2ulR4KJvbZyKwYPZNJhUZRrHZjfEUJoJAmBfDpT32K97/vA9z92c+y/swz/M5dv8O+/fskXInA6Jfv/RKv/+Ef5rc//GGePvM0D97/ADfdKLIzJ0+e5K677uKxRx7hr7/4Re6444e6G/Te809e9Sr++otf5PTpx3nzm98cJ3a00lENQWsd1V1110txAcwOgrPpiDf4IIz+W599C3/wid/nsUcf4cknnuBjv/dRTp68FhPVELz3LK+s8JHf+S0ef/g+/uCTv8d1p052qg6rKyu8613v4Itf/Cu+/OUv8Pa3/wwoOiWDMJiw4jHGxEfoAjQOHz7MPfd8hh98zaul1CmJJga6cySwvjf+ipe8+Ds5e/Yc//E3f4umqXnPe97H4cOHeN7zn0d/dhZXPr3iQxrjKwHhu38/NCTDZzPEenb/dFc6+LxM7l1h3+AzQyP4tY8rv3alzOkVP61EwinLirjbC+VBMuYNILym6c42Ozs75FnOZCJFz9PplDNnnuD++/+are0LrK0tcfKao1x7zVGOHtnPgf2rjCc5lzYvUM+3MNqyslJw4MAqBw+tsWdtwmicLWTYhmTvRCNK/LJhofyQCCub26IaRlHkfRUJ7srPQ/V+guqSLrHEK5Yyee9iCNmrlGitBb6IXLaUTR1ed3q+iZ843JiSQkhKJKXnNcQ1d8+BWeydsLsyBx8EZomNYoJ1BOfIFBS5oSwyDuzfSxmzvUXZ+2OXGbPOze/+A9/7Pd/DD91xBydPncIHz7t+/p0LqXyAO+64gw/8u3/HiWuu4bu/52Vsbm4xWZrwyU98gi984QvccOONfO/LX8Gb3/QmXvSibyOEwLNvuYX3vOcX+KEf+iFuuOFGTpw40e1+TduQZRmbm5td+UcKz9KC6KRUUkchL9kxccsVRVny73/pg9x869dx87OfzcalS7z//e/rEegAL/7Of8A7330nN9zyHB544EHe+95f6GpFP/CB93Li6qt54Qv/Hn/7b9/Gtddcw4//6Ju6EK7Hf5CBD6ELl2NUTZ5n3PCs60VjLQTa1i7szAuTcRBm33jDs/jKffdz6tS1fOLjH+XYsWM89thj3HjTDbseWNTwGjy7dOw2POnc6bUr0RPSa1c6z/+YQbncmCWu4deSvVm81isbzStd15W/XWFiLwEdotSyj92vg0cFR7AWnGNU5CyNR7RVxenHHuehBx7gzJmnCN6yd22Zo0cOsLI6pqqmbFxYZ3vrIjvTi5xdf4LNrQvU9Q7e1uhgMVp0xFRsWnKlsqzdWeWhMUsKGkJhsLioPOFS/1DiJrCwITlC6JsNp/vXEjbE+w1RICJ6ZCGQaS3d4RvRT9NaR9jI0lRz9u/fz969e9mzZw8rKyssLy+zsrLC2toa+/bt68LHJBg5NHrGSEa+yHJyk0mf1bxgXI4YlyMmozHjcgQhMN+ZUc3mEKRloYk0lFwbCi0ORWqniIryVllOZjR1NePSxjm2Lm10z/7yQvMFaybHxz72Me677z5CCLz//R/g9z/+ccKrf6BrowZw11138ad/8ieEEHjwgQdQSvM9L3sZeZ7ztre9DYD77r+Pj3zko7z8e1/Of/7DP+L2l97Of/nsZ7n7c/eQZRl33nkn//wHXyPupBHSoPdJRiY2CdE9ryjEjujDyTJcpP/tv/1XPm9F26ooSj7823fx67/6K50cidKauz93D3fffQ9KG3711/8vPvuZP+TgwQO0TcN3fddLuO3538KlSxsoFL/yK7/C29720/zMz76dDjdKk5XYrb3DCQVpfOKJp9h38HjK+kcXPMQ2Z7Jz9uG1sK4Jgclkia3tLdZW93DjDc9iz+oy21tTlpeWFsyFUopOLifGlB0vij68Se8dUlw6Jv7/Twi3m96wmzuWzpPGf3dYOvwzhSNXMlZ9iHv554ff+zddbwiizKB17G/KkN+VMrGKImppXbx4kQsXz7O1dYlROeLksRNcde0xLm1usLOzhZ4rxpMxRSlKI4rA1VcfxznPdDrnXFWhdcbKyh5W9yyzb1/NI+vrOFw3LkOu5tALS2OQ8KL++lusVQNPNnT
hm1KLmd7dXvYQEkqjJNMxxHKtSHtC1GAyY8jzDOUNVimsk8x/yqQPvbAQJDmQJH+Sl5aMWJZlLE9G5JmhqsQDFY25XjkkGe6kQ6e1pixKlpeWqeua+bymqQNkkvXHu5hcHJNpKURLdBxUwPmeU3iF7kx6OB4oFOvr63FDUF0PgP3797Fx6VL3rgcffDCWFkj2SOO5+uqrOXDgAE89eXkPAKXgyOEjnF0XbaTgPWeeFqno1rZRBtlRjkp2ZjtkJiNElQTvfScGJ89vkZyYJs/Ja6/hbf/6X/O3vvEbWVpaxmSm6w6edNfW18/KQ3OWs+vrABw4sL+bYJ/69CcWFpPoTdlIcByA1/GcbjjBVEqkSIZNwuHItVOptAdAspTdrktgZ2fKsWPH+Pxf/hWnnnULKnhWVpeZ7cy65IlSIXqI4oF0mlUdftZfd1o8ib83LAHare4wNFYLc2MXvgOLIamoVrgFvKgfn3j4IUeMzkPuxAa1dOb6Wp7Z/4jhlfyGJyBdrmQDjKFPpPY417C5eYmNjQs4Zzl04CCHDx+gmBRcOH8Ra0WlYt7WzOYzxuMRy6sikT3fmRNULLzGELxnPpuztT3j3NkL0ojERb4Q/rJxLops4BX7bjwk+TIsyHeRTC5jkeWGTKW6aCnoT95sGuO0tWoYLmKSGo4BnLPSsi3TFEZTRqJ1qTXOF2xvbXZh7RDT9F7k0JPog41zwTYVrq3BWyoNoRB+57Ad4fD5Jekm733HA034G0CuDTpqtmUR98S2tMrTNBXFOEoW5Sp6rXJcQQIo3X0/kQ4fOdIN1aFDh6QHwMYGw/GysR9kwil8CDz+xGnuv/9+nvf858eHQ0wEiCVfP7vOc5/7XECyUIf27yed1MZaTdvKonO4zpBZa4WrEyVvUuYo4Wvimnvee+edrK+f5Ru/6blsbm3xXS95CXd9+D/FGjUJWQ8dOiKJC+dYXl4C4Oz6M52E9XO+4Tlsb213WIfoQ0liQLhqUec+diYKMb5MuNhwssXVJg8yZfYGO7ZMepl4Dz74EC960beRsoCjYsy1117LAw88IBMzPqdkUPtFvuiZLTzbAQjf8dcGO+bu9+7+3PB7hp5FZ8i8xy900B5e3+Uk3tTsJdUgduH35Zf+P3wIvmq6JswyED4mXyxNU0f9OpjNdrDWsW/fXo4fP4bWinPnznPu4jrj8YjJZMzS0h6yTDCrPCsgpEa7gaYR/C3PM7Qq8K7FWYkq+gL13jCnzTD1K9jtpaYf8XTSeEeepZY6R6U0bVtDUp0Ji1CBUqprERmXW2fMtOwaTKc7jPNMGgFrcN6igyMzkJmMrek23pedQks62ralaZquq/mVcFeBXvrJNwy30/vn83kHHSWdPxiKVWpyWWCYTKECUXm2YVZvk9uSLFexNrzun/2VJkSXC4kT97tf+lJuvOlGJpMJb3jD6/n9T3yi3yXTDSWvYID/fPpTn2ZtbY3Xve51jMZjsjzjm/7WN/G85z0PYwyf/OQf8ILbbuMFt91Gnmf86FtEv2xUivKqi7VtqRlJwl6Sbn0WuWFfue8+nv/853XurI6dqpeXl9na2qJuao4dO8YP/4vXyqUNFs43v+A2nv/cb6LIDXf8bz/In9/z5zz6yMM89dQTfPITn+Ad73g7e9f20DYVV119nG9/0bdJMXWcGI88/FWcc3zzN9+GIpAZ1XUaD95x5PAh/urzn+WH/tfXRHA8jVu/UwlYauOYy+B98g8+xeFDB3nVP/4+8izjjW98A2fPnuPzf/4XA6PgpZYwuKir1e/+Q+OQDNWQMpEm39AQDY3N0OglMDftsincGDbaGNbuZpHFrzvIIvVEEEOnVeQd7vq9iVFBwta6xbmwWPqmH0mMIC2W9FrTWnZmc1zw5EWB9ZbpbBtlFEsrS1zYOM9X7v8yG5sXOXbVUfbt38eTZ87wxS/dyzPPnGNUrKL1hBBKNCPybILJJihGKFVSjlYZlSuMRisEn+OtYTaznL+wTRt16ooioygyskxTlkIOzXODc20sl6uR9nvS/1HCRxczwX1DnHKUYzLZsHd2Zsznc0LwEVeTOaPTohewDKMj8ddbrLekhJGQcC1FljMqcrRStFVNNZ/FSEKe9bgcoRVU8xnT7S3apiYzWhozF7noFto2RkWBzGiKPHZ6inptuTbdj0F1vRY0MMoLyiynMBkG1fVmSK9lpteim8/nNHXdYXHGGKbTKRubl9jYvMTOQHbs8h4AzsWdvzdUH/v4x/iVD/4yX33oITKT8ZYffcvCRO+8AYgLTBbEdDrlu26/nb/zwhdy/1e+wunHHuMdb397bJcV+MpXvsKb3/wWPvjBX+KRh7/K6dOPx11Tvj/4SAPppLQXU/hpor/nzvditOHxRx/mqdOPk9ySt/7ET/CC227jyccf4/c/9lH+6I//pN8tnIUAn/rUp3nTG9/Aw1+9n1tuuYUfecPr2bd3D3vX9vAv3/q/U9cVf/pnn+GxJ57gtz78W5y45oRkgxDJlLPrT/Mv3/pWPvjLH+TRxx7hTT/yejFYQdLsZZFz6uRJ9q6tScVEDAbFcxsUOtNjRSEEzp87zz979T/n9a97LQ8/dB/f+nf/Dv/LP/n+aJDiBhIg9TYdRBQLx+4SpiEovRsL+1pYzJWOKwH6CtFUS9mwZNiuhBMNw9NFb03FqgV92bUNjeqwgUfnFUYKRlaU7Nm7htaanWpGOSpYXVtl/ewz/Pnn7+H8xfMcOXaM6667jqXlZbKiYG3fPvbu24fJSpQuyLIxeT4mL5bIixXyYomiWCbLJrhWY63B+wxUgdYlkBN8hnOKqm46uoF0epfSp6Tpnza0IbWhx8fic9EKbVh4j3Nt1wu1HxcJU9PzSPXUuxMtEmaCNtHopU0iev4qSOOZEHzs5XC5x74bJ1UkDcHUZV11cyx9dvccS1l8YEFMYj6fxyoUcUjaVpptG6NwWC5tbrCxeYnGeukipY38afrg8vIeAEcPhtnOXL7XBz79qU/zp5/5U975znd15RTSu1JUUBOjXGnV1WIONuTuNeG8JDZ8XyC8MKkRN7fDjobAJkPtMlCDECXVe0IsFE6jhsTnRWTyt00Tw4/IiVG6I/ZmxpBnhqXJiMzoDlMLQYiGzor0kPS+FM6YdZY2lpkIXqhQWRbNlerxGhQupsg7o6PihtE957SoF567TDSthEfmHLnRjIuSPNPoEPgn3/4ixuOcNrQxnDDSXHXQK2GoqtqXiS0akt3h4zCs3B0CDb2h4WILzovGV+znOAwxfKrXHRgteV7x+5HyLWJTac/l4dfw2H1dsngMZTHGZDnb0y0IPso47XD27DoXL54nBMfBgwc5fPgwJsvY2pS62LIc4V2gqluK0RLjyYSlyZhiVEgYmcXw1Qem0x0aZ2ka4SwWWYF1cPbcBk9e2GQ7GBr0ZZtvGothy7hu/g9wzuCgLEqKsgBCZwhTFy1h1RPnTtoYepUOpbNOgVYpRZ4J5070Ix3Btoxzw0gp8C0mWAoFChFPFKzMDXT8c/LYuLhpGpqm6fpsmJioS167D4E8wkHpkDJDIl
nd0zYNq6urGGPY2tpiPp8zGo1YW1tjVE6Y11Ie6IGltVWysmBWz6m9JS8zsiLDq95z/+Cv/ZaCK2YzB85aZ5RkMqZSIonTRd1gAYNRqTFJgJBi5xCrahYnZG/lB18eoIouZQpblIqF6D7s+kwfMqkEDJAoCv1uZ61DqVZwuKg+m5m4+LzFmIzlyRJFkUMQSZYQnAjt+RDDxdBNDmsbRKhPU5qc3GjaTMq6AtDaSCgW3zLaVBW7NEfNsLSoNV0mMo1DUiz3HdY2wJBCMpNSnBySAYwgstz95TvpggqJvwLGsgvL2h1u7gbdhzWFKeRMwHsIARdcJz7QnTdc7jUOpli3wXnAOj8Uy+2uafffhyEyEAnJGb5tuXhxgyw3aAOPn36M9fWnuer4UW686dkopdja2o6djmSDnVUVZTlhNJlgcqkAKMZLlKPYT0ArnFI4HG3wWNdj/K0P1HXLznwuWmCjFSnaHtxX+vsQKknjvNC2Twku5kOvRLsY9kePJ2JTyVtNvLQQAlotev2yWQy88dhfNigwcW6GIHwu7x1ZpntHJG06u8QH0rjnear8oMNME6M/bXpDbDbVmw4b2qREQ+ptoLXB5LIeXWzCnBU5de2YzWu0dd38GM7my4xZlvVdqoeS0QQ6fKRt245P1bnFEbgNpP+kqbpoxHaHD90RP5jHFlg/+pY38eY3veny98Uj9QDoPYg0yQf0g3jtyYj5EHWwiDyxLGNcloxKUcO0rUilBGJtKdEoIwXUaDpi4e4jN1LiJSVJSBKAgLexfEj1D727XY9MrG6sOqdUuEEk70R151t4E8S+lLp/PXgIizI9uzeR3UZqt+fVPfNdz+tKXlIyKELCVF3J2PAJD3G7K21ouzE8vsZnh58bYnh9yBvY3Jpivaexlu2Lm2xuXsR5y/Grr+Lqa45hCsPFjQ02Lm2QZQV71/YRgmI2mwngXIrXFEyQ1WGUhDMqhmZaYQojheoafNA4L4KGVTOnsTXelrTrA1SYAAAgAElEQVShT0gteK+D8R8a427BG8i00I6apuqehVJKKBTxuRkji14+b0k4m6zDMJhHCGFWBYIKaHRMfklJX6ZBBYm28L7XcBt468BlhnUYUSVjPfx9z5lzC+VK6ZwpytNaR4ZBTDC0LcV4RF7muLZhVs0g00z2rGC1YuPcWXydFFcWs+6XGbOOz4I8xBe/5MUxs1hIRicvmM/n1FGiuJvcSvWLLwVau3dXvSgauDBx+7wyBHj3L7yHO9/7/oUBSg92cUGJUU0ZPLkWFiRwXIrvY7jqrCU3huXJmPF4hAoB1zSCP0VvUsFCl6DkGSolTT2Sh+oHEkRSOC5lIqLeEaQjtnMQknKDGvhaYWHhq/R6kMnY39eA6pA+rUQ2Jo/F6d57sdDeQ1gE76+EfV3JaKQ/d2NnV/rc7hIo4vheyYjtPvfwWS5cS3xuqffXbmwtfSbhZamuL/201lI1lllVUzU129ub5Lnm2pPXcOLEMXxwnH7ySabTqeB5GrZnW3gHBE1WKjxSrB+Ux2NRJicvMtnICAQXKMYlJvcYK7Wc86qh9jUuWNkUh943V/Z+d9/7cMMQj0YSAkOowBgZS+sadEjJrh4O6I1+MkRxE4zXkmkFOiZ6rDAEvNJkEKGayJXctbmlzWMIM0CPm/ZNglhIrimlFtR0kwc/ZCUkFZT079ZaglYUEykFrOYVKs8YswJGY12gqhs88dwD1+wyYzaajCiKvOtPGQidVXLOY0yMy6FbcPEpDDwyCY+GnlnnuYUQFTl2c4YkTEnSwUopsqy/8d2Tejcwma41BMTI6t5jE3kZugyaDGwsEVEx/PRWDAO+x7/iZAk+dAZE8A3JQAkNQEVMzcXMYhSu1jpm9DRai6icDx3rrBsl4u+IYZrq1nW07mEwmrHWMx3feMONQrL1vp+0XsIMwev6cHs4TsNxGx67F13yuIbHkCQpc6IHm3XMLkuS48qJheF3pPN1u70PhBh6JwmmYffuNOETbnOZMWsttXVM64qqadi3b41Tp67h4MF9VG3DdGebxrXkpWBA1jm2N3cAzfLyHvKioByVlJOReFOxMa3JFWhiXwmHMrEbu9bSu8HVNK7CYlEmztnomQ2B/mFmeGjEhmVCMj8XZZBUxMbSc0whmjGLnkmq6+yUaLtH128mWisypaJAhHQ3Cjqtx1iWN5gz6RkN8bN0rclQJWMmcFQ0gjF8ddZhG5HXVrGeMpgsbgwStSilurpRkRCTRiUiy6ZonGVruo0FysmY+ZbFBbBeYd3fIJsNcPjoYap5RdO2QtoLgTwvu7bxmfMUeRkpE1oAUq2pmhofohRvBL/TTqqVFv0sL52ZVCqB6DwfRK8oEx0ySGFtEb0rKaSdVxWFySKpL+twMectzntMlkUJl0w6pwP1vALvKLMM7SzLo5KDa3tYXRqDbbBNRZFrlpeX0Lmmjm29TCbCdalcSkVvq6orbOS2mcg/q2PvxllVo7OCyWRCUY7RuqBuHTvzmnllRZVUZwSV4UJ8hmiUkS7p0qADXPBdU2GjBbw13rE6GrF/zyrH9+/l+NoeQnAor9DR8xOlVOH7JIxGdmZZEEoNPJ7Og47+nu4TElfyzhKKh1KdrLNPtX9BhAbElAVivN55zCn1YUyG1oYQRK8+ygZKnasXQcDUREYY4XOqqur6N1gnu3fTtNRN24WZ0kVcepj64Dl69Ai33HILx68+yqyasn52A+8dq6t7hL7RSBf5pckqRTGiLCcsLy0zWV4hK3J8cEIxMbnIIyEZ8Lapca2McdLOa11L1dZY7wjKdGM2DMmSMZExMAuezpD3F1wSQhVjGVQU3A6hA9C9d3jjCE5eV9EQqCDJk2BbfDbw9iK+jUmqM4IFu0i8TZ6/Hhiz3WHmMCLSWnfJvmG/W0K47F47TbbQzzUzSBD0mJ9Ihue5xkeDpoxhMhlRu8DOzg4qz0XyfjoDD14r9CD5d7kxi2nZyeqEcQjyIe9RXhEi6qaVQWNkFyuiHnpQBAM7do7Xi55YekghiGxzpjJS/8U8zxmNRgQHs/mcEAIjRAUjDejq6iqZ0ly8eJHVPWsCWu7yzHzEamrbonIDHQcpkE/G5EExQpO3lptOneLA8oTlUYFr52xtXWLPniX27NtD0OIpaaWisY6WIKatvbNsb29TVbP4YDTWWZFqiYv13PnzzGYV4yUpcxlPlrBBs7G9Q20VW7OG7arF64JgChwGZXLK0ajDLqR/pAdn0d5jvMU4x8E9qxxcW2HvZCyeYvQqlQKlITNK5I+Vw9kmNksJKGM6Y5SMs+6wHIcPkg10XjGrKuq5dJMqS40L0tjGZBkuOCEy4zGZSLDoTBout64maOETOe8iD0p1O3fTNvimkk0oBOqmwbYWkwmdQ3lNcKImMq9rrPVUdcW8qrDBUbcCss/ruNF6AbF9CFR1zWy+Qx48p646wjd83Q1cffVRpvMZ6888zdZ0m6IoUdrFzuCygS5NxuTZBBUM9dxick8+LsmUJFq8A9dKhydlA9oGsgB100ImDXh2ZhXz2uJ1Bsbgo+4ckTdHEGjDR
S9c5MHj5hAjEaPFELVBekdqDRoThSJlY0tJH+UDrrW0HnRmUEFjVNYJpk5GS/i0UQTZcJwPNLbGBM/yeITODKG1VM0clWvGhUE5T1NV5GVJlhWdMQouSNNeBXhFcGIDxBkRWe0QAtorWdvBYz3kRhoH+aBwHtrYxBonOnYJ2U7lSWIrLGQGFxItRUn/TgJOeYJrmIxzmsbKeQYG94qeWdM07GzvRIUKFa0v0SrnkeRIlDg2XUYOo2h8GzWU0tMaZrR6t9ukwUipfudlsg8wgvT6fHuTzBiqquqavKZzd0fE7BrbiteXmU5yGg86yCQ0Xtzxx7MsTmqHNopyVOKDJyske6UHAHkyaAHhj0lY05LqRqXiQELfPMs6+Wytt1HqGUzsHOWDompaGgetC7QerBfPDNV7q52yaxDXLXgn7bS945nlZZYnI0Z5LkmJJHMUr88HB6bvLem9X/C2lNJ9FlQRvc0kBqlxVkLmQ6trHD2wXzBFTfTOcxLjvC0LSdao0PGbQhSBTAXCziY+UQ5a0cSQUSuNNqJlVdc1SgmbuyxH2NYx3Z5SVw3Oe6qmZl7XuCCgfu0sXmnqEDC5dOyebk+ZzudMxmNuuOY4X3/DSZZGJU8//QSXtrfRJmPf3n1sbm5zbusCK6t78A68CzSNxblAWRSsLK+ytLqKE0xCtOGisKjRnlyJPHYTdeeCE4WHunY0rcf61FCkp5skiECmqGzwKUwcekA+hfcp8YQBI01SJFnUpX/kPAmTS1huiGFk7FkaOzX2OG38bo2KcId0bHJIhQFBRYgjXdPCEu5xz2jgIObNrcd6Ucvw1qF8IMtyvA80oZVs5KD2NNVZd+fqvkt8eg9xfLX05ywKtPe0Toxq27aElEr2DlyfjLvMmLVNy4WzFwfcLcl0JB6M0ZHPEiWAVJSGThpeIv6XQhR6TGhgeHRauPTUBB8fpEutseJ7tFb4qMtvncOmJsADqkaiBSilaK2Vc2s14IqJYdABCm04t3ERo1K6WZo1mCzDeYdJSrhpgPvIqw+HuvvrM1GEVErlF6R8U2cqYzKUNoSIKDkfaJzDWk+n/aR0Rxju7Wj0Cr1kWK2r2Z5JZxsd8cAUR4bke2kxTMNwJ8RJqKLRT9SP/j09QO+s5/Qz69y4cxUH1/bgfUqlK4qipCxzyrIgL3KUokuIeBe1pxR467EudU3KpW41Nnk22mCd1PklHAbV0lo5z6yqaOq2M2ZVUxMUNE6aYTgrbpULLbYWJv3a3j2cOH6cm6+7lv37Vrh47izbO1NAPMBmNsfojAMHDpAXI0IA24h8elKA7UMfj1ICuGcmNs5QOjYUSbW5MlZtY6mqlqbx+GA63HgYng8rF/o5sYhd9kkQNZCjvxyvhMjHo49IunU1AOsFMhCjnDZmrTRGifej4wYo8xecYSGTmRyPBXqHUh1+nqCJhKeBfF5phckEmklNikym4hQVGkpuhomKYUjrYwJCDFeeFZSTCXVjwTpcaLDzupcWcp5BaeblxqyaV3Ewe42tbo8JdDtzj+/G3GXcgvpuynGxRFB7mKzvjEJgYcCUFiJoiAKP3QMLHtelYVU6Q/y3fL8W0pZMgMFnE5SeHpTJMqFPoDo3NpENzcLkSTtRb8pU3NUgeW46AqjJuA4B7USPSAbPCum1KDuDknlNUAHrHWAw3VqQsU6IR0jDodKk8NEDWLhUBm/EhygGGTeejo+lZPxcl0zpJaZTBkwazHgee+YZXNtibUtrBcccj0aUo5Isk8WuVb+oQvCxZEV1pTMQO3HlOZ7A0tISRaHYnm7TNtIVflwWeKWYTqfYVigWjW0jHmWxXtoHJ/BfKfGAd2YzZnXF3r17OXXdKY4fPUJm4OmnzlBVM7KYfW93RGxwbe8+llfW2NrapiwKcikjxnvPzs4OBAW5ZryyRKalFCnPNJmRQn434IYBWBeY16m5LdGzlrBvaKyGPL/k4Qw5Z8Nsr1aSUBoasiGNZve/h0mV9NrQwHWZTiVdyFUsZFdpE/NesEsnUFF3vWqRSL1wjfFeDL2og6wfkdIP3drrjyTdZTq14Q7I7f6tlO48SokESiblhLbZQQGFKZiGGURCcqeeEY/LlWb90DEeHvFGB2qoaeEA7F5bIV1kSHZukMlLm1AkAarkoynYvLDBS26/nc/efXd3PeL5sPCQ4tXKOeMlhwhCC7jd7zDpqnpnJ4aMSrhh0h4nXnV0xxcmkCJKRg9qKb3v2s0PvZwsyxdqIFEZSvnuMyBhowqQ57EwN2mcBQkEBiat2xRSOcowZPTd6yyE8yEaqF4cb0AhSZ/1uzwziPyjnoYyU4qt6U73PqUUblaxM69IGTcVvcCO0JnuwPfd3lMNp2TgsuiZCs9P6yx6ZA3T2Zw2erl1XeO9p2lb6qYmQJe1zPOC4C2ZURzYu8Y1J67mmmPHGJUFWxfPM9/eZHlpCecD9axidXUPR1f2UDct586do25airLEIMmITGcCBxEz1XhMFshy8coyRdcbI01f56GJIWbbeAIZSueCK4cuwLviMVSIuKzcLPRGpJvlg0V/pZ90dB5U8sqGAH6cRzr4y7LpMlfApOJ170FZtJYOT14n2AMUIlOlUhldcAQvmU5RXOq9LhPx97YVQmymlZSL0Rs3WV6hu16lpFmRw1MUI4p8hHNbkQKSo8gQCNaDVxidd/d4hULzKxmy+Gu1+IiSQesMmQTU4qJ2H+mNyPDMwv6VLsmpmcgwazb870LlPWkA4s2j4nX1ix6l4kPq60QTvuCdhJLaSEjoIxs761LjcpYFOxgYeIL97iU9EASEzExGkUvplMn6kqzkAZrIdPbW4mwLQQp0R0XOqBDOU/CpYFzA0be//We5+57Psn5unR9/649LCBwkTGmtlYXeNhKK1YItzauK+bxmPq949Q/+c/7rvV/modNP8sEPfYjJZJnWOtrIffupn/7XfPXxJ3jszDPc+f5/izIZzvcCk84HqtZSWUftPJV1VNYyaxq2ZhWXpjMuTedsz2t26oZ5Y6lqx7zxVNZTu0DjwaIJOkflJZXzTKsaneeMJksEY9iezbi0vU1tLS545k3NvG2o2pbatjS7yLGz2Q47Ozusra5y6003cd211zLKc7y1lEXB0tIKWV5gdBbFAzNaa5nN57TWUpQlCk3qelWWJUvLyywvLzEal6KEYTRFZsiMhBbC5RMwniCUg7puqKqG1oEiQ6ucxCfc7ckwmMfD+ZxC3HR/YeDFia3pDVmicHwtgzakuQw9N/nx3XrQWnfJH7WwviRt4JNxWjCivW101mIjzy/Vx3b3EqGDPmru6SgdXhZcvz6TMU/fpySJoFTGqJxQFCOaxtLWLQq590znmNg4e8hxu8yYaaU7Tyl5Vb09Gwye6oI9+V8qZQrJgtN1Rhpk/DsXkhCzolp+lOpLerpv61zO3V7Worvb7U5BcCGfVBe8qIt2DzP4LjxURD5NSGDtgP8UH0byYDqWdkjh2dAbSViX1Kr6EBJlpgv1fPyFNDcR8FjhJZMVPHlmKHIjKqU+dskBHnzgAX7ix9/KX37+L0nekbUy
ieomGa6K+XzOrJozr+bMKzFq3/Kt38qP/cRP8MpXvIIbT51idXUPP/eud0ZjJRUU//D7XsnffeEL+Z+/4ev52899Lj/+1rd2gDFakhIuKFrraVpH3Via1tFaT2strXNYJxw66wKtC9Te0/qADQqnDEEZlM4wRUExGqOzAheU0FLQoAwh9VBSksWqbEvjLFZ8JZTRmMxIeZKWnXxlacKxI4c5eugghTHMptvMpzsYpclMTltb8rxkZXkPs1nF448/wdb2lOXVVVZWVhmNxxSjEcvLy52q6mhUSm2pVhgDWS5cLhQdbmhjmVvTSmlNVbVYB6iMQCYlTqgrzs8h2383+Tu9P216Q74f9GVRiXi6WOC92IOh04YbeuvRM0vn2m0w5dr6eU9wIpYQn46sZfl78BZnG5xtpCQQaealgvSsLXJD8BbbtOCFrBuHUXAuHzlmUVmGKOutBmKnWhvG4wmj0RhnPU2TyueSiyT2Zdgw+gqy2aYPZYA/+MQned+d7+XP/vQznHnqKe768G9z4MCBzkqH4LnvS/fy+te9jrt++8Oceeop7r//fm6+4Ua01lx/3Sl+567f4fTjj/OVe+/ltXfcEQdemr/+41e+kv/+Xz/P448+zJve9COdwVMdJjXE2tJ/FAnwv+OOO7j3S1/izFNP8aUv/TWvec1rIlgug3Lq5Ck+/J9+k0cffpjHTz/Or/6HXydEDtdkMuZfve1t/PHdn+OPP3cP73zPe1lb2xvdXVnUH/qN3+SNb/kxfu6dv8Bf/Le/5nN/+d95/gu+mSzLOHLkKO95/y/y2c//d/747r/gtT/8RqwTvhtKSl8OHTnCf/6zu/mnr34NMTBER5KiNNRoMAryzMTwzEaA2fKhX/01Pvtnn6Gu5kg1QduxpJu26X/i71onP9Zavu+Vr+R377qLv/qrv2Jza4t3/NzP8b0vfwVFWRIIvPJVr+JXf/mXefTRR1lfP8t733snr/qn/5RhHawHHApMhi4KTFHglKLxnjbIa15JzaIN0FgXm+IGWhQOpJ5Ra9AZOsspxxOKoqSqG3bmM5RWUZp5BaU1TSsZzrptaKxkw3yI2KsW/O3gwQM861nXcezoEay1nD+3zoVzZ7m0cYFLGxfZmc2oraVqWmZVhfM+6tF5dqY7zOfSiFlpxWgyZmV1hclk0uFYWvULUOS3Y52vc9GbD9jWU9ctdd3iXJB1E6ShT/LMdpeCJUM29J6GRqooCsrY/Xv4uW6xDgxQMoC7PbMFz2+XwRwazSsrmfg+g96FgPFz9ET5YWirlZRZFUXeVcyk/hqSVLG9EQbpwxC/K1ULyHeLt6ZCiB2pFGWU3FZB4a0TCkjMaorBFTHLr2nMQpCUtXe9QXjZy17GHa99Ldc963q8c7zrnT+PVn1GBeB1r/0XvO997+Oq48f4npe+lO2tTZbGEz72sY/zxS98getPneKlt9/Om9/8Jr79778IrTTPfvYtvOud7+C1r30d199wEyeuujrWdPUPpxu8QcyXsqsnrz3Jz//8O/hnr341R48e4+9927dx7733RsATliYTPvXJT/LgQw9x6623csvNN/PHf/RHhCCh4eve+Ea+/hu+gZff/l285EV/j7379vEv/9X/0bnJ6d7+4fe9ks/8yZ/wd297Lv/oe1/GuXNnyYuCf/vLv8a8mvMtz/tGvucl/4C//x3fySu+71VdFYMxGUtLE65/1g0cPHhQmv4mBYu0U8XwJTOGleWlztCJUqh4mSngFgqFp7VNp6LggnhJtW2xLrbSC4Ebb76Ze+/9Mq/4h/+Ij3784zz44IOMRiOuu+46tDbcdPPN3PvlL/O617+eX/3Qh/jyvV/mxIkTjJcm3Tl8iF5RXpDlBVlZipxy9CC0yTBZQTEaMV6aMJqMMUWBynKUyciLMatr+4QbWOTCdVNgg0dnGTZ4Lm1tcf7iBTa3N6maOVVd49Esr65hTM50Oouh5awTBD1+/BjPec5zGI1KnnrqCcBz/OhhVPCsr69TVVXErQxNY6nqBq0z9u8/wPLyCufOn2Ne1xw8eJBn3fAsjh0/ysrqMsWokMyl1pI5j1k6oRIJLNHWQs5tW8uljU2qqmY8XsJ6mNcNo9GEodDBEBMbqsV0VRO6FwAYhtLDZiDpHKk7UqqGSK8NdeaATmtueXmZtbU19uzZw3g8wfsg+mAxrE1cQzEy4nEmg5SoH0qpuMn2dZghRlYSegxCSGJVRFMRnO39kODBSy+DPtmn6Fsl94YzOTKz2YytrS32799PU4vKxs5sJ8JYHh1ipcsgdrwcMwsLfwDwsY9/nPvuv4+6rnn/v/0A3/3S74ZoqdMJfvuuu/jsf/kv5EZz+vFH2bx0iZe85MUUec7P/cy/wTvHY488ykd+9yO84uWvAAK3v+Ql3H3357jnz/+ctml457vfvWhVd1/PIBZPh/ee66+/nuXlZZ55+mnuueeejrbxnd/xHWit+cmf/ElmMxG2u+u3PizXrTUvvv12/u9f/zU2Ll5gujPlV/79L/Lt3/Ed4hEOwux7Pnc3//n//X9wzrNx8SKnHz/NDTfcxE0338LPve2nMdpw6dIlPvyb/5HbX/rdC+KCpx8/zfVXH+ED730PwXuhNGRZ10TX2UQLcORZxngkEkRt2xC8oyyLuHv6TsolQqZ9SkUPvQD5WV5aZnt7i+NXHeemm2+WBQ5MlpbwIbC8vMx0e5trT57kpptuYmtbmqmurK721RnaUJZjRuMReVFEIrGhHI2YLIlMTha7vQfAZDlKS6g1Go2ZLC0zniyR5QVKG/KikFmjoBwVjEYjVGpqGzyjsmRUjmjbls3NTepG9OhTgmJtbQ+33HIzt9xyC9tbm5w58ySz+ZTp9habm5fYt28vN910E4cPH+PIsas4cPAgk+UV8mIESjHd2cF6x7Hjx1maTNjY2ODBhx7i9OnTtG3D6uqKELSznEwZ0cwPxLAtTsIgTWmm0ynTnR0a6/BKcMy6FXzvbzqG+NHwJ5VttYPP91niRcN4OR62iKuBXI80EBZ8cT6fC6dSqQ6jIxpTE3XnskEYmw6169oTvSIpSy98t44VM76PQIaOSHJMAulzsY8GKekRGQOtpcwLmrpmMhoDUnTfVBVNXQGxnNBIyWM6LifNdimznqS3vr6OQtK758+dpyxL9q2tSQ+AOHgPPfQgRksqu8wkS3TNiRPsP3CAx04/0Z1eG8M9n7sHBRw6fIj19fWOK7X+zDNx1BhkQONNdr+jG5hHH32UH/iBH+D7v//7efe73sWjjz7Kv/qpn+IP/+gPCcCJq09w+vHHY82f71PmSozZwUOHOHfuvPBzFJw9d5ayLNmzdy+bFze6+3/k4UfEJVaa1lZopbjqxAmMMfzp3X/RXZRWmqefPsPy0nJX/G0yI2CnE8ltAdY9IXpuaaI2TUsIsXDce+q6wrlU2JvGThIXKKAVLfeObqFY0HWb7kxZWVnhzve8hzvvvJP9+/YB0kDVe890OmVldZUfe8tb8CHwnP/pOQDMdna67xPOUJTm8Q4bJ12WSRMM61qqao5tPYGcIi8xeUZ
QvivPKkclRSFEaxt5fVmWUe2IWmmeZ0zGI7x38Vw1RovAoYQvBbPZDqsry9zwrOs5euQwGxsXuXjhgsAY119PZjRbW5s0TkLu8+c3KMoRo/GE5T0rXHPiWiYryyijqSPemGV5bOaRMx6NQWs2Ll3Ce8uRw4dACYZkW4drLU1V4xor/U7blq3tbWazOapYlgXehaEObS7PZCYvp0tyDXblBfwsJoauxENLv0sGa/fvOy8vZq9d42i960D/PJOuSj4KRGqt0cpgQobxoeNlZhpAOlmB7jtbIbiYip5RcmggZkjTvXnT4eSaHi8OMWIKVtSRQwKXdYzNg3BlrHVMVlaYTrc7HLOOyrx1PR9IikcoKh6XGTOhKgzDOun9mBbs8WPHqOuaSxuXOna0Arx1nTaSANyBM08+wYMPPMC3vuA2OvhRaXwQd3R9fZ3nPfd5ImMDHDly9LJJ0D3Qhb/11vj3fu9jfPSjv4fWip/8yZ/k3//SL3H9dadQKE6fPs2Ja65Z+JisexmIc2fPcvjw4Rj2BQ4dOkxd12xuXBLMK36pjQx/IfXJRHvyiSeYbm/zjV9/i4SOWnodBqTUJSkczGZztFIURU7btB1twxiDNpIwCDZJ2FTkRUmWZYzGI6lJrGZdWOydxWTS7UachJ6YqLqssniU9993H7feeispn3zLs5/N/0fZm8VIlmb3fb/v++4aa0ZmVmZV9Trdw+lZKBs0BVuiIAkyCAugDBC2JcCwYcggKEgkIArScLFhwA+yRMsgKIMPhmXyyQ+ibZmUJcgjboIlPlHmYpJDDmd6lp7u6q69MjP2u32LH853b0RV14zhGBRqujIyM+LGveee8z//pa5r3nvvPUIIfOUrX+Fz3/md/NIv/RLKOT772c9x7949drvt4RCHMHSNcveUEIw0NeJY6p/HbQLSXZlE5EVaCS8vTRMInt12gyKQJpr9fouzHePxiDw1eNex226ompbEpNKRedE9KgV37tzm0++8g+1aPvzwA6xtxNq5qRiPS0bjkq7ryNKSt98+Z7vbU9U1z66u+ejhQ1prUUYzPznh/PxcCLveU5Ylt87OOT87Y3p5KcuXiNvYIBKkrqnZ7/ZUdYPtZKSTcTiQp31YikcnUsB788+PncMvgP3H4+Zxx3VMVD02V3jxZx3rG49/pu3ckUkpfKx7iwYJRkQGKC8YlXNCGRqCfMPRRvPoZ/U33OOvCR4oCxujerOFWEo4GDTK+4pgfk9JOfpdKnhQcsNbLZcoBWVZ4qstIUtIUp/8efUAACAASURBVAlI1gaMSlH622BmesB0+pEFvv/7v5/PfPozlEXJD//wD/NL//yfy0FRGhMr4zByOkdwFm87/q9f+VVOTk74qz/0Q5RFTpok/PHv/m7+xL/zb5OmCb/0hS/wPX/qe/iTf/JPkCUpP/ajkgEwAPAc/f38RwnAW2+9xfd+7/fKm40f+nq1GjCpX/21X4EQ+Lt/5+8wGo0oy5L/6C/9peFD+bVf/iX+07/8lzk9O2UynfKDf/Wv8Wu/+iuHE0oLnjCE9IKs+o3hD774Re7d+4D/4r/6r5nO52il+cRbb/Fd3/3HCcgo0bYtd195hV//jd/iP/nP/vNBJWHMYfPpXIjLAtnodV0HBEajkul0Ql9ZksGtQKTZ0rXk/PW/8Tf4wz/6Mq++JnijiaqJ//Xnf57/8C/+Rf6t7/5uTk7m/PhP/AT/+Bd/cUjD+V9+/uf5gR/4AT7x5ptcXF7yIz/yI/z8P/yH8fyMnXDw2Laha2sIjiLPmIzH5Hk2uCIUWcZ0MmEyGpFnGb2fvXMxSSseB6Vks7vf7dis1+RZynw2ITGaar+l3m2lUzImhtYGmn1FsI47F5e8/dZbZGnGerNGG8NoMkYZw/Xqhqubayk83vHs6hkf3b/Pzc0Kaz3ewWa9Y7PZMZnMWJyesVytWK3X+BCYzWbMZjPSNKPIC8bjKVobCAbvwMZNWlW1tE1HZ2WcdM6RpClJmkiHHFzE2z5uu/TiWHhcZJ47q+PXj+2mX/bnuCN72dayV8lofci37KMZu64bNtaHRduBeiFFsd/u+dhohNgECEBvtMJoFc0M1MBQ6JcIErbso0tNPJ9UbwQpzyUEGTdxA/YmnDUvQSvBUtd7mqbiZDFjPCkZj0tOZlOyXPIVstSQpwep1EsDTYjdS19B/uk/+Sf8T//gH/Duu+9itOG//ImfGF6YigdG951M3FIEL3fi//g/+H7+9J/5s/zOH3yJL3/jPf72T/4kZVmSGsPXvvpVfuzzn+dnf+7n+Pp7X+fevQ+PjA/VC69JDWWsx4uyLOXHf/zH+NrXvsq9e/f4nu/5Hn7wB39w2Lrstju+7y98H++88w5/9OUv80df+Qr/3p//88MQ/T/8zM/wR1/6Q/7RP/1n/J+//C9Y3tzwd/+bv/3cpqn/lGWzqumsjZ1Wxl//ob/GxeUl/+yX/wW/8Tu/x0/99z8zjHN5njOdypbszU98gouLC8RBMyHJ0kHiI7Iroaf079E50e79b7/4f/BkueHP/Nk/y9/8/I/y4OkVf/PzPypFNZGQjFu3bvH222+TZ1l/jgDwq7/yq/y3P/mT/MI//kXe/frX2W63/GhMjFdK8XM/+7P84i/8Av/q13+d3/v93+d3fue3+e/+3t87LFziyW0S2U6VZcl4PKIoshhcIcoAIcWm5IWEs3ZNg+060jRlNpsOKUjBO4oio+zpD4m4reSpIU8TsjwlzxKSxAgf0Ium79atW3zuc5/jzu07XF094/79j9jttnS2YzqbcHq6wCQyPrZOOqmmaViulqy3G5I05fzWLW5dXFCWJbPZnHfeeYfLy0uyPMc6R1VXWNsxHo85XZySmgyjEoxKBMz3vdWRXKxt1+K9w2g9LGQAtFE4bz9WtI4L1YuA/cswsY9TKp5/vNitHW9KrbWCzfecsyMKh3Dj5POx0epdpFv9VvUQRKx4kdLRU6+ex8l6qyvB2+T39Mnm/ejZd4ny336gfx1Bc4f3iyPLErq6Ic9z2qbibHHCaDRiVOacnp4ynY6ZTEaMxyXlqDiUiBcP1t233g6rZ8/kqlDwy1/4Av/yX/4rfuqnfgoF5FkWtxP9Gzxs5jQBgxfdoOkj5kNUa8rV4QPxIo5bHRj0jKK275lrxzXsaOzt+Rn92EjvOiknmlbI+lYxdD/Be7n485xxLKQm3oGSNMGkInESy2t5J/qFFTT97K+FoT9IlOLLck68t3QkyColkh7p7Bier9Qx7ygc/gTxUTNRXpVlGVme0dStxNhHHpCKd9teFmWdpW1aup50GQ6iY/ldDCf3cFKpniuonrtphRB6TyIAiizjlYtLRmVBUeTiAdZ1VPWe/X5P27WkiWE0HpMYI0Jy6+mcdLCv3LmLVoGuayjznLauSaKzR1NXBGfJUkOiFU1Ts1lvqVtPa6Ujmk4nvPX2J/jkd7yNd5YPPnifpq3IspS63jOejDBasa925HnGYnFKqgpWVxVPny3ZNxVFWVBORngFTdugEsNiMSfLMk4Xp9y6dYvpeEyaJBRZRp7k2NbSVT
XeCbVgu15zc3ND07Z0PvD0ZslHD5/ShASVlVQWvEnBCJTQayGPCbPHxam3zT7OYzgmvObmecLsyzq6/vtfRvlI05TOyfmgEkOSCku+ayts25BpTZFqxgZS5UicRXtLoY04Jms9jOF9cRPnnAjUH1E3tD5MZ957rHdsqoq8LCiyDNTB885EnM4ojQ+H4BUVoouG1pAkpLMFy+2eW7cv+FN/+nu4urnm3W98g7quGE0n4q6Dh+gK/T/+z/+7gpdgZj7KX5BL+rmLWQHO9R8WUckfIm6m4sVObEGluvvgBq2lUjpW9lj8dIwki3Ovd3HDcYyQBTWsX6Xa93UtiN2vOmhFh3aXEL8vPo5OrH4bKLU13rV0723qhqIVjk6moZghrXIIMeTEeXQU1hqTRcsghXXCoRNaRSBL0xiGoaOXVSyEyH2h10maozti13WkaUZZloQQ2Gy3cRuF2MHELIJhc6QUyiQDDqHi6DpIXBSDndPwbuIH0Y/Rrje6U5HoqyQ8YzSJGQk+4I0U0yxLAR/VEwIuKxLyLGVXtbhOgivGo4JUFxACm/WSNDVcXpxjDDTVnn21p6klTqzrHF4ldF1Aa8PJyZzZbEa13/Pw4QO22y23bp3hbIvViciQFAQHrgu4LpDmhrPLC84ub7NcrblaXuGCZzqdcZqfEpSiLAvGoxGjssR3wi7PkhTimB+clQ7rqFuy1or1kTIYncRzWQ6kiRte6xV1t8ekqWzbMMjtur/lglP9BlG2f0oFvJebfPDyIXXOHsfWDuf+y/77RY6ZUor5fMG+rrCuwjtxzjVGkSYpeEeqNakCpUKkVniUC3jtcEFh8gNh4vi39pLpPM+e7w6V6HmlGZGmxHG4hnpTUh+PQmK0eK/10qoe/0eOQ6Kg8R1n52cily0KOm+pu5ZpnpCGFO/FdusYnnyJn5kaHC0Ob0U0eL3tdN8WaRO/1s/AIdCHffRblcPoo1AIqTBJZczqq02eCf/KVXtSI1Yxn/9bn+dH/9bnP/by+scP/pW/whe+8IVY3PpfItQNpXVfkwghdn460OvGgg+EvjsJhyIpzhwv6j+f24WgtabrZMTAGOqmJvEmZiPIdrFvvU3k8Gh9SGdabbcQx9RhMRCTn5yz4tGOIfhA27SUZSkXnXdsdzvh1gRJi/LKHYDTWMy9VtjOxsLdYyFq0PwpJRwqozVZnskqXO42h63ocPLBerdlMp2AUuzq/SDFGo9GJFreW5GnTEYjkiSjqhyp6mSz13RU3uFcR/AdRZExn08pRgXV3qE6jVfQBo9Vii6IMmI0LplOZ8zmEwiWZ0+fUu8rjNJs11vapmY6mWJrT9dZ5uNTJuMx2+2Oq6ePmJwsSIscqzz5qKQsS87OzhhPxlw9u+LrX/064/GYs9MzLi8vMMFQ7xomkzFnJ3PQipAoUJqrq42MSi6gVCKmpNQEr/Ctlw2hNnRNgwuGPEnFgy5EABwIMXA6RFxIoZE6YiVnUhtMovA2YF10ZVU9KB7rhe6XA4fSYt0hds4kCoNgfU1j8d6Q6Bwfn+8DYvSYGjEPCB7jFEYFkhAlRFa27ntXYdKENBXfORtnpdZ1dFXHZDIeusUDuR3Q4JVGZylog9XxfaCFDK4U2igq1y+U9GDQSfAkwRCCkmOpQBcZX//gHpvdmrprGc2nwqfEYXEE5fBHC4CPpzMNn4D89/f9+38BHS90I2sblOpXrocD2/8dgh7GRB+En+OD3P1Rca5O0mixLHSDopCRKckSNvsdzjt++u//ND/993+aPsqu3yKGCEzLweiB1WMMQUnH9tybim8oDLMpQyWNXWUfHBzCQWh9kFgd7oIu0ijQkKUpGbmMmNaCIiYyh2G81VoJkBy1bFkuixCTmFj8pINDBdFeOqIYO+A6R5d0FEVOkefUdTV0WdqIMZ4IiP2whVJa0yWt8KKOb0ZDd3pwVehHXt+5KC5m+OySNCEvChyBq+UNZV3ivQXvh07TxnEkNYZQlLJq954sSSnzEpMorG1o6xofLKNRgfOO7XZLVe2oq4qqbWg70bjaEPC2IzEpp6cnzGZTmqal2u9pm4bpeEKe5aQ6wXYSAp0ow5MHT3jkHZe377BYLHi6WZO7Mo7jKW3b8vTJE/a7MUVR8sc+929IR6k1qU7pWuFcBedJjGE0zujahN1mC0qIpgBV1ZAGscp29hgicIjZbkAFIY/qMIi0DniZCLgioB7du5QA4eJfRgwdOxorD6fpC7DHC9dtjycosfHqrztM/Jy1EuG4D9iuxQVwoTdkGHoBHBplFCpN0AMfUp6QKFF12KOxd8h9UOAUWMCrBBuvK6MMyjBECAags3Vc6QXx8pefNLyXtukweYHXhoePHnCzWYuhQZFxfX3FbD6hN8gN+nClf6yYjccT6s2GLm69ho0HUoG15tCJ4QeMJRwVt+DlgzvEwx38xYyOPmVGk2UZZVEwGssoZVJD1bUYF/Vlw0E7LkAvtNv938PoySB8HzrCo23S8E2xMPYFTql+Jc3BBSB2Nv2HrZSirmqSVMitgUBnu8HbzWhDXVWYKE2RD6YRrAYZ2Y61eZ3tYkiuAlQ8AX2svdIxdW1LkphBq7bdbbGtjJhS9IgmiNFxoNedeksvLO5tmWRLpqOzSA8pxBV5PEa9LrfICm4tzujalq6RqD6twlECuacohRDrnWWz3UGoSExB8EE4WEqcJ9I0xXnYV3usa8nzjLatsbaj39SmSpPnBUYZLs5kqZGmKc+ePqVthHCLVmy2WyFTliV5lpNnOUmaUu8rnj57SlglTM/OKKJLh7M2nmclWhnqukJ5WfcnRUFZ5pzM5vTkzqatyTMxpGzj1rLtOoqyZLtr8AGaztNrBKHX88qJ1dNojjAOhpsJfXd1KErq6LmCr+oBaz1+HMuWXrYUOH746BzcXx0hKHSIXXqv3kA+dx88LqQo72P3Z6Uwe4XrUZd4bjkleZWN6/9NnDb6CBobPJ0XmZu1gS50pJHPp5WUGo8jqEjM9Q6vPCHWsSApxdjQ2xGJrleMNGV5UTcNaiO22oQgN9j4+Fgxm+YjylffYLte411HgnCLcpOQaEWRSRvtu06yJV2UKcRR7WQ8IkkzQFO3jsYGtMlJRyOSLAeTMJvPuXPnNienJxTR5K9uKp5cPeFLX3uXZzfPqHY1ne1oGxcJf5l0ZL0lEF4wKh+wToIt+l5aK4Mj4FW0gnaOMs25dXLCq7cuSbUiTxLKMmM8LsnLAp0ZlIZgA5vNlqZtJeBUQdcK0J/lBVprxpMxxoj/1r7ao4yibRturq+5vLgUHlJdSTFx4mXfda1gjtaxWJxQFgWr1Yqq2kd7E0NiMuqdSG/6k1LHrV9RFsxOZnzwwQc8vXpGZx1pkqGTVDAPrSnyEUVRPCd7kVNaum2TiLWycMegsx1dZ5lOJ0wmU7b7Pb3YeLE4IYlgbRqLadc1TMZTCELqnU5nGJ2wWt6w3VQkJqEowDsl3YzyjEYFo3GB0oG2q+NxsUM3Lek8WWSeG/I04+LyUjZZbRvt1SXMd7ff451IvwCePXuGMYa3336bu995l6dPn
/Dw6hkd8N5775EmCZeXlxIEXFWMRxMuLy6ZjEZsNhs2mw1GK85Pzzg9PZUOu9mz3W7QStG0LXXb4ZGuJijJDuicFQJo0msl+04rtifq4wWr7/Q5LkhHi5hDp6NFXv8CJvb/59Ev4X0Ar4KIvhHKTHxF0pV5N2C2xovMDB2odw3GCjbogxPsz0gn1NoOjdhmaWXQxpFoec2uEwdlF5R4+ONIkkCWZqQmZmj6Lm4zY6F14L0CFbBGobzCK9isN1jruH15l7QsuXf/HnXTUo7HUc0SImZ/AM0+VszKJCWYhOmtgkQpytRQ5hllkpEnIr4NbYvralzTElyHIpBGsLnIcgKK1gZar0jyEePZgnw8hySDxPDpz36G7/quf5O8zPnm+9/gyeOH1E1OXqYs9ytC4tjkewm1qCxaabIkx8asPWstCk+SCvZQ1zuappG7YlBondJ6T9CaNMtxzjFKMhbzE95+/U0mqWFcFEzHBdP5mNF0TFpm6FSRKs3NzQ3bzR6tEzyKum5RylCUJXfv3qVuara7TZSftDSNuFY0bc18OmW33bFerSSSr6loW6EdzOdzZpMpk/GY5XLJQ+OxXc50OkEpRdc6QqdpKkvdNuR5RpIZmq5idjLjs9/5WT75+m2+8u5X+ej+A5yHyXROlhX0ttvj0VguCOviptix2++pqoa261B5IWc7UDUNSmnefPNNzs/Pef+De1RNF+GB6ErrJO27dz85OzvDaDlG3gd2uz0hwPzkBK0N+22Fd5LboHWgszX73ZYk1ZRlgXNd7MiOL3TpAoyGrrNsthsePxWb9PVqTV1JoXTWMpvNGI/HtHUzFOf79+9TVRWj0YjFyYKd7Xj9tdfY7/fUVc14NObi4haJNjTVDrylKHLGowvm8xlnpwu0hpvrG6r9jtmkjAEsirquCUFxs1xxs1xRNZbWRTDcJAQtx6h/xGHgaDrwHFZYPNddhZ51P0wIgkF+HLXlCPb49l3ZgKbH16Hia+hdDxXi2KucuLc21mK84GYG4ZBVnZhsGuUkPo+ATjTKQ+ui8F55yaA1ilR7cZi2IeYSaGzcfhoCVnmME3aD8CodZihmvWmoj2lsFm80TdPiA+SjEWXbELzCWkiLlCLXeOWHDIFvWcx8K8zqLEkospRJWTIpC4osJVUQOmFkOw0kiaQN63j3ThJWqx1dUHQBMIa8nFJOT0iLEq802ajkZrnki3/4B4Tg+fCjD7i5eYp1LfuuZr1eUu33Q1eh44fto35Ntm5O7EgivDeIeb1HaUkNEtzqwKFx3lNVFda2hKQQbkwi+QXOOZR1GDQmCWRZQlFm2E4kKsYc+DTX19ci5zo9Zbvd8uDhDdZaTk4WpFnKb//f/5rVagUhUJYlRZGTZRkhYkVGaXbbLffvf0TTNJydnnBxccF+v+e6XlLkJZqE1jY0TY1OSspiDCgePnpEmmTMZicUV0u22x1t14qLq3d0lcU1HaOyhCB21QGxHXddR7XdUZQlLvKAJqMR8/kJp/MFeZoxKgtA4xHKhzEG23aDZMpow2a9IUkTZtNZLMCWy4tLXn/9dbabHe9/831s55hMRjjfYXctPkioSp812o9MvYeWc9KxOm1kGZQkjMdjlFIsb5YxY8AxnUjRr6qKNEm4ffs2WmuWNzd88MEHZEVOMR5TTqe8FovZo0ePJEowBOqqYr1es1icUBQZSsFuu+ZxCIxGI2bTCYv5hPXqmq5rZTLoOpROWG43LDcbdnWHSjJaa2V0sl5yMpExycVORseteo+ZDRuaIAXMOTckivcYq47HWL/QrfXF7Fgl8NJHXN7YthHowvSbezVI0+RasHgdbyIhLqeUIlGyQAqAi7zHzqlotZOijWwk29jZa4lIIjVu8KO0KBKVivuyRsKS4/EQuEngFxtx3mA8Xgm04pUZbrTjyYxiPKVpO3Z1S5IJ8bdtHTqRu4V/oQP+eG6mls1lbgJFoslMwNDhO9F5pYByDoOLnCFNkqYUSYZKMzaVIyVFZwobFF4ltJ3F6k4Ijc7y/r0P6LoGk2kCDq0huI7ldk0dWtpGVvsy28s6t8ea+hFDzGEPjHppdXtsASRAVdFb6/o4elXVntL0yU8GlMY7cZ7wQdbEaZpQljk7X0Er+QNJmqE1FEUeGeKIHKYoOb8llki/+7u/y8OHD7m8vOTNKKN6+PABNzfXZKlhPp/Ttg2LkwVvvfUW3nt22zXL5ZKqqijKHFtJIEueZVR1jXOOLM+wrePxo6csFqfMZie8+eab3Nws2cdAEKMFtDVK5CrOOkKwGGUo84JxMWI+ndC2lvV2ewhGRlKtx2XJ5fktbtZb1psteNlol3khKffe03Utq9UGYwzT1+ZMJmOMSTk5mXNxeYfxZM96tZVOKjHYusUYRZYXKBXY7jaE4CKtQ/zLiNvn4AUSyPKC+WLB+a1bXF9dSXJTkQ8XuDgnyKIFpZhNp5ydn3OyWKCj0eP777/PvQ/vcXl5wfnZGbvNht//vd8lTRMuLi4o8ozTxYLZdIpS0Y67aQiuI8tEZdFEo0uUoo0mnHVn8WjRaAbBkENcmvQFJ8QiNgw//bjIATMj4pSHr8i5FLQGffDyP/6Zx0XtxWyAFx/edyhleoQuVjnkeBtNkuf4zornHAqlPC6AilJGr0zsrgI2aLkGtVyLDkMIRnJNnXSvnt6TMBAUdDHyToTm0sVqIoUpNiNKh8GNo6cIoQ0kSpYnBrIsx5OSJBkqrlN8gGbfgPExk/bwvj9WzG6dTgjBkygVcZyAt7V8gLahSBK09+jgCEqjvMYoj4+e4Vk+QWUjApp909JYS7vdEqqaoGBX7amqHUmekoWEut6hgiPPU9IkYV9VA9ta+4MNUf+h9hYqiVaDXZD3KXgvFAB3WF8/z1cT7tZut2ec5NixO4SPOKAFZwKkinFZoFLFnr24WZiUNN6xZtMZ18tr0sxw69Y54NnvdixXS95//z0JzEhT7t27J572tmU0GjGdiF/W66+8SgiBb35TNJJpYphMJgPOtb7ZkmclJpEIPo8XcXgI5KOCzWbLyWLBK6+8ymg05v7DR+z2++hmkZFqyQvddTuauo5RYI7RdMx0MmG93qAQLlzdNKyWK1595VXuXF4yn81JHz2K/vziOzWZTBiPx9R1xdVVzcnJiaSBJ0kMadFc3yyZX99wMp8zmowBz3Q6pqhSbpYO51q0VpRlSV2Le0mPkWmlSbRBaekku66jih3U1dUV6/WaNE0ZTac8e/IU23WMyhG73U5oMErjuo7NZoM2Bh+7m/nJCdPplNl0ymuvvMI77wSWyxvapiXNEqpqi3ftUahthw2KDCEI17VgtjpNaOo9LsC+6dBpIfiTlklEGS0LMSU5FCq4jxWgF7upfpoA4Ij42hsNHj//ZUXtWxcz2WyLs7EGHSPaIsEdK1CBD4HOe1p/iIHUKFKk2ARlJIc1Pk+hSHxcAniFSQuCFetMAK8lrIcobarqWkwmVULPm5TlU4rJNO12LwTc4X3JNWuBxEGqYb3asK9aLu9e0jjH1977Bq21TOcTlHFy7mgd6WHfopgp36KU
zNgqWJSLy+TQge9oqoo0MaQE4c9oLZyoumFvd9TMKIoJIJvJfd2Q5hneduybSqxI8CSupWsAvOTiOclh7BnMWItBkeSZANZW4sCc7QOGhd3urcXGZOveRsQHJ4kwBJq2khWxF5D+0aPH5MqwmE3pWkuSGkxqBiyuSFL2+z3eCeg8ncgmLoSAMlBVO05PTnDORR0obLdrnj19wuXFJaPxiBACTV2z2WyECf/KK3jXcf/+faajMePxmOl0yna7JctStNaMRiNCqDg9XaDQkt7UebrWY62kVtldxXK1wQf4xNtvM53Oaa3l5mtfIxBY3H2Fbt+wXa3Ej46ApdesLgkERqMJTdcwm5/QWcdkMqGu9uz3u3gHlG2u7RqM0Wy3G6zr0BqS1Mj4FS16skwCmvdVxc3NDf/un/tznJ4t2G1W1HXN9fUV1nUxzSmNo6uibRPBRLwjKKHvpNE+PU0NVVVxfXND23WMJxOJtIPBWNKkCYvTBQR4/OQxIQSmkwllWVJOxswWJ6RZxm6zYbNZs92sUSjGoxHT6ZjZZMx4XNJHAzpr0YliMpnIRrOJdJgiZ1u1VHUbIwIdeaowaQZKE5SOihVJO3e+lzVlhy7tCOgXfEzkVr1jx8ABjNefd24wNzwOCzkuji8GlrxYAL33QrdRStLKYyB3CHHU94jLh4/JYFqD1tgIMWRJDN4OjqBlY24jzUKniYQda3FNkfen0Ukq7i9dSzAJmCTmZfqo35QtZVu1oAzWtfFaT9BB0XWtUFeMLAM9ktp1eXmJC55yNCIjUHcNaSZNjNFmSFN7aTELXoIkbPB0LWSJlqQaBUme4FuLMaIOkI2UkmBY52k6qEODaizo6OXeVDS+wwO7/Y7GdhgNLoZ7ag3BKmzXCkfKd7L+j+4S/YcwbIPiyllwMnHrOEg5oPcKEogixlahUEEKxHq9oZrvcV1vYSw/xyAfeNO0BNvhkDE0eFEmaJOSaUOz3wnlIBFSofMt3neApSwTiiLno4/uM5lM+OQnP0lV7fvTjvF4zGazAWS0mU6n5FkSO5ZaOtsiA69orcV46YzF3VQIolmWcXO9xIWvc/fuXc7Ozri9WfP02TPWqyUmaOh5ZKFnjUm7r7WJdjoz6qbhzTc/QZqmrDcbbm6upVPtGvG/Tw1lWWCtw3Y1SmtGo4KqqsWeyHt2+47lasXlxSVBwZe+8ke8cnnJnbt32KyXLE5PJHdTiX5xt9viXDrQXdrWYq1EF3oPWVagtRIssG3ZbDY0jUihZKwU+kyWZbzyyivRs20zSG/2ux373Y40zzDGRNxNKDFd17DeLBEbaIc2t5jP50wmI9q6YbPZcHX1DO88s9lUHDBS0c8GFfMwdYJKM5TJhZMVcdleiP2ic+txkYkEo4g9Hkwa5YvPh5h8K7b/typmz39vxCMjftb/YqGGyLgXg+FkTNag+oKm4qIiSQiRPuSRTk45CRJWPSFeiY1V7/Ua6PExdThuzonVvAso09O5pEsU7inQu0lHmhEKVJLgrcZ6hzEp+XhEVhR0weG0l3EWoS4dL18+VszAwQAAIABJREFUnpvpepEo4ANdcHglq6bEGMShPrauwdEqwOu4JhVGt4v+4SJdcjgX6JyjaevhoCYmI00ib817XOdoQ0cTarq2iye5l4h67+OS5iDW1ZGC6GxM/vaxgj2HL8Sjc0RC7GIIQy/2tVYImCqVUUdOBoOsERSNlQsujbuT0WgkC4jMUJY5zleUZcbdVy7Jspyu8/zmb/4Wo7Lku77ru5hOZZVcV1KcnXMSNR9PtjTLBlfN/b5iNppJknUvUzJ6sHx2znJ1dcNkOkGhWK/XdC6KuudT6qqia1p00Bgt9ucDQVYnKK1oqxa130mXo0SC8vjJE55dXTGfz7l95w67/ZbdZs1+vxP/r0awO5OkNE3vhZWQpKkoAEY5SZby4OEDPvH6q9w6W/Do4X1ubq7RBrquIS8ysiiwF/5XoGsdXWclsKLzGJ0ymhQoo7C1pW5qrBVqTq9acN5zdXMN31Qs5ifS2So5Rqfn59Rtw3K5jPZJFQ/u32c2m/HWW2/yxuuvc/fObZqmJssSmRZ2a/GqD44iFxVHXVd4oYVjvQTQNtYK9UIb6Tp6YXc0bgw9GK2IyeXPF6DBGktFeET1xgy9giY+7yVcsuMt5vEi4GPja9+lJQblxZU4glH0GKXWhtB10XFaCkyvElDxZ/T8RB+CvJfQP0/eQxaxtaB6uoXkYyiAo0JtndyoALwX26gAQ17tgOcpMCaGCCGXcds1bHY7WttFpxqNb7uIJ7pByxz8t6FmtK20uUmaxAxLL4k+XRvXtRqr4s4rQGaExayCVHwPuNARvOgmlYagAiDdS5+tl6WGIkkxiRLzOysnTdu00XQwHkTfj4+HD1k6R9FfSjBCGFbdzlv5EJQSEh7IWKrEsibPc5QSZX9dN8LjKjK0CwTnpANVGmNSgRmMNDpJkpBnGa+/8RrX18+wQTyVrBW3+9lswmg0Qumcz33uc4PV84MH92mamlvnZ7z99tssr2+4ub4mTRP2+z15lvL48WN2ux3WWiqdoGKISAgKlJE7KArvgqQ/mYT1esNytSQtUqbTCTqBer+jbWuMSiDJBtWE8xKUon3CdDZjX9d8xyc+gTZ6cE1dLpd8+jPv8MqrdwhYPrr/Abv1CpOknJ3NQWm2uz11vafrLM5XnN+6xZuXr7PebHFbEfn/wR/+AbkWEP3V1+6iVGC1WmKdparEm208Hg+Ym9aJdPpKYwexvnTU2sg5opWSm1AInJyIg8JqueSZtSxOFkOAzcXFBefn58xPTiJlpmQ0KimLgrZp+J3f/i2+fjLj9ddeYzIZiQxrPKLrWjbbNXXVUNcNk8kMk2TUjXyGVd2wqxpa50lcQCmL9RIyLxDEIYNU9aPdCwVGbsQMxawX9b8sUenFsJP+8f9Ny+gLn45sjyBFC492OvquBnwXtcOdpfMerQO+77iAgH8uFEVq1CEz4PhP/7qOx2nBMonHQ8rkkH4Xx9d+LQCensitvCzutJZusGpqGicYp0oUXdWC7pcsEp5yHLj98c6sQ7RiQZT21nZ469EkoIxIN0BCBnygM4g4FUUTYNvtaZEPtKobrG0I/QYDWbGrkBCMbFaUS1EWfOtoW7En6VW2Id5det/5EAI4STVyiHPs8CFH0buzVjpHpfG6JxBGsDDIqGdMQl037BLBy8Zxo6aAZreT9XIWlw0+jtkR9H786DGPHt9H6cB8McW5Bm08Rnucb/nau1+nKAsSbXjy5AnOOabTqXQjR84GdV1Hd4ImEkdTRqMRWHHaFMxLD+87ROnMaDyhbio2uw2z+VRwo8ePaLqasijAi+jaB0kUtzY6jRpNEqUzSoupYTkas91uabqap1dPmT2ccnX1lO12SV3vqKods5MTXn3tLtPpnCdPnvIgTbi6vmaz2ZDnKadnp9yslqzWK8ajMdZ2lGXGK6/eJQTHN7/5DZbLGy4vL7i4uBD3iaYZbGKyLCXLCowWd4a26dhVMsobbSiLEmVk5PBOzoX5fD4EgFzcuqCK+QD
L9Yr1dkOe5xRFQWoSZue36LqGvbW8+tqrjEcFwVnausK1DQq5mLPUkCVjAp6bm2vSfIT1mt1uT9N21G2DC+IoG6yXG6WK6d2xoHnvhOHuBXzvL/bBiOG5gnNUyJ6rUWEImj5+7vGjXwC8WFiGYiiNlNzsQ0A5GZ6cC0MCkvL974l60dCPgQOSIwVIHzuA9L8jIMwPkSIpWe3LyEigbWrSJC7qjMAbqRG2nQsOxZE/mndRPSOv3UXHmKLMCHis7RhNRmJIOrxGHwti4PjgfayYWZtEMFwKj+tajNJMRmNGZUnbVOgAtrN0tsM7WZfqAI231M7TIfO0tV2MCpNWMjExqzB4UQ9og9fSmbnOYptW3AoSwbt8xCR6q5ShBfaSoHN8R5MLXgz6gpJQ1sMHGp04VAz95RDZ1nXSyRltMElGqC3BWbrW09Kz5aU7BcWHH77PZrdmOhsxGqdkuaa1mtX6mrpuefTgAZPpKR2yUPjkJ9/m4uKCb3z9a3z5y19mMZct29XVM87Pz7Fdw0lcKIxGI2xjMbrDJDK+WB+JhfG9rFYrlIHxZEpRlGy2ax48+oiA587t25RlidWeuq5punYY03TUxq13GwKKP/zSl1gsTnHOsVytGE0mWO948PA+dbWlKFLqJsO2DdvNBq01u92arqtpmgqUnJi73Za7d+8wGo9i95pjbcfZ2WLIVKyqvfiLbdYDTy9EAmUI4m2mIunXdo66rVEQuYAJaTZiPB4PneZmsxmkYbu9xMudn59jjKFpGoqiYL1es9tteeXuXbIsZY9neXPD0ycNJ/MZs+mY8ajEuRRrHfvdjqZuWW+2+KCxQRNURmtlPe6CjJi+v3kemRMmGHFx8FpE0M4PI1tfyIaxkgjQx4zMEHpnVuRCjZXkuVDtF8bM4+vhuKhJIVNyzgTFoBNSSpxkbUCbMHSGzkvwtIyHDEUqNdHq3T9PcZIOzKFJBowwvhRCiEziGENndEJqJLNCK8HG4w8U3mbk33nbSQqWSQRzi12eSTV1W3F1/RTLgs61tF0DRs4XlHD4zNE682PFrG5C5I9IoKftOjKTkmcaT0GSCQ1ChQ66OmZDJrGtBWhxrsWBzMtKk2gdMzJTlBdAXxN1fk7FjaTFdqKGxxxZCvsY0qvNc5YscmMULaLnIM/o5SG9kFUOzlHupZPnyAmhaNtWEnacI81S5vMT2rpiv6sl8qxtpDvUmq5NxUrmbE4xShmNc5R21PWa7XbJbt/ynX/sO+k6xWq5YrfbcXV1zc3NDavltWzLOIwRdV3joxFjfzImxqAzhXZysfvOAXYYT1CQZDImP368xuM4PV3QtA337t3j4vyCIislI1RrskwAbBcCXV2TZQVlWaBNS9M1Ivg3mk++9QnOz8+4feeS4E8pRzlPnjzl6bNn3Fw/Zb0RBvx6vcF7y8lijguWZ1dPuHP3FSbTkQi/PaxX1zx58oQ8zxiNRpydn5NlqZCAoxNrkqYo5anrFmtrfCr4oVI66lrDgN0YY4YwlyRNyYqC1Bh2ux2r5RLvPIvFgtu3bw8JR+PxSPh7dUVdy0WUZxkn86lQaoKjrnZcX7XClDKa2XyCSVOurlbStSjpiPviQQS8VUx+DxFo13G5YjAor7CuHTC0HuLogRJF9PdykeR9jHv1cAmBoD+OiX0riVPf5QGC9QXBe/vzSnNkhBh5lN57EtcNWPQA2gUdUfEwXEMRQI9fl/EuSYREHozgzGIyAd5bEoXkZEZDCMEFJWpOK5Eh6oBwO11HMEqcYuLvaVqRAq7Xa+59eI+zppJlUNugU0OaFVIkjTq8rpcVs1XdUOSQmoQuKFoX6FyH2TdgKuaTKUHJxaWMAIxCbjUE30DbDkJqR0DrQPAyrxul0EmCszKTd12HxdI1lrqtadoWn3hUiKZ1DhSWENII+sVVtfxk+VBlNSqLBy9FKkRw3UTLoeACng7ntQSLJAlpIdiTtZb9rqKcNuKCURbYtsG6lq4TrlWe58wmE04WczbrJZNyhE4FyByPCu7cvmQ+n5DlEz54/wn/+jd+g5urGxmHUkO130NwzF97jc1mw+PtliQxfOPrX+Pu3TvsdhtGI8GRmq4ZOs5h4RFE5pJmRrqAEGJ6UyQptn1IqmKz3dFlDqMNWVGQpCnOOTabDav1hsXilGk+47U33sAYw/0HD8nyLPrXa25d3IJgqfY71rEjq6oKGrmxaaW4uLjF5e07XF1ds1ytWa3X2M6yWJzw6be+g0lR8PTpE0I44GXOWdq2ZTKZMp1MybKcqqq5vr5hv2uwypFl8j61lwLQuo6uban3FTUInqZFQZHFsXy6WHBzdc3V1RV3794hSQzXV1cRK8vZbNacnp4wHY9ZrW5ompqPPvqIssgwGkajAg3il1U1tE2HViZa9Ah2gxYMygewziOSYNkwqyCnYBIZ9sTvVUgnNuRkRLy7F/b3ihWtDyPpEPEXn/ctqbFx0dXjYsTvU8TNo6wvowHpYcPaewkSAjoQMzKl0IsPnpDYOyS8dxhf6QszA94MeujO/GDoKddnlqbSfIQgXar2kY8qy4jEyPVMJwsoKcQyegbke1CGumm4uroWrl/TQAgkRkngSizSva3XS4vZXlvaADkBE1n8ddtBt0e3CbpNxNhNg0rlxEMd4qekgHiUl65JpB0R3AOsd0KZQOEQv/LGNtShw2rZkmZotEnwRjopH2xcAXtUojBBNo+eINsMLbZC/f3F99gAEpyiEUxPE2hdS1ak6DRlNJlws7zhyXvfJCtyzhentO2Ozu7xriZRnqyU7iJVsLm55urqGW0zwWSKzjaoFDrXcLO8pqpaJuUZF4szxpmQL713mCB3tOAd89mMIk+5e+cO+92GPMvZV1v2+4qmDqRZge08VSUOAU3bYb3HZAlZkuO9Z7las9vvxTixLGm7jlQHFicFIQieEEJAuw4fHE3b4oKNPlSOJDUsVzeMJxNuXZzR2obl8pokUTy9esats1N0kmJMik5Sggssr5esN1sZ9zA0VcNus6Xa7hhPppwsTmmbhqdPnvCZd74DwgnWCq/s6vqK/U7caReLBU3T0rbdsKEdjXJxq00S9rsdobWkRSkcQKOpdzVZljGbzNBKiexKwX67ZR03l5/61DukSULbVLz91mtyvIHVMpcOO3qxLU5OxCDTW9arJXVt5SZSt2y2LVppiqxAmZx959A6YbfbEIAkk8g8f9TL6HAA2oMVvWCZpQNupaLJpZwCflBeoEDF0d/RL3uQaKQIr3jnZZyNGLRCC98whrMoZIsqLjXiINSzA3r7Ifxhy6oj20BGRy9jqHfSfBlZevWGnyH4eBNlcLohyPWpTTKwGUIsrpEARJJkwmOzIqFCa9BBlAPx/bVB0pp0mpFoNaSWyZib4I2EnuybjtM0R8Vt9d3LW1xdPUWl4pOmorv0tyxmnfY4LChFahKsCTjtaXxLZWvS1mBNgvGICR0agpGxsf9Q+j/q0KEGkAPpo5I/BKxzdK6jDY5Oebzun9RbDh3hYbHdVTrQAwouyIwv5EBN0EYscNQh9diE3ldJ/qRpyq7aU+
Q5l7cvMYmRjdWupt7umc1SxqOMrjY8Wy/ZbixtPSNLEpqm4vr6iraZgQps9huS0pDmCZvtluVyg58lJCjm4zFJmnJ9fU2mNXfu3pFjmhhsmUuqz2jMvtoCgaraCfO67nBeNpco8XgzsftN04zdbo+09BIUYuPJ3XU2noAdaWrIBhsb6TDTNGUynnKzWrFcLdnvKy4uL3nttdc4PT3l6vpm+PevPHuXDz64x927d3n99Tf44IMPCMBoVGKM4fr6mvV6Q11VZGnG2cmC2WzGkyePefb0MevbF9y5I/hdnuesViu6riPPc8pSfsZ+v5cx27uoMhAOGyGQJTnBygWdKEOWppRFwWQ0ZjIZkySG1WpFURQYY3jttde4uLyFNuB8R9eKuF1pzXw2YTS6IITA1dVVXDpkbDYt3iuqqmG7vR5i7bzzJOMMFRzWyk3BWie0DCKvyrnIBIp+YCr6kqloc2WECHo8Fh5bPw1RcUfXnRv+q+/IeyA+Uo6UIij5dxs8JghfDZDtaRAJlaEvZP2YSCwzsYAqCP4gfR9siY6uuedfW/ymF/7tZdtWeaoWmoaKGSFRohXi8Cq+fUnEpCyEGL/oQHmHdZYkK4Q14SXlSxZiVrIL2gbKhFSrSJ79Np2ZiitVH8S7XxtNMOIIuW8qUhOLWRBgWoyBUxJ0NGeT7/NI0VF9q87Bn8x7AR87J1yvzskGMsRi9hxJNj5etpbupbw9MbT/I86XUrF7rzHCAUitm2ZwYz07O8N7T5ZmLJc3pMmIIjcYI57p+22FCgGbZuwjAbatG6y3VHVFtdpzs77hZnmN94FnoxWf+fTnmEwmzGYzPvfZz7DZrFmtV3z44T3m8ylpmrJarTFGs9vuxO/LCRu+7iyohMQkaC1e+J3tqJsQrZV77WjBvqqod6IfHJJ3giXEEyRJDNqklOVkcMh11vPhhx/RdZbbd17hyZNnXF3Ja7++WfKbv/lbnJ8teOONN0iSlPfe+2bEv/K4kdbsm4q6qimKYght6ROANquKBw8eoJSkZ92+fZsvfvFL3Nysubg4i2TjdLi4++QmiBgrglM1bTMkYY3H4habZRIcXJZFFJkrHj16RFVVPHzwgGo/YzYZ07Yt3nmKNOfs9JzFYkFVVzjrefrsKdvtDQrFyXxB27U8fPCYk5MTJuMZbdPSdg7fNTSdHwqaj+C8J0SMSD6LEGcGQZe80EyMjvXnANAfA/jPncMv+e/j/6mhI+A56sNAheBQLPtrQnNQFPTX9BEq9rHf63me3/ay1/qiwuClxey5hzq8Zn38sz2YZHg1Q0bI0Ta1bVuSMsN7x36/JUlui7FnsJTjQrpWZXAalPo2ndmwKUFaaKUNOk1xnaVuW/KkxRtpp72Tvx2BVEuUVV/MpLxIJRbtmhAQQ5RROOeGYmaDi5X6cAd48a7Wu1+8+GG8yLbuU296/pnysr1JTIKOHcH54pQ8z8Xjqix5/fXX0Si26yuqek9wGm/FJkb5EIXbQs6dTaYEDbPRjLfO3mK5WfG7v///UKQFb771NvXO0nUtbduyWq344hd/n7Zt+MxnP835+TlNUw3uESHIEkIpTVmOIkHRCpicSMgKTkb4um1oW0sSU5lKU9J2AuIbk6BVItvjICaPkk9gSFJJqW5b0S8qbSiLEaenIxaLU959910+/PAjxpMpeZFxcjLnjTfeIM9z3n33qzx79ozdbsd+X3F2dsbZ6TmbzZa2bZnNZiilh63laDSi2m2p65qnT59yEvWRWgfKMhtkUPP5nLOzM2azGQ8fPhx0lrLl8zRNi9Y6dmwjJtMxOrqcZFk6LFLu3bvH1dUV0+mU1994HaMltOTJo0dMxiPGY9G8il2RnENnp2fUdc14PKYsS9q2Zb1aE0JguVzSNi2zyZzGOqq6o226CFvIeaejdGc474JAGopoqqme78iOMatjAfnx4/jfIlni2xaLb/m1INORGQrJC19Xh2vLh+f/fXgd4Vv/7hd5ZS8n9objb3h+uoqFuutaIRA7B94KTIRCGUOqPPuupZzlMZ/TMipT5tMxm+0KXQp0gw44J95y/ePjIcBKHcS/cbVMkkQbFk/jOtAysnlvCU5Ywr1GSusob1DSwurYjosJXBjE3dZZOhfF3kS6iDqwil9GyvtWAtzjv62z9PQTUQrIgU7TlCLNsFaoGD2w3TSNOClkOeNRimFLtVvibMNkVFKmGft9TVuL7q4sS1rboaJXv8ZgO0dTyyIjjQLx7XZNkiRcXl6w2+0iLysnz4W/571nt9uRxtc0nczovEOpGm3SQZ+X5jmT6QTnHE3bsV5vBFQ2mixLaW0SlQuHk7IoSqaTKSEEtrsdy+V6OKYmMXzqU59hfjKn7Sxt4yBoNustd+9+itdeez0Wi8BiMacoPsO9e/d48uQJ8/mcWxfnJGnCfrdnNBrRdR3GKCaTMePxiHq/G7qvqqq4urrCGMPdu3d5+PAhdV1TliXT6XR4zna7ZbPZCGXAMRhgnixmQzpQv7HbbreSfG4t9+7dY7E44ez8lLIsePrkMXW1Z1QIpuYJrLebyE/TLJdLzs/OmM3nTKdTfPAUo5JPfscnefL0KR/e+5CmbinLKft9w65qaVo5Xwg9FcMInhuB9BBcdEUVwXmI+PFxITtszxn4hi97hKP/81yB6xdCLxSQl3ZQHOiocRd5NMDGn92XzIgG9b/PhxCter7949t2ZPFLqh+Ujp8/fN8LRTOIcaRGkShFkRrmZc5sOsOHllGZMp2M2G2XoD3jsVi4d10XRfTy+PiYycG7q9dC9h+G1TISmjRhkC4hViA66Ih36OFFaq3RUbxlw8HGxwU36D970YWOeMSw8QmHbUpPp5CD9HzL/uKB7dtyxSEGK0kS8jxnVJTiIFrX7HY7YewrSeRJtOH8/Ix627Fe1lT7rZA5E5FPKBWYjsY8vP8AZcT65dmzZ5Tjku9461O88XpHXuRMZ3M+eP8eTdOwWJwMXUfXWTabDYvFnDRNoxttjTGKpmmYTqdkBEzM0wSFs57caIqiQCnJhswy6bKs84wnJcqII2rXdVHaU5AmqTiidpb1Zsd2u0WhGY3GKC2tfds6Hj9+zH6/j5mKFq0M1b5mn4tXmHOOhw8fc+vWLV599VVOTk5o244QPFUtUqMk+ordvn0b7xyP7n/EZrMmBM/p6SlVVbHb7ei6jv1+T1EUw3tPkoTpdMp0OpXR0MtCIM+zIRdCJGcSjCzkYsPJyRl5njOZjJmfzLG248GD+2RZyvn5GacnJzx58pibm5thDPbeDzKn2WxGOfp/WXvPLkmS7EzvMdceWmREqpJdM8BMAxgOljxc8Cv/A//xLs/hLg+Wu1igMdOiRFZWytAert2MH8zcwzMqqwfLQ+9TnRmR4crC7PoV733fENAhDeY8YaeD4/hEUcxml5BkJaVOVWMbbi4LYfBhB7rsSpkEh6pDOvlkEdeGrW7zec4IKaUa4yMMtOh4nh+vi/r1oYuAgwX5lp0xBQNl0j/1+drXUUvLPdlX1WGhfHIPX20G7HzYT2o8rTB5PKHhR5pNQ7c6IjVzB
5aFqCQdX9BzbF6eziirlPmgT9cTBI72xrIipaoK8twm+zUNAFVWjaq5lFLTg5gmVGFZ2phJTecsNSqCSskGQVyVsol9rZalr2TVSJlV5nGhpac0f3jTHya+zpl963V7kJuxrF1bdZggtTF2HIdBv892pfnDiqLQ1aSqYrPZ4Ngl0XpJGu+pqgJZ2VhKl5ndQBMG9np9xpMpWVHwuHok7HaYzqdUsmK10YSJo+mI2eyELMu4vv7Carsme8x49913ZEWJ7XlUSuEFPnmhMXlFpfnYO90OliUMSr5ESBsM2UpR5vQHPXa7iGIf4/sOwu5QbUukFIRhD7CIdjH7/dZgnGx8L9CYNak1Hj5//oIQgmi/Z7XaUBQlYSckjhMmkwndjsPH97/w+PhIluc4lsOg3+VkMubh4VE/9ZUk2m3wfV+3pnku+71mQkgSjRPK85xut4uUkthUX5VSRFHE7e0t0+kUz/M4OTlpuiO26w2qUpoPrSzxfSOYLCA1/G5SST58/IXlcsV4PKJn8mTDoabIXq/XLBYrlFK8euXS6/VJkoTRaEKSxHheQBBoVPn19TW3t3fc3NwR+AFpmhHtUqIkQ+LgBF1c16GQFVIKTTUlVQtlryeznsnamNmWpTtljAd2DG5ta2nWP58YCKEjmiYP1jJk7QJCW0BY76YV1ZHya4+sXt/oa2/Y1AwkxJwIOKSajo1tfa5jnNvTtXfgY5PKaL0KoREUpqfcsiwNnDfXpv+kWWkFCqessPKMs0GfSgWcdHtcnIy4mI7Ii4RPnz8adTTtaNTbV8asqqpmFGqkcj3ACM3m4EjZsFJiaUCmZRL8lUmYgin3SqupAFVVrQLd3P4T8ZG6OfdbT6DntueMWX29Gs9zUItWSjUhUL3otEfjsdpFfPjwgKjW2FSEQYBtu1S5BlRayqIsC25vH0gzLfb7+LiguLvl5v5Wqw7Fe/qjAXf3d40qUJIm+IHPar3i09UVk8mE2XxO4eX0B30eHx8BwT7W0IXLFxc4jkOSJOR5amAntmH82NLr9SjLgn28N3J1djMG3W6H7WZPmuo8Wq/XQ1i2YZYo6fV6TCcnLJZLVus1i9WqmbhpoturVqsVnjsiCLTnM/EmbLc7/vynP/Fwf08QhPS6XWQlWSwWGgeWJGw2a9brDWVZMBqNGAwGeoI5DicnJ5ydnbFarfjxxx91qC+1wOxsNqPX62nPudPhy+cvbNfGSHp6316vh1KK+/s7NtsNeZESRRGdTshsNuPy8pw8z7XxzTJOT2a88QPSNGUymSKl4u7unlevXiGEYDgcEsdxQxpg2/pB5boeHz9+Zh/lJHmF6wf0gq4W5ag0rblWLDeCMGYOw8HrsYWu4ImWoTnO+T5nDNoPayEEx5ImbQ+sbQzbx6+LDrUhq4/QrBBxWGeK59dTff5vpXTa5//WfrVnpz3YCqXQ6lBC93Yro+FBrWRlCU2AattaSrHMsJKUvq07LryyYOT1GPoOaeYhJ1M63S6j8biZZ/CMMXNszf5ZlRWyruTIOhxUYPrkyhptb1gtKqV97bqIbe5eA11NsG7ZFnlR6kE3vZC1b10nPbUakb6sWm6+dtHbA3bswdVlb8eQ3WGQ28o8IeskdW6OXx9XSclmsyHLMqJoi0vMaOjRCbqaNrvIcFwbZUQ6SiMIMp9OGJ1M2CcR692W3X6rGVj7PZSQbHcbsjxjPB3heZ5u9kbQMYnn0IQ+6mFBVSmWywVplmA5FufnZ03/YhRrNg3bJJ2llJyentI3ikJ1TkeHbhnCspjNZtzd3VOWFePxAN/3mc81xXSvP6CsKgOI1UbUdVzQe3TIAAAgAElEQVRNSQ0MB31OJmOur6+xjR7kZrWkzHPWyxWvXw8pRUUc7ynyDN8P6HV72sAZaut6PMuyZDrVFcyaqWQwGLDdblkuVw31keu6zGYzI+mmOD2dNyBbgO12y2azptvt8uLykiSNm26P9XpJFGkjL4RO0K/XGzqdLp1en0pCtN9hOx7RPmEXxXy8uubs7JTHxYrtbkucZCxX2oBKhIa7VBW+bWsRZstht09037DjYQm35q9pWu4EmtRQoQVCaBmA2kM7zvu22/MaB6BlqI4f6scFhDriaBsXKSW2afSXUhpqbtusM2PCjDdWmjVTn7euelZV1eT72nCSes21r7cdSjdrUVSNBOKTTWrtANd1tKpepbEQdX7PQfd79sMOs36fbLVmMOiRLlbcbzZg6VbFYr0jzXKivEDu4+bwzxQADn1i39qU0tAK27Y1LKwpI9egWDPYsuVeGy+srnMKM6jHJeHyqALUdm9rXYD2E+x4a9x1UfecHb6AOtxxhNUYuCRJDOxAC/F6gYWwbErT97ePUpA6XKsqHQbajs0+icHW/GPng3P8pceHq4/89MtPAPQHfZPr0iHjYDDg/PyC9+/fa4/KGGjP9+kPBiRpyvRkyj6OuLu7a8LM8XiM63q4vt9U+VAY5SKNd6pDaNA6lbo3cqoT9GXVTMQoipAoXZ1MYpSUeI4WWplMJvQHfYoi15+rKibjMavVqmmyL8uSjx8/NUZmOBwS+AFZmrJ4fKSqKsbjMZ7nGciFoN/vc3Jyws8//8x+v+fdu3esVivev//Aly9fmM1mzGazJi+6WCyYjEacn5+zWDxye3urGRWM5+H5LkWpSSKzTHPdz+dzut0OHz9+IEtz3r75jjTLiBYRWZZxeXmJ5/v89PPPDW9cFEUMBgMeHx9xHIfxZMLV1Wccz6M/dPFCSafbo9fvooRDIRVWklNISPMSIVxqcozD41vQbnxu55lqj6b9MK7/1mbOaBvBdkTyxGtrFciePtg1iaftmI6FNrRDGIodXct4WjE1Y4s5TlmWjUNx7FV+K73TNtQaxlIL/baS/oYEX6DpwbSdqVCloLJLjZOTBaIsqHYRj1efiUIPiaQoU5SoELbWdBWObntzXZf//f/QZ/i6AGBbGpmMzovUCOJaSg5lEoiVHirjt7YqIvXXKpqfutm0xt7UX4SmFKn7xhSAklSmetU2ZnU+Bb7mRz/eDhWhJ2azOY6GY3RAQJZl7ONYP9XNblJClpfkecU22hPtYmwcgiDEcTz6/Q5+EFDKkmi/R24VQccnrwq80MMNXdbbNRJJ0NGJ+zhOiJI9n64/MRgPUQK8MGQymbDbbsmyjI4R6xBIPM9hvV5Tlgo/CEjSlN12z2azodfrYdsO8T4xjxCLqlL4vsd43GEf3QCKbjcgDEPuHx4oy5Lz83OTi0upZEmRa8EUzS8mKKuCJNGh7v/z8Se6nZDZbI4lFN0w4CaNSdOMbqfP5fk5Jyczon3E4nHJ4vGBONqBEOx2EeuN9nLqxP/333/PaDRisVhQV5Y9z9VtMMB+v3/ihQ+HOnTQdE31/JBUsiSOYzabDf1+33h9NmVZsNms2W63VJVkuV6xWCwJw5Df/fXvSLKU//M//keUUpzMZgRBwD7e0xv0CcIQUNzd32M7Drv1FiEclBIaS5hEYHtGfTyjKHS/sb4uR6uEK2MohBZ+0a08Xxeo2vP2GGbUrvgdP8iPjUa9tXt6
j4sErZVQN/81oXA7maYLAGbumz83AkEcjFn79bFh+2oN1m2HwmSnWn+rjy8wNqSstCCxZVFWEkvm2IEFRUmy3ZLtIa9yqqowVHJakKUS6ivH7xnQbCteri022ijU7dtWY+JVA32oLb7VJuWGxgNxavXs6pBAbGL6VlAvRLsS8nQi1O7vt7a6zCulNJWlwzDWlVRMeCulVqqulaBc26EsJZYlySuBYzsIy7TzSFOxUhrqsIv3OJ6H5VpkacLDl3scz2U0GaGEIiszBsMB05MplrAbhlitk5nieT7braaW7vV6JGnK3e09JydTknjPaDhgu90zn83ZxXt836coK168eMlyuSLLElxXi99meQ5kgKDT6XF+fs5qveb07JTtZkcUbel0e1x9/ohSOgRaLhdkeYptW3Q6oYaLCDSt+M01j3fX/MO//1/5wx/+jjzP+Q//4T9QldpTm4ynvHjxwqDot5RlQZKk5FnWNIgXRUEQBPi+z9XVFa7r8vLlS0ajET/99BNCCC4uLgDNAluHmuv1mrP5HNu2+fDhPf1+nyAMiHZb4iTGti1OTqZYljD72Liur8PxKNEFlE7IcDgEdFrg4VEb89/89rfYts0//uM/Mp/PGY/HKKWIk5jFYoHneYhMz5dKSRzPx3F0X4ttKXzPJnWFBjXrngywpGZcNlagMQpKPVnA7fnZTvYfe2nPzfvn3j/Orx3ep3mIq9Y+bft1AJgf8nxPztF6fZwra/9+bGjb19FUQnWTKofVrr3HWkC5ZuKoqoqqEFSWph4vbJucSrcqCokmHZM4joUlHNIkMdHW02v4VWOmk4mquRSUwrFtbKG73i2FTuhpC4dEa+TVIvSA0Ts2mCxh4diyyZFRHYB0h6qQvulDheYpXud4MI8LBKr+EtTTe5Fm0Gp1p7IoqMqywZrhKrIix3YUSujO/N7Qw3I88kRT6RRpwj6OieI9buBxdn5KfzzA7XlUqsQNHB4fHuj1uvz2t78lDENub2+xLHj58iW/+93vWK02PD4+4nm+FgpJMjZrXXncbHagNIV0vE/YBRFfbm8Jgg5pnjEajSmKijwvdCUTG8vSbU5VKTW5YJYyHPbJspQ0i5mfziiKkvV6ZUSNLWxbMB4P8Twfz9e9bxo7VTGfXfDu9QvW6xXv37+n3+8hpeRv/ub3hGHIfh+z3qzI0kyLowjIspTFIqLb7fHq9WscVxuofr+P4ziNsag53WuPq8aWDYdDgiAwepgpX758Yb+PODmZkGUJm+2GMAzI84w4jgnDgPv7O5JkT3+gzyGrik4YopTg06fPeK7Hq1dvNPPCxyuUQjNrnF5gOzbL5ZrRcMIf//jvuL+7409/+hP/8O//N25vb/nvP/wrnh/gG4Fq1/dwpaKsSvKyIC0lgpK6yblhKDb9i1rw+elCb/9+HG4+MQyiZRX/wtauZDbvmf8r6vy1VjvSVNOtz4nD5+v3LWP16uutE/lf2YQj43a8SVG1nJ9WIaGpLKqmIFCZ4E7KEolWfaqkIJXSNJwrqqpEqRKngCw31VeltIfcOv6zxqw9WI3BMElN3Tyu/VMbzYShO700+liYXv/aamo3U+jKJ0Z+vmXImqqHVFAz037Dlf5WFej4emtj1t6nzgX0ez2dQ6q50pVqcGaO4xIlG00Tjo3v+gRhF4FNlSRUVcloMqYz6JLmKVES4fV95uczHM82BQIthluWJQ8PD9zdGcGN/hDP89jv9w1gd7lc4rkBd3f3PD4+MhwOeXFxQVlKhNDVUqUMkaPtmJyZTVUq9mUMsVbB8b2QONEEhVmeUZau5vbfbphMTzg7G5GmKbPZjOF4xHa7YxvtqSqt/JSmMdPZjF43JM1SHEvnE6+vPzffged5PD4+kucFg8HQJO8zg9cLGyzcZrMl2uuQOE1T3r59i21rpgXXdRshF91VEOP7PqBDzTAMdVK60Ji9IAwQljCdBD0eHx9YLhecn58xnU61qMx2S1kVDIdD5qen7LZ77u8XnJ9d4Ps+Z4bj7erqiru7Ozyjy9nrambgTx8/Yds233//N1oNKorQKPWMSlU4ZYlTFiglyPMEJUvN/mApTcRY54JsTX5gKVDV097i9hxs/zw2GuZNExE9NSDHx2jP9+P1W3tocDAgTTZIKdTxOhI88dCEdciRHRNBHhcsnvPOGoA2dutvGK/R3HvLMwWp2SPrv9oW0gbl2pq0UQqkFAfCiao0OXHx5KHxNTTDYD1qnFZj/ZWOcYUBxQopNchMgCOEIa3TuPiqFcw+50lhKEOU5lhBSOOECoFli6bptu2KHw/et3IJzxkzpZSuuLbUboSh767zaJ7j0u32WN/ek2YmPxTocn1VSKpKs4wGYci0PyWXJevtirRIqFTJoNvDDzzCIODL5xt++OFfKIocy9JDvFyu2aw3LBYr45G4VKXUBjSKCIJQo8stB8/1OZnOWK/XeL5FkibYjkee6UWlvQBBUehsSKfrk2UF+2yP49qcns6J4z224XuKkz2dro8feDofJXSnRCUV+/0Oz3N58/oV0+kESxYs728oioLPnz9jWRZ//OPfk6YpV1dXBvMlGAwHpGnG/f09l5eXTKcnZFnO9Zcb7QE7DlEU4bpuE1JKKbUXjO6kOD8/RynVFBn6/T6h77NeLqlkyf39vcH29ShNIaOWhqu9O4BKlnS7XXzfY11tcVyHTq/Lp6sr4wkJ/CDADwOCIGA8GjM5mZIVOTe3t5zMTqiUJE4SdtGObq9DmuqcYlEUWKVWFJKV0qpfUlAJRc0+IbE1X59t4Zg8siW+9rzahahfCyVriup2dHK8DurxbG96PEQDRK/Xj+YsVhyfUtU5s/p1/XvrXM85Es+Fn0+OSxs3Zz11NJUuHkoBjjrk0HQ3goUUim0W40nwlINra1YeRaVZaQ8n0Ywkre1rz6yVzD9OTOrX+nepNJuGlmrXlROpdFyrFFRCIGWltQSNpqZlgyz1E01fj/kyLBDYCFuz0trPfFFwSEZ+NXjtAW2Fm609daJcVUT7HVQlnSAE4bKPI6y1wLEtxtMhnheQ5RHbTUwc5diO5qcq8oKizHhYPDA5GXNyNmMym+CFLr5hPXUcB+kok+D28P0AKSVfvnxh8bikE3Ya4KdtO6RZSmgJk5dxcBybu7tbgkCHN8PRkNVmjVKKwrRQxXGMEILxeEqW5WRFgWVpuh7PC4iTiCRJ8X3dMlSVJTm6vzTeax1Pz7EZj4b0BwM6YcB+v2cyHmpNRSk5v7hktVozGIx0s7fxWieTCcPhiCjas16vcBzbgGIVt7d3ZFluVNz1fQ8GA6bTqVah8n1c1+X+/p7b21sGgwGuq2X9qkozZygp2e8jlFIM+gPWmxV5nnNyMqUsc87Pz02OTnctTCYT+v0eu2jX9I8GYYBUgvV6RRTteXx84M2bt/z+97/jH//xvzRqWZUsG3EWx9XSZZPJlE5PpwaqQqcgSlkgihJhOViOi2c7RpFJMyzrrLleNBp2aWldzfp5agxXZTCbdfWwNhh1BbEOnUQTgpqkvdCGUQkQSr8WZi5rPn2teYkFFrbhXjv
CkbUtVnut8/z7SIWyvs7PHa+3J15ivfaE2b8egLrCoA55xKpSVLbmVJMKQ70NdZelHj8FVUkpW7k2WYJS+I5bL+sn21fGzLVspOmfVNXTtgzLsQ2NB/qJVJUox+RbpKCQ8kC2JgWWYWi1DW22VFVDA6Kl29HMAzVpHJrapM1R9NWgtZ5u8LRkbRnkNTYIieH1qvRTSTdragpvxyKTmW6hwcfOLHapT092KPKCJC6wLUuLYuxyfFeLmZRSstqu8XsB6ecrBoMeL16/oCxKNksNGB0OJqRpznh8QhLH7PcxeVYwHI64u7vDcVym04mWtDPGfzjs0+32GI/G7HYbBoMONzd3XF9fM5pMGI2G7JMYIWyE6ADC0OqUOJ5HlubYlovAIYkyqhwqIcmSHBB895sXlGXBp08fuLv5rBu9JyOm0wmBa/FP//Tf+ef/+l90Pi7NGQ9GxEnCy5eX/PVf/xVZlpKUKb2ezsXt9ztD16MX3mKxQDvvirHncX52ysePHzWLrnbBkWVBZ9BnMhpx7TogK64+fqTb6zIZj8izjO16j+u4vPrNC0bDQdOHmaQJX66/UBYFvV6H3XZLMJ8RBj7b7ZYvn6/J8pQ8LziZnXJ2doGS4LkOF+enpGnKTz/+iTxPKIqMwPe4uvpIWZR4vkccR/zN3/wNJydjfv7pJ2xgNhnh2TZXV9d4XshkOsT1AlbbPf2wxz7NKHMNJ7EcgSorCpVRCQsL3b4nDD4TqY0OyvCOGY9IGsNWY7xMw6cGjqKg8fw0dYMmGa3wvQBhRFQsai8Qnewx60IHbaKBKOk1ZNbKoRpgDFdrsQljPKQ0dEYHfJk6dhTaeTcdeevX8rAeNUWYjhAMKRCWbWkhGFlR2S7KF5QKSgW2KrCVwlEVSmkWaZQRjpFGp6OqubqfGtmvcWY1ercV1zfxuqWZWhW6aRxZokqdL9CN0DlWE1cr3VqBZo2sGZsUUvMyWUobMstwjRtvzTqy+scVnBrEVxuz4+LAwbjps9UDLWo/0NL6n76r5bJs2yHJE24fvpDmMbKssIVLnCRkaaK9DAeiRPOIdXs9PNdDoq8jjvQ+aZrw4cMH/u5/+nve//KBy8sXrFY6pLy4uNT0OEYUeD6fc3Nza4ChmXnf5uExI452WKICLObzE0oFy9UjfqCrdF++3FIWJXGSUVWSi/GEXm9AUVSs1xscx6fIK3bbiGgXMxoNGHT7hB2fTugR+D67aEe827JZLgDBsN/h/u6B9XpHrztmu02wLZtol5LEGVme4vk+2fKRn3/+CSkrgiBkOp2RpQXr9dZgz0yPYxiaZLxis9kAsFwuefnyJev1itubGw0cDkMuLy/oGaDtYDDA9zwC3+Xx8ZHpdEoQ+Dz8eE+300EZKvDJZEK8j/nll1/I85ww9JnNpqzXG6qyYL1aMhgMkbIkz1M6ndAQUypWqyVnZ3Nev35FFEUUhRaQvr+701XT/R4hK66vv+A4PufzOWUJ2+WG2TzkfHbKahdjKwtHgLAsKiytFlZJ/ZA2C1CqQ3FLHS1+Wadz6jlqZqtSEtu0ESql2WgRlubts0Aoq+Hwk4apoxGDMgauyUsJoYkbRc1cfMjRtdv9auNkio+HvBdPc86NQWtsyKHbADQVoQXYtoNtGThWMxbGyqFwbMuMjaHdFlazpmVlIev7Nh3zSunQXimtXWBbtdl6imz4ypi1DdjzwDzV3HBTHNA7IYyxOxgbC2lJ3YRej9qT0ePJ4DRhbMuIHf/+awnI+trN5dR7H00kHVaEHQ1oVZVkt4tZxxGb9Zp+d8DpfE4Z74nimKATklcVUbRjPBlqReUg4Lt3b+h1O/z888/887/8QLfboSgl//k//d98+nTFy5evGAwGDbYqNu1KcRwzGAwMw2qoOwaUpNvtslgscF2Px0fdFXB58YJ+GGovGUjThM16TceoosdxghB136umPprNZoCWpOuEHWazueFPT7CEVih/fHggTTO22y2np6eMRxNWiw1ZmlHka6oCpidTptNpvSwIAt+QPDpUlTC6lCl5VjbzoyhKXFeLXdSq7ZPJBMfRIjl3d3d8+fKlAa5Op1Pm87mGxrguJycnrFdL1hs9To7raFqhLKMqC3p2D9fz2RvixVJqz2owGKBQ7KJrirLC94MG41Zj2nQeSjQhb5omLBaPBsx8zna7Zb1eM+j3eXF+hu/7bDd7dtu9NrBhj04nZJ9myJaOhLBrz0HrFeg8lNXMtafVejOH65xzazkecr2akbguuNVz+bgQ1jY05mRm/4Mxe7I+W8vuyZppOyvo/LXm1tPY0uPuguNNtNeaOWz7Wtuf/Criav2Trd8POqLSGF39npL1bGwf4bA9a8zqGz6uurS9n3bZtkagCyFI4/irY7QT+e0BeS6R+NyA/aXtODH6/A2ZL6u+n9rLa4oC2tuM9jGTUhIEIUmQkhclRaWpRjw/YLVa8/n6C8ISvPvNd7heQBQnKCV4+eol7395T6/X45dffmmSuVVVMZ/P6ff7/Mu//EvDEVYzpdYezHK55Lfv3iGE4OMHzdX1/d/+LX4QcHNzT5Zl+L7PaDQywN6CKNqTpJkpDsjGg5GywvP1sT99+kSaxXS7Wkfy/PycNM0M6t5q+L4GgyFZVmEJGI0GvHz5krLKNZ6s8hkMhsxmMx4eHri6uuLh4R7QwrKaVVY1PZaxmQfT6bRZEJ8/f+bm5pYk0bi4LNMGtd6KoiBOUhwzZlefP/Ob3/yGv/qrv+Lu7k7Lx7ke0+kJjmPz8PigDVsUadLGThe/UmRZQZbpntvpdGoa/q+xbbsxnuv1GtALL8sypJRac3MwQCmF7weEoSJLCxC6P9bzXEoFSVEiqkpHGXWMJnXOGKUQtvtkfj+ZhkcP5sbgqIOPU6+v47XTFDy+EZ18Y9o/+Vlfw3PLRSmtrlFXPA/h6dNrF9bz56rPVFW6BFi3INYtV0I8Pd5xcaE2ZJrm3tCAUysT1KEzUJV8VYLlG8asnZdql4/h0LdVf6YyDeTtUu5zX+JxefnZJOI3qiPPHee5sjVotlJdbTV5PnGI6QUtw4uZTIa8sdMJcR2fotDI/8D38YKQaBdphsvQZxfFvH7zHf1BjyxLeFyujAFMWC3XnJ1f0On0ePfuN1iWxXK5ZD6f8/DwQJbpfNnvf/+9Qb4LM3YS39eU0fP5Gcvlmvlszmw2J8syPn/+TJwkRPuUbrfXaEbe3t7jej5C6MmSZluyPCHLM06mUzabNbOTGe8/aKPa6XR4+fIF+31Er9djMjlBmsfhcrUmDLv4fofdNiZNCyaTsWatCLRxQmiR3aoq2e/3nJ6est1GpGlOlmYIIej3Bw1po+u63N7e8vbtW+bzOVJKXNdlOBw0cA4pJdfX10wmE4Ig4PHxkXgf0+10GA5GnJ2e0wm7+l5mc8bjEVdXup1qNjsh8HXo3e10sW3bKGJpoeEwDHEc3UlR94UKIRpwLtC0fAGmNUrP4Y8frxBCh7OdTpfFYs16G9HtDbRCfa6JSmWZG31Ww/xSVVSVxa8Bv4/fe/paGS/MbgzaseESQjSN+seGrl
kHbYPB1wHRN6uQR85Le821je6vmbLaA1McugVqh6fOxdUeWu1YPLluBa5l4QmFKw5VXVtoun0QjfjKsWf2FZz+uATbphlRSjXgx5o0r+6ZrCW+2srHz23f+nLbnlmblvd44Gsr386Vtbf6y29XcywTv9f76tBDV9fqyqPjug27aZzErDdbkiRlnyYmkWrzuFgSdrqcnV8Q7RP++Z9/IC9K3r37Lafnlyhh8de/+x1h2GG73fHDD/8KCE5OZqZypifq3d0di8VSKxPFSXMtl5eXDPoDbNvmxYsXvH79mv1+z3arDZB+eEhTZdN9m3XDej3++2hPnuf4vofGSxV0Oh3evvmOd+9+g207/PnPP+rwFwvfD3n75jsuL18QhqEJGbTW5U8//5kkiXn9+jWnp6emi2GL7/ucnJxwcjJhNOoThL4JCUNOT+fGWE4QQvDlyxfyPG+81NFoxOnpnNevX2vdUGNklFLc398jhKDb7SEl+IaSZ7PZ4XsB69WG+/sHQ+qYstlsSZMcKbWxnkymDWNHzcTx6dMn9vu96VjYNfMtTVNubm6IoogXL17Q72sGk+12x3A04ru3WnqvLAp83+XViws63ZD1ZoXtaB3ImpNLyQpkZRhna5S7enYOP/fwfs6w1J0zNRTl2dzVN9btE+N2/JOWpsYz6/I5A/nVv2/9Z7y5en22r6e2IWWrNfH4ms0FNHodGhYucLGwBbjCkDc6Fr4jCGz9s96epc2uLWq727/efN/HsqyGnvo5rEt74Js2otYFf+vLfe51ezC+lcOr/7WvpR5cPSksDfZt3VPTnG0Z5XLqhKPAsh2SNCVLUxBgOS7Ctgm7Xa6+fOHkbI7rBzwu12TFj4aS22K/T3DPPaqywrJskiTlp59+ZjAYGLhEYLxdDV+xbcdw5zu4boGUiiDs8PDwyN2dZnatSs3IICvJw8MDnVBj36aTCd1eyHqzaxD2vu+RxikPD/eMx0MWj4+UrS6HTx8/MRyOiOOY5WJNkujezNev33B2do5SMJ+fkSYpWZby6dNH0jTmD3/4WzzPxbYdHMel1+saUZOtQfD3KEuJ57vM5zPiODXklGN2ux1xHNPtdvn555+5urrGdTWmbNCEdIc+zjiJSbMMx3ZZb3bEyZ7lckEU7ynynOnJnLdvv2MfR+RFQVEW5EWBsGwuXrzgy/UX9tG+mQ91U38btKtJMbXuZm1k67lh2za9Xp+8KEniHVme0u8NGAxHFFKyWFamk0Th+w5UkrzSOTDHthGWo1kzxFd+wlcL+Jm/gpKG2aUmD6ApfD2XOz5+oKujNdR4ZEq1rkkc8nFH+/yakXz2+pX2iBpf6Zl00pOPq9pPPBzv6TGV6S6qtQxMhFXvKxSu7T2BqtTb1xRAhq75W95VPahN9cH8az5/ZMjaF/zrX+Tz7u9XLu43nlCHL7dOkB4MWS0ibFkWea75+UvPMfxqetLYIgBlsdruGI904juKIrrdENuxcVyX16/fsVw+sl5viJMUpQT7fUqn0yF0PZbLNf/1v/4Tr15e4nkef/zjH5v7Pj09ZbFYMB6Pmc/nDcVPURjqHqHbeyrDyLrZbLAsi+nJnPVmgxKC7777jqqULBZLXM9jsVgQ7RPSNOH161fYts3iYYHv+c3ilVIShh3d0rTRZIZlKdluFghhIUTGzc0dv//99wz6I05OJqw3OjRbrVeUZcHNzQ0nJ1PefvdGJ+aNIvbDwwMnJyeGH66iLHPWmxUC22hk6m6LIAi0jFwck+cpZ2evefPmTePt3dzccHNzg1KKOE55XCzpdrpEUYRlCbrdPmmqaZBmszlplpPnJZ2OTsrP5jP9kM1y1usNge/z4sWLxsg/PDywXq+ZTqc8Pj6y2+1QSvHixQvyPG88t5q9Y7PZMuj39cPEkJPqSip4ns1+s8O2bcLAwSoVVaJVslzbxXY94iz7i57Tc5swNiZNU3zfw7btxitr71Mb3jb9dtsYPXmoCy1UrNdP/X7L0B45Bqhatk48Oa55cbj+dlHTHFHbFYOpM9fQzpvVYtxKfZvgUZ+nLgKYgoZhq0appuJan6tly57BmbkuaZo2g1aHlrZtNwyi7cRkHZlijgsAACAASURBVGoeN4Efe2g1xctxo/jxl/wcO3q9X9viH3fuH1xb2QxIfV7QX44QGrnueV5zT1ppSWIJhS10CLqLtggBF5fn+L4HQFUVfPz4kfl8xu3NLXEc0+vqRujhUHsYjiVYLR64ubml39eh1v39Pfv9nul0Sp7nvH//nm63SxAETZK8ztusVitCP9SlbdtluVwRxZo+aDga6vuwNJGgHwS8//CBV2/emHtyUUrT8iip2O8LHMdmNBpxcXHJfH7C9fVnNps1AovRSOtXfrm+JcsqwqBn4CGPjMdDM24Sx7F59+4d3W6H5WIFSj/wwjDk5cuXeJ5LmmY4jo3vu+R5hm15VFWF53m8ePGCzWbTgGOn0yl/+MPf8fbtW2O89BgEQUCSJGj9V5s0L5AIRqMJnW6oab1twXA8NgIvOb7jkJcVHz5e8fbtG+Ik5fT0lIvzczxPayt0Oh2yTMNfdCO8nhe2rQG/juM0xqzb7bLd7sjTnJ9/fs9oNAQUZV7Q6/U1wYCrJXy63Z4WyN7FCCRCSU3vLWzjBT1dB+3Qqr0u6u2QlNd6GkqpJvo5Nmbfel136tTr4fC5AzRD59yeqivVl6E/gzFoOlwW1Iroh7VrG0ZbW9ga0KsUldT8h6VU2FbdEfTU6eHofmuG26dQMD2+ltBdFcq0RVpCII2HekwFVm/PembtpF07D1afuJ18bycnlYl324N8/KU997q9/Vu8t1/bryxN2dw4qJUZKNt4apPJxLCKuiiV6cSt1KZfIqmqgsfHBfP5KaPRgCja0+t1CIIhP//8k8m3JIzHI169ekWapny5vkGqit+8fUO/3weUwTDpilqv18N1Xa6uruj3+43IhzY0F02S+uLigngXExExm2lv4/5xQRh2GI9G/PCv/4qsFPf3D4wnY/JcwwRev9YMqg8Pj6bJWfDixQs6HZ8ff/qRzWZNp6PFcDULrW0wPYqylPie7rO0rBDbsplOJywWj7x69ZIoivjTn/5EGIZ0u10tK+f5vHz5UlcUHx7o9buUZclquUJWFtPpwFAmOQ2b683NDavVivF4zHg8BjT2bL1es1wu2Wx08j4IOygFWZ7T6XYZDIeMx2OiaIdSWhz56uoT3V6X87MzpNL9pbsoZrVcUpUVjuOy2+0YjUbM5/NmXmulKItut2vGImW71aSag8GA+XyOknC/f8APfM7Oz9lHO4o8w3Vt7EyQFyW9XogXepAWOI5FN9Aye1lpquWWZbzeoxCwZdCOowzzCe19iIMxq6Oe53LE7eJc+/2yqJrwWRgRYbPHVwa0fv/fltzX57TN9bi20a1UirIUlMbwyNY9Pomg1KFiC1/nE/XxtUGzbK0/WpM8CgssqYWEVf2weBqxPg+aPc5LtTmN2h5ZO1dVfyYwPO/f2tqG7zh39mv71Vv7nL+WPHUMFZGSVdPL5/t+EyIVRd6EmZZrIUstC
lIVOZaQlHnGevlIkmRMJ0MuL86xkDwuFkTbDbc310wnU8JOSJ6lDbVzWVZMxiOyLCPeJwyGQ05PzxDAoH/Hw8MD3V6PQb9HWZZEuz2+F+B7geZX22ukf6834OTkBGE5jEYjEFp4JQy7GjIhK1zPYzgaYjs2YRhwdZWwfFgyHA7xfY+yKk27UcX19XUT2m42O7bbHSh9TN0WNGQ8HtIfhCgks/kJlSz5T//Xf+bjx4+cn1/w93//R969e4fnOdzd3xIEAZPJmLv7W+7ubijLiulkThD4dDq9ZvKPx2PqKlyv12tETTITjtXqTFmWkeUlaVriByG2nRPvE05PzwgCnfvbbvdaMBaLWn81DELyLCcIO6TJnpubW7IswbIsbm9vm+rry5cvSdOU+/t7oigiDEOKokBK2cyNIAgZDkfcmiLNavVIrxPg+0PCwGUf7wh8F6UqPVeQ+J6LFFBWhWFgtpvw7S89nL8xy1HqUMyqHYt2IeBQIeSp8yErsiJvjqRzx20v6akFqD2x566zWZutY6EOjBd27a1JpYGvHDzB+hrr42haLn1fdThdVzOfGljdKyCFLropDNZND2kTQSlMHq0V6H0taGJCs9pYVFVFlmU4Rm6ufkocJyOb3JUxFs+51U8G6Nhqc6ikHLuj7Rs+3q8d8tZfrG1ruiH9hsBzXbpdDTStGVodS1+bJtJT5GVJFGl18YvzM5I4Zbl4ZD6bU5U5H375WWONbJvRaEhZ5BR5BsbFthDstjuWiwWT8RglFcvliiRJkZWk0+nQ6XSpqjsc28H3A1xXmqS6DnUWiyWqUgwHQ8qiJEszxpMJlmVxZwRAhLC5OL9gt4+4v7/n9HTO42IBKPqDHpYQOLbuOV0tl3i+Tno/PNyTJLFunK8qsiynLDQ7SFVKdlu9uB0XHhc3XF6ec3Iy5Xe/+2s+f74my3KWyxW2/YFdtCUIPIbDIUmy143qb16x3UZYwmG1WrHZ7BiPx2y3W9PfaRljETSpiU5Hi4osl8sGb7fdbvH9kpFwyLMN/f6AJE6xhE0YdsjzNa9fvzEFCRulcpI8J0lizZK727LfbZlOx/R6PZbLJfv9vvGAd7sd6/Wa1WrFYrEAMFCPGZZlMz89pdPpcXd/z263ZTwe0w19er0Om82Kssjp+D55VSFlAUoihARZIatCt9BZPsp6njGjnqO/vhl4w1HICHyVQ6sdi/ozlrQaJpLn1nZtKH8tWvo1wwZoeIUQpm3QrPN2Lks8xZA1xkweOhTaztJxAaOoJGXlUFiNWTSK6yCx0JyJGqD8q57Z8Y1oL6Z41gt67qa/xQhwbIF/bbB+7W/f8ubaP8tSc00ppVsn6kXU7XZ1X5yURjJNNcZMS6JFnJ7OGI+GJFFEVRUMBz0EivV6ycXlJdF2g0Axn50wPTmhKkvuHx4IfJfXL1+SxjFKKna7yPB9WaxWG6JIS869efPWPAx0s3iv1zUVPw3RcITDcDhmt9tSFCXdsE9RlERRrIVQ8ozF8pGT2YzHxweyLMXzHBYLTSF0Ojvl6tNnyrIkCAO2200DiciyjCRJybKcTtghswps22O/T5AyxvNcKukymfbZbNaMxxO+e/cdtu3w/v1H/tt/+ydOTqZ8//3v6Pd7eJ7DaDTky81nfF8z1iZJgixzoig2lcEeea4proUQDQ1SDe+RUjZhf5qmJoRxiKKYTrfDaDI26HrJeDSi0+0ymUywbUv3jKYx0X7Pcr3h5ssXXFvQ6+qq8c3NTdNdsNvt+PTpE0op+ia5X8N0Hh4eiOOYH3/8M0I4lIXEEhbj8YjLizOKPNECy7Kk3+vQHXQpSoVSAtspyUtFmuUG8KnJHZX6SwbrG5sQJg/Fk+jjeJ7Xa6ptLOsHeafTOcCl5FOjqI95vK6eGtz6Xcu8aHcUYCI0SwhkqcV4dbe4FqyuXa5jB+Y5Y1bfQ3tTCgoJeSXBVk0B1jJxZQVG5a0WXf6VamZt+RtL36Iiabu57Zt/zgof/63+/P+I2/1vCTuPt9qlrlSlySCNZmaNjxNKK6rnRUGR503S33Fd+r0+3TBktXygqnKmkxGWBY4tOD+bMx4OePXigvcfPrJarthHOwSCbqdDnmUsFguUUprNIst1rms80ZqjpVYqd10P3/eJoogvX77guq7JqXlUVaqfTEXBZDLFdV0KAxe4uLhEKUV3HPLw8ICUFaenc1brleHcz7i7u+Nsfq4hAkIwGAwMmaHug4zjxCDwE3wvpCy1Qs52u2U8njIajxmOPH7z21ds1mseHh4RQrNTFEXVdC7MZnPGkxEfP/5iWq8q4jijkpoMsD/oo5vidYhZGzOgUdPJsqwZlyDQFN8K8HwfEBRlSRCE5HlB7hW6WmnbdHs9iqKkLBXrzYblcsHj44MG3Cax7pdUFbYl8H2/MVyat00rmc/n8yeV7dls1kBmHMeh2wl5cfkCz3XYRWui7Zb7hxuSRENMBv0+pQRhuThuRrTP2Uep6Uu0qbBQz+S4/q3zWe8jvlrw9dqq3zsuLMABclRHVTXuSx9A/2jDPOow8+kFHD6rtDVr/lDnvmRVaV0EIRCm0ojJudXhY31Ndahs1UZZHAzjE7QCume1UlAhKDnwldWPhupwBl31/DXPrD1QbeWXY8PWBtPCwRU+9syO3exvFQV+bftLSf+nLryN9oA12rvT0Qu5LmbkaYKSUjPNVhXC0snhTtDFHg7ZbZbE+w2j4ZjTszMAiiJDlRbXVx85PTsjS2LCwOf+/o4wCHnz5rXWXrz+rPs5TSjV7XYbjJNSqkGfe57HdrtFKdWMcQ0eBciynLPTM8qqIisipFJMplOWyyV5XjShWbfbZbdeYtnai/jpp1/wXZ+XL1+xNZTWk8mYk5MTPn/+bNTENUwiibXB3eV78rzk5OQU3/OZzWZE0Y7hcIDn+RoCEXZZrdZ0Oh36/QF//vOP9Podzs5mzE9n3N59Jk0z8qIg2ZcEvjYg9cPPdXV7TxAEjShvkiQIIZqQaDKZ8PC4II5zsjwjDDsopfj8+ZoXl5cMh0PyvCCOdU+l52nyyaurK/b7qBGmQZUaG2jmY57nbDYbpJS8evWKsizZbrcMBgNDtx0BcHNzA4DvdbBtn1evXrEtczabR8LA1YUVobBsQVFmVFIHPY6jhYlt29LiPJaFMOT3/19zZs+laNprTc/zpzoZ7d8tDroZtm0jHK0aVZYH7dqn6+b5NarfPPxsGtj1h6mkkZFrVUN1ekc1RuZpAUAe8mXia2ON0t5upTSDhqXQJK4obOoUmMR2WqSPrTjz65wZEilLpNJ5oLanJYTGipjzUstZNTdiWciyPD7ks9s3v2T1fEj6LU/vufeU0qrKruNoCIZlURUlcVmS7fe4noNrOvtty8a1LQ1StAWu5/Bi+hJbWMRJRCfoNBxd0+mUjx8+8P79z1ycn9Prd3n98jW///73urIoK7wg/MobqSeblrOLGoT6eDxuKn7auIFru7i2Fr59XCyxXIvlakWW59pTKwpczyfwAx6WC7K8oNvVRmY8HrPf75u8ozYiB61Qx3Upoj1SKZIsxbJ1
p5nrJcBigZFElBHKegqERJSqEEpJlcmoZhMBwMeP7iBcONTX70w5/wn//mr+n0vuSHP/gRx69OODx8hfWOwfvvP+Di4pzd3R08b8752Rlff/01xd2c3d1dvv76a1ZLj/fuv89sNufly0MUVfbCqqHz6aefsrOzzfvvP8A0DQ5fPKfZbBIEAc1Wk/l8zsuXL7hz5w66rvPq1TG+H9Bwm2yMtsog3zXtTptOp0OSxKxWS4LAJ44jdvd2CJ56pYtwUvL35NDO0owwimhHkYBWkYzkVR5nJSLv9nokZRhxFEVomsZoNOLLLz8HVeH27dvEUSqHVhzJisBI8TwPRdEE6AkC1v4ay5TxMUrF72xzMJT7LM3p9/slo0DE71EU1fZFVdGpCO1h5JPE4hRrlIVLUUBXNXRdZWtjs0yikrR7yV4F13Zxeg7n55fkeYFhmaBJ3muz1RF5napQoJKjo2eQ5n9iZ1YXmdfV5htjZvWkr3dP1Ye4cArZtcp7vK6vvL53e7ugUe7c4iQhjqJaAKvrOoYmqFqRvk6Fum72GMcxfhDiui2ha1RWRoXIMaqfZ5XUgUazycZoxNb2NoZh8Mc//pGnT5+CUlkXlfIdReLnVU2V+K4oxFsvMXSVwaBLt9el0XCkE1QVyAvyrLhG8i1K2xdhEKWp8GsajQaappKmMYaq0Ww1aToOy/mCdqvNyfEJcZLUwmdd1zEti8FgQJymnJ9fcOvWTZbLFXleoBsmi4XkBMxmMz788EPW6zUPv3zIRz/4iK+++opnT5+iAJ1Oq1Q1xHRaTTrtVv2uO9brjAHICEKP9CpiuZzhuI6MBKYpyFleEEdRTXKOowjHkv9HkoIi6VZ1SWs3DFX2l2pOp91FIWexlKKnKAIOGaZJqqTCYcoLKKRbTJOIrFDw1ktQoN1u8ejR13T7fd7/4AOePX3GrYPb3Lhxg1/99X8m8tf85V/+nF6vy8M/fMH5+XndYbx8+ZLbt2/zve99j0dfP+Lvf/OP/OVf/gv+1S//Ff/P3/6a6WxWagZzTk/P2NuTpHVT1zg7O6PX67FcLjBNEY1Xfv+r1YowDOl1B1xejlFVjXfeuctyueDFi2fM57PSBkqAjlarVYb26iyXpdur7ZJqOY7joGdpqRIQgMjV3ZrRXyWNt5pN4k6H1WpFEsX1gdnv90nzjPlsDqjs7O4SJ2L4qSiVB5lQQ8IoEhCpJ7vi2JNO6+6t24zHYw6fPqfX60FRcHV1Vcv/BL1WavCg1WphNxrkuZB9wyAkSSMoCgxDE+qOqdd+f7quvkHsFU5pgWU75KiYaSamjiVomANRHGFaDoWmUwh/57uLmfa2y8VbBUtT1ZqbVRWLioUvth+vEcvresrr1I23C9n17q86carCV/mOUf63JIyA1yaQFcKqKAqGblDWEyjEV9wwTaxypq/QvI2NDVRV5fnz5/ztr3/NcrWk2+kyGg7wfE881tIy1wD53ppSEMdqmb4jPDTD1MizhLywURA3Ase2ubi4Io4yBoNhGVRSRpGVST7L6ZzlMqPTadHptBmO+kRRxJdffkG31WW1WtPrD5jNZoxGG4SxILY7O3vcvHmb/+M//Ac+/P73sSyXrx89KV1Oc8bTGWvfJ4wiDm7e5Pe//z27u9s0XIfPfv+72i3BMDTa7Sae57G3J8V8PB6jKCp+EOE4NvN5TtOViLU49AnWK64uxOFU3E81kTVlaXltCAiwUjXIczH0U4X46nTb8v/nCpqSgSLXUZZndQYDJffPNA0atvjHLRdL/DAkyXLSfAWKiq6rRIkEZeRZwWw240c/+hFxEHH08iUPHtzn+PAFT58+5ubNfX75y1+QJBFXlwL9D4cDTN3g1atXdDodbt++Qxgk/PaTT1it1vz4xz9mOpvx+ZdflO4mGX/84x8Zj6/Y392h3+/z/OkzuQHzjMlkWkvXkiRmNpvRcNu1+PtXvzpCUaDX75S7Rgm8sUKL8XhcdmNanfztBwFxkuE2Gpi2ycoTVHwwGAiAZkkXNBgOiWP5eRVQ0+926Xa7jMfjOulJURTiMgVdLQ1Jw7VfU7CqddB8Pme1XBOnITk5rWaDi4sL1us1gLjm2rYUVdctR70IwzLp9XoMNjdYLpeMZzNm0wWGoaFgoaiFWPqUrjO+nxKHEa12k43BkG67Q7crluZpkhDFKePplLUfEIcRzU6bOPAJYznMLsdXHBzcotfuYJjeG9uwb3ZmNSv/2t9f82KlcF0rTEVeoCivnVvTLBO/7mtF8O3CdR0UeEP/WXYEWplnWbWvRSEiZFkwa7UtN4BRFjvbtlEVXfL8CvnelmnRarXoD/r0e30azQanJ6c8fvSIs/NzKAqGoxHbW1v4vs/JyTHtdgtFKUmbteSqLKq6TqfdkgTnosA0TGy7ygyIUcqC2+v1MA23vmAqy5fZdEYd4aXLDkouvglHR4cUikKW50wmM3q9Hgc3b+KHAfP5HMMw2Nnd5Ve//jX33n2Xre0tPv74Y87Pz7l37x6Pnj5hMpmgFAX/+l//a6bTKa9eveKj73+fTz/9LbPZjJ2dLQAm4zHbW1u4ro2pq4Thmt3tDRRFJclysrRA15Sy44rQVTnkTN1EURXSJCCJxMJYVcTrXlEgJxWJSbngVhSxUMrSmKJQyfKEKAplPE0FbZ5Op1LwbFucPLQ1TbdNGERSmJOopOqIP5ZlOfT6W6iaQZYrBOsVx0eH3HvnDk+ePOX87Iwf//jHzGYTvv76ax48uM+HH36Pdtvlq6++QlUgDkNevnzJw4cPeefOXe7fv8/J8QlfffUHHv7hIR/94Af87Gc/44svvuDJkyfcuHEDVVE4Ozvj1q1btNtt5rMZo80hDx48II4jnj17xnq9xnVdHj78gq2tbQaDPnEsLq+C/Arp/IMP3uf07BTPW3J5eY6uV+7MSom65+iahuu4sg8NfK6mU/zAJ05TDg4OaDaaJEFIGEU0bKd27/B9n+VyQRAE4vTSbBHpMWogSKiu66zWPovFgq2tHVrtNqdnp3LgOzZXxxd0++J3ZpV74jwSA1W1nHCCIGBvb49Wu818uRCQIQpRFCG7X11elmoeKfIa1WRUoCgq/W6nliYKsruQbl8XYqxhWhhZXsrrEpRCyM6KJilh3toDBZJE3Ji/u5jlr4tZUYIBSoUuIjIhvVysZwWk5FCAoQrqSCJL4Le5ZN/lxvGGRVD5p+u6aKpat+2KoqCpWuksoNdoWfUcxf43IElzBu0+vX6Xzc3N0iBPxoDDF8+ZzWa1C0HDNtE0ncBb4S3mEhCyuUkcrykyASxMTRM1g6rjWDau69Bpt/FWC9kH+RGxE0GulciOzuFLOfF3d7qYpoXvCyITR1m9KDYtnbt377K3t8d0OuH09KTuRC+vrjA0o/SDV0kz2W/8xU9/ysM//IHlasmf/7M/5//+67/m448/5vs//AHPXr7g8PCQe/fukSWCxH788ccMh8NaYRGGPkkSCZWl28Z1TGzLZjabUWQpRZ6haqArBUka0m469XVgmIbsSJJERowsl5TvAgzTxCg78SxL0VSt5gkVFGITb
RmomkqeqxR5xtpfs1gs0DSFLMul+0ozkbplPsulvGZVoK4ACpUxHzTVJqahEMUZaZpxcXbMD3/4E/ydLabjMR9+/3v88Ac/5Pe//y0XF5c0my5RFPHh977Hxx//LaEfcPfuXeZzCSppuC22d3bwg5DZfM5vfvMbuv0ev/jFL7h79y4nJ68IAp/lQkam73//+zx//ozVYs7FxQVhGTVYubC0Oy3iJGS1WpAXKXEccnV1KWJxy+Lzzz+XqL5el52dPWzbLmk6M5aLFc1mC0VRiKKINBM5XTWhGGWsnWEaKKWcSCvBt9PzM5arBdvb23jeql716JrBcrnk+PSEleehanL/xHFMGAR1WE6apNilw62ma/U1KT5uUsxcV5B3QVBXrNZigW07NpPplJeHh9jlcr/io+nXLOo1TaNhy2hplkW8KAqUQojrmq6jqCrNdof1OiDNC1RFuGXz+Vx0p8sFuqaQZznqNXHmN+RM/8v/+D/8+7d3ZG8XpLc9wKoZ3LSt0qco/9bO7Nseb9v3FBQ0W603UEtN01CVMtGm7NDCMKxTf1ot8bPf3dnmndu3cV2H9drn5OQVz5+/5OTkFcvlkjiO6PX6RHFAGIRYlkm/36fZbJDnBVGwLqVRObqqYhg6pm5gGgaOY+HYLq1mkzTNCNZrAn9NlgqCaZZe+JUn1mq5Jo5jsgpQUY2a1nBw8wZFUfDq1SvSNCEr3Rtct4HjyM/oD/p4nsezZ8/46KOPePzkKYeHh/yXv/wFn3/xBU+fPqXf7+M4Lmt/zWAwxDRNNkYjNE3jD3/4A9/73vcIw4DpdEK/1yXLxWZmNBrRabVqhxLT1InjUCLBoojVaoll6hi6iqaAXTLCDV3FNk0gK1OqwNBVQUGzBCiwTQPXtjB0DZS8HClFwqZqAg5URGgxCIQojksgwCDPYe0HeGtfCrou2l7T1HFch2arwdbWBo5ts/Y8hoM+a29F5PuMBgMODw9ZLOfs7u5iGAar1ZIsS9jZ3Wa1XLK5uSEmmGWI8vn5OXGUMJvNMQyTMAq5vLzED3y+/PJLdna2+cEPPsJ1JBDlyy++4OjwiIMbNxgOB1xdXdUopa6L/32v1y0leiFFkdccSyjqgGfZhQW8ePGSMIiwLIeN0ZbYF1mSRRmEofi6aRp+GLL21iSZqGgG/T5ZkhKFIaZusFqtGF+JiL3f7zOejIlK6k8cJZIiFUcoiPHozs4OUSS7seFoSJbJyNxut9BNuU7TOBFL+NJGqEJD4ziu1zYFsq+OkhgFsF0HQ5eiVNUAw9BLfqSDaRi1L167vM+LkkC8XC6ZL5ZkikZ/MKgBBlVTS+v4vFQRIWseXWgu//bf/bffLmd6e2H/XSPh28Xoumrg7SJWfZ9/0rqnRMfkhJZ8gMpvKS8LRcUUbjabOI5Ds9HAdd0a6n305GvSJCGOMoHFM5H0qIqOpqssljMgp9Vq4DZskjSWYFJFo9lyyTL5GoUMpRCPpbQoiCMVXVFJoyaOaZE4Dov5jNXSE76MLfq2wUCkInNf+Dqj0Yg8zwl8iSO7efMmKAUvXjzn6uqCbreD60oU3Xq9FmjasMiygouLCzrdniSlqyr/1b/5N1xdjXny5CnNdofBYFB2Fk100+Do6Ij33rvPF19+wa07t8qg4TmWZdHrtFEUGctVBRFdhz7NhoOqKhSFQxrHKEVGy7XQdVVkZaXrqaZpuA23DKiATJMTU1Xz8tBJ0HWNJM4psogsk6zOLBP0WzdNcQ0tChRFVhcyUkkUn2GItY+qJkRpTuL7kGVomYapGNgNm9FwJJF1mmh0/bXN5sYATVG4ujhld2uTG/s7HJ+d4zZc7t+/z+9+99t6bFv7a0bDAc1mk6urK8m2HI44PTnD0A329w/44IMPUFSFs4sLNE3jyZMn4qHW77G1tUWe57w6POL45JiD/T329nZJ05STkxOWyyWet2K9XuI4bnX147oN+v0+vu9zfHxMEDSvoZKiQZxM5rJrjWMGnT5RHAv62GkzGA7JFMSoMQxqNQbILsssl+DdbpdWu8nXX3+NqsLejX22traYjGdcXl3Wi/rBaINOp4uq6qxWq5LmlNWuNoVaWntnIp2ySkeVKjt2sVhIarphkObiyuuVz6vd69BouhRFThqndcZFs9lEQ7rB4aAn+QBOkzzPicrGxPM84izDaKlohoXjNln7Yf28iywVSlORErcb6GoB+Z+gZryNWl5/1HyuCs28NiYKz0jQlJw3vcy+rfi9/e+CZgp4EF6Deq+rAFRVpeG4dRqMaZooUI8L6/UaDcmglI6olC1pOqZZdZPyxkVxiOIXOI64lWZ5jh94OJYulkC5dBtZmokTZhSTRjENuylM/labPJFTQsmFYlBkAqm7rkuradZqCIHMTYbDIev1mvOLU7IsY2Njg9Vqycpb0ii9owa9Aa1mm9l0xsbmBsPRiL/7u7/jF7/4BZdXV/zN3/yNtP0lYdFxHMaTCZfjK95//wGKKojbe+/eYz6fkWc5g0GfLIkZjTbQNZXpdEIQrHEcG89b4TgWtmUxnguhstl00RUNXYUkEHWCrqhoFBiqQqGrZKp0TIZhoBY5pCmGrpHmMVEmjr9FkaGqClmeUBQKRa4TlTexZZmyR5zNsWwb23Lw1mviNKNQVApFoVDlIwcUVaHRsGWZPz5HHW3h2gZFGtFp2gSeyWo5Y393lyhNWCwWbG4MGQ6HHB0dkecpt24ecHV1wc9+9jM+++wzzs/PicKY/f19JpMpJycnzBZzDm4eUJSk38PDI+7cucV8PueLz37PT378Y+7eeYfz8zMZ0Ytc0sHLgp+kMZCh6aDroi0uyFitlkLUth3p3Mt0c1Bw3QYUEPgRq5WHkgnAFceR2Fh3OjQaDXq9Ls2sRaslNJpeb4BpGCiFOItUe6iDgwMWixmLhfiwzWfCQ+t2u7iui2HZzOdzGg0JCT6/vKDVarGzs8PxyRFaptFwHRxT9KD9VofpdMqyVKB0u12uxmN838d2Hba2tvDjqKRkJTScJrZtYWiSt2mU10kcRCLFKrNzK2DMMAw6nQ7tdhtF1zm+mrP0vFLUn4sVUJ5haAKeLJcLup0WKpI1+53FTM4SGfcKBKWqk4UVqKRE16oRIAvdrJDUYZQ3fcrekD1d+5o3illJBnEchyAMZXwrBbbCjpc32HVl/1EhQcU1s0btWviqqpTk1rwgSWN8f02aJVimg2FqJcM8JQx9TNMAVIoshULGJgClyCmygiIryJSCVNFJk5gsEZZyw20IVyYr0BSNRqdHkgudIs8kVNb3fbrdHjvbe5imzeMnj2k2BWJfrRYlaCKJ04PBgLW35tXhK0abm9y5cYMnT57w4x//mCRN+Prrr7l58yZXkwnDXo/9/X2+fPgQRVXY2Njgz/7sz3j45Zd88MEHLJYLojBkd2+H8dUlnW4XTZP9oa6V9i2qdMGGpjLzPFbeChVoN100VaHXaaOrarkTUsiLnCyJiYKAwPewHQfLMDB0jURVZFdaOqMYloWlmuiGSZxIHmSBQprlYuqoaqwXS9ZrH1Sd9dpnufQI45ik
yNBNg1azQ7vTQiVHUQrC0BdqjWUQhh66qjObjUnjjDQOeXX0kjgTkfzz589oNxv0+z3G40tA9ntRFPPJJ7/lxo19wjDkyeOnRGEiuxh/jR/65EXOjYMDrsZj0jSBomA6EQfX3/72U24eHHD3nXfw16vakvv27dv0ej3yPMEwVeI4Iori8nmHLOZLXLdR8i/FX2wwGNHr5nS7XcIwxDBs8RDLUtqtFpPZlMViQRTHrNZr1t5aMk5VlTAIcEabKIC3kPi5IAxYektMw8APvFoWWBQSpqMbBr4foKdiCmnb7hv296oq45yGGDZW+7Jer0cUhnirlRiQrtc0Gw06nbbQJaKoFr6jFgRBiOs4OI5dht6k4iqcSKd2fn4uul5X9NVco2wVikQSnl9cyFSgafhrjzgKGPZ72LZNQihrB9tEV183XN9UACiUUQHygSKfUxWFQpHbvChtjOBLOQAAIABJREFURKpuqihK56lUFPQoChpyqip5QaZKiGdKjlpQ+41V4Z6FQpmGXCoG0lSitFQN3ZTZu0jlSXueV0K4saS1lMtmrZyfxd4oI8vTWrAuF7KOiU6SpuS5LlHveY5qKOiq5ARGmXhSKaXbrKLIwllFQVM0NK3Ul2UJSqHVQEeaSgfYaDSYzSdYpoFmipC81XDp9QdEsc9Xf/ySW7duo2kqURTh+x6XVxdsbW2xvbPJ1dWEPJGU7Q8/+oj5XEa8e/fe5T/+n/+RIAgwDZMb+zdwbAtv5WFbFqEfcOfmbY6PXpHGEe+++w5nJ8f4WYJhiBWybZnEcch0NqbTbtM0XcZXV5LmraksVit5DQs5ypIsRkkhL8TjSlEVsiQjK3RQVcTURcTGjmmi6AZZmhCmMVlRYGgahmWVSe+lCVOhoBo6SqHgByFRnIAmcWzBLCIIIwk6KTJcy2Jrc0Sv2yWJQ+IwIEszzKaBYjusVx6aqkNpUyTLYJ35fMKN23fI8oTlYkaj0eAnP/4Jp6eneMuAnc19PvnkE3qdAQoS8KwoohTwvBWmaXF+HrFYLnjw3ntsb23w5NFjJpMxvV6H4XDEfD7n0999ilmuDbI8Z75YkKUZN2/dYrmcY5rGNQJ5TqPRQtV0ZvNzbtw4QEGh2WyzmM+5vLxk5a2wTItGw+Xq8gLHdepFexRFeOs1URJL95IXzCczZs02RZ6zKB1mzNKkYb5cYJg6lm1DAf56RRSBmQniSeDTbLYoiowoSSXbIsm4mp4QJxkWOWkUEaoqke/j2WIfNJlOyHOZRlqtFromnXZRpDimThjnrEq5YcO20VSdMPSJfOHF5XlOnuVYlmQQCOdOqbXXnuexDiPszpCLywnr5arO84wCn16nTcN2cLpdBoMBrYZDEr/uzL4JAPzP/9O/p0SQXsuMVLF4UdTSxOy6i9k15dM15n7FUStKaodwtdQ3mP3134sS5SyK8jRTStZwqTLICymKaVq7aYjRiCKFRlWFolHeGNIdKmWhK0+d8qdpaml3XZS0jrLlLTKhmOS57OVQVPJCqBKKqgr7WdNoNF1GGxs4tsV0NiWMAtod4cmcnp3R6wq1oygK2q0mFDlPnzzi/PyUGzd2CaMAyFgsZpyfn9If9tjb35VADBVmsxU3b91BLR1v/+zPvs/5+TmBH7D2fNZrn4bbYDTaZDqZ0m13GQ6G3Dy4SRJHbAw6+N6Cs9NXOLZFkUlgsWWZ4ienSHsfRiG6YdDvD0rScYpmGPiBz9pfo2oa85UsZKNEwieiNCUpCuIsI84ygjhi6fmopoXbaDFdLAjCCNOyaTRb6KZJnKaMp3PCOCVHxQtCwjhhufYIIklEz4E4TcnJ0fUCQ8khT2i7DnkSkYQh7VaTVrNNu9nG0A1syxH5lKLiOg2W5U00HA0YjgZYpsFsOsWxHFRF48XTQxazNQ2nQ9Nt8Zvf/D0P3nuPmwf7HL16jh8sgATTNIijuDRIdNne2mBzY4SmqhwdHomkL88xTYur8YRmqy1KhThmONpguDEiimPanQ6379wlCGMePvwjhmkzGG6QZWVuQCEp4IVSsFqv8MM1mqGJj99gQBiFHJ8cE0VRDRiEYYjruAz7fQLfl5DoomDlrSgo6HQ7Qr8wTZI8x1uvcWwH2zbRSl/BIPABySloNJq4jSaFojKeTlksVxiGzmqxoChyDFURP7kSVU8S8Rx0HUduoFySy7IslvfHtUmiGNdp0Gy20FVDJhingYJKEieoispotMHt23fIsoyjoyNZbbRadVHw/JhOp0uz4bJeeaRJTKfTLr3tFB689wDLMknKIv/f/Lv/7r//1mL2v/3v/+u/l9TNqohJAcuLQuxIyiJRM2Xf+vhmhPCbjz+FblaOsPWYex1NvfZlRVG8ll2VELBectNEO/q6ba40pG8L6CuVgKFqpQSnsqCp9nwSpaZpmnhJuQ1c12HQFyKsqkmYQp6/9mlrNBtC87Aldi5NE9ZrjzAMiOJIsginYy4uzvF9n3feuYNpmXQ6babTKYvFiht7B9y7d5/JZMJPfvITJpMJs9ms9pOXPAGbMJDE8K+++grHliDiV0eHbI2GzGZXeKtlffEWRY7rilBc07Q6C7Lf70sqUZ6z8jxJrC4KwsBHUVVBv1SNJM3IigJF0wRd03Vs2wVFZe2HZLkk60xnc6I4rlHZNBNL7fF4ytLzSbNckMwowvdD0kxO6yhJyIqcNEkwVHBMjVZJg2k2GqIySFICP2C5WNVhvvJ+KjVPa7Q5KjNL81p25K9Dmo0O7957jxcvDjEMk2azXaJnc3Z3t8mLhCQJ8APhiZm6hes0UBRYLpZ1QakW75ubm7XPXlEUHB8fM5vNuLi4II4jbt46IMsylkvJFVh7AYZhcnFxKUG97Q6GYZYdesh8PmO5nJPnGZqu0XCb7O3tYVkWnleRuFOSOCZLU9JSzjccDhkMBvT7fZEkrdfMFwu63a4YGMYxTsPBNs2yG7LEmXdrmyiJWa8DVFPHtBw8f4238lEUhXbTYdDroWminVQV0UyLkkOpSeOu66Agtk5qOVF53grDtFFVvQTzomuouQQDV/vC6XTK8fExSSJjflEU5eSkYFm2NDdlbF2n06bpOmJqECegFDTdJu12i1/+1//229FMwzC+4b1/Ham87oTx7cXq/18xqyvNtf+36sQqaVWe55B/My290mrCNZ5b/mbwsHwbhest4nU5laqUZo7Fm0Es1R5wtVqVOsqSm1O+UbZts7O9TbvZIIp8Ts/OmM6muK7L1tZObQPuNFzGpa9Zo9Hi2cMvieOUu3ffxbFdpmORJP3zf/7P+fzzzxkMBjW4UhFtNU1jMhF/t3a7zb1798jznP39XYpMnD2HwyFpmpYqghGu63J0dIRt2zVXqEqU98u4PF0XrmCoG3iesMQVTSXNM7IkwXVdFBRUVcGyHVA0Vp6MUoqqEiYxSgFRknF5NcHzfVAUkqxguVwxnc7FItwQmZteCqbJcjRVwXJtmo5Bp2mXYcPaN9432TkJf8kod0BRGJe/S4qiqsRRzDSZ4jguD7/8I5eXU+7dvU9R5HzyySf86Ec/4P0PHvCf/tP
/xWx+xb/4lz/n1auX9PtDihysRotup1+7RSwWCzY3N7lx4wanp6c8f/4cVVXZ3RUk0/O8+mDwPI+/+qu/4sGD99jY2GZ7ewd/HXJycspisaDT6ZRmBxZg1VPIdVlPmr42VTBNAUqqQztNU5bLZS0pyrKMra0tzs7OWPt+nbEJ4kADcs3mec7GxpBev89wOGR56Mm13HRpt/q0Wi0mYymoRolSKqXLi3aNwC73l0xmiqZiWBZuuZNTVZU4zRhPFoRRUtM3rv+OjuPg+z7Pnj17A9xYlfu4ra0tklzn8dNnTCYTcShxHObzOYGhMRj06lBjcTl+HU357QBAueSvitfb/+1P0Ssq3/3vLFj/VGemayjXqBw1peOtr89LMu/15/f6uSlvFLProvO3f5830dTSelsRpnL1+WqmT5MUpZB9F3nOfD4nz/PXF/Law1stMA2NdqdNf9BHVYVXlRcFg06Ho+NXdLpdHMfh0aNH6LrOYiEM6M2NLc7Oz9nc3Obzzz/n9PSUZ8+eiV41jlmv1+zu7tLpdGopy+3btzk/P+fx48d0Oi32doa0W22yPGU+n9eHT5ZltUtBlaY9mUwkr7G8+IsCTFP8ysbjsYRdFNJ1+X6ApunohkEYRqD4aJqBZdsSzlHuhrIsh5WEZqzXHrpukCQZ63VIFMcYho6iSNpQaVeASk6RQbPRoNduMBi0UYEgCMXZoShwXQfXbcoyfbEqgQAJ43UcR9CwTpvFckWaZaxXHklS0Ol0mM9XfPbZ7/npT/8LFos5r14d0W7f59133+X49JCHDx/ys5//nMeP/8j5+VVtRul5Xn0PXF5eChl6d5d2u11zyxRF4fDwEM/zymIT1fyz3/3uc1qtNu/ee48HDx7Q7faASvcKuq6UNvIauqESBAHz2ZzIjzg/v2A2m5WLerteu1y33RL/tJjBYFAXvHa7LRzMNCGOBEiL/HVJwm2hluaObwcEVYUzKK2mPM9j2Ouyu7eHY9scHh4yn80ZDgdsbm4RhkHNAy1yiWmM45jxdIKqmgD1wVlRrHxfcgyqz7muW/PzKmuiOI5ZlK97o9HAsk3m8ylnZ2e0my79fpfRhki5JpMJURzW9/I3iln1g6/nWFaPGnH4EwVJ+aZ70BuP/E98LZTTqqK80QEKQ5haNP5tz0GeKyUt5E2+mxS1b/q0XS9kVTHLsgyl9J+vmNeqqqIWan1TBUFAsJYLpF8iLOJ0u8BQFTY2hgw3RvVplSQJ3lpY73rZXjuOIxIsTUXTddrtNpeXl/yzP/9nNU2gKAqiSODsTkd4ZRXZdjwek+c5rutyfHxMo9FgNBoJqmToTC7HpGnK5uYm6/Uaz/PY3d1lNpuxXq/rHcx6va4zMUVZIfItzTDQTBNDVclzmeot20HXDRYLj5UX0G53UBSNdSDfJwikoDluQrvVRjOckqu2ZLn0cBsu3U633CPIXjPPc6jci8kxdXEVNUwRsnueV3YhRdkZiNC9KmZ5XtDtCAcMRWE6m5EkCcvlCgWNfn+IaVicn1/xh6/+wK1bt/j8889otV329/fI8pjJeILjWLz33vtMxr9BU+WGv7i4QNd1dnd3WSwWzGYzWcCXHeLFxQXD4RCQwjIaDVksppxfzPnzP/9zhsNNnj17znw+ZzyeSCp7yYcMQzEONQyt7H56tS15vyvGnp7n1YXFNE3yXEijlZddFQ5S0Zgqhr5lWZxdnDOZTCSwxbFxHAdVVVkuFrIf1YVsniQpszJtPU0F3W6YDo7rYDeaaMZrswi1PMxsx2W5WuGtVkJgdRwa7TZmGGIvlnQ6gzIs26XZbLJYLOppz/cl43NjYwNFUQStjaJ69fH48WNmS587996l2+1ydHTIbDbDsky63Q5pGbsnr19Uk9LhW4pZ5RB73RH2uj/Z9SLwbY9/ivH/Tz2uk2uvi9Mrt4z6ZxcFSrUXq62GcqFKXBszK8Dgux5vWxAVCHKrlfIsy7LkJs/lxhPhrcKg12M4HBJHYrsThmHZjQ3odLokScLp6Rm2beM4ckItl8uy65lwcnJCs9livRK/KE3TuHHjAFVVePr0KbPZvMw7LEqE7YybN2/hui7n5+d4nsft27c5PT3l6OiQra1t5vM5s8kZ29vD+gKvWn2gJCBL9xWGAUUh6HCv1xUpGkLcXS0WqJqK7wc4tkOcpBR5UWZnZiRpih8EWJZDkiVMJzMmkwlJmmHaLlGqEETCMQsDnygUUrJafagQlNF8pqGjYWAaOpauk+cZi+WSfr+PXVo2K4pS5ohCt9urxftJktTd5mw2kzCZqytQVC4ur7hx4zbNZpM8h26/w9eP/sAPP/oRu3s7oorodxiNRlxdnXN6es5oNOL+/fc4Ojyp5UPVGuG6j5dhGDSbTcbjcWmlI9dG5QbhOC55nqEoYuaocEWSpGxsbNa7LZGHRagqmJaJriu13Gk02qjdXB8/fsx0OhWDT0v88KqfX42gQGk35AuSWFqlh4GP4zhYlllboCdJwnK1otPvYZom87XHaukz95Z1qEyWZXVq+vHxMZZuoOl6CUQUXFxccnl5QegHWLZRd3imZdFut7Bsm9VKVB6VVKrX69UC9el0KqTbsnj75XhcdZ3V+H58fMzV1SWmadJqNRiWhOfj42N5nU1DTCfLxzeKWZqm5Q3o1PqwIAjqN7W6Mb6zGPH/fcwEcd64HohSf74oY9vfCkuppCLyQuQEcURRvM4pqBb913+s8N2+netmWRYoeX2BVg4e5EKIzbNCwIBGQy6G2aI8OSyGw4Gk4TjC6TEN8XP3Vj5BFGGaNkWhcO/eu1xe9siyjAcb7xOEIbblsr29w+9/95kYGJo6ilIwGFTuGUMsy2AyuWI+nzIYDLh7907ZHQxwXbsEGyICP8CyHVarFZeXY9l1KbJnkwKty35JUTDNFEXRKAqBXnw/IIoTWq02eQ5RnJYGiTn+OqAAkiRD101BP0NZxguXzEDXTXw/ZLlcY1oGWZpi6hrtdptmw8V1HLIkIULQbdu0UC2RO5mGThgEEMrNpKlafQPHJbAgO6G1hLr0+wwGshucTqd1hxHFSSkpW9JoNvH9EIqCu++8Qxj5PHhwn9/+9hOxBFovcFyXra0Nzs4u2d+/gbfymc/lkFmv18zncw4ODsp9Vspisag72dVqRbfbpd1ulyafTbz1gqurMbbdEMfXTOzgpSDL96j4cr7vMZ1MyHIRozu2wxmn2LZDURT1qGZZ4lpcdTeVa4xhGLXAXVHEJUZVxVqn3W6LDVcZBVd9Pqh2jHFSri/C0koqkzDqLCVOUlgHsnIpf9c8l1GxKO8LVddI0pzx1RTfDzBNg8CPaLX7dZGt5E/NZpNmyZG7urri8vKyliHqul6P6d1ul95oyPOXR5ydnQEFrVabZtOtQ10eP/oa13XZHA5qAi58hwJAGNpWXSmrE+f6Huu7HkpRUFzfcXFt7qyaqm/9Svlsdg2lvC6Vql5AXXvTqrtqt6tiJh2ZTKMVSbeil7wxtn5Ld1kAlmmCkpNn1D5dgp5Kh7i3s0+j0SDPMi4vL/E8j1arJadenLL37g
2Wyzknp4cYJfWh6h5UVWNjMOCrr75ie2ubzY1NprMZuqHzzjvv8A//8I9Mx1POzy/Y3hbXhbOzczqdDrZtMxqNWK1WbG9v0Wq1OTw85OjoiG63S5qmolvceJcg8BD1gyo+WXFSXvhmnd7daDRIkpRu1yw7W62UO5k0mqooI0xTEE1FYueSNCNJE9IsxykDX/JcoUDBsm0ct0GBTrqWrzFNiyAVNUaz0WRnZ4tWo8FsOkFRBNRxbBtFKfNUE4UoCbBcs/RJy2pTger3rygrlRFho9Eg8EMWiwVhGNJoNEGRjsS0DJ4/f4qhW5ydnRAEYk64vb3F97//Z/zDP/w9Nw72eefOXV6+fC6csCRna2ub42NZ2FcIZhUkUrHqayeU2awep+bzOf1+h1u3bvHy5QuSJGF7e5vFfMVyueTy8rI+HPf2dsuCKFZIeVnMet0eURBzdHRUX+Pz+ZyTk5M6CX1Y2v9UE1O1L2s2m1ilbX2lXy6KArvMWUjTtO6W4jL0N8/FPbpa/Hc6HbzljNXKo+m6DIdDOt22dJRxDEhj0W53UVSwTIugvJ66vTZxlOKWUsOqGao60SAIxHctEYv3YZm1EUXyvgBM53Omy2fops329lZpQU+99Jf3uFEDA9d34N8oZhKXFdQjSpX2UhRF2bLndZd2fSSsdlSapqGpWl08sqwyOxRyaxmoWReV13+nLGavgYdq9KlG3epPXdeFTV4mWldi9ySJywvg9ffPq29evO4YhbLx2gW3ch5QFWTh6ohxXbWzUhSFTlfkFuRvZhm0Wm10XQwp1+s1X375Jd1Oh0F/VIpwE+bzJWEUCwFwHdDryk4hywqWK4+f/vSnfPzx38s44Ta4f//dekG6vb3FYrFgd3eHOI756qs/sL29zY0b+3z22WcURc7Nmwfs7+/z+MnXvHz5Etsy2NvbYzjYQEFjtVpR5JJylCY5Chq25UIhF3uv1ysXuQWbWztcXl6w9gN6loOmGqxWHvPFksFwiGlaJKm8r4ulRxhJOEwcpeh6JlbilkEcxoS+j64IV8m27TqAVgEsQwT8pilAgK0bKGqBjYHbcqlsgqqOREY+IScfHBzUS/iz01NUVQre/v4+jutwfnEpNjJl99JuN3FdB8psx4uLcx48eICm6bhOgywr8P2Q4XDIeDxhvfZ5//33uby8ZHt7G9d1efz4sUh7+n0cx6kF5r1er+Tpxdy+fRtFyQmCNXkuZo2r1QpvFXB6esp4PJWC1euR5wWTyQTLMtjc2GRza8iLFy/Y3d3l4uyS09PT0h59wPPnz2sAxzCMMhFKNJEiGBdFzO07d0g8jydPnmDZFtvb20wmEy4uLhgMBjiOxfbOjiSypwl5ruBF4nzsOA6aasq9X4AfRnK4rZYkmXR1hmWhlPdvu9Phyy+/5Ec/+iFZlrKzJ8DU559/ztVEJoeqIRqNRnUx6/V6TCaTevc4mUwYl9IowzCwSvPKNH1tjW+aJrqh1oVLRPo+53HCZDL77mJWoV2VK8V1jeb1xKW3KRFVcav/LHdVefX1iiL+V6VvU9UJVT1R9XfbtuudXTXSZllWx7Bf9y8qiqKeucXn7Nutua8/3qZavKEfVRQaboMkjZhOp3VH0Gq16kg1b7kmCAKUosBxHFyngaZpBEFAURRsjUaEYchgOCSKo3qBv7OzI4EYpTvAnTt3UBSFn//85xwdHTGbzTg9PSWNE+7ffxdQGI/H5Q2pcHp6yvHxq9oCWy8XuHt7u2RZxvHxKwI/qDl31YVQHQ5RFNXi50of12w2ARErLxYLDg4O6i7NsiS2bLaY4wdh6aowQ9XEPlzVdIpCwVuvy/BeIb/meYauquiOiWmYMjIbBrqmEARrgjwjTaQLURXI0hSn0cC2Lda+h2ZY7O/vkxcF3sq7ZiWj4jgWpmmh68Kuv7y8rC1zRqMRW1ubxImMgWmW0+noMp7YJt1utwafptMpjx8/ZmtrS865XGE02uTw8JCG63J2dk5RwC9/+Uv+8R//Ec/z6HQ6dLtdfF+6vuo19n2/5lClaUKSxjiO7CkXixmglNSXuO4kwjBkOp1w+/Yt4jgkjiMuLy9rZNTUzXofWPELJ5MJ/X6fGzduMB6PGY/H2LbN3t4e+/v7TKdTnj57xvmFKEoux1d1gRiNRiVvzWC1XLL2QwzbRi+pHtU4G4Upy+WCG/u7ZEmMpigkiagQhA5VoGoKrUaDJJW09Ol0xp3bd7i8uuDy8hLHaXD48jHmTbtugjY2NgDodDp8+OGHfPrpp+U+7KoGn6rGKQh8JBFCrSkquqHWoEtV3Kp7v0KGv7WYVW11ZYBYmS5WBa2CdSuU721BuqprdYEocmmHFE2ti0XdNl17FNcWbVWxrGBapUQYr4+41zvC6xysujz+qaUdbxaxtz/ER0texMrqKI5jxuGYLM1Ik4yG26DTbpcFzqjRwUajQRhJInWcJjSbTTFMLJ+3X7K2t7e3AYkty/OcFy9e4HkezWaTXreDbkghsewGjis7vDzP6XTb9Pt93rl7W1A+U8O0DOIkJAgzFssFDdtE1w0uL6/qDhsgjhMaDZX12ifLcnRdkYwBRSkJoXaJuEmWqG4YLFdLrq7GpElGUagsV574/GsapmWTphlLz2Pti3ZTT0UMbJsahm6VnDgVU9ewTZ00EkscuaZeE53dZgNd1/AjH1VTqTIjBS3ulwTUVU2XKcqDpNFoiLYT2UnN5yJa39ra4sXLQ168eMHLly/pdLp0Ol3u3LlFnhWcnJwwnU64f/8+liVeYs1Gm8AXoGI6W3A5nrK3t0uv1+PJkyc4jsOzZ8/qgjYcDnn58iUAt25JtNyjR4/Z3Byyu/tOaXYAlmUzt5YlzUMaAjEc8Es0UezOz87OaLVaLOYLtja2xKa6PGiqDrO6HqvDyTRNXr58ycnJSYkmB4xGIxRFYWdnh7PTE/z1mg/ff4BlWRwdvazBKacpXWZCwfGrM+aLBapi0O106vsvyXPSPMcoCeJCMteJkpTZYsnu7j7j8SXT2YxXr06wLQvXbRAEQa3LBpmEquQqx3HqOtNsNuv7t56wNFXUDeXKyLZtQf3LVU0YhowGYlmUlgf2dxazStx93bHi+u6qWiJWkWkVpFsVPVV/0+Wi6tquF5Lqe73tvAGv052qjkLGxwQlfzNFvSqo9QiriotnWI6af6qQwWth+9vFLAh8dFOrT9HKU51MnnPTbcrvWnY2zUYLhde/41mJbF5eXfHeg/cIw5C9vX0CX0AU2xXi4O7uLt1ul1//7a/rN3Jvb4/VcsGjR48YDof1WG/bNufn50ynUz766CPyPOfx48d0u10ajQZff/01vu/T6bTLPZZQGuIyks6yrLqDqDru9XpdL2GHwyHL5bLkqnVotVooccx8Nmc2W4jhom4QRTHL5QrdMGihEMcJa29NnCTYtoNpmDiWiakrWIaObVslEVfoFFEYoZaGlyBFtNVqoqjiXqrpmjjZpvJ6rL01RVHUO8kgCBmPr0rAQq6BVquFoZulU/AphmkQBGGdhSm7LimicRwSRQmu64jQPsvodNosl0tWK
4+NjU2uri5ptxpkWcpnn33G+++/j+M44pHf77NcLlEUhcFgUJsxuq5bG4ROpzM+/fS3QIHnhRwc9DF0i9XKI8+L2iDh9PSMR48e0e93yQtxVW00GrUR5uamIJ8vXryoC1mWZSwWizLH8/UqyLbtGrhzXZf5fE5WCPcrjKJ6TyWNiGRUrn0fTU1xy+vx+PwMQ7cYjYZkSUr+xmJbpIzS9Uss3Gq14t6dd7i8vOThw4eYpslwMCj3hhJR6LpujWK6rstsNuPFixcEpSHk5uYmnucxn8/rIBellNslWVqrVVzXKW3JBVCbz+dVsXizdr19s1coxHVaxHVqhuM4tFqt+g2sPhRFkMCk9oR/3bG9XcCqz30boKBqrxm91f8rRZE3xlqFN9PTZbz9p8RU/9SjELM4Ja/Tn6pR0zZkdGg4zXqpGccxnR2JDYvjWKD6xZJer4dhyTg0Gm3IBbZY4DgOBQrD4QjLsiVrMs04Pj7ho48+4uLiAsuyuH//Pq1Wq/apv3fvHmma1tkFk8mE+XxOt9vl7OysTq1er9ekUUSn3aoT37e3t1EUhaurK0zTJI7jNyQkjuNwdnZW2z7bjks6XxLHMcvlUjSPKKiqRpbmRFEs6ntFQ9EKUFQURcOybVrtFm3HRFdSdE3BdmRVYJomlm3jOpLoVECdiG7awvSXZO2CLI9ptYXi8vLFS8bjMUVRlJIdmEymgp6W11yz0ayX2yCdz2RwiUaIAAAgAElEQVQ6BQoGpW+WdO4iHVoslhiGSZKkXF1dlQvomMVixXC4IbZJRUKz2QHg6OiIjY0N3nvvPU5OJA9TUOLLOuz6iy++KAGbAZdX52QrmV6CQA4NVdHLrkqpd2a2bcnrbZs0W81akpTECa7l1rQQ0zTr9626J2ezGZUapRq3KpVC9b5++vvf0esKN/HVq1fcvXuX9957r+bKXYyvGI/nGI5Flgm1I4mFaD3o9dF0MW2Qe6xqMPLXmQ2FwmQ+Q7dMzEwCqaMoQFFV3n33XU5PT+uDqEpBHwwGTKfTev9XWeM7jiOJ6es1l+MrNE1FQ2yiGg23bpwqJUHFXrCuraLgO3Zm122p4fW4VX0uLxm/1wm2FTk0LUMqro+GaqnvpPgmA/86wgilNvMttLQizVa7tAqAuP58hFeVCcDwJ8bM+ufx+ue+/hCybBQLc7qCgm3bJksy6R5UXdw8yrZ3tRL+U3WKpHGCZVls9LqsViscx+HJkyeomsZwOGS0JVD01dXVG6dSUYgZ42qx4IPvvV+TA2/evAnAy5cvy1SgpdinNKSdPz4+ptVq0e12ef78KaGi0mw49V6h4kFVXUzFKq86sjAMOTk5YTwe02q1yPOcxUKsiyrTRdHLiSoiv/Z+VSCNpolDg9ABbAwlRSWn0XQxTatM4jZQVa2MkIN2u0NeQBhFuI0G7U6HrChYziM8z8NxHDY2Ntjc3JRl7/kFeS4HZLUAdxxHbIjMcjWSpjTbLfb29piVFuXV5OA4Lru7u7hug9PTM6JICs2rV8d0Oj0cxyHLCgaDES8Pn5KmCXt7e3VS+f379+vCcXFxgaqq9Pv9ev9YBZqMhkMsW1QdcTwTXa2ilyCTXiOft2/f4uTkWG7Q8tA5Pj5mvV7XwvIsyxiNRrUsrcqQLIqC/f19ZrMZrVaLs7MzxuMxBwcHdHo9PM+j3W7X13Wn08F1ZV+q6zrz+VyKpGEwmc5IYpnCdMckzmRHVpCVBg4G4hQj/w4CkimqUkccvnP3HVzH5Xe/e8ZwOBS0v6wRYRiyWq3qMOn1es1oNKoLVLVqsG275poZpoGey/tbUWDCMBSDUNelyPJ6N3y9Tn2jmDmOU3dV11HF6t+jMlqs2ldVJ2KFMBiWSZbnZGVXppWFTKkIqdeoFeVdUf+pAHmWf6PAVYWmombkJSVd01/vEF6rFd7M3HxDoH7tH8U3Clkh9IOSqFoBENUeLfJFKJ7EQn6sjBcvLi5E5qPrbG9vM7dslosFqq4xn89rs8mDgwNM06TT6fD06VNarRYPHz7k/OKc0cYGv/rVr8rAXVENnJ6dYVsW8/mc07Oz8o1tsVouOTo6qgm9o9EIz/OYTCZiCR74NWdIqBdrOp12TTytrJQloTytgY7RaFRbF/t+SJQkKKp0XEaek6QZRQG2Jp9TNXEpUVQRnzcaLo2GS7NpYyoxGgXdXhvTkqTrJJNoMVUzATmU4iQRo8woQdF0cSnQ5HnOZjOGgyE7OzuMx2NOT8/qQ6PT6UjknWXSbDRJU7m4V56H5YhjR14U9cUuYEiCqorDrqDkcv3OZjP29m8QRTEvXx4yHHbLHaJBmmbs7u6yXC755JNP+Iu/+It6NK8S38fjMaPRqERXl9w4uM3p6avydfRJkhTXaWKaBooiB97p6SlZJkBFqyW6xDSTg7O6Fit1hqqKB11lB1R9br1e19fo5uYmiqJweXnJq5MT9vf32d/f5/zslPV6zc7tWyRJwtXVBaPRBuPppaQq9fuEWcLVekZa5DRbHZyGy3Iyk1Sl0uZdrcxWDYNCgcVqKSNx+fP9MCQoUf8sy+pdYtUxV7rMqitdLBZ1Iev2eqRJUrr0itLBcGz0kvPmOE4tl7J0U6ZC25HDq2yovrOY2WWcVMUgr614yhdY03SS5HXIb/ULy6mZvfG9FF4TWxVFeb2fv/b3qk9Sys7tOvO/+lPTNMjlv2V5Xo4jGaqio+gamqKgll7r8hwK5M7IUFApyBAXjBwKQVVRFCmQKKXThyqSp7xA1010QydNc2bzpVAJLJv+aIM8Scv8vqQ0qVRptQTVtCyDvCi4HF8xOz0pn0/Bvbt3efDB+2i6zsnpCYqi8Mev/8hsNuXg4IB//OQTVEUhtmwCQ2PprclyaHf+X8rebEeSLM3v+9lu5ua7xx6ZlVlZXT3NqVkAQZwBySH0ABKEecDhQ/BGNwIl8EIAwSHV4uzd1V1VucXqu9u+8uI754RnVFYNZUBWVkb4ambnO9/yX2Z8vLnj5uaWf/Ov/w0//PADq+UK24ZoErFer80QxrIs8QM4JBz2a+ljui7T2RwviMiyhPc3HxlEEbbrsFwLgLZqaqbTCW3bsd5tqNuOvpNz7yvKkG1bNI1sXH7gK8s0T9QqnBjLhjAMRJ008vFsOafxcIgfBjR9R50XtH0rmRoWTVth2eB4NkWVUdUFXd8JWyKUHtP9/QN5UeG5HpPJlM1mS9O0XF5c4fkBRZ5T19LTdD1f0c88LHqiQJyzbMchV8OBJBFX8mgwwHNrpfgh9oiWbXN3f8cgDqT/57tidAwq6Iz4m//23zg/P6VtxWUqCFyKMuHDxxTLfknbNvSdmIKEQcR4rD0lZaiSpgdR9zhdUFWFKaOSNOH27pbZbAI9PD4uDUawbVseHh8J1DoUrwjpi8VKgSIIAk5OT3m4v8fqe/aHHYfdlsD3sXtxQKqHQ1zXp0cMRvaHlKqqCfwB46nwjKuyZDgYEISemPpgiQ8sStShkwqpaSUuvLx+Q1HkfPf77xnEES9efMF2uyVL98xnAgq/u783QPc4jrl6cc3dwz3bnQiTXpyf
40ch9W5Lkmd0Fgy8EV7vEfkBvuNStRU0HbgQqH5dWRTkWSb97J8KZlXV0DQdrvvEcteNR2meejiOh227JmsSmgvYtktVNgqsqgx6W0UKVz2vqqg/MUTp2yOUPQJa1WltXbcqoNl0dDTKNclRllRt19NWjSKIqxLIUrJFvfR5rN4CXEkAu05pp3V0KrxZtvMkNtj1dHUPnfYacHFdX01hbYWMH/Hh9pbZZMZiNmcym1MXNcN4xHw+ZZceuHrzhRjw/t3fcbpY8Ks/+UOavuM//af/B9/zWa1WrJcrmXYul0zHY/b7HfEgAsclzUsG0YD/+uu/YbcTUOaHmxt2+z07hThvmp48E+zOfr/nZD5j+fhIWda0Xc94PCTLMm4fHnl/c0sYSlBarj+SJok4jCtg7Wq7pOskW/Udm7ZvaauazraIY8F8OZaI6oWK59c0Fa7TiZ2YbRMPI6qyJMtzLi/OZHDStnRNS9P3xOMRWZ6z2iyJB0OiaKB6nw1R4DAYxKxWa7BsbMejaTvKujE4oq7rSNKMIIgYTSYMhyMDYF2ttyY7nk/nWFbHw+MD6SEhGkRMplNGccxwPGYwaPnw8ZbNbkdRVVR1zd/+w9/z1Vdf8cWbV2z3OyZxjEWHY7kMwoCT2ZyHh3viYUQU+CxmY/Lcxfd6zk8nMgFvUsqy4O0P31JVHXXZ0FbShmnKir7HEK21rNTp6SltXdNWNYMgxLM9vMDDCwSeVKrMI1SqEVdXV3hBQKc2iqIsWakNTdADco1cegLXIXAdVsoHtlQ4uM6yKWpxn9/vE9pe+lBxPCQvStIkJfA9yiolr0pcR7J41xMHqKoQgOt4OEFYIxbz+Ql1XbLdJ7h+wMXFpcFoBmp6uVqtuPvhB95+lE2+tyxa4GG9BqBuG2Ug3FEVQp6P4xirt/BdcRKrq5qd4hbreKHL588GM50ZaXCs7ufYtm0wYPo4hkjo5z6ffuob0bbtT/pon6D7+/6Txx4TxI9fV1NBDJVJB8S+p1GqGJ5taYtFkcg9rjNtW3YXy1FZo7oJVGlqYROEoZS76pn6xNdtS1nXrNYbhsMRcTxivdrQNz1fvn7N+dk5++Qg2lxtx3qz4Q+/+YYX19fc3N1RFAUfbm7wHZcw9NVIuqAoMzGIdW0pwR1H+lfjMY+rJdvNhldfvOS3335LXVU4ts1uu8X3XKpaYCR+4LLdbdlsNxRFRRCGuJ4E+8elKGOEkc9wOCDPM4pKsiDoKNUUzrZtptMpjivZbRCKI1UQ+gpN4+EHLlEUqmxQppVCtfFxPYf9vqWqatq+p6pbXF9Ay7YrgoF2WeG5vrqOgqPSbt3TaU3fC/d1vztQ163hE/a9paZ1teKVluR5yePDUkEdTonjIX3fkxcFVt8SK05gWVXc3t7S9TDYbpnPF8L5DAOiYczJySltJ9n05fUl/7Td8PXXX7PfbaVH2NXsdlvGkxFZlvL27fe8+eoNVRXTtjXn52ccEnGAenF9SV5U3N0tCYPY9IGenI18ZYbccXl5aVgFMizIVa9wQBBFpi+q+0aC4YrMOtAOV5ZlqYGCvFfX1Mapvu077u5uaBrpP2WlcFh7pSpTty1t29Mh10PWac8g9Akcm6aJDLNgNBqZPux2uzXT1CRJ2O12AAbloCfnemhhaJFFgds0pHmmlGYiM6mNbFGUPWx3REHIy+trFmo6+vDwQHo4yGaQJMb67nlv/WfdmfRF0MFE986ewymOoQ1GXVYdx89//nh4QvzrQ/9bP0//7ng62ve9yeyeB8S2/VTnTL/f8+P5T4Qy1eN4Sqa7640UkGVBqwKpLCahzwwCWdjr9ZrQC3BcyTCcxmUymvD6i9dcXl3wN3/zN9x8eM/pfEaeZbiOzenZHOgJfdl1XAuKqqZS6hD/7f/7f3EdlzDwefnyBR/fv2ezWTObTtinCX0vyp9d0xD4gcALkgOu59M0FY+PdwgNSQyNy1LMH6IoZDgcSUmkJLE9TzB6k8kYqwPXsfG9gMEgNPAX13MYDhXXsGvJstRcJz2xHA1FdYVeJpxt0yNVnKMc4kUiyPdtoihmNBpSFJVyJaoJgpA8LyiKCsd21VCpoWlaw/LQCxksRcRHUZSuAIumLlitH7m/f8DCZnFywi9/+QfMFws22y2DeMiJqhb+4Z/+Ueg9sbg1/dm//DP+xa9+RZal9H3HarWkroU29fr1K9Uf7phMxpRVbmwCy7KkArregt7GsV0DBNXEch0UbNvm9vb2qG0jPcDD4UDbtiwWJ/zuu++MvJRu5AMmsdAIA61TpgNJ3/e4tkXfNYi8tQOWY0RLNQQoiqTnFBaC77IcGWC4TottyWDL1T64YHqsGgKi4T56HUorQuSHhsMhnpra6hiSpim1GqjN53O8/U5EJzsx9WnblpubG+q6ZjGd8fj4yL1ys1qtVlRVZQxXRqOR+Vme5z/fM9MBTJ9AfRMdL/rjoPQ8mD3Pyo6DzecOvSD06xgu5LPX+LnnPf/58eewnwcy81meQLfyRPl5WVUylLDFA0AmpLK7OI7DaDjm7u5eSrtXc4IwYLPZUGYli9M5wWhAkif8xV/8BZZl8Xd//7d4nsdXb95QVgXxIKSqCk5PLsmyjMuLc3744XuwWixLJsO+79FZ4IcuX335JU1bMRgG7A89YeRhWRFVJQDctm2wrI4gdJi5IxwvoKwq2rKh7WqlhfVknGzbFp7vUlctZdUQBgFRNJUFWRREYcBkMiSKQoIgpO87mqZWSg0DhsOB4vxFBpogk6sGsIjjEX3fEYYeXSdDA8uyqapa2aq12LZI9IShaJRJcG05qD6ObTlG/6ooSmVj97SgBFIyIAyECL5cLo28jg7A52fn2LZDmmXc3NyQ5zleENJ3PR8+vOf+/s7g7hxXBEmXyyW/+oOvuf/4gel0zHJ5T1nlTGcTMyh5+fIF+/0WrJ7z83PevfuBtoXxaEaSpIShZFQabH1cWuqFrzOpNE3Z7/doVQ7btqmbmrOzMzUNrcxmoXvZbdsaJQqd0ekFHUURw8mYMkv58P4eyxKOtZ6Oaoqc+Hz3NJ2sF2wBuGZphu95lGWOq9oAeuinBTzFeEU4km0rE3490dUZflMJNUwHPzHBll7l5eUl8UgyzbTIWa1WJEmiBBFCFtMZi8WCUuHjtJ6bBuE+KeT0TxhQdfzY0KR7InofB6KfC0jPs7SfC0QmgzOZ1KfSPseQi+PM7Djb0s87/qz6T6dwbvpx/bPP0ret9M14wrDJLyTQ1cooV2c1VVXTW5hRcJ4Xoo4ZSX2fJinj0QjflcljuXrgf/qX/zMXF1f85//8nyjzgi9eviDPM7quwYkCBnHAdDLEtjoct8fzLYIGGUpYDk3X8cfffM12u+HkZMpvf/Mb4njA+fkpi/lU+HnbLfQ9eZ5R1SWT0Ywei6JqcDKb+WwiEj5RRFWLzdh8PjXO2s74Sb7YDzzSVEC2fuASxwJU9DxfTaxtQ3JOksTAQ8bjsYGQCAcXbNuhrnvFf7WwLAfP9bDtjsDvqYOWsqi4u31gu9kTDSICP6LtWvJsR9f2+KF71E5QN6r
CpbVtx93dHbPZnHggsjir1YqHhwdOT0/xfCmvv3j9mjiO+fbbb7m5ueX27p6z8wvyouD9h/d89/33XL94ied55FlGPBgwm06YTybYXUNy2DGbTxnWA8PN1LJA6/WKs7NT1PiVIAiZTRe4bkDfWYxGnQmUMgDwFORFpnh6kqzLzLqu+fLLL/E8j1//+tdMZnMDpdHl3W63M25fWkjRtm1Tikqg9IkHA3olBtA0lfrMAa7bP4HAi9J8Lh0Um6qkrRs838N3beJBbIC6GqmgierHlZbOEvXUOMsybDojISbO7cJftRSwPssyrq6uSPKM7777js1mw3w+ZTKZkOc5L66ucRQMazKZ4DiO0oRbGmkiPe39WTrTsSiiDiA6CzoONMfB6XPg18+Wds8yuOOS9rhM1YHpc8+Hp5JTZ1XPy179s77vfyQG2bXiyuRYT+wEyxJjFN0j07i4rutou17co9TPHdtT4+EBgyAiU/xB27awLZeTxZQgCPjrv/5rNpsN08mU29sbwshncTJnv9swnY7p25po4HE4bLm6uiA57EjSjM7yWO/E3Xw4DCnLlFevX5ClKWfnZ/RtK9mH0+PYNus1JElLELrUTcsinhAEHo7r4bjC62vblvF4RBwPSNMDo9FIqTxslGKD/JnNpgS+i+vYBKEr4MXOwg9cgiAkCHyyLP/kmgV+iO04DAY+nuuTJClJkZmsoGtRDtkuYq/ZQW8zHI5pmp7tRuzmLAt83zcEZMlQBcMmFKxWqbf0ijgPtmICjEYj9X5S1iVpYqALZ2dnDIcjVusNXS/0oOvra0rV/Bf1DcnMxuORTFOjC/7LX78jCEQ7H+Dk5ISzszNFXevw/ZDd9kDgD2gbKePHoxm3t7dm4WtsVRzHZhKpndzX6zUvXrzgm2++MUFOB5j3798bSWmN7NcYrMlkYhazzrQGAxFFyLKcIi8oC2GbNG1LWVYsFh0TJVMkRHope9veUhuFZlSA59gyhfZ902LQYhOistF9gvXU5PfjhGc8GZt+mua0dl3H7f29aPXtRKvPDXzjOarpTYMgVEbEar0peMf+cKDIc9O37yXl/2Tdf3YAcBwo9BM1OFZ/8OcB63MB5fgxnytFnwcek039TFb3vCw9fo3nZednS109XOB5D8/G6ntc5brd1LUKboqOo0QaXUeaoPRit1WkGYckYRyPuLy85Pz6nL//+3/E6jt+9ctf8k//9E9st2v+9b/6c3o6XKsjigJ2u4x4GBL3PtPJhHuroqPB8Qa0QJ5mvHz5gjQ9cH5+wfLxkSgKKbIMz7E5WUjPrcgz4kGI74uqxHgyw098iqIiDH1OT09wlQ1f1zUMRwOiQSD8U6s3ZrwgwoeDyKNrxfsSBL4iN6wYsMZD8UPUChau60s25QmVRkT5RO7IshwF6egU8LEkSVJs2+Hk5Mw0/MuyJstSPE+I5JWyD3Mc6duUZaVIyDmz2Zwvv/yStpFFpcUKbduhKHIOyR7bdWh72O4P2LZDVlSiydY0lFXJ5dUV52dn3NzcEA8igiBiv99zd/MRi4avvnzFYBAaJRRBqYcmw5JJ6p79XiPwG1UKZ8rrYGUWuwgLjozIZxzHvHjxgiiKOD8/Z7EQiSgtD3SsjqGzGE1uPz095fr62mRK+/0ewJSCepiw2e/YJanJvHrbEteytsdzPZXhtrRFQVXU1JZFXeZYfWuyT51t6cxNl7K6hE7T1KjHHicnIPAuzSDRKrLL5ZIPqi82GMZ8/PiRwWjIdDo1JWoURfzql3/Adr2hLEs2mw2b3U4CdZ4r4YMd4/FY2RdC/3PBTJ8crZihpxE6mB2LoT2fNgI/KuuegsWPe2rHfz4XCJ+/BvBJkH0+BGjbFseyPt/cP+rDWdYT4d2UqDwpzbZtS9M2YIsGWBAE+KrJuVlvGI8nxOGTLv9sOmU+F5ec//Jf/iu/+IOveXF1xW+//Ufm8zlff/2G3W7P1fU5oe+w2TzKxND3OT87ZX/YUlXiBo3dEIQew/iEOB6wWEz58OEDk/GENE0YRGJ1P5mMyIucJNlzsjgBBTYpypqmjQjDyFCamqah61uCwOfVq1cARpFjsVh8QofxXMizvRmmaOQ/SAM6jgOi4YCqahQeKyBRva6y3Io/gHK10vdMWVYms9JClm3bqobwgtlszt3dnZKm3tE0Rz6L3ZPptEaVB0FAT29KNEA+u+eLaGYoFJe7uwems5mZ+GVZRt3UFHkh1n6Whe3YPC7v+fL1lwD89p9+w/rhjslkbFogssnCzc2N+Rzff/+9Mu8tCfyQOI55uF8yHo9YLE4oy8IoqWjtNU1LevfuHa9fv8bzPMNV1IyOtm1NJjOZTAx1rWkaxuOx6Z8d66slSSKeqr7PfD7H94Wu5So5dltNyNumI4xCFosFWfaU7eogKq0Uuda6d30slKqny9pJ6Tj4anC9VjbW6AVNDtctjel0Sl4WRkF4tVopjuyEs7MzZicL422gM8LRaMTJ6Qnr1Zr9fm+03J5PND/bM7Nt25DNdbajd4rn0At9oxnepP2pg7kpzxQd6niw8Jz2ZFkioqh//7kAeJwZfnZC+lwGyLI+GQLohWkfDQDkfaXBWJWF4Ov8mKZtaZqWLM9JVekwmy7kxHkuNx8/slqt+Fd//uf86pf/gr/6d/+Ok/MTyqxgvVoxm0ypqoqyKAl9nzovKIqU8XCETU9dVfzw/fckyZ7xeMRoNGG5TohDsTrzHBcLi9FwyHw6YTQY4CpKh5ZzCb1AOHSuKz6MfcduuxFA4tkJruuSZQ2O4+HYDqEnU6ZxPMa1XCI/Yjqa4loutL3yChQNMOHOaYxUgUBaZMJYKwhKXTVKQmhP1/VcX7/g6uoFh724/+hs+vHxkXfv3klJ4Ufc3z1iWZYZ+4fBgG7UczjssW2bxeIEx3FYLlfAUzDyPFckrHv48OHGcA13uy1xPCIexri+R1lVDMcTsKCoStNsf7i/J/DviKKIy8srgsBnNpkyiELieEBTC7n7q6++4uzsnNFoomARA6GlqSwljoe4jo/rCNQnSTIOh5Q4HuA4tsrghlxeXppJ3eXlJW/fvjX4Ta2NnyQJv/jFL0jTVEjtD088Wr2oh8Mhs9mM5XLJfi/naLvdmtIzTVPFLGjpLRiNxwILqRvyvCAII8q6ous7PNejyHKiMMB1BJz75svXnJ6esnxc8eHDjSln9aZ3dnbGZrPhH//xN5ydnXwiBKEHG5onqWlHeloL4pEQKnaLF0i/r2r1kKcwqIDb21u+++47bm9vRRhyJoORKIpYrla0fSfPb1u8ZzHgs2Xm87+fB7Cf6lHBE1FdBzj95TR/Tf9el4XPM7Tn08fj9/upUvRzA4ifOhrFMdPW9YadoN5rOpvRqilJ3QpifRBFeGpMbSFNzLc/vGUQhPzlX/4lbV3zV3/1V7x48QLLtSnKgjR1se2eru3Ii5y0qaiqnGEcCUZqKdObq8tzLi8uSbOE3/z2W5reIRyMKAtxgxqPR0xGM87Pr6Dv2G7WNI2Y1U4mM+lZWZZIZucFtu1wfnbGerPhd7/7ndGur+ua8/Nz5R
CdGz5jVdY0dUuayE5tOz2eZ9O2vSJGi258GGhC9l7JvMiuOZvO0UCXwWBAmmZsN3uTPeiJV9uKT6emsWjt9jzPjRBhGAZcX19j21DXlbLpEzu5LBNPRw2STFPRyhqPx5yenjKdziirmoflirqpcZWaxcPDiq5t2GzXJlNxHYFOaN9K13UYj8WEVqoPn/V6wxdfvGK73bHb7SlLMabVKhOHQ8Juu8dxHPb7gzkHlmqy73Y7FouF6TsNh0POzs54eHig6zrevXtnsGPag1PTq3Qf8P5epubn5+ecnJyY3pKW/9GaZ7pUC8MQz/dZrpaEoSi9bhT39+T0FKvrWK3XbNYbXry4VpJJgVJ8DZlOp7x9+5ZXr16S56VRpe06UfWYTqdqgvvOrG9NVRJfTVlPh8PB/D6OY4UnzNgniXhlKGOSuqmN18Hp6Snn5+cMh0K6185YOl4cJ1bb7ZY4jjk5PVFViRw/G8w0ePZzTf/PPc+yLAOsO86ejmvr458fP04//1jm53Pl4efgGM8+iOF79r3MLI+L3rZpsBXr/jhb7BWlqigK+k5czD0VOLM8JwLB0LgB7969w7Ec/vf/9X8jCiP+j//wH+h78Hyfqq1ID3u6tiYMAxwg2e3JsgODOCI5+Pi+mHhYvU1VttiWT1v1tHWP5wcEXoRt2ez3O+pCJlKuLQT4KAikHO1EzcMficR00ia4jk/dNux3exzLZrY4AeQ7RMMB++0Bz/FJEunBjOIxWVaIS3fTU6QVw8mAMAgp8hrb8sWNKq1EGjuI2Gz25HlNlmZsNhuauicMI4qixPcDWsUhBJTsjChVpGlKEARGqcNAYtS1125LQeBTqUxKShvfOPfoiWpZliSJLJgnOeiczXbH/XJF2/XCPZzJ9FbDDsLAJwhCuraFvqOpnsrUvSubVOD5RnTwcEhwHZc0yUWp13Wht3EdH98TWaXhMAaEyucHLm0rxs+WJXLXWkpKQz/0/X11dcXr169pmoZ3796xXq5WWMEAACAASURBVK+Va9E5//d//I+GLtj3PTuluKKhGJvNRiAOccx+v2e1WpmBg8AVSoLIBtvFdjwcLyAaDBnF0nqg75jOpnhqsxHRS5/ssOd0vuD2/kH1RG3Tz9IuWePx2ExjAQP90BPVvu/ZrpfmuhxfW1tVN30v6jSD0dBUbnme8/btW96/f89wOKS3LequpUVKXTfwmcxneGEgrlmNT9N1gov/qWB2HFiO4Rn67+eZjD70z7VG/zHAFX4M+Th+H/1exxiSY6zb/59DmvM/LkOPP4dMOT/Fqdm2EmdUO4HrC3UHLFOmVFUFvc0f/dEf8ctf/JLf//737Lc7zs7O+PjhhuXygflihmVLgHQdB8fWpa0AUfMsw3NHjMcTETfcZTR1j9XbLGZnZGWL74krUVGUuF5A4PvI+rNx3YA8L2nqgr6zcRyboqjoe5vZdMo+2WNZO0ajMRcX5xQKGNn3Pff3j9i2y253YDSUsf9hL0YSURQzHMbM5jOquiBNV1hWhYVHWcpGNJ2OCPw9+/1e9Vo6DoeEJElN/8q2Rabb20jm4LhSvk+mY2bTGU3bUFe1gFFdF9sG3/cYT0ZEYYRtO+Zm1xlBGEqzXA+i1us1h32iZJSeTIyLqqbIc1w/UI/bKMBtiW07Zqo7Ho2x6Al8aVlkaULbtsymM8KTAR/ef2TbJ/zw/Qcl4CiAVN3Q1hLQ+/2O4WhAPIxYrVaiJJzIUODFixe8f/+ew+HAq1evDHxkNptxc3PDDz/8YOAVd3d3VFWlnJREJlwT2H3f5x/+4R+YTCa8efPGKHZcXV0RBAG///3veXx8pK5rPN/H833OVDbddQ2j0cioTkSB9Kx8z+X25pbxcMj9/T2jkSj9vn37jn/7b/8Xbu8fjVS5BiZrrujbt2+NyIJOVHSSoZMfDbbVQxutjKGJ9FqTbXyExxNlE8lSf/jhBw6HgxmEwJPKdaMUbruu4+HhwbAP4Gcys2MC83FgeR7Mjh9v2zaVEVl7yrT0c/SHOX4f/bf+wloC6OeGCMfB6UdZ4rNA9jy7M4C7uqFrn2p+y3ENSruqKtq+Uxr0oTl5XdexWJzyzTff8P3vvhdlC99ntVziez5JknB+cUqgsDq+K/AGz/OIwojxcEQ/iGX20EPfdYThgJPZKXVdkxc9bV/guwMC16dvLBqrx6Yn6wtsy8JGsrqu62jqnrqsKPKKvnWIoiHRIObkVECXj48rfF/I0pvNFtfxCIMB60ZKp7pu2W53ylnqhMXijI6OJCmxrYBkX7LdpJKRej5N0zMaTQGbMBwwmy1MpgA9222BZSkVYsfC7oQYpjFQfuDSZBV1U+IHvhJJrMmLjL5vGcShKndGxn4sz2Vz0TSZphHk+26/ZzSU66rvqUE0IAhTHN9nOBrJglDSPF3bMp7NCIKQ4TCmLCTbqDVI2oLtZo2zT2lbm8X8jPV6S5YVTKdzbm8/qlaJLK66FqK8ZWEywDgek2VPi1cHK33/V1WF7/ssFgs+fvxoBAw1dTDPc7a7HX4gYpw6s9Glqh7AHQ4HkiQxGd9mszHBoO07ilL6Zn0vUCRozDRWP18+ew+IdFKetTR1iec5vHnzJUVRcTgcqKrK9MR0kNLJh/5susLRa304CD+p7PS60yVpmmdGF+44AGp4hu+LzLkeSmh1Hj1YOD8/NwPKn3Vnet4re55ZHQeWzzXgP2dF97ycPH59/Tx9Uug/xZ3pQ5+8Yxzc5zK349f93O81n1OXOVroTb9urcwdsC3SPGezWROosfh0OuXliy/49//+33NxesF8Pud3v/ktruMwXoxZzGeCik8qbKAqK/xAxutFluKoYNm1GmgKXWfRtRZFXtO2Fo4tk6imalgtt1KaRdLkHwwiZlMb3x8gDAZB1PedKH7URcvZxQmHbG+uV1EU+J7PfLagaVoznbQsmzCIGY9E5TVLS1bLDav1mrZrlbTznvV6zZs3Q7xBKAHRdRmPJ+rciS7bfr83/Sc9dZvN5Fzo3kfXtUbiZTgUHqX+f2kYR2bDiCIxVdbXJ1UlbVmWRhZGZI6eWiBN02ArDmdvuwyUC7zGpBVFyWAwIAwDzs8vaJqK929/IM8yLs7PmUzGLJePfPx4BwRcXl5TljUf3n9UOnIpr159QRB4tG1FT6fknC3iOGIwCLm4OEP4nIJne/HiBY7j8Pj4aJD72kVe973yPOfx8ZGu63jz5g0Tx+HDza2ZdNq2bVy/v//+e+NJofFqWthQBzNsh+3+jsXJgrZtpYHv2KZ/t9lsqMqCb775hs3ykevra1zHYbPZ8ObNV9zd3jIcTVku19JTU6YkesKoA6KeWup+2fG614FNwzyO11ZZlkbmSkO9tLbZw8OD8pMdqsFVZvjhWtzRtm0j9jgYDMygAn6izDwOTp/DkRwHqOdBSn+pY6Ct3lX0Dfy5IYMJXpaF1X9KRn+e4enX/ySD6+WPZdvYR5+P5303hfTv+6cGpud5WAhgt6zkJhPDjh7P8424Xdu2/PrXv+bs7IwszTgoK7JRHPPFF1/Q1BVVU1A3pRKvrBjFQ5qq5
nBI2e8PnJ2c0dMxjGLquiJPMoq8pK4qRInEpshKdc56JqMpk+mIqqpkzG550Hc4lkVZVFRljW170NtkWUFelNzfPZAcUkajMYXquc2mMw6HRGSwLWE4xFHMIIxFrTbNqWu5+YejEUVeU1diubffp1Kebg/UdcVwFBMEPmUpjliOI5tYHA+U2GRJ29Y4js1wKDfcZDIxWnjymIq+F+9I1xVV0aapyDKZyOopWhwPTVDW91AURZwsFgSB9M+2261kmJFI/ri+q+5XMcA5PT3FYk6g8E+3tzf4nggo7vY7JpMxYeix3Wx5fHzE80acnl4wHI64u7tlPJ4SBqEqL1OqqiDNdJAWAUisDse1OT87l5ZA3ys7P5lWhmFoLAGDIGC330slYtsGkDubzcjynIuLCyxLzE32e1EuHo/HxsW7KApTmpZlabTAxuMxo8mUV69fMRpPyTMBD9dKiLKpBDUfDyJlqqKkomzIsoQoWAhUorfNsEW3fHT/TNOTNNREczC1THvbtkJUPzIsdhzH4OXyPMdW/UnNIgmCwJwnzceM49iIOmqxzpOTEyxLDGIslLnRUb7yz2ZmzzOknzv6XsizlsJsydPEHfxHpO/+KAD9xOT06e+nMvW4/D0OZJb6f8dx6emweuhRWV4vIFesHrvraTvxbNSlgOsKSLBtGuLRmLKpyHZ7wijiF19/xdWLa5bLJX//t3/HH//xn/LDDz+wXm64vrzk6ze/oCoK3n94x9nZCcNhTFXbZJkgq6UxapFlKYfDHsu2yFKF1K4KfCVW2DQtgR8wOz0RhY6iYhAFDOIQR43h0yTh6upK6YG1HPbS65GdzKFpOtarLZblUlUNy+WayXiKY7s8PDxiWarkjWKSJOGQSIM7z3NTRgwGA3xfkPwSOAZs1hsFxRAxynATMBwOaDstkS60pcl4xng8VSBdW6HXHS4vr5jNZtzfP/Dx4wceHh5VRuZRVZk4aTetKmc9s3B6xSHUJhh6Z2+axlj/7XZ7JV5YMw2njCYTcG3Eg6MlDH2uLs85XSx4XD7QNg3r9YqReq/xeIzrCU0NWxyvttucw2FPGGoD4oKTkwU3tx8py0KVRg2OK9ctTRMO+5SH+yVROGS9Fpma9XpjFnjf96RpxnQ6Ybc/YNk2g9GIrmvZ7vYkecZ2fzCepo7jcH19Dcj0brEQN++3b98az0hNdeq6TriuqueW5blRroiigEEY4nkucSzBoyoLfvOb31DlBVVVEIURvh9S1jUX51e8+/CB0UTwee/evaOqKmazmcG2aTHK6XRqICSHw8FknEUhfMrD4UDTdVRNQ6KyrI6egeLX9r0ocGhf3iAQ5ECRZgyjAbRSfrdVjd2Dg4WDxZ9880cmMB5XjP9D08znvbHjQPKcduTaTzehEWO0bZxnWVXXi1CihSWCiS00itPVWR12b6FdiQTPJjAHz7F+pKwhfTZoe+G7tRxnjqhsTB4zHI5Zr9d0TcMgHoHlkKm+zEgRnuum5c0vvuYP//APWa/X/If/8/8C4M//7M/5/rvvsLF49eJayp3DFsuymMwnuK5DmiVk2Z6u6aBvWW+WdF1DEPoMRxdM5xOwW1bLNZZt4QYOeZURBCH+wOX+/obzizO82GM0jPBcC2hJDnsc2+HlixfUdc3NzQ1d1/HFF68kY8gysiwlygIGcURZSX/lYbkyZYnreCRJaqaN2e1HKXVKMXFxHIvFYmY4mfvdhsfHR+nZKHBkFAzIsww6VMDp8f2I+eQEGwfsnqLI8TxfKWx0io7UK9DkjLpuqaqSum4Yj6f0vaXEEzMury/ZJSnbg+z0kUK/A8bdfXvYMWgHWK6NH/m8fP0Sy7KMq3hZC+i2mQwo8x3r5S1dk1PkpQSAYcTNzQ1lUXF2dsYXr74kOez5/fdvKaoSP3BI0i37Q4dld9w/3Kk+TcnFxYVB5C+XDySHjNPTU3bblIf7NScnFxz2KcvVkizNCEKf0XCMrVyNlustVdNiuy4np+ek6YGOR/aHhLq5YTwacXNzJ1pgQUCWCe4xywrluxkq2Z2DrIlOwNJxPKSuGm5ubnB9nzQTeeo4EteteCgB5OOHj+Jf2jTMZzNse2rWcVmWZEVBh20yQK1+rHtX8/ncNO11uad7fnpd9/QkeYrlWGRFRtVUWLaF7Tn4TkBaZBS1VC9XV1fGE7brOtaPK7qmJTtk9G1PmVe4tkddVXz3u++5uLigLhtc2yP0pZXzk8FMHz/Vj3r+7x9NDHvrR489LiOf996eH23b0tF/UqICUgbyRDA3TIAeg+a3LDnJvfUpE6HvJROjR2SFr6+JQ7mhtV+k67q8ffuWi4sL/uBXf0gUhvz6179mv99zdXXFIIp4/+4dRZqZi1tVFbZlGZxPVmREnisuz21GVYmRqlZ6kJszoe97JtOx6SsAtH3LPtnhey5lKen2ZvuAZbXKMSnEcVyyfE/TNvQ0FFXBcvnA/nCgLErKMmc0GeKHFwaHpCdwmn6ip0L6vW1HfAkd16JuciyEFiTTug1NUzEYhFhWT9fV5HlC23ZE0VT1yYR6JNSfhMEwIoqkqb1ei7BiWZY8Pj4CYq93cnJiMFJaG2symfDhwwd2+z1BJDJLuh+j75uiKCiKQhr3atCkKUa+75MXGVii7LpcCil7PpuYnlOapixXj5yennJ5cQkIlKisKpbrDUVV4XouWZlzOGxFwcO2cd0pDw93RJEMUxaLhZJNwmj6R1FMVTWkSU4cD2maFt8LFOBTDErEV7RnPJlQNY1Ikzsevhpu9EVBlueUWW5MdE9PT9nv99zc3PDixQtmsxkfPnwwDum6kd73vZGRn4xisSFU5bbnuxR5zoYNdVOLMoaS0NGZsGV1NI0AyBeLBX0vUlQaP6avgeM4JEli1tWxTZwEw5TpbIrl2KZnZvrvTYPVd/zpn/4ph8PBQFI05ens7IxxPOT737+FroeuJw4jGtdjW1bURUmVFwTTmXzmMKKLfyYzOz4+hwN7XhL+6HH8OIj9c8dT6ak4oH1H3z71xCxLaEaWJbLZn2im2dZRdqiC3bPvoF4aC3hxdcXt7S1t23J9cWmcsbuu48WLF+ZCLpdL43kpmUmn3M4jY/axWCywHYeHhwcGgwHX15dEvkueB2w2mJtMI7617pMQoy2SJDGTqPF4zHw+YzSOqOuSouqx3Z5DtsFyG6o2oylbmocS1/XIy4TtbkuW79VUSAYbVSucT7n5auq6UIOCJ12t6XRqvpdtQxAGuK5FmopzUZ7n5HkGVsN0NuD0dEarvv9sLlle15csZqdE0SmHw56myRmOBnSq32VZlqGp2LZtJnlaTkaXs7e3t2w2G66vJdN9XC05pAdF2ZLn6sXTdZ0pcQBDwNbN9KZpWC6Xprk8GMiwoK5bikIc5bebLbc3d0ZH63A4GPT8drslHojNnAhPhlxeXjKfz3l4eABibBspsRXhP4oCLKsnDH2KwqMsC1P+6aa5RsG3CCTI9Z+mirqhro9DkjDwA9MLu76+5uLigrKUrNB1XSaTCZvNhtVqZUClk8mEeBBjObaZ7hZFYShOURQx
HY358ssvWT48fiK1BRimQVmWYLsGs6b7YjpgeZ7Hy5cvzRBHb0Zi6FKT5smTMcmRe5LGzTmOw3fffWd8T3VfbrfbsVqt2K03nJ9emiRGU6R00ARRrdW97p91Zzo+fgpp/7mMzODIrJ8WRPzcwOCT36Mys17EET95D1VO8uzzHI++bQscx6KzPiNdJCJOrNdr4jgmDgWEuNvtcByH8/Nzrq6uSNOUd+/eUSgreUHMZ+RZjqf9Do44qsnhqc9xfn5BmR5o2/qTk6ybnWEYGpkTvUC1qJ5w004JApsk2+F6U7BEmz0vEuX5WEmPI4qom4qyyuhp6WkJI9lB67qiKBXJ2IPpbCTeg3HIqIjoe8ksZKopAUIyTZf1egk0tF2B48J8MWY0GrFYLEiSA47bsVic0DQNh8MeP7AJI4e68ciyGtvpaWtR4Oh7TNagNzWdIVqWKEjofo8mLkdRJGV0K5AEDcDWjef7+3txuFKORX3fk+eFaUi3bUuapgZRLx6iMkXt+57T0zOKoqQspMR5eHgwgXW5XLLZbKirmsALzBRPI/O1M5bnecZ1SAcN3QoZDEQDTDfJtRqslvtxfE/JuMPpyQmpYkZous5gMKBpGq7OL0wPSnufalNgrQZyDC7Xvc7BYEBvQRxHlE0tevxKDyxJEsos5+XLl8Z301cQkr7vDX4uyzLysjbfSwdbrZihp4wgGffDw4MhiTuOQ1nVxvNA9wr1OtYBTk8hNRBan5+u63CQjW+g/Bf0Y05PTw0vV2eKOuDq43+4zHw+0TzGe+k/XddJ0LF+rPL6OTjF84BmXuszn0c/0rJt6J4Mhh1l9GrbNh3Ck+usJ600U6qqF2gaARI6rktVVUynU16/fo3jOHz8+NGY5B7jauRzSV8gTRJOT09xXZf1ek2ldszLy0v2+x3pbkvb1eYm0+Nr7TWqKSti2jvh1atXhkBd1w1NKwFrOIyxrClNI+KE3mSoMpxGSesEuK7QRYqiwHXFTcj3J8IxbDTRWGcJBW07JM8LhsOIroNeuUZPpxPVJ6uo6hLLlvOrTWXDyMOyYwZxoOSDYpIk5f7+nrv7G6IowvNtbm7eU5YtQTgwe85qtcK2RYU0SRKDWbq9vWW/3zOfz1WwfDIb6dqe/X5HU0sQubq8pu97fv/735OlOeWoMouNHjEPGU2EvHwhfE+Dv6pbzk7PDfQn8ANeKh0z3RfSzWTBteW4tmtgCN9++y2e53F6emoGE2mamgnfMVlb/ztRtB1d/kZRJEIFrqP4s72UqknCWmng64Cky+sgCLi4uGCz2fDw8MB0OjWO4Bpvpqldeh3VTUPbNQJA9lxDdNcI+10jJHbPkZ4ZR1ZtJntsOxO0nvfHwzBkPB6bDaiuawPT0CyAjp7dfkOlhDt1RiZtlhDf91kuVzRNa7JWLfKp1WvbtjMbmB5OaVpe13VcX19TFIWZ4urjs8Hss+Xjs6D0/PFPQeMJ+3OMCfsfOtRkUw1BzSGQjeP/f6Z4oTMzLHq6T+AY5jXk5Tk/Pxe0tu3w9ddfc319TZZlfPjwgcPhYOSOfUVozzKZtnWN4IToJCU+LqN09rbbbfAdC89zTK9Bnx+d8mu8kS49AbM44jiCvjdj706N94OgNbiatu1kYttJYB4OZXqWZZLtnZ+fKxyWEL+jKFI3hvRDgqDH8wKAI8lhy1zDMAyYzycqc7PUjtkQx4NPpJIHg4jRaIhtWwZh7roOddPj+4EpATSO6PXr18YnVJcvOhOYTqcm+EvWdzBQBsk2G3rlvuU4rrn2bduZjHc4HBLHQ6M/lucF2+0e2265vLzEtm3Tb9OTMy1XvdlsTMBt1SIDmM9FzcP3feOBulOSNDpD0Atf+8jqvqScD5fRaCR6ZrZFWVUUZclytaRHpLt1mXa8zpIkMTQhLRGtTbePOZk6Q3tyK7cp65KqKonHI3W9gydl2l4yneXDo4E36CAspiYxQRCAShB0j1KDYkW12DfBXytY9Ko91Pc9nu9hW7aRKdK6dPo+lPshYjKZmKxby9C3rUA8hoMnnmxVi2+rfqzneRTqvjkcDoYHCj/hAXD89ycB4TMDgOPndV1Hz5PM9vO+2uee+8ljFA9LWv3dJxfYeu6HyY+xbj09XS8DBI4yRMkq5TH7/Z6XL19yfnJK13X89re/JUkSs9NpBoBOYYUHmOBYCpNmO6xWK1zX5eLigtFIXIK22y3j8ZDRYEQQuEbdQ/chtNIBYJraenFruRfPc3F8l7ZzsGwX2xF9sqYRWk/bWQxHEzUir7CbVpx8HAdsBz+IcFxf6YKFqlHe4nkhYThQgn6iCCuIflmIMhVrqGvR6J/NBmZz6ruOQ5IyGtrYtvR5wqBQgNcxriOTs67tuby8xrI9omhocEbD4fATDKIu0RxHTJE1IFOjvrseXE+04ieTiaKsrNSAIcdxPEXf6pUjeEgQROx2B/Jc3LH1onccVy0yi81mp0q+jrdv3zIajdjv9yYjGylhxvVqDV3PcDRgcTJjNhdQ5sePH2naisel9EexOopS7inXdQlCD6yOvneMvLUOsmEYkqTpJ6KN79+9I1Tof9/32W63rNdrZtMpdicAUw2E1WTtIAg+MQnR1YfgIaV8cxsx7dWDkTzPzQbSNxJgAs83JZ8u8XSW5wcBSbYxDX/9PXS/V09ZNSxCb9q6XJUExvrE/8H3QgI/wrE9+s5iNp0T+LIxbPO9KcXFaEZK9V1yMBLdvQW75GCYKrf3dyaj1hvPZ4PZTwW1zyHqfypLe16CPj+eB8Hnv7MAVDlpfq6f94xo/klvDJQn5tPn0H8cJfnzJ3/0xyRJwsePH03/QZN4dYkwGo1obFvG96VYa/mux3q9JvKFcqG1pfROr+WLQSZWelrY9083JmDSYp0mxwpwOxgMWK9XRLYPlktdg4XHZCLZWFU9UhYN45FP11rkWvmzdUjTmr5zGA4nlHmLTUXXOpRFi211jEcBYTDEImG/yzm0OcOhS985+N7AZDyDaMx680hyyBFxRvku+11CU3ew2fP4uOT6+orLyzGHQ8Ld3YMpyaLIoywrIDd9MV2+aI9PHWySJDEORHoxymSxNtrvslu3BmOlF6WGAmg0ued5JkuwLJskSfE8X8DITU1RlGZyJqR1KV8OhwOlMq+dTqbEQ4GdeI6n4BAZL1684OHhwVCLtBS2VgPRyHQJnspCsX9SytUZnG55DAYDXr76gtvbWyLVAJfN7sBqtZOs3Q9MFnx2dkaWZTw+PhqZcj1N1xuO67pGbaJD1E5C1Wo4btD7jmsqGNd1GQ+H7Pd7k/WAmHZrSR59vWYzcXw/5lrqe1r31LSYxDESQQ+Z9FRaQ70+fPjAfD7/kcKObYvdcNO17LfigXB2dmZ6qjp71wbEWhdPHz87ADiGUejgoBf9MXFcj1+lh/UpreFzPbLnQU5/EceR5l//7PHyHPWfZ6XvMRm+R8bTTd/RqNS7V6Xa6ekZ8/mc29tbacwqCZljkKbejbbbLU1dG+ngtm2pVMnW1Y0RHXQch4FKn6Vh7NLUDX3fKA19z6TCDw8PfPvtt5ydnZm
LfNxHk92ppW2h6SErSnwvoO16mqYljme4U48sz7Ftl3g05/J6wm67Iwik2UsPwSjAsSJsqyJLawLfoSp7yqIiSUrKoiNJ9hz2JXE84PZGCNJXV5cM4xl11ZJnjcKB1UosUDTc7+7uyLKC+7s1TW0JavtQqt6Vx3q1I1Lf5/Hx0VBtiqIwqO6vv/6aNE3527/9W9pWaFMa5b3b7XBcmZbpDE4rqp6cnHxSmvq+z+XlpTh3392xXC558+Yrri5fcH//IKKJQYTjuBwOGZ4bcHJySp5nKrOQnmXXdxRFTtPWptlelxVFkZNlKSDZxmAQqbI+NmDbtm1UA7smz9Ug4XFDVUn5r+lM0+mUx8dHNbDIRXliMFCO7o3KYGPm8zHbzYbxIDbZqtC5UmazGaPRyIhYvnjxwqDuT05ODNvg/vGB6XRMpYYow6EwKIRkPyZNU5L9gTiOzb2tB2Gz2Yz1eo3v+2w2G2G1qGuTpinz+ZybmxvjTaADlDYNb5oGn0AJMJYmi9Q9NhAAsEYLaB6mHi5st1tp4SgEgB8GjKcT/DDAcmzmiwVZkeP6HnuVuYVh+PPB7HPZ2OdwY8c/P+5dPX/884D4/D2ev+/PHZYt8tbPM8e+72n7jjIraJGdRtNAfN+nb1oeHh54uL+X3bjrzcRKZ1HH4L/j4A0ypbUsi/F0anoHTdOIpJC6oJ7nUOUpto0BHGoDCN0g1dgv3cuwLMs4/9iOw8SbUaQpeVHjDAP63iI5lOb7dK1Nnhckh4QsbUiSA2VZEwZDJpMpDlAUNVlW0DbQtVBXnQo8OVXZ0DaQHHICP6JrwQk86B206XGa7mnbniwr8T2fIBjge5IR7vd70izlw/tbmaA6Aa3V0nWW6vUJl1RPyDRhWluu6ea1vl/2e5EHShJhMwRhRI9lSit9D+qsajgc8s0336DH+R8+fCCKIq6urri5ueHduw8s5qem1CwKAedGUYxtP5G9XdcxmyeqvRGGwraoXe+TcirLMvNZtAJJ0zQsFgtDT1oul6RpSpqlVGVjEPhxHJtJrRv4VHXNer1mOBwKMFk1sXUfMVL31mKxEIyYCmZpmpqyc7Va8eHDB25ubgysR5eLWi5J9+E0Qr/rRD26Kisz3YzVedMcTj1hdryAOI6ZTCb4vs/79+9N9qVZCLr81FmS7/v0KvBHUWR6e1EUGcpSmqbKTFxAzYCRE+r73ohRjtS1L9W/Z7PZJ1Nsfb7W6/UnMeMnfTOPA9fz/z/uRel/m1S1/7ERyc/14fTPJdP75zFpLsCOkgAAIABJREFUtsKc/ahfdjSmtr2nCYuWMdlu1uy2O+MH6NoOkQKzWpZlcCzHCiGfBG0+Bf7qrMU5kimxbUf4k7ZcVM1H06m4BhZqIwrALBYAzw8pihsqVdJ0rSSjWZapaZw08suyIjnkuK5M3+T8Q1FUeLYlzuJZRt9bdJ2oxEpZlSrLt1Z9V3Gh7zrY7Q7YjkVZ5uz3gh4vyxrX9WmbHjyb8XjKeDxVXMgtnuubwKTPc99Z5jpqdVeRGIrMZ10sRGJcL7ynXotsIHle4nmBmnR1SltLFCuEP7rm9PSUL754zcePH/n48VaZZ0wZDUckSaaa5pip4nB4geMIwV5K4hGe55IkB5q2VtSzANdx6fzADGq22y2r1cqQnI9LqMViYVRkdYalUfIaz6cb9/PFgqIS8PDdneDcUkUy1xxL13WZKh6rFiiMoshM8rRixO3tLev1mjTNGY1ihsOhmIHEA+LRkPv7W+q6MmW4LsW0IKLnuJSqzXF2dmYGCsdA5aZplF7b0JSyuqUi91ZvKgwNk5ABoIXr+IxGEyaKEpXnOU0jgp2Xl9e8ffuWoihpmoPpfz5BTSxBRDgOXd/T9j3hYMDi9JTheMzj4yNnFxfkZclecTn18ZNWczpAfK5Xpk/scRDRAa7vPs20PpdB6eNHeLVO//8T0NU8Vv276z91dDrujdmWTRSF+FFoyKvb7VYMViuZ3Okg4qqG/jEuSO/Gn8fAiUlwqbh2BuzoefjqorquQ5YesKzOMAS6rmM0GpkM8e3bt0btYL/fm2mMTGmWVHVHPBQ4xc3NDVVVEccC/pS+k5DEq/JJq8pxHA5Jyu5mQxT69F1DXdZYFpRlRlmJ/2TTlgSBh2V3uJ6F51m4bkhZlazWjzR1RZIeiKKAuq5YLOY8PNzjODZZluB5AsCcTse4rmOgDQI9GXJI9pRlTt2WlGVhJk5BEDAej02zf7lcGsE+bRjSdR1d22NbLp7Xm8Cgg9Hp6akBo263W9I05c2bNwazpP0TBU7ikRxSurZVkj0Nh0Oipo2tCWi+7xkVFd1qcB2Xtm/NRqgBoHIulQiBbYnJje+z3+9p29ZkYLZtUZQZ89kJ19cCKXl8fCTPM8q6Zr3ZYDvCfNjuNeDZNj22MIp4fJRp43K5/ERZQqtvnJ6ecnt7y2w2YbFYmM9qYRGGMqnWJbpel3oiqUHItro/9eBB9+jquiYeTYzMkO5RHZf4erh1HNh0udp1HW/fvcP1PeVHEKhN/YlHORwOKYrSDMe6rldOWIHBFmpYlG5h6TL0+++/ZzAYcH9/r87rz/hmak38TxbyUSA6zlY+wZf1Pw5AnxsA/FR52fe9ZkI9OSc9j6G9OIv3R2Vgf0xCV6+pR+P6T9d1RCqA1KXsBB2fBmK9y5jv+fyzKdCtZzt01pEuG5CrssN1bdqywHZEcPC4rNbZnEAIYkajkaEW6YuGZdH0GXVXUKQ5m/VGkXNbOlqjx68b0WWTEQYRw+GIpi1I0i1J0kLf4FgO8TAmKxrzOcLIZTweUuS5bAxIyZUVBVWdKV5qSpcVlNWQqs7o+pqiTPB7H8vuWK3vTc+wLEuyPKOqM2xHbS60pGlmCNbPp2J5nosbvLFHy0xmVveNULV66DuZhIWBwAriwQjH9rAtl/FICM625ZJnB8JgwGJ+KtSYouUXX/2SIi/ZbPYEoW+YBklyYDqdUJYB2sW9Vd+laeRzNn1D37ViGJPn2DYsFpJhlGVuMkzR+ReH8yxL+Prrr3h8XIps+HbPYi6tBY0jK4uS6WKuzlf9CexBY636XnwhNpuN0YnbbDaKPuWZYKKzoclkYjIn3Vu0XcfAQ3TGqGFEOkM7NiHRYGNN4NfDMI38LwphNOjApvFf+pp9MmRT36VrW/b7Et8Pmc0WzOcLI3V0e3urqhcHkEy57xuapkPjzD3PM1hBrbixWq0MJ3k2m9F1Ypw8Go3++WD2HCSr/34uznh8kuSHT2Xm5yaVz/tmx++hPS57S3EunwUywPTLPjdtxZYTmhZPqgH6oti9BDlHlzM9pkemm5N6+qiDj95J+r5HDUnJixzHftJq6tR4W1J1i8V0hO9rtQzHjK/1d9bIZ73Li26+4MyGowGd1VGUBa7n8uarL0zPJEky2q4ljkfYlk1RpGy3K7oOgw7H6qirDPoGO/CxncDgjwSP5RCEFm2HQsvvacOQps3w/J7Q8RjEUk5htVR1xXAk7uEaYlEUuVDK2hrXsxjEAYfDgds7KU2DMKRpG5rGNmBJLSJ4fOgFpzM0bZ
DSNi1gMRqNTfNaXxdNbh6NRpydnRk6k178+nz/8MNbdrs92+2G2WzKZDJWxOxK+K1ZQds2tO3QLMq27czi0Vn7cVaZpqnBS+msUYYj4qS0WCxo2844Vv339t6rSZIjPdd83ENHpKrMUt0N0TM7GCx3SY6RP//Y/gAa1+wc2+UNIQZAK5RMnRk6wvfCRUVlC+7tOYYwK0OjUlREZvjnn3iFXYyPj486g600yf0lL3n7/t0ziIXN4JVS7AyezTbZbT/KNtGrquL9+/csl0vm87lbp9agebiBW1WY4TDFQiAOhwNn06kLDB8+fABwODLrd2l9QB8eHpyqiQXLOl6xEU9su46mbRmPp/SHvZP1sRubpWJZdVrbe7Z9SD2MC5jN5k7yCHBAdtv3++mnn9ykeOjQ9tlgNoy6w+BxGuDshbgMhydrqtNA5uLSl7IzC60YBDAwXRiFU994Bsmw59c/lSfAEzOg72kbXUqeTTVAU/TPr+H03HozsbU3uyeMkm5R0tC4gYGAwWeF2QkVWSYdt9CWpNpg494hxq0rs8XZlFWB8CGKPSbTMVdXGrn+8PBA11fUdU8QaB5gr0ZuN25avchmcYokRBrgchzHtOsCzxOEkUDIlqo+6DJTgvQ6pNeRjcJnU+kXL15wd3dHls0N1scninVACiNJGMXUtUaVB6FP2/nmPAriJHb0qFOkut397WahlHKmv7ofo6gqbWE3mUwc2twGf7s5pWnKw8PDMypLXdeMRmME2lZNMx40vckPtG/oZDInCAKKMqeu9eJJ0sjBDtq2xfd81zOy9Bm7qc1mM9eGsH0uO0XUoOgApaCqtLP3Dz/8wLt371yGVLd6Ea83a/d+1iF+Mplo7uhmg49wZiKPj4+u37rdbtnv9yyX2sh3sdBOYdpp6gXL5ZKbuxtqQ5gfj8ecnZ1pWtMxdxv8dDxBmkokz3NXguq+Z8MhL9xQoR5kinVdu7aGveeHawQw0IwnL8/lcknXtggpncPUZrNxLR4LXfE8z9AHE3dNDq6hlPv7dlMcls+fDWbDLEliMyj7o5DyKcLY7KXrni5ICKNyoT6eeOpg9bzN/6yBb5FiNgtzzxm8QAgwE0374XWq189RsNxuHAbF3uRd1xFIz+1ISil8E5yGsr/2PCxYdAg/sT9honWYbP7ZK0XX94iuw1OCOEnxpW6CCjxtxbY9aFBqqxv/1ttwv9+iVMfZmdZbX2/X+KHPaJKRmp22qWravmM2ndH1Pfd396hekaUJaaLNf61DeRyFxJEHtNRVhQCOxwOeJ/F9abKfxiHCoyh4NpzQG5J2Z//uu++cC84vv/zi+iarldZIE0Kw32np5jhK6NJesxKajiDUC/541AFSZy2dg2dYTNZ0OuXrr78GMHioktvbB3rD17DTrTzPXU9nOp3y8uVL8jzn5uaGutHCg0VRGLG/kSkJaxDKeDeUBIF2YOr6jrZtUKqj71uiSEMsmqbVAxHPJ45TMz3sDfG+BKRraI9GE969e0dZ1oBASp+Hh6Urneu6QfXai9SCctuu4+eff9ZZFkq7EV1coJRis1oxPzvj8vyCzlCqDobDulyvmM/nKHQrY3/UQo+d6vWCt0KHo4z1es3heKRDkZiMcjbTdoer1Uqvhb6nUz1xkoCU/Pb2rc6eg4DMAMDbVj/n4eGBotKDgsl0ys4wX2y2bdeKBd2maYof+FR1TatqQi/k6sUl42yCkoqmbNjne8ZpQlke6JseKXsCKUiyiBeXF5xfXfLu1zdI6RMHkiiNUW1HUVRURUF+PHJ9dYX0fQJPIIMvyGYjPEMXEijjaD386XrIiyeMllLCcri1TI/LugZRSH1cplrSuCtpPYkHug8xHAqclKB1azNHM1mkp+5qMOqeFrhqs8XhrtE0DV1jel3S4zTrtIcUArzngpI9mp/pskYhwdM/wtfuMVEQooTECyKquqPf6axAnzNcXr1gMtE9Mz+QjPYpUaKb4L3SPQApJX4QkkQZSZohgc1ux/3dLY/LFUkcMRlPmJ/NQQhevYD5YkEchvx+e6NNOugoy478eCRONK1FyIDReKwzjyhi+fCoS88g059DW/P4eM/ZYo4QeqBwMDpkk+mcyXhKkqTc32/46ec3BIEWlQz8kCjKyLKUMApo2obb+7f86fVrXr685u7ujratqeuG62ttmfbmzRv2e03ZWa+01M7hcOCbb77B80J++vvfEUJ/xGEYMg3HLstt25a6KfF8weL8jLdv3xInIbt9x8PjHW1Xu1KqqipG44gkidluO8JIkmVjrq8XjEYZy+WSw14b6E4nU62/1nX88ve/0zQNl8b1vK5rptMpq9Waly9fUdcV4/GE6XTKw4MWvQzDiCiKyfOSbKyzrLwqafc7pvMzBzidz+dUVcV+uyOfTLk6v6CtKo6HA4dIG/Te3d0hfZ9jWeD5Pr/89iu9UtRVRd22XF1cMDtfkGQpcZoiPB2U7m9vORxzzheX3N7e0rWKs9mCJM6oyoYir3j9+jVVXev7R2oHr6qq8PyOdDTmbHHO7e0tN7e3NHXN42rFYb+jR9vf2UFZEAQE0gNlhmmeTxyEeKHHcnlPGCi6tuB4XJMlgRmy1YxHIaKvmTQRTdUSpxFJlKJEz+G4onp3pGsFx21J4PusVrcc9wekL6nLBtX3zGdnJFmMZyhinw1mpz0zG1CGzX5bltmJol3wdipjX3OKKRsGqNPXfq5Hpz4R2PSw4NN4NjF8zrC3ZgOVfO4h8AQLGQQy+14n2dqzaxB6YOGpXvf4PInwJUVR0TU9ga8lsIMgQAqPtmvI8+LJQFUZwrvn0XUapDqZzJDySePs8f6B1WrFdrs1VKSGLEnpmpaDmYK2bYsAXrx4wZ++fc3D8kF7JbYQRBlJ5us+o+FaxmFMEicsFs8bz54X8fLlt7RdjVKCMIzwvJyyyBFCGvOTO6Io4fXrM1QPRVG5jKaqGrpOEUaCLE1oWg0LGI0zttsN4/EEpTS9S8vqaD2tH3/80RGLpZRMpjPOz88dC8DyVO21WqMT+2/bS3maCAaMx6k29z372pXpbZsyneqgqPtDWuXCZk/2c766vCJLx9zf37NebQmDGCl82kZnIKoHTwasV1uKXIOyL86vmM/nZlr6SA+Oh2tR+N9++y3b7dbRksbjMX3fc3tzw+ODli0qi4K8KPTU1pjqKnhyBkO3TnaHA0mWEZhe44ebGzf06rqOH374ga+/1tf+7//+77Rt6wxC/vM//5O//OUveKbbXtY1G2NGfPXiBbP5nOV6TWomlpXBRZZ1zczT0CPLU478wEEyfN/Xdo1K8f3339H3LXleojF8kjwvKIoSpRrSNOHu7oZ3735HKbi8XLBYnGkZ9KJEEHPc544e2bYth82epq2Yjifc3t4yHmf4gUdZfgGacdof+1QgGQY6+wHbacan+k+n7w9PPRMbYE4HC5/67+d+d3o+n/qbPeoZu9MugNNhgpCfljA67Q3Yz8E2gu2NK9FZToWVTLElrHCAP6UU0tNGGHoMHbn37/uOvvcp8oL3737n5uYGKbWpxcX5mH/6p3+i6
zonaieFT1U23N896gZpW1OUFd2AYtW3nRFRFHhhiBKCKEmpq4qlkcSO45irqyuQkuOxZH/7YKhXgjTNaJrWyCLrJnoUxYz9wJUddd1oTa/qSBhKzaEsapI45bA/GvCw5M2bN8xmZ66ncnZ2xna7cxy/tmsdTssOELruifxt7xerYW9t5obQAE2qbwnD2EBBVkblZIPvh4RhzHK5dtLdr1//me12a0om3zXarWad53nPtO2sQGHbts9YCvP5nDRN+f321n3PVpPMUnMeHx+ZzWZcXl5SliXr9drBS5qm4WjgLAujUGGnh7ZXFIYhl5eXjhdsDZYtQNc21S153/b0rEz1yFCYbBCytDxAq/huNs5PYDQacX5+7lo19nrjMHLrXinl+KaiErQt3C8fSNMYzwuQEtpWUVUNdd2aSalkOp3R9wIp4erqBYvF3FDn9jw+7BBC84C327Wesjc1io4s00Df2WxCnERfhmYMA8TnAsMQmT1c5DZAnAab4fsNm4bDwGYfEx+NMf//H0Pg5UcDAp4gFxpm8ZRZuublAPJxeg2nQX6YSdrsoJIeaZggFNrcpK4MNy0yXEfBw8OKLIsJo4DHx5y2bRyVS/tBal5hXbfGoUhwfX3N69d/ZjabsF5vjCGIxpEJ4aFUQVFU7A57RuMxHYq26RGypetK6krfjEmSsFrv2a43eJaLutN2bHleE8UZ5xdztts1f//7r04VBIQLJpZPaXXY7GdTlAeKUjcuz881IPb3328YjUZcXFywXK5IkpTz83NAOKs0LTiouZWr1Yq6aRlPp8/0vTREQiPLh/fYsAqwY/osSymK3G00Fpv16tUrjsejcxyyUz9LKbNDgNAY2HzzzTcURcHV1RVlWTo8oGUDxHHsNM7KsnQGLLGR6Lm7u2M0GnF1dcXl5SVWTMAOfTRx/uhUOCzcJQgC4yeq2QnDxvtoNHIqHG3bOscqC3rVopq6lLWQFxvwlsslVi3E8kuV0tmp1RSzJaRlBFgesVP7NZtxF+ngdTQep1YGSLePJGmScXY2I/BD2q6ha3tymVMUOw6HI6vlltE44/r6peaRRlpnr64aM7wQXJxf4Hlwf3+D53m8fHVtpulPbAxP+iRJ6tboR8HMTgc+6iOZG8jqEw2zkmHZaKP8lxb/acB7nlnpqeAwKJ1COYblqMOYmZ0cpZ699ikY4QLZMHha9QAbzDozIftc5jcEH9rAbFN81fWUfkFk+J5xHJnxeISiM1MgOxHuDTxBy05rl6Etj48bwiAiDCOybMzV1ZhXr14yHk8oioo3b94RhiFXV1f0fcdyuTKMghFZOqbpFMdCT67ax6Xe1QwEYnE2Z7Vc8ea3X/E9bco6m04ZZyMNOykqQyrvSNPMfD4+x2PushIN0YCua929oKeNJVIKxuMRoJ3INVWlYzyekucVSZLy6tUr7Qjv+fhewIf3N2SZzpBsi8OKMFpajuXgCSFcJmYXphUKuLy8NFAFePPmN6dNZoOFfb7N5sIwdMKcv/zyyxOLQwgmI12OWjkiO3G2wFEbYK07+2KxcNnZ/nBwiP2yLPnrX//KZrPhr3/9K1VVMZ/PnTmwDTg2GFo60osXL8jN7+7u7qjrhvn8jPl87oQVh1PeIcLfTh/tMMJ6X1pIxWaz4eLiwmn5W7GEpmm4urpiNpvx22+/OZ6k3agnkwkTM41sa817zo2suGU62A3OCwNG4xFdpygPFV2vENLD83VPNRuNCKOIXsFxf2C92eN5AqX0FFco2O7WFOURP9BA+Pl85uAn0+nUSB91z5KfTwaz01JumFVZ3IueGFVOz8g+57R0Gx7DoGAD2mngAGUyjo/9Lz+VLQ5HtJqB0D8LVu555gfz3lY7bRjMuq6j77qPzn8YvCxgcXjuNs3u2w7ZC1JTIiwWCxaLheFkViAUvi9N2VAjPX1W6/XKSXdPxmPSdEwSpwRhYOSlYx4fVtzc3nBxfs58MSeOEg7Hg6P9xFFCXmq5l+1hz26353DQdBEpBZPxhKbuaKqauumoGy0rnY0mBIGWltkdCt69f8/8bMLXX71GCJxaaRj6YKAnVn6761omkzHjcUYUBaRZiic93r9/z/F4xPM8x8ULAj0x/e2337i/v2cxPwcsqj5yGVLbdeyPBxdI7P1V1zXb7Za7uzunDjt01rbPt8+1oNOqqtjv96xWK1dSDX0eh9aHeZ5zPGhrtomhzlhljaIsGRs5HzHAh1mxTysjrYCyrtzAQl9/5e4Xq/xqAbOLxcJlfaPRiHSUPRMstCR3m1GtVisWi4XTerPUsqZpXGmohxLCCSBEUeSkp22ZbNeI5Qjb7M5Oj4UQTp3D8kdjA5oNU+0F2xozEft3pJSUTc3D7ZJDPnbKJ/Yzz7KMLMt49eqV+17qOqDvdAlqM8soCPnw4YMpqc/d+Q7NVawwwRclgE4zn9PfW9iDzUpsRnZadg5/hmXpsD9mbyK7yzwFxeeYto+zrI/7d+79xJOr1LPgZ15j8WEYmZ6hVM/wNfbc7fvaBrXNXJ3el7mmIAjADxglI85mC6azicNX2Zve86XbSYWAzmR1uomuQSuXVy+Ikwyl9PR1d8hZbfQAoKxr9nnJcvOrUVUIGI8nxGlKUdf8fnuvdaCamv2h4HAoTJ8kww8iqrrB9wPm5xcoBdPZGXEyYr1eOTzZarnGk5KLi2tzThqlHcUxQkCSxIOhhTKfi6DtaopCgdKwiPV6awYZguNxbcq4iv3uwGg0dooWX301MdlSQdv2ZOOMr776itvbJ4ciWxIOA5UQT0BRpZQrjSxQNI4FVdUY67vQBJWGJMnw/ZCiqPD9EM8LHPBVKZ1RxmmCDHyE79H0HTLw8Tqfsqnxo5AkjDi/vDATzhVVq3GHc2NC8h//8R+uhHt8fOT8/Jy7uzs9vTUB1pLP7YR2CI7dbreuJ2epbxbEa0HDw3VhLd6GmKvtdvus/LM9NwvaHmqI2d8ppQxGT3++FutnQc32vM+msydivlk/tq93LEviOGWUTZDCZ7c9UFctga8cDKiuWpq6w5MBUZjQNj1VWdC1Wvzh+vKCtq356quv+Pbbb7m/v0cI4cr10WhkBDX9ZxvFJ4PZ5/pdgENJ2/LClho2gJwGsmHwGQZFl0kNvhS9W6jnQWhwfG6gcPr+w7/zrBw1wazrOhDqGfhv+LzT87eZ2VAmaBjM7BfqCZ/RaEqapUjhufIEerPTJbRtzWQ6Jo4jdrstZVE7itN+fySMYkByPGqzk91uB8B4PGY+P3ecPaUUV1dXhGHE8aipIu8/fEB5HiLwQeiFGUURi8WCLE2pq5q+6whDfRNGSYIC1usNXdcxHo+4OL8kiSNWZjAAxom61O7stsTDYAKlBEVPnh9p2w6UVrw4HI7c3t7y+vWfHN3GosY9zzMZxrnbsYtCQxems6nboZMkMUq6KyeJk6bpM5FDK6tsv/ejEUG0i3+xWPDVV19xfX3tXNJ3u50WZHS4qtbg1DJDsC8cSt2eb55rV3VL64nj2JHPbda4NabQFswrpeS7777D933n+2DpUE5cwNCUHOykLAgGXpRD
[... base64-encoded image/png notebook output truncated ...]
" + ] + }, + "metadata": { + "needs_background": "light" + } + } + ] + } + ] +} diff --git a/docs/zh_CN/tutorials/config.md b/docs/zh_CN/tutorials/config.md new file mode 100644 index 0000000..9e9c87e --- /dev/null +++ b/docs/zh_CN/tutorials/config.md @@ -0,0 +1,417 @@ +# 教程 1:如何编写配置文件 + +MMClassification 主要使用 python 文件作为配置文件。其配置文件系统的设计将模块化与继承整合进来,方便用户进行各种实验。所有配置文件都放置在 `configs` 文件夹下,主要包含 `_base_` 原始配置文件夹 以及 `resnet`, `swin_transformer`,`vision_transformer` 等诸多算法文件夹。 + +可以使用 `python tools/misc/print_config.py /PATH/TO/CONFIG` 命令来查看完整的配置信息,从而方便检查所对应的配置文件。 + + + +- [配置文件以及权重命名规则](#配置文件以及权重命名规则) +- [配置文件结构](#配置文件结构) +- [继承并修改配置文件](#继承并修改配置文件) + - [使用配置文件里的中间变量](#使用配置文件里的中间变量) + - [忽略基础配置文件里的部分内容](#忽略基础配置文件里的部分内容) + - [引用基础配置文件里的变量](#引用基础配置文件里的变量) +- [通过命令行参数修改配置信息](#通过命令行参数修改配置信息) +- [导入用户自定义模块](#导入用户自定义模块) +- [常见问题](#常见问题) + + + +## 配置文件以及权重命名规则 + +MMClassification 按照以下风格进行配置文件命名,代码库的贡献者需要遵循相同的命名规则。文件名总体分为四部分:算法信息,模块信息,训练信息和数据信息。逻辑上属于不同部分的单词之间用下划线 `'_'` 连接,同一部分有多个单词用短横线 `'-'` 连接。 + +``` +{algorithm info}_{module info}_{training info}_{data info}.py +``` + +- `algorithm info`:算法信息,算法名称或者网络架构,如 resnet 等; +- `module info`: 模块信息,因任务而异,用以表示一些特殊的 neck、head 和 pretrain 信息; +- `training info`:一些训练信息,训练策略设置,包括 batch size,schedule 数据增强等; +- `data info`:数据信息,数据集名称、模态、输入尺寸等,如 imagenet, cifar 等; + +### 算法信息 + +指论文中的算法名称缩写,以及相应的分支架构信息。例如: + +- `resnet50` +- `mobilenet-v3-large` +- `vit-small-patch32` : `patch32` 表示 `ViT` 切分的分块大小 +- `seresnext101-32x4d` : `SeResNet101` 基本网络结构,`32x4d` 表示在 `Bottleneck` 中 `groups` 和 `width_per_group` 分别为32和4 + +### 模块信息 + +指一些特殊的 `neck` 、`head` 或者 `pretrain` 的信息, 在分类中常见为预训练信息,比如: + +- `in21k-pre` : 在 `ImageNet21k` 上预训练 +- `in21k-pre-3rd-party` : 在 `ImageNet21k` 上预训练,其权重来自其他仓库 + +### 训练信息 + +训练策略的一些设置,包括训练类型、 `batch size`、 `lr schedule`、 数据增强以及特殊的损失函数等等,比如: +Batch size 信息: + +- 格式为`{gpu x batch_per_gpu}`, 如 `8xb32` + +训练类型(主要见于 transformer 网络,如 `ViT` 算法,这类算法通常分为预训练和微调两种模式): + +- `ft` : Finetune config,用于微调的配置文件 +- `pt` : Pretrain config,用于预训练的配置文件 + +训练策略信息,训练策略以复现配置文件为基础,此基础不必标注训练策略。但如果在此基础上进行改进,则需注明训练策略,按照应用点位顺序排列,如:`{pipeline aug}-{train aug}-{loss trick}-{scheduler}-{epochs}` + +- `coslr-200e` : 使用 cosine scheduler, 训练 200 个 epoch +- `autoaug-mixup-lbs-coslr-50e` : 使用了 `autoaug`、`mixup`、`label smooth`、`cosine scheduler`, 训练了 50 个轮次 + +### 数据信息 + +- `in1k` : `ImageNet1k` 数据集,默认使用 `224x224` 大小的图片 +- `in21k` : `ImageNet21k` 数据集,有些地方也称为 `ImageNet22k` 数据集,默认使用 `224x224` 大小的图片 +- `in1k-384px` : 表示训练的输出图片大小为 `384x384` +- `cifar100` + +### 配置文件命名案例: + +``` +repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py +``` + +- `repvgg-D2se`: 算法信息 + - `repvgg`: 主要算法名称。 + - `D2se`: 模型的结构。 +- `deploy`:模块信息,该模型为推理状态。 +- `4xb64-autoaug-lbs-mixup-coslr-200e`: 训练信息 + - `4xb64`: 使用4块 GPU 并且 每块 GPU 的批大小为64。 + - `autoaug`: 使用 `AutoAugment` 数据增强方法。 + - `lbs`: 使用 `label smoothing` 损失函数。 + - `mixup`: 使用 `mixup` 训练增强方法。 + - `coslr`: 使用 `cosine scheduler` 优化策略。 + - `200e`: 训练 200 轮次。 +- `in1k`: 数据信息。 配置文件用于 `ImageNet1k` 数据集上使用 `224x224` 大小图片训练。 + +```{note} +部分配置文件目前还没有遵循此命名规范,相关文件命名近期会更新。 +``` + +### 权重命名规则 + +权重的命名主要包括配置文件名,日期和哈希值。 + +``` +{config_name}_{date}-{hash}.pth +``` + +## 配置文件结构 + +在 `configs/_base_` 文件夹下有 4 个基本组件类型,分别是: + +- [模型(model)](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/models) +- [数据(data)](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/datasets) +- [训练策略(schedule)](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/schedules) +- 
[运行设置(runtime)](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/default_runtime.py) + +你可以通过继承一些基本配置文件轻松构建自己的训练配置文件。由来自`_base_` 的组件组成的配置称为 _primitive_。 + +为了帮助用户对 MMClassification 检测系统中的完整配置和模块有一个基本的了解,我们使用 [ResNet50 原始配置文件](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_8xb32_in1k.py) 作为案例进行说明并注释每一行含义。更详细的用法和各个模块对应的替代方案,请参考 API 文档。 + +```python +_base_ = [ + '../_base_/models/resnet50.py', # 模型 + '../_base_/datasets/imagenet_bs32.py', # 数据 + '../_base_/schedules/imagenet_bs256.py', # 训练策略 + '../_base_/default_runtime.py' # 默认运行设置 +] +``` + +下面对这四个部分分别进行说明,仍然以上述 ResNet50 原始配置文件作为案例。 + +### 模型 + +模型参数 `model` 在配置文件中为一个 `python` 字典,主要包括网络结构、损失函数等信息: + +- `type` : 分类器名称, 目前 MMClassification 只支持 `ImageClassifier`, 参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api/models.html#classifier)。 +- `backbone` : 主干网类型,可用选项参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api/models.html#backbones)。 +- `neck` : 颈网络类型,目前 MMClassification 只支持 `GlobalAveragePooling`, 参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api/models.html#necks)。 +- `head` : 头网络类型, 包括单标签分类与多标签分类头网络,可用选项参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api/models.html#heads)。 + - `loss` : 损失函数类型, 支持 `CrossEntropyLoss`, [`LabelSmoothLoss`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_label_smooth.py) 等,可用选项参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api/models.html#losses)。 +- `train_cfg` :训练配置, 支持 [`mixup`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_mixup.py), [`cutmix`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_cutmix.py) 等训练增强。 + +```{note} +配置文件中的 'type' 不是构造时的参数,而是类名。 +``` + +```python +model = dict( + type='ImageClassifier', # 分类器类型 + backbone=dict( + type='ResNet', # 主干网络类型 + depth=50, # 主干网网络深度, ResNet 一般有18, 34, 50, 101, 152 可以选择 + num_stages=4, # 主干网络状态(stages)的数目,这些状态产生的特征图作为后续的 head 的输入。 + out_indices=(3, ), # 输出的特征图输出索引。越远离输入图像,索引越大 + frozen_stages=-1, # 网络微调时,冻结网络的stage(训练时不执行反相传播算法),若num_stages=4,backbone包含stem 与 4 个 stages。frozen_stages为-1时,不冻结网络; 为0时,冻结 stem; 为1时,冻结 stem 和 stage1; 为4时,冻结整个backbone + style='pytorch'), # 主干网络的风格,'pytorch' 意思是步长为2的层为 3x3 卷积, 'caffe' 意思是步长为2的层为 1x1 卷积。 + neck=dict(type='GlobalAveragePooling'), # 颈网络类型 + head=dict( + type='LinearClsHead', # 线性分类头, + num_classes=1000, # 输出类别数,这与数据集的类别数一致 + in_channels=2048, # 输入通道数,这与 neck 的输出通道一致 + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), # 损失函数配置信息 + topk=(1, 5), # 评估指标,Top-k 准确率, 这里为 top1 与 top5 准确率 + )) +``` + +### 数据 + +数据参数 `data` 在配置文件中为一个 `python` 字典,主要包含构造数据集加载器(dataloader)配置信息: + +- `samples_per_gpu` : 构建 dataloader 时,每个 GPU 的 Batch Size +- `workers_per_gpu` : 构建 dataloader 时,每个 GPU 的 线程数 +- `train | val | test` : 构造数据集 + - `type` : 数据集类型, MMClassification 支持 `ImageNet`、 `Cifar` 等 ,参考[API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api/datasets.html) + - `data_prefix` : 数据集根目录 + - `pipeline` : 数据处理流水线,参考相关教程文档 [如何设计数据处理流水线](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/data_pipeline.html) + +评估参数 `evaluation` 也是一个字典, 为 `evaluation hook` 的配置信息, 主要包括评估间隔、评估指标等。 + +```python +# dataset settings +dataset_type = 'ImageNet' # 数据集名称, +img_norm_cfg = dict( #图像归一化配置,用来归一化输入的图像。 + mean=[123.675, 116.28, 103.53], # 预训练里用于预训练主干网络模型的平均值。 + std=[58.395, 57.12, 57.375], # 预训练里用于预训练主干网络模型的标准差。 + to_rgb=True) # 是否反转通道,使用 cv2, mmcv 读取图片默认为 
BGR 通道顺序,这里 Normalize 均值方差数组的数值是以 RGB 通道顺序, 因此需要反转通道顺序。 +# 训练数据流水线 +train_pipeline = [ + dict(type='LoadImageFromFile'), # 读取图片 + dict(type='RandomResizedCrop', size=224), # 随机缩放抠图 + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), # 以概率为0.5随机水平翻转图片 + dict(type='Normalize', **img_norm_cfg), # 归一化 + dict(type='ImageToTensor', keys=['img']), # image 转为 torch.Tensor + dict(type='ToTensor', keys=['gt_label']), # gt_label 转为 torch.Tensor + dict(type='Collect', keys=['img', 'gt_label']) # 决定数据中哪些键应该传递给检测器的流程 +] +# 测试数据流水线 +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) # test 时不传递 gt_label +] +data = dict( + samples_per_gpu=32, # 单个 GPU 的 Batch size + workers_per_gpu=2, # 单个 GPU 的 线程数 + train=dict( # 训练数据信息 + type=dataset_type, # 数据集名称 + data_prefix='data/imagenet/train', # 数据集目录,当不存在 ann_file 时,类别信息从文件夹自动获取 + pipeline=train_pipeline), # 数据集需要经过的 数据流水线 + val=dict( # 验证数据集信息 + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', # 标注文件路径,存在 ann_file 时,不通过文件夹自动获取类别信息 + pipeline=test_pipeline), + test=dict( # 测试数据集信息 + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict( # evaluation hook 的配置 + interval=1, # 验证期间的间隔,单位为 epoch 或者 iter, 取决于 runner 类型。 + metric='accuracy') # 验证期间使用的指标。 +``` + +### 训练策略 + +主要包含 优化器设置、 `optimizer hook` 设置、学习率策略和 `runner`设置: + +- `optimizer` : 优化器设置信息, 支持 `pytorch` 所有的优化器,参考相关 [mmcv](https://mmcv.readthedocs.io/zh_CN/latest/_modules/mmcv/runner/optimizer/default_constructor.html#DefaultOptimizerConstructor) 文档 +- `optimizer_config` : `optimizer hook` 的配置文件,如设置梯度限制,参考相关 [mmcv](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8) 代码 +- `lr_config` : 学习率策略,支持 "CosineAnnealing"、 "Step"、 "Cyclic" 等等,参考相关 [mmcv](https://mmcv.readthedocs.io/zh_CN/latest/_modules/mmcv/runner/hooks/lr_updater.html#LrUpdaterHook) 文档 +- `runner` : 有关 `runner` 可以参考 `mmcv` 对于 [`runner`](https://mmcv.readthedocs.io/zh_CN/latest/understand_mmcv/runner.html) 介绍文档 + +```python +# 用于构建优化器的配置文件。支持 PyTorch 中的所有优化器,同时它们的参数与 PyTorch 里的优化器参数一致。 +optimizer = dict(type='SGD', # 优化器类型 + lr=0.1, # 优化器的学习率,参数的使用细节请参照对应的 PyTorch 文档。 + momentum=0.9, # 动量(Momentum) + weight_decay=0.0001) # 权重衰减系数(weight decay)。 + # optimizer hook 的配置文件 +optimizer_config = dict(grad_clip=None) # 大多数方法不使用梯度限制(grad_clip)。 +# 学习率调整配置,用于注册 LrUpdater hook。 +lr_config = dict(policy='step', # 调度流程(scheduler)的策略,也支持 CosineAnnealing, Cyclic, 等。 + step=[30, 60, 90]) # 在 epoch 为 30, 60, 90 时, lr 进行衰减 +runner = dict(type='EpochBasedRunner', # 将使用的 runner 的类别,如 IterBasedRunner 或 EpochBasedRunner。 + max_epochs=100) # runner 总回合数, 对于 IterBasedRunner 使用 `max_iters` +``` + +### 运行设置 + +本部分主要包括保存权重策略、日志配置、训练参数、断点权重路径和工作目录等等。 + +```python +# Checkpoint hook 的配置文件。 +checkpoint_config = dict(interval=1) # 保存的间隔是 1,单位会根据 runner 不同变动,可以为 epoch 或者 iter。 +# 日志配置信息。 +log_config = dict( + interval=100, # 打印日志的间隔, 单位 iters + hooks=[ + dict(type='TextLoggerHook'), # 用于记录训练过程的文本记录器(logger)。 + # dict(type='TensorboardLoggerHook') # 同样支持 Tensorboard 日志 + ]) + +dist_params = dict(backend='nccl') # 用于设置分布式训练的参数,端口也同样可被设置。 +log_level = 'INFO' # 日志的输出级别。 +resume_from = None # 从给定路径里恢复检查点(checkpoints),训练模式将从检查点保存的轮次开始恢复训练。 +workflow = [('train', 1)] # runner 的工作流程,[('train', 1)] 
表示只有一个工作流且工作流仅执行一次。 +work_dir = 'work_dir' # 用于保存当前实验的模型检查点和日志的目录文件地址。 +``` + +## 继承并修改配置文件 + +为了精简代码、更快的修改配置文件以及便于理解,我们建议继承现有方法。 + +对于在同一算法文件夹下的所有配置文件,MMClassification 推荐只存在 **一个** 对应的 _原始配置_ 文件。 +所有其他的配置文件都应该继承 _原始配置_ 文件,这样就能保证配置文件的最大继承深度为 3。 + +例如,如果在 ResNet 的基础上做了一些修改,用户首先可以通过指定 `_base_ = './resnet50_8xb32_in1k.py'`(相对于你的配置文件的路径),来继承基础的 ResNet 结构、数据集以及其他训练配置信息,然后修改配置文件中的必要参数以完成继承。如想在基础 resnet50 的基础上将训练轮数由 100 改为 300 和修改学习率衰减轮数,同时修改数据集路径,可以建立新的配置文件 `configs/resnet/resnet50_8xb32-300e_in1k.py`, 文件中写入以下内容: + +```python +_base_ = './resnet50_8xb32_in1k.py' + +runner = dict(max_epochs=300) +lr_config = dict(step=[150, 200, 250]) + +data = dict( + train=dict(data_prefix='mydata/imagenet/train'), + val=dict(data_prefix='mydata/imagenet/train', ), + test=dict(data_prefix='mydata/imagenet/train', ) +) +``` + +### 使用配置文件里的中间变量 + +用一些中间变量,中间变量让配置文件更加清晰,也更容易修改。 + +例如数据集里的 `train_pipeline` / `test_pipeline` 是作为数据流水线的中间变量。我们首先要定义 `train_pipeline` / `test_pipeline`,然后将它们传递到 `data` 中。如果想修改训练或测试时输入图片的大小,就需要修改 `train_pipeline` / `test_pipeline` 这些中间变量。 + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow',), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=384, backend='pillow'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +``` + +### 忽略基础配置文件里的部分内容 + +有时,您需要设置 `_delete_=True` 去忽略基础配置文件里的一些域内容。 可以参照 [mmcv](https://mmcv.readthedocs.io/zh_CN/latest/understand_mmcv/config.html#inherit-from-base-config-with-ignored-fields) 来获得一些简单的指导。 + +以下是一个简单应用案例。 如果在上述 ResNet50 案例中 使用 cosine schedule ,使用继承并直接修改会报 `get unexcepected keyword 'step'` 错, 因为基础配置文件 lr_config 域信息的 `'step'` 字段被保留下来了,需要加入 `_delete_=True` 去忽略基础配置文件里的 `lr_config` 相关域内容: + +```python +_base_ = '../../configs/resnet/resnet50_8xb32_in1k.py' + +lr_config = dict( + _delete_=True, + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + by_epoch=True, + warmup_iters=5, + warmup_ratio=0.1 +) +``` + +### 引用基础配置文件里的变量 + +有时,您可以引用 `_base_` 配置信息的一些域内容,这样可以避免重复定义。 可以参照 [mmcv](https://mmcv.readthedocs.io/zh_CN/latest/understand_mmcv/config.html#reference-variables-from-base) 来获得一些简单的指导。 + +以下是一个简单应用案例,在训练数据预处理流水线中使用 auto augment 数据增强,参考配置文件 [`configs/_base_/datasets/imagenet_bs64_autoaug.py`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/datasets/imagenet_bs64_autoaug.py)。 在定义 `train_pipeline` 时,可以直接在 `_base_` 中加入定义 auto augment 数据增强的文件命名,再通过 `{{_base_.auto_increasing_policies}}` 引用变量: + +```python +_base_ = ['./pipelines/auto_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies={{_base_.auto_increasing_policies}}), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', 
keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [...] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict(..., pipeline=train_pipeline), + val=dict(..., pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') +``` + +## 通过命令行参数修改配置信息 + +当用户使用脚本 "tools/train.py" 或者 "tools/test.py" 提交任务,以及使用一些工具脚本时,可以通过指定 `--cfg-options` 参数来直接修改所使用的配置文件内容。 + +- 更新配置文件内的字典 + + 可以按照原始配置文件中字典的键的顺序指定配置选项。 + 例如,`--cfg-options model.backbone.norm_eval=False` 将主干网络中的所有 BN 模块更改为 `train` 模式。 + +- 更新配置文件内列表的键 + + 一些配置字典在配置文件中会形成一个列表。例如,训练流水线 `data.train.pipeline` 通常是一个列表。 + 例如,`[dict(type='LoadImageFromFile'), dict(type='TopDownRandomFlip', flip_prob=0.5), ...]` 。如果要将流水线中的 `'flip_prob=0.5'` 更改为 `'flip_prob=0.0'`,您可以这样指定 `--cfg-options data.train.pipeline.1.flip_prob=0.0` 。 + +- 更新列表/元组的值。 + + 当配置文件中需要更新的是一个列表或者元组,例如,配置文件通常会设置 `workflow=[('train', 1)]`,用户如果想更改, + 需要指定 `--cfg-options workflow="[(train,1),(val,1)]"`。注意这里的引号 " 对于列表以及元组数据类型的修改是必要的, + 并且 **不允许** 引号内所指定的值的书写存在空格。 + +## 导入用户自定义模块 + +```{note} +本部分仅在当将 MMClassification 当作库构建自己项目时可能用到,初学者可跳过。 +``` + +在学习完后续教程 [如何添加新数据集](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/new_dataset.html)、[如何设计数据处理流程](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/data_pipeline.html) 、[如何增加新模块](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/new_modules.html) 后,您可能使用 MMClassification 完成自己的项目并在项目中自定义了数据集、模型、数据增强等。为了精简代码,可以将 MMClassification 作为一个第三方库,只需要保留自己的额外的代码,并在配置文件中导入自定义的模块。案例可以参考 [OpenMMLab 算法大赛项目](https://github.com/zhangrui-wolf/openmmlab-competition-2021)。 + +只需要在你的配置文件中添加以下代码: + +```python +custom_imports = dict( + imports=['your_dataset_class', + 'your_transforme_class', + 'your_model_class', + 'your_module_class'], + allow_failed_imports=False) +``` + +## 常见问题 + +- 无 diff --git a/docs/zh_CN/tutorials/data_pipeline.md b/docs/zh_CN/tutorials/data_pipeline.md new file mode 100644 index 0000000..bbcf9d5 --- /dev/null +++ b/docs/zh_CN/tutorials/data_pipeline.md @@ -0,0 +1,148 @@ +# 教程 4:如何设计数据处理流程 + +## 设计数据流水线 + +按照典型的用法,我们通过 `Dataset` 和 `DataLoader` 来使用多个 worker 进行数据加 +载。对 `Dataset` 的索引操作将返回一个与模型的 `forward` 方法的参数相对应的字典。 + +数据流水线和数据集在这里是解耦的。通常,数据集定义如何处理标注文件,而数据流水 +线定义所有准备数据字典的步骤。流水线由一系列操作组成。每个操作都将一个字典作为 +输入,并输出一个字典。 + +这些操作分为数据加载,预处理和格式化。 + +这里使用 ResNet-50 在 ImageNet 数据集上的数据流水线作为示例。 + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=256), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +``` + +对于每个操作,我们列出了添加、更新、删除的相关字典字段。在流水线的最后,我们使 +用 `Collect` 仅保留进行模型 `forward` 方法所需的项。 + +### 数据加载 + +`LoadImageFromFile` - 从文件中加载图像 + +- 添加:img, img_shape, ori_shape + +默认情况下,`LoadImageFromFile` 将会直接从硬盘加载图像,但对于一些效率较高、规 +模较小的模型,这可能会导致 IO 瓶颈。MMCV 支持多种数据加载后端来加速这一过程。例 +如,如果训练设备上配置了 [memcached](https://memcached.org/),那么我们按照如下 +方式修改配置文件。 + +``` +memcached_root = '/mnt/xxx/memcached_client/' +train_pipeline = [ + dict( + 
type='LoadImageFromFile', + file_client_args=dict( + backend='memcached', + server_list_cfg=osp.join(memcached_root, 'server_list.conf'), + client_cfg=osp.join(memcached_root, 'client.conf'))), +] +``` + +更多支持的数据加载后端,可以参见 [mmcv.fileio.FileClient](https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py)。 + +### 预处理 + +`Resize` - 缩放图像尺寸 + +- 添加:scale, scale_idx, pad_shape, scale_factor, keep_ratio +- 更新:img, img_shape + +`RandomFlip` - 随机翻转图像 + +- 添加:flip, flip_direction +- 更新:img + +`RandomCrop` - 随机裁剪图像 + +- 更新:img, pad_shape + +`Normalize` - 图像数据归一化 + +- 添加:img_norm_cfg +- 更新:img + +### 格式化 + +`ToTensor` - 转换(标签)数据至 `torch.Tensor` + +- 更新:根据参数 `keys` 指定 + +`ImageToTensor` - 转换图像数据至 `torch.Tensor` + +- 更新:根据参数 `keys` 指定 + +`Collect` - 保留指定键值 + +- 删除:除了参数 `keys` 指定以外的所有键值对 + +## 扩展及使用自定义流水线 + +1. 编写一个新的数据处理操作,并放置在 `mmcls/datasets/pipelines/` 目录下的任何 + 一个文件中,例如 `my_pipeline.py`。这个类需要重载 `__call__` 方法,接受一个 + 字典作为输入,并返回一个字典。 + + ```python + from mmcls.datasets import PIPELINES + + @PIPELINES.register_module() + class MyTransform(object): + + def __call__(self, results): + # 对 results['img'] 进行变换操作 + return results + ``` + +2. 在 `mmcls/datasets/pipelines/__init__.py` 中导入这个新的类。 + + ```python + ... + from .my_pipeline import MyTransform + + __all__ = [ + ..., 'MyTransform' + ] + ``` + +3. 在数据流水线的配置中添加这一操作。 + + ```python + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='MyTransform'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) + ] + ``` + +## 流水线可视化 + +设计好数据流水线后,可以使用[可视化工具](../tools/visualization.md)查看具体的效果。 diff --git a/docs/zh_CN/tutorials/finetune.md b/docs/zh_CN/tutorials/finetune.md new file mode 100644 index 0000000..efaa88f --- /dev/null +++ b/docs/zh_CN/tutorials/finetune.md @@ -0,0 +1,222 @@ +# 教程 2:如何微调模型 + +已经证明,在 ImageNet 数据集上预先训练的分类模型对于其他数据集和其他下游任务有很好的效果。 + +该教程提供了如何将 [Model Zoo](https://github.com/open-mmlab/mmclassification/blob/master/docs/model_zoo.md) 中提供的预训练模型用于其他数据集,已获得更好的效果。 + +在新数据集上微调模型分为两步: + +- 按照 [教程 3:如何自定义数据集](new_dataset.md) 添加对新数据集的支持。 +- 按照本教程中讨论的内容修改配置文件 + +假设我们现在有一个在 ImageNet-2012 数据集上训练好的 ResNet-50 模型,并且希望在 +CIFAR-10 数据集上进行模型微调,我们需要修改配置文件中的五个部分。 + +## 继承基础配置 + +首先,创建一个新的配置文件 `configs/tutorial/resnet50_finetune_cifar.py` 来保存我们的配置,当然,这个文件名可以自由设定。 + +为了重用不同配置之间的通用部分,我们支持从多个现有配置中继承配置。要微调 +ResNet-50 模型,新配置需要继承 `_base_/models/resnet50.py` 来搭建模型的基本结构。 +为了使用 CIFAR10 数据集,新的配置文件可以直接继承 `_base_/datasets/cifar10.py`。 +而为了保留运行相关设置,比如训练调整器,新的配置文件需要继承 +`_base_/default_runtime.py`。 + +要继承以上这些配置文件,只需要把下面一段代码放在我们的配置文件开头。 + +```python +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cifar10.py', '../_base_/default_runtime.py' +] +``` + +除此之外,你也可以不使用继承,直接编写完整的配置文件,例如 +[`configs/lenet/lenet5_mnist.py`](https://github.com/open-mmlab/mmclassification/blob/master/configs/lenet/lenet5_mnist.py)。 + +## 修改模型 + +在进行模型微调是,我们通常希望在主干网络(backbone)加载预训练模型,再用我们的数据集训练一个新的分类头(head)。 + +为了在主干网络加载预训练模型,我们需要修改主干网络的初始化设置,使用 +`Pretrained` 类型的初始化函数。另外,在初始化设置中,我们使用 +`prefix='backbone'` 来告诉初始化函数移除权重文件中键值名称的前缀,比如把 +`backbone.conv1` 变成 `conv1`。方便起见,我们这里使用一个在线的权重文件链接,它 +会在训练前自动下载对应的文件,你也可以提前下载这个模型,然后使用本地路径。 + +接下来,新的配置文件需要按照新数据集的类别数目来修改分类头的配置。只需要修改分 +类头中的 `num_classes` 设置即可。 + +```python +model = dict( + backbone=dict( + 
init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +``` + +```{tip} +这里我们只需要设定我们想要修改的部分配置,其他配置将会自动从我们的父配置文件中获取。 +``` + +另外,有时我们在进行微调时会希望冻结主干网络前面几层的参数,这么做有助于在后续 +训练中,保持网络从预训练权重中获得的提取低阶特征的能力。在 MMClassification 中, +这一功能可以通过简单的一个 `frozen_stages` 参数来实现。比如我们需要冻结前两层网 +络的参数,只需要在上面的配置中添加一行: + +```python +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +``` + +```{note} +目前还不是所有的网络都支持 `frozen_stages` 参数,在使用之前,请先检查 +[文档](https://mmclassification.readthedocs.io/zh_CN/latest/api/models.html#backbones) +以确认你所使用的主干网络是否支持。 +``` + +## 修改数据集 + +当针对一个新的数据集进行微调时,我们通常都需要修改一些数据集相关的配置。比如这 +里,我们就需要把 CIFAR-10 数据集中的图像大小从 32 缩放到 224 来配合 ImageNet 上 +预训练模型的输入。这一需要可以通过修改数据集的预处理流水线(pipeline)来实现。 + +```python +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False, +) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) +``` + +## 修改训练策略设置 + +用于微调任务的超参数与默认配置不同,通常只需要较小的学习率和较少的训练时间。 + +```python +# 用于批大小为 128 的优化器学习率 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# 学习率衰减策略 +lr_config = dict(policy='step', step=[15]) +runner = dict(type='EpochBasedRunner', max_epochs=200) +log_config = dict(interval=100) +``` + +## 开始训练 + +现在,我们完成了用于微调的配置文件,完整的文件如下: + +```python +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cifar10_bs16.py', '../_base_/default_runtime.py' +] + +# 模型设置 +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# 数据集设置 +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False, +) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) + +# 训练策略设置 +# 用于批大小为 128 的优化器学习率 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = 
dict(grad_clip=None) +# 学习率衰减策略 +lr_config = dict(policy='step', step=[15]) +runner = dict(type='EpochBasedRunner', max_epochs=200) +log_config = dict(interval=100) +``` + +接下来,我们使用一台 8 张 GPU 的电脑来训练我们的模型,指令如下: + +```shell +bash tools/dist_train.sh configs/tutorial/resnet50_finetune_cifar.py 8 +``` + +当然,我们也可以使用单张 GPU 来进行训练,使用如下命令: + +```shell +python tools/train.py configs/tutorial/resnet50_finetune_cifar.py +``` + +但是如果我们使用单张 GPU 进行训练的话,需要在数据集设置部分作如下修改: + +```python +data = dict( + samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) +``` + +这是因为我们的训练策略是针对批次大小(batch size)为 128 设置的。在父配置文件中, +设置了 `samples_per_gpu=16`,如果使用 8 张 GPU,总的批次大小就是 128。而如果使 +用单张 GPU,就必须手动修改 `samples_per_gpu=128` 来匹配训练策略。 diff --git a/docs/zh_CN/tutorials/new_dataset.md b/docs/zh_CN/tutorials/new_dataset.md new file mode 100644 index 0000000..86782a1 --- /dev/null +++ b/docs/zh_CN/tutorials/new_dataset.md @@ -0,0 +1,230 @@ +# 教程 3:如何自定义数据集 + +我们支持许多常用的图像分类领域公开数据集,你可以在 +[此页面](https://mmclassification.readthedocs.io/zh_CN/latest/api/datasets.html)中找到它们。 + +在本节中,我们将介绍如何[使用自己的数据集](#使用自己的数据集)以及如何[使用数据集包装](#使用数据集包装)。 + +## 使用自己的数据集 + +### 将数据集重新组织为已有格式 + +想要使用自己的数据集,最简单的方法就是将数据集转换为现有的数据集格式。 + +对于多分类任务,我们推荐使用 [`CustomDataset`](https://mmclassification.readthedocs.io/zh_CN/latest/api/datasets.html#mmcls.datasets.CustomDataset) 格式。 + +`CustomDataset` 支持两种类型的数据格式: + +1. 提供一个标注文件,其中每一行表示一张样本图片。 + + 样本图片可以以任意的结构进行组织,比如: + + ``` + train/ + ├── folder_1 + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + ├── 123.png + ├── nsdf3.png + └── ... + ``` + + 而标注文件则记录了所有样本图片的文件路径以及相应的类别序号。其中第一列表示图像 + 相对于主目录(本例中为 `train` 目录)的路径,第二列表示类别序号: + + ``` + folder_1/xxx.png 0 + folder_1/xxy.png 1 + 123.png 1 + nsdf3.png 2 + ... + ``` + + ```{note} + 类别序号的值应当属于 `[0, num_classes - 1]` 范围。 + ``` + +2. 将所有样本文件按如下结构进行组织: + + ``` + train/ + ├── cat + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + │ └── xxz.png + ├── bird + │ ├── bird1.png + │ ├── bird2.png + │ └── ... + └── dog + ├── 123.png + ├── nsdf3.png + ├── ... + └── asd932_.png + ``` + + 这种情况下,你不需要提供标注文件,所有位于 `cat` 目录下的图片文件都会被视为 `cat` 类别的样本。 + +通常而言,我们会将整个数据集分为三个子数据集:`train`,`val` 和 `test`,分别用于训练、验证和测试。**每一个**子 +数据集都需要被组织成如上的一种结构。 + +举个例子,完整的数据集结构如下所示(使用第一种组织结构): + +``` +mmclassification +└── data + └── my_dataset + ├── meta + │ ├── train.txt + │ ├── val.txt + │ └── test.txt + ├── train + ├── val + └── test +``` + +之后在你的配置文件中,可以修改其中的 `data` 字段为如下格式: + +```python +... +dataset_type = 'CustomDataset' +classes = ['cat', 'bird', 'dog'] # 数据集中各类别的名称 + +data = dict( + train=dict( + type=dataset_type, + data_prefix='data/my_dataset/train', + ann_file='data/my_dataset/meta/train.txt', + classes=classes, + pipeline=train_pipeline + ), + val=dict( + type=dataset_type, + data_prefix='data/my_dataset/val', + ann_file='data/my_dataset/meta/val.txt', + classes=classes, + pipeline=test_pipeline + ), + test=dict( + type=dataset_type, + data_prefix='data/my_dataset/test', + ann_file='data/my_dataset/meta/test.txt', + classes=classes, + pipeline=test_pipeline + ) +) +... 
+``` + +### 创建一个新的数据集类 + +用户可以编写一个继承自 `BasesDataset` 的新数据集类,并重载 `load_annotations(self)` 方法, +类似 [CIFAR10](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/datasets/cifar.py) +和 [ImageNet](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/datasets/imagenet.py)。 + +通常,此方法返回一个包含所有样本的列表,其中的每个样本都是一个字典。字典中包含了必要的数据信息,例如 `img` 和 `gt_label`。 + +假设我们将要实现一个 `Filelist` 数据集,该数据集将使用文件列表进行训练和测试。注释列表的格式如下: + +``` +000001.jpg 0 +000002.jpg 1 +``` + +我们可以在 `mmcls/datasets/filelist.py` 中创建一个新的数据集类以加载数据。 + +```python +import mmcv +import numpy as np + +from .builder import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class Filelist(BaseDataset): + + def load_annotations(self): + assert isinstance(self.ann_file, str) + + data_infos = [] + with open(self.ann_file) as f: + samples = [x.strip().split(' ') for x in f.readlines()] + for filename, gt_label in samples: + info = {'img_prefix': self.data_prefix} + info['img_info'] = {'filename': filename} + info['gt_label'] = np.array(gt_label, dtype=np.int64) + data_infos.append(info) + return data_infos + +``` + +将新的数据集类加入到 `mmcls/datasets/__init__.py` 中: + +```python +from .base_dataset import BaseDataset +... +from .filelist import Filelist + +__all__ = [ + 'BaseDataset', ... ,'Filelist' +] +``` + +然后在配置文件中,为了使用 `Filelist`,用户可以按以下方式修改配置 + +```python +train = dict( + type='Filelist', + ann_file = 'image_list.txt', + pipeline=train_pipeline +) +``` + +## 使用数据集包装 + +数据集包装是一种可以改变数据集类行为的类,比如将数据集中的样本进行重复,或是将不同类别的数据进行再平衡。 + +### 重复数据集 + +我们使用 `RepeatDataset` 作为一个重复数据集的封装。举个例子,假设原始数据集是 `Dataset_A`,为了重复它,我们需要如下的配置文件: + +```python +data = dict( + train=dict( + type='RepeatDataset', + times=N, + dataset=dict( # 这里是 Dataset_A 的原始配置 + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) + ... +) +``` + +### 类别平衡数据集 + +我们使用 `ClassBalancedDataset` 作为根据类别频率对数据集进行重复采样的封装类。进行重复采样的数据集需要实现函数 `self.get_cat_ids(idx)` 以支持 `ClassBalancedDataset`。 + +举个例子,按照 `oversample_thr=1e-3` 对 `Dataset_A` 进行重复采样,需要如下的配置文件: + +```python +data = dict( + train = dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( # 这里是 Dataset_A 的原始配置 + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) + ... +) +``` + +更加具体的细节,请参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api/datasets.html#mmcls.datasets.ClassBalancedDataset)。 diff --git a/docs/zh_CN/tutorials/new_modules.md b/docs/zh_CN/tutorials/new_modules.md new file mode 100644 index 0000000..14ee32c --- /dev/null +++ b/docs/zh_CN/tutorials/new_modules.md @@ -0,0 +1,280 @@ +# 教程 5:如何增加新模块 + +## 开发新组件 + +我们基本上将模型组件分为 3 种类型。 + +- 主干网络:通常是一个特征提取网络,例如 ResNet、MobileNet +- 颈部:用于连接主干网络和头部的组件,例如 GlobalAveragePooling +- 头部:用于执行特定任务的组件,例如分类和回归 + +### 添加新的主干网络 + +这里,我们以 ResNet_CIFAR 为例,展示了如何开发一个新的主干网络组件。 + +ResNet_CIFAR 针对 CIFAR 32x32 的图像输入,将 ResNet 中 `kernel_size=7, stride=2` 的设置替换为 `kernel_size=3, stride=1`,并移除了 stem 层之后的 +`MaxPooling`,以避免传递过小的特征图到残差块中。 + +它继承自 `ResNet` 并只修改了 stem 层。 + +1. 创建一个新文件 `mmcls/models/backbones/resnet_cifar.py`。 + +```python +import torch.nn as nn + +from ..builder import BACKBONES +from .resnet import ResNet + + +@BACKBONES.register_module() +class ResNet_CIFAR(ResNet): + + """ResNet backbone for CIFAR. + + (对这个主干网络的简短描述) + + Args: + depth(int): Network depth, from {18, 34, 50, 101, 152}. + ... 
+ (参数文档) + """ + + def __init__(self, depth, deep_stem=False, **kwargs): + # 调用基类 ResNet 的初始化函数 + super(ResNet_CIFAR, self).__init__(depth, deep_stem=deep_stem **kwargs) + # 其他特殊的初始化流程 + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + # 重载基类的方法,以实现对网络结构的修改 + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): # 需要返回一个元组 + pass # 此处省略了网络的前向实现 + + def init_weights(self, pretrained=None): + pass # 如果有必要的话,重载基类 ResNet 的参数初始化函数 + + def train(self, mode=True): + pass # 如果有必要的话,重载基类 ResNet 的训练状态函数 +``` + +2. 在 `mmcls/models/backbones/__init__.py` 中导入新模块 + +```python +... +from .resnet_cifar import ResNet_CIFAR + +__all__ = [ + ..., 'ResNet_CIFAR' +] +``` + +3. 在配置文件中使用新的主干网络 + +```python +model = dict( + ... + backbone=dict( + type='ResNet_CIFAR', + depth=18, + other_arg=xxx), + ... +``` + +### 添加新的颈部组件 + +这里我们以 `GlobalAveragePooling` 为例。这是一个非常简单的颈部组件,没有任何参数。 + +要添加新的颈部组件,我们主要需要实现 `forward` 函数,该函数对主干网络的输出进行 +一些操作并将结果传递到头部。 + +1. 创建一个新文件 `mmcls/models/necks/gap.py` + + ```python + import torch.nn as nn + + from ..builder import NECKS + + @NECKS.register_module() + class GlobalAveragePooling(nn.Module): + + def __init__(self): + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, inputs): + # 简单起见,我们默认输入是一个张量 + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + return outs + ``` + +2. 在 `mmcls/models/necks/__init__.py` 中导入新模块 + + ```python + ... + from .gap import GlobalAveragePooling + + __all__ = [ + ..., 'GlobalAveragePooling' + ] + ``` + +3. 修改配置文件以使用新的颈部组件 + + ```python + model = dict( + neck=dict(type='GlobalAveragePooling'), + ) + ``` + +### 添加新的头部组件 + +在此,我们以 `LinearClsHead` 为例,说明如何开发新的头部组件。 + +要添加一个新的头部组件,基本上我们需要实现 `forward_train` 函数,它接受来自颈部 +或主干网络的特征图作为输入,并基于真实标签计算。 + +1. 创建一个文件 `mmcls/models/heads/linear_head.py`. + + ```python + from ..builder import HEADS + from .cls_head import ClsHead + + + @HEADS.register_module() + class LinearClsHead(ClsHead): + + def __init__(self, + num_classes, + in_channels, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, )): + super(LinearClsHead, self).__init__(loss=loss, topk=topk) + self.in_channels = in_channels + self.num_classes = num_classes + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self._init_layers() + + def _init_layers(self): + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def init_weights(self): + normal_init(self.fc, mean=0, std=0.01, bias=0) + + def forward_train(self, x, gt_label): + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label) + return losses + + ``` + +2. 在 `mmcls/models/heads/__init__.py` 中导入这个模块 + + ```python + ... + from .linear_head import LinearClsHead + + __all__ = [ + ..., 'LinearClsHead' + ] + ``` + +3. 
修改配置文件以使用新的头部组件。 + +连同 `GlobalAveragePooling` 颈部组件,完整的模型配置如下: + +```python +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +``` + +### 添加新的损失函数 + +要添加新的损失函数,我们主要需要在损失函数模块中 `forward` 函数。另外,利用装饰器 `weighted_loss` 可以方便的实现对每个元素的损失进行加权平均。 + +假设我们要模拟从另一个分类模型生成的概率分布,需要添加 `L1loss` 来实现该目的。 + +1. 创建一个新文件 `mmcls/models/losses/l1_loss.py` + + ```python + import torch + import torch.nn as nn + + from ..builder import LOSSES + from .utils import weighted_loss + + @weighted_loss + def l1_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + @LOSSES.register_module() + class L1Loss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(L1Loss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * l1_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss + ``` + +2. 在文件 `mmcls/models/losses/__init__.py` 中导入这个模块 + + ```python + ... + from .l1_loss import L1Loss, l1_loss + + __all__ = [ + ..., 'L1Loss', 'l1_loss' + ] + ``` + +3. 修改配置文件中的 `loss` 字段以使用新的损失函数 + + ```python + loss=dict(type='L1Loss', loss_weight=1.0)) + ``` diff --git a/docs/zh_CN/tutorials/runtime.md b/docs/zh_CN/tutorials/runtime.md new file mode 100644 index 0000000..0be7999 --- /dev/null +++ b/docs/zh_CN/tutorials/runtime.md @@ -0,0 +1,260 @@ +# 教程 7:如何自定义模型运行参数 + +在本教程中,我们将介绍如何在运行自定义模型时,进行自定义工作流和钩子的方法。 + + + +- [定制工作流](#定制工作流) +- [钩子](#钩子) + - [默认训练钩子](#默认训练钩子) + - [使用内置钩子](#使用内置钩子) + - [自定义钩子](#自定义钩子) +- [常见问题](#常见问题) + + + +## 定制工作流 + +工作流是一个形如 (任务名,周期数) 的列表,用于指定运行顺序和周期。这里“周期数”的单位由执行器的类型来决定。 + +比如在 MMClassification 中,我们默认使用基于**轮次**的执行器(`EpochBasedRunner`),那么“周期数”指的就是对应的任务在一个周期中 +要执行多少个轮次。通常,我们只希望执行训练任务,那么只需要使用以下设置: + +```python +workflow = [('train', 1)] +``` + +有时我们可能希望在训练过程中穿插检查模型在验证集上的一些指标(例如,损失,准确性)。 + +在这种情况下,可以将工作流程设置为: + +```python +[('train', 1), ('val', 1)] +``` + +这样一来,程序会一轮训练一轮测试地反复执行。 + +需要注意的是,默认情况下,我们并不推荐用这种方式来进行模型验证,而是推荐在训练中使用 **`EvalHook`** 进行模型验证。使用上述工作流的方式进行模型验证只是一个替代方案。 + +```{note} +1. 在验证周期时不会更新模型参数。 +2. 配置文件内的关键词 `max_epochs` 控制训练时期数,并且不会影响验证工作流程。 +3. 
工作流 `[('train', 1), ('val', 1)]` 和 `[('train', 1)]` 不会改变 `EvalHook` 的行为。 + 因为 `EvalHook` 由 `after_train_epoch` 调用,而验证工作流只会影响 `after_val_epoch` 调用的钩子。 + 因此,`[('train', 1), ('val', 1)]` 和 ``[('train', 1)]`` 的区别在于,runner 在完成每一轮训练后,会计算验证集上的损失。 +``` + +## 钩子 + +钩子机制在 OpenMMLab 开源算法库中应用非常广泛,结合执行器可以实现对训练过程的整个生命周期进行管理,可以通过[相关文章](https://zhuanlan.zhihu.com/p/355272220)进一步理解钩子。 + +钩子只有在构造器中被注册才起作用,目前钩子主要分为两类: + +- 默认训练钩子 + +默认训练钩子由运行器默认注册,一般为一些基础型功能的钩子,已经有确定的优先级,一般不需要修改优先级。 + +- 定制钩子 + +定制钩子通过 `custom_hooks` 注册,一般为一些增强型功能的钩子,需要在配置文件中指定优先级,不指定该钩子的优先级将默被设定为 'NORMAL'。 + +**优先级列表** + +| Level | Value | +| :-------------: | :---: | +| HIGHEST | 0 | +| VERY_HIGH | 10 | +| HIGH | 30 | +| ABOVE_NORMAL | 40 | +| NORMAL(default) | 50 | +| BELOW_NORMAL | 60 | +| LOW | 70 | +| VERY_LOW | 90 | +| LOWEST | 100 | + +优先级确定钩子的执行顺序,每次训练前,日志会打印出各个阶段钩子的执行顺序,方便调试。 + +### 默认训练钩子 + +有一些常见的钩子未通过 `custom_hooks` 注册,但会在运行器(`Runner`)中默认注册,它们是: + +| Hooks | Priority | +| :-------------------: | :---------------: | +| `LrUpdaterHook` | VERY_HIGH (10) | +| `MomentumUpdaterHook` | HIGH (30) | +| `OptimizerHook` | ABOVE_NORMAL (40) | +| `CheckpointHook` | NORMAL (50) | +| `IterTimerHook` | LOW (70) | +| `EvalHook` | LOW (70) | +| `LoggerHook(s)` | VERY_LOW (90) | + +`OptimizerHook`,`MomentumUpdaterHook`和 `LrUpdaterHook` 在 [优化策略](./schedule.md) 部分进行了介绍, +`IterTimerHook` 用于记录所用时间,目前不支持修改; + +下面介绍如何使用去定制 `CheckpointHook`、`LoggerHooks` 以及 `EvalHook`。 + +#### 权重文件钩子(CheckpointHook) + +MMCV 的 runner 使用 `checkpoint_config` 来初始化 [`CheckpointHook`](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/hooks/checkpoint.py#L9)。 + +```python +checkpoint_config = dict(interval=1) +``` + +用户可以设置 “max_keep_ckpts” 来仅保存少量模型权重文件,或者通过 “save_optimizer” 决定是否存储优化器的状态字典。 +更多细节可参考 [这里](https://mmcv.readthedocs.io/zh_CN/latest/api.html#mmcv.runner.CheckpointHook)。 + +#### 日志钩子(LoggerHooks) + +`log_config` 包装了多个记录器钩子,并可以设置间隔。 +目前,MMCV 支持 `TextLoggerHook`、 `WandbLoggerHook`、`MlflowLoggerHook` 和 `TensorboardLoggerHook`。 +更多细节可参考[这里](https://mmcv.readthedocs.io/zh_CN/latest/api.html#mmcv.runner.LoggerHook)。 + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +``` + +#### 验证钩子(EvalHook) + +配置中的 `evaluation` 字段将用于初始化 [`EvalHook`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/evaluation.py)。 + +`EvalHook` 有一些保留参数,如 `interval`,`save_best` 和 `start` 等。其他的参数,如“metrics”将被传递给 `dataset.evaluate()`。 + +```python +evaluation = dict(interval=1, metric='accuracy', metric_options={'topk': (1, )}) +``` + +我们可以通过参数 `save_best` 保存取得最好验证结果时的模型权重: + +```python +# "auto" 表示自动选择指标来进行模型的比较。也可以指定一个特定的 key 比如 "accuracy_top-1"。 +evaluation = dict(interval=1, save_best=True, metric='accuracy', metric_options={'topk': (1, )}) +``` + +在跑一些大型实验时,可以通过修改参数 `start` 跳过训练靠前轮次时的验证步骤,以节约时间。如下: + +```python +evaluation = dict(interval=1, start=200, metric='accuracy', metric_options={'topk': (1, )}) +``` + +表示在第 200 轮之前,只执行训练流程,不执行验证;从轮次 200 开始,在每一轮训练之后进行验证。 + +```{note} +在 MMClassification 的默认配置文件中,evaluation 字段一般被放在 datasets 基础配置文件中。 +``` + +### 使用内置钩子 + +一些钩子已在 MMCV 和 MMClassification 中实现: + +- [EMAHook](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/ema.py) +- [SyncBuffersHook](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/sync_buffer.py) +- [EmptyCacheHook](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/memory.py) +- 
[ProfilerHook](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/profiler.py) +- ...... + +可以直接修改配置以使用该钩子,如下格式: + +```python +custom_hooks = [ + dict(type='MMCVHook', a=a_value, b=b_value, priority='NORMAL') +] +``` + +例如使用 `EMAHook`,进行一次 EMA 的间隔是100个迭代: + +```python +custom_hooks = [ + dict(type='EMAHook', interval=100, priority='HIGH') +] +``` + +## 自定义钩子 + +### 创建一个新钩子 + +这里举一个在 MMClassification 中创建一个新钩子,并在训练中使用它的示例: + +```python +from mmcv.runner import HOOKS, Hook + + +@HOOKS.register_module() +class MyHook(Hook): + + def __init__(self, a, b): + pass + + def before_run(self, runner): + pass + + def after_run(self, runner): + pass + + def before_epoch(self, runner): + pass + + def after_epoch(self, runner): + pass + + def before_iter(self, runner): + pass + + def after_iter(self, runner): + pass +``` + +根据钩子的功能,用户需要指定钩子在训练的每个阶段将要执行的操作,比如 `before_run`,`after_run`,`before_epoch`,`after_epoch`,`before_iter` 和 `after_iter`。 + +### 注册新钩子 + +之后,需要导入 `MyHook`。假设该文件在 `mmcls/core/utils/my_hook.py`,有两种办法导入它: + +- 修改 `mmcls/core/utils/__init__.py` 进行导入 + + 新定义的模块应导入到 `mmcls/core/utils/__init__py` 中,以便注册器能找到并添加新模块: + +```python +from .my_hook import MyHook + +__all__ = ['MyHook'] +``` + +- 使用配置文件中的 `custom_imports` 变量手动导入 + +```python +custom_imports = dict(imports=['mmcls.core.utils.my_hook'], allow_failed_imports=False) +``` + +### 修改配置 + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value) +] +``` + +还可通过 `priority` 参数设置钩子优先级,如下所示: + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL') +] +``` + +默认情况下,在注册过程中,钩子的优先级设置为“NORMAL”。 + +## 常见问题 + +### 1. resume_from, load_from,init_cfg.Pretrained 区别 + +- `load_from` :仅仅加载模型权重,主要用于加载预训练或者训练好的模型; + +- `resume_from` :不仅导入模型权重,还会导入优化器信息,当前轮次(epoch)信息,主要用于从断点继续训练。 + +- `init_cfg.Pretrained` :在权重初始化期间加载权重,您可以指定要加载的模块。 这通常在微调模型时使用,请参阅[教程 2:如何微调模型](./finetune.md) diff --git a/docs/zh_CN/tutorials/schedule.md b/docs/zh_CN/tutorials/schedule.md new file mode 100644 index 0000000..931edd0 --- /dev/null +++ b/docs/zh_CN/tutorials/schedule.md @@ -0,0 +1,333 @@ +# 教程 6:如何自定义优化策略 + +在本教程中,我们将介绍如何在运行自定义模型时,进行构造优化器、定制学习率及动量调整策略、梯度裁剪、梯度累计以及用户自定义优化方法等。 + + + +- [构造 PyTorch 内置优化器](#构造-pytorch-内置优化器) +- [定制学习率调整策略](#定制学习率调整策略) + - [学习率衰减曲线](#定制学习率衰减曲线) + - [学习率预热策略](#定制学习率预热策略) +- [定制动量调整策略](#定制动量调整策略) +- [参数化精细配置](#参数化精细配置) +- [梯度裁剪与梯度累计](#梯度裁剪与梯度累计) + - [梯度裁剪](#梯度裁剪) + - [梯度累计](#梯度累计) +- [用户自定义优化方法](#用户自定义优化方法) + - [自定义优化器](#自定义优化器) + - [自定义优化器构造器](#自定义优化器构造器) + + + +## 构造 PyTorch 内置优化器 + +MMClassification 支持 PyTorch 实现的所有优化器,仅需在配置文件中,指定 “optimizer” 字段。 +例如,如果要使用 “SGD”,则修改如下。 + +```python +optimizer = dict(type='SGD', lr=0.0003, weight_decay=0.0001) +``` + +要修改模型的学习率,只需要在优化器的配置中修改 `lr` 即可。 +要配置其他参数,可直接根据 [PyTorch API 文档](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) 进行。 + +```{note} +配置文件中的 'type' 不是构造时的参数,而是 PyTorch 内置优化器的类名。 +``` + +例如,如果想使用 `Adam` 并设置参数为 `torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)`, +则需要进行如下修改 + +```python +optimizer = dict(type='Adam', lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False) +``` + +## 定制学习率调整策略 + +### 定制学习率衰减曲线 + +深度学习研究中,广泛应用学习率衰减来提高网络的性能。要使用学习率衰减,可以在配置中设置 `lr_confg` 字段。 + +比如在默认的 ResNet 网络训练中,我们使用阶梯式的学习率衰减策略,配置文件为: + +```python +lr_config = dict(policy='step', step=[100, 150]) +``` + +在训练过程中,程序会周期性地调用 MMCV 中的 [`StepLRHook`](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L153) 来进行学习率更新。 + 
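+下面给出一个简化的纯 Python 示意(并非 MMCV 的实现,仅用于说明;假设阶梯衰减系数 `gamma` 取 0.1),演示 `policy='step', step=[100, 150]` 配置下学习率随轮次的变化方式:
+
+```python
+# 纯 Python 示意:阶梯式(step)学习率衰减,假设衰减系数 gamma=0.1
+def step_lr(base_lr, epoch, steps, gamma=0.1):
+    # 每越过一个衰减节点,学习率就乘以一次 gamma
+    num_decays = sum(1 for s in steps if epoch >= s)
+    return base_lr * gamma ** num_decays
+
+for epoch in (0, 99, 100, 149, 150):
+    print(epoch, step_lr(0.1, epoch, steps=[100, 150]))
+# 学习率依次约为 0.1, 0.1, 0.01, 0.01, 0.001
+```
+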
+此外,我们也支持其他学习率调整方法,如 `CosineAnnealing` 和 `Poly` 等。详情可见 [这里](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py) + +- ConsineAnnealing: + + ```python + lr_config = dict(policy='CosineAnnealing', min_lr_ratio=1e-5) + ``` + +- Poly: + + ```python + lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) + ``` + +### 定制学习率预热策略 + +在训练的早期阶段,网络容易不稳定,而学习率的预热就是为了减少这种不稳定性。通过预热,学习率将会从一个很小的值逐步提高到预定值。 + +在 MMClassification 中,我们同样使用 `lr_config` 配置学习率预热策略,主要的参数有以下几个: + +- `warmup` : 学习率预热曲线类别,必须为 'constant'、 'linear', 'exp' 或者 `None` 其一, 如果为 `None`, 则不使用学习率预热策略。 +- `warmup_by_epoch` : 是否以轮次(epoch)为单位进行预热。 +- `warmup_iters` : 预热的迭代次数,当 `warmup_by_epoch=True` 时,单位为轮次(epoch);当 `warmup_by_epoch=False` 时,单位为迭代次数(iter)。 +- `warmup_ratio` : 预测的初始学习率 `lr = lr * warmup_ratio`。 + +例如: + +1. 逐**迭代次数**地**线性**预热 + + ```python + lr_config = dict( + policy='CosineAnnealing', + by_epoch=False, + min_lr_ratio=1e-2, + warmup='linear', + warmup_ratio=1e-3, + warmup_iters=20 * 1252, + warmup_by_epoch=False) + ``` + +2. 逐**轮次**地**指数**预热 + + ```python + lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='exp', + warmup_iters=5, + warmup_ratio=0.1, + warmup_by_epoch=True) + ``` + +```{tip} +配置完成后,可以使用 MMClassification 提供的 [学习率可视化工具](https://mmclassification.readthedocs.io/zh_CN/latest/tools/visualization.html#id3) 画出对应学习率调整曲线。 +``` + +## 定制动量调整策略 + +MMClassification 支持动量调整器根据学习率修改模型的动量,从而使模型收敛更快。 + +动量调整程序通常与学习率调整器一起使用,例如,以下配置用于加速收敛。 +更多细节可参考 [CyclicLrUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327) 和 [CyclicMomentumUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130)。 + +这里是一个用例: + +```python +lr_config = dict( + policy='cyclic', + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, +) +momentum_config = dict( + policy='cyclic', + target_ratio=(0.85 / 0.95, 1), + cyclic_times=1, + step_ratio_up=0.4, +) +``` + +## 参数化精细配置 + +一些模型可能具有一些特定于参数的设置以进行优化,例如 BatchNorm 层不添加权重衰减或者对不同的网络层使用不同的学习率。 +在 MMClassification 中,我们通过 `optimizer` 的 `paramwise_cfg` 参数进行配置,可以参考[MMCV](https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/optimizer/default_constructor.html#DefaultOptimizerConstructor)。 + +- 使用指定选项 + + MMClassification 提供了包括 `bias_lr_mult`、 `bias_decay_mult`、 `norm_decay_mult`、 `dwconv_decay_mult`、 `dcn_offset_lr_mult` 和 `bypass_duplicate` 选项,指定相关所有的 `bais`、 `norm`、 `dwconv`、 `dcn` 和 `bypass` 参数。例如令模型中所有的 BN 不进行参数衰减: + + ```python + optimizer = dict( + type='SGD', + lr=0.8, + weight_decay=1e-4, + paramwise_cfg=dict(norm_decay_mult=0.) 
+ ) + ``` + +- 使用 `custom_keys` 指定参数 + + MMClassification 可通过 `custom_keys` 指定不同的参数使用不同的学习率或者权重衰减,例如对特定的参数不使用权重衰减: + + ```python + paramwise_cfg = dict( + custom_keys={ + 'backbone.cls_token': dict(decay_mult=0.0), + 'backbone.pos_embed': dict(decay_mult=0.0) + }) + + optimizer = dict( + type='SGD', + lr=0.8, + weight_decay=1e-4, + paramwise_cfg=paramwise_cfg) + ``` + + 对 backbone 使用更小的学习率与衰减系数: + + ```python + optimizer = dict( + type='SGD', + lr=0.8, + weight_decay=1e-4, + # backbone 的 'lr' and 'weight_decay' 分别为 0.1 * lr 和 0.9 * weight_decay + paramwise_cfg = dict(custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=0.9)})) + ``` + +## 梯度裁剪与梯度累计 + +除了 PyTorch 优化器的基本功能,我们还提供了一些对优化器的增强功能,例如梯度裁剪、梯度累计等,参考 [MMCV](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py)。 + +### 梯度裁剪 + +在训练过程中,损失函数可能接近于一些异常陡峭的区域,从而导致梯度爆炸。而梯度裁剪可以帮助稳定训练过程,更多介绍可以参见[该页面](https://paperswithcode.com/method/gradient-clipping)。 + +目前我们支持在 `optimizer_config` 字段中添加 `grad_clip` 参数来进行梯度裁剪,更详细的参数可参考 [PyTorch 文档](https://pytorch.org/docs/stable/generated/torch.nn.utils.clip_grad_norm_.html)。 + +用例如下: + +```python +# norm_type: 使用的范数类型,此处使用范数2。 +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +``` + +当使用继承并修改基础配置方式时,如果基础配置中 `grad_clip=None`,需要添加 `_delete_=True`。有关 `_delete_` 可以参考[教程 1:如何编写配置文件](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/config.html#id16)。案例如下: + +```python +_base_ = [./_base_/schedules/imagenet_bs256_coslr.py] + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2), _delete_=True, type='OptimizerHook') +# 当 type 为 'OptimizerHook',可以省略 type;其他情况下,此处必须指明 type='xxxOptimizerHook'。 +``` + +### 梯度累计 + +计算资源缺乏缺乏时,每个训练批次的大小(batch size)只能设置为较小的值,这可能会影响模型的性能。 + +可以使用梯度累计来规避这一问题。 + +用例如下: + +```python +data = dict(samples_per_gpu=64) +optimizer_config = dict(type="GradientCumulativeOptimizerHook", cumulative_iters=4) +``` + +表示训练时,每 4 个 iter 执行一次反向传播。由于此时单张 GPU 上的批次大小为 64,也就等价于单张 GPU 上一次迭代的批次大小为 256,也即: + +```python +data = dict(samples_per_gpu=256) +optimizer_config = dict(type="OptimizerHook") +``` + +```{note} +当在 `optimizer_config` 不指定优化器钩子类型时,默认使用 `OptimizerHook`。 +``` + +## 用户自定义优化方法 + +在学术研究和工业实践中,可能需要使用 MMClassification 未实现的优化方法,可以通过以下方法添加。 + +```{note} +本部分将修改 MMClassification 源码或者向 MMClassification 框架添加代码,初学者可跳过。 +``` + +### 自定义优化器 + +#### 1. 定义一个新的优化器 + +一个自定义的优化器可根据如下规则进行定制 + +假设我们想添加一个名为 `MyOptimzer` 的优化器,其拥有参数 `a`, `b` 和 `c`。 +可以创建一个名为 `mmcls/core/optimizer` 的文件夹,并在目录下的一个文件,如 `mmcls/core/optimizer/my_optimizer.py` 中实现该自定义优化器: + +```python +from mmcv.runner import OPTIMIZERS +from torch.optim import Optimizer + + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c): + +``` + +#### 2. 注册优化器 + +要注册上面定义的上述模块,首先需要将此模块导入到主命名空间中。有两种方法可以实现它。 + +- 修改 `mmcls/core/optimizer/__init__.py`,将其导入至 `optimizer` 包;再修改 `mmcls/core/__init__.py` 以导入 `optimizer` 包 + + 创建 `mmcls/core/optimizer/__init__.py` 文件。 + 新定义的模块应导入到 `mmcls/core/optimizer/__init__.py` 中,以便注册器能找到新模块并将其添加: + +```python +# 在 mmcls/core/optimizer/__init__.py 中 +from .my_optimizer import MyOptimizer # MyOptimizer 是我们自定义的优化器的名字 + +__all__ = ['MyOptimizer'] +``` + +```python +# 在 mmcls/core/__init__.py 中 +... 
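+# 通配导入会触发 optimizer 子包的导入,使其中通过注册器装饰的 MyOptimizer 自动完成注册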
+#### 2. Register the optimizer
+
+To register the module defined above, it first needs to be imported into the main namespace. There are two ways to achieve this.
+
+- Modify `mmcls/core/optimizer/__init__.py` to import it into the `optimizer` package, and then modify `mmcls/core/__init__.py` to import the `optimizer` package
+
+  Create the `mmcls/core/optimizer/__init__.py` file.
+  The newly defined module should be imported in `mmcls/core/optimizer/__init__.py` so that the registry can find the new module and add it:
+
+```python
+# In mmcls/core/optimizer/__init__.py
+from .my_optimizer import MyOptimizer # MyOptimizer is the name of our customized optimizer
+
+__all__ = ['MyOptimizer']
+```
+
+```python
+# In mmcls/core/__init__.py
+...
+from .optimizer import *  # noqa: F401, F403
+```
+
+- Use `custom_imports` in the config to import it manually
+
+```python
+custom_imports = dict(imports=['mmcls.core.optimizer.my_optimizer'], allow_failed_imports=False)
+```
+
+The `mmcls.core.optimizer.my_optimizer` module will be imported at the beginning of the program, and the `MyOptimizer` class will then be registered automatically.
+Note that only the package containing the `MyOptimizer` class should be imported; `mmcls.core.optimizer.my_optimizer.MyOptimizer` **cannot** be imported directly.
+
+#### 3. Specify the optimizer in the config file
+
+After that, you can use `MyOptimizer` in the `optimizer` field of config files.
+In the configs, the optimizer is defined by the `optimizer` field as follows:
+
+```python
+optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+```
+
+To use your own optimizer, the field can be changed to
+
+```python
+optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value)
+```
+
+### Customize the optimizer constructor
+
+Some models may have parameter-specific settings for optimization, for example, the weight decay of BatchNorm layers.
+
+Although our `DefaultOptimizerConstructor` already provides these powerful features, it may still not cover every need.
+In that case, you can do other fine-grained parameter adjustments through a customized optimizer constructor.
+
+```python
+from mmcv.runner.optimizer import OPTIMIZER_BUILDERS
+
+
+@OPTIMIZER_BUILDERS.register_module()
+class MyOptimizerConstructor:
+
+    def __init__(self, optimizer_cfg, paramwise_cfg=None):
+        pass
+
+    def __call__(self, model):
+        ...  # Implement your own optimizer constructor here.
+        return my_optimizer
+```
+
+The implementation of our default optimizer constructor can be found [here](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/optimizer/default_constructor.py#L11), and it can also be used as a template for new optimizer constructor implementations.
diff --git a/downstream/mmdetection/.circleci/config.yml b/downstream/mmdetection/.circleci/config.yml
new file mode 100644
index 0000000..da7cd35
--- /dev/null
+++ b/downstream/mmdetection/.circleci/config.yml
@@ -0,0 +1,162 @@
+version: 2.1
+
+jobs:
+  lint:
+    docker:
+      - image: cimg/python:3.7.4
+    steps:
+      - checkout
+      - run:
+          name: Install pre-commit hook
+          command: |
+            pip install pre-commit
+            pre-commit install
+      - run:
+          name: Linting
+          command: pre-commit run --all-files
+      - run:
+          name: Check docstring coverage
+          command: |
+            pip install interrogate
+            interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 50 mmdet
+
+  build_cpu:
+    parameters:
+      # The python version must match available image tags in
+      # https://circleci.com/developer/images/image/cimg/python
+      python:
+        type: string
+        default: "3.7.4"
+      torch:
+        type: string
+      torchvision:
+        type: string
+    docker:
+      - image: cimg/python:<< parameters.python >>
+    resource_class: large
+    steps:
+      - checkout
+      - run:
+          name: Install Libraries
+          command: |
+            sudo apt-get update
+            sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5
+      - run:
+          name: Configure Python & pip
+          command: |
+            pip install --upgrade pip
+            pip install wheel
+      - run:
+          name: Install PyTorch
+          command: |
+            python -V
+            pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
+      - when:
+          condition:
+            equal: [ "3.9.0", << parameters.python >> ]
+          steps:
+            - run: pip install "protobuf <= 3.20.1" && sudo apt-get update && sudo apt-get -y install libprotobuf-dev protobuf-compiler cmake
+      - run:
+          name: Install mmdet dependencies
+          command: |
+            pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch<< parameters.torch >>/index.html
+            pip install -r requirements/tests.txt -r requirements/optional.txt
+            pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
+            pip install git+https://github.com/cocodataset/panopticapi.git
+      - run:
+          name: Build and install
+          command: |
+            pip install -e .
+ - run: + name: Run unittests + command: | + coverage run --branch --source mmdet -m pytest tests/ + coverage xml + coverage report -m + + build_cu101: + machine: + image: ubuntu-1604-cuda-10.1:201909-23 + resource_class: gpu.nvidia.small + steps: + - checkout + - run: + name: Install Libraries + command: | + sudo apt-get update + sudo apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx + - run: + name: Configure Python & pip + command: | + pyenv global 3.7.0 + pip install --upgrade pip + pip install wheel + - run: + name: Install PyTorch + command: | + python -V + pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html + - run: + name: Install mmdet dependencies + # pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch${{matrix.torch_version}}/index.html + command: | + pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch1.6.0/index.html + pip install -r requirements/tests.txt -r requirements/optional.txt + pip install pycocotools + pip install albumentations>=0.3.2 --no-binary imgaug,albumentations + pip install git+https://github.com/cocodataset/panopticapi.git + python -c 'import mmcv; print(mmcv.__version__)' + - run: + name: Build and install + command: | + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 pip install -e . + - run: + name: Run unittests + command: | + pytest tests/ + +workflows: + unit_tests: + jobs: + - lint + - build_cpu: + name: build_cpu_th1.6 + torch: 1.6.0 + torchvision: 0.7.0 + requires: + - lint + - build_cpu: + name: build_cpu_th1.7 + torch: 1.7.0 + torchvision: 0.8.1 + requires: + - lint + - build_cpu: + name: build_cpu_th1.8_py3.9 + torch: 1.8.0 + torchvision: 0.9.0 + python: "3.9.0" + requires: + - lint + - build_cpu: + name: build_cpu_th1.9_py3.8 + torch: 1.9.0 + torchvision: 0.10.0 + python: "3.8.12" + requires: + - lint + - build_cpu: + name: build_cpu_th1.9_py3.9 + torch: 1.9.0 + torchvision: 0.10.0 + python: "3.9.0" + requires: + - lint + - build_cu101: + requires: + - build_cpu_th1.6 + - build_cpu_th1.7 + - build_cpu_th1.8_py3.9 + - build_cpu_th1.9_py3.8 + - build_cpu_th1.9_py3.9 diff --git a/downstream/mmdetection/.dev_scripts/batch_test_list.py b/downstream/mmdetection/.dev_scripts/batch_test_list.py new file mode 100644 index 0000000..1e74ce2 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/batch_test_list.py @@ -0,0 +1,359 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# yapf: disable +atss = dict( + config='configs/atss/atss_r50_fpn_1x_coco.py', + checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth', + eval='bbox', + metric=dict(bbox_mAP=39.4), +) +autoassign = dict( + config='configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py', + checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth', + eval='bbox', + metric=dict(bbox_mAP=40.4), +) +carafe = dict( + config='configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py', + checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=38.6), +) +cascade_rcnn = [ + dict( + config='configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py', + checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth', + eval='bbox', + metric=dict(bbox_mAP=40.3), + ), + dict( + config='configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py', + checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth', + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=41.2, segm_mAP=35.9), + ), +] +cascade_rpn = dict( + config='configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py', + checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth', + eval='bbox', + metric=dict(bbox_mAP=40.4), +) +centripetalnet = dict( + config='configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py', # noqa + checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=44.7), +) +cornernet = dict( + config='configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py', + checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=41.2), +) +dcn = dict( + config='configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py', + checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth', + eval='bbox', + metric=dict(bbox_mAP=41.3), +) +deformable_detr = dict( + config='configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py', + checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=44.5), +) +detectors = dict( + config='configs/detectors/detectors_htc_r50_1x_coco.py', + checkpoint='detectors_htc_r50_1x_coco-329b1453.pth', + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=49.1, segm_mAP=42.6), +) +detr = dict( + config='configs/detr/detr_r50_8x2_150e_coco.py', + checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth', + eval='bbox', + metric=dict(bbox_mAP=40.1), +) +double_heads = dict( + config='configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py', + checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth', + eval='bbox', + metric=dict(bbox_mAP=40.0), +) +dynamic_rcnn = dict( + config='configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py', + checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth', + eval='bbox', + metric=dict(bbox_mAP=38.9), +) +empirical_attention = dict( + config='configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py', # noqa + checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=40.0), +) +faster_rcnn = dict( + config='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', + checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', + eval='bbox', + metric=dict(bbox_mAP=37.4), +) +fcos = dict( + 
config='configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py', # noqa + checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=38.7), +) +foveabox = dict( + config='configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py', + checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth', + eval='bbox', + metric=dict(bbox_mAP=37.9), +) +free_anchor = dict( + config='configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py', + checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth', + eval='bbox', + metric=dict(bbox_mAP=38.7), +) +fsaf = dict( + config='configs/fsaf/fsaf_r50_fpn_1x_coco.py', + checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth', + eval='bbox', + metric=dict(bbox_mAP=37.4), +) +gcnet = dict( + config='configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py', # noqa + checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=40.4, segm_mAP=36.2), +) +gfl = dict( + config='configs/gfl/gfl_r50_fpn_1x_coco.py', + checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth', + eval='bbox', + metric=dict(bbox_mAP=40.2), +) +gn = dict( + config='configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py', + checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth', + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=40.1, segm_mAP=36.4), +) +gn_ws = dict( + config='configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py', + checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth', + eval='bbox', + metric=dict(bbox_mAP=39.7), +) +grid_rcnn = dict( + config='configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py', + checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth', + eval='bbox', + metric=dict(bbox_mAP=40.4), +) +groie = dict( + config='configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py', + checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=38.3), +) +guided_anchoring = [ + dict( + config='configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py', # noqa + checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth', + eval='bbox', + metric=dict(bbox_mAP=36.9), + ), + dict( + config='configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py', + checkpoint='ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=39.6), + ), +] +hrnet = dict( + config='configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py', + checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth', + eval='bbox', + metric=dict(bbox_mAP=36.9), +) +htc = dict( + config='configs/htc/htc_r50_fpn_1x_coco.py', + checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth', + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=42.3, segm_mAP=37.4), +) +libra_rcnn = dict( + config='configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py', + checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth', + eval='bbox', + metric=dict(bbox_mAP=38.3), +) +mask_rcnn = dict( + config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py', + checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth', + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=38.2, segm_mAP=34.7), +) +ms_rcnn = dict( + config='configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py', + 
checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth', + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=38.2, segm_mAP=36.0), +) +nas_fcos = dict( + config='configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py', # noqa + checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=39.4), +) +nas_fpn = dict( + config='configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py', + checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth', + eval='bbox', + metric=dict(bbox_mAP=40.5), +) +paa = dict( + config='configs/paa/paa_r50_fpn_1x_coco.py', + checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth', + eval='bbox', + metric=dict(bbox_mAP=40.4), +) +pafpn = dict( + config='configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py', + checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=37.5), +) +pisa = dict( + config='configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py', + checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth', + eval='bbox', + metric=dict(bbox_mAP=38.4), +) +point_rend = dict( + config='configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py', + checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth', + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=38.4, segm_mAP=36.3), +) +regnet = dict( + config='configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py', + checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=40.4, segm_mAP=36.7), +) +reppoints = dict( + config='configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py', + checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth', + eval='bbox', + metric=dict(bbox_mAP=37.0), +) +res2net = dict( + config='configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py', + checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth', + eval='bbox', + metric=dict(bbox_mAP=43.0), +) +resnest = dict( + config='configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py', # noqa + checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=42.0), +) +retinanet = dict( + config='configs/retinanet/retinanet_r50_fpn_1x_coco.py', + checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth', + eval='bbox', + metric=dict(bbox_mAP=36.5), +) +rpn = dict( + config='configs/rpn/rpn_r50_fpn_1x_coco.py', + checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth', + eval='proposal_fast', + metric=dict(AR_1000=58.2), +) +sabl = [ + dict( + config='configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py', + checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth', + eval='bbox', + metric=dict(bbox_mAP=37.7), + ), + dict( + config='configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py', + checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth', + eval='bbox', + metric=dict(bbox_mAP=39.9), + ), +] +scnet = dict( + config='configs/scnet/scnet_r50_fpn_1x_coco.py', + checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth', + eval='bbox', + metric=dict(bbox_mAP=43.5), +) +sparse_rcnn = dict( + config='configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py', + checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth', + eval='bbox', + metric=dict(bbox_mAP=37.9), +) +ssd = [ + dict( + config='configs/ssd/ssd300_coco.py', + checkpoint='ssd300_coco_20210803_015428-d231a06e.pth', + eval='bbox', 
+ metric=dict(bbox_mAP=25.5), + ), + dict( + config='configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py', + checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',# noqa + eval='bbox', + metric=dict(bbox_mAP=21.3), + ), +] +tridentnet = dict( + config='configs/tridentnet/tridentnet_r50_caffe_1x_coco.py', + checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth', + eval='bbox', + metric=dict(bbox_mAP=37.6), +) +vfnet = dict( + config='configs/vfnet/vfnet_r50_fpn_1x_coco.py', + checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth', + eval='bbox', + metric=dict(bbox_mAP=41.6), +) +yolact = dict( + config='configs/yolact/yolact_r50_1x8_coco.py', + checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth', + eval=['bbox', 'segm'], + metric=dict(bbox_mAP=31.2, segm_mAP=29.0), +) +yolo = dict( + config='configs/yolo/yolov3_d53_320_273e_coco.py', + checkpoint='yolov3_d53_320_273e_coco-421362b6.pth', + eval='bbox', + metric=dict(bbox_mAP=27.9), +) +yolof = dict( + config='configs/yolof/yolof_r50_c5_8x8_1x_coco.py', + checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth', + eval='bbox', + metric=dict(bbox_mAP=37.5), +) +centernet = dict( + config='configs/centernet/centernet_resnet18_dcnv2_140e_coco.py', + checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=29.5), +) +yolox = dict( + config='configs/yolox/yolox_tiny_8x8_300e_coco.py', + checkpoint='yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth', # noqa + eval='bbox', + metric=dict(bbox_mAP=31.5), +) +# yapf: enable diff --git a/downstream/mmdetection/.dev_scripts/batch_train_list.txt b/downstream/mmdetection/.dev_scripts/batch_train_list.txt new file mode 100644 index 0000000..a7004d7 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/batch_train_list.txt @@ -0,0 +1,66 @@ +configs/atss/atss_r50_fpn_1x_coco.py +configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py +configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py +configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py +configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py +configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py +configs/detectors/detectors_htc_r50_1x_coco.py +configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py +configs/detr/detr_r50_8x2_150e_coco.py +configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py +configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py +configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py +configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py +configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py +configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py +configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py +configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py +configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py +configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py +configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py +configs/fsaf/fsaf_r50_fpn_1x_coco.py +configs/gfl/gfl_r50_fpn_1x_coco.py +configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py +configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py +configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py +configs/htc/htc_r50_fpn_1x_coco.py +configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py +configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py +configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py +configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py 
+configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py +configs/paa/paa_r50_fpn_1x_coco.py +configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py +configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py +configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py +configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py +configs/rpn/rpn_r50_fpn_1x_coco.py +configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py +configs/ssd/ssd300_coco.py +configs/tridentnet/tridentnet_r50_caffe_1x_coco.py +configs/vfnet/vfnet_r50_fpn_1x_coco.py +configs/yolact/yolact_r50_8x8_coco.py +configs/yolo/yolov3_d53_320_273e_coco.py +configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py +configs/scnet/scnet_r50_fpn_1x_coco.py +configs/yolof/yolof_r50_c5_8x8_1x_coco.py +configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py +configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py +configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py +configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py +configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py +configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py +configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py +configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py +configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py +configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py +configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py +configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py +configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py +configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py +configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py +configs/centernet/centernet_resnet18_dcnv2_140e_coco.py +configs/yolox/yolox_tiny_8x8_300e_coco.py +configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py +configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py diff --git a/downstream/mmdetection/.dev_scripts/benchmark_filter.py b/downstream/mmdetection/.dev_scripts/benchmark_filter.py new file mode 100644 index 0000000..178cd9c --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/benchmark_filter.py @@ -0,0 +1,167 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + + +def parse_args(): + parser = argparse.ArgumentParser(description='Filter configs to train') + parser.add_argument( + '--basic-arch', + action='store_true', + help='to train models in basic arch') + parser.add_argument( + '--datasets', action='store_true', help='to train models in dataset') + parser.add_argument( + '--data-pipeline', + action='store_true', + help='to train models related to data pipeline, e.g. 
augmentations') + parser.add_argument( + '--nn-module', + action='store_true', + help='to train models related to neural network modules') + parser.add_argument( + '--model-options', + nargs='+', + help='custom options to special model benchmark') + parser.add_argument( + '--out', + type=str, + default='batch_train_list.txt', + help='output path of gathered metrics to be stored') + args = parser.parse_args() + return args + + +basic_arch_root = [ + 'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet', + 'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads', + 'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor', + 'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld', + 'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa', + 'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet', + 'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet' +] + +datasets_root = [ + 'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion' +] + +data_pipeline_root = ['albu_example', 'instaboost'] + +nn_module_root = [ + 'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet', + 'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie' +] + +benchmark_pool = [ + 'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py', + 'configs/atss/atss_r50_fpn_1x_coco.py', + 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py', + 'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py', + 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py', + 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py', + 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py', + 'configs/centripetalnet/' + 'centripetalnet_hourglass104_mstest_16x6_210e_coco.py', + 'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py', + 'configs/cornernet/' + 'cornernet_hourglass104_mstest_8x6_210e_coco.py', + 'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py', + 'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py', + 'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py', + 'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py', + 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py', + 'configs/detectors/detectors_htc_r50_1x_coco.py', + 'configs/detr/detr_r50_8x2_150e_coco.py', + 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py', + 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py', + 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa + 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', + 'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py', + 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py', + 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py', + 'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py', + 'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py', + 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py', + 'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py', + 'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py', + 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py', + 'configs/fsaf/fsaf_r50_fpn_1x_coco.py', + 'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py', + 'configs/gfl/gfl_r50_fpn_1x_coco.py', + 'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py', + 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py', + 'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py', + 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py', + 
'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py', + 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py', + 'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py', + 'configs/htc/htc_r50_fpn_1x_coco.py', + 'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py', + 'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py', + 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py', + 'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py', + 'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py', + 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py', + 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py', + 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py', + 'configs/paa/paa_r50_fpn_1x_coco.py', + 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py', + 'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py', + 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py', + 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py', + 'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py', + 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py', + 'configs/resnest/' + 'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py', + 'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py', + 'configs/rpn/rpn_r50_fpn_1x_coco.py', + 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py', + 'configs/ssd/ssd300_coco.py', + 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py', + 'configs/vfnet/vfnet_r50_fpn_1x_coco.py', + 'configs/yolact/yolact_r50_1x8_coco.py', + 'configs/yolo/yolov3_d53_320_273e_coco.py', + 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py', + 'configs/scnet/scnet_r50_fpn_1x_coco.py', + 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py', +] + + +def main(): + args = parse_args() + + benchmark_type = [] + if args.basic_arch: + benchmark_type += basic_arch_root + if args.datasets: + benchmark_type += datasets_root + if args.data_pipeline: + benchmark_type += data_pipeline_root + if args.nn_module: + benchmark_type += nn_module_root + + special_model = args.model_options + if special_model is not None: + benchmark_type += special_model + + config_dpath = 'configs/' + benchmark_configs = [] + for cfg_root in benchmark_type: + cfg_dir = osp.join(config_dpath, cfg_root) + configs = os.scandir(cfg_dir) + for cfg in configs: + config_path = osp.join(cfg_dir, cfg.name) + if (config_path in benchmark_pool + and config_path not in benchmark_configs): + benchmark_configs.append(config_path) + + print(f'Totally found {len(benchmark_configs)} configs to benchmark') + with open(args.out, 'w') as f: + for config in benchmark_configs: + f.write(config + '\n') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/.dev_scripts/benchmark_inference_fps.py b/downstream/mmdetection/.dev_scripts/benchmark_inference_fps.py new file mode 100644 index 0000000..81dcd6b --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/benchmark_inference_fps.py @@ -0,0 +1,170 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import os +import os.path as osp + +import mmcv +from mmcv import Config, DictAction +from mmcv.runner import init_dist +from terminaltables import GithubFlavoredMarkdownTable + +from tools.analysis_tools.benchmark import repeat_measure_inference_speed + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet benchmark a model of FPS') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint_root', help='Checkpoint file root path') + parser.add_argument( + '--round-num', + type=int, + default=1, + help='round a number to a given precision in decimal digits') + parser.add_argument( + '--repeat-num', + type=int, + default=1, + help='number of repeat times of measurement for averaging the results') + parser.add_argument( + '--out', type=str, help='output path of gathered fps to be stored') + parser.add_argument( + '--max-iter', type=int, default=2000, help='num of max iter') + parser.add_argument( + '--log-interval', type=int, default=50, help='interval of logging') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def results2markdown(result_dict): + table_data = [] + is_multiple_results = False + for cfg_name, value in result_dict.items(): + name = cfg_name.replace('configs/', '') + fps = value['fps'] + ms_times_pre_image = value['ms_times_pre_image'] + if isinstance(fps, list): + is_multiple_results = True + mean_fps = value['mean_fps'] + mean_times_pre_image = value['mean_times_pre_image'] + fps_str = ','.join([str(s) for s in fps]) + ms_times_pre_image_str = ','.join( + [str(s) for s in ms_times_pre_image]) + table_data.append([ + name, fps_str, mean_fps, ms_times_pre_image_str, + mean_times_pre_image + ]) + else: + table_data.append([name, fps, ms_times_pre_image]) + + if is_multiple_results: + table_data.insert(0, [ + 'model', 'fps', 'mean_fps', 'times_pre_image(ms)', + 'mean_times_pre_image(ms)' + ]) + + else: + table_data.insert(0, ['model', 'fps', 'times_pre_image(ms)']) + table = GithubFlavoredMarkdownTable(table_data) + print(table.table, flush=True) + + +if __name__ == '__main__': + args = parse_args() + assert args.round_num >= 0 + assert args.repeat_num >= 1 + + config = Config.fromfile(args.config) + + if args.launcher == 'none': + raise NotImplementedError('Only supports distributed mode') + else: + init_dist(args.launcher) + + result_dict = {} + for model_key in config: + model_infos = config[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + record_metrics = model_info['metric'] + cfg_path = model_info['config'].strip() + cfg = Config.fromfile(cfg_path) + checkpoint = 
osp.join(args.checkpoint_root, + model_info['checkpoint'].strip()) + try: + fps = repeat_measure_inference_speed(cfg, checkpoint, + args.max_iter, + args.log_interval, + args.fuse_conv_bn, + args.repeat_num) + if args.repeat_num > 1: + fps_list = [round(fps_, args.round_num) for fps_ in fps] + times_pre_image_list = [ + round(1000 / fps_, args.round_num) for fps_ in fps + ] + mean_fps = round( + sum(fps_list) / len(fps_list), args.round_num) + mean_times_pre_image = round( + sum(times_pre_image_list) / len(times_pre_image_list), + args.round_num) + print( + f'{cfg_path} ' + f'Overall fps: {fps_list}[{mean_fps}] img / s, ' + f'times per image: ' + f'{times_pre_image_list}[{mean_times_pre_image}] ' + f'ms / img', + flush=True) + result_dict[cfg_path] = dict( + fps=fps_list, + mean_fps=mean_fps, + ms_times_pre_image=times_pre_image_list, + mean_times_pre_image=mean_times_pre_image) + else: + print( + f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, ' + f'times per image: {1000 / fps:.{args.round_num}f} ' + f'ms / img', + flush=True) + result_dict[cfg_path] = dict( + fps=round(fps, args.round_num), + ms_times_pre_image=round(1000 / fps, args.round_num)) + except Exception as e: + print(f'{cfg_path} error: {repr(e)}') + if args.repeat_num > 1: + result_dict[cfg_path] = dict( + fps=[0], + mean_fps=0, + ms_times_pre_image=[0], + mean_times_pre_image=0) + else: + result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0) + + if args.out: + mmcv.mkdir_or_exist(args.out) + mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json')) + + results2markdown(result_dict) diff --git a/downstream/mmdetection/.dev_scripts/benchmark_test_image.py b/downstream/mmdetection/.dev_scripts/benchmark_test_image.py new file mode 100644 index 0000000..75f7576 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/benchmark_test_image.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import logging +import os.path as osp +from argparse import ArgumentParser + +from mmcv import Config + +from mmdet.apis import inference_detector, init_detector, show_result_pyplot +from mmdet.utils import get_root_logger + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint_root', help='Checkpoint file root path') + parser.add_argument('--img', default='demo/demo.jpg', help='Image file') + parser.add_argument('--aug', action='store_true', help='aug test') + parser.add_argument('--model-name', help='model name to inference') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--wait-time', + type=float, + default=1, + help='the interval of show (s), 0 is block') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='bbox score threshold') + args = parser.parse_args() + return args + + +def inference_model(config_name, checkpoint, args, logger=None): + cfg = Config.fromfile(config_name) + if args.aug: + if 'flip' in cfg.data.test.pipeline[1]: + cfg.data.test.pipeline[1].flip = True + else: + if logger is not None: + logger.error(f'{config_name}: unable to start aug test') + else: + print(f'{config_name}: unable to start aug test', flush=True) + + model = init_detector(cfg, checkpoint, device=args.device) + # test a single image + result = inference_detector(model, args.img) + + # show the results + if args.show: + show_result_pyplot( + model, + args.img, + result, + score_thr=args.score_thr, + wait_time=args.wait_time) + return result + + +# Sample test whether the inference code is correct +def main(args): + config = Config.fromfile(args.config) + + # test single model + if args.model_name: + if args.model_name in config: + model_infos = config[args.model_name] + if not isinstance(model_infos, list): + model_infos = [model_infos] + model_info = model_infos[0] + config_name = model_info['config'].strip() + print(f'processing: {config_name}', flush=True) + checkpoint = osp.join(args.checkpoint_root, + model_info['checkpoint'].strip()) + # build the model from a config file and a checkpoint file + inference_model(config_name, checkpoint, args) + return + else: + raise RuntimeError('model name input error.') + + # test all model + logger = get_root_logger( + log_file='benchmark_test_image.log', log_level=logging.ERROR) + + for model_key in config: + model_infos = config[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + print('processing: ', model_info['config'], flush=True) + config_name = model_info['config'].strip() + checkpoint = osp.join(args.checkpoint_root, + model_info['checkpoint'].strip()) + try: + # build the model from a config file and a checkpoint file + inference_model(config_name, checkpoint, args, logger) + except Exception as e: + logger.error(f'{config_name} " : {repr(e)}') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/downstream/mmdetection/.dev_scripts/check_links.py b/downstream/mmdetection/.dev_scripts/check_links.py new file mode 100755 index 0000000..b195d2a --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/check_links.py @@ -0,0 +1,157 @@ +# Modified from: +# https://github.com/allenai/allennlp/blob/main/scripts/check_links.py + +import argparse +import logging +import os +import pathlib +import re +import sys +from 
multiprocessing.dummy import Pool +from typing import NamedTuple, Optional, Tuple + +import requests +from mmcv.utils import get_logger + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Goes through all the inline-links ' + 'in markdown files and reports the breakages') + parser.add_argument( + '--num-threads', + type=int, + default=100, + help='Number of processes to confirm the link') + parser.add_argument('--https-proxy', type=str, help='https proxy') + parser.add_argument( + '--out', + type=str, + default='link_reports.txt', + help='output path of reports') + args = parser.parse_args() + return args + + +OK_STATUS_CODES = ( + 200, + 401, # the resource exists but may require some sort of login. + 403, # ^ same + 405, # HEAD method not allowed. + # the resource exists, but our default 'Accept-' header may not + # match what the server can provide. + 406, +) + + +class MatchTuple(NamedTuple): + source: str + name: str + link: str + + +def check_link( + match_tuple: MatchTuple, + http_session: requests.Session, + logger: logging = None) -> Tuple[MatchTuple, bool, Optional[str]]: + reason: Optional[str] = None + if match_tuple.link.startswith('http'): + result_ok, reason = check_url(match_tuple, http_session) + else: + result_ok = check_path(match_tuple) + if logger is None: + print(f" {'✓' if result_ok else '✗'} {match_tuple.link}") + else: + logger.info(f" {'✓' if result_ok else '✗'} {match_tuple.link}") + return match_tuple, result_ok, reason + + +def check_url(match_tuple: MatchTuple, + http_session: requests.Session) -> Tuple[bool, str]: + """Check if a URL is reachable.""" + try: + result = http_session.head( + match_tuple.link, timeout=5, allow_redirects=True) + return ( + result.ok or result.status_code in OK_STATUS_CODES, + f'status code = {result.status_code}', + ) + except (requests.ConnectionError, requests.Timeout): + return False, 'connection error' + + +def check_path(match_tuple: MatchTuple) -> bool: + """Check if a file in this repository exists.""" + relative_path = match_tuple.link.split('#')[0] + full_path = os.path.join( + os.path.dirname(str(match_tuple.source)), relative_path) + return os.path.exists(full_path) + + +def main(): + args = parse_args() + + # setup logger + logger = get_logger(name='mmdet', log_file=args.out) + + # setup https_proxy + if args.https_proxy: + os.environ['https_proxy'] = args.https_proxy + + # setup http_session + http_session = requests.Session() + for resource_prefix in ('http://', 'https://'): + http_session.mount( + resource_prefix, + requests.adapters.HTTPAdapter( + max_retries=5, + pool_connections=20, + pool_maxsize=args.num_threads), + ) + + logger.info('Finding all markdown files in the current directory...') + + project_root = (pathlib.Path(__file__).parent / '..').resolve() + markdown_files = project_root.glob('**/*.md') + + all_matches = set() + url_regex = re.compile(r'\[([^!][^\]]+)\]\(([^)(]+)\)') + for markdown_file in markdown_files: + with open(markdown_file) as handle: + for line in handle.readlines(): + matches = url_regex.findall(line) + for name, link in matches: + if 'localhost' not in link: + all_matches.add( + MatchTuple( + source=str(markdown_file), + name=name, + link=link)) + + logger.info(f' {len(all_matches)} markdown files found') + logger.info('Checking to make sure we can retrieve each link...') + + with Pool(processes=args.num_threads) as pool: + results = pool.starmap(check_link, [(match, http_session, logger) + for match in list(all_matches)]) + + # collect unreachable results + 
unreachable_results = [(match_tuple, reason) + for match_tuple, success, reason in results + if not success] + + if unreachable_results: + logger.info('================================================') + logger.info(f'Unreachable links ({len(unreachable_results)}):') + for match_tuple, reason in unreachable_results: + logger.info(' > Source: ' + match_tuple.source) + logger.info(' Name: ' + match_tuple.name) + logger.info(' Link: ' + match_tuple.link) + if reason is not None: + logger.info(' Reason: ' + reason) + sys.exit(1) + logger.info('No Unreachable link found.') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/.dev_scripts/convert_test_benchmark_script.py b/downstream/mmdetection/.dev_scripts/convert_test_benchmark_script.py new file mode 100644 index 0000000..c31cad4 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/convert_test_benchmark_script.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +from mmcv import Config + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert benchmark model list to script') + parser.add_argument('config', help='test config file path') + parser.add_argument('--port', type=int, default=29666, help='dist port') + parser.add_argument( + '--work-dir', + default='tools/batch_test', + help='the dir to save metric') + parser.add_argument( + '--run', action='store_true', help='run script directly') + parser.add_argument( + '--out', type=str, help='path to save model benchmark script') + + args = parser.parse_args() + return args + + +def process_model_info(model_info, work_dir): + config = model_info['config'].strip() + fname, _ = osp.splitext(osp.basename(config)) + job_name = fname + work_dir = osp.join(work_dir, fname) + checkpoint = model_info['checkpoint'].strip() + if not isinstance(model_info['eval'], list): + evals = [model_info['eval']] + else: + evals = model_info['eval'] + eval = ' '.join(evals) + return dict( + config=config, + job_name=job_name, + work_dir=work_dir, + checkpoint=checkpoint, + eval=eval) + + +def create_test_bash_info(commands, model_test_dict, port, script_name, + partition): + config = model_test_dict['config'] + job_name = model_test_dict['job_name'] + checkpoint = model_test_dict['checkpoint'] + work_dir = model_test_dict['work_dir'] + eval = model_test_dict['eval'] + + echo_info = f' \necho \'{config}\' &' + commands.append(echo_info) + commands.append('\n') + + command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \ + f'CPUS_PER_TASK=2 {script_name} ' + + command_info += f'{partition} ' + command_info += f'{job_name} ' + command_info += f'{config} ' + command_info += f'$CHECKPOINT_DIR/{checkpoint} ' + command_info += f'--work-dir {work_dir} ' + + command_info += f'--eval {eval} ' + command_info += f'--cfg-option dist_params.port={port} ' + command_info += ' &' + + commands.append(command_info) + + +def main(): + args = parse_args() + if args.out: + out_suffix = args.out.split('.')[-1] + assert args.out.endswith('.sh'), \ + f'Expected out file path suffix is .sh, but get .{out_suffix}' + assert args.out or args.run, \ + ('Please specify at least one operation (save/run/ the ' + 'script) with the argument "--out" or "--run"') + + commands = [] + partition_name = 'PARTITION=$1 ' + commands.append(partition_name) + commands.append('\n') + + checkpoint_root = 'CHECKPOINT_DIR=$2 ' + commands.append(checkpoint_root) + commands.append('\n') + + script_name = osp.join('tools', 'slurm_test.sh') + port = args.port + 
work_dir = args.work_dir + + cfg = Config.fromfile(args.config) + + for model_key in cfg: + model_infos = cfg[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + print('processing: ', model_info['config']) + model_test_dict = process_model_info(model_info, work_dir) + create_test_bash_info(commands, model_test_dict, port, script_name, + '$PARTITION') + port += 1 + + command_str = ''.join(commands) + if args.out: + with open(args.out, 'w') as f: + f.write(command_str) + if args.run: + os.system(command_str) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/.dev_scripts/convert_train_benchmark_script.py b/downstream/mmdetection/.dev_scripts/convert_train_benchmark_script.py new file mode 100644 index 0000000..1ccd8e9 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/convert_train_benchmark_script.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert benchmark model json to script') + parser.add_argument( + 'txt_path', type=str, help='txt path output by benchmark_filter') + parser.add_argument( + '--partition', + type=str, + default='openmmlab', + help='slurm partition name') + parser.add_argument( + '--max-keep-ckpts', + type=int, + default=1, + help='The maximum checkpoints to keep') + parser.add_argument( + '--run', action='store_true', help='run script directly') + parser.add_argument( + '--out', type=str, help='path to save model benchmark script') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + if args.out: + out_suffix = args.out.split('.')[-1] + assert args.out.endswith('.sh'), \ + f'Expected out file path suffix is .sh, but get .{out_suffix}' + assert args.out or args.run, \ + ('Please specify at least one operation (save/run/ the ' + 'script) with the argument "--out" or "--run"') + + partition = args.partition # cluster name + + root_name = './tools' + train_script_name = osp.join(root_name, 'slurm_train.sh') + # stdout is no output + stdout_cfg = '>/dev/null' + + max_keep_ckpts = args.max_keep_ckpts + + commands = [] + with open(args.txt_path, 'r') as f: + model_cfgs = f.readlines() + for i, cfg in enumerate(model_cfgs): + cfg = cfg.strip() + if len(cfg) == 0: + continue + # print cfg name + echo_info = f'echo \'{cfg}\' &' + commands.append(echo_info) + commands.append('\n') + + fname, _ = osp.splitext(osp.basename(cfg)) + out_fname = osp.join(root_name, 'work_dir', fname) + # default setting + if cfg.find('16x') >= 0: + command_info = f'GPUS=16 GPUS_PER_NODE=8 ' \ + f'CPUS_PER_TASK=2 {train_script_name} ' + elif cfg.find('gn-head_4x4_1x_coco.py') >= 0 or \ + cfg.find('gn-head_4x4_2x_coco.py') >= 0: + command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \ + f'CPUS_PER_TASK=2 {train_script_name} ' + else: + command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \ + f'CPUS_PER_TASK=2 {train_script_name} ' + command_info += f'{partition} ' + command_info += f'{fname} ' + command_info += f'{cfg} ' + command_info += f'{out_fname} ' + if max_keep_ckpts: + command_info += f'--cfg-options ' \ + f'checkpoint_config.max_keep_ckpts=' \ + f'{max_keep_ckpts}' + ' ' + command_info += f'{stdout_cfg} &' + + commands.append(command_info) + + if i < len(model_cfgs): + commands.append('\n') + + command_str = ''.join(commands) + if args.out: + with open(args.out, 'w') as f: + f.write(command_str) + if args.run: + os.system(command_str) + + +if 
__name__ == '__main__': + main() diff --git a/downstream/mmdetection/.dev_scripts/gather_models.py b/downstream/mmdetection/.dev_scripts/gather_models.py new file mode 100644 index 0000000..42e615c --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/gather_models.py @@ -0,0 +1,340 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import glob +import json +import os.path as osp +import shutil +import subprocess +from collections import OrderedDict + +import mmcv +import torch +import yaml + + +def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds): + + class OrderedDumper(Dumper): + pass + + def _dict_representer(dumper, data): + return dumper.represent_mapping( + yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()) + + OrderedDumper.add_representer(OrderedDict, _dict_representer) + return yaml.dump(data, stream, OrderedDumper, **kwds) + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + + # remove ema state_dict + for key in list(checkpoint['state_dict']): + if key.startswith('ema_'): + checkpoint['state_dict'].pop(key) + + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + if torch.__version__ >= '1.6': + torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) + else: + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) + subprocess.Popen(['mv', out_file, final_file]) + return final_file + + +def is_by_epoch(config): + cfg = mmcv.Config.fromfile('./configs/' + config) + return cfg.runner.type == 'EpochBasedRunner' + + +def get_final_epoch_or_iter(config): + cfg = mmcv.Config.fromfile('./configs/' + config) + if cfg.runner.type == 'EpochBasedRunner': + return cfg.runner.max_epochs + else: + return cfg.runner.max_iters + + +def get_best_epoch_or_iter(exp_dir): + best_epoch_iter_full_path = list( + sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1] + best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1] + best_epoch_or_iter = best_epoch_or_iter_model_path.\ + split('_')[-1].split('.')[0] + return best_epoch_or_iter_model_path, int(best_epoch_or_iter) + + +def get_real_epoch_or_iter(config): + cfg = mmcv.Config.fromfile('./configs/' + config) + if cfg.runner.type == 'EpochBasedRunner': + epoch = cfg.runner.max_epochs + if cfg.data.train.type == 'RepeatDataset': + epoch *= cfg.data.train.times + return epoch + else: + return cfg.runner.max_iters + + +def get_final_results(log_json_path, + epoch_or_iter, + results_lut, + by_epoch=True): + result_dict = dict() + last_val_line = None + last_train_line = None + last_val_line_idx = -1 + last_train_line_idx = -1 + with open(log_json_path, 'r') as f: + for i, line in enumerate(f.readlines()): + log_line = json.loads(line) + if 'mode' not in log_line.keys(): + continue + + if by_epoch: + if (log_line['mode'] == 'train' + and log_line['epoch'] == epoch_or_iter): + result_dict['memory'] = log_line['memory'] + + if (log_line['mode'] == 'val' + and log_line['epoch'] == epoch_or_iter): + result_dict.update({ + key: log_line[key] + for key in results_lut if key in log_line + }) + return result_dict + else: + if log_line['mode'] == 'train': + last_train_line_idx = i + last_train_line = log_line + + if log_line and log_line['mode'] == 'val': + 
last_val_line_idx = i + last_val_line = log_line + + # bug: max_iters = 768, last_train_line['iter'] = 750 + assert last_val_line_idx == last_train_line_idx + 1, \ + 'Log file is incomplete' + result_dict['memory'] = last_train_line['memory'] + result_dict.update({ + key: last_val_line[key] + for key in results_lut if key in last_val_line + }) + + return result_dict + + +def get_dataset_name(config): + # If there are more dataset, add here. + name_map = dict( + CityscapesDataset='Cityscapes', + CocoDataset='COCO', + CocoPanopticDataset='COCO', + DeepFashionDataset='Deep Fashion', + LVISV05Dataset='LVIS v0.5', + LVISV1Dataset='LVIS v1', + VOCDataset='Pascal VOC', + WIDERFaceDataset='WIDER Face', + OpenImagesDataset='OpenImagesDataset', + OpenImagesChallengeDataset='OpenImagesChallengeDataset') + cfg = mmcv.Config.fromfile('./configs/' + config) + return name_map[cfg.dataset_type] + + +def convert_model_info_to_pwc(model_infos): + pwc_files = {} + for model in model_infos: + cfg_folder_name = osp.split(model['config'])[-2] + pwc_model_info = OrderedDict() + pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0] + pwc_model_info['In Collection'] = 'Please fill in Collection name' + pwc_model_info['Config'] = osp.join('configs', model['config']) + + # get metadata + memory = round(model['results']['memory'] / 1024, 1) + meta_data = OrderedDict() + meta_data['Training Memory (GB)'] = memory + if 'epochs' in model: + meta_data['Epochs'] = get_real_epoch_or_iter(model['config']) + else: + meta_data['Iterations'] = get_real_epoch_or_iter(model['config']) + pwc_model_info['Metadata'] = meta_data + + # get dataset name + dataset_name = get_dataset_name(model['config']) + + # get results + results = [] + # if there are more metrics, add here. + if 'bbox_mAP' in model['results']: + metric = round(model['results']['bbox_mAP'] * 100, 1) + results.append( + OrderedDict( + Task='Object Detection', + Dataset=dataset_name, + Metrics={'box AP': metric})) + if 'segm_mAP' in model['results']: + metric = round(model['results']['segm_mAP'] * 100, 1) + results.append( + OrderedDict( + Task='Instance Segmentation', + Dataset=dataset_name, + Metrics={'mask AP': metric})) + if 'PQ' in model['results']: + metric = round(model['results']['PQ'], 1) + results.append( + OrderedDict( + Task='Panoptic Segmentation', + Dataset=dataset_name, + Metrics={'PQ': metric})) + pwc_model_info['Results'] = results + + link_string = 'https://download.openmmlab.com/mmdetection/v2.0/' + link_string += '{}/{}'.format(model['config'].rstrip('.py'), + osp.split(model['model_path'])[-1]) + pwc_model_info['Weights'] = link_string + if cfg_folder_name in pwc_files: + pwc_files[cfg_folder_name].append(pwc_model_info) + else: + pwc_files[cfg_folder_name] = [pwc_model_info] + return pwc_files + + +def parse_args(): + parser = argparse.ArgumentParser(description='Gather benchmarked models') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + 'out', type=str, help='output path of gathered models to be stored') + parser.add_argument( + '--best', + action='store_true', + help='whether to gather the best model.') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + models_root = args.root + models_out = args.out + mmcv.mkdir_or_exist(models_out) + + # find all models in the root directory to be gathered + raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True)) + + # filter configs that is not trained in the 
experiments dir + used_configs = [] + for raw_config in raw_configs: + if osp.exists(osp.join(models_root, raw_config)): + used_configs.append(raw_config) + print(f'Find {len(used_configs)} models to be gathered') + + # find final_ckpt and log file for trained each config + # and parse the best performance + model_infos = [] + for used_config in used_configs: + exp_dir = osp.join(models_root, used_config) + by_epoch = is_by_epoch(used_config) + # check whether the exps is finished + if args.best is True: + final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir) + else: + final_epoch_or_iter = get_final_epoch_or_iter(used_config) + final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter', + final_epoch_or_iter) + + model_path = osp.join(exp_dir, final_model) + # skip if the model is still training + if not osp.exists(model_path): + continue + + # get the latest logs + log_json_path = list( + sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1] + log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1] + cfg = mmcv.Config.fromfile('./configs/' + used_config) + results_lut = cfg.evaluation.metric + if not isinstance(results_lut, list): + results_lut = [results_lut] + # case when using VOC, the evaluation key is only 'mAP' + # when using Panoptic Dataset, the evaluation key is 'PQ'. + for i, key in enumerate(results_lut): + if 'mAP' not in key and 'PQ' not in key: + results_lut[i] = key + '_mAP' + model_performance = get_final_results(log_json_path, + final_epoch_or_iter, results_lut, + by_epoch) + + if model_performance is None: + continue + + model_time = osp.split(log_txt_path)[-1].split('.')[0] + model_info = dict( + config=used_config, + results=model_performance, + model_time=model_time, + final_model=final_model, + log_json_path=osp.split(log_json_path)[-1]) + model_info['epochs' if by_epoch else 'iterations'] =\ + final_epoch_or_iter + model_infos.append(model_info) + + # publish model for each checkpoint + publish_model_infos = [] + for model in model_infos: + model_publish_dir = osp.join(models_out, model['config'].rstrip('.py')) + mmcv.mkdir_or_exist(model_publish_dir) + + model_name = osp.split(model['config'])[-1].split('.')[0] + + model_name += '_' + model['model_time'] + publish_model_path = osp.join(model_publish_dir, model_name) + trained_model_path = osp.join(models_root, model['config'], + model['final_model']) + + # convert model + final_model_path = process_checkpoint(trained_model_path, + publish_model_path) + + # copy log + shutil.copy( + osp.join(models_root, model['config'], model['log_json_path']), + osp.join(model_publish_dir, f'{model_name}.log.json')) + shutil.copy( + osp.join(models_root, model['config'], + model['log_json_path'].rstrip('.json')), + osp.join(model_publish_dir, f'{model_name}.log')) + + # copy config to guarantee reproducibility + config_path = model['config'] + config_path = osp.join( + 'configs', + config_path) if 'configs' not in config_path else config_path + target_config_path = osp.split(config_path)[-1] + shutil.copy(config_path, osp.join(model_publish_dir, + target_config_path)) + + model['model_path'] = final_model_path + publish_model_infos.append(model) + + models = dict(models=publish_model_infos) + print(f'Totally gathered {len(publish_model_infos)} models') + mmcv.dump(models, osp.join(models_out, 'model_info.json')) + + pwc_files = convert_model_info_to_pwc(publish_model_infos) + for name in pwc_files: + with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f: + 
ordered_yaml_dump(pwc_files[name], f, encoding='utf-8') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/.dev_scripts/gather_test_benchmark_metric.py b/downstream/mmdetection/.dev_scripts/gather_test_benchmark_metric.py new file mode 100644 index 0000000..07c6bf4 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/gather_test_benchmark_metric.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import glob +import os.path as osp + +import mmcv +from mmcv import Config + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Gather benchmarked models metric') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + '--out', type=str, help='output path of gathered metrics to be stored') + parser.add_argument( + '--not-show', action='store_true', help='not show metrics') + parser.add_argument( + '--show-all', action='store_true', help='show all model metrics') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + root_path = args.root + metrics_out = args.out + result_dict = {} + + cfg = Config.fromfile(args.config) + + for model_key in cfg: + model_infos = cfg[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + record_metrics = model_info['metric'] + config = model_info['config'].strip() + fname, _ = osp.splitext(osp.basename(config)) + metric_json_dir = osp.join(root_path, fname) + if osp.exists(metric_json_dir): + json_list = glob.glob(osp.join(metric_json_dir, '*.json')) + if len(json_list) > 0: + log_json_path = list(sorted(json_list))[-1] + + metric = mmcv.load(log_json_path) + if config in metric.get('config', {}): + + new_metrics = dict() + for record_metric_key in record_metrics: + record_metric_key_bk = record_metric_key + old_metric = record_metrics[record_metric_key] + if record_metric_key == 'AR_1000': + record_metric_key = 'AR@1000' + if record_metric_key not in metric['metric']: + raise KeyError( + 'record_metric_key not exist, please ' + 'check your config') + new_metric = round( + metric['metric'][record_metric_key] * 100, 1) + new_metrics[record_metric_key_bk] = new_metric + + if args.show_all: + result_dict[config] = dict( + before=record_metrics, after=new_metrics) + else: + for record_metric_key in record_metrics: + old_metric = record_metrics[record_metric_key] + new_metric = new_metrics[record_metric_key] + if old_metric != new_metric: + result_dict[config] = dict( + before=record_metrics, + after=new_metrics) + break + else: + print(f'{config} not included in: {log_json_path}') + else: + print(f'{config} not exist file: {metric_json_dir}') + else: + print(f'{config} not exist dir: {metric_json_dir}') + + if metrics_out: + mmcv.mkdir_or_exist(metrics_out) + mmcv.dump(result_dict, + osp.join(metrics_out, 'batch_test_metric_info.json')) + if not args.not_show: + print('===================================') + for config_name, metrics in result_dict.items(): + print(config_name, metrics) + print('===================================') diff --git a/downstream/mmdetection/.dev_scripts/gather_train_benchmark_metric.py b/downstream/mmdetection/.dev_scripts/gather_train_benchmark_metric.py new file mode 100644 index 0000000..f9c6c80 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/gather_train_benchmark_metric.py @@ -0,0 +1,150 @@ +# Copyright (c) 
OpenMMLab. All rights reserved. +import argparse +import glob +import os.path as osp + +import mmcv +from gather_models import get_final_results + +try: + import xlrd +except ImportError: + xlrd = None +try: + import xlutils + from xlutils.copy import copy +except ImportError: + xlutils = None + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Gather benchmarked models metric') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + 'txt_path', type=str, help='txt path output by benchmark_filter') + parser.add_argument( + '--out', type=str, help='output path of gathered metrics to be stored') + parser.add_argument( + '--not-show', action='store_true', help='not show metrics') + parser.add_argument( + '--excel', type=str, help='input path of excel to be recorded') + parser.add_argument( + '--ncol', type=int, help='Number of column to be modified or appended') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + if args.excel: + assert args.ncol, 'Please specify "--excel" and "--ncol" ' \ + 'at the same time' + if xlrd is None: + raise RuntimeError( + 'xlrd is not installed,' + 'Please use “pip install xlrd==1.2.0” to install') + if xlutils is None: + raise RuntimeError( + 'xlutils is not installed,' + 'Please use “pip install xlutils==2.0.0” to install') + readbook = xlrd.open_workbook(args.excel) + sheet = readbook.sheet_by_name('Sheet1') + sheet_info = {} + total_nrows = sheet.nrows + for i in range(3, sheet.nrows): + sheet_info[sheet.row_values(i)[0]] = i + xlrw = copy(readbook) + table = xlrw.get_sheet(0) + + root_path = args.root + metrics_out = args.out + + result_dict = {} + with open(args.txt_path, 'r') as f: + model_cfgs = f.readlines() + for i, config in enumerate(model_cfgs): + config = config.strip() + if len(config) == 0: + continue + + config_name = osp.split(config)[-1] + config_name = osp.splitext(config_name)[0] + result_path = osp.join(root_path, config_name) + if osp.exists(result_path): + # 1 read config + cfg = mmcv.Config.fromfile(config) + total_epochs = cfg.runner.max_epochs + final_results = cfg.evaluation.metric + if not isinstance(final_results, list): + final_results = [final_results] + final_results_out = [] + for key in final_results: + if 'proposal_fast' in key: + final_results_out.append('AR@1000') # RPN + elif 'mAP' not in key: + final_results_out.append(key + '_mAP') + + # 2 determine whether total_epochs ckpt exists + ckpt_path = f'epoch_{total_epochs}.pth' + if osp.exists(osp.join(result_path, ckpt_path)): + log_json_path = list( + sorted(glob.glob(osp.join(result_path, + '*.log.json'))))[-1] + + # 3 read metric + model_performance = get_final_results( + log_json_path, total_epochs, final_results_out) + if model_performance is None: + print(f'log file error: {log_json_path}') + continue + for performance in model_performance: + if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']: + metric = round( + model_performance[performance] * 100, 1) + model_performance[performance] = metric + result_dict[config] = model_performance + + # update and append excel content + if args.excel: + if 'AR@1000' in model_performance: + metrics = f'{model_performance["AR@1000"]}' \ + f'(AR@1000)' + elif 'segm_mAP' in model_performance: + metrics = f'{model_performance["bbox_mAP"]}/' \ + f'{model_performance["segm_mAP"]}' + else: + metrics = f'{model_performance["bbox_mAP"]}' + + row_num = sheet_info.get(config, None) + if row_num: + 
table.write(row_num, args.ncol, metrics) + else: + table.write(total_nrows, 0, config) + table.write(total_nrows, args.ncol, metrics) + total_nrows += 1 + + else: + print(f'{config} not exist: {ckpt_path}') + else: + print(f'not exist: {config}') + + # 4 save or print results + if metrics_out: + mmcv.mkdir_or_exist(metrics_out) + mmcv.dump(result_dict, + osp.join(metrics_out, 'model_metric_info.json')) + if not args.not_show: + print('===================================') + for config_name, metrics in result_dict.items(): + print(config_name, metrics) + print('===================================') + if args.excel: + filename, sufflx = osp.splitext(args.excel) + xlrw.save(f'{filename}_o{sufflx}') + print(f'>>> Output {filename}_o{sufflx}') diff --git a/downstream/mmdetection/.dev_scripts/linter.sh b/downstream/mmdetection/.dev_scripts/linter.sh new file mode 100644 index 0000000..b0fe0ac --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/linter.sh @@ -0,0 +1,3 @@ +yapf -r -i mmdet/ configs/ tests/ tools/ +isort -rc mmdet/ configs/ tests/ tools/ +flake8 . diff --git a/downstream/mmdetection/.dev_scripts/test_benchmark.sh b/downstream/mmdetection/.dev_scripts/test_benchmark.sh new file mode 100644 index 0000000..cb79950 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/test_benchmark.sh @@ -0,0 +1,119 @@ +PARTITION=$1 +CHECKPOINT_DIR=$2 + +echo 'configs/atss/atss_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py $CHECKPOINT_DIR/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth --work-dir tools/batch_test/atss_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29666 & +echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py $CHECKPOINT_DIR/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth --work-dir tools/batch_test/autoassign_r50_fpn_8x2_1x_coco --eval bbox --cfg-option dist_params.port=29667 & +echo 'configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_carafe_1x_coco configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_carafe_1x_coco --eval bbox --cfg-option dist_params.port=29668 & +echo 'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cascade_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth --work-dir tools/batch_test/cascade_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29669 & +echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth --work-dir tools/batch_test/cascade_mask_rcnn_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29670 & +echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION crpn_faster_rcnn_r50_caffe_fpn_1x_coco 
configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth --work-dir tools/batch_test/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29671 & +echo 'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION centripetalnet_hourglass104_mstest_16x6_210e_coco configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py $CHECKPOINT_DIR/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth --work-dir tools/batch_test/centripetalnet_hourglass104_mstest_16x6_210e_coco --eval bbox --cfg-option dist_params.port=29672 & +echo 'configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cornernet_hourglass104_mstest_8x6_210e_coco configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py $CHECKPOINT_DIR/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth --work-dir tools/batch_test/cornernet_hourglass104_mstest_8x6_210e_coco --eval bbox --cfg-option dist_params.port=29673 & +echo 'configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco --eval bbox --cfg-option dist_params.port=29674 & +echo 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deformable_detr_r50_16x2_50e_coco configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py $CHECKPOINT_DIR/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth --work-dir tools/batch_test/deformable_detr_r50_16x2_50e_coco --eval bbox --cfg-option dist_params.port=29675 & +echo 'configs/detectors/detectors_htc_r50_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION detectors_htc_r50_1x_coco configs/detectors/detectors_htc_r50_1x_coco.py $CHECKPOINT_DIR/detectors_htc_r50_1x_coco-329b1453.pth --work-dir tools/batch_test/detectors_htc_r50_1x_coco --eval bbox segm --cfg-option dist_params.port=29676 & +echo 'configs/detr/detr_r50_8x2_150e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION detr_r50_8x2_150e_coco configs/detr/detr_r50_8x2_150e_coco.py $CHECKPOINT_DIR/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth --work-dir tools/batch_test/detr_r50_8x2_150e_coco --eval bbox --cfg-option dist_params.port=29677 & +echo 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION dh_faster_rcnn_r50_fpn_1x_coco configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth --work-dir tools/batch_test/dh_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29678 & +echo 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION dynamic_rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dynamic_rcnn_r50_fpn_1x-62a3f276.pth --work-dir tools/batch_test/dynamic_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option 
dist_params.port=29679 & +echo 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_attention_1111_1x_coco configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_attention_1111_1x_coco --eval bbox --cfg-option dist_params.port=29680 & +echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29681 & +echo 'configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py $CHECKPOINT_DIR/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth --work-dir tools/batch_test/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco --eval bbox --cfg-option dist_params.port=29682 & +echo 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fovea_align_r50_fpn_gn-head_4x4_2x_coco configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py $CHECKPOINT_DIR/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth --work-dir tools/batch_test/fovea_align_r50_fpn_gn-head_4x4_2x_coco --eval bbox --cfg-option dist_params.port=29683 & +echo 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_free_anchor_r50_fpn_1x_coco configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth --work-dir tools/batch_test/retinanet_free_anchor_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29684 & +echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py $CHECKPOINT_DIR/fsaf_r50_fpn_1x_coco-94ccc51f.pth --work-dir tools/batch_test/fsaf_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29685 & +echo 'configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco --eval bbox segm --cfg-option dist_params.port=29686 & +echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py $CHECKPOINT_DIR/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth --work-dir tools/batch_test/gfl_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29687 & +echo 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py' & +GPUS=8 
GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_gn-all_2x_coco --eval bbox segm --cfg-option dist_params.port=29688 & +echo 'configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_gn_ws-all_1x_coco configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_gn_ws-all_1x_coco --eval bbox --cfg-option dist_params.port=29689 & +echo 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION grid_rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py $CHECKPOINT_DIR/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth --work-dir tools/batch_test/grid_rcnn_r50_fpn_gn-head_2x_coco --eval bbox --cfg-option dist_params.port=29690 & +echo 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_groie_1x_coco configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_groie_1x_coco --eval bbox --cfg-option dist_params.port=29691 & +echo 'configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ga_retinanet_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth --work-dir tools/batch_test/ga_retinanet_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29692 & +echo 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ga_faster_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth --work-dir tools/batch_test/ga_faster_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29693 & +echo 'configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_hrnetv2p_w18_1x_coco configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth --work-dir tools/batch_test/faster_rcnn_hrnetv2p_w18_1x_coco --eval bbox --cfg-option dist_params.port=29694 & +echo 'configs/htc/htc_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py $CHECKPOINT_DIR/htc_r50_fpn_1x_coco_20200317-7332cf16.pth --work-dir tools/batch_test/htc_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29695 & +echo 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION libra_faster_rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth --work-dir tools/batch_test/libra_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29696 & 
+echo 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_1x_coco configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29697 & +echo 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ms_rcnn_r50_caffe_fpn_1x_coco configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth --work-dir tools/batch_test/ms_rcnn_r50_caffe_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29698 & +echo 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py $CHECKPOINT_DIR/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth --work-dir tools/batch_test/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --eval bbox --cfg-option dist_params.port=29699 & +echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_r50_nasfpn_crop640_50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py $CHECKPOINT_DIR/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth --work-dir tools/batch_test/retinanet_r50_nasfpn_crop640_50e_coco --eval bbox --cfg-option dist_params.port=29700 & +echo 'configs/paa/paa_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py $CHECKPOINT_DIR/paa_r50_fpn_1x_coco_20200821-936edec3.pth --work-dir tools/batch_test/paa_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29701 & +echo 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_pafpn_1x_coco configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth --work-dir tools/batch_test/faster_rcnn_r50_pafpn_1x_coco --eval bbox --cfg-option dist_params.port=29702 & +echo 'configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pisa_faster_rcnn_r50_fpn_1x_coco configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth --work-dir tools/batch_test/pisa_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29703 & +echo 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION point_rend_r50_caffe_fpn_mstrain_1x_coco configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py $CHECKPOINT_DIR/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth --work-dir tools/batch_test/point_rend_r50_caffe_fpn_mstrain_1x_coco --eval bbox segm --cfg-option dist_params.port=29704 & +echo 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth --work-dir 
tools/batch_test/mask_rcnn_regnetx-3.2GF_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29705 & +echo 'configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION reppoints_moment_r50_fpn_1x_coco configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py $CHECKPOINT_DIR/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth --work-dir tools/batch_test/reppoints_moment_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29706 & +echo 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r2_101_fpn_2x_coco configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py $CHECKPOINT_DIR/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth --work-dir tools/batch_test/faster_rcnn_r2_101_fpn_2x_coco --eval bbox --cfg-option dist_params.port=29707 & +echo 'configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth --work-dir tools/batch_test/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco --eval bbox --cfg-option dist_params.port=29708 & +echo 'configs/retinanet/retinanet_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_r50_fpn_1x_coco configs/retinanet/retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth --work-dir tools/batch_test/retinanet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29709 & +echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth --work-dir tools/batch_test/rpn_r50_fpn_1x_coco --eval proposal_fast --cfg-option dist_params.port=29710 & +echo 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sabl_retinanet_r50_fpn_1x_coco configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth --work-dir tools/batch_test/sabl_retinanet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29711 & +echo 'configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sabl_faster_rcnn_r50_fpn_1x_coco configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth --work-dir tools/batch_test/sabl_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29712 & +echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/scnet_r50_fpn_1x_coco-c3f09857.pth --work-dir tools/batch_test/scnet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29713 & +echo 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sparse_rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth --work-dir 
tools/batch_test/sparse_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29714 & +echo 'configs/ssd/ssd300_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ssd300_coco configs/ssd/ssd300_coco.py $CHECKPOINT_DIR/ssd300_coco_20210803_015428-d231a06e.pth --work-dir tools/batch_test/ssd300_coco --eval bbox --cfg-option dist_params.port=29715 & +echo 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION tridentnet_r50_caffe_1x_coco configs/tridentnet/tridentnet_r50_caffe_1x_coco.py $CHECKPOINT_DIR/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth --work-dir tools/batch_test/tridentnet_r50_caffe_1x_coco --eval bbox --cfg-option dist_params.port=29716 & +echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth --work-dir tools/batch_test/vfnet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29717 & +echo 'configs/yolact/yolact_r50_1x8_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolact_r50_1x8_coco configs/yolact/yolact_r50_1x8_coco.py $CHECKPOINT_DIR/yolact_r50_1x8_coco_20200908-f38d58df.pth --work-dir tools/batch_test/yolact_r50_1x8_coco --eval bbox segm --cfg-option dist_params.port=29718 & +echo 'configs/yolo/yolov3_d53_320_273e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolov3_d53_320_273e_coco configs/yolo/yolov3_d53_320_273e_coco.py $CHECKPOINT_DIR/yolov3_d53_320_273e_coco-421362b6.pth --work-dir tools/batch_test/yolov3_d53_320_273e_coco --eval bbox --cfg-option dist_params.port=29719 & +echo 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolof_r50_c5_8x8_1x_coco configs/yolof/yolof_r50_c5_8x8_1x_coco.py $CHECKPOINT_DIR/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth --work-dir tools/batch_test/yolof_r50_c5_8x8_1x_coco --eval bbox --cfg-option dist_params.port=29720 & +echo 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION centernet_resnet18_dcnv2_140e_coco configs/centernet/centernet_resnet18_dcnv2_140e_coco.py $CHECKPOINT_DIR/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth --work-dir tools/batch_test/centernet_resnet18_dcnv2_140e_coco --eval bbox --cfg-option dist_params.port=29721 & +echo 'configs/yolox/yolox_tiny_8x8_300e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolox_tiny_8x8_300e_coco configs/yolox/yolox_tiny_8x8_300e_coco.py $CHECKPOINT_DIR/yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth --work-dir tools/batch_test/yolox_tiny_8x8_300e_coco --eval bbox --cfg-option dist_params.port=29722 & +echo 'configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ssdlite_mobilenetv2_scratch_600e_coco configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py $CHECKPOINT_DIR/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth --work-dir tools/batch_test/ssdlite_mobilenetv2_scratch_600e_coco --eval bbox --cfg-option dist_params.port=29723 & diff --git a/downstream/mmdetection/.dev_scripts/test_init_backbone.py b/downstream/mmdetection/.dev_scripts/test_init_backbone.py new file mode 100644 index 0000000..862f4af 
--- /dev/null +++ b/downstream/mmdetection/.dev_scripts/test_init_backbone.py @@ -0,0 +1,181 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Check out backbone whether successfully load pretrained checkpoint.""" +import copy +import os +from os.path import dirname, exists, join + +import pytest +from mmcv import Config, ProgressBar +from mmcv.runner import _load_checkpoint + +from mmdet.models import build_detector + + +def _get_config_directory(): + """Find the predefined detector config directory.""" + try: + # Assume we are running in the source mmdetection repo + repo_dpath = dirname(dirname(__file__)) + except NameError: + # For IPython development when this __file__ is not defined + import mmdet + repo_dpath = dirname(dirname(mmdet.__file__)) + config_dpath = join(repo_dpath, 'configs') + if not exists(config_dpath): + raise Exception('Cannot find config path') + return config_dpath + + +def _get_config_module(fname): + """Load a configuration as a python module.""" + from mmcv import Config + config_dpath = _get_config_directory() + config_fpath = join(config_dpath, fname) + config_mod = Config.fromfile(config_fpath) + return config_mod + + +def _get_detector_cfg(fname): + """Grab configs necessary to create a detector. + + These are deep copied to allow for safe modification of parameters without + influencing other tests. + """ + config = _get_config_module(fname) + model = copy.deepcopy(config.model) + return model + + +def _traversed_config_file(): + """We traversed all potential config files under the `config` file. If you + need to print details or debug code, you can use this function. + + If the `backbone.init_cfg` is None (do not use `Pretrained` init way), you + need add the folder name in `ignores_folder` (if the config files in this + folder all set backbone.init_cfg is None) or add config name in + `ignores_file` (if the config file set backbone.init_cfg is None) + """ + config_path = _get_config_directory() + check_cfg_names = [] + + # `base`, `legacy_1.x` and `common` ignored by default. + ignores_folder = ['_base_', 'legacy_1.x', 'common'] + # 'ld' need load teacher model, if want to check 'ld', + # please check teacher_config path first. + ignores_folder += ['ld'] + # `selfsup_pretrain` need convert model, if want to check this model, + # need to convert the model first. + ignores_folder += ['selfsup_pretrain'] + + # the `init_cfg` in 'centripetalnet', 'cornernet', 'cityscapes', + # 'scratch' is None. + # the `init_cfg` in ssdlite(`ssdlite_mobilenetv2_scratch_600e_coco.py`) + # is None + # Please confirm `bockbone.init_cfg` is None first. + ignores_folder += ['centripetalnet', 'cornernet', 'cityscapes', 'scratch'] + ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py'] + + for config_file_name in os.listdir(config_path): + if config_file_name not in ignores_folder: + config_file = join(config_path, config_file_name) + if os.path.isdir(config_file): + for config_sub_file in os.listdir(config_file): + if config_sub_file.endswith('py') and \ + config_sub_file not in ignores_file: + name = join(config_file, config_sub_file) + check_cfg_names.append(name) + return check_cfg_names + + +def _check_backbone(config, print_cfg=True): + """Check out backbone whether successfully load pretrained model, by using + `backbone.init_cfg`. + + First, using `mmcv._load_checkpoint` to load the checkpoint without + loading models. + Then, using `build_detector` to build models, and using + `model.init_weights()` to initialize the parameters. 
+ Finally, assert weights and bias of each layer loaded from pretrained + checkpoint are equal to the weights and bias of original checkpoint. + For the convenience of comparison, we sum up weights and bias of + each loaded layer separately. + + Args: + config (str): Config file path. + print_cfg (bool): Whether print logger and return the result. + + Returns: + results (str or None): If backbone successfully load pretrained + checkpoint, return None; else, return config file path. + """ + if print_cfg: + print('-' * 15 + 'loading ', config) + cfg = Config.fromfile(config) + init_cfg = None + try: + init_cfg = cfg.model.backbone.init_cfg + init_flag = True + except AttributeError: + init_flag = False + if init_cfg is None or init_cfg.get('type') != 'Pretrained': + init_flag = False + if init_flag: + checkpoint = _load_checkpoint(init_cfg.checkpoint) + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + model = build_detector( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + model.init_weights() + + checkpoint_layers = state_dict.keys() + for name, value in model.backbone.state_dict().items(): + if name in checkpoint_layers: + assert value.equal(state_dict[name]) + + if print_cfg: + print('-' * 10 + 'Successfully load checkpoint' + '-' * 10 + + '\n', ) + return None + else: + if print_cfg: + print(config + '\n' + '-' * 10 + + 'config file do not have init_cfg' + '-' * 10 + '\n') + return config + + +@pytest.mark.parametrize('config', _traversed_config_file()) +def test_load_pretrained(config): + """Check out backbone whether successfully load pretrained model by using + `backbone.init_cfg`. + + Details please refer to `_check_backbone` + """ + _check_backbone(config, print_cfg=False) + + +def _test_load_pretrained(): + """We traversed all potential config files under the `config` file. If you + need to print details or debug code, you can use this function. + + Returns: + check_cfg_names (list[str]): Config files that backbone initialized + from pretrained checkpoint might be problematic. Need to recheck + the config file. 
The output including the config files that the + backbone.init_cfg is None + """ + check_cfg_names = _traversed_config_file() + need_check_cfg = [] + + prog_bar = ProgressBar(len(check_cfg_names)) + for config in check_cfg_names: + init_cfg_name = _check_backbone(config) + if init_cfg_name is not None: + need_check_cfg.append(init_cfg_name) + prog_bar.update() + print('These config files need to be checked again') + print(need_check_cfg) diff --git a/downstream/mmdetection/.dev_scripts/train_benchmark.sh b/downstream/mmdetection/.dev_scripts/train_benchmark.sh new file mode 100644 index 0000000..dc30be9 --- /dev/null +++ b/downstream/mmdetection/.dev_scripts/train_benchmark.sh @@ -0,0 +1,134 @@ +echo 'configs/atss/atss_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py ./tools/work_dir/atss_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py ./tools/work_dir/autoassign_r50_fpn_8x2_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/cascade_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab crpn_faster_rcnn_r50_caffe_fpn_1x_coco configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab centernet_resnet18_dcnv2_140e_coco configs/centernet/centernet_resnet18_dcnv2_140e_coco.py ./tools/work_dir/centernet_resnet18_dcnv2_140e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py' & +GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab centripetalnet_hourglass104_mstest_16x6_210e_coco configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py ./tools/work_dir/centripetalnet_hourglass104_mstest_16x6_210e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cornernet_hourglass104_mstest_8x6_210e_coco configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py ./tools/work_dir/cornernet_hourglass104_mstest_8x6_210e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/detectors/detectors_htc_r50_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab detectors_htc_r50_1x_coco configs/detectors/detectors_htc_r50_1x_coco.py ./tools/work_dir/detectors_htc_r50_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py' & +GPUS=16 
GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab deformable_detr_r50_16x2_50e_coco configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py ./tools/work_dir/deformable_detr_r50_16x2_50e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/detr/detr_r50_8x2_150e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab detr_r50_8x2_150e_coco configs/detr/detr_r50_8x2_150e_coco.py ./tools/work_dir/detr_r50_8x2_150e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab dh_faster_rcnn_r50_fpn_1x_coco configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/dh_faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab dynamic_rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/dynamic_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_dc5_mstrain_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_fpn_mstrain_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_ohem_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_ohem_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab fovea_align_r50_fpn_gn-head_4x4_2x_coco configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py ./tools/work_dir/fovea_align_r50_fpn_gn-head_4x4_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_fp16_1x_coco configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py 
./tools/work_dir/mask_rcnn_r50_fpn_fp16_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_fpn_fp16_1x_coco configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py ./tools/work_dir/retinanet_r50_fpn_fp16_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_free_anchor_r50_fpn_1x_coco configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py ./tools/work_dir/retinanet_free_anchor_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py ./tools/work_dir/fsaf_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py ./tools/work_dir/gfl_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_ghm_r50_fpn_1x_coco configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py ./tools/work_dir/retinanet_ghm_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab grid_rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py ./tools/work_dir/grid_rcnn_r50_fpn_gn-head_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ga_faster_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py ./tools/work_dir/ga_faster_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/htc/htc_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py ./tools/work_dir/htc_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ld_r18_gflv1_r101_fpn_coco_1x configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py ./tools/work_dir/ld_r18_gflv1_r101_fpn_coco_1x --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab libra_faster_rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/libra_faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py 
./tools/work_dir/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ms_rcnn_r50_caffe_fpn_1x_coco configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/ms_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py ./tools/work_dir/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/paa/paa_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py ./tools/work_dir/paa_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab pisa_mask_rcnn_r50_fpn_1x_coco configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/pisa_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab point_rend_r50_caffe_fpn_mstrain_1x_coco configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py ./tools/work_dir/point_rend_r50_caffe_fpn_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab reppoints_moment_r50_fpn_gn-neck+head_1x_coco configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py ./tools/work_dir/reppoints_moment_r50_fpn_gn-neck+head_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_caffe_fpn_1x_coco configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py ./tools/work_dir/retinanet_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py ./tools/work_dir/rpn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab sabl_retinanet_r50_fpn_1x_coco configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py ./tools/work_dir/sabl_retinanet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/ssd/ssd300_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ssd300_coco configs/ssd/ssd300_coco.py ./tools/work_dir/ssd300_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab tridentnet_r50_caffe_1x_coco 
configs/tridentnet/tridentnet_r50_caffe_1x_coco.py ./tools/work_dir/tridentnet_r50_caffe_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py ./tools/work_dir/vfnet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/yolact/yolact_r50_8x8_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolact_r50_8x8_coco configs/yolact/yolact_r50_8x8_coco.py ./tools/work_dir/yolact_r50_8x8_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/yolo/yolov3_d53_320_273e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolov3_d53_320_273e_coco configs/yolo/yolov3_d53_320_273e_coco.py ./tools/work_dir/yolov3_d53_320_273e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab sparse_rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/sparse_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py ./tools/work_dir/scnet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolof_r50_c5_8x8_1x_coco configs/yolof/yolof_r50_c5_8x8_1x_coco.py ./tools/work_dir/yolof_r50_c5_8x8_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_carafe_1x_coco configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_carafe_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_mdpool_1x_coco configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_mdpool_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_dpool_1x_coco configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_dpool_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco --cfg-options 
checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_gn-all_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_gn_ws-all_2x_coco configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_gn_ws-all_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_hrnetv2p_w18_1x_coco configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py ./tools/work_dir/mask_rcnn_hrnetv2p_w18_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_pafpn_1x_coco configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_pafpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_nasfpn_crop640_50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py ./tools/work_dir/retinanet_r50_nasfpn_crop640_50e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py ./tools/work_dir/mask_rcnn_regnetx-3.2GF_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py ./tools/work_dir/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r2_101_fpn_2x_coco configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py ./tools/work_dir/faster_rcnn_r2_101_fpn_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 
'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_groie_1x_coco configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_groie_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_1x_cityscapes configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py ./tools/work_dir/mask_rcnn_r50_fpn_1x_cityscapes --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab panoptic_fpn_r50_fpn_1x_coco configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py ./tools/work_dir/panoptic_fpn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/yolox/yolox_tiny_8x8_300e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolox_tiny_8x8_300e_coco configs/yolox/yolox_tiny_8x8_300e_coco.py ./tools/work_dir/yolox_tiny_8x8_300e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & +echo 'configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ssdlite_mobilenetv2_scratch_600e_coco configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py ./tools/work_dir/ssdlite_mobilenetv2_scratch_600e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & diff --git a/downstream/mmdetection/.github/CODE_OF_CONDUCT.md b/downstream/mmdetection/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..92afad1 --- /dev/null +++ b/downstream/mmdetection/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at chenkaidev@gmail.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq + +[homepage]: https://www.contributor-covenant.org diff --git a/downstream/mmdetection/.github/CONTRIBUTING.md b/downstream/mmdetection/.github/CONTRIBUTING.md new file mode 100644 index 0000000..c669626 --- /dev/null +++ b/downstream/mmdetection/.github/CONTRIBUTING.md @@ -0,0 +1 @@ +We appreciate all contributions to improve MMDetection. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline. diff --git a/downstream/mmdetection/.github/ISSUE_TEMPLATE/config.yml b/downstream/mmdetection/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..56bbd88 --- /dev/null +++ b/downstream/mmdetection/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,9 @@ +blank_issues_enabled: false + +contact_links: + - name: Common Issues + url: https://mmdetection.readthedocs.io/en/latest/faq.html + about: Check if your issue already has solutions + - name: MMDetection Documentation + url: https://mmdetection.readthedocs.io/en/latest/ + about: Check if your question is answered in docs diff --git a/downstream/mmdetection/.github/ISSUE_TEMPLATE/error-report.md b/downstream/mmdetection/.github/ISSUE_TEMPLATE/error-report.md new file mode 100644 index 0000000..9dbd3ff --- /dev/null +++ b/downstream/mmdetection/.github/ISSUE_TEMPLATE/error-report.md @@ -0,0 +1,46 @@ +--- +name: Error report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' +--- + +Thanks for your error report and we appreciate it a lot. + +**Checklist** + +1. I have searched related issues but cannot get the expected help. +2. I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help. +3. 
The bug has not been fixed in the latest version. + +**Describe the bug** +A clear and concise description of what the bug is. + +**Reproduction** + +1. What command or script did you run? + +```none +A placeholder for the command. +``` + +2. Did you make any modifications to the code or config? Did you understand what you have modified? +3. What dataset did you use? + +**Environment** + +1. Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and paste it here. +2. You may add any additional information that may be helpful for locating the problem, such as + - How you installed PyTorch \[e.g., pip, conda, source\] + - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.) + +**Error traceback** +If applicable, paste the error traceback here. + +```none +A placeholder for the traceback. +``` + +**Bug fix** +If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here; that would be much appreciated! diff --git a/downstream/mmdetection/.github/ISSUE_TEMPLATE/feature_request.md b/downstream/mmdetection/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..7bf92e8 --- /dev/null +++ b/downstream/mmdetection/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,21 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' +--- + +**Describe the feature** + +**Motivation** +A clear and concise description of the motivation for the feature. +Ex1. It is inconvenient when \[....\]. +Ex2. There is a recent paper \[....\], which is very helpful for \[....\]. + +**Related resources** +If there is an official code release or third-party implementations, please also provide that information here; it would be very helpful. + +**Additional context** +Add any other context or screenshots about the feature request here. +If you would like to implement the feature and create a PR, please leave a comment here; that would be much appreciated. diff --git a/downstream/mmdetection/.github/ISSUE_TEMPLATE/general_questions.md b/downstream/mmdetection/.github/ISSUE_TEMPLATE/general_questions.md new file mode 100644 index 0000000..f02dd63 --- /dev/null +++ b/downstream/mmdetection/.github/ISSUE_TEMPLATE/general_questions.md @@ -0,0 +1,7 @@ +--- +name: General questions +about: Ask general questions to get help +title: '' +labels: '' +assignees: '' +--- diff --git a/downstream/mmdetection/.github/ISSUE_TEMPLATE/reimplementation_questions.md b/downstream/mmdetection/.github/ISSUE_TEMPLATE/reimplementation_questions.md new file mode 100644 index 0000000..83607ac --- /dev/null +++ b/downstream/mmdetection/.github/ISSUE_TEMPLATE/reimplementation_questions.md @@ -0,0 +1,67 @@ +--- +name: Reimplementation Questions +about: Ask questions about model reimplementation +title: '' +labels: reimplementation +assignees: '' +--- + +**Notice** + +There are several common situations in reimplementation issues, as listed below + +1. Reimplement a model from the model zoo using the provided configs +2. Reimplement a model from the model zoo on another dataset (e.g., custom datasets) +3. Reimplement a custom model, but all the components are implemented in MMDetection +4. Reimplement a custom model with new modules implemented by yourself + +There are several things to do in the different cases, as described below.
+ +- For case 1 & 3, please follow the steps in the following sections so that we can quickly identify the issue. +- For case 2 & 4, please understand that we are not able to help much here because we usually do not know the full code, and users are responsible for the code they write. +- One suggestion for case 2 & 4 is that users should first check whether the bug lies in the self-implemented code or in the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtained in the issue, follow the steps in the following sections, and be as clear as possible so that we can better help you. + +**Checklist** + +1. I have searched related issues but cannot get the expected help. +2. The issue has not been fixed in the latest version. + +**Describe the issue** + +A clear and concise description of the problem you met and what you have done. + +**Reproduction** + +1. What command or script did you run? + +```none +A placeholder for the command. +``` + +2. What config did you run? + +```none +A placeholder for the config. +``` + +3. Did you make any modifications to the code or config? Did you understand what you have modified? +4. What dataset did you use? + +**Environment** + +1. Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and paste it here. +2. You may add any additional information that may be helpful for locating the problem, such as + 1. How you installed PyTorch \[e.g., pip, conda, source\] + 2. Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.) + +**Results** + +If applicable, paste the related results here, e.g., what you expect and what you get. + +```none +A placeholder for results comparison +``` + +**Issue fix** + +If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here; that would be much appreciated! diff --git a/downstream/mmdetection/.github/pull_request_template.md b/downstream/mmdetection/.github/pull_request_template.md new file mode 100644 index 0000000..8f8e289 --- /dev/null +++ b/downstream/mmdetection/.github/pull_request_template.md @@ -0,0 +1,25 @@ +Thanks for your contribution; we appreciate it a lot. The following instructions will help keep your pull request healthy and get feedback more easily. If you do not understand some items, don't worry, just make the pull request and seek help from the maintainers. + +## Motivation + +Please describe the motivation of this PR and the goal you want to achieve through it. + +## Modification + +Please briefly describe what modification is made in this PR. + +## BC-breaking (Optional) + +Does the modification introduce changes that break the backward compatibility of the downstream repos? +If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR. + +## Use cases (Optional) + +If this PR introduces a new feature, it is better to list some use cases here and update the documentation. + +## Checklist + +1. Pre-commit or other linting tools are used to fix the potential lint issues. +2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure correctness. +3.
If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMCls. +4. The documentation has been modified accordingly, like docstring or example tutorials. diff --git a/downstream/mmdetection/.github/workflows/build.yml b/downstream/mmdetection/.github/workflows/build.yml new file mode 100644 index 0000000..918ce92 --- /dev/null +++ b/downstream/mmdetection/.github/workflows/build.yml @@ -0,0 +1,288 @@ +name: build + +on: + push: + paths-ignore: + - ".dev_scripts/**" + - ".github/**.md" + - "demo/**" + - "docker/**" + - "tools/**" + - "README.md" + - "README_zh-CN.md" + + pull_request: + paths-ignore: + - ".dev_scripts/**" + - ".github/**.md" + - "demo/**" + - "docker/**" + - "docs/**" + - "docs_zh-CN/**" + - "tools/**" + - "README.md" + - "README_zh-CN.md" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_cpu: + runs-on: ubuntu-18.04 + strategy: + matrix: + python-version: [3.7] + torch: [1.5.1, 1.6.0, 1.7.0, 1.8.0, 1.9.0, 1.10.1] + include: + - torch: 1.5.1 + torchvision: 0.6.1 + mmcv: 1.5 + - torch: 1.6.0 + torchvision: 0.7.0 + mmcv: 1.6 + - torch: 1.7.0 + torchvision: 0.8.1 + mmcv: 1.7 + - torch: 1.8.0 + torchvision: 0.9.0 + mmcv: 1.8 + - torch: 1.9.0 + torchvision: 0.10.0 + mmcv: 1.9 + - torch: 1.10.1 + torchvision: 0.11.2 + mmcv: "1.10" + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install Pillow + run: pip install Pillow==6.2.2 + if: ${{matrix.torchvision == '0.4.2'}} + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html + - name: Install MMCV + run: | + pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${{matrix.mmcv}}/index.html + python -c 'import mmcv; print(mmcv.__version__)' + - name: Install unittest dependencies + run: | + pip install -r requirements/tests.txt -r requirements/optional.txt + pip install albumentations>=0.3.2 --no-binary imgaug,albumentations + pip install git+https://github.com/cocodataset/panopticapi.git + - name: Build and install + run: rm -rf .eggs && pip install -e . 
+ - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmdet -m pytest tests/ + coverage xml + coverage report -m + + build_cuda101: + runs-on: ubuntu-18.04 + container: + image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel + + strategy: + matrix: + python-version: [3.7] + torch: [1.5.1+cu101, 1.6.0+cu101, 1.7.0+cu101, 1.8.0+cu101] + include: + - torch: 1.5.1+cu101 + torch_version: torch1.5.1 + torchvision: 0.6.1+cu101 + mmcv: 1.5 + - torch: 1.6.0+cu101 + torch_version: torch1.6.0 + torchvision: 0.7.0+cu101 + mmcv: 1.6 + - torch: 1.7.0+cu101 + torch_version: torch1.7.0 + torchvision: 0.8.1+cu101 + mmcv: 1.7 + - torch: 1.8.0+cu101 + torch_version: torch1.8.0 + torchvision: 0.9.0+cu101 + mmcv: 1.8 + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Fetch GPG keys + run: | + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + - name: Install system dependencies + run: | + apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev + apt-get clean + rm -rf /var/lib/apt/lists/* + - name: Install Pillow + run: python -m pip install Pillow==6.2.2 + if: ${{matrix.torchvision < 0.5}} + - name: Install PyTorch + run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html + - name: Install dependencies for compiling onnx when python=3.9 + run: python -m pip install "protobuf <= 3.20.1" && apt-get install libprotobuf-dev protobuf-compiler + if: ${{matrix.python-version == '3.9'}} + - name: Install mmdet dependencies + run: | + python -V + python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch${{matrix.mmcv}}/index.html + python -m pip install pycocotools + python -m pip install -r requirements/tests.txt -r requirements/optional.txt + python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations + python -m pip install git+https://github.com/cocodataset/panopticapi.git + python -c 'import mmcv; print(mmcv.__version__)' + - name: Build and install + run: | + rm -rf .eggs + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 pip install . 
+ - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmdet -m pytest tests/ + coverage xml + coverage report -m + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1.0.10 + with: + file: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + build_cuda102: + runs-on: ubuntu-18.04 + container: + image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel + + strategy: + matrix: + python-version: [3.6, 3.7, 3.8, 3.9] + torch: [1.9.0+cu102, 1.10.1+cu102] + include: + - torch: 1.9.0+cu102 + torch_version: torch1.9.0 + torchvision: 0.10.0+cu102 + mmcv: 1.9 + - torch: 1.10.1+cu102 + torch_version: torch1.10.1 + torchvision: 0.11.2+cu102 + mmcv: "1.10" + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Fetch GPG keys + run: | + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + # Add ppa source repo for python3.9. + - name: Add python3.9 source + run: | + apt-get update && apt-get install -y software-properties-common + add-apt-repository -y ppa:deadsnakes/ppa + if: ${{matrix.python-version == '3.9'}} + # Install python-dev for some packages which require libpython3.Xm. + # Github's setup-python cannot install python3.9-dev, so we have to use apt install. + # Set DEBIAN_FRONTEND=noninteractive to avoid some interactions. + - name: Install python-dev + run: apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends python${{matrix.python-version}}-dev + - name: Install system dependencies + run: | + apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 + apt-get clean + rm -rf /var/lib/apt/lists/* + - name: Install Pillow + run: python -m pip install Pillow==6.2.2 + if: ${{matrix.torchvision < 0.5}} + - name: Install PyTorch + run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html + - name: Install dependencies for compiling onnx when python=3.9 + run: python -m pip install "protobuf <= 3.20.1" && apt-get update && apt-get -y install libprotobuf-dev protobuf-compiler cmake + if: ${{matrix.python-version == '3.9'}} + - name: Install mmdet dependencies + run: | + python -V + python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/torch${{matrix.mmcv}}/index.html + python -m pip install pycocotools + python -m pip install -r requirements/tests.txt -r requirements/optional.txt + python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations + python -m pip install git+https://github.com/cocodataset/panopticapi.git + python -c 'import mmcv; print(mmcv.__version__)' + - name: Build and install + run: | + rm -rf .eggs + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 pip install . 
+ - name: Run unittests and generate coverage report + run: | + coverage run --branch --source mmdet -m pytest tests/ + coverage xml + coverage report -m + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + files: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + build_windows: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [windows-2022] + python: [3.8] + platform: [cpu, cu111] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Upgrade pip + run: python -m pip install pip --upgrade --user + - name: Install PyTorch + # As a complement to Linux CI, we test on PyTorch LTS version + run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html + - name: Install MMCV + run: pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full + - name: Install unittest dependencies + run: | + python -V + python -m pip install pycocotools + python -m pip install -r requirements/tests.txt -r requirements/optional.txt + python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations + python -m pip install git+https://github.com/cocodataset/panopticapi.git + python -c 'import mmcv; print(mmcv.__version__)' + - name: Show pip list + run: pip list + - name: Build and install + run: pip install -e . + - name: Run unittests + run: coverage run --branch --source mmdet -m pytest tests + - name: Generate coverage report + run: | + coverage xml + coverage report -m + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + file: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false diff --git a/downstream/mmdetection/.github/workflows/build_pat.yml b/downstream/mmdetection/.github/workflows/build_pat.yml new file mode 100644 index 0000000..438e27e --- /dev/null +++ b/downstream/mmdetection/.github/workflows/build_pat.yml @@ -0,0 +1,28 @@ +name: build_pat + +on: push + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_parrots: + runs-on: ubuntu-latest + container: + image: ghcr.io/zhouzaida/parrots-mmcv:1.3.4 + credentials: + username: zhouzaida + password: ${{ secrets.CR_PAT }} + + steps: + - uses: actions/checkout@v2 + - name: Install mmdet dependencies + run: | + git clone https://github.com/open-mmlab/mmcv.git && cd mmcv + MMCV_WITH_OPS=1 python setup.py install + cd .. && rm -rf mmcv + python -c 'import mmcv; print(mmcv.__version__)' + pip install -r requirements.txt + - name: Build and install + run: rm -rf .eggs && pip install -e . 
diff --git a/downstream/mmdetection/.github/workflows/deploy.yml b/downstream/mmdetection/.github/workflows/deploy.yml new file mode 100644 index 0000000..f575061 --- /dev/null +++ b/downstream/mmdetection/.github/workflows/deploy.yml @@ -0,0 +1,28 @@ +name: deploy + +on: push + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-n-publish: + runs-on: ubuntu-latest + if: startsWith(github.event.ref, 'refs/tags') + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install torch + run: pip install torch + - name: Install wheel + run: pip install wheel + - name: Build MMDetection + run: python setup.py sdist bdist_wheel + - name: Publish distribution to PyPI + run: | + pip install twine + twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }} diff --git a/downstream/mmdetection/.github/workflows/lint.yml b/downstream/mmdetection/.github/workflows/lint.yml new file mode 100644 index 0000000..91565fe --- /dev/null +++ b/downstream/mmdetection/.github/workflows/lint.yml @@ -0,0 +1,27 @@ +name: lint + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install pre-commit hook + run: | + pip install pre-commit + pre-commit install + - name: Linting + run: pre-commit run --all-files + - name: Check docstring coverage + run: | + pip install interrogate + interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 80 mmdet diff --git a/downstream/mmdetection/.gitignore b/downstream/mmdetection/.gitignore new file mode 100644 index 0000000..892731d --- /dev/null +++ b/downstream/mmdetection/.gitignore @@ -0,0 +1,124 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/en/_build/ +docs/zh_cn/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +data/ +data +.vscode +.idea +.DS_Store + +# custom +*.pkl +*.pkl.json +*.log.json +docs/modelzoo_statistics.md +mmdet/.mim +work_dirs/ + +# Pytorch +*.pth +*.py~ +*.sh~ diff --git a/downstream/mmdetection/.owners.yml b/downstream/mmdetection/.owners.yml new file mode 100644 index 0000000..97296aa --- /dev/null +++ b/downstream/mmdetection/.owners.yml @@ -0,0 +1,14 @@ +assign: + strategy: + # random + daily-shift-based + scedule: + '*/1 * * * *' + assignees: + - Czm369 + - hhaAndroid + - jbwang1997 + - RangiLyu + - BIGWangYuDong + - chhluo + - ZwwWayne diff --git a/downstream/mmdetection/.pre-commit-config.yaml b/downstream/mmdetection/.pre-commit-config.yaml new file mode 100644 index 0000000..82dd58c --- /dev/null +++ b/downstream/mmdetection/.pre-commit-config.yaml @@ -0,0 +1,50 @@ +repos: + - repo: https://github.com/PyCQA/flake8 + rev: 3.8.3 + hooks: + - id: flake8 + - repo: https://github.com/PyCQA/isort + rev: 5.10.1 + hooks: + - id: isort + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.30.0 + hooks: + - id: yapf + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.1.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: double-quote-string-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: ["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] + - repo: https://github.com/codespell-project/codespell + rev: v2.1.0 + hooks: + - id: codespell + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.14 + hooks: + - id: mdformat + args: ["--number"] + additional_dependencies: + - mdformat-gfm + - mdformat_frontmatter + - linkify-it-py + - repo: https://github.com/myint/docformatter + rev: v1.3.1 + hooks: + - id: docformatter + args: ["--in-place", "--wrap-descriptions", "79"] + - repo: https://github.com/open-mmlab/pre-commit-hooks + rev: v0.2.0 # Use the ref you want to point at + hooks: + - id: check-algo-readme + - id: check-copyright + args: ["mmdet"] # replace the dir_to_check with your expected directory to check diff --git a/downstream/mmdetection/.readthedocs.yml b/downstream/mmdetection/.readthedocs.yml new file mode 100644 index 0000000..6cfbf5d --- /dev/null +++ b/downstream/mmdetection/.readthedocs.yml @@ -0,0 +1,9 @@ +version: 2 + +formats: all + +python: + version: 3.7 + install: + - requirements: requirements/docs.txt + - requirements: requirements/readthedocs.txt diff --git a/downstream/mmdetection/CITATION.cff b/downstream/mmdetection/CITATION.cff new file mode 100644 index 0000000..aac9313 --- /dev/null +++ b/downstream/mmdetection/CITATION.cff @@ -0,0 +1,8 @@ +cff-version: 1.2.0 
+message: "If you use this software, please cite it as below." +authors: + - name: "MMDetection Contributors" +title: "OpenMMLab Detection Toolbox and Benchmark" +date-released: 2018-08-22 +url: "https://github.com/open-mmlab/mmdetection" +license: Apache-2.0 diff --git a/downstream/mmdetection/LICENSE b/downstream/mmdetection/LICENSE new file mode 100644 index 0000000..1bfc23e --- /dev/null +++ b/downstream/mmdetection/LICENSE @@ -0,0 +1,203 @@ +Copyright 2018-2023 OpenMMLab. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2023 OpenMMLab. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/downstream/mmdetection/MANIFEST.in b/downstream/mmdetection/MANIFEST.in new file mode 100644 index 0000000..6300b22 --- /dev/null +++ b/downstream/mmdetection/MANIFEST.in @@ -0,0 +1,6 @@ +include requirements/*.txt +include mmdet/VERSION +include mmdet/.mim/model-index.yml +include mmdet/.mim/demo/*/* +recursive-include mmdet/.mim/configs *.py *.yml +recursive-include mmdet/.mim/tools *.sh *.py diff --git a/downstream/mmdetection/README.md b/downstream/mmdetection/README.md new file mode 100644 index 0000000..522cfb8 --- /dev/null +++ b/downstream/mmdetection/README.md @@ -0,0 +1,357 @@ +
+ +[![PyPI](https://img.shields.io/pypi/v/mmdet)](https://pypi.org/project/mmdet) +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection.readthedocs.io/en/latest/) +[![badge](https://github.com/open-mmlab/mmdetection/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmdetection/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection) +[![license](https://img.shields.io/github/license/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/blob/master/LICENSE) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues) +[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues) + +[📘Documentation](https://mmdetection.readthedocs.io/en/stable/) | +[🛠️Installation](https://mmdetection.readthedocs.io/en/stable/get_started.html) | +[👀Model Zoo](https://mmdetection.readthedocs.io/en/stable/model_zoo.html) | +[🆕Update News](https://mmdetection.readthedocs.io/en/stable/changelog.html) | +[🚀Ongoing Projects](https://github.com/open-mmlab/mmdetection/projects) | +[🤔Reporting Issues](https://github.com/open-mmlab/mmdetection/issues/new/choose) + +
+ +
+ +English | [简体中文](README_zh-CN.md) + +
+ +## Introduction + +MMDetection is an open source object detection toolbox based on PyTorch. It is +a part of the [OpenMMLab](https://openmmlab.com/) project. + +The master branch works with **PyTorch 1.5+**. + + + +
+Major features + +- **Modular Design** + + We decompose the detection framework into different components, so one can easily construct a customized object detection framework by combining different modules (see the sketch below). + +- **Support of multiple frameworks out of the box** + + The toolbox directly supports popular and contemporary detection frameworks, *e.g.* Faster RCNN, Mask RCNN, RetinaNet, etc. + +- **High efficiency** + + All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet). + +- **State of the art** + + The toolbox stems from the codebase developed by the *MMDet* team, who won the [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward. + +
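As a minimal sketch of this modular, config-driven design (assuming a working MMDetection 2.x / mmcv-full installation; the config path is one that ships with this repository, and the printed module names depend on the chosen config):

```python
# Minimal sketch: a detector is assembled purely from the modules declared in
# its config file. Assumes MMDetection 2.x and mmcv-full are installed.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py')

# Each component (backbone, neck, heads) is an interchangeable, registered module.
print(cfg.model.type, cfg.model.backbone.type, cfg.model.neck.type)

# Build the composed detector object from the config dict.
model = build_detector(
    cfg.model,
    train_cfg=cfg.get('train_cfg'),
    test_cfg=cfg.get('test_cfg'))
```

Swapping a component is then a config edit (pointing, say, `model.neck` at a different registered module) rather than a code change.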
+ +Apart from MMDetection, we also released [mmcv](https://github.com/open-mmlab/mmcv), a library for computer vision research on which this toolbox heavily depends. + +## What's New + +**2.25.0** was released on 1/6/2022: + +- Support dedicated `MMDetWandbHook` hook +- Support [ConvNeXt](configs/convnext), [DDOD](configs/ddod), [SOLOv2](configs/solov2) +- Support [Mask2Former](configs/mask2former) for instance segmentation +- Rename [config files of Mask2Former](configs/mask2former) + +Please refer to [changelog.md](docs/en/changelog.md) for details and release history. + +For compatibility changes between different versions of MMDetection, please refer to [compatibility.md](docs/en/compatibility.md). + +## Installation + +Please refer to [Installation](docs/en/get_started.md/#Installation) for installation instructions. + +## Getting Started + +Please see [get_started.md](docs/en/get_started.md) for the basic usage of MMDetection. We provide a [colab tutorial](demo/MMDet_Tutorial.ipynb), an [instance segmentation colab tutorial](demo/MMDet_InstanceSeg_Tutorial.ipynb), and other tutorials for: + +- [with existing dataset](docs/en/1_exist_data_model.md) +- [with new dataset](docs/en/2_new_data_model.md) +- [with existing dataset_new_model](docs/en/3_exist_data_new_model.md) +- [learn about configs](docs/en/tutorials/config.md) +- [customize_datasets](docs/en/tutorials/customize_dataset.md) +- [customize data pipelines](docs/en/tutorials/data_pipeline.md) +- [customize_models](docs/en/tutorials/customize_models.md) +- [customize runtime settings](docs/en/tutorials/customize_runtime.md) +- [customize_losses](docs/en/tutorials/customize_losses.md) +- [finetuning models](docs/en/tutorials/finetune.md) +- [export a model to ONNX](docs/en/tutorials/pytorch2onnx.md) +- [export ONNX to TRT](docs/en/tutorials/onnx2tensorrt.md) +- [weight initialization](docs/en/tutorials/init_cfg.md) +- [how to xxx](docs/en/tutorials/how_to.md) + +## Overview of Benchmark and Model Zoo + +Results and models are available in the [model zoo](docs/en/model_zoo.md). + +
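For a quick feel of the basic usage, a minimal inference sketch follows, assuming MMDetection 2.x is installed and a checkpoint matching the config has been downloaded from the model zoo; the checkpoint and image paths below are placeholders.

```python
# Minimal inference sketch; the checkpoint and image paths are placeholders.
from mmdet.apis import inference_detector, init_detector

config_file = 'configs/yolox/yolox_tiny_8x8_300e_coco.py'      # ships with the repo
checkpoint_file = 'checkpoints/yolox_tiny_8x8_300e_coco.pth'   # placeholder: download from the model zoo
model = init_detector(config_file, checkpoint_file, device='cuda:0')

result = inference_detector(model, 'demo/demo.jpg')            # placeholder test image
model.show_result('demo/demo.jpg', result, out_file='result.jpg')
```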
+- **Architectures**: Object Detection, Instance Segmentation, Panoptic Segmentation, and Other (including Contrastive Learning and Distillation)
+- **Components**: Backbones, Necks, Loss, and Common
+ +Some other methods are also supported in [projects using MMDetection](./docs/en/projects.md). + +## FAQ + +Please refer to [FAQ](docs/en/faq.md) for frequently asked questions. + +## Contributing + +We appreciate all contributions to improve MMDetection. Ongoing projects can be found in our [GitHub Projects](https://github.com/open-mmlab/mmdetection/projects). We welcome community users to participate in these projects. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline. + +## Acknowledgement + +MMDetection is an open source project that is contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback. +We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new detectors. + +## Citation + +If you use this toolbox or benchmark in your research, please cite this project. + +``` +@article{mmdetection, + title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark}, + author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and + Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and + Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and + Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and + Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong + and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua}, + journal= {arXiv preprint arXiv:1906.07155}, + year={2019} +} +``` + +## License + +This project is released under the [Apache 2.0 license](LICENSE). + +## Projects in OpenMMLab + +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. +- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. +- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark. +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark. +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
+- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework. diff --git a/downstream/mmdetection/configs/_base_/datasets/cityscapes_detection.py b/downstream/mmdetection/configs/_base_/datasets/cityscapes_detection.py new file mode 100644 index 0000000..e341b59 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/cityscapes_detection.py @@ -0,0 +1,56 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=8, + dataset=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_train.json', + img_prefix=data_root + 'leftImg8bit/train/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_val.json', + img_prefix=data_root + 'leftImg8bit/val/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_test.json', + img_prefix=data_root + 'leftImg8bit/test/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') diff --git a/downstream/mmdetection/configs/_base_/datasets/cityscapes_instance.py b/downstream/mmdetection/configs/_base_/datasets/cityscapes_instance.py new file mode 100644 index 0000000..4e3c34e --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/cityscapes_instance.py @@ -0,0 +1,56 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + 
dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=8, + dataset=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_train.json', + img_prefix=data_root + 'leftImg8bit/train/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_val.json', + img_prefix=data_root + 'leftImg8bit/val/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_test.json', + img_prefix=data_root + 'leftImg8bit/test/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/_base_/datasets/coco_detection.py b/downstream/mmdetection/configs/_base_/datasets/coco_detection.py new file mode 100644 index 0000000..149f590 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/coco_detection.py @@ -0,0 +1,49 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') diff --git a/downstream/mmdetection/configs/_base_/datasets/coco_instance.py b/downstream/mmdetection/configs/_base_/datasets/coco_instance.py new file mode 100644 index 0000000..9901a85 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/coco_instance.py @@ -0,0 +1,49 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 
'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/_base_/datasets/coco_instance_semantic.py b/downstream/mmdetection/configs/_base_/datasets/coco_instance_semantic.py new file mode 100644 index 0000000..6c8bf07 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/coco_instance_semantic.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='SegRescale', scale_factor=1 / 8), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + seg_prefix=data_root + 'stuffthingmaps/train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/_base_/datasets/coco_panoptic.py b/downstream/mmdetection/configs/_base_/datasets/coco_panoptic.py new file mode 100644 index 0000000..dbade7c --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/coco_panoptic.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'CocoPanopticDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + 
type='LoadPanopticAnnotations', + with_bbox=True, + with_mask=True, + with_seg=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='SegRescale', scale_factor=1 / 4), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/panoptic_train2017.json', + img_prefix=data_root + 'train2017/', + seg_prefix=data_root + 'annotations/panoptic_train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/panoptic_val2017.json', + img_prefix=data_root + 'val2017/', + seg_prefix=data_root + 'annotations/panoptic_val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/panoptic_val2017.json', + img_prefix=data_root + 'val2017/', + seg_prefix=data_root + 'annotations/panoptic_val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric=['PQ']) diff --git a/downstream/mmdetection/configs/_base_/datasets/deepfashion.py b/downstream/mmdetection/configs/_base_/datasets/deepfashion.py new file mode 100644 index 0000000..308b4b2 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/deepfashion.py @@ -0,0 +1,53 @@ +# dataset settings +dataset_type = 'DeepFashionDataset' +data_root = 'data/DeepFashion/In-shop/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(750, 1101), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(750, 1101), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + imgs_per_gpu=2, + workers_per_gpu=1, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', + img_prefix=data_root + 'Img/', + pipeline=train_pipeline, + data_root=data_root), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', + img_prefix=data_root + 'Img/', + pipeline=test_pipeline, + data_root=data_root), + test=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/DeepFashion_segmentation_gallery.json', + img_prefix=data_root + 'Img/', + pipeline=test_pipeline, + data_root=data_root)) +evaluation = dict(interval=5, metric=['bbox', 'segm']) 
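The two LVIS configs that follow inherit from `coco_instance.py` via `_base_`, and they set `_delete_=True` so the inherited `train` dict is replaced outright rather than merged. As a minimal, illustrative sketch of how such a config resolves (assuming mmcv 1.x is installed and the path below is valid relative to the working directory):

```python
from mmcv import Config

# Load the child config; mmcv merges it on top of the file named in `_base_`.
cfg = Config.fromfile(
    'downstream/mmdetection/configs/_base_/datasets/lvis_v0.5_instance.py')

# `_delete_=True` rebuilt the train dataset from scratch ...
print(cfg.data.train.type)       # expected: 'ClassBalancedDataset'
# ... while val/test still inherit the test pipeline from coco_instance.py.
print(cfg.data.val.pipeline[0])  # expected: dict(type='LoadImageFromFile')
```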
diff --git a/downstream/mmdetection/configs/_base_/datasets/lvis_v0.5_instance.py b/downstream/mmdetection/configs/_base_/datasets/lvis_v0.5_instance.py new file mode 100644 index 0000000..207e005 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/lvis_v0.5_instance.py @@ -0,0 +1,24 @@ +# dataset settings +_base_ = 'coco_instance.py' +dataset_type = 'LVISV05Dataset' +data_root = 'data/lvis_v0.5/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + _delete_=True, + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v0.5_train.json', + img_prefix=data_root + 'train2017/')), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v0.5_val.json', + img_prefix=data_root + 'val2017/'), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v0.5_val.json', + img_prefix=data_root + 'val2017/')) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/_base_/datasets/lvis_v1_instance.py b/downstream/mmdetection/configs/_base_/datasets/lvis_v1_instance.py new file mode 100644 index 0000000..be791ed --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/lvis_v1_instance.py @@ -0,0 +1,24 @@ +# dataset settings +_base_ = 'coco_instance.py' +dataset_type = 'LVISV1Dataset' +data_root = 'data/lvis_v1/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + _delete_=True, + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_train.json', + img_prefix=data_root)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_val.json', + img_prefix=data_root), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_val.json', + img_prefix=data_root)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/_base_/datasets/openimages_detection.py b/downstream/mmdetection/configs/_base_/datasets/openimages_detection.py new file mode 100644 index 0000000..a65d306 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/openimages_detection.py @@ -0,0 +1,65 @@ +# dataset settings +dataset_type = 'OpenImagesDataset' +data_root = 'data/OpenImages/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, denorm_bbox=True), + dict(type='Resize', img_scale=(1024, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1024, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ], + ), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=0, # workers_per_gpu > 0 may occur out of memory + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv', + img_prefix=data_root + 'OpenImages/train/', + label_file=data_root + 'annotations/class-descriptions-boxable.csv', + 
hierarchy_file=data_root + + 'annotations/bbox_labels_600_hierarchy.json', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/validation-annotations-bbox.csv', + img_prefix=data_root + 'OpenImages/validation/', + label_file=data_root + 'annotations/class-descriptions-boxable.csv', + hierarchy_file=data_root + + 'annotations/bbox_labels_600_hierarchy.json', + meta_file=data_root + 'annotations/validation-image-metas.pkl', + image_level_ann_file=data_root + + 'annotations/validation-annotations-human-imagelabels-boxable.csv', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/validation-annotations-bbox.csv', + img_prefix=data_root + 'OpenImages/validation/', + label_file=data_root + 'annotations/class-descriptions-boxable.csv', + hierarchy_file=data_root + + 'annotations/bbox_labels_600_hierarchy.json', + meta_file=data_root + 'annotations/validation-image-metas.pkl', + image_level_ann_file=data_root + + 'annotations/validation-annotations-human-imagelabels-boxable.csv', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='mAP') diff --git a/downstream/mmdetection/configs/_base_/datasets/voc0712.py b/downstream/mmdetection/configs/_base_/datasets/voc0712.py new file mode 100644 index 0000000..ae09acd --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/voc0712.py @@ -0,0 +1,55 @@ +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1000, 600), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + ann_file=[ + data_root + 'VOC2007/ImageSets/Main/trainval.txt', + data_root + 'VOC2012/ImageSets/Main/trainval.txt' + ], + img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='mAP') diff --git a/downstream/mmdetection/configs/_base_/datasets/wider_face.py b/downstream/mmdetection/configs/_base_/datasets/wider_face.py new file mode 100644 index 0000000..d1d649b --- /dev/null +++ b/downstream/mmdetection/configs/_base_/datasets/wider_face.py @@ -0,0 +1,63 @@ +# dataset settings +dataset_type = 'WIDERFaceDataset' +data_root = 'data/WIDERFace/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +train_pipeline = [ + 
dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(300, 300), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(300, 300), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=60, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'train.txt', + img_prefix=data_root + 'WIDER_train/', + min_size=17, + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'val.txt', + img_prefix=data_root + 'WIDER_val/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'val.txt', + img_prefix=data_root + 'WIDER_val/', + pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/_base_/default_runtime.py b/downstream/mmdetection/configs/_base_/default_runtime.py new file mode 100644 index 0000000..5b0b145 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/default_runtime.py @@ -0,0 +1,27 @@ +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +custom_hooks = [dict(type='NumClassCheckHook')] + +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] + +# disable opencv multithreading to avoid system being overloaded +opencv_num_threads = 0 +# set multi-process start method as `fork` to speed up the training +mp_start_method = 'fork' + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
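# (Illustrative note, not part of the upstream file.) When auto scaling is
# enabled, the optimizer learning rate is expected to scale linearly with the
# effective batch size:
#     lr_actual = lr_base * (num_gpus * samples_per_gpu) / base_batch_size
# For example, 4 GPUs x 2 samples/GPU against base_batch_size=16 would turn
# the default SGD lr=0.02 into 0.02 * 8 / 16 = 0.01.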
+auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/downstream/mmdetection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py b/downstream/mmdetection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py new file mode 100644 index 0000000..2902cca --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py @@ -0,0 +1,196 @@ +# model settings +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + 
pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/downstream/mmdetection/configs/_base_/models/cascade_rcnn_r50_fpn.py b/downstream/mmdetection/configs/_base_/models/cascade_rcnn_r50_fpn.py new file mode 100644 index 0000000..42f74ae --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/cascade_rcnn_r50_fpn.py @@ -0,0 +1,179 @@ +# model settings +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + 
loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/downstream/mmdetection/configs/_base_/models/fast_rcnn_r50_fpn.py b/downstream/mmdetection/configs/_base_/models/fast_rcnn_r50_fpn.py new file mode 100644 index 0000000..9982fe0 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/fast_rcnn_r50_fpn.py @@ -0,0 +1,62 @@ +# model settings +model = dict( + type='FastRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + 
loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py b/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py new file mode 100644 index 0000000..dbf965a --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py @@ -0,0 +1,117 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FasterRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=6000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', 
iou_threshold=0.5), + max_per_img=100))) diff --git a/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py b/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py new file mode 100644 index 0000000..a377a6f --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py @@ -0,0 +1,105 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FasterRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + strides=(1, 2, 2, 1), + dilations=(1, 1, 1, 2), + out_indices=(3, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=2048, + feat_channels=2048, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=2048, + featmap_strides=[16]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=2048, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms=dict(type='nms', iou_threshold=0.7), + nms_pre=6000, + max_per_img=1000, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_fpn.py b/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_fpn.py new file mode 100644 index 0000000..1ef8e7b --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/faster_rcnn_r50_fpn.py @@ -0,0 +1,108 @@ +# model settings +model = dict( + type='FasterRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 
1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) + )) diff --git a/downstream/mmdetection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py b/downstream/mmdetection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py new file mode 100644 index 0000000..122202e --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py @@ -0,0 +1,125 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='MaskRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + 
stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=None, + mask_head=dict( + type='FCNMaskHead', + num_convs=0, + in_channels=2048, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=6000, + nms=dict(type='nms', iou_threshold=0.7), + max_per_img=1000, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/downstream/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py b/downstream/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py new file mode 100644 index 0000000..d903e55 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='MaskRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + 
target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/downstream/mmdetection/configs/_base_/models/retinanet_r50_fpn.py b/downstream/mmdetection/configs/_base_/models/retinanet_r50_fpn.py new file mode 100644 index 0000000..56e43fa --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/retinanet_r50_fpn.py @@ -0,0 +1,60 @@ +# model settings +model = dict( + type='RetinaNet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) diff --git a/downstream/mmdetection/configs/_base_/models/rpn_r50_caffe_c4.py b/downstream/mmdetection/configs/_base_/models/rpn_r50_caffe_c4.py new file mode 100644 index 0000000..8b32ca9 --- /dev/null +++ 
b/downstream/mmdetection/configs/_base_/models/rpn_r50_caffe_c4.py @@ -0,0 +1,58 @@ +# model settings +model = dict( + type='RPN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=None, + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/downstream/mmdetection/configs/_base_/models/rpn_r50_fpn.py b/downstream/mmdetection/configs/_base_/models/rpn_r50_fpn.py new file mode 100644 index 0000000..edaf4d4 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/rpn_r50_fpn.py @@ -0,0 +1,58 @@ +# model settings +model = dict( + type='RPN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/downstream/mmdetection/configs/_base_/models/ssd300.py b/downstream/mmdetection/configs/_base_/models/ssd300.py new file mode 100644 index 0000000..f17df01 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/models/ssd300.py @@ -0,0 +1,56 @@ +# model settings +input_size = 300 +model = dict( + type='SingleStageDetector', + backbone=dict( + type='SSDVGG', + depth=16, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + init_cfg=dict( + 
type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')), + neck=dict( + type='SSDNeck', + in_channels=(512, 1024), + out_channels=(512, 1024, 512, 256, 256, 256), + level_strides=(2, 2, 1, 1), + level_paddings=(1, 1, 0, 0), + l2_norm_scale=20), + bbox_head=dict( + type='SSDHead', + in_channels=(512, 1024, 512, 256, 256, 256), + num_classes=80, + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + input_size=input_size, + basesize_ratio_range=(0.15, 0.9), + strides=[8, 16, 32, 64, 100, 300], + ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) +cudnn_benchmark = True diff --git a/downstream/mmdetection/configs/_base_/schedules/schedule_1x.py b/downstream/mmdetection/configs/_base_/schedules/schedule_1x.py new file mode 100644 index 0000000..13b3783 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/schedules/schedule_1x.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/_base_/schedules/schedule_20e.py b/downstream/mmdetection/configs/_base_/schedules/schedule_20e.py new file mode 100644 index 0000000..00e8590 --- /dev/null +++ b/downstream/mmdetection/configs/_base_/schedules/schedule_20e.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/_base_/schedules/schedule_2x.py b/downstream/mmdetection/configs/_base_/schedules/schedule_2x.py new file mode 100644 index 0000000..69dc9ee --- /dev/null +++ b/downstream/mmdetection/configs/_base_/schedules/schedule_2x.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/albu_example/README.md b/downstream/mmdetection/configs/albu_example/README.md new file mode 100644 index 0000000..9a180f0 --- /dev/null +++ b/downstream/mmdetection/configs/albu_example/README.md @@ -0,0 +1,31 @@ +# Albu Example + +> [Albumentations: fast and flexible image augmentations](https://arxiv.org/abs/1809.06839) + + + +## Abstract + +Data augmentation is a commonly used technique for increasing both the size and the diversity of labeled training sets by leveraging input transformations that preserve output labels. 
In the computer vision domain, image augmentations have become a common implicit regularization technique to combat overfitting in deep convolutional neural networks and are ubiquitously used to improve performance. While most deep learning frameworks implement basic image transformations, the list is typically limited to some variations and combinations of flipping, rotating, scaling, and cropping. Moreover, the image processing speed varies among existing tools for image augmentation. We present Albumentations, a fast and flexible library for image augmentations with a wide variety of image transform operations available, which is also an easy-to-use wrapper around other augmentation libraries. We provide examples of image augmentations for different computer vision tasks and show that Albumentations is faster than other commonly used image augmentation tools on most of the commonly used image transformations.
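To make the wrapper easier to follow, here is a small, self-contained Albumentations sketch (independent of MMDetection; the transform choices and limits are illustrative, loosely mirroring the `Albu` pipeline in the config linked under Results):

```python
import numpy as np
import albumentations as A

# Bounding boxes are kept consistent with the image through `bbox_params`,
# the same mechanism the mmdet `Albu` wrapper configures in the config below.
transform = A.Compose(
    [
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, p=0.5),
        A.RandomBrightnessContrast(
            brightness_limit=0.3, contrast_limit=0.3, p=0.2),
        A.HorizontalFlip(p=0.5),
    ],
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['labels']))

image = np.zeros((800, 1333, 3), dtype=np.uint8)  # dummy image (H, W, C)
bboxes = [(10, 20, 200, 300)]                     # [x_min, y_min, x_max, y_max]
result = transform(image=image, bboxes=bboxes, labels=[1])
print(result['bboxes'])  # boxes transformed alongside the image
```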
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | 4.4 | 16.6 | 38.0 | 34.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208_225520.log.json) | + +## Citation + +```latex +@article{2018arXiv180906839B, + author = {A. Buslaev, A. Parinov, E. Khvedchenya, V.~I. Iglovikov and A.~A. Kalinin}, + title = "{Albumentations: fast and flexible image augmentations}", + journal = {ArXiv e-prints}, + eprint = {1809.06839}, + year = 2018 +} +``` diff --git a/downstream/mmdetection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py b/downstream/mmdetection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py new file mode 100644 index 0000000..b3f879a --- /dev/null +++ b/downstream/mmdetection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py @@ -0,0 +1,73 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +albu_train_transforms = [ + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=0.5), + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict( + type='OneOf', + transforms=[ + dict( + type='RGBShift', + r_shift_limit=10, + g_shift_limit=10, + b_shift_limit=10, + p=1.0), + dict( + type='HueSaturationValue', + hue_shift_limit=20, + sat_shift_limit=30, + val_shift_limit=20, + p=1.0) + ], + p=0.1), + dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='Pad', size_divisor=32), + dict( + type='Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_labels'], + min_visibility=0.0, + filter_lost_elements=True), + keymap={ + 'img': 'image', + 'gt_masks': 'masks', + 'gt_bboxes': 'bboxes' + }, + update_pad_shape=False, + skip_img_without_anno=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'], + meta_keys=('filename', 'ori_shape', 'img_shape', 'img_norm_cfg', + 'pad_shape', 'scale_factor')) +] +data = 
dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/atss/README.md b/downstream/mmdetection/configs/atss/README.md new file mode 100644 index 0000000..055ed05 --- /dev/null +++ b/downstream/mmdetection/configs/atss/README.md @@ -0,0 +1,31 @@ +# ATSS + +> [Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection](https://arxiv.org/abs/1912.02424) + + + +## Abstract + +Object detection has been dominated by anchor-based detectors for several years. Recently, anchor-free detectors have become popular due to the proposal of FPN and Focal Loss. In this paper, we first point out that the essential difference between anchor-based and anchor-free detection is actually how to define positive and negative training samples, which leads to the performance gap between them. If they adopt the same definition of positive and negative samples during training, there is no obvious difference in the final performance, no matter regressing from a box or a point. This shows that how to select positive and negative training samples is important for current object detectors. Then, we propose an Adaptive Training Sample Selection (ATSS) to automatically select positive and negative samples according to statistical characteristics of object. It significantly improves the performance of anchor-based and anchor-free detectors and bridges the gap between them. Finally, we discuss the necessity of tiling multiple anchors per location on the image to detect objects. Extensive experiments conducted on MS COCO support our aforementioned analysis and conclusions. With the newly introduced ATSS, we improve state-of-the-art detectors by a large margin to 50.7% AP without introducing any overhead. + +
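As a concrete reading aid for the selection rule described above, the following NumPy sketch handles a single ground-truth box (the function name, array layout, and `k=9` are illustrative assumptions, not MMDetection's internal API):

```python
import numpy as np

def atss_select_positives(anchor_centers, anchor_levels, gt_box, ious, k=9):
    """Pick positive anchors for one ground-truth box, ATSS-style (sketch)."""
    gt_center = np.array([(gt_box[0] + gt_box[2]) / 2.0,
                          (gt_box[1] + gt_box[3]) / 2.0])
    distances = np.linalg.norm(anchor_centers - gt_center, axis=1)

    # 1) On every pyramid level, take the k anchors whose centers are closest
    #    to the ground-truth center.
    candidates = []
    for level in np.unique(anchor_levels):
        level_idx = np.flatnonzero(anchor_levels == level)
        candidates.append(level_idx[np.argsort(distances[level_idx])[:k]])
    candidates = np.concatenate(candidates)

    # 2) Adaptive IoU threshold: mean + std of the candidates' IoUs.
    cand_ious = ious[candidates]
    thr = cand_ious.mean() + cand_ious.std()

    # 3) Keep candidates above the threshold whose centers lie inside the box.
    positives = np.zeros(len(ious), dtype=bool)
    for idx in candidates[cand_ious >= thr]:
        cx, cy = anchor_centers[idx]
        if gt_box[0] <= cx <= gt_box[2] and gt_box[1] <= cy <= gt_box[3]:
            positives[idx] = True
    return positives
```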
+
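+The selection rule described above is short enough to sketch directly. The following is a rough, illustrative NumPy version of the per-ground-truth step (not the `ATSSAssigner` used by the configs below, whose `topk=9` setting controls how many closest-centered anchors per pyramid level enter the candidate pool):
+
+```python
+import numpy as np
+
+def atss_select_positives(candidate_ious, candidate_centers, gt_box):
+    """Illustrative ATSS positive selection for one ground-truth box.
+
+    candidate_ious:    (K,) IoUs between the GT and the top-k closest anchors
+    candidate_centers: (K, 2) anchor center coordinates (x, y)
+    gt_box:            (4,) ground-truth box as (x1, y1, x2, y2)
+    """
+    # adaptive IoU threshold = mean + std of the candidate IoUs
+    iou_thr = candidate_ious.mean() + candidate_ious.std()
+    # a positive must clear the threshold and have its center inside the GT box
+    x, y = candidate_centers[:, 0], candidate_centers[:, 1]
+    inside = (x > gt_box[0]) & (x < gt_box[2]) & (y > gt_box[1]) & (y < gt_box[3])
+    return (candidate_ious >= iou_thr) & inside
+```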
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | 3.7 | 19.7 | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss/atss_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209_102539.log.json) | +| R-101 | pytorch | 1x | 5.6 | 12.3 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss/atss_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.log.json) | + +## Citation + +```latex +@article{zhang2019bridging, + title = {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection}, + author = {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.}, + journal = {arXiv preprint arXiv:1912.02424}, + year = {2019} +} +``` diff --git a/downstream/mmdetection/configs/atss/atss_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/atss/atss_r101_fpn_1x_coco.py new file mode 100644 index 0000000..5225d2a --- /dev/null +++ b/downstream/mmdetection/configs/atss/atss_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './atss_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/atss/atss_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/atss/atss_r50_fpn_1x_coco.py new file mode 100644 index 0000000..42ff4c5 --- /dev/null +++ b/downstream/mmdetection/configs/atss/atss_r50_fpn_1x_coco.py @@ -0,0 +1,62 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='ATSS', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + 
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/atss/metafile.yml b/downstream/mmdetection/configs/atss/metafile.yml new file mode 100644 index 0000000..f4c567e --- /dev/null +++ b/downstream/mmdetection/configs/atss/metafile.yml @@ -0,0 +1,60 @@ +Collections: + - Name: ATSS + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ATSS + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1912.02424 + Title: 'Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection' + README: configs/atss/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/atss.py#L6 + Version: v2.0.0 + +Models: + - Name: atss_r50_fpn_1x_coco + In Collection: ATSS + Config: configs/atss/atss_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.7 + inference time (ms/im): + - value: 50.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth + + - Name: atss_r101_fpn_1x_coco + In Collection: ATSS + Config: configs/atss/atss_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.6 + inference time (ms/im): + - value: 81.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth diff --git a/downstream/mmdetection/configs/autoassign/README.md b/downstream/mmdetection/configs/autoassign/README.md new file mode 100644 index 0000000..1297206 --- /dev/null +++ b/downstream/mmdetection/configs/autoassign/README.md @@ -0,0 +1,35 @@ +# AutoAssign + +> [AutoAssign: Differentiable Label Assignment for Dense Object Detection](https://arxiv.org/abs/2007.03496) + + + +## Abstract + +Determining positive/negative samples for object detection is known as label assignment. Here we present an anchor-free detector named AutoAssign. It requires little human knowledge and achieves appearance-aware through a fully differentiable weighting mechanism. During training, to both satisfy the prior distribution of data and adapt to category characteristics, we present Center Weighting to adjust the category-specific prior distributions. To adapt to object appearances, Confidence Weighting is proposed to adjust the specific assign strategy of each instance. The two weighting modules are then combined to generate positive and negative weights to adjust each location's confidence. Extensive experiments on the MS COCO show that our method steadily surpasses other best sampling strategies by large margins with various backbones. Moreover, our best model achieves 52.1% AP, outperforming all existing one-stage detectors. 
Besides, experiments on other datasets, e.g., PASCAL VOC, Objects365, and WiderFace, demonstrate the broad applicability of AutoAssign. + +
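+As a rough illustration of the Center Weighting idea mentioned above (a simplified sketch only, not the `AutoAssignHead` used by the config below), the category-specific prior can be thought of as a Gaussian over the offset between a candidate location and the box center, with learnable mean and spread:
+
+```python
+import torch
+
+def center_prior(offsets, mu, sigma):
+    """Toy Gaussian center prior (simplified AutoAssign-style weighting).
+
+    offsets: (L, 2) normalized (dx, dy) offsets of L candidate locations
+             from a ground-truth box center
+    mu:      (2,) learnable per-category mean offset
+    sigma:   (2,) learnable per-category spread
+    Returns an (L,) weight in (0, 1] that peaks near mu.
+    """
+    return torch.exp(-((offsets - mu) ** 2 / (2 * sigma ** 2)).sum(dim=-1))
+```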
+
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | +| :------: | :---: | :-----: | :------: | :----: | :------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | caffe | 1x | 4.08 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.log.json) | + +**Note**: + +1. We find that the performance is unstable with 1x setting and may fluctuate by about 0.3 mAP. mAP 40.3 ~ 40.6 is acceptable. Such fluctuation can also be found in the original implementation. +2. You can get a more stable results ~ mAP 40.6 with a schedule total 13 epoch, and learning rate is divided by 10 at 10th and 13th epoch. + +## Citation + +```latex +@article{zhu2020autoassign, + title={AutoAssign: Differentiable Label Assignment for Dense Object Detection}, + author={Zhu, Benjin and Wang, Jianfeng and Jiang, Zhengkai and Zong, Fuhang and Liu, Songtao and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2007.03496}, + year={2020} +} +``` diff --git a/downstream/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py b/downstream/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py new file mode 100644 index 0000000..db548dc --- /dev/null +++ b/downstream/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py @@ -0,0 +1,85 @@ +# We follow the original implementation which +# adopts the Caffe pre-trained backbone. 
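+# (Concretely, "Caffe pre-trained" here means: the detectron2 resnet50_caffe
+#  checkpoint loaded via init_cfg below, BatchNorm kept frozen
+#  (requires_grad=False, norm_eval=True), and BGR inputs normalized by mean
+#  subtraction only -- see img_norm_cfg with std=[1., 1., 1.] and
+#  to_rgb=False further down.)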
+_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='AutoAssign', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5, + relu_before_extra_convs=True, + init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')), + bbox_head=dict( + type='AutoAssignHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + loss_bbox=dict(type='GIoULoss', loss_weight=5.0)), + train_cfg=None, + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(lr=0.01, paramwise_cfg=dict(norm_decay_mult=0.)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=1.0 / 1000, + step=[8, 11]) +total_epochs = 12 diff --git a/downstream/mmdetection/configs/autoassign/metafile.yml b/downstream/mmdetection/configs/autoassign/metafile.yml new file mode 100644 index 0000000..f1e9051 --- /dev/null +++ b/downstream/mmdetection/configs/autoassign/metafile.yml @@ -0,0 +1,33 @@ +Collections: + - Name: AutoAssign + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - AutoAssign + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2007.03496 + Title: 'AutoAssign: Differentiable Label Assignment for Dense Object Detection' + README: configs/autoassign/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/autoassign.py#L6 + Version: v2.12.0 + +Models: + - Name: autoassign_r50_fpn_8x2_1x_coco + In Collection: AutoAssign + Config: configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py + Metadata: + Training Memory (GB): 4.08 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth diff --git a/downstream/mmdetection/configs/carafe/README.md 
b/downstream/mmdetection/configs/carafe/README.md new file mode 100644 index 0000000..803abe0 --- /dev/null +++ b/downstream/mmdetection/configs/carafe/README.md @@ -0,0 +1,42 @@ +# CARAFE + +> [CARAFE: Content-Aware ReAssembly of FEatures](https://arxiv.org/abs/1905.02188) + + + +## Abstract + +Feature upsampling is a key operation in a number of modern convolutional network architectures, e.g. feature pyramids. Its design is critical for dense prediction tasks such as object detection and semantic/instance segmentation. In this work, we propose Content-Aware ReAssembly of FEatures (CARAFE), a universal, lightweight and highly effective operator to fulfill this goal. CARAFE has several appealing properties: (1) Large field of view. Unlike previous works (e.g. bilinear interpolation) that only exploit sub-pixel neighborhood, CARAFE can aggregate contextual information within a large receptive field. (2) Content-aware handling. Instead of using a fixed kernel for all samples (e.g. deconvolution), CARAFE enables instance-specific content-aware handling, which generates adaptive kernels on-the-fly. (3) Lightweight and fast to compute. CARAFE introduces little computational overhead and can be readily integrated into modern network architectures. We conduct comprehensive evaluations on standard benchmarks in object detection, instance/semantic segmentation and inpainting. CARAFE shows consistent and substantial gains across all the tasks (1.2%, 1.3%, 1.8%, 1.1db respectively) with negligible computational overhead. It has great potential to serve as a strong building block for future research. It has great potential to serve as a strong building block for future research. + +
+
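+For intuition, the reassembly step described above can be emulated with standard tensor ops. Below is a naive PyTorch sketch of that step alone (the per-location kernels are assumed to come from a separate content encoder, and the kernel channel layout is an assumption of this sketch); it is illustrative only and far slower than the CUDA operator referenced in the Implementation section below.
+
+```python
+import torch
+import torch.nn.functional as F
+
+def carafe_naive(x, kernels, k_up=5, scale=2):
+    """Naive CARAFE reassembly (illustration only, not the optimized CUDA op).
+
+    x:       (N, C, H, W) features to upsample
+    kernels: (N, scale*scale*k_up*k_up, H, W) predicted reassembly kernels
+    """
+    n, c, h, w = x.shape
+    # normalize every k_up x k_up kernel with a softmax
+    kernels = kernels.reshape(n, scale * scale, k_up * k_up, h, w)
+    kernels = F.softmax(kernels, dim=2)
+    # gather the k_up x k_up neighborhood around each source location
+    neigh = F.unfold(x, k_up, padding=k_up // 2)           # (N, C*k_up^2, H*W)
+    neigh = neigh.reshape(n, c, k_up * k_up, h, w)
+    # every scale^2 sub-pixel position reassembles the same neighborhood
+    out = torch.einsum('nckhw,nskhw->ncshw', neigh, kernels)
+    out = out.reshape(n, c * scale * scale, h, w)
+    return F.pixel_shuffle(out, scale)                      # (N, C, scale*H, scale*W)
+
+# e.g. carafe_naive(torch.randn(1, 16, 8, 8), torch.randn(1, 100, 8, 8)).shape
+# == (1, 16, 16, 16)  with scale=2, k_up=5 (2*2*5*5 = 100 kernel channels)
+```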
    + +## Results and Models + +The results on COCO 2017 val is shown in the below table. + +| Method | Backbone | Style | Lr schd | Test Proposal Num | Inf time (fps) | Box AP | Mask AP | Config | Download | +| :--------------------: | :------: | :-----: | :-----: | :---------------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x | 1000 | 16.5 | 38.6 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_20200504_175733.log.json) | +| - | - | - | - | 2000 | | | | | | +| Mask R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x | 1000 | 14.0 | 39.3 | 35.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_20200503_135957.log.json) | +| - | - | - | - | 2000 | | | | | | + +## Implementation + +The CUDA implementation of CARAFE can be find at https://github.com/myownskyW7/CARAFE. + +## Citation + +We provide config files to reproduce the object detection & instance segmentation results in the ICCV 2019 Oral paper for [CARAFE: Content-Aware ReAssembly of FEatures](https://arxiv.org/abs/1905.02188). 
+ +```latex +@inproceedings{Wang_2019_ICCV, + title = {CARAFE: Content-Aware ReAssembly of FEatures}, + author = {Wang, Jiaqi and Chen, Kai and Xu, Rui and Liu, Ziwei and Loy, Chen Change and Lin, Dahua}, + booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` diff --git a/downstream/mmdetection/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py b/downstream/mmdetection/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py new file mode 100644 index 0000000..dedac3f --- /dev/null +++ b/downstream/mmdetection/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py @@ -0,0 +1,50 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + neck=dict( + type='FPN_CARAFE', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + start_level=0, + end_level=-1, + norm_cfg=None, + act_cfg=None, + order=('conv', 'norm', 'act'), + upsample_cfg=dict( + type='carafe', + up_kernel=5, + up_group=1, + encoder_kernel=3, + encoder_dilation=1, + compressed_channels=64))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=64), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=64), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py b/downstream/mmdetection/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py new file mode 100644 index 0000000..668c023 --- /dev/null +++ b/downstream/mmdetection/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py @@ -0,0 +1,60 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + neck=dict( + type='FPN_CARAFE', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + start_level=0, + end_level=-1, + norm_cfg=None, + act_cfg=None, + order=('conv', 'norm', 'act'), + upsample_cfg=dict( + type='carafe', + up_kernel=5, + up_group=1, + encoder_kernel=3, + encoder_dilation=1, + compressed_channels=64)), + roi_head=dict( + mask_head=dict( + upsample_cfg=dict( + type='carafe', + scale_factor=2, + up_kernel=5, + up_group=1, + encoder_kernel=3, + encoder_dilation=1, + compressed_channels=64)))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=64), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=64), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/carafe/metafile.yml b/downstream/mmdetection/configs/carafe/metafile.yml new file mode 100644 index 0000000..b58a3f6 --- /dev/null +++ b/downstream/mmdetection/configs/carafe/metafile.yml @@ -0,0 +1,55 @@ +Collections: + - Name: CARAFE + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RPN + - FPN_CARAFE + - ResNet + - RoIPool + Paper: + URL: https://arxiv.org/abs/1905.02188 + Title: 'CARAFE: Content-Aware ReAssembly of FEatures' + README: configs/carafe/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/necks/fpn_carafe.py#L11 + Version: v2.12.0 + +Models: + - Name: faster_rcnn_r50_fpn_carafe_1x_coco + In Collection: CARAFE + Config: configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py + Metadata: + Training Memory (GB): 4.26 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth + + - Name: mask_rcnn_r50_fpn_carafe_1x_coco + In Collection: CARAFE + Config: configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py + Metadata: + Training Memory (GB): 4.31 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth diff --git a/downstream/mmdetection/configs/cascade_rcnn/README.md b/downstream/mmdetection/configs/cascade_rcnn/README.md new file mode 100644 index 0000000..5a9e817 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/README.md @@ -0,0 +1,79 @@ +# Cascade R-CNN + +> [Cascade R-CNN: High Quality Object Detection and Instance Segmentation](https://arxiv.org/abs/1906.09756) + + + +## Abstract + +In object detection, the intersection over union (IoU) threshold is frequently used to define positives/negatives. The threshold used to train a detector defines its quality. While the commonly used threshold of 0.5 leads to noisy (low-quality) detections, detection performance frequently degrades for larger thresholds. This paradox of high-quality detection has two causes: 1) overfitting, due to vanishing positive samples for large thresholds, and 2) inference-time quality mismatch between detector and test hypotheses. A multi-stage object detection architecture, the Cascade R-CNN, composed of a sequence of detectors trained with increasing IoU thresholds, is proposed to address these problems. The detectors are trained sequentially, using the output of a detector as training set for the next. 
This resampling progressively improves hypotheses quality, guaranteeing a positive training set of equivalent size for all detectors and minimizing overfitting. The same cascade is applied at inference, to eliminate quality mismatches between hypotheses and detectors. An implementation of the Cascade R-CNN without bells or whistles achieves state-of-the-art performance on the COCO dataset, and significantly improves high-quality detection on generic and specific object detection datasets, including VOC, KITTI, CityPerson, and WiderFace. Finally, the Cascade R-CNN is generalized to instance segmentation, with nontrivial improvements over the Mask R-CNN. + +
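+The multi-stage refinement described above is simple to state in code. The toy NumPy sketch below shows the cascade at inference time, where each stage's regressor refines the boxes produced by the previous one; it is only an illustration of the idea, not the mmdetection roi-head implementation used by these configs.
+
+```python
+import numpy as np
+
+def delta2bbox(boxes, deltas):
+    """Standard R-CNN decoding: apply (dx, dy, dw, dh) offsets to xyxy boxes."""
+    w, h = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
+    cx, cy = boxes[:, 0] + 0.5 * w, boxes[:, 1] + 0.5 * h
+    cx, cy = cx + deltas[:, 0] * w, cy + deltas[:, 1] * h
+    w, h = w * np.exp(deltas[:, 2]), h * np.exp(deltas[:, 3])
+    return np.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], axis=1)
+
+def cascade_refine(proposals, heads):
+    """heads: one regression callable per stage (boxes -> deltas), trained with
+    increasing IoU thresholds. Each stage refines the previous stage's output,
+    so later stages see progressively higher-quality boxes."""
+    boxes = proposals
+    for head in heads:
+        boxes = delta2bbox(boxes, head(boxes))
+    return boxes
+
+# e.g. three no-op stages leave the proposals unchanged:
+# cascade_refine(np.array([[0., 0., 10., 10.]]), [lambda b: np.zeros((len(b), 4))] * 3)
+```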
+
    + +## Results and Models + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 4.2 | | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_20200504_174853.log.json) | +| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316_214748.log.json) | +| R-50-FPN | pytorch | 20e | - | - | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_20200504_175131.log.json) | +| R-101-FPN | caffe | 1x | 6.2 | | 42.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_20200504_175649.log.json) | +| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317_101744.log.json) | +| R-101-FPN | pytorch | 20e | - | - | 42.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_20200504_231812.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.6 | 10.9 | 43.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316_055608.log.json) | +| X-101-32x4d-FPN | pytorch | 20e | 7.6 | | 43.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.7 | | 44.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 10.7 | | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357.log.json) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 5.9 | | 41.2 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_20200504_174659.log.json) | +| 
R-50-FPN | pytorch | 1x | 6.0 | 11.2 | 41.2 | 35.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203_170449.log.json) | +| R-50-FPN | pytorch | 20e | - | - | 41.9 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_20200504_174711.log.json) | +| R-101-FPN | caffe | 1x | 7.8 | | 43.2 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_20200504_174813.log.json) | +| R-101-FPN | pytorch | 1x | 7.9 | 9.8 | 42.9 | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203_092521.log.json) | +| R-101-FPN | pytorch | 20e | - | - | 43.4 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_20200504_174836.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 9.2 | 8.6 | 44.3 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201_052416.log.json) | +| X-101-32x4d-FPN | pytorch | 20e | 9.2 | - | 45.0 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 12.2 | 6.7 | 45.3 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203_044059.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 12.2 | | 45.6 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033.log.json) | + +**Notes:** + +- The `20e` schedule in Cascade (Mask) R-CNN indicates decreasing the lr at 16 and 19 epochs, with a total of 20 epochs. + +## Pre-trained Models + +We also train some models with longer schedules and multi-scale training for Cascade Mask R-CNN. The users could finetune them for downstream tasks. 
+ +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 3x | 5.7 | | 44.0 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651-6e29b3a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651.log.json) | +| R-50-FPN | pytorch | 3x | 5.9 | | 44.3 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719-5bdc3824.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719.log.json) | +| R-101-FPN | caffe | 3x | 7.7 | | 45.4 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620-a5bd2389.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620.log.json) | +| R-101-FPN | pytorch | 3x | 7.8 | | 45.5 | 39.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236-51a2d363.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236.log.json) | +| X-101-32x4d-FPN | pytorch | 3x | 9.0 | | 46.3 | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234-40773067.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234.log.json) | +| X-101-32x8d-FPN | pytorch | 3x | 12.1 | | 46.1 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640-9ff7e76f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640.log.json) | +| X-101-64x4d-FPN | pytorch | 3x | 12.0 | | 46.6 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311-d3e64ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311.log.json) | + +## Citation + +```latex +@article{Cai_2019, + title={Cascade R-CNN: High Quality Object Detection and Instance Segmentation}, + ISSN={1939-3539}, + url={http://dx.doi.org/10.1109/tpami.2019.2956516}, + DOI={10.1109/tpami.2019.2956516}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher={Institute of Electrical and Electronics Engineers (IEEE)}, + author={Cai, Zhaowei and Vasconcelos, Nuno}, + year={2019}, + pages={1–1} +} +``` diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..5ee6231 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..1df87fc --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..f59c155 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git 
a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py new file mode 100644 index 0000000..45ab7ed --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..1b20f16 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..12d37ef --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,41 @@ +_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py'] + +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..9fb817e --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py @@ -0,0 +1,49 @@ +_base_ = ['./cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'] +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) + +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..49ab539 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py new file mode 100644 index 0000000..1296dc4 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py' +] diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..ed0c6d1 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py @@ -0,0 +1,4 @@ +_base_ = [ + '../common/mstrain_3x_coco_instance.py', + '../_base_/models/cascade_mask_rcnn_r50_fpn.py' +] diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..06cbbe7 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py new file mode 100644 index 0000000..4e35236 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py @@ -0,0 +1,14 @@ 
+_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..7d37d17 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..eeec1aa --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py @@ -0,0 +1,60 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) + +# ResNeXt-101-32x8d model trained with Caffe2 at FB, +# so the mean and std need to be changed. 
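+# (Unlike the other caffe-style configs in this directory, which only subtract
+#  the per-channel mean (std=[1., 1., 1.]), this checkpoint also expects the
+#  BGR inputs to be divided by the per-channel std values below.)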
+img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + to_rgb=False) + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..7dbef5f --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py new file mode 100644 index 0000000..579b1ac --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..ed6cf4b --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py 
new file mode 100644 index 0000000..1e90f4b --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..5c07776 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py new file mode 100644 index 0000000..b1719c2 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py @@ -0,0 +1,6 @@ +_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..696bcfb --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,42 @@ +_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' + +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) + +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..87e21fb --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/cascade_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py 
b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py new file mode 100644 index 0000000..6f886e1 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py @@ -0,0 +1,4 @@ +_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..5ac02c1 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py new file mode 100644 index 0000000..486e45e --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..78229f0 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,15 @@ +_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py new file mode 100644 index 0000000..58812de --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py @@ -0,0 +1,15 @@ +_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/cascade_rcnn/metafile.yml b/downstream/mmdetection/configs/cascade_rcnn/metafile.yml new file mode 100644 index 0000000..1007f2e --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rcnn/metafile.yml @@ -0,0 +1,525 @@ +Collections: + - Name: Cascade R-CNN + Metadata: + 
Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Cascade R-CNN + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: http://dx.doi.org/10.1109/tpami.2019.2956516 + Title: 'Cascade R-CNN: Delving into High Quality Object Detection' + README: configs/cascade_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/cascade_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: cascade_rcnn_r50_caffe_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth + + - Name: cascade_rcnn_r50_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 62.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth + + - Name: cascade_rcnn_r50_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 62.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth + + - Name: cascade_rcnn_r101_caffe_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth + + - Name: cascade_rcnn_r101_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 74.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth + + - Name: cascade_rcnn_r101_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 74.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth + + - Name: cascade_rcnn_x101_32x4d_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 91.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth + + - Name: cascade_rcnn_x101_32x4d_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 7.6 + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth + + - Name: cascade_rcnn_x101_64x4d_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.7 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth + + - Name: cascade_rcnn_x101_64x4d_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 10.7 + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth + + - Name: cascade_mask_rcnn_r50_caffe_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.9 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth + + - Name: cascade_mask_rcnn_r50_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 89.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth + + - Name: cascade_mask_rcnn_r50_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time 
(ms/im): + - value: 89.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth + + - Name: cascade_mask_rcnn_r101_caffe_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth + + - Name: cascade_mask_rcnn_r101_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.9 + inference time (ms/im): + - value: 102.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth + + - Name: cascade_mask_rcnn_r101_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 7.9 + inference time (ms/im): + - value: 102.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.2 + inference time (ms/im): + - value: 116.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 9.2 + inference time (ms/im): + - value: 116.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.0 + - Task: Instance 
Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth + + - Name: cascade_mask_rcnn_x101_64x4d_fpn_1x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 12.2 + inference time (ms/im): + - value: 149.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth + + - Name: cascade_mask_rcnn_x101_64x4d_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 12.2 + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth + + - Name: cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 5.7 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651-6e29b3a6.pth + + - Name: cascade_mask_rcnn_r50_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 5.9 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719-5bdc3824.pth + + - Name: cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 7.7 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620-a5bd2389.pth + + - Name: cascade_mask_rcnn_r101_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 
39.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236-51a2d363.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 9.0 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234-40773067.pth + + - Name: cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 12.1 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640-9ff7e76f.pth + + - Name: cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 12.0 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311-d3e64ba0.pth diff --git a/downstream/mmdetection/configs/cascade_rpn/README.md b/downstream/mmdetection/configs/cascade_rpn/README.md new file mode 100644 index 0000000..fb2b482 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rpn/README.md @@ -0,0 +1,41 @@ +# Cascade RPN + +> [Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution](https://arxiv.org/abs/1909.06720) + + + +## Abstract + +This paper considers an architecture referred to as Cascade Region Proposal Network (Cascade RPN) for improving the region-proposal quality and detection performance by systematically addressing the limitation of the conventional RPN that heuristically defines the anchors and aligns the features to the anchors. First, instead of using multiple anchors with predefined scales and aspect ratios, Cascade RPN relies on a single anchor per location and performs multi-stage refinement. Each stage is progressively more stringent in defining positive samples by starting out with an anchor-free metric followed by anchor-based metrics in the ensuing stages. Second, to attain alignment between the features and the anchors throughout the stages, adaptive convolution is proposed that takes the anchors in addition to the image features as its input and learns the sampled features guided by the anchors. A simple implementation of a two-stage Cascade RPN achieves AR 13.4 points higher than that of the conventional RPN, surpassing any existing region proposal methods. When adopting to Fast R-CNN and Faster R-CNN, Cascade RPN can improve the detection mAP by 3.1 and 3.5 points, respectively. 
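The core mechanism described above (a single anchor per location, refined over multiple stages with a progressively stricter definition of a positive sample) can be sketched in plain Python. This is a toy illustration only; the boxes, IoU thresholds, and the `refine` step are invented for the example and are not the Cascade RPN implementation, whose actual configs appear later in this diff:

```python
# Toy illustration only, not MMDetection code: one anchor, two refinement
# stages, and a stricter IoU criterion for "positive" at each stage.

def iou(a, b):
    # a, b: boxes as (x1, y1, x2, y2)
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)

    def area(r):
        return (r[2] - r[0]) * (r[3] - r[1])

    union = area(a) + area(b) - inter
    return inter / union if union > 0 else 0.0

def refine(box, gt, step=0.5):
    # Stand-in for the learned regression: move the box part-way toward the target.
    return tuple(b + step * (g - b) for b, g in zip(box, gt))

gt = (50, 50, 150, 150)
box = (30, 40, 120, 130)        # single anchor per location (stage-1 input)
thresholds = [0.3, 0.7]         # invented: stricter positive criterion per stage

for stage, thr in enumerate(thresholds, start=1):
    overlap = iou(box, gt)
    print(f'stage {stage}: IoU={overlap:.2f}, positive={overlap >= thr}')
    box = refine(box, gt)
```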
    + +## Results and Models + +### Region proposal performance + +| Method | Backbone | Style | Mem (GB) | Train time (s/iter) | Inf time (fps) | AR 1000 | Config | Download | +| :----: | :------: | :---: | :------: | :-----------------: | :------------: | :-----: | :---------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------: | +| CRPN | R-50-FPN | caffe | - | - | - | 72.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_r50_caffe_fpn_1x_coco/cascade_rpn_r50_caffe_fpn_1x_coco-7aa93cef.pth) | + +### Detection performance + +| Method | Proposal | Backbone | Style | Schedule | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Config | Download | +| :----------: | :---------: | :------: | :---: | :------: | :------: | :-----------------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Fast R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco/crpn_fast_rcnn_r50_caffe_fpn_1x_coco-cb486e66.pth) | +| Faster R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth) | + +## Citation + +We provide the code for reproducing experiment results of [Cascade RPN](https://arxiv.org/abs/1909.06720). 
+ +```latex +@inproceedings{vu2019cascade, + title={Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution}, + author={Vu, Thang and Jang, Hyunjun and Pham, Trung X and Yoo, Chang D}, + booktitle={Conference on Neural Information Processing Systems (NeurIPS)}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..29f5d07 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,77 @@ +_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + roi_head=dict( + bbox_head=dict( + bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + assigner=dict( + pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65), + sampler=dict(num=256))), + test_cfg=dict(rcnn=dict(score_thr=1e-3))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=300), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=300), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['proposals']), + dict( + type='ToDataContainer', + fields=[dict(key='proposals', stack=False)]), + dict(type='Collect', keys=['img', 'proposals']), + ]) +] +data = dict( + train=dict( + proposal_file=data_root + + 'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl', + pipeline=train_pipeline), + val=dict( + proposal_file=data_root + + 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl', + pipeline=test_pipeline), + test=dict( + proposal_file=data_root + + 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl', + pipeline=test_pipeline)) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..bad86e6 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,92 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' +rpn_weight = 0.7 +model = dict( 
+ rpn_head=dict( + _delete_=True, + type='CascadeRPNHead', + num_stages=2, + stages=[ + dict( + type='StageCascadeRPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[1.0], + strides=[4, 8, 16, 32, 64]), + adapt_cfg=dict(type='dilation', dilation=3), + bridged_feature=True, + sampling=False, + with_cls=False, + reg_decoded_bbox=True, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.5, 0.5)), + loss_bbox=dict( + type='IoULoss', linear=True, + loss_weight=10.0 * rpn_weight)), + dict( + type='StageCascadeRPNHead', + in_channels=256, + feat_channels=256, + adapt_cfg=dict(type='offset'), + bridged_feature=False, + sampling=True, + with_cls=True, + reg_decoded_bbox=True, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.05, 0.05, 0.1, 0.1)), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0 * rpn_weight), + loss_bbox=dict( + type='IoULoss', linear=True, + loss_weight=10.0 * rpn_weight)) + ]), + roi_head=dict( + bbox_head=dict( + bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=[ + dict( + assigner=dict( + type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5), + allowed_border=-1, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False) + ], + rpn_proposal=dict(max_per_img=300, nms=dict(iou_threshold=0.8)), + rcnn=dict( + assigner=dict( + pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65), + sampler=dict(type='RandomSampler', num=256))), + test_cfg=dict( + rpn=dict(max_per_img=300, nms=dict(iou_threshold=0.8)), + rcnn=dict(score_thr=1e-3))) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..5562e69 --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,77 @@ +_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='CascadeRPNHead', + num_stages=2, + stages=[ + dict( + type='StageCascadeRPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[1.0], + strides=[4, 8, 16, 32, 64]), + adapt_cfg=dict(type='dilation', dilation=3), + bridged_feature=True, + sampling=False, + with_cls=False, + reg_decoded_bbox=True, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.5, 0.5)), + loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)), + dict( + type='StageCascadeRPNHead', + in_channels=256, + feat_channels=256, + adapt_cfg=dict(type='offset'), + bridged_feature=False, + sampling=True, + with_cls=True, + reg_decoded_bbox=True, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.05, 0.05, 0.1, 0.1)), + 
loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)) + ]), + train_cfg=dict(rpn=[ + dict( + assigner=dict( + type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5), + allowed_border=-1, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.3, + ignore_iof_thr=-1, + iou_calculator=dict(type='BboxOverlaps2D')), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.8), + min_bbox_size=0))) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/cascade_rpn/metafile.yml b/downstream/mmdetection/configs/cascade_rpn/metafile.yml new file mode 100644 index 0000000..335b2bc --- /dev/null +++ b/downstream/mmdetection/configs/cascade_rpn/metafile.yml @@ -0,0 +1,44 @@ +Collections: + - Name: Cascade RPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Cascade RPN + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1909.06720 + Title: 'Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution' + README: configs/cascade_rpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/dense_heads/cascade_rpn_head.py#L538 + Version: v2.8.0 + +Models: + - Name: crpn_fast_rcnn_r50_caffe_fpn_1x_coco + In Collection: Cascade RPN + Config: configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco/crpn_fast_rcnn_r50_caffe_fpn_1x_coco-cb486e66.pth + + - Name: crpn_faster_rcnn_r50_caffe_fpn_1x_coco + In Collection: Cascade RPN + Config: configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth diff --git a/downstream/mmdetection/configs/centernet/README.md b/downstream/mmdetection/configs/centernet/README.md new file mode 100644 index 0000000..0f951a0 --- /dev/null +++ b/downstream/mmdetection/configs/centernet/README.md @@ -0,0 +1,40 @@ +# CenterNet + +> [Objects as Points](https://arxiv.org/abs/1904.07850) + + + +## Abstract + +Detection identifies objects as axis-aligned boxes in an image. Most successful object detectors enumerate a nearly exhaustive list of potential object locations and classify each. This is wasteful, inefficient, and requires additional post-processing. In this paper, we take a different approach. We model an object as a single point --- the center point of its bounding box. Our detector uses keypoint estimation to find center points and regresses to all other object properties, such as size, 3D location, orientation, and even pose. 
Our center-point-based approach, CenterNet, is end-to-end differentiable, simpler, faster, and more accurate than corresponding bounding-box-based detectors. CenterNet achieves the best speed-accuracy trade-off on the MS COCO dataset, with 28.1% AP at 142 FPS, 37.4% AP at 52 FPS, and 45.1% AP with multi-scale testing at 1.4 FPS. We use the same approach to estimate 3D bounding boxes on the KITTI benchmark and human pose on the COCO keypoint dataset. Our method performs competitively with sophisticated multi-stage methods and runs in real-time. +
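The "objects as points" decoding step can be shown with a toy example: take the peak of a per-class center heatmap and combine it with the width/height predicted at that cell. The heatmap size, stride, and values below are invented for illustration and are not the actual CenterNet head outputs:

```python
import numpy as np

# Toy illustration only: decode a single detection from an invented heatmap.
heatmap = np.zeros((8, 8))       # assumed 8x8 center heatmap for one class
heatmap[3, 5] = 0.9              # pretend the network fired at row 3, col 5
wh_pred = (24.0, 16.0)           # assumed width/height predicted at that cell
stride = 4                       # assumed output stride

cy, cx = np.unravel_index(np.argmax(heatmap), heatmap.shape)
cx_img, cy_img = cx * stride, cy * stride      # map the peak back to image space
w, h = wh_pred
box = (cx_img - w / 2, cy_img - h / 2, cx_img + w / 2, cy_img + h / 2)
print(f'score={heatmap[cy, cx]:.2f}, box={box}')
```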
    + +## Results and Models + +| Backbone | DCN | Mem (GB) | Box AP | Flip box AP | Config | Download | +| :-------: | :-: | :------: | :----: | :---------: | :---------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ResNet-18 | N | 3.45 | 25.9 | 27.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/centernet/centernet_resnet18_140e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630.log.json) | +| ResNet-18 | Y | 3.47 | 29.5 | 30.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131.log.json) | + +Note: + +- Flip box AP setting is single-scale and `flip=True`. +- Due to complex data enhancement, we find that the performance is unstable and may fluctuate by about 0.4 mAP. mAP 29.4 ~ 29.8 is acceptable in ResNet-18-DCNv2. +- Compared to the source code, we refer to [CenterNet-Better](https://github.com/FateScript/CenterNet-better), and make the following changes + - fix wrong image mean and variance in image normalization to be compatible with the pre-trained backbone. + - Use SGD rather than ADAM optimizer and add warmup and grad clip. + - Use DistributedDataParallel as other models in MMDetection rather than using DataParallel. 
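As a rough sketch of what the added warmup does: the snippet below ramps the learning rate linearly over the first iterations, using an assumed base LR of 0.02 and the `warmup_iters=1000`, `warmup_ratio=1/1000` values from the config below. It follows the linear warmup rule used by mmcv-style LR hooks; the exact runner behavior may differ.

```python
# Toy sketch of the linear warmup (assumed base LR 0.02; warmup_iters=1000 and
# warmup_ratio=1/1000 are taken from the config below). The formula mirrors
# mmcv-style linear warmup; the actual runner behavior may differ slightly.
def warmup_lr(base_lr, cur_iter, warmup_iters=1000, warmup_ratio=1.0 / 1000):
    if cur_iter >= warmup_iters:
        return base_lr
    k = (1 - cur_iter / warmup_iters) * (1 - warmup_ratio)
    return base_lr * (1 - k)

for it in (0, 250, 500, 1000):
    print(it, round(warmup_lr(0.02, it), 6))
```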
+ +## Citation + +```latex +@article{zhou2019objects, + title={Objects as Points}, + author={Zhou, Xingyi and Wang, Dequan and Kr{\"a}henb{\"u}hl, Philipp}, + booktitle={arXiv preprint arXiv:1904.07850}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/centernet/centernet_resnet18_140e_coco.py b/downstream/mmdetection/configs/centernet/centernet_resnet18_140e_coco.py new file mode 100644 index 0000000..52c86a5 --- /dev/null +++ b/downstream/mmdetection/configs/centernet/centernet_resnet18_140e_coco.py @@ -0,0 +1,3 @@ +_base_ = './centernet_resnet18_dcnv2_140e_coco.py' + +model = dict(neck=dict(use_dcn=False)) diff --git a/downstream/mmdetection/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py b/downstream/mmdetection/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py new file mode 100644 index 0000000..b8a0bb1 --- /dev/null +++ b/downstream/mmdetection/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py @@ -0,0 +1,127 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='CenterNet', + backbone=dict( + type='ResNet', + depth=18, + norm_eval=False, + norm_cfg=dict(type='BN'), + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict( + type='CTResNetNeck', + in_channel=512, + num_deconv_filters=(256, 128, 64), + num_deconv_kernels=(4, 4, 4), + use_dcn=True), + bbox_head=dict( + type='CenterNetHead', + num_classes=80, + in_channel=64, + feat_channel=64, + loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0), + loss_wh=dict(type='L1Loss', loss_weight=0.1), + loss_offset=dict(type='L1Loss', loss_weight=1.0)), + train_cfg=None, + test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100)) + +# We fixed the incorrect img_norm_cfg problem in the source code. 
+img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True, color_type='color'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='RandomCenterCropPad', + crop_size=(512, 512), + ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), + mean=[0, 0, 0], + std=[1, 1, 1], + to_rgb=True, + test_pad_mode=None), + dict(type='Resize', img_scale=(512, 512), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict( + type='MultiScaleFlipAug', + scale_factor=1.0, + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict( + type='RandomCenterCropPad', + ratios=None, + border=None, + mean=[0, 0, 0], + std=[1, 1, 1], + to_rgb=True, + test_mode=True, + test_pad_mode=['logical_or', 31], + test_pad_add_pix=1), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction', 'img_norm_cfg', 'border'), + keys=['img']) + ]) +] + +dataset_type = 'CocoDataset' +data_root = 'data/coco/' + +# Use RepeatDataset to speed up training +data = dict( + samples_per_gpu=16, + workers_per_gpu=4, + train=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# optimizer +# Based on the default settings of modern detectors, the SGD effect is better +# than the Adam in the source code, so we use SGD default settings and +# if you use adam+lr5e-4, the map is 29.1. +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) + +# learning policy +# Based on the default settings of modern detectors, we added warmup settings. +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=1.0 / 1000, + step=[18, 24]) # the real step is [18*5, 24*5] +runner = dict(max_epochs=28) # the real epoch is 28*5=140 + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (16 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/downstream/mmdetection/configs/centernet/metafile.yml b/downstream/mmdetection/configs/centernet/metafile.yml new file mode 100644 index 0000000..e86e57b --- /dev/null +++ b/downstream/mmdetection/configs/centernet/metafile.yml @@ -0,0 +1,46 @@ +Collections: + - Name: CenterNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x TITANXP GPUs + Architecture: + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.07850 + Title: 'Objects as Points' + README: configs/centernet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.13.0/mmdet/models/detectors/centernet.py#L10 + Version: v2.13.0 + +Models: + - Name: centernet_resnet18_dcnv2_140e_coco + In Collection: CenterNet + Config: configs/centernet/centernet_resnet18_dcnv2_140e_coco.py + Metadata: + Batch Size: 128 + Training Memory (GB): 3.47 + Epochs: 140 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 29.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth + + - Name: centernet_resnet18_140e_coco + In Collection: CenterNet + Config: configs/centernet/centernet_resnet18_140e_coco.py + Metadata: + Batch Size: 128 + Training Memory (GB): 3.45 + Epochs: 140 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 25.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth diff --git a/downstream/mmdetection/configs/centripetalnet/README.md b/downstream/mmdetection/configs/centripetalnet/README.md new file mode 100644 index 0000000..b01b00a --- /dev/null +++ b/downstream/mmdetection/configs/centripetalnet/README.md @@ -0,0 +1,36 @@ +# CentripetalNet + +> [CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection](https://arxiv.org/abs/2003.09119) + + + +## Abstract + +Keypoint-based detectors have achieved pretty-well performance. However, incorrect keypoint matching is still widespread and greatly affects the performance of the detector. In this paper, we propose CentripetalNet which uses centripetal shift to pair corner keypoints from the same instance. CentripetalNet predicts the position and the centripetal shift of the corner points and matches corners whose shifted results are aligned. Combining position information, our approach matches corner points more accurately than the conventional embedding approaches do. Corner pooling extracts information inside the bounding boxes onto the border. To make this information more aware at the corners, we design a cross-star deformable convolution network to conduct feature adaption. Furthermore, we explore instance segmentation on anchor-free detectors by equipping our CentripetalNet with a mask prediction module. On MS-COCO test-dev, our CentripetalNet not only outperforms all existing anchor-free detectors with an AP of 48.0% but also achieves comparable performance to the state-of-the-art instance segmentation approaches with a 40.2% MaskAP. + +
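The corner-pairing rule described above (a top-left and a bottom-right corner are matched only when their centripetal-shifted positions land at roughly the same center) can be illustrated with a toy example; all coordinates, shifts, and the matching tolerance are invented:

```python
# Toy illustration only, not the CentripetalNet matching code: corners are
# paired when their centripetal-shifted positions agree on the object center.
tl = {'pos': (40, 40), 'shift': (50, 40)}      # invented top-left corner + shift
br = {'pos': (140, 120), 'shift': (-50, -40)}  # invented bottom-right corner + shift

tl_center = (tl['pos'][0] + tl['shift'][0], tl['pos'][1] + tl['shift'][1])
br_center = (br['pos'][0] + br['shift'][0], br['pos'][1] + br['shift'][1])

dist = ((tl_center[0] - br_center[0]) ** 2 + (tl_center[1] - br_center[1]) ** 2) ** 0.5
if dist < 10:                                  # invented matching tolerance
    print('corners paired -> box', (*tl['pos'], *br['pos']))
else:
    print('corners rejected: shifted centers disagree')
```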
    + +## Results and Models + +| Backbone | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------------: | :--------------------------------------------------------------: | :---------------: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HourglassNet-104 | [16 x 6](./centripetalnet_hourglass104_mstest_16x6_210e_coco.py) | 190/210 | 16.7 | 3.7 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804.log.json) | + +Note: + +- TTA setting is single-scale and `flip=True`. +- The model we released is the best checkpoint rather than the latest checkpoint (box AP 44.8 vs 44.6 in our experiment). + +## Citation + +```latex +@InProceedings{Dong_2020_CVPR, +author = {Dong, Zhiwei and Li, Guoxuan and Liao, Yue and Wang, Fei and Ren, Pengju and Qian, Chen}, +title = {CentripetalNet: Pursuing High-Quality Keypoint Pairs for Object Detection}, +booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, +month = {June}, +year = {2020} +} +``` diff --git a/downstream/mmdetection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py b/downstream/mmdetection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py new file mode 100644 index 0000000..5281c5b --- /dev/null +++ b/downstream/mmdetection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py @@ -0,0 +1,110 @@ +_base_ = [ + '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' +] + +# model settings +model = dict( + type='CornerNet', + backbone=dict( + type='HourglassNet', + downsample_times=5, + num_stacks=2, + stage_channels=[256, 256, 384, 384, 384, 512], + stage_blocks=[2, 2, 2, 2, 2, 4], + norm_cfg=dict(type='BN', requires_grad=True)), + neck=None, + bbox_head=dict( + type='CentripetalHead', + num_classes=80, + in_channels=256, + num_feat_levels=2, + corner_emb_channels=0, + loss_heatmap=dict( + type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), + loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1), + loss_guiding_shift=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=0.05), + loss_centripetal_shift=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=1)), + # training and testing settings + train_cfg=None, + test_cfg=dict( + corner_topk=100, + local_maximum_kernel=3, + distance_threshold=0.5, + score_thr=0.05, + max_per_img=100, + nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) +# data settings +img_norm_cfg = dict( + 
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='RandomCenterCropPad', + crop_size=(511, 511), + ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), + test_mode=False, + test_pad_mode=None, + **img_norm_cfg), + dict(type='Resize', img_scale=(511, 511), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict( + type='MultiScaleFlipAug', + scale_factor=1.0, + flip=True, + transforms=[ + dict(type='Resize'), + dict( + type='RandomCenterCropPad', + crop_size=None, + ratios=None, + border=None, + test_mode=True, + test_pad_mode=['logical_or', 127], + **img_norm_cfg), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict( + type='Collect', + keys=['img'], + meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', + 'scale_factor', 'flip', 'img_norm_cfg', 'border')), + ]) +] +data = dict( + samples_per_gpu=6, + workers_per_gpu=3, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='Adam', lr=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[190]) +runner = dict(type='EpochBasedRunner', max_epochs=210) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (16 GPUs) x (6 samples per GPU) +auto_scale_lr = dict(base_batch_size=96) diff --git a/downstream/mmdetection/configs/centripetalnet/metafile.yml b/downstream/mmdetection/configs/centripetalnet/metafile.yml new file mode 100644 index 0000000..61aed3e --- /dev/null +++ b/downstream/mmdetection/configs/centripetalnet/metafile.yml @@ -0,0 +1,39 @@ +Collections: + - Name: CentripetalNet + Metadata: + Training Data: COCO + Training Techniques: + - Adam + Training Resources: 16x V100 GPUs + Architecture: + - Corner Pooling + - Stacked Hourglass Network + Paper: + URL: https://arxiv.org/abs/2003.09119 + Title: 'CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection' + README: configs/centripetalnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/cornernet.py#L9 + Version: v2.5.0 + +Models: + - Name: centripetalnet_hourglass104_mstest_16x6_210e_coco + In Collection: CentripetalNet + Config: configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py + Metadata: + Batch Size: 96 + Training Memory (GB): 16.7 + inference time (ms/im): + - value: 270.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 210 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth diff --git a/downstream/mmdetection/configs/cityscapes/README.md b/downstream/mmdetection/configs/cityscapes/README.md new file mode 100644 index 0000000..c52a79f --- /dev/null +++ b/downstream/mmdetection/configs/cityscapes/README.md @@ -0,0 +1,46 @@ +# Cityscapes + +> [The Cityscapes Dataset for Semantic Urban Scene Understanding](https://arxiv.org/abs/1604.01685) + + + +## Abstract + +Visual understanding of complex urban street scenes is an enabling factor for a wide range of applications. Object detection has benefited enormously from large-scale datasets, especially in the context of deep learning. For semantic urban scene understanding, however, no current dataset adequately captures the complexity of real-world urban scenes. +To address this, we introduce Cityscapes, a benchmark suite and large-scale dataset to train and test approaches for pixel-level and instance-level semantic labeling. Cityscapes is comprised of a large, diverse set of stereo video sequences recorded in streets from 50 different cities. 5000 of these images have high quality pixel-level annotations; 20000 additional images have coarse annotations to enable methods that leverage large volumes of weakly-labeled data. Crucially, our effort exceeds previous attempts in terms of dataset size, annotation richness, scene variability, and complexity. Our accompanying empirical study provides an in-depth analysis of the dataset characteristics, as well as a performance evaluation of several state-of-the-art approaches based on our benchmark. + +
    + +## Common settings + +- All baselines were trained using 8 GPU with a batch size of 8 (1 images per GPU) using the [linear scaling rule](https://arxiv.org/abs/1706.02677) to scale the learning rate. +- All models were trained on `cityscapes_train`, and tested on `cityscapes_val`. +- 1x training schedule indicates 64 epochs which corresponds to slightly less than the 24k iterations reported in the original schedule from the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870) +- COCO pre-trained weights are used to initialize. +- A conversion [script](../../tools/dataset_converters/cityscapes.py) is provided to convert Cityscapes into COCO format. Please refer to [install.md](../../docs/1_exist_data_model.md#prepare-datasets) for details. +- `CityscapesDataset` implemented three evaluation methods. `bbox` and `segm` are standard COCO bbox/mask AP. `cityscapes` is the cityscapes dataset official evaluation, which may be slightly higher than COCO. + +### Faster R-CNN + +| Backbone | Style | Lr schd | Scale | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 800-1024 | 5.2 | - | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502-829424c0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502_114915.log.json) | + +### Mask R-CNN + +| Backbone | Style | Lr schd | Scale | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 800-1024 | 5.3 | - | 40.9 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes/mask_rcnn_r50_fpn_1x_cityscapes_20201211_133733-d2858245.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes/mask_rcnn_r50_fpn_1x_cityscapes_20201211_133733.log.json) | + +## Citation + +```latex +@inproceedings{Cordts2016Cityscapes, + title={The Cityscapes Dataset for Semantic Urban Scene Understanding}, + author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt}, + booktitle={Proc. 
of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2016} +} +``` diff --git a/downstream/mmdetection/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py b/downstream/mmdetection/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py new file mode 100644 index 0000000..ca636bd --- /dev/null +++ b/downstream/mmdetection/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py @@ -0,0 +1,44 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/cityscapes_detection.py', + '../_base_/default_runtime.py' +] +model = dict( + backbone=dict(init_cfg=None), + roi_head=dict( + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))) +# optimizer +# lr is set for a batch size of 8 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + # [7] yields higher performance than [6] + step=[7]) +runner = dict( + type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64 +log_config = dict(interval=100) +# For better, more stable performance initialize from COCO +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (1 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/downstream/mmdetection/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py b/downstream/mmdetection/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py new file mode 100644 index 0000000..83ea058 --- /dev/null +++ b/downstream/mmdetection/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' +] +model = dict( + backbone=dict(init_cfg=None), + roi_head=dict( + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=8, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) +# optimizer +# lr is set for a batch size of 8 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + # [7] yields higher performance than [6] + step=[7]) +runner = dict( + type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64 +log_config = dict(interval=100) +# For better, more stable performance initialize from COCO +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (1 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/downstream/mmdetection/configs/common/lsj_100e_coco_instance.py b/downstream/mmdetection/configs/common/lsj_100e_coco_instance.py new file mode 100644 index 0000000..cacf23d --- /dev/null +++ b/downstream/mmdetection/configs/common/lsj_100e_coco_instance.py @@ -0,0 +1,90 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +image_size = (1024, 1024) + +file_client_args = dict(backend='disk') +# comment out the code below to use different file client +# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) + +train_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=image_size, + ratio_range=(0.1, 2.0), + multiscale_mode='range', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=image_size), # padding to image_size leads 0.5+ mAP + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=4, # simply change this from 2 to 16 for 50e - 400e training. 
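+        # Note added for clarity (not part of the original config): with RepeatDataset the
+        # effective training length is `times` x `max_epochs` (the runner at the bottom of
+        # this file sets max_epochs=25), so times=2 -> 50e, times=4 -> 100e (this config)
+        # and times=16 -> 400e.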
+ dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=5, metric=['bbox', 'segm']) + +# optimizer assumes bs=64 +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) +optimizer_config = dict(grad_clip=None) + +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.067, + step=[22, 24]) +runner = dict(type='EpochBasedRunner', max_epochs=25) diff --git a/downstream/mmdetection/configs/common/mstrain-poly_3x_coco_instance.py b/downstream/mmdetection/configs/common/mstrain-poly_3x_coco_instance.py new file mode 100644 index 0000000..c22ed94 --- /dev/null +++ b/downstream/mmdetection/configs/common/mstrain-poly_3x_coco_instance.py @@ -0,0 +1,80 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric=['bbox', 'segm']) + +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) + +# learning policy +# Experiments show that using step=[9, 11] has higher performance +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[9, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/common/mstrain_3x_coco.py b/downstream/mmdetection/configs/common/mstrain_3x_coco.py new 
file mode 100644 index 0000000..80ec8b8 --- /dev/null +++ b/downstream/mmdetection/configs/common/mstrain_3x_coco.py @@ -0,0 +1,76 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') + +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) + +# learning policy +# Experiments show that using step=[9, 11] has higher performance +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[9, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/common/mstrain_3x_coco_instance.py b/downstream/mmdetection/configs/common/mstrain_3x_coco_instance.py new file mode 100644 index 0000000..50f39be --- /dev/null +++ b/downstream/mmdetection/configs/common/mstrain_3x_coco_instance.py @@ -0,0 +1,76 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + 
dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric=['bbox', 'segm']) + +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) + +# learning policy +# Experiments show that using step=[9, 11] has higher performance +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[9, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/common/ssj_270k_coco_instance.py b/downstream/mmdetection/configs/common/ssj_270k_coco_instance.py new file mode 100644 index 0000000..851098f --- /dev/null +++ b/downstream/mmdetection/configs/common/ssj_270k_coco_instance.py @@ -0,0 +1,91 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +image_size = (1024, 1024) + +file_client_args = dict(backend='disk') + +# Standard Scale Jittering (SSJ) resizes and crops an image +# with a resize range of 0.8 to 1.25 of the original image size. 
+train_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=image_size, + ratio_range=(0.8, 1.25), + multiscale_mode='range', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=image_size), # padding to image_size leads 0.5+ mAP + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) + +evaluation = dict(interval=6000, metric=['bbox', 'segm']) + +# optimizer assumes batch_size = (32 GPUs) x (2 samples per GPU) +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) +optimizer_config = dict(grad_clip=None) + +# lr steps at [0.9, 0.95, 0.975] of the maximum iterations +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[243000, 256500, 263250]) +checkpoint_config = dict(interval=6000) +# The model is trained by 270k iterations with batch_size 64, +# which is roughly equivalent to 144 epochs. +runner = dict(type='IterBasedRunner', max_iters=270000) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/common/ssj_scp_270k_coco_instance.py b/downstream/mmdetection/configs/common/ssj_scp_270k_coco_instance.py new file mode 100644 index 0000000..540839f --- /dev/null +++ b/downstream/mmdetection/configs/common/ssj_scp_270k_coco_instance.py @@ -0,0 +1,97 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +image_size = (1024, 1024) + +file_client_args = dict(backend='disk') + +# Standard Scale Jittering (SSJ) resizes and crops an image +# with a resize range of 0.8 to 1.25 of the original image size. 
+load_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=image_size, + ratio_range=(0.8, 1.25), + multiscale_mode='range', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Pad', size=image_size), +] +train_pipeline = [ + dict(type='CopyPaste', max_num_pasted=100), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='MultiImageMixDataset', + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=load_pipeline), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) + +evaluation = dict(interval=6000, metric=['bbox', 'segm']) + +# optimizer assumes batch_size = (32 GPUs) x (2 samples per GPU) +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) +optimizer_config = dict(grad_clip=None) + +# lr steps at [0.9, 0.95, 0.975] of the maximum iterations +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[243000, 256500, 263250]) +checkpoint_config = dict(interval=6000) +# The model is trained by 270k iterations with batch_size 64, +# which is roughly equivalent to 144 epochs. +runner = dict(type='IterBasedRunner', max_iters=270000) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/convnext/README.md b/downstream/mmdetection/configs/convnext/README.md new file mode 100644 index 0000000..edf72e8 --- /dev/null +++ b/downstream/mmdetection/configs/convnext/README.md @@ -0,0 +1,40 @@ +# ConvNeXt + +> [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) + +## Abstract + +The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. 
It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets. + +
    + +## Results and models + +| Method | Backbone | Pretrain | Lr schd | Multi-scale crop | FP16 | Mem (GB) | box AP | mask AP | Config | Download | +| :----------------: | :--------: | :---------: | :-----: | :--------------: | :--: | :------: | :----: | :-----: | :-------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask R-CNN | ConvNeXt-T | ImageNet-1K | 3x | yes | yes | 7.3 | 46.2 | 41.7 | [config](./mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953-050731f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953.log.json) | +| Cascade Mask R-CNN | ConvNeXt-T | ImageNet-1K | 3x | yes | yes | 9.0 | 50.3 | 43.6 | [config](./cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200.log.json) | +| Cascade Mask R-CNN | ConvNeXt-S | ImageNet-1K | 3x | yes | yes | 12.3 | 51.8 | 44.8 | [config](./cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004.log.json) | + +**Note**: + +- ConvNeXt backbone needs to install [MMClassification](https://github.com/open-mmlab/mmclassification) first, which has abundant backbones for downstream tasks. + +```shell +pip install mmcls>=0.22.0 +``` + +- The performance is unstable. `Cascade Mask R-CNN` may fluctuate about 0.2 mAP. 
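The AdamW settings in the ConvNeXt configs that follow rely on `constructor='LearningRateDecayOptimizerConstructor'` with `paramwise_cfg=dict(decay_rate=..., decay_type='layer_wise', num_layers=...)`. As a rough, self-contained sketch of what layer-wise decay means (the exact layer-id bookkeeping inside mmdet's constructor may differ, so treat this as an illustration rather than the library's implementation):

```python
# Minimal sketch, assuming a simple 0..num_layers depth bucketing; mmdet's
# LearningRateDecayOptimizerConstructor assigns ids per parameter group.
def layer_wise_lrs(base_lr=0.0002, decay_rate=0.7, num_layers=6):
    """Return one learning rate per depth bucket: the deepest bucket keeps
    base_lr, and every earlier bucket is shrunk geometrically by decay_rate."""
    return {
        layer_id: base_lr * decay_rate ** (num_layers - layer_id)
        for layer_id in range(num_layers + 1)
    }

for layer_id, lr in layer_wise_lrs().items():
    print(f"layer {layer_id}: lr = {lr:.2e}")
```

With `decay_rate=0.7` and six layers, the earliest bucket ends up at roughly 0.12x the base learning rate, which is why the pre-trained backbone is updated far more gently than the freshly initialized detection heads.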
+ +## Citation + +```bibtex +@article{liu2022convnet, + title={A ConvNet for the 2020s}, + author={Liu, Zhuang and Mao, Hanzi and Wu, Chao-Yuan and Feichtenhofer, Christoph and Darrell, Trevor and Xie, Saining}, + journal={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2022} +} +``` diff --git a/downstream/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py b/downstream/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py new file mode 100644 index 0000000..0ccc31d --- /dev/null +++ b/downstream/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py @@ -0,0 +1,32 @@ +_base_ = './cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' # noqa + +# please install mmcls>=0.22.0 +# import mmcls.models to trigger register_module in mmcls +custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa + +model = dict( + backbone=dict( + _delete_=True, + type='mmcls.ConvNeXt', + arch='small', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.6, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.'))) + +optimizer = dict( + _delete_=True, + constructor='LearningRateDecayOptimizerConstructor', + type='AdamW', + lr=0.0002, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.7, + 'decay_type': 'layer_wise', + 'num_layers': 12 + }) diff --git a/downstream/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py b/downstream/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py new file mode 100644 index 0000000..93304c0 --- /dev/null +++ b/downstream/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py @@ -0,0 +1,149 @@ +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please install mmcls>=0.22.0 +# import mmcls.models to trigger register_module in mmcls +custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa + +model = dict( + backbone=dict( + _delete_=True, + type='mmcls.ConvNeXt', + arch='tiny', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + neck=dict(in_channels=[96, 192, 384, 768]), + roi_head=dict(bbox_head=[ + dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + reg_decoded_bbox=True, + norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, 
loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), + dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=False, + reg_decoded_bbox=True, + norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), + dict( + type='ConvFCBBoxHead', + num_shared_convs=4, + num_shared_fcs=1, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=False, + reg_decoded_bbox=True, + norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) + ])) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[[ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ]]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline), persistent_workers=True) + +optimizer = dict( + _delete_=True, + constructor='LearningRateDecayOptimizerConstructor', + type='AdamW', + lr=0.0002, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.7, + 'decay_type': 'layer_wise', + 'num_layers': 6 + }) + +lr_config = dict(warmup_iters=1000, step=[27, 33]) +runner = dict(max_epochs=36) + +# you need to set mode='dynamic' if you are using pytorch<=1.5.0 +fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/downstream/mmdetection/configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py b/downstream/mmdetection/configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py new file mode 100644 index 0000000..e8a283f --- /dev/null +++ b/downstream/mmdetection/configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py @@ -0,0 +1,90 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please 
install mmcls>=0.22.0 +# import mmcls.models to trigger register_module in mmcls +custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa + +model = dict( + backbone=dict( + _delete_=True, + type='mmcls.ConvNeXt', + arch='tiny', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + neck=dict(in_channels=[96, 192, 384, 768])) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[[ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ]]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline), persistent_workers=True) + +optimizer = dict( + _delete_=True, + constructor='LearningRateDecayOptimizerConstructor', + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.95, + 'decay_type': 'layer_wise', + 'num_layers': 6 + }) + +lr_config = dict(warmup_iters=1000, step=[27, 33]) +runner = dict(max_epochs=36) + +# you need to set mode='dynamic' if you are using pytorch<=1.5.0 +fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/downstream/mmdetection/configs/convnext/metafile.yml b/downstream/mmdetection/configs/convnext/metafile.yml new file mode 100644 index 0000000..20425bf --- /dev/null +++ b/downstream/mmdetection/configs/convnext/metafile.yml @@ -0,0 +1,93 @@ +Models: + - Name: mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco + In Collection: Mask R-CNN + Config: configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco + Metadata: + Training Memory (GB): 7.3 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x A100 GPUs + Architecture: + - ConvNeXt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953-050731f4.pth + Paper: + URL: https://arxiv.org/abs/2201.03545 + Title: 'A ConvNet for the 
2020s' + README: configs/convnext/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + + - Name: cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco + In Collection: Cascade Mask R-CNN + Config: configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py + Metadata: + Training Memory (GB): 9.0 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x A100 GPUs + Architecture: + - ConvNeXt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth + Paper: + URL: https://arxiv.org/abs/2201.03545 + Title: 'A ConvNet for the 2020s' + README: configs/convnext/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.25.0 + + - Name: cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco + In Collection: Cascade Mask R-CNN + Config: configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py + Metadata: + Training Memory (GB): 12.3 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + - Mixed Precision Training + Training Resources: 8x A100 GPUs + Architecture: + - ConvNeXt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth + Paper: + URL: https://arxiv.org/abs/2201.03545 + Title: 'A ConvNet for the 2020s' + README: configs/convnext/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.25.0 diff --git a/downstream/mmdetection/configs/cornernet/README.md b/downstream/mmdetection/configs/cornernet/README.md new file mode 100644 index 0000000..d0b9e98 --- /dev/null +++ b/downstream/mmdetection/configs/cornernet/README.md @@ -0,0 +1,43 @@ +# CornerNet + +> [Cornernet: Detecting objects as paired keypoints](https://arxiv.org/abs/1808.01244) + + + +## Abstract + +We propose CornerNet, a new approach to object detection where we detect an object bounding box as a pair of keypoints, the top-left corner and the bottom-right corner, using a single convolution neural network. By detecting objects as paired keypoints, we eliminate the need for designing a set of anchor boxes commonly used in prior single-stage detectors. In addition to our novel formulation, we introduce corner pooling, a new type of pooling layer that helps the network better localize corners. Experiments show that CornerNet achieves a 42.2% AP on MS COCO, outperforming all existing one-stage detectors. + +
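To make the corner pooling idea from the abstract concrete, here is a small NumPy sketch of top-left corner pooling; it is a toy single-channel version, not the batched CUDA operator that mmcv/mmdet actually use. For each location it adds the maximum activation to its right and the maximum activation below it, which is how the network accumulates evidence for a top-left corner that has no local appearance cue.

```python
import numpy as np

def top_left_corner_pool(x):
    """Toy single-channel top-left corner pooling (illustrative only)."""
    # max over all columns to the right: reversed cumulative max along axis 1
    pool_right = np.maximum.accumulate(x[:, ::-1], axis=1)[:, ::-1]
    # max over all rows below: reversed cumulative max along axis 0
    pool_down = np.maximum.accumulate(x[::-1, :], axis=0)[::-1, :]
    return pool_right + pool_down

feat = np.random.rand(4, 4).astype(np.float32)
print(top_left_corner_pool(feat))
```

Bottom-right corner pooling is the mirror image (maximum to the left plus maximum above).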
    + +## Results and Models + +| Backbone | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------------: | :--------: | :---------------: | :------: | :------------: | :----: | :------: | :------: | +| HourglassNet-104 | [10 x 5](./cornernet_hourglass104_mstest_10x5_210e_coco.py) | 180/210 | 13.9 | 4.2 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720.log.json) | +| HourglassNet-104 | [8 x 6](./cornernet_hourglass104_mstest_8x6_210e_coco.py) | 180/210 | 15.9 | 4.2 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618.log.json) | +| HourglassNet-104 | [32 x 3](./cornernet_hourglass104_mstest_32x3_210e_coco.py) | 180/210 | 9.5 | 3.9 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110-1efaea91.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110.log.json) | + +Note: + +- The TTA setting is single-scale with `flip=True`. +- Experiments with `images_per_gpu=6` were conducted on Tesla V100-SXM2-32GB GPUs; experiments with `images_per_gpu=3` were conducted on GeForce GTX 1080 Ti GPUs. +- Here are the descriptions of each experiment setting (the relation between these batch sizes and the learning rate is sketched below): + - 10 x 5: 10 GPUs with 5 images per GPU. This is the same setting as reported in the original paper. + - 8 x 6: 8 GPUs with 6 images per GPU. The total batch size is similar to the paper's, and training needs only 1 node. + - 32 x 3: 32 GPUs with 3 images per GPU. This is the default setting for 1080 Ti cards and needs 4 nodes to train.
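The relation between these batch-size settings and the learning rate follows the usual linear scaling rule, which is also what the `auto_scale_lr` entries in the configs below encode (each one records its own `base_batch_size`: 50, 48 and 96). A minimal sketch, purely illustrative and not mmdet's implementation:

```python
def linearly_scaled_lr(base_lr, base_batch_size, num_gpus, samples_per_gpu):
    """Linear scaling rule: the learning rate grows in proportion to the
    effective batch size. Illustrative helper, not mmdet's auto-scale hook."""
    effective_batch_size = num_gpus * samples_per_gpu
    return base_lr * effective_batch_size / base_batch_size

# Hypothetical example: reproducing the 10 x 5 recipe (base_batch_size=50,
# Adam lr=0.0005) on 4 GPUs with 5 images each would scale the LR down to
print(linearly_scaled_lr(base_lr=0.0005, base_batch_size=50,
                         num_gpus=4, samples_per_gpu=5))  # -> 0.0002
```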
+ +## Citation + +```latex +@inproceedings{law2018cornernet, + title={Cornernet: Detecting objects as paired keypoints}, + author={Law, Hei and Deng, Jia}, + booktitle={15th European Conference on Computer Vision, ECCV 2018}, + pages={765--781}, + year={2018}, + organization={Springer Verlag} +} +``` diff --git a/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py b/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py new file mode 100644 index 0000000..6cb05a7 --- /dev/null +++ b/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py @@ -0,0 +1,110 @@ +_base_ = [ + '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' +] + +# model settings +model = dict( + type='CornerNet', + backbone=dict( + type='HourglassNet', + downsample_times=5, + num_stacks=2, + stage_channels=[256, 256, 384, 384, 384, 512], + stage_blocks=[2, 2, 2, 2, 2, 4], + norm_cfg=dict(type='BN', requires_grad=True)), + neck=None, + bbox_head=dict( + type='CornerHead', + num_classes=80, + in_channels=256, + num_feat_levels=2, + corner_emb_channels=1, + loss_heatmap=dict( + type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), + loss_embedding=dict( + type='AssociativeEmbeddingLoss', + pull_weight=0.10, + push_weight=0.10), + loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), + # training and testing settings + train_cfg=None, + test_cfg=dict( + corner_topk=100, + local_maximum_kernel=3, + distance_threshold=0.5, + score_thr=0.05, + max_per_img=100, + nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) +# data settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='RandomCenterCropPad', + crop_size=(511, 511), + ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), + test_mode=False, + test_pad_mode=None, + **img_norm_cfg), + dict(type='Resize', img_scale=(511, 511), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict( + type='MultiScaleFlipAug', + scale_factor=1.0, + flip=True, + transforms=[ + dict(type='Resize'), + dict( + type='RandomCenterCropPad', + crop_size=None, + ratios=None, + border=None, + test_mode=True, + test_pad_mode=['logical_or', 127], + **img_norm_cfg), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict( + type='Collect', + keys=['img'], + meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', + 'scale_factor', 'flip', 'img_norm_cfg', 'border')), + ]) +] +data = dict( + samples_per_gpu=5, + workers_per_gpu=3, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='Adam', lr=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[180]) +runner = dict(type='EpochBasedRunner', max_epochs=210) + +# 
NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (10 GPUs) x (5 samples per GPU) +auto_scale_lr = dict(base_batch_size=50) diff --git a/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py b/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py new file mode 100644 index 0000000..f539cdb --- /dev/null +++ b/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py @@ -0,0 +1,110 @@ +_base_ = [ + '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' +] + +# model settings +model = dict( + type='CornerNet', + backbone=dict( + type='HourglassNet', + downsample_times=5, + num_stacks=2, + stage_channels=[256, 256, 384, 384, 384, 512], + stage_blocks=[2, 2, 2, 2, 2, 4], + norm_cfg=dict(type='BN', requires_grad=True)), + neck=None, + bbox_head=dict( + type='CornerHead', + num_classes=80, + in_channels=256, + num_feat_levels=2, + corner_emb_channels=1, + loss_heatmap=dict( + type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), + loss_embedding=dict( + type='AssociativeEmbeddingLoss', + pull_weight=0.10, + push_weight=0.10), + loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), + # training and testing settings + train_cfg=None, + test_cfg=dict( + corner_topk=100, + local_maximum_kernel=3, + distance_threshold=0.5, + score_thr=0.05, + max_per_img=100, + nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) +# data settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='RandomCenterCropPad', + crop_size=(511, 511), + ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), + test_mode=False, + test_pad_mode=None, + **img_norm_cfg), + dict(type='Resize', img_scale=(511, 511), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict( + type='MultiScaleFlipAug', + scale_factor=1.0, + flip=True, + transforms=[ + dict(type='Resize'), + dict( + type='RandomCenterCropPad', + crop_size=None, + ratios=None, + border=None, + test_mode=True, + test_pad_mode=['logical_or', 127], + **img_norm_cfg), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict( + type='Collect', + keys=['img'], + meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', + 'scale_factor', 'flip', 'img_norm_cfg', 'border')), + ]) +] +data = dict( + samples_per_gpu=3, + workers_per_gpu=3, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='Adam', lr=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[180]) +runner = dict(type='EpochBasedRunner', max_epochs=210) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (32 GPUs) x (3 samples per GPU) +auto_scale_lr = dict(base_batch_size=96) diff --git a/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py b/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py new file mode 100644 index 0000000..9b115d7 --- /dev/null +++ b/downstream/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py @@ -0,0 +1,110 @@ +_base_ = [ + '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' +] + +# model settings +model = dict( + type='CornerNet', + backbone=dict( + type='HourglassNet', + downsample_times=5, + num_stacks=2, + stage_channels=[256, 256, 384, 384, 384, 512], + stage_blocks=[2, 2, 2, 2, 2, 4], + norm_cfg=dict(type='BN', requires_grad=True)), + neck=None, + bbox_head=dict( + type='CornerHead', + num_classes=80, + in_channels=256, + num_feat_levels=2, + corner_emb_channels=1, + loss_heatmap=dict( + type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), + loss_embedding=dict( + type='AssociativeEmbeddingLoss', + pull_weight=0.10, + push_weight=0.10), + loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), + # training and testing settings + train_cfg=None, + test_cfg=dict( + corner_topk=100, + local_maximum_kernel=3, + distance_threshold=0.5, + score_thr=0.05, + max_per_img=100, + nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) +# data settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='RandomCenterCropPad', + crop_size=(511, 511), + ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), + test_mode=False, + test_pad_mode=None, + **img_norm_cfg), + dict(type='Resize', img_scale=(511, 511), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict( + type='MultiScaleFlipAug', + scale_factor=1.0, + flip=True, + transforms=[ + dict(type='Resize'), + dict( + type='RandomCenterCropPad', + crop_size=None, + ratios=None, + border=None, + test_mode=True, + test_pad_mode=['logical_or', 127], + **img_norm_cfg), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict( + type='Collect', + keys=['img'], + meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', + 'scale_factor', 'flip', 'img_norm_cfg', 'border')), + ]) +] +data = dict( + samples_per_gpu=6, + workers_per_gpu=3, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='Adam', lr=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[180]) +runner = dict(type='EpochBasedRunner', max_epochs=210) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (6 samples per GPU) +auto_scale_lr = dict(base_batch_size=48) diff --git a/downstream/mmdetection/configs/cornernet/metafile.yml b/downstream/mmdetection/configs/cornernet/metafile.yml new file mode 100644 index 0000000..c2f6143 --- /dev/null +++ b/downstream/mmdetection/configs/cornernet/metafile.yml @@ -0,0 +1,83 @@ +Collections: + - Name: CornerNet + Metadata: + Training Data: COCO + Training Techniques: + - Adam + Training Resources: 8x V100 GPUs + Architecture: + - Corner Pooling + - Stacked Hourglass Network + Paper: + URL: https://arxiv.org/abs/1808.01244 + Title: 'CornerNet: Detecting Objects as Paired Keypoints' + README: configs/cornernet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.3.0/mmdet/models/detectors/cornernet.py#L9 + Version: v2.3.0 + +Models: + - Name: cornernet_hourglass104_mstest_10x5_210e_coco + In Collection: CornerNet + Config: configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py + Metadata: + Training Resources: 10x V100 GPUs + Batch Size: 50 + Training Memory (GB): 13.9 + inference time (ms/im): + - value: 238.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 210 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth + + - Name: cornernet_hourglass104_mstest_8x6_210e_coco + In Collection: CornerNet + Config: configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py + Metadata: + Batch Size: 48 + Training Memory (GB): 15.9 + inference time (ms/im): + - value: 238.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 210 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth + + - Name: cornernet_hourglass104_mstest_32x3_210e_coco + In Collection: CornerNet + Config: configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py + Metadata: + Training Resources: 32x V100 GPUs + Batch Size: 96 + Training Memory (GB): 9.5 + inference time (ms/im): + - value: 256.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 210 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110-1efaea91.pth diff --git a/downstream/mmdetection/configs/dcn/README.md b/downstream/mmdetection/configs/dcn/README.md new file mode 100644 index 0000000..745b01c --- /dev/null +++ b/downstream/mmdetection/configs/dcn/README.md @@ -0,0 +1,48 @@ +# DCN + +> [Deformable Convolutional Networks](https://arxiv.org/abs/1703.06211) + + + +## Abstract + +Convolutional neural networks (CNNs) are inherently limited to model geometric transformations due to the fixed geometric structures in its building modules. In this work, we introduce two new modules to enhance the transformation modeling capacity of CNNs, namely, deformable convolution and deformable RoI pooling. 
Both are based on the idea of augmenting the spatial sampling locations in the modules with additional offsets and learning the offsets from target tasks, without additional supervision. The new modules can readily replace their plain counterparts in existing CNNs and can be easily trained end-to-end by standard back-propagation, giving rise to deformable convolutional networks. Extensive experiments validate the effectiveness of our approach on sophisticated vision tasks of object detection and semantic segmentation. + +
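As a purely illustrative, single-channel sketch of the sampling idea described above (a learned offset per kernel location, evaluated with bilinear interpolation), and not the optimized DCN operator that the configs below enable through `dcn=dict(type='DCN', ...)`:

```python
import numpy as np

def bilinear_sample(feat, y, x):
    """Sample feat[H, W] at the fractional location (y, x) with zero padding."""
    H, W = feat.shape
    y0, x0 = int(np.floor(y)), int(np.floor(x))
    val = 0.0
    for dy in (0, 1):
        for dx in (0, 1):
            yy, xx = y0 + dy, x0 + dx
            if 0 <= yy < H and 0 <= xx < W:
                val += (1 - abs(y - yy)) * (1 - abs(x - xx)) * feat[yy, xx]
    return val

def deformable_conv3x3(feat, weight, offsets):
    """Toy 3x3 deformable convolution (stride 1, zero padding).

    offsets has shape (H, W, 9, 2): one learned (dy, dx) per output location
    and per kernel sample. With all-zero offsets this is a plain 3x3 conv.
    """
    H, W = feat.shape
    grid = [(dy, dx) for dy in (-1, 0, 1) for dx in (-1, 0, 1)]
    out = np.zeros((H, W))
    for i in range(H):
        for j in range(W):
            for k, (dy, dx) in enumerate(grid):
                oy, ox = offsets[i, j, k]
                out[i, j] += weight[dy + 1, dx + 1] * bilinear_sample(
                    feat, i + dy + oy, j + dx + ox)
    return out

feat = np.random.rand(8, 8)
weight = np.ones((3, 3)) / 9.0           # simple averaging kernel
offsets = np.zeros((8, 8, 9, 2))         # zero offsets -> ordinary 3x3 conv
print(deformable_conv3x3(feat, weight, offsets).shape)
```

In the real module the offsets are predicted by an extra convolution and trained end-to-end, which is what lets the sampling grid adapt to object geometry.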
    + +## Results and Models + +| Backbone | Model | Style | Conv | Pool | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :----------: | :-----: | :----------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 4.0 | 17.8 | 41.3 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130_212941.log.json) | +| R-50-FPN | Faster | pytorch | - | dpool | 1x | 5.0 | 17.2 | 38.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307_203250.log.json) | +| R-101-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 6.0 | 12.5 | 42.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203_230019.log.json) | +| X-101-32x4d-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 7.3 | 10.0 | 44.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203_001325.log.json) | +| R-50-FPN | Mask | pytorch | dconv(c3-c5) | - | 1x | 4.5 | 15.4 | 41.8 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203_061339.log.json) | +| R-101-FPN | Mask | pytorch | dconv(c3-c5) | - | 1x | 6.5 | 11.7 | 43.5 | 38.9 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216_191601.log.json) | +| R-50-FPN | Cascade | pytorch | dconv(c3-c5) | - | 1x | 4.5 | 14.6 | 43.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130_220843.log.json) | +| R-101-FPN | Cascade | pytorch | dconv(c3-c5) | - | 1x | 6.4 | 11.0 | 45.0 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203_224829.log.json) | +| R-50-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 6.0 | 10.0 | 44.4 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202_010309.log.json) | +| R-101-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 8.0 | 8.6 | 45.8 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204_134006.log.json) | +| X-101-32x4d-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 9.2 | | 47.3 | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-20200606_183737.log.json) | +| R-50-FPN (FP16) | Mask | pytorch | dconv(c3-c5) | - | 1x | 3.0 | | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247.log.json) | + +**Notes:** + +- `dconv` denotes deformable convolution, `c3-c5` means adding dconv in resnet stage 3 to 5. `dpool` denotes deformable roi pooling. +- The dcn ops are modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch, which should be more memory efficient and slightly faster. +- (\*) For R-50-FPN (dg=4), dg is short for deformable_group. This model is trained and tested on Amazon EC2 p3dn.24xlarge instance. +- **Memory, Train/Inf time is outdated.** + +## Citation + +```latex +@inproceedings{dai2017deformable, + title={Deformable Convolutional Networks}, + author={Dai, Jifeng and Qi, Haozhi and Xiong, Yuwen and Li, Yi and Zhang, Guodong and Hu, Han and Wei, Yichen}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + year={2017} +} +``` diff --git a/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..081b998 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..3b3683a --- /dev/null +++ b/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..daaa472 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..a01df33 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py 
b/downstream/mmdetection/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..aa664bd --- /dev/null +++ b/downstream/mmdetection/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..f5fee7e --- /dev/null +++ b/downstream/mmdetection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..8787088 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py b/downstream/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py new file mode 100644 index 0000000..1b695f0 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py @@ -0,0 +1,12 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + _delete_=True, + type='DeformRoIPoolPack', + output_size=7, + output_channels=256), + out_channels=256, + featmap_strides=[4, 8, 16, 32]))) diff --git a/downstream/mmdetection/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..e3bea19 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..cb34002 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git 
a/downstream/mmdetection/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..ababe58 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..ee5cca7 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) + +fp16 = dict(loss_scale=512.) diff --git a/downstream/mmdetection/configs/dcn/metafile.yml b/downstream/mmdetection/configs/dcn/metafile.yml new file mode 100644 index 0000000..36f3887 --- /dev/null +++ b/downstream/mmdetection/configs/dcn/metafile.yml @@ -0,0 +1,272 @@ +Collections: + - Name: Deformable Convolutional Networks + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Deformable Convolution + Paper: + URL: https://arxiv.org/abs/1703.06211 + Title: "Deformable Convolutional Networks" + README: configs/dcn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 + Version: v2.0.0 + +Models: + - Name: faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 56.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth + + - Name: faster_rcnn_r50_fpn_dpool_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + inference time (ms/im): + - value: 58.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth + + - Name: faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 80 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth + + - Name: faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 7.3 + inference time (ms/im): + - value: 100 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth + + - Name: mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + inference time (ms/im): + - value: 64.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth + + - Name: mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py + Metadata: + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + Training Memory (GB): 3.0 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth + + - Name: mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 6.5 + inference time (ms/im): + - value: 85.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth + + - Name: cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + inference time (ms/im): + - value: 68.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth + + - Name: cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py + 
Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth + + - Name: cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 100 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth + + - Name: cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + inference time (ms/im): + - value: 116.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 9.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth diff --git a/downstream/mmdetection/configs/dcnv2/README.md b/downstream/mmdetection/configs/dcnv2/README.md new file mode 100644 index 0000000..d230f20 --- /dev/null +++ b/downstream/mmdetection/configs/dcnv2/README.md @@ -0,0 +1,37 @@ +# DCNv2 + +> [Deformable ConvNets v2: More Deformable, Better Results](https://arxiv.org/abs/1811.11168) + + + +## Abstract + +The superior performance of Deformable Convolutional Networks arises from its ability to adapt to the geometric variations of objects. Through an examination of its adaptive behavior, we observe that while the spatial support for its neural features conforms more closely than regular ConvNets to object structure, this support may nevertheless extend well beyond the region of interest, causing features to be influenced by irrelevant image content. To address this problem, we present a reformulation of Deformable ConvNets that improves its ability to focus on pertinent image regions, through increased modeling power and stronger training. 
The modeling power is enhanced through a more comprehensive integration of deformable convolution within the network, and by introducing a modulation mechanism that expands the scope of deformation modeling. To effectively harness this enriched modeling capability, we guide network training via a proposed feature mimicking scheme that helps the network to learn features that reflect the object focus and classification power of RCNN features. With the proposed contributions, this new version of Deformable ConvNets yields significant performance gains over the original model and produces leading results on the COCO benchmark for object detection and instance segmentation. + +## Results and Models + +| Backbone | Model | Style | Conv | Pool | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :---------------: | :----: | :-----: | :-----------: | :----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Faster | pytorch | mdconv(c3-c5) | - | 1x | 4.1 | 17.6 | 41.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130_222144.log.json) | +| \*R-50-FPN (dg=4) | Faster | pytorch | mdconv(c3-c5) | - | 1x | 4.2 | 17.4 | 41.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130_222058.log.json) | +| R-50-FPN | Faster | pytorch | - | mdpool | 1x | 5.8 | 16.6 | 38.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307_203304.log.json) | +| R-50-FPN | Mask | pytorch | mdconv(c3-c5) | - | 1x | 4.5 | 15.1 | 41.5 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203_063443.log.json) | +| R-50-FPN (FP16) | Mask | pytorch | mdconv(c3-c5) | - | 1x | 3.1 | | 42.0 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434.log.json) | + +**Notes:** + +- `mdconv` denotes modulated deformable convolution, `c3-c5` means adding dconv in resnet stage 3 to 5. `mdpool` denotes modulated deformable roi pooling. +- The dcn ops are modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch, which should be more memory efficient and slightly faster. +- (\*) For R-50-FPN (dg=4), dg is short for deformable_group. This model is trained and tested on Amazon EC2 p3dn.24xlarge instance. +- **Memory, Train/Inf time is outdated.** + +## Citation + +```latex +@article{zhu2018deformable, + title={Deformable ConvNets v2: More Deformable, Better Results}, + author={Zhu, Xizhou and Hu, Han and Lin, Stephen and Dai, Jifeng}, + journal={arXiv preprint arXiv:1811.11168}, + year={2018} +} +``` diff --git a/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..d1bcf3c --- /dev/null +++ b/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py b/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py new file mode 100644 index 0000000..d0ab89c --- /dev/null +++ b/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py b/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py new file mode 100644 index 0000000..ad7b034 --- /dev/null +++ b/downstream/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py @@ -0,0 +1,12 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + _delete_=True, + type='ModulatedDeformRoIPoolPack', + output_size=7, + output_channels=256), + out_channels=256, + featmap_strides=[4, 8, 16, 32]))) diff --git a/downstream/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..7e21454 --- /dev/null +++ 
b/downstream/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) + +fp16 = dict(loss_scale=512.) diff --git a/downstream/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..5ca2a67 --- /dev/null +++ b/downstream/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/dcnv2/metafile.yml b/downstream/mmdetection/configs/dcnv2/metafile.yml new file mode 100644 index 0000000..9049421 --- /dev/null +++ b/downstream/mmdetection/configs/dcnv2/metafile.yml @@ -0,0 +1,123 @@ +Collections: + - Name: Deformable Convolutional Networks v2 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Deformable Convolution + Paper: + URL: https://arxiv.org/abs/1811.11168 + Title: "Deformable ConvNets v2: More Deformable, Better Results" + README: configs/dcnv2/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 + Version: v2.0.0 + +Models: + - Name: faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 4.1 + inference time (ms/im): + - value: 56.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth + + - Name: faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + inference time (ms/im): + - value: 57.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth + + - Name: faster_rcnn_r50_fpn_mdpool_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py + Metadata: + Training Memory (GB): 5.8 + inference time (ms/im): + - value: 60.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth + + - Name: mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks 
v2 + Config: configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + inference time (ms/im): + - value: 66.23 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth + + - Name: mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks v2 + Config: configs/dcn/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 3.1 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth diff --git a/downstream/mmdetection/configs/ddod/README.md b/downstream/mmdetection/configs/ddod/README.md new file mode 100644 index 0000000..9ab1f48 --- /dev/null +++ b/downstream/mmdetection/configs/ddod/README.md @@ -0,0 +1,31 @@ +# DDOD + +> [Disentangle Your Dense Object Detector](https://arxiv.org/pdf/2107.02963.pdf) + + + +## Abstract + +Deep learning-based dense object detectors have achieved great success in the past few years and have been applied to numerous multimedia applications such as video understanding. However, the current training pipeline for dense detectors is compromised to lots of conjunctions that may not hold. In this paper, we investigate three such important conjunctions: 1) only samples assigned as positive in classification head are used to train the regression head; 2) classification and regression share the same input feature and computational fields defined by the parallel head architecture; and 3) samples distributed in different feature pyramid layers are treated equally when computing the loss. We first carry out a series of pilot experiments to show disentangling such conjunctions can lead to persistent performance improvement. Then, based on these findings, we propose Disentangled Dense Object Detector(DDOD), in which simple and effective disentanglement mechanisms are designed and integrated into the current state-of-the-art dense object detectors. Extensive experiments on MS COCO benchmark show that our approach can lead to 2.0 mAP, 2.4 mAP and 2.2 mAP absolute improvements on RetinaNet, FCOS, and ATSS baselines with negligible extra overhead. Notably, our best model reaches 55.0 mAP on the COCO test-dev set and 93.5 AP on the hard subset of WIDER FACE, achieving new state-of-the-art performance on these two competitive benchmarks. Code is available at https://github.com/zehuichen123/DDOD. + +
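Of the three disentanglements, the one that is easiest to spot in this diff is the separate label assignment for the two heads: the `ddod_r50_fpn_1x_coco.py` config added below gives the classification and regression branches their own assigners. The excerpt here only repeats that part of the config for orientation; see the full file later in this diff.

```python
# Excerpt from configs/ddod/ddod_r50_fpn_1x_coco.py (added later in this diff):
# classification and regression labels are assigned by two separate ATSS-based
# assigners, which is the "disentanglement" the abstract describes.
train_cfg = dict(
    # assigner for the classification branch
    assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
    # separate assigner for the regression branch
    reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
    allowed_border=-1,
    pos_weight=-1,
    debug=False)
```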
    + +## Results and Models + +| Model | Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | +| :-------: | :------: | :-----: | :-----: | :------: | :----: | :--------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| DDOD-ATSS | R-50 | pytorch | 1x | 3.4 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ddod/ddod_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737.log.json) | + +## Citation + +```latex +@inproceedings{chen2021disentangle, +title={Disentangle Your Dense Object Detector}, +author={Chen, Zehui and Yang, Chenhongyi and Li, Qiaofei and Zhao, Feng and Zha, Zheng-Jun and Wu, Feng}, +booktitle={Proceedings of the 29th ACM International Conference on Multimedia}, +pages={4939--4948}, +year={2021} +} +``` diff --git a/downstream/mmdetection/configs/ddod/ddod_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/ddod/ddod_r50_fpn_1x_coco.py new file mode 100644 index 0000000..02dd2fe --- /dev/null +++ b/downstream/mmdetection/configs/ddod/ddod_r50_fpn_1x_coco.py @@ -0,0 +1,67 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='DDOD', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='DDODHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_iou=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + train_cfg=dict( + # assigner is mean cls_assigner + assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8), + reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# This `persistent_workers` is only valid when PyTorch>=1.7.0 +data = dict(persistent_workers=True) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/ddod/metafile.yml b/downstream/mmdetection/configs/ddod/metafile.yml new file mode 100644 index 0000000..c223950 --- /dev/null +++ b/downstream/mmdetection/configs/ddod/metafile.yml @@ -0,0 
+1,33 @@ +Collections: + - Name: DDOD + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - DDOD + - FPN + - ResNet + Paper: + URL: https://arxiv.org/pdf/2107.02963.pdf + Title: 'Disentangle Your Dense Object Detector' + README: configs/ddod/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/models/detectors/ddod.py#L6 + Version: v2.25.0 + +Models: + - Name: ddod_r50_fpn_1x_coco + In Collection: DDOD + Config: configs/ddod/ddod_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.4 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth diff --git a/downstream/mmdetection/configs/deepfashion/README.md b/downstream/mmdetection/configs/deepfashion/README.md new file mode 100644 index 0000000..45daec0 --- /dev/null +++ b/downstream/mmdetection/configs/deepfashion/README.md @@ -0,0 +1,70 @@ +# DeepFashion + +> [DeepFashion: Powering Robust Clothes Recognition and Retrieval With Rich Annotations](https://openaccess.thecvf.com/content_cvpr_2016/html/Liu_DeepFashion_Powering_Robust_CVPR_2016_paper.html) + + + +## Abstract + +Recent advances in clothes recognition have been driven by the construction of clothes datasets. Existing datasets are limited in the amount of annotations and are difficult to cope with the various challenges in real-world applications. In this work, we introduce DeepFashion, a large-scale clothes dataset with comprehensive annotations. It contains over 800,000 images, which are richly annotated with massive attributes, clothing landmarks, and correspondence of images taken under different scenarios including store, street snapshot, and consumer. Such rich annotations enable the development of powerful algorithms in clothes recognition and facilitating future researches. To demonstrate the advantages of DeepFashion, we propose a new deep model, namely FashionNet, which learns clothing features by jointly predicting clothing attributes and landmarks. The estimated landmarks are then employed to pool or gate the learned features. It is optimized in an iterative manner. Extensive experiments demonstrate the effectiveness of FashionNet and the usefulness of DeepFashion. + +
    + +## Introduction + +[MMFashion](https://github.com/open-mmlab/mmfashion) develops "fashion parsing and segmentation" module +based on the dataset +[DeepFashion-Inshop](https://drive.google.com/drive/folders/0B7EVK8r0v71pVDZFQXRsMDZCX1E?usp=sharing). +Its annotation follows COCO style. +To use it, you need to first download the data. Note that we only use "img_highres" in this task. +The file tree should be like this: + +```sh +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── DeepFashion +│ │ ├── In-shop +│ │ ├── Anno +│ │ │   ├── segmentation +│ │ │   | ├── DeepFashion_segmentation_train.json +│ │ │   | ├── DeepFashion_segmentation_query.json +│ │ │   | ├── DeepFashion_segmentation_gallery.json +│ │ │   ├── list_bbox_inshop.txt +│ │ │   ├── list_description_inshop.json +│ │ │   ├── list_item_inshop.txt +│ │ │   └── list_landmarks_inshop.txt +│ │ ├── Eval +│ │ │ └── list_eval_partition.txt +│ │ ├── Img +│ │ │ ├── img +│ │ │ │ ├──XXX.jpg +│ │ │ ├── img_highres +│ │ │ └── ├──XXX.jpg + +``` + +After that you can train the Mask RCNN r50 on DeepFashion-In-shop dataset by launching training with the `mask_rcnn_r50_fpn_1x.py` config +or creating your own config file. + +## Results and Models + +| Backbone | Model type | Dataset | bbox detection Average Precision | segmentation Average Precision | Config | Download (Google) | +| :------: | :--------: | :-----------------: | :------------------------------: | :----------------------------: | :----------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ResNet50 | Mask RCNN | DeepFashion-In-shop | 0.599 | 0.584 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/mask_rcnn_r50_fpn_15e_deepfashion_20200329_192752.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/20200329_192752.log.json) | + +## Citation + +```latex +@inproceedings{liuLQWTcvpr16DeepFashion, + author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, + title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2016} +} +``` diff --git a/downstream/mmdetection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py b/downstream/mmdetection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py new file mode 100644 index 0000000..c4e8638 --- /dev/null +++ b/downstream/mmdetection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', + '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=15) diff --git a/downstream/mmdetection/configs/deformable_detr/README.md 
b/downstream/mmdetection/configs/deformable_detr/README.md new file mode 100644 index 0000000..378e1f2 --- /dev/null +++ b/downstream/mmdetection/configs/deformable_detr/README.md @@ -0,0 +1,41 @@ +# Deformable DETR + +> [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) + + + +## Abstract + +DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we propose Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10 times fewer training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach. +
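The "small set of key sampling points around a reference" is the deformable attention module. As a sketch in the paper's notation (not part of the original README): `z_q` is the query feature, `p_q` its reference point, `x` the input feature map, `M` the number of attention heads and `K` the number of sampled keys per head, with `K` much smaller than the number of feature map locations.

```latex
% Deformable attention: each head m attends only to K sampled locations
% p_q + \Delta p_{mqk}; the attention weights A_{mqk} are normalized over k.
\mathrm{DeformAttn}(z_q, p_q, x) =
    \sum_{m=1}^{M} W_m \Big[ \sum_{k=1}^{K} A_{mqk} \cdot W'_m \, x(p_q + \Delta p_{mqk}) \Big]
```

Both the offsets `\Delta p_{mqk}` and the weights `A_{mqk}` are predicted from the query by linear projections; note that the optimizer settings in the config below give the `sampling_offsets` and `reference_points` projections a reduced learning rate (`lr_mult=0.1`).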
    + +## Results and Models + +| Backbone | Model | Lr schd | box AP | Config | Download | +| :------: | :---------------------------------: | :-----: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | Deformable DETR | 50e | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.log.json) | +| R-50 | + iterative bounding box refinement | 50e | 46.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.log.json) | +| R-50 | ++ two-stage Deformable DETR | 50e | 46.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.log.json) | + +# NOTE + +1. All models are trained with batch size 32. +2. The performance is unstable. `Deformable DETR` and `iterative bounding box refinement` may fluctuate about 0.3 mAP. `two-stage Deformable DETR` may fluctuate about 0.2 mAP. + +## Citation + +We provide the config files for Deformable DETR: [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159). 
+ +```latex +@inproceedings{ +zhu2021deformable, +title={Deformable DETR: Deformable Transformers for End-to-End Object Detection}, +author={Xizhou Zhu and Weijie Su and Lewei Lu and Bin Li and Xiaogang Wang and Jifeng Dai}, +booktitle={International Conference on Learning Representations}, +year={2021}, +url={https://openreview.net/forum?id=gZ9hCDWe6ke} +} +``` diff --git a/downstream/mmdetection/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py b/downstream/mmdetection/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py new file mode 100644 index 0000000..c64d09f --- /dev/null +++ b/downstream/mmdetection/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py @@ -0,0 +1,177 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +model = dict( + type='DeformableDETR', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='ChannelMapper', + in_channels=[512, 1024, 2048], + kernel_size=1, + out_channels=256, + act_cfg=None, + norm_cfg=dict(type='GN', num_groups=32), + num_outs=4), + bbox_head=dict( + type='DeformableDETRHead', + num_query=300, + num_classes=80, + in_channels=2048, + sync_cls_avg_factor=True, + as_two_stage=False, + transformer=dict( + type='DeformableDetrTransformer', + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', embed_dims=256), + feedforward_channels=1024, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DeformableDetrTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + dropout=0.1), + dict( + type='MultiScaleDeformableAttention', + embed_dims=256) + ], + feedforward_channels=1024, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), + test_cfg=dict(max_per_img=100)) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different +# from the default setting in mmdet. 
+train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[ + [ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + # The radio of all image in train dataset < 7 + # follow the original impl + img_scale=[(400, 4200), (500, 4200), (600, 4200)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=1), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +# test_pipeline, NOTE the Pad's size_divisor is different from the default +# setting (size_divisor=32). While there is little effect on the performance +# whether we use the default setting or use size_divisor=1. +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=1), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(filter_empty_gt=False, pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='AdamW', + lr=2e-4, + weight_decay=0.0001, + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1), + 'sampling_offsets': dict(lr_mult=0.1), + 'reference_points': dict(lr_mult=0.1) + })) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40]) +runner = dict(type='EpochBasedRunner', max_epochs=50) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (16 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=32) diff --git a/downstream/mmdetection/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py b/downstream/mmdetection/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py new file mode 100644 index 0000000..01f13df --- /dev/null +++ b/downstream/mmdetection/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py @@ -0,0 +1,2 @@ +_base_ = 'deformable_detr_r50_16x2_50e_coco.py' +model = dict(bbox_head=dict(with_box_refine=True)) diff --git a/downstream/mmdetection/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py b/downstream/mmdetection/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py new file mode 100644 index 0000000..2aa840d --- /dev/null +++ b/downstream/mmdetection/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py @@ -0,0 +1,2 @@ +_base_ = 'deformable_detr_refine_r50_16x2_50e_coco.py' +model = dict(bbox_head=dict(as_two_stage=True)) diff --git a/downstream/mmdetection/configs/deformable_detr/metafile.yml b/downstream/mmdetection/configs/deformable_detr/metafile.yml new file mode 100644 index 0000000..873292d --- /dev/null +++ b/downstream/mmdetection/configs/deformable_detr/metafile.yml @@ -0,0 +1,56 @@ +Collections: + - Name: Deformable DETR + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - Transformer + Paper: + URL: https://openreview.net/forum?id=gZ9hCDWe6ke + Title: 'Deformable DETR: Deformable Transformers for End-to-End Object Detection' + README: configs/deformable_detr/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/deformable_detr.py#L6 + Version: v2.12.0 + +Models: + - Name: deformable_detr_r50_16x2_50e_coco + In Collection: Deformable DETR + Config: configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py + Metadata: + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth + + - Name: deformable_detr_refine_r50_16x2_50e_coco + In Collection: Deformable DETR + Config: configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py + Metadata: + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.pth + + - Name: deformable_detr_twostage_refine_r50_16x2_50e_coco + In Collection: Deformable DETR + Config: configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py + Metadata: + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.pth diff --git a/downstream/mmdetection/configs/detectors/README.md b/downstream/mmdetection/configs/detectors/README.md new file mode 100644 index 0000000..baa245f --- /dev/null +++ b/downstream/mmdetection/configs/detectors/README.md @@ -0,0 +1,69 @@ +# DetectoRS + +> [DetectoRS: 
Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/abs/2006.02334) + + + +## Abstract + +Many modern object detectors demonstrate outstanding performances by using the mechanism of looking and thinking twice. In this paper, we explore this mechanism in the backbone design for object detection. At the macro level, we propose Recursive Feature Pyramid, which incorporates extra feedback connections from Feature Pyramid Networks into the bottom-up backbone layers. At the micro level, we propose Switchable Atrous Convolution, which convolves the features with different atrous rates and gathers the results using switch functions. Combining them results in DetectoRS, which significantly improves the performances of object detection. On COCO test-dev, DetectoRS achieves state-of-the-art 55.7% box AP for object detection, 48.5% mask AP for instance segmentation, and 50.0% PQ for panoptic segmentation. + +
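To make the SAC mechanism described above concrete, here is a minimal sketch (assuming a sigmoid 1x1-conv switch and atrous rates 1 and 3; this is not the mmdet `SAC` implementation): the same kernel is applied at two atrous rates and a spatially varying switch blends the two responses.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToySAC(nn.Module):
    """Toy switchable atrous convolution: blend two atrous rates with a switch."""

    def __init__(self, channels, kernel_size=3):
        super().__init__()
        self.weight = nn.Parameter(
            torch.randn(channels, channels, kernel_size, kernel_size) * 0.01)
        # Spatially varying switch S(x) in [0, 1].
        self.switch = nn.Conv2d(channels, 1, kernel_size=1)

    def forward(self, x):
        s = torch.sigmoid(self.switch(x))
        y_rate1 = F.conv2d(x, self.weight, padding=1, dilation=1)  # atrous rate 1
        y_rate3 = F.conv2d(x, self.weight, padding=3, dilation=3)  # atrous rate 3
        # Gather the two responses with the switch function.
        return s * y_rate1 + (1 - s) * y_rate3
```

In the configs added below, SAC is enabled on the `DetectoRS_ResNet` backbone via `sac=dict(type='SAC', use_deform=True)`, and RFP is enabled by using the `RFP` neck with an `rfp_backbone`.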
    + +## Introduction + +DetectoRS requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +| | ├── stuffthingmaps +``` + +## Results and Models + +DetectoRS includes two major components: + +- Recursive Feature Pyramid (RFP). +- Switchable Atrous Convolution (SAC). + +They can be used independently. +Combining them together results in DetectoRS. +The results on COCO 2017 val are shown in the below table. + +| Method | Detector | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :-----------------: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| RFP | Cascade + ResNet-50 | 1x | 7.5 | - | 44.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco_20200624_104126.log.json) | +| SAC | Cascade + ResNet-50 | 1x | 5.6 | - | 45.0 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco_20200624_104402.log.json) | +| DetectoRS | Cascade + ResNet-50 | 1x | 9.9 | - | 47.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco_20200706_001203.log.json) | +| RFP | HTC + ResNet-50 | 1x | 11.2 | - | 46.6 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/htc_r50_rfp_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco_20200624_103053.log.json) | +| SAC | HTC + ResNet-50 | 1x | 9.3 | - | 46.4 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/htc_r50_sac_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco_20200624_103111.log.json) | +| DetectoRS | HTC + ResNet-50 | 1x | 13.6 | - | 49.1 | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_htc_r50_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco_20200624_103659.log.json) | +| DetectoRS | HTC + ResNet-101 | 20e | 19.6 | | 50.5 | 43.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_htc_r101_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r101_20e_coco/detectors_htc_r101_20e_coco_20210419_203638-348d533b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r101_20e_coco/detectors_htc_r101_20e_coco_20210419_203638.log.json) | + +*Note*: This is a re-implementation based on MMDetection-V2. +The original implementation is based on MMDetection-V1. + +## Citation + +We provide the config files for [DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/pdf/2006.02334.pdf). + +```latex +@article{qiao2020detectors, + title={DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution}, + author={Qiao, Siyuan and Chen, Liang-Chieh and Yuille, Alan}, + journal={arXiv preprint arXiv:2006.02334}, + year={2020} +} +``` diff --git a/downstream/mmdetection/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py b/downstream/mmdetection/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py new file mode 100644 index 0000000..4430d8a --- /dev/null +++ b/downstream/mmdetection/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py @@ -0,0 +1,28 @@ +_base_ = [ + '../_base_/models/cascade_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + pretrained='torchvision://resnet50', + style='pytorch'))) diff --git a/downstream/mmdetection/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py b/downstream/mmdetection/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py new file mode 100644 index 0000000..ccd9319 --- /dev/null +++ b/downstream/mmdetection/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/cascade_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py b/downstream/mmdetection/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py new file mode 100644 index 0000000..f760404 --- /dev/null 
+++ b/downstream/mmdetection/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/models/cascade_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + pretrained='torchvision://resnet50', + style='pytorch'))) diff --git a/downstream/mmdetection/configs/detectors/detectors_htc_r101_20e_coco.py b/downstream/mmdetection/configs/detectors/detectors_htc_r101_20e_coco.py new file mode 100644 index 0000000..93d7d2b --- /dev/null +++ b/downstream/mmdetection/configs/detectors/detectors_htc_r101_20e_coco.py @@ -0,0 +1,28 @@ +_base_ = '../htc/htc_r101_fpn_20e_coco.py' + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + pretrained='torchvision://resnet101', + style='pytorch'))) diff --git a/downstream/mmdetection/configs/detectors/detectors_htc_r50_1x_coco.py b/downstream/mmdetection/configs/detectors/detectors_htc_r50_1x_coco.py new file mode 100644 index 0000000..0d2fc4f --- /dev/null +++ b/downstream/mmdetection/configs/detectors/detectors_htc_r50_1x_coco.py @@ -0,0 +1,28 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True), + pretrained='torchvision://resnet50', + style='pytorch'))) diff --git a/downstream/mmdetection/configs/detectors/htc_r50_rfp_1x_coco.py b/downstream/mmdetection/configs/detectors/htc_r50_rfp_1x_coco.py new file mode 100644 index 0000000..496104e --- /dev/null +++ b/downstream/mmdetection/configs/detectors/htc_r50_rfp_1x_coco.py @@ -0,0 +1,24 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + output_img=True), + neck=dict( + type='RFP', + rfp_steps=2, + aspp_out_channels=64, + 
aspp_dilations=(1, 3, 6, 1), + rfp_backbone=dict( + rfp_inplanes=256, + type='DetectoRS_ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + conv_cfg=dict(type='ConvAWS'), + pretrained='torchvision://resnet50', + style='pytorch'))) diff --git a/downstream/mmdetection/configs/detectors/htc_r50_sac_1x_coco.py b/downstream/mmdetection/configs/detectors/htc_r50_sac_1x_coco.py new file mode 100644 index 0000000..72d4db9 --- /dev/null +++ b/downstream/mmdetection/configs/detectors/htc_r50_sac_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' + +model = dict( + backbone=dict( + type='DetectoRS_ResNet', + conv_cfg=dict(type='ConvAWS'), + sac=dict(type='SAC', use_deform=True), + stage_with_sac=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/detectors/metafile.yml b/downstream/mmdetection/configs/detectors/metafile.yml new file mode 100644 index 0000000..4bed569 --- /dev/null +++ b/downstream/mmdetection/configs/detectors/metafile.yml @@ -0,0 +1,114 @@ +Collections: + - Name: DetectoRS + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ASPP + - FPN + - RFP + - RPN + - ResNet + - RoIAlign + - SAC + Paper: + URL: https://arxiv.org/abs/2006.02334 + Title: 'DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution' + README: configs/detectors/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/backbones/detectors_resnet.py#L205 + Version: v2.2.0 + +Models: + - Name: cascade_rcnn_r50_rfp_1x_coco + In Collection: DetectoRS + Config: configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py + Metadata: + Training Memory (GB): 7.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth + + - Name: cascade_rcnn_r50_sac_1x_coco + In Collection: DetectoRS + Config: configs/detectors/cascade_rcnn_r50_sac_1x_coco.py + Metadata: + Training Memory (GB): 5.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth + + - Name: detectors_cascade_rcnn_r50_1x_coco + In Collection: DetectoRS + Config: configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py + Metadata: + Training Memory (GB): 9.9 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth + + - Name: htc_r50_rfp_1x_coco + In Collection: DetectoRS + Config: configs/detectors/htc_r50_rfp_1x_coco.py + Metadata: + Training Memory (GB): 11.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth + + - Name: htc_r50_sac_1x_coco + In Collection: DetectoRS + Config: configs/detectors/htc_r50_sac_1x_coco.py + Metadata: + Training Memory (GB): 9.3 + Epochs: 12 + Results: + - Task: Object Detection + 
Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth + + - Name: detectors_htc_r50_1x_coco + In Collection: DetectoRS + Config: configs/detectors/detectors_htc_r50_1x_coco.py + Metadata: + Training Memory (GB): 13.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth diff --git a/downstream/mmdetection/configs/detr/README.md b/downstream/mmdetection/configs/detr/README.md new file mode 100644 index 0000000..9f2485d --- /dev/null +++ b/downstream/mmdetection/configs/detr/README.md @@ -0,0 +1,37 @@ +# DETR + +> [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) + + + +## Abstract + +We present a new method that views object detection as a direct set prediction problem. Our approach streamlines the detection pipeline, effectively removing the need for many hand-designed components like a non-maximum suppression procedure or anchor generation that explicitly encode our prior knowledge about the task. The main ingredients of the new framework, called DEtection TRansformer or DETR, are a set-based global loss that forces unique predictions via bipartite matching, and a transformer encoder-decoder architecture. Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. The new model is conceptually simple and does not require a specialized library, unlike many other modern detectors. DETR demonstrates accuracy and run-time performance on par with the well-established and highly-optimized Faster RCNN baseline on the challenging COCO object detection dataset. Moreover, DETR can be easily generalized to produce panoptic segmentation in a unified manner. We show that it significantly outperforms competitive baselines. + +
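As a rough illustration of the bipartite matching mentioned above (a toy sketch, not the `HungarianAssigner` used in the config below; the GIoU cost term is omitted and the 1.0/5.0 weights simply mirror the `cls_cost`/`reg_cost` weights), each ground-truth box is assigned to exactly one of the fixed set of query predictions by minimizing a combined cost:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment


def hungarian_match(pred_probs, pred_boxes, gt_labels, gt_boxes,
                    cls_weight=1.0, box_weight=5.0):
    """pred_probs: (num_queries, num_classes), pred_boxes: (num_queries, 4),
    gt_labels: (num_gt,), gt_boxes: (num_gt, 4); boxes in normalized cxcywh."""
    cls_cost = -pred_probs[:, gt_labels]                            # (num_queries, num_gt)
    box_cost = np.abs(pred_boxes[:, None] - gt_boxes[None]).sum(-1)  # L1 box distance
    cost = cls_weight * cls_cost + box_weight * box_cost
    query_idx, gt_idx = linear_sum_assignment(cost)                 # unique one-to-one matches
    return list(zip(query_idx.tolist(), gt_idx.tolist()))
```

Queries left unmatched are trained towards a "no object" class, which is why no NMS is needed at inference time.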
    + +## Results and Models + +| Backbone | Model | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :---: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | DETR | 150e | 7.9 | | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detr/detr_r50_8x2_150e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835.log.json) | + +## Citation + +We provide the config files for DETR: [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872). + +```latex +@inproceedings{detr, + author = {Nicolas Carion and + Francisco Massa and + Gabriel Synnaeve and + Nicolas Usunier and + Alexander Kirillov and + Sergey Zagoruyko}, + title = {End-to-End Object Detection with Transformers}, + booktitle = {ECCV}, + year = {2020} +} +``` diff --git a/downstream/mmdetection/configs/detr/detr_r50_8x2_150e_coco.py b/downstream/mmdetection/configs/detr/detr_r50_8x2_150e_coco.py new file mode 100644 index 0000000..892447d --- /dev/null +++ b/downstream/mmdetection/configs/detr/detr_r50_8x2_150e_coco.py @@ -0,0 +1,150 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +model = dict( + type='DETR', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + bbox_head=dict( + type='DETRHead', + num_classes=80, + in_channels=2048, + transformer=dict( + type='Transformer', + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + dropout=0.1) + ], + feedforward_channels=2048, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=6, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + dropout=0.1), + feedforward_channels=2048, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')), + )), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + loss_cls=dict( + type='CrossEntropyLoss', + bg_cls_weight=0.1, + use_sigmoid=False, + loss_weight=1.0, + class_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), + test_cfg=dict(max_per_img=100)) 
+img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different +# from the default setting in mmdet. +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[[ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ]]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=1), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +# test_pipeline, NOTE the Pad's size_divisor is different from the default +# setting (size_divisor=32). While there is little effect on the performance +# whether we use the default setting or use size_divisor=1. +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=1), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='AdamW', + lr=0.0001, + weight_decay=0.0001, + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[100]) +runner = dict(type='EpochBasedRunner', max_epochs=150) diff --git a/downstream/mmdetection/configs/detr/metafile.yml b/downstream/mmdetection/configs/detr/metafile.yml new file mode 100644 index 0000000..45622cf --- /dev/null +++ b/downstream/mmdetection/configs/detr/metafile.yml @@ -0,0 +1,33 @@ +Collections: + - Name: DETR + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Multi Scale Train + - Gradient Clip + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - Transformer + Paper: + URL: https://arxiv.org/abs/2005.12872 + Title: 'End-to-End Object Detection with Transformers' + README: configs/detr/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/detectors/detr.py#L7 + Version: v2.7.0 + +Models: + - Name: detr_r50_8x2_150e_coco + In Collection: DETR + Config: configs/detr/detr_r50_8x2_150e_coco.py + Metadata: + Training Memory (GB): 7.9 + Epochs: 150 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth diff --git 
a/downstream/mmdetection/configs/double_heads/README.md b/downstream/mmdetection/configs/double_heads/README.md new file mode 100644 index 0000000..4a149b5 --- /dev/null +++ b/downstream/mmdetection/configs/double_heads/README.md @@ -0,0 +1,32 @@ +# Double Heads + +> [Rethinking Classification and Localization for Object Detection](https://arxiv.org/abs/1904.06493) + + + +## Abstract + +Two head structures (i.e. fully connected head and convolution head) have been widely used in R-CNN based detectors for classification and localization tasks. However, there is a lack of understanding of how these two head structures work for these two tasks. To address this issue, we perform a thorough analysis and find an interesting fact that the two head structures have opposite preferences towards the two tasks. Specifically, the fully connected head (fc-head) is more suitable for the classification task, while the convolution head (conv-head) is more suitable for the localization task. Furthermore, we examine the output feature maps of both heads and find that fc-head has more spatial sensitivity than conv-head. Thus, fc-head has more capability to distinguish a complete object from part of an object, but is not robust to regress the whole object. Based upon these findings, we propose a Double-Head method, which has a fully connected head focusing on classification and a convolution head for bounding box regression. Without bells and whistles, our method gains +3.5 and +2.8 AP on the MS COCO dataset from Feature Pyramid Network (FPN) baselines with ResNet-50 and ResNet-101 backbones, respectively. + +
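As a toy sketch of the split described in the abstract (layer counts and widths are illustrative, not the `DoubleConvFCBBoxHead` configured below), the same RoI feature is fed to a fully connected branch for classification and a convolutional branch for box regression:

```python
import torch.nn as nn


class ToyDoubleHead(nn.Module):
    """fc-head for classification, conv-head for bounding box regression."""

    def __init__(self, in_channels=256, roi_size=7, num_classes=80):
        super().__init__()
        self.fc_cls = nn.Sequential(              # fc-head -> class scores (incl. background)
            nn.Flatten(),
            nn.Linear(in_channels * roi_size * roi_size, 1024), nn.ReLU(inplace=True),
            nn.Linear(1024, num_classes + 1))
        self.conv_reg = nn.Sequential(            # conv-head -> per-class box deltas
            nn.Conv2d(in_channels, 1024, 3, padding=1), nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(1024, num_classes * 4))

    def forward(self, roi_feat):                  # roi_feat: (num_rois, in_channels, 7, 7)
        return self.fc_cls(roi_feat), self.conv_reg(roi_feat)
```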
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 6.8 | 9.5 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130_220238.log.json) | + +## Citation + +```latex +@article{wu2019rethinking, + title={Rethinking Classification and Localization for Object Detection}, + author={Yue Wu and Yinpeng Chen and Lu Yuan and Zicheng Liu and Lijuan Wang and Hongzhi Li and Yun Fu}, + year={2019}, + eprint={1904.06493}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/downstream/mmdetection/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..9b8118b --- /dev/null +++ b/downstream/mmdetection/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,23 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + type='DoubleHeadRoIHead', + reg_roi_scale_factor=1.3, + bbox_head=dict( + _delete_=True, + type='DoubleConvFCBBoxHead', + num_convs=4, + num_fcs=2, + in_channels=256, + conv_out_channels=1024, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) diff --git a/downstream/mmdetection/configs/double_heads/metafile.yml b/downstream/mmdetection/configs/double_heads/metafile.yml new file mode 100644 index 0000000..6fe9b7a --- /dev/null +++ b/downstream/mmdetection/configs/double_heads/metafile.yml @@ -0,0 +1,41 @@ +Collections: + - Name: Rethinking Classification and Localization for Object Detection + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/pdf/1904.06493 + Title: 'Rethinking Classification and Localization for Object Detection' + README: configs/double_heads/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/roi_heads/double_roi_head.py#L6 + Version: v2.0.0 + +Models: + - Name: dh_faster_rcnn_r50_fpn_1x_coco + In Collection: Rethinking Classification and Localization for Object Detection + Config: configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.8 + inference time (ms/im): + - value: 105.26 + hardware: V100 + 
backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth diff --git a/downstream/mmdetection/configs/dyhead/README.md b/downstream/mmdetection/configs/dyhead/README.md new file mode 100644 index 0000000..8e6aed3 --- /dev/null +++ b/downstream/mmdetection/configs/dyhead/README.md @@ -0,0 +1,52 @@ +# DyHead + +> [Dynamic Head: Unifying Object Detection Heads with Attentions](https://arxiv.org/abs/2106.08322) + + + +## Abstract + +The complex nature of combining localization and classification in object detection has resulted in the flourished development of methods. Previous works tried to improve the performance in various object detection heads but failed to present a unified view. In this paper, we present a novel dynamic head framework to unify object detection heads with attentions. By coherently combining multiple self-attention mechanisms between feature levels for scale-awareness, among spatial locations for spatial-awareness, and within output channels for task-awareness, the proposed approach significantly improves the representation ability of object detection heads without any computational overhead. Further experiments demonstrate the effectiveness and efficiency of the proposed dynamic head on the COCO benchmark. With a standard ResNeXt-101-DCN backbone, we largely improve the performance over popular object detectors and achieve a new state-of-the-art at 54.0 AP. Furthermore, with the latest transformer backbone and extra data, we can push the current best COCO result to a new record at 60.6 AP. + +
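A heavily simplified sketch of where the three attentions act (the gates below are stand-ins; the actual `DyHead` block uses deformable convolution for the spatial term and DyReLU for the task term): given features stacked over pyramid levels, a level-wise gate provides scale-awareness, a per-pixel gate spatial-awareness, and a channel-wise gate task-awareness.

```python
import torch
import torch.nn as nn


class ToyDyHeadBlock(nn.Module):
    """Level-, pixel- and channel-wise gating over stacked pyramid features."""

    def __init__(self, channels=256):
        super().__init__()
        self.scale_gate = nn.Linear(channels, 1)                   # scale-aware (per level)
        self.spatial_gate = nn.Conv2d(channels, 1, 3, padding=1)   # spatial-aware (per pixel)
        self.task_gate = nn.Linear(channels, channels)             # task-aware (per channel)

    def forward(self, feats):                                      # feats: (num_levels, C, H, W)
        level_w = torch.sigmoid(self.scale_gate(feats.mean(dim=(2, 3))))
        x = feats * level_w.view(-1, 1, 1, 1)
        x = x * torch.sigmoid(self.spatial_gate(x))
        chan_w = torch.sigmoid(self.task_gate(x.mean(dim=(2, 3))))
        return x * chan_w[:, :, None, None]
```

In the configs below, the real block is simply appended to the neck after `FPN` as `dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6)`.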
+ +## Results and Models + +| Method | Backbone | Style | Setting | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----: | :------: | :-----: | :----------: | :-----: | :------: | :------------: | :----: | :----------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ATSS | R-50 | caffe | reproduction | 1x | 5.4 | 13.2 | 42.5 | [config](./atss_r50_caffe_fpn_dyhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939.log.json) | +| ATSS | R-50 | pytorch | simple | 1x | 4.9 | 13.7 | 43.3 | [config](./atss_r50_fpn_dyhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314.log.json) | + +- We trained the above models with 4 GPUs and 4 `samples_per_gpu`. +- The `reproduction` setting aims to reproduce the official implementation based on Detectron2. +- The `simple` setting serves as a minimal example of using DyHead in MMDetection. Specifically, + - it adds `DyHead` to `neck` after `FPN` + - it sets `stacked_convs=0` in `bbox_head` +- The `simple` setting achieves higher AP than the original implementation. + We have not conducted an ablation study between the two settings. + `dict(type='Pad', size_divisor=128)` may further improve AP by encouraging spatial alignment across pyramid levels, although large padding reduces efficiency. + +We also trained the model with a Swin-L backbone. Results are shown below.
+ +| Method | Backbone | Style | Setting | Lr schd | mstrain | box AP | Config | Download | +| :----: | :------: | :---: | :----------: | :-----: | :------: | :----: | :----------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| ATSS | Swin-L | caffe | reproduction | 2x | 480~1200 | 56.2 | [config](./atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315.log.json) | + +## Relation to Other Methods + +- DyHead can be regarded as an improved [SEPC](https://arxiv.org/abs/2005.03101) with [DyReLU modules](https://arxiv.org/abs/2003.10027) and simplified [SE blocks](https://arxiv.org/abs/1709.01507). +- Xiyang Dai et al., the author team of DyHead, adopt it for [Dynamic DETR](https://openaccess.thecvf.com/content/ICCV2021/html/Dai_Dynamic_DETR_End-to-End_Object_Detection_With_Dynamic_Attention_ICCV_2021_paper.html). + The description of Dynamic Encoder in Sec. 3.2 will help you understand DyHead. + +## Citation + +```latex +@inproceedings{DyHead_CVPR2021, + author = {Dai, Xiyang and Chen, Yinpeng and Xiao, Bin and Chen, Dongdong and Liu, Mengchen and Yuan, Lu and Zhang, Lei}, + title = {Dynamic Head: Unifying Object Detection Heads With Attentions}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2021} +} +``` diff --git a/downstream/mmdetection/configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py b/downstream/mmdetection/configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py new file mode 100644 index 0000000..223b653 --- /dev/null +++ b/downstream/mmdetection/configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py @@ -0,0 +1,112 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='ATSS', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + dict( + type='DyHead', + in_channels=256, + out_channels=256, + num_blocks=6, + # disable zero_init_offset to follow official implementation + zero_init_offset=False) + ], + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + pred_kernel_size=1, # follow DyHead official implementation + stacked_convs=0, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128], + center_offset=0.5), # follow DyHead official implementation + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + 
target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) + +# use caffe img_norm, size_divisor=128, pillow resize +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=(1333, 800), + keep_ratio=True, + backend='pillow'), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=128), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True, backend='pillow'), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=128), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py b/downstream/mmdetection/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py new file mode 100644 index 0000000..8c5109d --- /dev/null +++ b/downstream/mmdetection/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='ATSS', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6) + ], + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + stacked_convs=0, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + 
min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py b/downstream/mmdetection/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py new file mode 100644 index 0000000..dc9c328 --- /dev/null +++ b/downstream/mmdetection/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py @@ -0,0 +1,164 @@ +_base_ = '../_base_/default_runtime.py' + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa +model = dict( + type='ATSS', + backbone=dict( + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=12, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=[ + dict( + type='FPN', + in_channels=[384, 768, 1536], + out_channels=256, + start_level=0, + add_extra_convs='on_output', + num_outs=5), + dict( + type='DyHead', + in_channels=256, + out_channels=256, + num_blocks=6, + # disable zero_init_offset to follow official implementation + zero_init_offset=False) + ], + bbox_head=dict( + type='ATSSHead', + num_classes=80, + in_channels=256, + pred_kernel_size=1, # follow DyHead official implementation + stacked_convs=0, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128], + center_offset=0.5), # follow DyHead official implementation + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(2000, 480), (2000, 1200)], + multiscale_mode='range', + keep_ratio=True, + backend='pillow'), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=128), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2000, 1200), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True, backend='pillow'), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=128), 
+ dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') + +# optimizer +optimizer_config = dict(grad_clip=None) +optimizer = dict( + type='AdamW', + lr=0.00005, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/dyhead/metafile.yml b/downstream/mmdetection/configs/dyhead/metafile.yml new file mode 100644 index 0000000..3fb7370 --- /dev/null +++ b/downstream/mmdetection/configs/dyhead/metafile.yml @@ -0,0 +1,76 @@ +Collections: + - Name: DyHead + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 4x T4 GPUs + Architecture: + - ATSS + - DyHead + - FPN + - ResNet + - Deformable Convolution + - Pyramid Convolution + Paper: + URL: https://arxiv.org/abs/2106.08322 + Title: 'Dynamic Head: Unifying Object Detection Heads with Attentions' + README: configs/dyhead/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/necks/dyhead.py#L130 + Version: v2.22.0 + +Models: + - Name: atss_r50_caffe_fpn_dyhead_1x_coco + In Collection: DyHead + Config: configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py + Metadata: + Training Memory (GB): 5.4 + inference time (ms/im): + - value: 75.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth + + - Name: atss_r50_fpn_dyhead_1x_coco + In Collection: DyHead + Config: configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py + Metadata: + Training Memory (GB): 4.9 + inference time (ms/im): + - value: 73.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth + + - Name: atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco + In Collection: DyHead + Config: configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 58.4 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 56.2 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth diff --git a/downstream/mmdetection/configs/dynamic_rcnn/README.md b/downstream/mmdetection/configs/dynamic_rcnn/README.md new file mode 100644 index 0000000..0045df7 --- /dev/null +++ b/downstream/mmdetection/configs/dynamic_rcnn/README.md @@ -0,0 +1,30 @@ +# Dynamic R-CNN + +> [Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training](https://arxiv.org/abs/2004.06002) + + + +## Abstract + +Although two-stage object detectors have continuously advanced the state-of-the-art performance in recent years, the training process itself is far from crystal. In this work, we first point out the inconsistency problem between the fixed network settings and the dynamic training procedure, which greatly affects the performance. For example, the fixed label assignment strategy and regression loss function cannot fit the distribution change of proposals and thus are harmful to training high quality detectors. Consequently, we propose Dynamic R-CNN to adjust the label assignment criteria (IoU threshold) and the shape of regression loss function (parameters of SmoothL1 Loss) automatically based on the statistics of proposals during training. This dynamic design makes better use of the training samples and pushes the detector to fit more high quality samples. Specifically, our method improves upon ResNet-50-FPN baseline with 1.9% AP and 5.5% AP90 on the MS COCO dataset with no extra overhead. + +
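The following is a small sketch of that dynamic schedule, assuming statistics are recorded once per image and the update rule is a mean/median over the recent window; the `initial_iou`, `initial_beta`, `update_iter_interval`, `iou_topk` and `beta_topk` names echo the config below, but the exact bookkeeping in mmdet's `DynamicRoIHead` may differ.

```python
import numpy as np


class ToyDynamicSchedule:
    """Periodically update the assignment IoU threshold and SmoothL1 beta."""

    def __init__(self, initial_iou=0.4, initial_beta=1.0,
                 update_iter_interval=100, iou_topk=75, beta_topk=10):
        self.iou_thr, self.beta = initial_iou, initial_beta
        self.interval = update_iter_interval
        self.iou_topk, self.beta_topk = iou_topk, beta_topk
        self.iou_history, self.beta_history = [], []

    def record(self, proposal_ious, reg_errors):
        # Per image: keep the iou_topk-th largest IoU and the beta_topk-th smallest error.
        self.iou_history.append(np.sort(proposal_ious)[::-1][self.iou_topk - 1])
        self.beta_history.append(np.sort(reg_errors)[self.beta_topk - 1])

    def step(self, iteration):
        if iteration % self.interval == 0 and self.iou_history:
            self.iou_thr = float(np.mean(self.iou_history))  # threshold rises as proposals improve
            self.beta = float(np.median(self.beta_history))  # beta shrinks as regression errors drop
            self.iou_history, self.beta_history = [], []
        return self.iou_thr, self.beta
```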
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | 3.8 | | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x_20200618_095048.log.json) | + +## Citation + +```latex +@article{DynamicRCNN, + author = {Hongkai Zhang and Hong Chang and Bingpeng Ma and Naiyan Wang and Xilin Chen}, + title = {Dynamic {R-CNN}: Towards High Quality Object Detection via Dynamic Training}, + journal = {arXiv preprint arXiv:2004.06002}, + year = {2020} +} +``` diff --git a/downstream/mmdetection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..f2deb99 --- /dev/null +++ b/downstream/mmdetection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,28 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + type='DynamicRoIHead', + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict(nms=dict(iou_threshold=0.85)), + rcnn=dict( + dynamic_rcnn=dict( + iou_topk=75, + beta_topk=10, + update_iter_interval=100, + initial_iou=0.4, + initial_beta=1.0))), + test_cfg=dict(rpn=dict(nms=dict(iou_threshold=0.85)))) diff --git a/downstream/mmdetection/configs/dynamic_rcnn/metafile.yml b/downstream/mmdetection/configs/dynamic_rcnn/metafile.yml new file mode 100644 index 0000000..fec43db --- /dev/null +++ b/downstream/mmdetection/configs/dynamic_rcnn/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: Dynamic R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Dynamic R-CNN + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/pdf/2004.06002 + Title: 'Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training' + README: configs/dynamic_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/roi_heads/dynamic_roi_head.py#L11 + Version: v2.2.0 + +Models: + - Name: dynamic_rcnn_r50_fpn_1x_coco + In Collection: Dynamic R-CNN + Config: configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.9 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth diff --git a/downstream/mmdetection/configs/efficientnet/README.md b/downstream/mmdetection/configs/efficientnet/README.md new file mode 100644 index 0000000..99b0572 --- /dev/null +++ b/downstream/mmdetection/configs/efficientnet/README.md @@ -0,0 +1,30 @@ +# EfficientNet + +> [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946v5) + + + +## Introduction + +Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. + +To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters. + +## Results and Models + +### RetinaNet + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Efficientnet-b3 | pytorch | 1x | - | - | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806.log.json) | + +## Citation + +```latex +@article{tan2019efficientnet, + title={Efficientnet: Rethinking model scaling for convolutional neural networks}, + author={Tan, Mingxing and Le, Quoc V}, + journal={arXiv preprint arXiv:1905.11946}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/efficientnet/metafile.yml b/downstream/mmdetection/configs/efficientnet/metafile.yml new file mode 100644 index 0000000..de40b95 --- /dev/null +++ b/downstream/mmdetection/configs/efficientnet/metafile.yml @@ -0,0 +1,19 @@ +Models: + - Name: retinanet_effb3_fpn_crop896_8x4_1x_coco + 
In Collection: RetinaNet + Config: configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth + Paper: + URL: https://arxiv.org/abs/1905.11946v5 + Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' + README: configs/efficientnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/backbones/efficientnet.py#L159 + Version: v2.23.0 diff --git a/downstream/mmdetection/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py b/downstream/mmdetection/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py new file mode 100644 index 0000000..c90bc16 --- /dev/null +++ b/downstream/mmdetection/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py @@ -0,0 +1,94 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] + +cudnn_benchmark = True +norm_cfg = dict(type='BN', requires_grad=True) +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa +model = dict( + backbone=dict( + _delete_=True, + type='EfficientNet', + arch='b3', + drop_path_rate=0.2, + out_indices=(3, 4, 5), + frozen_stages=0, + norm_cfg=dict( + type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01), + norm_eval=False, + init_cfg=dict( + type='Pretrained', prefix='backbone', checkpoint=checkpoint)), + neck=dict( + in_channels=[48, 136, 384], + start_level=0, + out_channels=256, + relu_before_extra_convs=True, + no_norm_on_lateral=True, + norm_cfg=norm_cfg), + bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), + # training and testing settings + train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) + +# dataset settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_size = (896, 896) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=img_size, + ratio_range=(0.8, 1.2), + keep_ratio=True), + dict(type='RandomCrop', crop_size=img_size), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=img_size), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_size, + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=img_size), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer_config = dict(grad_clip=None) +optimizer = dict( + type='SGD', + lr=0.04, + momentum=0.9, + weight_decay=0.0001, + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.1, + step=[8, 11]) +# runtime settings 
+runner = dict(type='EpochBasedRunner', max_epochs=12) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (4 samples per GPU) +auto_scale_lr = dict(base_batch_size=32) diff --git a/downstream/mmdetection/configs/empirical_attention/README.md b/downstream/mmdetection/configs/empirical_attention/README.md new file mode 100644 index 0000000..fc2620a --- /dev/null +++ b/downstream/mmdetection/configs/empirical_attention/README.md @@ -0,0 +1,33 @@ +# Empirical Attention + +> [An Empirical Study of Spatial Attention Mechanisms in Deep Networks](https://arxiv.org/abs/1904.05873) + + + +## Abstract + +Attention mechanisms have become a popular component in deep neural networks, yet there has been little examination of how different influencing factors and methods for computing attention from these factors affect performance. Toward a better general understanding of attention mechanisms, we present an empirical study that ablates various spatial attention elements within a generalized attention formulation, encompassing the dominant Transformer attention as well as the prevalent deformable convolution and dynamic convolution modules. Conducted on a variety of applications, the study yields significant findings about spatial attention in deep networks, some of which run counter to conventional understanding. For example, we find that the query and key content comparison in Transformer attention is negligible for self-attention, but vital for encoder-decoder attention. A proper combination of deformable convolution with key content only saliency achieves the best accuracy-efficiency tradeoff in self-attention. Our results suggest that there exists much room for improvement in the design of attention mechanisms. + +
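The attention component codes in the results table below ('1111', '0010') are passed verbatim as the `attention_type` argument of mmcv's `GeneralizedAttention` plugin used by these configs; judging from the plugin's documentation, each of the four characters toggles one term of the generalized attention formulation (query-and-key content, query content with relative position, key content only, relative position only). As a purely illustrative sketch, another combination could be selected like this ('1010' is a hypothetical value, not one of the released configs):

```python
# Illustrative sketch only: enable the "query-and-key content" and
# "key content only" terms of GeneralizedAttention; '1010' is a hypothetical
# combination, not a released config.
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='1010',  # one 0/1 flag per attention term
                kv_stride=2),
            stages=(False, False, True, True),  # insert only in the last two ResNet stages
            position='after_conv2')
    ]))
```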
    + +## Results and Models + +| Backbone | Attention Component | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----------------: | :-: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | 1111 | N | 1x | 8.0 | 13.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130_210344.log.json) | +| R-50 | 0010 | N | 1x | 4.2 | 18.4 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130_210125.log.json) | +| R-50 | 1111 | Y | 1x | 8.0 | 12.7 | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130_204442.log.json) | +| R-50 | 0010 | Y | 1x | 4.2 | 17.1 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130_210410.log.json) | + +## Citation + +```latex +@article{zhu2019empirical, + title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks}, + author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, Stephen and Dai, Jifeng}, + journal={arXiv preprint arXiv:1904.05873}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py b/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py 
new file mode 100644 index 0000000..a544e3a --- /dev/null +++ b/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + stages=(False, False, True, True), + position='after_conv2') + ])) diff --git a/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py b/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py new file mode 100644 index 0000000..bbefd27 --- /dev/null +++ b/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + plugins=[ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + stages=(False, False, True, True), + position='after_conv2') + ], + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py b/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py new file mode 100644 index 0000000..13a4645 --- /dev/null +++ b/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='1111', + kv_stride=2), + stages=(False, False, True, True), + position='after_conv2') + ])) diff --git a/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py b/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py new file mode 100644 index 0000000..b1f26c0 --- /dev/null +++ b/downstream/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + plugins=[ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='1111', + kv_stride=2), + stages=(False, False, True, True), + position='after_conv2') + ], + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True))) diff --git a/downstream/mmdetection/configs/empirical_attention/metafile.yml b/downstream/mmdetection/configs/empirical_attention/metafile.yml new file mode 100644 index 0000000..923bcb2 --- /dev/null +++ b/downstream/mmdetection/configs/empirical_attention/metafile.yml @@ -0,0 +1,103 @@ +Collections: + - Name: Empirical Attention + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Deformable Convolution + - FPN + - RPN + - ResNet + - RoIAlign + - Spatial Attention + Paper: + URL: https://arxiv.org/pdf/1904.05873 + Title: 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' + README: configs/empirical_attention/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/generalized_attention.py#L10 + Version: v2.0.0 + +Models: + - Name: faster_rcnn_r50_fpn_attention_1111_1x_coco + In Collection: Empirical Attention + Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + inference time (ms/im): + - value: 72.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth + + - Name: faster_rcnn_r50_fpn_attention_0010_1x_coco + In Collection: Empirical Attention + Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + inference time (ms/im): + - value: 54.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth + + - Name: faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco + In Collection: Empirical Attention + Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + inference time (ms/im): + - value: 78.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth + + - Name: faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco + In Collection: Empirical Attention + Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + inference time (ms/im): + - value: 58.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth diff --git a/downstream/mmdetection/configs/fast_rcnn/README.md b/downstream/mmdetection/configs/fast_rcnn/README.md new file mode 100644 index 0000000..767f76c --- /dev/null +++ b/downstream/mmdetection/configs/fast_rcnn/README.md @@ -0,0 +1,73 @@ +# Fast R-CNN + +> [Fast R-CNN](https://arxiv.org/abs/1504.08083) + + + +## Abstract + +This paper proposes a Fast Region-based Convolutional Network method (Fast R-CNN) for object detection. Fast R-CNN builds on previous work to efficiently classify object proposals using deep convolutional networks. Compared to previous work, Fast R-CNN employs several innovations to improve training and testing speed while also increasing detection accuracy. Fast R-CNN trains the very deep VGG16 network 9x faster than R-CNN, is 213x faster at test-time, and achieves a higher mAP on PASCAL VOC 2012. 
Compared to SPPnet, Fast R-CNN trains VGG16 3x faster, tests 10x faster, and is more accurate. + +
    + +## Introduction + +Before training the Fast R-CNN, users should first train an [RPN](../rpn/README.md), and use the RPN to extract the region proposals. + +- Firstly, extract the region proposals of the val set by this command as below: + +```bash +./tools/dist_test.sh \ + configs/rpn_r50_fpn_1x_coco.py \ + checkpoints/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth \ + 8 \ + --out proposals/rpn_r50_fpn_1x_val2017.pkl +``` + +- Then, change the `ann_file` and `img_prefix` of `data.test` in the RPN config to train set as below: + +```python +data = dict( + test=dict( + ann_file='data/coco/annotations/instances_train2017.json', + img_prefix='data/coco/train2017/')) +``` + +- Extract the region proposals of the train set by this command as below: + +```bash +./tools/dist_test.sh \ + configs/rpn_r50_fpn_1x_coco.py \ + checkpoints/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth \ + 8 \ + --out proposals/rpn_r50_fpn_1x_train2017.pkl +``` + +- Modify the path of `proposal_file` in Fast R-CNN config as below: + +```python +data = dict( + train=dict( + proposal_file='proposals/rpn_r50_fpn_1x_train2017.pkl'), + val=dict( + proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl'), + test=dict( + proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl')) +``` + +Finally, users can start training the Fast R-CNN. + +## Results and Models + +## Citation + +```latex +@inproceedings{girshick2015fast, + title={Fast r-cnn}, + author={Girshick, Ross}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + year={2015} +} +``` diff --git a/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..3ab8e98 --- /dev/null +++ b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..83852b2 --- /dev/null +++ b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fast_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py new file mode 100644 index 0000000..c220885 --- /dev/null +++ b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fast_rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..f1b29ef --- /dev/null +++ b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,48 @@ +_base_ = './fast_rcnn_r50_fpn_1x_coco.py' + +model = dict( + backbone=dict( + norm_cfg=dict(type='BN', requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + 
checkpoint='open-mmlab://detectron2/resnet50_caffe'))) + +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=2000), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=None), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['proposals']), + dict( + type='ToDataContainer', + fields=[dict(key='proposals', stack=False)]), + dict(type='Collect', keys=['img', 'proposals']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..d2f080e --- /dev/null +++ b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,52 @@ +_base_ = [ + '../_base_/models/fast_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=2000), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=None), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['proposals']), + dict( + type='ToDataContainer', + fields=[dict(key='proposals', stack=False)]), + dict(type='Collect', keys=['img', 'proposals']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl', + pipeline=train_pipeline), + val=dict( + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + pipeline=test_pipeline), + test=dict( + proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', + pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py new file mode 100644 index 
0000000..228e856 --- /dev/null +++ b/downstream/mmdetection/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py @@ -0,0 +1,5 @@ +_base_ = './fast_rcnn_r50_fpn_1x_coco.py' + +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/faster_rcnn/README.md b/downstream/mmdetection/configs/faster_rcnn/README.md new file mode 100644 index 0000000..865d375 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/README.md @@ -0,0 +1,88 @@ +# Faster R-CNN + +> [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497) + + + +## Abstract + +State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. + +
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-C4 | caffe | 1x | - | - | 35.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152-3f885b85.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152.log.json) | +| R-50-DC5 | caffe | 1x | - | - | 37.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909-531f0f43.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909.log.json) | +| R-50-FPN | caffe | 1x | 3.8 | | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_20200504_180032.log.json) | +| R-50-FPN | pytorch | 1x | 4.0 | 21.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 3.4 | 28.8 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) | +| R-50-FPN | pytorch | 2x | - | - | 38.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_20200504_210434.log.json) | +| R-101-FPN | caffe | 1x | 5.7 | | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.398_20200504_180057-b269e9dd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_20200504_180057.log.json) | +| R-101-FPN | pytorch | 1x | 6.0 | 15.6 | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130_204655.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_20200504_210455.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.2 | 13.8 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203-cff10310.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203_000520.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.412_20200506_041400-64a12c0b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_20200506_041400.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.3 | 9.4 | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204_134340.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033-5961fa95.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033.log.json) |
+
+## Different regression loss
+
+We trained models with the R-50-FPN PyTorch-style backbone on the 1x schedule, varying only the box regression loss.
+
+| Backbone | Loss type | Mem (GB) | Inf time (fps) | box AP | Config | Download |
+| :------: | :------------: | :------: | :------------: | :----: | :------: | :------: |
+| R-50-FPN | L1Loss | 4.0 | 21.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) |
+| R-50-FPN | IoULoss | | | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco-fdd207f3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954.log.json) |
+| R-50-FPN | GIoULoss | | | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco-0eada910.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco_20200505_161120.log.json) |
+| R-50-FPN | BoundedIoULoss | | | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco-98ad993b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco_20200505_160738.log.json) |
+
+## Pre-trained Models
+
+We also train some models with longer schedules and multi-scale training. Users can fine-tune them for downstream tasks, as the sketch after this paragraph illustrates.
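A minimal sketch of such fine-tuning, mirroring the COCO-subset configs that appear later in this diff; the class tuple and checkpoint path below are placeholders for illustration, not released files:

```python
# Fine-tuning sketch (placeholder values): start from a pre-trained
# Faster R-CNN checkpoint and shrink the box head to a smaller label set,
# following the pattern of the coco-person configs in this diff.
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'

classes = ('person', 'bicycle')  # placeholder subset of the COCO categories
model = dict(roi_head=dict(bbox_head=dict(num_classes=2)))
data = dict(
    train=dict(classes=classes),
    val=dict(classes=classes),
    test=dict(classes=classes))

# Any checkpoint from the table below can be plugged in here.
load_from = 'checkpoints/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.pth'  # placeholder path
```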
+ +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-C4](./faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py) | caffe | 1x | - | | 35.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527-db276fed.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527.log.json) | +| [R-50-DC5](./faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py) | caffe | 1x | - | | 37.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851-b33d21b9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851.log.json) | +| [R-50-DC5](./faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py) | caffe | 3x | - | | 38.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107-34a53b2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107.log.json) | +| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py) | caffe | 2x | 3.7 | | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_bbox_mAP-0.397_20200504_231813-10b2de58.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_20200504_231813.log.json) | +| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | caffe | 3x | 3.7 | | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054-1f77628b.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054.log.json) | +| [R-50-FPN](./faster_rcnn_r50_fpn_mstrain_3x_coco.py) | pytorch | 3x | 3.9 | | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822-e10bd31c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822.log.json) | +| [R-101-FPN](./faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py) | caffe | 3x | 5.6 | | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742-a7ae426d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742.log.json) | +| [R-101-FPN](./faster_rcnn_r101_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.8 | | 41.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822-4d4d2ca8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822.log.json) | +| [X-101-32x4d-FPN](./faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 7.0 | | 42.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151-16b9b260.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151.log.json) | +| [X-101-32x8d-FPN](./faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 10.1 | | 42.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954-002e082a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954.log.json) | +| [X-101-64x4d-FPN](./faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 10.0 | | 43.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528-26c63de6.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528.log.json) |
+
+We further fine-tune some pre-trained models on COCO subsets that contain only a few of the 80 categories.
+
+| Backbone | Style | Class name | Pre-trained model | Mem (GB) | box AP | Config | Download |
+| -------- | ----- | ------------------ | ----------------- | -------- | ------ | ------ | -------- |
+| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py) | caffe | person | [R-50-FPN-Caffe-3x](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | 3.7 | 55.8 | [config](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929.log.json) |
+| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py) | caffe | person-bicycle-car | [R-50-FPN-Caffe-3x](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | 3.7 | 44.1 | [config](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car_20201216_173117-6eda6d92.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car_20201216_173117.log.json) |
+
+## Torchvision New Recipe (TNR)
+
+Torchvision released its high-precision ResNet models. The training details can be found on the [PyTorch website](https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/). Here, we ran grid searches over the learning rate and weight decay and found the optimal hyper-parameters for the detection task; a sketch of where these two knobs live in a config follows this paragraph.
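A rough sketch of where the backbone checkpoint and the two searched hyper-parameters sit in a config; all values below are placeholders, and the released settings are in `faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py` (linked in the table below):

```python
# Schematic sketch only: checkpoint path, learning rate and weight decay are
# placeholders; see faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py for the
# released values.
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'

model = dict(
    backbone=dict(
        init_cfg=dict(
            type='Pretrained',
            checkpoint='path/to/torchvision_tnr_resnet50.pth')))  # placeholder

# The two hyper-parameters covered by the grid search.
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)  # placeholder values
```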
+ +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-TNR](./faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py) | pytorch | 1x | - | | 40.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147-efedfda4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147.log.json) | + +## Citation + +```latex +@article{Ren_2017, + title={Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher={Institute of Electrical and Electronics Engineers (IEEE)}, + author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian}, + year={2017}, + month={Jun}, +} +``` diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..c6f078c --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..6a13fe9 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py @@ -0,0 +1,49 @@ +_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) + +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + 
dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..1de53a6 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py new file mode 100644 index 0000000..0d41599 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster_rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..0b498bb --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py new file mode 100644 index 0000000..b071962 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_caffe_c4.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) diff --git 
a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py new file mode 100644 index 0000000..f4d83e6 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py @@ -0,0 +1,38 @@ +_base_ = './faster_rcnn_r50_caffe_c4_1x_coco.py' +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py new file mode 100644 index 0000000..ee2010c --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py @@ -0,0 +1,37 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_caffe_dc5.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py new file mode 100644 index 0000000..14eaef2 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py @@ -0,0 +1,42 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_caffe_dc5.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', 
'../_base_/default_runtime.py' +] +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py new file mode 100644 index 0000000..403747f --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py @@ -0,0 +1,4 @@ +_base_ = './faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py' +# learning policy +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..56c01bd --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,41 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py new file mode 100644 index 0000000..b5aea6a --- /dev/null +++ 
b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py @@ -0,0 +1,15 @@ +_base_ = 'faster_rcnn_r50_caffe_fpn_1x_coco.py' + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[60000, 80000]) + +# Runner type +runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) + +checkpoint_config = dict(interval=10000) +evaluation = dict(interval=10000, metric='bbox') diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py new file mode 100644 index 0000000..4f1f376 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py @@ -0,0 +1,9 @@ +_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' +model = dict(roi_head=dict(bbox_head=dict(num_classes=3))) +classes = ('person', 'bicycle', 'car') +data = dict( + train=dict(classes=classes), + val=dict(classes=classes), + test=dict(classes=classes)) + +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py new file mode 100644 index 0000000..b5dfb4f --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py @@ -0,0 +1,9 @@ +_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' +model = dict(roi_head=dict(bbox_head=dict(num_classes=1))) +classes = ('person', ) +data = dict( + train=dict(classes=classes), + val=dict(classes=classes), + test=dict(classes=classes)) + +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py new file mode 100644 index 0000000..f807a19 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py @@ -0,0 +1,46 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + 
transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..df58973 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..9eeaace --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py @@ -0,0 +1,47 @@ +_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) + +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py new file mode 100644 index 0000000..74dca24 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py @@ -0,0 +1,15 @@ +_base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[60000, 80000]) + +# Runner type +runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) + +checkpoint_config = dict(interval=10000) +evaluation = dict(interval=10000, metric='bbox') diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..009bd93 --- /dev/null 
+++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py new file mode 100644 index 0000000..e77a7fa --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py new file mode 100644 index 0000000..648081f --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + reg_decoded_bbox=True, + loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0)))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_ciou_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_ciou_1x_coco.py new file mode 100644 index 0000000..886d566 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_ciou_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + reg_decoded_bbox=True, + loss_bbox=dict(type='CIoULoss', loss_weight=12.0)))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py new file mode 100644 index 0000000..acd4040 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py @@ -0,0 +1,3 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +# fp16 settings +fp16 = dict(loss_scale=512.) 
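The Faster R-CNN variants above are thin overrides on top of shared `_base_` files: each config lists its parents in `_base_` and then changes only what differs (a loss type, an lr schedule, or, as in the last file, `fp16 = dict(loss_scale=512.)`), with later values winning over inherited ones. A minimal sketch of how such a config resolves, assuming an mmcv 1.x environment and an illustrative local path (not part of this diff):

```python
# Sketch only: inspect how a `_base_`-composed config merges, assuming mmcv 1.x.
from mmcv import Config

# Illustrative path; point it at the config inside your own checkout.
cfg = Config.fromfile(
    'downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py')

# Values set directly in this file.
print(cfg.fp16)                  # expected: {'loss_scale': 512.0}

# Values inherited from the files listed in `_base_`.
print(cfg.model.backbone.depth)  # 50, from ../_base_/models/faster_rcnn_r50_fpn.py
print(cfg.runner)                # EpochBasedRunner with max_epochs=12, from schedule_1x.py

# Dump the merged result to review the fully resolved settings.
print(cfg.pretty_text[:500])
```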
diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py new file mode 100644 index 0000000..5556c49 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + reg_decoded_bbox=True, + loss_bbox=dict(type='GIoULoss', loss_weight=10.0)))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py new file mode 100644 index 0000000..ddf663e --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + bbox_head=dict( + reg_decoded_bbox=True, + loss_bbox=dict(type='IoULoss', loss_weight=10.0)))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..faf8f92 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py @@ -0,0 +1,3 @@ +_base_ = [ + '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' +] diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py new file mode 100644 index 0000000..f897e7c --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict(train_cfg=dict(rcnn=dict(sampler=dict(type='OHEMSampler')))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py new file mode 100644 index 0000000..759ae3a --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + test_cfg=dict( + rcnn=dict( + score_thr=0.05, + nms=dict(type='soft_nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py new file mode 100644 index 0000000..ecbfb92 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth' +model = dict( + backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint))) + +# `lr` and `weight_decay` have been searched to be optimal. 
+optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0001, + weight_decay=0.1, + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..3808c9f --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py new file mode 100644 index 0000000..e93f5d8 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster_rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..f55985d --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py @@ -0,0 +1,16 @@ +_base_ = [ + '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' +] +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..a5d5aeb --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py @@ -0,0 +1,62 @@ +_base_ = [ + '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' +] +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) + +# ResNeXt-101-32x8d model trained with Caffe2 at FB, +# so the mean and std need to be changed. 
+img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + to_rgb=False) + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..8bf2b65 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py new file mode 100644 index 0000000..7ea9b2d --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './faster_rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..80397f4 --- /dev/null +++ b/downstream/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py @@ -0,0 +1,16 @@ +_base_ = [ + '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' +] +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/faster_rcnn/metafile.yml b/downstream/mmdetection/configs/faster_rcnn/metafile.yml new file mode 100644 index 0000000..91d6751 --- /dev/null +++ 
b/downstream/mmdetection/configs/faster_rcnn/metafile.yml @@ -0,0 +1,451 @@ +Collections: + - Name: Faster R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - RPN + - ResNet + - RoIPool + Paper: + URL: https://arxiv.org/abs/1506.01497 + Title: "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" + README: configs/faster_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/faster_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: faster_rcnn_r50_caffe_c4_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152-3f885b85.pth + + - Name: faster_rcnn_r50_caffe_c4_mstrain_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527-db276fed.pth + + - Name: faster_rcnn_r50_caffe_dc5_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909-531f0f43.pth + + - Name: faster_rcnn_r50_caffe_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth + + - Name: faster_rcnn_r50_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 46.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth + + - Name: faster_rcnn_r50_fpn_fp16_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py + Metadata: + Training Memory (GB): 3.4 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 34.72 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth + + - 
Name: faster_rcnn_r50_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 46.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth + + - Name: faster_rcnn_r101_caffe_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.7 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.398_20200504_180057-b269e9dd.pth + + - Name: faster_rcnn_r101_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 64.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth + + - Name: faster_rcnn_r101_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 64.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth + + - Name: faster_rcnn_x101_32x4d_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.2 + inference time (ms/im): + - value: 72.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203-cff10310.pth + + - Name: faster_rcnn_x101_32x4d_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.2 + inference time (ms/im): + - value: 72.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.412_20200506_041400-64a12c0b.pth + + - Name: faster_rcnn_x101_64x4d_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 106.38 + 
hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth + + - Name: faster_rcnn_x101_64x4d_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 106.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033-5961fa95.pth + + - Name: faster_rcnn_r50_fpn_iou_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco-fdd207f3.pth + + - Name: faster_rcnn_r50_fpn_giou_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco-0eada910.pth + + - Name: faster_rcnn_r50_fpn_bounded_iou_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco-98ad993b.pth + + - Name: faster_rcnn_r50_caffe_dc5_mstrain_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851-b33d21b9.pth + + - Name: faster_rcnn_r50_caffe_dc5_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107-34a53b2c.pth + + - Name: faster_rcnn_r50_caffe_fpn_mstrain_2x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_bbox_mAP-0.397_20200504_231813-10b2de58.pth + + - Name: 
faster_rcnn_r50_caffe_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 3.7 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054-1f77628b.pth + + - Name: faster_rcnn_r50_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 3.9 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822-e10bd31c.pth + + - Name: faster_rcnn_r101_caffe_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 5.6 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742-a7ae426d.pth + + - Name: faster_rcnn_r101_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 5.8 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822-4d4d2ca8.pth + + - Name: faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 7.0 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151-16b9b260.pth + + - Name: faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 10.1 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954-002e082a.pth + + - Name: faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 10.0 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528-26c63de6.pth + + - Name: faster_rcnn_r50_fpn_tnr-pretrain_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 46.73 + hardware: V100 + 
backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147-efedfda4.pth diff --git a/downstream/mmdetection/configs/fcos/README.md b/downstream/mmdetection/configs/fcos/README.md new file mode 100644 index 0000000..76be365 --- /dev/null +++ b/downstream/mmdetection/configs/fcos/README.md @@ -0,0 +1,45 @@ +# FCOS + +> [FCOS: Fully Convolutional One-Stage Object Detection](https://arxiv.org/abs/1904.01355) + + + +## Abstract + +We propose a fully convolutional one-stage object detector (FCOS) to solve object detection in a per-pixel prediction fashion, analogue to semantic segmentation. Almost all state-of-the-art object detectors such as RetinaNet, SSD, YOLOv3, and Faster R-CNN rely on pre-defined anchor boxes. In contrast, our proposed detector FCOS is anchor box free, as well as proposal free. By eliminating the predefined set of anchor boxes, FCOS completely avoids the complicated computation related to anchor boxes such as calculating overlapping during training. More importantly, we also avoid all hyper-parameters related to anchor boxes, which are often very sensitive to the final detection performance. With the only post-processing non-maximum suppression (NMS), FCOS with ResNeXt-64x4d-101 achieves 44.7% in AP with single-model and single-scale testing, surpassing previous one-stage detectors with the advantage of being much simpler. For the first time, we demonstrate a much simpler and flexible detection framework achieving improved detection accuracy. We hope that the proposed FCOS framework can serve as a simple and strong alternative for many other instance-level tasks. + +
    + +## Results and Models + +| Backbone | Style | GN | MS train | Tricks | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :---: | :-: | :------: | :----: | :-: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | caffe | Y | N | N | N | 1x | 3.6 | 22.7 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/20201227_180009.log.json) | +| R-50 | caffe | Y | N | Y | N | 1x | 3.7 | - | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/20210105_135818.log.json) | +| R-50 | caffe | Y | N | Y | Y | 1x | 3.8 | - | 42.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/20210105_224556.log.json) | +| R-101 | caffe | Y | N | N | N | 1x | 5.5 | 17.3 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/20210103_155046.log.json) | + +| Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :-: | :------: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | caffe | Y | Y | 2x | 2.6 | 22.9 | 38.5 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20201227_161900.log.json) | +| R-101 | caffe | Y | Y | 2x | 5.5 | 17.3 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20210103_155046.log.json) | +| X-101 | pytorch | Y | Y | 2x | 10.0 | 9.7 | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/20210114_133041.log.json) | + +**Notes:** + +- The X-101 backbone is X-101-64x4d. +- Tricks means setting `norm_on_bbox`, `centerness_on_reg`, `center_sampling` as `True`. +- DCN means using `DCNv2` in both backbone and head. + +## Citation + +```latex +@article{tian2019fcos, + title={FCOS: Fully Convolutional One-Stage Object Detection}, + author={Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong}, + journal={arXiv preprint arXiv:1904.01355}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py b/downstream/mmdetection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py new file mode 100644 index 0000000..2699bdb --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py @@ -0,0 +1,54 @@ +_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' + +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + bbox_head=dict( + norm_on_bbox=True, + centerness_on_reg=True, + dcn_on_last_conv=False, + center_sampling=True, + conv_bias=True, + loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), + # training and testing settings + test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) + +# dataset settings +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + 
dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +optimizer_config = dict(_delete_=True, grad_clip=None) + +lr_config = dict(warmup='linear') diff --git a/downstream/mmdetection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py b/downstream/mmdetection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py new file mode 100644 index 0000000..cf93c91 --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py @@ -0,0 +1,56 @@ +_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' + +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + bbox_head=dict( + norm_on_bbox=True, + centerness_on_reg=True, + dcn_on_last_conv=True, + center_sampling=True, + conv_bias=True, + loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), + # training and testing settings + test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) + +# dataset settings +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +optimizer_config = dict(_delete_=True, grad_clip=None) + +lr_config = dict(warmup='linear') diff --git a/downstream/mmdetection/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py b/downstream/mmdetection/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py new file mode 100644 index 0000000..9f502e7 --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' +model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5)) diff --git a/downstream/mmdetection/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py b/downstream/mmdetection/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py new file mode 100644 index 0000000..45bea48 --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet101_caffe'))) diff --git 
a/downstream/mmdetection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py b/downstream/mmdetection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py new file mode 100644 index 0000000..f4d36f1 --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py @@ -0,0 +1,47 @@ +_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet101_caffe'))) +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py b/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py new file mode 100644 index 0000000..955787b --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py @@ -0,0 +1,106 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='FCOS', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet50_caffe')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) 
+train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict( + lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='constant', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[8, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py b/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py new file mode 100644 index 0000000..2816b16 --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py @@ -0,0 +1,4 @@ +# TODO: Remove this config after benchmarking all related configs +_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' + +data = dict(samples_per_gpu=4, workers_per_gpu=4) diff --git a/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py b/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py new file mode 100644 index 0000000..497d03f --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py @@ -0,0 +1,39 @@ +_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py 
b/downstream/mmdetection/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py new file mode 100644 index 0000000..e70e465 --- /dev/null +++ b/downstream/mmdetection/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py @@ -0,0 +1,60 @@ +_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict( + lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/fcos/metafile.yml b/downstream/mmdetection/configs/fcos/metafile.yml new file mode 100644 index 0000000..ae922eb --- /dev/null +++ b/downstream/mmdetection/configs/fcos/metafile.yml @@ -0,0 +1,146 @@ +Collections: + - Name: FCOS + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Group Normalization + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.01355 + Title: 'FCOS: Fully Convolutional One-Stage Object Detection' + README: configs/fcos/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fcos.py#L6 + Version: v2.0.0 + +Models: + - Name: fcos_r50_caffe_fpn_gn-head_1x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py + Metadata: + Training Memory (GB): 3.6 + inference time (ms/im): + - value: 44.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth + + - Name: fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco + In Collection: FCOS + Config: configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py + Metadata: + Training Memory (GB): 3.7 + Epochs: 12 + 
Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth + + - Name: fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco + In Collection: FCOS + Config: configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth + + - Name: fcos_r101_caffe_fpn_gn-head_1x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + inference time (ms/im): + - value: 57.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth + + - Name: fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py + Metadata: + Training Memory (GB): 2.6 + inference time (ms/im): + - value: 43.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth + + - Name: fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco + In Collection: FCOS + Config: configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py + Metadata: + Training Memory (GB): 5.5 + inference time (ms/im): + - value: 57.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth + + - Name: fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco + In Collection: FCOS + Config: configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py + Metadata: + Training Memory (GB): 10.0 + inference time (ms/im): + - value: 103.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth diff --git a/downstream/mmdetection/configs/foveabox/README.md b/downstream/mmdetection/configs/foveabox/README.md new file mode 100644 index 0000000..7fcd094 --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/README.md @@ -0,0 +1,53 @@ +# FoveaBox + +> [FoveaBox: Beyond Anchor-based Object 
Detector](https://arxiv.org/abs/1904.03797) + + + +## Abstract + +We present FoveaBox, an accurate, flexible, and completely anchor-free framework for object detection. While almost all state-of-the-art object detectors utilize predefined anchors to enumerate possible locations, scales and aspect ratios for the search of the objects, their performance and generalization ability are also limited to the design of anchors. Instead, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object. The scales of target boxes are naturally associated with feature pyramid representations. In FoveaBox, an instance is assigned to adjacent feature levels to make the model more accurate. We demonstrate its effectiveness on standard benchmarks and report extensive experimental analysis. Without bells and whistles, FoveaBox achieves state-of-the-art single model performance on the standard COCO and Pascal VOC object detection benchmark. More importantly, FoveaBox avoids all computation and hyper-parameters related to anchor boxes, which are often sensitive to the final detection performance. We believe the simple and effective approach will serve as a solid baseline and help ease future research for object detection. +
    + +## Introduction + +FoveaBox is an accurate, flexible and completely anchor-free object detection system for object detection framework, as presented in our paper [https://arxiv.org/abs/1904.03797](https://arxiv.org/abs/1904.03797): +Different from previous anchor-based methods, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object. + +## Results and Models + +### Results on R50/101-FPN + +| Backbone | Style | align | ms-train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :-----: | :---: | :------: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | N | 1x | 5.6 | 24.1 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219_223025.log.json) | +| R-50 | pytorch | N | N | 2x | 5.6 | - | 37.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203-2df792b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203_112043.log.json) | +| R-50 | pytorch | Y | N | 2x | 8.1 | 19.4 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203_134252.log.json) | +| R-50 | pytorch | Y | Y | 2x | 8.1 | 18.3 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205-85ce26cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205_112557.log.json) | +| R-101 | pytorch | N | N | 1x | 9.2 | 17.4 | 38.6 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219-05e38f1c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219_011740.log.json) | +| R-101 | pytorch | N | N | 2x | 11.7 | - | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208-02320ea4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208_202059.log.json) | +| R-101 | pytorch | Y | N | 2x | 11.7 | 14.7 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208-c39a027a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208_203337.log.json) | +| R-101 | pytorch | Y | Y | 2x | 11.7 | 14.7 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208-649c5eb6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208_202124.log.json) | + +\[1\] *1x and 2x mean the model is trained for 12 and 24 epochs, respectively.* \ +\[2\] *Align means utilizing deformable convolution to align the cls branch.* \ +\[3\] *All results are obtained with a single model and without any test time data augmentation.*\ +\[4\] *We use 4 GPUs for training.* + +Any pull requests or issues are welcome. + +## Citation + +Please consider citing our paper in your publications if the project helps your research. BibTeX reference is as follows. 
+ +```latex +@article{kong2019foveabox, + title={FoveaBox: Beyond Anchor-based Object Detector}, + author={Kong, Tao and Sun, Fuchun and Liu, Huaping and Jiang, Yuning and Shi, Jianbo}, + journal={arXiv preprint arXiv:1904.03797}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py b/downstream/mmdetection/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py new file mode 100644 index 0000000..c5d1784 --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py @@ -0,0 +1,12 @@ +_base_ = './fovea_r50_fpn_4x4_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + with_deform=True, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py b/downstream/mmdetection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py new file mode 100644 index 0000000..cc5affe --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py @@ -0,0 +1,29 @@ +_base_ = './fovea_r50_fpn_4x4_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + with_deform=True, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py b/downstream/mmdetection/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py new file mode 100644 index 0000000..e7265bc --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py @@ -0,0 +1,10 @@ +_base_ = './fovea_r50_fpn_4x4_1x_coco.py' +model = dict( + bbox_head=dict( + with_deform=True, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py b/downstream/mmdetection/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py new file mode 100644 index 0000000..8fc39be --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py @@ -0,0 +1,25 @@ +_base_ = './fovea_r50_fpn_4x4_1x_coco.py' +model = dict( + bbox_head=dict( + with_deform=True, + norm_cfg=dict(type='GN', 
num_groups=32, requires_grad=True))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py b/downstream/mmdetection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py new file mode 100644 index 0000000..9201af1 --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fovea_r50_fpn_4x4_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py b/downstream/mmdetection/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py new file mode 100644 index 0000000..1ef5243 --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fovea_r50_fpn_4x4_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py b/downstream/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py new file mode 100644 index 0000000..7e986eb --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py @@ -0,0 +1,52 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='FOVEA', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + num_outs=5, + add_extra_convs='on_input'), + bbox_head=dict( + type='FoveaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + base_edge_list=[16, 32, 64, 128, 256], + scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), + sigma=0.4, + with_deform=False, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=1.50, + alpha=0.4, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), + # training and testing settings + train_cfg=dict(), + test_cfg=dict( + nms_pre=1000, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) +data = dict(samples_per_gpu=4, workers_per_gpu=4) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py b/downstream/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py new file mode 100644 index 0000000..68ce4d2 --- /dev/null +++ 
b/downstream/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './fovea_r50_fpn_4x4_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/foveabox/metafile.yml b/downstream/mmdetection/configs/foveabox/metafile.yml new file mode 100644 index 0000000..fe9a283 --- /dev/null +++ b/downstream/mmdetection/configs/foveabox/metafile.yml @@ -0,0 +1,172 @@ +Collections: + - Name: FoveaBox + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 4x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.03797 + Title: 'FoveaBox: Beyond Anchor-based Object Detector' + README: configs/foveabox/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fovea.py#L6 + Version: v2.0.0 + +Models: + - Name: fovea_r50_fpn_4x4_1x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py + Metadata: + Training Memory (GB): 5.6 + inference time (ms/im): + - value: 41.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth + + - Name: fovea_r50_fpn_4x4_2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py + Metadata: + Training Memory (GB): 5.6 + inference time (ms/im): + - value: 41.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203-2df792b1.pth + + - Name: fovea_align_r50_fpn_gn-head_4x4_2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py + Metadata: + Training Memory (GB): 8.1 + inference time (ms/im): + - value: 51.55 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth + + - Name: fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py + Metadata: + Training Memory (GB): 8.1 + inference time (ms/im): + - value: 54.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205-85ce26cb.pth + + - Name: fovea_r101_fpn_4x4_1x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py + Metadata: + Training Memory (GB): 9.2 + inference time (ms/im): + - value: 57.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) 
+ Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219-05e38f1c.pth + + - Name: fovea_r101_fpn_4x4_2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py + Metadata: + Training Memory (GB): 11.7 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208-02320ea4.pth + + - Name: fovea_align_r101_fpn_gn-head_4x4_2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py + Metadata: + Training Memory (GB): 11.7 + inference time (ms/im): + - value: 68.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208-c39a027a.pth + + - Name: fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco + In Collection: FoveaBox + Config: configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py + Metadata: + Training Memory (GB): 11.7 + inference time (ms/im): + - value: 68.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208-649c5eb6.pth diff --git a/downstream/mmdetection/configs/fpg/README.md b/downstream/mmdetection/configs/fpg/README.md new file mode 100644 index 0000000..c9bb1fe --- /dev/null +++ b/downstream/mmdetection/configs/fpg/README.md @@ -0,0 +1,43 @@ +# FPG + +> [Feature Pyramid Grids](https://arxiv.org/abs/2004.03580) + + + +## Abstract + +Feature pyramid networks have been widely adopted in the object detection literature to improve feature representations for better handling of variations in scale. In this paper, we present Feature Pyramid Grids (FPG), a deep multi-pathway feature pyramid, that represents the feature scale-space as a regular grid of parallel bottom-up pathways which are fused by multi-directional lateral connections. FPG can improve single-pathway feature pyramid networks by significantly increasing its performance at similar computation cost, highlighting importance of deep pyramid representations. In addition to its general and uniform structure, over complicated structures that have been found with neural architecture search, it also compares favorably against such approaches without relying on search. We hope that FPG with its uniform and effective nature can serve as a strong component for future work in object recognition. + +
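The configs added below wire the FPG neck into Faster R-CNN, Mask R-CNN and RetinaNet through `_base_` inheritance, and the `-chn128` variants only narrow the channel widths (see the note under Results and Models). A minimal sketch of how they compose, assuming `mmcv-full` is installed and the script runs from the mmdetection root so the relative `_base_` chain resolves:

```python
# Minimal sketch of how the FPG configs below compose via `_base_`.
# Assumptions: mmcv-full installed, run from the mmdetection root so the
# relative `_base_` chain resolves.
from mmcv import Config

full = Config.fromfile('configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py')
slim = Config.fromfile(
    'configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py')

# The base config swaps the FPN neck for FPG; the -chn128 variant only
# narrows channel widths on top of it.
print(full.model.neck.type, full.model.neck.out_channels)             # FPG 256
print(slim.model.neck.out_channels, slim.model.neck.inter_channels)   # 128 128
print(slim.model.roi_head.bbox_head.in_channels)                      # 128
```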
    + +## Results and Models + +We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. +All backbones are Resnet-50 in pytorch style. + +| Method | Neck | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :--------: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | FPG | 50e | 20.0 | - | 42.3 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856-74109f42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856.log.json) | +| Faster R-CNN | FPG-chn128 | 50e | 11.9 | - | 41.2 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857-9376aa9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857.log.json) | +| Faster R-CNN | FPN | 50e | 20.0 | - | 38.9 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpn_crop640_50e_coco/faster_rcnn_r50_fpn_crop640_50e_coco_20220311_011857-be7c9f42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpn_crop640_50e_coco/faster_rcnn_r50_fpn_crop640_50e_coco_20220311_011857.log.json) | +| Mask R-CNN | FPG | 50e | 23.2 | - | 43.0 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857.log.json) | +| Mask R-CNN | FPG-chn128 | 50e | 15.3 | - | 41.7 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859-043c9b4e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859.log.json) | +| Mask R-CNN | FPN | 50e | 23.2 | - | 49.6 | 35.6 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpn_crop640_50e_coco/mask_rcnn_r50_fpn_crop640_50e_coco_20220311_011855-a756664a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpn_crop640_50e_coco/mask_rcnn_r50_fpn_crop640_50e_coco_20220311_011855.log.json) | +| RetinaNet | FPG | 50e | 20.8 | - | 40.5 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809-b0bcf5f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809.log.json) | +| RetinaNet | FPG-chn128 | 50e | 19.9 | - | 39.9 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829-ee99a686.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829.log.json) | + +**Note**: Chn128 means to decrease the number of channels of features and convs from 256 (default) to 128 in +Neck and BBox Head, which can greatly decrease memory consumption without sacrificing much precision. + +## Citation + +```latex +@article{chen2020feature, + title={Feature pyramid grids}, + author={Chen, Kai and Cao, Yuhang and Loy, Chen Change and Lin, Dahua and Feichtenhofer, Christoph}, + journal={arXiv preprint arXiv:2004.03580}, + year={2020} +} +``` diff --git a/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py b/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py new file mode 100644 index 0000000..4535034 --- /dev/null +++ b/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py @@ -0,0 +1,9 @@ +_base_ = 'faster_rcnn_r50_fpg_crop640_50e_coco.py' + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + neck=dict(out_channels=128, inter_channels=128), + rpn_head=dict(in_channels=128), + roi_head=dict( + bbox_roi_extractor=dict(out_channels=128), + bbox_head=dict(in_channels=128))) diff --git a/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py b/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py new file mode 100644 index 0000000..3ab2a2c --- /dev/null +++ b/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py @@ -0,0 +1,48 @@ +_base_ = 'faster_rcnn_r50_fpn_crop640_50e_coco.py' + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + neck=dict( + type='FPG', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + inter_channels=256, + num_outs=5, + stack_times=9, + paths=['bu'] * 9, + same_down_trans=None, + same_up_trans=dict( + type='conv', + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_lateral_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_down_trans=dict( + type='interpolation_conv', + mode='nearest', + kernel_size=3, + 
norm_cfg=norm_cfg, + order=('act', 'conv', 'norm'), + inplace=False), + across_up_trans=None, + across_skip_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + output_trans=dict( + type='last_conv', + kernel_size=3, + order=('act', 'conv', 'norm'), + inplace=False), + norm_cfg=norm_cfg, + skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) diff --git a/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py b/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py new file mode 100644 index 0000000..e4ec940 --- /dev/null +++ b/downstream/mmdetection/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py @@ -0,0 +1,73 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + backbone=dict(norm_cfg=norm_cfg, norm_eval=False), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=(640, 640), + ratio_range=(0.8, 1.2), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=(640, 640)), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(640, 640), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=64), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# learning policy +optimizer = dict( + type='SGD', + lr=0.08, + momentum=0.9, + weight_decay=0.0001, + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.1, + step=[30, 40]) +# runtime settings +runner = dict(max_epochs=50) +evaluation = dict(interval=2) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py b/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py new file mode 100644 index 0000000..baa4a5a --- /dev/null +++ b/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py @@ -0,0 +1,10 @@ +_base_ = 'mask_rcnn_r50_fpg_crop640_50e_coco.py' + +model = dict( + neck=dict(out_channels=128, inter_channels=128), + rpn_head=dict(in_channels=128), + roi_head=dict( + bbox_roi_extractor=dict(out_channels=128), + bbox_head=dict(in_channels=128), + mask_roi_extractor=dict(out_channels=128), + mask_head=dict(in_channels=128))) diff --git a/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py b/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py new file mode 100644 index 0000000..3c9ea27 --- /dev/null +++ b/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py @@ -0,0 +1,48 @@ +_base_ = 'mask_rcnn_r50_fpn_crop640_50e_coco.py' + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + neck=dict( + type='FPG', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + inter_channels=256, + num_outs=5, + stack_times=9, + paths=['bu'] * 9, + same_down_trans=None, + same_up_trans=dict( + type='conv', + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_lateral_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_down_trans=dict( + type='interpolation_conv', + mode='nearest', + kernel_size=3, + norm_cfg=norm_cfg, + order=('act', 'conv', 'norm'), + inplace=False), + across_up_trans=None, + across_skip_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + output_trans=dict( + type='last_conv', + kernel_size=3, + order=('act', 'conv', 'norm'), + inplace=False), + norm_cfg=norm_cfg, + skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) diff --git a/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py b/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py new file mode 100644 index 0000000..c6bcc24 --- /dev/null +++ b/downstream/mmdetection/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py @@ -0,0 +1,79 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + backbone=dict(norm_cfg=norm_cfg, norm_eval=False), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + norm_cfg=norm_cfg, + num_outs=5), + roi_head=dict( + bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=(640, 640), + ratio_range=(0.8, 1.2), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=(640, 640)), + dict(type='DefaultFormatBundle'), + 
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(640, 640), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=64), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# learning policy +optimizer = dict( + type='SGD', + lr=0.08, + momentum=0.9, + weight_decay=0.0001, + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.1, + step=[30, 40]) +# runtime settings +runner = dict(max_epochs=50) +evaluation = dict(interval=2) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/fpg/metafile.yml b/downstream/mmdetection/configs/fpg/metafile.yml new file mode 100644 index 0000000..6b0a6a7 --- /dev/null +++ b/downstream/mmdetection/configs/fpg/metafile.yml @@ -0,0 +1,104 @@ +Collections: + - Name: Feature Pyramid Grids + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Feature Pyramid Grids + Paper: + URL: https://arxiv.org/abs/2004.03580 + Title: 'Feature Pyramid Grids' + README: configs/fpg/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.10.0/mmdet/models/necks/fpg.py#L101 + Version: v2.10.0 + +Models: + - Name: faster_rcnn_r50_fpg_crop640_50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py + Metadata: + Training Memory (GB): 20.0 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856-74109f42.pth + + - Name: faster_rcnn_r50_fpg-chn128_crop640_50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py + Metadata: + Training Memory (GB): 11.9 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857-9376aa9d.pth + + - Name: mask_rcnn_r50_fpg_crop640_50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py + Metadata: + Training Memory (GB): 23.2 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth + + - Name: mask_rcnn_r50_fpg-chn128_crop640_50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py + Metadata: 
+ Training Memory (GB): 15.3 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859-043c9b4e.pth + + - Name: retinanet_r50_fpg_crop640_50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py + Metadata: + Training Memory (GB): 20.8 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809-b0bcf5f4.pth + + - Name: retinanet_r50_fpg-chn128_crop640_50e_coco + In Collection: Feature Pyramid Grids + Config: configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py + Metadata: + Training Memory (GB): 19.9 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829-ee99a686.pth diff --git a/downstream/mmdetection/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py b/downstream/mmdetection/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py new file mode 100644 index 0000000..9a6cf7e --- /dev/null +++ b/downstream/mmdetection/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py @@ -0,0 +1,5 @@ +_base_ = 'retinanet_r50_fpg_crop640_50e_coco.py' + +model = dict( + neck=dict(out_channels=128, inter_channels=128), + bbox_head=dict(in_channels=128)) diff --git a/downstream/mmdetection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py b/downstream/mmdetection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py new file mode 100644 index 0000000..504ed5e --- /dev/null +++ b/downstream/mmdetection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py @@ -0,0 +1,53 @@ +_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' + +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + neck=dict( + _delete_=True, + type='FPG', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + inter_channels=256, + num_outs=5, + add_extra_convs=True, + start_level=1, + stack_times=9, + paths=['bu'] * 9, + same_down_trans=None, + same_up_trans=dict( + type='conv', + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_lateral_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + across_down_trans=dict( + type='interpolation_conv', + mode='nearest', + kernel_size=3, + norm_cfg=norm_cfg, + order=('act', 'conv', 'norm'), + inplace=False), + across_up_trans=None, + across_skip_trans=dict( + type='conv', + kernel_size=1, + norm_cfg=norm_cfg, + inplace=False, + order=('act', 'conv', 'norm')), + output_trans=dict( + type='last_conv', + kernel_size=3, + order=('act', 'conv', 'norm'), + inplace=False), + norm_cfg=norm_cfg, + skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) + +evaluation = dict(interval=2) diff --git a/downstream/mmdetection/configs/free_anchor/README.md b/downstream/mmdetection/configs/free_anchor/README.md new file mode 100644 index 0000000..d24c340 --- /dev/null +++ b/downstream/mmdetection/configs/free_anchor/README.md @@ -0,0 +1,37 @@ +# FreeAnchor + +> 
[FreeAnchor: Learning to Match Anchors for Visual Object Detection](https://arxiv.org/abs/1909.02466) + + + +## Abstract + +Modern CNN-based object detectors assign anchors for ground-truth objects under the restriction of object-anchor Intersection-over-Unit (IoU). In this study, we propose a learning-to-match approach to break IoU restriction, allowing objects to match anchors in a flexible manner. Our approach, referred to as FreeAnchor, updates hand-crafted anchor assignment to "free" anchor matching by formulating detector training as a maximum likelihood estimation (MLE) procedure. FreeAnchor targets at learning features which best explain a class of objects in terms of both classification and localization. FreeAnchor is implemented by optimizing detection customized likelihood and can be fused with CNN-based detectors in a plug-and-play manner. Experiments on COCO demonstrate that FreeAnchor consistently outperforms their counterparts with significant margins. + +
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | 4.9 | 18.4 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130_095625.log.json) | +| R-101 | pytorch | 1x | 6.8 | 14.9 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130-358324e6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130_100723.log.json) | +| X-101-32x4d | pytorch | 1x | 8.1 | 11.1 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130-d4846968.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130_095627.log.json) | + +**Notes:** + +- We use 8 GPUs with 2 images/GPU. +- For more settings and models, please refer to the [official repo](https://github.com/zhangxiaosong18/FreeAnchor). 
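If you train with a different GPU budget than the 8 GPU x 2 images/GPU reference above, the learning rate is usually adjusted with the linear scaling rule. A rough sketch, assuming `mmcv-full` provides `Config`, the script runs from the mmdetection root, and the 4-GPU setup below is purely hypothetical:

```python
# Rough sketch of the linear LR scaling rule for a different GPU budget than
# the 8 GPU x 2 images/GPU reference noted above. Assumptions: mmcv-full for
# `Config`, run from the mmdetection root, 4-GPU setup is hypothetical.
from mmcv import Config

cfg = Config.fromfile(
    'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py')

ref_batch = 8 * cfg.data.samples_per_gpu      # reference: 8 GPUs x 2 imgs/GPU
my_gpus, my_samples_per_gpu = 4, 2            # hypothetical smaller setup
my_batch = my_gpus * my_samples_per_gpu

cfg.optimizer.lr *= my_batch / ref_batch      # scale LR linearly with batch
cfg.data.samples_per_gpu = my_samples_per_gpu
print(cfg.optimizer.lr)
```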
+ +## Citation + +```latex +@inproceedings{zhang2019freeanchor, + title = {{FreeAnchor}: Learning to Match Anchors for Visual Object Detection}, + author = {Zhang, Xiaosong and Wan, Fang and Liu, Chang and Ji, Rongrong and Ye, Qixiang}, + booktitle = {Neural Information Processing Systems}, + year = {2019} +} +``` diff --git a/downstream/mmdetection/configs/free_anchor/metafile.yml b/downstream/mmdetection/configs/free_anchor/metafile.yml new file mode 100644 index 0000000..170fb5c --- /dev/null +++ b/downstream/mmdetection/configs/free_anchor/metafile.yml @@ -0,0 +1,79 @@ +Collections: + - Name: FreeAnchor + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FreeAnchor + - ResNet + Paper: + URL: https://arxiv.org/abs/1909.02466 + Title: 'FreeAnchor: Learning to Match Anchors for Visual Object Detection' + README: configs/free_anchor/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/dense_heads/free_anchor_retina_head.py#L10 + Version: v2.0.0 + +Models: + - Name: retinanet_free_anchor_r50_fpn_1x_coco + In Collection: FreeAnchor + Config: configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.9 + inference time (ms/im): + - value: 54.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth + + - Name: retinanet_free_anchor_r101_fpn_1x_coco + In Collection: FreeAnchor + Config: configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.8 + inference time (ms/im): + - value: 67.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130-358324e6.pth + + - Name: retinanet_free_anchor_x101_32x4d_fpn_1x_coco + In Collection: FreeAnchor + Config: configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.1 + inference time (ms/im): + - value: 90.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130-d4846968.pth diff --git a/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py new file mode 100644 index 0000000..f4aea53 --- /dev/null +++ b/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py 
b/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py new file mode 100644 index 0000000..28f983c --- /dev/null +++ b/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py @@ -0,0 +1,22 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +model = dict( + bbox_head=dict( + _delete_=True, + type='FreeAnchorRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75))) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..65f8a9e --- /dev/null +++ b/downstream/mmdetection/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/fsaf/README.md b/downstream/mmdetection/configs/fsaf/README.md new file mode 100644 index 0000000..4392a6e --- /dev/null +++ b/downstream/mmdetection/configs/fsaf/README.md @@ -0,0 +1,57 @@ +# FSAF + +> [Feature Selective Anchor-Free Module for Single-Shot Object Detection](https://arxiv.org/abs/1903.00621) + + + +## Abstract + +We motivate and present feature selective anchor-free (FSAF) module, a simple and effective building block for single-shot object detectors. It can be plugged into single-shot detectors with feature pyramid structure. The FSAF module addresses two limitations brought up by the conventional anchor-based detection: 1) heuristic-guided feature selection; 2) overlap-based anchor sampling. The general concept of the FSAF module is online feature selection applied to the training of multi-level anchor-free branches. Specifically, an anchor-free branch is attached to each level of the feature pyramid, allowing box encoding and decoding in the anchor-free manner at an arbitrary level. During training, we dynamically assign each instance to the most suitable feature level. At the time of inference, the FSAF module can work jointly with anchor-based branches by outputting predictions in parallel. We instantiate this concept with simple implementations of anchor-free branches and online feature selection strategy. Experimental results on the COCO detection track show that our FSAF module performs better than anchor-based counterparts while being faster. When working jointly with anchor-based branches, the FSAF module robustly improves the baseline RetinaNet by a large margin under various settings, while introducing nearly free inference overhead. And the resulting best model can achieve a state-of-the-art 44.6% mAP, outperforming all existing single-shot detectors on COCO. + +
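As the introduction below explains, the released FSAF implementation is equivalent to an anchor-based head with exactly one square anchor per feature-map location. A minimal check of this, assuming MMDetection 2.x and using the anchor generator settings from `fsaf_r50_fpn_1x_coco.py` added below:

```python
# Minimal check that the anchor settings used by `fsaf_r50_fpn_1x_coco.py`
# reduce to a single square anchor per feature-map location on every FPN
# level, as the introduction explains. Assumption: MMDetection 2.x.
from mmdet.core import AnchorGenerator

anchor_generator = AnchorGenerator(
    octave_base_scale=1,
    scales_per_octave=1,
    ratios=[1.0],
    strides=[8, 16, 32, 64, 128])

print(anchor_generator.num_base_anchors)  # [1, 1, 1, 1, 1]
```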
    + +## Introduction + +FSAF is an anchor-free method published in CVPR2019 ([https://arxiv.org/pdf/1903.00621.pdf](https://arxiv.org/pdf/1903.00621.pdf)). +Actually it is equivalent to the anchor-based method with only one anchor at each feature map position in each FPN level. +And this is how we implemented it. +Only the anchor-free branch is released for its better compatibility with the current framework and less computational budget. + +In the original paper, feature maps within the central 0.2-0.5 area of a gt box are tagged as ignored. However, +it is empirically found that a hard threshold (0.2-0.2) gives a further gain on the performance. (see the table below) + +## Results and Models + +### Results on R50/R101/X101-FPN + +| Backbone | ignore range | ms-train | Lr schd | Train Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Config | Download | +| :------: | :----------: | :------: | :-----: | :------------: | :-----------------: | :------------: | :---------: | :---------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | 0.2-0.5 | N | 1x | 3.15 | 0.43 | 12.3 | 36.0 (35.9) | | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco_20200715-b555b0e0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco_20200715_094657.log.json) | +| R-50 | 0.2-0.2 | N | 1x | 3.15 | 0.43 | 13.0 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco_20200428_072327.log.json) | +| R-101 | 0.2-0.2 | N | 1x | 5.08 | 0.58 | 10.8 | 39.3 (37.9) | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco-9e71098f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco_20200428_160348.log.json) | +| X-101 | 0.2-0.2 | N | 1x | 9.38 | 1.23 | 5.6 | 42.4 (41.0) | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco-e3f6e6fd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco_20200428_160424.log.json) | + +**Notes:** + +- *1x means the model is trained for 12 epochs.* +- *AP values in the brackets represent those reported in the original paper.* +- *All results are obtained with a single model and single-scale test.* +- *X-101 backbone represents ResNext-101-64x4d.* +- *All pretrained backbones use pytorch style.* +- *All models are trained on 8 Titan-XP gpus and tested on a single gpu.* + +## Citation + +BibTeX 
reference is as follows. + +```latex +@inproceedings{zhu2019feature, + title={Feature Selective Anchor-Free Module for Single-Shot Object Detection}, + author={Zhu, Chenchen and He, Yihui and Savvides, Marios}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={840--849}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/fsaf/fsaf_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/fsaf/fsaf_r101_fpn_1x_coco.py new file mode 100644 index 0000000..12b49fe --- /dev/null +++ b/downstream/mmdetection/configs/fsaf/fsaf_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './fsaf_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/fsaf/fsaf_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/fsaf/fsaf_r50_fpn_1x_coco.py new file mode 100644 index 0000000..67f3ec1 --- /dev/null +++ b/downstream/mmdetection/configs/fsaf/fsaf_r50_fpn_1x_coco.py @@ -0,0 +1,48 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +# model settings +model = dict( + type='FSAF', + bbox_head=dict( + type='FSAFHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + reg_decoded_bbox=True, + # Only anchor-free branch is implemented. The anchor generator only + # generates 1 anchor at each feature point, as a substitute of the + # grid of features. + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=1, + scales_per_octave=1, + ratios=[1.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0, + reduction='none'), + loss_bbox=dict( + _delete_=True, + type='IoULoss', + eps=1e-6, + loss_weight=1.0, + reduction='none')), + # training and testing settings + train_cfg=dict( + assigner=dict( + _delete_=True, + type='CenterRegionAssigner', + pos_scale=0.2, + neg_scale=0.2, + min_pos_iof=0.01), + allowed_border=-1, + pos_weight=-1, + debug=False)) +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=10, norm_type=2)) diff --git a/downstream/mmdetection/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..89c0c63 --- /dev/null +++ b/downstream/mmdetection/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './fsaf_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/fsaf/metafile.yml b/downstream/mmdetection/configs/fsaf/metafile.yml new file mode 100644 index 0000000..5434e9a --- /dev/null +++ b/downstream/mmdetection/configs/fsaf/metafile.yml @@ -0,0 +1,80 @@ +Collections: + - Name: FSAF + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x Titan-XP GPUs + Architecture: + - FPN + - FSAF + - ResNet + Paper: + URL: https://arxiv.org/abs/1903.00621 + Title: 'Feature Selective Anchor-Free Module for Single-Shot Object Detection' + README: configs/fsaf/README.md + 
Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/fsaf.py#L6 + Version: v2.1.0 + +Models: + - Name: fsaf_r50_fpn_1x_coco + In Collection: FSAF + Config: configs/fsaf/fsaf_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.15 + inference time (ms/im): + - value: 76.92 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth + + - Name: fsaf_r101_fpn_1x_coco + In Collection: FSAF + Config: configs/fsaf/fsaf_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.08 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.3 (37.9) + Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco-9e71098f.pth + + - Name: fsaf_x101_64x4d_fpn_1x_coco + In Collection: FSAF + Config: configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.38 + inference time (ms/im): + - value: 178.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 (41.0) + Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco-e3f6e6fd.pth diff --git a/downstream/mmdetection/configs/gcnet/README.md b/downstream/mmdetection/configs/gcnet/README.md new file mode 100644 index 0000000..403e086 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/README.md @@ -0,0 +1,69 @@ +# GCNet + +> [GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond](https://arxiv.org/abs/1904.11492) + + + +## Abstract + +The Non-Local Network (NLNet) presents a pioneering approach for capturing long-range dependencies, via aggregating query-specific global context to each query position. However, through a rigorous empirical analysis, we have found that the global contexts modeled by non-local network are almost the same for different query positions within an image. In this paper, we take advantage of this finding to create a simplified network based on a query-independent formulation, which maintains the accuracy of NLNet but with significantly less computation. We further observe that this simplified design shares similar structure with Squeeze-Excitation Network (SENet). Hence we unify them into a three-step general framework for global context modeling. Within the general framework, we design a better instantiation, called the global context (GC) block, which is lightweight and can effectively model the global context. The lightweight property allows us to apply it for multiple layers in a backbone network to construct a global context network (GCNet), which generally outperforms both simplified NLNet and SENet on major benchmarks for various recognition tasks. + +
    + +
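For orientation, the GCNet configs added later in this diff all share one pattern: the GC block is enabled as a backbone `plugins` entry. The sketch below mirrors `mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py` from this same diff; the comments are added here for illustration and are not part of the original file.

```python
# Enable the GC block as a ResNet backbone plugin (mirrors the r16
# Mask R-CNN config added below in this diff).
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            # ContextBlock is the GC block; ratio is the bottleneck ratio
            # of the context transform (r16 -> 1/16, r4 -> 1/4).
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            # Apply to stages c3-c5 only; the first ResNet stage is skipped.
            stages=(False, True, True, True),
            # Insert after the last 1x1 conv of each bottleneck.
            position='after_conv3')
    ]))
```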
    + +## Introduction + +By [Yue Cao](http://yue-cao.me), [Jiarui Xu](http://jerryxu.net), [Stephen Lin](https://scholar.google.com/citations?user=c3PYmxUAAAAJ&hl=en), Fangyun Wei, [Han Hu](https://sites.google.com/site/hanhushomepage/). + +We provide config files to reproduce the results in the paper for +["GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond"](https://arxiv.org/abs/1904.11492) on COCO object detection. + +**GCNet** is initially described in [arxiv](https://arxiv.org/abs/1904.11492). Via absorbing advantages of Non-Local Networks (NLNet) and Squeeze-Excitation Networks (SENet), GCNet provides a simple, fast and effective approach for global context modeling, which generally outperforms both NLNet and SENet on major benchmarks for various recognition tasks. + +## Results and Models + +The results on COCO 2017val are shown in the below table. + +| Backbone | Model | Context | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :---: | :------------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Mask | GC(c3-c5, r16) | 1x | 5.0 | | 39.7 | 35.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915.log.json) | +| R-50-FPN | Mask | GC(c3-c5, r4) | 1x | 5.1 | 15.0 | 39.9 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204-17235656.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204_024626.log.json) | +| R-101-FPN | Mask | GC(c3-c5, r16) | 1x | 7.6 | 11.4 | 41.3 | 37.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205-e58ae947.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205_192835.log.json) | +| R-101-FPN | Mask | GC(c3-c5, r4) | 1x | 7.8 | 11.6 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206-af22dc9d.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206_112128.log.json) | + +| Backbone | Model | Context | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :--------------: | :------------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Mask | - | 1x | 4.4 | 16.6 | 38.4 | 34.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202-bb3eb55c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202_214122.log.json) | +| R-50-FPN | Mask | GC(c3-c5, r16) | 1x | 5.0 | 15.5 | 40.4 | 36.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202_174907.log.json) | +| R-50-FPN | Mask | GC(c3-c5, r4) | 1x | 5.1 | 15.1 | 40.7 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) | +| R-101-FPN | Mask | - | 1x | 6.4 | 13.3 | 40.5 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210-81658c8a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210_220422.log.json) | +| R-101-FPN | Mask | GC(c3-c5, r16) | 1x | 7.6 | 12.0 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207-945e77ca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207_015330.log.json) | +| R-101-FPN | Mask | GC(c3-c5, r4) | 1x | 7.8 | 11.8 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) | +| X-101-FPN | Mask | - | 1x | 7.6 | 11.3 | 42.4 | 37.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211-7584841c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211_054326.log.json) | +| X-101-FPN | Mask | GC(c3-c5, r16) | 1x | 8.8 | 9.8 | 43.5 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-cbed3d2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211_164715.log.json) | +| X-101-FPN | Mask | GC(c3-c5, r4) | 1x | 9.0 | 9.7 | 43.9 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212-68164964.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212_070942.log.json) | +| X-101-FPN | Cascade Mask | - | 1x | 9.2 | 8.4 | 44.7 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310-d5ad2a5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310_115217.log.json) | +| X-101-FPN | Cascade Mask | GC(c3-c5, r16) | 1x | 10.3 | 7.7 | 46.2 | 39.7 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-10bf2463.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211_184154.log.json) | +| X-101-FPN | Cascade Mask | GC(c3-c5, r4) | 1x | 10.6 | | 46.4 | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653-ed035291.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653.log.json) | +| X-101-FPN | DCN Cascade Mask | - | 1x | | | 47.5 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019-abbc39ea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019.log.json) | +| X-101-FPN | DCN Cascade Mask | GC(c3-c5, r16) | 1x | | | 48.0 | 41.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648-44aa598a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648.log.json) | +| X-101-FPN | DCN Cascade Mask | GC(c3-c5, r4) | 1x | | | 47.9 | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851-720338ec.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851.log.json) | + +**Notes:** + +- The `SyncBN` is added in the backbone for all models in 
**Table 2**. +- `GC` denotes Global Context (GC) block is inserted after 1x1 conv of backbone. +- `DCN` denotes replace 3x3 conv with 3x3 Deformable Convolution in `c3-c5` stages of backbone. +- `r4` and `r16` denote ratio 4 and ratio 16 in GC block respectively. + +## Citation + +```latex +@article{cao2019GCNet, + title={GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond}, + author={Cao, Yue and Xu, Jiarui and Lin, Stephen and Wei, Fangyun and Hu, Han}, + journal={arXiv preprint arXiv:1904.11492}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py new file mode 100644 index 0000000..5118895 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..413499d --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..50689aa --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..1367231 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..50883ff --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..31fdd07 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..ad6ad47 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..29f9167 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py new file mode 100644 index 0000000..6e1c5d0 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..781dba7 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..32972de --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..d299b69 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..5ac908e --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict(plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py new file mode 100644 index 0000000..0308a56 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..e04780c --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..980f819 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py new file mode 100644 index 0000000..f0c96e5 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..7fb8e82 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. 
/ 16), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py new file mode 100644 index 0000000..b1ddbee --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + plugins=[ + dict( + cfg=dict(type='ContextBlock', ratio=1. / 4), + stages=(False, True, True, True), + position='after_conv3') + ])) diff --git a/downstream/mmdetection/configs/gcnet/metafile.yml b/downstream/mmdetection/configs/gcnet/metafile.yml new file mode 100644 index 0000000..1281122 --- /dev/null +++ b/downstream/mmdetection/configs/gcnet/metafile.yml @@ -0,0 +1,440 @@ +Collections: + - Name: GCNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Global Context Block + - FPN + - RPN + - ResNet + - ResNeXt + Paper: + URL: https://arxiv.org/abs/1904.11492 + Title: 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond' + README: configs/gcnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/context_block.py#L13 + Version: v2.0.0 + +Models: + - Name: mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth + + - Name: mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 5.1 + inference time (ms/im): + - value: 66.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204-17235656.pth + + - Name: mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 87.72 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205-e58ae947.pth + + - Name: mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory 
(GB): 7.8 + inference time (ms/im): + - value: 86.21 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206-af22dc9d.pth + + - Name: mask_rcnn_r50_fpn_syncbn-backbone_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 60.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202-bb3eb55c.pth + + - Name: mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + inference time (ms/im): + - value: 64.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth + + - Name: mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 5.1 + inference time (ms/im): + - value: 66.23 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth + + - Name: mask_rcnn_r101_fpn_syncbn-backbone_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 75.19 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210-81658c8a.pth + + - Name: mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 83.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + 
resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207-945e77ca.pth + + - Name: mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + inference time (ms/im): + - value: 84.75 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth + + - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211-7584841c.pth + + - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 8.8 + inference time (ms/im): + - value: 102.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-cbed3d2c.pth + + - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 9.0 + inference time (ms/im): + - value: 103.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212-68164964.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py + Metadata: + Training Memory (GB): 9.2 + inference time (ms/im): + - value: 119.05 + hardware: V100 
+ backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310-d5ad2a5e.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 129.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-10bf2463.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 10.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653-ed035291.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019-abbc39ea.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648-44aa598a.pth + + - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco + In Collection: GCNet + Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.1 + 
Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851-720338ec.pth diff --git a/downstream/mmdetection/configs/gfl/README.md b/downstream/mmdetection/configs/gfl/README.md new file mode 100644 index 0000000..703936b --- /dev/null +++ b/downstream/mmdetection/configs/gfl/README.md @@ -0,0 +1,42 @@ +# GFL + +> [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388) + + + +## Abstract + +One-stage detector basically formulates object detection as dense classification and localization. The classification is usually optimized by Focal Loss and the box location is commonly learned under Dirac delta distribution. A recent trend for one-stage detectors is to introduce an individual prediction branch to estimate the quality of localization, where the predicted quality facilitates the classification to improve detection performance. This paper delves into the representations of the above three fundamental elements: quality estimation, classification and localization. Two problems are discovered in existing practices, including (1) the inconsistent usage of the quality estimation and classification between training and inference and (2) the inflexible Dirac delta distribution for localization when there is ambiguity and uncertainty in complex scenes. To address the problems, we design new representations for these elements. Specifically, we merge the quality estimation into the class prediction vector to form a joint representation of localization quality and classification, and use a vector to represent arbitrary distribution of box locations. The improved representations eliminate the inconsistency risk and accurately depict the flexible distribution in real data, but contain continuous labels, which is beyond the scope of Focal Loss. We then propose Generalized Focal Loss (GFL) that generalizes Focal Loss from its discrete form to the continuous version for successful optimization. On COCO test-dev, GFL achieves 45.0% AP using ResNet-101 backbone, surpassing state-of-the-art SAPD (43.5%) and ATSS (43.6%) with higher or comparable inference speed, under the same backbone and training settings. Notably, our best model can achieve a single-model single-scale AP of 48.2%, at 10 FPS on a single 2080Ti GPU. + +
    + +
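For a quick orientation before the results table, the excerpt below is condensed from `gfl_r50_fpn_1x_coco.py`, which is added later in this same diff; the comments are annotations added here and are not part of the original config.

```python
# Condensed GFL head settings from gfl_r50_fpn_1x_coco.py (added below).
bbox_head = dict(
    type='GFLHead',
    num_classes=80,
    in_channels=256,
    # Joint classification / localization-quality score, optimized with
    # the Quality Focal Loss (the continuous-label extension of FL).
    loss_cls=dict(
        type='QualityFocalLoss', use_sigmoid=True, beta=2.0,
        loss_weight=1.0),
    # Distribution Focal Loss over the discretized box-offset
    # distribution; reg_max controls the number of bins per side.
    loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
    reg_max=16,
    # GIoU loss on the decoded boxes.
    loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
```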
    + +## Results and Models + +| Backbone | Style | Lr schd | Multi-scale Training | Inf time (fps) | box AP | Config | Download | +| :---------------: | :-----: | :-----: | :------------------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 1x | No | 19.5 | 40.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244.log.json) | +| R-50 | pytorch | 2x | Yes | 19.5 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802.log.json) | +| R-101 | pytorch | 2x | Yes | 14.7 | 44.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126.log.json) | +| R-101-dcnv2 | pytorch | 2x | Yes | 12.9 | 47.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002.log.json) | +| X-101-32x4d | pytorch | 2x | Yes | 12.1 | 45.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002.log.json) | +| X-101-32x4d-dcnv2 | pytorch | 2x | Yes | 10.7 | 48.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002.log.json) | + +\[1\] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \ +\[2\] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \ +\[3\] *`dcnv2` denotes deformable convolutional networks v2.* \ +\[4\] *FPS is tested with a single GeForce RTX 2080Ti GPU, using a batch size of 1.* + +## Citation + +We provide config files to reproduce the object detection results in the paper [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388) + +```latex +@article{li2020generalized, + title={Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection}, + author={Li, Xiang and Wang, Wenhai and Wu, Lijun and Chen, Shuo and Hu, Xiaolin and Li, Jun and Tang, Jinhui and Yang, Jian}, + journal={arXiv preprint arXiv:2006.04388}, + year={2020} +} +``` diff --git a/downstream/mmdetection/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..b72c2b6 --- /dev/null +++ b/downstream/mmdetection/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './gfl_r50_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..e33b5c0 --- /dev/null +++ b/downstream/mmdetection/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py @@ -0,0 +1,13 @@ +_base_ = './gfl_r50_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/gfl/gfl_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/gfl/gfl_r50_fpn_1x_coco.py new file mode 100644 index 0000000..cfd4b02 --- /dev/null +++ b/downstream/mmdetection/configs/gfl/gfl_r50_fpn_1x_coco.py @@ -0,0 +1,57 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='GFL', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='GFLHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + 
type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), + reg_max=16, + loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..b8be601 --- /dev/null +++ b/downstream/mmdetection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py @@ -0,0 +1,22 @@ +_base_ = './gfl_r50_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) +# multi-scale training +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 480), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..2539807 --- /dev/null +++ b/downstream/mmdetection/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py @@ -0,0 +1,18 @@ +_base_ = './gfl_r50_fpn_mstrain_2x_coco.py' +model = dict( + type='GFL', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..effda19 --- /dev/null +++ b/downstream/mmdetection/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './gfl_r50_fpn_mstrain_2x_coco.py' +model = dict( + type='GFL', + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/gfl/metafile.yml b/downstream/mmdetection/configs/gfl/metafile.yml new file mode 100644 index 0000000..8f049c6 --- /dev/null +++ b/downstream/mmdetection/configs/gfl/metafile.yml @@ -0,0 +1,134 @@ 
+Collections: + - Name: Generalized Focal Loss + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Generalized Focal Loss + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2006.04388 + Title: 'Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection' + README: configs/gfl/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/gfl.py#L6 + Version: v2.2.0 + +Models: + - Name: gfl_r50_fpn_1x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_r50_fpn_1x_coco.py + Metadata: + inference time (ms/im): + - value: 51.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth + + - Name: gfl_r50_fpn_mstrain_2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py + Metadata: + inference time (ms/im): + - value: 51.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth + + - Name: gfl_r101_fpn_mstrain_2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py + Metadata: + inference time (ms/im): + - value: 68.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth + + - Name: gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py + Metadata: + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth + + - Name: gfl_x101_32x4d_fpn_mstrain_2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py + Metadata: + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth + + - Name: gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco + In Collection: Generalized Focal Loss + Config: configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py + Metadata: + inference time (ms/im): + - value: 93.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: 
+ - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth diff --git a/downstream/mmdetection/configs/ghm/README.md b/downstream/mmdetection/configs/ghm/README.md new file mode 100644 index 0000000..cf9fb73 --- /dev/null +++ b/downstream/mmdetection/configs/ghm/README.md @@ -0,0 +1,33 @@ +# GHM + +> [Gradient Harmonized Single-stage Detector](https://arxiv.org/abs/1811.05181) + + + +## Abstract + +Despite the great success of two-stage detectors, the single-stage detector is still a more elegant and efficient approach, yet it suffers from the two well-known disharmonies during training, i.e., the huge difference in quantity between positive and negative examples as well as between easy and hard examples. In this work, we first point out that the essential effect of the two disharmonies can be summarized in terms of the gradient. Further, we propose a novel gradient harmonizing mechanism (GHM) to hedge against the two disharmonies. The philosophy behind GHM can be easily embedded into both classification loss functions such as cross-entropy (CE) and regression loss functions such as the smooth-L1 (SL1) loss. To this end, two novel loss functions called GHM-C and GHM-R are designed to balance the gradient flow for anchor classification and bounding box refinement, respectively. An ablation study on MS COCO demonstrates that, without laborious hyper-parameter tuning, both GHM-C and GHM-R can bring substantial improvements for single-stage detectors. Without bells and whistles, our model achieves 41.6 mAP on the COCO test-dev set, which surpasses the state-of-the-art method, Focal Loss (FL) + SL1, by 0.8. + +
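As a concrete illustration of the mechanism described in the abstract, the following is a minimal, self-contained sketch of the GHM-C idea: estimate the gradient density with a histogram over the gradient norm and re-weight binary cross-entropy by its inverse. It is only a simplified approximation of the `GHMC` loss configured below (for instance, it ignores the `momentum` option used in the config):

```python
import torch
import torch.nn.functional as F


def ghm_c_loss(logits, targets, bins=30):
    """Simplified GHM-C: histogram-based gradient-density re-weighting of BCE.

    logits, targets: tensors of the same shape, targets in {0, 1}.
    """
    # "gradient norm" of sigmoid + BCE w.r.t. the logit, in [0, 1]
    g = torch.abs(torch.sigmoid(logits).detach() - targets)
    edges = torch.linspace(0, 1, bins + 1, device=logits.device)
    edges[-1] += 1e-6  # make the last bin include g == 1
    weights = torch.zeros_like(logits)
    n = logits.numel()
    num_valid_bins = 0
    for i in range(bins):
        in_bin = (g >= edges[i]) & (g < edges[i + 1])
        count = in_bin.sum().item()
        if count > 0:
            # weight ~ 1 / gradient density: crowded bins (easy negatives,
            # extreme outliers) are down-weighted relative to the rest
            weights[in_bin] = n / count
            num_valid_bins += 1
    if num_valid_bins > 0:
        weights = weights / num_valid_bins
    loss = F.binary_cross_entropy_with_logits(
        logits, targets, weight=weights, reduction='sum')
    return loss / n


# toy usage with a heavily imbalanced target, as in anchor classification
logits = torch.randn(8, 80)
targets = (torch.rand(8, 80) > 0.95).float()
print(ghm_c_loss(logits, targets))
```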
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 4.0 | 3.3 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130_004213.log.json) | +| R-101-FPN | pytorch | 1x | 6.0 | 4.4 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130-c148ee8f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130_145259.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.2 | 5.1 | 40.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131-e4333bd0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131_113653.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.3 | 5.2 | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131-dd381cef.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131_113723.log.json) | + +## Citation + +```latex +@inproceedings{li2019gradient, + title={Gradient Harmonized Single-stage Detector}, + author={Li, Buyu and Liu, Yu and Wang, Xiaogang}, + booktitle={AAAI Conference on Artificial Intelligence}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/ghm/metafile.yml b/downstream/mmdetection/configs/ghm/metafile.yml new file mode 100644 index 0000000..b4f488c --- /dev/null +++ b/downstream/mmdetection/configs/ghm/metafile.yml @@ -0,0 +1,101 @@ +Collections: + - Name: GHM + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - GHM-C + - GHM-R + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1811.05181 + Title: 'Gradient Harmonized Single-stage Detector' + README: configs/ghm/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/losses/ghm_loss.py#L21 + Version: v2.0.0 + +Models: + - Name: retinanet_ghm_r50_fpn_1x_coco + In Collection: 
GHM + Config: configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 303.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth + + - Name: retinanet_ghm_r101_fpn_1x_coco + In Collection: GHM + Config: configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 227.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130-c148ee8f.pth + + - Name: retinanet_ghm_x101_32x4d_fpn_1x_coco + In Collection: GHM + Config: configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.2 + inference time (ms/im): + - value: 196.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131-e4333bd0.pth + + - Name: retinanet_ghm_x101_64x4d_fpn_1x_coco + In Collection: GHM + Config: configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 192.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131-dd381cef.pth diff --git a/downstream/mmdetection/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py new file mode 100644 index 0000000..aaf6fc2 --- /dev/null +++ b/downstream/mmdetection/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './retinanet_ghm_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py new file mode 100644 index 0000000..61b9751 --- /dev/null +++ b/downstream/mmdetection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py @@ -0,0 +1,19 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +model = dict( + bbox_head=dict( + loss_cls=dict( + _delete_=True, + type='GHMC', + bins=30, + momentum=0.75, + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict( + _delete_=True, + type='GHMR', + mu=0.02, + bins=10, + momentum=0.7, + loss_weight=10.0))) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..cd2e4cc --- /dev/null +++ 
b/downstream/mmdetection/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_ghm_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..b6107d8 --- /dev/null +++ b/downstream/mmdetection/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_ghm_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/gn+ws/README.md b/downstream/mmdetection/configs/gn+ws/README.md new file mode 100644 index 0000000..184bed3 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/README.md @@ -0,0 +1,54 @@ +# GN + WS + +> [Weight Standardization](https://arxiv.org/abs/1903.10520) + + + +## Abstract + +Batch Normalization (BN) has become an out-of-box technique to improve deep network training. However, its effectiveness is limited for micro-batch training, i.e., each GPU typically has only 1-2 images for training, which is inevitable for many computer vision tasks, e.g., object detection and semantic segmentation, constrained by memory consumption. To address this issue, we propose Weight Standardization (WS) and Batch-Channel Normalization (BCN) to bring two success factors of BN into micro-batch training: 1) the smoothing effects on the loss landscape and 2) the ability to avoid harmful elimination singularities along the training trajectory. WS standardizes the weights in convolutional layers to smooth the loss landscape by reducing the Lipschitz constants of the loss and the gradients; BCN combines batch and channel normalizations and leverages estimated statistics of the activations in convolutional layers to keep networks away from elimination singularities. We validate WS and BCN on comprehensive computer vision tasks, including image classification, object detection, instance segmentation, video recognition and semantic segmentation. All experimental results consistently show that WS and BCN improve micro-batch training significantly. Moreover, using WS and BCN with micro-batch training is even able to match or outperform the performances of BN with large-batch training. + +
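To make the idea concrete, here is a rough stand-alone sketch of Weight Standardization for a 2D convolution: each kernel is standardized over its fan-in (in_channels × kH × kW) per output channel before convolving, and is typically paired with GN. This is only an illustration of the mechanism; in the configs below the same effect is obtained through the `ConvWS` conv_cfg together with a `GN` norm_cfg:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class Conv2dWS(nn.Conv2d):
    """Conv2d whose weights are standardized over (in_channels, kH, kW)."""

    def forward(self, x, eps=1e-5):
        w = self.weight
        mean = w.mean(dim=(1, 2, 3), keepdim=True)
        std = w.std(dim=(1, 2, 3), keepdim=True) + eps
        return F.conv2d(x, (w - mean) / std, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)


# typical pairing: WS on the convolution, GN as the normalization layer
block = nn.Sequential(
    Conv2dWS(3, 64, kernel_size=3, padding=1),
    nn.GroupNorm(num_groups=32, num_channels=64),
    nn.ReLU(inplace=True),
)
print(block(torch.randn(2, 3, 64, 64)).shape)  # -> torch.Size([2, 64, 64, 64])
```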
    + +## Results and Models + +Faster R-CNN + +| Backbone | Style | Normalization | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | GN+WS | 1x | 5.9 | 11.7 | 39.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130_210936.log.json) | +| R-101-FPN | pytorch | GN+WS | 1x | 8.9 | 9.0 | 41.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205-a93b0d75.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205_232146.log.json) | +| X-50-32x4d-FPN | pytorch | GN+WS | 1x | 7.0 | 10.3 | 40.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203-839c5d9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203_220113.log.json) | +| X-101-32x4d-FPN | pytorch | GN+WS | 1x | 10.8 | 7.6 | 42.1 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212-27da1bc2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212_195302.log.json) | + +Mask R-CNN + +| Backbone | Style | Normalization | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----------: | :-------: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------------------: | 
:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | GN+WS | 2x | 7.3 | 10.5 | 40.6 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226-16acb762.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226_062128.log.json) | +| R-101-FPN | pytorch | GN+WS | 2x | 10.3 | 8.6 | 42.0 | 37.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212-ea357cd9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212_213627.log.json) | +| X-50-32x4d-FPN | pytorch | GN+WS | 2x | 8.4 | 9.3 | 41.1 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216-649fdb6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216_201500.log.json) | +| X-101-32x4d-FPN | pytorch | GN+WS | 2x | 12.2 | 7.1 | 42.1 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319-33fb95b5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319_104101.log.json) | +| R-50-FPN | pytorch | GN+WS | 20-23-24e | 7.3 | - | 41.1 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213-487d1283.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213_035123.log.json) | +| R-101-FPN | pytorch | GN+WS | 20-23-24e | 10.3 | - | 43.1 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213-57b5a50f.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213_130142.log.json) | +| X-50-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 8.4 | - | 42.1 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226-969bcb2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226_093732.log.json) | +| X-101-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 12.2 | - | 42.7 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316-e6cd35ef.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316_013741.log.json) | + +Note: + +- GN+WS requires about 5% more memory than GN, and it is only 5% slower than GN. +- In the paper, a 20-23-24e lr schedule is used instead of 2x. +- The X-50-GN and X-101-GN pretrained models are also shared by the authors. + +## Citation + +```latex +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +``` diff --git a/downstream/mmdetection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py b/downstream/mmdetection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py new file mode 100644 index 0000000..cd2cb2b --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws'))) diff --git a/downstream/mmdetection/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py b/downstream/mmdetection/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py new file mode 100644 index 0000000..1b326b8 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')), + neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg))) diff --git a/downstream/mmdetection/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py b/downstream/mmdetection/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py new file mode 100644 index 0000000..f64ae89 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = 
'./faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws'))) diff --git a/downstream/mmdetection/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py b/downstream/mmdetection/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py new file mode 100644 index 0000000..246851b --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + type='ResNeXt', + depth=50, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws'))) diff --git a/downstream/mmdetection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py b/downstream/mmdetection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py new file mode 100644 index 0000000..a790d93 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py' +# learning policy +lr_config = dict(step=[20, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py b/downstream/mmdetection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py new file mode 100644 index 0000000..a9fa6a2 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws'))) diff --git a/downstream/mmdetection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py b/downstream/mmdetection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py new file mode 100644 index 0000000..5516808 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' +# learning policy +lr_config = dict(step=[20, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py b/downstream/mmdetection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py new file mode 100644 index 0000000..63be60f --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py @@ -0,0 +1,20 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')), + neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + 
conv_out_channels=256, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg))) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py b/downstream/mmdetection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py new file mode 100644 index 0000000..cfa14c9 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py' +# learning policy +lr_config = dict(step=[20, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py b/downstream/mmdetection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py new file mode 100644 index 0000000..6498b03 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py @@ -0,0 +1,19 @@ +_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' +# model settings +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws'))) diff --git a/downstream/mmdetection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py b/downstream/mmdetection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py new file mode 100644 index 0000000..79ce0ad --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py' +# learning policy +lr_config = dict(step=[20, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py b/downstream/mmdetection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py new file mode 100644 index 0000000..7fac317 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py @@ -0,0 +1,19 @@ +_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' +# model settings +conv_cfg = dict(type='ConvWS') +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + type='ResNeXt', + depth=50, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws'))) diff --git a/downstream/mmdetection/configs/gn+ws/metafile.yml b/downstream/mmdetection/configs/gn+ws/metafile.yml new file mode 100644 index 0000000..bc89359 --- /dev/null +++ b/downstream/mmdetection/configs/gn+ws/metafile.yml @@ -0,0 +1,263 @@ +Collections: + - Name: Weight Standardization + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Group Normalization + - Weight Standardization + Paper: + URL: https://arxiv.org/abs/1903.10520 + Title: 'Weight Standardization' + README: configs/gn+ws/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py + Version: v2.0.0 + +Models: + - Name: faster_rcnn_r50_fpn_gn_ws-all_1x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py + Metadata: + Training Memory (GB): 5.9 + inference time (ms/im): + - value: 85.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth + + - Name: faster_rcnn_r101_fpn_gn_ws-all_1x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py + Metadata: + Training Memory (GB): 8.9 + inference time (ms/im): + - value: 111.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205-a93b0d75.pth + + - Name: faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 97.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203-839c5d9d.pth + + - Name: faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py + Metadata: + Training Memory (GB): 10.8 + inference time (ms/im): + - value: 131.58 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212-27da1bc2.pth + + - Name: mask_rcnn_r50_fpn_gn_ws-all_2x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py + Metadata: + Training Memory (GB): 7.3 + inference time (ms/im): + - value: 95.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226-16acb762.pth + + - Name: mask_rcnn_r101_fpn_gn_ws-all_2x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py + Metadata: + Training Memory (GB): 10.3 + inference time (ms/im): + - value: 116.28 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + 
Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212-ea357cd9.pth + + - Name: mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py + Metadata: + Training Memory (GB): 8.4 + inference time (ms/im): + - value: 107.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216-649fdb6f.pth + + - Name: mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py + Metadata: + Training Memory (GB): 12.2 + inference time (ms/im): + - value: 140.85 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319-33fb95b5.pth + + - Name: mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py + Metadata: + Training Memory (GB): 7.3 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213-487d1283.pth + + - Name: mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py + Metadata: + Training Memory (GB): 10.3 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213-57b5a50f.pth + + - Name: mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py + Metadata: + Training Memory (GB): 8.4 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226-969bcb2c.pth + + - Name: mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco + In Collection: Weight Standardization + Config: configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py + Metadata: 
+ Training Memory (GB): 12.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316-e6cd35ef.pth diff --git a/downstream/mmdetection/configs/gn/README.md b/downstream/mmdetection/configs/gn/README.md new file mode 100644 index 0000000..9bb2888 --- /dev/null +++ b/downstream/mmdetection/configs/gn/README.md @@ -0,0 +1,41 @@ +# GN + +> [Group Normalization](https://arxiv.org/abs/1803.08494) + + + +## Abstract + +Batch Normalization (BN) is a milestone technique in the development of deep learning, enabling various networks to train. However, normalizing along the batch dimension introduces problems --- BN's error increases rapidly when the batch size becomes smaller, caused by inaccurate batch statistics estimation. This limits BN's usage for training larger models and transferring features to computer vision tasks including detection, segmentation, and video, which require small batches constrained by memory consumption. In this paper, we present Group Normalization (GN) as a simple alternative to BN. GN divides the channels into groups and computes within each group the mean and variance for normalization. GN's computation is independent of batch sizes, and its accuracy is stable in a wide range of batch sizes. On ResNet-50 trained in ImageNet, GN has 10.6% lower error than its BN counterpart when using a batch size of 2; when using typical batch sizes, GN is comparably good with BN and outperforms other normalization variants. Moreover, GN can be naturally transferred from pre-training to fine-tuning. GN can outperform its BN-based counterparts for object detection and segmentation in COCO, and for video classification in Kinetics, showing that GN can effectively replace the powerful BN in a variety of tasks. GN can be easily implemented by a few lines of code in modern libraries. + +
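Since the abstract notes that GN "can be easily implemented by a few lines of code", here is such a sketch for reference: the C channels are split into G groups and the mean/variance are computed per sample over (C/G, H, W), so the statistics do not depend on the batch size. It is equivalent in spirit to `torch.nn.GroupNorm` (the `GN` norm_cfg used in the configs below), with the learnable per-channel affine parameters omitted:

```python
import torch
import torch.nn.functional as F


def group_norm(x, num_groups=32, eps=1e-5):
    """Group Normalization over (C // G, H, W) per sample, no affine."""
    n, c, h, w = x.shape
    assert c % num_groups == 0
    x = x.view(n, num_groups, c // num_groups, h, w)
    mean = x.mean(dim=(2, 3, 4), keepdim=True)
    var = x.var(dim=(2, 3, 4), unbiased=False, keepdim=True)
    x = (x - mean) / torch.sqrt(var + eps)
    return x.view(n, c, h, w)


x = torch.randn(2, 256, 50, 76)
# cross-check against the built-in functional version (also without affine)
print(torch.allclose(group_norm(x), F.group_norm(x, num_groups=32), atol=1e-5))
```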
    + +## Results and Models + +| Backbone | model | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-----------: | :--------: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN (d) | Mask R-CNN | 2x | 7.1 | 11.0 | 40.2 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206_050355.log.json) | +| R-50-FPN (d) | Mask R-CNN | 3x | 7.1 | - | 40.5 | 36.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214-8b23b1e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214_063512.log.json) | +| R-101-FPN (d) | Mask R-CNN | 2x | 9.9 | 9.0 | 41.9 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205-d96b1b50.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205_234402.log.json) | +| R-101-FPN (d) | Mask R-CNN | 3x | 9.9 | | 42.1 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609-0df864f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609.log.json) | +| R-50-FPN (c) | Mask R-CNN | 2x | 7.1 | 10.9 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207-20d3e849.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207_225832.log.json) | +| R-50-FPN (c) | Mask R-CNN | 3x | 7.1 | - | 40.1 | 36.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225-542aefbc.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225_235135.log.json) | + +**Notes:** + +- (d) means pretrained model converted from Detectron, and (c) means the contributed model pretrained by [@thangvubk](https://github.com/thangvubk). +- The `3x` schedule is epoch \[28, 34, 36\]. +- **Memory, Train/Inf time is outdated.** + +## Citation + +```latex +@inproceedings{wu2018group, + title={Group Normalization}, + author={Wu, Yuxin and He, Kaiming}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + year={2018} +} +``` diff --git a/downstream/mmdetection/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py b/downstream/mmdetection/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py new file mode 100644 index 0000000..a505ba0 --- /dev/null +++ b/downstream/mmdetection/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet101_gn'))) diff --git a/downstream/mmdetection/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py b/downstream/mmdetection/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py new file mode 100644 index 0000000..12a9d17 --- /dev/null +++ b/downstream/mmdetection/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py @@ -0,0 +1,5 @@ +_base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py' + +# learning policy +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py b/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py new file mode 100644 index 0000000..1de7d98 --- /dev/null +++ b/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py @@ -0,0 +1,49 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet50_gn')), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git 
a/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py b/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py new file mode 100644 index 0000000..f917719 --- /dev/null +++ b/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py @@ -0,0 +1,5 @@ +_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' + +# learning policy +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py b/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py new file mode 100644 index 0000000..2f430fd --- /dev/null +++ b/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py @@ -0,0 +1,17 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + norm_cfg=norm_cfg, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py b/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py new file mode 100644 index 0000000..66834f0 --- /dev/null +++ b/downstream/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py @@ -0,0 +1,5 @@ +_base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py' + +# learning policy +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/gn/metafile.yml b/downstream/mmdetection/configs/gn/metafile.yml new file mode 100644 index 0000000..4a1ecae --- /dev/null +++ b/downstream/mmdetection/configs/gn/metafile.yml @@ -0,0 +1,162 @@ +Collections: + - Name: Group Normalization + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Group Normalization + Paper: + URL: https://arxiv.org/abs/1803.08494 + Title: 'Group Normalization' + README: configs/gn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py + Version: v2.0.0 + +Models: + - Name: mask_rcnn_r50_fpn_gn-all_2x_coco + In Collection: Group Normalization + Config: configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth + + - Name: mask_rcnn_r50_fpn_gn-all_3x_coco + In Collection: Group Normalization + Config: configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + 
Metrics: + box AP: 40.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214-8b23b1e5.pth + + - Name: mask_rcnn_r101_fpn_gn-all_2x_coco + In Collection: Group Normalization + Config: configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py + Metadata: + Training Memory (GB): 9.9 + inference time (ms/im): + - value: 111.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205-d96b1b50.pth + + - Name: mask_rcnn_r101_fpn_gn-all_3x_coco + In Collection: Group Normalization + Config: configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py + Metadata: + Training Memory (GB): 9.9 + inference time (ms/im): + - value: 111.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609-0df864f4.pth + + - Name: mask_rcnn_r50_fpn_gn-all_contrib_2x_coco + In Collection: Group Normalization + Config: configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 91.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207-20d3e849.pth + + - Name: mask_rcnn_r50_fpn_gn-all_contrib_3x_coco + In Collection: Group Normalization + Config: configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 91.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225-542aefbc.pth diff --git a/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_1x.py b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_1x.py new file mode 100644 index 0000000..af90b16 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_1x.py @@ -0,0 +1,173 @@ +_base_ = [ + '../../_base_/datasets/coco_instance.py', + '../../_base_/schedules/schedule_1x.py', + '../../_base_/default_runtime.py' +] +checkpoint_url = '' +embed_dims = 216 +model = dict( + type='MaskRCNN', + backbone=dict( + type='GPViTAdapter', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 
8], [9, 11]], + arch='L1', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims, embed_dims], + out_channels=256, + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + norm_cfg=dict(type='SyncBN', requires_grad=True), + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + # norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', 
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) +optimizer_config = dict(grad_clip=None) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) +work_dir = 'work_dirs/gpvit_l1_maskrcnn_1x' diff --git a/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_3x.py b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_3x.py new file mode 100644 index 0000000..92f8716 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l1_maskrcnn_3x.py @@ -0,0 +1,207 @@ +_base_ = [ + '../../_base_/datasets/coco_instance.py', + '../../_base_/schedules/schedule_3x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 216 +model = dict( + type='MaskRCNN', + backbone=dict( + type='GPViTAdapter', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L1', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims, embed_dims], + out_channels=256, + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + norm_cfg=dict(type='SyncBN', requires_grad=True), + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + # norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + 
assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='AutoAugment', + policies=[ + [ + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict(type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(1024, 1024), + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) +optimizer_config = dict(grad_clip=None) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l1_maskrcnn_3x' diff --git a/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l2_maskrcnn_1x.py b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l2_maskrcnn_1x.py new file mode 100644 index 0000000..c22423f --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l2_maskrcnn_1x.py @@ -0,0 +1,173 @@ +_base_ = [ + 
'../../_base_/datasets/coco_instance.py', + '../../_base_/schedules/schedule_1x.py', + '../../_base_/default_runtime.py' +] +checkpoint_url = '' +embed_dims = 348 +model = dict( + type='MaskRCNN', + backbone=dict( + type='GPViTAdapter', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L2', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims, embed_dims], + out_channels=256, + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + norm_cfg=dict(type='SyncBN', requires_grad=True), + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + # norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse 
RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) +optimizer_config = dict(grad_clip=None) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) +work_dir = 'work_dirs/gpvit_l2_maskrcnn_1x' diff --git a/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l2_maskrcnn_3x.py b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l2_maskrcnn_3x.py new file mode 100644 index 0000000..57eccd5 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l2_maskrcnn_3x.py @@ -0,0 +1,207 @@ +_base_ = [ + '../../_base_/datasets/coco_instance.py', + '../../_base_/schedules/schedule_3x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 348 +model = dict( + type='MaskRCNN', + backbone=dict( + type='GPViTAdapter', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L2', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims, embed_dims], + out_channels=256, + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + norm_cfg=dict(type='SyncBN', requires_grad=True), + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + 
out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + # norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='AutoAugment', + policies=[ + [ + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict(type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(1024, 1024), + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) +optimizer_config = dict(grad_clip=None) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 
'work_dirs/gpvit_l2_maskrcnn_3x' diff --git a/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l3_maskrcnn_1x.py b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l3_maskrcnn_1x.py new file mode 100644 index 0000000..7b9f3c1 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l3_maskrcnn_1x.py @@ -0,0 +1,173 @@ +_base_ = [ + '../../_base_/datasets/coco_instance.py', + '../../_base_/schedules/schedule_1x.py', + '../../_base_/default_runtime.py' +] +checkpoint_url = '' +embed_dims = 432 +model = dict( + type='MaskRCNN', + backbone=dict( + type='GPViTAdapter', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L3', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims, embed_dims], + out_channels=256, + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + norm_cfg=dict(type='SyncBN', requires_grad=True), + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + # norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + 
max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) +optimizer_config = dict(grad_clip=None) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) +work_dir = 'work_dirs/gpvit_l3_maskrcnn_1x' diff --git a/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l3_maskrcnn_3x.py b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l3_maskrcnn_3x.py new file mode 100644 index 0000000..a68474b --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l3_maskrcnn_3x.py @@ -0,0 +1,207 @@ +_base_ = [ + '../../_base_/datasets/coco_instance.py', + '../../_base_/schedules/schedule_3x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 432 +model = dict( + type='MaskRCNN', + backbone=dict( + type='GPViTAdapter', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L3', + drop_path_rate=0.2, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims, embed_dims], + out_channels=256, + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + norm_cfg=dict(type='SyncBN', requires_grad=True), + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + 
target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + # norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='AutoAugment', + policies=[ + [ + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict(type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(1024, 1024), + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + 
'.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) +optimizer_config = dict(grad_clip=None) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l3_maskrcnn_3x' diff --git a/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l4_maskrcnn_1x.py b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l4_maskrcnn_1x.py new file mode 100644 index 0000000..d5e1e98 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l4_maskrcnn_1x.py @@ -0,0 +1,173 @@ +_base_ = [ + '../../_base_/datasets/coco_instance.py', + '../../_base_/schedules/schedule_1x.py', + '../../_base_/default_runtime.py' +] +checkpoint_url = '' +embed_dims = 624 +model = dict( + type='MaskRCNN', + backbone=dict( + type='GPViTAdapter', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L4', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims, embed_dims], + out_channels=256, + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + norm_cfg=dict(type='SyncBN', requires_grad=True), + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + # norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + 
assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) +optimizer_config = dict(grad_clip=None) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) +work_dir = 'work_dirs/gpvit_l4_maskrcnn_1x' diff --git a/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l4_maskrcnn_3x.py b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l4_maskrcnn_3x.py new file mode 100644 index 0000000..ab193e5 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/mask_rcnn/gpvit_l4_maskrcnn_3x.py @@ -0,0 +1,207 @@ +_base_ = [ + '../../_base_/datasets/coco_instance.py', + '../../_base_/schedules/schedule_3x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 624 +model = dict( + type='MaskRCNN', + backbone=dict( + type='GPViTAdapter', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L4', + drop_path_rate=0.2, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims, embed_dims], + out_channels=256, + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + 
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + norm_cfg=dict(type='SyncBN', requires_grad=True), + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + # norm_cfg=dict(type='SyncBN', requires_grad=True), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='AutoAugment', + policies=[ + [ + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict(type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(1024, 1024), + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict( + samples_per_gpu=2, + 
workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) +optimizer_config = dict(grad_clip=None) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l4_maskrcnn_3x' diff --git a/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l1_retinanet_1x.py b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l1_retinanet_1x.py new file mode 100644 index 0000000..687b097 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l1_retinanet_1x.py @@ -0,0 +1,115 @@ +_base_ = [ + '../../_base_/datasets/coco_detection.py', + '../../_base_/schedules/schedule_1x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 216 +model = dict( + type='RetinaNet', + backbone=dict( + type='GPViTAdapterSingleStage', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L1', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims], + out_channels=256, + add_extra_convs='on_output', + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + 
custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l1_retinanet_1x' \ No newline at end of file diff --git a/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l1_retinanet_3x.py b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l1_retinanet_3x.py new file mode 100644 index 0000000..c373335 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l1_retinanet_3x.py @@ -0,0 +1,149 @@ +_base_ = [ + '../../_base_/datasets/coco_detection.py', + '../../_base_/schedules/schedule_3x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 216 +model = dict( + type='RetinaNet', + backbone=dict( + type='GPViTAdapterSingleStage', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L1', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims], + out_channels=256, + add_extra_convs='on_output', + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + + +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='AutoAugment', + policies=[ + [ + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict(type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict(type='Resize', + 
img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(1024, 1024), + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + +find_unused_parameters = True +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l1_retinanet_3x' \ No newline at end of file diff --git a/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l2_retinanet_1x.py b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l2_retinanet_1x.py new file mode 100644 index 0000000..6833bec --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l2_retinanet_1x.py @@ -0,0 +1,115 @@ +_base_ = [ + '../../_base_/datasets/coco_detection.py', + '../../_base_/schedules/schedule_1x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 348 +model = dict( + type='RetinaNet', + backbone=dict( + type='GPViTAdapterSingleStage', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L2', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims], + out_channels=256, + add_extra_convs='on_output', + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# 
augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l2_retinanet_1x' \ No newline at end of file diff --git a/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l2_retinanet_3x.py b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l2_retinanet_3x.py new file mode 100644 index 0000000..d42864a --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l2_retinanet_3x.py @@ -0,0 +1,149 @@ +_base_ = [ + '../../_base_/datasets/coco_detection.py', + '../../_base_/schedules/schedule_3x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 348 +model = dict( + type='RetinaNet', + backbone=dict( + type='GPViTAdapterSingleStage', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L2', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims], + out_channels=256, + add_extra_convs='on_output', + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + + +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='AutoAugment', + policies=[ + [ + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict(type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(1024, 1024), + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + +find_unused_parameters = True +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l2_retinanet_3x' \ No newline at end of file diff --git a/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l3_retinanet_1x.py b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l3_retinanet_1x.py new file mode 100644 index 0000000..ea033c6 --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l3_retinanet_1x.py @@ -0,0 +1,115 @@ +_base_ = [ + '../../_base_/datasets/coco_detection.py', + '../../_base_/schedules/schedule_1x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 432 +model = dict( + type='RetinaNet', + backbone=dict( + type='GPViTAdapterSingleStage', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L3', + drop_path_rate=0.1, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims], + out_channels=256, + add_extra_convs='on_output', + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, 
.0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l3_retinanet_1x' \ No newline at end of file diff --git a/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l3_retinanet_3x.py b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l3_retinanet_3x.py new file mode 100644 index 0000000..f0885ef --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l3_retinanet_3x.py @@ -0,0 +1,149 @@ +_base_ = [ + '../../_base_/datasets/coco_detection.py', + '../../_base_/schedules/schedule_3x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 432 +model = dict( + type='RetinaNet', + backbone=dict( + type='GPViTAdapterSingleStage', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L3', + drop_path_rate=0.2, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims], + out_channels=256, + add_extra_convs='on_output', + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + 
use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + + +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='AutoAugment', + policies=[ + [ + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict(type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(1024, 1024), + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + +find_unused_parameters = True +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l3_retinanet_3x' \ No newline at end of file diff --git a/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l4_retinanet_1x.py b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l4_retinanet_1x.py new file mode 100644 index 0000000..5b1592e --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l4_retinanet_1x.py @@ -0,0 +1,115 @@ +_base_ = [ + '../../_base_/datasets/coco_detection.py', + '../../_base_/schedules/schedule_1x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 624 +model = dict( + type='RetinaNet', + backbone=dict( + type='GPViTAdapterSingleStage', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L4', + drop_path_rate=0.1, + 
out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims], + out_channels=256, + add_extra_convs='on_output', + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l4_retinanet_1x' \ No newline at end of file diff --git a/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l4_retinanet_3x.py b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l4_retinanet_3x.py new file mode 100644 index 0000000..d92440f --- /dev/null +++ b/downstream/mmdetection/configs/gpvit/retinanet/gpvit_l4_retinanet_3x.py @@ -0,0 +1,149 @@ +_base_ = [ + '../../_base_/datasets/coco_detection.py', + '../../_base_/schedules/schedule_3x.py', + '../../_base_/default_runtime.py' +] + +checkpoint_url = '' +embed_dims = 624 +model = dict( + type='RetinaNet', + backbone=dict( + type='GPViTAdapterSingleStage', + conv_inplane=64, + n_points=4, + deform_num_heads=6, + cffn_ratio=0.25, + deform_ratio=1.0, + interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]], + arch='L4', + drop_path_rate=0.2, + out_indices=(11,), + final_norm=False, + init_cfg=dict(type='Pretrained', 
checkpoint=checkpoint_url, prefix="backbone."), + convert_syncbn=True), + neck=dict( + type='FPN', + in_channels=[embed_dims, embed_dims, embed_dims], + out_channels=256, + add_extra_convs='on_output', + num_outs=5, + norm_cfg=dict(type='SyncBN', requires_grad=True)), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + + +# optimizer +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='AutoAugment', + policies=[ + [ + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict(type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict(type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ] + ]), + dict(type='RandomCrop', + crop_type='absolute_range', + crop_size=(1024, 1024), + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'level_embed': dict(decay_mult=0.), + 'pos_embed': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'bias': dict(decay_mult=0.), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + +find_unused_parameters = True +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +fp16 = dict(loss_scale=dict(init_scale=512)) +checkpoint_config = dict( + interval=1, + max_keep_ckpts=3, + save_last=True, +) + +work_dir = 'work_dirs/gpvit_l4_retinanet_3x' \ No newline at end of file diff --git a/downstream/mmdetection/configs/grid_rcnn/README.md 
b/downstream/mmdetection/configs/grid_rcnn/README.md new file mode 100644 index 0000000..e844021 --- /dev/null +++ b/downstream/mmdetection/configs/grid_rcnn/README.md @@ -0,0 +1,47 @@ +# Grid R-CNN + +> [Grid R-CNN](https://arxiv.org/abs/1811.12030) + + + +## Abstract + +This paper proposes a novel object detection framework named Grid R-CNN, which adopts a grid guided localization mechanism for accurate object detection. Different from traditional regression-based methods, Grid R-CNN captures spatial information explicitly and enjoys the position-sensitive property of a fully convolutional architecture. Instead of using only two independent points, we design a multi-point supervision formulation to encode more clues in order to reduce the impact of inaccurate prediction of specific points. To take full advantage of the correlation of points in a grid, we propose a two-stage information fusion strategy to fuse feature maps of neighboring grid points. The grid guided localization approach is easily extended to different state-of-the-art detection frameworks. Grid R-CNN leads to high-quality object localization, and experiments demonstrate that it achieves a 4.1% AP gain at IoU=0.8 and a 10.0% AP gain at IoU=0.9 on the COCO benchmark compared to Faster R-CNN with a Res50 backbone and FPN architecture. + +Grid R-CNN is a well-performing object detection framework. It transforms the traditional box offset regression problem into a grid point estimation problem. With the guidance of the grid points, it can obtain high-quality localization results. However, the speed of Grid R-CNN is not satisfactory. In this technical report we present Grid R-CNN Plus, a better and faster version of Grid R-CNN. We have made several updates that significantly speed up the framework and simultaneously improve the accuracy. On the COCO dataset, the Res50-FPN based Grid R-CNN Plus detector achieves an mAP of 40.4%, outperforming the baseline on the same model by 3.0 points with similar inference time. + +
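The grid mechanism can be hard to picture from the abstract alone. As a rough, illustrative sketch (not the `GridHead` implementation configured later in this diff, which predicts 9 grid points with `grid_points=9` and fuses neighboring point features), a box can be thought of as being recovered from per-point heatmaps like this:

```python
import torch

def decode_box_from_grid_heatmaps(heatmaps, roi):
    """Illustration only: heatmaps is (9, S, S), one score map per grid point for a
    single RoI; roi is (x1, y1, x2, y2) in image coordinates."""
    n, s, _ = heatmaps.shape
    x1, y1, x2, y2 = roi
    flat_idx = heatmaps.reshape(n, -1).argmax(dim=1)      # most confident cell per point
    ys, xs = flat_idx // s, flat_idx % s
    # map heatmap cells back into the RoI, then take the tightest enclosing box
    px = x1 + (xs.float() + 0.5) / s * (x2 - x1)
    py = y1 + (ys.float() + 0.5) / s * (y2 - y1)
    return torch.stack([px.min(), py.min(), px.max(), py.max()])

# e.g. decode_box_from_grid_heatmaps(torch.rand(9, 56, 56), (10., 20., 110., 180.))
```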
    + +## Results and Models + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | 2x | 5.1 | 15.0 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130_221140.log.json) | +| R-101 | 2x | 7.0 | 12.6 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309-d6eca030.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309_164224.log.json) | +| X-101-32x4d | 2x | 8.3 | 10.8 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130-d8f0e3ff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130_215413.log.json) | +| X-101-64x4d | 2x | 11.3 | 7.7 | 43.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204-ec76a754.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204_080641.log.json) | + +**Notes:** + +- All models are trained with 8 GPUs instead of 32 GPUs in the original paper. +- The warming up lasts for 1 epoch and `2x` here indicates 25 epochs. 
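The `2x` schedule described in the notes is spelled out in `grid_rcnn_r50_fpn_gn-head_2x_coco.py`, added later in this diff; for quick reference, the relevant excerpt is:

```python
# From grid_rcnn_r50_fpn_gn-head_2x_coco.py: a linear warm-up (about one epoch,
# per the note above), lr steps at epochs 17 and 23, and 25 epochs in total.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=3665,
    warmup_ratio=1.0 / 80,
    step=[17, 23])
runner = dict(type='EpochBasedRunner', max_epochs=25)
```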
+ +## Citation + +```latex +@inproceedings{lu2019grid, + title={Grid r-cnn}, + author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} + +@article{lu2019grid, + title={Grid R-CNN Plus: Faster and Better}, + author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, + journal={arXiv preprint arXiv:1906.05688}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py new file mode 100644 index 0000000..1bb5889 --- /dev/null +++ b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py new file mode 100644 index 0000000..4aa00ec --- /dev/null +++ b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = ['grid_rcnn_r50_fpn_gn-head_2x_coco.py'] +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8, 11]) +checkpoint_config = dict(interval=1) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py new file mode 100644 index 0000000..df63cd5 --- /dev/null +++ b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py @@ -0,0 +1,131 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='GridRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='GridRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + with_reg=False, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False), + grid_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, 
sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + grid_head=dict( + type='GridHead', + grid_points=9, + num_convs=8, + in_channels=256, + point_feat_channels=64, + norm_cfg=dict(type='GN', num_groups=36), + loss_grid=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_radius=1, + pos_weight=-1, + max_num_grid=192, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.03, + nms=dict(type='nms', iou_threshold=0.3), + max_per_img=100))) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=3665, + warmup_ratio=1.0 / 80, + step=[17, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=25) diff --git a/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py new file mode 100644 index 0000000..3bc8516 --- /dev/null +++ b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py @@ -0,0 +1,24 @@ +_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=3665, + warmup_ratio=1.0 / 80, + step=[17, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=25) diff --git a/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py new file mode 100644 index 0000000..c78f8f6 --- /dev/null +++ b/downstream/mmdetection/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py @@ -0,0 +1,13 @@ +_base_ = './grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/grid_rcnn/metafile.yml b/downstream/mmdetection/configs/grid_rcnn/metafile.yml new file mode 100644 index 0000000..d1aa851 --- /dev/null +++ b/downstream/mmdetection/configs/grid_rcnn/metafile.yml @@ 
-0,0 +1,101 @@ +Collections: + - Name: Grid R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RPN + - Dilated Convolution + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1906.05688 + Title: 'Grid R-CNN' + README: configs/grid_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/grid_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: grid_rcnn_r50_fpn_gn-head_2x_coco + In Collection: Grid R-CNN + Config: configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py + Metadata: + Training Memory (GB): 5.1 + inference time (ms/im): + - value: 66.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth + + - Name: grid_rcnn_r101_fpn_gn-head_2x_coco + In Collection: Grid R-CNN + Config: configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 79.37 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309-d6eca030.pth + + - Name: grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco + In Collection: Grid R-CNN + Config: configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py + Metadata: + Training Memory (GB): 8.3 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130-d8f0e3ff.pth + + - Name: grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco + In Collection: Grid R-CNN + Config: configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py + Metadata: + Training Memory (GB): 11.3 + inference time (ms/im): + - value: 129.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204-ec76a754.pth diff --git a/downstream/mmdetection/configs/groie/README.md b/downstream/mmdetection/configs/groie/README.md new file mode 100644 index 0000000..4a1eba0 --- /dev/null +++ b/downstream/mmdetection/configs/groie/README.md @@ -0,0 +1,72 @@ +# GRoIE + +> [A novel Region of Interest Extraction Layer for Instance Segmentation](https://arxiv.org/abs/2004.13665) + + + +## Abstract + +Given the wide diffusion of deep neural network architectures for computer vision tasks, several new applications are nowadays more and more feasible. Among them, a particular attention has been recently given to instance segmentation, by exploiting the results achievable by two-stage networks (such as Mask R-CNN or Faster R-CNN), derived from R-CNN. 
In these complex architectures, a crucial role is played by the Region of Interest (RoI) extraction layer, devoted to extracting a coherent subset of features from a single Feature Pyramid Network (FPN) layer attached on top of a backbone. +This paper is motivated by the need to overcome the limitations of existing RoI extractors, which select only one (the best) layer from the FPN. Our intuition is that all the layers of FPN retain useful information. Therefore, the proposed layer (called Generic RoI Extractor - GRoIE) introduces non-local building blocks and attention mechanisms to boost the performance. +A comprehensive ablation study at the component level is conducted to find the best set of algorithms and parameters for the GRoIE layer. Moreover, GRoIE can be integrated seamlessly with every two-stage architecture for both object detection and instance segmentation tasks. Therefore, the improvements brought about by the use of GRoIE in different state-of-the-art architectures are also evaluated. The proposed layer yields up to a 1.1% AP improvement on bounding box detection and a 1.7% AP improvement on instance segmentation. +
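To make the aggregation idea concrete, here is a minimal sketch under stated assumptions (it is not the `GenericRoIExtractor` wired up in the configs below, which additionally applies a `ConvModule` pre-processing and a `GeneralizedAttention` post-processing step): the same RoIs are pooled from every FPN level and the results are summed, instead of picking one level per RoI.

```python
import torch
from torchvision.ops import roi_align

def sum_pool_all_levels(fpn_feats, rois, out_size=7, strides=(4, 8, 16, 32)):
    """fpn_feats: list of (N, C, H, W) maps, one per level; rois: (K, 5) rows of
    [batch_idx, x1, y1, x2, y2] in image coordinates."""
    pooled = None
    for feat, stride in zip(fpn_feats, strides):
        # pool the same RoIs from every pyramid level (cf. aggregation='sum')
        level = roi_align(feat, rois, output_size=out_size,
                          spatial_scale=1.0 / stride, sampling_ratio=2)
        pooled = level if pooled is None else pooled + level
    return pooled  # (K, C, out_size, out_size)

# usage sketch:
# feats = [torch.randn(1, 256, 200, 168), torch.randn(1, 256, 100, 84),
#          torch.randn(1, 256, 50, 42), torch.randn(1, 256, 25, 21)]
# rois = torch.tensor([[0., 32., 32., 256., 384.]])
# out = sum_pool_all_levels(feats, rois)   # -> (1, 256, 7, 7)
```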
    + +## Introduction + +By Leonardo Rossi, Akbar Karimi and Andrea Prati from +[IMPLab](http://implab.ce.unipr.it/). + +We provide configs to reproduce the results in the paper for +"*A novel Region of Interest Extraction Layer for Instance Segmentation*" +on COCO object detection. + +This paper is motivated by the need to overcome to the limitations of existing +RoI extractors which select only one (the best) layer from FPN. + +Our intuition is that all the layers of FPN retain useful information. + +Therefore, the proposed layer (called Generic RoI Extractor - **GRoIE**) +introduces non-local building blocks and attention mechanisms to boost the +performance. + +## Results and Models + +The results on COCO 2017 minival (5k images) are shown in the below table. + +### Application of GRoIE to different architectures + +| Backbone | Method | Lr schd | box AP | mask AP | Config | Download | +| :-------: | :-------------: | :-----: | :----: | :-----: | :---------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | Faster Original | 1x | 37.4 | | [config](../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| R-50-FPN | + GRoIE | 1x | 38.3 | | [config](./faster_rcnn_r50_fpn_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) | +| R-50-FPN | Grid R-CNN | 1x | 39.1 | | [config](./grid_rcnn_r50_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_1x_coco/grid_rcnn_r50_fpn_gn-head_1x_coco_20200605_202059-64f00ee8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_1x_coco/grid_rcnn_r50_fpn_gn-head_1x_coco_20200605_202059.log.json) | +| R-50-FPN | + GRoIE | 1x | | | [config](./grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py) | | +| R-50-FPN | Mask R-CNN | 1x | 38.2 | 34.7 | [config](../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | +| R-50-FPN | + GRoIE | 1x | 39.0 | 36.0 | [config](./mask_rcnn_r50_fpn_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) | +| R-50-FPN | GC-Net | 1x | 40.7 | 36.5 | [config](../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) | +| R-50-FPN | + GRoIE | 1x | 41.0 | 37.8 | [config](./mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) | +| R-101-FPN | GC-Net | 1x | 42.2 | 37.8 | [config](../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) | +| R-101-FPN | + GRoIE | 1x | 42.6 | 38.7 | [config](./mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507.log.json) | + +## Citation + +If you use this work or benchmark in your research, please cite this project. + +```latex +@inproceedings{rossi2021novel, + title={A novel region of interest extraction layer for instance segmentation}, + author={Rossi, Leonardo and Karimi, Akbar and Prati, Andrea}, + booktitle={2020 25th International Conference on Pattern Recognition (ICPR)}, + pages={2203--2209}, + year={2021}, + organization={IEEE} +} +``` + +## Contact + +The implementation of GRoIE is currently maintained by +[Leonardo Rossi](https://github.com/hachreak/). 
diff --git a/downstream/mmdetection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py b/downstream/mmdetection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py new file mode 100644 index 0000000..0fc528b --- /dev/null +++ b/downstream/mmdetection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py @@ -0,0 +1,25 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/downstream/mmdetection/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py b/downstream/mmdetection/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py new file mode 100644 index 0000000..8e4b4ab --- /dev/null +++ b/downstream/mmdetection/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = '../grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)), + grid_roi_extractor=dict( + type='GenericRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/downstream/mmdetection/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py b/downstream/mmdetection/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py new file mode 100644 index 0000000..8b83722 --- /dev/null +++ b/downstream/mmdetection/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = '../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)), + mask_roi_extractor=dict( + type='GenericRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + 
padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/downstream/mmdetection/configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py b/downstream/mmdetection/configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py new file mode 100644 index 0000000..81dfb48 --- /dev/null +++ b/downstream/mmdetection/configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)), + mask_roi_extractor=dict( + type='GenericRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/downstream/mmdetection/configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py b/downstream/mmdetection/configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py new file mode 100644 index 0000000..852c5ca --- /dev/null +++ b/downstream/mmdetection/configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = '../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py' +# model settings +model = dict( + roi_head=dict( + bbox_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='sum', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)), + mask_roi_extractor=dict( + type='GenericRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + pre_cfg=dict( + type='ConvModule', + in_channels=256, + out_channels=256, + kernel_size=5, + padding=2, + inplace=False, + ), + post_cfg=dict( + type='GeneralizedAttention', + in_channels=256, + spatial_range=-1, + num_heads=6, + attention_type='0100', + kv_stride=2)))) diff --git a/downstream/mmdetection/configs/groie/metafile.yml b/downstream/mmdetection/configs/groie/metafile.yml new file mode 100644 index 0000000..269cb39 --- /dev/null +++ b/downstream/mmdetection/configs/groie/metafile.yml @@ -0,0 +1,93 @@ +Collections: + - Name: GRoIE + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Generic RoI Extractor + - FPN + - RPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/2004.13665 + Title: 'A novel Region of Interest 
Extraction Layer for Instance Segmentation' + README: configs/groie/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/roi_extractors/groie.py#L15 + Version: v2.1.0 + +Models: + - Name: faster_rcnn_r50_fpn_groie_1x_coco + In Collection: GRoIE + Config: configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth + + - Name: grid_rcnn_r50_fpn_gn-head_groie_1x_coco + In Collection: GRoIE + Config: configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + + - Name: mask_rcnn_r50_fpn_groie_1x_coco + In Collection: GRoIE + Config: configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth + + - Name: mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco + In Collection: GRoIE + Config: configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth + + - Name: mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco + In Collection: GRoIE + Config: configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth diff --git a/downstream/mmdetection/configs/guided_anchoring/README.md b/downstream/mmdetection/configs/guided_anchoring/README.md new file mode 100644 index 0000000..563e43f --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/README.md @@ -0,0 +1,59 @@ +# Guided Anchoring + +> [Region Proposal by Guided Anchoring](https://arxiv.org/abs/1901.03278) + + + +## Abstract + +Region anchors are the cornerstone of modern object detection techniques. State-of-the-art detectors mostly rely on a dense anchoring scheme, where anchors are sampled uniformly over the spatial domain with a predefined set of scales and aspect ratios. In this paper, we revisit this foundational stage. Our study shows that it can be done much more effectively and efficiently. Specifically, we present an alternative scheme, named Guided Anchoring, which leverages semantic features to guide the anchoring. 
The proposed method jointly predicts the locations where the centers of objects of interest are likely to exist as well as the scales and aspect ratios at different locations. On top of the predicted anchor shapes, we mitigate the feature inconsistency with a feature adaption module. We also study the use of high-quality proposals to improve detection performance. The anchoring scheme can be seamlessly integrated into proposal methods and detectors. With Guided Anchoring, we achieve 9.1% higher recall on MS COCO with 90% fewer anchors than the RPN baseline. We also adopt Guided Anchoring in Fast R-CNN, Faster R-CNN and RetinaNet, improving the detection mAP by 2.2%, 2.7% and 1.2% respectively. +
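As a minimal sketch of the sparse anchoring idea, under the assumption of one location-probability map and one shape prediction per FPN level (this is not the `GARPNHead`/`GARetinaHead` code configured below, which also uses `octave_base_scale` and a feature adaption module), anchors could be generated as follows:

```python
import torch

def sparse_anchors(loc_prob, shape_pred, stride, loc_thr=0.01):
    """loc_prob: (H, W) sigmoid scores for object centers; shape_pred: (2, H, W)
    predicted log-scale (dw, dh) for the anchor at each cell."""
    ys, xs = torch.nonzero(loc_prob > loc_thr, as_tuple=True)  # keep only likely centers
    cx = (xs.float() + 0.5) * stride
    cy = (ys.float() + 0.5) * stride
    w = stride * torch.exp(shape_pred[0, ys, xs])              # decode shapes relative
    h = stride * torch.exp(shape_pred[1, ys, xs])              # to the level's stride
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=1)

# e.g. sparse_anchors(torch.rand(100, 152), torch.randn(2, 100, 152), stride=8)
```

The `loc_filter_thr=0.01` entries in the GA configs below correspond to `loc_thr` in this sketch: locations below the threshold are filtered out, which is how the anchor count drops so sharply.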
    + +## Results and Models + +The results on COCO 2017 val is shown in the below table. (results on test-dev are usually slightly higher than val). + +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AR 1000 | Config | Download | +| :----: | :-------------: | :-----: | :-----: | :------: | :------------: | :-----: | :-----------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| GA-RPN | R-50-FPN | caffe | 1x | 5.3 | 15.8 | 68.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531-899008a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531_011819.log.json) | +| GA-RPN | R-101-FPN | caffe | 1x | 7.3 | 13.0 | 69.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531-ca9ba8fb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531_011812.log.json) | +| GA-RPN | X-101-32x4d-FPN | pytorch | 1x | 8.5 | 10.0 | 70.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220-c28d1b18.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220_221326.log.json) | +| GA-RPN | X-101-64x4d-FPN | pytorch | 1x | 7.1 | 7.5 | 71.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225-3c6e1aa2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225_152704.log.json) | + +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------------: | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| GA-Faster RCNN | R-50-FPN | caffe | 1x | 5.5 | | 39.6 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718.log.json) | +| GA-Faster RCNN | R-101-FPN | caffe | 1x | 7.5 | | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_bbox_mAP-0.415_20200505_115528-fb82e499.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_20200505_115528.log.json) | +| GA-Faster RCNN | X-101-32x4d-FPN | pytorch | 1x | 8.7 | 9.7 | 43.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215-1ded9da3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215_184547.log.json) | +| GA-Faster RCNN | X-101-64x4d-FPN | pytorch | 1x | 11.8 | 7.3 | 43.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215-0fa7bde7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215_104455.log.json) | +| GA-RetinaNet | R-50-FPN | caffe | 1x | 3.5 | 16.8 | 36.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020_225450.log.json) | +| GA-RetinaNet | R-101-FPN | caffe | 1x | 5.5 | 12.9 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531-6266453c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531_012847.log.json) | +| GA-RetinaNet | X-101-32x4d-FPN | pytorch | 1x | 6.9 | 10.6 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219-40c56caa.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219_223025.log.json) | +| GA-RetinaNet | X-101-64x4d-FPN | pytorch | 1x | 9.9 | 7.7 | 41.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226-ef9f7f1f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226_221123.log.json) | + +- In the Guided Anchoring paper, `score_thr` is set to 0.001 in Fast/Faster RCNN and 0.05 in RetinaNet for both baselines and Guided Anchoring. + +- Performance on COCO test-dev benchmark are shown as follows. + +| Method | Backbone | Style | Lr schd | Aug Train | Score thr | AP | AP_50 | AP_75 | AP_small | AP_medium | AP_large | Download | +| :------------: | :-------: | :---: | :-----: | :-------: | :-------: | :-: | :---: | :---: | :------: | :-------: | :------: | :------: | +| GA-Faster RCNN | R-101-FPN | caffe | 1x | F | 0.05 | | | | | | | | +| GA-Faster RCNN | R-101-FPN | caffe | 1x | F | 0.001 | | | | | | | | +| GA-RetinaNet | R-101-FPN | caffe | 1x | F | 0.05 | | | | | | | | +| GA-RetinaNet | R-101-FPN | caffe | 2x | T | 0.05 | | | | | | | | + +## Citation + +We provide config files to reproduce the results in the CVPR 2019 paper for [Region Proposal by Guided Anchoring](https://arxiv.org/abs/1901.03278). + +```latex +@inproceedings{wang2019region, + title={Region Proposal by Guided Anchoring}, + author={Jiaqi Wang and Kai Chen and Shuo Yang and Chen Change Loy and Dahua Lin}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..8fc203c --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,65 @@ +_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + roi_head=dict( + bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), + sampler=dict(num=256))), + test_cfg=dict(rcnn=dict(score_thr=1e-3))) +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=300), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict(type='LoadProposals', num_max_proposals=None), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img', 'proposals']), + ]) +] +data = dict( + train=dict( + proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl', + pipeline=train_pipeline), + val=dict( + proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', + pipeline=test_pipeline), + test=dict( + proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', + pipeline=test_pipeline)) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..a40e7c6 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './ga_faster_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..b0add92 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,65 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='GARPNHead', + in_channels=256, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.14, 0.14]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.11, 0.11]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + roi_head=dict( + bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + center_ratio=0.2, + ignore_ratio=0.5), + rpn_proposal=dict(nms_post=1000, max_per_img=300), + rcnn=dict( + assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), + sampler=dict(type='RandomSampler', num=256))), + test_cfg=dict( + rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) +optimizer_config = dict( 
+ _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py new file mode 100644 index 0000000..e3d8238 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py @@ -0,0 +1,65 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='GARPNHead', + in_channels=256, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.14, 0.14]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.11, 0.11]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + roi_head=dict( + bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + center_ratio=0.2, + ignore_ratio=0.5), + rpn_proposal=dict(nms_post=1000, max_per_img=300), + rcnn=dict( + assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), + sampler=dict(type='RandomSampler', num=256))), + test_cfg=dict( + rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..f1dda94 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga_faster_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..fb9e2af --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga_faster_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + 
style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..1b1cccd --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py new file mode 100644 index 0000000..260895b --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py @@ -0,0 +1,169 @@ +_base_ = '../_base_/default_runtime.py' + +# model settings +model = dict( + type='RetinaNet', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5), + bbox_head=dict( + type='GARetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0))) +# training and testing settings +train_cfg = dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + center_ratio=0.2, + ignore_ratio=0.5, + debug=False) +test_cfg = dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 480), (1333, 960)], + 
keep_ratio=True, + multiscale_mode='range'), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[16, 22]) +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..3351201 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,62 @@ +_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py' +model = dict( + bbox_head=dict( + _delete_=True, + type='GARetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + 
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0), + center_ratio=0.2, + ignore_ratio=0.5)) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000..7694723 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,62 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +model = dict( + bbox_head=dict( + _delete_=True, + type='GARetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0), + center_ratio=0.2, + ignore_ratio=0.5)) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..c5eb34f --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga_retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..5c69a6f --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga_retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git 
a/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..039703e --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py' +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..7830894 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,58 @@ +_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='GARPNHead', + in_channels=256, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.14, 0.14]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.11, 0.11]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + center_ratio=0.2, + ignore_ratio=0.5)), + test_cfg=dict(rpn=dict(nms_post=1000))) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..27ab3e7 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_r50_fpn_1x_coco.py @@ -0,0 +1,58 @@ +_base_ = '../rpn/rpn_r50_fpn_1x_coco.py' +model = dict( + rpn_head=dict( + _delete_=True, + type='GARPNHead', + in_channels=256, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.14, 0.14]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.07, 0.07, 0.11, 0.11]), + loc_filter_thr=0.01, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + 
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + ga_assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ga_sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + center_ratio=0.2, + ignore_ratio=0.5)), + test_cfg=dict(rpn=dict(nms_post=1000))) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..cccc985 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga_rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..4e134d2 --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ga_rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/guided_anchoring/metafile.yml b/downstream/mmdetection/configs/guided_anchoring/metafile.yml new file mode 100644 index 0000000..3019d4a --- /dev/null +++ b/downstream/mmdetection/configs/guided_anchoring/metafile.yml @@ -0,0 +1,246 @@ +Collections: + - Name: Guided Anchoring + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Guided Anchoring + - ResNet + Paper: + URL: https://arxiv.org/abs/1901.03278 + Title: 'Region Proposal by Guided Anchoring' + README: configs/guided_anchoring/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/dense_heads/ga_retina_head.py#L10 + Version: v2.0.0 + +Models: + - Name: ga_rpn_r50_caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.3 + inference time (ms/im): + - value: 63.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Region Proposal + Dataset: COCO + Metrics: + AR@1000: 68.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531-899008a6.pth + + - Name: ga_rpn_r101_caffe_fpn_1x_coco.py + In Collection: Guided Anchoring + Config: 
configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py.py + Metadata: + Training Memory (GB): 7.3 + inference time (ms/im): + - value: 76.92 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Region Proposal + Dataset: COCO + Metrics: + AR@1000: 69.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531-ca9ba8fb.pth + + - Name: ga_rpn_x101_32x4d_fpn_1x_coco.py + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py.py + Metadata: + Training Memory (GB): 8.5 + inference time (ms/im): + - value: 100 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Region Proposal + Dataset: COCO + Metrics: + AR@1000: 70.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220-c28d1b18.pth + + - Name: ga_rpn_x101_64x4d_fpn_1x_coco.py.py + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py.py.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 133.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Region Proposal + Dataset: COCO + Metrics: + AR@1000: 70.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225-3c6e1aa2.pth + + - Name: ga_faster_r50_caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth + + - Name: ga_faster_r101_caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_bbox_mAP-0.415_20200505_115528-fb82e499.pth + + - Name: ga_faster_x101_32x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.7 + inference time (ms/im): + - value: 103.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215-1ded9da3.pth + + - Name: ga_faster_x101_64x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 11.8 + inference time (ms/im): + - value: 136.99 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box 
AP: 43.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215-0fa7bde7.pth + + - Name: ga_retinanet_r50_caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.5 + inference time (ms/im): + - value: 59.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth + + - Name: ga_retinanet_r101_caffe_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531-6266453c.pth + + - Name: ga_retinanet_x101_32x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.9 + inference time (ms/im): + - value: 94.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219-40c56caa.pth + + - Name: ga_retinanet_x101_64x4d_fpn_1x_coco + In Collection: Guided Anchoring + Config: configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.9 + inference time (ms/im): + - value: 129.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226-ef9f7f1f.pth diff --git a/downstream/mmdetection/configs/hrnet/README.md b/downstream/mmdetection/configs/hrnet/README.md new file mode 100644 index 0000000..e340c78 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/README.md @@ -0,0 +1,101 @@ +# HRNet + +> [Deep High-Resolution Representation Learning for Human Pose Estimation](https://arxiv.org/abs/1902.09212) + + + +## Abstract + +This is an official pytorch implementation of Deep High-Resolution Representation Learning for Human Pose Estimation. In this work, we are interested in the human pose estimation problem with a focus on learning reliable high-resolution representations. Most existing methods recover high-resolution representations from low-resolution representations produced by a high-to-low resolution network. Instead, our proposed network maintains high-resolution representations through the whole process. 
We start from a high-resolution subnetwork as the first stage, gradually add high-to-low resolution subnetworks one by one to form more stages, and connect the multi-resolution subnetworks in parallel. We conduct repeated multi-scale fusions such that each of the high-to-low resolution representations receives information from other parallel representations over and over, leading to rich high-resolution representations. As a result, the predicted keypoint heatmap is potentially more accurate and spatially more precise. We empirically demonstrate the effectiveness of our network through the superior pose estimation results over two benchmark datasets: the COCO keypoint detection dataset and the MPII Human Pose dataset. + +High-resolution representation learning plays an essential role in many vision problems, e.g., pose estimation and semantic segmentation. The high-resolution network (HRNet), recently developed for human pose estimation, maintains high-resolution representations through the whole process by connecting high-to-low resolution convolutions in parallel and produces strong high-resolution representations by repeatedly conducting fusions across parallel convolutions. +In this paper, we conduct a further study on high-resolution representations by introducing a simple yet effective modification and apply it to a wide range of vision tasks. We augment the high-resolution representation by aggregating the (upsampled) representations from all the parallel convolutions rather than only the representation from the high-resolution convolution as done in HRNet. This simple modification leads to stronger representations, evidenced by superior results. We show top results in semantic segmentation on Cityscapes, LIP, and PASCAL Context, and facial landmark detection on AFLW, COFW, 300W, and WFLW. In addition, we build a multi-level representation from the high-resolution representation and apply it to the Faster R-CNN object detection framework and the extended frameworks. The proposed approach achieves superior results to existing single-model networks on COCO object detection. +
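+A minimal usage sketch: running inference with one of the HRNet configs in this directory via MMDetection's high-level API. The checkpoint path is a placeholder; any of the released checkpoints linked in the tables below can be substituted.
+
+```python
+# Illustrative sketch only: assumes MMDetection (mmdet 2.x) is installed and the
+# script is run from the MMDetection root so the relative config path resolves.
+from mmdet.apis import init_detector, inference_detector
+
+config_file = 'configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py'
+checkpoint_file = 'checkpoints/faster_rcnn_hrnetv2p_w32_1x_coco.pth'  # placeholder path
+
+model = init_detector(config_file, checkpoint_file, device='cuda:0')
+# Any test image path works; for Faster R-CNN the result is a list of per-class bbox arrays.
+result = inference_detector(model, 'demo/demo.jpg')
+```
+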
    + +
    + +## Results and Models + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 1x | 6.6 | 13.4 | 36.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130_211246.log.json) | +| HRNetV2p-W18 | pytorch | 2x | 6.6 | - | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731.log.json) | +| HRNetV2p-W32 | pytorch | 1x | 9.0 | 12.4 | 40.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130_204442.log.json) | +| HRNetV2p-W32 | pytorch | 2x | 9.0 | - | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927.log.json) | +| HRNetV2p-W40 | pytorch | 1x | 10.4 | 10.5 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210_125315.log.json) | +| HRNetV2p-W40 | pytorch | 2x | 10.4 | - | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033.log.json) | + +### Mask R-CNN + +| Backbone 
| Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 1x | 7.0 | 11.7 | 37.7 | 34.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205_232523.log.json) | +| HRNetV2p-W18 | pytorch | 2x | 7.0 | - | 39.8 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212_134222.log.json) | +| HRNetV2p-W32 | pytorch | 1x | 9.4 | 11.3 | 41.2 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207_055017.log.json) | +| HRNetV2p-W32 | pytorch | 2x | 9.4 | - | 42.5 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213_150518.log.json) | +| HRNetV2p-W40 | pytorch | 1x | 10.9 | | 42.1 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646.log.json) | +| HRNetV2p-W40 | pytorch | 2x | 10.9 | | 42.8 | 38.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732.log.json) | + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| 
:----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 20e | 7.0 | 11.0 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210_105632.log.json) | +| HRNetV2p-W32 | pytorch | 20e | 9.4 | 11.0 | 43.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208_160511.log.json) | +| HRNetV2p-W40 | pytorch | 20e | 10.8 | | 43.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112.log.json) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 20e | 8.5 | 8.5 | 41.6 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210_093149.log.json) | +| HRNetV2p-W32 | pytorch | 20e | | 8.3 | 44.3 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043.log.json) | +| HRNetV2p-W40 | pytorch | 20e | 12.5 | | 45.1 | 39.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922.log.json) | + +### Hybrid Task Cascade (HTC) + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | 20e | 10.8 | 4.7 | 42.8 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210_182735.log.json) | +| HRNetV2p-W32 | pytorch | 20e | 13.1 | 4.9 | 45.4 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207_193153.log.json) | +| HRNetV2p-W40 | pytorch | 20e | 14.6 | | 46.4 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411.log.json) | + +### FCOS + +| Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :-----: | :-: | :------: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| HRNetV2p-W18 | pytorch | Y | N | 1x | 13.0 | 12.9 | 35.3 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710.log.json) | +| HRNetV2p-W18 | pytorch | Y | N | 2x | 13.0 | - | 38.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110.log.json) | +| HRNetV2p-W32 | pytorch | Y | N | 1x | 17.5 | 12.9 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730.log.json) | +| HRNetV2p-W32 | pytorch | Y | N | 2x | 17.5 | - | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133.log.json) | +| HRNetV2p-W18 | pytorch | Y | Y | 2x | 13.0 | 12.9 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651.log.json) | +| HRNetV2p-W32 | pytorch | Y | Y | 2x | 17.5 | 12.4 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846.log.json) | +| HRNetV2p-W48 | pytorch | Y | Y | 2x | 20.3 | 10.8 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752.log.json) | + +**Note:** + +- The `28e` schedule in HTC indicates decreasing the lr at 24 and 27 epochs, with a total of 28 epochs. +- HRNetV2 ImageNet pretrained models are in [HRNets for Image Classification](https://github.com/HRNet/HRNet-Image-Classification). + +## Citation + +```latex +@inproceedings{SunXLW19, + title={Deep High-Resolution Representation Learning for Human Pose Estimation}, + author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang}, + booktitle={CVPR}, + year={2019} +} + +@article{SunZJCXLMWLW19, + title={High-Resolution Representations for Labeling Pixels and Regions}, + author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao + and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang}, + journal = {CoRR}, + volume = {abs/1904.04514}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py b/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py new file mode 100644 index 0000000..839cf3e --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py @@ -0,0 +1,11 @@ +_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' +# model settings +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py b/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py new file mode 100644 index 0000000..9942602 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py @@ -0,0 +1,40 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py b/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py new file mode 100644 index 0000000..10d5e83 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py @@ -0,0 +1,12 
@@ +_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' +# model settings +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py b/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py new file mode 100644 index 0000000..ebd5e20 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py @@ -0,0 +1,11 @@ +_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' +# model settings +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py b/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py new file mode 100644 index 0000000..e7f89a9 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py @@ -0,0 +1,40 @@ +_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py b/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py new file mode 100644 index 0000000..265e8d6 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py @@ -0,0 +1,12 @@ +_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' +# model settings +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py new file mode 100644 index 0000000..1df2c3d --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' +# model settings +model = dict( + backbone=dict( + 
extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py new file mode 100644 index 0000000..a4b987a --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py @@ -0,0 +1,5 @@ +_base_ = './faster_rcnn_hrnetv2p_w18_1x_coco.py' + +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py new file mode 100644 index 0000000..be05809 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py @@ -0,0 +1,37 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py new file mode 100644 index 0000000..63c8717 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py new file mode 100644 index 0000000..886a7c9 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py new file mode 100644 index 0000000..585cc2c --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) 
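+The `_2x` config above is a pure schedule override: it inherits the model, dataset, and optimizer from its `_base_` 1x config and only redefines `lr_config` and `runner`, the same pattern the HTC `28e` variant further below follows. A minimal inspection sketch with mmcv, assuming the standard `configs/_base_` tree from upstream MMDetection sits alongside these files:
+
+```python
+# Hypothetical inspection sketch: compare a 1x config with its 2x schedule override.
+# Paths follow the layout introduced by this diff; the commented values assume the
+# stock schedule_1x.py defaults (lr steps [8, 11], 12 epochs).
+from mmcv import Config
+
+cfg_1x = Config.fromfile(
+    'downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py')
+cfg_2x = Config.fromfile(
+    'downstream/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py')
+
+print(cfg_1x.lr_config.step, cfg_1x.runner.max_epochs)  # [8, 11] 12
+print(cfg_2x.lr_config.step, cfg_2x.runner.max_epochs)  # [16, 22] 24 (only the schedule changed)
+```
+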
diff --git a/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py new file mode 100644 index 0000000..fd662bd --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py @@ -0,0 +1,10 @@ +_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py new file mode 100644 index 0000000..3497595 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py new file mode 100644 index 0000000..37bfdae --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py @@ -0,0 +1,10 @@ +_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py new file mode 100644 index 0000000..10617f2 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py @@ -0,0 +1,70 @@ +_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256, + stride=2, + num_outs=5)) +img_norm_cfg = dict( + mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + 
dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py new file mode 100644 index 0000000..7b38130 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py new file mode 100644 index 0000000..482f887 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py @@ -0,0 +1,39 @@ +_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' +img_norm_cfg = dict( + mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py new file mode 100644 index 0000000..0ae9dbe --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py @@ -0,0 +1,11 @@ +_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git 
a/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py b/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py new file mode 100644 index 0000000..3c2eb1d --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py @@ -0,0 +1,10 @@ +_base_ = './htc_hrnetv2p_w32_20e_coco.py' +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py b/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py new file mode 100644 index 0000000..545cb83 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py @@ -0,0 +1,37 @@ +_base_ = '../htc/htc_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py b/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py new file mode 100644 index 0000000..94bff1b --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py @@ -0,0 +1,11 @@ +_base_ = './htc_hrnetv2p_w32_20e_coco.py' +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py b/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py new file mode 100644 index 0000000..7067e8b --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py @@ -0,0 +1,4 @@ +_base_ = './htc_hrnetv2p_w40_20e_coco.py' +# learning policy +lr_config = dict(step=[24, 27]) +runner = dict(type='EpochBasedRunner', max_epochs=28) diff --git a/downstream/mmdetection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py b/downstream/mmdetection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py new file mode 100644 index 0000000..815f285 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py @@ -0,0 +1,4 @@ +_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py' +# learning policy +lr_config = dict(step=[24, 27]) +runner = dict(type='EpochBasedRunner', max_epochs=28) diff --git a/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py new file mode 100644 index 0000000..cb12200 
--- /dev/null +++ b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py @@ -0,0 +1,10 @@ +_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' +model = dict( + backbone=dict( + extra=dict( + stage2=dict(num_channels=(18, 36)), + stage3=dict(num_channels=(18, 36, 72)), + stage4=dict(num_channels=(18, 36, 72, 144))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), + neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py new file mode 100644 index 0000000..ca62682 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py new file mode 100644 index 0000000..d5f0eb5 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py @@ -0,0 +1,37 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), + neck=dict( + _delete_=True, + type='HRFPN', + in_channels=[32, 64, 128, 256], + out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py new file mode 100644 index 0000000..63d5d13 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py new file mode 100644 index 0000000..5a76f4b --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py @@ -0,0 +1,11 @@ +_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' +model = dict( + backbone=dict( + type='HRNet', + extra=dict( + stage2=dict(num_channels=(40, 80)), + stage3=dict(num_channels=(40, 80, 160)), + stage4=dict(num_channels=(40, 80, 160, 320))), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), + neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py new file mode 100644 index 0000000..3a2a510 --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = 
'./mask_rcnn_hrnetv2p_w40_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/hrnet/metafile.yml b/downstream/mmdetection/configs/hrnet/metafile.yml new file mode 100644 index 0000000..ac36efa --- /dev/null +++ b/downstream/mmdetection/configs/hrnet/metafile.yml @@ -0,0 +1,971 @@ +Models: + - Name: faster_rcnn_hrnetv2p_w18_1x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py + Metadata: + Training Memory (GB): 6.6 + inference time (ms/im): + - value: 74.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster_rcnn_hrnetv2p_w18_2x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py + Metadata: + Training Memory (GB): 6.6 + inference time (ms/im): + - value: 74.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster_rcnn_hrnetv2p_w32_1x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py + Metadata: + Training Memory (GB): 9.0 + inference time (ms/im): + - value: 80.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster_rcnn_hrnetv2p_w32_2x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py + Metadata: + Training Memory (GB): 9.0 + inference 
time (ms/im): + - value: 80.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster_rcnn_hrnetv2p_w40_1x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py + Metadata: + Training Memory (GB): 10.4 + inference time (ms/im): + - value: 95.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: faster_rcnn_hrnetv2p_w40_2x_coco + In Collection: Faster R-CNN + Config: configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py + Metadata: + Training Memory (GB): 10.4 + inference time (ms/im): + - value: 95.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask_rcnn_hrnetv2p_w18_1x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 85.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth + 
Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask_rcnn_hrnetv2p_w18_2x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 85.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask_rcnn_hrnetv2p_w32_1x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py + Metadata: + Training Memory (GB): 9.4 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask_rcnn_hrnetv2p_w32_2x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py + Metadata: + Training Memory (GB): 9.4 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask_rcnn_hrnetv2p_w40_1x_coco + In Collection: Mask R-CNN + Config: 
configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py + Metadata: + Training Memory (GB): 10.9 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: mask_rcnn_hrnetv2p_w40_2x_coco + In Collection: Mask R-CNN + Config: configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py + Metadata: + Training Memory (GB): 10.9 + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade_rcnn_hrnetv2p_w18_20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade_rcnn_hrnetv2p_w32_20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py + Metadata: + Training Memory (GB): 9.4 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution 
Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade_rcnn_hrnetv2p_w40_20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py + Metadata: + Training Memory (GB): 10.8 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade_mask_rcnn_hrnetv2p_w18_20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py + Metadata: + Training Memory (GB): 8.5 + inference time (ms/im): + - value: 117.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade_mask_rcnn_hrnetv2p_w32_20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py + Metadata: + inference time (ms/im): + - value: 120.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: cascade_mask_rcnn_hrnetv2p_w40_20e_coco + In Collection: Cascade R-CNN + Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py + Metadata: + Training Memory (GB): 12.5 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 
GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: htc_hrnetv2p_w18_20e_coco + In Collection: HTC + Config: configs/hrnet/htc_hrnetv2p_w18_20e_coco.py + Metadata: + Training Memory (GB): 10.8 + inference time (ms/im): + - value: 212.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: htc_hrnetv2p_w32_20e_coco + In Collection: HTC + Config: configs/hrnet/htc_hrnetv2p_w32_20e_coco.py + Metadata: + Training Memory (GB): 13.1 + inference time (ms/im): + - value: 204.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: htc_hrnetv2p_w40_20e_coco + In Collection: HTC + Config: configs/hrnet/htc_hrnetv2p_w40_20e_coco.py + Metadata: + Training Memory (GB): 14.6 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p_w18_gn-head_4x4_1x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 13.0 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p_w18_gn-head_4x4_2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 13.0 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p_w32_gn-head_4x4_1x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 17.5 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p_w32_gn-head_4x4_2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 17.5 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 
+ Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 13.0 + inference time (ms/im): + - value: 77.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 17.5 + inference time (ms/im): + - value: 80.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 + + - Name: fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco + In Collection: FCOS + Config: configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py + Metadata: + Training Resources: 4x V100 GPUs + Batch Size: 16 + Training Memory (GB): 20.3 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - HRNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth + Paper: + URL: https://arxiv.org/abs/1904.04514 + Title: 'Deep High-Resolution Representation Learning for Visual Recognition' + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 + Version: v2.0.0 diff --git a/downstream/mmdetection/configs/htc/README.md b/downstream/mmdetection/configs/htc/README.md new file mode 100644 index 0000000..03a89aa --- /dev/null +++ b/downstream/mmdetection/configs/htc/README.md @@ -0,0 +1,67 @@ +# HTC + +> [Hybrid Task Cascade for Instance Segmentation](https://arxiv.org/abs/1901.07518) + + + +## Abstract + +Cascade is a classic yet powerful architecture that has boosted performance on various tasks. However, how to introduce cascade to instance segmentation remains an open question. A simple combination of Cascade R-CNN and Mask R-CNN only brings limited gain. In exploring a more effective approach, we find that the key to a successful instance segmentation cascade is to fully leverage the reciprocal relationship between detection and segmentation. In this work, we propose a new framework, Hybrid Task Cascade (HTC), which differs in two important aspects: (1) instead of performing cascaded refinement on these two tasks separately, it interweaves them for a joint multi-stage processing; (2) it adopts a fully convolutional branch to provide spatial context, which can help distinguishing hard foreground from cluttered background. Overall, this framework can learn more discriminative features progressively while integrating complementary features together in each stage. Without bells and whistles, a single HTC obtains 38.4 and 1.5 improvement over a strong Cascade Mask R-CNN baseline on MSCOCO dataset. Moreover, our overall system achieves 48.6 mask AP on the test-challenge split, ranking 1st in the COCO 2018 Challenge Object Detection Task. + 
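The two design points in the abstract map directly onto the HTC configs later in this diff: `interleaved=True` and `mask_info_flow=True` in the `roi_head`, plus the `semantic_head`/`semantic_roi_extractor` entries for the spatial-context branch. A rough sketch of the stage loop to make the data flow concrete (the helper signatures are invented for illustration and are not mmdet's API):

```python
def htc_cascade(feats, proposals, bbox_heads, mask_heads, semantic_feat=None):
    """Schematic HTC stage loop (not the actual mmdet implementation).

    - interleaved: the mask head of stage t runs on the boxes already
      refined by the bbox head of the same stage t;
    - mask information flow: each mask head reuses the mask features
      produced by the previous stage;
    - an optional semantic feature map is fused into both branches.
    """
    losses, mask_feat = [], None
    for bbox_head, mask_head in zip(bbox_heads, mask_heads):
        boxes, bbox_loss = bbox_head(feats, proposals, semantic_feat)
        _, mask_feat, mask_loss = mask_head(feats, boxes, mask_feat,
                                            semantic_feat)
        losses.append((bbox_loss, mask_loss))
        proposals = boxes  # refined boxes seed the next stage
    return losses
```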
+ +
    + +## Introduction + +HTC requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +| | ├── stuffthingmaps +``` + +## Results and Models + +The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val) + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 8.2 | 5.8 | 42.3 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317_070435.log.json) | +| R-50-FPN | pytorch | 20e | 8.2 | - | 43.3 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319_070313.log.json) | +| R-101-FPN | pytorch | 20e | 10.2 | 5.5 | 44.8 | 39.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317_153107.log.json) | +| X-101-32x4d-FPN | pytorch | 20e | 11.4 | 5.0 | 46.1 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318_034519.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 14.5 | 4.4 | 47.0 | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318_081711.log.json) | + +- In the HTC paper and COCO 2018 Challenge, `score_thr` is set to 0.001 for both baselines and HTC. 
+- We use 8 GPUs with 2 images/GPU for R-50 and R-101 models, and 16 GPUs with 1 image/GPU for X-101 models. + If you would like to train X-101 HTC with 8 GPUs, you need to change the lr from 0.02 to 0.01. + +We also provide a powerful HTC with DCN and multi-scale training model. No testing augmentation is used. + +| Backbone | Style | DCN | training scales | Lr schd | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :---: | :-------------: | :-----: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| X-101-64x4d-FPN | pytorch | c3-c5 | 400~1400 | 20e | 50.4 | 43.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312_203410.log.json) | + +## Citation + +We provide config files to reproduce the results in the CVPR 2019 paper for [Hybrid Task Cascade](https://arxiv.org/abs/1901.07518). 
+ +```latex +@inproceedings{chen2019hybrid, + title={Hybrid task cascade for instance segmentation}, + author={Chen, Kai and Pang, Jiangmiao and Wang, Jiaqi and Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and Liu, Ziwei and Shi, Jianping and Ouyang, Wanli and Chen Change Loy and Dahua Lin}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/htc/htc_r101_fpn_20e_coco.py b/downstream/mmdetection/configs/htc/htc_r101_fpn_20e_coco.py new file mode 100644 index 0000000..b42297b --- /dev/null +++ b/downstream/mmdetection/configs/htc/htc_r101_fpn_20e_coco.py @@ -0,0 +1,9 @@ +_base_ = './htc_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/htc/htc_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/htc/htc_r50_fpn_1x_coco.py new file mode 100644 index 0000000..1e8e18a --- /dev/null +++ b/downstream/mmdetection/configs/htc/htc_r50_fpn_1x_coco.py @@ -0,0 +1,56 @@ +_base_ = './htc_without_semantic_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='FusedSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + loss_seg=dict( + type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2)))) +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='SegRescale', scale_factor=1 / 8), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict( + seg_prefix=data_root + 'stuffthingmaps/train2017/', + pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/htc/htc_r50_fpn_20e_coco.py b/downstream/mmdetection/configs/htc/htc_r50_fpn_20e_coco.py new file mode 100644 index 0000000..7d2e011 --- /dev/null +++ b/downstream/mmdetection/configs/htc/htc_r50_fpn_20e_coco.py @@ -0,0 +1,4 @@ +_base_ = './htc_r50_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py new file mode 100644 index 0000000..565104f --- 
/dev/null +++ b/downstream/mmdetection/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py @@ -0,0 +1,236 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='HybridTaskCascade', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='HybridTaskCascadeRoIHead', + interleaved=True, + mask_info_flow=True, + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=[ + dict( + type='HTCMaskHead', + with_conv_res=False, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + dict( + type='HTCMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)) + ]), + # model training and testing settings + 
train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py b/downstream/mmdetection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py new file mode 100644 index 0000000..0c834f2 --- /dev/null +++ b/downstream/mmdetection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py @@ -0,0 +1,19 @@ +_base_ = './htc_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) +data = dict(samples_per_gpu=1, workers_per_gpu=1) +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py b/downstream/mmdetection/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py new file mode 100644 index 0000000..8b0d962 --- /dev/null +++ b/downstream/mmdetection/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py @@ -0,0 +1,19 @@ +_base_ = './htc_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + 
norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) +data = dict(samples_per_gpu=1, workers_per_gpu=1) +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py b/downstream/mmdetection/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py new file mode 100644 index 0000000..c8d8703 --- /dev/null +++ b/downstream/mmdetection/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py @@ -0,0 +1,43 @@ +_base_ = './htc_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) +# dataset settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict( + type='Resize', + img_scale=[(1600, 400), (1600, 1400)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='SegRescale', scale_factor=1 / 8), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +data = dict( + samples_per_gpu=1, workers_per_gpu=1, train=dict(pipeline=train_pipeline)) +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/htc/metafile.yml b/downstream/mmdetection/configs/htc/metafile.yml new file mode 100644 index 0000000..acd038c --- /dev/null +++ b/downstream/mmdetection/configs/htc/metafile.yml @@ -0,0 +1,165 @@ +Collections: + - Name: HTC + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - HTC + - RPN + - ResNet + - ResNeXt + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1901.07518 + Title: 'Hybrid Task Cascade for Instance Segmentation' + README: configs/htc/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/htc.py#L6 + Version: v2.0.0 + +Models: + - Name: htc_r50_fpn_1x_coco + In Collection: HTC + Config: configs/htc/htc_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.2 + inference time (ms/im): + - value: 172.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth + + - Name: htc_r50_fpn_20e_coco + In Collection: HTC + Config: configs/htc/htc_r50_fpn_20e_coco.py + Metadata: + Training Memory (GB): 8.2 + inference time (ms/im): + - value: 172.41 + hardware: V100 + backend: PyTorch + batch 
size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth + + - Name: htc_r101_fpn_20e_coco + In Collection: HTC + Config: configs/htc/htc_r101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 10.2 + inference time (ms/im): + - value: 181.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth + + - Name: htc_x101_32x4d_fpn_16x1_20e_coco + In Collection: HTC + Config: configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py + Metadata: + Training Resources: 16x V100 GPUs + Batch Size: 16 + Training Memory (GB): 11.4 + inference time (ms/im): + - value: 200 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth + + - Name: htc_x101_64x4d_fpn_16x1_20e_coco + In Collection: HTC + Config: configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py + Metadata: + Training Resources: 16x V100 GPUs + Batch Size: 16 + Training Memory (GB): 14.5 + inference time (ms/im): + - value: 227.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth + + - Name: htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco + In Collection: HTC + Config: configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py + Metadata: + Training Resources: 16x V100 GPUs + Batch Size: 16 + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth diff --git a/downstream/mmdetection/configs/instaboost/README.md b/downstream/mmdetection/configs/instaboost/README.md new file mode 100644 index 0000000..82ed334 --- /dev/null +++ b/downstream/mmdetection/configs/instaboost/README.md @@ -0,0 +1,58 @@ +# Instaboost + +> [Instaboost: Boosting instance segmentation via probability map guided copy-pasting](https://arxiv.org/abs/1908.07801) + + + +## Abstract + +Instance segmentation requires a large number of training samples to achieve satisfactory performance and benefits from proper data augmentation. 
To enlarge the training set and increase the diversity, previous methods have investigated using data annotations from other domains (e.g. bbox, point) in a weakly supervised mechanism. In this paper, we present a simple, efficient and effective method to augment the training set using the existing instance mask annotations. Exploiting the pixel redundancy of the background, we are able to improve the performance of Mask R-CNN by 1.7 mAP on the COCO dataset and 3.3 mAP on the Pascal VOC dataset by simply introducing random jittering to objects. Furthermore, we propose a location probability map based approach to explore the feasible locations where objects can be placed based on local appearance similarity. With the guidance of such a map, we boost the performance of R101-Mask R-CNN on instance segmentation from 35.7 mAP to 37.9 mAP without modifying the backbone or network structure. Our method is simple to implement and does not increase the computational complexity. It can be integrated into the training pipeline of any instance segmentation model without affecting the training and inference efficiency. + +
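As a preview of the configs added later in this patch, the augmentation described above is exposed as a single `InstaBoost` transform placed right after `LoadImageFromFile` in the training pipeline. A minimal sketch, with the parameter values mirroring `mask_rcnn_r50_fpn_instaboost_4x_coco.py` below (the remaining transforms are elided; the inline comments give only the rough meaning of each knob):

```python
# Minimal sketch of an InstaBoost training pipeline; values mirror
# mask_rcnn_r50_fpn_instaboost_4x_coco.py from this directory.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),  # roughly: probabilities of the candidate actions
        scale=(0.8, 1.2),       # roughly: jitter scale range for pasted instances
        dx=15,
        dy=15,
        theta=(-1, 1),
        color_prob=0.5,
        hflag=False,
        aug_ratio=0.5),         # roughly: fraction of images that get augmented
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    # ... Resize, RandomFlip, Normalize, Pad, DefaultFormatBundle, Collect
]
data = dict(train=dict(pipeline=train_pipeline))
```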
    + +## Introduction + +Configs in this directory is the implementation for ICCV2019 paper "InstaBoost: Boosting Instance Segmentation Via Probability Map Guided Copy-Pasting" and provided by the authors of the paper. InstaBoost is a data augmentation method for object detection and instance segmentation. The paper has been released on [`arXiv`](https://arxiv.org/abs/1908.07801). + +## Usage + +### Requirements + +You need to install `instaboostfast` before using it. + +```shell +pip install instaboostfast +``` + +The code and more details can be found [here](https://github.com/GothicAi/Instaboost). + +### Integration with MMDetection + +InstaBoost have been already integrated in the data pipeline, thus all you need is to add or change **InstaBoost** configurations after **LoadImageFromFile**. We have provided examples like [this](mask_rcnn_r50_fpn_instaboost_4x#L121). You can refer to [`InstaBoostConfig`](https://github.com/GothicAi/InstaBoost-pypi#instaboostconfig) for more details. + +## Results and Models + +- All models were trained on `coco_2017_train` and tested on `coco_2017_val` for convenience of evaluation and comparison. In the paper, the results are obtained from `test-dev`. +- To balance accuracy and training time when using InstaBoost, models released in this page are all trained for 48 Epochs. Other training and testing configs strictly follow the original framework. +- For results and models in MMDetection V1.x, please refer to [Instaboost](https://github.com/GothicAi/Instaboost). + +| Network | Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-----------: | :-------------: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask R-CNN | R-50-FPN | 4x | 4.4 | 17.5 | 40.6 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307_223635.log.json) | +| Mask R-CNN | R-101-FPN | 4x | 6.4 | | 42.5 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738-f23f3a5f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738.log.json) | +| Mask R-CNN | X-101-64x4d-FPN | 4x | 10.7 | | 44.7 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947-8ed58c1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947.log.json) | +| Cascade R-CNN | R-101-FPN | 4x | 6.0 | 12.0 | 43.7 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-c19d98d9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307_223646.log.json) | + +## Citation + +```latex +@inproceedings{fang2019instaboost, + title={Instaboost: Boosting instance segmentation via probability map guided copy-pasting}, + author={Fang, Hao-Shu and Sun, Jianhua and Wang, Runzhong and Gou, Minghao and Li, Yong-Lu and Lu, Cewu}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={682--691}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py b/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py new file mode 100644 index 0000000..9d0515d --- /dev/null +++ b/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py b/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py new file mode 100644 index 0000000..a89a81f --- /dev/null +++ b/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py @@ -0,0 +1,28 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='InstaBoost', + action_candidate=('normal', 'horizontal', 'skip'), + action_prob=(1, 0, 0), + scale=(0.8, 1.2), + dx=15, + dy=15, + theta=(-1, 1), + color_prob=0.5, + hflag=False, + aug_ratio=0.5), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline)) +# learning policy +lr_config = dict(step=[32, 44]) +runner = dict(type='EpochBasedRunner', max_epochs=48) diff --git a/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py b/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py new file mode 100644 index 0000000..d67b799 --- /dev/null +++ 
b/downstream/mmdetection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py @@ -0,0 +1,14 @@ +_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py b/downstream/mmdetection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py new file mode 100644 index 0000000..ebbb43e --- /dev/null +++ b/downstream/mmdetection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py b/downstream/mmdetection/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py new file mode 100644 index 0000000..55ca62b --- /dev/null +++ b/downstream/mmdetection/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py @@ -0,0 +1,28 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='InstaBoost', + action_candidate=('normal', 'horizontal', 'skip'), + action_prob=(1, 0, 0), + scale=(0.8, 1.2), + dx=15, + dy=15, + theta=(-1, 1), + color_prob=0.5, + hflag=False, + aug_ratio=0.5), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline)) +# learning policy +lr_config = dict(step=[32, 44]) +runner = dict(type='EpochBasedRunner', max_epochs=48) diff --git a/downstream/mmdetection/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py b/downstream/mmdetection/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py new file mode 100644 index 0000000..2010f44 --- /dev/null +++ b/downstream/mmdetection/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/instaboost/metafile.yml b/downstream/mmdetection/configs/instaboost/metafile.yml new file mode 100644 index 0000000..325283d --- /dev/null +++ b/downstream/mmdetection/configs/instaboost/metafile.yml @@ -0,0 +1,99 @@ +Collections: + - Name: InstaBoost + Metadata: + Training Data: COCO + Training Techniques: + - InstaBoost + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Paper: + URL: https://arxiv.org/abs/1908.07801 + Title: 'Instaboost: Boosting instance segmentation 
via probability map guided copy-pasting' + README: configs/instaboost/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/datasets/pipelines/instaboost.py#L7 + Version: v2.0.0 + +Models: + - Name: mask_rcnn_r50_fpn_instaboost_4x_coco + In Collection: InstaBoost + Config: configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 57.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 48 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth + + - Name: mask_rcnn_r101_fpn_instaboost_4x_coco + In Collection: InstaBoost + Config: configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py + Metadata: + Training Memory (GB): 6.4 + Epochs: 48 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738-f23f3a5f.pth + + - Name: mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco + In Collection: InstaBoost + Config: configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py + Metadata: + Training Memory (GB): 10.7 + Epochs: 48 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947-8ed58c1b.pth + + - Name: cascade_mask_rcnn_r50_fpn_instaboost_4x_coco + In Collection: InstaBoost + Config: configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py + Metadata: + Training Memory (GB): 6.0 + inference time (ms/im): + - value: 83.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 48 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-c19d98d9.pth diff --git a/downstream/mmdetection/configs/lad/README.md b/downstream/mmdetection/configs/lad/README.md new file mode 100644 index 0000000..6cad7d9 --- /dev/null +++ b/downstream/mmdetection/configs/lad/README.md @@ -0,0 +1,44 @@ +# LAD + +> [Improving Object Detection by Label Assignment Distillation](https://arxiv.org/abs/2108.10520) + + + +## Abstract + +Label assignment in object detection aims to assign targets, foreground or background, to sampled regions in an image. Unlike labeling for image classification, this problem is not well defined due to the object's bounding box. In this paper, we investigate the problem from a perspective of distillation, hence we call Label Assignment Distillation (LAD). Our initial motivation is very simple, we use a teacher network to generate labels for the student. 
This can be achieved in two ways: either using the teacher's prediction as the direct targets (soft label), or through the hard labels dynamically assigned by the teacher (LAD). Our experiments reveal that: (i) LAD is more effective than soft-label, but they are complementary. (ii) Using LAD, a smaller teacher can also improve a larger student significantly, while soft-label can't. We then introduce Co-learning LAD, in which two networks simultaneously learn from scratch and the roles of teacher and student are dynamically interchanged. Using PAA-ResNet50 as a teacher, our LAD techniques can improve the detectors PAA-ResNet101 and PAA-ResNeXt101 to 46 AP and 47.5 AP on the COCO test-dev set. With a stronger teacher PAA-SwinB, we improve the student PAA-ResNet50 to 43.7 AP with only a 1x training schedule and the standard setting, and PAA-ResNet101 to 47.9 AP, significantly surpassing the current methods. + +
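In the configs that follow, this translates into a single `LAD` detector holding both networks: the student is described by the usual `backbone`/`neck`/`bbox_head` keys, while the pretrained teacher is attached through `teacher_ckpt`, `teacher_backbone`, `teacher_neck` and `teacher_bbox_head`. A heavily trimmed sketch of that layout; all omitted fields are as in `lad_r50_paa_r101_fpn_coco_1x.py` later in this patch:

```python
# Trimmed sketch of the LAD config layout; see lad_r50_paa_r101_fpn_coco_1x.py
# for the complete head, assigner and test settings.
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth'  # noqa
model = dict(
    type='LAD',
    # student: trained from an ImageNet-pretrained backbone
    backbone=dict(type='ResNet', depth=50,
                  init_cfg=dict(type='Pretrained',
                                checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048],
              out_channels=256, start_level=1,
              add_extra_convs='on_output', num_outs=5),
    bbox_head=dict(type='LADHead', num_classes=80, in_channels=256),
    # teacher: loaded from teacher_ckpt and used to assign labels for the student
    teacher_ckpt=teacher_ckpt,
    teacher_backbone=dict(type='ResNet', depth=101),
    teacher_neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048],
                      out_channels=256, start_level=1,
                      add_extra_convs='on_output', num_outs=5),
    teacher_bbox_head=dict(type='LADHead', num_classes=80, in_channels=256))
```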
    + +## Results and Models + +We provide config files to reproduce the object detection results in the +WACV 2022 paper for Improving Object Detection by Label Assignment +Distillation. + +### PAA with LAD + +| Teacher | Student | Training schedule | AP (val) | Config | +| :-----: | :-----: | :---------------: | :------: | :---------------------------------------------------: | +| -- | R-50 | 1x | 40.4 | | +| -- | R-101 | 1x | 42.6 | | +| R-101 | R-50 | 1x | 41.6 | [config](configs/lad/lad_r50_paa_r101_fpn_coco_1x.py) | +| R-50 | R-101 | 1x | 43.2 | [config](configs/lad/lad_r101_paa_r50_fpn_coco_1x.py) | + +## Note + +- Meaning of Config name: lad_r50(student model)\_paa(based on paa)\_r101(teacher model)\_fpn(neck)\_coco(dataset)\_1x(12 epoch).py +- Results may fluctuate by about 0.2 mAP. + +## Citation + +```latex +@inproceedings{nguyen2021improving, + title={Improving Object Detection by Label Assignment Distillation}, + author={Chuong H. Nguyen and Thuy C. Nguyen and Tuan N. Tang and Nam L. H. Phan}, + booktitle = {WACV}, + year={2022} +} +``` diff --git a/downstream/mmdetection/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py b/downstream/mmdetection/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py new file mode 100644 index 0000000..4877d95 --- /dev/null +++ b/downstream/mmdetection/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py @@ -0,0 +1,126 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa +model = dict( + type='LAD', + # student + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # teacher + teacher_ckpt=teacher_ckpt, + teacher_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + teacher_neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + teacher_bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + 
target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.1, + neg_iou_thr=0.1, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + score_voting=True, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +data = dict(samples_per_gpu=8, workers_per_gpu=4) +optimizer = dict(lr=0.01) +fp16 = dict(loss_scale=512.) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py b/downstream/mmdetection/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py new file mode 100644 index 0000000..29bbe69 --- /dev/null +++ b/downstream/mmdetection/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py @@ -0,0 +1,125 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa +model = dict( + type='LAD', + # student + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # teacher + teacher_ckpt=teacher_ckpt, + teacher_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + teacher_neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + teacher_bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + 
type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.1, + neg_iou_thr=0.1, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + score_voting=True, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +data = dict(samples_per_gpu=8, workers_per_gpu=4) +optimizer = dict(lr=0.01) +fp16 = dict(loss_scale=512.) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/lad/metafile.yml b/downstream/mmdetection/configs/lad/metafile.yml new file mode 100644 index 0000000..5076f28 --- /dev/null +++ b/downstream/mmdetection/configs/lad/metafile.yml @@ -0,0 +1,42 @@ +Collections: + - Name: Label Assignment Distillation + Metadata: + Training Data: COCO + Training Techniques: + - Label Assignment Distillation + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2108.10520 + Title: 'Improving Object Detection by Label Assignment Distillation' + README: configs/lad/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.19.0/mmdet/models/detectors/lad.py#L10 + Version: v2.19.0 + +Models: + - Name: lad_r50_paa_r101_fpn_coco_1x + In Collection: Label Assignment Distillation + Config: configs/lad/lad_r50_paa_r101_fpn_coco_1x.py + Metadata: + Teacher: R-101 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + + - Name: lad_r101_paa_r50_fpn_coco_1x + In Collection: Label Assignment Distillation + Config: configs/lad/lad_r101_paa_r50_fpn_coco_1x.py + Metadata: + Teacher: R-50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.2 diff --git a/downstream/mmdetection/configs/ld/README.md b/downstream/mmdetection/configs/ld/README.md new file mode 100644 index 0000000..6556542 --- /dev/null +++ b/downstream/mmdetection/configs/ld/README.md @@ -0,0 +1,43 @@ +# LD + +> [Localization Distillation for Dense Object Detection](https://arxiv.org/abs/2102.12252) + + + +## Abstract + +Knowledge distillation (KD) has witnessed its powerful capability in learning compact models in object detection. Previous KD methods for object detection mostly focus on imitating deep features within the imitation regions instead of mimicking classification logits due to its inefficiency in distilling localization information. In this paper, by reformulating the knowledge distillation process on localization, we present a novel localization distillation (LD) method which can efficiently transfer the localization knowledge from the teacher to the student. Moreover, we also heuristically introduce the concept of valuable localization region that can aid to selectively distill the semantic and localization knowledge for a certain region. Combining these two new components, for the first time, we show that logit mimicking can outperform feature imitation and localization knowledge distillation is more important and efficient than semantic knowledge for distilling object detectors. 
Our distillation scheme is simple as well as effective and can be easily applied to different dense object detectors. Experiments show that our LD can boost the AP score of GFocal-ResNet-50 with a single-scale 1× training schedule from 40.1 to 42.1 on the COCO benchmark without any sacrifice on the inference speed. + +
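In the configs below this amounts to wrapping a GFocal-style student in a `KnowledgeDistillationSingleStageDetector`, pointing it at a trained GFL teacher, and adding a KL-divergence term (`loss_ld`) on the discretized box distributions of the `LDHead`. A trimmed sketch, with the values taken from `ld_r18_gflv1_r101_fpn_coco_1x.py` later in this patch:

```python
# Trimmed sketch of the LD wiring; the full head and training settings are in
# ld_r18_gflv1_r101_fpn_coco_1x.py below.
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth'  # noqa
model = dict(
    type='KnowledgeDistillationSingleStageDetector',
    teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py',
    teacher_ckpt=teacher_ckpt,
    backbone=dict(type='ResNet', depth=18),  # student backbone
    bbox_head=dict(
        type='LDHead',
        num_classes=80,
        in_channels=256,
        reg_max=16,  # discretized box regression range, as in GFL
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        # localization distillation: KL divergence between the teacher's and
        # the student's box distributions, with temperature T
        loss_ld=dict(
            type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10)))
```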
    + +## Results and Models + +### GFocalV1 with LD + +| Teacher | Student | Training schedule | Mini-batch size | AP (val) | AP50 (val) | AP75 (val) | Config | +| :-------: | :-----: | :---------------: | :-------------: | :------: | :--------: | :--------: | :-------------------------------------------------------------------------------------------------------------: | +| -- | R-18 | 1x | 6 | 35.8 | 53.1 | 38.2 | | +| R-101 | R-18 | 1x | 6 | 36.5 | 52.9 | 39.3 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py) | +| -- | R-34 | 1x | 6 | 38.9 | 56.6 | 42.2 | | +| R-101 | R-34 | 1x | 6 | 39.8 | 56.6 | 43.1 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py) | +| -- | R-50 | 1x | 6 | 40.1 | 58.2 | 43.1 | | +| R-101 | R-50 | 1x | 6 | 41.1 | 58.7 | 44.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py) | +| -- | R-101 | 2x | 6 | 44.6 | 62.9 | 48.4 | | +| R-101-DCN | R-101 | 2x | 6 | 45.4 | 63.1 | 49.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_1x.py) | + +## Note + +- Meaning of Config name: ld_r18(student model)\_gflv1(based on gflv1)\_r101(teacher model)\_fpn(neck)\_coco(dataset)\_1x(12 epoch).py + +## Citation + +```latex +@Inproceedings{zheng2022LD, + title={Localization Distillation for Dense Object Detection}, + author= {Zheng, Zhaohui and Ye, Rongguang and Wang, Ping and Ren, Dongwei and Zuo, Wangmeng and Hou, Qibin and Cheng, Mingming}, + booktitle={CVPR}, + year={2022} +} +``` diff --git a/downstream/mmdetection/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py b/downstream/mmdetection/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py new file mode 100644 index 0000000..1cbdb4c --- /dev/null +++ b/downstream/mmdetection/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py @@ -0,0 +1,44 @@ +_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] +teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa +model = dict( + teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py', + teacher_ckpt=teacher_ckpt, + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5)) + +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) +# multi-scale training +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 480), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py 
b/downstream/mmdetection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py new file mode 100644 index 0000000..18dce81 --- /dev/null +++ b/downstream/mmdetection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py @@ -0,0 +1,62 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa +model = dict( + type='KnowledgeDistillationSingleStageDetector', + teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py', + teacher_ckpt=teacher_ckpt, + backbone=dict( + type='ResNet', + depth=18, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict( + type='FPN', + in_channels=[64, 128, 256, 512], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='LDHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), + loss_ld=dict( + type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10), + reg_max=16, + loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py b/downstream/mmdetection/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py new file mode 100644 index 0000000..3b6996d --- /dev/null +++ b/downstream/mmdetection/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py @@ -0,0 +1,19 @@ +_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] +model = dict( + backbone=dict( + type='ResNet', + depth=34, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet34')), + neck=dict( + type='FPN', + in_channels=[64, 128, 256, 512], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5)) diff --git a/downstream/mmdetection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py b/downstream/mmdetection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py new file mode 100644 index 0000000..2b18785 --- /dev/null +++ b/downstream/mmdetection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py @@ -0,0 +1,19 @@ +_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] +model = dict( + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + 
num_outs=5)) diff --git a/downstream/mmdetection/configs/ld/metafile.yml b/downstream/mmdetection/configs/ld/metafile.yml new file mode 100644 index 0000000..d555a6d --- /dev/null +++ b/downstream/mmdetection/configs/ld/metafile.yml @@ -0,0 +1,72 @@ +Collections: + - Name: Localization Distillation + Metadata: + Training Data: COCO + Training Techniques: + - Localization Distillation + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/2102.12252 + Title: 'Localization Distillation for Dense Object Detection' + README: configs/ld/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.11.0/mmdet/models/dense_heads/ld_head.py#L11 + Version: v2.11.0 + +Models: + - Name: ld_r18_gflv1_r101_fpn_coco_1x + In Collection: Localization Distillation + Config: configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py + Metadata: + Teacher: R-101 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.5 + box AP@0.5: 52.9 + box AP@0.75: 39.3 + + - Name: ld_r34_gflv1_r101_fpn_coco_1x + In Collection: Localization Distillation + Config: configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py + Metadata: + Teacher: R-101 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.8 + box AP@0.5: 56.6 + box AP@0.75: 43.1 + + - Name: ld_r50_gflv1_r101_fpn_coco_1x + In Collection: Localization Distillation + Config: configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py + Metadata: + Teacher: R-101 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + box AP@0.5: 58.7 + box AP@0.75: 44.9 + + - Name: ld_r101_gflv1_r101dcn_fpn_coco_1x + In Collection: Localization Distillation + Config: configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_1x.py + Metadata: + Teacher: R-101-DCN + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.4 + box AP@0.5: 63.1 + box AP@0.75: 49.5 diff --git a/downstream/mmdetection/configs/legacy_1.x/README.md b/downstream/mmdetection/configs/legacy_1.x/README.md new file mode 100644 index 0000000..c48477f --- /dev/null +++ b/downstream/mmdetection/configs/legacy_1.x/README.md @@ -0,0 +1,54 @@ +# Legacy Configs in MMDetection V1.x + + + +Configs in this directory implement the legacy configs used by MMDetection V1.x and its model zoos. + +To help users convert their models from V1.x to MMDetection V2.0, we provide v1.x configs to inference the converted v1.x models. +Due to the BC-breaking changes in MMDetection V2.0 from MMDetection V1.x, running inference with the same model weights in these two version will produce different results. The difference will cause within 1% AP absolute difference as can be found in the following table. + +## Usage + +To upgrade the model version, the users need to do the following steps. + +### 1. Convert model weights + +There are three main difference in the model weights between V1.x and V2.0 codebases. + +1. Since the class order in all the detector's classification branch is reordered, all the legacy model weights need to go through the conversion process. +2. The regression and segmentation head no longer contain the background channel. Weights in these background channels should be removed to fix in the current codebase. +3. For two-stage detectors, their wegihts need to be upgraded since MMDetection V2.0 refactors all the two-stage detectors with `RoIHead`. + +The users can do the same modification as mentioned above for the self-implemented +detectors. 
We provide a scripts `tools/model_converters/upgrade_model_version.py` to convert the model weights in the V1.x model zoo. + +```bash +python tools/model_converters/upgrade_model_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} --num-classes ${NUM_CLASSES} + +``` + +- OLD_MODEL_PATH: the path to load the model weights in 1.x version. +- NEW_MODEL_PATH: the path to save the converted model weights in 2.0 version. +- NUM_CLASSES: number of classes of the original model weights. Usually it is 81 for COCO dataset, 21 for VOC dataset. + The number of classes in V2.0 models should be equal to that in V1.x models - 1. + +### 2. Use configs with legacy settings + +After converting the model weights, checkout to the v1.2 release to find the corresponding config file that uses the legacy settings. +The V1.x models usually need these three legacy modules: `LegacyAnchorGenerator`, `LegacyDeltaXYWHBBoxCoder`, and `RoIAlign(align=False)`. +For models using ResNet Caffe backbones, they also need to change the pretrain name and the corresponding `img_norm_cfg`. +An example is in [`retinanet_r50_caffe_fpn_1x_coco_v1.py`](retinanet_r50_caffe_fpn_1x_coco_v1.py) +Then use the config to test the model weights. For most models, the obtained results should be close to that in V1.x. +We provide configs of some common structures in this directory. + +## Performance + +The performance change after converting the models in this directory are listed as the following. + +| Method | Style | Lr schd | V1.x box AP | V1.x mask AP | V2.0 box AP | V2.0 mask AP | Config | Download | +| :-------------------------: | :-----: | :-----: | :---------: | :----------: | :---------: | :----------: | :------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------: | +| Mask R-CNN R-50-FPN | pytorch | 1x | 37.3 | 34.2 | 36.8 | 33.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth) | +| RetinaNet R-50-FPN | caffe | 1x | 35.8 | - | 35.4 | - | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/retinanet_r50_caffe_1x_coco_v1.py) | | +| RetinaNet R-50-FPN | pytorch | 1x | 35.6 | - | 35.2 | - | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_r50_fpn_1x_20181125-7b0c2548.pth) | +| Cascade Mask R-CNN R-50-FPN | pytorch | 1x | 41.2 | 35.7 | 40.8 | 35.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_r50_fpn_1x_20181123-88b170c9.pth) | +| SSD300-VGG16 | caffe | 120e | 25.7 | - | 25.4 | - | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/ssd300_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd300_coco_vgg16_caffe_120e_20181221-84d7110b.pth) | diff --git a/downstream/mmdetection/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py b/downstream/mmdetection/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py new file mode 100644 
index 0000000..fc9d004 --- /dev/null +++ b/downstream/mmdetection/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py @@ -0,0 +1,79 @@ +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5), + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0])), + roi_head=dict( + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=2, + aligned=False)), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + reg_class_agnostic=True, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2])), + dict( + type='Shared2FCBBoxHead', + reg_class_agnostic=True, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1])), + dict( + type='Shared2FCBBoxHead', + reg_class_agnostic=True, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067])), + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=14, + sampling_ratio=2, + aligned=False)))) +dist_params = dict(backend='nccl', port=29515) diff --git a/downstream/mmdetection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py b/downstream/mmdetection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py new file mode 100644 index 0000000..8c573be --- /dev/null +++ b/downstream/mmdetection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='FasterRCNN', + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + rpn_head=dict( + type='RPNHead', + anchor_generator=dict( + type='LegacyAnchorGenerator', + center_offset=0.5, + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=2, + aligned=False), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn_proposal=dict(max_per_img=2000), 
+ rcnn=dict(assigner=dict(match_low_quality=True)))) diff --git a/downstream/mmdetection/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py b/downstream/mmdetection/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py new file mode 100644 index 0000000..04581bb --- /dev/null +++ b/downstream/mmdetection/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + rpn_head=dict( + anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5), + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=2, + aligned=False)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', + output_size=14, + sampling_ratio=2, + aligned=False)), + bbox_head=dict( + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + + # model training and testing settings + train_cfg=dict( + rpn_proposal=dict(max_per_img=2000), + rcnn=dict(assigner=dict(match_low_quality=True)))) diff --git a/downstream/mmdetection/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py b/downstream/mmdetection/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py new file mode 100644 index 0000000..a63d248 --- /dev/null +++ b/downstream/mmdetection/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py @@ -0,0 +1,41 @@ +_base_ = './retinanet_r50_fpn_1x_coco_v1.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py b/downstream/mmdetection/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py new file mode 100644 index 0000000..6198b97 --- /dev/null +++ b/downstream/mmdetection/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + bbox_head=dict( + type='RetinaHead', + anchor_generator=dict( + 
type='LegacyAnchorGenerator', + center_offset=0.5, + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) diff --git a/downstream/mmdetection/configs/legacy_1.x/ssd300_coco_v1.py b/downstream/mmdetection/configs/legacy_1.x/ssd300_coco_v1.py new file mode 100644 index 0000000..65ccc1e --- /dev/null +++ b/downstream/mmdetection/configs/legacy_1.x/ssd300_coco_v1.py @@ -0,0 +1,84 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# model settings +input_size = 300 +model = dict( + bbox_head=dict( + type='SSDHead', + anchor_generator=dict( + type='LegacySSDAnchorGenerator', + scale_major=False, + input_size=input_size, + basesize_ratio_range=(0.15, 0.9), + strides=[8, 16, 32, 64, 100, 300], + ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), + bbox_coder=dict( + type='LegacyDeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]))) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(300, 300), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(300, 300), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=3, + train=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict(_delete_=True) +dist_params = dict(backend='nccl', port=29555) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/libra_rcnn/README.md b/downstream/mmdetection/configs/libra_rcnn/README.md new file mode 100644 index 0000000..87a128a --- /dev/null +++ b/downstream/mmdetection/configs/libra_rcnn/README.md @@ -0,0 +1,53 @@ +# Libra R-CNN + +> [Libra R-CNN: Towards Balanced Learning for Object Detection](https://arxiv.org/abs/1904.02701) + + + +## Abstract + +Compared with model architectures, the training process, which is also crucial to the success of detectors, has received relatively less attention in object detection. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple but effective framework towards balanced learning for object detection. It integrates three novel components: IoU-balanced sampling, balanced feature pyramid, and balanced L1 loss, respectively for reducing the imbalance at sample, feature, and objective level. Benefitted from the overall balanced design, Libra R-CNN significantly improves the detection performance. Without bells and whistles, it achieves 2.5 points and 2.0 points higher Average Precision (AP) than FPN Faster R-CNN and RetinaNet respectively on MSCOCO. + +Instance recognition is rapidly advanced along with the developments of various deep convolutional neural networks. Compared to the architectures of networks, the training process, which is also crucial to the success of detectors, has received relatively less attention. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple yet effective framework towards balanced learning for instance recognition. It integrates IoU-balanced sampling, balanced feature pyramid, and objective re-weighting, respectively for reducing the imbalance at sample, feature, and objective level. Extensive experiments conducted on MS COCO, LVIS and Pascal VOC datasets prove the effectiveness of the overall balanced design. + +
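The configs in this folder wire the three components above together: a `BFP` module stacked after the FPN for the balanced feature pyramid, an `IoUBalancedNegSampler` for IoU-balanced sampling, and a `BalancedL1Loss` for the objective. As a reading aid, here is a minimal sketch of the balanced L1 loss in the piecewise form described in the paper; it is an illustration only, not the code used by these configs, and the `alpha`/`gamma`/`beta` names simply mirror the fields of the `BalancedL1Loss` entries below.

```python
import math

import torch


def balanced_l1_loss(pred, target, alpha=0.5, gamma=1.5, beta=1.0):
    """Element-wise balanced L1 loss, sketched from the Libra R-CNN paper.

    Inliers (|pred - target| < beta) follow a log-shaped branch that raises
    their gradient contribution; outliers fall back to a linear branch, so
    easy samples matter more without letting hard outliers dominate.
    """
    x = (pred - target).abs()
    # b is chosen so that the two branches join continuously at |x| = beta,
    # which amounts to alpha * log(b + 1) == gamma.
    b = math.e ** (gamma / alpha) - 1
    inlier = alpha / b * (b * x + 1) * torch.log(b * x / beta + 1) - alpha * x
    outlier = gamma * x + gamma / b - alpha * beta
    return torch.where(x < beta, inlier, outlier)
```

The Faster/Fast R-CNN configs below keep the default `alpha=0.5, gamma=1.5, beta=1.0`, while the RetinaNet variant uses `beta=0.11`, matching the smooth L1 setting it replaces.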
    + +## Results and Models + +The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val) + +| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50-FPN | pytorch | 1x | 4.6 | 19.0 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| Fast R-CNN | R-50-FPN | pytorch | 1x | | | | | | +| Faster R-CNN | R-101-FPN | pytorch | 1x | 6.5 | 14.4 | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203-8dba6a5a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203_001405.log.json) | +| Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 10.8 | 8.5 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315_231625.log.json) | +| RetinaNet | R-50-FPN | pytorch | 1x | 4.2 | 17.7 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205-804d94ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205_112757.log.json) | + +## Citation + +We provide config files to reproduce the results in the CVPR 2019 paper [Libra R-CNN](https://arxiv.org/pdf/1904.02701.pdf). + +The extended version of [Libra R-CNN](https://arxiv.org/pdf/2108.10175.pdf) is accpeted by IJCV. 
+ +```latex +@inproceedings{pang2019libra, + title={Libra R-CNN: Towards Balanced Learning for Object Detection}, + author={Pang, Jiangmiao and Chen, Kai and Shi, Jianping and Feng, Huajun and Ouyang, Wanli and Dahua Lin}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019} +} + +@article{pang2021towards, + title={Towards Balanced Learning for Instance Recognition}, + author={Pang, Jiangmiao and Chen, Kai and Li, Qi and Xu, Zhihai and Feng, Huajun and Shi, Jianping and Ouyang, Wanli and Lin, Dahua}, + journal={International Journal of Computer Vision}, + volume={129}, + number={5}, + pages={1376--1393}, + year={2021}, + publisher={Springer} +} +``` diff --git a/downstream/mmdetection/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..efbedc8 --- /dev/null +++ b/downstream/mmdetection/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,50 @@ +_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' +# model settings +model = dict( + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + dict( + type='BFP', + in_channels=256, + num_levels=5, + refine_level=2, + refine_type='non_local') + ], + roi_head=dict( + bbox_head=dict( + loss_bbox=dict( + _delete_=True, + type='BalancedL1Loss', + alpha=0.5, + gamma=1.5, + beta=1.0, + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + sampler=dict( + _delete_=True, + type='CombinedSampler', + num=512, + pos_fraction=0.25, + add_gt_as_proposals=True, + pos_sampler=dict(type='InstanceBalancedPosSampler'), + neg_sampler=dict( + type='IoUBalancedNegSampler', + floor_thr=-1, + floor_fraction=0, + num_bins=3))))) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +data = dict( + train=dict(proposal_file=data_root + + 'libra_proposals/rpn_r50_fpn_1x_train2017.pkl'), + val=dict(proposal_file=data_root + + 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'), + test=dict(proposal_file=data_root + + 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl')) diff --git a/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..e899706 --- /dev/null +++ b/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..89a0d7b --- /dev/null +++ b/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,41 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +# model settings +model = dict( + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + dict( + type='BFP', + in_channels=256, + num_levels=5, + refine_level=2, + refine_type='non_local') + ], + roi_head=dict( + bbox_head=dict( + loss_bbox=dict( + _delete_=True, + type='BalancedL1Loss', + alpha=0.5, + gamma=1.5, + beta=1.0, + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + 
rpn=dict(sampler=dict(neg_pos_ub=5), allowed_border=-1), + rcnn=dict( + sampler=dict( + _delete_=True, + type='CombinedSampler', + num=512, + pos_fraction=0.25, + add_gt_as_proposals=True, + pos_sampler=dict(type='InstanceBalancedPosSampler'), + neg_sampler=dict( + type='IoUBalancedNegSampler', + floor_thr=-1, + floor_fraction=0, + num_bins=3))))) diff --git a/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..06740a7 --- /dev/null +++ b/downstream/mmdetection/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000..be27420 --- /dev/null +++ b/downstream/mmdetection/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,26 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' +# model settings +model = dict( + neck=[ + dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + dict( + type='BFP', + in_channels=256, + num_levels=5, + refine_level=1, + refine_type='non_local') + ], + bbox_head=dict( + loss_bbox=dict( + _delete_=True, + type='BalancedL1Loss', + alpha=0.5, + gamma=1.5, + beta=0.11, + loss_weight=1.0))) diff --git a/downstream/mmdetection/configs/libra_rcnn/metafile.yml b/downstream/mmdetection/configs/libra_rcnn/metafile.yml new file mode 100644 index 0000000..8c32795 --- /dev/null +++ b/downstream/mmdetection/configs/libra_rcnn/metafile.yml @@ -0,0 +1,99 @@ +Collections: + - Name: Libra R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - IoU-Balanced Sampling + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Balanced Feature Pyramid + Paper: + URL: https://arxiv.org/abs/1904.02701 + Title: 'Libra R-CNN: Towards Balanced Learning for Object Detection' + README: configs/libra_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/bfp.py#L10 + Version: v2.0.0 + +Models: + - Name: libra_faster_rcnn_r50_fpn_1x_coco + In Collection: Libra R-CNN + Config: configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.6 + inference time (ms/im): + - value: 52.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth + + - Name: libra_faster_rcnn_r101_fpn_1x_coco + In Collection: Libra R-CNN + Config: configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.5 + inference time (ms/im): + - value: 69.44 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + 
Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203-8dba6a5a.pth + + - Name: libra_faster_rcnn_x101_64x4d_fpn_1x_coco + In Collection: Libra R-CNN + Config: configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.8 + inference time (ms/im): + - value: 117.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth + + - Name: libra_retinanet_r50_fpn_1x_coco + In Collection: Libra R-CNN + Config: configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + inference time (ms/im): + - value: 56.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205-804d94ce.pth diff --git a/downstream/mmdetection/configs/lvis/README.md b/downstream/mmdetection/configs/lvis/README.md new file mode 100644 index 0000000..0c2760e --- /dev/null +++ b/downstream/mmdetection/configs/lvis/README.md @@ -0,0 +1,56 @@ +# LVIS + +> [LVIS: A Dataset for Large Vocabulary Instance Segmentation](https://arxiv.org/abs/1908.03195) + + + +## Abstract + +Progress on object detection is enabled by datasets that focus the research community's attention on open challenges. This process led us from simple images to complex scenes and from bounding boxes to segmentation masks. In this work, we introduce LVIS (pronounced \`el-vis'): a new dataset for Large Vocabulary Instance Segmentation. We plan to collect ~2 million high-quality instance segmentation masks for over 1000 entry-level object categories in 164k images. Due to the Zipfian distribution of categories in natural images, LVIS naturally has a long tail of categories with few training samples. Given that state-of-the-art deep learning methods for object detection perform poorly in the low-sample regime, we believe that our dataset poses an important and exciting new scientific challenge. + +
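Note that every LVIS config below inherits a class-balanced oversampling wrapper from its `_base_` dataset file, with the `1e-3` oversample threshold mentioned under Common Setting below. For orientation, a minimal sketch of what that wrapper looks like follows; the `ClassBalancedDataset` and `LVISV1Dataset` types are MMDetection names, while the paths are illustrative placeholders rather than values taken from this repository.

```python
# Sketch of the class-balanced wrapper inherited from the _base_ LVIS dataset
# config (paths are illustrative). Images containing categories rarer than
# oversample_thr of the training set are repeated during training.
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type='LVISV1Dataset',
            ann_file='data/lvis_v1/annotations/lvis_v1_train.json',
            img_prefix='data/lvis_v1/')))
```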
    + +## Common Setting + +- Please follow [install guide](../../docs/get_started.md#install-mmdetection) to install open-mmlab forked cocoapi first. + +- Run following scripts to install our forked lvis-api. + + ```shell + pip install git+https://github.com/lvis-dataset/lvis-api.git + ``` + +- All experiments use oversample strategy [here](../../docs/tutorials/customize_dataset.md#class-balanced-dataset) with oversample threshold `1e-3`. + +- The size of LVIS v0.5 is half of COCO, so schedule `2x` in LVIS is roughly the same iterations as `1x` in COCO. + +## Results and models of LVIS v0.5 + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 2x | - | - | 26.1 | 25.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis-dbd06831.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_20200531_160435.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 27.1 | 27.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis-54582ee2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_20200601_134748.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 26.7 | 26.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis-3cf55ea2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_20200531_221749.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 26.4 | 26.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis-1c99a5ad.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_20200601_194651.log.json) | + +## Results and models of LVIS v1 + +| Backbone | 
Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 9.1 | - | 22.5 | 21.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-aa78ac3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_061305.log.json) | +| R-101-FPN | pytorch | 1x | 10.8 | - | 24.6 | 23.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-ec55ce32.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_070959.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 11.8 | - | 26.7 | 25.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-ebbc5c81.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_071317.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 14.6 | - | 27.2 | 25.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-43d9edfe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200830_060206.log.json) | + +## Citation + +```latex +@inproceedings{gupta2019lvis, + title={{LVIS}: A Dataset for Large Vocabulary Instance Segmentation}, + author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross}, + booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py b/downstream/mmdetection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py new file mode 100644 index 0000000..0f017f5 --- /dev/null +++ 
b/downstream/mmdetection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/downstream/mmdetection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py new file mode 100644 index 0000000..637f4a6 --- /dev/null +++ b/downstream/mmdetection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py b/downstream/mmdetection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py new file mode 100644 index 0000000..92ddb52 --- /dev/null +++ b/downstream/mmdetection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/lvis_v1_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/downstream/mmdetection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/downstream/mmdetection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py new file mode 100644 index 0000000..d53c5dc --- /dev/null +++ b/downstream/mmdetection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/lvis_v0.5_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=1230), mask_head=dict(num_classes=1230)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + 
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/downstream/mmdetection/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py b/downstream/mmdetection/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py new file mode 100644 index 0000000..a6115c1 --- /dev/null +++ b/downstream/mmdetection/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/downstream/mmdetection/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py new file mode 100644 index 0000000..96b6252 --- /dev/null +++ b/downstream/mmdetection/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py b/downstream/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py new file mode 100644 index 0000000..0f95a73 --- /dev/null +++ b/downstream/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/downstream/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py new file mode 100644 index 0000000..986acda --- /dev/null +++ b/downstream/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/mask2former/README.md b/downstream/mmdetection/configs/mask2former/README.md new file mode 100644 index 0000000..ebce50d --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/README.md @@ -0,0 +1,73 @@ +# Mask2Former + +> [Masked-attention Mask Transformer for Universal Image Segmentation](http://arxiv.org/abs/2112.01527) + + + +## Abstract + 
+Image segmentation is about grouping pixels with different semantics, e.g., category or instance membership, where each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K). + +
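The panoptic and instance configs below implement this with a `Mask2FormerHead` whose transformer decoder applies masked cross-attention. As a conceptual sketch only (not the code path used by these configs, which goes through MMCV attention modules), masked attention can be written as dot-product attention whose logits are suppressed wherever the previous layer's mask prediction is background:

```python
import torch


def masked_cross_attention(query, key, value, mask_logits, neg_inf=-1e9):
    """Conceptual single-head sketch of Mask2Former's masked attention.

    query:       (num_queries, channels) decoder queries
    key, value:  (num_pixels, channels) flattened pixel-decoder features
    mask_logits: (num_queries, num_pixels) mask prediction from the previous
                 decoder layer, used to restrict each query's attention to
                 the foreground region of its current mask estimate.
    """
    scale = query.shape[-1] ** -0.5
    attn = (query @ key.t()) * scale                    # (num_queries, num_pixels)
    attn = attn.masked_fill(mask_logits.sigmoid() < 0.5, neg_inf)
    return attn.softmax(dim=-1) @ value                 # (num_queries, channels)
```

In the configs themselves the decoder layers are standard `MultiheadAttention` blocks (see the `transformer_decoder` settings below), with the mask-derived attention restriction supplied by the head at run time.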
    + +## Introduction + +Mask2Former requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +| | | ├── instances_train2017.json +| | | ├── instances_val2017.json +│ │ │ ├── panoptic_train2017.json +│ │ │ ├── panoptic_train2017 +│ │ │ ├── panoptic_val2017.json +│ │ │ ├── panoptic_val2017 +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +``` + +## Results and Models + +### Panoptic segmentation + +| Backbone | style | Pretrain | Lr schd | Mem (GB) | Inf time (fps) | PQ | box mAP | mask mAP | Config | Download | +| :------: | :-----: | :----------: | :-----: | :------: | :------------: | :--: | :-----: | :------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | ImageNet-1K | 50e | 13.9 | - | 51.9 | 44.8 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516.log.json) | +| R-101 | pytorch | ImageNet-1K | 50e | 16.1 | - | 52.4 | 45.3 | 42.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104-c54e64c9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104.log.json) | +| Swin-T | - | ImageNet-1K | 50e | 15.9 | - | 53.4 | 46.3 | 43.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553-fc567107.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553.log.json) | +| Swin-S | - | ImageNet-1K | 50e | 19.1 | - | 54.5 | 47.8 | 44.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200-c7b94355.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200.log.json) | +| Swin-B | - | ImageNet-1K | 50e | 26.0 | - | 55.1 | 48.2 | 44.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244-c149a9e9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244.log.json) | +| Swin-B | - | ImageNet-21K | 50e | 25.8 | - | 56.3 | 50.0 | 46.3 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021-3bb8b482.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021.log.json) | +| Swin-L | - | ImageNet-21K | 100e | 21.1 | - | 57.6 | 52.2 | 48.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949-d4919c44.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949.log.json) | + +### Instance segmentation + +| Backbone | style | Pretrain | Lr schd | Mem (GB) | Inf time (fps) | box mAP | mask mAP | Config | Download | +| -------- | ------- | ----------- | ------- | -------- | -------------- | ------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| R-50 | pytorch | ImageNet-1K | 50e | 13.7 | - | 45.7 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028.log.json) | +| R-101 | pytorch | ImageNet-1K | 50e | 15.5 | - | 46.7 | 44.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250.log.json) | +| Swin-T | - | ImageNet-1K | 50e | 15.3 | - | 47.7 | 44.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649.log.json) | +| Swin-S | - | ImageNet-1K | 50e | 18.8 | - | 49.3 | 46.1 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756.log.json) | + +Note: We have trained the instance segmentation models many times (see more details in [PR 7571](https://github.com/open-mmlab/mmdetection/pull/7571)). The results of the trained models are relatively stable (+- 0.2), and have a certain gap (about 0.2 AP) in comparison with the results in the [paper](http://arxiv.org/abs/2112.01527). However, the performance of the model trained with the official code is unstable and may also be slightly lower than the reported results as mentioned in the [issue](https://github.com/facebookresearch/Mask2Former/issues/46). + +## Citation + +```latex +@article{cheng2021mask2former, + title={Masked-attention Mask Transformer for Universal Image Segmentation}, + author={Bowen Cheng and Ishan Misra and Alexander G. 
Schwing and Alexander Kirillov and Rohit Girdhar}, + journal={arXiv}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py b/downstream/mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py new file mode 100644 index 0000000..33fdde6 --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py @@ -0,0 +1,7 @@ +_base_ = './mask2former_r50_lsj_8x2_50e_coco-panoptic.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py b/downstream/mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py new file mode 100644 index 0000000..5543fb0 --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py @@ -0,0 +1,7 @@ +_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py'] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py b/downstream/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py new file mode 100644 index 0000000..2c23625 --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py @@ -0,0 +1,253 @@ +_base_ = [ + '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' +] +num_things_classes = 80 +num_stuff_classes = 53 +num_classes = num_things_classes + num_stuff_classes +model = dict( + type='Mask2Former', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + panoptic_head=dict( + type='Mask2FormerHead', + in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + 
dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0)), + panoptic_fusion_head=dict( + type='MaskFormerFusionHead', + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + loss_panoptic=None, + init_cfg=None), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + # For now, the dataset does not support + # evaluating semantic segmentation metric. + semantic_on=False, + instance_on=True, + # max_per_image is for instance segmentation. + max_per_image=100, + iou_thr=0.8, + # In Mask2Former's panoptic postprocessing, + # it will filter mask area where score is less than 0.5 . + filter_low_score=True), + init_cfg=None) + +# dataset settings +image_size = (1024, 1024) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict( + type='LoadPanopticAnnotations', + with_bbox=True, + with_mask=True, + with_seg=True), + dict(type='RandomFlip', flip_ratio=0.5), + # large scale jittering + dict( + type='Resize', + img_scale=image_size, + ratio_range=(0.1, 2.0), + multiscale_mode='range', + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=image_size, + crop_type='absolute', + recompute_bbox=True, + allow_negative_crop=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=image_size), + dict(type='DefaultFormatBundle', img_to_float=True), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data_root = 'data/coco/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict( + pipeline=test_pipeline, + ins_ann_file=data_root + 'annotations/instances_val2017.json', + ), + test=dict( + pipeline=test_pipeline, + ins_ann_file=data_root + 'annotations/instances_val2017.json', + )) + +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +# optimizer +optimizer = dict( + type='AdamW', + lr=0.0001, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, 
decay_mult=1.0), + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi, + }, + norm_decay_mult=0.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2)) + +# learning policy +lr_config = dict( + policy='step', + gamma=0.1, + by_epoch=False, + step=[327778, 355092], + warmup='linear', + warmup_by_epoch=False, + warmup_ratio=1.0, # no warmup + warmup_iters=10) + +max_iters = 368750 +runner = dict(type='IterBasedRunner', max_iters=max_iters) + +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook', by_epoch=False) + ]) +interval = 5000 +workflow = [('train', interval)] +checkpoint_config = dict( + by_epoch=False, interval=interval, save_last=True, max_keep_ckpts=3) + +# Before 365001th iteration, we do evaluation every 5000 iterations. +# After 365000th iteration, we do evaluation every 368750 iterations, +# which means that we do evaluation at the end of training. +dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] +evaluation = dict( + interval=interval, + dynamic_intervals=dynamic_intervals, + metric=['PQ', 'bbox', 'segm']) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py b/downstream/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py new file mode 100644 index 0000000..eca6135 --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py @@ -0,0 +1,79 @@ +_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py'] +num_things_classes = 80 +num_stuff_classes = 0 +num_classes = num_things_classes + num_stuff_classes +model = dict( + panoptic_head=dict( + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])), + panoptic_fusion_head=dict( + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes), + test_cfg=dict(panoptic_on=False)) + +# dataset settings +image_size = (1024, 1024) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +pad_cfg = dict(img=(128, 128, 128), masks=0, seg=255) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + # large scale jittering + dict( + type='Resize', + img_scale=image_size, + ratio_range=(0.1, 2.0), + multiscale_mode='range', + keep_ratio=True), + dict( + type='RandomCrop', + crop_size=image_size, + crop_type='absolute', + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True), + dict(type='Pad', size=image_size, pad_val=pad_cfg), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle', img_to_float=True), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Pad', size_divisor=32, pad_val=pad_cfg), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +data = dict( + _delete_=True, + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 
'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py b/downstream/mmdetection/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py new file mode 100644 index 0000000..f13f5e1 --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py @@ -0,0 +1,5 @@ +_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa + +model = dict( + backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained))) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py b/downstream/mmdetection/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py new file mode 100644 index 0000000..33a805c --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py @@ -0,0 +1,42 @@ +_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + pretrain_img_size=384, + embed_dims=128, + depths=depths, + num_heads=[4, 8, 16, 32], + window_size=12, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict(in_channels=[128, 256, 512, 1024])) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optimizer = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py b/downstream/mmdetection/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py new file mode 100644 index 0000000..91a180d --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py 
@@ -0,0 +1,26 @@ +_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa + +model = dict( + backbone=dict( + embed_dims=192, + num_heads=[6, 12, 24, 48], + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536])) + +data = dict(samples_per_gpu=1, workers_per_gpu=1) + +lr_config = dict(step=[655556, 710184]) + +max_iters = 737500 +runner = dict(type='IterBasedRunner', max_iters=max_iters) + +# Before 735001th iteration, we do evaluation every 5000 iterations. +# After 735000th iteration, we do evaluation every 737500 iterations, +# which means that we do evaluation at the end of training.' +interval = 5000 +dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] +evaluation = dict( + interval=interval, + dynamic_intervals=dynamic_intervals, + metric=['PQ', 'bbox', 'segm']) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py b/downstream/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py new file mode 100644 index 0000000..b2b621c --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py @@ -0,0 +1,37 @@ +_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + depths=depths, init_cfg=dict(type='Pretrained', + checkpoint=pretrained))) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optimizer = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py b/downstream/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py new file mode 100644 index 0000000..7b1b05a --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py @@ -0,0 +1,37 @@ +_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa + +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + depths=depths, init_cfg=dict(type='Pretrained', + checkpoint=pretrained))) + +# set all layers 
in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optimizer = dict( + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py b/downstream/mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py new file mode 100644 index 0000000..04b2f10 --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py @@ -0,0 +1,62 @@ +_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa + +depths = [2, 2, 6, 2] +model = dict( + type='Mask2Former', + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=depths, + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict( + type='Mask2FormerHead', in_channels=[96, 192, 384, 768]), + init_cfg=None) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optimizer = dict( + type='AdamW', + lr=0.0001, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/downstream/mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py 
b/downstream/mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py new file mode 100644 index 0000000..0ccbe91 --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py @@ -0,0 +1,61 @@ +_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py'] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +depths = [2, 2, 6, 2] +model = dict( + type='Mask2Former', + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=depths, + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + frozen_stages=-1, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict( + type='Mask2FormerHead', in_channels=[96, 192, 384, 768]), + init_cfg=None) + +# set all layers in backbone to lr_mult=0.1 +# set all norm layers, position_embeding, +# query_embeding, level_embeding to decay_multi=0.0 +backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) +backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'backbone.patch_embed.norm': backbone_norm_multi, + 'backbone.norm': backbone_norm_multi, + 'absolute_pos_embed': backbone_embed_multi, + 'relative_position_bias_table': backbone_embed_multi, + 'query_embed': embed_multi, + 'query_feat': embed_multi, + 'level_embed': embed_multi +} +custom_keys.update({ + f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi + for stage_id, num_blocks in enumerate(depths) + for block_id in range(num_blocks) +}) +custom_keys.update({ + f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi + for stage_id in range(len(depths) - 1) +}) +# optimizer +optimizer = dict( + type='AdamW', + lr=0.0001, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/downstream/mmdetection/configs/mask2former/metafile.yml b/downstream/mmdetection/configs/mask2former/metafile.yml new file mode 100644 index 0000000..d9f4692 --- /dev/null +++ b/downstream/mmdetection/configs/mask2former/metafile.yml @@ -0,0 +1,223 @@ +Collections: + - Name: Mask2Former + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 8x A100 GPUs + Architecture: + - Mask2Former + Paper: + URL: https://arxiv.org/pdf/2112.01527 + Title: 'Masked-attention Mask Transformer for Universal Image Segmentation' + README: configs/mask2former/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/detectors/mask2former.py#L7 + Version: v2.23.0 + +Models: +- Name: mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py + Metadata: + Training Memory (GB): 19.1 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.5 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 54.5 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200-c7b94355.pth +- Name: mask2former_r101_lsj_8x2_50e_coco + In Collection: Mask2Former + Config: configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py + Metadata: + Training Memory (GB): 15.5 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth +- Name: mask2former_r101_lsj_8x2_50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py + Metadata: + Training Memory (GB): 16.1 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.4 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 52.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104-c54e64c9.pth +- Name: mask2former_r50_lsj_8x2_50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py + Metadata: + Training Memory (GB): 13.9 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.9 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 51.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth +- Name: mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py + Metadata: + Training Memory (GB): 15.9 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.4 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 53.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553-fc567107.pth +- Name: mask2former_r50_lsj_8x2_50e_coco + In Collection: Mask2Former + Config: configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py + Metadata: + Training Memory (GB): 13.7 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth +- Name: mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py + Metadata: + Training Memory (GB): 21.1 + Iterations: 737500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.2 + - Task: Instance Segmentation + 
Dataset: COCO + Metrics: + mask AP: 48.5 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 57.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949-d4919c44.pth +- Name: mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py + Metadata: + Training Memory (GB): 25.8 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 46.3 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 56.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021-3bb8b482.pth +- Name: mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py + Metadata: + Training Memory (GB): 26.0 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.9 + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 55.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244-c149a9e9.pth +- Name: mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py + Metadata: + Training Memory (GB): 15.3 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 44.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth +- Name: mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco + In Collection: Mask2Former + Config: configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py + Metadata: + Training Memory (GB): 18.8 + Iterations: 368750 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth diff --git a/downstream/mmdetection/configs/mask_rcnn/README.md b/downstream/mmdetection/configs/mask_rcnn/README.md new file mode 100644 index 0000000..6aad9a2 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/README.md @@ -0,0 +1,59 @@ +# Mask R-CNN + +> [Mask R-CNN](https://arxiv.org/abs/1703.06870) + + + +## Abstract + +We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. 
The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. + +
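As a concrete illustration of the parallel-branch design described above, here is a minimal, self-contained PyTorch sketch of a Mask R-CNN-style RoI head. It is only an illustrative sketch, not the MMDetection implementation: the channel width (256), RoI feature resolutions (7×7 for the box branch, 14×14 for the mask branch), the 28×28 mask output and the 80-class COCO setting are assumed defaults chosen for demonstration.

```python
# Toy sketch of the two parallel RoI branches in a Mask R-CNN-style head.
# Not the MMDetection implementation; shapes follow common COCO defaults.
import torch
import torch.nn as nn


class ToyRoIHead(nn.Module):

    def __init__(self, in_channels=256, num_classes=80):
        super().__init__()
        # Box branch: two FC layers, then classification and box regression.
        self.box_fc = nn.Sequential(
            nn.Flatten(), nn.Linear(in_channels * 7 * 7, 1024), nn.ReLU(),
            nn.Linear(1024, 1024), nn.ReLU())
        self.cls_score = nn.Linear(1024, num_classes + 1)  # +1 for background
        self.bbox_pred = nn.Linear(1024, num_classes * 4)
        # Mask branch: small FCN on its own RoI features, upsampled to 28x28.
        self.mask_fcn = nn.Sequential(
            nn.Conv2d(in_channels, 256, 3, padding=1), nn.ReLU(),
            nn.Conv2d(256, 256, 3, padding=1), nn.ReLU(),
            nn.ConvTranspose2d(256, 256, 2, stride=2), nn.ReLU(),
            nn.Conv2d(256, num_classes, 1))

    def forward(self, box_feats, mask_feats):
        # box_feats: (N, C, 7, 7), mask_feats: (N, C, 14, 14) RoIAlign outputs.
        x = self.box_fc(box_feats)
        return self.cls_score(x), self.bbox_pred(x), self.mask_fcn(mask_feats)


# 8 random RoIs just to show the output shapes of the two branches.
head = ToyRoIHead()
cls, box, mask = head(torch.randn(8, 256, 7, 7), torch.randn(8, 256, 14, 14))
print(cls.shape, box.shape, mask.shape)  # (8, 81), (8, 320), (8, 80, 28, 28)
```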
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 4.3 | | 38.0 | 34.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_20200504_231812.log.json) | +| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 38.2 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 3.6 | 24.1 | 38.1 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) | +| R-50-FPN | pytorch | 2x | - | - | 39.2 | 35.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_20200505_003907.log.json) | +| R-101-FPN | caffe | 1x | | | 40.4 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758.log.json) | +| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 40.8 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_20200505_071027.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.6 | 11.3 | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_20200506_004702.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.7 | 8.0 | 42.8 | 38.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201_124310.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 42.7 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208.log.json) | +| X-101-32x8d-FPN | pytorch | 1x | - | - | 42.8 | 38.3 | | | + +## Pre-trained Models + +We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
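For example, fine-tuning one of the checkpoints below on a custom dataset usually only requires shrinking the heads to the new class count and loading the released weights. The following is an illustrative sketch, not a config shipped in this repository: the 3-class heads stand in for a hypothetical downstream dataset, the learning rate is an arbitrary reduced value, the checkpoint URL is the R-50-FPN (pytorch, 3x) entry from the table that follows, and the dataset overrides (classes, annotation paths) are omitted.

```python
# Illustrative fine-tuning sketch only; dataset settings are omitted.
_base_ = './mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py'

# Resize the box and mask heads to the downstream dataset's class count.
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=3),
        mask_head=dict(num_classes=3)))

# Start from the multi-scale 3x COCO checkpoint (see the table below) and
# fine-tune with a reduced learning rate.
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth'  # noqa
optimizer = dict(lr=0.0025)
```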
+ +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-FPN](./mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py) | caffe | 2x | 4.3 | | 40.3 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_20200504_231822.log.json) | +| [R-50-FPN](./mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py) | caffe | 3x | 4.3 | | 40.8 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_20200504_163245.log.json) | +| [R-50-FPN](./mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 4.1 | | 40.9 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154.log.json) | +| [R-101-FPN](./mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py) | caffe | 3x | 5.9 | | 42.9 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339.log.json) | +| [R-101-FPN](./mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 6.1 | | 42.7 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244.log.json) | +| [x101-32x4d-FPN](./mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 7.3 | | 43.6 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410.log.json) | +| [X-101-32x8d-FPN](./mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py) | pytorch | 1x | - | | 43.6 | 39.0 | | | +| [X-101-32x8d-FPN](./mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 10.3 | | 44.3 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042.log.json) | +| [X-101-64x4d-FPN](./mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 10.4 | | 44.5 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447.log.json) | + +## Citation + +```latex +@article{He_2017, + title={Mask R-CNN}, + journal={2017 IEEE International Conference on Computer Vision (ICCV)}, + publisher={IEEE}, + author={He, Kaiming and Gkioxari, Georgia and Dollar, Piotr and Girshick, Ross}, + year={2017}, + month={Oct} +} +``` diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..95b324f --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..e39781d --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,55 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + 
'../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + depth=101, + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..b7986e8 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py new file mode 100644 index 0000000..c9059d5 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..0696cbe --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,10 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py new file mode 100644 index 0000000..a44c018 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_caffe_c4.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..5a23f8c --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,40 @@ +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py new file mode 100644 index 0000000..6308e40 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py @@ -0,0 +1,49 @@ +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + 
multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py new file mode 100644 index 0000000..4f7150c --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..1b48a21 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,4 @@ +_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' +# learning policy +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py new file mode 100644 index 0000000..bebbaaa --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py @@ -0,0 +1,45 @@ +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + 
test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py new file mode 100644 index 0000000..3f8079d --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py @@ -0,0 +1,61 @@ +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + bbox_roi_extractor=dict( + roi_layer=dict( + type='RoIAlign', + output_size=7, + sampling_ratio=2, + aligned=False)), + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + mask_roi_extractor=dict( + roi_layer=dict( + type='RoIAlign', + output_size=14, + sampling_ratio=2, + aligned=False)))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..6a6c924 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_wandb_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_wandb_coco.py new file mode 100644 index 0000000..88c8576 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_wandb_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# Set evaluation interval +evaluation = dict(interval=2) +# Set checkpoint interval +checkpoint_config = dict(interval=4) + +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='MMDetWandbHook', + init_kwargs={ + 'project': 'mmdetection', + 'group': 'maskrcnn-r50-fpn-1x-coco' + }, + interval=50, + log_checkpoint=True, + log_checkpoint_metadata=True, + num_eval_images=100) + ]) diff --git 
a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py new file mode 100644 index 0000000..932b1f9 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py new file mode 100644 index 0000000..fb8289b --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py @@ -0,0 +1,3 @@ +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +# fp16 settings +fp16 = dict(loss_scale=512.) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..b3d9242 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,4 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py new file mode 100644 index 0000000..9eb6d57 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..a8b3799 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py new file mode 100644 index 0000000..2cd3cee --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_r101_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + 
num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..b698a7d --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py new file mode 100644 index 0000000..108ea4e --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py @@ -0,0 +1,65 @@ +_base_ = './mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) + +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py new file mode 100644 index 0000000..6b912f6 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py @@ -0,0 +1,60 @@ +_base_ 
= './mask_rcnn_r101_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) + +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..8ba0e9c --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,85 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=8, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) + +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], + std=[57.375, 57.120, 58.395], + to_rgb=False) + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + 
samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..2333b03 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py new file mode 100644 index 0000000..6074cca --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './mask_rcnn_x101_32x4d_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..9f9cb1c --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/mask_rcnn/metafile.yml b/downstream/mmdetection/configs/mask_rcnn/metafile.yml new file mode 100644 index 0000000..f74bdf3 --- /dev/null +++ b/downstream/mmdetection/configs/mask_rcnn/metafile.yml @@ -0,0 +1,447 @@ +Collections: + - Name: Mask R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Softmax + - RPN + - Convolution + - Dense Connections + - FPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1703.06870v3 + Title: "Mask R-CNN" + README: configs/mask_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: mask_rcnn_r50_caffe_fpn_1x_coco + In 
Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth + + - Name: mask_rcnn_r50_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 62.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth + + - Name: mask_rcnn_r50_fpn_fp16_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py + Metadata: + Training Memory (GB): 3.6 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 41.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth + + - Name: mask_rcnn_r50_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py + Metadata: + Training Memory (GB): 4.4 + inference time (ms/im): + - value: 62.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth + + - Name: mask_rcnn_r101_caffe_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth + + - Name: mask_rcnn_r101_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 74.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth + + - 
Name: mask_rcnn_r101_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 6.4 + inference time (ms/im): + - value: 74.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth + + - Name: mask_rcnn_x101_32x4d_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth + + - Name: mask_rcnn_x101_32x4d_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.6 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth + + - Name: mask_rcnn_x101_64x4d_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.7 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth + + - Name: mask_rcnn_x101_64x4d_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 10.7 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth + + - Name: mask_rcnn_x101_32x8d_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.7 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + 
Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.3 + + - Name: mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth + + - Name: mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth + + - Name: mask_rcnn_r50_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth + + - Name: mask_rcnn_r101_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py + Metadata: + Training Memory (GB): 6.1 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth + + - Name: mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py + Metadata: + Training Memory (GB): 5.9 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth + + - Name: mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py + Metadata: + Training Memory (GB): 7.3 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth + + - Name: 
mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + + - Name: mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco + Metadata: + Training Memory (GB): 10.3 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth + + - Name: mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py + Metadata: + Epochs: 36 + Training Memory (GB): 10.4 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth diff --git a/downstream/mmdetection/configs/maskformer/README.md b/downstream/mmdetection/configs/maskformer/README.md new file mode 100644 index 0000000..5d8daa2 --- /dev/null +++ b/downstream/mmdetection/configs/maskformer/README.md @@ -0,0 +1,53 @@ +# MaskFormer + +> [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) + + + +## Abstract + +Modern approaches typically formulate semantic segmentation as a per-pixel classification task, while instance-level segmentation is handled with an alternative mask classification. Our key insight: mask classification is sufficiently general to solve both semantic- and instance-level segmentation tasks in a unified manner using the exact same model, loss, and training procedure. Following this observation, we propose MaskFormer, a simple mask classification model which predicts a set of binary masks, each associated with a single global class label prediction. Overall, the proposed mask classification-based method simplifies the landscape of effective approaches to semantic and panoptic segmentation tasks and shows excellent empirical results. In particular, we observe that MaskFormer outperforms per-pixel classification baselines when the number of classes is large. Our mask classification-based method outperforms both current state-of-the-art semantic (55.6 mIoU on ADE20K) and panoptic segmentation (52.7 PQ on COCO) models. + +
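+The sketch below illustrates the mask-classification formulation described in the abstract: each query predicts a class distribution and a binary mask, and the two are combined into a per-pixel prediction. It is a minimal illustration with assumed toy tensor shapes, not the mmdet implementation (the actual heads are defined in the configs below).
+
+```python
+import torch
+
+# Toy shapes only: Q queries, C classes plus one "no object" class, an H x W mask grid.
+Q, C, H, W = 100, 133, 4, 4  # assumed illustrative values
+
+cls_logits = torch.randn(Q, C + 1)   # per-query class scores (last index = "no object")
+mask_logits = torch.randn(Q, H, W)   # per-query binary mask logits
+
+cls_probs = cls_logits.softmax(dim=-1)[:, :-1]  # drop the "no object" class -> (Q, C)
+mask_probs = mask_logits.sigmoid()              # (Q, H, W)
+
+# Each pixel's score for a class is the query-wise sum of (class prob x mask prob).
+semseg = torch.einsum('qc,qhw->chw', cls_probs, mask_probs)  # (C, H, W)
+per_pixel_label = semseg.argmax(dim=0)                       # (H, W)
+```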
    + +## Introduction + +MaskFormer requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── panoptic_train2017.json +│ │ │ ├── panoptic_train2017 +│ │ │ ├── panoptic_val2017.json +│ │ │ ├── panoptic_val2017 +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +``` + +## Results and Models + +| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | PQ | SQ | RQ | PQ_th | SQ_th | RQ_th | PQ_st | SQ_st | RQ_st | Config | Download | detail | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :-----------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | 75e | 16.2 | - | 46.854 | 80.617 | 57.085 | 51.089 | 81.511 | 61.853 | 40.463 | 79.269 | 49.888 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956.log.json) | This version was mentioned in Table XI, in paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) | +| Swin-L | pytorch | 300e | 27.2 | - | 53.249 | 81.704 | 64.231 | 58.798 | 82.923 | 70.282 | 44.874 | 79.863 | 55.097 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612-061b4eb8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612.log.json) | - | + +## Citation + +```latex +@inproceedings{cheng2021maskformer, + title={Per-Pixel Classification is Not All You Need for Semantic Segmentation}, + author={Bowen Cheng and Alexander G. 
Schwing and Alexander Kirillov}, + journal={NeurIPS}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py b/downstream/mmdetection/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py new file mode 100644 index 0000000..46b3c13 --- /dev/null +++ b/downstream/mmdetection/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py @@ -0,0 +1,238 @@ +_base_ = [ + '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' +] +num_things_classes = 80 +num_stuff_classes = 53 +num_classes = num_things_classes + num_stuff_classes +model = dict( + type='MaskFormer', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + panoptic_head=dict( + type='MaskFormerHead', + in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside + feat_channels=256, + out_channels=256, + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + num_queries=100, + pixel_decoder=dict( + type='TransformerEncoderPixelDecoder', + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.1, + proj_drop=0.1, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.1, + dropout_layer=None, + add_identity=True), + operation_order=('self_attn', 'norm', 'ffn', 'norm'), + norm_cfg=dict(type='LN'), + init_cfg=None, + batch_first=False), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True)), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=6, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.1, + proj_drop=0.1, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.1, + dropout_layer=None, + add_identity=True), + # the following parameter was not used, + # just make current api happy + feedforward_channels=2048, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=20.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=1.0)), + panoptic_fusion_head=dict( + type='MaskFormerFusionHead', + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + loss_panoptic=None, + init_cfg=None), + train_cfg=dict( + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', 
weight=1.0), + mask_cost=dict( + type='FocalLossCost', weight=20.0, binary_input=True), + dice_cost=dict( + type='DiceCost', weight=1.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + # For now, the dataset does not support + # evaluating semantic segmentation metric. + semantic_on=False, + instance_on=False, + # max_per_image is for instance segmentation. + max_per_image=100, + object_mask_thr=0.8, + iou_thr=0.8, + # In MaskFormer's panoptic postprocessing, + # it will not filter masks whose score is smaller than 0.5 . + filter_low_score=False), + init_cfg=None) + +# dataset settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadPanopticAnnotations', + with_bbox=True, + with_mask=True, + with_seg=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[[ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ]]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=1), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=1), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=1, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# optimizer +optimizer = dict( + type='AdamW', + lr=0.0001, + weight_decay=0.0001, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=dict( + custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=1.0), + 'query_embed': dict(lr_mult=1.0, decay_mult=0.0) + }, + norm_decay_mult=0.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2)) + +# learning policy +lr_config = dict( + policy='step', + gamma=0.1, + by_epoch=True, + step=[50], + warmup='linear', + warmup_by_epoch=False, + warmup_ratio=1.0, # no warmup + warmup_iters=10) +runner = dict(type='EpochBasedRunner', max_epochs=75) diff --git a/downstream/mmdetection/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py b/downstream/mmdetection/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py new file mode 100644 index 0000000..bc23c54 --- /dev/null +++ b/downstream/mmdetection/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py @@ -0,0 +1,67 @@ +_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py' + +pretrained = 
'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa +depths = [2, 2, 18, 2] +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + pretrain_img_size=384, + embed_dims=192, + patch_size=4, + window_size=12, + mlp_ratio=4, + depths=depths, + num_heads=[6, 12, 24, 48], + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + panoptic_head=dict( + in_channels=[192, 384, 768, 1536], # pass to pixel_decoder inside + pixel_decoder=dict( + _delete_=True, + type='PixelDecoder', + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU')), + enforce_decoder_input_project=True)) + +# weight_decay = 0.01 +# norm_weight_decay = 0.0 +# embed_weight_decay = 0.0 +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +norm_multi = dict(lr_mult=1.0, decay_mult=0.0) +custom_keys = { + 'norm': norm_multi, + 'absolute_pos_embed': embed_multi, + 'relative_position_bias_table': embed_multi, + 'query_embed': embed_multi +} + +# optimizer +optimizer = dict( + type='AdamW', + lr=6e-5, + weight_decay=0.01, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2)) + +# learning policy +lr_config = dict( + policy='step', + gamma=0.1, + by_epoch=True, + step=[250], + warmup='linear', + warmup_by_epoch=False, + warmup_ratio=1e-6, + warmup_iters=1500) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/downstream/mmdetection/configs/maskformer/metafile.yml b/downstream/mmdetection/configs/maskformer/metafile.yml new file mode 100644 index 0000000..6530fa1 --- /dev/null +++ b/downstream/mmdetection/configs/maskformer/metafile.yml @@ -0,0 +1,43 @@ +Collections: + - Name: MaskFormer + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 16x V100 GPUs + Architecture: + - MaskFormer + Paper: + URL: https://arxiv.org/pdf/2107.06278 + Title: 'Per-Pixel Classification is Not All You Need for Semantic Segmentation' + README: configs/maskformer/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/detectors/maskformer.py#L7 + Version: v2.22.0 + +Models: + - Name: maskformer_r50_mstrain_16x1_75e_coco + In Collection: MaskFormer + Config: configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py + Metadata: + Training Memory (GB): 16.2 + Epochs: 75 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 46.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth + - Name: maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco + In Collection: MaskFormer + Config: configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py + Metadata: + Training Memory (GB): 27.2 + Epochs: 300 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 53.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612-061b4eb8.pth diff --git a/downstream/mmdetection/configs/ms_rcnn/README.md b/downstream/mmdetection/configs/ms_rcnn/README.md new file mode 100644 index 0000000..97bca05 --- 
/dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/README.md @@ -0,0 +1,36 @@ +# MS R-CNN + +> [Mask Scoring R-CNN](https://arxiv.org/abs/1903.00241) + + + +## Abstract + +Letting a deep network be aware of the quality of its own predictions is an interesting yet important problem. In the task of instance segmentation, the confidence of instance classification is used as mask quality score in most instance segmentation frameworks. However, the mask quality, quantified as the IoU between the instance mask and its ground truth, is usually not well correlated with classification score. In this paper, we study this problem and propose Mask Scoring R-CNN which contains a network block to learn the quality of the predicted instance masks. The proposed network block takes the instance feature and the corresponding predicted mask together to regress the mask IoU. The mask scoring strategy calibrates the misalignment between mask quality and mask score, and improves instance segmentation performance by prioritizing more accurate mask predictions during COCO AP evaluation. By extensive evaluations on the COCO dataset, Mask Scoring R-CNN brings consistent and noticeable gain with different models, and outperforms the state-of-the-art Mask R-CNN. We hope our simple and effective approach will provide a new direction for improving instance segmentation. + +
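+Concretely, the rescoring described above multiplies each instance's classification confidence by the IoU predicted by the MaskIoU head. A minimal sketch with made-up numbers (the actual logic lives in the MaskScoringRoIHead configured below):
+
+```python
+import torch
+
+# Hypothetical per-instance scores; in practice these come from the model's heads.
+cls_scores = torch.tensor([0.95, 0.80, 0.60])      # classification confidence
+pred_mask_ious = torch.tensor([0.55, 0.90, 0.85])  # IoU regressed by the MaskIoU head
+
+# Mask scoring: a confidently classified but poorly segmented instance is down-weighted.
+mask_scores = cls_scores * pred_mask_ious
+print(mask_scores)  # tensor([0.5225, 0.7200, 0.5100])
+```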
    + +## Results and Models + +| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 4.5 | | 38.2 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848.log.json) | +| R-50-FPN | caffe | 2x | - | - | 38.8 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_20200506_004738.log.json) | +| R-101-FPN | caffe | 1x | 6.5 | | 40.4 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_20200506_004755.log.json) | +| R-101-FPN | caffe | 2x | - | - | 41.1 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_20200506_011134.log.json) | +| R-X101-32x4d | pytorch | 2x | 7.9 | 11.0 | 41.8 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206_100113.log.json) | +| R-X101-64x4d | pytorch | 1x | 11.0 | 8.0 | 43.0 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206_091744.log.json) | +| R-X101-64x4d | pytorch | 2x | 11.0 | 8.0 | 42.6 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308_012247.log.json) | + +## Citation + +```latex +@inproceedings{huang2019msrcnn, + title={Mask Scoring R-CNN}, + author={Zhaojin Huang and Lichao Huang and Yongchao Gong and Chang Huang and Xinggang Wang}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2019}, +} +``` diff --git a/downstream/mmdetection/configs/ms_rcnn/metafile.yml b/downstream/mmdetection/configs/ms_rcnn/metafile.yml new file mode 100644 index 0000000..a6c7dc5 --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/metafile.yml @@ -0,0 +1,159 @@ +Collections: + - Name: Mask Scoring R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RPN + - FPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/1903.00241 + Title: 'Mask Scoring R-CNN' + README: configs/ms_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_scoring_rcnn.py#L6 + Version: v2.0.0 + +Models: + - Name: ms_rcnn_r50_caffe_fpn_1x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth + + - Name: ms_rcnn_r50_caffe_fpn_2x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth + + - Name: ms_rcnn_r101_caffe_fpn_1x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth + + - Name: ms_rcnn_r101_caffe_fpn_2x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth + + - Name: ms_rcnn_x101_32x4d_fpn_1x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.9 + inference time (ms/im): + - value: 90.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth + + - Name: ms_rcnn_x101_64x4d_fpn_1x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 11.0 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth + + - Name: ms_rcnn_x101_64x4d_fpn_2x_coco + In Collection: Mask Scoring R-CNN + Config: configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 11.0 + inference time (ms/im): + - value: 125 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth diff --git a/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..9b7dcbb --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py new file mode 100644 index 0000000..202bcce --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..5845125 --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py' +model = dict( + type='MaskScoringRCNN', + roi_head=dict( + type='MaskScoringRoIHead', + mask_iou_head=dict( + type='MaskIoUHead', + 
num_convs=4, + num_fcs=2, + roi_feat_size=14, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + num_classes=80)), + # model training and testing settings + train_cfg=dict(rcnn=dict(mask_thr_binary=0.5))) diff --git a/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py new file mode 100644 index 0000000..008a70a --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..0a163ce --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + type='MaskScoringRCNN', + roi_head=dict( + type='MaskScoringRoIHead', + mask_iou_head=dict( + type='MaskIoUHead', + num_convs=4, + num_fcs=2, + roi_feat_size=14, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + num_classes=80)), + # model training and testing settings + train_cfg=dict(rcnn=dict(mask_thr_binary=0.5))) diff --git a/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..20479bb --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ms_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..ee5b734 --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './ms_rcnn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py new file mode 100644 index 0000000..54c605b --- /dev/null +++ b/downstream/mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/nas_fcos/README.md b/downstream/mmdetection/configs/nas_fcos/README.md new file mode 100644 index 0000000..def8831 --- /dev/null +++ b/downstream/mmdetection/configs/nas_fcos/README.md @@ -0,0 +1,35 @@ +# NAS-FCOS + +> [NAS-FCOS: Fast Neural Architecture Search for Object Detection](https://arxiv.org/abs/1906.04423) + + + 
+## Abstract + +The success of deep neural networks relies on significant architecture engineering. Recently neural architecture search (NAS) has emerged as a promise to greatly reduce manual effort in network design by automatically searching for optimal architectures, although typically such algorithms need an excessive amount of computational resources, e.g., a few thousand GPU-days. To date, on challenging vision tasks such as object detection, NAS, especially fast versions of NAS, is less studied. Here we propose to search for the decoder structure of object detectors with search efficiency being taken into consideration. To be more specific, we aim to efficiently search for the feature pyramid network (FPN) as well as the prediction head of a simple anchor-free object detector, namely FCOS, using a tailored reinforcement learning paradigm. With carefully designed search space, search algorithms and strategies for evaluating network quality, we are able to efficiently search a top-performing detection architecture within 4 days using 8 V100 GPUs. The discovered architecture surpasses state-of-the-art object detection models (such as Faster R-CNN, RetinaNet and FCOS) by 1.5 to 3.5 points in AP on the COCO dataset, with comparable computation complexity and memory footprint, demonstrating the efficacy of the proposed NAS for object detection. + +
    + +## Results and Models + +| Head | Backbone | Style | GN-head | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :----------: | :------: | :---: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| NAS-FCOSHead | R-50 | caffe | Y | 1x | | | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520.log.json) | +| FCOSHead | R-50 | caffe | Y | 1x | | | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521.log.json) | + +**Notes:** + +- To be consistent with the author's implementation, we use 4 GPUs with 4 images/GPU. 
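+These configs therefore assume an effective batch size of 4 x 4 = 16 (with a base learning rate of 0.01). If you train with a different setup, the usual linear learning-rate scaling convention applies; the sketch below only illustrates that convention and is not enforced by the configs themselves.
+
+```python
+# Reference setup used by these configs: 4 GPUs x 4 images/GPU = 16 images per step.
+base_lr, base_batch_size = 0.01, 16
+
+# Hypothetical alternative setup: 8 GPUs x 4 images/GPU.
+gpus, samples_per_gpu = 8, 4
+scaled_lr = base_lr * (gpus * samples_per_gpu) / base_batch_size
+print(scaled_lr)  # 0.02
+```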
+ +## Citation + +```latex +@article{wang2019fcos, + title={Nas-fcos: Fast neural architecture search for object detection}, + author={Wang, Ning and Gao, Yang and Chen, Hao and Wang, Peng and Tian, Zhi and Shen, Chunhua}, + journal={arXiv preprint arXiv:1906.04423}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/nas_fcos/metafile.yml b/downstream/mmdetection/configs/nas_fcos/metafile.yml new file mode 100644 index 0000000..1ea28cf --- /dev/null +++ b/downstream/mmdetection/configs/nas_fcos/metafile.yml @@ -0,0 +1,44 @@ +Collections: + - Name: NAS-FCOS + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 4x V100 GPUs + Architecture: + - FPN + - NAS-FCOS + - ResNet + Paper: + URL: https://arxiv.org/abs/1906.04423 + Title: 'NAS-FCOS: Fast Neural Architecture Search for Object Detection' + README: configs/nas_fcos/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/nasfcos.py#L6 + Version: v2.1.0 + +Models: + - Name: nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco + In Collection: NAS-FCOS + Config: configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth + + - Name: nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco + In Collection: NAS-FCOS + Config: configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth diff --git a/downstream/mmdetection/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py b/downstream/mmdetection/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py new file mode 100644 index 0000000..a455c92 --- /dev/null +++ b/downstream/mmdetection/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py @@ -0,0 +1,100 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='NASFCOS', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False, eps=0), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=dict( + type='NASFCOS_FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5, + norm_cfg=dict(type='BN'), + conv_cfg=dict(type='DCNv2', deform_groups=2)), + bbox_head=dict( + type='FCOSHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + norm_cfg=dict(type='GN', num_groups=32), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + 
neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +optimizer = dict( + lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) diff --git a/downstream/mmdetection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py b/downstream/mmdetection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py new file mode 100644 index 0000000..b779492 --- /dev/null +++ b/downstream/mmdetection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py @@ -0,0 +1,99 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='NASFCOS', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False, eps=0), + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=dict( + type='NASFCOS_FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs=True, + num_outs=5, + norm_cfg=dict(type='BN'), + conv_cfg=dict(type='DCNv2', deform_groups=2)), + bbox_head=dict( + type='NASFCOSHead', + num_classes=80, + in_channels=256, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + norm_cfg=dict(type='GN', num_groups=32), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + 
dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +optimizer = dict( + lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) diff --git a/downstream/mmdetection/configs/nas_fpn/README.md b/downstream/mmdetection/configs/nas_fpn/README.md new file mode 100644 index 0000000..c5acf40 --- /dev/null +++ b/downstream/mmdetection/configs/nas_fpn/README.md @@ -0,0 +1,36 @@ +# NAS-FPN + +> [NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection](https://arxiv.org/abs/1904.07392) + + + +## Abstract + +Current state-of-the-art convolutional architectures for object detection are manually designed. Here we aim to learn a better architecture of feature pyramid network for object detection. We adopt Neural Architecture Search and discover a new feature pyramid architecture in a novel scalable search space covering all cross-scale connections. The discovered architecture, named NAS-FPN, consists of a combination of top-down and bottom-up connections to fuse features across scales. NAS-FPN, combined with various backbone models in the RetinaNet framework, achieves better accuracy and latency tradeoff compared to state-of-the-art object detection models. NAS-FPN improves mobile detection accuracy by 2 AP compared to state-of-the-art SSDLite with MobileNetV2 model in \[32\] and achieves 48.3 AP which surpasses Mask R-CNN \[10\] detection accuracy with less computation time. + +
    + +## Results and Models + +We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. RetinaNet is used in the paper. + +| Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | 50e | 12.9 | 22.9 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco_20200529_095329.log.json) | +| R-50-NASFPN | 50e | 13.2 | 23.0 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco_20200528_230008.log.json) | + +**Note**: We find that it is unstable to train NAS-FPN and there is a small chance that results can be 3% mAP lower. 
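+For orientation, the fragments below condense that schedule as it appears in the configs that follow: crop training at 640x640, a large batch (8 GPUs x 8 images/GPU), BN kept trainable, and a 50-epoch runner. This is only a summary of those files, not a standalone config.
+
+```python
+# Condensed from retinanet_r50_fpn_crop640_50e_coco.py / retinanet_r50_nasfpn_crop640_50e_coco.py.
+norm_cfg = dict(type='BN', requires_grad=True)
+model = dict(backbone=dict(norm_cfg=norm_cfg, norm_eval=False))  # "unfrozen BN"
+train_crop = [  # crop training: random resize, then a fixed 640x640 crop
+    dict(type='Resize', img_scale=(640, 640), ratio_range=(0.8, 1.2), keep_ratio=True),
+    dict(type='RandomCrop', crop_size=(640, 640)),
+]
+data = dict(samples_per_gpu=8, workers_per_gpu=4)      # large batch: 8 GPUs x 8 images/GPU
+runner = dict(type='EpochBasedRunner', max_epochs=50)  # 50-epoch schedule
+```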
+ +## Citation + +```latex +@inproceedings{ghiasi2019fpn, + title={Nas-fpn: Learning scalable feature pyramid architecture for object detection}, + author={Ghiasi, Golnaz and Lin, Tsung-Yi and Le, Quoc V}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={7036--7045}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/nas_fpn/metafile.yml b/downstream/mmdetection/configs/nas_fpn/metafile.yml new file mode 100644 index 0000000..ab8d649 --- /dev/null +++ b/downstream/mmdetection/configs/nas_fpn/metafile.yml @@ -0,0 +1,59 @@ +Collections: + - Name: NAS-FPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - NAS-FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.07392 + Title: 'NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection' + README: configs/nas_fpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/nas_fpn.py#L67 + Version: v2.0.0 + +Models: + - Name: retinanet_r50_fpn_crop640_50e_coco + In Collection: NAS-FPN + Config: configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py + Metadata: + Training Memory (GB): 12.9 + inference time (ms/im): + - value: 43.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth + + - Name: retinanet_r50_nasfpn_crop640_50e_coco + In Collection: NAS-FPN + Config: configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py + Metadata: + Training Memory (GB): 13.2 + inference time (ms/im): + - value: 43.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth diff --git a/downstream/mmdetection/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py b/downstream/mmdetection/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py new file mode 100644 index 0000000..e4408fe --- /dev/null +++ b/downstream/mmdetection/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py @@ -0,0 +1,85 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +cudnn_benchmark = True +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + relu_before_extra_convs=True, + no_norm_on_lateral=True, + norm_cfg=norm_cfg), + bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), + # training and testing settings + train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) +# dataset settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=(640, 640), + ratio_range=(0.8, 1.2), + 
keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=(640, 640)), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(640, 640), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=64), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + lr=0.08, + momentum=0.9, + weight_decay=0.0001, + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.1, + step=[30, 40]) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=50) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py b/downstream/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py new file mode 100644 index 0000000..1387a10 --- /dev/null +++ b/downstream/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py @@ -0,0 +1,84 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] +cudnn_benchmark = True +# model settings +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + type='RetinaNet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg), + bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), + # training and testing settings + train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) +# dataset settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=(640, 640), + ratio_range=(0.8, 1.2), + keep_ratio=True), + dict(type='RandomCrop', crop_size=(640, 640)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=(640, 640)), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(640, 640), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=128), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline), + 
val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + lr=0.08, + momentum=0.9, + weight_decay=0.0001, + paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.1, + step=[30, 40]) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=50) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/openimages/README.md b/downstream/mmdetection/configs/openimages/README.md new file mode 100644 index 0000000..e5c1c27 --- /dev/null +++ b/downstream/mmdetection/configs/openimages/README.md @@ -0,0 +1,148 @@ +# Open Images Dataset + +> [Open Images Dataset](https://arxiv.org/abs/1811.00982) + + + +## Abstract + + + +#### Open Images v6 + +[Open Images](https://storage.googleapis.com/openimages/web/index.html) is a dataset of ~9M images annotated with image-level labels, +object bounding boxes, object segmentation masks, visual relationships, +and localized narratives: + +- It contains a total of 16M bounding boxes for 600 object classes on + 1.9M images, making it the largest existing dataset with object location + annotations. The boxes have been largely manually drawn by professional + annotators to ensure accuracy and consistency. The images are very diverse + and often contain complex scenes with several objects (8.3 per image on + average). + +- Open Images also offers visual relationship annotations, indicating pairs + of objects in particular relations (e.g. "woman playing guitar", "beer on + table"), object properties (e.g. "table is wooden"), and human actions (e.g. + "woman is jumping"). In total it has 3.3M annotations from 1,466 distinct + relationship triplets. + +- In V5 we added segmentation masks for 2.8M object instances in 350 classes. + Segmentation masks mark the outline of objects, which characterizes their + spatial extent to a much higher level of detail. + +- In V6 we added 675k localized narratives: multimodal descriptions of images + consisting of synchronized voice, text, and mouse traces over the objects being + described. (Note we originally launched localized narratives only on train in V6, + but since July 2020 we also have validation and test covered.) + +- Finally, the dataset is annotated with 59.9M image-level labels spanning 19,957 + classes. + +We believe that having a single dataset with unified annotations for image +classification, object detection, visual relationship detection, instance +segmentation, and multimodal image descriptions will enable to study these +tasks jointly and stimulate progress towards genuine scene understanding. + + + +
+
+#### Open Images Challenge 2019
+
+[Open Images Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html) is based on the V5 release of the Open
+Images dataset. The images of the dataset are very varied and
+often contain complex scenes with several objects.
+
+## Citation
+
+```latex
+@article{OpenImages,
+  author = {Alina Kuznetsova and Hassan Rom and Neil Alldrin and Jasper Uijlings and Ivan Krasin and Jordi Pont-Tuset and Shahab Kamali and Stefan Popov and Matteo Malloci and Alexander Kolesnikov and Tom Duerig and Vittorio Ferrari},
+  title = {The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale},
+  year = {2020},
+  journal = {IJCV}
+}
+```
+
+## Prepare Dataset
+
+1. You need to download and extract the Open Images dataset.
+
+2. The Open Images dataset does not provide image metas (width and height of the image),
+   which are used during evaluation. We suggest generating the test image metas before
+   training/testing with `tools/misc/get_image_metas.py`.
+
+   **Usage**
+
+   ```shell
+   python tools/misc/get_image_metas.py ${CONFIG} \
+   --out ${OUTPUT FILE NAME}
+   ```
+
+3. The directory should be like this:
+
+   ```none
+   mmdetection
+   ├── mmdet
+   ├── tools
+   ├── configs
+   ├── data
+   │   ├── OpenImages
+   │   │   ├── annotations
+   │   │   │   ├── bbox_labels_600_hierarchy.json
+   │   │   │   ├── class-descriptions-boxable.csv
+   │   │   │   ├── oidv6-train-annotations-bbox.csv
+   │   │   │   ├── validation-annotations-bbox.csv
+   │   │   │   ├── validation-annotations-human-imagelabels-boxable.csv
+   │   │   │   ├── validation-image-metas.pkl      # get from script
+   │   │   ├── challenge2019
+   │   │   │   ├── challenge-2019-train-detection-bbox.txt
+   │   │   │   ├── challenge-2019-validation-detection-bbox.txt
+   │   │   │   ├── class_label_tree.np
+   │   │   │   ├── class_sample_train.pkl
+   │   │   │   ├── challenge-2019-validation-detection-human-imagelabels.csv  # download from official website
+   │   │   │   ├── challenge-2019-validation-metas.pkl  # get from script
+   │   │   ├── OpenImages
+   │   │   │   ├── train           # training images
+   │   │   │   ├── test            # testing images
+   │   │   │   ├── validation      # validation images
+   ```
+
+**Note**:
+
+1. The training and validation images of the Open Images Challenge dataset are based on
+   Open Images v6, but the test images are different.
+2. The Open Images Challenge annotations are obtained from [TSD](https://github.com/Sense-X/TSD).
+   You can also download the annotations from the [official website](https://storage.googleapis.com/openimages/web/challenge2019_downloads.html),
+   and set `data.train.type=OpenImagesDataset`, `data.val.type=OpenImagesDataset`, and `data.test.type=OpenImagesDataset` in the config.
+3. If you do not want to use `validation-annotations-human-imagelabels-boxable.csv` and `challenge-2019-validation-detection-human-imagelabels.csv`,
+   you can set `data.val.load_image_level_labels=False` and `data.test.load_image_level_labels=False` in the config.
+   Please note that loading image-level labels is the default for the Open Images evaluation metric.
+ More details please refer to the [official website](https://storage.googleapis.com/openimages/web/evaluation.html) + +## Results and Models + +| Architecture | Backbone | Style | Lr schd | Sampler | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------------------------: | :------: | :-----: | :-----: | :-----------------: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50 | pytorch | 1x | Group Sampler | 7.7 | - | 51.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159.log.json) | +| Faster R-CNN | R-50 | pytorch | 1x | Class Aware Sampler | 7.7 | - | 60.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424.log.json) | +| Faster R-CNN (Challenge 2019) | R-50 | pytorch | 1x | Group Sampler | 7.7 | - | 54.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100.log.json) | +| Faster R-CNN (Challenge 2019) | R-50 | pytorch | 1x | Class Aware Sampler | 7.1 | - | 65.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021.log.json) | +| Retinanet | R-50 | pytorch | 1x | Group Sampler | 6.6 | - | 61.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954.log.json) | +| SSD | VGG16 | pytorch | 36e | Group Sampler | 10.8 | - | 35.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/ssd300_32x8_36e_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232.log.json) | + +**Notes:** + +- 'cas' is short for 'Class Aware Sampler' + +### Results of consider image level labels + +| Architecture | Sampler | Consider Image Level Labels | box AP | +| :-------------------------------: | :-----------------: | :-------------------------: | :----: | +| Faster R-CNN r50 (Challenge 2019) | Group Sampler | w/o | 62.19 | +| Faster R-CNN r50 (Challenge 2019) | Group Sampler | w/ | 54.87 | +| Faster R-CNN r50 (Challenge 2019) | Class Aware Sampler | w/o | 71.77 | +| Faster R-CNN r50 (Challenge 2019) | Class Aware Sampler | w/ | 64.98 | diff --git a/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py b/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py new file mode 100644 index 0000000..3dfc341 --- /dev/null +++ b/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/openimages_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict(roi_head=dict(bbox_head=dict(num_classes=601))) + +# Using 32 GPUS while training +optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=26000, + warmup_ratio=1.0 / 64, + step=[8, 11]) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py b/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py new file mode 100644 index 0000000..c8900ad --- /dev/null +++ b/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py @@ -0,0 +1,47 @@ +_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py'] + +model = dict( + roi_head=dict(bbox_head=dict(num_classes=500)), + test_cfg=dict(rcnn=dict(score_thr=0.01))) + +# dataset settings +dataset_type = 'OpenImagesChallengeDataset' +data_root = 'data/OpenImages/' +data = dict( + train=dict( + type=dataset_type, + ann_file=data_root + + 'challenge2019/challenge-2019-train-detection-bbox.txt', + img_prefix=data_root + 'OpenImages/', + label_file=data_root + 'challenge2019/cls-label-description.csv', + hierarchy_file=data_root + 'challenge2019/class_label_tree.np'), + val=dict( + type=dataset_type, + ann_file=data_root + + 'challenge2019/challenge-2019-validation-detection-bbox.txt', + img_prefix=data_root + 'OpenImages/', + label_file=data_root + 'challenge2019/cls-label-description.csv', + hierarchy_file=data_root + 'challenge2019/class_label_tree.np', + meta_file=data_root + + 'challenge2019/challenge-2019-validation-metas.pkl', + image_level_ann_file=data_root + + 'challenge2019/challenge-2019-validation-detection-' + 'human-imagelabels.csv'), + test=dict( + type=dataset_type, + ann_file=data_root + + 'challenge2019/challenge-2019-validation-detection-bbox.txt', + img_prefix=data_root + 'OpenImages/', + label_file=data_root + 'challenge2019/cls-label-description.csv', + hierarchy_file=data_root + 'challenge2019/class_label_tree.np', + meta_file=data_root + + 'challenge2019/challenge-2019-validation-metas.pkl', + image_level_ann_file=data_root + + 'challenge2019/challenge-2019-validation-detection-' + 'human-imagelabels.csv')) +evaluation = dict(interval=1, metric='mAP') + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py b/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py new file mode 100644 index 0000000..88d029d --- /dev/null +++ b/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py @@ -0,0 +1,5 @@ +_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py'] + +# Use ClassAwareSampler +data = dict( + train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1))) diff --git a/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py b/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py new file mode 100644 index 0000000..26bd64e --- /dev/null +++ b/downstream/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py @@ -0,0 +1,5 @@ +_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py'] + +# Use ClassAwareSampler +data = dict( + train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1))) diff --git a/downstream/mmdetection/configs/openimages/metafile.yml b/downstream/mmdetection/configs/openimages/metafile.yml new file mode 100644 index 0000000..9be1726 --- /dev/null +++ b/downstream/mmdetection/configs/openimages/metafile.yml @@ -0,0 +1,102 @@ +Models: + - Name: faster_rcnn_r50_fpn_32x2_1x_openimages + In Collection: Faster R-CNN + Config: configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py + Metadata: + Training Memory (GB): 7.7 + Epochs: 12 + Training Data: Open Images v6 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images v6 + Metrics: + box AP: 51.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth + + - Name: retinanet_r50_fpn_32x2_1x_openimages + In Collection: RetinaNet + Config: configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py + Metadata: + Training Memory (GB): 6.6 + Epochs: 12 + Training Data: Open Images v6 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images v6 + Metrics: + box AP: 61.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth + + - Name: ssd300_32x8_36e_openimages + In Collection: SSD + Config: configs/openimages/ssd300_32x8_36e_openimages + Metadata: + Training Memory (GB): 10.8 + Epochs: 36 + Training Data: Open Images v6 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images v6 + Metrics: + box AP: 35.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth + + - Name: faster_rcnn_r50_fpn_32x2_1x_openimages_challenge + In Collection: Faster R-CNN + Config: configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py + Metadata: + Training Memory (GB): 7.7 + Epochs: 12 + Training Data: Open Images Challenge 2019 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images Challenge 2019 + Metrics: + box AP: 54.9 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth + + - Name: faster_rcnn_r50_fpn_32x2_cas_1x_openimages + In Collection: Faster R-CNN + Config: configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py + Metadata: + Training Memory (GB): 7.7 + Epochs: 12 + Training Data: Open Images Challenge 2019 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images Challenge 2019 + Metrics: + box AP: 60.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth + + - Name: faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge + In Collection: Faster R-CNN + Config: configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py + Metadata: + Training Memory (GB): 7.1 + Epochs: 12 + Training Data: Open Images Challenge 2019 + Training Techniques: + - SGD with Momentum + - Weight Decay + Results: + - Task: Object Detection + Dataset: Open Images Challenge 2019 + Metrics: + box AP: 65.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth diff --git a/downstream/mmdetection/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py b/downstream/mmdetection/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py new file mode 100644 index 0000000..0191aa1 --- /dev/null +++ b/downstream/mmdetection/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/openimages_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict(bbox_head=dict(num_classes=601)) + +optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=26000, + warmup_ratio=1.0 / 64, + step=[8, 11]) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (32 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/openimages/ssd300_32x8_36e_openimages.py b/downstream/mmdetection/configs/openimages/ssd300_32x8_36e_openimages.py new file mode 100644 index 0000000..e2565b9 --- /dev/null +++ b/downstream/mmdetection/configs/openimages/ssd300_32x8_36e_openimages.py @@ -0,0 +1,83 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py' +] +model = dict( + bbox_head=dict( + num_classes=601, + anchor_generator=dict(basesize_ratio_range=(0.2, 0.9)))) +# dataset settings +dataset_type = 'OpenImagesDataset' +data_root = 'data/OpenImages/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True, normed_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(300, 300), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(300, 300), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, # using 32 GPUS while training. + workers_per_gpu=0, # workers_per_gpu > 0 may occur out of memory + train=dict( + _delete_=True, + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/oidv6-train-annotations-bbox.csv', + img_prefix=data_root + 'OpenImages/train/', + label_file=data_root + + 'annotations/class-descriptions-boxable.csv', + hierarchy_file=data_root + + 'annotations/bbox_labels_600_hierarchy.json', + pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict() +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=20000, + warmup_ratio=0.001, + step=[8, 11]) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (32 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=256) diff --git a/downstream/mmdetection/configs/paa/README.md b/downstream/mmdetection/configs/paa/README.md new file mode 100644 index 0000000..c8861ec --- /dev/null +++ b/downstream/mmdetection/configs/paa/README.md @@ -0,0 +1,47 @@ +# PAA + +> [Probabilistic Anchor Assignment with IoU Prediction for Object Detection](https://arxiv.org/abs/2007.08103) + + + +## Abstract + +In object detection, determining which anchors to assign as positive or negative samples, known as anchor assignment, has been revealed as a core procedure that can significantly affect a model's performance. 
In this paper we propose a novel anchor assignment strategy that adaptively separates anchors into positive and negative samples for a ground truth bounding box according to the model's learning status such that it is able to reason about the separation in a probabilistic manner. To do so we first calculate the scores of anchors conditioned on the model and fit a probability distribution to these scores. The model is then trained with anchors separated into positive and negative samples according to their probabilities. Moreover, we investigate the gap between the training and testing objectives and propose to predict the Intersection-over-Unions of detected boxes as a measure of localization quality to reduce the discrepancy. The combined score of classification and localization qualities serving as a box selection metric in non-maximum suppression well aligns with the proposed anchor assignment strategy and leads significant performance improvements. The proposed methods only add a single convolutional layer to RetinaNet baseline and does not require multiple anchors per location, so are efficient. Experimental results verify the effectiveness of the proposed methods. Especially, our models set new records for single-stage detectors on MS COCO test-dev dataset with various backbones. + +
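+A minimal, self-contained sketch of the assignment idea is given below. It is illustrative only (it is not the `PAAHead` implementation in this repo, and the helper name `separate_anchors` is made up): per-anchor scores for one ground-truth box are fitted with a two-component Gaussian mixture, and the anchors claimed by the higher-scoring component are treated as positives.
+
+```python
+# Illustrative sketch of probabilistic anchor assignment (not the mmdet PAAHead code).
+import numpy as np
+from sklearn.mixture import GaussianMixture
+
+
+def separate_anchors(scores):
+    """Split the candidate anchors of one GT box into positives/negatives.
+
+    scores: 1-D array of per-anchor quality scores (higher = better fit).
+    Returns a boolean mask that is True for anchors assigned as positive.
+    """
+    scores = np.asarray(scores, dtype=np.float64).reshape(-1, 1)
+    if len(scores) < 2:  # too few candidates to fit a mixture
+        return np.ones(len(scores), dtype=bool)
+    gmm = GaussianMixture(n_components=2, random_state=0).fit(scores)
+    pos_component = int(np.argmax(gmm.means_.ravel()))  # component with higher mean
+    return gmm.predict(scores) == pos_component
+
+
+if __name__ == '__main__':
+    rng = np.random.default_rng(0)
+    # toy scores: a small cluster of well-fitting anchors and many poor ones
+    scores = np.concatenate([rng.normal(0.8, 0.05, 10), rng.normal(0.2, 0.05, 40)])
+    print(separate_anchors(scores).sum(), 'anchors assigned as positives')
+```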
    + +## Results and Models + +We provide config files to reproduce the object detection results in the +ECCV 2020 paper for Probabilistic Anchor Assignment with IoU +Prediction for Object Detection. + +| Backbone | Lr schd | Mem (GB) | Score voting | box AP | Config | Download | +| :-------: | :-----: | :------: | :----------: | :----: | :---------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | 12e | 3.7 | True | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.log.json) | +| R-50-FPN | 12e | 3.7 | False | 40.2 | - | | +| R-50-FPN | 18e | 3.7 | True | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1.5x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.log.json) | +| R-50-FPN | 18e | 3.7 | False | 41.2 | - | | +| R-50-FPN | 24e | 3.7 | True | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.log.json) | +| R-50-FPN | 36e | 3.7 | True | 43.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722.log.json) | +| R-101-FPN | 12e | 6.2 | True | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.log.json) | +| R-101-FPN | 12e | 6.2 | False | 42.4 | - | | +| R-101-FPN | 24e | 6.2 | True | 43.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.log.json) | +| R-101-FPN | 36e | 6.2 | True | 45.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_mstrain_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202.log.json) | + +**Note**: + +1. We find that the performance is unstable with 1x setting and may fluctuate by about 0.2 mAP. We report the best results. + +## Citation + +```latex +@inproceedings{paa-eccv2020, + title={Probabilistic Anchor Assignment with IoU Prediction for Object Detection}, + author={Kim, Kang and Lee, Hee Seok}, + booktitle = {ECCV}, + year={2020} +} +``` diff --git a/downstream/mmdetection/configs/paa/metafile.yml b/downstream/mmdetection/configs/paa/metafile.yml new file mode 100644 index 0000000..e08b663 --- /dev/null +++ b/downstream/mmdetection/configs/paa/metafile.yml @@ -0,0 +1,104 @@ +Collections: + - Name: PAA + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Probabilistic Anchor Assignment + - ResNet + Paper: + URL: https://arxiv.org/abs/2007.08103 + Title: 'Probabilistic Anchor Assignment with IoU Prediction for Object Detection' + README: configs/paa/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/paa.py#L6 + Version: v2.4.0 + +Models: + - Name: paa_r50_fpn_1x_coco + In Collection: PAA + Config: configs/paa/paa_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.7 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth + + - Name: paa_r50_fpn_1.5x_coco + In Collection: PAA + Config: configs/paa/paa_r50_fpn_1.5x_coco.py + Metadata: + Training Memory (GB): 3.7 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth + + - Name: paa_r50_fpn_2x_coco + In Collection: PAA + Config: configs/paa/paa_r50_fpn_2x_coco.py + Metadata: + Training Memory (GB): 3.7 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth + + - Name: paa_r50_fpn_mstrain_3x_coco + In Collection: PAA + Config: configs/paa/paa_r50_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 3.7 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth + + - Name: paa_r101_fpn_1x_coco + In Collection: PAA + Config: configs/paa/paa_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth + + - Name: paa_r101_fpn_2x_coco + In Collection: PAA + Config: configs/paa/paa_r101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 6.2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth + + - Name: 
paa_r101_fpn_mstrain_3x_coco + In Collection: PAA + Config: configs/paa/paa_r101_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 6.2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth diff --git a/downstream/mmdetection/configs/paa/paa_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/paa/paa_r101_fpn_1x_coco.py new file mode 100644 index 0000000..94f1c27 --- /dev/null +++ b/downstream/mmdetection/configs/paa/paa_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './paa_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/paa/paa_r101_fpn_2x_coco.py b/downstream/mmdetection/configs/paa/paa_r101_fpn_2x_coco.py new file mode 100644 index 0000000..641ef76 --- /dev/null +++ b/downstream/mmdetection/configs/paa/paa_r101_fpn_2x_coco.py @@ -0,0 +1,3 @@ +_base_ = './paa_r101_fpn_1x_coco.py' +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/paa/paa_r101_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/paa/paa_r101_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..71858ed --- /dev/null +++ b/downstream/mmdetection/configs/paa/paa_r101_fpn_mstrain_3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './paa_r50_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/paa/paa_r50_fpn_1.5x_coco.py b/downstream/mmdetection/configs/paa/paa_r50_fpn_1.5x_coco.py new file mode 100644 index 0000000..aabce4a --- /dev/null +++ b/downstream/mmdetection/configs/paa/paa_r50_fpn_1.5x_coco.py @@ -0,0 +1,3 @@ +_base_ = './paa_r50_fpn_1x_coco.py' +lr_config = dict(step=[12, 16]) +runner = dict(type='EpochBasedRunner', max_epochs=18) diff --git a/downstream/mmdetection/configs/paa/paa_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/paa/paa_r50_fpn_1x_coco.py new file mode 100644 index 0000000..4c9c4aa --- /dev/null +++ b/downstream/mmdetection/configs/paa/paa_r50_fpn_1x_coco.py @@ -0,0 +1,70 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='PAA', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='PAAHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', 
use_sigmoid=True, loss_weight=0.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.1, + neg_iou_thr=0.1, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/paa/paa_r50_fpn_2x_coco.py b/downstream/mmdetection/configs/paa/paa_r50_fpn_2x_coco.py new file mode 100644 index 0000000..663d2c0 --- /dev/null +++ b/downstream/mmdetection/configs/paa/paa_r50_fpn_2x_coco.py @@ -0,0 +1,3 @@ +_base_ = './paa_r50_fpn_1x_coco.py' +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/paa/paa_r50_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/paa/paa_r50_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..91fa28c --- /dev/null +++ b/downstream/mmdetection/configs/paa/paa_r50_fpn_mstrain_3x_coco.py @@ -0,0 +1,20 @@ +_base_ = './paa_r50_fpn_1x_coco.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/pafpn/README.md b/downstream/mmdetection/configs/pafpn/README.md new file mode 100644 index 0000000..ae1e3a3 --- /dev/null +++ b/downstream/mmdetection/configs/pafpn/README.md @@ -0,0 +1,34 @@ +# PAFPN + +> [Path Aggregation Network for Instance Segmentation](https://arxiv.org/abs/1803.01534) + + + +## Abstract + +The way that information propagates in neural networks is of great importance. In this paper, we propose Path Aggregation Network (PANet) aiming at boosting information flow in proposal-based instance segmentation framework. Specifically, we enhance the entire feature hierarchy with accurate localization signals in lower layers by bottom-up path augmentation, which shortens the information path between lower layers and topmost feature. We present adaptive feature pooling, which links feature grid and all feature levels to make useful information in each feature level propagate directly to following proposal subnetworks. A complementary branch capturing different views for each proposal is created to further improve mask prediction. These improvements are simple to implement, with subtle extra computational overhead. Our PANet reaches the 1st place in the COCO 2017 Challenge Instance Segmentation task and the 2nd place in Object Detection task without large-batch training. It is also state-of-the-art on MVD and Cityscapes. + +
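+The main addition over a plain FPN neck is the extra bottom-up path. The snippet below is a rough sketch of that aggregation step, illustrative only and under simplifying assumptions (the actual `PAFPN` neck adds 3x3 convs and handles channels and strides properly); in practice you only swap the neck type in a config, as `faster_rcnn_r50_pafpn_1x_coco.py` below does.
+
+```python
+# Rough sketch of PAFPN's extra bottom-up aggregation over FPN outputs
+# (illustrative only, not the mmdet PAFPN neck).
+import torch
+import torch.nn.functional as F
+
+
+def bottom_up_path(fpn_outs):
+    """fpn_outs: FPN feature maps ordered from highest resolution (e.g. P2..P5)."""
+    outs = [fpn_outs[0]]
+    for feat in fpn_outs[1:]:
+        # downsample the previously fused (finer) level and add it to the current one
+        down = F.max_pool2d(outs[-1], kernel_size=2, stride=2)
+        outs.append(feat + down)
+    return outs
+
+
+if __name__ == '__main__':
+    feats = [torch.randn(1, 256, 64 // 2**i, 64 // 2**i) for i in range(4)]
+    print([tuple(f.shape) for f in bottom_up_path(feats)])
+```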
    + +## Results and Models + +| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 4.0 | 17.2 | 37.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_20200503_105836.log.json) | + +## Citation + +```latex +@inproceedings{liu2018path, + author = {Shu Liu and + Lu Qi and + Haifang Qin and + Jianping Shi and + Jiaya Jia}, + title = {Path Aggregation Network for Instance Segmentation}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2018} +} +``` diff --git a/downstream/mmdetection/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py b/downstream/mmdetection/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py new file mode 100644 index 0000000..b2fdef9 --- /dev/null +++ b/downstream/mmdetection/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' + +model = dict( + neck=dict( + type='PAFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/pafpn/metafile.yml b/downstream/mmdetection/configs/pafpn/metafile.yml new file mode 100644 index 0000000..f9cf97c --- /dev/null +++ b/downstream/mmdetection/configs/pafpn/metafile.yml @@ -0,0 +1,38 @@ +Collections: + - Name: PAFPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - PAFPN + Paper: + URL: https://arxiv.org/abs/1803.01534 + Title: 'Path Aggregation Network for Instance Segmentation' + README: configs/pafpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/pafpn.py#L11 + Version: v2.0.0 + +Models: + - Name: faster_rcnn_r50_pafpn_1x_coco + In Collection: PAFPN + Config: configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 58.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth diff --git a/downstream/mmdetection/configs/panoptic_fpn/README.md b/downstream/mmdetection/configs/panoptic_fpn/README.md new file mode 100644 index 0000000..12980ce --- /dev/null +++ b/downstream/mmdetection/configs/panoptic_fpn/README.md @@ -0,0 +1,62 @@ +# Panoptic FPN + +> [Panoptic feature pyramid networks](https://arxiv.org/abs/1901.02446) + + + 
+## Abstract + +The recently introduced panoptic segmentation task has renewed our community's interest in unifying the tasks of instance segmentation (for thing classes) and semantic segmentation (for stuff classes). However, current state-of-the-art methods for this joint task use separate and dissimilar networks for instance and semantic segmentation, without performing any shared computation. In this work, we aim to unify these methods at the architectural level, designing a single network for both tasks. Our approach is to endow Mask R-CNN, a popular instance segmentation method, with a semantic segmentation branch using a shared Feature Pyramid Network (FPN) backbone. Surprisingly, this simple baseline not only remains effective for instance segmentation, but also yields a lightweight, top-performing method for semantic segmentation. In this work, we perform a detailed study of this minimally extended version of Mask R-CNN with FPN, which we refer to as Panoptic FPN, and show it is a robust and accurate baseline for both tasks. Given its effectiveness and conceptual simplicity, we hope our method can serve as a strong baseline and aid future research in panoptic segmentation. + +
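+The semantic branch described above can be sketched in a few lines. The snippet is illustrative only (it is not the `PanopticFPNHead` used by the configs below), and the channel/class counts are assumptions mirroring the config in this diff: 256 FPN channels, 128 inner channels, and 53 stuff classes plus one channel grouping all thing classes.
+
+```python
+# Illustrative sketch of a Panoptic-FPN-style semantic branch
+# (not mmdet's PanopticFPNHead).
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class TinySemanticBranch(nn.Module):
+    def __init__(self, in_channels=256, inner_channels=128, num_classes=54):
+        super().__init__()
+        self.reduce = nn.Conv2d(in_channels, inner_channels, 3, padding=1)
+        self.logits = nn.Conv2d(inner_channels, num_classes, 1)
+
+    def forward(self, fpn_feats):
+        # fpn_feats: FPN maps at strides 4, 8, 16, 32 (finest first)
+        target = fpn_feats[0].shape[-2:]
+        fused = sum(
+            F.interpolate(F.relu(self.reduce(f)), size=target,
+                          mode='bilinear', align_corners=False)
+            for f in fpn_feats)
+        return self.logits(fused)  # per-pixel class logits at 1/4 input scale
+
+
+if __name__ == '__main__':
+    feats = [torch.randn(1, 256, 100 // 2**i, 100 // 2**i) for i in range(4)]
+    print(TinySemanticBranch()(feats).shape)  # torch.Size([1, 54, 100, 100])
+```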
    + +## Dataset + +PanopticFPN requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── panoptic_train2017.json +│ │ │ ├── panoptic_train2017 +│ │ │ ├── panoptic_val2017.json +│ │ │ ├── panoptic_val2017 +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +``` + +## Results and Models + +| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | PQ | SQ | RQ | PQ_th | SQ_th | RQ_th | PQ_st | SQ_st | RQ_st | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :---: | :---: | :---: | :---------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 4.7 | | 40.2 | 77.8 | 49.3 | 47.8 | 80.9 | 57.5 | 28.9 | 73.1 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153.log.json) | +| R-50-FPN | pytorch | 3x | - | - | 42.5 | 78.1 | 51.7 | 50.3 | 81.5 | 60.3 | 30.7 | 73.0 | 38.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155.log.json) | +| R-101-FPN | pytorch | 1x | 6.7 | | 42.2 | 78.3 | 51.4 | 50.1 | 81.4 | 59.9 | 30.3 | 73.6 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950.log.json) | +| R-101-FPN | pytorch | 3x | - | - | 44.1 | 78.9 | 53.6 | 52.1 | 81.7 | 62.3 | 32.0 | 74.6 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712.log.json) | + +## Citation + +The base method for panoptic segmentation task. + +```latex +@inproceedings{kirillov2018panopticfpn, + author = { + Alexander Kirillov, + Ross Girshick, + Kaiming He, + Piotr Dollar, + }, + title = {Panoptic Feature Pyramid Networks}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2019} +} +``` diff --git a/downstream/mmdetection/configs/panoptic_fpn/metafile.yml b/downstream/mmdetection/configs/panoptic_fpn/metafile.yml new file mode 100644 index 0000000..8c9d39d --- /dev/null +++ b/downstream/mmdetection/configs/panoptic_fpn/metafile.yml @@ -0,0 +1,70 @@ +Collections: + - Name: PanopticFPN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - PanopticFPN + Paper: + URL: https://arxiv.org/pdf/1901.02446 + Title: 'Panoptic feature pyramid networks' + README: configs/panoptic_fpn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/detectors/panoptic_fpn.py#L7 + Version: v2.16.0 + +Models: + - Name: panoptic_fpn_r50_fpn_1x_coco + In Collection: PanopticFPN + Config: configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.6 + Epochs: 12 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth + + - Name: panoptic_fpn_r50_fpn_mstrain_3x_coco + In Collection: PanopticFPN + Config: configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 4.6 + Epochs: 36 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 42.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth + + - Name: panoptic_fpn_r101_fpn_1x_coco + In Collection: PanopticFPN + Config: configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.5 + Epochs: 12 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 42.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth + + - Name: panoptic_fpn_r101_fpn_mstrain_3x_coco + In Collection: PanopticFPN + Config: configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 6.5 + Epochs: 36 + Results: + - Task: Panoptic Segmentation + Dataset: COCO + Metrics: + PQ: 44.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth diff --git a/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..78b8079 --- /dev/null +++ b/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './panoptic_fpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + 
checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..057e481 --- /dev/null +++ b/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './panoptic_fpn_r50_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..2995524 --- /dev/null +++ b/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_panoptic.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='PanopticFPN', + semantic_head=dict( + type='PanopticFPNHead', + num_things_classes=80, + num_stuff_classes=53, + in_channels=256, + inner_channels=128, + start_level=0, + end_level=4, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=None, + loss_seg=dict( + type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)), + panoptic_fusion_head=dict( + type='HeuristicFusionHead', + num_things_classes=80, + num_stuff_classes=53), + test_cfg=dict( + panoptic=dict( + score_thr=0.6, + max_per_img=100, + mask_thr_binary=0.5, + mask_overlap=0.5, + nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True), + stuff_area_limit=4096))) + +custom_hooks = [] diff --git a/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..b510935 --- /dev/null +++ b/downstream/mmdetection/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py @@ -0,0 +1,61 @@ +_base_ = './panoptic_fpn_r50_fpn_1x_coco.py' + +# dataset settings +dataset_type = 'CocoPanopticDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], +# multiscale_mode='range' +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadPanopticAnnotations', + with_bbox=True, + with_mask=True, + with_seg=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='SegRescale', scale_factor=1 / 4), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + train=dict( + _delete_=True, + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + 
ann_file=data_root + 'annotations/panoptic_train2017.json', + img_prefix=data_root + 'train2017/', + seg_prefix=data_root + 'annotations/panoptic_train2017/', + pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/pascal_voc/README.md b/downstream/mmdetection/configs/pascal_voc/README.md new file mode 100644 index 0000000..3c09813 --- /dev/null +++ b/downstream/mmdetection/configs/pascal_voc/README.md @@ -0,0 +1,40 @@ +# Pascal VOC + +> [The Pascal Visual Object Classes (VOC) Challenge](https://link.springer.com/article/10.1007/s11263-009-0275-4) + + + +## Abstract + +The Pascal Visual Object Classes (VOC) challenge is a benchmark in visual object category recognition and detection, providing the vision and machine learning communities with a standard dataset of images and annotation, and standard evaluation procedures. Organised annually from 2005 to present, the challenge and its associated dataset has become accepted as the benchmark for object detection. + +This paper describes the dataset and evaluation procedure. We review the state-of-the-art in evaluated methods for both classification and detection, analyse whether the methods are statistically different, what they are learning from the images (e.g. the object or its context), and what the methods find easy or confuse. The paper concludes with lessons learnt in the three year history of the challenge, and proposes directions for future improvement and extension. + +
    + +## Results and Models + +| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN C4 | R-50 | caffe | 18k | | - | 80.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712//home/dong/code_sensetime/2022Q1/mmdetection/work_dirs/prepare_voc/gather/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712_20220314_234327-847a14d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712_20220314_234327.log.json) | +| Faster R-CNN | R-50 | pytorch | 1x | 2.6 | - | 80.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712-54bef0f3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712.log.json) | +| Retinanet | R-50 | pytorch | 1x | 2.1 | - | 77.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200617-47cbdd0e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200616_014642.log.json) | +| SSD300 | VGG16 | - | 120e | - | - | 76.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/ssd300_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd300_voc0712/ssd300_voc0712_20220320_194658-17edda1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd300_voc0712/ssd300_voc0712_20220320_194658.log.json) | +| SSD512 | VGG16 | - | 120e | - | - | 79.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/ssd512_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd512_voc0712/ssd512_voc0712_20220320_194717-03cefefe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd512_voc0712/ssd512_voc0712_20220320_194717.log.json) | + +## Citation + +```latex +@Article{Everingham10, + author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. 
and Zisserman, A.", + title = "The Pascal Visual Object Classes (VOC) Challenge", + journal = "International Journal of Computer Vision", + volume = "88", + year = "2010", + number = "2", + month = jun, + pages = "303--338", +} +``` diff --git a/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py b/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py new file mode 100644 index 0000000..7bb1d73 --- /dev/null +++ b/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py @@ -0,0 +1,81 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_caffe_c4.py', + '../_base_/default_runtime.py' +] +model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) + +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576), + (1333, 608), (1333, 640), (1333, 672), (1333, 704), + (1333, 736), (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=[ + data_root + 'VOC2007/ImageSets/Main/trainval.txt', + data_root + 'VOC2012/ImageSets/Main/trainval.txt' + ], + img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + pipeline=test_pipeline)) + +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=100, + warmup_ratio=0.001, + step=[12000, 16000]) + +# Runner type +runner = dict(type='IterBasedRunner', max_iters=18000) + +checkpoint_config = dict(interval=3000) +evaluation = dict(interval=3000, metric='mAP') diff --git a/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py b/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py new file mode 100644 index 0000000..7866ace --- /dev/null +++ b/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', + '../_base_/default_runtime.py' +] +model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = 
dict(grad_clip=None) +# learning policy +# actual epoch = 3 * 3 = 9 +lr_config = dict(policy='step', step=[3]) +# runtime settings +runner = dict( + type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py b/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py new file mode 100644 index 0000000..12eee2c --- /dev/null +++ b/downstream/mmdetection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', + '../_base_/default_runtime.py' +] +model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) + +CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', + 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/VOCdevkit/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1000, 600), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + ann_file='data/voc0712_trainval.json', + img_prefix='data/VOCdevkit', + pipeline=train_pipeline, + classes=CLASSES)), + val=dict( + type=dataset_type, + ann_file='data/voc07_test.json', + img_prefix='data/VOCdevkit', + pipeline=test_pipeline, + classes=CLASSES), + test=dict( + type=dataset_type, + ann_file='data/voc07_test.json', + img_prefix='data/VOCdevkit', + pipeline=test_pipeline, + classes=CLASSES)) +evaluation = dict(interval=1, metric='bbox') + +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +# actual epoch = 3 * 3 = 9 +lr_config = dict(policy='step', step=[3]) +# runtime settings +runner = dict( + type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/downstream/mmdetection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py b/downstream/mmdetection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py new file mode 100644 index 0000000..b4b050d --- /dev/null +++ b/downstream/mmdetection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py', + '../_base_/default_runtime.py' +] +model = dict(bbox_head=dict(num_classes=20)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +# actual epoch = 3 * 3 = 9 +lr_config = dict(policy='step', step=[3]) +# runtime 
settings +runner = dict( + type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/downstream/mmdetection/configs/pascal_voc/ssd300_voc0712.py b/downstream/mmdetection/configs/pascal_voc/ssd300_voc0712.py new file mode 100644 index 0000000..e7008ae --- /dev/null +++ b/downstream/mmdetection/configs/pascal_voc/ssd300_voc0712.py @@ -0,0 +1,74 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py', + '../_base_/default_runtime.py' +] +model = dict( + bbox_head=dict( + num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2, + 0.9)))) +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(300, 300), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(300, 300), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=3, + train=dict( + type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict() +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 20]) +checkpoint_config = dict(interval=1) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=24) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/pascal_voc/ssd512_voc0712.py b/downstream/mmdetection/configs/pascal_voc/ssd512_voc0712.py new file mode 100644 index 0000000..f4627c2 --- /dev/null +++ b/downstream/mmdetection/configs/pascal_voc/ssd512_voc0712.py @@ -0,0 +1,57 @@ +_base_ = 'ssd300_voc0712.py' +input_size = 512 +model = dict( + neck=dict( + out_channels=(512, 1024, 512, 256, 256, 256, 256), + level_strides=(2, 2, 2, 2, 1), + level_paddings=(1, 1, 1, 1, 1), + last_kernel_size=4), + bbox_head=dict( + in_channels=(512, 1024, 512, 256, 256, 256, 256), + anchor_generator=dict( + input_size=input_size, + strides=[8, 16, 32, 64, 128, 256, 512], + basesize_ratio_range=(0.15, 0.9), + ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2])))) +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(512, 512), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(512, 512), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/pisa/README.md b/downstream/mmdetection/configs/pisa/README.md new file mode 100644 index 0000000..c847c85 --- /dev/null +++ b/downstream/mmdetection/configs/pisa/README.md @@ -0,0 +1,50 @@ +# PISA + +> [Prime Sample Attention in Object Detection](https://arxiv.org/abs/1904.04821) + + + +## Abstract + +It is a common paradigm in object detection frameworks to treat all samples equally and target at maximizing the performance on average. In this work, we revisit this paradigm through a careful study on how different samples contribute to the overall performance measured in terms of mAP. Our study suggests that the samples in each mini-batch are neither independent nor equally important, and therefore a better classifier on average does not necessarily mean higher mAP. Motivated by this study, we propose the notion of Prime Samples, those that play a key role in driving the detection performance. We further develop a simple yet effective sampling and learning strategy called PrIme Sample Attention (PISA) that directs the focus of the training process towards such samples. Our experiments demonstrate that it is often more effective to focus on prime samples than hard samples when training a detector. 
Particularly, on the MSCOCO dataset, PISA outperforms the random sampling baseline and hard mining schemes, e.g., OHEM and Focal Loss, consistently by around 2% on both single-stage and two-stage detectors, even with a strong backbone ResNeXt-101. + +
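The configs in this folder turn PISA on by swapping in `PISARoIHead` / `PISARetinaHead` / `PISASSDHead` and adding `isr` and `carl` entries to `train_cfg`. As a rough illustration of the classification-aware re-weighting idea behind the `carl` term (a hedged sketch, not MMDetection's actual implementation; the helper name and toy tensors below are made up), each positive sample's regression loss can be scaled by a function of its classification score:

```python
# Illustrative sketch only: classification-aware re-weighting of per-sample
# regression losses in the spirit of PISA's CARL term. `k` and `bias` mirror
# the `carl=dict(k=1, bias=0.2)` entries in the configs below.
import torch


def carl_style_weights(pos_cls_scores, k=1.0, bias=0.2):
    """Give positives that the classifier already rates highly a larger say in
    the regression loss, then renormalize so the overall loss scale is kept."""
    w = (bias + (1.0 - bias) * pos_cls_scores).pow(k)
    return w * pos_cls_scores.numel() / w.sum().clamp(min=1e-6)


# Toy usage: scale per-positive smooth-L1 losses by the weights.
cls_scores = torch.tensor([0.9, 0.6, 0.2])   # classifier prob. of the GT class
reg_losses = torch.tensor([0.5, 0.8, 1.2])   # per-positive regression losses
weighted_loss = (carl_style_weights(cls_scores) * reg_losses).mean()
```

The `isr` entry applies an analogous importance-based re-weighting on the classification side.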
    + +## Results and Models + +| PISA | Network | Backbone | Lr schd | box AP | mask AP | Config | Download | +| :--: | :----------: | :------------: | :-----: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| × | Faster R-CNN | R-50-FPN | 1x | 36.4 | | - | | +| √ | Faster R-CNN | R-50-FPN | 1x | 38.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco_20200506_185619.log.json) | +| × | Faster R-CNN | X101-32x4d-FPN | 1x | 40.1 | | - | | +| √ | Faster R-CNN | X101-32x4d-FPN | 1x | 41.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco_20200505_181503.log.json) | +| × | Mask R-CNN | R-50-FPN | 1x | 37.3 | 34.2 | - | | +| √ | Mask R-CNN | R-50-FPN | 1x | 39.1 | 35.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco_20200508_150500.log.json) | +| × | Mask R-CNN | X101-32x4d-FPN | 1x | 41.1 | 37.1 | - | | +| √ | Mask R-CNN | X101-32x4d-FPN | 1x | | | | | +| × | RetinaNet | R-50-FPN | 1x | 35.6 | | - | | +| √ | RetinaNet | R-50-FPN | 1x | 36.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco_20200504_014311.log.json) | +| × | RetinaNet | X101-32x4d-FPN | 1x | 39.0 | | - | | +| √ | RetinaNet | X101-32x4d-FPN | 1x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco_20200505_001404.log.json) | +| × | SSD300 | VGG16 | 1x | 25.6 | | - | | +| √ | SSD300 | VGG16 | 1x | 27.6 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd300_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco_20200504_144325.log.json) | +| × | SSD512 | VGG16 | 1x | 29.3 | | - | | +| √ | SSD512 | VGG16 | 1x | 31.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd512_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco_20200508_131030.log.json) | + +**Notes:** + +- In the original paper, all models are trained and tested on mmdet v1.x, thus results may not be exactly the same with this release on v2.0. +- It is noted PISA only modifies the training pipeline so the inference time remains the same with the baseline. + +## Citation + +```latex +@inproceedings{cao2019prime, + title={Prime sample attention in object detection}, + author={Cao, Yuhang and Chen, Kai and Loy, Chen Change and Lin, Dahua}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2020} +} +``` diff --git a/downstream/mmdetection/configs/pisa/metafile.yml b/downstream/mmdetection/configs/pisa/metafile.yml new file mode 100644 index 0000000..cd43afb --- /dev/null +++ b/downstream/mmdetection/configs/pisa/metafile.yml @@ -0,0 +1,110 @@ +Collections: + - Name: PISA + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - PISA + - RPN + - ResNet + - RoIPool + Paper: + URL: https://arxiv.org/abs/1904.04821 + Title: 'Prime Sample Attention in Object Detection' + README: configs/pisa/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/pisa_roi_head.py#L8 + Version: v2.1.0 + +Models: + - Name: pisa_faster_rcnn_r50_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth + + - Name: pisa_faster_rcnn_x101_32x4d_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth + + - Name: pisa_mask_rcnn_r50_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth + + - Name: pisa_retinanet_r50_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth + + - Name: 
pisa_retinanet_x101_32x4d_fpn_1x_coco + In Collection: PISA + Config: configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth + + - Name: pisa_ssd300_coco + In Collection: PISA + Config: configs/pisa/pisa_ssd300_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 27.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth + + - Name: pisa_ssd512_coco + In Collection: PISA + Config: configs/pisa/pisa_ssd512_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 31.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth diff --git a/downstream/mmdetection/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..71e65b0 --- /dev/null +++ b/downstream/mmdetection/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' + +model = dict( + roi_head=dict( + type='PISARoIHead', + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + sampler=dict( + type='ScoreHLRSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + k=0.5, + bias=0.), + isr=dict(k=2, bias=0), + carl=dict(k=1, bias=0.2))), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/downstream/mmdetection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..16edd99 --- /dev/null +++ b/downstream/mmdetection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py' + +model = dict( + roi_head=dict( + type='PISARoIHead', + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + sampler=dict( + type='ScoreHLRSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + k=0.5, + bias=0.), + isr=dict(k=2, bias=0), + carl=dict(k=1, bias=0.2))), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/downstream/mmdetection/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..047a293 --- /dev/null +++ b/downstream/mmdetection/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' + +model = dict( + roi_head=dict( + type='PISARoIHead', + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict( + 
nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + sampler=dict( + type='ScoreHLRSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + k=0.5, + bias=0.), + isr=dict(k=2, bias=0), + carl=dict(k=1, bias=0.2))), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/downstream/mmdetection/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..2186a8f --- /dev/null +++ b/downstream/mmdetection/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' + +model = dict( + roi_head=dict( + type='PISARoIHead', + bbox_head=dict( + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), + train_cfg=dict( + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + sampler=dict( + type='ScoreHLRSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + k=0.5, + bias=0.), + isr=dict(k=2, bias=0), + carl=dict(k=1, bias=0.2))), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/downstream/mmdetection/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000..70f89e2 --- /dev/null +++ b/downstream/mmdetection/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' + +model = dict( + bbox_head=dict( + type='PISARetinaHead', + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), + train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) diff --git a/downstream/mmdetection/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..b97b672 --- /dev/null +++ b/downstream/mmdetection/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py' + +model = dict( + bbox_head=dict( + type='PISARetinaHead', + loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), + train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) diff --git a/downstream/mmdetection/configs/pisa/pisa_ssd300_coco.py b/downstream/mmdetection/configs/pisa/pisa_ssd300_coco.py new file mode 100644 index 0000000..b5cc006 --- /dev/null +++ b/downstream/mmdetection/configs/pisa/pisa_ssd300_coco.py @@ -0,0 +1,8 @@ +_base_ = '../ssd/ssd300_coco.py' + +model = dict( + bbox_head=dict(type='PISASSDHead'), + train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) + +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/pisa/pisa_ssd512_coco.py b/downstream/mmdetection/configs/pisa/pisa_ssd512_coco.py new file mode 100644 index 0000000..3219d6d --- /dev/null +++ b/downstream/mmdetection/configs/pisa/pisa_ssd512_coco.py @@ -0,0 +1,8 @@ +_base_ = '../ssd/ssd512_coco.py' + +model = dict( + bbox_head=dict(type='PISASSDHead'), + train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) + +optimizer_config = dict( + 
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/point_rend/README.md b/downstream/mmdetection/configs/point_rend/README.md new file mode 100644 index 0000000..183e83d --- /dev/null +++ b/downstream/mmdetection/configs/point_rend/README.md @@ -0,0 +1,33 @@ +# PointRend + +> [PointRend: Image Segmentation as Rendering](https://arxiv.org/abs/1912.08193) + + + +## Abstract + +We present a new method for efficient high-quality image segmentation of objects and scenes. By analogizing classical computer graphics methods for efficient rendering with over- and undersampling challenges faced in pixel labeling tasks, we develop a unique perspective of image segmentation as a rendering problem. From this vantage, we present the PointRend (Point-based Rendering) neural network module: a module that performs point-based segmentation predictions at adaptively selected locations based on an iterative subdivision algorithm. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. While many concrete implementations of the general idea are possible, we show that a simple design already achieves excellent results. Qualitatively, PointRend outputs crisp object boundaries in regions that are over-smoothed by previous methods. Quantitatively, PointRend yields significant gains on COCO and Cityscapes, for both instance and semantic segmentation. PointRend's efficiency enables output resolutions that are otherwise impractical in terms of memory or computation compared to existing approaches. + +
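To make the "iterative subdivision algorithm" concrete: at test time the coarse mask is repeatedly upsampled and only the most uncertain points are re-predicted by the point head, which is what the `subdivision_steps` and `subdivision_num_points` settings in the configs below control. A minimal sketch of one such step (illustration only, not the MMDetection code; `point_head_fn` is a hypothetical stand-in for the trained point head):

```python
# Sketch of one PointRend-style subdivision step at inference time. The
# hypothetical `point_head_fn` maps flat point indices to refined logits.
import torch
import torch.nn.functional as F


def subdivision_step(mask_logits, point_head_fn, num_points=28 * 28):
    """mask_logits: (N, 1, H, W) coarse per-instance mask logits."""
    n = mask_logits.size(0)
    # 1. Upsample the coarse prediction by 2x.
    mask_logits = F.interpolate(
        mask_logits, scale_factor=2, mode='bilinear', align_corners=False)
    h, w = mask_logits.shape[-2:]
    flat = mask_logits.view(n, h * w)
    # 2. Rank points by uncertainty (distance to the 0.5 decision boundary)
    #    and keep only the most uncertain ones.
    uncertainty = -(torch.sigmoid(flat) - 0.5).abs()
    idx = uncertainty.topk(min(num_points, h * w), dim=1).indices
    # 3. Re-predict just those points with the point head and scatter them back.
    flat = flat.scatter(1, idx, point_head_fn(idx))
    return flat.view(n, 1, h, w)
```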
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 4.6 | | 38.4 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco_20200612_161407.log.json) | +| R-50-FPN | caffe | 3x | 4.6 | | 41.0 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco_20200614_002632.log.json) | + +Note: All models are trained with multi-scale, the input image shorter side is randomly scaled to one of (640, 672, 704, 736, 768, 800). 
+ +## Citation + +```latex +@InProceedings{kirillov2019pointrend, + title={{PointRend}: Image Segmentation as Rendering}, + author={Alexander Kirillov and Yuxin Wu and Kaiming He and Ross Girshick}, + journal={ArXiv:1912.08193}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/point_rend/metafile.yml b/downstream/mmdetection/configs/point_rend/metafile.yml new file mode 100644 index 0000000..82aea05 --- /dev/null +++ b/downstream/mmdetection/configs/point_rend/metafile.yml @@ -0,0 +1,54 @@ +Collections: + - Name: PointRend + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - PointRend + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1912.08193 + Title: 'PointRend: Image Segmentation as Rendering' + README: configs/point_rend/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/point_rend.py#L6 + Version: v2.2.0 + +Models: + - Name: point_rend_r50_caffe_fpn_mstrain_1x_coco + In Collection: PointRend + Config: configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py + Metadata: + Training Memory (GB): 4.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth + + - Name: point_rend_r50_caffe_fpn_mstrain_3x_coco + In Collection: PointRend + Config: configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 4.6 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth diff --git a/downstream/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py b/downstream/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py new file mode 100644 index 0000000..0c0e563 --- /dev/null +++ b/downstream/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py @@ -0,0 +1,44 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' +# model settings +model = dict( + type='PointRend', + roi_head=dict( + type='PointRendRoIHead', + mask_roi_extractor=dict( + type='GenericRoIExtractor', + aggregation='concat', + roi_layer=dict( + _delete_=True, type='SimpleRoIAlign', output_size=14), + out_channels=256, + featmap_strides=[4]), + mask_head=dict( + _delete_=True, + type='CoarseMaskHead', + num_fcs=2, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + point_head=dict( + type='MaskPointHead', + num_fcs=3, + in_channels=256, + fc_channels=256, + num_classes=80, + coarse_pred_each_layer=True, + loss_point=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + mask_size=7, + num_points=14 * 14, + oversample_ratio=3, + importance_sample_ratio=0.75)), + test_cfg=dict( + rcnn=dict( + subdivision_steps=5, + subdivision_num_points=28 * 28, + scale_factor=2))) diff --git 
a/downstream/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..169278e --- /dev/null +++ b/downstream/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py @@ -0,0 +1,4 @@ +_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py' +# learning policy +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/pvt/README.md b/downstream/mmdetection/configs/pvt/README.md new file mode 100644 index 0000000..1fd090b --- /dev/null +++ b/downstream/mmdetection/configs/pvt/README.md @@ -0,0 +1,57 @@ +# PVT + +> [Pyramid vision transformer: A versatile backbone for dense prediction without convolutions](https://arxiv.org/abs/2102.12122) + + + +## Abstract + +Although using convolutional neural networks (CNNs) as backbones achieves great successes in computer vision, this work investigates a simple backbone network useful for many dense prediction tasks without convolutions. Unlike the recently-proposed Transformer model (e.g., ViT) that is specially designed for image classification, we propose Pyramid Vision Transformer~(PVT), which overcomes the difficulties of porting Transformer to various dense prediction tasks. PVT has several merits compared to prior arts. (1) Different from ViT that typically has low-resolution outputs and high computational and memory cost, PVT can be not only trained on dense partitions of the image to achieve high output resolution, which is important for dense predictions but also using a progressive shrinking pyramid to reduce computations of large feature maps. (2) PVT inherits the advantages from both CNN and Transformer, making it a unified backbone in various vision tasks without convolutions by simply replacing CNN backbones. (3) We validate PVT by conducting extensive experiments, showing that it boosts the performance of many downstream tasks, e.g., object detection, semantic, and instance segmentation. For example, with a comparable number of parameters, RetinaNet+PVT achieves 40.4 AP on the COCO dataset, surpassing RetinNet+ResNet50 (36.3 AP) by 4.1 absolute AP. We hope PVT could serve as an alternative and useful backbone for pixel-level predictions and facilitate future researches. + +Transformer recently has shown encouraging progresses in computer vision. In this work, we present new baselines by improving the original Pyramid Vision Transformer (abbreviated as PVTv1) by adding three designs, including (1) overlapping patch embedding, (2) convolutional feed-forward networks, and (3) linear complexity attention layers. +With these modifications, our PVTv2 significantly improves PVTv1 on three tasks e.g., classification, detection, and segmentation. Moreover, PVTv2 achieves comparable or better performances than recent works such as Swin Transformer. We hope this work will facilitate state-of-the-art Transformer researches in computer vision. + +
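The attention layers in PVT keep high-resolution pyramid stages affordable by computing keys and values on a spatially reduced copy of the token map (spatial-reduction attention), which the abstract's "linear complexity attention layers" in PVTv2 further refine. A minimal, hedged sketch of the spatial-reduction idea, using PyTorch's stock `nn.MultiheadAttention` rather than the PVT/MMDetection modules (illustration only):

```python
# Illustrative sketch of PVT-style spatial-reduction attention: keys/values come
# from a spatially reduced copy of the tokens, so cost scales with
# (H*W) * (H*W / sr_ratio**2) instead of (H*W)**2.
import torch
import torch.nn as nn


class SRAttention(nn.Module):
    def __init__(self, dim=64, num_heads=1, sr_ratio=8):
        super().__init__()
        self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
        self.norm = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)

    def forward(self, x, hw):
        # x: (B, H*W, dim) tokens of one pyramid stage; hw = (H, W).
        b, n, c = x.shape
        h, w = hw
        kv = x.transpose(1, 2).reshape(b, c, h, w)
        kv = self.sr(kv).flatten(2).transpose(1, 2)   # (B, H*W/sr_ratio**2, dim)
        kv = self.norm(kv)
        out, _ = self.attn(x, kv, kv, need_weights=False)
        return out


# Toy usage on a 64x64 feature map with 64-dim tokens.
tokens = torch.randn(2, 64 * 64, 64)
out = SRAttention()(tokens, (64, 64))
```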
    + +## Results and Models + +### RetinaNet (PVTv1) + +| Backbone | Lr schd | Mem (GB) | box AP | Config | Download | +| :--------: | :-----: | :------: | :----: | :--------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| PVT-Tiny | 12e | 8.5 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_t_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110.log.json) | +| PVT-Small | 12e | 14.5 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_s_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921.log.json) | +| PVT-Medium | 12e | 20.9 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_m_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243.log.json) | + +### RetinaNet (PVTv2) + +| Backbone | Lr schd | Mem (GB) | box AP | Config | Download | +| :------: | :-----: | :------: | :----: | :------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| PVTv2-B0 | 12e | 7.4 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b0_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157.log.json) | +| PVTv2-B1 | 12e | 9.5 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b1_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318.log.json) | +| PVTv2-B2 | 12e | 16.2 | 44.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b2_fpn_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843.log.json) | +| PVTv2-B3 | 12e | 23.0 | 46.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b3_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512.log.json) | +| PVTv2-B4 | 12e | 17.0 | 46.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b4_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151.log.json) | +| PVTv2-B5 | 12e | 18.7 | 46.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800.log.json) | + +## Citation + +```latex +@article{wang2021pyramid, + title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={arXiv preprint arXiv:2102.12122}, + year={2021} +} +``` + +```latex +@article{wang2021pvtv2, + title={PVTv2: Improved Baselines with Pyramid Vision Transformer}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={arXiv preprint arXiv:2106.13797}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/pvt/metafile.yml b/downstream/mmdetection/configs/pvt/metafile.yml new file mode 100644 index 0000000..5884378 --- /dev/null +++ b/downstream/mmdetection/configs/pvt/metafile.yml @@ -0,0 +1,243 @@ +Models: + - Name: retinanet_pvt-t_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvt-t_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth + Paper: + URL: https://arxiv.org/abs/2102.12122 + Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 + Version: 2.17.0 + + - Name: retinanet_pvt-s_fpn_1x_coco + In Collection: RetinaNet + Config: 
configs/pvt/retinanet_pvt-s_fpn_1x_coco.py + Metadata: + Training Memory (GB): 14.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth + Paper: + URL: https://arxiv.org/abs/2102.12122 + Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 + Version: 2.17.0 + + - Name: retinanet_pvt-m_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvt-m_fpn_1x_coco.py + Metadata: + Training Memory (GB): 20.9 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth + Paper: + URL: https://arxiv.org/abs/2102.12122 + Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b0_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b1_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b2_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py + Metadata: + Training Memory 
(GB): 16.2 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b3_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py + Metadata: + Training Memory (GB): 23.0 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b4_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py + Metadata: + Training Memory (GB): 17.0 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 + + - Name: retinanet_pvtv2-b5_fpn_1x_coco + In Collection: RetinaNet + Config: configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 18.7 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - PyramidVisionTransformerV2 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth + Paper: + URL: https://arxiv.org/abs/2106.13797 + Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" + README: configs/pvt/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 + Version: 2.17.0 diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py new file mode 100644 index 0000000..e299f2a --- /dev/null +++ 
b/downstream/mmdetection/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' +model = dict( + backbone=dict( + num_layers=[3, 8, 27, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_large.pth'))) +fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py new file mode 100644 index 0000000..b888f78 --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' +model = dict( + backbone=dict( + num_layers=[3, 4, 18, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_medium.pth'))) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py new file mode 100644 index 0000000..4660348 --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' +model = dict( + backbone=dict( + num_layers=[3, 4, 6, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_small.pth'))) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py new file mode 100644 index 0000000..a6cff7d --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RetinaNet', + backbone=dict( + _delete_=True, + type='PyramidVisionTransformer', + num_layers=[2, 2, 2, 2], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_tiny.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py new file mode 100644 index 0000000..cbe2295 --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RetinaNet', + backbone=dict( + _delete_=True, + type='PyramidVisionTransformerV2', + embed_dims=32, + num_layers=[2, 2, 2, 2], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b0.pth')), + neck=dict(in_channels=[32, 64, 160, 256])) +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py new file mode 100644 index 0000000..5374c50 --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b1.pth')), + 
neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py new file mode 100644 index 0000000..cf9a18d --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 4, 6, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b2.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py new file mode 100644 index 0000000..7a47f82 --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 4, 18, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b3.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py new file mode 100644 index 0000000..9891d7b --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 8, 27, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b4.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001) +# dataset settings +data = dict(samples_per_gpu=1, workers_per_gpu=1) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (1 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py new file mode 100644 index 0000000..a9fea2e --- /dev/null +++ b/downstream/mmdetection/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py @@ -0,0 +1,19 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 6, 40, 3], + mlp_ratios=(4, 4, 4, 4), + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b5.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001) +# dataset settings +data = dict(samples_per_gpu=1, workers_per_gpu=1) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (1 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/downstream/mmdetection/configs/queryinst/README.md b/downstream/mmdetection/configs/queryinst/README.md new file mode 100644 index 0000000..ad6e0b3 --- /dev/null +++ b/downstream/mmdetection/configs/queryinst/README.md @@ -0,0 +1,36 @@ +# QueryInst + +> [Instances as Queries](https://openaccess.thecvf.com/content/ICCV2021/html/Fang_Instances_As_Queries_ICCV_2021_paper.html) + + + +## Abstract + +We present QueryInst, a new perspective for instance segmentation. QueryInst is a multi-stage end-to-end system that treats instances of interest as learnable queries, enabling query based object detectors, e.g., Sparse R-CNN, to have strong instance segmentation performance. The attributes of instances such as categories, bounding boxes, instance masks, and instance association embeddings are represented by queries in a unified manner. In QueryInst, a query is shared by both detection and segmentation via dynamic convolutions and driven by parallelly-supervised multi-stage learning. We conduct extensive experiments on three challenging benchmarks, i.e., COCO, CityScapes, and YouTube-VIS to evaluate the effectiveness of QueryInst in object detection, instance segmentation, and video instance segmentation tasks. For the first time, we demonstrate that a simple end-to-end query based framework can achieve the state-of-the-art performance in various instance-level recognition tasks. + +
    + +## Results and Models + +| Model | Backbone | Style | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | mask AP | Config | Download | +| :-------: | :-------: | :-----: | :-----: | :-----------------: | :---------: | :--------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| QueryInst | R-50-FPN | pytorch | 1x | 100 | False | False | 42.0 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916.log.json) | +| QueryInst | R-50-FPN | pytorch | 3x | 100 | True | False | 44.8 | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643.log.json) | +| QueryInst | R-50-FPN | pytorch | 3x | 300 | True | True | 47.5 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802.log.json) | +| QueryInst | R-101-FPN | pytorch | 3x | 100 | True | False | 46.4 | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048.log.json) | +| QueryInst | R-101-FPN | pytorch | 3x | 300 | True | True | 49.0 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621.log.json) | + +## Citation + +```latex +@InProceedings{Fang_2021_ICCV, + author = {Fang, Yuxin and Yang, Shusheng and Wang, Xinggang and Li, Yu and Fang, Chen and Shan, Ying and Feng, Bin and Liu, Wenyu}, + title = {Instances As Queries}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2021}, + pages = {6910-6919} +} +``` diff --git a/downstream/mmdetection/configs/queryinst/metafile.yml b/downstream/mmdetection/configs/queryinst/metafile.yml new file mode 100644 index 0000000..da7f0a7 --- /dev/null +++ b/downstream/mmdetection/configs/queryinst/metafile.yml @@ -0,0 +1,100 @@ +Collections: + - Name: QueryInst + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - QueryInst + Paper: + URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Fang_Instances_As_Queries_ICCV_2021_paper.pdf + Title: 'Instances as Queries' + README: configs/queryinst/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/queryinst.py + Version: v2.18.0 + +Models: + - Name: queryinst_r50_fpn_1x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth + + - Name: queryinst_r50_fpn_mstrain_480-800_3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth + + - Name: queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth + + - Name: queryinst_r101_fpn_mstrain_480-800_3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.0 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth + + - Name: queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth diff --git a/downstream/mmdetection/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/downstream/mmdetection/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..fd138f5 --- /dev/null +++ b/downstream/mmdetection/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py b/downstream/mmdetection/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..07cae19 --- /dev/null +++ b/downstream/mmdetection/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_1x_coco.py new file mode 100644 index 0000000..48f5773 --- /dev/null +++ b/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_1x_coco.py @@ -0,0 +1,138 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +num_stages = 6 +num_proposals = 100 +model = dict( + type='QueryInst', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + add_extra_convs='on_input', + num_outs=4), + rpn_head=dict( + type='EmbeddingRPNHead', + num_proposals=num_proposals, + proposal_feature_channel=256), + roi_head=dict( + type='SparseRoIHead', + num_stages=num_stages, + stage_loss_weights=[1] * num_stages, + proposal_feature_channel=256, + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='DIIHead', + num_classes=80, + num_ffn_fcs=2, + num_heads=8, + num_cls_fcs=1, 
+ num_reg_fcs=3, + feedforward_channels=2048, + in_channels=256, + dropout=0.0, + ffn_act_cfg=dict(type='ReLU', inplace=True), + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=7, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=False, + target_means=[0., 0., 0., 0.], + target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) + ], + mask_head=[ + dict( + type='DynamicMaskHead', + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=14, + with_proj=False, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + num_convs=4, + num_classes=80, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + conv_out_channels=256, + class_agnostic=False, + norm_cfg=dict(type='BN'), + upsample_cfg=dict(type='deconv', scale_factor=2), + loss_mask=dict( + type='DiceLoss', + loss_weight=8.0, + use_sigmoid=True, + activate=False, + eps=1e-5)) for _ in range(num_stages) + ]), + # training and testing settings + train_cfg=dict( + rpn=None, + rcnn=[ + dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0), + iou_cost=dict(type='IoUCost', iou_mode='giou', + weight=2.0)), + sampler=dict(type='PseudoSampler'), + pos_weight=1, + mask_size=28, + ) for _ in range(num_stages) + ]), + test_cfg=dict( + rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) + +# optimizer +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0001, + weight_decay=0.0001, + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[8, 11], warmup_iters=1000) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..3089b3c --- /dev/null +++ b/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py @@ -0,0 +1,54 @@ +_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' +num_proposals = 300 +model = dict( + rpn_head=dict(num_proposals=num_proposals), + test_cfg=dict( + _delete_=True, + rpn=None, + rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# augmentation strategy originates from DETR. 
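+# The AutoAugment block below randomly applies one of two branches per image:
+# either a plain multi-scale resize, or a resize -> random crop -> resize chain.
+# This is the extra "RandomCrop" augmentation reported for this config in the README table.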
+train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[[ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ]]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py b/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..89e2cd1 --- /dev/null +++ b/downstream/mmdetection/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py @@ -0,0 +1,23 @@ +_base_ = './queryinst_r50_fpn_1x_coco.py' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, value) for value in min_values], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] + +data = dict(train=dict(pipeline=train_pipeline)) +lr_config = dict(policy='step', step=[27, 33]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/regnet/README.md b/downstream/mmdetection/configs/regnet/README.md new file mode 100644 index 0000000..711ed63 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/README.md @@ -0,0 +1,122 @@ +# RegNet + +> [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) + + + +## Abstract + +In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. 
We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs. + +
+ +## Introduction + +We implement RegNetX and RegNetY models in detection systems and provide their first results on Mask R-CNN, Faster R-CNN and RetinaNet. + +The pre-trained models are converted from the [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). + +## Usage + +To use a RegNet model, there are two steps to take: + +1. Convert the model to the ResNet-style format supported by MMDetection +2. Modify the backbone and neck in the config accordingly + +### Convert model + +We already provide models with FLOPs ranging from 400MF to 12GF in our model zoo. + +For more general usage, we also provide the script `regnet2mmdet.py` in the tools directory to convert the keys of models pretrained by [pycls](https://github.com/facebookresearch/pycls/) to +ResNet-style checkpoints used in MMDetection. + +```bash +python -u tools/model_converters/regnet2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +### Modify config + +Users can modify the backbone's `depth` and the corresponding keys in `arch` according to the configs in the [pycls model zoo](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). +The parameter `in_channels` of the FPN can be found in Figures 15 & 16 of the paper (`wi` in the legend). +This directory already provides several configs with their performance, using RegNetX models from the 800MF to the 12GF level. +For other pre-trained or self-implemented RegNet models, users are responsible for checking these parameters themselves. + +**Note**: Although Figures 15 & 16 also provide `w0`, `wa`, `wm`, `group_w`, and `bot_mul` for `arch`, they are quantized and thus inaccurate; using them sometimes produces a backbone whose keys do not match those in the pre-trained model.
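+
+For example, a Mask R-CNN config can be switched to a RegNetX-3.2GF backbone by overriding only the backbone and neck. The snippet below is a minimal sketch that mirrors `mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py` in this directory; the `in_channels` values are the stage widths (`wi`) of RegNetX-3.2GF.
+
+```python
+_base_ = [
+    '../_base_/models/mask_rcnn_r50_fpn.py',
+    '../_base_/datasets/coco_instance.py',
+    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
+]
+model = dict(
+    backbone=dict(
+        # `_delete_` drops the ResNet-50 backbone settings inherited from `_base_`
+        _delete_=True,
+        type='RegNet',
+        arch='regnetx_3.2gf',
+        out_indices=(0, 1, 2, 3),
+        frozen_stages=1,
+        norm_cfg=dict(type='BN', requires_grad=True),
+        norm_eval=True,
+        style='pytorch',
+        init_cfg=dict(
+            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
+    neck=dict(
+        type='FPN',
+        # stage widths of RegNetX-3.2GF; change these together with `arch`
+        in_channels=[96, 192, 432, 1008],
+        out_channels=256,
+        num_outs=5))
+```
+
+Other variants only change `arch`, the pretrained checkpoint, and `in_channels`; compare the 400MF, 800MF, 1.6GF and 4GF configs below. Note that the full configs in this directory also switch to the PyCls image normalization (`to_rgb=False`) and use `weight_decay=0.00005`, as described in the Notice section.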
+ +## Results and Models + +### Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------------------------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-FPN](../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | pytorch | 1x | 4.4 | 12.0 | 38.2 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | +| [RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 5.0 | | 40.3 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) | +| [RegNetX-4.0GF-FPN](./mask_rcnn_regnetx-4GF_fpn_1x_coco.py) | pytorch | 1x | 5.5 | | 41.5 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217.log.json) | +| [R-101-FPN](../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | pytorch | 1x | 6.4 | 10.3 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) | +| [RegNetX-6.4GF-FPN](./mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py) | pytorch | 1x | 6.1 | | 41.0 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439.log.json) | +| [X-101-32x4d-FPN](../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | pytorch 
| 1x | 7.6 | 9.4 | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) | +| [RegNetX-8.0GF-FPN](./mask_rcnn_regnetx-8GF_fpn_1x_coco.py) | pytorch | 1x | 6.4 | | 41.7 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515.log.json) | +| [RegNetX-12GF-FPN](./mask_rcnn_regnetx-12GF_fpn_1x_coco.py) | pytorch | 1x | 7.4 | | 42.2 | 38 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552.log.json) | +| [RegNetX-3.2GF-FPN-DCN-C3-C5](./mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py) | pytorch | 1x | 5.0 | | 40.3 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726.log.json) | + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-FPN](../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | pytorch | 1x | 4.0 | 18.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 4.5 | | 39.9 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927.log.json) | +| [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py) | pytorch | 2x | 4.5 | | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955.log.json) | + +### RetinaNet + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-----------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [R-50-FPN](../retinanet/retinanet_r50_fpn_1x_coco.py) | pytorch | 1x | 3.8 | 16.6 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) | +| [RegNetX-800MF-FPN](./retinanet_regnetx-800MF_fpn_1x_coco.py) | pytorch | 1x | 2.5 | | 35.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403.log.json) | +| [RegNetX-1.6GF-FPN](./retinanet_regnetx-1.6GF_fpn_1x_coco.py) | pytorch | 1x | 3.3 | | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403.log.json) | +| [RegNetX-3.2GF-FPN](./retinanet_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 4.2 | | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) | + +### Pre-trained models + +We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. + +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :---------------: | :---------------------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster RCNN | [RegNetX-400MF-FPN](./faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 2.3 | | 37.1 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112.log.json) | +| Faster RCNN | [RegNetX-800MF-FPN](./faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 2.8 | | 38.8 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118.log.json) | +| Faster RCNN | [RegNetX-1.6GF-FPN](./faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 3.4 | | 40.5 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325.log.json) | +| Faster RCNN | [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.4 | | 42.3 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152.log.json) | +| Faster RCNN | [RegNetX-4GF-FPN](./faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.9 | | 42.8 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201.log.json) | +| Mask RCNN | [RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.0 | | 43.1 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221.log.json) | +| Mask RCNN | [RegNetX-400MF-FPN](./mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 2.5 | | 37.6 | 34.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443.log.json) | +| Mask RCNN | [RegNetX-800MF-FPN](./mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 2.9 | | 39.5 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641.log.json) | +| Mask RCNN | [RegNetX-1.6GF-FPN](./mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 3.6 | | 40.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641.log.json) | +| Mask RCNN | [RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.0 | | 43.1 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221.log.json) | +| Mask RCNN | [RegNetX-4GF-FPN](./mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 5.1 | | 43.4 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621.log.json) | +| Cascade Mask RCNN | [RegNetX-400MF-FPN](./cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.3 | | 41.6 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619.log.json) | +| Cascade Mask RCNN | [RegNetX-800MF-FPN](./cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.8 | | 42.8 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616.log.json) | +| Cascade Mask RCNN | [RegNetX-1.6GF-FPN](./cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.4 | | 44.5 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616.log.json) | +| Cascade Mask RCNN | [RegNetX-3.2GF-FPN](./cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 6.4 | | 45.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616.log.json) | +| Cascade Mask RCNN | [RegNetX-4GF-FPN](./cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 6.9 | | 45.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034.log.json) | + +### Notice + +1. The models are trained using a different weight decay, i.e., `weight_decay=5e-5` according to the setting in ImageNet training. This brings improvement of at least 0.7 AP absolute but does not improve the model using ResNet-50. +2. RetinaNets using RegNets are trained with learning rate 0.02 with gradient clip. We find that using learning rate 0.02 could improve the results by at least 0.7 AP absolute and gradient clip is necessary to stabilize the training. However, this does not improve the performance of ResNet-50-FPN RetinaNet. + +## Citation + +```latex +@article{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..358d85a --- /dev/null +++ b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_1.6gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), + neck=dict( + type='FPN', + in_channels=[72, 168, 408, 912], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..8464571 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py @@ -0,0 +1,63 @@ +_base_ = [ + '../common/mstrain_3x_coco_instance.py', + '../_base_/models/cascade_mask_rcnn_r50_fpn.py' +] +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + 
to_rgb=False) +train_pipeline = [ + # Images are converted to float32 directly after loading in PyCls + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +optimizer = dict(weight_decay=0.00005) diff --git a/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..2a8990a --- /dev/null +++ b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_400mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), + neck=dict( + type='FPN', + in_channels=[32, 64, 160, 384], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..3157863 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_4.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 560, 1360], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..41376ad --- /dev/null +++ b/downstream/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_800mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), + neck=dict( + type='FPN', + in_channels=[64, 128, 288, 672], + 
out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..385b5ca --- /dev/null +++ b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_1.6gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), + neck=dict( + type='FPN', + in_channels=[72, 168, 408, 912], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py new file mode 100644 index 0000000..88d270e --- /dev/null +++ b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py @@ -0,0 +1,57 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) diff --git a/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py new file mode 100644 index 0000000..612490b --- /dev/null +++ b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py @@ -0,0 +1,3 @@ +_base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py' +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py new file mode 
100644 index 0000000..b7e6e1a --- /dev/null +++ b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py @@ -0,0 +1,61 @@ +_base_ = [ + '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' +] +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +optimizer = dict(weight_decay=0.00005) diff --git a/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..0a05f6e --- /dev/null +++ b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_400mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), + neck=dict( + type='FPN', + in_channels=[32, 64, 160, 384], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..98b3fc2 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_4.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 560, 1360], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py 
b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..67f448b --- /dev/null +++ b/downstream/mmdetection/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_800mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), + neck=dict( + type='FPN', + in_channels=[64, 128, 288, 672], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..7970c3c --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_1.6gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), + neck=dict( + type='FPN', + in_channels=[72, 168, 408, 912], + out_channels=256, + num_outs=5)) + +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py new file mode 100644 index 0000000..ce3661c --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_12gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_12gf')), + neck=dict( + type='FPN', + in_channels=[224, 448, 896, 2240], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py new file mode 100644 index 0000000..44bf0d1 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + 
to_rgb=False) +train_pipeline = [ + # Images are converted to float32 directly after loading in PyCls + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py new file mode 100644 index 0000000..5b53428 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf'))) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..aca64d3 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py @@ -0,0 +1,66 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + 
dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..c38dfa6 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_400mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), + neck=dict( + type='FPN', + in_channels=[32, 64, 160, 384], + out_channels=256, + num_outs=5)) + +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py new file mode 100644 index 0000000..874d485 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_4.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 560, 1360], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..f0b65ea --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_4.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 560, 1360], + out_channels=256, + num_outs=5)) + +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py new file mode 100644 
index 0000000..99387d8 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_6.4gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_6.4gf')), + neck=dict( + type='FPN', + in_channels=[168, 392, 784, 1624], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py new file mode 100644 index 0000000..335ebab --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py @@ -0,0 +1,26 @@ +_base_ = [ + '../common/mstrain-poly_3x_coco_instance.py', + '../_base_/models/mask_rcnn_r50_fpn.py' +] + +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_800mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), + neck=dict( + type='FPN', + in_channels=[64, 128, 288, 672], + out_channels=256, + num_outs=5)) + +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py new file mode 100644 index 0000000..1e7832f --- /dev/null +++ b/downstream/mmdetection/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_8.0gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')), + neck=dict( + type='FPN', + in_channels=[80, 240, 720, 1920], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/metafile.yml b/downstream/mmdetection/configs/regnet/metafile.yml new file mode 100644 index 0000000..ecd3953 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/metafile.yml @@ -0,0 +1,797 @@ +Models: + - Name: mask_rcnn_regnetx-3.2GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: 
mask_rcnn_regnetx-4GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-6.4GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.1 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-8GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 6.4 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-12GF_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster_rcnn_regnetx-3.2GF_fpn_1x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster_rcnn_regnetx-3.2GF_fpn_2x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py + Metadata: + Training Memory (GB): 4.5 + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: retinanet_regnetx-800MF_fpn_1x_coco + In Collection: RetinaNet + Config: configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 2.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: retinanet_regnetx-1.6GF_fpn_1x_coco + In Collection: RetinaNet + Config: configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.3 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: retinanet_regnetx-3.2GF_fpn_1x_coco + In Collection: RetinaNet + Config: configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.2 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 2.3 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 2.8 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: 
faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 3.4 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 4.4 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco + In Collection: Faster R-CNN + Config: configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 4.9 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: 
mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py + Metadata: + Training Memory (GB): 2.5 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py + Metadata: + Training Memory (GB): 2.9 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 3.6 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6e63e19c.pth + Paper: + URL: 
https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco + In Collection: Mask R-CNN + Config: configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 5.1 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 4.3 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 4.8 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 5.4 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + 
Dataset: COCO + Metrics: + box AP: 44.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 6.4 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 + + - Name: cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco + In Collection: Cascade R-CNN + Config: configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py + Metadata: + Training Memory (GB): 6.9 + Epochs: 36 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RegNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: 'Designing Network Design Spaces' + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 + Version: v2.1.0 diff --git a/downstream/mmdetection/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py new file mode 100644 index 0000000..7395c1b --- /dev/null +++ b/downstream/mmdetection/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_1.6gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), + neck=dict( + type='FPN', + in_channels=[72, 168, 408, 912], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py new file mode 100644 index 0000000..f05307c --- /dev/null +++ b/downstream/mmdetection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py @@ -0,0 +1,59 
@@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + backbone=dict( + _delete_=True, + type='RegNet', + arch='regnetx_3.2gf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), + neck=dict( + type='FPN', + in_channels=[96, 192, 432, 1008], + out_channels=256, + num_outs=5)) +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py b/downstream/mmdetection/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py new file mode 100644 index 0000000..f6f8989 --- /dev/null +++ b/downstream/mmdetection/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='RegNet', + arch='regnetx_800mf', + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), + neck=dict( + type='FPN', + in_channels=[64, 128, 288, 672], + out_channels=256, + num_outs=5)) diff --git a/downstream/mmdetection/configs/reppoints/README.md b/downstream/mmdetection/configs/reppoints/README.md new file mode 100644 index 0000000..acf8e47 --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/README.md @@ -0,0 +1,59 @@ +# RepPoints + +> [RepPoints: Point Set Representation for Object Detection](https://arxiv.org/abs/1904.11490) + + + +## Abstract + +Modern object detectors rely heavily on rectangular bounding boxes, such as anchors, proposals and the final predictions, to represent objects at various recognition stages. The bounding box is convenient to use but provides only a coarse localization of objects and leads to a correspondingly coarse extraction of object features. In this paper, we present RepPoints(representative points), a new finer representation of objects as a set of sample points useful for both localization and recognition. 
Given ground truth localization and recognition targets for training, RepPoints learn to automatically arrange themselves in a manner that bounds the spatial extent of an object and indicates semantically significant local areas. They furthermore do not require the use of anchors to sample a space of bounding boxes. We show that an anchor-free object detector based on RepPoints can be as effective as the state-of-the-art anchor-based detection methods, with 46.5 AP and 67.4 AP50 on the COCO test-dev detection benchmark, using ResNet-101 model. + +
    + +
+ +## Introduction + +By [Ze Yang](https://yangze.tech/), [Shaohui Liu](http://b1ueber2y.me/), and [Han Hu](https://ancientmooner.github.io/). + +We provide code support and configuration files to reproduce the results in the paper for +["RepPoints: Point Set Representation for Object Detection"](https://arxiv.org/abs/1904.11490) on COCO object detection. + +**RepPoints**, initially described in [arXiv](https://arxiv.org/abs/1904.11490), is a new representation method for visual objects, on which visual understanding tasks are typically centered. Visual object representation, aiming at both geometric description and appearance feature extraction, is conventionally achieved by `bounding box + RoIPool (RoIAlign)`. The bounding box representation is convenient to use; however, it provides only a rectangular localization of objects that lacks geometric precision and may consequently degrade feature quality. Our new representation, RepPoints, models objects by a `point set` instead of a `bounding box`, whose points learn to adaptively position themselves over an object in a manner that circumscribes the object’s `spatial extent` and enables `semantically aligned feature extraction`. This richer and more flexible representation maintains the convenience of bounding boxes while facilitating various visual understanding applications. This repo demonstrates the effectiveness of RepPoints for COCO object detection. + +Another feature of this repo is the demonstration of an `anchor-free detector`, which can be as effective as state-of-the-art anchor-based detection methods. The anchor-free detector can utilize either `bounding box` or `RepPoints` as the basic object representation. + +## Results and Models + +The results on COCO 2017val are shown in the table below. 
+ +| Method | Backbone | GN | Anchor | convert func | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------: | :-----------: | :-: | :----: | :----------: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| BBox | R-50-FPN | Y | single | - | 1x | 3.9 | 15.9 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329-c98bfa96.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916.log.json) | +| BBox | R-50-FPN | Y | none | - | 1x | 3.9 | 15.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+Bhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco_20200330-00f73d58.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco_20200330_233609.log.json) | +| RepPoints | R-50-FPN | N | none | moment | 1x | 3.3 | 18.5 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330_233609.log.json) | +| RepPoints | R-50-FPN | Y | none | moment | 1x | 3.9 | 17.5 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329-4b38409a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952.log.json) | +| RepPoints | R-50-FPN | Y | none | moment | 2x | 3.9 | - | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329_150020.log.json) | +| RepPoints | R-101-FPN | Y | none | moment | 2x | 5.8 | 13.7 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329_132205.log.json) | +| RepPoints | R-101-FPN-DCN | Y | none | moment | 2x | 5.9 | 12.1 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132134.log.json) | +| RepPoints | X-101-FPN-DCN | Y | none | moment | 2x | 7.1 | 9.3 | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132201.log.json) | + +**Notes:** + +- `R-xx`, `X-xx` denote the ResNet and ResNeXt architectures, respectively. +- `DCN` denotes replacing 3x3 conv with the 3x3 deformable convolution in `c3-c5` stages of backbone. +- `none` in the `anchor` column means 2-d `center point` (x,y) is used to represent the initial object hypothesis. `single` denotes one 4-d anchor box (x,y,w,h) with IoU based label assign criterion is adopted. +- `moment`, `partial MinMax`, `MinMax` in the `convert func` column are three functions to convert a point set to a pseudo box. +- Note the results here are slightly different from those reported in the paper, due to framework change. While the original paper uses an [MXNet](https://mxnet.apache.org/) implementation, we re-implement the method in [PyTorch](https://pytorch.org/) based on mmdetection. 
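As a rough illustration of the `convert func` column above, the snippet below sketches how a predicted point set can be reduced to a pseudo box. This is a minimal NumPy sketch for intuition only, not the actual conversion code in mmdetection's `RepPointsHead`: the function names and sample points are made up for this example, and for `moment` the log-scale transfer factor is a learned parameter in the real head, whereas a fixed placeholder of 0 is assumed here.

```python
import numpy as np


def points2bbox_minmax(points):
    """`minmax`: the pseudo box is the tightest axis-aligned box enclosing all points.

    (`partial MinMax` applies the same reduction to only a subset of the points.)
    """
    x1, y1 = points.min(axis=0)
    x2, y2 = points.max(axis=0)
    return np.array([x1, y1, x2, y2])


def points2bbox_moment(points, moment_transfer=(0.0, 0.0)):
    """`moment`: box center is the mean of the points; half width/height is the
    per-axis standard deviation scaled by exp(moment_transfer).

    The transfer factor is learned in the actual head; (0.0, 0.0) is only an
    illustrative placeholder here.
    """
    cx, cy = points.mean(axis=0)
    half_w = points[:, 0].std() * np.exp(moment_transfer[0])
    half_h = points[:, 1].std() * np.exp(moment_transfer[1])
    return np.array([cx - half_w, cy - half_h, cx + half_w, cy + half_h])


# A hypothetical set of 9 predicted points (num_points=9 in the configs) for one object.
pts = np.array([[12., 30.], [18., 22.], [25., 35.], [30., 28.], [21., 40.],
                [15., 33.], [28., 24.], [23., 31.], [19., 27.]])
print(points2bbox_minmax(pts))  # [12. 22. 30. 40.] -- tight enclosing box
print(points2bbox_moment(pts))  # smaller, statistics-based pseudo box
```

In the configs in this folder, `transform_method` selects which conversion the head uses, e.g. `transform_method='moment'`, `'minmax'`, or `'partial_minmax'`.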
+ +## Citation + +```latex +@inproceedings{yang2019reppoints, + title={RepPoints: Point Set Representation for Object Detection}, + author={Yang, Ze and Liu, Shaohui and Hu, Han and Wang, Liwei and Lin, Stephen}, + booktitle={The IEEE International Conference on Computer Vision (ICCV)}, + month={Oct}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py b/downstream/mmdetection/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py new file mode 100644 index 0000000..b24c8db --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' +model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True)) diff --git a/downstream/mmdetection/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py b/downstream/mmdetection/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py new file mode 100644 index 0000000..8d5013d --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' +model = dict( + bbox_head=dict(transform_method='minmax', use_grid_points=True), + # training and testing settings + train_cfg=dict( + init=dict( + assigner=dict( + _delete_=True, + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1)))) diff --git a/downstream/mmdetection/configs/reppoints/metafile.yml b/downstream/mmdetection/configs/reppoints/metafile.yml new file mode 100644 index 0000000..cd4312c --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/metafile.yml @@ -0,0 +1,181 @@ +Collections: + - Name: RepPoints + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Group Normalization + - FPN + - RepPoints + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.11490 + Title: 'RepPoints: Point Set Representation for Object Detection' + README: configs/reppoints/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/reppoints_detector.py#L9 + Version: v2.0.0 + +Models: + - Name: bbox_r50_grid_fpn_gn-neck+head_1x_coco + In Collection: RepPoints + Config: configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py + Metadata: + Training Memory (GB): 3.9 + inference time (ms/im): + - value: 62.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329-c98bfa96.pth + + - Name: bbox_r50_grid_center_fpn_gn-neck+Bhead_1x_coco + In Collection: RepPoints + Config: configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+Bhead_1x_coco.py + Metadata: + Training Memory (GB): 3.9 + inference time (ms/im): + - value: 64.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco_20200330-00f73d58.pth + + - Name: 
reppoints_moment_r50_fpn_1x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.3 + inference time (ms/im): + - value: 54.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth + + - Name: reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco.py + Metadata: + Training Memory (GB): 3.9 + inference time (ms/im): + - value: 57.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329-4b38409a.pth + + - Name: reppoints_moment_r50_fpn_gn-neck+head_2x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py + Metadata: + Training Memory (GB): 3.9 + inference time (ms/im): + - value: 57.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth + + - Name: reppoints_moment_r101_fpn_gn-neck+head_2x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py + Metadata: + Training Memory (GB): 5.8 + inference time (ms/im): + - value: 72.99 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth + + - Name: reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py + Metadata: + Training Memory (GB): 5.9 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth + + - Name: reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco + In Collection: RepPoints + Config: configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py + Metadata: + Training Memory (GB): 7.1 + inference time (ms/im): + - value: 107.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.2 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth diff --git a/downstream/mmdetection/configs/reppoints/reppoints.png b/downstream/mmdetection/configs/reppoints/reppoints.png new file mode 100644 index 0000000..a9306d9 Binary files /dev/null and b/downstream/mmdetection/configs/reppoints/reppoints.png differ diff --git a/downstream/mmdetection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py b/downstream/mmdetection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py new file mode 100644 index 0000000..0f56a46 --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' +model = dict(bbox_head=dict(transform_method='minmax')) diff --git a/downstream/mmdetection/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py b/downstream/mmdetection/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py new file mode 100644 index 0000000..e223d80 --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py @@ -0,0 +1,8 @@ +_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py b/downstream/mmdetection/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py new file mode 100644 index 0000000..1185470 --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py new file mode 100644 index 0000000..158a906 --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py @@ -0,0 +1,67 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RepPointsDetector', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + bbox_head=dict( + type='RepPointsHead', + num_classes=80, + in_channels=256, + feat_channels=256, + point_feat_channels=256, + stacked_convs=3, + num_points=9, + gradient_mul=0.1, + point_strides=[8, 16, 32, 64, 128], + point_base_scale=4, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5), + 
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0), + transform_method='moment'), + # training and testing settings + train_cfg=dict( + init=dict( + assigner=dict(type='PointAssigner', scale=4, pos_num=1), + allowed_border=-1, + pos_weight=-1, + debug=False), + refine=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) +optimizer = dict(lr=0.01) diff --git a/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py b/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py new file mode 100644 index 0000000..337f167 --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py @@ -0,0 +1,4 @@ +_base_ = './reppoints_moment_r50_fpn_1x_coco.py' +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg)) +optimizer = dict(lr=0.01) diff --git a/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py b/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py new file mode 100644 index 0000000..feca44a --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py @@ -0,0 +1,3 @@ +_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py b/downstream/mmdetection/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py new file mode 100644 index 0000000..c0a12d0 --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py b/downstream/mmdetection/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py new file mode 100644 index 0000000..9a63bd0 --- /dev/null +++ b/downstream/mmdetection/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' +model = dict(bbox_head=dict(transform_method='partial_minmax')) diff --git a/downstream/mmdetection/configs/res2net/README.md b/downstream/mmdetection/configs/res2net/README.md new file mode 100644 index 0000000..1285870 --- /dev/null +++ b/downstream/mmdetection/configs/res2net/README.md @@ -0,0 +1,77 @@ +# Res2Net + +> [Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/abs/1904.01169) + + + +## Abstract + +Representing features at multiple scales is of great 
importance for numerous vision tasks. Recent advances in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale representation ability, leading to consistent performance gains on a wide range of applications. However, most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. We evaluate the Res2Net block on all these models and demonstrate consistent performance gains over baseline models on widely-used datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, further verify the superiority of the Res2Net over the state-of-the-art baseline methods. + +
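To make the idea above concrete, here is a minimal PyTorch sketch of a Res2Net-style bottleneck: the 3x3 stage is split into `scales` groups, and each group after the first also receives the previous group's output, which is the hierarchical residual-like connection the abstract describes. This is an illustration only, not the `Res2Net` backbone registered in MMDetection; the class name, the fixed `scales=4`/`width=26` defaults, and the omission of stride/downsample handling are simplifications.

```python
import torch
import torch.nn as nn


class Res2NetBottleneck(nn.Module):
    """Minimal sketch of a Res2Net-style bottleneck block (illustrative only)."""

    def __init__(self, channels=256, scales=4, width=26):
        super().__init__()
        mid = scales * width
        self.scales, self.width = scales, width
        self.conv1 = nn.Conv2d(channels, mid, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid)
        # one 3x3 conv per split, except the first split which is passed through
        self.convs = nn.ModuleList(
            nn.Conv2d(width, width, 3, padding=1, bias=False)
            for _ in range(scales - 1))
        self.bns = nn.ModuleList(nn.BatchNorm2d(width) for _ in range(scales - 1))
        self.conv3 = nn.Conv2d(mid, channels, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        splits = torch.split(out, self.width, dim=1)
        outs, prev = [splits[0]], None
        for i in range(1, self.scales):
            # hierarchical residual-like connection: each split also receives
            # the previous 3x3 conv's output, enlarging the receptive field
            sp = splits[i] if prev is None else splits[i] + prev
            prev = self.relu(self.bns[i - 1](self.convs[i - 1](sp)))
            outs.append(prev)
        out = self.bn3(self.conv3(torch.cat(outs, dim=1)))
        return self.relu(out + identity)


x = torch.randn(1, 256, 56, 56)
print(Res2NetBottleneck()(x).shape)  # torch.Size([1, 256, 56, 56])
```

In the configs of this folder the real backbone is selected simply via `type='Res2Net', depth=101, scales=4, base_width=26` on top of the corresponding ResNet-based config.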
    + +## Introduction + +We propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. + +| Backbone | Params. | GFLOPs | top-1 err. | top-5 err. | +| :---------------: | :-----: | :----: | :--------: | :--------: | +| ResNet-101 | 44.6 M | 7.8 | 22.63 | 6.44 | +| ResNeXt-101-64x4d | 83.5M | 15.5 | 20.40 | - | +| HRNetV2p-W48 | 77.5M | 16.1 | 20.70 | 5.50 | +| Res2Net-101 | 45.2M | 8.3 | 18.77 | 4.64 | + +Compared with other backbone networks, Res2Net requires fewer parameters and FLOPs. + +**Note:** + +- GFLOPs for classification are calculated with image size (224x224). + +## Results and Models + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 2x | 7.4 | - | 43.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco_20200514_231734.log.json) | + +### Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 2x | 7.9 | - | 43.6 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco_20200515_002413.log.json) | + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| 
R2-101-FPN | pytorch | 20e | 7.8 | - | 45.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco_20200515_091644.log.json) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 20e | 9.5 | - | 46.4 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco_20200515_091645.log.json) | + +### Hybrid Task Cascade (HTC) + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R2-101-FPN | pytorch | 20e | - | - | 47.5 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/htc_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco_20200515_150029.log.json) | + +- Res2Net ImageNet pretrained models are in [Res2Net-PretrainedModels](https://github.com/Res2Net/Res2Net-PretrainedModels). +- More applications of Res2Net are in [Res2Net-Github](https://github.com/Res2Net/). 
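The entries in the tables above can be used directly for inference with MMDetection's high-level API. A minimal sketch, assuming an MMDetection 2.x environment, that the checkpoint from the Faster R-CNN row has been downloaded locally, and that paths are given relative to the MMDetection root (the image path is a placeholder):

```python
# Sketch only: config/checkpoint names come from the Faster R-CNN row above;
# 'demo/demo.jpg' is a placeholder image path.
from mmdet.apis import init_detector, inference_detector

config = 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py'
checkpoint = 'faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth'  # downloaded weights

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # per-class list of (N, 5) bbox arrays
```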
+ +## Citation + +```latex +@article{gao2019res2net, + title={Res2Net: A New Multi-scale Backbone Architecture}, + author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, + journal={IEEE TPAMI}, + year={2020}, + doi={10.1109/TPAMI.2019.2938758}, +} +``` diff --git a/downstream/mmdetection/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py b/downstream/mmdetection/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py new file mode 100644 index 0000000..6b6c001 --- /dev/null +++ b/downstream/mmdetection/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py @@ -0,0 +1,10 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/downstream/mmdetection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py b/downstream/mmdetection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py new file mode 100644 index 0000000..10dddbb --- /dev/null +++ b/downstream/mmdetection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py @@ -0,0 +1,10 @@ +_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/downstream/mmdetection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py b/downstream/mmdetection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py new file mode 100644 index 0000000..fc2221c --- /dev/null +++ b/downstream/mmdetection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py @@ -0,0 +1,10 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/downstream/mmdetection/configs/res2net/htc_r2_101_fpn_20e_coco.py b/downstream/mmdetection/configs/res2net/htc_r2_101_fpn_20e_coco.py new file mode 100644 index 0000000..22d0c5d --- /dev/null +++ b/downstream/mmdetection/configs/res2net/htc_r2_101_fpn_20e_coco.py @@ -0,0 +1,13 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py b/downstream/mmdetection/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py new file mode 100644 index 0000000..33aef1a --- /dev/null +++ b/downstream/mmdetection/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py @@ -0,0 +1,10 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/downstream/mmdetection/configs/res2net/metafile.yml b/downstream/mmdetection/configs/res2net/metafile.yml new file mode 100644 index 0000000..27bac8c --- /dev/null +++ b/downstream/mmdetection/configs/res2net/metafile.yml @@ -0,0 +1,146 @@ +Models: + - Name: faster_rcnn_r2_101_fpn_2x_coco + In Collection: Faster R-CNN + Config: 
configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 + + - Name: mask_rcnn_r2_101_fpn_2x_coco + In Collection: Mask R-CNN + Config: configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.9 + Epochs: 24 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 + + - Name: cascade_rcnn_r2_101_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 + + - Name: cascade_mask_rcnn_r2_101_fpn_20e_coco + In Collection: Cascade R-CNN + Config: configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 9.5 + Epochs: 20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 + + - Name: htc_r2_101_fpn_20e_coco + In Collection: HTC + Config: configs/res2net/htc_r2_101_fpn_20e_coco.py + Metadata: + Epochs: 
20 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Res2Net + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth + Paper: + URL: https://arxiv.org/abs/1904.01169 + Title: 'Res2Net for object detection and instance segmentation' + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 + Version: v2.1.0 diff --git a/downstream/mmdetection/configs/resnest/README.md b/downstream/mmdetection/configs/resnest/README.md new file mode 100644 index 0000000..3676e56 --- /dev/null +++ b/downstream/mmdetection/configs/resnest/README.md @@ -0,0 +1,54 @@ +# ResNeSt + +> [ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955) + + + +## Abstract + +It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. + +
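The channel-wise attention across branches mentioned in the abstract corresponds to the `radix`/`reduction_factor` options used by the configs below (e.g. `radix=2`, `reduction_factor=4`). The following toy PyTorch sketch shows the split-attention idea only; it is not the `ResNeSt` backbone shipped with MMDetection, and it ignores cardinality groups and the extra BN/ReLU of the real block.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class SplitAttention(nn.Module):
    """Toy radix-wise split attention (illustrative sketch only)."""

    def __init__(self, channels, radix=2, reduction_factor=4):
        super().__init__()
        inter = max(channels * radix // reduction_factor, 32)
        self.radix = radix
        # one grouped 3x3 conv producing `radix` parallel feature groups
        self.conv = nn.Conv2d(channels, channels * radix, 3, padding=1,
                              groups=radix, bias=False)
        self.bn = nn.BatchNorm2d(channels * radix)
        self.fc1 = nn.Conv2d(channels, inter, 1)
        self.fc2 = nn.Conv2d(inter, channels * radix, 1)

    def forward(self, x):
        b, c = x.shape[:2]
        out = F.relu(self.bn(self.conv(x)))              # (b, c*radix, h, w)
        splits = torch.chunk(out, self.radix, dim=1)     # radix groups of c channels
        gap = sum(splits).mean(dim=(2, 3), keepdim=True)  # global channel context
        att = self.fc2(F.relu(self.fc1(gap)))            # (b, c*radix, 1, 1)
        att = att.view(b, self.radix, c, 1, 1).softmax(dim=1)
        # weight each group by its attention and fuse them
        return sum(att[:, i] * splits[i] for i in range(self.radix))


x = torch.randn(2, 64, 32, 32)
print(SplitAttention(64)(x).shape)  # torch.Size([2, 64, 32, 32])
```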
    + +## Results and Models + +### Faster R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| S-50-FPN | pytorch | 1x | 4.8 | - | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20200926_125502.log.json) | +| S-101-FPN | pytorch | 1x | 7.1 | - | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201006_021058-421517f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201006_021058.log.json) | + +### Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| S-50-FPN | pytorch | 1x | 5.5 | - | 42.6 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20200926_125503-8a2c3d47.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20200926_125503.log.json) | +| S-101-FPN | pytorch | 1x | 7.8 | - | 45.2 | 40.2 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_215831-af60cdf9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201005_215831.log.json) | + +### Cascade R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| S-50-FPN | pytorch | 1x | - | - | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201122_213640-763cc7b5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201005_113242.log.json) | +| S-101-FPN | pytorch | 1x | 8.4 | - | 46.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201005_113242-b9459f8f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201122_213640.log.json) | + +### Cascade Mask R-CNN + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| S-50-FPN | pytorch | 1x | - | - | 45.4 | 39.5 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201122_104428-99eca4c7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201122_104428.log.json) | +| S-101-FPN | pytorch | 1x | 10.5 | - | 47.7 | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_113243-42607475.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201005_113243.log.json) | + +## Citation + +```latex +@article{zhang2020resnest, +title={ResNeSt: Split-Attention Networks}, +author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, +journal={arXiv preprint arXiv:2004.08955}, +year={2020} +} +``` diff --git a/downstream/mmdetection/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/downstream/mmdetection/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py new file mode 100644 index 0000000..406f39d --- /dev/null +++ b/downstream/mmdetection/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py' +model = dict( + backbone=dict( + stem_channels=128, + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='open-mmlab://resnest101'))) diff --git a/downstream/mmdetection/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/downstream/mmdetection/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py new file mode 100644 index 0000000..83d7537 --- /dev/null +++ b/downstream/mmdetection/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py @@ -0,0 +1,118 @@ +_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + backbone=dict( + type='ResNeSt', + stem_channels=64, + depth=50, + radix=2, + reduction_factor=4, + avg_down_stride=True, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), + roi_head=dict( + bbox_head=[ + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + 
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict(norm_cfg=norm_cfg))) +# # use ResNeSt img_norm +img_norm_cfg = dict( + mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py b/downstream/mmdetection/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py new file mode 100644 index 0000000..0a7476a --- /dev/null +++ b/downstream/mmdetection/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' +model = dict( + backbone=dict( + stem_channels=128, + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='open-mmlab://resnest101'))) diff --git a/downstream/mmdetection/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py b/downstream/mmdetection/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py new file mode 100644 index 0000000..6ed7730 --- /dev/null +++ b/downstream/mmdetection/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py @@ -0,0 +1,116 @@ +_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + backbone=dict( + type='ResNeSt', + stem_channels=64, + depth=50, + radix=2, + reduction_factor=4, + avg_down_stride=True, + num_stages=4, + 
out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), + roi_head=dict( + bbox_head=[ + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared4Conv1FCBBoxHead', + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + norm_cfg=norm_cfg, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], )) +# # use ResNeSt img_norm +img_norm_cfg = dict( + mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=False, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py b/downstream/mmdetection/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py new file mode 100644 index 0000000..40a2f1f --- /dev/null +++ b/downstream/mmdetection/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' +model = dict( + backbone=dict( + stem_channels=128, + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='open-mmlab://resnest101'))) diff --git a/downstream/mmdetection/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py 
b/downstream/mmdetection/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py new file mode 100644 index 0000000..eb1ecd2 --- /dev/null +++ b/downstream/mmdetection/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py @@ -0,0 +1,62 @@ +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + backbone=dict( + type='ResNeSt', + stem_channels=64, + depth=50, + radix=2, + reduction_factor=4, + avg_down_stride=True, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg))) +# # use ResNeSt img_norm +img_norm_cfg = dict( + mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=False, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/downstream/mmdetection/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py new file mode 100644 index 0000000..c882ba1 --- /dev/null +++ b/downstream/mmdetection/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py' +model = dict( + backbone=dict( + stem_channels=128, + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='open-mmlab://resnest101'))) diff --git a/downstream/mmdetection/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/downstream/mmdetection/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py new file mode 100644 index 0000000..4e50dea --- /dev/null +++ b/downstream/mmdetection/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py @@ -0,0 +1,64 @@ +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + backbone=dict( + type='ResNeSt', + stem_channels=64, + depth=50, + radix=2, + reduction_factor=4, + avg_down_stride=True, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) +# 
# use ResNeSt img_norm +img_norm_cfg = dict( + mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/resnest/metafile.yml b/downstream/mmdetection/configs/resnest/metafile.yml new file mode 100644 index 0000000..cfeec71 --- /dev/null +++ b/downstream/mmdetection/configs/resnest/metafile.yml @@ -0,0 +1,230 @@ +Models: + - Name: faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco + In Collection: Faster R-CNN + Config: configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py + Metadata: + Training Memory (GB): 4.8 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco + In Collection: Faster R-CNN + Config: configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py + Metadata: + Training Memory (GB): 7.1 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201006_021058-421517f1.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco + In Collection: Mask R-CNN + Config: configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py + Metadata: + 
Training Memory (GB): 5.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20200926_125503-8a2c3d47.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco + In Collection: Mask R-CNN + Config: configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_215831-af60cdf9.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco + In Collection: Cascade R-CNN + Config: configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py + Metadata: + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201122_213640-763cc7b5.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco + In Collection: Cascade R-CNN + Config: configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py + Metadata: + Training Memory (GB): 8.4 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201005_113242-b9459f8f.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + 
README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco + In Collection: Cascade R-CNN + Config: configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py + Metadata: + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201122_104428-99eca4c7.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 + + - Name: cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco + In Collection: Cascade R-CNN + Config: configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py + Metadata: + Training Memory (GB): 10.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNeSt + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_113243-42607475.pth + Paper: + URL: https://arxiv.org/abs/2004.08955 + Title: 'ResNeSt: Split-Attention Networks' + README: configs/resnest/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 + Version: v2.7.0 diff --git a/downstream/mmdetection/configs/resnet_strikes_back/README.md b/downstream/mmdetection/configs/resnet_strikes_back/README.md new file mode 100644 index 0000000..dd00b20 --- /dev/null +++ b/downstream/mmdetection/configs/resnet_strikes_back/README.md @@ -0,0 +1,40 @@ +# ResNet strikes back + +> [ResNet strikes back: An improved training procedure in timm](https://arxiv.org/abs/2110.00476) + + + +## Abstract + +The influential Residual Networks designed by He et al. remain the gold-standard architecture in numerous scientific publications. They typically serve as the default architecture in studies, or as baselines when new architectures are proposed. Yet there has been significant progress on best practices for training neural networks since the inception of the ResNet architecture in 2015. Novel optimization & dataaugmentation have increased the effectiveness of the training recipes. + +In this paper, we re-evaluate the performance of the vanilla ResNet-50 when trained with a procedure that integrates such advances. We share competitive training settings and pre-trained models in the timm open-source library, with the hope that they will serve as better baselines for future work. 
For instance, with our more demanding training setting, a vanilla ResNet-50 reaches 80.4% top-1 accuracy at resolution 224×224 on ImageNet-val without extra data or distillation. We also report the performance achieved with popular models with our training procedure. + +
    + +## Results and Models + +| Method | Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :----------------: | :------: | :-----: | :------: | :------------: | :---------: | :---------: | :-----------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50 rsb | 1x | 3.9 | - | 40.8 (+3.4) | - | [Config](./faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229-32ae82a9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229.log.json) | +| Mask R-CNN | R-50 rsb | 1x | 4.5 | - | 41.2 (+3.0) | 38.2 (+3.0) | [Config](./mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054.log.json) | +| Cascade Mask R-CNN | R-50 rsb | 1x | 6.2 | - | 44.8 (+3.6) | 39.9 (+3.6) | [Config](./cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636-8b9ad50f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636.log.json) | +| RetinaNet | R-50 rsb | 1x | 3.8 | - | 39.0 (+2.5) | - | [Config](./retinanet_r50_fpn_rsb-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432-bd24aae9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432.log.json) | + +**Notes:** + +- 'rsb' is short for 'resnet strikes back' +- We have done some grid searches on learning rate and weight decay and get these optimal hyper-parameters. 
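The configs listed above only swap the backbone's `init_cfg` to the rsb ImageNet checkpoint (loaded with `prefix='backbone.'`) and replace the baseline SGD optimizer with AdamW; everything else is inherited from the standard 1x baselines. A quick sketch for inspecting the merged config, assuming an mmcv 1.x / MMDetection 2.x environment and that it is run from the MMDetection root:

```python
# Sketch only: inspect how the _base_ inheritance and the rsb overrides resolve.
from mmcv import Config

cfg = Config.fromfile(
    'configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py')
print(cfg.optimizer)                # AdamW, lr=0.0002, weight_decay=0.05
print(cfg.model.backbone.init_cfg)  # rsb ImageNet checkpoint, prefix='backbone.'
```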
+ +## Citation + +```latex +@article{wightman2021resnet, +title={Resnet strikes back: An improved training procedure in timm}, +author={Ross Wightman, Hugo Touvron, Hervé Jégou}, +journal={arXiv preprint arXiv:2110.00476}, +year={2021} +} +``` diff --git a/downstream/mmdetection/configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py b/downstream/mmdetection/configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py new file mode 100644 index 0000000..8b601f0 --- /dev/null +++ b/downstream/mmdetection/configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0002, + weight_decay=0.05, + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/downstream/mmdetection/configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py b/downstream/mmdetection/configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py new file mode 100644 index 0000000..fe86684 --- /dev/null +++ b/downstream/mmdetection/configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0002, + weight_decay=0.05, + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/downstream/mmdetection/configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py b/downstream/mmdetection/configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py new file mode 100644 index 0000000..321d98e --- /dev/null +++ b/downstream/mmdetection/configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0002, + weight_decay=0.05, + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/downstream/mmdetection/configs/resnet_strikes_back/metafile.yml b/downstream/mmdetection/configs/resnet_strikes_back/metafile.yml new file mode 100644 index 0000000..4c85a16 --- /dev/null +++ b/downstream/mmdetection/configs/resnet_strikes_back/metafile.yml @@ -0,0 +1,116 @@ +Models: + - Name: faster_rcnn_r50_fpn_rsb-pretrain_1x_coco + In Collection: Faster R-CNN + Config: 
configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py + Metadata: + Training Memory (GB): 3.9 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229-32ae82a9.pth + Paper: + URL: https://arxiv.org/abs/2110.00476 + Title: 'ResNet strikes back: An improved training procedure in timm' + README: configs/resnet_strikes_back/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md + Version: v2.22.0 + + - Name: cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco + In Collection: Cascade R-CNN + Config: configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636-8b9ad50f.pth + Paper: + URL: https://arxiv.org/abs/2110.00476 + Title: 'ResNet strikes back: An improved training procedure in timm' + README: configs/resnet_strikes_back/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md + Version: v2.22.0 + + - Name: retinanet_r50_fpn_rsb-pretrain_1x_coco + In Collection: RetinaNet + Config: configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432-bd24aae9.pth + Paper: + URL: https://arxiv.org/abs/2110.00476 + Title: 'ResNet strikes back: An improved training procedure in timm' + README: configs/resnet_strikes_back/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md + Version: v2.22.0 + + - Name: mask_rcnn_r50_fpn_rsb-pretrain_1x_coco + In Collection: Mask R-CNN + Config: configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py + Metadata: + Training Memory (GB): 4.5 + Epochs: 12 + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth + Paper: + URL: https://arxiv.org/abs/2110.00476 + Title: 'ResNet 
strikes back: An improved training procedure in timm' + README: configs/resnet_strikes_back/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md + Version: v2.22.0 diff --git a/downstream/mmdetection/configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py b/downstream/mmdetection/configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py new file mode 100644 index 0000000..480697a --- /dev/null +++ b/downstream/mmdetection/configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0001, + weight_decay=0.05, + paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/downstream/mmdetection/configs/retinanet/README.md b/downstream/mmdetection/configs/retinanet/README.md new file mode 100644 index 0000000..b9e0a2a --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/README.md @@ -0,0 +1,53 @@ +# RetinaNet + +> [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002) + + + +## Abstract + +The highest accuracy object detectors to date are based on a two-stage approach popularized by R-CNN, where a classifier is applied to a sparse set of candidate object locations. In contrast, one-stage detectors that are applied over a regular, dense sampling of possible object locations have the potential to be faster and simpler, but have trailed the accuracy of two-stage detectors thus far. In this paper, we investigate why this is the case. We discover that the extreme foreground-background class imbalance encountered during training of dense detectors is the central cause. We propose to address this class imbalance by reshaping the standard cross entropy loss such that it down-weights the loss assigned to well-classified examples. Our novel Focal Loss focuses training on a sparse set of hard examples and prevents the vast number of easy negatives from overwhelming the detector during training. To evaluate the effectiveness of our loss, we design and train a simple dense detector we call RetinaNet. Our results show that when trained with the focal loss, RetinaNet is able to match the speed of previous one-stage detectors while surpassing the accuracy of all existing state-of-the-art two-stage detectors. + +
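As a quick illustration of the loss described above, here is a hedged, self-contained sketch of the per-example binary focal loss, FL(p_t) = -alpha_t (1 - p_t)^gamma log(p_t). The values gamma=2 and alpha=0.25 are the ones the paper reports as working best; the function name and sample numbers below are ours, for illustration only.

```python
import math

def binary_focal_loss(p, y, gamma=2.0, alpha=0.25):
    """Focal loss for a single binary prediction: p is the predicted foreground
    probability, y is the 0/1 label; (1 - p_t)**gamma down-weights easy examples."""
    p_t = p if y == 1 else 1.0 - p
    alpha_t = alpha if y == 1 else 1.0 - alpha
    return -alpha_t * (1.0 - p_t) ** gamma * math.log(max(p_t, 1e-12))

print(binary_focal_loss(0.02, 0))  # confident, correct background: ~6e-6 (nearly ignored)
print(binary_focal_loss(0.02, 1))  # hard, misclassified foreground: ~0.94 (dominates)
```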
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :----------: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-18-FPN | pytorch | 1x | 1.7 | | 31.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r18_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055.log.json) | +| R-18-FPN | pytorch | 1x(1 x 8 BS) | 5.0 | | 31.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255.log.json) | +| R-50-FPN | caffe | 1x | 3.5 | 18.6 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531_012518.log.json) | +| R-50-FPN | pytorch | 1x | 3.8 | 19.0 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 2.8 | 31.6 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json) | +| R-50-FPN | pytorch | 2x | - | - | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131_114738.log.json) | +| R-101-FPN | caffe | 1x | 5.5 | 14.7 | 38.5 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531_012536.log.json) | +| R-101-FPN | pytorch | 1x | 5.7 | 15.0 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130_003055.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131_114859.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.0 | 12.1 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130_003004.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131_114812.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.0 | 8.7 | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130_003008.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131_114833.log.json) | + +## Pre-trained Models + +We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
+ +| Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 3x | 3.5 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.log.json) | +| R-101-FPN | caffe | 3x | 5.4 | 40.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.log.json) | +| R-101-FPN | pytorch | 3x | 5.4 | 41 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.log.json) | +| X-101-64x4d-FPN | pytorch | 3x | 9.8 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.log.json) | + +## Citation + +```latex +@inproceedings{lin2017focal, + title={Focal loss for dense object detection}, + author={Lin, Tsung-Yi and Goyal, Priya and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + year={2017} +} +``` diff --git a/downstream/mmdetection/configs/retinanet/metafile.yml b/downstream/mmdetection/configs/retinanet/metafile.yml new file mode 100644 index 0000000..2080771 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/metafile.yml @@ -0,0 +1,312 @@ +Collections: + - Name: RetinaNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Focal Loss + - FPN + - ResNet + Paper: + URL: 
https://arxiv.org/abs/1708.02002 + Title: "Focal Loss for Dense Object Detection" + README: configs/retinanet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/retinanet.py#L6 + Version: v2.0.0 + +Models: + - Name: retinanet_r18_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r18_fpn_1x_coco.py + Metadata: + Training Memory (GB): 1.7 + Training Resources: 8x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 31.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth + + - Name: retinanet_r18_fpn_1x8_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py + Metadata: + Training Memory (GB): 5.0 + Training Resources: 1x V100 GPUs + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 31.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth + + - Name: retinanet_r50_caffe_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.5 + inference time (ms/im): + - value: 53.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth + + - Name: retinanet_r50_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 3.8 + inference time (ms/im): + - value: 52.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth + + - Name: retinanet_r50_fpn_fp16_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py + Metadata: + Training Memory (GB): 2.8 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 31.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth + + - Name: retinanet_r50_fpn_2x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_fpn_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth + + - Name: retinanet_r50_fpn_mstrain_3x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.5 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth + + - Name: retinanet_r101_caffe_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.5 + inference time (ms/im): + - value: 68.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth + + - Name: retinanet_r101_caffe_fpn_mstrain_3x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth + + - Name: retinanet_r101_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.7 + inference time (ms/im): + - value: 66.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth + + - Name: retinanet_r101_fpn_2x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101_fpn_2x_coco.py + Metadata: + Training Memory (GB): 5.7 + inference time (ms/im): + - value: 66.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth + + - Name: retinanet_r101_fpn_mstrain_3x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_r101_fpn_2x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth + + - Name: retinanet_x101_32x4d_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth + + - Name: retinanet_x101_32x4d_fpn_2x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: 
COCO + Metrics: + box AP: 40.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth + + - Name: retinanet_x101_64x4d_fpn_1x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py + Metadata: + Training Memory (GB): 10.0 + inference time (ms/im): + - value: 114.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth + + - Name: retinanet_x101_64x4d_fpn_2x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py + Metadata: + Training Memory (GB): 10.0 + inference time (ms/im): + - value: 114.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth + + - Name: retinanet_x101_64x4d_fpn_mstrain_3x_coco + In Collection: RetinaNet + Config: configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..56eaae2 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './retinanet_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..b87295e --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' +# learning policy +model = dict( + pretrained='open-mmlab://detectron2/resnet101_caffe', + backbone=dict(depth=101)) +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_1x_coco.py new file mode 100644 index 0000000..a7f0600 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_2x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_2x_coco.py new file mode 100644 index 
0000000..721112a --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './retinanet_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py new file mode 100644 index 0000000..6bbcac4 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' +] +# optimizer +model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py new file mode 100644 index 0000000..01a35f2 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# data +data = dict(samples_per_gpu=8) + +# optimizer +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) + +# Note: If the learning rate is set to 0.0025, the mAP will be 32.4. +optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (1 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r18_fpn_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r18_fpn_1x_coco.py new file mode 100644 index 0000000..6197b32 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r18_fpn_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# optimizer +model = dict( + backbone=dict( + depth=18, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (2 samples per GPU) +auto_scale_lr = dict(base_batch_size=16) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..04c9af5 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,41 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py new file mode 100644 index 0000000..4d7b8f2 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py @@ -0,0 +1,46 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py 
b/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..eea9690 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 23]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py new file mode 100644 index 0000000..8057650 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py @@ -0,0 +1,4 @@ +_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' +# learning policy +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000..04bd696 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_2x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_2x_coco.py new file mode 100644 index 0000000..927915f --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_2x_coco.py @@ -0,0 +1,4 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_90k_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_90k_coco.py new file mode 100644 index 0000000..ceda327 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_90k_coco.py @@ -0,0 +1,15 @@ +_base_ = 'retinanet_r50_fpn_1x_coco.py' + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[60000, 80000]) + +# Runner type +runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) + +checkpoint_config = dict(interval=10000) +evaluation = dict(interval=10000, metric='bbox') diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py new file mode 100644 index 0000000..6b6cebe --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py @@ -0,0 +1,3 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +# fp16 settings +fp16 = dict(loss_scale=512.) 
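The `fp16 = dict(loss_scale=512.)` setting above enables mixed-precision training with a fixed loss scale. The following is only a conceptual PyTorch sketch of static loss scaling, not MMDetection's actual FP16 hook: the loss is scaled up before the backward pass so that small gradients do not underflow in half precision, and the gradients are scaled back down before the optimizer step.

```python
import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.01)
loss_scale = 512.0

x, y = torch.randn(8, 4), torch.randn(8, 1)
loss = torch.nn.functional.mse_loss(model(x), y)

opt.zero_grad()
(loss * loss_scale).backward()       # scale up: keeps tiny FP16 gradients representable
for p in model.parameters():
    if p.grad is not None:
        p.grad.div_(loss_scale)      # unscale before updating the weights
opt.step()
```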
diff --git a/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py new file mode 100644 index 0000000..02a2c29 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' +] +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..765a4c2 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py new file mode 100644 index 0000000..14de96f --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..948cd18 --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py new file mode 100644 index 0000000..ad04b6e --- /dev/null +++ b/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './retinanet_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py b/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py new file mode 100644 index 0000000..f6ab512 --- /dev/null +++ 
b/downstream/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' +] +# optimizer +model = dict( + pretrained='open-mmlab://resnext101_64x4d', + backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4)) +optimizer = dict(type='SGD', lr=0.01) diff --git a/downstream/mmdetection/configs/rpn/README.md b/downstream/mmdetection/configs/rpn/README.md new file mode 100644 index 0000000..99addc0 --- /dev/null +++ b/downstream/mmdetection/configs/rpn/README.md @@ -0,0 +1,39 @@ +# RPN + +> [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497) + + + +## Abstract + +State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. + +
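To make the idea above concrete, here is a hedged, minimal PyTorch sketch of an RPN-style head — a toy module of ours, not MMDetection's `RPNHead`: a shared 3x3 convolution over a feature map, followed by two sibling 1x1 convolutions that predict an objectness score and four box deltas for each anchor at every spatial position.

```python
import torch
import torch.nn as nn

class TinyRPNHead(nn.Module):
    """Toy RPN head: shared conv + sibling objectness/regression branches."""

    def __init__(self, in_channels=256, feat_channels=256, num_anchors=3):
        super().__init__()
        self.shared = nn.Conv2d(in_channels, feat_channels, 3, padding=1)
        self.objectness = nn.Conv2d(feat_channels, num_anchors, 1)      # score per anchor
        self.deltas = nn.Conv2d(feat_channels, num_anchors * 4, 1)      # (dx, dy, dw, dh)

    def forward(self, feat):
        feat = torch.relu(self.shared(feat))
        return self.objectness(feat), self.deltas(feat)

feat = torch.randn(1, 256, 50, 50)            # one level of a feature pyramid
scores, deltas = TinyRPNHead()(feat)
print(scores.shape, deltas.shape)             # (1, 3, 50, 50) and (1, 12, 50, 50)
```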
    + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AR1000 | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | caffe | 1x | 3.5 | 22.6 | 58.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_caffe_fpn_1x_coco/rpn_r50_caffe_fpn_1x_coco_20200531-5b903a37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_caffe_fpn_1x_coco/rpn_r50_caffe_fpn_1x_coco_20200531_012334.log.json) | +| R-50-FPN | pytorch | 1x | 3.8 | 22.3 | 58.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218_151240.log.json) | +| R-50-FPN | pytorch | 2x | - | - | 58.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_2x_coco/rpn_r50_fpn_2x_coco_20200131-0728c9b3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_2x_coco/rpn_r50_fpn_2x_coco_20200131_190631.log.json) | +| R-101-FPN | caffe | 1x | 5.4 | 17.3 | 60.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_caffe_fpn_1x_coco/rpn_r101_caffe_fpn_1x_coco_20200531-0629a2e2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_caffe_fpn_1x_coco/rpn_r101_caffe_fpn_1x_coco_20200531_012345.log.json) | +| R-101-FPN | pytorch | 1x | 5.8 | 16.5 | 59.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_1x_coco/rpn_r101_fpn_1x_coco_20200131-2ace2249.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_1x_coco/rpn_r101_fpn_1x_coco_20200131_191000.log.json) | +| R-101-FPN | pytorch | 2x | - | - | 60.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_2x_coco/rpn_r101_fpn_2x_coco_20200131-24e3db1a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_2x_coco/rpn_r101_fpn_2x_coco_20200131_191106.log.json) | +| X-101-32x4d-FPN | pytorch | 1x | 7.0 | 13.0 | 60.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_1x_coco/rpn_x101_32x4d_fpn_1x_coco_20200219-b02646c6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_1x_coco/rpn_x101_32x4d_fpn_1x_coco_20200219_012037.log.json) | +| X-101-32x4d-FPN | pytorch | 2x | - | - | 61.1 
| [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_2x_coco/rpn_x101_32x4d_fpn_2x_coco_20200208-d22bd0bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_2x_coco/rpn_x101_32x4d_fpn_2x_coco_20200208_200752.log.json) | +| X-101-64x4d-FPN | pytorch | 1x | 10.1 | 9.1 | 61.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_1x_coco/rpn_x101_64x4d_fpn_1x_coco_20200208-cde6f7dd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_1x_coco/rpn_x101_64x4d_fpn_1x_coco_20200208_200752.log.json) | +| X-101-64x4d-FPN | pytorch | 2x | - | - | 61.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_2x_coco/rpn_x101_64x4d_fpn_2x_coco_20200208-c65f524f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_2x_coco/rpn_x101_64x4d_fpn_2x_coco_20200208_200752.log.json) | + +## Citation + +```latex +@inproceedings{ren2015faster, + title={Faster r-cnn: Towards real-time object detection with region proposal networks}, + author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian}, + booktitle={Advances in neural information processing systems}, + year={2015} +} +``` diff --git a/downstream/mmdetection/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..27be946 --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = './rpn_r50_caffe_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/downstream/mmdetection/configs/rpn/rpn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/rpn/rpn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..962728f --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/rpn/rpn_r101_fpn_2x_coco.py b/downstream/mmdetection/configs/rpn/rpn_r101_fpn_2x_coco.py new file mode 100644 index 0000000..ac7671c --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_r101_fpn_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './rpn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/rpn/rpn_r50_caffe_c4_1x_coco.py b/downstream/mmdetection/configs/rpn/rpn_r50_caffe_c4_1x_coco.py new file mode 100644 index 0000000..6da0ee9 --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_r50_caffe_c4_1x_coco.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/rpn_r50_caffe_c4.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# dataset settings +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='LoadAnnotations', with_bbox=True, with_label=False), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='proposal_fast') diff --git a/downstream/mmdetection/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py b/downstream/mmdetection/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py new file mode 100644 index 0000000..68c36fa --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py @@ -0,0 +1,41 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + norm_cfg=dict(requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe'))) +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_label=False), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/rpn/rpn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/rpn/rpn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..26f95a3 --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_r50_fpn_1x_coco.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_label=False), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes']), +] +data = dict(train=dict(pipeline=train_pipeline)) +evaluation = dict(interval=1, metric='proposal_fast') diff --git a/downstream/mmdetection/configs/rpn/rpn_r50_fpn_2x_coco.py 
b/downstream/mmdetection/configs/rpn/rpn_r50_fpn_2x_coco.py new file mode 100644 index 0000000..2f264bf --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_r50_fpn_2x_coco.py @@ -0,0 +1,5 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' + +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py new file mode 100644 index 0000000..d0c7394 --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py new file mode 100644 index 0000000..c6880b7 --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './rpn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py b/downstream/mmdetection/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py new file mode 100644 index 0000000..96e691a --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './rpn_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py b/downstream/mmdetection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py new file mode 100644 index 0000000..4182a39 --- /dev/null +++ b/downstream/mmdetection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py @@ -0,0 +1,14 @@ +_base_ = './rpn_r50_fpn_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/sabl/README.md b/downstream/mmdetection/configs/sabl/README.md new file mode 100644 index 0000000..03992be --- /dev/null +++ b/downstream/mmdetection/configs/sabl/README.md @@ -0,0 +1,47 @@ +# SABL + +> [Side-Aware Boundary Localization for More Precise Object Detection](https://arxiv.org/abs/1912.04260) + + + +## Abstract + +Current object detection frameworks mainly rely on bounding box regression to localize objects. Despite the remarkable progress in recent years, the precision of bounding box regression remains unsatisfactory, hence limiting performance in object detection. 
We observe that precise localization requires careful placement of each side of the bounding box. However, the mainstream approach, which focuses on predicting centers and sizes, is not the most effective way to accomplish this task, especially when there exist displacements with large variance between the anchors and the targets. In this paper, we propose an alternative approach, named Side-Aware Boundary Localization (SABL), where each side of the bounding box is localized with a dedicated network branch. To tackle the difficulty of precise localization in the presence of displacements with large variance, we further propose a two-step localization scheme, which first predicts a range of movement through bucket prediction and then pinpoints the precise position within the predicted bucket. We test the proposed method on both two-stage and single-stage detection frameworks. Replacing the standard bounding box regression branch with the proposed design leads to significant improvements on Faster R-CNN, RetinaNet, and Cascade R-CNN, by 3.0%, 1.7%, and 0.9%, respectively. + +
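The two-step scheme is easiest to see on a single box side: pick a coarse bucket first, then regress a fine offset inside it. The snippet below is only a toy sketch of that idea with made-up numbers, not the `BucketingBBoxCoder` used by the configs in this folder (which set `num_buckets=14`):

```python
import numpy as np

# Toy sketch of side-aware, bucket-then-refine localization for one side (x-axis).
anchor_left, anchor_right = 100.0, 200.0   # hypothetical proposal extent
gt_left = 131.0                            # hypothetical ground-truth left boundary

num_buckets = 7
bucket_width = (anchor_right - anchor_left) / num_buckets
bucket_centers = anchor_left + (np.arange(num_buckets) + 0.5) * bucket_width

# Step 1: bucket prediction -- here simply the bucket whose center is closest.
bucket_id = int(np.argmin(np.abs(bucket_centers - gt_left)))

# Step 2: fine regression -- normalized offset from the chosen bucket's center.
offset = (gt_left - bucket_centers[bucket_id]) / bucket_width

decoded_left = bucket_centers[bucket_id] + offset * bucket_width
print(bucket_id, round(offset, 3), decoded_left)  # -> 2, -0.33, 131.0 (up to float precision)
```

In the actual head, the bucket label is produced by a classification branch and the within-bucket offset by a regression branch, which is what the `loss_bbox_cls` and `loss_bbox_reg` entries in the configs below correspond to.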
    + +## Results and Models + +The results on COCO 2017 val is shown in the below table. (results on test-dev are usually slightly higher than val). +Single-scale testing (1333x800) is adopted in all results. + +| Method | Backbone | Lr schd | ms-train | box AP | Config | Download | +| :----------------: | :-------: | :-----: | :------: | :----: | :----------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| SABL Faster R-CNN | R-50-FPN | 1x | N | 39.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/20200830_130324.log.json) | +| SABL Faster R-CNN | R-101-FPN | 1x | N | 41.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/sabl_faster_rcnn_r101_fpn_1x_coco-f804c6c1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/20200830_183949.log.json) | +| SABL Cascade R-CNN | R-50-FPN | 1x | N | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/sabl_cascade_rcnn_r50_fpn_1x_coco-e1748e5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/20200831_033726.log.json) | +| SABL Cascade R-CNN | R-101-FPN | 1x | N | 43.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/sabl_cascade_rcnn_r101_fpn_1x_coco-2b83e87c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/20200831_141745.log.json) | + +| Method | Backbone | GN | Lr schd | ms-train | box AP | Config | Download | +| :------------: | :-------: | :-: | :-----: | :---------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| SABL RetinaNet | R-50-FPN | N | 1x | N | 37.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/20200830_053451.log.json) | +| SABL RetinaNet | R-50-FPN | Y | 1x | N | 38.8 | 
[config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/sabl_retinanet_r50_fpn_gn_1x_coco-e16dfcf1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/20200831_141955.log.json) | +| SABL RetinaNet | R-101-FPN | N | 1x | N | 39.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/sabl_retinanet_r101_fpn_1x_coco-42026904.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/20200831_034256.log.json) | +| SABL RetinaNet | R-101-FPN | Y | 1x | N | 40.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/sabl_retinanet_r101_fpn_gn_1x_coco-40a893e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/20200830_201422.log.json) | +| SABL RetinaNet | R-101-FPN | Y | 2x | Y (640~800) | 42.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco-1e63382c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/20200830_144807.log.json) | +| SABL RetinaNet | R-101-FPN | Y | 2x | Y (480~960) | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco-5342f857.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/20200830_164537.log.json) | + +## Citation + +We provide config files to reproduce the object detection results in the ECCV 2020 Spotlight paper for [Side-Aware Boundary Localization for More Precise Object Detection](https://arxiv.org/abs/1912.04260). 
+ +```latex +@inproceedings{Wang_2020_ECCV, + title = {Side-Aware Boundary Localization for More Precise Object Detection}, + author = {Jiaqi Wang and Wenwei Zhang and Yuhang Cao and Kai Chen and Jiangmiao Pang and Tao Gong and Jianping Shi and Chen Change Loy and Dahua Lin}, + booktitle = {ECCV}, + year = {2020} +} +``` diff --git a/downstream/mmdetection/configs/sabl/metafile.yml b/downstream/mmdetection/configs/sabl/metafile.yml new file mode 100644 index 0000000..23c51cf --- /dev/null +++ b/downstream/mmdetection/configs/sabl/metafile.yml @@ -0,0 +1,140 @@ +Collections: + - Name: SABL + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - SABL + Paper: + URL: https://arxiv.org/abs/1912.04260 + Title: 'Side-Aware Boundary Localization for More Precise Object Detection' + README: configs/sabl/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/roi_heads/bbox_heads/sabl_head.py#L14 + Version: v2.4.0 + +Models: + - Name: sabl_faster_rcnn_r50_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth + + - Name: sabl_faster_rcnn_r101_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/sabl_faster_rcnn_r101_fpn_1x_coco-f804c6c1.pth + + - Name: sabl_cascade_rcnn_r50_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/sabl_cascade_rcnn_r50_fpn_1x_coco-e1748e5e.pth + + - Name: sabl_cascade_rcnn_r101_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/sabl_cascade_rcnn_r101_fpn_1x_coco-2b83e87c.pth + + - Name: sabl_retinanet_r50_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth + + - Name: sabl_retinanet_r50_fpn_gn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/sabl_retinanet_r50_fpn_gn_1x_coco-e16dfcf1.pth + + - Name: sabl_retinanet_r101_fpn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box 
AP: 39.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/sabl_retinanet_r101_fpn_1x_coco-42026904.pth + + - Name: sabl_retinanet_r101_fpn_gn_1x_coco + In Collection: SABL + Config: configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/sabl_retinanet_r101_fpn_gn_1x_coco-40a893e8.pth + + - Name: sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco + In Collection: SABL + Config: configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco-1e63382c.pth + + - Name: sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco + In Collection: SABL + Config: configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco-5342f857.pth diff --git a/downstream/mmdetection/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..64fe230 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,90 @@ +_base_ = [ + '../_base_/models/cascade_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + roi_head=dict(bbox_head=[ + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)), + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)), + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + 
reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)) + ])) diff --git a/downstream/mmdetection/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..4b28a59 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,86 @@ +_base_ = [ + '../_base_/models/cascade_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + roi_head=dict(bbox_head=[ + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)), + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)), + dict( + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)) + ])) diff --git a/downstream/mmdetection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py new file mode 100644 index 0000000..e48d425 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py @@ -0,0 +1,38 @@ +_base_ = [ + 
'../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + roi_head=dict( + bbox_head=dict( + _delete_=True, + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)))) diff --git a/downstream/mmdetection/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..732c7ba --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict( + _delete_=True, + type='SABLHead', + num_classes=80, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, + loss_weight=1.0)))) diff --git a/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py new file mode 100644 index 0000000..b08e916 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, 
loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py b/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py new file mode 100644 index 0000000..fc30d63 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + norm_cfg=norm_cfg, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py b/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py new file mode 100644 index 0000000..e8fe166 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py @@ -0,0 +1,73 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + norm_cfg=norm_cfg, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + 
alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 480), (1333, 960)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py b/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py new file mode 100644 index 0000000..30c4339 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py @@ -0,0 +1,73 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + norm_cfg=norm_cfg, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git 
a/downstream/mmdetection/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py new file mode 100644 index 0000000..6fe6bd6 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py @@ -0,0 +1,50 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py b/downstream/mmdetection/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py new file mode 100644 index 0000000..6acf080 --- /dev/null +++ b/downstream/mmdetection/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py @@ -0,0 +1,52 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + bbox_head=dict( + _delete_=True, + type='SABLRetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + norm_cfg=norm_cfg, + bbox_coder=dict( + type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='ApproxMaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0.0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/scnet/README.md b/downstream/mmdetection/configs/scnet/README.md new file mode 100644 index 0000000..773874a --- /dev/null +++ b/downstream/mmdetection/configs/scnet/README.md @@ -0,0 +1,63 @@ +# SCNet + +> [SCNet: 
Training Inference Sample Consistency for Instance Segmentation](https://arxiv.org/abs/2012.10150) + + + +## Abstract + + + +Cascaded architectures have brought significant performance improvement in object detection and instance segmentation. However, there are lingering issues regarding the disparity in the Intersection-over-Union (IoU) distribution of the samples between training and inference. This disparity can potentially degrade detection accuracy. This paper proposes an architecture referred to as Sample Consistency Network (SCNet) to ensure that the IoU distribution of the samples at training time is close to that at inference time. Furthermore, SCNet incorporates feature relay and utilizes global contextual information to further reinforce the reciprocal relationships among classifying, detecting, and segmenting sub-tasks. Extensive experiments on the standard COCO dataset reveal the effectiveness of the proposed method over multiple evaluation metrics, including box AP, mask AP, and inference speed. In particular, while running 38% faster, the proposed SCNet improves box AP and mask AP by 1.3 and 2.3 points, respectively, compared to the strong Cascade Mask R-CNN baseline. + +
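The extra components mentioned above (feature relay and global context) show up directly in the model definition. As a minimal sketch, assuming mmdet 2.x and mmcv 1.x are installed and the code is run from the `downstream/mmdetection` directory of this repo, you can build the model structure from the config below and inspect the added heads:

```python
from mmcv import Config
from mmdet.models import build_detector

# Build SCNet from the config (structure only; no weights are loaded here).
cfg = Config.fromfile('configs/scnet/scnet_r50_fpn_1x_coco.py')
model = build_detector(cfg.model)

roi_head = model.roi_head
print(type(roi_head).__name__)                   # SCNetRoIHead
print(type(roi_head.feat_relay_head).__name__)   # FeatureRelayHead
print(type(roi_head.glbctx_head).__name__)       # GlobalContextHead
```

The attribute names here mirror the config keys (`feat_relay_head`, `glbctx_head`); if they differ in your mmdet version, inspect `model.roi_head` directly.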
    + +## Dataset + +SCNet requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. +The directory should be like this. + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +| | ├── stuffthingmaps +``` + +## Results and Models + +The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val) + +| Backbone | Style | Lr schd | Mem (GB) | Inf speed (fps) | box AP | mask AP | TTA box AP | TTA mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :-------------: | :----: | :-----: | :--------: | :---------: | :------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-FPN | pytorch | 1x | 7.0 | 6.2 | 43.5 | 39.2 | 44.8 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco_20210117_192725.log.json) | +| R-50-FPN | pytorch | 20e | 7.0 | 6.2 | 44.5 | 40.0 | 45.8 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco_20210116_060148.log.json) | +| R-101-FPN | pytorch | 20e | 8.9 | 5.8 | 45.8 | 40.9 | 47.3 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco_20210118_175824.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 13.2 | 4.9 | 47.5 | 42.3 | 48.9 | 44.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco_20210120_045959.log.json) | + +### Notes + +- Training hyper-parameters are identical to those of [HTC](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc). +- TTA means Test Time Augmentation, which applies horizontal flip and multi-scale testing. Refer to [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_1x_coco.py). + +## Citation + +We provide the code for reproducing experiment results of [SCNet](https://arxiv.org/abs/2012.10150). 
+ +```latex +@inproceedings{vu2019cascade, + title={SCNet: Training Inference Sample Consistency for Instance Segmentation}, + author={Vu, Thang and Haeyong, Kang and Yoo, Chang D}, + booktitle={AAAI}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/scnet/metafile.yml b/downstream/mmdetection/configs/scnet/metafile.yml new file mode 100644 index 0000000..15eaebf --- /dev/null +++ b/downstream/mmdetection/configs/scnet/metafile.yml @@ -0,0 +1,116 @@ +Collections: + - Name: SCNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - SCNet + Paper: + URL: https://arxiv.org/abs/2012.10150 + Title: 'SCNet: Training Inference Sample Consistency for Instance Segmentation' + README: configs/scnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/scnet.py#L6 + Version: v2.9.0 + +Models: + - Name: scnet_r50_fpn_1x_coco + In Collection: SCNet + Config: configs/scnet/scnet_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 161.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth + + - Name: scnet_r50_fpn_20e_coco + In Collection: SCNet + Config: configs/scnet/scnet_r50_fpn_20e_coco.py + Metadata: + Training Memory (GB): 7.0 + inference time (ms/im): + - value: 161.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth + + - Name: scnet_r101_fpn_20e_coco + In Collection: SCNet + Config: configs/scnet/scnet_r101_fpn_20e_coco.py + Metadata: + Training Memory (GB): 8.9 + inference time (ms/im): + - value: 172.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth + + - Name: scnet_x101_64x4d_fpn_20e_coco + In Collection: SCNet + Config: configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py + Metadata: + Training Memory (GB): 13.2 + inference time (ms/im): + - value: 204.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) + Epochs: 20 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth diff --git a/downstream/mmdetection/configs/scnet/scnet_r101_fpn_20e_coco.py b/downstream/mmdetection/configs/scnet/scnet_r101_fpn_20e_coco.py new file mode 100644 index 0000000..ebba529 --- /dev/null +++ 
b/downstream/mmdetection/configs/scnet/scnet_r101_fpn_20e_coco.py @@ -0,0 +1,6 @@ +_base_ = './scnet_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/scnet/scnet_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/scnet/scnet_r50_fpn_1x_coco.py new file mode 100644 index 0000000..fe03b0d --- /dev/null +++ b/downstream/mmdetection/configs/scnet/scnet_r50_fpn_1x_coco.py @@ -0,0 +1,136 @@ +_base_ = '../htc/htc_r50_fpn_1x_coco.py' +# model settings +model = dict( + type='SCNet', + roi_head=dict( + _delete_=True, + type='SCNetRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='SCNetBBoxHead', + num_shared_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='SCNetBBoxHead', + num_shared_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='SCNetBBoxHead', + num_shared_fcs=2, + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='SCNetMaskHead', + num_convs=12, + in_channels=256, + conv_out_channels=256, + num_classes=80, + conv_to_res=True, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), + semantic_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[8]), + semantic_head=dict( + type='SCNetSemanticHead', + num_ins=5, + fusion_level=1, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + loss_seg=dict( + type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2), + conv_to_res=True), + glbctx_head=dict( + type='GlobalContextHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_weight=3.0, + conv_to_res=True), + feat_relay_head=dict( + type='FeatureRelayHead', + in_channels=1024, + out_conv_channels=256, + roi_feat_size=7, + scale_factor=2))) + +# uncomment below code to enable test time augmentations +# img_norm_cfg = dict( +# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# test_pipeline = [ +# 
dict(type='LoadImageFromFile'), +# dict( +# type='MultiScaleFlipAug', +# img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800), +# (1400, 2100)], +# flip=True, +# transforms=[ +# dict(type='Resize', keep_ratio=True), +# dict(type='RandomFlip', flip_ratio=0.5), +# dict(type='Normalize', **img_norm_cfg), +# dict(type='Pad', size_divisor=32), +# dict(type='ImageToTensor', keys=['img']), +# dict(type='Collect', keys=['img']), +# ]) +# ] +# data = dict( +# val=dict(pipeline=test_pipeline), +# test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/scnet/scnet_r50_fpn_20e_coco.py b/downstream/mmdetection/configs/scnet/scnet_r50_fpn_20e_coco.py new file mode 100644 index 0000000..3b121a6 --- /dev/null +++ b/downstream/mmdetection/configs/scnet/scnet_r50_fpn_20e_coco.py @@ -0,0 +1,4 @@ +_base_ = './scnet_r50_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/downstream/mmdetection/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py b/downstream/mmdetection/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py new file mode 100644 index 0000000..1e54b03 --- /dev/null +++ b/downstream/mmdetection/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py @@ -0,0 +1,15 @@ +_base_ = './scnet_r50_fpn_20e_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/scnet/scnet_x101_64x4d_fpn_8x1_20e_coco.py b/downstream/mmdetection/configs/scnet/scnet_x101_64x4d_fpn_8x1_20e_coco.py new file mode 100644 index 0000000..be8ddc5 --- /dev/null +++ b/downstream/mmdetection/configs/scnet/scnet_x101_64x4d_fpn_8x1_20e_coco.py @@ -0,0 +1,8 @@ +_base_ = './scnet_x101_64x4d_fpn_20e_coco.py' +data = dict(samples_per_gpu=1, workers_per_gpu=1) +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (1 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/downstream/mmdetection/configs/scratch/README.md b/downstream/mmdetection/configs/scratch/README.md new file mode 100644 index 0000000..189f181 --- /dev/null +++ b/downstream/mmdetection/configs/scratch/README.md @@ -0,0 +1,35 @@ +# Scratch + +> [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883) + + + +## Abstract + +We report competitive results on object detection and instance segmentation on the COCO dataset using standard models trained from random initialization. The results are no worse than their ImageNet pre-training counterparts even when using the hyper-parameters of the baseline system (Mask R-CNN) that were optimized for fine-tuning pre-trained models, with the sole exception of increasing the number of training iterations so the randomly initialized models may converge. Training from random initialization is surprisingly robust; our results hold even when: (i) using only 10% of the training data, (ii) for deeper and wider models, and (iii) for multiple tasks and metrics. Experiments show that ImageNet pre-training speeds up convergence early in training, but does not necessarily provide regularization or improve final target task accuracy. 
To push the envelope, we demonstrate 50.9 AP on COCO object detection without using any external data---a result on par with the top COCO 2017 competition results that used ImageNet pre-training. These observations challenge the conventional wisdom of ImageNet pre-training for dependent tasks and we expect these discoveries will encourage people to rethink the current de facto paradigm of 'pre-training and fine-tuning' in computer vision. + +
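In config terms the recipe is a handful of overrides on the standard baselines. The excerpt below condenses the Faster R-CNN variant defined in this folder (`faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py`) for readability; it is not a standalone config and still relies on the `_base_` files it inherits from:

```python
# Key changes for training from random initialization (excerpt, not a full config).
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)  # GN instead of frozen BN
model = dict(
    backbone=dict(
        frozen_stages=-1,           # train all backbone stages
        zero_init_residual=False,
        norm_cfg=norm_cfg,
        init_cfg=None),             # no ImageNet checkpoint
    neck=dict(norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg)))
# A longer "6x" schedule gives the randomly initialized weights time to converge.
lr_config = dict(warmup_ratio=0.1, step=[65, 71])
runner = dict(type='EpochBasedRunner', max_epochs=73)
```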
    + +## Results and Models + +| Model | Backbone | Style | Lr schd | box AP | mask AP | Config | Download | +| :----------: | :------: | :-----: | :-----: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Faster R-CNN | R-50-FPN | pytorch | 6x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_20200201_193013.log.json) | +| Mask R-CNN | R-50-FPN | pytorch | 6x | 41.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_20200201_193051.log.json) | + +Note: + +- The above models are trained with 16 GPUs. 
+ +## Citation + +```latex +@article{he2018rethinking, + title={Rethinking imagenet pre-training}, + author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr}, + journal={arXiv preprint arXiv:1811.08883}, + year={2018} +} +``` diff --git a/downstream/mmdetection/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py b/downstream/mmdetection/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py new file mode 100644 index 0000000..55aa3a6 --- /dev/null +++ b/downstream/mmdetection/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + frozen_stages=-1, + zero_init_residual=False, + norm_cfg=norm_cfg, + init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg))) +# optimizer +optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) +optimizer_config = dict(_delete_=True, grad_clip=None) +# learning policy +lr_config = dict(warmup_ratio=0.1, step=[65, 71]) +runner = dict(type='EpochBasedRunner', max_epochs=73) diff --git a/downstream/mmdetection/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py b/downstream/mmdetection/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py new file mode 100644 index 0000000..cc52cb8 --- /dev/null +++ b/downstream/mmdetection/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) +model = dict( + backbone=dict( + frozen_stages=-1, + zero_init_residual=False, + norm_cfg=norm_cfg, + init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) +# optimizer +optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) +optimizer_config = dict(_delete_=True, grad_clip=None) +# learning policy +lr_config = dict(warmup_ratio=0.1, step=[65, 71]) +runner = dict(type='EpochBasedRunner', max_epochs=73) diff --git a/downstream/mmdetection/configs/scratch/metafile.yml b/downstream/mmdetection/configs/scratch/metafile.yml new file mode 100644 index 0000000..65025fa --- /dev/null +++ b/downstream/mmdetection/configs/scratch/metafile.yml @@ -0,0 +1,48 @@ +Collections: + - Name: Rethinking ImageNet Pre-training + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - RPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1811.08883 + Title: 'Rethinking ImageNet Pre-training' + README: configs/scratch/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py + Version: v2.0.0 + +Models: + - Name: faster_rcnn_r50_fpn_gn-all_scratch_6x_coco + In Collection: Rethinking ImageNet Pre-training + Config: configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py + Metadata: + Epochs: 72 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth + + - Name: mask_rcnn_r50_fpn_gn-all_scratch_6x_coco + In Collection: Rethinking ImageNet Pre-training + Config: configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py + Metadata: + Epochs: 72 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth diff --git a/downstream/mmdetection/configs/seesaw_loss/README.md b/downstream/mmdetection/configs/seesaw_loss/README.md new file mode 100644 index 0000000..696b008 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/README.md @@ -0,0 +1,47 @@ +# Seesaw Loss + +> [Seesaw Loss for Long-Tailed Instance Segmentation](https://arxiv.org/abs/2008.10032) + + + +## Abstract + +Instance segmentation has witnessed a remarkable progress on class-balanced benchmarks. However, they fail to perform as accurately in real-world scenarios, where the category distribution of objects naturally comes with a long tail. Instances of head classes dominate a long-tailed dataset and they serve as negative samples of tail categories. The overwhelming gradients of negative samples on tail classes lead to a biased learning process for classifiers. Consequently, objects of tail categories are more likely to be misclassified as backgrounds or head categories. To tackle this problem, we propose Seesaw Loss to dynamically re-balance gradients of positive and negative samples for each category, with two complementary factors, i.e., mitigation factor and compensation factor. The mitigation factor reduces punishments to tail categories w.r.t. the ratio of cumulative training instances between different categories. Meanwhile, the compensation factor increases the penalty of misclassified instances to avoid false positives of tail categories. We conduct extensive experiments on Seesaw Loss with mainstream frameworks and different data sampling strategies. With a simple end-to-end training pipeline, Seesaw Loss obtains significant gains over Cross-Entropy Loss, and achieves state-of-the-art performance on LVIS dataset without bells and whistles. + +
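The re-balancing can be sketched with a toy computation. The snippet below paraphrases the two factors described in the abstract, using the `p=0.8` and `q=2.0` values that appear in the configs in this folder; it is an illustration of the idea only, not mmdet's `SeesawLoss` implementation:

```python
# Toy illustration: for a sample of ground-truth class i, the gradient on a
# negative class j is scaled by a seesaw coefficient mitigation * compensation.
p, q = 0.8, 2.0

def mitigation(n_i, n_j):
    # Damp the punishment on class j when it has fewer training instances than i.
    return min(1.0, (n_j / n_i) ** p)

def compensation(prob_i, prob_j):
    # Scale the penalty back up when class j is (mis)scored higher than the true class i.
    return max(1.0, (prob_j / prob_i) ** q)

# Head class i (10,000 instances) vs. tail class j (50 instances):
print(mitigation(10_000, 50))                  # ~0.014 -> the tail class is punished less
# ...unless the tail class is confidently predicted over the true class:
print(compensation(prob_i=0.2, prob_j=0.6))    # 9.0  -> the penalty is compensated
```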
    + +- Please setup [LVIS dataset](../lvis/README.md) for MMDetection. + +- RFS indicates to use oversample strategy [here](../../docs/tutorials/customize_dataset.md#class-balanced-dataset) with oversample threshold `1e-3`. + +## Results and models of Seasaw Loss on LVIS v1 dataset + +| Method | Backbone | Style | Lr schd | Data Sampler | Norm Mask | box AP | mask AP | Config | Download | +| :----------------: | :-------: | :-----: | :-----: | :----------: | :-------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask R-CNN | R-50-FPN | pytorch | 2x | random | N | 25.6 | 25.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-50-FPN | pytorch | 2x | random | Y | 25.6 | 25.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-101-FPN | pytorch | 2x | random | N | 27.4 | 26.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-101-FPN | pytorch | 2x | random | Y | 27.2 | 27.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-50-FPN | pytorch | 2x | RFS | N | 27.6 | 26.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-50-FPN | pytorch | 2x | 
RFS | Y | 27.6 | 26.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | N | 28.9 | 27.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | Y | 28.9 | 28.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | random | N | 33.1 | 29.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | random | Y | 33.0 | 30.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | +| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | N | 30.0 | 29.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | +| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | Y | 32.8 | 30.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | + +## Citation + +We provide config files to reproduce the instance segmentation performance in the CVPR 2021 paper for [Seesaw Loss for Long-Tailed Instance Segmentation](https://arxiv.org/abs/2008.10032). + +```latex +@inproceedings{wang2021seesaw, + title={Seesaw Loss for Long-Tailed Instance Segmentation}, + author={Jiaqi Wang and Wenwei Zhang and Yuhang Zang and Yuhang Cao and Jiangmiao Pang and Tao Gong and Kai Chen and Ziwei Liu and Chen Change Loy and Dahua Lin}, + booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..beeb0d1 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py @@ -0,0 +1,132 @@ +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + 
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +dataset_type = 'LVISV1Dataset' +data_root = 'data/lvis_v1/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_train.json', + img_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_val.json', + img_prefix=data_root, + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_val.json', + img_prefix=data_root, + pipeline=test_pipeline)) +evaluation = dict(interval=24, metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..0f29948 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py @@ -0,0 +1,5 @@ +_base_ = './cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501 +model = dict( + roi_head=dict( + mask_head=dict( + predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..bb88750 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py @@ -0,0 +1,98 @@ +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/lvis_v1_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, 
+ cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=1203, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) +evaluation = dict(interval=24, metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..262e76b --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py @@ -0,0 +1,5 @@ +_base_ = './cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501 +model = dict( + roi_head=dict( + mask_head=dict( + predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..57deab1 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..a539929 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501 +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff 
--git a/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..1f5065e --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..13d0b5f --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501 +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..743f5f2 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict( + num_classes=1203, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0)), + mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +dataset_type = 'LVISV1Dataset' +data_root = 'data/lvis_v1/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_train.json', + img_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 
'annotations/lvis_v1_val.json', + img_prefix=data_root, + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_val.json', + img_prefix=data_root, + pipeline=test_pipeline)) +evaluation = dict(interval=24, metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..0af8921 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py @@ -0,0 +1,5 @@ +_base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' +model = dict( + roi_head=dict( + mask_head=dict( + predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..4fc1504 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py @@ -0,0 +1,41 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/lvis_v1_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +model = dict( + roi_head=dict( + bbox_head=dict( + num_classes=1203, + cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), + loss_cls=dict( + type='SeesawLoss', + p=0.8, + q=2.0, + num_classes=1203, + loss_weight=1.0)), + mask_head=dict(num_classes=1203)), + test_cfg=dict( + rcnn=dict( + score_thr=0.0001, + # LVIS allows up to 300 + max_per_img=300))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) +evaluation = dict(interval=12, metric=['bbox', 'segm']) diff --git a/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py new file mode 100644 index 0000000..0ef6bd2 --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py @@ -0,0 +1,5 @@ +_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' +model = dict( + roi_head=dict( + mask_head=dict( + predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/downstream/mmdetection/configs/seesaw_loss/metafile.yml b/downstream/mmdetection/configs/seesaw_loss/metafile.yml new file mode 100644 index 0000000..70dd2fe --- /dev/null +++ b/downstream/mmdetection/configs/seesaw_loss/metafile.yml @@ -0,0 +1,203 @@ +Collections: + - Name: Seesaw Loss + Metadata: + Training 
Data: LVIS + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Softmax + - RPN + - Convolution + - Dense Connections + - FPN + - ResNet + - RoIAlign + - Seesaw Loss + Paper: + URL: https://arxiv.org/abs/2008.10032 + Title: 'Seesaw Loss for Long-Tailed Instance Segmentation' + README: configs/seesaw_loss/README.md + +Models: + - Name: mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 25.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 25.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth + - Name: mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 25.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 25.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth + - Name: mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.4 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth + - Name: mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.2 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 27.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth + - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth + - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.8 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth + - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 28.9 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 27.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth + - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 28.9 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 28.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth + - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 33.1 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 29.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth + - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 33.0 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 30.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth + - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 30.0 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 29.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth + - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 32.8 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 30.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth diff --git a/downstream/mmdetection/configs/selfsup_pretrain/README.md 
b/downstream/mmdetection/configs/selfsup_pretrain/README.md new file mode 100644 index 0000000..9bd92cb --- /dev/null +++ b/downstream/mmdetection/configs/selfsup_pretrain/README.md @@ -0,0 +1,109 @@ +# Backbones Trained by Self-Supervised Algorithms + + + +## Abstract + +Unsupervised image representations have significantly reduced the gap with supervised pretraining, notably with the recent achievements of contrastive learning methods. These contrastive methods typically work online and rely on a large number of explicit pairwise feature comparisons, which is computationally challenging. In this paper, we propose an online algorithm, SwAV, that takes advantage of contrastive methods without requiring to compute pairwise comparisons. Specifically, our method simultaneously clusters the data while enforcing consistency between cluster assignments produced for different augmentations (or views) of the same image, instead of comparing features directly as in contrastive learning. Simply put, we use a swapped prediction mechanism where we predict the cluster assignment of a view from the representation of another view. Our method can be trained with large and small batches and can scale to unlimited amounts of data. Compared to previous contrastive methods, our method is more memory efficient since it does not require a large memory bank or a special momentum network. In addition, we also propose a new data augmentation strategy, multi-crop, that uses a mix of views with different resolutions in place of two full-resolution views, without increasing the memory or compute requirements much. We validate our findings by achieving 75.3% top-1 accuracy on ImageNet with ResNet-50, as well as surpassing supervised pretraining on all the considered transfer tasks. +
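+The swapped prediction mechanism summarized above can be sketched in a few lines. The snippet below is only an illustrative sketch, not the released SwAV implementation: the function and argument names are hypothetical, and the Sinkhorn-Knopp step that actually produces the codes `q1`/`q2` is assumed to have run already.
+
+```python
+# Hypothetical sketch of SwAV's swapped prediction (illustrative names only).
+import torch.nn.functional as F
+
+
+def swapped_prediction_loss(z1, z2, prototypes, q1, q2, temperature=0.1):
+    """z1, z2: L2-normalized features of two views of the same images, (N, D).
+    prototypes: (K, D) learnable cluster centers.
+    q1, q2: soft cluster assignments ("codes") of each view, (N, K).
+    Each view is trained to predict the code of the other view."""
+    log_p1 = F.log_softmax(z1 @ prototypes.t() / temperature, dim=1)
+    log_p2 = F.log_softmax(z2 @ prototypes.t() / temperature, dim=1)
+    return -0.5 * ((q2 * log_p1).sum(dim=1).mean() +
+                   (q1 * log_p2).sum(dim=1).mean())
+```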
    + +
    + +We present Momentum Contrast (MoCo) for unsupervised visual representation learning. From a perspective on contrastive learning as dictionary look-up, we build a dynamic dictionary with a queue and a moving-averaged encoder. This enables building a large and consistent dictionary on-the-fly that facilitates contrastive unsupervised learning. MoCo provides competitive results under the common linear protocol on ImageNet classification. More importantly, the representations learned by MoCo transfer well to downstream tasks. MoCo can outperform its supervised pre-training counterpart in 7 detection/segmentation tasks on PASCAL VOC, COCO, and other datasets, sometimes surpassing it by large margins. This suggests that the gap between unsupervised and supervised representation learning has been largely closed in many vision tasks. + +
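+The dynamic dictionary described above reduces to two pieces of bookkeeping: a key encoder that is a momentum (moving-average) copy of the query encoder, and a fixed-size queue of keys whose oldest mini-batch is discarded when a new one is enqueued. The sketch below is illustrative only (hypothetical helper names, single GPU, no distributed gather), not the released MoCo code.
+
+```python
+# Hypothetical MoCo-style bookkeeping (illustrative, not facebookresearch/moco).
+import torch
+
+
+@torch.no_grad()
+def momentum_update(encoder_q, encoder_k, m=0.999):
+    # The key encoder slowly tracks the query encoder.
+    for p_q, p_k in zip(encoder_q.parameters(), encoder_k.parameters()):
+        p_k.data.mul_(m).add_(p_q.data, alpha=1.0 - m)
+
+
+@torch.no_grad()
+def dequeue_and_enqueue(queue, queue_ptr, keys):
+    # queue: (D, K) dictionary of keys; queue_ptr: 1-element long tensor.
+    # Assumes K is divisible by the batch size so the slice never wraps.
+    batch_size = keys.shape[0]
+    ptr = int(queue_ptr)
+    queue[:, ptr:ptr + batch_size] = keys.t()
+    queue_ptr[0] = (ptr + batch_size) % queue.shape[1]
+```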
    + +
+ +## Usage + +To use a backbone pretrained with a self-supervised algorithm, there are two steps to do: + +1. Download the model and convert it to a PyTorch-style checkpoint supported by MMDetection +2. Modify the config and change the training settings accordingly + +### Convert model + +For more general usage, we also provide the script `selfsup2mmdet.py` in the tools directory to convert the keys of models pretrained by different self-supervised methods to PyTorch-style checkpoints used in MMDetection. + +```bash +python -u tools/model_converters/selfsup2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH} --selfsup ${method} +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +For example, to use a ResNet-50 backbone released by MoCo, you can download it from [here](https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v2_800ep/moco_v2_800ep_pretrain.pth.tar) and use the following command: + +```bash +python -u tools/model_converters/selfsup2mmdet.py ./moco_v2_800ep_pretrain.pth.tar mocov2_r50_800ep_pretrain.pth --selfsup moco +``` + +To use the ResNet-50 backbone released by SwAV, you can download it from [here](https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar) and convert it with the same script. + +### Modify config + +The backbone requires SyncBN, and the `frozen_stages` setting needs to be changed. A config that uses the MoCo backbone is shown below: + +```python +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + pretrained='./mocov2_r50_800ep_pretrain.pth', + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False)) + +``` + +## Results and Models + +| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------: | :-----------------------------------------------------------------: | :-----: | :------------: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Mask RCNN | [R50 by MoCo v2](./mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py) | pytorch | 1x | | | 38.0 | 34.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco_20210604_114614-a8b63483.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco_20210604_114614.log.json) | +| Mask RCNN | [R50 by MoCo v2](./mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py) | pytorch | multi-scale 2x | | | 40.8 | 36.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco_20210605_163717-d95df20a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco_20210605_163717.log.json) | +| Mask RCNN | [R50 by SwAV](./mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py) | pytorch | 1x | | | 39.1 | 35.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco/mask_rcnn_r50_fpn_swav-pretrain_1x_coco_20210604_114640-7b9baf28.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco/mask_rcnn_r50_fpn_swav-pretrain_1x_coco_20210604_114640.log.json) | +| Mask RCNN | [R50 by SwAV](./mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py) | pytorch | multi-scale 2x | | | 41.3 | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco_20210605_163717-08e26fca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco_20210605_163717.log.json) | + +### Notice + +1. We only provide single-scale 1x and multi-scale 2x configs as examples to show how to use backbones trained by self-supervised algorithms. We will try to reproduce the results in their corresponding paper using the released backbone in the future. Please stay tuned. + +## Citation + +We support to apply the backbone models pre-trained by different self-supervised methods in detection systems and provide their results on Mask R-CNN. + +The pre-trained models are converted from [MoCo](https://github.com/facebookresearch/moco) and downloaded from [SwAV](https://github.com/facebookresearch/swav). 
+ +For SwAV, please cite + +```latex +@article{caron2020unsupervised, + title={Unsupervised Learning of Visual Features by Contrasting Cluster Assignments}, + author={Caron, Mathilde and Misra, Ishan and Mairal, Julien and Goyal, Priya and Bojanowski, Piotr and Joulin, Armand}, + booktitle={Proceedings of Advances in Neural Information Processing Systems (NeurIPS)}, + year={2020} +} +``` + +For MoCo, please cite + +```latex +@Article{he2019moco, + author = {Kaiming He and Haoqi Fan and Yuxin Wu and Saining Xie and Ross Girshick}, + title = {Momentum Contrast for Unsupervised Visual Representation Learning}, + journal = {arXiv preprint arXiv:1911.05722}, + year = {2019}, +} +@Article{chen2020mocov2, + author = {Xinlei Chen and Haoqi Fan and Ross Girshick and Kaiming He}, + title = {Improved Baselines with Momentum Contrastive Learning}, + journal = {arXiv preprint arXiv:2003.04297}, + year = {2020}, +} +``` diff --git a/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py b/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py new file mode 100644 index 0000000..f1e0615 --- /dev/null +++ b/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + init_cfg=dict( + type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth'))) diff --git a/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py b/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py new file mode 100644 index 0000000..09aa156 --- /dev/null +++ b/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + init_cfg=dict( + type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth'))) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] + +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py b/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py new file mode 100644 index 0000000..f92a345 --- /dev/null +++ b/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + 
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + init_cfg=dict( + type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar'))) diff --git a/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py b/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py new file mode 100644 index 0000000..fe47361 --- /dev/null +++ b/downstream/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + frozen_stages=0, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + init_cfg=dict( + type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar'))) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] + +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/simple_copy_paste/README.md b/downstream/mmdetection/configs/simple_copy_paste/README.md new file mode 100644 index 0000000..46162aa --- /dev/null +++ b/downstream/mmdetection/configs/simple_copy_paste/README.md @@ -0,0 +1,38 @@ +# SimpleCopyPaste + +> [Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation](https://arxiv.org/abs/2012.07177) + + + +## Abstract + +Building instance segmentation models that are data-efficient and can handle rare object categories is an important challenge in computer vision. Leveraging data augmentations is a promising direction towards addressing this challenge. Here, we perform a systematic study of the Copy-Paste augmentation (\[13, 12\]) for instance segmentation where we randomly paste objects onto an image. Prior studies on Copy-Paste relied on modeling the surrounding visual context for pasting the objects. However, we find that the simple mechanism of pasting objects randomly is good enough and can provide solid gains on top of strong baselines. Furthermore, we show Copy-Paste is additive with semi-supervised methods that leverage extra data through pseudo labeling (e.g. self-training). On COCO instance segmentation, we achieve 49.1 mask AP and 57.3 box AP, an improvement of +0.6 mask AP and +1.5 box AP over the previous state-of-the-art. We further demonstrate that Copy-Paste can lead to significant improvements on the LVIS benchmark. Our baseline model outperforms the LVIS 2020 Challenge winning entry by +3.6 mask AP on rare categories. + +
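+The core operation, pasting a random subset of objects from one training image onto another, can be sketched with plain NumPy. This is only an illustrative sketch under simplifying assumptions (images of identical size, hypothetical helper name); the actual `CopyPaste` transform in MMDetection additionally rescales the source image, updates boxes and labels, and clips the occluded parts of the destination masks.
+
+```python
+# Illustrative copy-paste sketch (hypothetical helper, not mmdet's CopyPaste).
+import numpy as np
+
+
+def simple_copy_paste(dst_img, src_img, src_masks, keep_prob=0.5, rng=None):
+    """dst_img, src_img: (H, W, 3) uint8 images of identical size.
+    src_masks: (N, H, W) boolean instance masks from the source image.
+    Returns the augmented image and the pasted region; the pasted region
+    should also be subtracted from the destination ground-truth masks."""
+    rng = rng or np.random.default_rng()
+    src_masks = np.asarray(src_masks, dtype=bool)
+    keep = rng.random(src_masks.shape[0]) < keep_prob
+    out = dst_img.copy()
+    pasted_region = np.zeros(dst_img.shape[:2], dtype=bool)
+    for mask in src_masks[keep]:
+        out[mask] = src_img[mask]      # paste the object's pixels
+        pasted_region |= mask
+    return out, pasted_region
+```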
    + +
    + +## Results and Models + +### Mask R-CNN with Standard Scale Jittering (SSJ) and Simple Copy-Paste(SCP) + +Standard Scale Jittering(SSJ) resizes and crops an image with a resize range of 0.8 to 1.25 of the original image size, and Simple Copy-Paste(SCP) selects a random subset of objects from one of the images and pastes them onto the other image. + +| Backbone | Training schedule | Augmentation | batch size | box AP | mask AP | Config | Download | +| :------: | :---------------: | :----------: | :--------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | 90k | SSJ | 64 | 43.3 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409.log.json) | +| R-50 | 90k | SSJ+SCP | 64 | 43.8 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307.log.json) | +| R-50 | 270k | SSJ | 64 | 43.5 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940.log.json) | +| R-50 | 270k | SSJ+SCP | 64 | 45.1 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229.log.json) | + +## Citation + +```latex +@inproceedings{ghiasi2021simple, + title={Simple copy-paste is a strong data augmentation method for instance segmentation}, + author={Ghiasi, Golnaz and Cui, Yin and Srinivas, Aravind and Qian, Rui and Lin, Tsung-Yi and Cubuk, Ekin D and Le, Quoc V and Zoph, Barret}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={2918--2928}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py b/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py new file mode 100644 index 0000000..d0ce917 --- /dev/null +++ b/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs + '../common/ssj_270k_coco_instance.py', +] + +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed. +head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), # leads to 0.1+ mAP + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py b/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py new file mode 100644 index 0000000..1eee95f --- /dev/null +++ b/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py @@ -0,0 +1,7 @@ +_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py' + +# lr steps at [0.9, 0.95, 0.975] of the maximum iterations +lr_config = dict( + warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750]) +# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs +runner = dict(type='IterBasedRunner', max_iters=90000) diff --git a/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py b/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py new file mode 100644 index 0000000..bd28ddd --- /dev/null +++ b/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs + '../common/ssj_scp_270k_coco_instance.py' +] + +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed. 
+head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), # leads to 0.1+ mAP + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py b/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py new file mode 100644 index 0000000..b632c13 --- /dev/null +++ b/downstream/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py @@ -0,0 +1,7 @@ +_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py' + +# lr steps at [0.9, 0.95, 0.975] of the maximum iterations +lr_config = dict( + warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750]) +# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs +runner = dict(type='IterBasedRunner', max_iters=90000) diff --git a/downstream/mmdetection/configs/simple_copy_paste/metafile.yml b/downstream/mmdetection/configs/simple_copy_paste/metafile.yml new file mode 100644 index 0000000..bb6106c --- /dev/null +++ b/downstream/mmdetection/configs/simple_copy_paste/metafile.yml @@ -0,0 +1,92 @@ +Collections: + - Name: SimpleCopyPaste + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 32x A100 GPUs + Architecture: + - Softmax + - RPN + - Convolution + - Dense Connections + - FPN + - ResNet + - RoIAlign + Paper: + URL: https://arxiv.org/abs/2012.07177 + Title: "Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" + README: configs/simple_copy_paste/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/datasets/pipelines/transforms.py#L2762 + Version: v2.25.0 + +Models: + - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco + In Collection: SimpleCopyPaste + Config: configs/simplecopypaste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py + Metadata: + Training Memory (GB): 7.2 + Iterations: 270000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth + + - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco + In Collection: SimpleCopyPaste + Config: configs/simplecopypaste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py + Metadata: + Training Memory (GB): 7.2 + Iterations: 90000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth + + - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco + In Collection: SimpleCopyPaste + Config: configs/simplecopypaste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py + Metadata: + 
Training Memory (GB): 7.2 + Iterations: 270000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth + + - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco + In Collection: SimpleCopyPaste + Config: configs/simplecopypaste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py + Metadata: + Training Memory (GB): 7.2 + Iterations: 90000 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth diff --git a/downstream/mmdetection/configs/solo/README.md b/downstream/mmdetection/configs/solo/README.md new file mode 100644 index 0000000..4a36676 --- /dev/null +++ b/downstream/mmdetection/configs/solo/README.md @@ -0,0 +1,54 @@ +# SOLO + +> [SOLO: Segmenting Objects by Locations](https://arxiv.org/abs/1912.04488) + + + +## Abstract + +We present a new, embarrassingly simple approach to instance segmentation in images. Compared to many other dense prediction tasks, e.g., semantic segmentation, it is the arbitrary number of instances that have made instance segmentation much more challenging. In order to predict a mask for each instance, mainstream approaches either follow the 'detect-thensegment' strategy as used by Mask R-CNN, or predict category masks first then use clustering techniques to group pixels into individual instances. We view the task of instance segmentation from a completely new perspective by introducing the notion of "instance categories", which assigns categories to each pixel within an instance according to the instance's location and size, thus nicely converting instance mask segmentation into a classification-solvable problem. Now instance segmentation is decomposed into two classification tasks. We demonstrate a much simpler and flexible instance segmentation framework with strong performance, achieving on par accuracy with Mask R-CNN and outperforming recent singleshot instance segmenters in accuracy. We hope that this very simple and strong framework can serve as a baseline for many instance-level recognition tasks besides instance segmentation. + +
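+The "instance category" idea above can be made concrete with a tiny sketch: the image is divided into an S x S grid, and an object whose center falls into a given cell is segmented by the mask channel that belongs to that cell. The helper below is hypothetical and heavily simplified; the real SOLO head also distributes objects across FPN levels by scale range and uses a small center region rather than a single center point.
+
+```python
+# Illustrative sketch of SOLO's location-based "instance category"
+# (hypothetical function, not mmdet's SOLOHead).
+def location_to_category(cx, cy, img_w, img_h, num_grid=40):
+    """Grid cell (as a flat mask-channel index) owning an object centered
+    at (cx, cy) in an image of size (img_w, img_h)."""
+    col = min(int(cx / img_w * num_grid), num_grid - 1)
+    row = min(int(cy / img_h * num_grid), num_grid - 1)
+    return row * num_grid + col
+
+
+# e.g. with a 40 x 40 grid (the finest level in the configs below), an object
+# centered at (600, 200) in a 1333 x 800 image owns one of 1600 mask channels.
+channel = location_to_category(600, 200, img_w=1333, img_h=800)
+```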
    + +
    + +## Results and Models + +### SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | 1x | 8.0 | 14.0 | 33.1 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055.log.json) | +| R-50 | pytorch | Y | 3x | 7.4 | 14.0 | 35.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353.log.json) | + +### Decoupled SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | 1x | 7.8 | 12.5 | 33.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348.log.json) | +| R-50 | pytorch | Y | 3x | 7.9 | 12.5 | 36.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504.log.json) | + +- Decoupled SOLO has a decoupled head which is different from SOLO head. + Decoupled SOLO serves as an efficient and equivalent variant in accuracy + of SOLO. Please refer to the corresponding config files for details. 
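+To give an intuition for the note above, based on the Decoupled SOLO formulation in the cited paper: the decoupled head replaces the S * S mask maps of vanilla SOLO with S "X-branch" maps and S "Y-branch" maps, and recovers the mask of grid cell (row, col) as their element-wise product, which is what makes it cheaper at comparable accuracy. The snippet below is a hypothetical sketch of that combination only, not mmdet's `DecoupledSOLOHead`.
+
+```python
+# Hypothetical sketch of the decoupled mask combination (illustrative only).
+import torch
+
+
+def decoupled_mask(x_maps, y_maps, row, col):
+    """x_maps, y_maps: (S, H, W) sigmoid outputs of the two branches.
+    The product plays the role of the (row * S + col)-th map of vanilla SOLO."""
+    return x_maps[col] * y_maps[row]
+
+
+# Usage: with S = 40, the head predicts 80 maps instead of 1600.
+x_maps = torch.rand(40, 168, 168)   # stand-ins for sigmoid mask maps
+y_maps = torch.rand(40, 168, 168)
+mask_for_cell = decoupled_mask(x_maps, y_maps, row=3, col=17)
+```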
+ +### Decoupled Light SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | Y | 3x | 2.2 | 31.2 | 32.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703.log.json) | + +- Decoupled Light SOLO using decoupled structure similar to Decoupled + SOLO head, with light-weight head and smaller input size, Please refer + to the corresponding config files for details. + +## Citation + +```latex +@inproceedings{wang2020solo, + title = {{SOLO}: Segmenting Objects by Locations}, + author = {Wang, Xinlong and Kong, Tao and Shen, Chunhua and Jiang, Yuning and Li, Lei}, + booktitle = {Proc. Eur. Conf. Computer Vision (ECCV)}, + year = {2020} +} +``` diff --git a/downstream/mmdetection/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py b/downstream/mmdetection/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py new file mode 100644 index 0000000..101f8f1 --- /dev/null +++ b/downstream/mmdetection/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py @@ -0,0 +1,63 @@ +_base_ = './decoupled_solo_r50_fpn_3x_coco.py' + +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOLightHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384), + (852, 352)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(852, 512), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git 
a/downstream/mmdetection/configs/solo/decoupled_solo_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/solo/decoupled_solo_r50_fpn_1x_coco.py new file mode 100644 index 0000000..b611cdf --- /dev/null +++ b/downstream/mmdetection/configs/solo/decoupled_solo_r50_fpn_1x_coco.py @@ -0,0 +1,28 @@ +_base_ = [ + './solo_r50_fpn_1x_coco.py', +] +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) + +optimizer = dict(type='SGD', lr=0.01) diff --git a/downstream/mmdetection/configs/solo/decoupled_solo_r50_fpn_3x_coco.py b/downstream/mmdetection/configs/solo/decoupled_solo_r50_fpn_3x_coco.py new file mode 100644 index 0000000..4a8c19d --- /dev/null +++ b/downstream/mmdetection/configs/solo/decoupled_solo_r50_fpn_3x_coco.py @@ -0,0 +1,25 @@ +_base_ = './solo_r50_fpn_3x_coco.py' + +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) diff --git a/downstream/mmdetection/configs/solo/metafile.yml b/downstream/mmdetection/configs/solo/metafile.yml new file mode 100644 index 0000000..b6244e8 --- /dev/null +++ b/downstream/mmdetection/configs/solo/metafile.yml @@ -0,0 +1,115 @@ +Collections: + - Name: SOLO + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Convolution + - ResNet + Paper: https://arxiv.org/abs/1912.04488 + README: configs/solo/README.md + +Models: + - Name: decoupled_solo_r50_fpn_1x_coco + In Collection: SOLO + Config: configs/solo/decoupled_solo_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 12 + inference time (ms/im): + - value: 116.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 33.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth + + - Name: decoupled_solo_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/decoupled_solo_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 7.9 + Epochs: 36 + inference time (ms/im): + - value: 117.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth + + - 
Name: decoupled_solo_light_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 2.2 + Epochs: 36 + inference time (ms/im): + - value: 35.0 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (852, 512) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 32.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth + + - Name: solo_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/solo_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 36 + inference time (ms/im): + - value: 94.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth + + - Name: solo_r50_fpn_1x_coco + In Collection: SOLO + Config: configs/solo/solo_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + Epochs: 12 + inference time (ms/im): + - value: 95.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 33.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth diff --git a/downstream/mmdetection/configs/solo/solo_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/solo/solo_r50_fpn_1x_coco.py new file mode 100644 index 0000000..9093a50 --- /dev/null +++ b/downstream/mmdetection/configs/solo/solo_r50_fpn_1x_coco.py @@ -0,0 +1,53 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='SOLO', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + num_outs=5), + mask_head=dict( + type='SOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), + # model training and testing settings + test_cfg=dict( + nms_pre=500, + score_thr=0.1, + mask_thr=0.5, + filter_thr=0.05, + kernel='gaussian', # gaussian/linear + sigma=2.0, + max_per_img=100)) + +# optimizer +optimizer = dict(type='SGD', lr=0.01) diff --git a/downstream/mmdetection/configs/solo/solo_r50_fpn_3x_coco.py b/downstream/mmdetection/configs/solo/solo_r50_fpn_3x_coco.py new file mode 100644 index 0000000..52302cd --- /dev/null +++ b/downstream/mmdetection/configs/solo/solo_r50_fpn_3x_coco.py @@ -0,0 +1,28 @@ +_base_ = './solo_r50_fpn_1x_coco.py' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 
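+# 3x schedule: multi-scale training (short side sampled from 640-800 by the
+# 'value'-mode Resize below), 36 epochs with LR steps at epochs 27 and 33.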
+train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), + (1333, 672), (1333, 640)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline)) + +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[27, 33]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/solov2/README.md b/downstream/mmdetection/configs/solov2/README.md new file mode 100644 index 0000000..2ffe70f --- /dev/null +++ b/downstream/mmdetection/configs/solov2/README.md @@ -0,0 +1,59 @@ +# SOLOv2 + +> [SOLOv2: Dynamic and Fast Instance Segmentation](https://arxiv.org/abs/2003.10152) + + + +## Abstract + +In this work, we aim at building a simple, direct, and fast instance segmentation +framework with strong performance. We follow the principle of the SOLO method of +Wang et al. "SOLO: segmenting objects by locations". Importantly, we take one +step further by dynamically learning the mask head of the object segmenter such +that the mask head is conditioned on the location. Specifically, the mask branch +is decoupled into a mask kernel branch and mask feature branch, which are +responsible for learning the convolution kernel and the convolved features +respectively. Moreover, we propose Matrix NMS (non maximum suppression) to +significantly reduce the inference time overhead due to NMS of masks. Our +Matrix NMS performs NMS with parallel matrix operations in one shot, and +yields better results. We demonstrate a simple direct instance segmentation +system, outperforming a few state-of-the-art methods in both speed and accuracy. +A light-weight version of SOLOv2 executes at 31.3 FPS and yields 37.1% AP. +Moreover, our state-of-the-art results in object detection (from our mask byproduct) +and panoptic segmentation show the potential to serve as a new strong baseline +for many instance-level recognition tasks besides instance segmentation. + +
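+Matrix NMS, mentioned above, corresponds to the `kernel='gaussian'` / `sigma=2.0` entries in `test_cfg` of the configs below. A class-agnostic sketch of the score-decay computation, following the paper's description rather than the exact MMDetection implementation:
+
+```python
+import torch
+
+def matrix_nms(masks, scores, sigma=2.0):
+    """masks: (N, H*W) float tensor of 0/1 values, scores: (N,),
+    both sorted by descending score; returns soft-decayed scores."""
+    n = masks.size(0)
+    inter = masks @ masks.t()                        # pairwise intersections
+    areas = masks.sum(dim=1).expand(n, n)
+    iou = (inter / (areas + areas.t() - inter)).triu(diagonal=1)
+    # for each mask, its largest IoU with any higher-scored mask
+    cmax = iou.max(dim=0).values.expand(n, n).t()
+    decay = torch.exp(-sigma * (iou ** 2 - cmax ** 2)).min(dim=0).values
+    return scores * decay                            # no hard suppression
+```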
    + +## Results and Models + +### SOLOv2 + +| Backbone | Style | MS train | Lr schd | Mem (GB) | mask AP | Config | Download | +| :--------: | :-----: | :------: | :-----: | :------: | :-----: | :-----------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | 1x | 5.1 | 34.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858.log.json) | +| R-50 | pytorch | Y | 3x | 5.1 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r50_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856.log.json) | +| R-101 | pytorch | Y | 3x | 6.9 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r101_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119.log.json) | +| R-101(DCN) | pytorch | Y | 3x | 7.1 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734.log.json) | +| X-101(DCN) | pytorch | Y | 3x | 11.3 | 42.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337.log.json) | + +### Light SOLOv2 + +| Backbone | Style | MS train | Lr schd | Mem (GB) | mask AP | Config | Download | +| :------: | :-----: | :------: | :-----: | :------: | :-----: | :------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-18 | pytorch | Y | 3x | 9.1 | 29.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r18_fpn_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717.log.json) | +| R-34 | pytorch | Y | 3x | 9.3 | 31.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r34_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839-e51659d3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839.log.json) | +| R-50 | pytorch | Y | 3x | 9.9 | 33.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r50_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256.log.json) | + +## Citation + +```latex +@article{wang2020solov2, + title={SOLOv2: Dynamic and Fast Instance Segmentation}, + author={Wang, Xinlong and Zhang, Rufeng and Kong, Tao and Li, Lei and Shen, Chunhua}, + journal={Proc. Advances in Neural Information Processing Systems (NeurIPS)}, + year={2020} +} +``` diff --git a/downstream/mmdetection/configs/solov2/metafile.yml b/downstream/mmdetection/configs/solov2/metafile.yml new file mode 100644 index 0000000..656f66f --- /dev/null +++ b/downstream/mmdetection/configs/solov2/metafile.yml @@ -0,0 +1,119 @@ +Collections: + - Name: SOLOv2 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x A100 GPUs + Architecture: + - FPN + - Convolution + - ResNet + Paper: https://arxiv.org/abs/2003.10152 + README: configs/solov2/README.md + +Models: + - Name: solov2_r50_fpn_1x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 5.1 + Epochs: 12 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth + + - Name: solov2_r50_fpn_3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 5.1 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth + + - Name: solov2_r101_fpn_3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_r101_fpn_3x_coco.py + Metadata: + Training Memory (GB): 6.9 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth + + - Name: solov2_r101_dcn_fpn_3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_r101_dcn_fpn_3x_coco.py + Metadata: + Training Memory (GB): 7.1 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.2 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth + + - Name: solov2_x101_dcn_fpn_3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_x101_dcn_fpn_3x_coco.py + Metadata: + Training Memory (GB): 11.3 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth + + - Name: solov2_light_r18_fpn_3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_light_r18_fpn_3x_coco.py + Metadata: + Training Memory (GB): 9.1 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 29.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth + + - Name: solov2_light_r34_fpn_3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_light_r34_fpn_3x_coco.py + Metadata: + Training Memory (GB): 9.3 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 31.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839-e51659d3.pth + + - Name: solov2_light_r50_fpn_3x_coco + In Collection: SOLOv2 + Config: configs/solov2/solov2_light_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 9.9 + Epochs: 36 + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 33.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth diff --git a/downstream/mmdetection/configs/solov2/solov2_light_r18_fpn_3x_coco.py b/downstream/mmdetection/configs/solov2/solov2_light_r18_fpn_3x_coco.py new file mode 100644 index 0000000..6fb33b0 --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_light_r18_fpn_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'solov2_light_r50_fpn_3x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/downstream/mmdetection/configs/solov2/solov2_light_r34_fpn_3x_coco.py b/downstream/mmdetection/configs/solov2/solov2_light_r34_fpn_3x_coco.py new file mode 100644 index 0000000..ea082a1 --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_light_r34_fpn_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'solov2_light_r50_fpn_3x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=34, init_cfg=dict(checkpoint='torchvision://resnet34')), + neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/downstream/mmdetection/configs/solov2/solov2_light_r50_dcn_fpn_3x_coco.py b/downstream/mmdetection/configs/solov2/solov2_light_r50_dcn_fpn_3x_coco.py new file mode 100644 index 0000000..4d758e2 --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_light_r50_dcn_fpn_3x_coco.py @@ -0,0 +1,62 @@ +_base_ = 'solov2_r50_fpn_3x_coco.py' + +# model settings +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + mask_head=dict( + feat_channels=256, + stacked_convs=3, + scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), + mask_feature_head=dict(out_channels=128), + dcn_cfg=dict(type='DCNv2'), + 
dcn_apply_to_all_conv=False)) # light solov2 head + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[27, 33]) +runner = dict(type='EpochBasedRunner', max_epochs=36) + +# data +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), + (768, 352)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(448, 768), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/solov2/solov2_light_r50_fpn_3x_coco.py b/downstream/mmdetection/configs/solov2/solov2_light_r50_fpn_3x_coco.py new file mode 100644 index 0000000..e08f1db --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_light_r50_fpn_3x_coco.py @@ -0,0 +1,57 @@ +_base_ = 'solov2_r50_fpn_1x_coco.py' + +# model settings +model = dict( + mask_head=dict( + stacked_convs=2, + feat_channels=256, + scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)), + mask_feature_head=dict(out_channels=128))) + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[27, 33]) +runner = dict(type='EpochBasedRunner', max_epochs=36) + +# data +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), + (768, 352)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(448, 768), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py b/downstream/mmdetection/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py new file mode 100644 index 0000000..1594118 --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py @@ -0,0 +1,13 @@ 
+_base_ = 'solov2_r50_fpn_3x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(checkpoint='torchvision://resnet101'), + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + mask_head=dict( + mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), + dcn_cfg=dict(type='DCNv2'), + dcn_apply_to_all_conv=True)) diff --git a/downstream/mmdetection/configs/solov2/solov2_r101_fpn_3x_coco.py b/downstream/mmdetection/configs/solov2/solov2_r101_fpn_3x_coco.py new file mode 100644 index 0000000..6c248e5 --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_r101_fpn_3x_coco.py @@ -0,0 +1,6 @@ +_base_ = 'solov2_r50_fpn_3x_coco.py' + +# model settings +model = dict( + backbone=dict( + depth=101, init_cfg=dict(checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/solov2/solov2_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/solov2/solov2_r50_fpn_1x_coco.py new file mode 100644 index 0000000..9aee571 --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_r50_fpn_1x_coco.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='SOLOv2', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + num_outs=5), + mask_head=dict( + type='SOLOV2Head', + num_classes=80, + in_channels=256, + feat_channels=512, + stacked_convs=4, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + mask_feature_head=dict( + feat_channels=128, + start_level=0, + end_level=3, + out_channels=256, + mask_stride=4, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), + loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0)), + # model training and testing settings + test_cfg=dict( + nms_pre=500, + score_thr=0.1, + mask_thr=0.5, + filter_thr=0.05, + kernel='gaussian', # gaussian/linear + sigma=2.0, + max_per_img=100)) + +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/downstream/mmdetection/configs/solov2/solov2_r50_fpn_3x_coco.py b/downstream/mmdetection/configs/solov2/solov2_r50_fpn_3x_coco.py new file mode 100644 index 0000000..640c730 --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_r50_fpn_3x_coco.py @@ -0,0 +1,28 @@ +_base_ = 'solov2_r50_fpn_1x_coco.py' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), + (1333, 672), (1333, 640)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', 
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline)) + +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[27, 33]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py b/downstream/mmdetection/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py new file mode 100644 index 0000000..6115fed --- /dev/null +++ b/downstream/mmdetection/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py @@ -0,0 +1,17 @@ +_base_ = 'solov2_r50_fpn_3x_coco.py' + +# model settings +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')), + mask_head=dict( + mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), + dcn_cfg=dict(type='DCNv2'), + dcn_apply_to_all_conv=True)) diff --git a/downstream/mmdetection/configs/sparse_rcnn/README.md b/downstream/mmdetection/configs/sparse_rcnn/README.md new file mode 100644 index 0000000..d7912e0 --- /dev/null +++ b/downstream/mmdetection/configs/sparse_rcnn/README.md @@ -0,0 +1,38 @@ +# Sparse R-CNN + +> [Sparse R-CNN: End-to-End Object Detection with Learnable Proposals](https://arxiv.org/abs/2011.12450) + + + +## Abstract + +We present Sparse R-CNN, a purely sparse method for object detection in images. Existing works on object detection heavily rely on dense object candidates, such as k anchor boxes pre-defined on all grids of image feature map of size H×W. In our method, however, a fixed sparse set of learned object proposals, total length of N, are provided to object recognition head to perform classification and location. By eliminating HWk (up to hundreds of thousands) hand-designed object candidates to N (e.g. 100) learnable proposals, Sparse R-CNN completely avoids all efforts related to object candidates design and many-to-one label assignment. More importantly, final predictions are directly output without non-maximum suppression post-procedure. Sparse R-CNN demonstrates accuracy, run-time and training convergence performance on par with the well-established detector baselines on the challenging COCO dataset, e.g., achieving 45.0 AP in standard 3× training schedule and running at 22 fps using ResNet-50 FPN model. We hope our work could inspire re-thinking the convention of dense prior in object detectors. + +
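+The "fixed sparse set of learned object proposals" is what `EmbeddingRPNHead` with `num_proposals=100` supplies in the configs below; a hypothetical minimal sketch of the idea, not mmdet's implementation:
+
+```python
+import torch
+import torch.nn as nn
+
+class LearnableProposals(nn.Module):
+    """A fixed set of N proposal boxes and features learned as parameters."""
+
+    def __init__(self, num_proposals=100, feat_dim=256):
+        super().__init__()
+        # (x, y, w, h) in normalized image coordinates, refined by the ROI stages
+        self.boxes = nn.Parameter(torch.rand(num_proposals, 4))
+        self.feats = nn.Parameter(torch.randn(num_proposals, feat_dim))
+
+    def forward(self, batch_size):
+        return (self.boxes.expand(batch_size, -1, -1),
+                self.feats.expand(batch_size, -1, -1))
+```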
    + +## Results and Models + +| Model | Backbone | Style | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | Config | Download | +| :----------: | :-------: | :-----: | :-----: | :-----------------: | :---------: | :--------: | :----: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Sparse R-CNN | R-50-FPN | pytorch | 1x | 100 | False | False | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.log.json) | +| Sparse R-CNN | R-50-FPN | pytorch | 3x | 100 | True | False | 42.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.log.json) | +| Sparse R-CNN | R-50-FPN | pytorch | 3x | 300 | True | True | 45.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.log.json) | +| Sparse R-CNN | R-101-FPN | pytorch | 3x | 100 | True | False | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.log.json) | +| Sparse R-CNN | R-101-FPN | pytorch | 3x | 300 | True | True | 46.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.log.json) | + +### Notes + +We observe about 0.3 AP noise especially when using ResNet-101 as the backbone. + +## Citation + +```latex +@article{peize2020sparse, + title = {{SparseR-CNN}: End-to-End Object Detection with Learnable Proposals}, + author = {Peize Sun and Rufeng Zhang and Yi Jiang and Tao Kong and Chenfeng Xu and Wei Zhan and Masayoshi Tomizuka and Lei Li and Zehuan Yuan and Changhu Wang and Ping Luo}, + journal = {arXiv preprint arXiv:2011.12450}, + year = {2020} +} +``` diff --git a/downstream/mmdetection/configs/sparse_rcnn/metafile.yml b/downstream/mmdetection/configs/sparse_rcnn/metafile.yml new file mode 100644 index 0000000..bb1273e --- /dev/null +++ b/downstream/mmdetection/configs/sparse_rcnn/metafile.yml @@ -0,0 +1,80 @@ +Collections: + - Name: Sparse R-CNN + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - Sparse R-CNN + Paper: + URL: https://arxiv.org/abs/2011.12450 + Title: 'Sparse R-CNN: End-to-End Object Detection with Learnable Proposals' + README: configs/sparse_rcnn/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/sparse_rcnn.py#L6 + Version: v2.9.0 + +Models: + - Name: sparse_rcnn_r50_fpn_1x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth + + - Name: sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth + + - Name: sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth + + - Name: sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.2 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth + + - Name: sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco + In Collection: Sparse R-CNN + Config: configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth diff --git a/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..de323bd --- /dev/null +++ b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..ab4c5f6 --- /dev/null +++ b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py new file mode 100644 index 0000000..b383ee4 --- /dev/null +++ b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py @@ -0,0 +1,95 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +num_stages = 6 +num_proposals = 100 +model = dict( + type='SparseRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + add_extra_convs='on_input', + num_outs=4), + rpn_head=dict( + type='EmbeddingRPNHead', + num_proposals=num_proposals, + proposal_feature_channel=256), + roi_head=dict( + type='SparseRoIHead', + num_stages=num_stages, + stage_loss_weights=[1] * num_stages, + proposal_feature_channel=256, + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='DIIHead', + num_classes=80, + num_ffn_fcs=2, + num_heads=8, + num_cls_fcs=1, + num_reg_fcs=3, + feedforward_channels=2048, + in_channels=256, + dropout=0.0, + ffn_act_cfg=dict(type='ReLU', inplace=True), + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, 
+ feat_channels=64, + out_channels=256, + input_feat_shape=7, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=False, + target_means=[0., 0., 0., 0.], + target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) + ]), + # training and testing settings + train_cfg=dict( + rpn=None, + rcnn=[ + dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0), + iou_cost=dict(type='IoUCost', iou_mode='giou', + weight=2.0)), + sampler=dict(type='PseudoSampler'), + pos_weight=1) for _ in range(num_stages) + ]), + test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals))) + +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001) +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[8, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..36f1d62 --- /dev/null +++ b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py @@ -0,0 +1,52 @@ +_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' +num_proposals = 300 +model = dict( + rpn_head=dict(num_proposals=num_proposals), + test_cfg=dict( + _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# augmentation strategy originates from DETR. 
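+# AutoAugment samples one of the two policies below per image: either a plain
+# multi-scale resize, or resize -> random crop -> resize, as in DETR.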
+train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[[ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ]]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..2fa2a80 --- /dev/null +++ b/downstream/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py @@ -0,0 +1,23 @@ +_base_ = './sparse_rcnn_r50_fpn_1x_coco.py' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, value) for value in min_values], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] + +data = dict(train=dict(pipeline=train_pipeline)) +lr_config = dict(policy='step', step=[27, 33]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/ssd/README.md b/downstream/mmdetection/configs/ssd/README.md new file mode 100644 index 0000000..463926b --- /dev/null +++ b/downstream/mmdetection/configs/ssd/README.md @@ -0,0 +1,62 @@ +# SSD + +> [SSD: Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325) + + + +## Abstract + +We present a method for detecting objects in images using a single deep neural network. Our approach, named SSD, discretizes the output space of bounding boxes into a set of default boxes over different aspect ratios and scales per feature map location. At prediction time, the network generates scores for the presence of each object category in each default box and produces adjustments to the box to better match the object shape. Additionally, the network combines predictions from multiple feature maps with different resolutions to naturally handle objects of various sizes. Our SSD model is simple relative to methods that require object proposals because it completely eliminates proposal generation and subsequent pixel or feature resampling stage and encapsulates all computation in a single network. 
This makes SSD easy to train and straightforward to integrate into systems that require a detection component. Experimental results on the PASCAL VOC, MS COCO, and ILSVRC datasets confirm that SSD has comparable accuracy to methods that utilize an additional object proposal step and is much faster, while providing a unified framework for both training and inference. Compared to other single stage methods, SSD has much better accuracy, even with a smaller input image size. For 300×300 input, SSD achieves 72.1% mAP on VOC2007 test at 58 FPS on a Nvidia Titan X and for 500×500 input, SSD achieves 75.1% mAP, outperforming a comparable state of the art Faster R-CNN model. + +
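+A quick back-of-the-envelope count of the default boxes the abstract refers to, using the standard SSD300 feature-map sizes and boxes-per-location from the paper (not values read from these configs):
+
+```python
+feat_sizes = [38, 19, 10, 5, 3, 1]   # SSD300 feature map resolutions
+boxes_per_loc = [4, 6, 6, 6, 4, 4]   # default boxes per location
+print(sum(s * s * b for s, b in zip(feat_sizes, boxes_per_loc)))  # 8732
+```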
    + +## Results and models of SSD + +| Backbone | Size | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :------: | :--: | :---: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| VGG16 | 300 | caffe | 120e | 9.9 | 43.7 | 25.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssd300_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428.log.json) | +| VGG16 | 512 | caffe | 120e | 19.4 | 30.7 | 29.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssd512_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849.log.json) | + +## Results and models of SSD-Lite + +| Backbone | Size | Training from scratch | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :--: | :-------------------: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| MobileNetV2 | 320 | yes | 600e | 4.0 | 69.9 | 21.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627.log.json) | + +## Notice + +### Compatibility + +In v2.14.0, [PR5291](https://github.com/open-mmlab/mmdetection/pull/5291) refactored SSD neck and head for more +flexible usage. If users want to use the SSD checkpoint trained in the older versions, we provide a scripts +`tools/model_converters/upgrade_ssd_version.py` to convert the model weights. + +```bash +python tools/model_converters/upgrade_ssd_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} + +``` + +- OLD_MODEL_PATH: the path to load the old version SSD model. +- NEW_MODEL_PATH: the path to save the converted model weights. + +### SSD-Lite training settings + +There are some differences between our implementation of MobileNetV2 SSD-Lite and the one in [TensorFlow 1.x detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md) . + +1. Use 320x320 as input size instead of 300x300. +2. The anchor sizes are different. +3. The C4 feature map is taken from the last layer of stage 4 instead of the middle of the block. +4. 
The model in TensorFlow1.x is trained on coco 2014 and validated on coco minival2014, but we trained and validated the model on coco 2017. The mAP on val2017 is usually a little lower than minival2014 (refer to the results in TensorFlow Object Detection API, e.g., MobileNetV2 SSD gets 22 mAP on minival2014 but 20.2 mAP on val2017). + +## Citation + +```latex +@article{Liu_2016, + title={SSD: Single Shot MultiBox Detector}, + journal={ECCV}, + author={Liu, Wei and Anguelov, Dragomir and Erhan, Dumitru and Szegedy, Christian and Reed, Scott and Fu, Cheng-Yang and Berg, Alexander C.}, + year={2016}, +} +``` diff --git a/downstream/mmdetection/configs/ssd/metafile.yml b/downstream/mmdetection/configs/ssd/metafile.yml new file mode 100644 index 0000000..b9ee79c --- /dev/null +++ b/downstream/mmdetection/configs/ssd/metafile.yml @@ -0,0 +1,78 @@ +Collections: + - Name: SSD + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - VGG + Paper: + URL: https://arxiv.org/abs/1512.02325 + Title: 'SSD: Single Shot MultiBox Detector' + README: configs/ssd/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.14.0/mmdet/models/dense_heads/ssd_head.py#L16 + Version: v2.14.0 + +Models: + - Name: ssd300_coco + In Collection: SSD + Config: configs/ssd/ssd300_coco.py + Metadata: + Training Memory (GB): 9.9 + inference time (ms/im): + - value: 22.88 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (300, 300) + Epochs: 120 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 25.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth + + - Name: ssd512_coco + In Collection: SSD + Config: configs/ssd/ssd512_coco.py + Metadata: + Training Memory (GB): 19.4 + inference time (ms/im): + - value: 32.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512, 512) + Epochs: 120 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 29.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth + + - Name: ssdlite_mobilenetv2_scratch_600e_coco + In Collection: SSD + Config: configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py + Metadata: + Training Memory (GB): 4.0 + inference time (ms/im): + - value: 14.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (320, 320) + Epochs: 600 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 21.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth diff --git a/downstream/mmdetection/configs/ssd/ssd300_coco.py b/downstream/mmdetection/configs/ssd/ssd300_coco.py new file mode 100644 index 0000000..1891bad --- /dev/null +++ b/downstream/mmdetection/configs/ssd/ssd300_coco.py @@ -0,0 +1,71 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' +] +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + 
ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(300, 300), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(300, 300), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=3, + train=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict(_delete_=True) +custom_hooks = [ + dict(type='NumClassCheckHook'), + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/ssd/ssd512_coco.py b/downstream/mmdetection/configs/ssd/ssd512_coco.py new file mode 100644 index 0000000..117777f --- /dev/null +++ b/downstream/mmdetection/configs/ssd/ssd512_coco.py @@ -0,0 +1,84 @@ +_base_ = 'ssd300_coco.py' +input_size = 512 +model = dict( + neck=dict( + out_channels=(512, 1024, 512, 256, 256, 256, 256), + level_strides=(2, 2, 2, 2, 1), + level_paddings=(1, 1, 1, 1, 1), + last_kernel_size=4), + bbox_head=dict( + in_channels=(512, 1024, 512, 256, 256, 256, 256), + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + input_size=input_size, + basesize_ratio_range=(0.1, 0.9), + strides=[8, 16, 32, 64, 128, 256, 512], + ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]))) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(512, 512), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(512, 512), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', 
keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=3, + train=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict(_delete_=True) +custom_hooks = [ + dict(type='NumClassCheckHook'), + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py b/downstream/mmdetection/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py new file mode 100644 index 0000000..929eb6c --- /dev/null +++ b/downstream/mmdetection/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py @@ -0,0 +1,150 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' +] + +model = dict( + type='SingleStageDetector', + backbone=dict( + type='MobileNetV2', + out_indices=(4, 7), + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + neck=dict( + type='SSDNeck', + in_channels=(96, 1280), + out_channels=(96, 1280, 512, 256, 256, 128), + level_strides=(2, 2, 2, 2), + level_paddings=(1, 1, 1, 1), + l2_norm_scale=None, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), + bbox_head=dict( + type='SSDHead', + in_channels=(96, 1280, 512, 256, 256, 128), + num_classes=80, + use_depthwise=True, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), + act_cfg=dict(type='ReLU6'), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), + + # set anchor size manually instead of using the predefined + # SSD300 setting. 
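+        # anchor sizes below are tailored to the 320x320 input
+        # (see the SSD-Lite notes in configs/ssd/README.md)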
+ anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + strides=[16, 32, 64, 107, 160, 320], + ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], + min_sizes=[48, 100, 150, 202, 253, 304], + max_sizes=[100, 150, 202, 253, 304, 320]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) +cudnn_benchmark = True + +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(320, 320), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=320), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(320, 320), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=320), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=24, + workers_per_gpu=4, + train=dict( + _delete_=True, + type='RepeatDataset', # use RepeatDataset to speed up training + times=5, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# optimizer +optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + min_lr=0) +runner = dict(type='EpochBasedRunner', max_epochs=120) + +# Avoid evaluation and saving weights too frequently +evaluation = dict(interval=5, metric='bbox') +checkpoint_config = dict(interval=5) +custom_hooks = [ + dict(type='NumClassCheckHook'), + dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (24 samples per GPU) +auto_scale_lr = dict(base_batch_size=192) diff --git a/downstream/mmdetection/configs/strong_baselines/README.md b/downstream/mmdetection/configs/strong_baselines/README.md new file mode 100644 index 0000000..aa2550d --- /dev/null +++ b/downstream/mmdetection/configs/strong_baselines/README.md @@ -0,0 +1,20 @@ +# Strong Baselines + + + +We train Mask R-CNN with large-scale jitter and longer schedule as strong baselines. +The modifications follow those in [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/configs/new_baselines). + +## Results and Models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------: | :----------------------: | +| R-50-FPN | pytorch | 50e | | | | | [config](./mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py) | [model](<>) \| [log](<>) | +| R-50-FPN | pytorch | 100e | | | | | [config](./mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py) | [model](<>) \| [log](<>) | +| R-50-FPN | caffe | 100e | | | 44.7 | 40.4 | [config](./mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py) | [model](<>) \| [log](<>) | +| R-50-FPN | caffe | 400e | | | | | [config](./mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py) | [model](<>) \| [log](<>) | + +## Notice + +When using large-scale jittering, there are sometimes empty proposals in the box and mask heads during training. +This requires MMSyncBN that allows empty tensors. Therefore, please use mmcv-full>=1.3.14 to train models supported in this directory. diff --git a/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py new file mode 100644 index 0000000..a40d6a0 --- /dev/null +++ b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py @@ -0,0 +1,80 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../common/lsj_100e_coco_instance.py' +] + +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed +# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. 
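+# Large-scale jitter occasionally produces empty proposal batches in the box and
+# mask heads; MMSyncBN tolerates empty tensors, which plain SyncBN does not
+# (see the note in configs/strong_baselines/README.md; requires mmcv-full>=1.3.14).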
+head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + backbone=dict( + frozen_stages=-1, + norm_eval=False, + norm_cfg=norm_cfg, + init_cfg=None, + style='caffe'), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) + +file_client_args = dict(backend='disk') +# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +image_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=image_size, + ratio_range=(0.1, 2.0), + multiscale_mode='range', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=image_size), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py new file mode 100644 index 0000000..31824eb --- /dev/null +++ b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py @@ -0,0 +1,2 @@ +_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' +fp16 = dict(loss_scale=512.) 
diff --git a/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py new file mode 100644 index 0000000..1211925 --- /dev/null +++ b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' + +# Use RepeatDataset to speed up training +# change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs) +data = dict(train=dict(times=4 * 4)) +lr_config = dict(warmup_iters=500 * 4) diff --git a/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py new file mode 100644 index 0000000..4a15d69 --- /dev/null +++ b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../common/lsj_100e_coco_instance.py' +] + +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed +# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. +head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + # the model is trained from scratch, so init_cfg is None + backbone=dict( + frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), # leads to 0.1+ mAP + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py new file mode 100644 index 0000000..7b97960 --- /dev/null +++ b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py @@ -0,0 +1,3 @@ +_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' +# use FP16 +fp16 = dict(loss_scale=512.) 
diff --git a/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py new file mode 100644 index 0000000..922579a --- /dev/null +++ b/downstream/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py @@ -0,0 +1,5 @@ +_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' + +# Use RepeatDataset to speed up training +# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) +data = dict(train=dict(times=2)) diff --git a/downstream/mmdetection/configs/swin/README.md b/downstream/mmdetection/configs/swin/README.md new file mode 100644 index 0000000..2136134 --- /dev/null +++ b/downstream/mmdetection/configs/swin/README.md @@ -0,0 +1,41 @@ +# Swin + +> [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) + + + +## Abstract + +This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. + +
    + +
    + +## Results and Models + +### Mask R-CNN + +| Backbone | Pretrain | Lr schd | Multi-scale crop | FP16 | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :---------: | :-----: | :--------------: | :--: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| Swin-T | ImageNet-1K | 1x | no | no | 7.6 | | 42.7 | 39.3 | [config](./mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937.log.json) | +| Swin-T | ImageNet-1K | 3x | yes | no | 10.2 | | 46.0 | 41.6 | [config](./mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725.log.json) | +| Swin-T | ImageNet-1K | 3x | yes | yes | 7.8 | | 46.0 | 41.7 | [config](./mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006.log.json) | +| Swin-S | ImageNet-1K | 3x | yes | yes | 11.9 | | 48.2 | 43.2 | [config](./mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808.log.json) | + +### Notice + +Please follow the example +of `retinanet_swin-t-p4-w7_fpn_1x_coco.py` when you want to combine Swin Transformer with +the one-stage detector. Because there is a layer norm at the outs of Swin Transformer, you must set `start_level` as 0 in FPN, so we have to set the `out_indices` of backbone as `[1,2,3]`. 
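+
+For reference, a minimal sketch of the overrides this implies (see `retinanet_swin-t-p4-w7_fpn_1x_coco.py` in this folder for the complete config):
+
+```python
+model = dict(
+    backbone=dict(
+        # request only the indices consumed by the FPN
+        out_indices=(1, 2, 3)),
+    neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
+```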
+ +## Citation + +```latex +@article{liu2021Swin, + title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + journal={arXiv preprint arXiv:2103.14030}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py b/downstream/mmdetection/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py new file mode 100644 index 0000000..15d50a0 --- /dev/null +++ b/downstream/mmdetection/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py' +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa +model = dict( + backbone=dict( + depths=[2, 2, 18, 2], + init_cfg=dict(type='Pretrained', checkpoint=pretrained))) diff --git a/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py b/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..337e858 --- /dev/null +++ b/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py @@ -0,0 +1,42 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +model = dict( + type='MaskRCNN', + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[96, 192, 384, 768])) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
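+            # decay_mult=0. disables weight decay for position embeddings and norm layers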
+ })) +lr_config = dict(warmup_iters=1000, step=[8, 11]) +runner = dict(max_epochs=12) diff --git a/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py b/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py new file mode 100644 index 0000000..2be3114 --- /dev/null +++ b/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py @@ -0,0 +1,3 @@ +_base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py' +# you need to set mode='dynamic' if you are using pytorch<=1.5.0 +fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py b/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py new file mode 100644 index 0000000..2612f6e --- /dev/null +++ b/downstream/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py @@ -0,0 +1,91 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa + +model = dict( + type='MaskRCNN', + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(0, 1, 2, 3), + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[96, 192, 384, 768])) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# augmentation strategy originates from DETR / Sparse RCNN +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[[ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ]]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline)) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ })) +lr_config = dict(warmup_iters=1000, step=[27, 33]) +runner = dict(max_epochs=36) diff --git a/downstream/mmdetection/configs/swin/metafile.yml b/downstream/mmdetection/configs/swin/metafile.yml new file mode 100644 index 0000000..6c07f17 --- /dev/null +++ b/downstream/mmdetection/configs/swin/metafile.yml @@ -0,0 +1,120 @@ +Models: + - Name: mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco + In Collection: Mask R-CNN + Config: configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 11.9 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Swin Transformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + + - Name: mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco + In Collection: Mask R-CNN + Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 10.2 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Swin Transformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + + - Name: mask_rcnn_swin-t-p4-w7_fpn_1x_coco + In Collection: Mask R-CNN + Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + Epochs: 12 + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Swin Transformer + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + + - Name: mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco + In Collection: Mask R-CNN + Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 36 + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Swin Transformer + Results: + - 
Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 diff --git a/downstream/mmdetection/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py b/downstream/mmdetection/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..3315093 --- /dev/null +++ b/downstream/mmdetection/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5)) + +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/timm_example/README.md b/downstream/mmdetection/configs/timm_example/README.md new file mode 100644 index 0000000..4374855 --- /dev/null +++ b/downstream/mmdetection/configs/timm_example/README.md @@ -0,0 +1,62 @@ +# Timm Example + +> [PyTorch Image Models](https://github.com/rwightman/pytorch-image-models) + + + +## Abstract + +Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results. + + + +## Results and Models + +### RetinaNet + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------: | :------: | +| R-50 | pytorch | 1x | | | | [config](./retinanet_timm_tv_resnet50_fpn_1x_coco.py) | | +| EfficientNet-B1 | - | 1x | | | | [config](./retinanet_timm_efficientnet_b1_fpn_1x_coco.py) | | + +## Usage + +### Install additional requirements + +MMDetection supports timm backbones via `TIMMBackbone`, a wrapper class in MMClassification. +Thus, you need to install `mmcls` in addition to timm. +If you have already installed requirements for mmdet, run + +```shell +pip install 'dataclasses; python_version<"3.7"' +pip install timm +pip install 'mmcls>=0.20.0' +``` + +See [this document](https://mmclassification.readthedocs.io/en/latest/install.html) for the details of MMClassification installation. 
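+
+A quick sanity check of the installation is to build a `TIMMBackbone` directly and print the channel counts of the feature maps it returns; these are the values that go into `model.neck.in_channels`. The sketch below assumes `TIMMBackbone` is importable from `mmcls.models` and forwards extra keyword arguments such as `out_indices` to `timm.create_model`:
+
+```python
+import torch
+from mmcls.models import TIMMBackbone  # assumption: exported at this path in mmcls>=0.20.0
+
+backbone = TIMMBackbone(
+    model_name='efficientnet_b1',
+    features_only=True,   # expose intermediate feature maps, as in the example configs
+    pretrained=False,     # skip the weight download for a quick check
+    out_indices=(1, 2, 3, 4))
+feats = backbone(torch.randn(1, 3, 224, 224))
+print([f.shape[1] for f in feats])  # candidate values for model.neck.in_channels
+```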
+ +### Edit config + +- See example configs for basic usage. +- See the documents of [timm feature extraction](https://rwightman.github.io/pytorch-image-models/feature_extraction/#multi-scale-feature-maps-feature-pyramid) and [TIMMBackbone](https://mmclassification.readthedocs.io/en/latest/api.html#mmcls.models.backbones.TIMMBackbone) for details. +- Which feature map is output depends on the backbone. + Please check `backbone out_channels` and `backbone out_strides` in your log, and modify `model.neck.in_channels` and `model.backbone.out_indices` if necessary. +- If you use Vision Transformer models that do not support `features_only=True`, add `custom_hooks = []` to your config to disable `NumClassCheckHook`. + +## Citation + +```latex +@misc{rw2019timm, + author = {Ross Wightman}, + title = {PyTorch Image Models}, + year = {2019}, + publisher = {GitHub}, + journal = {GitHub repository}, + doi = {10.5281/zenodo.4414861}, + howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} +} +``` diff --git a/downstream/mmdetection/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py b/downstream/mmdetection/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py new file mode 100644 index 0000000..6500116 --- /dev/null +++ b/downstream/mmdetection/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please install mmcls>=0.20.0 +# import mmcls.models to trigger register_module in mmcls +custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) +model = dict( + backbone=dict( + _delete_=True, + type='mmcls.TIMMBackbone', + model_name='efficientnet_b1', + features_only=True, + pretrained=True, + out_indices=(1, 2, 3, 4)), + neck=dict(in_channels=[24, 40, 112, 320])) + +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/timm_example/retinanet_timm_tv_resnet50_fpn_1x_coco.py b/downstream/mmdetection/configs/timm_example/retinanet_timm_tv_resnet50_fpn_1x_coco.py new file mode 100644 index 0000000..0c5b7a8 --- /dev/null +++ b/downstream/mmdetection/configs/timm_example/retinanet_timm_tv_resnet50_fpn_1x_coco.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please install mmcls>=0.20.0 +# import mmcls.models to trigger register_module in mmcls +custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) +model = dict( + backbone=dict( + _delete_=True, + type='mmcls.TIMMBackbone', + model_name='tv_resnet50', # ResNet-50 with torchvision weights + features_only=True, + pretrained=True, + out_indices=(1, 2, 3, 4))) + +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmdetection/configs/tood/README.md b/downstream/mmdetection/configs/tood/README.md new file mode 100644 index 0000000..925f0ed --- /dev/null +++ b/downstream/mmdetection/configs/tood/README.md @@ -0,0 +1,40 @@ +# TOOD + +> [TOOD: Task-aligned One-stage Object Detection](https://arxiv.org/abs/2108.07755) + + + +## Abstract + +One-stage object detection is commonly implemented by optimizing two sub-tasks: object classification and localization, using heads with two parallel branches, which might lead to a certain 
level of spatial misalignment in predictions between the two tasks. In this work, we propose a Task-aligned One-stage Object Detection (TOOD) that explicitly aligns the two tasks in a learning-based manner. First, we design a novel Task-aligned Head (T-Head) which offers a better balance between learning task-interactive and task-specific features, as well as a greater flexibility to learn the alignment via a task-aligned predictor. Second, we propose Task Alignment Learning (TAL) to explicitly pull closer (or even unify) the optimal anchors for the two tasks during training via a designed sample assignment scheme and a task-aligned loss. Extensive experiments are conducted on MS-COCO, where TOOD achieves a 51.1 AP at single-model single-scale testing. This surpasses the recent one-stage detectors by a large margin, such as ATSS (47.7 AP), GFL (48.2 AP), and PAA (49.0 AP), with fewer parameters and FLOPs. Qualitative results also demonstrate the effectiveness of TOOD for better aligning the tasks of object classification and localization. + +
    + +
    + +## Results and Models + +| Backbone | Style | Anchor Type | Lr schd | Multi-scale Training | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------------: | :-----: | :----------: | :-----: | :------------------: | :------: | :------------: | :----: | :------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | Anchor-free | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425.log) | +| R-50 | pytorch | Anchor-based | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_anchor_based_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105.log) | +| R-50 | pytorch | Anchor-free | 2x | Y | 4.1 | | 44.5 | [config](./tood_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231.log) | +| R-101 | pytorch | Anchor-free | 2x | Y | 6.0 | | 46.1 | [config](./tood_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232.log) | +| R-101-dcnv2 | pytorch | Anchor-free | 2x | Y | 6.2 | | 49.3 | [config](./tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728.log) | +| X-101-64x4d | pytorch | Anchor-free | 2x | Y | 10.2 | | 47.6 | [config](./tood_x101_64x4d_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519.log) | +| X-101-64x4d-dcnv2 | pytorch | Anchor-free | 2x | Y | | | | [config](./tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py) | [model](<>) \| [log](<>) | + +\[1\] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \ +\[2\] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \ 
+\[3\] *`dcnv2` denotes deformable convolutional networks v2.* \\ + +## Citation + +```latex +@inproceedings{feng2021tood, + title={TOOD: Task-aligned One-stage Object Detection}, + author={Feng, Chengjian and Zhong, Yujie and Gao, Yu and Scott, Matthew R and Huang, Weilin}, + booktitle={ICCV}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/tood/metafile.yml b/downstream/mmdetection/configs/tood/metafile.yml new file mode 100644 index 0000000..27a0f8d --- /dev/null +++ b/downstream/mmdetection/configs/tood/metafile.yml @@ -0,0 +1,95 @@ +Collections: + - Name: TOOD + Metadata: + Training Data: COCO + Training Techniques: + - SGD + Training Resources: 8x V100 GPUs + Architecture: + - TOOD + Paper: + URL: https://arxiv.org/abs/2108.07755 + Title: 'TOOD: Task-aligned One-stage Object Detection' + README: configs/tood/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.20.0/mmdet/models/detectors/tood.py#L7 + Version: v2.20.0 + +Models: + - Name: tood_r101_fpn_mstrain_2x_coco + In Collection: TOOD + Config: configs/tood/tood_r101_fpn_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 6.0 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth + + - Name: tood_x101_64x4d_fpn_mstrain_2x_coco + In Collection: TOOD + Config: configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 10.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth + + - Name: tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco + In Collection: TOOD + Config: configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth + + - Name: tood_r50_fpn_anchor_based_1x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_anchor_based_1x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth + + - Name: tood_r50_fpn_1x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth + + - Name: tood_r50_fpn_mstrain_2x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth diff --git 
a/downstream/mmdetection/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..c7f1bbc --- /dev/null +++ b/downstream/mmdetection/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_r101_fpn_mstrain_2x_coco.py' + +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + bbox_head=dict(num_dcn=2)) diff --git a/downstream/mmdetection/configs/tood/tood_r101_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/tood/tood_r101_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..d9d2c32 --- /dev/null +++ b/downstream/mmdetection/configs/tood/tood_r101_fpn_mstrain_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_r50_fpn_mstrain_2x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/tood/tood_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/tood/tood_r50_fpn_1x_coco.py new file mode 100644 index 0000000..35a77a4 --- /dev/null +++ b/downstream/mmdetection/configs/tood/tood_r50_fpn_1x_coco.py @@ -0,0 +1,74 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='TOOD', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='TOODHead', + num_classes=80, + in_channels=256, + stacked_convs=6, + feat_channels=256, + anchor_type='anchor_free', + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + initial_loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + activated=True, # use probability instead of logit as input + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + activated=True, # use probability instead of logit as input + beta=2.0, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), + train_cfg=dict( + initial_epoch=4, + initial_assigner=dict(type='ATSSAssigner', topk=9), + assigner=dict(type='TaskAlignedAssigner', topk=13), + alpha=1, + beta=6, + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) + +# custom hooks +custom_hooks = [dict(type='SetEpochInfoHook')] diff --git a/downstream/mmdetection/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py b/downstream/mmdetection/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py new file mode 100644 index 0000000..c7fbf6a --- /dev/null +++ b/downstream/mmdetection/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './tood_r50_fpn_1x_coco.py' +model = 
dict(bbox_head=dict(anchor_type='anchor_based')) diff --git a/downstream/mmdetection/configs/tood/tood_r50_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/tood/tood_r50_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..157d13a --- /dev/null +++ b/downstream/mmdetection/configs/tood/tood_r50_fpn_mstrain_2x_coco.py @@ -0,0 +1,22 @@ +_base_ = './tood_r50_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) +# multi-scale training +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 480), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..47c9269 --- /dev/null +++ b/downstream/mmdetection/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_x101_64x4d_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + ), + bbox_head=dict(num_dcn=2)) diff --git a/downstream/mmdetection/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..842f320 --- /dev/null +++ b/downstream/mmdetection/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './tood_r50_fpn_mstrain_2x_coco.py' + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/tridentnet/README.md b/downstream/mmdetection/configs/tridentnet/README.md new file mode 100644 index 0000000..b972b3a --- /dev/null +++ b/downstream/mmdetection/configs/tridentnet/README.md @@ -0,0 +1,38 @@ +# TridentNet + +> [Scale-Aware Trident Networks for Object Detection](https://arxiv.org/abs/1901.01892) + + + +## Abstract + +Scale variation is one of the key challenges in object detection. In this work, we first present a controlled experiment to investigate the effect of receptive fields for scale variation in object detection. Based on the findings from the exploration experiments, we propose a novel Trident Network (TridentNet) aiming to generate scale-specific feature maps with a uniform representational power. We construct a parallel multi-branch architecture in which each branch shares the same transformation parameters but with different receptive fields. Then, we adopt a scale-aware training scheme to specialize each branch by sampling object instances of proper scales for training. 
As a bonus, a fast approximation version of TridentNet could achieve significant improvements without any additional parameters and computational cost compared with the vanilla detector. On the COCO dataset, our TridentNet with ResNet-101 backbone achieves state-of-the-art single-model results of 48.4 mAP. + +
    + +
+
+## Results and Models
+
+We report the test results using only one branch for inference.
+
+| Backbone | Style | mstrain | Lr schd | Mem (GB) | Inf time (fps) | box AP | Download |
+| :------: | :---: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| R-50 | caffe | N | 1x | | | 37.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838.log.json) |
+| R-50 | caffe | Y | 1x | | | 37.6 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839.log.json) |
+| R-50 | caffe | Y | 3x | | | 40.3 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539.log.json) |
+
+**Note**
+
+Similar to [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/projects/TridentNet), we haven't implemented the Scale-aware Training Scheme in section 4.2 of the paper.
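+
+The numbers above can be reproduced with the standard MMDetection test script; a typical invocation from the MMDetection root (the checkpoint path is illustrative) is:
+
+```shell
+python tools/test.py configs/tridentnet/tridentnet_r50_caffe_1x_coco.py \
+    tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth --eval bbox
+```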
+ +## Citation + +```latex +@InProceedings{li2019scale, + title={Scale-Aware Trident Networks for Object Detection}, + author={Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang}, + journal={The International Conference on Computer Vision (ICCV)}, + year={2019} +} +``` diff --git a/downstream/mmdetection/configs/tridentnet/metafile.yml b/downstream/mmdetection/configs/tridentnet/metafile.yml new file mode 100644 index 0000000..2536f97 --- /dev/null +++ b/downstream/mmdetection/configs/tridentnet/metafile.yml @@ -0,0 +1,55 @@ +Collections: + - Name: TridentNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - TridentNet Block + Paper: + URL: https://arxiv.org/abs/1901.01892 + Title: 'Scale-Aware Trident Networks for Object Detection' + README: configs/tridentnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/detectors/trident_faster_rcnn.py#L6 + Version: v2.8.0 + +Models: + - Name: tridentnet_r50_caffe_1x_coco + In Collection: TridentNet + Config: configs/tridentnet/tridentnet_r50_caffe_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth + + - Name: tridentnet_r50_caffe_mstrain_1x_coco + In Collection: TridentNet + Config: configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth + + - Name: tridentnet_r50_caffe_mstrain_3x_coco + In Collection: TridentNet + Config: configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth diff --git a/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py b/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py new file mode 100644 index 0000000..d779f75 --- /dev/null +++ b/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py @@ -0,0 +1,55 @@ +_base_ = [ + '../_base_/models/faster_rcnn_r50_caffe_c4.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +model = dict( + type='TridentFasterRCNN', + backbone=dict( + type='TridentResNet', + trident_dilations=(1, 2, 3), + num_branch=3, + test_branch_idx=1, + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1), + train_cfg=dict( + rpn_proposal=dict(max_per_img=500), + rcnn=dict( + sampler=dict(num=128, pos_fraction=0.5, + add_gt_as_proposals=False)))) + +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + 
dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py b/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py new file mode 100644 index 0000000..c73d9ea --- /dev/null +++ b/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py @@ -0,0 +1,22 @@ +_base_ = 'tridentnet_r50_caffe_1x_coco.py' + +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] + +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py b/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py new file mode 100644 index 0000000..0f40282 --- /dev/null +++ b/downstream/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py @@ -0,0 +1,4 @@ +_base_ = 'tridentnet_r50_caffe_mstrain_1x_coco.py' + +lr_config = dict(step=[28, 34]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/downstream/mmdetection/configs/vfnet/README.md b/downstream/mmdetection/configs/vfnet/README.md new file mode 100644 index 0000000..a492bec --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/README.md @@ -0,0 +1,48 @@ +# VarifocalNet + +> [VarifocalNet: An IoU-aware Dense Object Detector](https://arxiv.org/abs/2008.13367) + + + +## Abstract + +Accurately ranking the vast number of candidate detections is crucial for dense object detectors to achieve high performance. Prior work uses the classification score or a combination of classification and predicted localization scores to rank candidates. However, neither option results in a reliable ranking, thus degrading detection performance. In this paper, we propose to learn an Iou-aware Classification Score (IACS) as a joint representation of object presence confidence and localization accuracy. We show that dense object detectors can achieve a more accurate ranking of candidate detections based on the IACS. We design a new loss function, named Varifocal Loss, to train a dense object detector to predict the IACS, and propose a new star-shaped bounding box feature representation for IACS prediction and bounding box refinement. 
Combining these two new components and a bounding box refinement branch, we build an IoU-aware dense object detector based on the FCOS+ATSS architecture, which we call VarifocalNet or VFNet for short. Extensive experiments on MS COCO show that our VFNet consistently surpasses the strong baseline by ∼2.0 AP with different backbones. Our best model VFNet-X-1200 with Res2Net-101-DCN achieves a single-model single-scale AP of 55.1 on COCO test-dev, which is state-of-the-art among various object detectors.
+
    + +
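+
+The Varifocal Loss mentioned above admits a compact sketch. The snippet below is a simplified reading of the loss as described in the paper (with `p` the predicted IACS and `q` the IoU target for positives, 0 for negatives); it is not MMDetection's `VarifocalLoss` class, which the configs below select through `loss_cls` with `alpha=0.75`, `gamma=2.0` and `iou_weighted=True`.
+
+```python
+# Simplified sketch of the Varifocal Loss: positives are weighted by their IoU
+# target q (the IACS training target), negatives are down-weighted focal-style.
+import torch
+import torch.nn.functional as F
+
+def varifocal_loss(logits, targets, alpha=0.75, gamma=2.0):
+    """logits: raw IACS predictions; targets: IoU with the matched GT box for
+    positives, 0.0 for negatives (same shape as logits)."""
+    p = logits.sigmoid()
+    weight = torch.where(targets > 0, targets, alpha * p.pow(gamma))
+    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
+    return (weight * bce).sum()
+```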
    + +## Introduction + +**VarifocalNet (VFNet)** learns to predict the IoU-aware classification score which mixes the object presence confidence and localization accuracy together as the detection score for a bounding box. The learning is supervised by the proposed Varifocal Loss (VFL), based on a new star-shaped bounding box feature representation (the features at nine yellow sampling points). Given the new representation, the object localization accuracy is further improved by refining the initially regressed bounding box. The full paper is available at: [https://arxiv.org/abs/2008.13367](https://arxiv.org/abs/2008.13367). + +## Results and Models + +| Backbone | Style | DCN | MS train | Lr schd | Inf time (fps) | box AP (val) | box AP (test-dev) | Config | Download | +| :---------: | :-----: | :-: | :------: | :-----: | :------------: | :----------: | :---------------: | :--------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50 | pytorch | N | N | 1x | - | 41.6 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco.json) | +| R-50 | pytorch | N | Y | 2x | - | 44.5 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco.json) | +| R-50 | pytorch | Y | Y | 2x | - | 47.8 | 48.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | +| R-101 | pytorch | N | N | 1x | - | 43.0 | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco.json) | +| R-101 | pytorch | N | Y | 2x | - | 46.2 | 46.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco.json) | +| R-101 | pytorch | Y | Y | 2x | - | 49.0 | 49.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | +| X-101-32x4d | pytorch | Y | Y | 2x | - | 49.7 | 50.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | +| X-101-64x4d | pytorch | Y | Y | 2x | - | 50.4 | 50.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | + +**Notes:** + +- The MS-train scale range is 1333x\[480:960\] (`range` mode) and the inference scale keeps 1333x800. +- DCN means using `DCNv2` in both backbone and head. +- Inference time will be updated soon. 
+- More results and pre-trained models can be found in [VarifocalNet-Github](https://github.com/hyz-xmaster/VarifocalNet) + +## Citation + +```latex +@article{zhang2020varifocalnet, + title={VarifocalNet: An IoU-aware Dense Object Detector}, + author={Zhang, Haoyang and Wang, Ying and Dayoub, Feras and S{\"u}nderhauf, Niko}, + journal={arXiv preprint arXiv:2008.13367}, + year={2020} +} +``` diff --git a/downstream/mmdetection/configs/vfnet/metafile.yml b/downstream/mmdetection/configs/vfnet/metafile.yml new file mode 100644 index 0000000..bcbe576 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/metafile.yml @@ -0,0 +1,116 @@ +Collections: + - Name: VFNet + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - Varifocal Loss + Paper: + URL: https://arxiv.org/abs/2008.13367 + Title: 'VarifocalNet: An IoU-aware Dense Object Detector' + README: configs/vfnet/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.6.0/mmdet/models/detectors/vfnet.py#L6 + Version: v2.6.0 + +Models: + - Name: vfnet_r50_fpn_1x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth + + - Name: vfnet_r50_fpn_mstrain_2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth + + - Name: vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth + + - Name: vfnet_r101_fpn_1x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r101_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth + + - Name: vfnet_r101_fpn_mstrain_2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth + + - Name: vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth + + - Name: vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco + In Collection: VFNet + Config: 
configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth + + - Name: vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco + In Collection: VFNet + Config: configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_1x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_1x_coco.py new file mode 100644 index 0000000..b296a07 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = './vfnet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_2x_coco.py new file mode 100644 index 0000000..27962f3 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_2x_coco.py @@ -0,0 +1,8 @@ +_base_ = './vfnet_r50_fpn_1x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..e438c24 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..eae69a0 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..815a36e --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py @@ -0,0 +1,18 @@ +_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + 
depth=101, + scales=4, + base_width=26, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..58022e0 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_1x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_1x_coco.py new file mode 100644 index 0000000..7de6429 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_1x_coco.py @@ -0,0 +1,107 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# model settings +model = dict( + type='VFNet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='VFNetHead', + num_classes=80, + in_channels=256, + stacked_convs=3, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + center_sampling=False, + dcn_on_last_conv=False, + use_atss=True, + use_vfl=True, + loss_cls=dict( + type='VarifocalLoss', + use_sigmoid=True, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.5), + loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# data setting +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + 
dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# optimizer +optimizer = dict( + lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.1, + step=[8, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..24d2093 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py @@ -0,0 +1,6 @@ +_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + bbox_head=dict(dcn_on_last_conv=True)) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..6078bb9 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py @@ -0,0 +1,39 @@ +_base_ = './vfnet_r50_fpn_1x_coco.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 480), (1333, 960)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..7efa051 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', 
checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..49a4312 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..7e1ee42 --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py @@ -0,0 +1,17 @@ +_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py b/downstream/mmdetection/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..e51064e --- /dev/null +++ b/downstream/mmdetection/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py @@ -0,0 +1,15 @@ +_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/downstream/mmdetection/configs/wider_face/README.md b/downstream/mmdetection/configs/wider_face/README.md new file mode 100644 index 0000000..1904506 --- /dev/null +++ b/downstream/mmdetection/configs/wider_face/README.md @@ -0,0 +1,57 @@ +# WIDER FACE + +> [WIDER FACE: A Face Detection Benchmark](https://arxiv.org/abs/1511.06523) + + + +## Abstract + +Face detection is one of the most studied topics in the computer vision community. Much of the progresses have been made by the availability of face detection benchmark datasets. We show that there is a gap between current face detection performance and the real world requirements. To facilitate future face detection research, we introduce the WIDER FACE dataset, which is 10 times larger than existing datasets. The dataset contains rich annotations, including occlusions, poses, event categories, and face bounding boxes. Faces in the proposed dataset are extremely challenging due to large variations in scale, pose and occlusion, as shown in Fig. 1. Furthermore, we show that WIDER FACE dataset is an effective training source for face detection. 
We benchmark several representative detection systems, providing an overview of state-of-the-art performance, and propose a solution to deal with large scale variation. Finally, we discuss common failure cases that are worth further investigation.
+
    + +
    + +## Introduction + +To use the WIDER Face dataset you need to download it +and extract to the `data/WIDERFace` folder. Annotation in the VOC format +can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git). +You should move the annotation files from `WIDER_train_annotations` and `WIDER_val_annotations` folders +to the `Annotation` folders inside the corresponding directories `WIDER_train` and `WIDER_val`. +Also annotation lists `val.txt` and `train.txt` should be copied to `data/WIDERFace` from `WIDER_train_annotations` and `WIDER_val_annotations`. +The directory should be like this: + +``` +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── WIDERFace +│ │ ├── WIDER_train +│ | │ ├──0--Parade +│ | │ ├── ... +│ | │ ├── Annotations +│ │ ├── WIDER_val +│ | │ ├──0--Parade +│ | │ ├── ... +│ | │ ├── Annotations +│ │ ├── val.txt +│ │ ├── train.txt + +``` + +After that you can train the SSD300 on WIDER by launching training with the `ssd300_wider_face.py` config or +create your own config based on the presented one. + +## Citation + +```latex +@inproceedings{yang2016wider, + Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou}, + Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + Title = {WIDER FACE: A Face Detection Benchmark}, + Year = {2016} +} +``` diff --git a/downstream/mmdetection/configs/wider_face/ssd300_wider_face.py b/downstream/mmdetection/configs/wider_face/ssd300_wider_face.py new file mode 100644 index 0000000..5a3eb38 --- /dev/null +++ b/downstream/mmdetection/configs/wider_face/ssd300_wider_face.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', + '../_base_/default_runtime.py' +] +model = dict(bbox_head=dict(num_classes=1)) +# optimizer +optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict() +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[16, 20]) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=24) +log_config = dict(interval=1) diff --git a/downstream/mmdetection/configs/yolact/README.md b/downstream/mmdetection/configs/yolact/README.md new file mode 100644 index 0000000..9eb51b4 --- /dev/null +++ b/downstream/mmdetection/configs/yolact/README.md @@ -0,0 +1,75 @@ +# YOLACT + +> [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689) + + + +## Abstract + +We present a simple, fully-convolutional model for real-time instance segmentation that achieves 29.8 mAP on MS COCO at 33.5 fps evaluated on a single Titan Xp, which is significantly faster than any previous competitive approach. Moreover, we obtain this result after training on only one GPU. We accomplish this by breaking instance segmentation into two parallel subtasks: (1) generating a set of prototype masks and (2) predicting per-instance mask coefficients. Then we produce instance masks by linearly combining the prototypes with the mask coefficients. We find that because this process doesn't depend on repooling, this approach produces very high-quality masks and exhibits temporal stability for free. Furthermore, we analyze the emergent behavior of our prototypes and show they learn to localize instances on their own in a translation variant manner, despite being fully-convolutional. 
Finally, we also propose Fast NMS, a drop-in 12 ms faster replacement for standard NMS that only has a marginal performance penalty. + +
    + +
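+
+The prototype-and-coefficient factorization described above can be written in a couple of lines; the shapes below are purely illustrative (the configs in this diff use `num_protos=32`), and the real model additionally crops each assembled mask to its predicted box.
+
+```python
+# Toy illustration of YOLACT's mask assembly: each instance mask is a linear
+# combination of shared prototype maps followed by a sigmoid.
+import torch
+
+num_protos, h, w = 32, 138, 138             # 32 prototypes, as in the configs below
+prototypes = torch.randn(num_protos, h, w)  # protonet output, shared across instances
+coefficients = torch.randn(5, num_protos)   # one coefficient vector per detected box
+
+masks = torch.einsum('kp,phw->khw', coefficients, prototypes).sigmoid()
+binary_masks = masks > 0.5                  # thresholded instance masks, shape (5, h, w)
+```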
+
+## Introduction
+
+A simple, fully convolutional model for real-time instance segmentation. This is the code for our paper:
+
+- [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689)
+
+For a real-time demo, check out our ICCV video:
+[![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/0pMfmo8qfpQ/0.jpg)](https://www.youtube.com/watch?v=0pMfmo8qfpQ)
+
+## Evaluation
+
+Here are our YOLACT models along with their FPS on a Titan Xp and mAP on COCO's `val`:
+
+| Image Size | GPU x BS | Backbone | \*FPS | mAP | Weights | Configs | Download |
+| :--------: | :------: | :-----------: | :---: | :--: | :-----: | :------: | :------: |
+| 550 | 1x8 | ResNet50-FPN | 42.5 | 29.0 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_1x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth) |
+| 550 | 8x8 | ResNet50-FPN | 42.5 | 28.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_8x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth) |
+| 550 | 1x8 | ResNet101-FPN | 33.5 | 30.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r101_1x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth) |
+
+\*Note: The FPS is evaluated by the [original implementation](https://github.com/dbolya/yolact). When calculating FPS, only the model inference time is taken into account; data loading and post-processing operations such as converting masks to RLE code, generating COCO JSON results, and image rendering are not included.
+
+## Training
+
+All the aforementioned models are trained with a single GPU. Training typically takes ~12GB of VRAM with the ResNet-101 backbone. If you want to try multi-GPU training, you may have to modify the configuration files accordingly, such as adjusting the training schedule and freezing batch norm.
+
+```Shell
+# Trains using the ResNet-101 backbone with a batch size of 8 on a single GPU.
+./tools/dist_train.sh configs/yolact/yolact_r101_1x8_coco.py 1
+```
+
+## Testing
+
+Please refer to [mmdetection/docs/getting_started.md](https://mmdetection.readthedocs.io/en/latest/1_exist_data_model.html#test-existing-models).
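+
+At test time YOLACT also uses the Fast NMS mentioned in the abstract instead of standard NMS (cf. the `iou_thr=0.5` and `top_k=200` entries in `test_cfg` of the configs below). A rough per-class sketch of the idea, using torchvision only for the IoU matrix:
+
+```python
+# Rough sketch of class-wise Fast NMS: a box can only be suppressed by a
+# higher-scoring box, so suppression reduces to one triangular IoU matrix.
+import torch
+from torchvision.ops import box_iou
+
+def fast_nms(boxes, scores, iou_thr=0.5, top_k=200):
+    """boxes: (N, 4) xyxy boxes of a single class; scores: (N,) confidences."""
+    scores, order = scores.sort(descending=True)
+    boxes, scores = boxes[order][:top_k], scores[:top_k]
+    iou = box_iou(boxes, boxes).triu(diagonal=1)  # IoU against higher-scoring boxes only
+    keep = iou.max(dim=0).values <= iou_thr       # drop boxes dominated by a better box
+    return boxes[keep], scores[keep]
+```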
+ +## Citation + +If you use YOLACT or this code base in your work, please cite + +```latex +@inproceedings{yolact-iccv2019, + author = {Daniel Bolya and Chong Zhou and Fanyi Xiao and Yong Jae Lee}, + title = {YOLACT: {Real-time} Instance Segmentation}, + booktitle = {ICCV}, + year = {2019}, +} +``` + + diff --git a/downstream/mmdetection/configs/yolact/metafile.yml b/downstream/mmdetection/configs/yolact/metafile.yml new file mode 100644 index 0000000..e7019ae --- /dev/null +++ b/downstream/mmdetection/configs/yolact/metafile.yml @@ -0,0 +1,78 @@ +Collections: + - Name: YOLACT + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + Paper: + URL: https://arxiv.org/abs/1904.02689 + Title: 'YOLACT: Real-time Instance Segmentation' + README: configs/yolact/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/yolact.py#L9 + Version: v2.5.0 + +Models: + - Name: yolact_r50_1x8_coco + In Collection: YOLACT + Config: configs/yolact/yolact_r50_1x8_coco.py + Metadata: + Training Resources: 1x V100 GPU + Batch Size: 8 + inference time (ms/im): + - value: 23.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (550, 550) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 29.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth + + - Name: yolact_r50_8x8_coco + In Collection: YOLACT + Config: configs/yolact/yolact_r50_8x8_coco.py + Metadata: + Batch Size: 64 + inference time (ms/im): + - value: 23.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (550, 550) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 28.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth + + - Name: yolact_r101_1x8_coco + In Collection: YOLACT + Config: configs/yolact/yolact_r101_1x8_coco.py + Metadata: + Training Resources: 1x V100 GPU + Batch Size: 8 + inference time (ms/im): + - value: 29.85 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (550, 550) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 30.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth diff --git a/downstream/mmdetection/configs/yolact/yolact_r101_1x8_coco.py b/downstream/mmdetection/configs/yolact/yolact_r101_1x8_coco.py new file mode 100644 index 0000000..532631d --- /dev/null +++ b/downstream/mmdetection/configs/yolact/yolact_r101_1x8_coco.py @@ -0,0 +1,7 @@ +_base_ = './yolact_r50_1x8_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/downstream/mmdetection/configs/yolact/yolact_r50_1x8_coco.py b/downstream/mmdetection/configs/yolact/yolact_r50_1x8_coco.py new file mode 100644 index 0000000..dbced5a --- /dev/null +++ b/downstream/mmdetection/configs/yolact/yolact_r50_1x8_coco.py @@ -0,0 +1,165 @@ +_base_ = '../_base_/default_runtime.py' + +# model settings +img_size = 550 +model = dict( + type='YOLACT', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, # do not freeze stem + norm_cfg=dict(type='BN', requires_grad=True), + 
norm_eval=False, # update the statistics of bn + zero_init_residual=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5, + upsample_cfg=dict(mode='bilinear')), + bbox_head=dict( + type='YOLACTHead', + num_classes=80, + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=3, + scales_per_octave=1, + base_sizes=[8, 16, 32, 64, 128], + ratios=[0.5, 1.0, 2.0], + strides=[550.0 / x for x in [69, 35, 18, 9, 5]], + centers=[(550 * 0.5 / x, 550 * 0.5 / x) + for x in [69, 35, 18, 9, 5]]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + reduction='none', + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5), + num_head_convs=1, + num_protos=32, + use_ohem=True), + mask_head=dict( + type='YOLACTProtonet', + in_channels=256, + num_protos=32, + num_classes=80, + max_masks_to_train=100, + loss_mask_weight=6.125), + segm_head=dict( + type='YOLACTSegmHead', + num_classes=80, + in_channels=256, + loss_segm=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + # smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + iou_thr=0.5, + top_k=200, + max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(img_size, img_size), keep_ratio=False), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(img_size, img_size), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 
'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict() +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.1, + step=[20, 42, 49, 52]) +runner = dict(type='EpochBasedRunner', max_epochs=55) +cudnn_benchmark = True +evaluation = dict(metric=['bbox', 'segm']) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (1 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=8) diff --git a/downstream/mmdetection/configs/yolact/yolact_r50_8x8_coco.py b/downstream/mmdetection/configs/yolact/yolact_r50_8x8_coco.py new file mode 100644 index 0000000..41003ab --- /dev/null +++ b/downstream/mmdetection/configs/yolact/yolact_r50_8x8_coco.py @@ -0,0 +1,16 @@ +_base_ = 'yolact_r50_1x8_coco.py' + +optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.1, + step=[20, 42, 49, 52]) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/yolo/README.md b/downstream/mmdetection/configs/yolo/README.md new file mode 100644 index 0000000..c9eb8a6 --- /dev/null +++ b/downstream/mmdetection/configs/yolo/README.md @@ -0,0 +1,55 @@ +# YOLOv3 + +> [YOLOv3: An Incremental Improvement](https://arxiv.org/abs/1804.02767) + + + +## Abstract + +We present some updates to YOLO! We made a bunch of little design changes to make it better. We also trained this new network that's pretty swell. It's a little bigger than last time but more accurate. It's still fast though, don't worry. At 320x320 YOLOv3 runs in 22 ms at 28.2 mAP, as accurate as SSD but three times faster. When we look at the old .5 IOU mAP detection metric YOLOv3 is quite good. It achieves 57.9 mAP@50 in 51 ms on a Titan X, compared to 57.5 mAP@50 in 198 ms by RetinaNet, similar performance but 3.8x faster. + +
    + +
    + +## Results and Models + +| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------: | :---: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| DarkNet-53 | 320 | 273e | 2.7 | 63.9 | 27.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_320_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-20200819_172101.log.json) | +| DarkNet-53 | 416 | 273e | 3.8 | 61.2 | 30.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-20200819_173424.log.json) | +| DarkNet-53 | 608 | 273e | 7.4 | 48.1 | 33.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020.log.json) | + +## Mixed Precision Training + +We also train YOLOv3 with mixed precision training. 
+ +| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :--------: | :---: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| DarkNet-53 | 608 | 273e | 4.7 | 48.1 | 33.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542.log.json) | + +## Lightweight models + +| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | +| :---------: | :---: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| MobileNetV2 | 416 | 300e | 5.3 | | 23.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823.log.json) | +| MobileNetV2 | 320 | 300e | 3.2 | | 22.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349.log.json) | + +Notice: We reduce the number of channels to 96 in both head and neck. It can reduce the flops and parameters, which makes these models more suitable for edge devices. + +## Credit + +This implementation originates from the project of Haoyu Wu(@wuhy08) at Western Digital. 
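+
+The mixed-precision entry above corresponds to `yolov3_d53_fp16_mstrain-608_273e_coco.py`, added later in this diff, which only adds an `fp16` field on top of the FP32 config. Under the same convention, an FP16 variant of another schedule would be a one-line derived config like the hypothetical sketch below.
+
+```python
+# Hypothetical derived config: mixed-precision YOLOv3 on the 416 schedule.
+# Mirrors configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py from this diff.
+_base_ = './yolov3_d53_mstrain-416_273e_coco.py'
+fp16 = dict(loss_scale='dynamic')  # dynamic loss scaling, as in the 608 FP16 config
+```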
+ +## Citation + +```latex +@misc{redmon2018yolov3, + title={YOLOv3: An Incremental Improvement}, + author={Joseph Redmon and Ali Farhadi}, + year={2018}, + eprint={1804.02767}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/downstream/mmdetection/configs/yolo/metafile.yml b/downstream/mmdetection/configs/yolo/metafile.yml new file mode 100644 index 0000000..22c35da --- /dev/null +++ b/downstream/mmdetection/configs/yolo/metafile.yml @@ -0,0 +1,124 @@ +Collections: + - Name: YOLOv3 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - DarkNet + Paper: + URL: https://arxiv.org/abs/1804.02767 + Title: 'YOLOv3: An Incremental Improvement' + README: configs/yolo/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/yolo.py#L8 + Version: v2.4.0 + +Models: + - Name: yolov3_d53_320_273e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_d53_320_273e_coco.py + Metadata: + Training Memory (GB): 2.7 + inference time (ms/im): + - value: 15.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (320, 320) + Epochs: 273 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 27.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth + + - Name: yolov3_d53_mstrain-416_273e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_d53_mstrain-416_273e_coco.py + Metadata: + Training Memory (GB): 3.8 + inference time (ms/im): + - value: 16.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (416, 416) + Epochs: 273 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 30.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth + + - Name: yolov3_d53_mstrain-608_273e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_d53_mstrain-608_273e_coco.py + Metadata: + Training Memory (GB): 7.4 + inference time (ms/im): + - value: 20.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (608, 608) + Epochs: 273 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 33.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth + + - Name: yolov3_d53_fp16_mstrain-608_273e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py + Metadata: + Training Memory (GB): 4.7 + inference time (ms/im): + - value: 20.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (608, 608) + Epochs: 273 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 33.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth + + - Name: yolov3_mobilenetv2_320_300e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_mobilenetv2_320_300e_coco.py + Metadata: + Training Memory (GB): 3.2 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 22.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth + + - Name: 
yolov3_mobilenetv2_mstrain-416_300e_coco + In Collection: YOLOv3 + Config: configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py + Metadata: + Training Memory (GB): 5.3 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 23.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth diff --git a/downstream/mmdetection/configs/yolo/yolov3_d53_320_273e_coco.py b/downstream/mmdetection/configs/yolo/yolov3_d53_320_273e_coco.py new file mode 100644 index 0000000..d4785e3 --- /dev/null +++ b/downstream/mmdetection/configs/yolo/yolov3_d53_320_273e_coco.py @@ -0,0 +1,42 @@ +_base_ = './yolov3_d53_mstrain-608_273e_coco.py' +# dataset settings +img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(320, 320), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(320, 320), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py b/downstream/mmdetection/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py new file mode 100644 index 0000000..4ef2422 --- /dev/null +++ b/downstream/mmdetection/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py @@ -0,0 +1,3 @@ +_base_ = './yolov3_d53_mstrain-608_273e_coco.py' +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/downstream/mmdetection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py b/downstream/mmdetection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py new file mode 100644 index 0000000..94325c5 --- /dev/null +++ b/downstream/mmdetection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py @@ -0,0 +1,42 @@ +_base_ = './yolov3_d53_mstrain-608_273e_coco.py' +# dataset settings +img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=[(320, 320), (416, 416)], keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] 
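+# The Resize step above lists two img_scale values; with mmdet's default
+# multiscale_mode='range', a training scale between 320x320 and 416x416 is
+# sampled for each image.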
+test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(416, 416), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py b/downstream/mmdetection/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py new file mode 100644 index 0000000..43aa2f0 --- /dev/null +++ b/downstream/mmdetection/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py @@ -0,0 +1,132 @@ +_base_ = '../_base_/default_runtime.py' +# model settings +model = dict( + type='YOLOV3', + backbone=dict( + type='Darknet', + depth=53, + out_indices=(3, 4, 5), + init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')), + neck=dict( + type='YOLOV3Neck', + num_scales=3, + in_channels=[1024, 512, 256], + out_channels=[512, 256, 128]), + bbox_head=dict( + type='YOLOV3Head', + num_classes=80, + in_channels=[512, 256, 128], + out_channels=[1024, 512, 256], + anchor_generator=dict( + type='YOLOAnchorGenerator', + base_sizes=[[(116, 90), (156, 198), (373, 326)], + [(30, 61), (62, 45), (59, 119)], + [(10, 13), (16, 30), (33, 23)]], + strides=[32, 16, 8]), + bbox_coder=dict(type='YOLOBBoxCoder'), + featmap_strides=[32, 16, 8], + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_conf=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_xy=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=2.0, + reduction='sum'), + loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='GridAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0)), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + conf_thr=0.005, + nms=dict(type='nms', iou_threshold=0.45), + max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(608, 608), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=data_root + 
'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=2000, # same as burn-in in darknet + warmup_ratio=0.1, + step=[218, 246]) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=273) +evaluation = dict(interval=1, metric=['bbox']) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py b/downstream/mmdetection/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py new file mode 100644 index 0000000..477d253 --- /dev/null +++ b/downstream/mmdetection/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py @@ -0,0 +1,53 @@ +_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py'] + +# yapf:disable +model = dict( + bbox_head=dict( + anchor_generator=dict( + base_sizes=[[(220, 125), (128, 222), (264, 266)], + [(35, 87), (102, 96), (60, 170)], + [(10, 15), (24, 36), (72, 42)]]))) +# yapf:enable + +# dataset settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(320, 320), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(320, 320), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmdetection/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py b/downstream/mmdetection/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py new file mode 100644 index 0000000..18e0622 --- /dev/null +++ b/downstream/mmdetection/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py @@ -0,0 +1,142 @@ +_base_ = '../_base_/default_runtime.py' +# model settings +model = dict( + type='YOLOV3', + backbone=dict( + type='MobileNetV2', + out_indices=(2, 4, 6), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')), + 
neck=dict( + type='YOLOV3Neck', + num_scales=3, + in_channels=[320, 96, 32], + out_channels=[96, 96, 96]), + bbox_head=dict( + type='YOLOV3Head', + num_classes=80, + in_channels=[96, 96, 96], + out_channels=[96, 96, 96], + anchor_generator=dict( + type='YOLOAnchorGenerator', + base_sizes=[[(116, 90), (156, 198), (373, 326)], + [(30, 61), (62, 45), (59, 119)], + [(10, 13), (16, 30), (33, 23)]], + strides=[32, 16, 8]), + bbox_coder=dict(type='YOLOBBoxCoder'), + featmap_strides=[32, 16, 8], + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_conf=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_xy=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=2.0, + reduction='sum'), + loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='GridAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0)), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + conf_thr=0.005, + nms=dict(type='nms', iou_threshold=0.45), + max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict( + type='Resize', + img_scale=[(320, 320), (416, 416)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(416, 416), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=24, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', # use RepeatDataset to speed up training + times=10, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=4000, + warmup_ratio=0.0001, + step=[24, 28]) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=30) +evaluation = dict(interval=1, metric=['bbox']) +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT 
CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (24 samples per GPU) +auto_scale_lr = dict(base_batch_size=192) diff --git a/downstream/mmdetection/configs/yolof/README.md b/downstream/mmdetection/configs/yolof/README.md new file mode 100644 index 0000000..e88da02 --- /dev/null +++ b/downstream/mmdetection/configs/yolof/README.md @@ -0,0 +1,35 @@ +# YOLOF + +> [You Only Look One-level Feature](https://arxiv.org/abs/2103.09460) + + + +## Abstract + +This paper revisits feature pyramids networks (FPN) for one-stage detectors and points out that the success of FPN is due to its divide-and-conquer solution to the optimization problem in object detection rather than multi-scale feature fusion. From the perspective of optimization, we introduce an alternative way to address the problem instead of adopting the complex feature pyramids - {\\em utilizing only one-level feature for detection}. Based on the simple and efficient solution, we present You Only Look One-level Feature (YOLOF). In our method, two key components, Dilated Encoder and Uniform Matching, are proposed and bring considerable improvements. Extensive experiments on the COCO benchmark prove the effectiveness of the proposed model. Our YOLOF achieves comparable results with its feature pyramids counterpart RetinaNet while being 2.5× faster. Without transformer layers, YOLOF can match the performance of DETR in a single-level feature manner with 7× less training epochs. With an image size of 608×608, YOLOF achieves 44.3 mAP running at 60 fps on 2080Ti, which is 13% faster than YOLOv4. + +
    + +## Results and Models + +| Backbone | Style | Epoch | Lr schd | Mem (GB) | box AP | Config | Download | +| :------: | :---: | :---: | :-----: | :------: | :----: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| R-50-C5 | caffe | Y | 1x | 8.3 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolof/yolof_r50_c5_8x8_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427.log.json) | + +**Note**: + +1. We find that the performance is unstable and may fluctuate by about 0.3 mAP. mAP 37.4 ~ 37.7 is acceptable in YOLOF_R_50_C5_1x. Such fluctuation can also be found in the [original implementation](https://github.com/chensnathan/YOLOF). +2. In addition to instability issues, sometimes there are large loss fluctuations and NAN, so there may still be problems with this project, which will be improved subsequently. + +## Citation + +```latex +@inproceedings{chen2021you, + title={You Only Look One-level Feature}, + author={Chen, Qiang and Wang, Yingming and Yang, Tong and Zhang, Xiangyu and Cheng, Jian and Sun, Jian}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/yolof/metafile.yml b/downstream/mmdetection/configs/yolof/metafile.yml new file mode 100644 index 0000000..9436fee --- /dev/null +++ b/downstream/mmdetection/configs/yolof/metafile.yml @@ -0,0 +1,32 @@ +Collections: + - Name: YOLOF + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Dilated Encoder + - ResNet + Paper: + URL: https://arxiv.org/abs/2103.09460 + Title: 'You Only Look One-level Feature' + README: configs/yolof/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/yolof.py#L6 + Version: v2.12.0 + +Models: + - Name: yolof_r50_c5_8x8_1x_coco + In Collection: YOLOF + Config: configs/yolof/yolof_r50_c5_8x8_1x_coco.py + Metadata: + Training Memory (GB): 8.3 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth diff --git a/downstream/mmdetection/configs/yolof/yolof_r50_c5_8x8_1x_coco.py b/downstream/mmdetection/configs/yolof/yolof_r50_c5_8x8_1x_coco.py new file mode 100644 index 0000000..d0b9649 --- /dev/null +++ b/downstream/mmdetection/configs/yolof/yolof_r50_c5_8x8_1x_coco.py @@ -0,0 +1,111 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='YOLOF', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + 
checkpoint='open-mmlab://detectron/resnet50_caffe')), + neck=dict( + type='DilatedEncoder', + in_channels=2048, + out_channels=512, + block_mid_channels=128, + num_residual_blocks=4, + block_dilations=[2, 4, 6, 8]), + bbox_head=dict( + type='YOLOFHead', + num_classes=80, + in_channels=512, + reg_decoded_bbox=True, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[1, 2, 4, 8, 16], + strides=[32]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1., 1., 1., 1.], + add_ctr_clamp=True, + ctr_clamp=32), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optimizer = dict( + type='SGD', + lr=0.12, + momentum=0.9, + weight_decay=0.0001, + paramwise_cfg=dict( + norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)})) +lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667) + +# use caffe img_norm +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=8, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/yolof/yolof_r50_c5_8x8_iter-1x_coco.py b/downstream/mmdetection/configs/yolof/yolof_r50_c5_8x8_iter-1x_coco.py new file mode 100644 index 0000000..c95c02d --- /dev/null +++ b/downstream/mmdetection/configs/yolof/yolof_r50_c5_8x8_iter-1x_coco.py @@ -0,0 +1,14 @@ +_base_ = './yolof_r50_c5_8x8_1x_coco.py' + +# We implemented the iter-based config according to the source code. +# COCO dataset has 117266 images after filtering. We use 8 gpu and +# 8 batch size training, so 22500 is equivalent to +# 22500/(117266/(8x8))=12.3 epoch, 15000 is equivalent to 8.2 epoch, +# 20000 is equivalent to 10.9 epoch. Due to lr(0.12) is large, +# the iter-based and epoch-based setting have about 0.2 difference on +# the mAP evaluation value. 
+lr_config = dict(step=[15000, 20000]) +runner = dict(_delete_=True, type='IterBasedRunner', max_iters=22500) +checkpoint_config = dict(interval=2500) +evaluation = dict(interval=4500) +log_config = dict(interval=20) diff --git a/downstream/mmdetection/configs/yolox/README.md b/downstream/mmdetection/configs/yolox/README.md new file mode 100644 index 0000000..4890fbd --- /dev/null +++ b/downstream/mmdetection/configs/yolox/README.md @@ -0,0 +1,39 @@ +# YOLOX + +> [YOLOX: Exceeding YOLO Series in 2021](https://arxiv.org/abs/2107.08430) + + + +## Abstract + +In this report, we present some experienced improvements to YOLO series, forming a new high-performance detector -- YOLOX. We switch the YOLO detector to an anchor-free manner and conduct other advanced detection techniques, i.e., a decoupled head and the leading label assignment strategy SimOTA to achieve state-of-the-art results across a large scale range of models: For YOLO-Nano with only 0.91M parameters and 1.08G FLOPs, we get 25.3% AP on COCO, surpassing NanoDet by 1.8% AP; for YOLOv3, one of the most widely used detectors in industry, we boost it to 47.3% AP on COCO, outperforming the current best practice by 3.0% AP; for YOLOX-L with roughly the same amount of parameters as YOLOv4-CSP, YOLOv5-L, we achieve 50.0% AP on COCO at a speed of 68.9 FPS on Tesla V100, exceeding YOLOv5-L by 1.8% AP. Further, we won the 1st Place on Streaming Perception Challenge (Workshop on Autonomous Driving at CVPR 2021) using a single YOLOX-L model. We hope this report can provide useful experience for developers and researchers in practical scenes, and we also provide deploy versions with ONNX, TensorRT, NCNN, and Openvino supported. + +
    + +## Results and Models + +| Backbone | size | Mem (GB) | box AP | Config | Download | +| :--------: | :--: | :------: | :----: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOX-tiny | 416 | 3.5 | 32.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_tiny_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234.log.json) | +| YOLOX-s | 640 | 7.6 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_s_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711.log.json) | +| YOLOX-l | 640 | 19.9 | 49.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_l_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236.log.json) | +| YOLOX-x | 640 | 28.1 | 50.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_x_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254.log.json) | + +**Note**: + +1. The test score threshold is 0.001, and the box AP indicates the best AP. +2. Due to the need for pre-training weights, we cannot reproduce the performance of the `yolox-nano` model. Please refer to https://github.com/Megvii-BaseDetection/YOLOX/issues/674 for more information. +3. We also trained the model by the official release of YOLOX based on [Megvii-BaseDetection/YOLOX#735](https://github.com/Megvii-BaseDetection/YOLOX/issues/735) with commit ID [38c633](https://github.com/Megvii-BaseDetection/YOLOX/tree/38c633bf176462ee42b110c70e4ffe17b5753208). We found that the best AP of `YOLOX-tiny`, `YOLOX-s`, `YOLOX-l`, and `YOLOX-x` is 31.8, 40.3, 49.2, and 50.9, respectively. The performance is consistent with that of our re-implementation (see Table above) but still has a gap (0.3~0.8 AP) in comparison with the reported performance in their [README](https://github.com/Megvii-BaseDetection/YOLOX/blob/38c633bf176462ee42b110c70e4ffe17b5753208/README.md#benchmark). 
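For a quick sanity check of a downloaded checkpoint, the sketch below runs single-image inference with the generic `init_detector`/`inference_detector` API from `mmdet.apis` (described in the MMDetection inference docs). It is an illustration only, not part of these configs: the local checkpoint path and demo image are assumptions, and the score-threshold override simply mirrors the 0.001 test threshold mentioned in note 1.

```python
from mmcv import Config
from mmdet.apis import inference_detector, init_detector

# Load the YOLOX-s config and mirror the 0.001 test-time score threshold
# used for the reported box AP (note 1 above).
cfg = Config.fromfile('configs/yolox/yolox_s_8x8_300e_coco.py')
cfg.model.test_cfg.score_thr = 0.001

# Assumed local path of the checkpoint downloaded from the table above.
checkpoint = 'checkpoints/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth'
model = init_detector(cfg, checkpoint, device='cuda:0')

# Run inference on the demo image shipped with MMDetection and save a visualization.
result = inference_detector(model, 'demo/demo.jpg')
model.show_result('demo/demo.jpg', result, out_file='yolox_s_demo_result.jpg')
```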
+ +## Citation + +```latex +@article{yolox2021, + title={{YOLOX}: Exceeding YOLO Series in 2021}, + author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2107.08430}, + year={2021} +} +``` diff --git a/downstream/mmdetection/configs/yolox/metafile.yml b/downstream/mmdetection/configs/yolox/metafile.yml new file mode 100644 index 0000000..845cb0a --- /dev/null +++ b/downstream/mmdetection/configs/yolox/metafile.yml @@ -0,0 +1,70 @@ +Collections: + - Name: YOLOX + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Nesterov + - Weight Decay + - Cosine Annealing Lr Updater + Training Resources: 8x TITANXp GPUs + Architecture: + - CSPDarkNet + - PAFPN + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'YOLOX: Exceeding YOLO Series in 2021' + README: configs/yolox/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.15.1/mmdet/models/detectors/yolox.py#L6 + Version: v2.15.1 + + +Models: + - Name: yolox_s_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_s_8x8_300e_coco.py + Metadata: + Training Memory (GB): 7.6 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth + - Name: yolox_l_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_l_8x8_300e_coco.py + Metadata: + Training Memory (GB): 19.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth + - Name: yolox_x_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_x_8x8_300e_coco.py + Metadata: + Training Memory (GB): 28.1 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth + - Name: yolox_tiny_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_tiny_8x8_300e_coco.py + Metadata: + Training Memory (GB): 3.5 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 32.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth diff --git a/downstream/mmdetection/configs/yolox/yolox_l_8x8_300e_coco.py b/downstream/mmdetection/configs/yolox/yolox_l_8x8_300e_coco.py new file mode 100644 index 0000000..dcbfa18 --- /dev/null +++ b/downstream/mmdetection/configs/yolox/yolox_l_8x8_300e_coco.py @@ -0,0 +1,8 @@ +_base_ = './yolox_s_8x8_300e_coco.py' + +# model settings +model = dict( + backbone=dict(deepen_factor=1.0, widen_factor=1.0), + neck=dict( + in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3), + bbox_head=dict(in_channels=256, feat_channels=256)) diff --git a/downstream/mmdetection/configs/yolox/yolox_m_8x8_300e_coco.py b/downstream/mmdetection/configs/yolox/yolox_m_8x8_300e_coco.py new file mode 100644 index 0000000..3048c95 --- /dev/null +++ b/downstream/mmdetection/configs/yolox/yolox_m_8x8_300e_coco.py @@ -0,0 +1,8 @@ +_base_ = './yolox_s_8x8_300e_coco.py' + +# model settings +model = dict( + backbone=dict(deepen_factor=0.67, widen_factor=0.75), + neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2), + 
bbox_head=dict(in_channels=192, feat_channels=192), +) diff --git a/downstream/mmdetection/configs/yolox/yolox_nano_8x8_300e_coco.py b/downstream/mmdetection/configs/yolox/yolox_nano_8x8_300e_coco.py new file mode 100644 index 0000000..d33ed04 --- /dev/null +++ b/downstream/mmdetection/configs/yolox/yolox_nano_8x8_300e_coco.py @@ -0,0 +1,11 @@ +_base_ = './yolox_tiny_8x8_300e_coco.py' + +# model settings +model = dict( + backbone=dict(deepen_factor=0.33, widen_factor=0.25, use_depthwise=True), + neck=dict( + in_channels=[64, 128, 256], + out_channels=64, + num_csp_blocks=1, + use_depthwise=True), + bbox_head=dict(in_channels=64, feat_channels=64, use_depthwise=True)) diff --git a/downstream/mmdetection/configs/yolox/yolox_s_8x8_300e_coco.py b/downstream/mmdetection/configs/yolox/yolox_s_8x8_300e_coco.py new file mode 100644 index 0000000..97ff23e --- /dev/null +++ b/downstream/mmdetection/configs/yolox/yolox_s_8x8_300e_coco.py @@ -0,0 +1,165 @@ +_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] + +img_scale = (640, 640) # height, width + +# model settings +model = dict( + type='YOLOX', + input_size=img_scale, + random_size_range=(15, 25), + random_size_interval=10, + backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5), + neck=dict( + type='YOLOXPAFPN', + in_channels=[128, 256, 512], + out_channels=128, + num_csp_blocks=1), + bbox_head=dict( + type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128), + train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), + # In order to align the source code, the threshold of the val phase is + # 0.01, and the threshold of the test phase is 0.001. + test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) + +# dataset settings +data_root = 'data/coco/' +dataset_type = 'CocoDataset' + +train_pipeline = [ + dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict( + type='MixUp', + img_scale=img_scale, + ratio_range=(0.8, 1.6), + pad_val=114.0), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', flip_ratio=0.5), + # According to the official implementation, multi-scale + # training is not considered here but in the + # 'mmdet/models/detectors/yolox.py'. + dict(type='Resize', img_scale=img_scale, keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + # If the image is three-channel, the pad value needs + # to be set separately for each channel. 
+ pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] + +train_dataset = dict( + type='MultiImageMixDataset', + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ], + filter_empty_gt=False, + ), + pipeline=train_pipeline) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + persistent_workers=True, + train=train_dataset, + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) + +# optimizer +# default 8 gpu +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=5e-4, + nesterov=True, + paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +max_epochs = 300 +num_last_epochs = 15 +resume_from = None +interval = 10 + +# learning policy +lr_config = dict( + _delete_=True, + policy='YOLOX', + warmup='exp', + by_epoch=False, + warmup_by_epoch=True, + warmup_ratio=1, + warmup_iters=5, # 5 epoch + num_last_epochs=num_last_epochs, + min_lr_ratio=0.05) + +runner = dict(type='EpochBasedRunner', max_epochs=max_epochs) + +custom_hooks = [ + dict( + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, + priority=48), + dict( + type='SyncNormHook', + num_last_epochs=num_last_epochs, + interval=interval, + priority=48), + dict( + type='ExpMomentumEMAHook', + resume_from=resume_from, + momentum=0.0001, + priority=49) +] +checkpoint_config = dict(interval=interval) +evaluation = dict( + save_best='auto', + # The evaluation interval is 'interval' when running epoch is + # less than ‘max_epochs - num_last_epochs’. + # The evaluation interval is 1 when running epoch is greater than + # or equal to ‘max_epochs - num_last_epochs’. + interval=interval, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)], + metric='bbox') +log_config = dict(interval=50) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. 
+# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/yolox/yolox_tiny_8x8_300e_coco.py b/downstream/mmdetection/configs/yolox/yolox_tiny_8x8_300e_coco.py new file mode 100644 index 0000000..75931ba --- /dev/null +++ b/downstream/mmdetection/configs/yolox/yolox_tiny_8x8_300e_coco.py @@ -0,0 +1,58 @@ +_base_ = './yolox_s_8x8_300e_coco.py' + +# model settings +model = dict( + random_size_range=(10, 20), + backbone=dict(deepen_factor=0.33, widen_factor=0.375), + neck=dict(in_channels=[96, 192, 384], out_channels=96), + bbox_head=dict(in_channels=96, feat_channels=96)) + +img_scale = (640, 640) # height, width + +train_pipeline = [ + dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.5, 1.5), + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict(type='YOLOXHSVRandomAug'), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Resize', img_scale=img_scale, keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(416, 416), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']) + ]) +] + +train_dataset = dict(pipeline=train_pipeline) + +data = dict( + train=train_dataset, + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/downstream/mmdetection/configs/yolox/yolox_x_8x8_300e_coco.py b/downstream/mmdetection/configs/yolox/yolox_x_8x8_300e_coco.py new file mode 100644 index 0000000..65c0b75 --- /dev/null +++ b/downstream/mmdetection/configs/yolox/yolox_x_8x8_300e_coco.py @@ -0,0 +1,8 @@ +_base_ = './yolox_s_8x8_300e_coco.py' + +# model settings +model = dict( + backbone=dict(deepen_factor=1.33, widen_factor=1.25), + neck=dict( + in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4), + bbox_head=dict(in_channels=320, feat_channels=320)) diff --git a/downstream/mmdetection/docs/en/1_exist_data_model.md b/downstream/mmdetection/docs/en/1_exist_data_model.md new file mode 100644 index 0000000..297f1ee --- /dev/null +++ b/downstream/mmdetection/docs/en/1_exist_data_model.md @@ -0,0 +1,697 @@ +# 1: Inference and train with existing models and standard datasets + +MMDetection provides hundreds of existing and existing detection models in [Model Zoo](https://mmdetection.readthedocs.io/en/latest/model_zoo.html)), and supports multiple standard datasets, including Pascal VOC, COCO, CityScapes, LVIS, etc. This note will show how to perform common tasks on these existing models and standard datasets, including: + +- Use existing models to inference on given images. +- Test existing models on standard datasets. +- Train predefined models on standard datasets. + +## Inference with existing models + +By inference, we mean using trained models to detect objects on images. 
In MMDetection, a model is defined by a configuration file and existing model parameters are saved in a checkpoint file. + +To start with, we recommend [Faster RCNN](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) with this [configuration file](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) and this [checkpoint file](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth). It is recommended to download the checkpoint file to the `checkpoints` directory. + +### High-level APIs for inference + +MMDetection provides high-level Python APIs for inference on images. Here is an example of building the model and running inference on given images or videos. + +```python +from mmdet.apis import init_detector, inference_detector +import mmcv + +# Specify the path to model config and checkpoint file +config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + +# build the model from a config file and a checkpoint file +model = init_detector(config_file, checkpoint_file, device='cuda:0') + +# test a single image and show the results +img = 'test.jpg' # or img = mmcv.imread(img), which will only load it once +result = inference_detector(model, img) +# visualize the results in a new window +model.show_result(img, result) +# or save the visualization results to image files +model.show_result(img, result, out_file='result.jpg') + +# test a video and show the results +video = mmcv.VideoReader('video.mp4') +for frame in video: + result = inference_detector(model, frame) + model.show_result(frame, result, wait_time=1) +``` + +A notebook demo can be found in [demo/inference_demo.ipynb](https://github.com/open-mmlab/mmdetection/blob/master/demo/inference_demo.ipynb). + +Note: `inference_detector` only supports single-image inference for now. + +### Asynchronous interface - supported for Python 3.7+ + +For Python 3.7+, MMDetection also supports asynchronous interfaces. +By utilizing CUDA streams, it avoids blocking the CPU on GPU-bound inference code and enables better CPU/GPU utilization for single-threaded applications. Inference can be done concurrently either between different input data samples or between different models of some inference pipeline. + +See `tests/async_benchmark.py` to compare the speed of synchronous and asynchronous interfaces.
+ +```python +import asyncio +import torch +from mmdet.apis import init_detector, async_inference_detector +from mmdet.utils.contextmanagers import concurrent + +async def main(): + config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' + checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + device = 'cuda:0' + model = init_detector(config_file, checkpoint=checkpoint_file, device=device) + + # queue is used for concurrent inference of multiple images + streamqueue = asyncio.Queue() + # queue size defines concurrency level + streamqueue_size = 3 + + for _ in range(streamqueue_size): + streamqueue.put_nowait(torch.cuda.Stream(device=device)) + + # test a single image and show the results + img = 'test.jpg' # or img = mmcv.imread(img), which will only load it once + + async with concurrent(streamqueue): + result = await async_inference_detector(model, img) + + # visualize the results in a new window + model.show_result(img, result) + # or save the visualization results to image files + model.show_result(img, result, out_file='result.jpg') + + +asyncio.run(main()) + +``` + +### Demos + +We also provide three demo scripts, implemented with high-level APIs and supporting functionality codes. +Source codes are available [here](https://github.com/open-mmlab/mmdetection/tree/master/demo). + +#### Image demo + +This script performs inference on a single image. + +```shell +python demo/image_demo.py \ + ${IMAGE_FILE} \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--device ${GPU_ID}] \ + [--score-thr ${SCORE_THR}] +``` + +Examples: + +```shell +python demo/image_demo.py demo/demo.jpg \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --device cpu +``` + +#### Webcam demo + +This is a live demo from a webcam. + +```shell +python demo/webcam_demo.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--device ${GPU_ID}] \ + [--camera-id ${CAMERA-ID}] \ + [--score-thr ${SCORE_THR}] +``` + +Examples: + +```shell +python demo/webcam_demo.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth +``` + +#### Video demo + +This script performs inference on a video. + +```shell +python demo/video_demo.py \ + ${VIDEO_FILE} \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--device ${GPU_ID}] \ + [--score-thr ${SCORE_THR}] \ + [--out ${OUT_FILE}] \ + [--show] \ + [--wait-time ${WAIT_TIME}] +``` + +Examples: + +```shell +python demo/video_demo.py demo/demo.mp4 \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --out result.mp4 +``` + +#### Video demo with GPU acceleration + +This script performs inference on a video with GPU acceleration. + +```shell +python demo/video_gpuaccel_demo.py \ + ${VIDEO_FILE} \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--device ${GPU_ID}] \ + [--score-thr ${SCORE_THR}] \ + [--nvdecode] \ + [--out ${OUT_FILE}] \ + [--show] \ + [--wait-time ${WAIT_TIME}] +``` + +Examples: + +```shell +python demo/video_gpuaccel_demo.py demo/demo.mp4 \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --nvdecode --out result.mp4 +``` + +## Test existing models on standard datasets + +To evaluate a model's accuracy, one usually tests the model on some standard datasets. 
+MMDetection supports multiple public datasets including COCO, Pascal VOC, CityScapes, and [more](https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets). +This section will show how to test existing models on supported datasets. + +### Prepare datasets + +Public datasets like [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/index.html) or mirror and [COCO](https://cocodataset.org/#download) are available from official websites or mirrors. Note: In the detection task, Pascal VOC 2012 is an extension of Pascal VOC 2007 without overlap, and we usually use them together. +It is recommended to download and extract the dataset somewhere outside the project directory and symlink the dataset root to `$MMDETECTION/data` as below. +If your folder structure is different, you may need to change the corresponding paths in config files. + +We provide a script to download datasets such as COCO , you can run `python tools/misc/download_dataset.py --dataset-name coco2017` to download COCO dataset. + +For more usage please refer to [dataset-download](https://github.com/open-mmlab/mmdetection/tree/master/docs/en/useful_tools.md#dataset-download) + +```text +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +│ ├── cityscapes +│ │ ├── annotations +│ │ ├── leftImg8bit +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── gtFine +│ │ │ ├── train +│ │ │ ├── val +│ ├── VOCdevkit +│ │ ├── VOC2007 +│ │ ├── VOC2012 +``` + +Some models require additional [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) datasets, such as HTC, DetectoRS and SCNet, you can download and unzip then move to the coco folder. The directory should be like this. + +```text +mmdetection +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +│ │ ├── stuffthingmaps +``` + +Panoptic segmentation models like PanopticFPN require additional [COCO Panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) datasets, you can download and unzip then move to the coco annotation folder. The directory should be like this. + +```text +mmdetection +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── panoptic_train2017.json +│ │ │ ├── panoptic_train2017 +│ │ │ ├── panoptic_val2017.json +│ │ │ ├── panoptic_val2017 +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +``` + +The [cityscapes](https://www.cityscapes-dataset.com/) annotations need to be converted into the coco format using `tools/dataset_converters/cityscapes.py`: + +```shell +pip install cityscapesscripts + +python tools/dataset_converters/cityscapes.py \ + ./data/cityscapes \ + --nproc 8 \ + --out-dir ./data/cityscapes/annotations +``` + +TODO: CHANGE TO THE NEW PATH + +### Test existing models + +We provide testing scripts for evaluating an existing model on the whole dataset (COCO, PASCAL VOC, Cityscapes, etc.). +The following testing environments are supported: + +- single GPU +- CPU +- single node multiple GPUs +- multiple nodes + +Choose the proper script to perform testing depending on the testing environment. 
+ +```shell +# single-gpu testing +python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--out ${RESULT_FILE}] \ + [--eval ${EVAL_METRICS}] \ + [--show] + +# CPU: disable GPUs and run single-gpu testing script +export CUDA_VISIBLE_DEVICES=-1 +python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--out ${RESULT_FILE}] \ + [--eval ${EVAL_METRICS}] \ + [--show] + +# multi-gpu testing +bash tools/dist_test.sh \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + ${GPU_NUM} \ + [--out ${RESULT_FILE}] \ + [--eval ${EVAL_METRICS}] +``` + +`tools/dist_test.sh` also supports multi-node testing, but relies on PyTorch's [launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility). + +Optional arguments: + +- `RESULT_FILE`: Filename of the output results in pickle format. If not specified, the results will not be saved to a file. +- `EVAL_METRICS`: Items to be evaluated on the results. Allowed values depend on the dataset, e.g., `proposal_fast`, `proposal`, `bbox`, `segm` are available for COCO, `mAP`, `recall` for PASCAL VOC. Cityscapes could be evaluated by `cityscapes` as well as all COCO metrics. +- `--show`: If specified, detection results will be plotted on the images and shown in a new window. It is only applicable to single GPU testing and used for debugging and visualization. Please make sure that GUI is available in your environment. Otherwise, you may encounter an error like `cannot connect to X server`. +- `--show-dir`: If specified, detection results will be plotted on the images and saved to the specified directory. It is only applicable to single GPU testing and used for debugging and visualization. You do NOT need a GUI available in your environment for using this option. +- `--show-score-thr`: If specified, detections with scores below this threshold will be removed. +- `--cfg-options`: if specified, the key-value pair optional cfg will be merged into config file +- `--eval-options`: if specified, the key-value pair optional eval cfg will be kwargs for dataset.evaluate() function, it's only for evaluation + +### Examples + +Assuming that you have already downloaded the checkpoints to the directory `checkpoints/`. + +1. Test Faster R-CNN and visualize the results. Press any key for the next image. + Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn). + + ```shell + python tools/test.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --show + ``` + +2. Test Faster R-CNN and save the painted images for future visualization. + Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn). + + ```shell + python tools/test.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --show-dir faster_rcnn_r50_fpn_1x_results + ``` + +3. Test Faster R-CNN on PASCAL VOC (without saving the test results) and evaluate the mAP. + Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc). + + ```shell + python tools/test.py \ + configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc.py \ + checkpoints/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth \ + --eval mAP + ``` + +4. Test Mask R-CNN with 8 GPUs, and evaluate the bbox and mask AP. 
+ Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn). + + ```shell + ./tools/dist_test.sh \ + configs/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + 8 \ + --out results.pkl \ + --eval bbox segm + ``` + +5. Test Mask R-CNN with 8 GPUs, and evaluate the **classwise** bbox and mask AP. + Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn). + + ```shell + ./tools/dist_test.sh \ + configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + 8 \ + --out results.pkl \ + --eval bbox segm \ + --options "classwise=True" + ``` + +6. Test Mask R-CNN on COCO test-dev with 8 GPUs, and generate JSON files for submitting to the official evaluation server. + Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn). + + ```shell + ./tools/dist_test.sh \ + configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + 8 \ + --format-only \ + --options "jsonfile_prefix=./mask_rcnn_test-dev_results" + ``` + + This command generates two JSON files `mask_rcnn_test-dev_results.bbox.json` and `mask_rcnn_test-dev_results.segm.json`. + +7. Test Mask R-CNN on Cityscapes test with 8 GPUs, and generate txt and png files for submitting to the official evaluation server. + Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes). + + ```shell + ./tools/dist_test.sh \ + configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py \ + checkpoints/mask_rcnn_r50_fpn_1x_cityscapes_20200227-afe51d5a.pth \ + 8 \ + --format-only \ + --options "txtfile_prefix=./mask_rcnn_cityscapes_test_results" + ``` + + The generated png and txt would be under `./mask_rcnn_cityscapes_test_results` directory. + +### Test without Ground Truth Annotations + +MMDetection supports to test models without ground-truth annotations using `CocoDataset`. If your dataset format is not in COCO format, please convert them to COCO format. For example, if your dataset format is VOC, you can directly convert it to COCO format by the [script in tools.](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters/pascal_voc.py) If your dataset format is Cityscapes, you can directly convert it to COCO format by the [script in tools.](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters/cityscapes.py) The rest of the formats can be converted using [this script](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters/images2coco.py). + +```shel +python tools/dataset_converters/images2coco.py \ + ${IMG_PATH} \ + ${CLASSES} \ + ${OUT} \ + [--exclude-extensions] +``` + +arguments: + +- `IMG_PATH`: The root path of images. +- `CLASSES`: The text file with a list of categories. +- `OUT`: The output annotation json file name. The save dir is in the same directory as `IMG_PATH`. +- `exclude-extensions`: The suffix of images to be excluded, such as 'png' and 'bmp'. 
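To make the expected output format concrete, here is a minimal hand-written sketch of a COCO-style annotation file for unannotated images: only the image entries and the category list are required, and the annotation list can stay empty. All file names, sizes, categories, and paths below are made-up placeholders, not output copied from the converter script.

```python
import json

# Minimal COCO-style skeleton for test images without ground truth.
# Every concrete value here is a hypothetical placeholder.
coco_skeleton = {
    'images': [
        {'id': 0, 'file_name': 'demo_0001.jpg', 'width': 1333, 'height': 800},
        {'id': 1, 'file_name': 'demo_0002.jpg', 'width': 1333, 'height': 800},
    ],
    'annotations': [],  # no ground-truth boxes are available
    'categories': [
        {'id': 0, 'name': 'person', 'supercategory': 'none'},
    ],
}

with open('data/my_images/annotation.json', 'w') as f:
    json.dump(coco_skeleton, f)
```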
+ +After the conversion is complete, you can use the following command to test + +```shell +# single-gpu testing +python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + --format-only \ + --options ${JSONFILE_PREFIX} \ + [--show] + +# CPU: disable GPUs and run single-gpu testing script +export CUDA_VISIBLE_DEVICES=-1 +python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--out ${RESULT_FILE}] \ + [--eval ${EVAL_METRICS}] \ + [--show] + +# multi-gpu testing +bash tools/dist_test.sh \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + ${GPU_NUM} \ + --format-only \ + --options ${JSONFILE_PREFIX} \ + [--show] +``` + +Assuming that the checkpoints in the [model zoo](https://mmdetection.readthedocs.io/en/latest/modelzoo_statistics.html) have been downloaded to the directory `checkpoints/`, we can test Mask R-CNN on COCO test-dev with 8 GPUs, and generate JSON files using the following command. + +```sh +./tools/dist_test.sh \ + configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + 8 \ + -format-only \ + --options "jsonfile_prefix=./mask_rcnn_test-dev_results" +``` + +This command generates two JSON files `mask_rcnn_test-dev_results.bbox.json` and `mask_rcnn_test-dev_results.segm.json`. + +### Batch Inference + +MMDetection supports inference with a single image or batched images in test mode. By default, we use single-image inference and you can use batch inference by modifying `samples_per_gpu` in the config of test data. You can do that either by modifying the config as below. + +```shell +data = dict(train=dict(...), val=dict(...), test=dict(samples_per_gpu=2, ...)) +``` + +Or you can set it through `--cfg-options` as `--cfg-options data.test.samples_per_gpu=2` + +### Deprecated ImageToTensor + +In test mode, `ImageToTensor` pipeline is deprecated, it's replaced by `DefaultFormatBundle` that recommended to manually replace it in the test data pipeline in your config file. examples: + +```python +# use ImageToTensor (deprecated) +pipelines = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) + ] + +# manually replace ImageToTensor to DefaultFormatBundle (recommended) +pipelines = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']), + ]) + ] +``` + +## Train predefined models on standard datasets + +MMDetection also provides out-of-the-box tools for training detection models. +This section will show how to train _predefined_ models (under [configs](https://github.com/open-mmlab/mmdetection/tree/master/configs)) on standard datasets i.e. COCO. + +### Prepare datasets + +Training requires preparing datasets too. See section [Prepare datasets](#prepare-datasets) above for details. + +**Note**: +Currently, the config files under `configs/cityscapes` use COCO pretrained weights to initialize. +You could download the existing models in advance if the network connection is unavailable or slow. 
Otherwise, it would cause errors at the beginning of training. + +### Automatically scale the learning rate + +**Important**: The default learning rate in config files is for 8 GPUs and 2 samples per GPU (batch size = 8 * 2 = 16). This value is set as `auto_scale_lr.base_batch_size` in `config/_base_/default_runtime.py`. When enabled, the learning rate will be automatically scaled relative to this base batch size. Meanwhile, in order not to affect other codebases that build on MMDetection, the flag `auto_scale_lr.enable` is set to `False` by default. + +If you want to enable this feature, you need to add the argument `--auto-scale-lr`. Before running the command, check the name of the config you want to use, because the config name indicates the default batch size. +By default, it is `8 x 2 = 16` (8 GPUs x 2 samples per GPU), as in `faster_rcnn_r50_caffe_fpn_90k_coco.py` or `pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py`. In other cases, the config file name contains `_NxM_` to indicate the batch size, e.g. `cornernet_hourglass104_mstest_32x3_210e_coco.py` uses a batch size of `32 x 3 = 96`, and `scnet_x101_64x4d_fpn_8x1_20e_coco.py` uses `8 x 1 = 8`. + +**Please remember to check the bottom of the specific config file you want to use: it will set `auto_scale_lr.base_batch_size` if its batch size is not `16`. If you can't find this value there, check the config files listed in its `_base_=[xxx]` and you will find it. Please do not modify this value if you want the LR to be scaled automatically.** + +The basic usage of automatic learning rate scaling is as follows. + +```shell +python tools/train.py \ + ${CONFIG_FILE} \ + --auto-scale-lr \ + [optional arguments] +``` + +If you enable this feature, the learning rate will be automatically scaled according to the number of GPUs on the machine and the training batch size. See the [linear scaling rule](https://arxiv.org/abs/1706.02677) for details. For example, if `lr = 0.01` with 4 GPUs and 2 images per GPU, then with 16 GPUs and 4 images per GPU (an 8x larger batch size) the learning rate is automatically scaled to `lr = 0.08`. + +If you don't want to use it, calculate the learning rate manually according to the [linear scaling rule](https://arxiv.org/abs/1706.02677) and change `optimizer.lr` in the specific config file. + +### Training on a single GPU + +We provide `tools/train.py` to launch training jobs on a single GPU. +The basic usage is as follows. + +```shell +python tools/train.py \ + ${CONFIG_FILE} \ + [optional arguments] +``` + +During training, log files and checkpoints will be saved to the working directory, which is specified by `work_dir` in the config file or via the CLI argument `--work-dir`. + +By default, the model is evaluated on the validation set every epoch; the evaluation interval can be specified in the config file as shown below. + +```python +# evaluate the model every 12 epochs. +evaluation = dict(interval=12) +``` + +This tool accepts several optional arguments, including: + +- `--no-validate` (**not suggested**): Disable evaluation during training. +- `--work-dir ${WORK_DIR}`: Override the working directory. +- `--resume-from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file. +- `--options 'Key=value'`: Overrides other settings in the used config. + +**Note**: + +Difference between `resume-from` and `load-from`: + +`resume-from` loads both the model weights and optimizer status, and the epoch is also inherited from the specified checkpoint. It is usually used for resuming the training process that was interrupted accidentally.
+`load-from` only loads the model weights and the training epoch starts from 0. It is usually used for finetuning. + +### Training on CPU + +The process of training on the CPU is consistent with single GPU training. We just need to disable GPUs before the training process. + +```shell +export CUDA_VISIBLE_DEVICES=-1 +``` + +And then run the script [above](#training-on-a-single-GPU). + +**Note**: + +We do not recommend users to use CPU for training because it is too slow. We support this feature to allow users to debug on machines without GPU for convenience. + +### Training on multiple GPUs + +We provide `tools/dist_train.sh` to launch training on multiple GPUs. +The basic usage is as follows. + +```shell +bash ./tools/dist_train.sh \ + ${CONFIG_FILE} \ + ${GPU_NUM} \ + [optional arguments] +``` + +Optional arguments remain the same as stated [above](#training-on-a-single-GPU). + +#### Launch multiple jobs simultaneously + +If you would like to launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs, +you need to specify different ports (29500 by default) for each job to avoid communication conflict. + +If you use `dist_train.sh` to launch training jobs, you can set the port in commands. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +### Train with multiple machines + +If you launch with multiple machines simply connected with ethernet, you can simply run following commands: + +On the first machine: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS +``` + +On the second machine: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS +``` + +Usually it is slow if you do not have high speed networking like InfiniBand. + +### Manage jobs with Slurm + +[Slurm](https://slurm.schedmd.com/) is a good job scheduling system for computing clusters. +On a cluster managed by Slurm, you can use `slurm_train.sh` to spawn training jobs. It supports both single-node and multi-node training. + +The basic usage is as follows. + +```shell +[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} +``` + +Below is an example of using 16 GPUs to train Mask R-CNN on a Slurm partition named _dev_, and set the work-dir to some shared file systems. + +```shell +GPUS=16 ./tools/slurm_train.sh dev mask_r50_1x configs/mask_rcnn_r50_fpn_1x_coco.py /nfs/xxxx/mask_rcnn_r50_fpn_1x +``` + +You can check [the source code](https://github.com/open-mmlab/mmdetection/blob/master/tools/slurm_train.sh) to review full arguments and environment variables. + +When using Slurm, the port option need to be set in one of the following ways: + +1. Set the port through `--options`. This is more recommended since it does not change the original configs. + + ```shell + CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} --options 'dist_params.port=29500' + CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} --options 'dist_params.port=29501' + ``` + +2. Modify the config files to set different communication ports. 
+
+   In `config1.py`, set
+
+   ```python
+   dist_params = dict(backend='nccl', port=29500)
+   ```
+
+   In `config2.py`, set
+
+   ```python
+   dist_params = dict(backend='nccl', port=29501)
+   ```
+
+   Then you can launch two jobs with `config1.py` and `config2.py`.
+
+   ```shell
+   CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR}
+   CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR}
+   ```
diff --git a/downstream/mmdetection/docs/en/2_new_data_model.md b/downstream/mmdetection/docs/en/2_new_data_model.md
new file mode 100644
index 0000000..27673b6
--- /dev/null
+++ b/downstream/mmdetection/docs/en/2_new_data_model.md
@@ -0,0 +1,264 @@
+# 2: Train with customized datasets
+
+In this note, you will learn how to run inference, test, and train predefined models with customized datasets. We use the [balloon dataset](https://github.com/matterport/Mask_RCNN/tree/master/samples/balloon) as an example to describe the whole process.
+
+The basic steps are as below:
+
+1. Prepare the customized dataset
+2. Prepare a config
+3. Train, test, and run inference with models on the customized dataset.
+
+## Prepare the customized dataset
+
+There are three ways to support a new dataset in MMDetection:
+
+1. reorganize the dataset into COCO format.
+2. reorganize the dataset into a middle format.
+3. implement a new dataset.
+
+We usually recommend the first two methods, which are generally easier than the third.
+
+In this note, we give an example of converting the data into COCO format.
+
+**Note**: MMDetection only supports evaluating the mask AP of datasets in COCO format for now.
+So for instance segmentation tasks, users should convert the data into COCO format.
+
+### COCO annotation format
+
+The necessary keys of the COCO format for instance segmentation are listed below; for the complete details, please refer to the [official format description](https://cocodataset.org/#format-data).
+
+```json
+{
+    "images": [image],
+    "annotations": [annotation],
+    "categories": [category]
+}
+
+
+image = {
+    "id": int,
+    "width": int,
+    "height": int,
+    "file_name": str,
+}
+
+annotation = {
+    "id": int,
+    "image_id": int,
+    "category_id": int,
+    "segmentation": RLE or [polygon],
+    "area": float,
+    "bbox": [x,y,width,height],
+    "iscrowd": 0 or 1,
+}
+
+categories = [{
+    "id": int,
+    "name": str,
+    "supercategory": str,
+}]
+```
+
+Assume we use the balloon dataset.
+After downloading the data, we need to implement a function to convert the annotation format into the COCO format. Then we can use the implemented `CocoDataset` to load the data and perform training and evaluation.
+ +If you take a look at the dataset, you will find the dataset format is as below: + +```json +{'base64_img_data': '', + 'file_attributes': {}, + 'filename': '34020010494_e5cb88e1c4_k.jpg', + 'fileref': '', + 'regions': {'0': {'region_attributes': {}, + 'shape_attributes': {'all_points_x': [1020, + 1000, + 994, + 1003, + 1023, + 1050, + 1089, + 1134, + 1190, + 1265, + 1321, + 1361, + 1403, + 1428, + 1442, + 1445, + 1441, + 1427, + 1400, + 1361, + 1316, + 1269, + 1228, + 1198, + 1207, + 1210, + 1190, + 1177, + 1172, + 1174, + 1170, + 1153, + 1127, + 1104, + 1061, + 1032, + 1020], + 'all_points_y': [963, + 899, + 841, + 787, + 738, + 700, + 663, + 638, + 621, + 619, + 643, + 672, + 720, + 765, + 800, + 860, + 896, + 942, + 990, + 1035, + 1079, + 1112, + 1129, + 1134, + 1144, + 1153, + 1166, + 1166, + 1150, + 1136, + 1129, + 1122, + 1112, + 1084, + 1037, + 989, + 963], + 'name': 'polygon'}}}, + 'size': 1115004} +``` + +The annotation is a JSON file where each key indicates an image's all annotations. +The code to convert the balloon dataset into coco format is as below. + +```python +import os.path as osp +import mmcv + +def convert_balloon_to_coco(ann_file, out_file, image_prefix): + data_infos = mmcv.load(ann_file) + + annotations = [] + images = [] + obj_count = 0 + for idx, v in enumerate(mmcv.track_iter_progress(data_infos.values())): + filename = v['filename'] + img_path = osp.join(image_prefix, filename) + height, width = mmcv.imread(img_path).shape[:2] + + images.append(dict( + id=idx, + file_name=filename, + height=height, + width=width)) + + bboxes = [] + labels = [] + masks = [] + for _, obj in v['regions'].items(): + assert not obj['region_attributes'] + obj = obj['shape_attributes'] + px = obj['all_points_x'] + py = obj['all_points_y'] + poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)] + poly = [p for x in poly for p in x] + + x_min, y_min, x_max, y_max = ( + min(px), min(py), max(px), max(py)) + + + data_anno = dict( + image_id=idx, + id=obj_count, + category_id=0, + bbox=[x_min, y_min, x_max - x_min, y_max - y_min], + area=(x_max - x_min) * (y_max - y_min), + segmentation=[poly], + iscrowd=0) + annotations.append(data_anno) + obj_count += 1 + + coco_format_json = dict( + images=images, + annotations=annotations, + categories=[{'id':0, 'name': 'balloon'}]) + mmcv.dump(coco_format_json, out_file) + +``` + +Using the function above, users can successfully convert the annotation file into json format, then we can use `CocoDataset` to train and evaluate the model. + +## Prepare a config + +The second step is to prepare a config thus the dataset could be successfully loaded. Assume that we want to use Mask R-CNN with FPN, the config to train the detector on balloon dataset is as below. Assume the config is under directory `configs/balloon/` and named as `mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py`, the config is as below. 
+
+```python
+# The new config inherits a base config to highlight the necessary modification
+_base_ = 'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
+
+# We also need to change the num_classes in head to match the dataset's annotation
+model = dict(
+    roi_head=dict(
+        bbox_head=dict(num_classes=1),
+        mask_head=dict(num_classes=1)))
+
+# Modify dataset related settings
+dataset_type = 'COCODataset'
+classes = ('balloon',)
+data = dict(
+    train=dict(
+        img_prefix='balloon/train/',
+        classes=classes,
+        ann_file='balloon/train/annotation_coco.json'),
+    val=dict(
+        img_prefix='balloon/val/',
+        classes=classes,
+        ann_file='balloon/val/annotation_coco.json'),
+    test=dict(
+        img_prefix='balloon/val/',
+        classes=classes,
+        ann_file='balloon/val/annotation_coco.json'))
+
+# We can use the pre-trained Mask RCNN model to obtain higher performance
+load_from = 'checkpoints/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'
+```
+
+## Train a new model
+
+To train a model with the new config, you can simply run
+
+```shell
+python tools/train.py configs/balloon/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py
+```
+
+For more detailed usages, please refer to [Case 1](1_exist_data_model.md).
+
+## Test and inference
+
+To test the trained model, you can simply run
+
+```shell
+python tools/test.py configs/balloon/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py work_dirs/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon/latest.pth --eval bbox segm
+```
+
+For more detailed usages, please refer to [Case 1](1_exist_data_model.md).
diff --git a/downstream/mmdetection/docs/en/3_exist_data_new_model.md b/downstream/mmdetection/docs/en/3_exist_data_new_model.md
new file mode 100644
index 0000000..b34c133
--- /dev/null
+++ b/downstream/mmdetection/docs/en/3_exist_data_new_model.md
@@ -0,0 +1,283 @@
+# 3: Train with customized models and standard datasets
+
+In this note, you will learn how to train, test, and run inference with your own customized models on standard datasets. We use the cityscapes dataset to train a customized Cascade Mask R-CNN R50 model as an example to demonstrate the whole process, which uses [`AugFPN`](https://github.com/Gus-Guo/AugFPN) to replace the default `FPN` neck and adds `Rotate` or `Translate` as training-time auto augmentation.
+
+The basic steps are as below:
+
+1. Prepare the standard dataset
+2. Prepare your own customized model
+3. Prepare a config
+4. Train, test, and run inference with models on the standard dataset.
+
+## Prepare the standard dataset
+
+In this note, we use the standard cityscapes dataset as an example.
+
+It is recommended to symlink the dataset root to `$MMDETECTION/data`.
+If your folder structure is different, you may need to change the corresponding paths in config files.
+
+```none
+mmdetection
+├── mmdet
+├── tools
+├── configs
+├── data
+│   ├── coco
+│   │   ├── annotations
+│   │   ├── train2017
+│   │   ├── val2017
+│   │   ├── test2017
+│   ├── cityscapes
+│   │   ├── annotations
+│   │   ├── leftImg8bit
+│   │   │   ├── train
+│   │   │   ├── val
+│   │   ├── gtFine
+│   │   │   ├── train
+│   │   │   ├── val
+│   ├── VOCdevkit
+│   │   ├── VOC2007
+│   │   ├── VOC2012
+
+```
+
+Or you can set your dataset root through
+
+```bash
+export MMDET_DATASETS=$data_root
+```
+
+We will replace the dataset root with `$MMDET_DATASETS`, so you don't have to modify the corresponding paths in config files.
+
+The cityscapes annotations have to be converted into the COCO format using `tools/dataset_converters/cityscapes.py`:
+
+```shell
+pip install cityscapesscripts
+python tools/dataset_converters/cityscapes.py ./data/cityscapes --nproc 8 --out-dir ./data/cityscapes/annotations
+```
+
+Currently the config files in `cityscapes` use COCO pre-trained weights to initialize.
+You could download the pre-trained models in advance if the network is unavailable or slow; otherwise it would cause errors at the beginning of training.
+
+## Prepare your own customized model
+
+The second step is to use your own module or training setting. Assume that we want to implement a new neck called `AugFPN` to replace the default `FPN` under the existing detector Cascade Mask R-CNN R50. The following implements `AugFPN` in MMDetection.
+
+### 1. Define a new neck (e.g. AugFPN)
+
+Firstly create a new file `mmdet/models/necks/augfpn.py`.
+
+```python
+import torch.nn as nn
+
+from ..builder import NECKS
+
+@NECKS.register_module()
+class AugFPN(nn.Module):
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 num_outs,
+                 start_level=0,
+                 end_level=-1,
+                 add_extra_convs=False):
+        pass
+
+    def forward(self, inputs):
+        # implementation is ignored
+        pass
+```
+
+### 2. Import the module
+
+You can either add the following line to `mmdet/models/necks/__init__.py`,
+
+```python
+from .augfpn import AugFPN
+```
+
+or alternatively add
+
+```python
+custom_imports = dict(
+    imports=['mmdet.models.necks.augfpn'],
+    allow_failed_imports=False)
+```
+
+to the config file and avoid modifying the original code.
+
+### 3. Modify the config file
+
+```python
+neck=dict(
+    type='AugFPN',
+    in_channels=[256, 512, 1024, 2048],
+    out_channels=256,
+    num_outs=5)
+```
+
+For more details about customizing your own models (e.g. implementing a new backbone, head, or loss) and runtime training settings (e.g. defining a new optimizer, using gradient clipping, customizing training schedules and hooks), please refer to the guidelines [Customize Models](tutorials/customize_models.md) and [Customize Runtime Settings](tutorials/customize_runtime.md) respectively.
+
+## Prepare a config
+
+The third step is to prepare a config for your own training setting. Assume that we want to add `AugFPN` and the `Rotate` or `Translate` augmentation to the existing Cascade Mask R-CNN R50 and train it on the cityscapes dataset. Assume the config is under the directory `configs/cityscapes/` and named `cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py`; the config is as below.
+
+```python
+# The new config inherits the base configs to highlight the necessary modification
+_base_ = [
+    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
+    '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
+]
+
+model = dict(
+    # set None to avoid loading ImageNet pretrained backbone,
+    # instead here we set `load_from` to load from COCO pretrained detectors.
+    backbone=dict(init_cfg=None),
+    # replace the default `FPN` neck with our newly implemented module `AugFPN`
+    neck=dict(
+        type='AugFPN',
+        in_channels=[256, 512, 1024, 2048],
+        out_channels=256,
+        num_outs=5),
+    # We also need to change the num_classes in head from 80 to 8, to match the
+    # cityscapes dataset's annotation. This modification involves `bbox_head` and `mask_head`.
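+    # (All three cascade stages below set num_classes=8; the stages differ only in their bbox_coder target_stds.)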
+ roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + # change the number of classes from defaultly COCO to cityscapes + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + # change the number of classes from defaultly COCO to cityscapes + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + # change the number of classes from defaultly COCO to cityscapes + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + # change the number of classes from defaultly COCO to cityscapes + num_classes=8, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) + +# over-write `train_pipeline` for new added `AutoAugment` training setting +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='AutoAugment', + policies=[ + [dict( + type='Rotate', + level=5, + img_fill_val=(124, 116, 104), + prob=0.5, + scale=1) + ], + [dict(type='Rotate', level=7, img_fill_val=(124, 116, 104)), + dict( + type='Translate', + level=5, + prob=0.5, + img_fill_val=(124, 116, 104)) + ], + ]), + dict( + type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] + +# set batch_size per gpu, and set new training pipeline +data = dict( + samples_per_gpu=1, + workers_per_gpu=3, + # over-write `pipeline` with new training pipeline setting + train=dict(dataset=dict(pipeline=train_pipeline))) + +# Set optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# Set customized learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8]) +runner = dict(type='EpochBasedRunner', max_epochs=10) + +# We can use the COCO pretrained Cascade Mask R-CNN R50 model for more stable performance initialization +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth' +``` + +## Train a new model + +To train a model with the 
new config, you can simply run + +```shell +python tools/train.py configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py +``` + +For more detailed usages, please refer to the [Case 1](1_exist_data_model.md). + +## Test and inference + +To test the trained model, you can simply run + +```shell +python tools/test.py configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py work_dirs/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py/latest.pth --eval bbox segm +``` + +For more detailed usages, please refer to the [Case 1](1_exist_data_model.md). diff --git a/downstream/mmdetection/docs/en/Makefile b/downstream/mmdetection/docs/en/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/downstream/mmdetection/docs/en/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/downstream/mmdetection/docs/en/_static/css/readthedocs.css b/downstream/mmdetection/docs/en/_static/css/readthedocs.css new file mode 100644 index 0000000..57ed0ad --- /dev/null +++ b/downstream/mmdetection/docs/en/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../image/mmdet-logo.png"); + background-size: 156px 40px; + height: 40px; + width: 156px; +} diff --git a/downstream/mmdetection/docs/en/_static/image/mmdet-logo.png b/downstream/mmdetection/docs/en/_static/image/mmdet-logo.png new file mode 100644 index 0000000..58e2b5e Binary files /dev/null and b/downstream/mmdetection/docs/en/_static/image/mmdet-logo.png differ diff --git a/downstream/mmdetection/docs/en/api.rst b/downstream/mmdetection/docs/en/api.rst new file mode 100644 index 0000000..e61c663 --- /dev/null +++ b/downstream/mmdetection/docs/en/api.rst @@ -0,0 +1,108 @@ +mmdet.apis +-------------- +.. automodule:: mmdet.apis + :members: + +mmdet.core +-------------- + +anchor +^^^^^^^^^^ +.. automodule:: mmdet.core.anchor + :members: + +bbox +^^^^^^^^^^ +.. automodule:: mmdet.core.bbox + :members: + +export +^^^^^^^^^^ +.. automodule:: mmdet.core.export + :members: + +mask +^^^^^^^^^^ +.. automodule:: mmdet.core.mask + :members: + +evaluation +^^^^^^^^^^ +.. automodule:: mmdet.core.evaluation + :members: + +post_processing +^^^^^^^^^^^^^^^ +.. automodule:: mmdet.core.post_processing + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmdet.core.utils + :members: + +mmdet.datasets +-------------- + +datasets +^^^^^^^^^^ +.. automodule:: mmdet.datasets + :members: + +pipelines +^^^^^^^^^^ +.. automodule:: mmdet.datasets.pipelines + :members: + +samplers +^^^^^^^^^^ +.. automodule:: mmdet.datasets.samplers + :members: + +api_wrappers +^^^^^^^^^^^^ +.. automodule:: mmdet.datasets.api_wrappers + :members: + +mmdet.models +-------------- + +detectors +^^^^^^^^^^ +.. automodule:: mmdet.models.detectors + :members: + +backbones +^^^^^^^^^^ +.. automodule:: mmdet.models.backbones + :members: + +necks +^^^^^^^^^^^^ +.. 
automodule:: mmdet.models.necks + :members: + +dense_heads +^^^^^^^^^^^^ +.. automodule:: mmdet.models.dense_heads + :members: + +roi_heads +^^^^^^^^^^ +.. automodule:: mmdet.models.roi_heads + :members: + +losses +^^^^^^^^^^ +.. automodule:: mmdet.models.losses + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmdet.models.utils + :members: + +mmdet.utils +-------------- +.. automodule::mmdet.utils + :members: diff --git a/downstream/mmdetection/docs/en/changelog.md b/downstream/mmdetection/docs/en/changelog.md new file mode 100644 index 0000000..31a0ef2 --- /dev/null +++ b/downstream/mmdetection/docs/en/changelog.md @@ -0,0 +1,1681 @@ +## Changelog + +### v2.25.0 (31/5/2022) + +#### Highlights + +- Support dedicated `WandbLogger` hook +- Support [ConvNeXt](configs/convnext), [DDOD](configs/ddod), [SOLOv2](configs/solov2) +- Support [Mask2Former](configs/mask2former) for instance segmentation +- Rename [config files of Mask2Former](configs/mask2former) + +#### Backwards incompatible changes + +- Rename [config files of Mask2Former](configs/mask2former) (#7571) + + + + + + + + + + + +
    before v2.25.0after v2.25.0
    + + - `mask2former_xxx_coco.py` represents config files for **panoptic segmentation**. + + + + - `mask2former_xxx_coco.py` represents config files for **instance segmentation**. + - `mask2former_xxx_coco-panoptic.py` represents config files for **panoptic segmentation**. + +
    + +#### New Features + +- Support [ConvNeXt](https://arxiv.org/abs/2201.03545) (#7281) +- Support [DDOD](https://arxiv.org/abs/2107.02963) (#7279) +- Support [SOLOv2](https://arxiv.org/abs/2003.10152) (#7441) +- Support [Mask2Former](https://arxiv.org/abs/2112.01527) for instance segmentation (#7571, #8032) + +#### Bug Fixes + +- Enable YOLOX training on different devices (#7912) +- Fix the log plot error when evaluation with `interval != 1` (#7784) +- Fix RuntimeError of HTC (#8083) + +#### Improvements + +- Support dedicated `WandbLogger` hook (#7459) + + Users can set + + ```python + cfg.log_config.hooks = [ + dict(type='MMDetWandbHook', + init_kwargs={'project': 'MMDetection-tutorial'}, + interval=10, + log_checkpoint=True, + log_checkpoint_metadata=True, + num_eval_images=10)] + ``` + + in the config to use `MMDetWandbHook`. Example can be found in this [colab tutorial](https://colab.research.google.com/drive/1RCSXHZwDZvakFh3eo9RuNrJbCGqD0dru?usp=sharing#scrollTo=WTEdPDRaBz2C) + +- Add `AvoidOOM` to avoid OOM (#7434, #8091) + + Try to use `AvoidCUDAOOM` to avoid GPU out of memory. It will first retry after calling `torch.cuda.empty_cache()`. If it still fails, it will then retry by converting the type of inputs to FP16 format. If it still fails, it will try to copy inputs from GPUs to CPUs to continue computing. Try AvoidOOM in code to make the code continue to run when GPU memory runs out: + + ```python + from mmdet.utils import AvoidCUDAOOM + + output = AvoidCUDAOOM.retry_if_cuda_oom(some_function)(input1, input2) + ``` + + Users can also try `AvoidCUDAOOM` as a decorator to make the code continue to run when GPU memory runs out: + + ```python + from mmdet.utils import AvoidCUDAOOM + + @AvoidCUDAOOM.retry_if_cuda_oom + def function(*args, **kwargs): + ... + return xxx + ``` + +- Support reading `gpu_collect` from `cfg.evaluation.gpu_collect` (#7672) + +- Speedup the Video Inference by Accelerating data-loading Stage (#7832) + +- Support replacing the `${key}` with the value of `cfg.key` (#7492) + +- Accelerate result analysis in `analyze_result.py`. The evaluation time is speedup by 10 ~ 15 times and only tasks 10 ~ 15 minutes now. (#7891) + +- Support to set `block_dilations` in `DilatedEncoder` (#7812) + +- Support panoptic segmentation result analysis (#7922) + +- Release DyHead with Swin-Large backbone (#7733) + +- Documentations updating and adding + + - Fix wrong default type of `act_cfg` in `SwinTransformer` (#7794) + - Fix text errors in the tutorials (#7959) + - Rewrite the [installation guide](docs/en/get_started.md) (#7897) + - [Useful hooks](docs/en/tutorials/useful_hooks.md) (#7810) + - Fix heading anchor in documentation (#8006) + - Replace `markdownlint` with `mdformat` for avoiding installing ruby (#8009) + +#### Contributors + +A total of 20 developers contributed to this release. 
+ +Thanks @ZwwWayne, @DarthThomas, @solyaH, @LutingWang, @chenxinfeng4, @Czm369, @Chenastron, @chhluo, @austinmw, @Shanyaliux @hellock, @Y-M-Y, @jbwang1997, @hhaAndroid, @Irvingao, @zhanggefan, @BIGWangYuDong, @Keiku, @PeterVennerstrom, @ayulockin + +### v2.24.0 (26/4/2022) + +#### Highlights + +- Support [Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation](https://arxiv.org/abs/2012.07177) +- Support automatically scaling LR according to GPU number and samples per GPU +- Support Class Aware Sampler that improves performance on OpenImages Dataset + +#### New Features + +- Support [Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation](https://arxiv.org/abs/2012.07177), see [example configs](configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py) (#7501) + +- Support Class Aware Sampler, users can set + + ```python + data=dict(train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))) + ``` + + in the config to use `ClassAwareSampler`. Examples can be found in [the configs of OpenImages Dataset](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py). (#7436) + +- Support automatically scaling LR according to GPU number and samples per GPU. (#7482) + In each config, there is a corresponding config of auto-scaling LR as below, + + ```python + auto_scale_lr = dict(enable=True, base_batch_size=N) + ``` + + where `N` is the batch size used for the current learning rate in the config (also equals to `samples_per_gpu` * gpu number to train this config). + By default, we set `enable=False` so that the original usages will not be affected. Users can set `enable=True` in each config or add `--auto-scale-lr` after the command line to enable this feature and should check the correctness of `base_batch_size` in customized configs. + +- Support setting dataloader arguments in config and add functions to handle config compatibility. (#7668) + The comparison between the old and new usages is as below. + + + + + + + + + + + +
    v2.23.0v2.24.0
    + + ```python + data = dict( + samples_per_gpu=64, workers_per_gpu=4, + train=dict(type='xxx', ...), + val=dict(type='xxx', samples_per_gpu=4, ...), + test=dict(type='xxx', ...), + ) + ``` + + + + ```python + # A recommended config that is clear + data = dict( + train=dict(type='xxx', ...), + val=dict(type='xxx', ...), + test=dict(type='xxx', ...), + # Use different batch size during inference. + train_dataloader=dict(samples_per_gpu=64, workers_per_gpu=4), + val_dataloader=dict(samples_per_gpu=8, workers_per_gpu=2), + test_dataloader=dict(samples_per_gpu=8, workers_per_gpu=2), + ) + + # Old style still works but allows to set more arguments about data loaders + data = dict( + samples_per_gpu=64, # only works for train_dataloader + workers_per_gpu=4, # only works for train_dataloader + train=dict(type='xxx', ...), + val=dict(type='xxx', ...), + test=dict(type='xxx', ...), + # Use different batch size during inference. + val_dataloader=dict(samples_per_gpu=8, workers_per_gpu=2), + test_dataloader=dict(samples_per_gpu=8, workers_per_gpu=2), + ) + ``` + +
    + +- Support memory profile hook. Users can use it to monitor the memory usages during training as below (#7560) + + ```python + custom_hooks = [ + dict(type='MemoryProfilerHook', interval=50) + ] + ``` + +- Support to run on PyTorch with MLU chip (#7578) + +- Support re-spliting data batch with tag (#7641) + +- Support the `DiceCost` used by [K-Net](https://arxiv.org/abs/2106.14855) in `MaskHungarianAssigner` (#7716) + +- Support splitting COCO data for Semi-supervised object detection (#7431) + +- Support Pathlib for Config.fromfile (#7685) + +- Support to use file client in OpenImages dataset (#7433) + +- Add a probability parameter to Mosaic transformation (#7371) + +- Support specifying interpolation mode in `Resize` pipeline (#7585) + +#### Bug Fixes + +- Avoid invalid bbox after deform_sampling (#7567) +- Fix the issue that argument color_theme does not take effect when exporting confusion matrix (#7701) +- Fix the `end_level` in Necks, which should be the index of the end input backbone level (#7502) +- Fix the bug that `mix_results` may be None in `MultiImageMixDataset` (#7530) +- Fix the bug in ResNet plugin when two plugins are used (#7797) + +#### Improvements + +- Enhance `load_json_logs` of analyze_logs.py for resumed training logs (#7732) +- Add argument `out_file` in image_demo.py (#7676) +- Allow mixed precision training with `SimOTAAssigner` (#7516) +- Updated INF to 100000.0 to be the same as that in the official YOLOX (#7778) +- Add documentations of: + - how to get channels of a new backbone (#7642) + - how to unfreeze the backbone network (#7570) + - how to train fast_rcnn model (#7549) + - proposals in Deformable DETR (#7690) + - from-scratch install script in get_started.md (#7575) +- Release pre-trained models of + - [Mask2Former](configs/mask2former) (#7595, #7709) + - RetinaNet with ResNet-18 and release models (#7387) + - RetinaNet with EfficientNet backbone (#7646) + +#### Contributors + +A total of 27 developers contributed to this release. +Thanks @jovialio, @zhangsanfeng2022, @HarryZJ, @jamiechoi1995, @nestiank, @PeterH0323, @RangeKing, @Y-M-Y, @mattcasey02, @weiji14, @Yulv-git, @xiefeifeihu, @FANG-MING, @meng976537406, @nijkah, @sudz123, @CCODING04, @SheffieldCao, @Czm369, @BIGWangYuDong, @zytx121, @jbwang1997, @chhluo, @jshilong, @RangiLyu, @hhaAndroid, @ZwwWayne + +### v2.23.0 (28/3/2022) + +#### Highlights + +- Support Mask2Former: [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) +- Support EfficientNet: [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) +- Support setting data root through environment variable `MMDET_DATASETS`, users don't have to modify the corresponding path in config files anymore. +- Find a good recipe for fine-tuning high precision ResNet backbone pre-trained by Torchvision. + +#### New Features + +- Support [Mask2Former](configs/mask2former)(#6938)(#7466)(#7471) +- Support [EfficientNet](configs/efficientnet) (#7514) +- Support setting data root through environment variable `MMDET_DATASETS`, users don't have to modify the corresponding path in config files anymore. 
(#7386) +- Support setting different seeds to different ranks (#7432) +- Update the `dist_train.sh` so that the script can be used to support launching multi-node training on machines without slurm (#7415) +- Find a good recipe for fine-tuning high precision ResNet backbone pre-trained by Torchvision (#7489) + +#### Bug Fixes + +- Fix bug in VOC unit test which removes the data directory (#7270) +- Adjust the order of `get_classes` and `FileClient` (#7276) +- Force the inputs of `get_bboxes` in yolox_head to float32 (#7324) +- Fix misplaced arguments in LoadPanopticAnnotations (#7388) +- Fix reduction=mean in CELoss. (#7449) +- Update unit test of CrossEntropyCost (#7537) +- Fix memory leaking in panpotic segmentation evaluation (#7538) +- Fix the bug of shape broadcast in YOLOv3 (#7551) + +#### Improvements + +- Add Chinese version of onnx2tensorrt.md (#7219) +- Update colab tutorials (#7310) +- Update information about Localization Distillation (#7350) +- Add Chinese version of `finetune.md` (#7178) +- Update YOLOX log for non square input (#7235) +- Add `nproc` in `coco_panoptic.py` for panoptic quality computing (#7315) +- Allow to set channel_order in LoadImageFromFile (#7258) +- Take point sample related functions out of mask_point_head (#7353) +- Add instance evaluation for coco_panoptic (#7313) +- Enhance the robustness of analyze_logs.py (#7407) +- Supplementary notes of sync_random_seed (#7440) +- Update docstring of cross entropy loss (#7472) +- Update pascal voc result (#7503) +- We create How-to documentation to record any questions about How to xxx. In this version, we added + - How to use Mosaic augmentation (#7507) + - How to use backbone in mmcls (#7438) + - How to produce and submit the prediction results of panoptic segmentation models on COCO test-dev set (#7430)) + +#### Contributors + +A total of 27 developers contributed to this release. 
+Thanks @ZwwWayne, @haofanwang, @shinya7y, @chhluo, @yangrisheng, @triple-Mu, @jbwang1997, @HikariTJU, @imflash217, @274869388, @zytx121, @matrixgame2018, @jamiechoi1995, @BIGWangYuDong, @JingweiZhang12, @Xiangxu-0103, @hhaAndroid, @jshilong, @osbm, @ceroytres, @bunge-bedstraw-herb, @Youth-Got, @daavoo, @jiangyitong, @RangiLyu, @CCODING04, @yarkable + +### v2.22.0 (24/2/2022) + +#### Highlights + +- Support MaskFormer: [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) (#7212) +- Support DyHead: [Dynamic Head: Unifying Object Detection Heads with Attentions](https://arxiv.org/abs/2106.08322) (#6823) +- Release a good recipe of using ResNet in object detectors pre-trained by [ResNet Strikes Back](https://arxiv.org/abs/2110.00476), which consistently brings about 3~4 mAP improvements over RetinaNet, Faster/Mask/Cascade Mask R-CNN (#7001) +- Support [Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html) (#6331) +- Support TIMM backbone: [PyTorch Image Models](https://github.com/rwightman/pytorch-image-models) (#7020) + +#### New Features + +- Support [MaskFormer](configs/maskformer) (#7212) +- Support [DyHead](configs/dyhead) (#6823) +- Support [ResNet Strikes Back](configs/resnet_strikes_back) (#7001) +- Support [OpenImages Dataset](configs/openimages) (#6331) +- Support [TIMM backbone](configs/timm_example) (#7020) +- Support visualization for Panoptic Segmentation (#7041) + +#### Breaking Changes + +In order to support the visualization for Panoptic Segmentation, the `num_classes` can not be `None` when using the `get_palette` function to determine whether to use the panoptic palette. + +#### Bug Fixes + +- Fix bug for the best checkpoints can not be saved when the `key_score` is None (#7101) +- Fix MixUp transform filter boxes failing case (#7080) +- Add missing properties in SABLHead (#7091) +- Fix bug when NaNs exist in confusion matrix (#7147) +- Fix PALETTE AttributeError in downstream task (#7230) + +#### Improvements + +- Speed up SimOTA matching (#7098) +- Add Chinese translation of `docs_zh-CN/tutorials/init_cfg.md` (#7188) + +#### Contributors + +A total of 20 developers contributed to this release. +Thanks @ZwwWayne, @hhaAndroid, @RangiLyu, @AronLin, @BIGWangYuDong, @jbwang1997, @zytx121, @chhluo, @shinya7y, @LuooChen, @dvansa, @siatwangmin, @del-zhenwu, @vikashranjan26, @haofanwang, @jamiechoi1995, @HJoonKwon, @yarkable, @zhijian-liu, @RangeKing + +### v2.21.0 (8/2/2022) + +### Breaking Changes + +To standardize the contents in config READMEs and meta files of OpenMMLab projects, the READMEs and meta files in each config directory have been significantly changed. The template will be released in the future, for now, you can refer to the examples of README for [algorithm](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/README.md), [dataset](https://github.com/open-mmlab/mmdetection/blob/master/configs/deepfashion/README.md) and [backbone](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet/README.md). To align with the standard, the configs in dcn are put into to two directories named `dcn` and `dcnv2`. 
+ +#### New Features + +- Allow to customize colors of different classes during visualization (#6716) +- Support CPU training (#7016) +- Add download script of COCO, LVIS, and VOC dataset (#7015) + +#### Bug Fixes + +- Fix weight conversion issue of RetinaNet with Swin-S (#6973) +- Update `__repr__` of `Compose` (#6951) +- Fix BadZipFile Error when build docker (#6966) +- Fix bug in non-distributed multi-gpu training/testing (#7019) +- Fix bbox clamp in PyTorch 1.10 (#7074) +- Relax the requirement of PALETTE in dataset wrappers (#7085) +- Keep the same weights before reassign in the PAA head (#7032) +- Update code demo in doc (#7092) + +#### Improvements + +- Speed-up training by allow to set variables of multi-processing (#6974, #7036) +- Add links of Chinese tutorials in readme (#6897) +- Disable cv2 multiprocessing by default for acceleration (#6867) +- Deprecate the support for "python setup.py test" (#6998) +- Re-organize metafiles and config readmes (#7051) +- Fix None grad problem during training TOOD by adding `SigmoidGeometricMean` (#7090) + +#### Contributors + +A total of 26 developers contributed to this release. +Thanks @del-zhenwu, @zimoqingfeng, @srishilesh, @imyhxy, @jenhaoyang, @jliu-ac, @kimnamu, @ShengliLiu, @garvan2021, @ciusji, @DIYer22, @kimnamu, @q3394101, @zhouzaida, @gaotongxiao, @topsy404, @AntoAndGar, @jbwang1997, @nijkah, @ZwwWayne, @Czm369, @jshilong, @RangiLyu, @BIGWangYuDong, @hhaAndroid, @AronLin + +### v2.20.0 (27/12/2021) + +#### New Features + +- Support [TOOD](configs/tood/README.md): Task-aligned One-stage Object Detection (ICCV 2021 Oral) (#6746) +- Support resuming from the latest checkpoint automatically (#6727) + +#### Bug Fixes + +- Fix wrong bbox `loss_weight` of the PAA head (#6744) +- Fix the padding value of `gt_semantic_seg` in batch collating (#6837) +- Fix test error of lvis when using `classwise` (#6845) +- Avoid BC-breaking of `get_local_path` (#6719) +- Fix bug in `sync_norm_hook` when the BN layer does not exist (#6852) +- Use pycocotools directly no matter what platform it is (#6838) + +#### Improvements + +- Add unit test for SimOTA with no valid bbox (#6770) +- Use precommit to check readme (#6802) +- Support selecting GPU-ids in non-distributed testing time (#6781) + +#### Contributors + +A total of 16 developers contributed to this release. +Thanks @ZwwWayne, @Czm369, @jshilong, @RangiLyu, @BIGWangYuDong, @hhaAndroid, @jamiechoi1995, @AronLin, @Keiku, @gkagkos, @fcakyon, @www516717402, @vansin, @zactodd, @kimnamu, @jenhaoyang + +### v2.19.1 (14/12/2021) + +#### New Features + +- Release [YOLOX](configs/yolox/README.md) COCO pretrained models (#6698) + +#### Bug Fixes + +- Fix DCN initialization in DenseHead (#6625) +- Fix initialization of ConvFCHead (#6624) +- Fix PseudoSampler in RCNN (#6622) +- Fix weight initialization in Swin and PVT (#6663) +- Fix dtype bug in BaseDenseHead (#6767) +- Fix SimOTA with no valid bbox (#6733) + +#### Improvements + +- Add an example of combining swin and one-stage models (#6621) +- Add `get_ann_info` to dataset_wrappers (#6526) +- Support keeping image ratio in the multi-scale training of YOLOX (#6732) +- Support `bbox_clip_border` for the augmentations of YOLOX (#6730) + +#### Documents + +- Update metafile (#6717) +- Add mmhuman3d in readme (#6699) +- Update FAQ docs (#6587) +- Add doc for `detect_anomalous_params` (#6697) + +#### Contributors + +A total of 11 developers contributed to this release. 
+Thanks @ZwwWayne, @LJoson, @Czm369, @jshilong, @ZCMax, @RangiLyu, @BIGWangYuDong, @hhaAndroid, @zhaoxin111, @GT9505, @shinya7y + +### v2.19.0 (29/11/2021) + +#### Highlights + +- Support [Label Assignment Distillation](https://arxiv.org/abs/2108.10520) +- Support `persistent_workers` for Pytorch >= 1.7 +- Align accuracy to the updated official YOLOX + +#### New Features + +- Support [Label Assignment Distillation](https://arxiv.org/abs/2108.10520) (#6342) +- Support `persistent_workers` for Pytorch >= 1.7 (#6435) + +#### Bug Fixes + +- Fix repeatedly output warning message (#6584) +- Avoid infinite GPU waiting in dist training (#6501) +- Fix SSD512 config error (#6574) +- Fix MMDetection model to ONNX command (#6558) + +#### Improvements + +- Refactor configs of FP16 models (#6592) +- Align accuracy to the updated official YOLOX (#6443) +- Speed up training and reduce memory cost when using PhotoMetricDistortion. (#6442) +- Make OHEM work with seesaw loss (#6514) + +#### Documents + +- Update README.md (#6567) + +#### Contributors + +A total of 11 developers contributed to this release. +Thanks @FloydHsiu, @RangiLyu, @ZwwWayne, @AndreaPi, @st9007a, @hachreak, @BIGWangYuDong, @hhaAndroid, @AronLin, @chhluo, @vealocia, @HarborYuan, @st9007a, @jshilong + +### v2.18.1 (15/11/2021) + +#### Highlights + +- Release [QueryInst](http://arxiv.org/abs/2105.01928) pre-trained weights (#6460) +- Support plot confusion matrix (#6344) + +#### New Features + +- Release [QueryInst](http://arxiv.org/abs/2105.01928) pre-trained weights (#6460) +- Support plot confusion matrix (#6344) + +#### Bug Fixes + +- Fix aug test error when the number of prediction bboxes is 0 (#6398) +- Fix SpatialReductionAttention in PVT (#6488) +- Fix wrong use of `trunc_normal_init` in PVT and Swin-Transformer (#6432) + +#### Improvements + +- Save the printed AP information of COCO API to logger (#6505) +- Always map location to cpu when load checkpoint (#6405) +- Set a random seed when the user does not set a seed (#6457) + +#### Documents + +- Chinese version of [Corruption Benchmarking](robustness_benchmarking.md) (#6375) +- Fix config path in docs (#6396) +- Update GRoIE readme (#6401) + +#### Contributors + +A total of 11 developers contributed to this release. 
+Thanks @st9007a, @hachreak, @HarborYuan, @vealocia, @chhluo, @AndreaPi, @AronLin, @BIGWangYuDong, @hhaAndroid, @RangiLyu, @ZwwWayne + +### v2.18.0 (27/10/2021) + +#### Highlights + +- Support [QueryInst](http://arxiv.org/abs/2105.01928) (#6050) +- Refactor dense heads to decouple onnx export logics from `get_bboxes` and speed up inference (#5317, #6003, #6369, #6268, #6315) + +#### New Features + +- Support [QueryInst](http://arxiv.org/abs/2105.01928) (#6050) +- Support infinite sampler (#5996) + +#### Bug Fixes + +- Fix init_weight in fcn_mask_head (#6378) +- Fix type error in imshow_bboxes of RPN (#6386) +- Fix broken colab link in MMDetection Tutorial (#6382) +- Make sure the device and dtype of scale_factor are the same as bboxes (#6374) +- Remove sampling hardcode (#6317) +- Fix RandomAffine bbox coordinate recorrection (#6293) +- Fix init bug of final cls/reg layer in convfc head (#6279) +- Fix img_shape broken in auto_augment (#6259) +- Fix kwargs parameter missing error in two_stage (#6256) + +#### Improvements + +- Unify the interface of stuff head and panoptic head (#6308) +- Polish readme (#6243) +- Add code-spell pre-commit hook and fix a typo (#6306) +- Fix typo (#6245, #6190) +- Fix sampler unit test (#6284) +- Fix `forward_dummy` of YOLACT to enable `get_flops` (#6079) +- Fix link error in the config documentation (#6252) +- Adjust the order to beautify the document (#6195) + +#### Refactors + +- Refactor one-stage get_bboxes logic (#5317) +- Refactor ONNX export of One-Stage models (#6003, #6369) +- Refactor dense_head and speedup (#6268) +- Migrate to use prior_generator in training of dense heads (#6315) + +#### Contributors + +A total of 18 developers contributed to this release. +Thanks @Boyden, @onnkeat, @st9007a, @vealocia, @yhcao6, @DapangpangX, @yellowdolphin, @cclauss, @kennymckormick, +@pingguokiller, @collinzrj, @AndreaPi, @AronLin, @BIGWangYuDong, @hhaAndroid, @jshilong, @RangiLyu, @ZwwWayne + +### v2.17.0 (28/9/2021) + +#### Highlights + +- Support [PVT](https://arxiv.org/abs/2102.12122) and [PVTv2](https://arxiv.org/abs/2106.13797) +- Support [SOLO](https://arxiv.org/abs/1912.04488) +- Support large scale jittering and New Mask R-CNN baselines +- Speed up `YOLOv3` inference + +#### New Features + +- Support [PVT](https://arxiv.org/abs/2102.12122) and [PVTv2](https://arxiv.org/abs/2106.13797) (#5780) +- Support [SOLO](https://arxiv.org/abs/1912.04488) (#5832) +- Support large scale jittering and New Mask R-CNN baselines (#6132) +- Add a general data structrue for the results of models (#5508) +- Added a base class for one-stage instance segmentation (#5904) +- Speed up `YOLOv3` inference (#5991) +- Release Swin Transformer pre-trained models (#6100) +- Support mixed precision training in `YOLOX` (#5983) +- Support `val` workflow in `YOLACT` (#5986) +- Add script to test `torchserve` (#5936) +- Support `onnxsim` with dynamic input shape (#6117) + +#### Bug Fixes + +- Fix the function naming errors in `model_wrappers` (#5975) +- Fix regression loss bug when the input is an empty tensor (#5976) +- Fix scores not contiguous error in `centernet_head` (#6016) +- Fix missing parameters bug in `imshow_bboxes` (#6034) +- Fix bug in `aug_test` of `HTC` when the length of `det_bboxes` is 0 (#6088) +- Fix empty proposal errors in the training of some two-stage models (#5941) +- Fix `dynamic_axes` parameter error in `ONNX` dynamic shape export (#6104) +- Fix `dynamic_shape` bug of `SyncRandomSizeHook` (#6144) +- Fix the Swin Transformer config link error in the 
configuration (#6172) + +#### Improvements + +- Add filter rules in `Mosaic` transform (#5897) +- Add size divisor in get flops to avoid some potential bugs (#6076) +- Add Chinese translation of `docs_zh-CN/tutorials/customize_dataset.md` (#5915) +- Add Chinese translation of `conventions.md` (#5825) +- Add description of the output of data pipeline (#5886) +- Add dataset information in the README file for `PanopticFPN` (#5996) +- Add `extra_repr` for `DropBlock` layer to get details in the model printing (#6140) +- Fix CI out of memory and add PyTorch1.9 Python3.9 unit tests (#5862) +- Fix download links error of some model (#6069) +- Improve the generalization of XML dataset (#5943) +- Polish assertion error messages (#6017) +- Remove `opencv-python-headless` dependency by `albumentations` (#5868) +- Check dtype in transform unit tests (#5969) +- Replace the default theme of documentation with PyTorch Sphinx Theme (#6146) +- Update the paper and code fields in the metafile (#6043) +- Support to customize padding value of segmentation map (#6152) +- Support to resize multiple segmentation maps (#5747) + +#### Contributors + +A total of 24 developers contributed to this release. +Thanks @morkovka1337, @HarborYuan, @guillaumefrd, @guigarfr, @www516717402, @gaotongxiao, @ypwhs, @MartaYang, @shinya7y, @justiceeem, @zhaojinjian0000, @VVsssssk, @aravind-anantha, @wangbo-zhao, @czczup, @whai362, @czczup, @marijnl, @AronLin, @BIGWangYuDong, @hhaAndroid, @jshilong, @RangiLyu, @ZwwWayne + +### v2.16.0 (30/8/2021) + +#### Highlights + +- Support [Panoptic FPN](https://arxiv.org/abs/1901.02446) and [Swin Transformer](https://arxiv.org/abs/2103.14030) + +#### New Features + +- Support [Panoptic FPN](https://arxiv.org/abs/1901.02446) and release models (#5577, #5902) +- Support Swin Transformer backbone (#5748) +- Release RetinaNet models pre-trained with multi-scale 3x schedule (#5636) +- Add script to convert unlabeled image list to coco format (#5643) +- Add hook to check whether the loss value is valid (#5674) +- Add YOLO anchor optimizing tool (#5644) +- Support export onnx models without post process. (#5851) +- Support classwise evaluation in CocoPanopticDataset (#5896) +- Adapt browse_dataset for concatenated datasets. (#5935) +- Add `PatchEmbed` and `PatchMerging` with `AdaptivePadding` (#5952) + +#### Bug Fixes + +- Fix unit tests of YOLOX (#5859) +- Fix lose randomness in `imshow_det_bboxes` (#5845) +- Make output result of `ImageToTensor` contiguous (#5756) +- Fix inference bug when calling `regress_by_class` in RoIHead in some cases (#5884) +- Fix bug in CIoU loss where alpha should not have gradient. (#5835) +- Fix the bug that `multiscale_output` is defined but not used in HRNet (#5887) +- Set the priority of EvalHook to LOW. (#5882) +- Fix a YOLOX bug when applying bbox rescaling in test mode (#5899) +- Fix mosaic coordinate error (#5947) +- Fix dtype of bbox in RandomAffine. (#5930) + +#### Improvements + +- Add Chinese version of `data_pipeline` and (#5662) +- Support to remove state dicts of EMA when publishing models. (#5858) +- Refactor the loss function in HTC and SCNet (#5881) +- Use warnings instead of logger.warning (#5540) +- Use legacy coordinate in metric of VOC (#5627) +- Add Chinese version of customize_losses (#5826) +- Add Chinese version of model_zoo (#5827) + +#### Contributors + +A total of 19 developers contributed to this release. 
+Thanks @ypwhs, @zywvvd, @collinzrj, @OceanPang, @ddonatien, @@haotian-liu, @viibridges, @Muyun99, @guigarfr, @zhaojinjian0000, @jbwang1997,@wangbo-zhao, @xvjiarui, @RangiLyu, @jshilong, @AronLin, @BIGWangYuDong, @hhaAndroid, @ZwwWayne + +### v2.15.1 (11/8/2021) + +#### Highlights + +- Support [YOLOX](https://arxiv.org/abs/2107.08430) + +#### New Features + +- Support [YOLOX](https://arxiv.org/abs/2107.08430)(#5756, #5758, #5760, #5767, #5770, #5774, #5777, #5808, #5828, #5848) + +#### Bug Fixes + +- Update correct SSD models. (#5789) +- Fix casting error in mask structure (#5820) +- Fix MMCV deployment documentation links. (#5790) + +#### Improvements + +- Use dynamic MMCV download link in TorchServe dockerfile (#5779) +- Rename the function `upsample_like` to `interpolate_as` for more general usage (#5788) + +#### Contributors + +A total of 14 developers contributed to this release. +Thanks @HAOCHENYE, @xiaohu2015, @HsLOL, @zhiqwang, @Adamdad, @shinya7y, @Johnson-Wang, @RangiLyu, @jshilong, @mmeendez8, @AronLin, @BIGWangYuDong, @hhaAndroid, @ZwwWayne + +### v2.15.0 (02/8/2021) + +#### Highlights + +- Support adding [MIM](https://github.com/open-mmlab/mim) dependencies during pip installation +- Support MobileNetV2 for SSD-Lite and YOLOv3 +- Support Chinese Documentation + +#### New Features + +- Add function `upsample_like` (#5732) +- Support to output pdf and epub format documentation (#5738) +- Support and release Cascade Mask R-CNN 3x pre-trained models (#5645) +- Add `ignore_index` to CrossEntropyLoss (#5646) +- Support adding [MIM](https://github.com/open-mmlab/mim) dependencies during pip installation (#5676) +- Add MobileNetV2 config and models for YOLOv3 (#5510) +- Support COCO Panoptic Dataset (#5231) +- Support ONNX export of cascade models (#5486) +- Support DropBlock with RetinaNet (#5544) +- Support MobileNetV2 SSD-Lite (#5526) + +#### Bug Fixes + +- Fix the device of label in multiclass_nms (#5673) +- Fix error of backbone initialization from pre-trained checkpoint in config file (#5603, #5550) +- Fix download links of RegNet pretrained weights (#5655) +- Fix two-stage runtime error given empty proposal (#5559) +- Fix flops count error in DETR (#5654) +- Fix unittest for `NumClassCheckHook` when it is not used. (#5626) +- Fix description bug of using custom dataset (#5546) +- Fix bug of `multiclass_nms` that returns the global indices (#5592) +- Fix `valid_mask` logic error in RPNHead (#5562) +- Fix unit test error of pretrained configs (#5561) +- Fix typo error in anchor_head.py (#5555) +- Fix bug when using dataset wrappers (#5552) +- Fix a typo error in demo/MMDet_Tutorial.ipynb (#5511) +- Fixing crash in `get_root_logger` when `cfg.log_level` is not None (#5521) +- Fix docker version (#5502) +- Fix optimizer parameter error when using `IterBasedRunner` (#5490) + +#### Improvements + +- Add unit tests for MMTracking (#5620) +- Add Chinese translation of documentation (#5718, #5618, #5558, #5423, #5593, #5421, #5408. #5369, #5419, #5530, #5531) +- Update resource limit (#5697) +- Update docstring for InstaBoost (#5640) +- Support key `reduction_override` in all loss functions (#5515) +- Use repeatdataset to accelerate CenterNet training (#5509) +- Remove unnecessary code in autoassign (#5519) +- Add documentation about `init_cfg` (#5273) + +#### Contributors + +A total of 18 developers contributed to this release. 
+Thanks @OceanPang, @AronLin, @hellock, @Outsider565, @RangiLyu, @ElectronicElephant, @likyoo, @BIGWangYuDong, @hhaAndroid, @noobying, @yyz561, @likyoo, +@zeakey, @ZwwWayne, @ChenyangLiu, @johnson-magic, @qingswu, @BuxianChen + +### v2.14.0 (29/6/2021) + +#### Highlights + +- Add `simple_test` to dense heads to improve the consistency of single-stage and two-stage detectors +- Revert the `test_mixins` to single image test to improve efficiency and readability +- Add Faster R-CNN and Mask R-CNN config using multi-scale training with 3x schedule + +#### New Features + +- Support pretrained models from MoCo v2 and SwAV (#5286) +- Add Faster R-CNN and Mask R-CNN config using multi-scale training with 3x schedule (#5179, #5233) +- Add `reduction_override` in MSELoss (#5437) +- Stable support of exporting DETR to ONNX with dynamic shapes and batch inference (#5168) +- Stable support of exporting PointRend to ONNX with dynamic shapes and batch inference (#5440) + +#### Bug Fixes + +- Fix size mismatch bug in `multiclass_nms` (#4980) +- Fix the import path of `MultiScaleDeformableAttention` (#5338) +- Fix errors in config of GCNet ResNext101 models (#5360) +- Fix Grid-RCNN error when there is no bbox result (#5357) +- Fix errors in `onnx_export` of bbox_head when setting reg_class_agnostic (#5468) +- Fix type error of AutoAssign in the document (#5478) +- Fix web links ending with `.md` (#5315) + +#### Improvements + +- Add `simple_test` to dense heads to improve the consistency of single-stage and two-stage detectors (#5264) +- Add support for mask diagonal flip in TTA (#5403) +- Revert the `test_mixins` to single image test to improve efficiency and readability (#5249) +- Make YOLOv3 Neck more flexible (#5218) +- Refactor SSD to make it more general (#5291) +- Refactor `anchor_generator` and `point_generator` (#5349) +- Allow to configure out the `mask_head` of the HTC algorithm (#5389) +- Delete deprecated warning in FPN (#5311) +- Move `model.pretrained` to `model.backbone.init_cfg` (#5370) +- Make deployment tools more friendly to use (#5280) +- Clarify installation documentation (#5316) +- Add ImageNet Pretrained Models docs (#5268) +- Add FAQ about training loss=nan solution and COCO AP or AR =-1 (# 5312, #5313) +- Change all weight links of http to https (#5328) + +### v2.13.0 (01/6/2021) + +#### Highlights + +- Support new methods: [CenterNet](https://arxiv.org/abs/1904.07850), [Seesaw Loss](https://arxiv.org/abs/2008.10032), [MobileNetV2](https://arxiv.org/abs/1801.04381) + +#### New Features + +- Support paper [Objects as Points](https://arxiv.org/abs/1904.07850) (#4602) +- Support paper [Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)](https://arxiv.org/abs/2008.10032) (#5128) +- Support [MobileNetV2](https://arxiv.org/abs/1801.04381) backbone and inverted residual block (#5122) +- Support [MIM](https://github.com/open-mmlab/mim) (#5143) +- ONNX exportation with dynamic shapes of CornerNet (#5136) +- Add `mask_soft` config option to allow non-binary masks (#4615) +- Add PWC metafile (#5135) + +#### Bug Fixes + +- Fix YOLOv3 FP16 training error (#5172) +- Fix Cacscade R-CNN TTA test error when `det_bboxes` length is 0 (#5221) +- Fix `iou_thr` variable naming errors in VOC recall calculation function (#5195) +- Fix Faster R-CNN performance dropped in ONNX Runtime (#5197) +- Fix DETR dict changed error when using python 3.8 during iteration (#5226) + +#### Improvements + +- Refactor ONNX export of two stage detector (#5205) +- Replace MMDetection's EvalHook with MMCV's 
EvalHook for consistency (#4806) +- Update RoI extractor for ONNX (#5194) +- Use better parameter initialization in YOLOv3 head for higher performance (#5181) +- Release new DCN models of Mask R-CNN by mixed-precision training (#5201) +- Update YOLOv3 model weights (#5229) +- Add DetectoRS ResNet-101 model weights (#4960) +- Discard bboxes with sizes equals to `min_bbox_size` (#5011) +- Remove duplicated code in DETR head (#5129) +- Remove unnecessary object in class definition (#5180) +- Fix doc link (#5192) + +### v2.12.0 (01/5/2021) + +#### Highlights + +- Support new methods: [AutoAssign](https://arxiv.org/abs/2007.03496), [YOLOF](https://arxiv.org/abs/2103.09460), and [Deformable DETR](https://arxiv.org/abs/2010.04159) +- Stable support of exporting models to ONNX with batched images and dynamic shape (#5039) + +#### Backwards Incompatible Changes + +MMDetection is going through big refactoring for more general and convenient usages during the releases from v2.12.0 to v2.15.0 (maybe longer). +In v2.12.0 MMDetection inevitably brings some BC-breakings, including the MMCV dependency, model initialization, model registry, and mask AP evaluation. + +- MMCV version. MMDetection v2.12.0 relies on the newest features in MMCV 1.3.3, including `BaseModule` for unified parameter initialization, model registry, and the CUDA operator `MultiScaleDeformableAttn` for [Deformable DETR](https://arxiv.org/abs/2010.04159). Note that MMCV 1.3.2 already contains all the features used by MMDet but has known issues. Therefore, we recommend users skip MMCV v1.3.2 and use v1.3.3, though v1.3.2 might work for most cases. +- Unified model initialization (#4750). To unify the parameter initialization in OpenMMLab projects, MMCV supports `BaseModule` that accepts `init_cfg` to allow the modules' parameters initialized in a flexible and unified manner. Now the users need to explicitly call `model.init_weights()` in the training script to initialize the model (as in [here](https://github.com/open-mmlab/mmdetection/blob/master/tools/train.py#L162), previously this was handled by the detector. The models in MMDetection have been re-benchmarked to ensure accuracy based on PR #4750. __The downstream projects should update their code accordingly to use MMDetection v2.12.0__. +- Unified model registry (#5059). To easily use backbones implemented in other OpenMMLab projects, MMDetection migrates to inherit the model registry created in MMCV (#760). In this way, as long as the backbone is supported in an OpenMMLab project and that project also uses the registry in MMCV, users can use that backbone in MMDetection by simply modifying the config without copying the code of that backbone into MMDetection. +- Mask AP evaluation (#4898). Previous versions calculate the areas of masks through the bounding boxes when calculating the mask AP of small, medium, and large instances. To indeed use the areas of masks, we pop the key `bbox` during mask AP calculation. This change does not affect the overall mask AP evaluation and aligns the mask AP of similar models in other projects like Detectron2. 
+ +#### New Features + +- Support paper [AutoAssign: Differentiable Label Assignment for Dense Object Detection](https://arxiv.org/abs/2007.03496) (#4295) +- Support paper [You Only Look One-level Feature](https://arxiv.org/abs/2103.09460) (#4295) +- Support paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) (#4778) +- Support calculating IoU with FP16 tensor in `bbox_overlaps` to save memory and keep speed (#4889) +- Add `__repr__` in custom dataset to count the number of instances (#4756) +- Add windows support by updating requirements.txt (#5052) +- Stable support of exporting models to ONNX with batched images and dynamic shape, including SSD, FSAF,FCOS, YOLOv3, RetinaNet, Faster R-CNN, and Mask R-CNN (#5039) + +#### Improvements + +- Use MMCV `MODEL_REGISTRY` (#5059) +- Unified parameter initialization for more flexible usage (#4750) +- Rename variable names and fix docstring in anchor head (#4883) +- Support training with empty GT in Cascade RPN (#4928) +- Add more details of usage of `test_robustness` in documentation (#4917) +- Changing to use `pycocotools` instead of `mmpycocotools` to fully support Detectron2 and MMDetection in one environment (#4939) +- Update torch serve dockerfile to support dockers of more versions (#4954) +- Add check for training with single class dataset (#4973) +- Refactor transformer and DETR Head (#4763) +- Update FPG model zoo (#5079) +- More accurate mask AP of small/medium/large instances (#4898) + +#### Bug Fixes + +- Fix bug in mean_ap.py when calculating mAP by 11 points (#4875) +- Fix error when key `meta` is not in old checkpoints (#4936) +- Fix hanging bug when training with empty GT in VFNet, GFL, and FCOS by changing the place of `reduce_mean` (#4923, #4978, #5058) +- Fix asyncronized inference error and provide related demo (#4941) +- Fix IoU losses dimensionality unmatch error (#4982) +- Fix torch.randperm whtn using PyTorch 1.8 (#5014) +- Fix empty bbox error in `mask_head` when using CARAFE (#5062) +- Fix `supplement_mask` bug when there are zero-size RoIs (#5065) +- Fix testing with empty rois in RoI Heads (#5081) + +### v2.11.0 (01/4/2021) + +__Highlights__ + +- Support new method: [Localization Distillation for Object Detection](https://arxiv.org/pdf/2102.12252.pdf) +- Support Pytorch2ONNX with batch inference and dynamic shape + +__New Features__ + +- Support [Localization Distillation for Object Detection](https://arxiv.org/pdf/2102.12252.pdf) (#4758) +- Support Pytorch2ONNX with batch inference and dynamic shape for Faster-RCNN and mainstream one-stage detectors (#4796) + +__Improvements__ + +- Support batch inference in head of RetinaNet (#4699) +- Add batch dimension in second stage of Faster-RCNN (#4785) +- Support batch inference in bbox coder (#4721) +- Add check for `ann_ids` in `COCODataset` to ensure it is unique (#4789) +- support for showing the FPN results (#4716) +- support dynamic shape for grid_anchor (#4684) +- Move pycocotools version check to when it is used (#4880) + +__Bug Fixes__ + +- Fix a bug of TridentNet when doing the batch inference (#4717) +- Fix a bug of Pytorch2ONNX in FASF (#4735) +- Fix a bug when show the image with float type (#4732) + +### v2.10.0 (01/03/2021) + +#### Highlights + +- Support new methods: [FPG](https://arxiv.org/abs/2004.03580) +- Support ONNX2TensorRT for SSD, FSAF, FCOS, YOLOv3, and Faster R-CNN. 
+ +#### New Features + +- Support ONNX2TensorRT for SSD, FSAF, FCOS, YOLOv3, and Faster R-CNN (#4569) +- Support [Feature Pyramid Grids (FPG)](https://arxiv.org/abs/2004.03580) (#4645) +- Support video demo (#4420) +- Add seed option for sampler (#4665) +- Support to customize type of runner (#4570, #4669) +- Support synchronizing BN buffer in `EvalHook` (#4582) +- Add script for GIF demo (#4573) + +#### Bug Fixes + +- Fix ConfigDict AttributeError and add Colab link (#4643) +- Avoid crash in empty gt training of GFL head (#4631) +- Fix `iou_thrs` bug in RPN evaluation (#4581) +- Fix syntax error of config when upgrading model version (#4584) + +#### Improvements + +- Refactor unit test file structures (#4600) +- Refactor nms config (#4636) +- Get loading pipeline by checking the class directly rather than through config strings (#4619) +- Add doctests for mask target generation and mask structures (#4614) +- Use deep copy when copying pipeline arguments (#4621) +- Update documentations (#4642, #4650, #4620, #4630) +- Remove redundant code calling `import_modules_from_strings` (#4601) +- Clean deprecated FP16 API (#4571) +- Check whether `CLASSES` is correctly initialized in the initialization of `XMLDataset` (#4555) +- Support batch inference in the inference API (#4462, #4526) +- Clean deprecated warning and fix 'meta' error (#4695) + +### v2.9.0 (01/02/2021) + +#### Highlights + +- Support new methods: [SCNet](https://arxiv.org/abs/2012.10150), [Sparse R-CNN](https://arxiv.org/abs/2011.12450) +- Move `train_cfg` and `test_cfg` into model in configs +- Support to visualize results based on prediction quality + +#### New Features + +- Support [SCNet](https://arxiv.org/abs/2012.10150) (#4356) +- Support [Sparse R-CNN](https://arxiv.org/abs/2011.12450) (#4219) +- Support evaluate mAP by multiple IoUs (#4398) +- Support concatenate dataset for testing (#4452) +- Support to visualize results based on prediction quality (#4441) +- Add ONNX simplify option to Pytorch2ONNX script (#4468) +- Add hook for checking compatibility of class numbers in heads and datasets (#4508) + +#### Bug Fixes + +- Fix CPU inference bug of Cascade RPN (#4410) +- Fix NMS error of CornerNet when there is no prediction box (#4409) +- Fix TypeError in CornerNet inference (#4411) +- Fix bug of PAA when training with background images (#4391) +- Fix the error that the window data is not destroyed when `out_file is not None` and `show==False` (#4442) +- Fix order of NMS `score_factor` that will decrease the performance of YOLOv3 (#4473) +- Fix bug in HTC TTA when the number of detection boxes is 0 (#4516) +- Fix resize error in mask data structures (#4520) + +#### Improvements + +- Allow to customize classes in LVIS dataset (#4382) +- Add tutorials for building new models with existing datasets (#4396) +- Add CPU compatibility information in documentation (#4405) +- Add documentation of deprecated `ImageToTensor` for batch inference (#4408) +- Add more details in documentation for customizing dataset (#4430) +- Switch `imshow_det_bboxes` visualization backend from OpenCV to Matplotlib (#4389) +- Deprecate `ImageToTensor` in `image_demo.py` (#4400) +- Move train_cfg/test_cfg into model (#4347, #4489) +- Update docstring for `reg_decoded_bbox` option in bbox heads (#4467) +- Update dataset information in documentation (#4525) +- Release pre-trained R50 and R101 PAA detectors with multi-scale 3x training schedules (#4495) +- Add guidance for speed benchmark (#4537) + +### v2.8.0 (04/01/2021) + +#### Highlights + +- Support 
new methods: [Cascade RPN](https://arxiv.org/abs/1909.06720), [TridentNet](https://arxiv.org/abs/1901.01892) + +#### New Features + +- Support [Cascade RPN](https://arxiv.org/abs/1909.06720) (#1900) +- Support [TridentNet](https://arxiv.org/abs/1901.01892) (#3313) + +#### Bug Fixes + +- Fix bug of show result in async_benchmark (#4367) +- Fix scale factor in MaskTestMixin (#4366) +- Fix but when returning indices in `multiclass_nms` (#4362) +- Fix bug of empirical attention in resnext backbone error (#4300) +- Fix bug of `img_norm_cfg` in FCOS-HRNet models with updated performance and models (#4250) +- Fix invalid checkpoint and log in Mask R-CNN models on Cityscapes dataset (#4287) +- Fix bug in distributed sampler when dataset is too small (#4257) +- Fix bug of 'PAFPN has no attribute extra_convs_on_inputs' (#4235) + +#### Improvements + +- Update model url from aws to aliyun (#4349) +- Update ATSS for PyTorch 1.6+ (#4359) +- Update script to install ruby in pre-commit installation (#4360) +- Delete deprecated `mmdet.ops` (#4325) +- Refactor hungarian assigner for more general usage in Sparse R-CNN (#4259) +- Handle scipy import in DETR to reduce package dependencies (#4339) +- Update documentation of usages for config options after MMCV (1.2.3) supports overriding list in config (#4326) +- Update pre-train models of faster rcnn trained on COCO subsets (#4307) +- Avoid zero or too small value for beta in Dynamic R-CNN (#4303) +- Add doccumentation for Pytorch2ONNX (#4271) +- Add deprecated warning FPN arguments (#4264) +- Support returning indices of kept bboxes when using nms (#4251) +- Update type and device requirements when creating tensors `GFLHead` (#4210) +- Update device requirements when creating tensors in `CrossEntropyLoss` (#4224) + +### v2.7.0 (30/11/2020) + +- Support new method: [DETR](https://arxiv.org/abs/2005.12872), [ResNest](https://arxiv.org/abs/2004.08955), Faster R-CNN DC5. +- Support YOLO, Mask R-CNN, and Cascade R-CNN models exportable to ONNX. 
+ +#### New Features + +- Support [DETR](https://arxiv.org/abs/2005.12872) (#4201, #4206) +- Support to link the best checkpoint in training (#3773) +- Support to override config through options in inference.py (#4175) +- Support YOLO, Mask R-CNN, and Cascade R-CNN models exportable to ONNX (#4087, #4083) +- Support [ResNeSt](https://arxiv.org/abs/2004.08955) backbone (#2959) +- Support unclip border bbox regression (#4076) +- Add tpfp func in evaluating AP (#4069) +- Support mixed precision training of SSD detector with other backbones (#4081) +- Add Faster R-CNN DC5 models (#4043) + +#### Bug Fixes + +- Fix bug of `gpu_id` in distributed training mode (#4163) +- Support Albumentations with version higher than 0.5 (#4032) +- Fix num_classes bug in faster rcnn config (#4088) +- Update code in docs/2_new_data_model.md (#4041) + +#### Improvements + +- Ensure DCN offset to have similar type as features in VFNet (#4198) +- Add config links in README files of models (#4190) +- Add tutorials for loss conventions (#3818) +- Add solution to installation issues in 30-series GPUs (#4176) +- Update docker version in get_started.md (#4145) +- Add model statistics and polish some titles in configs README (#4140) +- Clamp neg probability in FreeAnchor (#4082) +- Speed up expanding large images (#4089) +- Fix Pytorch 1.7 incompatibility issues (#4103) +- Update trouble shooting page to resolve segmentation fault (#4055) +- Update aLRP-Loss in project page (#4078) +- Clean duplicated `reduce_mean` function (#4056) +- Refactor Q&A (#4045) + +### v2.6.0 (1/11/2020) + +- Support new method: [VarifocalNet](https://arxiv.org/abs/2008.13367). +- Refactored documentation with more tutorials. + +#### New Features + +- Support GIoU calculation in `BboxOverlaps2D`, and re-implement `giou_loss` using `bbox_overlaps` (#3936) +- Support random sampling in CPU mode (#3948) +- Support VarifocalNet (#3666, #4024) + +#### Bug Fixes + +- Fix SABL validating bug in Cascade R-CNN (#3913) +- Avoid division by zero in PAA head when num_pos=0 (#3938) +- Fix temporary directory bug of multi-node testing error (#4034, #4017) +- Fix `--show-dir` option in test script (#4025) +- Fix GA-RetinaNet r50 model url (#3983) +- Update code in docs and fix broken urls (#3947) + +#### Improvements + +- Refactor pytorch2onnx API into `mmdet.core.export` and use `generate_inputs_and_wrap_model` for pytorch2onnx (#3857, #3912) +- Update RPN upgrade scripts for v2.5.0 compatibility (#3986) +- Use mmcv `tensor2imgs` (#4010) +- Update test robustness (#4000) +- Update trouble shooting page (#3994) +- Accelerate PAA training speed (#3985) +- Support batch_size > 1 in validation (#3966) +- Use RoIAlign implemented in MMCV for inference in CPU mode (#3930) +- Documentation refactoring (#4031) + +### v2.5.0 (5/10/2020) + +#### Highlights + +- Support new methods: [YOLACT](https://arxiv.org/abs/1904.02689), [CentripetalNet](https://arxiv.org/abs/2003.09119). +- Add more documentations for easier and more clear usage. + +#### Backwards Incompatible Changes + +__FP16 related methods are imported from mmcv instead of mmdet. (#3766, #3822)__ +Mixed precision training utils in `mmdet.core.fp16` are moved to `mmcv.runner`, including `force_fp32`, `auto_fp16`, `wrap_fp16_model`, and `Fp16OptimizerHook`. A deprecation warning will be raised if users attempt to import those methods from `mmdet.core.fp16`, and will be finally removed in V2.10.0. + +__\[0, N-1\] represents foreground classes and N indicates background classes for all models. 
(#3221)__ +Before v2.5.0, the background label for RPN is 0, and N for other heads. Now the behavior is consistent for all models. Thus `self.background_labels` in `dense_heads` is removed and all heads use `self.num_classes` to indicate the class index of background labels. +This change has no effect on the pre-trained models in the v2.x model zoo, but will affect the training of all models with RPN heads. Two-stage detectors whose RPN head uses softmax will be affected because the order of categories is changed. + +**Only call `get_subset_by_classes` when `test_mode=True` and `self.filter_empty_gt=True` (#3695)** +Function `get_subset_by_classes` in dataset is refactored and only filters out images when `test_mode=True` and `self.filter_empty_gt=True`. +In the original implementation, `get_subset_by_classes` is not related to the flag `self.filter_empty_gt` and will only be called when the classes is set during initialization no matter `test_mode` is `True` or `False`. This brings ambiguous behavior and potential bugs in many cases. After v2.5.0, if `filter_empty_gt=False`, no matter whether the classes are specified in a dataset, the dataset will use all the images in the annotations. If `filter_empty_gt=True` and `test_mode=True`, no matter whether the classes are specified, the dataset will call \`\`get_subset_by_classes\` to check the images and filter out images containing no GT boxes. Therefore, the users should be responsible for the data filtering/cleaning process for the test dataset. + +#### New Features + +- Test time augmentation for single stage detectors (#3844, #3638) +- Support to show the name of experiments during training (#3764) +- Add `Shear`, `Rotate`, `Translate` Augmentation (#3656, #3619, #3687) +- Add image-only transformations including `Constrast`, `Equalize`, `Color`, and `Brightness`. 
(#3643) +- Support [YOLACT](https://arxiv.org/abs/1904.02689) (#3456) +- Support [CentripetalNet](https://arxiv.org/abs/2003.09119) (#3390) +- Support PyTorch 1.6 in docker (#3905) + +#### Bug Fixes + +- Fix the bug of training ATSS when there is no ground truth boxes (#3702) +- Fix the bug of using Focal Loss when there is `num_pos` is 0 (#3702) +- Fix the label index mapping in dataset browser (#3708) +- Fix Mask R-CNN training stuck problem when their is no positive rois (#3713) +- Fix the bug of `self.rpn_head.test_cfg` in `RPNTestMixin` by using `self.rpn_head` in rpn head (#3808) +- Fix deprecated `Conv2d` from mmcv.ops (#3791) +- Fix device bug in RepPoints (#3836) +- Fix SABL validating bug (#3849) +- Use `https://download.openmmlab.com/mmcv/dist/index.html` for installing MMCV (#3840) +- Fix nonzero in NMS for PyTorch 1.6.0 (#3867) +- Fix the API change bug of PAA (#3883) +- Fix typo in bbox_flip (#3886) +- Fix cv2 import error of ligGL.so.1 in Dockerfile (#3891) + +#### Improvements + +- Change to use `mmcv.utils.collect_env` for collecting environment information to avoid duplicate codes (#3779) +- Update checkpoint file names to v2.0 models in documentation (#3795) +- Update tutorials for changing runtime settings (#3778), modifying loss (#3777) +- Improve the function of `simple_test_bboxes` in SABL (#3853) +- Convert mask to bool before using it as img's index for robustness and speedup (#3870) +- Improve documentation of modules and dataset customization (#3821) + +### v2.4.0 (5/9/2020) + +__Highlights__ + +- Fix lots of issues/bugs and reorganize the trouble shooting page +- Support new methods [SABL](https://arxiv.org/abs/1912.04260), [YOLOv3](https://arxiv.org/abs/1804.02767), and [PAA Assign](https://arxiv.org/abs/2007.08103) +- Support Batch Inference +- Start to publish `mmdet` package to PyPI since v2.3.0 +- Switch model zoo to download.openmmlab.com + +__Backwards Incompatible Changes__ + +- Support Batch Inference (#3564, #3686, #3705): Since v2.4.0, MMDetection could inference model with multiple images in a single GPU. + This change influences all the test APIs in MMDetection and downstream codebases. To help the users migrate their code, we use `replace_ImageToTensor` (#3686) to convert legacy test data pipelines during dataset initialization. +- Support RandomFlip with horizontal/vertical/diagonal direction (#3608): Since v2.4.0, MMDetection supports horizontal/vertical/diagonal flip in the data augmentation. This influences bounding box, mask, and image transformations in data augmentation process and the process that will map those data back to the original format. +- Migrate to use `mmlvis` and `mmpycocotools` for COCO and LVIS dataset (#3727). The APIs are fully compatible with the original `lvis` and `pycocotools`. Users need to uninstall the existing pycocotools and lvis packages in their environment first and install `mmlvis` & `mmpycocotools`. 
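+For downstream test code, the batch-inference and flipping changes above roughly translate into the sketch below. The pipeline content is illustrative, and the import path follows recent MMDetection versions:
+
+```python
+# Sketch: adapt a legacy single-image test pipeline for batch inference and
+# request an additional flip direction (supported since v2.4.0).
+from mmdet.datasets import replace_ImageToTensor
+
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=True,
+        flip_direction=['horizontal', 'vertical'],  # new directions allowed
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+
+# `replace_ImageToTensor` swaps `ImageToTensor` for `DefaultFormatBundle`
+# so the converted pipeline works with the batched test APIs.
+batch_pipeline = replace_ImageToTensor(test_pipeline)
+```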
+ +__Bug Fixes__ + +- Fix default mean/std for onnx (#3491) +- Fix coco evaluation and add metric items (#3497) +- Fix typo for install.md (#3516) +- Fix atss when sampler per gpu is 1 (#3528) +- Fix import of fuse_conv_bn (#3529) +- Fix bug of gaussian_target, update unittest of heatmap (#3543) +- Fixed VOC2012 evaluate (#3553) +- Fix scale factor bug of rescale (#3566) +- Fix with_xxx_attributes in base detector (#3567) +- Fix boxes scaling when number is 0 (#3575) +- Fix rfp check when neck config is a list (#3591) +- Fix import of fuse conv bn in benchmark.py (#3606) +- Fix webcam demo (#3634) +- Fix typo and itemize issues in tutorial (#3658) +- Fix error in distributed training when some levels of FPN are not assigned with bounding boxes (#3670) +- Fix the width and height orders of stride in valid flag generation (#3685) +- Fix weight initialization bug in Res2Net DCN (#3714) +- Fix bug in OHEMSampler (#3677) + +__New Features__ + +- Support Cutout augmentation (#3521) +- Support evaluation on multiple datasets through ConcatDataset (#3522) +- Support [PAA assign](https://arxiv.org/abs/2007.08103) #(3547) +- Support eval metric with pickle results (#3607) +- Support [YOLOv3](https://arxiv.org/abs/1804.02767) (#3083) +- Support [SABL](https://arxiv.org/abs/1912.04260) (#3603) +- Support to publish to Pypi in github-action (#3510) +- Support custom imports (#3641) + +__Improvements__ + +- Refactor common issues in documentation (#3530) +- Add pytorch 1.6 to CI config (#3532) +- Add config to runner meta (#3534) +- Add eval-option flag for testing (#3537) +- Add init_eval to evaluation hook (#3550) +- Add include_bkg in ClassBalancedDataset (#3577) +- Using config's loading in inference_detector (#3611) +- Add ATSS ResNet-101 models in model zoo (#3639) +- Update urls to download.openmmlab.com (#3665) +- Support non-mask training for CocoDataset (#3711) + +### v2.3.0 (5/8/2020) + +__Highlights__ + +- The CUDA/C++ operators have been moved to `mmcv.ops`. For backward compatibility `mmdet.ops` is kept as warppers of `mmcv.ops`. +- Support new methods [CornerNet](https://arxiv.org/abs/1808.01244), [DIOU](https://arxiv.org/abs/1911.08287)/[CIOU](https://arxiv.org/abs/2005.03572) loss, and new dataset: [LVIS V1](https://arxiv.org/abs/1908.03195) +- Provide more detailed colab training tutorials and more complete documentation. +- Support to convert RetinaNet from Pytorch to ONNX. 
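+Since the operators now live in MMCV, downstream code should import them from `mmcv.ops`; the `mmdet.ops` wrappers are kept only for backward compatibility. A minimal sketch (requires `mmcv-full`):
+
+```python
+# Preferred import path since v2.3.0: the compiled operators ship with MMCV.
+from mmcv.ops import RoIAlign, nms
+
+# Legacy path kept as a thin wrapper for backward compatibility:
+# from mmdet.ops import RoIAlign, nms
+```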
+ +__Bug Fixes__ + +- Fix the model initialization bug of DetectoRS (#3187) +- Fix the bug of module names in NASFCOSHead (#3205) +- Fix the filename bug in publish_model.py (#3237) +- Fix the dimensionality bug when `inside_flags.any()` is `False` in dense heads (#3242) +- Fix the bug of forgetting to pass flip directions in `MultiScaleFlipAug` (#3262) +- Fixed the bug caused by default value of `stem_channels` (#3333) +- Fix the bug of model checkpoint loading for CPU inference (#3318, #3316) +- Fix topk bug when box number is smaller than the expected topk number in ATSSAssigner (#3361) +- Fix the gt priority bug in center_region_assigner.py (#3208) +- Fix NaN issue of iou calculation in iou_loss.py (#3394) +- Fix the bug that `iou_thrs` is not actually used during evaluation in coco.py (#3407) +- Fix test-time augmentation of RepPoints (#3435) +- Fix runtimeError caused by incontiguous tensor in Res2Net+DCN (#3412) + +__New Features__ + +- Support [CornerNet](https://arxiv.org/abs/1808.01244) (#3036) +- Support [DIOU](https://arxiv.org/abs/1911.08287)/[CIOU](https://arxiv.org/abs/2005.03572) loss (#3151) +- Support [LVIS V1](https://arxiv.org/abs/1908.03195) dataset (#) +- Support customized hooks in training (#3395) +- Support fp16 training of generalized focal loss (#3410) +- Support to convert RetinaNet from Pytorch to ONNX (#3075) + +__Improvements__ + +- Support to process ignore boxes in ATSS assigner (#3082) +- Allow to crop images without ground truth in `RandomCrop` (#3153) +- Enable the the `Accuracy` module to set threshold (#3155) +- Refactoring unit tests (#3206) +- Unify the training settings of `to_float32` and `norm_cfg` in RegNets configs (#3210) +- Add colab training tutorials for beginners (#3213, #3273) +- Move CUDA/C++ operators into `mmcv.ops` and keep `mmdet.ops` as warppers for backward compatibility (#3232)(#3457) +- Update installation scripts in documentation (#3290) and dockerfile (#3320) +- Support to set image resize backend (#3392) +- Remove git hash in version file (#3466) +- Check mmcv version to force version compatibility (#3460) + +### v2.2.0 (1/7/2020) + +__Highlights__ + +- Support new methods: [DetectoRS](https://arxiv.org/abs/2006.02334), [PointRend](https://arxiv.org/abs/1912.08193), [Generalized Focal Loss](https://arxiv.org/abs/2006.04388), [Dynamic R-CNN](https://arxiv.org/abs/2004.06002) + +__Bug Fixes__ + +- Fix FreeAnchor when no gt in image (#3176) +- Clean up deprecated usage of `register_module()` (#3092, #3161) +- Fix pretrain bug in NAS FCOS (#3145) +- Fix `num_classes` in SSD (#3142) +- Fix FCOS warmup (#3119) +- Fix `rstrip` in `tools/publish_model.py` +- Fix `flip_ratio` default value in RandomFLip pipeline (#3106) +- Fix cityscapes eval with ms_rcnn (#3112) +- Fix RPN softmax (#3056) +- Fix filename of LVIS@v0.5 (#2998) +- Fix nan loss by filtering out-of-frame gt_bboxes in COCO (#2999) +- Fix bug in FSAF (#3018) +- Add FocalLoss `num_classes` check (#2964) +- Fix PISA Loss when there are no gts (#2992) +- Avoid nan in `iou_calculator` (#2975) +- Prevent possible bugs in loading and transforms caused by shallow copy (#2967) + +__New Features__ + +- Add DetectoRS (#3064) +- Support Generalize Focal Loss (#3097) +- Support PointRend (#2752) +- Support Dynamic R-CNN (#3040) +- Add DeepFashion dataset (#2968) +- Implement FCOS training tricks (#2935) +- Use BaseDenseHead as base class for anchor-base heads (#2963) +- Add `with_cp` for BasicBlock (#2891) +- Add `stem_channels` argument for ResNet (#2954) + +__Improvements__ + +- Add 
anchor free base head (#2867) +- Migrate to github action (#3137) +- Add docstring for datasets, pipelines, core modules and methods (#3130, #3125, #3120) +- Add VOC benchmark (#3060) +- Add `concat` mode in GRoI (#3098) +- Remove cmd arg `autorescale-lr` (#3080) +- Use `len(data['img_metas'])` to indicate `num_samples` (#3073, #3053) +- Switch to EpochBasedRunner (#2976) + +### v2.1.0 (8/6/2020) + +__Highlights__ + +- Support new backbones: [RegNetX](https://arxiv.org/abs/2003.13678), [Res2Net](https://arxiv.org/abs/1904.01169) +- Support new methods: [NASFCOS](https://arxiv.org/abs/1906.04423), [PISA](https://arxiv.org/abs/1904.04821), [GRoIE](https://arxiv.org/abs/2004.13665) +- Support new dataset: [LVIS](https://arxiv.org/abs/1908.03195) + +__Bug Fixes__ + +- Change the CLI argument `--validate` to `--no-validate` to enable validation after training epochs by default. (#2651) +- Add missing cython to docker file (#2713) +- Fix bug in nms cpu implementation (#2754) +- Fix bug when showing mask results (#2763) +- Fix gcc requirement (#2806) +- Fix bug in async test (#2820) +- Fix mask encoding-decoding bugs in test API (#2824) +- Fix bug in test time augmentation (#2858, #2921, #2944) +- Fix a typo in comment of apis/train (#2877) +- Fix the bug of returning None when no gt bboxes are in the original image in `RandomCrop`. Fix the bug that misses to handle `gt_bboxes_ignore`, `gt_label_ignore`, and `gt_masks_ignore` in `RandomCrop`, `MinIoURandomCrop` and `Expand` modules. (#2810) +- Fix bug of `base_channels` of regnet (#2917) +- Fix the bug of logger when loading pre-trained weights in base detector (#2936) + +__New Features__ + +- Add IoU models (#2666) +- Add colab demo for inference +- Support class agnostic nms (#2553) +- Add benchmark gathering scripts for development only (#2676) +- Add mmdet-based project links (#2736, #2767, #2895) +- Add config dump in training (#2779) +- Add ClassBalancedDataset (#2721) +- Add res2net backbone (#2237) +- Support RegNetX models (#2710) +- Use `mmcv.FileClient` to support different storage backends (#2712) +- Add ClassBalancedDataset (#2721) +- Code Release: Prime Sample Attention in Object Detection (CVPR 2020) (#2626) +- Implement NASFCOS (#2682) +- Add class weight in CrossEntropyLoss (#2797) +- Support LVIS dataset (#2088) +- Support GRoIE (#2584) + +__Improvements__ + +- Allow different x and y strides in anchor heads. (#2629) +- Make FSAF loss more robust to no gt (#2680) +- Compute pure inference time instead (#2657) and update inference speed (#2730) +- Avoided the possibility that a patch with 0 area is cropped. (#2704) +- Add warnings when deprecated `imgs_per_gpu` is used. (#2700) +- Add a mask rcnn example for config (#2645) +- Update model zoo (#2762, #2866, #2876, #2879, #2831) +- Add `ori_filename` to img_metas and use it in test show-dir (#2612) +- Use `img_fields` to handle multiple images during image transform (#2800) +- Add upsample_cfg support in FPN (#2787) +- Add `['img']` as default `img_fields` for back compatibility (#2809) +- Rename the pretrained model from `open-mmlab://resnet50_caffe` and `open-mmlab://resnet50_caffe_bgr` to `open-mmlab://detectron/resnet50_caffe` and `open-mmlab://detectron2/resnet50_caffe`. 
(#2832) +- Added sleep(2) in test.py to reduce hanging problem (#2847) +- Support `c10::half` in CARAFE (#2890) +- Improve documentations (#2918, #2714) +- Use optimizer constructor in mmcv and clean the original implementation in `mmdet.core.optimizer` (#2947) + +### v2.0.0 (6/5/2020) + +In this release, we made lots of major refactoring and modifications. + +1. __Faster speed__. We optimize the training and inference speed for common models, achieving up to 30% speedup for training and 25% for inference. Please refer to [model zoo](model_zoo.md#comparison-with-detectron2) for details. + +2. __Higher performance__. We change some default hyperparameters with no additional cost, which leads to a gain of performance for most models. Please refer to [compatibility](compatibility.md#training-hyperparameters) for details. + +3. __More documentation and tutorials__. We add a bunch of documentation and tutorials to help users get started more smoothly. Read it [here](https://mmdetection.readthedocs.io/en/latest/). + +4. __Support PyTorch 1.5__. The support for 1.1 and 1.2 is dropped, and we switch to some new APIs. + +5. __Better configuration system__. Inheritance is supported to reduce the redundancy of configs. + +6. __Better modular design__. Towards the goal of simplicity and flexibility, we simplify some encapsulation while add more other configurable modules like BBoxCoder, IoUCalculator, OptimizerConstructor, RoIHead. Target computation is also included in heads and the call hierarchy is simpler. + +7. Support new methods: [FSAF](https://arxiv.org/abs/1903.00621) and PAFPN (part of [PAFPN](https://arxiv.org/abs/1803.01534)). + +__Breaking Changes__ +Models training with MMDetection 1.x are not fully compatible with 2.0, please refer to the [compatibility doc](compatibility.md) for the details and how to migrate to the new version. + +__Improvements__ + +- Unify cuda and cpp API for custom ops. (#2277) +- New config files with inheritance. (#2216) +- Encapsulate the second stage into RoI heads. (#1999) +- Refactor GCNet/EmpericalAttention into plugins. (#2345) +- Set low quality match as an option in IoU-based bbox assigners. (#2375) +- Change the codebase's coordinate system. (#2380) +- Refactor the category order in heads. 0 means the first positive class instead of background now. (#2374) +- Add bbox sampler and assigner registry. (#2419) +- Speed up the inference of RPN. (#2420) +- Add `train_cfg` and `test_cfg` as class members in all anchor heads. (#2422) +- Merge target computation methods into heads. (#2429) +- Add bbox coder to support different bbox encoding and losses. (#2480) +- Unify the API for regression loss. (#2156) +- Refactor Anchor Generator. (#2474) +- Make `lr` an optional argument for optimizers. (#2509) +- Migrate to modules and methods in MMCV. (#2502, #2511, #2569, #2572) +- Support PyTorch 1.5. (#2524) +- Drop the support for Python 3.5 and use F-string in the codebase. (#2531) + +__Bug Fixes__ + +- Fix the scale factors for resized images without keep the aspect ratio. (#2039) +- Check if max_num > 0 before slicing in NMS. (#2486) +- Fix Deformable RoIPool when there is no instance. (#2490) +- Fix the default value of assigned labels. (#2536) +- Fix the evaluation of Cityscapes. (#2578) + +__New Features__ + +- Add deep_stem and avg_down option to ResNet, i.e., support ResNetV1d. (#2252) +- Add L1 loss. (#2376) +- Support both polygon and bitmap for instance masks. (#2353, #2540) +- Support CPU mode for inference. 
(#2385) +- Add optimizer constructor for complicated configuration of optimizers. (#2397, #2488) +- Implement PAFPN. (#2392) +- Support empty tensor input for some modules. (#2280) +- Support for custom dataset classes without overriding it. (#2408, #2443) +- Support to train subsets of coco dataset. (#2340) +- Add iou_calculator to potentially support more IoU calculation methods. (2405) +- Support class wise mean AP (was removed in the last version). (#2459) +- Add option to save the testing result images. (#2414) +- Support MomentumUpdaterHook. (#2571) +- Add a demo to inference a single image. (#2605) + +### v1.1.0 (24/2/2020) + +__Highlights__ + +- Dataset evaluation is rewritten with a unified api, which is used by both evaluation hooks and test scripts. +- Support new methods: [CARAFE](https://arxiv.org/abs/1905.02188). + +__Breaking Changes__ + +- The new MMDDP inherits from the official DDP, thus the `__init__` api is changed to be the same as official DDP. +- The `mask_head` field in HTC config files is modified. +- The evaluation and testing script is updated. +- In all transforms, instance masks are stored as a numpy array shaped (n, h, w) instead of a list of (h, w) arrays, where n is the number of instances. + +__Bug Fixes__ + +- Fix IOU assigners when ignore_iof_thr > 0 and there is no pred boxes. (#2135) +- Fix mAP evaluation when there are no ignored boxes. (#2116) +- Fix the empty RoI input for Deformable RoI Pooling. (#2099) +- Fix the dataset settings for multiple workflows. (#2103) +- Fix the warning related to `torch.uint8` in PyTorch 1.4. (#2105) +- Fix the inference demo on devices other than gpu:0. (#2098) +- Fix Dockerfile. (#2097) +- Fix the bug that `pad_val` is unused in Pad transform. (#2093) +- Fix the albumentation transform when there is no ground truth bbox. (#2032) + +__Improvements__ + +- Use torch instead of numpy for random sampling. (#2094) +- Migrate to the new MMDDP implementation in MMCV v0.3. (#2090) +- Add meta information in logs. (#2086) +- Rewrite Soft NMS with pytorch extension and remove cython as a dependency. (#2056) +- Rewrite dataset evaluation. (#2042, #2087, #2114, #2128) +- Use numpy array for masks in transforms. (#2030) + +__New Features__ + +- Implement "CARAFE: Content-Aware ReAssembly of FEatures". (#1583) +- Add `worker_init_fn()` in data_loader when seed is set. (#2066, #2111) +- Add logging utils. (#2035) + +### v1.0.0 (30/1/2020) + +This release mainly improves the code quality and add more docstrings. + +__Highlights__ + +- Documentation is online now: . +- Support new models: [ATSS](https://arxiv.org/abs/1912.02424). +- DCN is now available with the api `build_conv_layer` and `ConvModule` like the normal conv layer. +- A tool to collect environment information is available for trouble shooting. + +__Bug Fixes__ + +- Fix the incompatibility of the latest numpy and pycocotools. (#2024) +- Fix the case when distributed package is unavailable, e.g., on Windows. (#1985) +- Fix the dimension issue for `refine_bboxes()`. (#1962) +- Fix the typo when `seg_prefix` is a list. (#1906) +- Add segmentation map cropping to RandomCrop. (#1880) +- Fix the return value of `ga_shape_target_single()`. (#1853) +- Fix the loaded shape of empty proposals. (#1819) +- Fix the mask data type when using albumentation. (#1818) + +__Improvements__ + +- Enhance AssignResult and SamplingResult. (#1995) +- Add ability to overwrite existing module in Registry. (#1982) +- Reorganize requirements and make albumentations and imagecorruptions optional. 
(#1969) +- Check NaN in `SSDHead`. (#1935) +- Encapsulate the DCN in ResNe(X)t into a ConvModule & Conv_layers. (#1894) +- Refactoring for mAP evaluation and support multiprocessing and logging. (#1889) +- Init the root logger before constructing Runner to log more information. (#1865) +- Split `SegResizeFlipPadRescale` into different existing transforms. (#1852) +- Move `init_dist()` to MMCV. (#1851) +- Documentation and docstring improvements. (#1971, #1938, #1869, #1838) +- Fix the color of the same class for mask visualization. (#1834) +- Remove the option `keep_all_stages` in HTC and Cascade R-CNN. (#1806) + +__New Features__ + +- Add two test-time options `crop_mask` and `rle_mask_encode` for mask heads. (#2013) +- Support loading grayscale images as single channel. (#1975) +- Implement "Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection". (#1872) +- Add sphinx generated docs. (#1859, #1864) +- Add GN support for flops computation. (#1850) +- Collect env info for trouble shooting. (#1812) + +### v1.0rc1 (13/12/2019) + +The RC1 release mainly focuses on improving the user experience, and fixing bugs. + +__Highlights__ + +- Support new models: [FoveaBox](https://arxiv.org/abs/1904.03797), [RepPoints](https://arxiv.org/abs/1904.11490) and [FreeAnchor](https://arxiv.org/abs/1909.02466). +- Add a Dockerfile. +- Add a jupyter notebook demo and a webcam demo. +- Setup the code style and CI. +- Add lots of docstrings and unit tests. +- Fix lots of bugs. + +__Breaking Changes__ + +- There was a bug for computing COCO-style mAP w.r.t different scales (AP_s, AP_m, AP_l), introduced by #621. (#1679) + +__Bug Fixes__ + +- Fix a sampling interval bug in Libra R-CNN. (#1800) +- Fix the learning rate in SSD300 WIDER FACE. (#1781) +- Fix the scaling issue when `keep_ratio=False`. (#1730) +- Fix typos. (#1721, #1492, #1242, #1108, #1107) +- Fix the shuffle argument in `build_dataloader`. (#1693) +- Clip the proposal when computing mask targets. (#1688) +- Fix the "index out of range" bug for samplers in some corner cases. (#1610, #1404) +- Fix the NMS issue on devices other than GPU:0. (#1603) +- Fix SSD Head and GHM Loss on CPU. (#1578) +- Fix the OOM error when there are too many gt bboxes. (#1575) +- Fix the wrong keyword argument `nms_cfg` in HTC. (#1573) +- Process masks and semantic segmentation in Expand and MinIoUCrop transforms. (#1550, #1361) +- Fix a scale bug in the Non Local op. (#1528) +- Fix a bug in transforms when `gt_bboxes_ignore` is None. (#1498) +- Fix a bug when `img_prefix` is None. (#1497) +- Pass the device argument to `grid_anchors` and `valid_flags`. (#1478) +- Fix the data pipeline for test_robustness. (#1476) +- Fix the argument type of deformable pooling. (#1390) +- Fix the coco_eval when there are only two classes. (#1376) +- Fix a bug in Modulated DeformableConv when deformable_group>1. (#1359) +- Fix the mask cropping in RandomCrop. (#1333) +- Fix zero outputs in DeformConv when not running on cuda:0. (#1326) +- Fix the type issue in Expand. (#1288) +- Fix the inference API. (#1255) +- Fix the inplace operation in Expand. (#1249) +- Fix the from-scratch training config. (#1196) +- Fix inplace add in RoIExtractor which cause an error in PyTorch 1.2. (#1160) +- Fix FCOS when input images has no positive sample. (#1136) +- Fix recursive imports. (#1099) + +__Improvements__ + +- Print the config file and mmdet version in the log. (#1721) +- Lint the code before compiling in travis CI. 
(#1715) +- Add a probability argument for the `Expand` transform. (#1651) +- Update the PyTorch and CUDA version in the docker file. (#1615) +- Raise a warning when specifying `--validate` in non-distributed training. (#1624, #1651) +- Beautify the mAP printing. (#1614) +- Add pre-commit hook. (#1536) +- Add the argument `in_channels` to backbones. (#1475) +- Add lots of docstrings and unit tests, thanks to [@Erotemic](https://github.com/Erotemic). (#1603, #1517, #1506, #1505, #1491, #1479, #1477, #1475, #1474) +- Add support for multi-node distributed test when there is no shared storage. (#1399) +- Optimize Dockerfile to reduce the image size. (#1306) +- Update new results of HRNet. (#1284, #1182) +- Add an argument `no_norm_on_lateral` in FPN. (#1240) +- Test the compiling in CI. (#1235) +- Move docs to a separate folder. (#1233) +- Add a jupyter notebook demo. (#1158) +- Support different type of dataset for training. (#1133) +- Use int64_t instead of long in cuda kernels. (#1131) +- Support unsquare RoIs for bbox and mask heads. (#1128) +- Manually add type promotion to make compatible to PyTorch 1.2. (#1114) +- Allowing validation dataset for computing validation loss. (#1093) +- Use `.scalar_type()` instead of `.type()` to suppress some warnings. (#1070) + +__New Features__ + +- Add an option `--with_ap` to compute the AP for each class. (#1549) +- Implement "FreeAnchor: Learning to Match Anchors for Visual Object Detection". (#1391) +- Support [Albumentations](https://github.com/albumentations-team/albumentations) for augmentations in the data pipeline. (#1354) +- Implement "FoveaBox: Beyond Anchor-based Object Detector". (#1339) +- Support horizontal and vertical flipping. (#1273, #1115) +- Implement "RepPoints: Point Set Representation for Object Detection". (#1265) +- Add test-time augmentation to HTC and Cascade R-CNN. (#1251) +- Add a COCO result analysis tool. (#1228) +- Add Dockerfile. (#1168) +- Add a webcam demo. (#1155, #1150) +- Add FLOPs counter. (#1127) +- Allow arbitrary layer order for ConvModule. (#1078) + +### v1.0rc0 (27/07/2019) + +- Implement lots of new methods and components (Mixed Precision Training, HTC, Libra R-CNN, Guided Anchoring, Empirical Attention, Mask Scoring R-CNN, Grid R-CNN (Plus), GHM, GCNet, FCOS, HRNet, Weight Standardization, etc.). Thank all collaborators! +- Support two additional datasets: WIDER FACE and Cityscapes. +- Refactoring for loss APIs and make it more flexible to adopt different losses and related hyper-parameters. +- Speed up multi-gpu testing. +- Integrate all compiling and installing in a single script. + +### v0.6.0 (14/04/2019) + +- Up to 30% speedup compared to the model zoo. +- Support both PyTorch stable and nightly version. +- Replace NMS and SigmoidFocalLoss with Pytorch CUDA extensions. + +### v0.6rc0(06/02/2019) + +- Migrate to PyTorch 1.0. + +### v0.5.7 (06/02/2019) + +- Add support for Deformable ConvNet v2. (Many thanks to the authors and [@chengdazhi](https://github.com/chengdazhi)) +- This is the last release based on PyTorch 0.4.1. + +### v0.5.6 (17/01/2019) + +- Add support for Group Normalization. +- Unify RPNHead and single stage heads (RetinaHead, SSDHead) with AnchorHead. + +### v0.5.5 (22/12/2018) + +- Add SSD for COCO and PASCAL VOC. +- Add ResNeXt backbones and detection models. +- Refactoring for Samplers/Assigners and add OHEM. +- Add VOC dataset and evaluation scripts. + +### v0.5.4 (27/11/2018) + +- Add SingleStageDetector and RetinaNet. 
+ +### v0.5.3 (26/11/2018) + +- Add Cascade R-CNN and Cascade Mask R-CNN. +- Add support for Soft-NMS in config files. + +### v0.5.2 (21/10/2018) + +- Add support for custom datasets. +- Add a script to convert PASCAL VOC annotations to the expected format. + +### v0.5.1 (20/10/2018) + +- Add BBoxAssigner and BBoxSampler, the `train_cfg` field in config files are restructured. +- `ConvFCRoIHead` / `SharedFCRoIHead` are renamed to `ConvFCBBoxHead` / `SharedFCBBoxHead` for consistency. diff --git a/downstream/mmdetection/docs/en/compatibility.md b/downstream/mmdetection/docs/en/compatibility.md new file mode 100644 index 0000000..a545a49 --- /dev/null +++ b/downstream/mmdetection/docs/en/compatibility.md @@ -0,0 +1,178 @@ +# Compatibility of MMDetection 2.x + +## MMDetection 2.25.0 + +In order to support Mask2Former for instance segmentation, the original config files of Mask2Former for panpotic segmentation need to be renamed [PR #7571](https://github.com/open-mmlab/mmdetection/pull/7571). + + + + + + + + + + + +
+before v2.25.0 | after v2.25.0
    + +``` +'mask2former_xxx_coco.py' represents config files for **panoptic segmentation**. +``` + + + +``` +'mask2former_xxx_coco.py' represents config files for **instance segmentation**. +'mask2former_xxx_coco-panoptic.py' represents config files for **panoptic segmentation**. +``` + +
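+In practice this renaming only changes which file name to pass for each task; a hedged sketch (the concrete config names below are placeholders that follow the scheme above):
+
+```python
+# Illustrative only: pick the Mask2Former config matching the task (>= v2.25.0).
+from mmdet.apis import init_detector
+
+instance_cfg = 'configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py'           # instance segmentation
+panoptic_cfg = 'configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py'  # panoptic segmentation
+
+model = init_detector(instance_cfg, checkpoint=None, device='cpu')
+```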
+
+## MMDetection 2.21.0
+
+In order to support CPU training, the logic of scatter in batch collating has been changed. We recommend using
+MMCV v1.4.4 or higher. For more details, please refer to [MMCV PR #1621](https://github.com/open-mmlab/mmcv/pull/1621).
+
+## MMDetection 2.18.1
+
+### MMCV compatibility
+
+In order to fix the wrong weight reference bug in BaseTransformerLayer, the logic of the batch-first mode of MultiheadAttention has been changed.
+We recommend using MMCV v1.3.17 or higher. For more details, please refer to [MMCV PR #1418](https://github.com/open-mmlab/mmcv/pull/1418).
+
+## MMDetection 2.18.0
+
+### DIIHead compatibility
+
+In order to support QueryInst, `attn_feats` is added to the returned tuple of DIIHead.
+
+## MMDetection 2.14.0
+
+### MMCV Version
+
+To fix the problem that the priority of EvalHook was too low, all hook priorities were re-adjusted in MMCV 1.3.8, so MMDetection 2.14.0 needs to rely on MMCV 1.3.8 or later. For related information, please refer to [#1120](https://github.com/open-mmlab/mmcv/pull/1120); for related issues, please refer to [#5343](https://github.com/open-mmlab/mmdetection/issues/5343).
+
+### SSD compatibility
+
+In v2.14.0, to make SSD more flexible to use, [PR #5291](https://github.com/open-mmlab/mmdetection/pull/5291) refactored its backbone, neck, and head. Users can use the script `tools/model_converters/upgrade_ssd_version.py` to convert their models.
+
+```bash
+python tools/model_converters/upgrade_ssd_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH}
+```
+
+- OLD_MODEL_PATH: the path to load the old-version SSD model.
+- NEW_MODEL_PATH: the path to save the converted model weights.
+
+## MMDetection 2.12.0
+
+MMDetection is undergoing a large refactoring for more general and convenient usage during the releases from v2.12.0 to v2.18.0 (maybe longer).
+In v2.12.0, MMDetection inevitably introduces some backward-incompatible changes, including the MMCV dependency, model initialization, model registry, and mask AP evaluation.
+
+### MMCV Version
+
+MMDetection v2.12.0 relies on the newest features in MMCV 1.3.3, including `BaseModule` for unified parameter initialization, the model registry, and the CUDA operator `MultiScaleDeformableAttn` for [Deformable DETR](https://arxiv.org/abs/2010.04159). Note that MMCV 1.3.2 already contains all the features used by MMDet but has known issues. Therefore, we recommend users skip MMCV v1.3.2 and use v1.3.3, though v1.3.2 might work for most cases.
+
+### Unified model initialization
+
+To unify the parameter initialization in OpenMMLab projects, MMCV supports `BaseModule`, which accepts `init_cfg` to allow the modules' parameters to be initialized in a flexible and unified manner. Now users need to explicitly call `model.init_weights()` in the training script to initialize the model (as in [here](https://github.com/open-mmlab/mmdetection/blob/master/tools/train.py#L162)); previously this was handled by the detector. **The downstream projects must update their model initialization accordingly to use MMDetection v2.12.0**. Please refer to PR #4750 for details.
+
+### Unified model registry
+
+To easily use backbones implemented in other OpenMMLab projects, MMDetection v2.12.0 inherits the model registry created in MMCV (#760). In this way, as long as the backbone is supported in an OpenMMLab project and that project also uses the registry in MMCV, users can use that backbone in MMDetection by simply modifying the config, without copying the code of that backbone into MMDetection. Please refer to PR #5059 for more details. A minimal sketch of both changes is given below.
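+The sketch below shows how these two changes look from a downstream training script: the detector is built from a config, and its weights are then initialized explicitly with `model.init_weights()`, driven by each module's `init_cfg`. The config path is only an example, and the cross-project backbone comment is a hypothetical illustration, not a drop-in recipe:
+
+```python
+# Minimal sketch of the v2.12.0 workflow (paths and names are illustrative).
+from mmcv import Config
+from mmdet.models import build_detector
+
+cfg = Config.fromfile('configs/retinanet/retinanet_r50_fpn_1x_coco.py')
+
+# With the unified registry, a backbone implemented in another OpenMMLab
+# project can be referenced from the config by name (for example a type such
+# as 'mmcls.MobileNetV3'), provided that project's modules are imported,
+# e.g. through the `custom_imports` field of the config file.
+
+model = build_detector(
+    cfg.model,
+    train_cfg=cfg.get('train_cfg'),
+    test_cfg=cfg.get('test_cfg'))
+
+# Since v2.12.0 the caller is responsible for weight initialization;
+# it is no longer done implicitly inside the detector.
+model.init_weights()
+```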
+
+### Mask AP evaluation
+
+Before [PR 4898](https://github.com/open-mmlab/mmdetection/pull/4898) and v2.12.0, the mask AP of small, medium, and large instances was calculated based on the bounding box area rather than the real mask area. This leads to higher `APs` and `APm` but lower `APl`, though it does not affect the overall mask AP. [PR 4898](https://github.com/open-mmlab/mmdetection/pull/4898) changes it to use mask areas by deleting `bbox` in the mask AP calculation.
+The new calculation does not affect the overall mask AP evaluation and is consistent with [Detectron2](https://github.com/facebookresearch/detectron2/).
+
+## Compatibility with MMDetection 1.x
+
+MMDetection 2.0 goes through a big refactoring and addresses many legacy issues. It is not compatible with the 1.x version, i.e., running inference with the same model weights in these two versions will produce different results. Thus, MMDetection 2.0 re-benchmarks all the models and provides their links and logs in the model zoo.
+
+The major differences lie in four aspects: coordinate system, codebase conventions, training hyperparameters, and modular design.
+
+### Coordinate System
+
+The new coordinate system is consistent with [Detectron2](https://github.com/facebookresearch/detectron2/) and treats the center of the top-left pixel as (0, 0) rather than the top-left corner of that pixel.
+Accordingly, the system interprets the coordinates in COCO bounding box and segmentation annotations as coordinates in the range `[0, width]` or `[0, height]`.
+This modification affects all the computation related to bbox and pixel selection, and is more natural and accurate.
+
+- The height and width of a box with corners (x1, y1) and (x2, y2) in the new coordinate system are computed as `width = x2 - x1` and `height = y2 - y1`.
+  In MMDetection 1.x and previous versions, a "+ 1" was added to both height and width.
+  This modification affects three parts:
+
+  1. Box transformation and encoding/decoding in regression.
+  2. IoU calculation. This affects the matching process between ground truth and bounding box and the NMS process. The effect on compatibility is negligible, though.
+  3. The corners of bounding boxes are in float type and no longer quantized. This should provide more accurate bounding box results. It also means that bounding boxes and RoIs are no longer required to have a minimum size of 1, whose effect is small, though.
+
+- The anchors are center-aligned to feature grid points and in float type.
+  In MMDetection 1.x and previous versions, the anchors are in `int` type and not center-aligned.
+  This affects the anchor generation in RPN and all the anchor-based methods.
+
+- ROIAlign is better aligned with the image coordinate system. The new implementation is adopted from [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlign).
+  The RoIs are shifted by half a pixel by default when they are used to crop RoI features, compared to MMDetection 1.x.
+  The old behavior is still available by setting `aligned=False` instead of `aligned=True`.
+
+- Mask cropping and pasting are more accurate.
+
+  1. We use the new RoIAlign to crop mask targets. In MMDetection 1.x, the bounding box is quantized before it is used to crop the mask target, and the crop process is implemented by numpy. In the new implementation, the bounding box for cropping is not quantized and is sent to RoIAlign. This implementation accelerates the training speed by a large margin (~0.1s per iter, ~2 hours when training Mask R50 for a 1x schedule) and should be more accurate.
+
+  2. In MMDetection 2.0, the `paste_mask()` function is different and should be more accurate than those in previous versions. This change follows the modification in [Detectron2](https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/masks.py) and can improve mask AP on COCO by ~0.5% absolute.
+
+### Codebase Conventions
+
+- MMDetection 2.0 changes the order of class labels to reduce unused parameters in the regression and mask branches more naturally (without +1 and -1).
+  This causes all the classification layers of the model to have a different ordering of class labels. The final layers of the regression branch and mask head no longer keep K+1 channels for K categories, and their class orders are consistent with the classification branch.
+
+  - In MMDetection 2.0, label "K" means background, and labels \[0, K-1\] correspond to the K = num_categories object categories.
+
+  - In MMDetection 1.x and previous versions, label "0" means background, and labels \[1, K\] correspond to the K categories.
+
+  - **Note**: The class order of softmax RPN is still the same as that in 1.x in versions \<= 2.4.0, while sigmoid RPN is not affected. The class orders in all heads are unified since MMDetection v2.5.0.
+
+- Low quality matching in R-CNN is not used. In MMDetection 1.x and previous versions, the `max_iou_assigner` will match low quality boxes for each ground truth box in both RPN and R-CNN training. We observe this sometimes does not assign the most suitable GT box to some bounding boxes,
+  thus MMDetection 2.0 does not allow low quality matching by default in R-CNN training in the new system. This may sometimes slightly improve the box AP (~0.1% absolute).
+
+- Separate scale factors for width and height. In MMDetection 1.x and previous versions, the scale factor is a single float in `keep_ratio=True` mode. This is slightly inaccurate because the scale factors for width and height differ slightly. MMDetection 2.0 adopts separate scale factors for width and height, which improves AP by ~0.1% absolute.
+
+- Config name conventions are changed. MMDetection V2.0 adopts the new name convention to maintain the gradually growing model zoo as follows:
+
+  ```shell
+  [model]_(model setting)_[backbone]_[neck]_(norm setting)_(misc)_(gpu x batch)_[schedule]_[dataset].py,
+  ```
+
+  where (`misc`) includes DCN and GCBlock, etc. More details are illustrated in the [documentation for config](tutorials/config).
+
+- MMDetection V2.0 uses new ResNet Caffe backbones to reduce warnings when loading pre-trained models. Most of the new backbones' weights are the same as the former ones but do not have `conv.bias` and use a different `img_norm_cfg`. Thus, the new backbones will not cause warnings about unexpected keys.
+
+### Training Hyperparameters
+
+The change in training hyperparameters does not affect
+model-level compatibility but slightly improves the performance. The major ones are:
+
+- The number of proposals after NMS is changed from 2000 to 1000 by setting `nms_post=1000` and `max_num=1000`.
+  This slightly improves both mask AP and bbox AP by ~0.2% absolute.
+
+- The default box regression losses for Mask R-CNN, Faster R-CNN and RetinaNet are changed from smooth L1 loss to L1 loss. This leads to an overall improvement in box AP (~0.6% absolute). However, using L1 loss for other methods such as Cascade R-CNN and HTC does not improve the performance, so we keep the original settings for these methods.
+
+- The sampling ratio of the RoIAlign layer is set to 0 for simplicity. This leads to a slight improvement in mask AP (~0.2% absolute).
+
+- The default setting does not use gradient clipping anymore during training, for faster training speed. This does not degrade the performance of most models. For some models such as RepPoints we keep using gradient clipping to stabilize the training process and to obtain better performance.
+
+- The default warmup ratio is changed from 1/3 to 0.001 for a smoother warmup process, since gradient clipping is usually not used. The effect is found negligible during our re-benchmarking, though.
+
+### Upgrade Models from 1.x to 2.0
+
+To convert the models trained by MMDetection V1.x to MMDetection V2.0, users can use the script `tools/model_converters/upgrade_model_version.py` to convert
+their models. The converted models can be run in MMDetection V2.0 with slightly dropped performance (less than 1% AP absolute).
+Details can be found in `configs/legacy`.
+
+## pycocotools compatibility
+
+`mmpycocotools` is OpenMMLab's fork of the official `pycocotools`, which works for both MMDetection and Detectron2.
+Before [PR 4939](https://github.com/open-mmlab/mmdetection/pull/4939), since `pycocotools` and `mmpycocotools` have the same package name, if users already installed `pycocotools` (e.g., by installing Detectron2 first in the same environment), then the setup of MMDetection would skip installing `mmpycocotools`. Thus MMDetection would fail due to the missing `mmpycocotools`.
+If MMDetection is installed before Detectron2, they can work under the same environment.
+[PR 4939](https://github.com/open-mmlab/mmdetection/pull/4939) deprecates `mmpycocotools` in favor of the official `pycocotools`.
+After [PR 4939](https://github.com/open-mmlab/mmdetection/pull/4939), users may install MMDetection and Detectron2 under the same environment, no matter what the installation order is.
diff --git a/downstream/mmdetection/docs/en/conf.py b/downstream/mmdetection/docs/en/conf.py
new file mode 100644
index 0000000..e902e3f
--- /dev/null
+++ b/downstream/mmdetection/docs/en/conf.py
@@ -0,0 +1,116 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+# +import os +import subprocess +import sys + +import pytorch_sphinx_theme + +sys.path.insert(0, os.path.abspath('../..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMDetection' +copyright = '2018-2021, OpenMMLab' +author = 'MMDetection Authors' +version_file = '../../mmdet/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_markdown_tables', + 'sphinx_copybutton', +] + +myst_enable_extensions = ['colon_fence'] +myst_heading_anchors = 3 + +autodoc_mock_imports = [ + 'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops' +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# html_theme = 'sphinx_rtd_theme' +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +html_theme_options = { + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmdetection' + }, + ], + # Specify the language of shared menu + 'menu_lang': + 'en' +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/downstream/mmdetection/docs/en/conventions.md b/downstream/mmdetection/docs/en/conventions.md new file mode 100644 index 0000000..97e5fd0 --- /dev/null +++ b/downstream/mmdetection/docs/en/conventions.md @@ -0,0 +1,78 @@ +# Conventions + +Please check the following conventions if you would like to modify MMDetection as your own project. + +## Loss + +In MMDetection, a `dict` containing losses and metrics will be returned by `model(**data)`. + +For example, in bbox head, + +```python +class BBoxHead(nn.Module): + ... + def loss(self, ...): + losses = dict() + # classification loss + losses['loss_cls'] = self.loss_cls(...) 
+ # classification accuracy + losses['acc'] = accuracy(...) + # bbox regression loss + losses['loss_bbox'] = self.loss_bbox(...) + return losses +``` + +`bbox_head.loss()` will be called during model forward. +The returned dict contains `'loss_bbox'`, `'loss_cls'`, `'acc'` . +Only `'loss_bbox'`, `'loss_cls'` will be used during back propagation, +`'acc'` will only be used as a metric to monitor training process. + +By default, only values whose keys contain `'loss'` will be back propagated. +This behavior could be changed by modifying `BaseDetector.train_step()`. + +## Empty Proposals + +In MMDetection, We have added special handling and unit test for empty proposals of two-stage. We need to deal with the empty proposals of the entire batch and single image at the same time. For example, in CascadeRoIHead, + +```python +# simple_test method +... +# There is no proposal in the whole batch +if rois.shape[0] == 0: + bbox_results = [[ + np.zeros((0, 5), dtype=np.float32) + for _ in range(self.bbox_head[-1].num_classes) + ]] * num_imgs + if self.with_mask: + mask_classes = self.mask_head[-1].num_classes + segm_results = [[[] for _ in range(mask_classes)] + for _ in range(num_imgs)] + results = list(zip(bbox_results, segm_results)) + else: + results = bbox_results + return results +... + +# There is no proposal in the single image +for i in range(self.num_stages): + ... + if i < self.num_stages - 1: + for j in range(num_imgs): + # Handle empty proposal + if rois[j].shape[0] > 0: + bbox_label = cls_score[j][:, :-1].argmax(dim=1) + refine_roi = self.bbox_head[i].regress_by_class( + rois[j], bbox_label, bbox_pred[j], img_metas[j]) + refine_roi_list.append(refine_roi) +``` + +If you have customized `RoIHead`, you can refer to the above method to deal with empty proposals. + +## Coco Panoptic Dataset + +In MMDetection, we have supported COCO Panoptic dataset. We clarify a few conventions about the implementation of `CocoPanopticDataset` here. + +1. For mmdet\<=2.16.0, the range of foreground and background labels in semantic segmentation are different from the default setting of MMDetection. The label `0` stands for `VOID` label and the category labels start from `1`. + Since mmdet=2.17.0, the category labels of semantic segmentation start from `0` and label `255` stands for `VOID` for consistency with labels of bounding boxes. + To achieve that, the `Pad` pipeline supports setting the padding value for `seg`. +2. In the evaluation, the panoptic result is a map with the same shape as the original image. Each value in the result map has the format of `instance_id * INSTANCE_OFFSET + category_id`. diff --git a/downstream/mmdetection/docs/en/faq.md b/downstream/mmdetection/docs/en/faq.md new file mode 100644 index 0000000..a36419f --- /dev/null +++ b/downstream/mmdetection/docs/en/faq.md @@ -0,0 +1,232 @@ +# Frequently Asked Questions + +We list some common troubles faced by many users and their corresponding solutions here. Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. If the contents here do not cover your issue, please create an issue using the [provided templates](https://github.com/open-mmlab/mmdetection/blob/master/.github/ISSUE_TEMPLATE/error-report.md/) and make sure you fill in all required information in the template. + +## Installation + +- Compatibility issue between MMCV and MMDetection; "ConvWS is already registered in conv layer"; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." 
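  A quick way to see which versions are currently installed in your environment (a minimal sketch; it assumes both packages import at all) is:

  ```python
  # Print the installed MMCV / MMDetection versions so they can be compared
  # against the compatibility table below.
  import mmcv
  import mmdet

  print('mmcv:', mmcv.__version__)
  print('mmdet:', mmdet.__version__)
  ```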
+ + Compatible MMDetection and MMCV versions are shown as below. Please choose the correct version of MMCV to avoid installation issues. + +| MMDetection version | MMCV version | +| :-----------------: | :------------------------: | +| master | mmcv-full>=1.3.17, \<1.6.0 | +| 2.25.0 | mmcv-full>=1.3.17, \<1.6.0 | +| 2.24.1 | mmcv-full>=1.3.17, \<1.6.0 | +| 2.24.0 | mmcv-full>=1.3.17, \<1.6.0 | +| 2.23.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.22.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.21.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.20.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.19.1 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.19.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.18.0 | mmcv-full>=1.3.17, \<1.4.0 | +| 2.17.0 | mmcv-full>=1.3.14, \<1.4.0 | +| 2.16.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 2.15.1 | mmcv-full>=1.3.8, \<1.4.0 | +| 2.15.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 2.14.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 2.13.0 | mmcv-full>=1.3.3, \<1.4.0 | +| 2.12.0 | mmcv-full>=1.3.3, \<1.4.0 | +| 2.11.0 | mmcv-full>=1.2.4, \<1.4.0 | +| 2.10.0 | mmcv-full>=1.2.4, \<1.4.0 | +| 2.9.0 | mmcv-full>=1.2.4, \<1.4.0 | +| 2.8.0 | mmcv-full>=1.2.4, \<1.4.0 | +| 2.7.0 | mmcv-full>=1.1.5, \<1.4.0 | +| 2.6.0 | mmcv-full>=1.1.5, \<1.4.0 | +| 2.5.0 | mmcv-full>=1.1.5, \<1.4.0 | +| 2.4.0 | mmcv-full>=1.1.1, \<1.4.0 | +| 2.3.0 | mmcv-full==1.0.5 | +| 2.3.0rc0 | mmcv-full>=1.0.2 | +| 2.2.1 | mmcv==0.6.2 | +| 2.2.0 | mmcv==0.6.2 | +| 2.1.0 | mmcv>=0.5.9, \<=0.6.1 | +| 2.0.0 | mmcv>=0.5.1, \<=0.5.8 | + +- "No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'". + + 1. Uninstall existing mmcv in the environment using `pip uninstall mmcv`. + 2. Install mmcv-full following the [installation instruction](get_started#best-practices). + +- Using albumentations + + If you would like to use `albumentations`, we suggest using `pip install -r requirements/albu.txt` or + `pip install -U albumentations --no-binary qudida,albumentations`. + If you simply use `pip install albumentations>=0.3.2`, it will install `opencv-python-headless` simultaneously (even though you have already installed `opencv-python`). + Please refer to the [official documentation](https://albumentations.ai/docs/getting_started/installation/#note-on-opencv-dependencies) for details. + +- ModuleNotFoundError is raised when using some algorithms + + Some extra dependencies are required for Instaboost, Panoptic Segmentation, LVIS dataset, etc. Please note the error message and install corresponding packages, e.g., + + ```shell + # for instaboost + pip install instaboostfast + # for panoptic segmentation + pip install git+https://github.com/cocodataset/panopticapi.git + # for LVIS dataset + pip install git+https://github.com/lvis-dataset/lvis-api.git + ``` + +## Coding + +- Do I need to reinstall mmdet after some code modifications + + If you follow the best practice and install mmdet with `pip install -e .`, any local modifications made to the code will take effect without reinstallation. + +- How to develop with multiple MMDetection versions + + You can have multiple folders like mmdet-2.21, mmdet-2.22. + When you run the train or test script, it will adopt the mmdet package in the current folder. + + To use the default MMDetection installed in the environment rather than the one you are working with, you can remove the following line in those scripts: + + ```shell + PYTHONPATH="$(dirname $0)/..":$PYTHONPATH + ``` + +## PyTorch/CUDA Environment + +- "RTX 30 series card fails when building MMCV or MMDet" + + 1. 
Temporary work-around: do `MMCV_WITH_OPS=1 MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80' pip install -e .`. + The common issue is `nvcc fatal : Unsupported gpu architecture 'compute_86'`. This means that the compiler should optimize for sm_86, i.e., nvidia 30 series card, but such optimizations have not been supported by CUDA toolkit 11.0. + This work-around modifies the compile flag by adding `MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80'`, which tells `nvcc` to optimize for **sm_80**, i.e., Nvidia A100. Although A100 is different from the 30 series card, they use similar ampere architecture. This may hurt the performance but it works. + 2. PyTorch developers have updated that the default compiler flags should be fixed by [pytorch/pytorch#47585](https://github.com/pytorch/pytorch/pull/47585). So using PyTorch-nightly may also be able to solve the problem, though we have not tested it yet. + +- "invalid device function" or "no kernel image is available for execution". + + 1. Check if your cuda runtime version (under `/usr/local/`), `nvcc --version` and `conda list cudatoolkit` version match. + 2. Run `python mmdet/utils/collect_env.py` to check whether PyTorch, torchvision, and MMCV are built for the correct GPU architecture. + You may need to set `TORCH_CUDA_ARCH_LIST` to reinstall MMCV. + The GPU arch table could be found [here](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list), + i.e. run `TORCH_CUDA_ARCH_LIST=7.0 pip install mmcv-full` to build MMCV for Volta GPUs. + The compatibility issue could happen when using old GPUS, e.g., Tesla K80 (3.7) on colab. + 3. Check whether the running environment is the same as that when mmcv/mmdet has compiled. + For example, you may compile mmcv using CUDA 10.0 but run it on CUDA 9.0 environments. + +- "undefined symbol" or "cannot open xxx.so". + + 1. If those symbols are CUDA/C++ symbols (e.g., libcudart.so or GLIBCXX), check whether the CUDA/GCC runtimes are the same as those used for compiling mmcv, + i.e. run `python mmdet/utils/collect_env.py` to see if `"MMCV Compiler"`/`"MMCV CUDA Compiler"` is the same as `"GCC"`/`"CUDA_HOME"`. + 2. If those symbols are PyTorch symbols (e.g., symbols containing caffe, aten, and TH), check whether the PyTorch version is the same as that used for compiling mmcv. + 3. Run `python mmdet/utils/collect_env.py` to check whether PyTorch, torchvision, and MMCV are built by and running on the same environment. + +- setuptools.sandbox.UnpickleableException: DistutilsSetupError("each element of 'ext_modules' option must be an Extension instance or 2-tuple") + + 1. If you are using miniconda rather than anaconda, check whether Cython is installed as indicated in [#3379](https://github.com/open-mmlab/mmdetection/issues/3379). + You need to manually install Cython first and then run command `pip install -r requirements.txt`. + 2. You may also need to check the compatibility between the `setuptools`, `Cython`, and `PyTorch` in your environment. + +- "Segmentation fault". + + 1. Check you GCC version and use GCC 5.4. This usually caused by the incompatibility between PyTorch and the environment (e.g., GCC \< 4.9 for PyTorch). We also recommend the users to avoid using GCC 5.5 because many feedbacks report that GCC 5.5 will cause "segmentation fault" and simply changing it to GCC 5.4 could solve the problem. + + 2. Check whether PyTorch is correctly installed and could use CUDA op, e.g. type the following command in your terminal. 
     ```shell
     python -c 'import torch; print(torch.cuda.is_available())'
     ```

     Then check whether it prints the expected result.

  3. If PyTorch is correctly installed, check whether MMCV is correctly installed.

     ```shell
     python -c 'import mmcv; import mmcv.ops'
     ```

     If MMCV is correctly installed, the above two commands will raise no errors.

  4. If MMCV and PyTorch are correctly installed, you may use `ipdb` or `pdb` to set breakpoints, or directly add `print` statements in the MMDetection code, to see which part leads to the segmentation fault.

## Training

- "Loss goes Nan"

  1. Check if the dataset annotations are valid: zero-size bounding boxes will cause the regression loss to be Nan due to the commonly used transformation for box regression. Some small boxes (width or height smaller than 1) will also cause this problem after data augmentation (e.g., instaboost). So check the data, filter out those zero-size boxes, and skip risky augmentations on the small boxes when you face this problem.
  2. Reduce the learning rate: the learning rate might be too large for some reason, e.g., a change of batch size. You can rescale it to a value that trains the model stably.
  3. Extend the warmup iterations: some models are sensitive to the learning rate at the start of training. You can extend the warmup iterations, e.g., change `warmup_iters` from 500 to 1000 or 2000.
  4. Add gradient clipping: some models require gradient clipping to stabilize the training process. The default of `grad_clip` is `None`. You can add gradient clipping to avoid gradients that are too large, i.e., set `optimizer_config=dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))` in your config file. If your config does not inherit from any base config that contains `optimizer_config=dict(grad_clip=None)`, you can simply add `optimizer_config=dict(grad_clip=dict(max_norm=35, norm_type=2))`.

- "GPU out of memory"

  1. There are scenarios with a large number of ground-truth boxes, which may cause OOM during target assignment. You can set `gpu_assign_thr=N` in the config of the assigner, so that the assigner calculates box overlaps on CPU when there are more than N GT boxes.

  2. Set `with_cp=True` in the backbone. This uses the sublinear strategy in PyTorch to reduce GPU memory cost in the backbone.

  3. Try mixed precision training following the examples in `config/fp16`. The `loss_scale` might need further tuning for different models.

  4. Try to use `AvoidCUDAOOM` to avoid GPU out of memory. It will first retry after calling `torch.cuda.empty_cache()`. If that still fails, it will retry by converting the inputs to FP16. If that still fails, it will try to copy the inputs from GPU to CPU to continue computing. Try `AvoidCUDAOOM` in your code to make the code continue to run when GPU memory runs out:

     ```python
     from mmdet.utils import AvoidCUDAOOM

     output = AvoidCUDAOOM.retry_if_cuda_oom(some_function)(input1, input2)
     ```

     You can also use `AvoidCUDAOOM` as a decorator to make the code continue to run when GPU memory runs out:

     ```python
     from mmdet.utils import AvoidCUDAOOM

     @AvoidCUDAOOM.retry_if_cuda_oom
     def function(*args, **kwargs):
         ...
         return xxx
     ```

- "RuntimeError: Expected to have finished reduction in the prior iteration before starting a new one"

  1. This error indicates that your module has parameters that were not used in producing loss. This phenomenon may be caused by running different branches of your code in DDP mode.
  2. You can set `find_unused_parameters = True` in the config to solve the above problem (but this will slow down the training speed).
  3. If your MMCV version is >= 1.4.1, you can get the names of those unused parameters by setting `detect_anomalous_params=True` in the `optimizer_config` of the config.

- Save the best model

  It can be turned on by configuring `evaluation = dict(save_best='auto')`. With the `auto` parameter, the first key in the returned evaluation result will be used as the criterion for selecting the best model. You can also directly set a key of the evaluation result to choose the criterion manually, for example, `evaluation = dict(save_best='mAP')`.

- Resume training with `ExpMomentumEMAHook`

  If you use `ExpMomentumEMAHook` in training, you can't restore the model parameters during resume just with the command-line option `--resume-from` or `--cfg-options resume_from`, i.e., the command `python tools/train.py configs/yolox/yolox_s_8x8_300e_coco.py --resume-from ./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth` will not work. Since `ExpMomentumEMAHook` needs to reload the weights, taking the `yolox_s` algorithm as an example, you should modify the value of `resume_from` in two places of the config as below:

  ```python
  # Open configs/yolox/yolox_s_8x8_300e_coco.py directly and modify all resume_from fields
  resume_from = './work_dir/yolox_s_8x8_300e_coco/epoch_x.pth'
  custom_hooks = [...
      dict(
          type='ExpMomentumEMAHook',
          resume_from='./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth',
          momentum=0.0001,
          priority=49)
  ]
  ```

## Evaluation

- COCO Dataset, AP or AR = -1
  1. According to the definition of the COCO dataset, the small and medium areas in an image are those smaller than 1024 (32\*32) and 9216 (96\*96) pixels, respectively.
  2. If the corresponding area has no object, the AP and AR results will be set to -1.

## Model

- `style` in ResNet

  The `style` parameter in ResNet allows either `pytorch` or `caffe` style. It indicates the difference in the Bottleneck module. Bottleneck is a stack of `1x1-3x3-1x1` convolutional layers. In `caffe` mode, the convolution layer with `stride=2` is the first `1x1` convolution, while in `pytorch` mode, it is the second `3x3` convolution that has `stride=2`. A sample code is as below:

  ```python
  if self.style == 'pytorch':
      self.conv1_stride = 1
      self.conv2_stride = stride
  else:
      self.conv1_stride = stride
      self.conv2_stride = 1
  ```

- ResNeXt parameter description

  ResNeXt comes from the paper [`Aggregated Residual Transformations for Deep Neural Networks`](https://arxiv.org/abs/1611.05431). It introduces grouped convolutions and uses "cardinality" to control the number of groups, achieving a balance between accuracy and complexity. It controls the base width and grouping of the internal Bottleneck module through the two hyperparameters `baseWidth` and `cardinality`. An example configuration name in MMDetection is `mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py`, where `mask_rcnn` means the algorithm is Mask R-CNN, `x101` means the backbone is ResNeXt-101, and `64x4d` means that the bottleneck block has 64 groups, each with a base width of 4.
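  A minimal sketch of how these two hyperparameters typically appear in a backbone config is given below. Only `groups` (the cardinality) and `base_width` are the point here; the surrounding fields follow the usual ResNet/ResNeXt backbone options and may differ between config files.

  ```python
  # Abridged backbone section of an X-101-64x4d style config (illustrative):
  # `groups` is the cardinality and `base_width` the per-group width.
  model = dict(
      backbone=dict(
          type='ResNeXt',
          depth=101,
          groups=64,
          base_width=4,
          num_stages=4,
          out_indices=(0, 1, 2, 3),
          frozen_stages=1,
          norm_cfg=dict(type='BN', requires_grad=True),
          style='pytorch'))
  ```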
+ +- `norm_eval` in backbone + + Since the detection model is usually large and the input image resolution is high, this will result in a small batch of the detection model, which will make the variance of the statistics calculated by BatchNorm during the training process very large and not as stable as the statistics obtained during the pre-training of the backbone network . Therefore, the `norm_eval=True` mode is generally used in training, and the BatchNorm statistics in the pre-trained backbone network are directly used. The few algorithms that use large batches are the `norm_eval=False` mode, such as NASFPN. For the backbone network without ImageNet pre-training and the batch is relatively small, you can consider using `SyncBN`. diff --git a/downstream/mmdetection/docs/en/get_started.md b/downstream/mmdetection/docs/en/get_started.md new file mode 100644 index 0000000..79e0df5 --- /dev/null +++ b/downstream/mmdetection/docs/en/get_started.md @@ -0,0 +1,208 @@ +# Prerequisites + +In this section we demonstrate how to prepare an environment with PyTorch. + +MMDetection works on Linux, Windows and macOS. It requires Python 3.6+, CUDA 9.2+ and PyTorch 1.5+. + +```{note} +If you are experienced with PyTorch and have already installed it, just skip this part and jump to the [next section](#installation). Otherwise, you can follow these steps for the preparation. +``` + +**Step 0.** Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html). + +**Step 1.** Create a conda environment and activate it. + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**Step 2.** Install PyTorch following [official instructions](https://pytorch.org/get-started/locally/), e.g. + +On GPU platforms: + +```shell +conda install pytorch torchvision -c pytorch +``` + +On CPU platforms: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +# Installation + +We recommend that users follow our best practices to install MMDetection. However, the whole process is highly customizable. See [Customize Installation](#customize-installation) section for more information. + +## Best Practices + +**Step 0.** Install [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim). + +```shell +pip install -U openmim +mim install mmcv-full +``` + +**Step 1.** Install MMDetection. + +Case a: If you develop and run mmdet directly, install it from source: + +```shell +git clone https://github.com/open-mmlab/mmdetection.git +cd mmdetection +pip install -v -e . +# "-v" means verbose, or more output +# "-e" means installing a project in editable mode, +# thus any local modifications made to the code will take effect without reinstallation. +``` + +Case b: If you use mmdet as a dependency or third-party package, install it with pip: + +```shell +pip install mmdet +``` + +## Verify the installation + +To verify whether MMDetection is installed correctly, we provide some sample codes to run an inference demo. + +**Step 1.** We need to download config and checkpoint files. + +```shell +mim download mmdet --config yolov3_mobilenetv2_320_300e_coco --dest . +``` + +The downloading will take several seconds or more, depending on your network environment. When it is done, you will find two files `yolov3_mobilenetv2_320_300e_coco.py` and `yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth` in your current folder. + +**Step 2.** Verify the inference demo. + +Option (a). 
If you install mmdetection from source, just run the following command. + +```shell +python demo/image_demo.py demo/demo.jpg yolov3_mobilenetv2_320_300e_coco.py yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth --device cpu --out-file result.jpg +``` + +You will see a new image `result.jpg` on your current folder, where bounding boxes are plotted on cars, benches, etc. + +Option (b). If you install mmdetection with pip, open you python interpreter and copy&paste the following codes. + +```python +from mmdet.apis import init_detector, inference_detector + +config_file = 'yolov3_mobilenetv2_320_300e_coco.py' +checkpoint_file = 'yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth' +model = init_detector(config_file, checkpoint_file, device='cpu') # or device='cuda:0' +inference_detector(model, 'demo/cat.jpg') +``` + +You will see a list of arrays printed, indicating the detected bounding boxes. + +## Customize Installation + +### CUDA versions + +When installing PyTorch, you need to specify the version of CUDA. If you are not clear on which to choose, follow our recommendations: + +- For Ampere-based NVIDIA GPUs, such as GeForce 30 series and NVIDIA A100, CUDA 11 is a must. +- For older NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 offers better compatibility and is more lightweight. + +Please make sure the GPU driver satisfies the minimum version requirements. See [this table](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) for more information. + +```{note} +Installing CUDA runtime libraries is enough if you follow our best practices, because no CUDA code will be compiled locally. However if you hope to compile MMCV from source or develop other CUDA operators, you need to install the complete CUDA toolkit from NVIDIA's [website](https://developer.nvidia.com/cuda-downloads), and its version should match the CUDA version of PyTorch. i.e., the specified version of cudatoolkit in `conda install` command. +``` + +### Install MMCV without MIM + +MMCV contains C++ and CUDA extensions, thus depending on PyTorch in a complex way. MIM solves such dependencies automatically and makes the installation easier. However, it is not a must. + +To install MMCV with pip instead of MIM, please follow [MMCV installation guides](https://mmcv.readthedocs.io/en/latest/get_started/installation.html). This requires manually specifying a find-url based on PyTorch version and its CUDA version. + +For example, the following command install mmcv-full built for PyTorch 1.10.x and CUDA 11.3. + +```shell +pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html +``` + +### Install on CPU-only platforms + +MMDetection can be built for CPU only environment. In CPU mode you can train (requires MMCV version >= 1.4.4), test or inference a model. + +However some functionalities are gone in this mode: + +- Deformable Convolution +- Modulated Deformable Convolution +- ROI pooling +- Deformable ROI pooling +- CARAFE +- SyncBatchNorm +- CrissCrossAttention +- MaskedConv2d +- Temporal Interlace Shift +- nms_cuda +- sigmoid_focal_loss_cuda +- bbox_overlaps + +If you try to train/test/inference a model containing above ops, an error will be raised. +The following table lists affected algorithms. 
+ +| Operator | Model | +| :-----------------------------------------------------: | :-------------------------------------------------------------------------------: | +| Deformable Convolution/Modulated Deformable Convolution | DCN、Guided Anchoring、RepPoints、CentripetalNet、VFNet、CascadeRPN、NAS-FCOS、DetectoRS | +| MaskedConv2d | Guided Anchoring | +| CARAFE | CARAFE | +| SyncBatchNorm | ResNeSt | + +### Install on Google Colab + +[Google Colab](https://research.google.com/) usually has PyTorch installed, +thus we only need to install MMCV and MMDetection with the following commands. + +**Step 1.** Install [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim). + +```shell +!pip3 install openmim +!mim install mmcv-full +``` + +**Step 2.** Install MMDetection from the source. + +```shell +!git clone https://github.com/open-mmlab/mmdetection.git +%cd mmdetection +!pip install -e . +``` + +**Step 3.** Verification. + +```python +import mmdet +print(mmdet.__version__) +# Example output: 2.23.0 +``` + +```{note} +Within Jupyter, the exclamation mark `!` is used to call external executables and `%cd` is a [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd) to change the current working directory of Python. +``` + +### Using MMDetection with Docker + +We provide a [Dockerfile](https://github.com/open-mmlab/mmdetection/blob/master/docker/Dockerfile) to build an image. Ensure that your [docker version](https://docs.docker.com/engine/install/) >=19.03. + +```shell +# build an image with PyTorch 1.6, CUDA 10.1 +# If you prefer other versions, just modified the Dockerfile +docker build -t mmdetection docker/ +``` + +Run it with + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmdetection/data mmdetection +``` + +## Trouble shooting + +If you have some issues during the installation, please first view the [FAQ](faq.md) page. +You may [open an issue](https://github.com/open-mmlab/mmdetection/issues/new/choose) on GitHub if no solution is found. diff --git a/downstream/mmdetection/docs/en/index.rst b/downstream/mmdetection/docs/en/index.rst new file mode 100644 index 0000000..c3406da --- /dev/null +++ b/downstream/mmdetection/docs/en/index.rst @@ -0,0 +1,57 @@ +Welcome to MMDetection's documentation! +======================================= + +.. toctree:: + :maxdepth: 2 + :caption: Get Started + + get_started.md + modelzoo_statistics.md + model_zoo.md + +.. toctree:: + :maxdepth: 2 + :caption: Quick Run + + 1_exist_data_model.md + 2_new_data_model.md + 3_exist_data_new_model.md + +.. toctree:: + :maxdepth: 2 + :caption: Tutorials + + tutorials/index.rst + +.. toctree:: + :maxdepth: 2 + :caption: Useful Tools and Scripts + + useful_tools.md + +.. toctree:: + :maxdepth: 2 + :caption: Notes + + conventions.md + compatibility.md + projects.md + changelog.md + faq.md + +.. toctree:: + :caption: Switch Language + + switch_language.md + +.. toctree:: + :maxdepth: 1 + :caption: API Reference + + api.rst + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/downstream/mmdetection/docs/en/make.bat b/downstream/mmdetection/docs/en/make.bat new file mode 100644 index 0000000..922152e --- /dev/null +++ b/downstream/mmdetection/docs/en/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. 
+set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/downstream/mmdetection/docs/en/model_zoo.md b/downstream/mmdetection/docs/en/model_zoo.md new file mode 100644 index 0000000..fcacdb0 --- /dev/null +++ b/downstream/mmdetection/docs/en/model_zoo.md @@ -0,0 +1,358 @@ +# Benchmark and Model Zoo + +## Mirror sites + +We only use aliyun to maintain the model zoo since MMDetection V2.0. The model zoo of V1.x has been deprecated. + +## Common settings + +- All models were trained on `coco_2017_train`, and tested on the `coco_2017_val`. +- We use distributed training. +- All pytorch-style pretrained backbones on ImageNet are from PyTorch model zoo, caffe-style pretrained backbones are converted from the newly released model from detectron2. +- For fair comparison with other codebases, we report the GPU memory as the maximum value of `torch.cuda.max_memory_allocated()` for all 8 GPUs. Note that this value is usually less than what `nvidia-smi` shows. +- We report the inference time as the total time of network forwarding and post-processing, excluding the data loading time. Results are obtained with the script [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) which computes the average time on 2000 images. + +## ImageNet Pretrained Models + +It is common to initialize from backbone models pre-trained on ImageNet classification task. All pre-trained model links can be found at [open_mmlab](https://github.com/open-mmlab/mmcv/blob/master/mmcv/model_zoo/open_mmlab.json). According to `img_norm_cfg` and source of weight, we can divide all the ImageNet pre-trained model weights into some cases: + +- TorchVision: Corresponding to torchvision weight, including ResNet50, ResNet101. The `img_norm_cfg` is `dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)`. +- Pycls: Corresponding to [pycls](https://github.com/facebookresearch/pycls) weight, including RegNetX. The `img_norm_cfg` is `dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)`. +- MSRA styles: Corresponding to [MSRA](https://github.com/KaimingHe/deep-residual-networks) weights, including ResNet50_Caffe and ResNet101_Caffe. The `img_norm_cfg` is `dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)`. +- Caffe2 styles: Currently only contains ResNext101_32x8d. The `img_norm_cfg` is `dict(mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False)`. +- Other styles: E.g SSD which corresponds to `img_norm_cfg` is `dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)` and YOLOv3 which corresponds to `img_norm_cfg` is `dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)`. 
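As a sketch of where `img_norm_cfg` is consumed, a typical data pipeline in a config built on a torchvision-pretrained backbone normalizes images with the values above. This is only an abridged, illustrative example; the exact transforms and their order depend on the model.

```python
# Abridged train pipeline of a typical torchvision-style config (illustrative);
# `Normalize` consumes the `img_norm_cfg` that matches the pretrained backbone.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
```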
+ +The detailed table of the commonly used backbone models in MMDetection is listed below : + +| model | source | link | description | +| ---------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| ResNet50 | TorchVision | [torchvision's ResNet-50](https://download.pytorch.org/models/resnet50-19c8e357.pth) | From [torchvision's ResNet-50](https://download.pytorch.org/models/resnet50-19c8e357.pth). | +| ResNet101 | TorchVision | [torchvision's ResNet-101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth) | From [torchvision's ResNet-101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth). | +| RegNetX | Pycls | [RegNetX_3.2gf](https://download.openmmlab.com/pretrain/third_party/regnetx_3.2gf-c2599b0f.pth), [RegNetX_800mf](https://download.openmmlab.com/pretrain/third_party/regnetx_800mf-1f4be4c7.pth). etc. | From [pycls](https://github.com/facebookresearch/pycls). | +| ResNet50_Caffe | MSRA | [MSRA's ResNet-50](https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth) | Converted copy of [Detectron2's R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl) model. The original weight comes from [MSRA's original ResNet-50](https://github.com/KaimingHe/deep-residual-networks). | +| ResNet101_Caffe | MSRA | [MSRA's ResNet-101](https://download.openmmlab.com/pretrain/third_party/resnet101_caffe-3ad79236.pth) | Converted copy of [Detectron2's R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl) model. The original weight comes from [MSRA's original ResNet-101](https://github.com/KaimingHe/deep-residual-networks). | +| ResNext101_32x8d | Caffe2 | [Caffe2 ResNext101_32x8d](https://download.openmmlab.com/pretrain/third_party/resnext101_32x8d-1516f1aa.pth) | Converted copy of [Detectron2's X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl) model. The ResNeXt-101-32x8d model trained with Caffe2 at FB. | + +## Baselines + +### RPN + +Please refer to [RPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/rpn) for details. + +### Faster R-CNN + +Please refer to [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn) for details. + +### Mask R-CNN + +Please refer to [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn) for details. + +### Fast R-CNN (with pre-computed proposals) + +Please refer to [Fast R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/fast_rcnn) for details. + +### RetinaNet + +Please refer to [RetinaNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet) for details. + +### Cascade R-CNN and Cascade Mask R-CNN + +Please refer to [Cascade R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/cascade_rcnn) for details. + +### Hybrid Task Cascade (HTC) + +Please refer to [HTC](https://github.com/open-mmlab/mmdetection/blob/master/configs/htc) for details. + +### SSD + +Please refer to [SSD](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd) for details. 
+ +### Group Normalization (GN) + +Please refer to [Group Normalization](https://github.com/open-mmlab/mmdetection/blob/master/configs/gn) for details. + +### Weight Standardization + +Please refer to [Weight Standardization](https://github.com/open-mmlab/mmdetection/blob/master/configs/gn+ws) for details. + +### Deformable Convolution v2 + +Please refer to [Deformable Convolutional Networks](https://github.com/open-mmlab/mmdetection/blob/master/configs/dcn) for details. + +### CARAFE: Content-Aware ReAssembly of FEatures + +Please refer to [CARAFE](https://github.com/open-mmlab/mmdetection/blob/master/configs/carafe) for details. + +### Instaboost + +Please refer to [Instaboost](https://github.com/open-mmlab/mmdetection/blob/master/configs/instaboost) for details. + +### Libra R-CNN + +Please refer to [Libra R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/libra_rcnn) for details. + +### Guided Anchoring + +Please refer to [Guided Anchoring](https://github.com/open-mmlab/mmdetection/blob/master/configs/guided_anchoring) for details. + +### FCOS + +Please refer to [FCOS](https://github.com/open-mmlab/mmdetection/blob/master/configs/fcos) for details. + +### FoveaBox + +Please refer to [FoveaBox](https://github.com/open-mmlab/mmdetection/blob/master/configs/foveabox) for details. + +### RepPoints + +Please refer to [RepPoints](https://github.com/open-mmlab/mmdetection/blob/master/configs/reppoints) for details. + +### FreeAnchor + +Please refer to [FreeAnchor](https://github.com/open-mmlab/mmdetection/blob/master/configs/free_anchor) for details. + +### Grid R-CNN (plus) + +Please refer to [Grid R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/grid_rcnn) for details. + +### GHM + +Please refer to [GHM](https://github.com/open-mmlab/mmdetection/blob/master/configs/ghm) for details. + +### GCNet + +Please refer to [GCNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/gcnet) for details. + +### HRNet + +Please refer to [HRNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet) for details. + +### Mask Scoring R-CNN + +Please refer to [Mask Scoring R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/ms_rcnn) for details. + +### Train from Scratch + +Please refer to [Rethinking ImageNet Pre-training](https://github.com/open-mmlab/mmdetection/blob/master/configs/scratch) for details. + +### NAS-FPN + +Please refer to [NAS-FPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/nas_fpn) for details. + +### ATSS + +Please refer to [ATSS](https://github.com/open-mmlab/mmdetection/blob/master/configs/atss) for details. + +### FSAF + +Please refer to [FSAF](https://github.com/open-mmlab/mmdetection/blob/master/configs/fsaf) for details. + +### RegNetX + +Please refer to [RegNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet) for details. + +### Res2Net + +Please refer to [Res2Net](https://github.com/open-mmlab/mmdetection/blob/master/configs/res2net) for details. + +### GRoIE + +Please refer to [GRoIE](https://github.com/open-mmlab/mmdetection/blob/master/configs/groie) for details. + +### Dynamic R-CNN + +Please refer to [Dynamic R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/dynamic_rcnn) for details. + +### PointRend + +Please refer to [PointRend](https://github.com/open-mmlab/mmdetection/blob/master/configs/point_rend) for details. 
+ +### DetectoRS + +Please refer to [DetectoRS](https://github.com/open-mmlab/mmdetection/blob/master/configs/detectors) for details. + +### Generalized Focal Loss + +Please refer to [Generalized Focal Loss](https://github.com/open-mmlab/mmdetection/blob/master/configs/gfl) for details. + +### CornerNet + +Please refer to [CornerNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/cornernet) for details. + +### YOLOv3 + +Please refer to [YOLOv3](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolo) for details. + +### PAA + +Please refer to [PAA](https://github.com/open-mmlab/mmdetection/blob/master/configs/paa) for details. + +### SABL + +Please refer to [SABL](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl) for details. + +### CentripetalNet + +Please refer to [CentripetalNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/centripetalnet) for details. + +### ResNeSt + +Please refer to [ResNeSt](https://github.com/open-mmlab/mmdetection/blob/master/configs/resnest) for details. + +### DETR + +Please refer to [DETR](https://github.com/open-mmlab/mmdetection/blob/master/configs/detr) for details. + +### Deformable DETR + +Please refer to [Deformable DETR](https://github.com/open-mmlab/mmdetection/blob/master/configs/deformable_detr) for details. + +### AutoAssign + +Please refer to [AutoAssign](https://github.com/open-mmlab/mmdetection/blob/master/configs/autoassign) for details. + +### YOLOF + +Please refer to [YOLOF](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolof) for details. + +### Seesaw Loss + +Please refer to [Seesaw Loss](https://github.com/open-mmlab/mmdetection/blob/master/configs/seesaw_loss) for details. + +### CenterNet + +Please refer to [CenterNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/centernet) for details. + +### YOLOX + +Please refer to [YOLOX](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolox) for details. + +### PVT + +Please refer to [PVT](https://github.com/open-mmlab/mmdetection/blob/master/configs/pvt) for details. + +### SOLO + +Please refer to [SOLO](https://github.com/open-mmlab/mmdetection/blob/master/configs/solo) for details. + +### QueryInst + +Please refer to [QueryInst](https://github.com/open-mmlab/mmdetection/blob/master/configs/queryinst) for details. + +### PanopticFPN + +Please refer to [PanopticFPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/panoptic_fpn) for details. + +### MaskFormer + +Please refer to [MaskFormer](https://github.com/open-mmlab/mmdetection/blob/master/configs/maskformer) for details. + +### DyHead + +Please refer to [DyHead](https://github.com/open-mmlab/mmdetection/blob/master/configs/dyhead) for details. + +### Mask2Former + +Please refer to [Mask2Former](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former) for details. + +### Efficientnet + +Please refer to [Efficientnet](https://github.com/open-mmlab/mmdetection/blob/master/configs/efficientnet) for details. + +### Other datasets + +We also benchmark some methods on [PASCAL VOC](https://github.com/open-mmlab/mmdetection/blob/master/configs/pascal_voc), [Cityscapes](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes), [OpenImages](https://github.com/open-mmlab/mmdetection/blob/master/configs/openimages) and [WIDER FACE](https://github.com/open-mmlab/mmdetection/blob/master/configs/wider_face). 
+ +### Pre-trained Models + +We also train [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn) and [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn) using ResNet-50 and [RegNetX-3.2G](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet) with multi-scale training and longer schedules. These models serve as strong pre-trained models for downstream tasks for convenience. + +## Speed benchmark + +### Training Speed benchmark + +We provide [analyze_logs.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/analyze_logs.py) to get average time of iteration in training. You can find examples in [Log Analysis](https://mmdetection.readthedocs.io/en/latest/useful_tools.html#log-analysis). + +We compare the training speed of Mask R-CNN with some other popular frameworks (The data is copied from [detectron2](https://github.com/facebookresearch/detectron2/blob/master/docs/notes/benchmarks.md/)). +For mmdetection, we benchmark with [mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py), which should have the same setting with [mask_rcnn_R_50_FPN_noaug_1x.yaml](https://github.com/facebookresearch/detectron2/blob/master/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml) of detectron2. +We also provide the [checkpoint](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug_compare_20200518-10127928.pth) and [training log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug_20200518_105755.log.json) for reference. The throughput is computed as the average throughput in iterations 100-500 to skip GPU warmup time. + +| Implementation | Throughput (img/s) | +| -------------------------------------------------------------------------------------- | ------------------ | +| [Detectron2](https://github.com/facebookresearch/detectron2) | 62 | +| [MMDetection](https://github.com/open-mmlab/mmdetection) | 61 | +| [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/) | 53 | +| [tensorpack](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) | 50 | +| [simpledet](https://github.com/TuSimple/simpledet/) | 39 | +| [Detectron](https://github.com/facebookresearch/Detectron) | 19 | +| [matterport/Mask_RCNN](https://github.com/matterport/Mask_RCNN/) | 14 | + +### Inference Speed Benchmark + +We provide [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) to benchmark the inference latency. +The script benchmarkes the model with 2000 images and calculates the average time ignoring first 5 times. You can change the output log interval (defaults: 50) by setting `LOG-INTERVAL`. + +```shell +python tools/benchmark.py ${CONFIG} ${CHECKPOINT} [--log-interval $[LOG-INTERVAL]] [--fuse-conv-bn] +``` + +The latency of all models in our model zoo is benchmarked without setting `fuse-conv-bn`, you can get a lower latency by setting it. + +## Comparison with Detectron2 + +We compare mmdetection with [Detectron2](https://github.com/facebookresearch/detectron2.git) in terms of speed and performance. 
+We use the commit id [185c27e](https://github.com/facebookresearch/detectron2/tree/185c27e4b4d2d4c68b5627b3765420c6d7f5a659)(30/4/2020) of detectron. +For fair comparison, we install and run both frameworks on the same machine. + +### Hardware + +- 8 NVIDIA Tesla V100 (32G) GPUs +- Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz + +### Software environment + +- Python 3.7 +- PyTorch 1.4 +- CUDA 10.1 +- CUDNN 7.6.03 +- NCCL 2.4.08 + +### Performance + +| Type | Lr schd | Detectron2 | mmdetection | Download | +| -------------------------------------------------------------------------------------------------------------------------------------- | ------- | -------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py) | 1x | [37.9](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml) | 38.0 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-5324cff8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco_20200429_234554.log.json) | +| [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py) | 1x | [38.6 & 35.2](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml) | 38.8 & 35.4 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco-dbecf295.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco_20200430_054239.log.json) | +| [Retinanet](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py) | 1x | [36.5](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml) | 37.0 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/retinanet_r50_caffe_fpn_mstrain_1x_coco/retinanet_r50_caffe_fpn_mstrain_1x_coco-586977a0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/retinanet_r50_caffe_fpn_mstrain_1x_coco/retinanet_r50_caffe_fpn_mstrain_1x_coco_20200430_014748.log.json) | + +### Training Speed + +The training speed is measure with s/iter. The lower, the better. + +| Type | Detectron2 | mmdetection | +| ------------ | ---------- | ----------- | +| Faster R-CNN | 0.210 | 0.216 | +| Mask R-CNN | 0.261 | 0.265 | +| Retinanet | 0.200 | 0.205 | + +### Inference Speed + +The inference speed is measured with fps (img/s) on a single GPU, the higher, the better. +To be consistent with Detectron2, we report the pure inference speed (without the time of data loading). 
+For Mask R-CNN, we exclude the time of RLE encoding in post-processing. +We also include the officially reported speed in the parentheses, which is slightly higher +than the results tested on our server due to differences of hardwares. + +| Type | Detectron2 | mmdetection | +| ------------ | ----------- | ----------- | +| Faster R-CNN | 25.6 (26.3) | 22.2 | +| Mask R-CNN | 22.5 (23.3) | 19.6 | +| Retinanet | 17.8 (18.2) | 20.6 | + +### Training memory + +| Type | Detectron2 | mmdetection | +| ------------ | ---------- | ----------- | +| Faster R-CNN | 3.0 | 3.8 | +| Mask R-CNN | 3.4 | 3.9 | +| Retinanet | 3.9 | 3.4 | diff --git a/downstream/mmdetection/docs/en/projects.md b/downstream/mmdetection/docs/en/projects.md new file mode 100644 index 0000000..3123e2b --- /dev/null +++ b/downstream/mmdetection/docs/en/projects.md @@ -0,0 +1,57 @@ +# Projects based on MMDetection + +There are many projects built upon MMDetection. +We list some of them as examples of how to extend MMDetection for your own projects. +As the page might not be completed, please feel free to create a PR to update this page. + +## Projects as an extension + +Some projects extend the boundary of MMDetection for deployment or other research fields. +They reveal the potential of what MMDetection can do. We list several of them as below. + +- [OTEDetection](https://github.com/opencv/mmdetection): OpenVINO training extensions for object detection. +- [MMDetection3d](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. + +## Projects of papers + +There are also projects released with papers. +Some of the papers are published in top-tier conferences (CVPR, ICCV, and ECCV), the others are also highly influential. +To make this list also a reference for the community to develop and compare new object detection algorithms, we list them following the time order of top-tier conferences. +Methods already supported and maintained by MMDetection are not listed. + +- Involution: Inverting the Inherence of Convolution for Visual Recognition, CVPR21. [\[paper\]](https://arxiv.org/abs/2103.06255)[\[github\]](https://github.com/d-li14/involution) +- Multiple Instance Active Learning for Object Detection, CVPR 2021. [\[paper\]](https://openaccess.thecvf.com/content/CVPR2021/papers/Yuan_Multiple_Instance_Active_Learning_for_Object_Detection_CVPR_2021_paper.pdf)[\[github\]](https://github.com/yuantn/MI-AOD) +- Adaptive Class Suppression Loss for Long-Tail Object Detection, CVPR 2021. [\[paper\]](https://arxiv.org/abs/2104.00885)[\[github\]](https://github.com/CASIA-IVA-Lab/ACSL) +- Generalizable Pedestrian Detection: The Elephant In The Room, CVPR2021. [\[paper\]](https://arxiv.org/abs/2003.08799)[\[github\]](https://github.com/hasanirtiza/Pedestron) +- Group Fisher Pruning for Practical Network Compression, ICML2021. [\[paper\]](https://github.com/jshilong/FisherPruning/blob/main/resources/paper.pdf)[\[github\]](https://github.com/jshilong/FisherPruning) +- Overcoming Classifier Imbalance for Long-tail Object Detection with Balanced Group Softmax, CVPR2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/papers/Li_Overcoming_Classifier_Imbalance_for_Long-Tail_Object_Detection_With_Balanced_Group_CVPR_2020_paper.pdf)[\[github\]](https://github.com/FishYuLi/BalancedGroupSoftmax) +- Coherent Reconstruction of Multiple Humans from a Single Image, CVPR2020. 
[\[paper\]](https://jiangwenpl.github.io/multiperson/)[\[github\]](https://github.com/JiangWenPL/multiperson) +- Look-into-Object: Self-supervised Structure Modeling for Object Recognition, CVPR 2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/papers/Zhou_Look-Into-Object_Self-Supervised_Structure_Modeling_for_Object_Recognition_CVPR_2020_paper.pdf)[\[github\]](https://github.com/JDAI-CV/LIO) +- Video Panoptic Segmentation, CVPR2020. [\[paper\]](https://arxiv.org/abs/2006.11339)[\[github\]](https://github.com/mcahny/vps) +- D2Det: Towards High Quality Object Detection and Instance Segmentation, CVPR2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/html/Cao_D2Det_Towards_High_Quality_Object_Detection_and_Instance_Segmentation_CVPR_2020_paper.html)[\[github\]](https://github.com/JialeCao001/D2Det) +- CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.09119)[\[github\]](https://github.com/KiveeDong/CentripetalNet) +- Learning a Unified Sample Weighting Network for Object Detection, CVPR 2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/html/Cai_Learning_a_Unified_Sample_Weighting_Network_for_Object_Detection_CVPR_2020_paper.html)[\[github\]](https://github.com/caiqi/sample-weighting-network) +- Scale-equalizing Pyramid Convolution for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2005.03101) [\[github\]](https://github.com/jshilong/SEPC) +- Revisiting the Sibling Head in Object Detector, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.07540)[\[github\]](https://github.com/Sense-X/TSD) +- PolarMask: Single Shot Instance Segmentation with Polar Representation, CVPR2020. [\[paper\]](https://arxiv.org/abs/1909.13226)[\[github\]](https://github.com/xieenze/PolarMask) +- Hit-Detector: Hierarchical Trinity Architecture Search for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.11818)[\[github\]](https://github.com/ggjy/HitDet.pytorch) +- ZeroQ: A Novel Zero Shot Quantization Framework, CVPR2020. [\[paper\]](https://arxiv.org/abs/2001.00281)[\[github\]](https://github.com/amirgholami/ZeroQ) +- CBNet: A Novel Composite Backbone Network Architecture for Object Detection, AAAI2020. [\[paper\]](https://aaai.org/Papers/AAAI/2020GB/AAAI-LiuY.1833.pdf)[\[github\]](https://github.com/VDIGPKU/CBNet) +- RDSNet: A New Deep Architecture for Reciprocal Object Detection and Instance Segmentation, AAAI2020. [\[paper\]](https://arxiv.org/abs/1912.05070)[\[github\]](https://github.com/wangsr126/RDSNet) +- Training-Time-Friendly Network for Real-Time Object Detection, AAAI2020. [\[paper\]](https://arxiv.org/abs/1909.00700)[\[github\]](https://github.com/ZJULearning/ttfnet) +- Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution, NeurIPS 2019. [\[paper\]](https://arxiv.org/abs/1909.06720)[\[github\]](https://github.com/thangvubk/Cascade-RPN) +- Reasoning R-CNN: Unifying Adaptive Global Reasoning into Large-scale Object Detection, CVPR2019. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2019/papers/Xu_Reasoning-RCNN_Unifying_Adaptive_Global_Reasoning_Into_Large-Scale_Object_Detection_CVPR_2019_paper.pdf)[\[github\]](https://github.com/chanyn/Reasoning-RCNN) +- Learning RoI Transformer for Oriented Object Detection in Aerial Images, CVPR2019. [\[paper\]](https://arxiv.org/abs/1812.00155)[\[github\]](https://github.com/dingjiansw101/AerialDetection) +- SOLO: Segmenting Objects by Locations. 
[\[paper\]](https://arxiv.org/abs/1912.04488)[\[github\]](https://github.com/WXinlong/SOLO) +- SOLOv2: Dynamic, Faster and Stronger. [\[paper\]](https://arxiv.org/abs/2003.10152)[\[github\]](https://github.com/WXinlong/SOLO) +- Dense Peppoints: Representing Visual Objects with Dense Point Sets. [\[paper\]](https://arxiv.org/abs/1912.11473)[\[github\]](https://github.com/justimyhxu/Dense-RepPoints) +- IterDet: Iterative Scheme for Object Detection in Crowded Environments. [\[paper\]](https://arxiv.org/abs/2005.05708)[\[github\]](https://github.com/saic-vul/iterdet) +- Cross-Iteration Batch Normalization. [\[paper\]](https://arxiv.org/abs/2002.05712)[\[github\]](https://github.com/Howal/Cross-iterationBatchNorm) +- A Ranking-based, Balanced Loss Function Unifying Classification and Localisation in Object Detection, NeurIPS2020 [\[paper\]](https://arxiv.org/abs/2009.13592)[\[github\]](https://github.com/kemaloksuz/aLRPLoss) +- RelationNet++: Bridging Visual Representations for Object Detection via Transformer Decoder, NeurIPS2020 [\[paper\]](https://arxiv.org/abs/2010.15831)[\[github\]](https://github.com/microsoft/RelationNet2) +- Generalized Focal Loss V2: Learning Reliable Localization Quality Estimation for Dense Object Detection, CVPR2021[\[paper\]](https://arxiv.org/abs/2011.12885)[\[github\]](https://github.com/implus/GFocalV2) +- Swin Transformer: Hierarchical Vision Transformer using Shifted Windows, ICCV2021[\[paper\]](https://arxiv.org/abs/2103.14030)[\[github\]](https://github.com/SwinTransformer/) +- Focal Transformer: Focal Self-attention for Local-Global Interactions in Vision Transformers, NeurIPS2021[\[paper\]](https://arxiv.org/abs/2107.00641)[\[github\]](https://github.com/microsoft/Focal-Transformer) +- End-to-End Semi-Supervised Object Detection with Soft Teacher, ICCV2021[\[paper\]](https://arxiv.org/abs/2106.09018)[\[github\]](https://github.com/microsoft/SoftTeacher) +- CBNetV2: A Novel Composite Backbone Network Architecture for Object Detection [\[paper\]](http://arxiv.org/abs/2107.00420)[\[github\]](https://github.com/VDIGPKU/CBNetV2) +- Instances as Queries, ICCV2021 [\[paper\]](https://openaccess.thecvf.com/content/ICCV2021/papers/Fang_Instances_As_Queries_ICCV_2021_paper.pdf)[\[github\]](https://github.com/hustvl/QueryInst) diff --git a/downstream/mmdetection/docs/en/robustness_benchmarking.md b/downstream/mmdetection/docs/en/robustness_benchmarking.md new file mode 100644 index 0000000..bb624ee --- /dev/null +++ b/downstream/mmdetection/docs/en/robustness_benchmarking.md @@ -0,0 +1,110 @@ +# Corruption Benchmarking + +## Introduction + +We provide tools to test object detection and instance segmentation models on the image corruption benchmark defined in [Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming](https://arxiv.org/abs/1907.07484). +This page provides basic tutorials how to use the benchmark. + +```latex +@article{michaelis2019winter, + title={Benchmarking Robustness in Object Detection: + Autonomous Driving when Winter is Coming}, + author={Michaelis, Claudio and Mitzkus, Benjamin and + Geirhos, Robert and Rusak, Evgenia and + Bringmann, Oliver and Ecker, Alexander S. 
and + Bethge, Matthias and Brendel, Wieland}, + journal={arXiv:1907.07484}, + year={2019} +} +``` + +![image corruption example](../resources/corruptions_sev_3.png) + +## About the benchmark + +To submit results to the benchmark, please visit the [benchmark homepage](https://github.com/bethgelab/robust-detection-benchmark). + +The benchmark is modelled after the [imagenet-c benchmark](https://github.com/hendrycks/robustness), which was originally +published in [Benchmarking Neural Network Robustness to Common Corruptions and Perturbations](https://arxiv.org/abs/1903.12261) (ICLR 2019) by Dan Hendrycks and Thomas Dietterich. + +The image corruption functions are included in this library but can be installed separately using: + +```shell +pip install imagecorruptions +``` + +Compared to imagenet-c, a few changes had to be made to handle images of arbitrary size and greyscale images. +We also modified the 'motion blur' and 'snow' corruptions to remove the dependency on a Linux-specific library, +which would otherwise have to be installed separately. For details, please refer to the [imagecorruptions repository](https://github.com/bethgelab/imagecorruptions). + +## Inference with pretrained models + +We provide a testing script to evaluate a model's performance on any combination of the corruptions provided in the benchmark. + +### Test a dataset + +- [x] single GPU testing +- [ ] multiple GPU testing +- [ ] visualize detection results + +You can use the following commands to test a model's performance under the 15 corruptions used in the benchmark. + +```shell +# single-gpu testing +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] +``` + +Alternatively, different groups of corruptions can be selected. + +```shell +# noise +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions noise + +# blur +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions blur + +# weather +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions weather + +# digital +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions digital +``` + +Or a custom set of corruptions, e.g.: + +```shell +# gaussian noise, zoom blur and snow +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions gaussian_noise zoom_blur snow +``` + +Finally, the corruption severities to evaluate can be chosen. +Severity 0 corresponds to clean data and the effect increases from 1 to 5. + +```shell +# severity 1 +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --severities 1 + +# severities 0,2,4 +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --severities 0 2 4 +``` + +## Results for modelzoo models + +The results on COCO 2017val are shown in the table below. + +| Model | Backbone | Style | Lr schd | box AP clean | box AP corr. | box % | mask AP clean | mask AP corr.
| mask % | +| :-----------------: | :-----------------: | :-----: | :-----: | :----------: | :----------: | :---: | :-----------: | :-----------: | :----: | +| Faster R-CNN | R-50-FPN | pytorch | 1x | 36.3 | 18.2 | 50.2 | - | - | - | +| Faster R-CNN | R-101-FPN | pytorch | 1x | 38.5 | 20.9 | 54.2 | - | - | - | +| Faster R-CNN | X-101-32x4d-FPN | pytorch | 1x | 40.1 | 22.3 | 55.5 | - | - | - | +| Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 41.3 | 23.4 | 56.6 | - | - | - | +| Faster R-CNN | R-50-FPN-DCN | pytorch | 1x | 40.0 | 22.4 | 56.1 | - | - | - | +| Faster R-CNN | X-101-32x4d-FPN-DCN | pytorch | 1x | 43.4 | 26.7 | 61.6 | - | - | - | +| Mask R-CNN | R-50-FPN | pytorch | 1x | 37.3 | 18.7 | 50.1 | 34.2 | 16.8 | 49.1 | +| Mask R-CNN | R-50-FPN-DCN | pytorch | 1x | 41.1 | 23.3 | 56.7 | 37.2 | 20.7 | 55.7 | +| Cascade R-CNN | R-50-FPN | pytorch | 1x | 40.4 | 20.1 | 49.7 | - | - | - | +| Cascade Mask R-CNN | R-50-FPN | pytorch | 1x | 41.2 | 20.7 | 50.2 | 35.7 | 17.6 | 49.3 | +| RetinaNet | R-50-FPN | pytorch | 1x | 35.6 | 17.8 | 50.1 | - | - | - | +| Hybrid Task Cascade | X-101-64x4d-FPN-DCN | pytorch | 1x | 50.6 | 32.7 | 64.7 | 43.8 | 28.1 | 64.0 | + +Results may vary slightly due to the stochastic application of the corruptions. diff --git a/downstream/mmdetection/docs/en/stat.py b/downstream/mmdetection/docs/en/stat.py new file mode 100755 index 0000000..427c27b --- /dev/null +++ b/downstream/mmdetection/docs/en/stat.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +import functools as func +import glob +import os.path as osp +import re + +import numpy as np + +url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/configs' + +files = sorted(glob.glob('../../configs/*/README.md')) + +stats = [] +titles = [] +num_ckpts = 0 + +for f in files: + url = osp.dirname(f.replace('../../configs', url_prefix)) + + with open(f, 'r') as content_file: + content = content_file.read() + + title = content.split('\n')[0].replace('# ', '').strip() + ckpts = set(x.lower().strip() + for x in re.findall(r'\[model\]\((https?.*)\)', content)) + + if len(ckpts) == 0: + continue + + _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] + assert len(_papertype) > 0 + papertype = _papertype[0] + + paper = set([(papertype, title)]) + + titles.append(title) + num_ckpts += len(ckpts) + + statsmsg = f""" +\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) +""" + stats.append((paper, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) +msglist = '\n'.join(x for _, _, x in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# Model Zoo Statistics + +* Number of papers: {len(set(titles))} +{countstr} + +* Number of checkpoints: {num_ckpts} + +{msglist} +""" + +with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) diff --git a/downstream/mmdetection/docs/en/switch_language.md b/downstream/mmdetection/docs/en/switch_language.md new file mode 100644 index 0000000..b2c4ad9 --- /dev/null +++ b/downstream/mmdetection/docs/en/switch_language.md @@ -0,0 +1,3 @@ +## English + +## 简体中文 diff --git a/downstream/mmdetection/docs/en/tutorials/config.md b/downstream/mmdetection/docs/en/tutorials/config.md new file mode 100644 index 0000000..6b232cf --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/config.md @@ -0,0 +1,544 @@ +# Tutorial 1: Learn about Configs + +We incorporate modular and inheritance 
design into our config system, which makes it convenient to conduct various experiments. +If you wish to inspect the config file, you may run `python tools/misc/print_config.py /PATH/TO/CONFIG` to see the complete config. + +## Modify config through script arguments + +When submitting jobs using "tools/train.py" or "tools/test.py", you may specify `--cfg-options` to modify the config in place. + +- Update config keys of dict chains. + + The config options can be specified following the order of the dict keys in the original config. + For example, `--cfg-options model.backbone.norm_eval=False` changes all BN modules in the model backbone to `train` mode. + +- Update keys inside a list of configs. + + Some config dicts are composed as a list in your config. For example, the training pipeline `data.train.pipeline` is normally a list + e.g. `[dict(type='LoadImageFromFile'), ...]`. If you want to change `'LoadImageFromFile'` to `'LoadImageFromWebcam'` in the pipeline, + you may specify `--cfg-options data.train.pipeline.0.type=LoadImageFromWebcam`. + +- Update values of list/tuples. + + The value to be updated may be a list or a tuple. For example, the config file normally sets `workflow=[('train', 1)]`. If you want to + change this key, you may specify `--cfg-options workflow="[(train,1),(val,1)]"`. Note that the quotation mark " is necessary to + support list/tuple data types, and that **NO** white space is allowed inside the quotation marks in the specified value. + +## Config File Structure + +There are 4 basic component types under `config/_base_`: dataset, model, schedule, default_runtime. +Many methods, such as Faster R-CNN, Mask R-CNN, Cascade R-CNN, RPN and SSD, can be easily constructed with one of each. +The configs that are composed of components from `_base_` are called _primitive_. + +For all configs under the same folder, it is recommended to have only **one** _primitive_ config. All other configs should inherit from the _primitive_ config. In this way, the maximum inheritance level is 3. + +For ease of understanding, we recommend contributors inherit from existing methods. +For example, if some modification is made based on Faster R-CNN, users may first inherit the basic Faster R-CNN structure by specifying `_base_ = ../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py`, then modify the necessary fields in the config files. + +If you are building an entirely new method that does not share the structure with any of the existing methods, you may create a folder `xxx_rcnn` under `configs`. + +Please refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html) for detailed documentation. + +## Config Name Style + +We follow the style below to name config files. Contributors are advised to follow the same style. + +``` +{model}_[model setting]_{backbone}_{neck}_[norm setting]_[misc]_[gpu x batch_per_gpu]_{schedule}_{dataset} +``` + +`{xxx}` is a required field and `[yyy]` is optional. + +- `{model}`: model type like `faster_rcnn`, `mask_rcnn`, etc. +- `[model setting]`: specific setting for some model, like `without_semantic` for `htc`, `moment` for `reppoints`, etc. +- `{backbone}`: backbone type like `r50` (ResNet-50), `x101` (ResNeXt-101). +- `{neck}`: neck type like `fpn`, `pafpn`, `nasfpn`, `c4`. +- `[norm_setting]`: `bn` (Batch Normalization) is used unless specified, other norm layer types could be `gn` (Group Normalization), `syncbn` (Synchronized Batch Normalization).
+ `gn-head`/`gn-neck` indicates GN is applied in head/neck only, while `gn-all` means GN is applied in the entire model, e.g. backbone, neck, head. +- `[misc]`: miscellaneous setting/plugins of model, e.g. `dconv`, `gcb`, `attention`, `albu`, `mstrain`. +- `[gpu x batch_per_gpu]`: GPUs and samples per GPU, `8x2` is used by default. +- `{schedule}`: training schedule, options are `1x`, `2x`, `20e`, etc. + `1x` and `2x` means 12 epochs and 24 epochs respectively. + `20e` is adopted in cascade models, which denotes 20 epochs. + For `1x`/`2x`, initial learning rate decays by a factor of 10 at the 8/16th and 11/22th epochs. + For `20e`, initial learning rate decays by a factor of 10 at the 16th and 19th epochs. +- `{dataset}`: dataset like `coco`, `cityscapes`, `voc_0712`, `wider_face`. + +## Deprecated train_cfg/test_cfg + +The `train_cfg` and `test_cfg` are deprecated in config file, please specify them in the model config. The original config structure is as below. + +```python +# deprecated +model = dict( + type=..., + ... +) +train_cfg=dict(...) +test_cfg=dict(...) +``` + +The migration example is as below. + +```python +# recommended +model = dict( + type=..., + ... + train_cfg=dict(...), + test_cfg=dict(...), +) +``` + +## An Example of Mask R-CNN + +To help the users have a basic idea of a complete config and the modules in a modern detection system, +we make brief comments on the config of Mask R-CNN using ResNet50 and FPN as the following. +For more detailed usage and the corresponding alternative for each modules, please refer to the API documentation. + +```python +model = dict( + type='MaskRCNN', # The name of detector + backbone=dict( # The config of backbone + type='ResNet', # The type of the backbone, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py#L308 for more details. + depth=50, # The depth of backbone, usually it is 50 or 101 for ResNet and ResNext backbones. + num_stages=4, # Number of stages of the backbone. + out_indices=(0, 1, 2, 3), # The index of output feature maps produced in each stages + frozen_stages=1, # The weights in the first 1 stage are frozen + norm_cfg=dict( # The config of normalization layers. + type='BN', # Type of norm layer, usually it is BN or GN + requires_grad=True), # Whether to train the gamma and beta in BN + norm_eval=True, # Whether to freeze the statistics in BN + style='pytorch', # The style of backbone, 'pytorch' means that stride 2 layers are in 3x3 conv, 'caffe' means stride 2 layers are in 1x1 convs. + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), # The ImageNet pretrained backbone to be loaded + neck=dict( + type='FPN', # The neck of detector is FPN. We also support 'NASFPN', 'PAFPN', etc. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/fpn.py#L10 for more details. + in_channels=[256, 512, 1024, 2048], # The input channels, this is consistent with the output channels of backbone + out_channels=256, # The output channels of each level of the pyramid feature map + num_outs=5), # The number of output scales + rpn_head=dict( + type='RPNHead', # The type of RPN head is 'RPNHead', we also support 'GARPNHead', etc. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/rpn_head.py#L12 for more details. + in_channels=256, # The input channels of each input feature map, this is consistent with the output channels of neck + feat_channels=256, # Feature channels of convolutional layers in the head. 
+ anchor_generator=dict( # The config of anchor generator + type='AnchorGenerator', # Most of methods use AnchorGenerator, SSD Detectors uses `SSDAnchorGenerator`. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/anchor/anchor_generator.py#L10 for more details + scales=[8], # Basic scale of the anchor, the area of the anchor in one position of a feature map will be scale * base_sizes + ratios=[0.5, 1.0, 2.0], # The ratio between height and width. + strides=[4, 8, 16, 32, 64]), # The strides of the anchor generator. This is consistent with the FPN feature strides. The strides will be taken as base_sizes if base_sizes is not set. + bbox_coder=dict( # Config of box coder to encode and decode the boxes during training and testing + type='DeltaXYWHBBoxCoder', # Type of box coder. 'DeltaXYWHBBoxCoder' is applied for most of methods. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py#L9 for more details. + target_means=[0.0, 0.0, 0.0, 0.0], # The target means used to encode and decode boxes + target_stds=[1.0, 1.0, 1.0, 1.0]), # The standard variance used to encode and decode boxes + loss_cls=dict( # Config of loss function for the classification branch + type='CrossEntropyLoss', # Type of loss for classification branch, we also support FocalLoss etc. + use_sigmoid=True, # RPN usually perform two-class classification, so it usually uses sigmoid function. + loss_weight=1.0), # Loss weight of the classification branch. + loss_bbox=dict( # Config of loss function for the regression branch. + type='L1Loss', # Type of loss, we also support many IoU Losses and smooth L1-loss, etc. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/smooth_l1_loss.py#L56 for implementation. + loss_weight=1.0)), # Loss weight of the regression branch. + roi_head=dict( # RoIHead encapsulates the second stage of two-stage/cascade detectors. + type='StandardRoIHead', # Type of the RoI head. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/standard_roi_head.py#L10 for implementation. + bbox_roi_extractor=dict( # RoI feature extractor for bbox regression. + type='SingleRoIExtractor', # Type of the RoI feature extractor, most of methods uses SingleRoIExtractor. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/roi_extractors/single_level.py#L10 for details. + roi_layer=dict( # Config of RoI Layer + type='RoIAlign', # Type of RoI Layer, DeformRoIPoolingPack and ModulatedDeformRoIPoolingPack are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/roi_align/roi_align.py#L79 for details. + output_size=7, # The output size of feature maps. + sampling_ratio=0), # Sampling ratio when extracting the RoI features. 0 means adaptive ratio. + out_channels=256, # output channels of the extracted feature. + featmap_strides=[4, 8, 16, 32]), # Strides of multi-scale feature maps. It should be consistent to the architecture of the backbone. + bbox_head=dict( # Config of box head in the RoIHead. + type='Shared2FCBBoxHead', # Type of the bbox head, Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py#L177 for implementation details. + in_channels=256, # Input channels for bbox head. This is consistent with the out_channels in roi_extractor + fc_out_channels=1024, # Output feature channels of FC layers. 
+ roi_feat_size=7, # Size of RoI features + num_classes=80, # Number of classes for classification + bbox_coder=dict( # Box coder used in the second stage. + type='DeltaXYWHBBoxCoder', # Type of box coder. 'DeltaXYWHBBoxCoder' is applied for most of methods. + target_means=[0.0, 0.0, 0.0, 0.0], # Means used to encode and decode box + target_stds=[0.1, 0.1, 0.2, 0.2]), # Standard variance for encoding and decoding. It is smaller since the boxes are more accurate. [0.1, 0.1, 0.2, 0.2] is a conventional setting. + reg_class_agnostic=False, # Whether the regression is class agnostic. + loss_cls=dict( # Config of loss function for the classification branch + type='CrossEntropyLoss', # Type of loss for classification branch, we also support FocalLoss etc. + use_sigmoid=False, # Whether to use sigmoid. + loss_weight=1.0), # Loss weight of the classification branch. + loss_bbox=dict( # Config of loss function for the regression branch. + type='L1Loss', # Type of loss, we also support many IoU Losses and smooth L1-loss, etc. + loss_weight=1.0)), # Loss weight of the regression branch. + mask_roi_extractor=dict( # RoI feature extractor for mask generation. + type='SingleRoIExtractor', # Type of the RoI feature extractor, most of methods uses SingleRoIExtractor. + roi_layer=dict( # Config of RoI Layer that extracts features for instance segmentation + type='RoIAlign', # Type of RoI Layer, DeformRoIPoolingPack and ModulatedDeformRoIPoolingPack are also supported + output_size=14, # The output size of feature maps. + sampling_ratio=0), # Sampling ratio when extracting the RoI features. + out_channels=256, # Output channels of the extracted feature. + featmap_strides=[4, 8, 16, 32]), # Strides of multi-scale feature maps. + mask_head=dict( # Mask prediction head + type='FCNMaskHead', # Type of mask head, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py#L21 for implementation details. + num_convs=4, # Number of convolutional layers in mask head. + in_channels=256, # Input channels, should be consistent with the output channels of mask roi extractor. + conv_out_channels=256, # Output channels of the convolutional layer. + num_classes=80, # Number of class to be segmented. + loss_mask=dict( # Config of loss function for the mask branch. + type='CrossEntropyLoss', # Type of loss used for segmentation + use_mask=True, # Whether to only train the mask in the correct class. + loss_weight=1.0)))) # Loss weight of mask branch. + train_cfg = dict( # Config of training hyperparameters for rpn and rcnn + rpn=dict( # Training config of rpn + assigner=dict( # Config of assigner + type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for many common detectors. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details. + pos_iou_thr=0.7, # IoU >= threshold 0.7 will be taken as positive samples + neg_iou_thr=0.3, # IoU < threshold 0.3 will be taken as negative samples + min_pos_iou=0.3, # The minimal IoU threshold to take boxes as positive samples + match_low_quality=True, # Whether to match the boxes under low quality (see API doc for more details). + ignore_iof_thr=-1), # IoF threshold for ignoring bboxes + sampler=dict( # Config of positive/negative sampler + type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. 
Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details. + num=256, # Number of samples + pos_fraction=0.5, # The ratio of positive samples in the total samples. + neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples. + add_gt_as_proposals=False), # Whether add GT as proposals after sampling. + allowed_border=-1, # The border allowed after padding for valid anchors. + pos_weight=-1, # The weight of positive samples during training. + debug=False), # Whether to set the debug mode + rpn_proposal=dict( # The config to generate proposals during training + nms_across_levels=False, # Whether to do NMS for boxes across levels. Only work in `GARPNHead`, naive rpn does not support do nms cross levels. + nms_pre=2000, # The number of boxes before NMS + nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`. + max_per_img=1000, # The number of boxes to be kept after NMS. + nms=dict( # Config of NMS + type='nms', # Type of NMS + iou_threshold=0.7 # NMS threshold + ), + min_bbox_size=0), # The allowed minimal box size + rcnn=dict( # The config for the roi heads. + assigner=dict( # Config of assigner for second stage, this is different for that in rpn + type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for all roi_heads for now. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details. + pos_iou_thr=0.5, # IoU >= threshold 0.5 will be taken as positive samples + neg_iou_thr=0.5, # IoU < threshold 0.5 will be taken as negative samples + min_pos_iou=0.5, # The minimal IoU threshold to take boxes as positive samples + match_low_quality=False, # Whether to match the boxes under low quality (see API doc for more details). + ignore_iof_thr=-1), # IoF threshold for ignoring bboxes + sampler=dict( + type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details. + num=512, # Number of samples + pos_fraction=0.25, # The ratio of positive samples in the total samples. + neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples. + add_gt_as_proposals=True + ), # Whether add GT as proposals after sampling. + mask_size=28, # Size of mask + pos_weight=-1, # The weight of positive samples during training. + debug=False)) # Whether to set the debug mode + test_cfg = dict( # Config for testing hyperparameters for rpn and rcnn + rpn=dict( # The config to generate proposals during testing + nms_across_levels=False, # Whether to do NMS for boxes across levels. Only work in `GARPNHead`, naive rpn does not support do nms cross levels. + nms_pre=1000, # The number of boxes before NMS + nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`. + max_per_img=1000, # The number of boxes to be kept after NMS. + nms=dict( # Config of NMS + type='nms', #Type of NMS + iou_threshold=0.7 # NMS threshold + ), + min_bbox_size=0), # The allowed minimal box size + rcnn=dict( # The config for the roi heads. 
+ score_thr=0.05, # Threshold to filter out boxes + nms=dict( # Config of NMS in the second stage + type='nms', # Type of NMS + iou_thr=0.5), # NMS threshold + max_per_img=100, # Max number of detections of each image + mask_thr_binary=0.5)) # Threshold of mask prediction +dataset_type = 'CocoDataset' # Dataset type, this will be used to define the dataset +data_root = 'data/coco/' # Root path of data +img_norm_cfg = dict( # Image normalization config to normalize the input images + mean=[123.675, 116.28, 103.53], # Mean values used to pre-training the pre-trained backbone models + std=[58.395, 57.12, 57.375], # Standard variance used to pre-training the pre-trained backbone models + to_rgb=True +) # The channel orders of image used to pre-training the pre-trained backbone models +train_pipeline = [ # Training pipeline + dict(type='LoadImageFromFile'), # First pipeline to load images from file path + dict( + type='LoadAnnotations', # Second pipeline to load annotations for current image + with_bbox=True, # Whether to use bounding box, True for detection + with_mask=True, # Whether to use instance mask, True for instance segmentation + poly2mask=False), # Whether to convert the polygon mask to instance mask, set False for acceleration and to save memory + dict( + type='Resize', # Augmentation pipeline that resize the images and their annotations + img_scale=(1333, 800), # The largest scale of image + keep_ratio=True + ), # whether to keep the ratio between height and width. + dict( + type='RandomFlip', # Augmentation pipeline that flip the images and their annotations + flip_ratio=0.5), # The ratio or probability to flip + dict( + type='Normalize', # Augmentation pipeline that normalize the input images + mean=[123.675, 116.28, 103.53], # These keys are the same of img_norm_cfg since the + std=[58.395, 57.12, 57.375], # keys of img_norm_cfg are used here as arguments + to_rgb=True), + dict( + type='Pad', # Padding config + size_divisor=32), # The number the padded images should be divisible + dict(type='DefaultFormatBundle'), # Default format bundle to gather data in the pipeline + dict( + type='Collect', # Pipeline that decides which keys in the data should be passed to the detector + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), # First pipeline to load images from file path + dict( + type='MultiScaleFlipAug', # An encapsulation that encapsulates the testing augmentations + img_scale=(1333, 800), # Decides the largest scale for testing, used for the Resize pipeline + flip=False, # Whether to flip images during testing + transforms=[ + dict(type='Resize', # Use resize augmentation + keep_ratio=True), # Whether to keep the ratio between height and width, the img_scale set here will be suppressed by the img_scale set above. + dict(type='RandomFlip'), # Thought RandomFlip is added in pipeline, it is not used because flip=False + dict( + type='Normalize', # Normalization config, the values are from img_norm_cfg + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict( + type='Pad', # Padding config to pad images divisible by 32. + size_divisor=32), + dict( + type='ImageToTensor', # convert image to tensor + keys=['img']), + dict( + type='Collect', # Collect pipeline that collect necessary keys for testing. 
+ keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, # Batch size of a single GPU + workers_per_gpu=2, # Worker to pre-fetch data for each single GPU + train=dict( # Train dataset config + type='CocoDataset', # Type of dataset, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py#L19 for details. + ann_file='data/coco/annotations/instances_train2017.json', # Path of annotation file + img_prefix='data/coco/train2017/', # Prefix of image path + pipeline=[ # pipeline, this is passed by the train_pipeline created before. + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) + ]), + val=dict( # Validation dataset config + type='CocoDataset', + ann_file='data/coco/annotations/instances_val2017.json', + img_prefix='data/coco/val2017/', + pipeline=[ # Pipeline is passed by test_pipeline created before + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ]), + test=dict( # Test dataset config, modify the ann_file for test-dev/test submission + type='CocoDataset', + ann_file='data/coco/annotations/instances_val2017.json', + img_prefix='data/coco/val2017/', + pipeline=[ # Pipeline is passed by test_pipeline created before + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + samples_per_gpu=2 # Batch size of a single GPU used in testing + )) +evaluation = dict( # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details. + interval=1, # Evaluation interval + metric=['bbox', 'segm']) # Metrics used during evaluation +optimizer = dict( # Config used to build optimizer, support all the optimizers in PyTorch whose arguments are also the same as those in PyTorch + type='SGD', # Type of optimizers, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/optimizer/default_constructor.py#L13 for more details + lr=0.02, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch + momentum=0.9, # Momentum + weight_decay=0.0001) # Weight decay of SGD +optimizer_config = dict( # Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details. 
+ grad_clip=None) # Most of the methods do not use gradient clip +lr_config = dict( # Learning rate scheduler config used to register LrUpdater hook + policy='step', # The policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9. + warmup='linear', # The warmup policy, also support `exp` and `constant`. + warmup_iters=500, # The number of iterations for warmup + warmup_ratio= + 0.001, # The ratio of the starting learning rate used for warmup + step=[8, 11]) # Steps to decay the learning rate +runner = dict( + type='EpochBasedRunner', # Type of runner to use (i.e. IterBasedRunner or EpochBasedRunner) + max_epochs=12) # Runner that runs the workflow in total max_epochs. For IterBasedRunner use `max_iters` +checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation. + interval=1) # The save interval is 1 +log_config = dict( # config to register logger hook + interval=50, # Interval to print the log + hooks=[ + # dict(type='TensorboardLoggerHook') # The Tensorboard logger is also supported + dict(type='TextLoggerHook') + ]) # The logger used to record the training process. +dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set. +log_level = 'INFO' # The level of logging. +load_from = None # load models as a pre-trained model from a given path. This will not resume training. +resume_from = None # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved. +workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. The workflow trains the model by 12 epochs according to the total_epochs. +work_dir = 'work_dir' # Directory to save the model checkpoints and logs for the current experiments. +``` + +## FAQ + +### Ignore some fields in the base configs + +Sometimes, you may set `_delete_=True` to ignore some of fields in base configs. +You may refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html#inherit-from-base-config-with-ignored-fields) for simple illustration. + +In MMDetection, for example, to change the backbone of Mask R-CNN with the following config. + +```python +model = dict( + type='MaskRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict(...), + rpn_head=dict(...), + roi_head=dict(...)) +``` + +`ResNet` and `HRNet` use different keywords to construct. 
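+To make the need for `_delete_=True` concrete, the short sketch below (an editorial illustration, not part of the original MMDetection config system) mimics how a child config dict is merged onto its base: without `_delete_`, inherited ResNet-specific keys such as `depth` and `num_stages` survive the merge and would be handed to the `HRNet` constructor, which does not accept them.
+
+```python
+# Minimal sketch of base/child config merging (a simplified stand-in for
+# mmcv's Config inheritance, shown only to illustrate the role of _delete_).
+base_backbone = dict(type='ResNet', depth=50, num_stages=4, norm_eval=True)
+child_backbone = dict(type='HRNet')  # hypothetical override without _delete_
+
+merged = {**base_backbone, **child_backbone}
+print(merged)
+# {'type': 'HRNet', 'depth': 50, 'num_stages': 4, 'norm_eval': True}
+# HRNet has no `depth`/`num_stages` arguments, so building the merged dict
+# would fail; `_delete_=True` drops the inherited keys instead.
+```
+
+With `_delete_=True`, the whole inherited `backbone` dict is discarded first, which is why the full set of `HRNet` arguments is written out in the config below.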
+ +```python +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w32', + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256)))), + neck=dict(...)) +``` + +The `_delete_=True` would replace all old keys in `backbone` field with new keys. + +### Use intermediate variables in configs + +Some intermediate variables are used in the configs files, like `train_pipeline`/`test_pipeline` in datasets. +It's worth noting that when modifying intermediate variables in the children configs, user need to pass the intermediate variables into corresponding fields again. +For example, we would like to use multi scale strategy to train a Mask R-CNN. `train_pipeline`/`test_pipeline` are intermediate variable we would like modify. + +```python +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode="value", + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +``` + +We first define the new `train_pipeline`/`test_pipeline` and pass them into `data`. + +Similarly, if we would like to switch from `SyncBN` to `BN` or `MMSyncBN`, we need to substitute every `norm_cfg` in the config. + +```python +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + backbone=dict(norm_cfg=norm_cfg), + neck=dict(norm_cfg=norm_cfg), + ...) +``` diff --git a/downstream/mmdetection/docs/en/tutorials/customize_dataset.md b/downstream/mmdetection/docs/en/tutorials/customize_dataset.md new file mode 100644 index 0000000..3237f16 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/customize_dataset.md @@ -0,0 +1,542 @@ +# Tutorial 2: Customize Datasets + +## Support new data format + +To support a new data format, you can either convert them to existing formats (COCO format or PASCAL format) or directly convert them to the middle format. You could also choose to convert them offline (before training by a script) or online (implement a new dataset and do the conversion at training). 
In MMDetection, we recommend to convert the data into COCO formats and do the conversion offline, thus you only need to modify the config's data annotation paths and classes after the conversion of your data. + +### Reorganize new data formats to existing format + +The simplest way is to convert your dataset to existing dataset formats (COCO or PASCAL VOC). + +The annotation json files in COCO format has the following necessary keys: + +```python +'images': [ + { + 'file_name': 'COCO_val2014_000000001268.jpg', + 'height': 427, + 'width': 640, + 'id': 1268 + }, + ... +], + +'annotations': [ + { + 'segmentation': [[192.81, + 247.09, + ... + 219.03, + 249.06]], # if you have mask labels + 'area': 1035.749, + 'iscrowd': 0, + 'image_id': 1268, + 'bbox': [192.81, 224.8, 74.73, 33.43], + 'category_id': 16, + 'id': 42986 + }, + ... +], + +'categories': [ + {'id': 0, 'name': 'car'}, + ] +``` + +There are three necessary keys in the json file: + +- `images`: contains a list of images with their information like `file_name`, `height`, `width`, and `id`. +- `annotations`: contains the list of instance annotations. +- `categories`: contains the list of categories names and their ID. + +After the data pre-processing, there are two steps for users to train the customized new dataset with existing format (e.g. COCO format): + +1. Modify the config file for using the customized dataset. +2. Check the annotations of the customized dataset. + +Here we give an example to show the above two steps, which uses a customized dataset of 5 classes with COCO format to train an existing Cascade Mask R-CNN R50-FPN detector. + +#### 1. Modify the config file for using the customized dataset + +There are two aspects involved in the modification of config file: + +1. The `data` field. Specifically, you need to explicitly add the `classes` fields in `data.train`, `data.val` and `data.test`. +2. The `num_classes` field in the `model` part. Explicitly over-write all the `num_classes` from default value (e.g. 80 in COCO) to your classes number. + +In `configs/my_custom_config.py`: + +```python + +# the new config inherits the base configs to highlight the necessary modification +_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' + +# 1. dataset settings +dataset_type = 'CocoDataset' +classes = ('a', 'b', 'c', 'd', 'e') +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + # explicitly add your class names to the field `classes` + classes=classes, + ann_file='path/to/your/train/annotation_data', + img_prefix='path/to/your/train/image_data'), + val=dict( + type=dataset_type, + # explicitly add your class names to the field `classes` + classes=classes, + ann_file='path/to/your/val/annotation_data', + img_prefix='path/to/your/val/image_data'), + test=dict( + type=dataset_type, + # explicitly add your class names to the field `classes` + classes=classes, + ann_file='path/to/your/test/annotation_data', + img_prefix='path/to/your/test/image_data')) + +# 2. model settings + +# explicitly over-write all the `num_classes` field from default 80 to 5. +model = dict( + roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + # explicitly over-write all the `num_classes` field from default 80 to 5. + num_classes=5), + dict( + type='Shared2FCBBoxHead', + # explicitly over-write all the `num_classes` field from default 80 to 5. + num_classes=5), + dict( + type='Shared2FCBBoxHead', + # explicitly over-write all the `num_classes` field from default 80 to 5. 
+ num_classes=5)], + # explicitly over-write all the `num_classes` field from default 80 to 5. + mask_head=dict(num_classes=5))) +``` + +#### 2. Check the annotations of the customized dataset + +Assuming your customized dataset is COCO format, make sure you have the correct annotations in the customized dataset: + +1. The length for `categories` field in annotations should exactly equal the tuple length of `classes` fields in your config, meaning the number of classes (e.g. 5 in this example). +2. The `classes` fields in your config file should have exactly the same elements and the same order with the `name` in `categories` of annotations. MMDetection automatically maps the uncontinuous `id` in `categories` to the continuous label indices, so the string order of `name` in `categories` field affects the order of label indices. Meanwhile, the string order of `classes` in config affects the label text during visualization of predicted bounding boxes. +3. The `category_id` in `annotations` field should be valid, i.e., all values in `category_id` should belong to `id` in `categories`. + +Here is a valid example of annotations: + +```python + +'annotations': [ + { + 'segmentation': [[192.81, + 247.09, + ... + 219.03, + 249.06]], # if you have mask labels + 'area': 1035.749, + 'iscrowd': 0, + 'image_id': 1268, + 'bbox': [192.81, 224.8, 74.73, 33.43], + 'category_id': 16, + 'id': 42986 + }, + ... +], + +# MMDetection automatically maps the uncontinuous `id` to the continuous label indices. +'categories': [ + {'id': 1, 'name': 'a'}, {'id': 3, 'name': 'b'}, {'id': 4, 'name': 'c'}, {'id': 16, 'name': 'd'}, {'id': 17, 'name': 'e'}, + ] +``` + +We use this way to support CityScapes dataset. The script is in [cityscapes.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/cityscapes.py) and we also provide the finetuning [configs](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes). + +**Note** + +1. For instance segmentation datasets, **MMDetection only supports evaluating mask AP of dataset in COCO format for now**. +2. It is recommended to convert the data offline before training, thus you can still use `CocoDataset` and only need to modify the path of annotations and the training classes. + +### Reorganize new data format to middle format + +It is also fine if you do not want to convert the annotation format to COCO or PASCAL format. +Actually, we define a simple annotation format and all existing datasets are +processed to be compatible with it, either online or offline. + +The annotation of a dataset is a list of dict, each dict corresponds to an image. +There are 3 field `filename` (relative path), `width`, `height` for testing, +and an additional field `ann` for training. `ann` is also a dict containing at least 2 fields: +`bboxes` and `labels`, both of which are numpy arrays. Some datasets may provide +annotations like crowd/difficult/ignored bboxes, we use `bboxes_ignore` and `labels_ignore` +to cover them. + +Here is an example. + +```python + +[ + { + 'filename': 'a.jpg', + 'width': 1280, + 'height': 720, + 'ann': { + 'bboxes': (n, 4), + 'labels': (n, ), + 'bboxes_ignore': (k, 4), + 'labels_ignore': (k, ) (optional field) + } + }, + ... +] +``` + +There are two ways to work with custom datasets. 
+ +- online conversion + + You can write a new Dataset class inherited from `CustomDataset`, and overwrite two methods + `load_annotations(self, ann_file)` and `get_ann_info(self, idx)`, + like [CocoDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py) and [VOCDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/voc.py). + +- offline conversion + + You can convert the annotation format to the expected format above and save it to + a pickle or json file, like [pascal_voc.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/pascal_voc.py). + Then you can simply use `CustomDataset`. + +### An example of customized dataset + +Assume the annotation is in a new format in text files. +The bounding box annotations are stored in the text file `annotation.txt` as follows + +``` +# +000001.jpg +1280 720 +2 +10 20 40 60 1 +20 40 50 60 2 +# +000002.jpg +1280 720 +3 +50 20 40 60 2 +20 40 30 45 2 +30 40 50 60 3 +``` + +We can create a new dataset in `mmdet/datasets/my_dataset.py` to load the data. + +```python +import mmcv +import numpy as np + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class MyDataset(CustomDataset): + + CLASSES = ('person', 'bicycle', 'car', 'motorcycle') + + def load_annotations(self, ann_file): + ann_list = mmcv.list_from_file(ann_file) + + data_infos = [] + for i, ann_line in enumerate(ann_list): + if ann_line != '#': + continue + + img_shape = ann_list[i + 2].split(' ') + width = int(img_shape[0]) + height = int(img_shape[1]) + bbox_number = int(ann_list[i + 3]) + + bboxes = [] + labels = [] + # each annotation line looks like 'x1 y1 x2 y2 label' + for anns in ann_list[i + 4:i + 4 + bbox_number]: + anns = anns.split(' ') + bboxes.append([float(ann) for ann in anns[:4]]) + labels.append(int(anns[4])) + + data_infos.append( + dict( + filename=ann_list[i + 1], + width=width, + height=height, + ann=dict( + bboxes=np.array(bboxes).astype(np.float32), + labels=np.array(labels).astype(np.int64)) + )) + + return data_infos + + def get_ann_info(self, idx): + return self.data_infos[idx]['ann'] + +``` + +Then in the config, to use `MyDataset`, you can modify the config as follows + +```python +dataset_A_train = dict( + type='MyDataset', + ann_file = 'image_list.txt', + pipeline=train_pipeline +) +``` + +## Customize datasets by dataset wrappers + +MMDetection also supports many dataset wrappers to mix the dataset or modify the dataset distribution for training. +Currently it supports three dataset wrappers as below: + +- `RepeatDataset`: simply repeat the whole dataset. +- `ClassBalancedDataset`: repeat dataset in a class balanced manner. +- `ConcatDataset`: concat datasets. + +### Repeat dataset + +We use `RepeatDataset` as a wrapper to repeat the dataset. For example, suppose the original dataset is `Dataset_A`, to repeat it, the config looks like the following + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( # This is the original config of Dataset_A + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +### Class balanced dataset + +We use `ClassBalancedDataset` as a wrapper to repeat the dataset based on category +frequency. The dataset to repeat needs to implement the function `self.get_cat_ids(idx)` +to support `ClassBalancedDataset`.
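+As a rough sketch (an editorial illustration assuming the middle-format `data_infos` built by the `MyDataset` example above), such a method only needs to return the category indices of the instances in the image at `idx`:
+
+```python
+    def get_cat_ids(self, idx):
+        # Category indices present in image `idx`; ClassBalancedDataset uses
+        # them to compute the per-image repeat factor.
+        return self.data_infos[idx]['ann']['labels'].astype(np.int64).tolist()
+```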
+For example, to repeat `Dataset_A` with `oversample_thr=1e-3`, the config looks like the following + +```python +dataset_A_train = dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( # This is the original config of Dataset_A + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +You may refer to [source code](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/dataset_wrappers.py#L211) for details. + +### Concatenate dataset + +There are three ways to concatenate the dataset. + +1. If the datasets you want to concatenate are in the same type with different annotation files, you can concatenate the dataset configs like the following. + + ```python + dataset_A_train = dict( + type='Dataset_A', + ann_file = ['anno_file_1', 'anno_file_2'], + pipeline=train_pipeline + ) + ``` + + If the concatenated dataset is used for test or evaluation, this manner supports to evaluate each dataset separately. To test the concatenated datasets as a whole, you can set `separate_eval=False` as below. + + ```python + dataset_A_train = dict( + type='Dataset_A', + ann_file = ['anno_file_1', 'anno_file_2'], + separate_eval=False, + pipeline=train_pipeline + ) + ``` + +2. In case the dataset you want to concatenate is different, you can concatenate the dataset configs like the following. + + ```python + dataset_A_train = dict() + dataset_B_train = dict() + + data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train = [ + dataset_A_train, + dataset_B_train + ], + val = dataset_A_val, + test = dataset_A_test + ) + ``` + + If the concatenated dataset is used for test or evaluation, this manner also supports to evaluate each dataset separately. + +3. We also support to define `ConcatDataset` explicitly as the following. + + ```python + dataset_A_val = dict() + dataset_B_val = dict() + + data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dataset_A_train, + val=dict( + type='ConcatDataset', + datasets=[dataset_A_val, dataset_B_val], + separate_eval=False)) + ``` + + This manner allows users to evaluate all the datasets as a single one by setting `separate_eval=False`. + +**Note:** + +1. The option `separate_eval=False` assumes the datasets use `self.data_infos` during evaluation. Therefore, COCO datasets do not support this behavior since COCO datasets do not fully rely on `self.data_infos` for evaluation. Combining different types of datasets and evaluating them as a whole is not tested thus is not suggested. +2. Evaluating `ClassBalancedDataset` and `RepeatDataset` is not supported thus evaluating concatenated datasets of these types is also not supported. + +A more complex example that repeats `Dataset_A` and `Dataset_B` by N and M times, respectively, and then concatenates the repeated datasets is as the following. + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( + type='Dataset_A', + ... + pipeline=train_pipeline + ) +) +dataset_A_val = dict( + ... + pipeline=test_pipeline +) +dataset_A_test = dict( + ... + pipeline=test_pipeline +) +dataset_B_train = dict( + type='RepeatDataset', + times=M, + dataset=dict( + type='Dataset_B', + ... + pipeline=train_pipeline + ) +) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train = [ + dataset_A_train, + dataset_B_train + ], + val = dataset_A_val, + test = dataset_A_test +) + +``` + +## Modify Dataset Classes + +With existing dataset types, we can modify the class names of them to train subset of the annotations. 
+For example, if you want to train only three classes of the current dataset, +you can modify the classes of dataset. +The dataset will filter out the ground truth boxes of other classes automatically. + +```python +classes = ('person', 'bicycle', 'car') +data = dict( + train=dict(classes=classes), + val=dict(classes=classes), + test=dict(classes=classes)) +``` + +MMDetection V2.0 also supports to read the classes from a file, which is common in real applications. +For example, assume the `classes.txt` contains the name of classes as the following. + +``` +person +bicycle +car +``` + +Users can set the classes as a file path, the dataset will load it and convert it to a list automatically. + +```python +classes = 'path/to/classes.txt' +data = dict( + train=dict(classes=classes), + val=dict(classes=classes), + test=dict(classes=classes)) +``` + +**Note**: + +- Before MMDetection v2.5.0, the dataset will filter out the empty GT images automatically if the classes are set and there is no way to disable that through config. This is an undesirable behavior and introduces confusion because if the classes are not set, the dataset only filter the empty GT images when `filter_empty_gt=True` and `test_mode=False`. After MMDetection v2.5.0, we decouple the image filtering process and the classes modification, i.e., the dataset will only filter empty GT images when `filter_empty_gt=True` and `test_mode=False`, no matter whether the classes are set. Thus, setting the classes only influences the annotations of classes used for training and users could decide whether to filter empty GT images by themselves. +- Since the middle format only has box labels and does not contain the class names, when using `CustomDataset`, users cannot filter out the empty GT images through configs but only do this offline. +- Please remember to modify the `num_classes` in the head when specifying `classes` in dataset. We implemented [NumClassCheckHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/utils.py) to check whether the numbers are consistent since v2.9.0(after PR#4508). +- The features for setting dataset classes and dataset filtering will be refactored to be more user-friendly in the future (depends on the progress). + +## COCO Panoptic Dataset + +Now we support COCO Panoptic Dataset, the format of panoptic annotations is different from COCO format. +Both the foreground and the background will exist in the annotation file. +The annotation json files in COCO Panoptic format has the following necessary keys: + +```python +'images': [ + { + 'file_name': '000000001268.jpg', + 'height': 427, + 'width': 640, + 'id': 1268 + }, + ... +] + +'annotations': [ + { + 'filename': '000000001268.jpg', + 'image_id': 1268, + 'segments_info': [ + { + 'id':8345037, # One-to-one correspondence with the id in the annotation map. + 'category_id': 51, + 'iscrowd': 0, + 'bbox': (x1, y1, w, h), # The bbox of the background is the outer rectangle of its mask. + 'area': 24315 + }, + ... + ] + }, + ... +] + +'categories': [ # including both foreground categories and background categories + {'id': 0, 'name': 'person'}, + ... + ] +``` + +Moreover, the `seg_prefix` must be set to the path of the panoptic annotation images. 
+ +```python +data = dict( + type='CocoPanopticDataset', + train=dict( + seg_prefix = 'path/to/your/train/panoptic/image_annotation_data' + ), + val=dict( + seg_prefix = 'path/to/your/val/panoptic/image_annotation_data' + ) +) +``` diff --git a/downstream/mmdetection/docs/en/tutorials/customize_losses.md b/downstream/mmdetection/docs/en/tutorials/customize_losses.md new file mode 100644 index 0000000..5c00368 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/customize_losses.md @@ -0,0 +1,126 @@ +# Tutorial 6: Customize Losses + +MMDetection provides users with different loss functions. But the default configuration may not be applicable to different datasets or models, so users may want to modify a specific loss to adapt it to the new situation. + +This tutorial first elaborates the computation pipeline of losses, then gives some instructions on how to modify each step. The modification can be categorized as tweaking and weighting. + +## Computation pipeline of a loss + +Given the input prediction and target, as well as the weights, a loss function maps the input tensor to the final loss scalar. The mapping can be divided into five steps: + +1. Set the sampling method to sample positive and negative samples. + +2. Get **element-wise** or **sample-wise** loss by the loss kernel function. + +3. Weight the loss with a weight tensor **element-wise**. + +4. Reduce the loss tensor to a **scalar**. + +5. Weight the loss with a **scalar**. + +## Set sampling method (step 1) + +For some loss functions, sampling strategies are needed to avoid imbalance between positive and negative samples. + +For example, when using `CrossEntropyLoss` in the RPN head, we need to set `RandomSampler` in `train_cfg` + +```python +train_cfg=dict( + rpn=dict( + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False))) +``` + +For some other losses which have a positive and negative sample balance mechanism, such as Focal Loss, GHMC, and QualityFocalLoss, the sampler is no longer necessary. + +## Tweaking loss + +Tweaking a loss is more related to steps 2, 4 and 5, and most modifications can be specified in the config. +Here we take [Focal Loss (FL)](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/focal_loss.py) as an example. +The following code snippets are the construction method and config of FL respectively; they are in one-to-one correspondence. + +```python +@LOSSES.register_module() +class FocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0): +``` + +```python +loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0) +``` + +### Tweaking hyper-parameters (step 2) + +`gamma` and `alpha` are two hyper-parameters of the Focal Loss. Say we want to change the value of `gamma` to 1.5 and `alpha` to 0.5; we can then specify them in the config as follows: + +```python +loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=1.5, + alpha=0.5, + loss_weight=1.0) +``` + +### Tweaking the way of reduction (step 4) + +The default way of reduction is `mean` for FL.
Say if we want to change the reduction from `mean` to `sum`, we can specify it in the config as follows: + +```python +loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0, + reduction='sum') +``` + +### Tweaking loss weight (step 5) + +The loss weight here is a scalar which controls the weight of different losses in multi-task learning, e.g. classification loss and regression loss. Say if we want to change to loss weight of classification loss to be 0.5, we can specify it in the config as follows: + +```python +loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=0.5) +``` + +## Weighting loss (step 3) + +Weighting loss means we re-weight the loss element-wisely. To be more specific, we multiply the loss tensor with a weight tensor which has the same shape. As a result, different entries of the loss can be scaled differently, and so called element-wisely. +The loss weight varies across different models and highly context related, but overall there are two kinds of loss weights, `label_weights` for classification loss and `bbox_weights` for bbox regression loss. You can find them in the `get_target` method of the corresponding head. Here we take [ATSSHead](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/atss_head.py#L530) as an example, which inherit [AnchorHead](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/anchor_head.py) but overwrite its `get_targets` method which yields different `label_weights` and `bbox_weights`. + +``` +class ATSSHead(AnchorHead): + + ... + + def get_targets(self, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): +``` diff --git a/downstream/mmdetection/docs/en/tutorials/customize_models.md b/downstream/mmdetection/docs/en/tutorials/customize_models.md new file mode 100644 index 0000000..81c3912 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/customize_models.md @@ -0,0 +1,363 @@ +# Tutorial 4: Customize Models + +We basically categorize model components into 5 types. + +- backbone: usually an FCN network to extract feature maps, e.g., ResNet, MobileNet. +- neck: the component between backbones and heads, e.g., FPN, PAFPN. +- head: the component for specific tasks, e.g., bbox prediction and mask prediction. +- roi extractor: the part for extracting RoI features from feature maps, e.g., RoI Align. +- loss: the component in head for calculating losses, e.g., FocalLoss, L1Loss, and GHMLoss. + +## Develop new components + +### Add a new backbone + +Here we show how to develop new components with an example of MobileNet. + +#### 1. Define a new backbone (e.g. MobileNet) + +Create a new file `mmdet/models/backbones/mobilenet.py`. + +```python +import torch.nn as nn + +from ..builder import BACKBONES + + +@BACKBONES.register_module() +class MobileNet(nn.Module): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # should return a tuple + pass +``` + +#### 2. Import the module + +You can either add the following line to `mmdet/models/backbones/__init__.py` + +```python +from .mobilenet import MobileNet +``` + +or alternatively add + +```python +custom_imports = dict( + imports=['mmdet.models.backbones.mobilenet'], + allow_failed_imports=False) +``` + +to the config file to avoid modifying the original code. + +#### 3. 
Use the backbone in your config file + +```python +model = dict( + ... + backbone=dict( + type='MobileNet', + arg1=xxx, + arg2=xxx), + ... +``` + +### Add new necks + +#### 1. Define a neck (e.g. PAFPN) + +Create a new file `mmdet/models/necks/pafpn.py`. + +```python +from ..builder import NECKS + +@NECKS.register_module() +class PAFPN(nn.Module): + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False): + pass + + def forward(self, inputs): + # implementation is ignored + pass +``` + +#### 2. Import the module + +You can either add the following line to `mmdet/models/necks/__init__.py`, + +```python +from .pafpn import PAFPN +``` + +or alternatively add + +```python +custom_imports = dict( + imports=['mmdet.models.necks.pafpn.py'], + allow_failed_imports=False) +``` + +to the config file and avoid modifying the original code. + +#### 3. Modify the config file + +```python +neck=dict( + type='PAFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5) +``` + +### Add new heads + +Here we show how to develop a new head with the example of [Double Head R-CNN](https://arxiv.org/abs/1904.06493) as the following. + +First, add a new bbox head in `mmdet/models/roi_heads/bbox_heads/double_bbox_head.py`. +Double Head R-CNN implements a new bbox head for object detection. +To implement a bbox head, basically we need to implement three functions of the new module as the following. + +```python +from mmdet.models.builder import HEADS +from .bbox_head import BBoxHead + +@HEADS.register_module() +class DoubleConvFCBBoxHead(BBoxHead): + r"""Bbox head used in Double-Head R-CNN + + /-> cls + /-> shared convs -> + \-> reg + roi features + /-> cls + \-> shared fc -> + \-> reg + """ # noqa: W605 + + def __init__(self, + num_convs=0, + num_fcs=0, + conv_out_channels=1024, + fc_out_channels=1024, + conv_cfg=None, + norm_cfg=dict(type='BN'), + **kwargs): + kwargs.setdefault('with_avg_pool', True) + super(DoubleConvFCBBoxHead, self).__init__(**kwargs) + + + def forward(self, x_cls, x_reg): + +``` + +Second, implement a new RoI Head if it is necessary. We plan to inherit the new `DoubleHeadRoIHead` from `StandardRoIHead`. We can find that a `StandardRoIHead` already implements the following functions. + +```python +import torch + +from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler +from ..builder import HEADS, build_head, build_roi_extractor +from .base_roi_head import BaseRoIHead +from .test_mixins import BBoxTestMixin, MaskTestMixin + + +@HEADS.register_module() +class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): + """Simplest base roi head including one bbox head and one mask head. 
+ """ + + def init_assigner_sampler(self): + + def init_bbox_head(self, bbox_roi_extractor, bbox_head): + + def init_mask_head(self, mask_roi_extractor, mask_head): + + + def forward_dummy(self, x, proposals): + + + def forward_train(self, + x, + img_metas, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None): + + def _bbox_forward(self, x, rois): + + def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, + img_metas): + + def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, + img_metas): + + def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): + + + def simple_test(self, + x, + proposal_list, + img_metas, + proposals=None, + rescale=False): + """Test without augmentation.""" + +``` + +Double Head's modification is mainly in the bbox_forward logic, and it inherits other logics from the `StandardRoIHead`. +In the `mmdet/models/roi_heads/double_roi_head.py`, we implement the new RoI Head as the following: + +```python +from ..builder import HEADS +from .standard_roi_head import StandardRoIHead + + +@HEADS.register_module() +class DoubleHeadRoIHead(StandardRoIHead): + """RoI head for Double Head RCNN + + https://arxiv.org/abs/1904.06493 + """ + + def __init__(self, reg_roi_scale_factor, **kwargs): + super(DoubleHeadRoIHead, self).__init__(**kwargs) + self.reg_roi_scale_factor = reg_roi_scale_factor + + def _bbox_forward(self, x, rois): + bbox_cls_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + bbox_reg_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.reg_roi_scale_factor) + if self.with_shared_head: + bbox_cls_feats = self.shared_head(bbox_cls_feats) + bbox_reg_feats = self.shared_head(bbox_reg_feats) + cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) + + bbox_results = dict( + cls_score=cls_score, + bbox_pred=bbox_pred, + bbox_feats=bbox_cls_feats) + return bbox_results +``` + +Last, the users need to add the module in +`mmdet/models/bbox_heads/__init__.py` and `mmdet/models/roi_heads/__init__.py` thus the corresponding registry could find and load them. + +Alternatively, the users can add + +```python +custom_imports=dict( + imports=['mmdet.models.roi_heads.double_roi_head', 'mmdet.models.bbox_heads.double_bbox_head']) +``` + +to the config file and achieve the same goal. + +The config file of Double Head R-CNN is as the following + +```python +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + type='DoubleHeadRoIHead', + reg_roi_scale_factor=1.3, + bbox_head=dict( + _delete_=True, + type='DoubleConvFCBBoxHead', + num_convs=4, + num_fcs=2, + in_channels=256, + conv_out_channels=1024, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) + +``` + +Since MMDetection 2.0, the config system supports to inherit configs such that the users can focus on the modification. +The Double Head R-CNN mainly uses a new DoubleHeadRoIHead and a new +`DoubleConvFCBBoxHead`, the arguments are set according to the `__init__` function of each module. + +### Add new loss + +Assume you want to add a new loss as `MyLoss`, for bounding box regression. 
+To add a new loss function, the users need implement it in `mmdet/models/losses/my_loss.py`. +The decorator `weighted_loss` enable the loss to be weighted for each element. + +```python +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weighted_loss + +@weighted_loss +def my_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + +@LOSSES.register_module() +class MyLoss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(MyLoss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * my_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss_bbox +``` + +Then the users need to add it in the `mmdet/models/losses/__init__.py`. + +```python +from .my_loss import MyLoss, my_loss + +``` + +Alternatively, you can add + +```python +custom_imports=dict( + imports=['mmdet.models.losses.my_loss']) +``` + +to the config file and achieve the same goal. + +To use it, modify the `loss_xxx` field. +Since MyLoss is for regression, you need to modify the `loss_bbox` field in the head. + +```python +loss_bbox=dict(type='MyLoss', loss_weight=1.0)) +``` diff --git a/downstream/mmdetection/docs/en/tutorials/customize_runtime.md b/downstream/mmdetection/docs/en/tutorials/customize_runtime.md new file mode 100644 index 0000000..9b0f4f1 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/customize_runtime.md @@ -0,0 +1,323 @@ +# Tutorial 5: Customize Runtime Settings + +## Customize optimization settings + +### Customize optimizer supported by Pytorch + +We already support to use all the optimizers implemented by PyTorch, and the only modification is to change the `optimizer` field of config files. +For example, if you want to use `ADAM` (note that the performance could drop a lot), the modification could be as the following. + +```python +optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) +``` + +To modify the learning rate of the model, the users only need to modify the `lr` in the config of optimizer. The users can directly set arguments following the [API doc](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) of PyTorch. + +### Customize self-implemented optimizer + +#### 1. Define a new optimizer + +A customized optimizer could be defined as following. + +Assume you want to add a optimizer named `MyOptimizer`, which has arguments `a`, `b`, and `c`. +You need to create a new directory named `mmdet/core/optimizer`. +And then implement the new optimizer in a file, e.g., in `mmdet/core/optimizer/my_optimizer.py`: + +```python +from .registry import OPTIMIZERS +from torch.optim import Optimizer + + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c) + +``` + +#### 2. Add the optimizer to registry + +To find the above module defined above, this module should be imported into the main namespace at first. There are two options to achieve it. + +- Modify `mmdet/core/optimizer/__init__.py` to import it. 
+ + The newly defined module should be imported in `mmdet/core/optimizer/__init__.py` so that the registry will + find the new module and add it: + +```python +from .my_optimizer import MyOptimizer +``` + +- Use `custom_imports` in the config to manually import it + +```python +custom_imports = dict(imports=['mmdet.core.optimizer.my_optimizer'], allow_failed_imports=False) +``` + +The module `mmdet.core.optimizer.my_optimizer` will be imported at the beginning of the program and the class `MyOptimizer` is then automatically registered. +Note that only the package containing the class `MyOptimizer` should be imported. +`mmdet.core.optimizer.my_optimizer.MyOptimizer` **cannot** be imported directly. + +Actually users can use a totally different file directory structure using this importing method, as long as the module root can be located in `PYTHONPATH`. + +#### 3. Specify the optimizer in the config file + +Then you can use `MyOptimizer` in `optimizer` field of config files. +In the configs, the optimizers are defined by the field `optimizer` like the following: + +```python +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +To use your own optimizer, the field can be changed to + +```python +optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) +``` + +### Customize optimizer constructor + +Some models may have some parameter-specific settings for optimization, e.g. weight decay for BatchNorm layers. +The users can do those fine-grained parameter tuning through customizing optimizer constructor. + +```python +from mmcv.utils import build_from_cfg + +from mmcv.runner.optimizer import OPTIMIZER_BUILDERS, OPTIMIZERS +from mmdet.utils import get_root_logger +from .my_optimizer import MyOptimizer + + +@OPTIMIZER_BUILDERS.register_module() +class MyOptimizerConstructor(object): + + def __init__(self, optimizer_cfg, paramwise_cfg=None): + + def __call__(self, model): + + return my_optimizer + +``` + +The default optimizer constructor is implemented [here](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/optimizer/default_constructor.py#L11), which could also serve as a template for new optimizer constructor. + +### Additional settings + +Tricks not implemented by the optimizer should be implemented through optimizer constructor (e.g., set parameter-wise learning rates) or hooks. We list some common settings that could stabilize the training or accelerate the training. Feel free to create PR, issue for more settings. + +- __Use gradient clip to stabilize training__: + Some models need gradient clip to clip the gradients to stabilize the training process. An example is as below: + + ```python + optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) + ``` + + If your config inherits the base config which already sets the `optimizer_config`, you might need `_delete_=True` to override the unnecessary settings. See the [config documentation](https://mmdetection.readthedocs.io/en/latest/tutorials/config.html) for more details. + +- __Use momentum schedule to accelerate model convergence__: + We support momentum scheduler to modify model's momentum according to learning rate, which could make the model converge in a faster way. + Momentum scheduler is usually used with LR scheduler, for example, the following config is used in 3D detection to accelerate convergence. 
+ For more details, please refer to the implementation of [CyclicLrUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327) and [CyclicMomentumUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130). + + ```python + lr_config = dict( + policy='cyclic', + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, + ) + momentum_config = dict( + policy='cyclic', + target_ratio=(0.85 / 0.95, 1), + cyclic_times=1, + step_ratio_up=0.4, + ) + ``` + +## Customize training schedules + +By default we use step learning rate with 1x schedule, this calls [`StepLRHook`](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L153) in MMCV. +We support many other learning rate schedule [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py), such as `CosineAnnealing` and `Poly` schedule. Here are some examples + +- Poly schedule: + + ```python + lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) + ``` + +- ConsineAnnealing schedule: + + ```python + lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=1000, + warmup_ratio=1.0 / 10, + min_lr_ratio=1e-5) + ``` + +## Customize workflow + +Workflow is a list of (phase, epochs) to specify the running order and epochs. +By default it is set to be + +```python +workflow = [('train', 1)] +``` + +which means running 1 epoch for training. +Sometimes user may want to check some metrics (e.g. loss, accuracy) about the model on the validate set. +In such case, we can set the workflow as + +```python +[('train', 1), ('val', 1)] +``` + +so that 1 epoch for training and 1 epoch for validation will be run iteratively. + +**Note**: + +1. The parameters of model will not be updated during val epoch. +2. Keyword `total_epochs` in the config only controls the number of training epochs and will not affect the validation workflow. +3. Workflows `[('train', 1), ('val', 1)]` and `[('train', 1)]` will not change the behavior of `EvalHook` because `EvalHook` is called by `after_train_epoch` and validation workflow only affect hooks that are called through `after_val_epoch`. Therefore, the only difference between `[('train', 1), ('val', 1)]` and `[('train', 1)]` is that the runner will calculate losses on validation set after each training epoch. + +## Customize hooks + +### Customize self-implemented hooks + +#### 1. Implement a new hook + +There are some occasions when the users might need to implement a new hook. MMDetection supports customized hooks in training (#3395) since v2.3.0. Thus the users could implement a hook directly in mmdet or their mmdet-based codebases and use the hook by only modifying the config in training. +Before v2.3.0, the users need to modify the code to get the hook registered before training starts. +Here we give an example of creating a new hook in mmdet and using it in training. 
+ +```python +from mmcv.runner import HOOKS, Hook + + +@HOOKS.register_module() +class MyHook(Hook): + + def __init__(self, a, b): + pass + + def before_run(self, runner): + pass + + def after_run(self, runner): + pass + + def before_epoch(self, runner): + pass + + def after_epoch(self, runner): + pass + + def before_iter(self, runner): + pass + + def after_iter(self, runner): + pass +``` + +Depending on the functionality of the hook, the users need to specify what the hook will do at each stage of the training in `before_run`, `after_run`, `before_epoch`, `after_epoch`, `before_iter`, and `after_iter`. + +#### 2. Register the new hook + +Then we need to make `MyHook` imported. Assuming the file is in `mmdet/core/utils/my_hook.py` there are two ways to do that: + +- Modify `mmdet/core/utils/__init__.py` to import it. + + The newly defined module should be imported in `mmdet/core/utils/__init__.py` so that the registry will + find the new module and add it: + +```python +from .my_hook import MyHook +``` + +- Use `custom_imports` in the config to manually import it + +```python +custom_imports = dict(imports=['mmdet.core.utils.my_hook'], allow_failed_imports=False) +``` + +#### 3. Modify the config + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value) +] +``` + +You can also set the priority of the hook by adding key `priority` to `'NORMAL'` or `'HIGHEST'` as below + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL') +] +``` + +By default the hook's priority is set as `NORMAL` during registration. + +### Use hooks implemented in MMCV + +If the hook is already implemented in MMCV, you can directly modify the config to use the hook as below + +#### 4. Example: `NumClassCheckHook` + +We implement a customized hook named [NumClassCheckHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/utils.py) to check whether the `num_classes` in head matches the length of `CLASSES` in `dataset`. + +We set it in [default_runtime.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/_base_/default_runtime.py). + +```python +custom_hooks = [dict(type='NumClassCheckHook')] +``` + +### Modify default runtime hooks + +There are some common hooks that are not registered through `custom_hooks`, they are + +- log_config +- checkpoint_config +- evaluation +- lr_config +- optimizer_config +- momentum_config + +In those hooks, only the logger hook has the `VERY_LOW` priority, others' priority are `NORMAL`. +The above-mentioned tutorials already covers how to modify `optimizer_config`, `momentum_config`, and `lr_config`. +Here we reveals how what we can do with `log_config`, `checkpoint_config`, and `evaluation`. + +#### Checkpoint config + +The MMCV runner will use `checkpoint_config` to initialize [`CheckpointHook`](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/hooks/checkpoint.py#L9). + +```python +checkpoint_config = dict(interval=1) +``` + +The users could set `max_keep_ckpts` to only save only small number of checkpoints or decide whether to store state dict of optimizer by `save_optimizer`. More details of the arguments are [here](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.CheckpointHook) + +#### Log config + +The `log_config` wraps multiple logger hooks and enables to set intervals. Now MMCV supports `WandbLoggerHook`, `MlflowLoggerHook`, and `TensorboardLoggerHook`. 
+The detail usages can be found in the [doc](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook). + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +``` + +#### Evaluation config + +The config of `evaluation` will be used to initialize the [`EvalHook`](https://github.com/open-mmlab/mmdetection/blob/7a404a2c000620d52156774a5025070d9e00d918/mmdet/core/evaluation/eval_hooks.py#L8). +Except the key `interval`, other arguments such as `metric` will be passed to the `dataset.evaluate()` + +```python +evaluation = dict(interval=1, metric='bbox') +``` diff --git a/downstream/mmdetection/docs/en/tutorials/data_pipeline.md b/downstream/mmdetection/docs/en/tutorials/data_pipeline.md new file mode 100644 index 0000000..919220d --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/data_pipeline.md @@ -0,0 +1,199 @@ +# Tutorial 3: Customize Data Pipelines + +## Design of Data pipelines + +Following typical conventions, we use `Dataset` and `DataLoader` for data loading +with multiple workers. `Dataset` returns a dict of data items corresponding +the arguments of models' forward method. +Since the data in object detection may not be the same size (image size, gt bbox size, etc.), +we introduce a new `DataContainer` type in MMCV to help collect and distribute +data of different size. +See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details. + +The data preparation pipeline and the dataset is decomposed. Usually a dataset +defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict. +A pipeline consists of a sequence of operations. Each operation takes a dict as input and also output a dict for the next transform. + +We present a classical pipeline in the following figure. The blue blocks are pipeline operations. With the pipeline going on, each operator can add new keys (marked as green) to the result dict or update the existing keys (marked as orange). +![pipeline figure](../../../resources/data_pipeline.png) + +The operations are categorized into data loading, pre-processing, formatting and test-time augmentation. + +Here is a pipeline example for Faster R-CNN. + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +``` + +For each operation, we list the related dict fields that are added/updated/removed. 
+ +### Data loading + +`LoadImageFromFile` + +- add: img, img_shape, ori_shape + +`LoadAnnotations` + +- add: gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg, bbox_fields, mask_fields + +`LoadProposals` + +- add: proposals + +### Pre-processing + +`Resize` + +- add: scale, scale_idx, pad_shape, scale_factor, keep_ratio +- update: img, img_shape, \*bbox_fields, \*mask_fields, \*seg_fields + +`RandomFlip` + +- add: flip +- update: img, \*bbox_fields, \*mask_fields, \*seg_fields + +`Pad` + +- add: pad_fixed_size, pad_size_divisor +- update: img, pad_shape, \*mask_fields, \*seg_fields + +`RandomCrop` + +- update: img, pad_shape, gt_bboxes, gt_labels, gt_masks, \*bbox_fields + +`Normalize` + +- add: img_norm_cfg +- update: img + +`SegRescale` + +- update: gt_semantic_seg + +`PhotoMetricDistortion` + +- update: img + +`Expand` + +- update: img, gt_bboxes + +`MinIoURandomCrop` + +- update: img, gt_bboxes, gt_labels + +`Corrupt` + +- update: img + +### Formatting + +`ToTensor` + +- update: specified by `keys`. + +`ImageToTensor` + +- update: specified by `keys`. + +`Transpose` + +- update: specified by `keys`. + +`ToDataContainer` + +- update: specified by `fields`. + +`DefaultFormatBundle` + +- update: img, proposals, gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg + +`Collect` + +- add: img_meta (the keys of img_meta is specified by `meta_keys`) +- remove: all other keys except for those specified by `keys` + +### Test time augmentation + +`MultiScaleFlipAug` + +## Extend and use custom pipelines + +1. Write a new pipeline in a file, e.g., in `my_pipeline.py`. It takes a dict as input and returns a dict. + + ```python + import random + from mmdet.datasets import PIPELINES + + + @PIPELINES.register_module() + class MyTransform: + """Add your transform + + Args: + p (float): Probability of shifts. Default 0.5. + """ + + def __init__(self, p=0.5): + self.p = p + + def __call__(self, results): + if random.random() > self.p: + results['dummy'] = True + return results + ``` + +2. Import and use the pipeline in your config file. + Make sure the import is relative to where your train script is located. + + ```python + custom_imports = dict(imports=['path.to.my_pipeline'], allow_failed_imports=False) + + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='MyTransform', p=0.2), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), + ] + ``` + +3. Visualize the output of your augmentation pipeline + + To visualize the output of your augmentation pipeline, `tools/misc/browse_dataset.py` + can help the user to browse a detection dataset (both images and bounding box annotations) + visually, or save the image to a designated directory. 
More detials can refer to + [useful_tools](../useful_tools.md) diff --git a/downstream/mmdetection/docs/en/tutorials/finetune.md b/downstream/mmdetection/docs/en/tutorials/finetune.md new file mode 100644 index 0000000..afa5021 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/finetune.md @@ -0,0 +1,89 @@ +# Tutorial 7: Finetuning Models + +Detectors pre-trained on the COCO dataset can serve as a good pre-trained model for other datasets, e.g., CityScapes and KITTI Dataset. +This tutorial provides instruction for users to use the models provided in the [Model Zoo](../model_zoo.md) for other datasets to obtain better performance. + +There are two steps to finetune a model on a new dataset. + +- Add support for the new dataset following [Tutorial 2: Customize Datasets](customize_dataset.md). +- Modify the configs as will be discussed in this tutorial. + +Take the finetuning process on Cityscapes Dataset as an example, the users need to modify five parts in the config. + +## Inherit base configs + +To release the burden and reduce bugs in writing the whole configs, MMDetection V2.0 support inheriting configs from multiple existing configs. To finetune a Mask RCNN model, the new config needs to inherit +`_base_/models/mask_rcnn_r50_fpn.py` to build the basic structure of the model. To use the Cityscapes Dataset, the new config can also simply inherit `_base_/datasets/cityscapes_instance.py`. For runtime settings such as training schedules, the new config needs to inherit `_base_/default_runtime.py`. This configs are in the `configs` directory and the users can also choose to write the whole contents rather than use inheritance. + +```python +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' +] +``` + +## Modify head + +Then the new config needs to modify the head according to the class numbers of the new datasets. By only changing `num_classes` in the roi_head, the weights of the pre-trained models are mostly reused except the final prediction head. + +```python +model = dict( + pretrained=None, + roi_head=dict( + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=8, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) +``` + +## Modify dataset + +The users may also need to prepare the dataset and write the configs about dataset. MMDetection V2.0 already support VOC, WIDER FACE, COCO and Cityscapes Dataset. + +## Modify training schedule + +The finetuning hyperparameters vary from the default schedule. 
It usually requires smaller learning rate and less training epochs + +```python +# optimizer +# lr is set for a batch size of 8 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[7]) +# the max_epochs and step in lr_config need specifically tuned for the customized dataset +runner = dict(max_epochs=8) +log_config = dict(interval=100) +``` + +## Use pre-trained model + +To use the pre-trained model, the new config add the link of pre-trained models in the `load_from`. The users might need to download the model weights before training to avoid the download time during training. + +```python +load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth' # noqa + +``` diff --git a/downstream/mmdetection/docs/en/tutorials/how_to.md b/downstream/mmdetection/docs/en/tutorials/how_to.md new file mode 100644 index 0000000..c5184dc --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/how_to.md @@ -0,0 +1,204 @@ +# Tutorial 11: How to xxx + +This tutorial collects answers to any `How to xxx with MMDetection`. Feel free to update this doc if you meet new questions about `How to` and find the answers! + +## Use backbone network through MMClassification + +The model registry in MMDet, MMCls, MMSeg all inherit from the root registry in MMCV. This allows these repositories to directly use the modules already implemented by each other. Therefore, users can use backbone networks from MMClassification in MMDetection without implementing a network that already exists in MMClassification. + +### Use backbone network implemented in MMClassification + +Suppose you want to use `MobileNetV3-small` as the backbone network of `RetinaNet`, the example config is as the following. + +```python +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +# please install mmcls>=0.20.0 +# import mmcls.models to trigger register_module in mmcls +custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) +pretrained = 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth' +model = dict( + backbone=dict( + _delete_=True, # Delete the backbone field in _base_ + type='mmcls.MobileNetV3', # Using MobileNetV3 from mmcls + arch='small', + out_indices=(3, 8, 11), # Modify out_indices + init_cfg=dict( + type='Pretrained', + checkpoint=pretrained, + prefix='backbone.')), # The pre-trained weights of backbone network in MMCls have prefix='backbone.'. The prefix in the keys will be removed so that these weights can be normally loaded. + # Modify in_channels + neck=dict(in_channels=[24, 48, 96], start_level=0)) +``` + +### Use backbone network in TIMM through MMClassification + +MMClassification also provides a wrapper for the PyTorch Image Models (timm) backbone network, users can directly use the backbone network in timm through MMClassification. Suppose you want to use EfficientNet-B1 as the backbone network of RetinaNet, the example config is as the following. 
+ +```python +# https://github.com/open-mmlab/mmdetection/blob/master/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py + +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# please install mmcls>=0.20.0 +# import mmcls.models to trigger register_module in mmcls +custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) +model = dict( + backbone=dict( + _delete_=True, # Delete the backbone field in _base_ + type='mmcls.TIMMBackbone', # Using timm from mmcls + model_name='efficientnet_b1', + features_only=True, + pretrained=True, + out_indices=(1, 2, 3, 4)), # Modify out_indices + neck=dict(in_channels=[24, 40, 112, 320])) # Modify in_channels + +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +``` + +`type='mmcls.TIMMBackbone'` means use the `TIMMBackbone` class from MMClassification in MMDetection, and the model used is `EfficientNet-B1`, where `mmcls` means the MMClassification repo and `TIMMBackbone` means the TIMMBackbone wrapper implemented in MMClassification. + +For the principle of the Hierarchy Registry, please refer to the [MMCV document](https://github.com/open-mmlab/mmcv/blob/master/docs/en/understand_mmcv/registry.md#hierarchy-registry). For how to use other backbones in MMClassification, you can refer to the [MMClassification document](https://github.com/open-mmlab/mmclassification/blob/master/docs/en/tutorials/config.md). + +## Use Mosaic augmentation + +If you want to use `Mosaic` in training, please make sure that you use `MultiImageMixDataset` at the same time. Taking the 'Faster R-CNN' algorithm as an example, you should modify the values of `train_pipeline` and `train_dataset` in the config as below: + +```python +# Open configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py directly and add the following fields +data_root = 'data/coco/' +dataset_type = 'CocoDataset' +img_scale=(1333, 800)​ +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), + dict( + type='RandomAffine', + scaling_ratio_range=(0.1, 2), + border=(-img_scale[0] // 2, -img_scale[1] // 2)), # The image will be enlarged by 4 times after Mosaic processing,so we use affine transformation to restore the image size. + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] + +train_dataset = dict( + _delete_ = True, # remove unnecessary Settings + type='MultiImageMixDataset', + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ], + filter_empty_gt=False, + ), + pipeline=train_pipeline + ) +​ +data = dict( + train=train_dataset + ) +``` + +## Unfreeze backbone network after freezing the backbone in the config + +If you have freezed the backbone network in the config and want to unfreeze it after some epoches, you can write a hook function to do it. 
Taking the Faster R-CNN with the resnet backbone as an example, you can freeze one stage of the backbone network and add a `custom_hooks` in the config as below: + +```python +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + # freeze one stage of the backbone network. + backbone=dict(frozen_stages=1), +) +custom_hooks = [dict(type="UnfreezeBackboneEpochBasedHook", unfreeze_epoch=1)] +``` + +Meanwhile write the hook class `UnfreezeBackboneEpochBasedHook` in `mmdet/core/hook/unfreeze_backbone_epoch_based_hook.py` + +```python +from mmcv.parallel import is_module_wrapper +from mmcv.runner.hooks import HOOKS, Hook + + +@HOOKS.register_module() +class UnfreezeBackboneEpochBasedHook(Hook): + """Unfreeze backbone network Hook. + + Args: + unfreeze_epoch (int): The epoch unfreezing the backbone network. + """ + + def __init__(self, unfreeze_epoch=1): + self.unfreeze_epoch = unfreeze_epoch + + def before_train_epoch(self, runner): + # Unfreeze the backbone network. + # Only valid for resnet. + if runner.epoch == self.unfreeze_epoch: + model = runner.model + if is_module_wrapper(model): + model = model.module + backbone = model.backbone + if backbone.frozen_stages >= 0: + if backbone.deep_stem: + backbone.stem.train() + for param in backbone.stem.parameters(): + param.requires_grad = True + else: + backbone.norm1.train() + for m in [backbone.conv1, backbone.norm1]: + for param in m.parameters(): + param.requires_grad = True + + for i in range(1, backbone.frozen_stages + 1): + m = getattr(backbone, f'layer{i}') + m.train() + for param in m.parameters(): + param.requires_grad = True +``` + +## Get the channels of a new backbone + +If you want to get the channels of a new backbone, you can build this backbone alone and input a pseudo image to get each stage output. + +Take `ResNet` as an example: + +```python +from mmdet.models import ResNet +import torch +self = ResNet(depth=18) +self.eval() +inputs = torch.rand(1, 3, 32, 32) +level_outputs = self.forward(inputs) +for level_out in level_outputs: + print(tuple(level_out.shape)) + +``` + +Output of the above script is as below: + +```python +(1, 64, 8, 8) +(1, 128, 4, 4) +(1, 256, 2, 2) +(1, 512, 1, 1) +``` + +Users can get the channels of the new backbone by Replacing the `ResNet(depth=18)` in this script with their customized backbone. diff --git a/downstream/mmdetection/docs/en/tutorials/index.rst b/downstream/mmdetection/docs/en/tutorials/index.rst new file mode 100644 index 0000000..5513611 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/index.rst @@ -0,0 +1,17 @@ +.. toctree:: + :maxdepth: 2 + + config.md + customize_dataset.md + data_pipeline.md + customize_models.md + customize_runtime.md + customize_losses.md + finetune.md + robustness_benchmarking.md + pytorch2onnx.md + onnx2tensorrt.md + init_cfg.md + how_to.md + test_results_submission.md + useful_hooks.md diff --git a/downstream/mmdetection/docs/en/tutorials/init_cfg.md b/downstream/mmdetection/docs/en/tutorials/init_cfg.md new file mode 100644 index 0000000..b46b494 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/init_cfg.md @@ -0,0 +1,161 @@ +# Tutorial 10: Weight initialization + +During training, a proper initialization strategy is beneficial to speeding up the training or obtaining a higher performance. 
[MMCV](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/weight_init.py) provide some commonly used methods for initializing modules like `nn.Conv2d`. Model initialization in MMdetection mainly uses `init_cfg`. Users can initialize models with following two steps: + +1. Define `init_cfg` for a model or its components in `model_cfg`, but `init_cfg` of children components have higher priority and will override `init_cfg` of parents modules. +2. Build model as usual, but call `model.init_weights()` method explicitly, and model parameters will be initialized as configuration. + +The high-level workflow of initialization in MMdetection is : + +model_cfg(init_cfg) -> build_from_cfg -> model -> init_weight() -> initialize(self, self.init_cfg) -> children's init_weight() + +### Description + +It is dict or list\[dict\], and contains the following keys and values: + +- `type` (str), containing the initializer name in `INTIALIZERS`, and followed by arguments of the initializer. +- `layer` (str or list\[str\]), containing the names of basiclayers in Pytorch or MMCV with learnable parameters that will be initialized, e.g. `'Conv2d'`,`'DeformConv2d'`. +- `override` (dict or list\[dict\]), containing the sub-modules that not inherit from BaseModule and whose initialization configuration is different from other layers' which are in `'layer'` key. Initializer defined in `type` will work for all layers defined in `layer`, so if sub-modules are not derived Classes of `BaseModule` but can be initialized as same ways of layers in `layer`, it does not need to use `override`. `override` contains: + - `type` followed by arguments of initializer; + - `name` to indicate sub-module which will be initialized. + +### Initialize parameters + +Inherit a new model from `mmcv.runner.BaseModule` or `mmdet.models` Here we show an example of FooModel. + +```python +import torch.nn as nn +from mmcv.runner import BaseModule + +class FooModel(BaseModule) + def __init__(self, + arg1, + arg2, + init_cfg=None): + super(FooModel, self).__init__(init_cfg) + ... +``` + +- Initialize model by using `init_cfg` directly in code + + ```python + import torch.nn as nn + from mmcv.runner import BaseModule + # or directly inherit mmdet models + + class FooModel(BaseModule) + def __init__(self, + arg1, + arg2, + init_cfg=XXX): + super(FooModel, self).__init__(init_cfg) + ... + ``` + +- Initialize model by using `init_cfg` directly in `mmcv.Sequential` or `mmcv.ModuleList` code + + ```python + from mmcv.runner import BaseModule, ModuleList + + class FooModel(BaseModule) + def __init__(self, + arg1, + arg2, + init_cfg=None): + super(FooModel, self).__init__(init_cfg) + ... + self.conv1 = ModuleList(init_cfg=XXX) + ``` + +- Initialize model by using `init_cfg` in config file + + ```python + model = dict( + ... + model = dict( + type='FooModel', + arg1=XXX, + arg2=XXX, + init_cfg=XXX), + ... + ``` + +### Usage of init_cfg + +1. Initialize model by `layer` key + + If we only define `layer`, it just initialize the layer in `layer` key. + + NOTE: Value of `layer` key is the class name with attributes weights and bias of Pytorch, (so such as `MultiheadAttention layer` is not supported). + +- Define `layer` key for initializing module with same configuration. + + ```python + init_cfg = dict(type='Constant', layer=['Conv1d', 'Conv2d', 'Linear'], val=1) + # initialize whole module with same configuration + ``` + +- Define `layer` key for initializing layer with different configurations. 
+ +```python +init_cfg = [dict(type='Constant', layer='Conv1d', val=1), + dict(type='Constant', layer='Conv2d', val=2), + dict(type='Constant', layer='Linear', val=3)] +# nn.Conv1d will be initialized with dict(type='Constant', val=1) +# nn.Conv2d will be initialized with dict(type='Constant', val=2) +# nn.Linear will be initialized with dict(type='Constant', val=3) +``` + +2. Initialize model by `override` key + +- When initializing some specific part with its attribute name, we can use `override` key, and the value in `override` will ignore the value in init_cfg. + + ```python + # layers: + # self.feat = nn.Conv1d(3, 1, 3) + # self.reg = nn.Conv2d(3, 3, 3) + # self.cls = nn.Linear(1,2) + + init_cfg = dict(type='Constant', + layer=['Conv1d','Conv2d'], val=1, bias=2, + override=dict(type='Constant', name='reg', val=3, bias=4)) + # self.feat and self.cls will be initialized with dict(type='Constant', val=1, bias=2) + # The module called 'reg' will be initialized with dict(type='Constant', val=3, bias=4) + ``` + +- If `layer` is None in init_cfg, only sub-module with the name in override will be initialized, and type and other args in override can be omitted. + + ```python + # layers: + # self.feat = nn.Conv1d(3, 1, 3) + # self.reg = nn.Conv2d(3, 3, 3) + # self.cls = nn.Linear(1,2) + + init_cfg = dict(type='Constant', val=1, bias=2, override=dict(name='reg')) + + # self.feat and self.cls will be initialized by Pytorch + # The module called 'reg' will be initialized with dict(type='Constant', val=1, bias=2) + ``` + +- If we don't define `layer` key or `override` key, it will not initialize anything. + +- Invalid usage + + ```python + # It is invalid that override don't have name key + init_cfg = dict(type='Constant', layer=['Conv1d','Conv2d'], val=1, bias=2, + override=dict(type='Constant', val=3, bias=4)) + + # It is also invalid that override has name and other args except type + init_cfg = dict(type='Constant', layer=['Conv1d','Conv2d'], val=1, bias=2, + override=dict(name='reg', val=3, bias=4)) + ``` + +3. Initialize model with the pretrained model + + ```python + init_cfg = dict(type='Pretrained', + checkpoint='torchvision://resnet50') + ``` + +More details can refer to the documentation in [MMCV](https://mmcv.readthedocs.io/en/latest/cnn.html#weight-initialization) and MMCV [PR #780](https://github.com/open-mmlab/mmcv/pull/780) diff --git a/downstream/mmdetection/docs/en/tutorials/onnx2tensorrt.md b/downstream/mmdetection/docs/en/tutorials/onnx2tensorrt.md new file mode 100644 index 0000000..3848bb7 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/onnx2tensorrt.md @@ -0,0 +1,106 @@ +# Tutorial 9: ONNX to TensorRT (Experimental) + +> ## [Try the new MMDeploy to deploy your model](https://mmdeploy.readthedocs.io/) + + + +- [Tutorial 9: ONNX to TensorRT (Experimental)](#tutorial-9-onnx-to-tensorrt-experimental) + - [How to convert models from ONNX to TensorRT](#how-to-convert-models-from-onnx-to-tensorrt) + - [Prerequisite](#prerequisite) + - [Usage](#usage) + - [How to evaluate the exported models](#how-to-evaluate-the-exported-models) + - [List of supported models convertible to TensorRT](#list-of-supported-models-convertible-to-tensorrt) + - [Reminders](#reminders) + - [FAQs](#faqs) + + + +## How to convert models from ONNX to TensorRT + +### Prerequisite + +1. Please refer to [get_started.md](https://mmdetection.readthedocs.io/en/latest/get_started.html) for installation of MMCV and MMDetection from source. +2. 
Please refer to [ONNXRuntime in mmcv](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) and [TensorRT plugin in mmcv](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/tensorrt_plugin.md/) to install `mmcv-full` with ONNXRuntime custom ops and TensorRT plugins. +3. Use our tool [pytorch2onnx](https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html) to convert the model from PyTorch to ONNX. + +### Usage + +```bash +python tools/deployment/onnx2tensorrt.py \ + ${CONFIG} \ + ${MODEL} \ + --trt-file ${TRT_FILE} \ + --input-img ${INPUT_IMAGE_PATH} \ + --shape ${INPUT_IMAGE_SHAPE} \ + --min-shape ${MIN_IMAGE_SHAPE} \ + --max-shape ${MAX_IMAGE_SHAPE} \ + --workspace-size {WORKSPACE_SIZE} \ + --show \ + --verify \ +``` + +Description of all arguments: + +- `config` : The path of a model config file. +- `model` : The path of an ONNX model file. +- `--trt-file`: The Path of output TensorRT engine file. If not specified, it will be set to `tmp.trt`. +- `--input-img` : The path of an input image for tracing and conversion. By default, it will be set to `demo/demo.jpg`. +- `--shape`: The height and width of model input. If not specified, it will be set to `400 600`. +- `--min-shape`: The minimum height and width of model input. If not specified, it will be set to the same as `--shape`. +- `--max-shape`: The maximum height and width of model input. If not specified, it will be set to the same as `--shape`. +- `--workspace-size` : The required GPU workspace size in GiB to build TensorRT engine. If not specified, it will be set to `1` GiB. +- `--show`: Determines whether to show the outputs of the model. If not specified, it will be set to `False`. +- `--verify`: Determines whether to verify the correctness of models between ONNXRuntime and TensorRT. If not specified, it will be set to `False`. +- `--verbose`: Determines whether to print logging messages. It's useful for debugging. If not specified, it will be set to `False`. + +Example: + +```bash +python tools/deployment/onnx2tensorrt.py \ + configs/retinanet/retinanet_r50_fpn_1x_coco.py \ + checkpoints/retinanet_r50_fpn_1x_coco.onnx \ + --trt-file checkpoints/retinanet_r50_fpn_1x_coco.trt \ + --input-img demo/demo.jpg \ + --shape 400 600 \ + --show \ + --verify \ +``` + +## How to evaluate the exported models + +We prepare a tool `tools/deplopyment/test.py` to evaluate TensorRT models. + +Please refer to following links for more information. + +- [how-to-evaluate-the-exported-models](pytorch2onnx.md#how-to-evaluate-the-exported-models) +- [results-and-models](pytorch2onnx.md#results-and-models) + +## List of supported models convertible to TensorRT + +The table below lists the models that are guaranteed to be convertible to TensorRT. 
+ +| Model | Config | Dynamic Shape | Batch Inference | Note | +| :----------------: | :--------------------------------------------------------------: | :-----------: | :-------------: | :--: | +| SSD | `configs/ssd/ssd300_coco.py` | Y | Y | | +| FSAF | `configs/fsaf/fsaf_r50_fpn_1x_coco.py` | Y | Y | | +| FCOS | `configs/fcos/fcos_r50_caffe_fpn_4x4_1x_coco.py` | Y | Y | | +| YOLOv3 | `configs/yolo/yolov3_d53_mstrain-608_273e_coco.py` | Y | Y | | +| RetinaNet | `configs/retinanet/retinanet_r50_fpn_1x_coco.py` | Y | Y | | +| Faster R-CNN | `configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py` | Y | Y | | +| Cascade R-CNN | `configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py` | Y | Y | | +| Mask R-CNN | `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | | +| Cascade Mask R-CNN | `configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | | +| PointRend | `configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py` | Y | Y | | + +Notes: + +- *All models above are tested with Pytorch==1.6.0, onnx==1.7.0 and TensorRT-7.2.1.6.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0* + +## Reminders + +- If you meet any problem with the listed models above, please create an issue and it would be taken care of soon. For models not included in the list, we may not provide much help here due to the limited resources. Please try to dig a little deeper and debug by yourself. +- Because this feature is experimental and may change fast, please always try with the latest `mmcv` and `mmdetecion`. + +## FAQs + +- None diff --git a/downstream/mmdetection/docs/en/tutorials/pytorch2onnx.md b/downstream/mmdetection/docs/en/tutorials/pytorch2onnx.md new file mode 100644 index 0000000..3561178 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/pytorch2onnx.md @@ -0,0 +1,334 @@ +# Tutorial 8: Pytorch to ONNX (Experimental) + +> ## [Try the new MMDeploy to deploy your model](https://mmdeploy.readthedocs.io/) + + + +- [Tutorial 8: Pytorch to ONNX (Experimental)](#tutorial-8-pytorch-to-onnx-experimental) + - [How to convert models from Pytorch to ONNX](#how-to-convert-models-from-pytorch-to-onnx) + - [Prerequisite](#prerequisite) + - [Usage](#usage) + - [Description of all arguments](#description-of-all-arguments) + - [How to evaluate the exported models](#how-to-evaluate-the-exported-models) + - [Prerequisite](#prerequisite-1) + - [Usage](#usage-1) + - [Description of all arguments](#description-of-all-arguments-1) + - [Results and Models](#results-and-models) + - [List of supported models exportable to ONNX](#list-of-supported-models-exportable-to-onnx) + - [The Parameters of Non-Maximum Suppression in ONNX Export](#the-parameters-of-non-maximum-suppression-in-onnx-export) + - [Reminders](#reminders) + - [FAQs](#faqs) + + + +## How to convert models from Pytorch to ONNX + +### Prerequisite + +1. Install the prerequisites following [get_started.md/Prepare environment](../get_started.md). +2. Build custom operators for ONNX Runtime and install MMCV manually following [How to build custom operators for ONNX Runtime](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/onnxruntime_op.md/#how-to-build-custom-operators-for-onnx-runtime) +3. Install MMdetection manually following steps 2-3 in [get_started.md/Install MMdetection](../get_started.md). 
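+
+Before moving on, it can be worth confirming that the ONNX Runtime custom ops from step 2 are actually discoverable. The snippet below is only a minimal sanity check, assuming `mmcv-full` was built from source with the ONNX Runtime ops enabled; `get_onnxruntime_op_path` is described in the MMCV deployment documentation linked above, and the assertion message is illustrative.
+
+```python
+# Minimal sanity check (assumption: mmcv-full was built with ONNX Runtime custom ops).
+import os
+
+import onnxruntime as ort
+from mmcv.ops import get_onnxruntime_op_path
+
+ort_custom_op_path = get_onnxruntime_op_path()
+assert os.path.exists(ort_custom_op_path), \
+    'ONNX Runtime custom ops not found, please rebuild mmcv-full from source.'
+
+# Registering the shared library makes the custom ops (e.g. NMS) visible to ONNX Runtime sessions.
+session_options = ort.SessionOptions()
+session_options.register_custom_ops_library(ort_custom_op_path)
+print(f'ONNX Runtime custom ops loaded from {ort_custom_op_path}')
+```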
+ +### Usage + +```bash +python tools/deployment/pytorch2onnx.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --input-img ${INPUT_IMAGE_PATH} \ + --shape ${IMAGE_SHAPE} \ + --test-img ${TEST_IMAGE_PATH} \ + --opset-version ${OPSET_VERSION} \ + --cfg-options ${CFG_OPTIONS} + --dynamic-export \ + --show \ + --verify \ + --simplify \ +``` + +### Description of all arguments + +- `config` : The path of a model config file. +- `checkpoint` : The path of a model checkpoint file. +- `--output-file`: The path of output ONNX model. If not specified, it will be set to `tmp.onnx`. +- `--input-img`: The path of an input image for tracing and conversion. By default, it will be set to `tests/data/color.jpg`. +- `--shape`: The height and width of input tensor to the model. If not specified, it will be set to `800 1216`. +- `--test-img` : The path of an image to verify the exported ONNX model. By default, it will be set to `None`, meaning it will use `--input-img` for verification. +- `--opset-version` : The opset version of ONNX. If not specified, it will be set to `11`. +- `--dynamic-export`: Determines whether to export ONNX model with dynamic input and output shapes. If not specified, it will be set to `False`. +- `--show`: Determines whether to print the architecture of the exported model and whether to show detection outputs when `--verify` is set to `True`. If not specified, it will be set to `False`. +- `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`. +- `--simplify`: Determines whether to simplify the exported ONNX model. If not specified, it will be set to `False`. +- `--cfg-options`: Override some settings in the used config file, the key-value pair in `xxx=yyy` format will be merged into config file. +- `--skip-postprocess`: Determines whether export model without post process. If not specified, it will be set to `False`. Notice: This is an experimental option. Only work for some single stage models. Users need to implement the post-process by themselves. We do not guarantee the correctness of the exported model. + +Example: + +```bash +python tools/deployment/pytorch2onnx.py \ + configs/yolo/yolov3_d53_mstrain-608_273e_coco.py \ + checkpoints/yolo/yolov3_d53_mstrain-608_273e_coco.pth \ + --output-file checkpoints/yolo/yolov3_d53_mstrain-608_273e_coco.onnx \ + --input-img demo/demo.jpg \ + --test-img tests/data/color.jpg \ + --shape 608 608 \ + --show \ + --verify \ + --dynamic-export \ + --cfg-options \ + model.test_cfg.deploy_nms_pre=-1 \ +``` + +## How to evaluate the exported models + +We prepare a tool `tools/deplopyment/test.py` to evaluate ONNX models with ONNXRuntime and TensorRT. + +### Prerequisite + +- Install onnx and onnxruntime (CPU version) + + ```shell + pip install onnx onnxruntime==1.5.1 + ``` + +- If you want to run the model on GPU, please remove the CPU version before using the GPU version. + + ```shell + pip uninstall onnxruntime + pip install onnxruntime-gpu + ``` + + Note: onnxruntime-gpu is version-dependent on CUDA and CUDNN, please ensure that your + environment meets the requirements. 
+
+- Build custom operators for ONNX Runtime following [How to build custom operators for ONNX Runtime](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/onnxruntime_op.md/#how-to-build-custom-operators-for-onnx-runtime)
+
+- Install TensorRT by referring to [How to build TensorRT plugins in MMCV](https://mmcv.readthedocs.io/en/latest/deployment/tensorrt_plugin.html#how-to-build-tensorrt-plugins-in-mmcv) (optional)
+
+### Usage
+
+```bash
+python tools/deployment/test.py \
+    ${CONFIG_FILE} \
+    ${MODEL_FILE} \
+    --out ${OUTPUT_FILE} \
+    --backend ${BACKEND} \
+    --format-only ${FORMAT_ONLY} \
+    --eval ${EVALUATION_METRICS} \
+    --show-dir ${SHOW_DIRECTORY} \
+    --show-score-thr ${SHOW_SCORE_THRESHOLD} \
+    --cfg-options ${CFG_OPTIONS} \
+    --eval-options ${EVALUATION_OPTIONS}
+```
+
+### Description of all arguments
+
+- `config`: The path of a model config file.
+- `model`: The path of an input model file.
+- `--out`: The path of the output result file in pickle format.
+- `--backend`: Backend for the input model to run; should be `onnxruntime` or `tensorrt`.
+- `--format-only` : Format the output results without performing evaluation. It is useful when you want to format the results to a specific format and submit them to the test server. If not specified, it will be set to `False`.
+- `--eval`: Evaluation metrics, which depend on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC.
+- `--show-dir`: Directory where painted images will be saved.
+- `--show-score-thr`: Score threshold. Default is set to `0.3`.
+- `--cfg-options`: Override some settings in the used config file; the key-value pairs in `xxx=yyy` format will be merged into the config file.
+- `--eval-options`: Custom options for evaluation; the key-value pairs in `xxx=yyy` format will be kwargs for the `dataset.evaluate()` function.
+
+Notes:
+
+- If the deployed backend platform is TensorRT, please add environment variables before running the file:
+
+  ```bash
+  export ONNX_BACKEND=MMCVTensorRT
+  ```
+
+- If you want to use the `--dynamic-export` parameter in the TensorRT backend to export ONNX, please remove the `--simplify` parameter, and vice versa.
+
+### Results and Models
+
+| Model | Config | Metric | PyTorch | ONNX Runtime | TensorRT |
+| :----------------: | :------------------------------------------------------------------: | :-----: | :-----: | :----------: | :------: |
+| FCOS | `configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py` | Box AP | 36.6 | 36.5 | 36.3 |
+| FSAF | `configs/fsaf/fsaf_r50_fpn_1x_coco.py` | Box AP | 36.0 | 36.0 | 35.9 |
+| RetinaNet | `configs/retinanet/retinanet_r50_fpn_1x_coco.py` | Box AP | 36.5 | 36.4 | 36.3 |
+| SSD | `configs/ssd/ssd300_coco.py` | Box AP | 25.6 | 25.6 | 25.6 |
+| YOLOv3 | `configs/yolo/yolov3_d53_mstrain-608_273e_coco.py` | Box AP | 33.5 | 33.5 | 33.5 |
+| Faster R-CNN | `configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py` | Box AP | 37.4 | 37.4 | 37.0 |
+| Cascade R-CNN | `configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py` | Box AP | 40.3 | 40.3 | 40.1 |
+| Mask R-CNN | `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py` | Box AP | 38.2 | 38.1 | 37.7 |
+| Mask R-CNN | `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py` | Mask AP | 34.7 | 33.7 | 33.3 |
+| Cascade Mask R-CNN | `configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py` | Box AP | 41.2 | 41.2 | 40.9 |
+| Cascade Mask R-CNN | `configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py` | Mask AP | 35.9 | 34.8 | 34.5 |
+| CornerNet | `configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py` | Box AP | 40.6 | 40.4 | - |
+| DETR | `configs/detr/detr_r50_8x2_150e_coco.py` | Box AP | 40.1 | 40.1 | - |
+| PointRend | `configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py` | Box AP | 38.4 | 38.4 | - |
+| PointRend | `configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py` | Mask AP | 36.3 | 35.2 | - |
+
+Notes:
+
+- All ONNX models are evaluated with dynamic shapes on the COCO dataset, and images are preprocessed according to the original config file. Note that CornerNet is evaluated without test-time flip, since currently only single-scale evaluation is supported with ONNX Runtime.
+
+- Mask AP of Mask R-CNN drops by 1% for ONNX Runtime. The main reason is that the predicted masks are directly interpolated to the original image in PyTorch, while in other backends they are first interpolated to the preprocessed input image of the model and then to the original image.
+
+## List of supported models exportable to ONNX
+
+The table below lists the models that are guaranteed to be exportable to ONNX and runnable in ONNX Runtime.
+
+| Model | Config | Dynamic Shape | Batch Inference | Note |
+| :----------------: | :------------------------------------------------------------------: | :-----------: | :-------------: | :----------------------------------------------------------------------------: |
+| FCOS | `configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py` | Y | Y | |
+| FSAF | `configs/fsaf/fsaf_r50_fpn_1x_coco.py` | Y | Y | |
+| RetinaNet | `configs/retinanet/retinanet_r50_fpn_1x_coco.py` | Y | Y | |
+| SSD | `configs/ssd/ssd300_coco.py` | Y | Y | |
+| YOLOv3 | `configs/yolo/yolov3_d53_mstrain-608_273e_coco.py` | Y | Y | |
+| Faster R-CNN | `configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py` | Y | Y | |
+| Cascade R-CNN | `configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py` | Y | Y | |
+| Mask R-CNN | `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | |
+| Cascade Mask R-CNN | `configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | |
+| CornerNet | `configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py` | Y | N | no flip, no batch inference, tested with torch==1.7.0 and onnxruntime==1.5.1. |
+| DETR | `configs/detr/detr_r50_8x2_150e_coco.py` | Y | Y | batch inference is *not recommended* |
+| PointRend | `configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py` | Y | Y | |
+
+Notes:
+
+- The minimum required version of MMCV is `1.3.5`.
+
+- *All models above are tested with PyTorch==1.6.0 and onnxruntime==1.5.1*, except for CornerNet. For more details about the
+  torch version when exporting CornerNet to ONNX, which involves `mmcv::cummax`, please refer to the [Known Issues](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/onnxruntime_op.md#known-issues) in mmcv.
+
+- Though supported, it is *not recommended* to use batch inference in onnxruntime for `DETR`, because there is a huge performance gap between the ONNX and torch models (e.g. 33.5 vs 39.9 mAP on COCO for onnxruntime and torch respectively, with a batch size of 2). The main reason for the gap is that batch inference has a non-negligible effect on the predicted regressions in ONNX: the predicted coordinates are normalized by `img_shape` (without padding) and should be converted to absolute format, but `img_shape` is not dynamically traceable, so the padded `img_shape_for_onnx` is used instead.
+
+- Currently only single-scale evaluation is supported with ONNX Runtime, and `mmcv::SoftNonMaxSuppression` only supports a single image for now.
+
+## The Parameters of Non-Maximum Suppression in ONNX Export
+
+In the process of exporting the ONNX model, we set some parameters for the NMS op to control the number of output bounding boxes. You can set these parameters through `--cfg-options`, as shown in the example below; the individual parameters used by the supported models are described after it.
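+
+For instance, to keep the top 1000 boxes before NMS and at most 100 boxes per image in the exported model, the conversion command from the earlier example could be extended as follows (a sketch only; whether a given key applies depends on the `test_cfg` of the chosen model):
+
+```shell
+python tools/deployment/pytorch2onnx.py \
+    configs/yolo/yolov3_d53_mstrain-608_273e_coco.py \
+    checkpoints/yolo/yolov3_d53_mstrain-608_273e_coco.pth \
+    --output-file checkpoints/yolo/yolov3_d53_mstrain-608_273e_coco.onnx \
+    --cfg-options model.test_cfg.deploy_nms_pre=1000 model.test_cfg.max_per_img=100
+```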
+
+- `nms_pre`: The number of boxes before NMS. The default setting is `1000`.
+
+- `deploy_nms_pre`: The number of boxes before NMS when exporting to the ONNX model. The default setting is `0`.
+
+- `max_per_img`: The number of boxes to be kept after NMS. The default setting is `100`.
+
+- `max_output_boxes_per_class`: Maximum number of output boxes per class of NMS. The default setting is `200`.
+
+## Reminders
+
+- When the input model has a custom op such as `RoIAlign` and you want to verify the exported ONNX model, you may have to build `mmcv` with [ONNXRuntime](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) from source.
+- The `mmcv.onnx.simplify` feature is based on [onnx-simplifier](https://github.com/daquexian/onnx-simplifier). If you want to try it, please refer to [onnx in `mmcv`](https://mmcv.readthedocs.io/en/latest/deployment/onnx.html) and [onnxruntime op in `mmcv`](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) for more information.
+- If you meet any problem with the listed models above, please create an issue and it will be taken care of soon. For models not included in the list, please try to dig a little deeper and debug them by yourself.
+- Because this feature is experimental and may change quickly, please always try with the latest `mmcv` and `mmdetection`.
+
+## FAQs
+
+- None
diff --git a/downstream/mmdetection/docs/en/tutorials/test_results_submission.md b/downstream/mmdetection/docs/en/tutorials/test_results_submission.md
new file mode 100644
index 0000000..aed595c
--- /dev/null
+++ b/downstream/mmdetection/docs/en/tutorials/test_results_submission.md
@@ -0,0 +1,112 @@
+# Tutorial 12: Test Results Submission
+
+## Panoptic segmentation test results submission
+
+The following sections introduce how to produce the prediction results of panoptic segmentation models on the COCO test-dev set and submit the predictions to the [COCO evaluation server](https://competitions.codalab.org/competitions/19507).
+
+### Prerequisites
+
+- Download the [COCO test dataset images](http://images.cocodataset.org/zips/test2017.zip), [testing image info](http://images.cocodataset.org/annotations/image_info_test2017.zip), and [panoptic train/val annotations](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip), then unzip them, put 'test2017' into `data/coco/`, and put the json files and annotation files into `data/coco/annotations/`.
+
+```shell
+# suppose data/coco/ does not exist
+mkdir -pv data/coco/
+
+# download test2017
+wget -P data/coco/ http://images.cocodataset.org/zips/test2017.zip
+wget -P data/coco/ http://images.cocodataset.org/annotations/image_info_test2017.zip
+wget -P data/coco/ http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip

+# unzip them
+unzip data/coco/test2017.zip -d data/coco/
+unzip data/coco/image_info_test2017.zip -d data/coco/
+unzip data/coco/panoptic_annotations_trainval2017.zip -d data/coco/
+
+# remove zip files (optional)
+rm -rf data/coco/test2017.zip data/coco/image_info_test2017.zip data/coco/panoptic_annotations_trainval2017.zip
+```
+
+- Run the following code to update the category information in the testing image info. Since the attribute `isthing` is missing in the category information of 'image_info_test-dev2017.json', we need to update it with the category information in 'panoptic_val2017.json'.
+ +```shell +python tools/misc/gen_coco_panoptic_test_info.py data/coco/annotations +``` + +After completing the above preparations, your directory structure of `data` should be like this: + +```text +data +`-- coco + |-- annotations + | |-- image_info_test-dev2017.json + | |-- image_info_test2017.json + | |-- panoptic_image_info_test-dev2017.json + | |-- panoptic_train2017.json + | |-- panoptic_train2017.zip + | |-- panoptic_val2017.json + | `-- panoptic_val2017.zip + `-- test2017 +``` + +### Inference on coco test-dev + +The commands to perform inference on test2017 are as below: + +```shell +# test with single gpu +CUDA_VISIBLE_DEVICES=0 python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + --format-only \ + --cfg-options data.test.ann_file=data/coco/annotations/panoptic_image_info_test-dev2017.json data.test.img_prefix=data/coco/test2017 \ + --eval-options jsonfile_prefix=${WORK_DIR}/results + +# test with four gpus +CUDA_VISIBLE_DEVICES=0,1,3,4 bash tools/dist_test.sh \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + 4 \ # four gpus + --format-only \ + --cfg-options data.test.ann_file=data/coco/annotations/panoptic_image_info_test-dev2017.json data.test.img_prefix=data/coco/test2017 \ + --eval-options jsonfile_prefix=${WORK_DIR}/results + +# test with slurm +GPUS=8 tools/slurm_test.sh \ + ${Partition} \ + ${JOB_NAME} \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + --format-only \ + --cfg-options data.test.ann_file=data/coco/annotations/panoptic_image_info_test-dev2017.json data.test.img_prefix=data/coco/test2017 \ + --eval-options jsonfile_prefix=${WORK_DIR}/results +``` + +Example + +Suppose we perform inference on `test2017` using pretrained MaskFormer with ResNet-50 backbone. + +```shell +# test with single gpu +CUDA_VISIBLE_DEVICES=0 python tools/test.py \ + configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py \ + checkpoints/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth \ + --format-only \ + --cfg-options data.test.ann_file=data/coco/annotations/panoptic_image_info_test-dev2017.json data.test.img_prefix=data/coco/test2017 \ + --eval-options jsonfile_prefix=work_dirs/maskformer/results +``` + +### Rename files and zip results + +After inference, the panoptic segmentation results (a json file and a directory where the masks are stored) will be in `WORK_DIR`. We should rename them according to the naming convention described on [COCO's Website](https://cocodataset.org/#upload). Finally, we need to compress the json and the directory where the masks are stored into a zip file, and rename the zip file according to the naming convention. Note that the zip file should **directly** contains the above two files. + +The commands to rename files and zip results: + +```shell +# In WORK_DIR, we have panoptic segmentation results: 'panoptic' and 'results.panoptic.json'. +cd ${WORK_DIR} + +# replace '[algorithm_name]' with the name of algorithm you used. 
+mv ./panoptic ./panoptic_test-dev2017_[algorithm_name]_results +mv ./results.panoptic.json ./panoptic_test-dev2017_[algorithm_name]_results.json +zip panoptic_test-dev2017_[algorithm_name]_results.zip -ur panoptic_test-dev2017_[algorithm_name]_results panoptic_test-dev2017_[algorithm_name]_results.json +``` diff --git a/downstream/mmdetection/docs/en/tutorials/useful_hooks.md b/downstream/mmdetection/docs/en/tutorials/useful_hooks.md new file mode 100644 index 0000000..f84be97 --- /dev/null +++ b/downstream/mmdetection/docs/en/tutorials/useful_hooks.md @@ -0,0 +1,83 @@ +# Tutorial 13: Useful Hooks + +MMDetection and MMCV provide users with various useful hooks including log hooks, evaluation hooks, NumClassCheckHook, etc. This tutorial introduces the functionalities and usages of hooks implemented in MMDetection. For using hooks in MMCV, please read the [API documentation in MMCV](https://github.com/open-mmlab/mmcv/blob/master/docs/en/understand_mmcv/runner.md). + +## CheckInvalidLossHook + +## EvalHook and DistEvalHook + +## ExpMomentumEMAHook and LinearMomentumEMAHook + +## NumClassCheckHook + +## [MemoryProfilerHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/hook/memory_profiler_hook.py) + +Memory profiler hook records memory information including virtual memory, swap memory, and the memory of the current process. This hook helps grasp the memory usage of the system and discover potential memory leak bugs. To use this hook, users should install `memory_profiler` and `psutil` by `pip install memory_profiler psutil` first. + +### Usage + +To use this hook, users should add the following code to the config file. + +```python +custom_hooks = [ + dict(type='MemoryProfilerHook', interval=50) +] +``` + +### Result + +During training, you can see the messages in the log recorded by `MemoryProfilerHook` as below. The system has 250 GB (246360 MB + 9407 MB) of memory and 8 GB (5740 MB + 2452 MB) of swap memory in total. Currently 9407 MB (4.4%) of memory and 5740 MB (29.9%) of swap memory were consumed. And the current training process consumed 5434 MB of memory. + +```text +2022-04-21 08:49:56,881 - mmdet - INFO - Memory information available_memory: 246360 MB, used_memory: 9407 MB, memory_utilization: 4.4 %, available_swap_memory: 5740 MB, used_swap_memory: 2452 MB, swap_memory_utilization: 29.9 %, current_process_memory: 5434 MB +``` + +## SetEpochInfoHook + +## SyncNormHook + +## SyncRandomSizeHook + +## YOLOXLrUpdaterHook + +## YOLOXModeSwitchHook + +## How to implement a custom hook + +In general, there are 10 points where hooks can be inserted from the beginning to the end of model training. The users can implement custom hooks and insert them at different points in the process of training to do what they want. + +- global points: `before_run`, `after_run` +- points in training: `before_train_epoch`, `before_train_iter`, `after_train_iter`, `after_train_epoch` +- points in validation: `before_val_epoch`, `before_val_iter`, `after_val_iter`, `after_val_epoch` + +For example, users can implement a hook to check loss and terminate training when loss goes NaN. To achieve that, there are three steps to go: + +1. Implement a new hook that inherits the `Hook` class in MMCV, and implement `after_train_iter` method which checks whether loss goes NaN after every `n` training iterations. +2. The implemented hook should be registered in `HOOKS` by `@HOOKS.register_module()` as shown in the code below. +3. 
Add `custom_hooks = [dict(type='MemoryProfilerHook', interval=50)]` in the config file. + +```python +import torch +from mmcv.runner.hooks import HOOKS, Hook + + +@HOOKS.register_module() +class CheckInvalidLossHook(Hook): + """Check invalid loss hook. + This hook will regularly check whether the loss is valid + during training. + Args: + interval (int): Checking interval (every k iterations). + Default: 50. + """ + + def __init__(self, interval=50): + self.interval = interval + + def after_train_iter(self, runner): + if self.every_n_iters(runner, self.interval): + assert torch.isfinite(runner.outputs['loss']), \ + runner.logger.info('loss become infinite or NaN!') +``` + +Please read [customize_runtime](https://mmdetection.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-self-implemented-hooks) for more about implementing a custom hook. diff --git a/downstream/mmdetection/docs/en/useful_tools.md b/downstream/mmdetection/docs/en/useful_tools.md new file mode 100644 index 0000000..8e5c49d --- /dev/null +++ b/downstream/mmdetection/docs/en/useful_tools.md @@ -0,0 +1,502 @@ +Apart from training/testing scripts, We provide lots of useful tools under the +`tools/` directory. + +## Log Analysis + +`tools/analysis_tools/analyze_logs.py` plots loss/mAP curves given a training +log file. Run `pip install seaborn` first to install the dependency. + +```shell +python tools/analysis_tools/analyze_logs.py plot_curve [--keys ${KEYS}] [--eval-interval ${EVALUATION_INTERVAL}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] +``` + +![loss curve image](../../resources/loss_curve.png) + +Examples: + +- Plot the classification loss of some run. + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls --legend loss_cls + ``` + +- Plot the classification and regression loss of some run, and save the figure to a pdf. + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls loss_bbox --out losses.pdf + ``` + +- Compare the bbox mAP of two runs in the same figure. + + ```shell + python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys bbox_mAP --legend run1 run2 + ``` + +- Compute the average training speed. + + ```shell + python tools/analysis_tools/analyze_logs.py cal_train_time log.json [--include-outliers] + ``` + + The output is expected to be like the following. + + ```text + -----Analyze train time of work_dirs/some_exp/20190611_192040.log.json----- + slowest epoch 11, average time is 1.2024 + fastest epoch 1, average time is 1.1909 + time std over epochs is 0.0028 + average iter time: 1.1959 s/iter + ``` + +## Result Analysis + +`tools/analysis_tools/analyze_results.py` calculates single image mAP and saves or shows the topk images with the highest and lowest scores based on prediction results. + +**Usage** + +```shell +python tools/analysis_tools/analyze_results.py \ + ${CONFIG} \ + ${PREDICTION_PATH} \ + ${SHOW_DIR} \ + [--show] \ + [--wait-time ${WAIT_TIME}] \ + [--topk ${TOPK}] \ + [--show-score-thr ${SHOW_SCORE_THR}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +Description of all arguments: + +- `config` : The path of a model config file. 
+- `prediction_path`: Output result file in pickle format from `tools/test.py` +- `show_dir`: Directory where painted GT and detection images will be saved +- `--show`:Determines whether to show painted images, If not specified, it will be set to `False` +- `--wait-time`: The interval of show (s), 0 is block +- `--topk`: The number of saved images that have the highest and lowest `topk` scores after sorting. If not specified, it will be set to `20`. +- `--show-score-thr`: Show score threshold. If not specified, it will be set to `0`. +- `--cfg-options`: If specified, the key-value pair optional cfg will be merged into config file + +**Examples**: + +Assume that you have got result file in pickle format from `tools/test.py` in the path './result.pkl'. + +1. Test Faster R-CNN and visualize the results, save images to the directory `results/` + +```shell +python tools/analysis_tools/analyze_results.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + result.pkl \ + results \ + --show +``` + +2. Test Faster R-CNN and specified topk to 50, save images to the directory `results/` + +```shell +python tools/analysis_tools/analyze_results.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + result.pkl \ + results \ + --topk 50 +``` + +3. If you want to filter the low score prediction results, you can specify the `show-score-thr` parameter + +```shell +python tools/analysis_tools/analyze_results.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + result.pkl \ + results \ + --show-score-thr 0.3 +``` + +## Visualization + +### Visualize Datasets + +`tools/misc/browse_dataset.py` helps the user to browse a detection dataset (both +images and bounding box annotations) visually, or save the image to a +designated directory. + +```shell +python tools/misc/browse_dataset.py ${CONFIG} [-h] [--skip-type ${SKIP_TYPE[SKIP_TYPE...]}] [--output-dir ${OUTPUT_DIR}] [--not-show] [--show-interval ${SHOW_INTERVAL}] +``` + +### Visualize Models + +First, convert the model to ONNX as described +[here](#convert-mmdetection-model-to-onnx-experimental). +Note that currently only RetinaNet is supported, support for other models +will be coming in later versions. +The converted model could be visualized by tools like [Netron](https://github.com/lutzroeder/netron). + +### Visualize Predictions + +If you need a lightweight GUI for visualizing the detection results, you can refer [DetVisGUI project](https://github.com/Chien-Hung/DetVisGUI/tree/mmdetection). + +## Error Analysis + +`tools/analysis_tools/coco_error_analysis.py` analyzes COCO results per category and by +different criterion. It can also make a plot to provide useful information. + +```shell +python tools/analysis_tools/coco_error_analysis.py ${RESULT} ${OUT_DIR} [-h] [--ann ${ANN}] [--types ${TYPES[TYPES...]}] +``` + +Example: + +Assume that you have got [Mask R-CNN checkpoint file](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) in the path 'checkpoint'. For other checkpoints, please refer to our [model zoo](./model_zoo.md). You can use the following command to get the results bbox and segmentation json file. + +```shell +# out: results.bbox.json and results.segm.json +python tools/test.py \ + configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoint/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + --format-only \ + --options "jsonfile_prefix=./results" +``` + +1. 
Get COCO bbox error results per category , save analyze result images to the directory `results/` + +```shell +python tools/analysis_tools/coco_error_analysis.py \ + results.bbox.json \ + results \ + --ann=data/coco/annotations/instances_val2017.json \ +``` + +2. Get COCO segmentation error results per category , save analyze result images to the directory `results/` + +```shell +python tools/analysis_tools/coco_error_analysis.py \ + results.segm.json \ + results \ + --ann=data/coco/annotations/instances_val2017.json \ + --types='segm' +``` + +## Model Serving + +In order to serve an `MMDetection` model with [`TorchServe`](https://pytorch.org/serve/), you can follow the steps: + +### 1. Convert model from MMDetection to TorchServe + +```shell +python tools/deployment/mmdet2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +**Note**: ${MODEL_STORE} needs to be an absolute path to a folder. + +### 2. Build `mmdet-serve` docker image + +```shell +docker build -t mmdet-serve:latest docker/serve/ +``` + +### 3. Run `mmdet-serve` + +Check the official docs for [running TorchServe with docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +In order to run in GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). You can omit the `--gpus` argument in order to run in CPU. + +Example: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=$MODEL_STORE,target=/home/model-server/model-store \ +mmdet-serve:latest +``` + +[Read the docs](https://github.com/pytorch/serve/blob/072f5d088cce9bb64b2a18af065886c9b01b317b/docs/rest_api.md/) about the Inference (8080), Management (8081) and Metrics (8082) APis + +### 4. Test deployment + +```shell +curl -O curl -O https://raw.githubusercontent.com/pytorch/serve/master/docs/images/3dogs.jpg +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T 3dogs.jpg +``` + +You should obtain a response similar to: + +```json +[ + { + "class_name": "dog", + "bbox": [ + 294.63409423828125, + 203.99111938476562, + 417.048583984375, + 281.62744140625 + ], + "score": 0.9987992644309998 + }, + { + "class_name": "dog", + "bbox": [ + 404.26019287109375, + 126.0080795288086, + 574.5091552734375, + 293.6662292480469 + ], + "score": 0.9979367256164551 + }, + { + "class_name": "dog", + "bbox": [ + 197.2144775390625, + 93.3067855834961, + 307.8505554199219, + 276.7560119628906 + ], + "score": 0.993338406085968 + } +] +``` + +And you can use `test_torchserver.py` to compare result of torchserver and pytorch, and visualize them. + +```shell +python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] [--score-thr ${SCORE_THR}] +``` + +Example: + +```shell +python tools/deployment/test_torchserver.py \ +demo/demo.jpg \ +configs/yolo/yolov3_d53_320_273e_coco.py \ +checkpoint/yolov3_d53_320_273e_coco-421362b6.pth \ +yolov3 +``` + +## Model Complexity + +`tools/analysis_tools/get_flops.py` is a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model. + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +You will get the results like this. 
+ +```text +============================== +Input shape: (3, 1280, 800) +Flops: 239.32 GFLOPs +Params: 37.74 M +============================== +``` + +**Note**: This tool is still experimental and we do not guarantee that the +number is absolutely correct. You may well use the result for simple +comparisons, but double check it before you adopt it in technical reports or papers. + +1. FLOPs are related to the input shape while parameters are not. The default + input shape is (1, 3, 1280, 800). +2. Some operators are not counted into FLOPs like GN and custom operators. Refer to [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) for details. +3. The FLOPs of two-stage detectors is dependent on the number of proposals. + +## Model conversion + +### MMDetection model to ONNX (experimental) + +We provide a script to convert model to [ONNX](https://github.com/onnx/onnx) format. We also support comparing the output results between Pytorch and ONNX model for verification. + +```shell +python tools/deployment/pytorch2onnx.py ${CONFIG_FILE} ${CHECKPOINT_FILE} --output-file ${ONNX_FILE} [--shape ${INPUT_SHAPE} --verify] +``` + +**Note**: This tool is still experimental. Some customized operators are not supported for now. For a detailed description of the usage and the list of supported models, please refer to [pytorch2onnx](tutorials/pytorch2onnx.md). + +### MMDetection 1.x model to MMDetection 2.x + +`tools/model_converters/upgrade_model_version.py` upgrades a previous MMDetection checkpoint +to the new version. Note that this script is not guaranteed to work as some +breaking changes are introduced in the new version. It is recommended to +directly use the new checkpoints. + +```shell +python tools/model_converters/upgrade_model_version.py ${IN_FILE} ${OUT_FILE} [-h] [--num-classes NUM_CLASSES] +``` + +### RegNet model to MMDetection + +`tools/model_converters/regnet2mmdet.py` convert keys in pycls pretrained RegNet models to +MMDetection style. + +```shell +python tools/model_converters/regnet2mmdet.py ${SRC} ${DST} [-h] +``` + +### Detectron ResNet to Pytorch + +`tools/model_converters/detectron2pytorch.py` converts keys in the original detectron pretrained +ResNet models to PyTorch style. + +```shell +python tools/model_converters/detectron2pytorch.py ${SRC} ${DST} ${DEPTH} [-h] +``` + +### Prepare a model for publishing + +`tools/model_converters/publish_model.py` helps users to prepare their model for publishing. + +Before you upload a model to AWS, you may want to + +1. convert model weights to CPU tensors +2. delete the optimizer states and +3. compute the hash of the checkpoint file and append the hash id to the + filename. + +```shell +python tools/model_converters/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +E.g., + +```shell +python tools/model_converters/publish_model.py work_dirs/faster_rcnn/latest.pth faster_rcnn_r50_fpn_1x_20190801.pth +``` + +The final output filename will be `faster_rcnn_r50_fpn_1x_20190801-{hash id}.pth`. + +## Dataset Conversion + +`tools/data_converters/` contains tools to convert the Cityscapes dataset +and Pascal VOC dataset to the COCO format. 
+ +```shell +python tools/dataset_converters/cityscapes.py ${CITYSCAPES_PATH} [-h] [--img-dir ${IMG_DIR}] [--gt-dir ${GT_DIR}] [-o ${OUT_DIR}] [--nproc ${NPROC}] +python tools/dataset_converters/pascal_voc.py ${DEVKIT_PATH} [-h] [-o ${OUT_DIR}] +``` + +## Dataset Download + +`tools/misc/download_dataset.py` supports downloading datasets such as COCO, VOC, and LVIS. + +```shell +python tools/misc/download_dataset.py --dataset-name coco2017 +python tools/misc/download_dataset.py --dataset-name voc2007 +python tools/misc/download_dataset.py --dataset-name lvis +``` + +## Benchmark + +### Robust Detection Benchmark + +`tools/analysis_tools/test_robustness.py` and`tools/analysis_tools/robustness_eval.py` helps users to evaluate model robustness. The core idea comes from [Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming](https://arxiv.org/abs/1907.07484). For more information how to evaluate models on corrupted images and results for a set of standard models please refer to [robustness_benchmarking.md](robustness_benchmarking.md). + +### FPS Benchmark + +`tools/analysis_tools/benchmark.py` helps users to calculate FPS. The FPS value includes model forward and post-processing. In order to get a more accurate value, currently only supports single GPU distributed startup mode. + +```shell +python -m torch.distributed.launch --nproc_per_node=1 --master_port=${PORT} tools/analysis_tools/benchmark.py \ + ${CONFIG} \ + ${CHECKPOINT} \ + [--repeat-num ${REPEAT_NUM}] \ + [--max-iter ${MAX_ITER}] \ + [--log-interval ${LOG_INTERVAL}] \ + --launcher pytorch +``` + +Examples: Assuming that you have already downloaded the `Faster R-CNN` model checkpoint to the directory `checkpoints/`. + +```shell +python -m torch.distributed.launch --nproc_per_node=1 --master_port=29500 tools/analysis_tools/benchmark.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --launcher pytorch +``` + +## Miscellaneous + +### Evaluating a metric + +`tools/analysis_tools/eval_metric.py` evaluates certain metrics of a pkl result file +according to a config file. + +```shell +python tools/analysis_tools/eval_metric.py ${CONFIG} ${PKL_RESULTS} [-h] [--format-only] [--eval ${EVAL[EVAL ...]}] + [--cfg-options ${CFG_OPTIONS [CFG_OPTIONS ...]}] + [--eval-options ${EVAL_OPTIONS [EVAL_OPTIONS ...]}] +``` + +### Print the entire config + +`tools/misc/print_config.py` prints the whole config verbatim, expanding all its +imports. + +```shell +python tools/misc/print_config.py ${CONFIG} [-h] [--options ${OPTIONS [OPTIONS...]}] +``` + +## Hyper-parameter Optimization + +### YOLO Anchor Optimization + +`tools/analysis_tools/optimize_anchors.py` provides two method to optimize YOLO anchors. + +One is k-means anchor cluster which refers from [darknet](https://github.com/AlexeyAB/darknet/blob/master/src/detector.c#L1421). + +```shell +python tools/analysis_tools/optimize_anchors.py ${CONFIG} --algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} --output-dir ${OUTPUT_DIR} +``` + +Another is using differential evolution to optimize anchors. 
+ +```shell +python tools/analysis_tools/optimize_anchors.py ${CONFIG} --algorithm differential_evolution --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} --output-dir ${OUTPUT_DIR} +``` + +E.g., + +```shell +python tools/analysis_tools/optimize_anchors.py configs/yolo/yolov3_d53_320_273e_coco.py --algorithm differential_evolution --input-shape 608 608 --device cuda --output-dir work_dirs +``` + +You will get: + +``` +loading annotations into memory... +Done (t=9.70s) +creating index... +index created! +2021-07-19 19:37:20,951 - mmdet - INFO - Collecting bboxes from annotation... +[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 117266/117266, 15874.5 task/s, elapsed: 7s, ETA: 0s + +2021-07-19 19:37:28,753 - mmdet - INFO - Collected 849902 bboxes. +differential_evolution step 1: f(x)= 0.506055 +differential_evolution step 2: f(x)= 0.506055 +...... + +differential_evolution step 489: f(x)= 0.386625 +2021-07-19 19:46:40,775 - mmdet - INFO Anchor evolution finish. Average IOU: 0.6133754253387451 +2021-07-19 19:46:40,776 - mmdet - INFO Anchor differential evolution result:[[10, 12], [15, 30], [32, 22], [29, 59], [61, 46], [57, 116], [112, 89], [154, 198], [349, 336]] +2021-07-19 19:46:40,798 - mmdet - INFO Result saved in work_dirs/anchor_optimize_result.json +``` + +## Confusion Matrix + +A confusion matrix is a summary of prediction results. + +`tools/analysis_tools/confusion_matrix.py` can analyze the prediction results and plot a confusion matrix table. + +First, run `tools/test.py` to save the `.pkl` detection results. + +Then, run + +``` +python tools/analysis_tools/confusion_matrix.py ${CONFIG} ${DETECTION_RESULTS} ${SAVE_DIR} --show +``` + +And you will get a confusion matrix like this: + +![confusion_matrix_example](https://user-images.githubusercontent.com/12907710/140513068-994cdbf4-3a4a-48f0-8fd8-2830d93fd963.png) diff --git a/downstream/mmdetection/docs/zh_cn/1_exist_data_model.md b/downstream/mmdetection/docs/zh_cn/1_exist_data_model.md new file mode 100644 index 0000000..e349343 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/1_exist_data_model.md @@ -0,0 +1,678 @@ +# 1: 使用已有模型在标准数据集上进行推理 + +MMDetection 在 [Model Zoo](https://mmdetection.readthedocs.io/en/latest/model_zoo.html) 中提供了数以百计的检测模型,并支持多种标准数据集,包括 Pascal VOC,COCO,Cityscapes,LVIS 等。这份文档将会讲述如何使用这些模型和标准数据集来运行一些常见的任务,包括: + +- 使用现有模型在给定图片上进行推理 +- 在标准数据集上测试现有模型 +- 在标准数据集上训练预定义的模型 + +## 使用现有模型进行推理 + +推理是指使用训练好的模型来检测图像上的目标。在 MMDetection 中,一个模型被定义为一个配置文件和对应的存储在 checkpoint 文件内的模型参数的集合。 + +首先,我们建议从 [Faster RCNN](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) 开始,其 [配置](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) 文件和 [checkpoint](http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) 文件在此。 +我们建议将 checkpoint 文件下载到 `checkpoints` 文件夹内。 + +### 推理的高层编程接口 + +MMDetection 为在图片上推理提供了 Python 的高层编程接口。下面是建立模型和在图像或视频上进行推理的例子。 + +```python +from mmdet.apis import init_detector, inference_detector +import mmcv + +# 指定模型的配置文件和 checkpoint 文件路径 +config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + +# 根据配置文件和 checkpoint 文件构建模型 +model = init_detector(config_file, checkpoint_file, device='cuda:0') + +# 测试单张图片并展示结果 +img = 'test.jpg' # 或者 img = mmcv.imread(img),这样图片仅会被读一次 +result = inference_detector(model, img) +# 在一个新的窗口中将结果可视化 +model.show_result(img, result) +# 或者将可视化结果保存为图片 
+model.show_result(img, result, out_file='result.jpg') + +# 测试视频并展示结果 +video = mmcv.VideoReader('video.mp4') +for frame in video: + result = inference_detector(model, frame) + model.show_result(frame, result, wait_time=1) +``` + +jupyter notebook 上的演示样例在 [demo/inference_demo.ipynb](https://github.com/open-mmlab/mmdetection/blob/master/demo/inference_demo.ipynb) 。 + +### 异步接口-支持 Python 3.7+ + +对于 Python 3.7+,MMDetection 也有异步接口。利用 CUDA 流,绑定 GPU 的推理代码不会阻塞 CPU,从而使得 CPU/GPU 在单线程应用中能达到更高的利用率。在推理流程中,不同数据样本的推理和不同模型的推理都能并发地运行。 + +您可以参考 `tests/async_benchmark.py` 来对比同步接口和异步接口的运行速度。 + +```python +import asyncio +import torch +from mmdet.apis import init_detector, async_inference_detector +from mmdet.utils.contextmanagers import concurrent + +async def main(): + config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' + checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' + device = 'cuda:0' + model = init_detector(config_file, checkpoint=checkpoint_file, device=device) + + # 此队列用于并行推理多张图像 + streamqueue = asyncio.Queue() + # 队列大小定义了并行的数量 + streamqueue_size = 3 + + for _ in range(streamqueue_size): + streamqueue.put_nowait(torch.cuda.Stream(device=device)) + + # 测试单张图片并展示结果 + img = 'test.jpg' # or 或者 img = mmcv.imread(img),这样图片仅会被读一次 + + async with concurrent(streamqueue): + result = await async_inference_detector(model, img) + + # 在一个新的窗口中将结果可视化 + model.show_result(img, result) + # 或者将可视化结果保存为图片 + model.show_result(img, result, out_file='result.jpg') + + +asyncio.run(main()) + +``` + +### 演示样例 + +我们还提供了三个演示脚本,它们是使用高层编程接口实现的。 [源码在此](https://github.com/open-mmlab/mmdetection/tree/master/demo) 。 + +#### 图片样例 + +这是在单张图片上进行推理的脚本,可以开启 `--async-test` 来进行异步推理。 + +```shell +python demo/image_demo.py \ + ${IMAGE_FILE} \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--device ${GPU_ID}] \ + [--score-thr ${SCORE_THR}] \ + [--async-test] +``` + +运行样例: + +```shell +python demo/image_demo.py demo/demo.jpg \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --device cpu +``` + +#### 摄像头样例 + +这是使用摄像头实时图片的推理脚本。 + +```shell +python demo/webcam_demo.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--device ${GPU_ID}] \ + [--camera-id ${CAMERA-ID}] \ + [--score-thr ${SCORE_THR}] +``` + +运行样例: + +```shell +python demo/webcam_demo.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth +``` + +#### 视频样例 + +这是在视频样例上进行推理的脚本。 + +```shell +python demo/video_demo.py \ + ${VIDEO_FILE} \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--device ${GPU_ID}] \ + [--score-thr ${SCORE_THR}] \ + [--out ${OUT_FILE}] \ + [--show] \ + [--wait-time ${WAIT_TIME}] +``` + +运行样例: + +```shell +python demo/video_demo.py demo/demo.mp4 \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --out result.mp4 +``` + +#### 视频样例,显卡加速版本 + +这是在视频样例上进行推理的脚本,使用显卡加速。 + +```shell +python demo/video_gpuaccel_demo.py \ + ${VIDEO_FILE} \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--device ${GPU_ID}] \ + [--score-thr ${SCORE_THR}] \ + [--nvdecode] \ + [--out ${OUT_FILE}] \ + [--show] \ + [--wait-time ${WAIT_TIME}] + +``` + +运行样例: + +```shell +python demo/video_gpuaccel_demo.py demo/demo.mp4 \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --nvdecode --out result.mp4 +``` + +## 在标准数据集上测试现有模型 + +为了测试一个模型的精度,我们通常会在标准数据集上对其进行测试。MMDetection 
支持多个公共数据集,包括 [COCO](https://cocodataset.org/) , +[Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC) ,[Cityscapes](https://www.cityscapes-dataset.com/) 等等。 +这一部分将会介绍如何在支持的数据集上测试现有模型。 + +### 数据集准备 + +一些公共数据集,比如 Pascal VOC 及其镜像数据集,或者 COCO 等数据集都可以从官方网站或者镜像网站获取。 +注意:在检测任务中,Pascal VOC 2012 是 Pascal VOC 2007 的无交集扩展,我们通常将两者一起使用。 +我们建议将数据集下载,然后解压到项目外部的某个文件夹内,然后通过符号链接的方式,将数据集根目录链接到 `$MMDETECTION/data` 文件夹下,格式如下所示。 +如果你的文件夹结构和下方不同的话,你需要在配置文件中改变对应的路径。 +我们提供了下载 COCO 等数据集的脚本,你可以运行 `python tools/misc/download_dataset.py --dataset-name coco2017` 下载 COCO 数据集。 + +```plain +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +│ ├── cityscapes +│ │ ├── annotations +│ │ ├── leftImg8bit +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── gtFine +│ │ │ ├── train +│ │ │ ├── val +│ ├── VOCdevkit +│ │ ├── VOC2007 +│ │ ├── VOC2012 +``` + +有些模型需要额外的 [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) 数据集,比如 HTC,DetectoRS 和 SCNet,你可以下载并解压它们到 `coco` 文件夹下。文件夹会是如下结构: + +```plain +mmdetection +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +│ │ ├── stuffthingmaps +``` + +PanopticFPN 等全景分割模型需要额外的 [COCO Panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) 数据集,你可以下载并解压它们到 `coco/annotations` 文件夹下。文件夹会是如下结构: + +```text +mmdetection +├── data +│ ├── coco +│ │ ├── annotations +│ │ │ ├── panoptic_train2017.json +│ │ │ ├── panoptic_train2017 +│ │ │ ├── panoptic_val2017.json +│ │ │ ├── panoptic_val2017 +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +``` + +Cityscape 数据集的标注格式需要转换,以与 COCO 数据集标注格式保持一致,使用 `tools/dataset_converters/cityscapes.py` 来完成转换: + +```shell +pip install cityscapesscripts + +python tools/dataset_converters/cityscapes.py \ + ./data/cityscapes \ + --nproc 8 \ + --out-dir ./data/cityscapes/annotations +``` + +### 测试现有模型 + +我们提供了测试脚本,能够测试一个现有模型在所有数据集(COCO,Pascal VOC,Cityscapes 等)上的性能。我们支持在如下环境下测试: + +- 单 GPU 测试 +- CPU 测试 +- 单节点多 GPU 测试 +- 多节点测试 + +根据以上测试环境,选择合适的脚本来执行测试过程。 + +```shell +# 单 GPU 测试 +python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--out ${RESULT_FILE}] \ + [--eval ${EVAL_METRICS}] \ + [--show] + +# CPU 测试:禁用 GPU 并运行单 GPU 测试脚本 +export CUDA_VISIBLE_DEVICES=-1 +python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--out ${RESULT_FILE}] \ + [--eval ${EVAL_METRICS}] \ + [--show] + +# 单节点多 GPU 测试 +bash tools/dist_test.sh \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + ${GPU_NUM} \ + [--out ${RESULT_FILE}] \ + [--eval ${EVAL_METRICS}] +``` + +`tools/dist_test.sh` 也支持多节点测试,不过需要依赖 PyTorch 的 [启动工具](https://pytorch.org/docs/stable/distributed.html#launch-utility) 。 + +可选参数: + +- `RESULT_FILE`: 结果文件名称,需以 .pkl 形式存储。如果没有声明,则不将结果存储到文件。 +- `EVAL_METRICS`: 需要测试的度量指标。可选值是取决于数据集的,比如 `proposal_fast`,`proposal`,`bbox`,`segm` 是 COCO 数据集的可选值,`mAP`,`recall` 是 Pascal VOC 数据集的可选值。Cityscapes 数据集可以测试 `cityscapes` 和所有 COCO 数据集支持的度量指标。 +- `--show`: 如果开启,检测结果将被绘制在图像上,以一个新窗口的形式展示。它只适用于单 GPU 的测试,是用于调试和可视化的。请确保使用此功能时,你的 GUI 可以在环境中打开。否则,你可能会遇到这么一个错误 `cannot connect to X server`。 +- `--show-dir`: 如果指明,检测结果将会被绘制在图像上并保存到指定目录。它只适用于单 GPU 的测试,是用于调试和可视化的。即使你的环境中没有 GUI,这个选项也可使用。 +- `--show-score-thr`: 如果指明,得分低于此阈值的检测结果将会被移除。 +- `--cfg-options`: 如果指明,这里的键值对将会被合并到配置文件中。 +- `--eval-options`: 如果指明,这里的键值对将会作为字典参数被传入 `dataset.evaluation()` 函数中,仅在测试阶段使用。 + +### 样例 + +假设你已经下载了 checkpoint 文件到 `checkpoints/` 文件下了。 + +1. 
测试 Faster R-CNN 并可视化其结果。按任意键继续下张图片的测试。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) 。 + + ```shell + python tools/test.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --show + ``` + +2. 测试 Faster R-CNN,并为了之后的可视化保存绘制的图像。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) 。 + + ```shell + python tools/test.py \ + configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + --show-dir faster_rcnn_r50_fpn_1x_results + ``` + +3. 在 Pascal VOC 数据集上测试 Faster R-CNN,不保存测试结果,测试 `mAP`。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc) 。 + + ```shell + python tools/test.py \ + configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc.py \ + checkpoints/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth \ + --eval mAP + ``` + +4. 使用 8 块 GPU 测试 Mask R-CNN,测试 `bbox` 和 `mAP` 。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) 。 + + ```shell + ./tools/dist_test.sh \ + configs/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + 8 \ + --out results.pkl \ + --eval bbox segm + ``` + +5. 使用 8 块 GPU 测试 Mask R-CNN,测试**每类**的 `bbox` 和 `mAP`。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) 。 + + ```shell + ./tools/dist_test.sh \ + configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + 8 \ + --out results.pkl \ + --eval bbox segm \ + --options "classwise=True" + ``` + +6. 在 COCO test-dev 数据集上,使用 8 块 GPU 测试 Mask R-CNN,并生成 JSON 文件提交到官方评测服务器。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) 。 + + ```shell + ./tools/dist_test.sh \ + configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + 8 \ + --format-only \ + --options "jsonfile_prefix=./mask_rcnn_test-dev_results" + ``` + +这行命令生成两个 JSON 文件 `mask_rcnn_test-dev_results.bbox.json` 和 `mask_rcnn_test-dev_results.segm.json`。 + +7. 
在 Cityscapes 数据集上,使用 8 块 GPU 测试 Mask R-CNN,生成 txt 和 png 文件,并上传到官方评测服务器。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes) 。 + + ```shell + ./tools/dist_test.sh \ + configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py \ + checkpoints/mask_rcnn_r50_fpn_1x_cityscapes_20200227-afe51d5a.pth \ + 8 \ + --format-only \ + --options "txtfile_prefix=./mask_rcnn_cityscapes_test_results" + ``` + +生成的 png 和 txt 文件在 `./mask_rcnn_cityscapes_test_results` 文件夹下。 + +### 不使用 Ground Truth 标注进行测试 + +MMDetection 支持在不使用 ground-truth 标注的情况下对模型进行测试,这需要用到 `CocoDataset`。如果你的数据集格式不是 COCO 格式的,请将其转化成 COCO 格式。如果你的数据集格式是 VOC 或者 Cityscapes,你可以使用 [tools/dataset_converters](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters) 内的脚本直接将其转化成 COCO 格式。如果是其他格式,可以使用 [images2coco 脚本](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters/images2coco.py) 进行转换。 + +```shell +python tools/dataset_converters/images2coco.py \ + ${IMG_PATH} \ + ${CLASSES} \ + ${OUT} \ + [--exclude-extensions] +``` + +参数: + +- `IMG_PATH`: 图片根路径。 +- `CLASSES`: 类列表文本文件名。文本中每一行存储一个类别。 +- `OUT`: 输出 json 文件名。 默认保存目录和 `IMG_PATH` 在同一级。 +- `exclude-extensions`: 待排除的文件后缀名。 + +在转换完成后,使用如下命令进行测试 + +```shell +# 单 GPU 测试 +python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + --format-only \ + --options ${JSONFILE_PREFIX} \ + [--show] + +# CPU 测试:禁用 GPU 并运行单 GPU 测试脚本 +export CUDA_VISIBLE_DEVICES=-1 +python tools/test.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + [--out ${RESULT_FILE}] \ + [--eval ${EVAL_METRICS}] \ + [--show] + +# 单节点多 GPU 测试 +bash tools/dist_test.sh \ + ${CONFIG_FILE} \ + ${CHECKPOINT_FILE} \ + ${GPU_NUM} \ + --format-only \ + --options ${JSONFILE_PREFIX} \ + [--show] +``` + +假设 [model zoo](https://mmdetection.readthedocs.io/en/latest/modelzoo_statistics.html) 中的 checkpoint 文件被下载到了 `checkpoints/` 文件夹下, +我们可以使用以下命令,用 8 块 GPU 在 COCO test-dev 数据集上测试 Mask R-CNN,并且生成 JSON 文件。 + +```sh +./tools/dist_test.sh \ + configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ + checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ + 8 \ + -format-only \ + --options "jsonfile_prefix=./mask_rcnn_test-dev_results" +``` + +这行命令生成两个 JSON 文件 `mask_rcnn_test-dev_results.bbox.json` 和 `mask_rcnn_test-dev_results.segm.json`。 + +### 批量推理 + +MMDetection 在测试模式下,既支持单张图片的推理,也支持对图像进行批量推理。默认情况下,我们使用单张图片的测试,你可以通过修改测试数据配置文件中的 `samples_per_gpu` 来开启批量测试。 +开启批量推理的配置文件修改方法为: + +```shell +data = dict(train=dict(...), val=dict(...), test=dict(samples_per_gpu=2, ...)) +``` + +或者你可以通过将 `--cfg-options` 设置为 `--cfg-options data.test.samples_per_gpu=2` 来开启它。 + +### 弃用 ImageToTensor + +在测试模式下,弃用 `ImageToTensor` 流程,取而代之的是 `DefaultFormatBundle`。建议在你的测试数据流程的配置文件中手动替换它,如: + +```python +# (已弃用)使用 ImageToTensor +pipelines = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) + ] + +# (建议使用)手动将 ImageToTensor 替换为 DefaultFormatBundle +pipelines = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + 
dict(type='Collect', keys=['img']), + ]) + ] +``` + +## 在标准数据集上训练预定义的模型 + +MMDetection 也为训练检测模型提供了开盖即食的工具。本节将展示在标准数据集(比如 COCO)上如何训练一个预定义的模型。 + +### 数据集 + +训练需要准备好数据集,细节请参考 [数据集准备](#%E6%95%B0%E6%8D%AE%E9%9B%86%E5%87%86%E5%A4%87) 。 + +**注意**: +目前,`configs/cityscapes` 文件夹下的配置文件都是使用 COCO 预训练权值进行初始化的。如果网络连接不可用或者速度很慢,你可以提前下载现存的模型。否则可能在训练的开始会有错误发生。 + +### 学习率自动缩放 + +**注意**:在配置文件中的学习率是在 8 块 GPU,每块 GPU 有 2 张图像(批大小为 8\*2=16)的情况下设置的。其已经设置在`config/_base_/default_runtime.py` 中的 `auto_scale_lr.base_batch_size`。当配置文件的批次大小为`16`时,学习率会基于该值进行自动缩放。同时,为了不影响其他基于 mmdet 的 codebase,启用自动缩放标志 `auto_scale_lr.enable` 默认设置为 `False`。 + +如果要启用此功能,需在命令添加参数 `--auto-scale-lr`。并且在启动命令之前,请检查下即将使用的配置文件的名称,因为配置名称指示默认的批处理大小。 +在默认情况下,批次大小是 `8 x 2 = 16`,例如:`faster_rcnn_r50_caffe_fpn_90k_coco.py` 或者 `pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py`;若不是默认批次,你可以在配置文件看到像 `_NxM_` 字样的,例如:`cornernet_hourglass104_mstest_32x3_210e_coco.py` 的批次大小是 `32 x 3 = 96`, 或者 `scnet_x101_64x4d_fpn_8x1_20e_coco.py` 的批次大小是 `8 x 1 = 8`。 + +**请记住:如果使用不是默认批次大小为`16`的配置文件,请检查配置文件中的底部,会有 `auto_scale_lr.base_batch_size`。如果找不到,可以在其继承的 `_base_=[xxx]` 文件中找到。另外,如果想使用自动缩放学习率的功能,请不要修改这些值。** + +学习率自动缩放基本用法如下: + +```shell +python tools/train.py \ + ${CONFIG_FILE} \ + --auto-scale-lr \ + [optional arguments] +``` + +执行命令之后,会根据机器的GPU数量和训练的批次大小对学习率进行自动缩放,缩放方式详见 [线性扩展规则](https://arxiv.org/abs/1706.02677) ,比如:在 4 块 GPU 并且每张 GPU 上有 2 张图片的情况下 `lr=0.01`,那么在 16 块 GPU 并且每张 GPU 上有 4 张图片的情况下, LR 会自动缩放至`lr=0.08`。 + +如果不启用该功能,则需要根据 [线性扩展规则](https://arxiv.org/abs/1706.02677) 来手动计算并修改配置文件里面 `optimizer.lr` 的值。 + +### 使用单 GPU 训练 + +我们提供了 `tools/train.py` 来开启在单张 GPU 上的训练任务。基本使用如下: + +```shell +python tools/train.py \ + ${CONFIG_FILE} \ + [optional arguments] +``` + +在训练期间,日志文件和 checkpoint 文件将会被保存在工作目录下,它需要通过配置文件中的 `work_dir` 或者 CLI 参数中的 `--work-dir` 来指定。 + +默认情况下,模型将在每轮训练之后在 validation 集上进行测试,测试的频率可以通过设置配置文件来指定: + +```python +# 每 12 轮迭代进行一次测试评估 +evaluation = dict(interval=12) +``` + +这个工具接受以下参数: + +- `--no-validate` (**不建议**): 在训练期间关闭测试. +- `--work-dir ${WORK_DIR}`: 覆盖工作目录. +- `--resume-from ${CHECKPOINT_FILE}`: 从某个 checkpoint 文件继续训练. +- `--options 'Key=value'`: 覆盖使用的配置文件中的其他设置. 
+ +**注意**: +`resume-from` 和 `load-from` 的区别: + +`resume-from` 既加载了模型的权重和优化器的状态,也会继承指定 checkpoint 的迭代次数,不会重新开始训练。`load-from` 则是只加载模型的权重,它的训练是从头开始的,经常被用于微调模型。 + +### 使用 CPU 训练 + +使用 CPU 训练的流程和使用单 GPU 训练的流程一致,我们仅需要在训练流程开始前禁用 GPU。 + +```shell +export CUDA_VISIBLE_DEVICES=-1 +``` + +之后运行单 GPU 训练脚本即可。 + +**注意**: + +我们不推荐用户使用 CPU 进行训练,这太过缓慢。我们支持这个功能是为了方便用户在没有 GPU 的机器上进行调试。 + +### 在多 GPU 上训练 + +我们提供了 `tools/dist_train.sh` 来开启在多 GPU 上的训练。基本使用如下: + +```shell +bash ./tools/dist_train.sh \ + ${CONFIG_FILE} \ + ${GPU_NUM} \ + [optional arguments] +``` + +可选参数和单 GPU 训练的可选参数一致。 + +#### 同时启动多个任务 + +如果你想在一台机器上启动多个任务的话,比如在一个有 8 块 GPU 的机器上启动 2 个需要 4 块GPU的任务,你需要给不同的训练任务指定不同的端口(默认为 29500)来避免冲突。 + +如果你使用 `dist_train.sh` 来启动训练任务,你可以使用命令来设置端口。 + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +### 使用多台机器训练 + +如果您想使用由 ethernet 连接起来的多台机器, 您可以使用以下命令: + +在第一台机器上: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS +``` + +在第二台机器上: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS +``` + +但是,如果您不使用高速网路连接这几台机器的话,训练将会非常慢。 + +### 使用 Slurm 来管理任务 + +Slurm 是一个常见的计算集群调度系统。在 Slurm 管理的集群上,你可以使用 `slurm.sh` 来开启训练任务。它既支持单节点训练也支持多节点训练。 + +基本使用如下: + +```shell +[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} +``` + +以下是在一个名称为 _dev_ 的 Slurm 分区上,使用 16 块 GPU 来训练 Mask R-CNN 的例子,并且将 `work-dir` 设置在了某些共享文件系统下。 + +```shell +GPUS=16 ./tools/slurm_train.sh dev mask_r50_1x configs/mask_rcnn_r50_fpn_1x_coco.py /nfs/xxxx/mask_rcnn_r50_fpn_1x +``` + +你可以查看 [源码](https://github.com/open-mmlab/mmdetection/blob/master/tools/slurm_train.sh) 来检查全部的参数和环境变量. + +在使用 Slurm 时,端口需要以下方的某个方法之一来设置。 + +1. 通过 `--options` 来设置端口。我们非常建议用这种方法,因为它无需改变原始的配置文件。 + + ```shell + CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} --options 'dist_params.port=29500' + CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} --options 'dist_params.port=29501' + ``` + +2. 修改配置文件来设置不同的交流端口。 + + 在 `config1.py` 中,设置: + + ```python + dist_params = dict(backend='nccl', port=29500) + ``` + + 在 `config2.py` 中,设置: + + ```python + dist_params = dict(backend='nccl', port=29501) + ``` + + 然后你可以使用 `config1.py` 和 `config2.py` 来启动两个任务了。 + + ```shell + CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} + CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} + ``` diff --git a/downstream/mmdetection/docs/zh_cn/2_new_data_model.md b/downstream/mmdetection/docs/zh_cn/2_new_data_model.md new file mode 100644 index 0000000..f760c51 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/2_new_data_model.md @@ -0,0 +1,267 @@ +# 2: 在自定义数据集上进行训练 + +通过本文档,你将会知道如何使用自定义数据集对预先定义好的模型进行推理,测试以及训练。我们使用 [balloon dataset](https://github.com/matterport/Mask_RCNN/tree/master/samples/balloon) 作为例子来描述整个过程。 + +基本步骤如下: + +1. 准备自定义数据集 +2. 准备配置文件 +3. 在自定义数据集上进行训练,测试和推理。 + +## 准备自定义数据集 + +MMDetection 一共支持三种形式应用新数据集: + +1. 将数据集重新组织为 COCO 格式。 +2. 将数据集重新组织为一个中间格式。 +3. 
实现一个新的数据集。 + +我们通常建议使用前面两种方法,因为它们通常来说比第三种方法要简单。 + +在本文档中,我们展示一个例子来说明如何将数据转化为 COCO 格式。 + +**注意**:MMDetection 现只支持对 COCO 格式的数据集进行 mask AP 的评测。 + +所以用户如果要进行实例分割,只能将数据转成 COCO 格式。 + +### COCO标注格式 + +用于实例分割的 COCO 数据集格式如下所示,其中的键(key)都是必要的,参考[这里](https://cocodataset.org/#format-data)来获取更多细节。 + +```json +{ + "images": [image], + "annotations": [annotation], + "categories": [category] +} + + +image = { + "id": int, + "width": int, + "height": int, + "file_name": str, +} + +annotation = { + "id": int, + "image_id": int, + "category_id": int, + "segmentation": RLE or [polygon], + "area": float, + "bbox": [x,y,width,height], + "iscrowd": 0 or 1, +} + +categories = [{ + "id": int, + "name": str, + "supercategory": str, +}] +``` + +现在假设我们使用 balloon dataset。 + +下载了数据集之后,我们需要实现一个函数将标注格式转化为 COCO 格式。然后我们就可以使用已经实现的 `COCODataset` 类来加载数据并进行训练以及评测。 + +如果你浏览过新数据集,你会发现格式如下: + +```json +{'base64_img_data': '', + 'file_attributes': {}, + 'filename': '34020010494_e5cb88e1c4_k.jpg', + 'fileref': '', + 'regions': {'0': {'region_attributes': {}, + 'shape_attributes': {'all_points_x': [1020, + 1000, + 994, + 1003, + 1023, + 1050, + 1089, + 1134, + 1190, + 1265, + 1321, + 1361, + 1403, + 1428, + 1442, + 1445, + 1441, + 1427, + 1400, + 1361, + 1316, + 1269, + 1228, + 1198, + 1207, + 1210, + 1190, + 1177, + 1172, + 1174, + 1170, + 1153, + 1127, + 1104, + 1061, + 1032, + 1020], + 'all_points_y': [963, + 899, + 841, + 787, + 738, + 700, + 663, + 638, + 621, + 619, + 643, + 672, + 720, + 765, + 800, + 860, + 896, + 942, + 990, + 1035, + 1079, + 1112, + 1129, + 1134, + 1144, + 1153, + 1166, + 1166, + 1150, + 1136, + 1129, + 1122, + 1112, + 1084, + 1037, + 989, + 963], + 'name': 'polygon'}}}, + 'size': 1115004} +``` + +标注文件时是 JSON 格式的,其中所有键(key)组成了一张图片的所有标注。 + +其中将 balloon dataset 转化为 COCO 格式的代码如下所示。 + +```python + +import os.path as osp +import mmcv + +def convert_balloon_to_coco(ann_file, out_file, image_prefix): + data_infos = mmcv.load(ann_file) + + annotations = [] + images = [] + obj_count = 0 + for idx, v in enumerate(mmcv.track_iter_progress(data_infos.values())): + filename = v['filename'] + img_path = osp.join(image_prefix, filename) + height, width = mmcv.imread(img_path).shape[:2] + + images.append(dict( + id=idx, + file_name=filename, + height=height, + width=width)) + + bboxes = [] + labels = [] + masks = [] + for _, obj in v['regions'].items(): + assert not obj['region_attributes'] + obj = obj['shape_attributes'] + px = obj['all_points_x'] + py = obj['all_points_y'] + poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)] + poly = [p for x in poly for p in x] + + x_min, y_min, x_max, y_max = ( + min(px), min(py), max(px), max(py)) + + + data_anno = dict( + image_id=idx, + id=obj_count, + category_id=0, + bbox=[x_min, y_min, x_max - x_min, y_max - y_min], + area=(x_max - x_min) * (y_max - y_min), + segmentation=[poly], + iscrowd=0) + annotations.append(data_anno) + obj_count += 1 + + coco_format_json = dict( + images=images, + annotations=annotations, + categories=[{'id':0, 'name': 'balloon'}]) + mmcv.dump(coco_format_json, out_file) +``` + +使用如上的函数,用户可以成功将标注文件转化为 JSON 格式,之后可以使用 `CocoDataset` 对模型进行训练和评测。 + +## 准备配置文件 + +第二步需要准备一个配置文件来成功加载数据集。假设我们想要用 balloon dataset 来训练配备了 FPN 的 Mask R-CNN ,如下是我们的配置文件。假设配置文件命名为 `mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py`,相应保存路径为 `configs/balloon/`,配置文件内容如下所示。 + +```python +# 这个新的配置文件继承自一个原始配置文件,只需要突出必要的修改部分即可 +_base_ = 'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' + +# 我们需要对头中的类别数量进行修改来匹配数据集的标注 +model = dict( + roi_head=dict( + bbox_head=dict(num_classes=1), + 
mask_head=dict(num_classes=1))) + +# 修改数据集相关设置 +dataset_type = 'CocoDataset' +classes = ('balloon',) +data = dict( + train=dict( + img_prefix='balloon/train/', + classes=classes, + ann_file='balloon/train/annotation_coco.json'), + val=dict( + img_prefix='balloon/val/', + classes=classes, + ann_file='balloon/val/annotation_coco.json'), + test=dict( + img_prefix='balloon/val/', + classes=classes, + ann_file='balloon/val/annotation_coco.json')) + +# 我们可以使用预训练的 Mask R-CNN 来获取更好的性能 +load_from = 'checkpoints/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth' +``` + +## 训练一个新的模型 + +为了使用新的配置方法来对模型进行训练,你只需要运行如下命令。 + +```shell +python tools/train.py configs/balloon/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py +``` + +参考[情况 1](./1_exist_data_model.md)来获取更多详细的使用方法。 + +## 测试以及推理 + +为了测试训练完毕的模型,你只需要运行如下命令。 + +```shell +python tools/test.py configs/balloon/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py work_dirs/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py/latest.pth --eval bbox segm +``` + +参考[情况 1](./1_exist_data_model.md)来获取更多详细的使用方法。 diff --git a/downstream/mmdetection/docs/zh_cn/3_exist_data_new_model.md b/downstream/mmdetection/docs/zh_cn/3_exist_data_new_model.md new file mode 100644 index 0000000..e32e373 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/3_exist_data_new_model.md @@ -0,0 +1,283 @@ +# 3: 在标准数据集上训练自定义模型 + +在本文中,你将知道如何在标准数据集上训练、测试和推理自定义模型。我们将在 cityscapes 数据集上以自定义 Cascade Mask R-CNN R50 模型为例演示整个过程,为了方便说明,我们将 neck 模块中的 `FPN` 替换为 `AugFPN`,并且在训练中的自动增强类中增加 `Rotate` 或 `Translate`。 + +基本步骤如下所示: + +1. 准备标准数据集 +2. 准备你的自定义模型 +3. 准备配置文件 +4. 在标准数据集上对模型进行训练、测试和推理 + +## 准备标准数据集 + +在本文中,我们使用 cityscapes 标准数据集为例进行说明。 + +推荐将数据集根路径采用符号链接方式链接到 `$MMDETECTION/data`。 + +如果你的文件结构不同,你可能需要在配置文件中进行相应的路径更改。标准的文件组织格式如下所示: + +```none +mmdetection +├── mmdet +├── tools +├── configs +├── data +│ ├── coco +│ │ ├── annotations +│ │ ├── train2017 +│ │ ├── val2017 +│ │ ├── test2017 +│ ├── cityscapes +│ │ ├── annotations +│ │ ├── leftImg8bit +│ │ │ ├── train +│ │ │ ├── val +│ │ ├── gtFine +│ │ │ ├── train +│ │ │ ├── val +│ ├── VOCdevkit +│ │ ├── VOC2007 +│ │ ├── VOC2012 +``` + +你也可以通过如下方式设定数据集根路径 + +```bash +export MMDET_DATASETS=$data_root +``` + +我们将会使用环境便变量 `$MMDET_DATASETS` 作为数据集的根目录,因此你无需再修改相应配置文件的路径信息。 + +你需要使用脚本 `tools/dataset_converters/cityscapes.py` 将 cityscapes 标注转化为 coco 标注格式。 + +```shell +pip install cityscapesscripts +python tools/dataset_converters/cityscapes.py ./data/cityscapes --nproc 8 --out-dir ./data/cityscapes/annotations +``` + +目前在 `cityscapes `文件夹中的配置文件所对应模型是采用 COCO 预训练权重进行初始化的。 + +如果你的网络不可用或者比较慢,建议你先手动下载对应的预训练权重,否则可能在训练开始时候出现错误。 + +## 准备你的自定义模型 + +第二步是准备你的自定义模型或者训练相关配置。假设你想在已有的 Cascade Mask R-CNN R50 检测模型基础上,新增一个新的 neck 模块 `AugFPN` 去代替默认的 `FPN`,以下是具体实现: + +### 1 定义新的 neck (例如 AugFPN) + +首先创建新文件 `mmdet/models/necks/augfpn.py`. 
+ +```python +from ..builder import NECKS + +@NECKS.register_module() +class AugFPN(nn.Module): + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False): + pass + + def forward(self, inputs): + # implementation is ignored + pass +``` + +### 2 导入模块 + +你可以采用两种方式导入模块,第一种是在 `mmdet/models/necks/__init__.py` 中添加如下内容 + +```python +from .augfpn import AugFPN +``` + +第二种是增加如下代码到对应配置中,这种方式的好处是不需要改动代码 + +```python +custom_imports = dict( + imports=['mmdet.models.necks.augfpn.py'], + allow_failed_imports=False) +``` + +### 3 修改配置 + +```python +neck=dict( + type='AugFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5) +``` + +关于自定义模型其余相关细节例如实现新的骨架网络,头部网络、损失函数,以及运行时训练配置例如定义新的优化器、使用梯度裁剪、定制训练调度策略和钩子等,请参考文档 [自定义模型](tutorials/customize_models.md) 和 [自定义运行时训练配置](tutorials/customize_runtime.md)。 + +## 准备配置文件 + +第三步是准备训练配置所需要的配置文件。假设你打算基于 cityscapes 数据集,在 Cascade Mask R-CNN R50 中新增 `AugFPN` 模块,同时增加 `Rotate` 或者 `Translate` 数据增强策略,假设你的配置文件位于 `configs/cityscapes/` 目录下,并且取名为 `cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py`,则配置信息如下: + +```python +# 继承 base 配置,然后进行针对性修改 +_base_ = [ + '../_base_/models/cascade_mask_rcnn_r50_fpn.py', + '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' +] + +model = dict( + # 设置为 None,表示不加载 ImageNet 预训练权重, + # 后续可以设置 `load_from` 参数用来加载 COCO 预训练权重 + backbone=dict(init_cfg=None), + pretrained=None, + # 使用新增的 `AugFPN` 模块代替默认的 `FPN` + neck=dict( + type='AugFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + # 我们也需要将 num_classes 从 80 修改为 8 来匹配 cityscapes 数据集标注 + # 这个修改包括 `bbox_head` 和 `mask_head`. + roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + # 将 COCO 类别修改为 cityscapes 类别 + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + # 将 COCO 类别修改为 cityscapes 类别 + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + # 将 COCO 类别修改为 cityscapes 类别 + num_classes=8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + # 将 COCO 类别修改为 cityscapes 类别 + num_classes=8, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) + +# 覆写 `train_pipeline`,然后新增 `AutoAugment` 训练配置 +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( 
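+        # AutoAugment 每次被调用时会从下方 policies 列表中随机选取一组子策略来增强图像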
+ type='AutoAugment', + policies=[ + [dict( + type='Rotate', + level=5, + img_fill_val=(124, 116, 104), + prob=0.5, + scale=1) + ], + [dict(type='Rotate', level=7, img_fill_val=(124, 116, 104)), + dict( + type='Translate', + level=5, + prob=0.5, + img_fill_val=(124, 116, 104)) + ], + ]), + dict( + type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] + +# 设置每张显卡的批处理大小,同时设置新的训练 pipeline +data = dict( + samples_per_gpu=1, + workers_per_gpu=3, + # 用新的训练 pipeline 配置覆写 pipeline + train=dict(dataset=dict(pipeline=train_pipeline))) + +# 设置优化器 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# 设置定制的学习率策略 +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8]) +runner = dict(type='EpochBasedRunner', max_epochs=10) + +# 我们采用 COCO 预训练过的 Cascade Mask R-CNN R50 模型权重作为初始化权重,可以得到更加稳定的性能 +load_from = 'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth' +``` + +## 训练新模型 + +为了能够使用新增配置来训练模型,你可以运行如下命令: + +```shell +python tools/train.py configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py +``` + +如果想了解更多用法,可以参考 [例子1](1_exist_data_model.md)。 + +## 测试和推理 + +为了能够测试训练好的模型,你可以运行如下命令: + +```shell +python tools/test.py configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py work_dirs/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py/latest.pth --eval bbox segm +``` + +如果想了解更多用法,可以参考 [例子1](1_exist_data_model.md)。 diff --git a/downstream/mmdetection/docs/zh_cn/Makefile b/downstream/mmdetection/docs/zh_cn/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
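+# For example, `make html` runs `sphinx-build -M html . _build`.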
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/downstream/mmdetection/docs/zh_cn/_static/css/readthedocs.css b/downstream/mmdetection/docs/zh_cn/_static/css/readthedocs.css new file mode 100644 index 0000000..57ed0ad --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../image/mmdet-logo.png"); + background-size: 156px 40px; + height: 40px; + width: 156px; +} diff --git a/downstream/mmdetection/docs/zh_cn/_static/image/mmdet-logo.png b/downstream/mmdetection/docs/zh_cn/_static/image/mmdet-logo.png new file mode 100644 index 0000000..58e2b5e Binary files /dev/null and b/downstream/mmdetection/docs/zh_cn/_static/image/mmdet-logo.png differ diff --git a/downstream/mmdetection/docs/zh_cn/api.rst b/downstream/mmdetection/docs/zh_cn/api.rst new file mode 100644 index 0000000..c75a467 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/api.rst @@ -0,0 +1,108 @@ +mmdet.apis +-------------- +.. automodule:: mmdet.apis + :members: + +mmdet.core +-------------- + +anchor +^^^^^^^^^^ +.. automodule:: mmdet.core.anchor + :members: + +bbox +^^^^^^^^^^ +.. automodule:: mmdet.core.bbox + :members: + +export +^^^^^^^^^^ +.. automodule:: mmdet.core.export + :members: + +mask +^^^^^^^^^^ +.. automodule:: mmdet.core.mask + :members: + +evaluation +^^^^^^^^^^ +.. automodule:: mmdet.core.evaluation + :members: + +post_processing +^^^^^^^^^^^^^^^ +.. automodule:: mmdet.core.post_processing + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmdet.core.utils + :members: + +mmdet.datasets +-------------- + +datasets +^^^^^^^^^^ +.. automodule:: mmdet.datasets + :members: + +pipelines +^^^^^^^^^^ +.. automodule:: mmdet.datasets.pipelines + :members: + +samplers +^^^^^^^^^^ +.. automodule:: mmdet.datasets.samplers + :members: + +api_wrappers +^^^^^^^^^^ +.. automodule:: mmdet.datasets.api_wrappers + :members: + +mmdet.models +-------------- + +detectors +^^^^^^^^^^ +.. automodule:: mmdet.models.detectors + :members: + +backbones +^^^^^^^^^^ +.. automodule:: mmdet.models.backbones + :members: + +necks +^^^^^^^^^^^^ +.. automodule:: mmdet.models.necks + :members: + +dense_heads +^^^^^^^^^^^^ +.. automodule:: mmdet.models.dense_heads + :members: + +roi_heads +^^^^^^^^^^ +.. automodule:: mmdet.models.roi_heads + :members: + +losses +^^^^^^^^^^ +.. automodule:: mmdet.models.losses + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmdet.models.utils + :members: + +mmdet.utils +-------------- +.. 
automodule::mmdet.utils + :members: diff --git a/downstream/mmdetection/docs/zh_cn/article.md b/downstream/mmdetection/docs/zh_cn/article.md new file mode 100644 index 0000000..9cd6fb6 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/article.md @@ -0,0 +1,53 @@ +## 中文解读文案汇总 + +### 1 官方解读文案 + +#### 1.1 框架解读 + +- **[轻松掌握 MMDetection 整体构建流程(一)](https://zhuanlan.zhihu.com/p/337375549)** +- **[轻松掌握 MMDetection 整体构建流程(二)](https://zhuanlan.zhihu.com/p/341954021)** +- **[轻松掌握 MMDetection 中 Head 流程](https://zhuanlan.zhihu.com/p/343433169)** + +#### 1.2 算法解读 + +- **[轻松掌握 MMDetection 中常用算法(一):RetinaNet 及配置详解](https://zhuanlan.zhihu.com/p/346198300)** +- **[轻松掌握 MMDetection 中常用算法(二):Faster R-CNN|Mask R-CNN](https://zhuanlan.zhihu.com/p/349807581)** +- [轻松掌握 MMDetection 中常用算法(三):FCOS](https://zhuanlan.zhihu.com/p/358056615) +- [轻松掌握 MMDetection 中常用算法(四):ATSS](https://zhuanlan.zhihu.com/p/358125611) +- [轻松掌握 MMDetection 中常用算法(五):Cascade R-CNN](https://zhuanlan.zhihu.com/p/360952172) +- [轻松掌握 MMDetection 中常用算法(六):YOLOF](https://zhuanlan.zhihu.com/p/370758213) +- [轻松掌握 MMDetection 中常用算法(七):CenterNet](https://zhuanlan.zhihu.com/p/374891478) +- [轻松掌握 MMDetection 中常用算法(八):YOLACT](https://zhuanlan.zhihu.com/p/376347955) +- [轻松掌握 MMDetection 中常用算法(九):AutoAssign](https://zhuanlan.zhihu.com/p/378581552) +- [YOLOX 在 MMDetection 中复现全流程解析](https://zhuanlan.zhihu.com/p/398545304) +- [喂喂喂!你可以减重了!小模型 - MMDetection 新增SSDLite 、 MobileNetV2YOLOV3 两大经典算法](https://zhuanlan.zhihu.com/p/402781143) + +#### 1.3 工具解读 + +- [OpenMMLab 中混合精度训练 AMP 的正确打开方式](https://zhuanlan.zhihu.com/p/375224982) +- [小白都能看懂!手把手教你使用混淆矩阵分析目标检测](https://zhuanlan.zhihu.com/p/443499860) +- [MMDetection 图像缩放 Resize 详细说明 OpenMMLab](https://zhuanlan.zhihu.com/p/381117525) +- [拿什么拯救我的 4G 显卡](https://zhuanlan.zhihu.com/p/430123077) +- [MMDet居然能用MMCls的Backbone?论配置文件的打开方式](https://zhuanlan.zhihu.com/p/436865195) + +#### 1.4 知乎问答 + +- [COCO数据集上1x模式下为什么不采用多尺度训练?](https://www.zhihu.com/question/462170786/answer/1915119662) +- [MMDetection中SOTA论文源码中将训练过程中BN层的eval打开?](https://www.zhihu.com/question/471189603/answer/2195540892) +- [基于PyTorch的MMDetection中训练的随机性来自何处?](https://www.zhihu.com/question/453511684/answer/1839683634) +- [单阶段、双阶段、anchor-based、anchor-free 这四者之间有什么联系吗?](https://www.zhihu.com/question/428972054/answer/1619925296) +- [目标检测的深度学习方法,有推荐的书籍或资料吗?](https://www.zhihu.com/question/391577080/answer/1612593817) +- [大佬们,刚入学研究生,想入门目标检测,有什么学习路线可以入门的?](https://www.zhihu.com/question/343768934/answer/1612580715) +- [目标检测领域还有什么可以做的?](https://www.zhihu.com/question/280703314/answer/1627885518) +- [如何看待Transformer在CV上的应用前景,未来有可能替代CNN吗?](https://www.zhihu.com/question/437495132/answer/1686380553) +- [MMDetection如何学习源码?](https://www.zhihu.com/question/451585041/answer/1832498963) +- [如何具体上手实现目标检测呢?](https://www.zhihu.com/question/341401981/answer/1848561187) + +#### 1.5 其他 + +- **[不得不知的 MMDetection 学习路线(个人经验版)](https://zhuanlan.zhihu.com/p/369826931)** +- [OpenMMLab 社区专访之 YOLOX 复现篇](https://zhuanlan.zhihu.com/p/405913343) + +### 2 社区解读文案 + +- [手把手带你实现经典检测网络 Mask R-CNN 的推理](https://zhuanlan.zhihu.com/p/414082071) diff --git a/downstream/mmdetection/docs/zh_cn/compatibility.md b/downstream/mmdetection/docs/zh_cn/compatibility.md new file mode 100644 index 0000000..e9ebdd9 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/compatibility.md @@ -0,0 +1,177 @@ +# MMDetection v2.x 兼容性说明 + +## MMDetection 2.25.0 + +为了加入 Mask2Former 实例分割模型,对 Mask2Former 的配置文件进行了重命名 [PR #7571](https://github.com/open-mmlab/mmdetection/pull/7571): + + + + + + + + + + + +
+在 v2.25.0 之前:
+
+```
+'mask2former_xxx_coco.py' 代表全景分割的配置文件
+```
+
+v2.25.0 及之后:
+
+```
+'mask2former_xxx_coco.py' 代表实例分割的配置文件
+'mask2former_xxx_coco-panoptic.py' 代表全景分割的配置文件
+```
+
    + +## MMDetection 2.21.0 + +为了支持 CPU 训练,MMCV 中进行批处理的 scatter 的代码逻辑已经被修改。我们推荐使用 MMCV v1.4.4 或更高版本, +更多信息请参考 [MMCV PR #1621](https://github.com/open-mmlab/mmcv/pull/1621). + +## MMDetection 2.18.1 + +### MMCV compatibility + +为了修复 BaseTransformerLayer 中的权重引用问题, MultiheadAttention 中 batch first 的逻辑有所改变。 +我们推荐使用 MMCV v1.3.17 或更高版本。 更多信息请参考 [MMCV PR #1418](https://github.com/open-mmlab/mmcv/pull/1418) 。 + +## MMDetection 2.18.0 + +### DIIHead 兼容性 + +为了支持 QueryInst,在 DIIHead 的返回元组中加入了 attn_feats。 + +## MMDetection v2.14.0 + +### MMCV 版本 + +为了修复 EvalHook 优先级过低的问题,MMCV v1.3.8 中所有 hook 的优先级都重新进行了调整,因此 MMDetection v2.14.0 需要依赖最新的 MMCV v1.3.8 版本。 相关信息请参考[PR #1120](https://github.com/open-mmlab/mmcv/pull/1120) ,相关问题请参考[#5343](https://github.com/open-mmlab/mmdetection/issues/5343) 。 + +### SSD 兼容性 + +在 v2.14.0 中,为了使 SSD 能够被更灵活地使用,[PR #5291](https://github.com/open-mmlab/mmdetection/pull/5291) 重构了 SSD 的 backbone、neck 和 head。用户可以使用 tools/model_converters/upgrade_ssd_version.py 转换旧版本训练的模型。 + +```shell +python tools/model_converters/upgrade_ssd_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} + +``` + +- OLD_MODEL_PATH:旧版 SSD 模型的路径。 +- NEW_MODEL_PATH:保存转换后模型权重的路径。 + +## MMDetection v2.12.0 + +在 v2.12.0 到 v2.18.0(或以上)版本的这段时间,为了提升通用性和便捷性,MMDetection 正在进行大规模重构。在升级到 v2.12.0 后 MMDetection 不可避免地带来了一些 BC Breaking,包括 MMCV 的版本依赖、模型初始化方式、模型 registry 和 mask AP 的评估。 + +### MMCV 版本 + +MMDetection v2.12.0 依赖 MMCV v1.3.3 中新增加的功能,包括:使用 `BaseModule` 统一参数初始化,模型 registry,以及[Deformable DETR](https://arxiv.org/abs/2010.04159) 中的 `MultiScaleDeformableAttn` CUDA 算子。 +注意,尽管 MMCV v1.3.2 已经包含了 MMDet 所需的功能,但是存在一些已知的问题。我们建议用户跳过 MMCV v1.3.2 使用 v1.3.3 版本。 + +### 统一模型初始化 + +为了统一 OpenMMLab 项目中的参数初始化方式,MMCV 新增加了 `BaseModule` 类,使用 `init_cfg` 参数对模块进行统一且灵活的初始化配置管理。 +现在用户需要在训练脚本中显式调用 `model.init_weights()` 来初始化模型(例如 [这行代码](https://github.com/open-mmlab/mmdetection/blob/master/tools/train.py#L162) ,在这之前则是在 detector 中进行处理的。 +**下游项目必须相应地更新模型初始化方式才能使用 MMDetection v2.12.0**。请参阅 [PR #4750](https://github.com/open-mmlab/mmdetection/pull/4750) 了解详情。 + +### 统一模型 registry + +为了能够使用在其他 OpenMMLab 项目中实现的 backbone,MMDetection v2.12.0 继承了在 MMCV (#760) 中创建的模型 registry。 +这样,只要 OpenMMLab 项目实现了某个 backbone,并且该项目也使用 MMCV 中的 registry,那么用户只需修改配置即可在 MMDetection 中使用该 backbone,不再需要将代码复制到 MMDetection 中。 更多详细信息,请参阅 [PR #5059](https://github.com/open-mmlab/mmdetection/pull/5059) 。 + +### Mask AP 评估 + +在 [PR #4898](https://github.com/open-mmlab/mmdetection/pull/4898) 和 v2.12.0 之前,对小、中、大目标的 mask AP 的评估是基于其边界框区域而不是真正的 mask 区域。 +这导致 `APs` 和 `APm` 变得更高但 `APl` 变得更低,但是不会影响整体的 mask AP。 [PR #4898](https://github.com/open-mmlab/mmdetection/pull/4898) 删除了 mask AP 计算中的 `bbox` ,改为使用 mask 区域。 +新的计算方式不会影响整体的 mask AP 评估,与 [Detectron2](https://github.com/facebookresearch/detectron2/)一致。 + +## 与 MMDetection v1.x 的兼容性 + +MMDetection v2.0 经过了大规模重构并解决了许多遗留问题。 MMDetection v2.0 不兼容 v1.x 版本,在这两个版本中使用相同的模型权重运行推理会产生不同的结果。 因此,MMDetection v2.0 重新对所有模型进行了 benchmark,并在 model zoo 中提供了新模型的权重和训练记录。 + +新旧版本的主要的区别有四方面:坐标系、代码库约定、训练超参和模块设计。 + +### 坐标系 + +新坐标系与 [Detectron2](https://github.com/facebookresearch/detectron2/) 一致, +将最左上角的像素的中心视为坐标原点 (0, 0) 而不是最左上角像素的左上角。 因此 COCO 边界框和分割标注中的坐标被解析为范围 `[0,width]` 和 `[0,height]` 中的坐标。 这个修改影响了所有与 bbox 及像素选择相关的计算,变得更加自然且更加准确。 + +- 在新坐标系中,左上角和右下角为 (x1, y1) (x2, y2) 的框的宽度及高度计算公式为 `width = x2 - x1` 和 `height = y2 - y1`。 + 在 MMDetection v1.x 和之前的版本中,高度和宽度都多了 `+ 1` 的操作。 + 本次修改包括三部分: + + 1. box 回归中的检测框变换以及编码/解码。 + 2. IoU 计算。这会影响 ground truth 和检测框之间的匹配以及 NMS 。但对兼容性的影响可以忽略不计。 + 3. 
Box 的角点坐标为浮点型,不再取整。这能使得检测结果更为准确,也使得检测框和 RoI 的最小尺寸不再为 1,但影响很小。 + +- Anchor 的中心与特征图的网格点对齐,类型变为 float。 + 在 MMDetection v1.x 和之前的版本中,anchors 是 `int` 类型且没有居中对齐。 + 这会影响 RPN 中的 Anchor 生成和所有基于 Anchor 的方法。 + +- ROIAlign 更好地与图像坐标系对齐。新的实现来自 [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlign) 。 + 当 RoI 用于提取 RoI 特征时,与 MMDetection v1.x 相比默认情况下相差半个像素。 + 能够通过设置 `aligned=False` 而不是 `aligned=True` 来维持旧版本的设置。 + +- Mask 的裁剪和粘贴更准确。 + + 1. 我们使用新的 RoIAlign 来提取 mask 目标。 在 MMDetection v1.x 中,bounding box 在提取 mask 目标之前被取整,裁剪过程是 numpy 实现的。 而在新版本中,裁剪的边界框不经过取整直接输入 RoIAlign。 此实现大大加快了训练速度(每次迭代约加速 0.1 秒,1x schedule 训练 Mask R50 时加速约 2 小时)并且理论上会更准确。 + 2. 在 MMDetection v2.0 中,修改后的 `paste_mask()` 函数应该比之前版本更准确。 此更改参考了 [Detectron2](https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/masks.py) 中的修改,可以将 COCO 上的 mask AP 提高约 0.5%。 + +### 代码库约定 + +- MMDetection v2.0 更改了类别标签的顺序,减少了回归和 mask 分支里的无用参数并使得顺序更加自然(没有 +1 和 -1)。 + 这会影响模型的所有分类层,使其输出的类别标签顺序发生改变。回归分支和 mask head 的最后一层不再为 K 个类别保留 K+1 个通道,类别顺序与分类分支一致。 + + - 在 MMDetection v2.0 中,标签 “K” 表示背景,标签 \[0, K-1\] 对应于 K = num_categories 个对象类别。 + + - 在 MMDetection v1.x 及之前的版本中,标签 “0” 表示背景,标签 \[1, K\] 对应 K 个类别。 + + - **注意**:softmax RPN 的类顺序在 version\<=2.4.0 中仍然和 1.x 中的一样,而 sigmoid RPN 不受影响。从 MMDetection v2.5.0 开始,所有 head 中的类顺序是统一的。 + +- 不使用 R-CNN 中的低质量匹配。在 MMDetection v1.x 和之前的版本中,`max_iou_assigner` 会在 RPN 和 R-CNN 训练时给每个 ground truth 匹配低质量框。我们发现这会导致最佳的 GT 框不会被分配给某些边界框, + 因此,在MMDetection v2.0 的 R-CNN 训练中默认不允许低质量匹配。这有时可能会稍微改善 box AP(约为 0.1%)。 + +- 单独的宽高比例系数。在 MMDetection v1.x 和以前的版本中,`keep_ratio=True` 时比例系数是单个浮点数,这并不准确,因为宽度和高度的比例系数会有一定的差异。 MMDetection v2.0 对宽度和高度使用单独的比例系数,对 AP 的提升约为 0.1%。 + +- 修改了 config 文件名称的规范。 由于 model zoo 中模型不断增多, MMDetection v2.0 采用新的命名规则: + + ```shell + [model]_(model setting)_[backbone]_[neck]_(norm setting)_(misc)_(gpu x batch)_[schedule]_[dataset].py + ``` + + 其中 (`misc`) 包括 DCN 和 GCBlock 等。更多详细信息在 [配置文件说明文档](config.md) 中说明 + +- MMDetection v2.0 使用新的 ResNet Caffe backbone 来减少加载预训练模型时的警告。新 backbone 中的大部分权重与以前的相同,但没有 `conv.bias`,且它们使用不同的 `img_norm_cfg`。因此,新的 backbone 不会报 `unexpected keys` 的警告。 + +### 训练超参 + +训练超参的调整不会影响模型的兼容性,但会略微提高性能。主要有: + +- 通过设置 `nms_post=1000` 和 `max_num=1000`,将 nms 之后的 proposal 数量从 2000 更改为 1000。使 mask AP 和 bbox AP 提高了约 0.2%。 + +- Mask R-CNN、Faster R-CNN 和 RetinaNet 的默认回归损失从 smooth L1 损失更改为 L1 损失,使得 box AP 整体上都有所提升(约 0.6%)。但是,将 L1-loss 用在 Cascade R-CNN 和 HTC 等其他方法上并不能提高性能,因此我们保留这些方法的原始设置。 + +- 为简单起见,RoIAlign 层的 `sampling_ratio` 设置为 0。略微提升了 AP(约 0.2% 绝对值)。 + +- 为了提升训练速度,默认设置在训练过程中不再使用梯度裁剪。大多数模型的性能不会受到影响。对于某些模型(例如 RepPoints),我们依旧使用梯度裁剪来稳定训练过程从而获得更好的性能。 + +- 因为不再默认使用梯度裁剪,默认 warmup 比率从 1/3 更改为 0.001,以使模型训练预热更加平缓。不过我们重新进行基准测试时发现这种影响可以忽略不计。 + +### 将模型从 v1.x 升级至 v2.0 + +用户可以使用脚本 `tools/model_converters/upgrade_model_version.py` 来将 MMDetection 1.x 训练的模型转换为 MMDetection v2.0。转换后的模型可以在 MMDetection v2.0 中运行,但性能略有下降(小于 1% AP)。 +详细信息可以在 `configs/legacy` 中找到。 + +## pycocotools 兼容性 + +`mmpycocotools` 是 OpenMMLab 维护的 `pycocotools` 的复刻版,适用于 MMDetection 和 Detectron2。 +在 [PR #4939](https://github.com/open-mmlab/mmdetection/pull/4939) 之前,由于 `pycocotools` 和 `mmpycocotool` 具有相同的包名,如果用户已经安装了 `pyccocotools`(在相同环境下先安装了 Detectron2 ),那么 MMDetection 的安装过程会跳过安装 `mmpycocotool`。 导致 MMDetection 缺少 `mmpycocotools` 而报错。 +但如果在 Detectron2 之前安装 MMDetection,则可以在相同的环境下工作。 +[PR #4939](https://github.com/open-mmlab/mmdetection/pull/4939) 弃用 mmpycocotools,使用官方 pycocotools。 +在 [PR #4939](https://github.com/open-mmlab/mmdetection/pull/4939) 之后,用户能够在相同环境下安装 MMDetection 和 Detectron2,不再需要关注安装顺序。 diff --git 
a/downstream/mmdetection/docs/zh_cn/conf.py b/downstream/mmdetection/docs/zh_cn/conf.py new file mode 100644 index 0000000..1bb57a4 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/conf.py @@ -0,0 +1,118 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'MMDetection' +copyright = '2018-2021, OpenMMLab' +author = 'MMDetection Authors' +version_file = '../../mmdet/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_markdown_tables', + 'sphinx_copybutton', +] + +myst_enable_extensions = ['colon_fence'] +myst_heading_anchors = 3 + +autodoc_mock_imports = [ + 'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops' +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The master toctree document. +master_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# html_theme = 'sphinx_rtd_theme' +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +html_theme_options = { + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmdetection' + }, + ], + # Specify the language of shared menu + 'menu_lang': + 'cn', +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +language = 'zh_CN' + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. 
' +copybutton_prompt_is_regexp = True + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/downstream/mmdetection/docs/zh_cn/conventions.md b/downstream/mmdetection/docs/zh_cn/conventions.md new file mode 100644 index 0000000..acbb21e --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/conventions.md @@ -0,0 +1,75 @@ +# 默认约定 + +如果你想把 MMDetection 修改为自己的项目,请遵循下面的约定。 + +## 损失 + +在 MMDetection 中,`model(**data)` 的返回值是一个字典,包含着所有的损失和评价指标,他们将会由 `model(**data)` 返回。 + +例如,在 bbox head 中, + +```python +class BBoxHead(nn.Module): + ... + def loss(self, ...): + losses = dict() + # 分类损失 + losses['loss_cls'] = self.loss_cls(...) + # 分类准确率 + losses['acc'] = accuracy(...) + # 边界框损失 + losses['loss_bbox'] = self.loss_bbox(...) + return losses +``` + +`'bbox_head.loss()'` 在模型 forward 阶段会被调用。返回的字典中包含了 `'loss_bbox'`,`'loss_cls'`,`'acc'`。只有 `'loss_bbox'`, `'loss_cls'` 会被用于反向传播,`'acc'` 只会被作为评价指标来监控训练过程。 + +我们默认,只有那些键的名称中包含 `'loss'` 的值会被用于反向传播。这个行为可以通过修改 `BaseDetector.train_step()` 来改变。 + +## 空 proposals + +在 MMDetection 中,我们为两阶段方法中空 proposals 的情况增加了特殊处理和单元测试。我们同时需要处理整个 batch 和单一图片中空 proposals 的情况。例如,在 CascadeRoIHead 中, + +```python +# 简单的测试 +... + +# 在整个 batch中 都没有 proposals +if rois.shape[0] == 0: + bbox_results = [[ + np.zeros((0, 5), dtype=np.float32) + for _ in range(self.bbox_head[-1].num_classes) + ]] * num_imgs + if self.with_mask: + mask_classes = self.mask_head[-1].num_classes + segm_results = [[[] for _ in range(mask_classes)] + for _ in range(num_imgs)] + results = list(zip(bbox_results, segm_results)) + else: + results = bbox_results + return results +... + +# 在单张图片中没有 proposals +for i in range(self.num_stages): + ... + if i < self.num_stages - 1: + for j in range(num_imgs): + # 处理空 proposals + if rois[j].shape[0] > 0: + bbox_label = cls_score[j][:, :-1].argmax(dim=1) + refine_roi = self.bbox_head[i].regress_by_class( + rois[j], bbox_label[j], bbox_pred[j], img_metas[j]) + refine_roi_list.append(refine_roi) +``` + +如果你有自定义的 `RoIHead`, 你可以参考上面的方法来处理空 proposals 的情况。 + +## 全景分割数据集 + +在 MMDetection 中,我们支持了 COCO 全景分割数据集 `CocoPanopticDataset`。对于它的实现,我们在这里声明一些默认约定。 + +1. 在 mmdet\<=2.16.0 时,语义分割标注中的前景和背景标签范围与 MMDetection 中的默认规定有所不同。标签 `0` 代表 `VOID` 标签。 + 从 mmdet=2.17.0 开始,为了和框的类别标注保持一致,语义分割标注的类别标签也改为从 `0` 开始,标签 `255` 代表 `VOID` 类。 + 为了达成这一目标,我们在流程 `Pad` 里支持了设置 `seg` 的填充值的功能。 +2. 在评估中,全景分割结果必须是一个与原图大小相同的图。结果图中每个像素的值有如此形式:`instance_id * INSTANCE_OFFSET + category_id`。 diff --git a/downstream/mmdetection/docs/zh_cn/faq.md b/downstream/mmdetection/docs/zh_cn/faq.md new file mode 100644 index 0000000..8f9bcf8 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/faq.md @@ -0,0 +1,162 @@ +# 常见问题解答 + +我们在这里列出了使用时的一些常见问题及其相应的解决方案。 如果您发现有一些问题被遗漏,请随时提 PR 丰富这个列表。 如果您无法在此获得帮助,请使用 [issue模板](https://github.com/open-mmlab/mmdetection/blob/master/.github/ISSUE_TEMPLATE/error-report.md/)创建问题,但是请在模板中填写所有必填信息,这有助于我们更快定位问题。 + +## MMCV 安装相关 + +- MMCV 与 MMDetection 的兼容问题: "ConvWS is already registered in conv layer"; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." + + 请按 [安装说明](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html#installation) 为你的 MMDetection 安装正确版本的 MMCV 。 + +- "No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'". + + 原因是安装了 `mmcv` 而不是 `mmcv-full`。 + + 1. `pip uninstall mmcv` 卸载安装的 `mmcv` + + 2. 
安装 `mmcv-full` 根据 [安装说明](https://mmcv.readthedocs.io/zh/latest/#installation)。 + +## PyTorch/CUDA 环境相关 + +- "RTX 30 series card fails when building MMCV or MMDet" + + 1. 临时解决方案为使用命令 `MMCV_WITH_OPS=1 MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80' pip install -e .` 进行编译。 常见报错信息为 `nvcc fatal : Unsupported gpu architecture 'compute_86'` 意思是你的编译器不支持 sm_86 架构(包括英伟达 30 系列的显卡)的优化,至 CUDA toolkit 11.0 依旧未支持. 这个命令是通过增加宏 `MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80` 让 nvcc 编译器为英伟达 30 系列显卡进行 `sm_80` 的优化,虽然这有可能会无法发挥出显卡所有性能。 + + 2. 有开发者已经在 [pytorch/pytorch#47585](https://github.com/pytorch/pytorch/pull/47585) 更新了 PyTorch 默认的编译 flag, 但是我们对此并没有进行测试。 + +- "invalid device function" or "no kernel image is available for execution". + + 1. 检查您正常安装了 CUDA runtime (一般在`/usr/local/`),或者使用 `nvcc --version` 检查本地版本,有时安装 PyTorch 会顺带安装一个 CUDA runtime,并且实际优先使用 conda 环境中的版本,你可以使用 `conda list cudatoolkit` 查看其版本。 + + 2. 编译 extension 的 CUDA Toolkit 版本与运行时的 CUDA Toolkit 版本是否相符, + + - 如果您从源码自己编译的,使用 `python mmdet/utils/collect_env.py` 检查编译编译 extension 的 CUDA Toolkit 版本,然后使用 `conda list cudatoolkit` 检查当前 conda 环境是否有 CUDA Toolkit,若有检查版本是否匹配, 如不匹配,更换 conda 环境的 CUDA Toolkit,或者使用匹配的 CUDA Toolkit 中的 nvcc 编译即可,如环境中无 CUDA Toolkit,可以使用 `nvcc -V`。 + + 等命令查看当前使用的 CUDA runtime。 + + - 如果您是通过 pip 下载的预编译好的版本,请确保与当前 CUDA runtime 一致。 + + 3. 运行 `python mmdet/utils/collect_env.py` 检查是否为正确的 GPU 架构编译的 PyTorch, torchvision, 与 MMCV。 你或许需要设置 `TORCH_CUDA_ARCH_LIST` 来重新安装 MMCV,可以参考 [GPU 架构表](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list), + 例如, 运行 `TORCH_CUDA_ARCH_LIST=7.0 pip install mmcv-full` 为 Volta GPU 编译 MMCV。这种架构不匹配的问题一般会出现在使用一些旧型号的 GPU 时候出现, 例如, Tesla K80。 + +- "undefined symbol" or "cannot open xxx.so". + + 1. 如果这些 symbol 属于 CUDA/C++ (如 libcudart.so 或者 GLIBCXX),使用 `python mmdet/utils/collect_env.py`检查 CUDA/GCC runtime 与编译 MMCV 的 CUDA 版本是否相同。 + 2. 如果这些 symbols 属于 PyTorch,(例如, symbols containing caffe, aten, and TH), 检查当前 Pytorch 版本是否与编译 MMCV 的版本一致。 + 3. 运行 `python mmdet/utils/collect_env.py` 检查 PyTorch, torchvision, MMCV 等的编译环境与运行环境一致。 + +- setuptools.sandbox.UnpickleableException: DistutilsSetupError("each element of 'ext_modules' option must be an Extension instance or 2-tuple") + + 1. 如果你在使用 miniconda 而不是 anaconda,检查是否正确的安装了 Cython 如 [#3379](https://github.com/open-mmlab/mmdetection/issues/3379). + 2. 检查环境中的 `setuptools`, `Cython`, and `PyTorch` 相互之间版本是否匹配。 + +- "Segmentation fault". + + 1. 检查 GCC 的版本,通常是因为 PyTorch 版本与 GCC 版本不匹配 (例如 GCC \< 4.9 ),我们推荐用户使用 GCC 5.4,我们也不推荐使用 GCC 5.5, 因为有反馈 GCC 5.5 会导致 "segmentation fault" 并且切换到 GCC 5.4 就可以解决问题。 + + 2. 检查是否正确安装了 CUDA 版本的 PyTorch 。 + + ```shell + python -c 'import torch; print(torch.cuda.is_available())' + ``` + + 是否返回True。 + + 3. 如果 `torch` 的安装是正确的,检查是否正确编译了 MMCV。 + + ```shell + python -c 'import mmcv; import mmcv.ops' + ``` + + 4. 如果 MMCV 与 PyTorch 都被正确安装了,则使用 `ipdb`, `pdb` 设置断点,直接查找哪一部分的代码导致了 `segmentation fault`。 + +## Training 相关 + +- "Loss goes Nan" + + 1. 检查数据的标注是否正常, 长或宽为 0 的框可能会导致回归 loss 变为 nan,一些小尺寸(宽度或高度小于 1)的框在数据增强(例如,instaboost)后也会导致此问题。 因此,可以检查标注并过滤掉那些特别小甚至面积为 0 的框,并关闭一些可能会导致 0 面积框出现数据增强。 + 2. 降低学习率:由于某些原因,例如 batch size 大小的变化, 导致当前学习率可能太大。 您可以降低为可以稳定训练模型的值。 + 3. 延长 warm up 的时间:一些模型在训练初始时对学习率很敏感,您可以把 `warmup_iters` 从 500 更改为 1000 或 2000。 + 4. 
添加 gradient clipping: 一些模型需要梯度裁剪来稳定训练过程。 默认的 `grad_clip` 是 `None`, 你可以在 config 设置 `optimizer_config=dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))` 如果你的 config 没有继承任何包含 `optimizer_config=dict(grad_clip=None)`, 你可以直接设置`optimizer_config=dict(grad_clip=dict(max_norm=35, norm_type=2))`. + +- "GPU out of memory" + + 1. 存在大量 ground truth boxes 或者大量 anchor 的场景,可能在 assigner 会 OOM。 您可以在 assigner 的配置中设置 `gpu_assign_thr=N`,这样当超过 N 个 GT boxes 时,assigner 会通过 CPU 计算 IOU。 + + 2. 在 backbone 中设置 `with_cp=True`。 这使用 PyTorch 中的 `sublinear strategy` 来降低 backbone 占用的 GPU 显存。 + + 3. 使用 `config/fp16` 中的示例尝试混合精度训练。`loss_scale` 可能需要针对不同模型进行调整。 + + 4. 你也可以尝试使用 `AvoidCUDAOOM` 来避免该问题。首先它将尝试调用 `torch.cuda.empty_cache()`。如果失败,将会尝试把输入类型转换到 FP16。如果仍然失败,将会把输入从 GPUs 转换到 CPUs 进行计算。这里提供了两个使用的例子: + + ```python + from mmdet.utils import AvoidCUDAOOM + + output = AvoidCUDAOOM.retry_if_cuda_oom(some_function)(input1, input2) + ``` + + 你也可也使用 `AvoidCUDAOOM` 作为装饰器让代码遇到 OOM 的时候继续运行: + + ```python + from mmdet.utils import AvoidCUDAOOM + + @AvoidCUDAOOM.retry_if_cuda_oom + def function(*args, **kwargs): + ... + return xxx + ``` + +- "RuntimeError: Expected to have finished reduction in the prior iteration before starting a new one" + + 1. 这个错误出现在存在参数没有在 forward 中使用,容易在 DDP 中运行不同分支时发生。 + 2. 你可以在 config 设置 `find_unused_parameters = True` 进行训练 (会降低训练速度)。 + 3. 你也可以通过在 config 中的 `optimizer_config` 里设置 `detect_anomalous_params=True` 查找哪些参数没有用到,但是需要 MMCV 的版本 >= 1.4.1。 + +- 训练中保存最好模型 + + 可以通过配置 `evaluation = dict(save_best=‘auto’)`开启。在 auto 参数情况下会根据返回的验证结果中的第一个 key 作为选择最优模型的依据,你也可以直接设置评估结果中的 key 来手动设置,例如 `evaluation = dict(save_best=‘mAP’)`。 + +- 在 Resume 训练中使用 `ExpMomentumEMAHook` + + 如果在训练中使用了 `ExpMomentumEMAHook`,那么 resume 时候不能仅仅通过命令行参数 `--resume-from` 或 `--cfg-options resume_from` 实现恢复模型参数功能例如 `python tools/train.py configs/yolox/yolox_s_8x8_300e_coco.py --resume-from ./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth`。以 `yolox_s` 算法为例,由于 `ExpMomentumEMAHook` 需要重新加载权重,你可以通过如下做法实现: + + ```python + # 直接打开 configs/yolox/yolox_s_8x8_300e_coco.py 修改所有 resume_from 字段 + resume_from=./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth + custom_hooks=[... + dict( + type='ExpMomentumEMAHook', + resume_from=./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth, + momentum=0.0001, + priority=49) + ] + ``` + +## Evaluation 相关 + +- 使用 COCO Dataset 的测评接口时, 测评结果中 AP 或者 AR = -1 + 1. 根据COCO数据集的定义,一张图像中的中等物体与小物体面积的阈值分别为 9216(96\*96)与 1024(32\*32)。 + 2. 如果在某个区间没有检测框 AP 与 AR 认定为 -1. + +## Model 相关 + +- **ResNet style 参数说明** + + ResNet style 可选参数允许 `pytorch` 和 `caffe`,其差别在于 Bottleneck 模块。Bottleneck 是 `1x1-3x3-1x1` 堆叠结构,在 `caffe` 模式模式下 stride=2 参数放置在第一个 `1x1` 卷积处,而 `pyorch` 模式下 stride=2 放在第二个 `3x3` 卷积处。一个简单示例如下: + + ```python + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + ``` + +- **ResNeXt 参数说明** + + ResNeXt 来自论文 [`Aggregated Residual Transformations for Deep Neural Networks`](https://arxiv.org/abs/1611.05431). 
其引入分组卷积,并且通过变量基数来控制组的数量达到精度和复杂度的平衡,其有两个超参 `baseWidth` 和 `cardinality `来控制内部 Bottleneck 模块的基本宽度和分组数参数。以 MMDetection 中配置名为 `mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py` 为例,其中 `mask_rcnn` 代表算法采用 Mask R-CNN,`x101` 代表骨架网络采用 ResNeXt-101,`64x4d`代表 Bottleneck 一共分成 64 组,每组的基本宽度是 4。 + +- **骨架网络 eval 模式说明** + + 因为检测模型通常比较大且输入图片分辨率很高,这会导致检测模型的 batch 很小,通常是 2,这会使得 BatchNorm 在训练过程计算的统计量方差非常大,不如主干网络预训练时得到的统计量稳定,因此在训练是一般都会使用 `norm_eval=True` 模式,直接使用预训练主干网络中的 BatchNorm 统计量,少数使用大 batch 的算法是 `norm_eval=False` 模式,例如 NASFPN。对于没有 ImageNet 预训练的骨架网络,如果 batch 比较小,可以考虑使用 `SyncBN`。 diff --git a/downstream/mmdetection/docs/zh_cn/get_started.md b/downstream/mmdetection/docs/zh_cn/get_started.md new file mode 100644 index 0000000..cab1b3b --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/get_started.md @@ -0,0 +1,256 @@ +## 依赖 + +- Linux 和 macOS (Windows 理论上支持) +- Python 3.6+ +- PyTorch 1.3+ +- CUDA 9.2+ (如果基于 PyTorch 源码安装,也能够支持 CUDA 9.0) +- GCC 5+ +- [MMCV](https://mmcv.readthedocs.io/en/latest/#installation) + +MMDetection 和 MMCV 版本兼容性如下所示,需要安装正确的 MMCV 版本以避免安装出现问题。 + +| MMDetection 版本 | MMCV 版本 | +| :------------: | :------------------------: | +| master | mmcv-full>=1.3.17, \<1.6.0 | +| 2.25.0 | mmcv-full>=1.3.17, \<1.6.0 | +| 2.24.1 | mmcv-full>=1.3.17, \<1.6.0 | +| 2.24.0 | mmcv-full>=1.3.17, \<1.6.0 | +| 2.23.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.22.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.21.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.20.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.19.1 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.19.0 | mmcv-full>=1.3.17, \<1.5.0 | +| 2.18.1 | mmcv-full>=1.3.17, \<1.4.0 | +| 2.18.0 | mmcv-full>=1.3.14, \<1.4.0 | +| 2.17.0 | mmcv-full>=1.3.14, \<1.4.0 | +| 2.16.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 2.15.1 | mmcv-full>=1.3.8, \<1.4.0 | +| 2.15.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 2.14.0 | mmcv-full>=1.3.8, \<1.4.0 | +| 2.13.0 | mmcv-full>=1.3.3, \<1.4.0 | +| 2.12.0 | mmcv-full>=1.3.3, \<1.4.0 | +| 2.11.0 | mmcv-full>=1.2.4, \<1.4.0 | +| 2.10.0 | mmcv-full>=1.2.4, \<1.4.0 | +| 2.9.0 | mmcv-full>=1.2.4, \<1.4.0 | +| 2.8.0 | mmcv-full>=1.2.4, \<1.4.0 | +| 2.7.0 | mmcv-full>=1.1.5, \<1.4.0 | +| 2.6.0 | mmcv-full>=1.1.5, \<1.4.0 | +| 2.5.0 | mmcv-full>=1.1.5, \<1.4.0 | +| 2.4.0 | mmcv-full>=1.1.1, \<1.4.0 | +| 2.3.0 | mmcv-full==1.0.5 | +| 2.3.0rc0 | mmcv-full>=1.0.2 | +| 2.2.1 | mmcv==0.6.2 | +| 2.2.0 | mmcv==0.6.2 | +| 2.1.0 | mmcv>=0.5.9, \<=0.6.1 | +| 2.0.0 | mmcv>=0.5.1, \<=0.5.8 | + +\*\*注意:\*\*如果已经安装了 mmcv,首先需要使用 `pip uninstall mmcv` 卸载已安装的 mmcv,如果同时安装了 mmcv 和 mmcv-full,将会报 `ModuleNotFoundError` 错误。 + +## 安装流程 + +### 从零开始设置脚本 + +假设当前已经成功安装 CUDA 10.1,这里提供了一个完整的基于 conda 安装 MMDetection 的脚本。您可以参考下一节中的分步安装说明。 + +```shell +conda create -n openmmlab python=3.7 pytorch==1.6.0 cudatoolkit=10.1 torchvision -c pytorch -y +conda activate openmmlab +pip install openmim +mim install mmcv-full +git clone https://github.com/open-mmlab/mmdetection.git +cd mmdetection +pip install -r requirements/build.txt +pip install -v -e . +``` + +### 准备环境 + +1. 使用 conda 新建虚拟环境,并进入该虚拟环境; + + ```shell + conda create -n open-mmlab python=3.7 -y + conda activate open-mmlab + ``` + +2. 
基于 [PyTorch 官网](https://pytorch.org/)安装 PyTorch 和 torchvision,例如: + + ```shell + conda install pytorch torchvision -c pytorch + ``` + + **注意**:需要确保 CUDA 的编译版本和运行版本匹配。可以在 [PyTorch 官网](https://pytorch.org/)查看预编译包所支持的 CUDA 版本。 + + `例 1` 例如在 `/usr/local/cuda` 下安装了 CUDA 10.1, 并想安装 PyTorch 1.5,则需要安装支持 CUDA 10.1 的预构建 PyTorch: + + ```shell + conda install pytorch cudatoolkit=10.1 torchvision -c pytorch + ``` + + `例 2` 例如在 `/usr/local/cuda` 下安装了 CUDA 9.2, 并想安装 PyTorch 1.3.1,则需要安装支持 CUDA 9.2 的预构建 PyTorch: + + ```shell + conda install pytorch=1.3.1 cudatoolkit=9.2 torchvision=0.4.2 -c pytorch + ``` + + 如果不是安装预构建的包,而是从源码中构建 PyTorch,则可以使用更多的 CUDA 版本,例如 CUDA 9.0。 + +### 安装 MMDetection + +我们建议使用 [MIM](https://github.com/open-mmlab/mim) 来安装 MMDetection: + +```shell +pip install openmim +mim install mmdet +``` + +MIM 能够自动地安装 OpenMMLab 的项目以及对应的依赖包。 + +或者,可以手动安装 MMDetection: + +1. 安装 mmcv-full,我们建议使用预构建包来安装: + + ```shell + pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/{cu_version}/{torch_version}/index.html + ``` + + 需要把命令行中的 `{cu_version}` 和 `{torch_version}` 替换成对应的版本。例如:在 CUDA 11 和 PyTorch 1.7.0 的环境下,可以使用下面命令安装最新版本的 MMCV: + + ```shell + pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu110/torch1.7.0/index.html + ``` + + 请参考 [MMCV](https://mmcv.readthedocs.io/en/latest/#installation) 获取不同版本的 MMCV 所兼容的的不同的 PyTorch 和 CUDA 版本。同时,也可以通过以下命令行从源码编译 MMCV: + + ```shell + git clone https://github.com/open-mmlab/mmcv.git + cd mmcv + MMCV_WITH_OPS=1 pip install -e . # 安装好 mmcv-full + cd .. + ``` + + 或者,可以直接使用命令行安装: + + ```shell + pip install mmcv-full + ``` + + PyTorch 在 1.x.0 和 1.x.1 之间通常是兼容的,故 mmcv-full 只提供 1.x.0 的编译包。如果你的 PyTorch 版本是 1.x.1,你可以放心地安装在 1.x.0 版本编译的 mmcv-full。 + + ``` + # 我们可以忽略 PyTorch 的小版本号 + pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu110/torch1.7/index.html + ``` + +2. 安装 MMDetection: + + 你可以直接通过如下命令从 pip 安装使用 mmdetection: + + ```shell + pip install mmdet + ``` + + 或者从 git 仓库编译源码 + + ```shell + git clone https://github.com/open-mmlab/mmdetection.git + cd mmdetection + pip install -r requirements/build.txt + pip install -v -e . # or "python setup.py develop" + ``` + +3. 
安装额外的依赖以使用 Instaboost, 全景分割, 或者 LVIS 数据集 + + ```shell + # 安装 instaboost 依赖 + pip install instaboostfast + # 安装全景分割依赖 + pip install git+https://github.com/cocodataset/panopticapi.git + # 安装 LVIS 数据集依赖 + pip install git+https://github.com/lvis-dataset/lvis-api.git + # 安装 albumentations 依赖 + pip install -r requirements/albu.txt + ``` + +**注意:** + +(1) 按照上述说明,MMDetection 安装在 `dev` 模式下,因此在本地对代码做的任何修改都会生效,无需重新安装; + +(2) 如果希望使用 `opencv-python-headless` 而不是 `opencv-python`, 可以在安装 MMCV 之前安装; + +(3) 一些安装依赖是可以选择的。例如只需要安装最低运行要求的版本,则可以使用 `pip install -v -e .` 命令。如果希望使用可选择的像 `albumentations` 和 `imagecorruptions` 这种依赖项,可以使用 `pip install -r requirements/optional.txt` 进行手动安装,或者在使用 `pip` 时指定所需的附加功能(例如 `pip install -v -e .[optional]`),支持附加功能的有效键值包括 `all`、`tests`、`build` 以及 `optional` 。 + +(4) 如果希望使用 `albumentations`,我们建议使用 `pip install -r requirements/albu.txt` 或者 `pip install -U albumentations --no-binary qudida,albumentations` 进行安装。 如果简单地使用 `pip install albumentations>=0.3.2` 进行安装,则会同时安装 `opencv-python-headless`(即便已经安装了 `opencv-python` 也会再次安装)。我们建议在安装 `albumentations` 后检查环境,以确保没有同时安装 `opencv-python` 和 `opencv-python-headless`,因为同时安装可能会导致一些问题。更多细节请参考[官方文档](https://albumentations.ai/docs/getting_started/installation/#note-on-opencv-dependencies)。 + +### 只在 CPU 安装 + +我们的代码能够建立在只使用 CPU 的环境(CUDA 不可用)。 + +在CPU模式下,可以进行模型训练(需要 MMCV 版本 >= 1.4.4)、测试或者推理,然而以下功能将在 CPU 模式下不能使用: + +- Deformable Convolution +- Modulated Deformable Convolution +- ROI pooling +- Deformable ROI pooling +- CARAFE: Content-Aware ReAssembly of FEatures +- SyncBatchNorm +- CrissCrossAttention: Criss-Cross Attention +- MaskedConv2d +- Temporal Interlace Shift +- nms_cuda +- sigmoid_focal_loss_cuda +- bbox_overlaps + +因此,如果尝试使用包含上述操作的模型进行训练/测试/推理,将会报错。下表列出了由于依赖上述算子而无法在 CPU 上运行的相关模型: + +| 操作 | 模型 | +| :-----------------------------------------------------: | :-------------------------------------------------------------------------------: | +| Deformable Convolution/Modulated Deformable Convolution | DCN、Guided Anchoring、RepPoints、CentripetalNet、VFNet、CascadeRPN、NAS-FCOS、DetectoRS | +| MaskedConv2d | Guided Anchoring | +| CARAFE | CARAFE | +| SyncBatchNorm | ResNeSt | + +### 另一种选择: Docker 镜像 + +我们提供了 [Dockerfile](https://github.com/open-mmlab/mmdetection/blob/master/docker/Dockerfile) 来生成镜像,请确保 [docker](https://docs.docker.com/engine/install/) 的版本 >= 19.03。 + +```shell +# 基于 PyTorch 1.6, CUDA 10.1 生成镜像 +docker build -t mmdetection docker/ +``` + +运行命令: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmdetection/data mmdetection +``` + +### 使用多个 MMDetection 版本进行开发 + +训练和测试的脚本已经在 PYTHONPATH 中进行了修改,以确保脚本使用当前目录中的 MMDetection。 + +要使环境中安装默认的 MMDetection 而不是当前正在在使用的,可以删除出现在相关脚本中的代码: + +```shell +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +``` + +## 验证 + +为了验证是否正确安装了 MMDetection 和所需的环境,我们可以运行示例的 Python 代码来初始化检测器并推理一个演示图像: + +```python +from mmdet.apis import init_detector, inference_detector + +config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +# 从 model zoo 下载 checkpoint 并放在 `checkpoints/` 文件下 +# 网址为: http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth +checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' +device = 'cuda:0' +# 初始化检测器 +model = init_detector(config_file, checkpoint_file, device=device) +# 推理演示图像 +inference_detector(model, 'demo/demo.jpg') +``` + +如果成功安装 MMDetection,则上面的代码可以完整地运行。 diff --git a/downstream/mmdetection/docs/zh_cn/index.rst b/downstream/mmdetection/docs/zh_cn/index.rst new file 
mode 100644 index 0000000..f0e00c9 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/index.rst @@ -0,0 +1,55 @@ +Welcome to MMDetection's documentation! +======================================= + +.. toctree:: + :maxdepth: 2 + :caption: 开始你的第一步 + + get_started.md + model_zoo.md + article.md + +.. toctree:: + :maxdepth: 2 + :caption: 快速启动 + + 1_exist_data_model.md + 2_new_data_model.md + +.. toctree:: + :maxdepth: 2 + :caption: 教程 + + tutorials/index.rst + +.. toctree:: + :maxdepth: 2 + :caption: 实用工具与脚本 + + useful_tools.md + +.. toctree:: + :maxdepth: 2 + :caption: 说明 + + conventions.md + compatibility.md + faq.md + +.. toctree:: + :caption: 语言切换 + + switch_language.md + +.. toctree:: + :maxdepth: 1 + :caption: 接口文档(英文) + + api.rst + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/downstream/mmdetection/docs/zh_cn/make.bat b/downstream/mmdetection/docs/zh_cn/make.bat new file mode 100644 index 0000000..922152e --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/downstream/mmdetection/docs/zh_cn/model_zoo.md b/downstream/mmdetection/docs/zh_cn/model_zoo.md new file mode 100644 index 0000000..c34637a --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/model_zoo.md @@ -0,0 +1,333 @@ +# 模型库 + +## 镜像地址 + +从 MMDetection V2.0 起,我们只通过阿里云维护模型库。V1.x 版本的模型已经弃用。 + +## 共同设置 + +- 所有模型都是在 `coco_2017_train` 上训练,在 `coco_2017_val` 上测试。 +- 我们使用分布式训练。 +- 所有 pytorch-style 的 ImageNet 预训练主干网络来自 PyTorch 的模型库,caffe-style 的预训练主干网络来自 detectron2 最新开源的模型。 +- 为了与其他代码库公平比较,文档中所写的 GPU 内存是8个 GPU 的 `torch.cuda.max_memory_allocated()` 的最大值,此值通常小于 nvidia-smi 显示的值。 +- 我们以网络 forward 和后处理的时间加和作为推理时间,不包含数据加载时间。所有结果通过 [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) 脚本计算所得。该脚本会计算推理 2000 张图像的平均时间。 + +## ImageNet 预训练模型 + +通过 ImageNet 分类任务预训练的主干网络进行初始化是很常见的操作。所有预训练模型的链接都可以在 [open_mmlab](https://github.com/open-mmlab/mmcv/blob/master/mmcv/model_zoo/open_mmlab.json) 中找到。根据 `img_norm_cfg` 和原始权重,我们可以将所有 ImageNet 预训练模型分为以下几种情况: + +- TorchVision:torchvision 模型权重,包含 ResNet50, ResNet101。`img_norm_cfg` 为 `dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)`。 +- Pycls:[pycls](https://github.com/facebookresearch/pycls) 模型权重,包含 RegNetX。`img_norm_cfg` 为 `dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)`。 +- MSRA styles:[MSRA](https://github.com/KaimingHe/deep-residual-networks) 模型权重,包含 ResNet50_Caffe,ResNet101_Caffe。`img_norm_cfg` 为 `dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)`。 +- Caffe2 styles:现阶段只包含 ResNext101_32x8d。`img_norm_cfg` 为 `dict(mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False)`。 +- Other 
styles: SSD 的 `img_norm_cfg` 为 `dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)`,YOLOv3 的 `img_norm_cfg` 为 `dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)`。 + +MMdetection 常用到的主干网络细节如下表所示: + +| 模型 | 来源 | 链接 | 描述 | +| ---------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ResNet50 | TorchVision | [torchvision 中的 ResNet-50](https://download.pytorch.org/models/resnet50-19c8e357.pth) | 来自 [torchvision 中的 ResNet-50](https://download.pytorch.org/models/resnet50-19c8e357.pth)。 | +| ResNet101 | TorchVision | [torchvision 中的 ResNet-101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth) | 来自 [torchvision 中的 ResNet-101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth)。 | +| RegNetX | Pycls | [RegNetX_3.2gf](https://download.openmmlab.com/pretrain/third_party/regnetx_3.2gf-c2599b0f.pth),[RegNetX_800mf](https://download.openmmlab.com/pretrain/third_party/regnetx_800mf-1f4be4c7.pth) 等 | 来自 [pycls](https://github.com/facebookresearch/pycls)。 | +| ResNet50_Caffe | MSRA | [MSRA 中的 ResNet-50](https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth) | 由 [Detectron2 中的 R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl) 转化的副本。原始权重文件来自 [MSRA 中的原始 ResNet-50](https://github.com/KaimingHe/deep-residual-networks)。 | +| ResNet101_Caffe | MSRA | [MSRA 中的 ResNet-101](https://download.openmmlab.com/pretrain/third_party/resnet101_caffe-3ad79236.pth) | 由 [Detectron2 中的 R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl) 转化的副本。原始权重文件来自 [MSRA 中的原始 ResNet-101](https://github.com/KaimingHe/deep-residual-networks)。 | +| ResNext101_32x8d | Caffe2 | [Caffe2 ResNext101_32x8d](https://download.openmmlab.com/pretrain/third_party/resnext101_32x8d-1516f1aa.pth) | 由 [Detectron2 中的 X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl) 转化的副本。原始 ResNeXt-101-32x8d 由 FB 使用 Caffe2 训练。 | + +## Baselines + +### RPN + +请参考 [RPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/rpn)。 + +### Faster R-CNN + +请参考 [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn)。 + +### Mask R-CNN + +请参考 [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn)。 + +### Fast R-CNN (使用提前计算的 proposals) + +请参考 [Fast R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/fast_rcnn)。 + +### RetinaNet + +请参考 [RetinaNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet)。 + +### Cascade R-CNN and Cascade Mask R-CNN + +请参考 [Cascade R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/cascade_rcnn)。 + +### Hybrid Task Cascade (HTC) + +请参考 [HTC](https://github.com/open-mmlab/mmdetection/blob/master/configs/htc)。 + +### SSD + +请参考 [SSD](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd)。 + +### Group Normalization (GN) + +请参考 [Group Normalization](https://github.com/open-mmlab/mmdetection/blob/master/configs/gn)。 + +### Weight Standardization + +请参考 [Weight Standardization](https://github.com/open-mmlab/mmdetection/blob/master/configs/gn+ws)。 + +### Deformable Convolution 
v2 + +请参考 [Deformable Convolutional Networks](https://github.com/open-mmlab/mmdetection/blob/master/configs/dcn)。 + +### CARAFE: Content-Aware ReAssembly of FEatures + +请参考 [CARAFE](https://github.com/open-mmlab/mmdetection/blob/master/configs/carafe)。 + +### Instaboost + +请参考 [Instaboost](https://github.com/open-mmlab/mmdetection/blob/master/configs/instaboost)。 + +### Libra R-CNN + +请参考 [Libra R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/libra_rcnn)。 + +### Guided Anchoring + +请参考 [Guided Anchoring](https://github.com/open-mmlab/mmdetection/blob/master/configs/guided_anchoring)。 + +### FCOS + +请参考 [FCOS](https://github.com/open-mmlab/mmdetection/blob/master/configs/fcos)。 + +### FoveaBox + +请参考 [FoveaBox](https://github.com/open-mmlab/mmdetection/blob/master/configs/foveabox)。 + +### RepPoints + +请参考 [RepPoints](https://github.com/open-mmlab/mmdetection/blob/master/configs/reppoints)。 + +### FreeAnchor + +请参考 [FreeAnchor](https://github.com/open-mmlab/mmdetection/blob/master/configs/free_anchor)。 + +### Grid R-CNN (plus) + +请参考 [Grid R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/grid_rcnn)。 + +### GHM + +请参考 [GHM](https://github.com/open-mmlab/mmdetection/blob/master/configs/ghm)。 + +### GCNet + +请参考 [GCNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/gcnet)。 + +### HRNet + +请参考 [HRNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet)。 + +### Mask Scoring R-CNN + +请参考 [Mask Scoring R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/ms_rcnn)。 + +### Train from Scratch + +请参考 [Rethinking ImageNet Pre-training](https://github.com/open-mmlab/mmdetection/blob/master/configs/scratch)。 + +### NAS-FPN + +请参考 [NAS-FPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/nas_fpn)。 + +### ATSS + +请参考 [ATSS](https://github.com/open-mmlab/mmdetection/blob/master/configs/atss)。 + +### FSAF + +请参考 [FSAF](https://github.com/open-mmlab/mmdetection/blob/master/configs/fsaf)。 + +### RegNetX + +请参考 [RegNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet)。 + +### Res2Net + +请参考 [Res2Net](https://github.com/open-mmlab/mmdetection/blob/master/configs/res2net)。 + +### GRoIE + +请参考 [GRoIE](https://github.com/open-mmlab/mmdetection/blob/master/configs/groie)。 + +### Dynamic R-CNN + +请参考 [Dynamic R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/dynamic_rcnn)。 + +### PointRend + +请参考 [PointRend](https://github.com/open-mmlab/mmdetection/blob/master/configs/point_rend)。 + +### DetectoRS + +请参考 [DetectoRS](https://github.com/open-mmlab/mmdetection/blob/master/configs/detectors)。 + +### Generalized Focal Loss + +请参考 [Generalized Focal Loss](https://github.com/open-mmlab/mmdetection/blob/master/configs/gfl)。 + +### CornerNet + +请参考 [CornerNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/cornernet)。 + +### YOLOv3 + +请参考 [YOLOv3](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolo)。 + +### PAA + +请参考 [PAA](https://github.com/open-mmlab/mmdetection/blob/master/configs/paa)。 + +### SABL + +请参考 [SABL](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl)。 + +### CentripetalNet + +请参考 [CentripetalNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/centripetalnet)。 + +### ResNeSt + +请参考 [ResNeSt](https://github.com/open-mmlab/mmdetection/blob/master/configs/resnest)。 + +### DETR + +请参考 [DETR](https://github.com/open-mmlab/mmdetection/blob/master/configs/detr)。 + +### Deformable DETR + +请参考 [Deformable 
DETR](https://github.com/open-mmlab/mmdetection/blob/master/configs/deformable_detr)。 + +### AutoAssign + +请参考 [AutoAssign](https://github.com/open-mmlab/mmdetection/blob/master/configs/autoassign)。 + +### YOLOF + +请参考 [YOLOF](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolof)。 + +### Seesaw Loss + +请参考 [Seesaw Loss](https://github.com/open-mmlab/mmdetection/blob/master/configs/seesaw_loss)。 + +### CenterNet + +请参考 [CenterNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/centernet)。 + +### YOLOX + +请参考 [YOLOX](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolox)。 + +### PVT + +请参考 [PVT](https://github.com/open-mmlab/mmdetection/blob/master/configs/pvt)。 + +### SOLO + +请参考 [SOLO](https://github.com/open-mmlab/mmdetection/blob/master/configs/solo)。 + +### QueryInst + +请参考 [QueryInst](https://github.com/open-mmlab/mmdetection/blob/master/configs/queryinst)。 + +### Other datasets + +我们还在 [PASCAL VOC](https://github.com/open-mmlab/mmdetection/blob/master/configs/pascal_voc),[Cityscapes](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes) 和 [WIDER FACE](https://github.com/open-mmlab/mmdetection/blob/master/configs/wider_face) 上对一些方法进行了基准测试。 + +### Pre-trained Models + +我们还通过多尺度训练和更长的训练策略来训练用 ResNet-50 和 [RegNetX-3.2G](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet) 作为主干网络的 [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn) 和 [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn)。这些模型可以作为下游任务的预训练模型。 + +## 速度基准 + +### 训练速度基准 + +我们提供 [analyze_logs.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/analyze_logs.py) 来得到训练中每一次迭代的平均时间。示例请参考 [Log Analysis](https://mmdetection.readthedocs.io/en/latest/useful_tools.html#log-analysis)。 + +我们与其他流行框架的 Mask R-CNN 训练速度进行比较(数据是从 [detectron2](https://github.com/facebookresearch/detectron2/blob/master/docs/notes/benchmarks.md/) 复制而来)。在 mmdetection 中,我们使用 [mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py) 进行基准测试。它与 detectron2 的 [mask_rcnn_R_50_FPN_noaug_1x.yaml](https://github.com/facebookresearch/detectron2/blob/master/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml) 设置完全一样。同时,我们还提供了[模型权重](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug_compare_20200518-10127928.pth)和[训练 log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug_20200518_105755.log.json) 作为参考。为了跳过 GPU 预热时间,吞吐量按照100-500次迭代之间的平均吞吐量来计算。 + +| 框架 | 吞吐量 (img/s) | +| -------------------------------------------------------------------------------------- | ----------- | +| [Detectron2](https://github.com/facebookresearch/detectron2) | 62 | +| [MMDetection](https://github.com/open-mmlab/mmdetection) | 61 | +| [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/) | 53 | +| [tensorpack](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) | 50 | +| [simpledet](https://github.com/TuSimple/simpledet/) | 39 | +| [Detectron](https://github.com/facebookresearch/Detectron) | 19 | +| [matterport/Mask_RCNN](https://github.com/matterport/Mask_RCNN/) | 14 | + +### 推理时间基准 + +我们提供 
[benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) 对推理时间进行基准测试。此脚本将推理 2000 张图片并计算忽略前 5 次推理的平均推理时间。可以通过设置 `LOG-INTERVAL` 来改变 log 输出间隔(默认为 50)。 + +```shell +python tools/benchmark.py ${CONFIG} ${CHECKPOINT} [--log-interval $[LOG-INTERVAL]] [--fuse-conv-bn] +``` + +模型库中,所有模型在基准测量推理时间时都没设置 `fuse-conv-bn`, 此设置可以使推理时间更短。 + +## 与 Detectron2 对比 + +我们在速度和精度方面对 mmdetection 和 [Detectron2](https://github.com/facebookresearch/detectron2.git) 进行对比。对比所使用的 detectron2 的 commit id 为 [185c27e](https://github.com/facebookresearch/detectron2/tree/185c27e4b4d2d4c68b5627b3765420c6d7f5a659)(30/4/2020)。 +为了公平对比,我们所有的实验都在同一机器下进行。 + +### 硬件 + +- 8 NVIDIA Tesla V100 (32G) GPUs +- Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz + +### 软件环境 + +- Python 3.7 +- PyTorch 1.4 +- CUDA 10.1 +- CUDNN 7.6.03 +- NCCL 2.4.08 + +### 精度 + +| 模型 | 训练策略 | Detectron2 | mmdetection | 下载 | +| -------------------------------------------------------------------------------------------------------------------------------------- | ---- | -------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py) | 1x | [37.9](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml) | 38.0 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-5324cff8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco_20200429_234554.log.json) | +| [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py) | 1x | [38.6 & 35.2](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml) | 38.8 & 35.4 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco-dbecf295.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco_20200430_054239.log.json) | +| [Retinanet](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py) | 1x | [36.5](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml) | 37.0 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/retinanet_r50_caffe_fpn_mstrain_1x_coco/retinanet_r50_caffe_fpn_mstrain_1x_coco-586977a0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/retinanet_r50_caffe_fpn_mstrain_1x_coco/retinanet_r50_caffe_fpn_mstrain_1x_coco_20200430_014748.log.json) | + +### 训练速度 + +训练速度使用 s/iter 来度量。结果越低越好。 + +| 模型 | Detectron2 | mmdetection | +| ------------ | ---------- | ----------- | +| Faster 
R-CNN | 0.210 | 0.216 | +| Mask R-CNN | 0.261 | 0.265 | +| Retinanet | 0.200 | 0.205 | + +### 推理速度 + +推理速度通过单张 GPU 下的 fps(img/s) 来度量,越高越好。 +为了与 Detectron2 保持一致,我们所写的推理时间除去了数据加载时间。 +对于 Mask RCNN,我们去除了后处理中 RLE 编码的时间。 +我们在括号中给出了官方给出的速度。由于硬件差异,官方给出的速度会比我们所测试得到的速度快一些。 + +| 模型 | Detectron2 | mmdetection | +| ------------ | ----------- | ----------- | +| Faster R-CNN | 25.6 (26.3) | 22.2 | +| Mask R-CNN | 22.5 (23.3) | 19.6 | +| Retinanet | 17.8 (18.2) | 20.6 | + +### 训练内存 + +| 模型 | Detectron2 | mmdetection | +| ------------ | ---------- | ----------- | +| Faster R-CNN | 3.0 | 3.8 | +| Mask R-CNN | 3.4 | 3.9 | +| Retinanet | 3.9 | 3.4 | diff --git a/downstream/mmdetection/docs/zh_cn/projects.md b/downstream/mmdetection/docs/zh_cn/projects.md new file mode 100644 index 0000000..6b9d300 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/projects.md @@ -0,0 +1,48 @@ +# 基于 MMDetection 的项目 + +有许多开源项目都是基于 MMDetection 搭建的,我们在这里列举一部分作为样例,展示如何基于 MMDetection 搭建您自己的项目。 +由于这个页面列举的项目并不完全,我们欢迎社区提交 Pull Request 来更新这个文档。 + +## MMDetection 的拓展项目 + +一些项目拓展了 MMDetection 的边界,如将 MMDetection 拓展支持 3D 检测或者将 MMDetection 用于部署。 +它们展示了 MMDetection 的许多可能性,所以我们在这里也列举一些。 + +- [OTEDetection](https://github.com/opencv/mmdetection): OpenVINO training extensions for object detection. +- [MMDetection3d](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. + +## 研究项目 + +同样有许多研究论文是基于 MMDetection 进行的。许多论文都发表在了顶级的会议或期刊上,或者对社区产生了深远的影响。 +为了向社区提供一个可以参考的论文列表,帮助大家开发或者比较新的前沿算法,我们在这里也遵循会议的时间顺序列举了一些论文。 +MMDetection 中已经支持的算法不在此列。 + +- Involution: Inverting the Inherence of Convolution for Visual Recognition, CVPR21. [\[paper\]](https://arxiv.org/abs/2103.06255)[\[github\]](https://github.com/d-li14/involution) +- Multiple Instance Active Learning for Object Detection, CVPR 2021. [\[paper\]](https://openaccess.thecvf.com/content/CVPR2021/papers/Yuan_Multiple_Instance_Active_Learning_for_Object_Detection_CVPR_2021_paper.pdf)[\[github\]](https://github.com/yuantn/MI-AOD) +- Adaptive Class Suppression Loss for Long-Tail Object Detection, CVPR 2021. [\[paper\]](https://arxiv.org/abs/2104.00885)[\[github\]](https://github.com/CASIA-IVA-Lab/ACSL) +- Generalizable Pedestrian Detection: The Elephant In The Room, CVPR2021. [\[paper\]](https://arxiv.org/abs/2003.08799)[\[github\]](https://github.com/hasanirtiza/Pedestron) +- Group Fisher Pruning for Practical Network Compression, ICML2021. [\[paper\]](https://github.com/jshilong/FisherPruning/blob/main/resources/paper.pdf)[\[github\]](https://github.com/jshilong/FisherPruning) +- Overcoming Classifier Imbalance for Long-tail Object Detection with Balanced Group Softmax, CVPR2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/papers/Li_Overcoming_Classifier_Imbalance_for_Long-Tail_Object_Detection_With_Balanced_Group_CVPR_2020_paper.pdf)[\[github\]](https://github.com/FishYuLi/BalancedGroupSoftmax) +- Coherent Reconstruction of Multiple Humans from a Single Image, CVPR2020. [\[paper\]](https://jiangwenpl.github.io/multiperson/)[\[github\]](https://github.com/JiangWenPL/multiperson) +- Look-into-Object: Self-supervised Structure Modeling for Object Recognition, CVPR 2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/papers/Zhou_Look-Into-Object_Self-Supervised_Structure_Modeling_for_Object_Recognition_CVPR_2020_paper.pdf)[\[github\]](https://github.com/JDAI-CV/LIO) +- Video Panoptic Segmentation, CVPR2020. 
[\[paper\]](https://arxiv.org/abs/2006.11339)[\[github\]](https://github.com/mcahny/vps) +- D2Det: Towards High Quality Object Detection and Instance Segmentation, CVPR2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/html/Cao_D2Det_Towards_High_Quality_Object_Detection_and_Instance_Segmentation_CVPR_2020_paper.html)[\[github\]](https://github.com/JialeCao001/D2Det) +- CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.09119)[\[github\]](https://github.com/KiveeDong/CentripetalNet) +- Learning a Unified Sample Weighting Network for Object Detection, CVPR 2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/html/Cai_Learning_a_Unified_Sample_Weighting_Network_for_Object_Detection_CVPR_2020_paper.html)[\[github\]](https://github.com/caiqi/sample-weighting-network) +- Scale-equalizing Pyramid Convolution for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2005.03101) [\[github\]](https://github.com/jshilong/SEPC) +- Revisiting the Sibling Head in Object Detector, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.07540)[\[github\]](https://github.com/Sense-X/TSD) +- PolarMask: Single Shot Instance Segmentation with Polar Representation, CVPR2020. [\[paper\]](https://arxiv.org/abs/1909.13226)[\[github\]](https://github.com/xieenze/PolarMask) +- Hit-Detector: Hierarchical Trinity Architecture Search for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.11818)[\[github\]](https://github.com/ggjy/HitDet.pytorch) +- ZeroQ: A Novel Zero Shot Quantization Framework, CVPR2020. [\[paper\]](https://arxiv.org/abs/2001.00281)[\[github\]](https://github.com/amirgholami/ZeroQ) +- CBNet: A Novel Composite Backbone Network Architecture for Object Detection, AAAI2020. [\[paper\]](https://aaai.org/Papers/AAAI/2020GB/AAAI-LiuY.1833.pdf)[\[github\]](https://github.com/VDIGPKU/CBNet) +- RDSNet: A New Deep Architecture for Reciprocal Object Detection and Instance Segmentation, AAAI2020. [\[paper\]](https://arxiv.org/abs/1912.05070)[\[github\]](https://github.com/wangsr126/RDSNet) +- Training-Time-Friendly Network for Real-Time Object Detection, AAAI2020. [\[paper\]](https://arxiv.org/abs/1909.00700)[\[github\]](https://github.com/ZJULearning/ttfnet) +- Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution, NeurIPS 2019. [\[paper\]](https://arxiv.org/abs/1909.06720)[\[github\]](https://github.com/thangvubk/Cascade-RPN) +- Reasoning R-CNN: Unifying Adaptive Global Reasoning into Large-scale Object Detection, CVPR2019. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2019/papers/Xu_Reasoning-RCNN_Unifying_Adaptive_Global_Reasoning_Into_Large-Scale_Object_Detection_CVPR_2019_paper.pdf)[\[github\]](https://github.com/chanyn/Reasoning-RCNN) +- Learning RoI Transformer for Oriented Object Detection in Aerial Images, CVPR2019. [\[paper\]](https://arxiv.org/abs/1812.00155)[\[github\]](https://github.com/dingjiansw101/AerialDetection) +- SOLO: Segmenting Objects by Locations. [\[paper\]](https://arxiv.org/abs/1912.04488)[\[github\]](https://github.com/WXinlong/SOLO) +- SOLOv2: Dynamic, Faster and Stronger. [\[paper\]](https://arxiv.org/abs/2003.10152)[\[github\]](https://github.com/WXinlong/SOLO) +- Dense Peppoints: Representing Visual Objects with Dense Point Sets. [\[paper\]](https://arxiv.org/abs/1912.11473)[\[github\]](https://github.com/justimyhxu/Dense-RepPoints) +- IterDet: Iterative Scheme for Object Detection in Crowded Environments. 
[\[paper\]](https://arxiv.org/abs/2005.05708)[\[github\]](https://github.com/saic-vul/iterdet) +- Cross-Iteration Batch Normalization. [\[paper\]](https://arxiv.org/abs/2002.05712)[\[github\]](https://github.com/Howal/Cross-iterationBatchNorm) +- A Ranking-based, Balanced Loss Function Unifying Classification and Localisation in Object Detection, NeurIPS2020 [\[paper\]](https://arxiv.org/abs/2009.13592)[\[github\]](https://github.com/kemaloksuz/aLRPLoss) diff --git a/downstream/mmdetection/docs/zh_cn/robustness_benchmarking.md b/downstream/mmdetection/docs/zh_cn/robustness_benchmarking.md new file mode 100644 index 0000000..28a6759 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/robustness_benchmarking.md @@ -0,0 +1,109 @@ +# 检测器鲁棒性检查 + +## 介绍 + +我们提供了在 [Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming](https://arxiv.org/abs/1907.07484) 中定义的「图像损坏基准测试」上测试目标检测和实例分割模型的工具。 +此页面提供了如何使用该基准测试的基本教程。 + +```latex +@article{michaelis2019winter, + title={Benchmarking Robustness in Object Detection: + Autonomous Driving when Winter is Coming}, + author={Michaelis, Claudio and Mitzkus, Benjamin and + Geirhos, Robert and Rusak, Evgenia and + Bringmann, Oliver and Ecker, Alexander S. and + Bethge, Matthias and Brendel, Wieland}, + journal={arXiv:1907.07484}, + year={2019} +} +``` + +![image corruption example](../resources/corruptions_sev_3.png) + +## 关于基准测试 + +要将结果提交到基准测试,请访问[基准测试主页](https://github.com/bethgelab/robust-detection-benchmark) + +基准测试是仿照 [imagenet-c 基准测试](https://github.com/hendrycks/robustness),由 Dan Hendrycks 和 Thomas Dietterich 在[Benchmarking Neural Network Robustness to Common Corruptions and Perturbations](https://arxiv.org/abs/1903.12261)(ICLR 2019)中发表。 + +图像损坏变换功能包含在此库中,但可以使用以下方法单独安装: + +```shell +pip install imagecorruptions +``` + +与 imagenet-c 相比,我们必须进行一些更改以处理任意大小的图像和灰度图像。 +我们还修改了“运动模糊”和“雪”损坏,以解除对于 linux 特定库的依赖, +否则必须单独安装这些库。有关详细信息,请参阅 [imagecorruptions](https://github.com/bethgelab/imagecorruptions)。 + +## 使用预训练模型进行推理 + +我们提供了一个测试脚本来评估模型在基准测试中提供的各种损坏变换组合下的性能。 + +### 在数据集上测试 + +- [x] 单张 GPU 测试 +- [ ] 多张 GPU 测试 +- [ ] 可视化检测结果 + +您可以使用以下命令在基准测试中使用 15 种损坏变换来测试模型性能。 + +```shell +# single-gpu testing +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] +``` + +也可以选择其它不同类型的损坏变换。 + +```shell +# noise +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions noise + +# blur +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions blur + +# wetaher +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions weather + +# digital +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions digital +``` + +或者使用一组自定义的损坏变换,例如: + +```shell +# gaussian noise, zoom blur and snow +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions gaussian_noise zoom_blur snow +``` + +最后,我们也可以选择施加在图像上的损坏变换的严重程度。 +严重程度从 1 到 5 逐级增强,0 表示不对图像施加损坏变换,即原始图像数据。 + +```shell +# severity 1 +python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --severities 1 + +# severities 0,2,4 +python 
tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --severities 0 2 4 +``` + +## 模型测试结果 + +下表是各模型在 COCO 2017val 上的测试结果。 + +| Model | Backbone | Style | Lr schd | box AP clean | box AP corr. | box % | mask AP clean | mask AP corr. | mask % | +| :-----------------: | :-----------------: | :-----: | :-----: | :----------: | :----------: | :---: | :-----------: | :-----------: | :----: | +| Faster R-CNN | R-50-FPN | pytorch | 1x | 36.3 | 18.2 | 50.2 | - | - | - | +| Faster R-CNN | R-101-FPN | pytorch | 1x | 38.5 | 20.9 | 54.2 | - | - | - | +| Faster R-CNN | X-101-32x4d-FPN | pytorch | 1x | 40.1 | 22.3 | 55.5 | - | - | - | +| Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 41.3 | 23.4 | 56.6 | - | - | - | +| Faster R-CNN | R-50-FPN-DCN | pytorch | 1x | 40.0 | 22.4 | 56.1 | - | - | - | +| Faster R-CNN | X-101-32x4d-FPN-DCN | pytorch | 1x | 43.4 | 26.7 | 61.6 | - | - | - | +| Mask R-CNN | R-50-FPN | pytorch | 1x | 37.3 | 18.7 | 50.1 | 34.2 | 16.8 | 49.1 | +| Mask R-CNN | R-50-FPN-DCN | pytorch | 1x | 41.1 | 23.3 | 56.7 | 37.2 | 20.7 | 55.7 | +| Cascade R-CNN | R-50-FPN | pytorch | 1x | 40.4 | 20.1 | 49.7 | - | - | - | +| Cascade Mask R-CNN | R-50-FPN | pytorch | 1x | 41.2 | 20.7 | 50.2 | 35.7 | 17.6 | 49.3 | +| RetinaNet | R-50-FPN | pytorch | 1x | 35.6 | 17.8 | 50.1 | - | - | - | +| Hybrid Task Cascade | X-101-64x4d-FPN-DCN | pytorch | 1x | 50.6 | 32.7 | 64.7 | 43.8 | 28.1 | 64.0 | + +由于对图像的损坏变换存在随机性,测试结果可能略有不同。 diff --git a/downstream/mmdetection/docs/zh_cn/stat.py b/downstream/mmdetection/docs/zh_cn/stat.py new file mode 100755 index 0000000..9625c62 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/stat.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +import functools as func +import glob +import os.path as osp +import re + +import numpy as np + +url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/' + +files = sorted(glob.glob('../configs/*/README.md')) + +stats = [] +titles = [] +num_ckpts = 0 + +for f in files: + url = osp.dirname(f.replace('../', url_prefix)) + + with open(f, 'r') as content_file: + content = content_file.read() + + title = content.split('\n')[0].replace('# ', '').strip() + ckpts = set(x.lower().strip() + for x in re.findall(r'\[model\]\((https?.*)\)', content)) + + if len(ckpts) == 0: + continue + + _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] + assert len(_papertype) > 0 + papertype = _papertype[0] + + paper = set([(papertype, title)]) + + titles.append(title) + num_ckpts += len(ckpts) + + statsmsg = f""" +\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) +""" + stats.append((paper, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) +msglist = '\n'.join(x for _, _, x in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# Model Zoo Statistics + +* Number of papers: {len(set(titles))} +{countstr} + +* Number of checkpoints: {num_ckpts} + +{msglist} +""" + +with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) diff --git a/downstream/mmdetection/docs/zh_cn/switch_language.md b/downstream/mmdetection/docs/zh_cn/switch_language.md new file mode 100644 index 0000000..b2c4ad9 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/switch_language.md @@ -0,0 +1,3 @@ +## English + +## 简体中文 diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/config.md 
b/downstream/mmdetection/docs/zh_cn/tutorials/config.md new file mode 100644 index 0000000..42b098f --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/config.md @@ -0,0 +1,522 @@ +# 教程 1: 学习配置文件 + +我们在配置文件中支持了继承和模块化,这便于进行各种实验。如果需要检查配置文件,可以通过运行 `python tools/misc/print_config.py /PATH/TO/CONFIG` 来查看完整的配置。 + +## 通过脚本参数修改配置 + +当运行 `tools/train.py` 和 `tools/test.py` 时,可以通过 `--cfg-options` 来修改配置文件。 + +- 更新字典链中的配置 + + 可以按照原始配置文件中的 dict 键顺序地指定配置预选项。例如,使用 `--cfg-options model.backbone.norm_eval=False` 将模型主干网络中的所有 BN 模块都改为 `train` 模式。 + +- 更新配置列表中的键 + + 在配置文件里,一些字典型的配置被包含在列表中。例如,数据训练流程 `data.train.pipeline` 通常是一个列表,比如 `[dict(type='LoadImageFromFile'), ...]`。如果需要将 `'LoadImageFromFile'` 改成 `'LoadImageFromWebcam'`,需要写成下述形式: `--cfg-options data.train.pipeline.0.type=LoadImageFromWebcam`。 + +- 更新列表或元组的值 + + 如果要更新的值是列表或元组。例如,配置文件通常设置 `workflow=[('train', 1)]`,如果需要改变这个键,可以通过 `--cfg-options workflow="[(train,1),(val,1)]"` 来重新设置。需要注意,引号 " 是支持列表或元组数据类型所必需的,并且在指定值的引号内**不允许**有空格。 + +## 配置文件结构 + +在 `config/_base_` 文件夹下有 4 个基本组件类型,分别是:数据集(dataset),模型(model),训练策略(schedule)和运行时的默认设置(default runtime)。许多方法,例如 Faster R-CNN、Mask R-CNN、Cascade R-CNN、RPN、SSD 能够很容易地构建出来。由 `_base_` 下的组件组成的配置,被我们称为 _原始配置(primitive)_。 + +对于同一文件夹下的所有配置,推荐**只有一个**对应的**原始配置**文件。所有其他的配置文件都应该继承自这个**原始配置**文件。这样就能保证配置文件的最大继承深度为 3。 + +为了便于理解,我们建议贡献者继承现有方法。例如,如果在 Faster R-CNN 的基础上做了一些修改,用户首先可以通过指定 `_base_ = ../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py` 来继承基础的 Faster R-CNN 结构,然后修改配置文件中的必要参数以完成继承。 + +如果你在构建一个与任何现有方法不共享结构的全新方法,那么可以在 `configs` 文件夹下创建一个新的例如 `xxx_rcnn` 文件夹。更多细节请参考 [MMCV](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html) 文档。 + +## 配置文件名称风格 + +我们遵循以下样式来命名配置文件。建议贡献者遵循相同的风格。 + +``` +{model}_[model setting]_{backbone}_{neck}_[norm setting]_[misc]_[gpu x batch_per_gpu]_{schedule}_{dataset} +``` + +`{xxx}` 是被要求的文件 `[yyy]` 是可选的。 + +- `{model}`: 模型种类,例如 `faster_rcnn`, `mask_rcnn` 等。 +- `[model setting]`: 特定的模型,例如 `htc` 中的`without_semantic`, `reppoints` 中的 `moment` 等。 +- `{backbone}`: 主干网络种类例如 `r50` (ResNet-50), `x101` (ResNeXt-101) 等。 +- `{neck}`: Neck 模型的种类包括 `fpn`, `pafpn`, `nasfpn`, `c4 ` 等。 +- `[norm_setting]`: 默认使用 `bn` (Batch Normalization),其他指定可以有 `gn` (Group Normalization), `syncbn` (Synchronized Batch Normalization) 等。 + `gn-head`/`gn-neck` 表示 GN 仅应用于网络的 Head 或 Neck, `gn-all` 表示 GN 用于整个模型, 例如主干网络、Neck 和 Head。 +- `[misc]`: 模型中各式各样的设置/插件,例如 `dconv`、 `gcb`、 `attention`、`albu`、 `mstrain` 等。 +- `[gpu x batch_per_gpu]`:GPU 数量和每个 GPU 的样本数,默认使用 `8x2`。 +- `{schedule}`: 训练方案,选项是 `1x`、 `2x`、 `20e` 等。`1x` 和 `2x` 分别代表 12 epoch 和 24 epoch,`20e` 在级联模型中使用,表示 20 epoch。对于 `1x`/`2x`,初始学习率在第 8/16 和第 11/22 epoch 衰减 10 倍;对于 `20e` ,初始学习率在第 16 和第 19 epoch 衰减 10 倍。 +- `{dataset}`:数据集,例如 `coco`、 `cityscapes`、 `voc_0712`、 `wider_face` 等。 + +## 弃用的 train_cfg/test_cfg + +`train_cfg` 和 `test_cfg` 在配置文件中已弃用,请在模型配置中指定它们。原始配置结构如下: + +```python +# 已经弃用的形式 +model = dict( + type=..., + ... +) +train_cfg=dict(...) +test_cfg=dict(...) +``` + +推荐的配置结构如下: + +```python +# 推荐的形式 +model = dict( + type=..., + ... 
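+    # 注:train_cfg 和 test_cfg 作为 model 的子字段一并定义,
+    # 不再像上面已弃用的写法那样单独写在配置文件顶层(此处省略号代表模型的其余配置,仅为示意)。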
+ train_cfg=dict(...), + test_cfg=dict(...), +) +``` + +## Mask R-CNN 配置文件示例 + +为了帮助用户对 MMDetection 检测系统中的完整配置和模块有一个基本的了解,我们对使用 ResNet50 和 FPN 的 Mask R-CNN 的配置文件进行简要注释说明。更详细的用法和各个模块对应的替代方案,请参考 API 文档。 + +```python +model = dict( + type='MaskRCNN', # 检测器(detector)名称 + backbone=dict( # 主干网络的配置文件 + type='ResNet', # 主干网络的类别,可用选项请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py#L308 + depth=50, # 主干网络的深度,对于 ResNet 和 ResNext 通常设置为 50 或 101。 + num_stages=4, # 主干网络状态(stages)的数目,这些状态产生的特征图作为后续的 head 的输入。 + out_indices=(0, 1, 2, 3), # 每个状态产生的特征图输出的索引。 + frozen_stages=1, # 第一个状态的权重被冻结 + norm_cfg=dict( # 归一化层(norm layer)的配置项。 + type='BN', # 归一化层的类别,通常是 BN 或 GN。 + requires_grad=True), # 是否训练归一化里的 gamma 和 beta。 + norm_eval=True, # 是否冻结 BN 里的统计项。 + style='pytorch', # 主干网络的风格,'pytorch' 意思是步长为2的层为 3x3 卷积, 'caffe' 意思是步长为2的层为 1x1 卷积。 + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), # 加载通过 ImageNet 预训练的模型 + neck=dict( + type='FPN', # 检测器的 neck 是 FPN,我们同样支持 'NASFPN', 'PAFPN' 等,更多细节可以参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/fpn.py#L10。 + in_channels=[256, 512, 1024, 2048], # 输入通道数,这与主干网络的输出通道一致 + out_channels=256, # 金字塔特征图每一层的输出通道 + num_outs=5), # 输出的范围(scales) + rpn_head=dict( + type='RPNHead', # RPN_head 的类型是 'RPNHead', 我们也支持 'GARPNHead' 等,更多细节可以参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/rpn_head.py#L12。 + in_channels=256, # 每个输入特征图的输入通道,这与 neck 的输出通道一致。 + feat_channels=256, # head 卷积层的特征通道。 + anchor_generator=dict( # 锚点(Anchor)生成器的配置。 + type='AnchorGenerator', # 大多是方法使用 AnchorGenerator 作为锚点生成器, SSD 检测器使用 `SSDAnchorGenerator`。更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/anchor/anchor_generator.py#L10。 + scales=[8], # 锚点的基本比例,特征图某一位置的锚点面积为 scale * base_sizes + ratios=[0.5, 1.0, 2.0], # 高度和宽度之间的比率。 + strides=[4, 8, 16, 32, 64]), # 锚生成器的步幅。这与 FPN 特征步幅一致。 如果未设置 base_sizes,则当前步幅值将被视为 base_sizes。 + bbox_coder=dict( # 在训练和测试期间对框进行编码和解码。 + type='DeltaXYWHBBoxCoder', # 框编码器的类别,'DeltaXYWHBBoxCoder' 是最常用的,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py#L9。 + target_means=[0.0, 0.0, 0.0, 0.0], # 用于编码和解码框的目标均值 + target_stds=[1.0, 1.0, 1.0, 1.0]), # 用于编码和解码框的标准差 + loss_cls=dict( # 分类分支的损失函数配置 + type='CrossEntropyLoss', # 分类分支的损失类型,我们也支持 FocalLoss 等。 + use_sigmoid=True, # RPN通常进行二分类,所以通常使用sigmoid函数。 + los_weight=1.0), # 分类分支的损失权重。 + loss_bbox=dict( # 回归分支的损失函数配置。 + type='L1Loss', # 损失类型,我们还支持许多 IoU Losses 和 Smooth L1-loss 等,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/smooth_l1_loss.py#L56。 + loss_weight=1.0)), # 回归分支的损失权重。 + roi_head=dict( # RoIHead 封装了两步(two-stage)/级联(cascade)检测器的第二步。 + type='StandardRoIHead', # RoI head 的类型,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/standard_roi_head.py#L10。 + bbox_roi_extractor=dict( # 用于 bbox 回归的 RoI 特征提取器。 + type='SingleRoIExtractor', # RoI 特征提取器的类型,大多数方法使用 SingleRoIExtractor,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/roi_extractors/single_level.py#L10。 + roi_layer=dict( # RoI 层的配置 + type='RoIAlign', # RoI 层的类别, 也支持 DeformRoIPoolingPack 和 ModulatedDeformRoIPoolingPack,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/roi_align/roi_align.py#L79。 + output_size=7, # 特征图的输出大小。 + sampling_ratio=0), # 提取 RoI 特征时的采样率。0 表示自适应比率。 + out_channels=256, # 提取特征的输出通道。 + featmap_strides=[4, 8, 16, 32]), # 多尺度特征图的步幅,应该与主干的架构保持一致。 + 
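# 注:上面 RoIAlign 的 output_size=7 与下方 bbox_head 中的 roi_feat_size=7 相对应,
+        # 两者通常需要保持一致。
+        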
bbox_head=dict( # RoIHead 中 box head 的配置. + type='Shared2FCBBoxHead', # bbox head 的类别,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py#L177。 + in_channels=256, # bbox head 的输入通道。 这与 roi_extractor 中的 out_channels 一致。 + fc_out_channels=1024, # FC 层的输出特征通道。 + roi_feat_size=7, # 候选区域(Region of Interest)特征的大小。 + num_classes=80, # 分类的类别数量。 + bbox_coder=dict( # 第二阶段使用的框编码器。 + type='DeltaXYWHBBoxCoder', # 框编码器的类别,大多数情况使用 'DeltaXYWHBBoxCoder'。 + target_means=[0.0, 0.0, 0.0, 0.0], # 用于编码和解码框的均值 + target_stds=[0.1, 0.1, 0.2, 0.2]), # 编码和解码的标准差。因为框更准确,所以值更小,常规设置时 [0.1, 0.1, 0.2, 0.2]。 + reg_class_agnostic=False, # 回归是否与类别无关。 + loss_cls=dict( # 分类分支的损失函数配置 + type='CrossEntropyLoss', # 分类分支的损失类型,我们也支持 FocalLoss 等。 + use_sigmoid=False, # 是否使用 sigmoid。 + loss_weight=1.0), # 分类分支的损失权重。 + loss_bbox=dict( # 回归分支的损失函数配置。 + type='L1Loss', # 损失类型,我们还支持许多 IoU Losses 和 Smooth L1-loss 等。 + loss_weight=1.0)), # 回归分支的损失权重。 + mask_roi_extractor=dict( # 用于 mask 生成的 RoI 特征提取器。 + type='SingleRoIExtractor', # RoI 特征提取器的类型,大多数方法使用 SingleRoIExtractor。 + roi_layer=dict( # 提取实例分割特征的 RoI 层配置 + type='RoIAlign', # RoI 层的类型,也支持 DeformRoIPoolingPack 和 ModulatedDeformRoIPoolingPack。 + output_size=14, # 特征图的输出大小。 + sampling_ratio=0), # 提取 RoI 特征时的采样率。 + out_channels=256, # 提取特征的输出通道。 + featmap_strides=[4, 8, 16, 32]), # 多尺度特征图的步幅。 + mask_head=dict( # mask 预测 head 模型 + type='FCNMaskHead', # mask head 的类型,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py#L21。 + num_convs=4, # mask head 中的卷积层数 + in_channels=256, # 输入通道,应与 mask roi extractor 的输出通道一致。 + conv_out_channels=256, # 卷积层的输出通道。 + num_classes=80, # 要分割的类别数。 + loss_mask=dict( # mask 分支的损失函数配置。 + type='CrossEntropyLoss', # 用于分割的损失类型。 + use_mask=True, # 是否只在正确的类中训练 mask。 + loss_weight=1.0)))) # mask 分支的损失权重. + train_cfg = dict( # rpn 和 rcnn 训练超参数的配置 + rpn=dict( # rpn 的训练配置 + assigner=dict( # 分配器(assigner)的配置 + type='MaxIoUAssigner', # 分配器的类型,MaxIoUAssigner 用于许多常见的检测器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10。 + pos_iou_thr=0.7, # IoU >= 0.7(阈值) 被视为正样本。 + neg_iou_thr=0.3, # IoU < 0.3(阈值) 被视为负样本。 + min_pos_iou=0.3, # 将框作为正样本的最小 IoU 阈值。 + match_low_quality=True, # 是否匹配低质量的框(更多细节见 API 文档). 
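+            # 注:RPN 阶段开启 match_low_quality,以保证每个 GT 至少能匹配到与其 IoU 最高的锚点;
+            # 下文 rcnn 阶段的 assigner 则将 match_low_quality 设为 False。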
+ ignore_iof_thr=-1), # 忽略 bbox 的 IoF 阈值。 + sampler=dict( # 正/负采样器(sampler)的配置 + type='RandomSampler', # 采样器类型,还支持 PseudoSampler 和其他采样器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8。 + num=256, # 样本数量。 + pos_fraction=0.5, # 正样本占总样本的比例。 + neg_pos_ub=-1, # 基于正样本数量的负样本上限。 + add_gt_as_proposals=False), # 采样后是否添加 GT 作为 proposal。 + allowed_border=-1, # 填充有效锚点后允许的边框。 + pos_weight=-1, # 训练期间正样本的权重。 + debug=False), # 是否设置调试(debug)模式 + rpn_proposal=dict( # 在训练期间生成 proposals 的配置 + nms_across_levels=False, # 是否对跨层的 box 做 NMS。仅适用于 `GARPNHead` ,naive rpn 不支持 nms cross levels。 + nms_pre=2000, # NMS 前的 box 数 + nms_post=1000, # NMS 要保留的 box 的数量,只在 GARPNHHead 中起作用。 + max_per_img=1000, # NMS 后要保留的 box 数量。 + nms=dict( # NMS 的配置 + type='nms', # NMS 的类别 + iou_threshold=0.7 # NMS 的阈值 + ), + min_bbox_size=0), # 允许的最小 box 尺寸 + rcnn=dict( # roi head 的配置。 + assigner=dict( # 第二阶段分配器的配置,这与 rpn 中的不同 + type='MaxIoUAssigner', # 分配器的类型,MaxIoUAssigner 目前用于所有 roi_heads。更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10。 + pos_iou_thr=0.5, # IoU >= 0.5(阈值)被认为是正样本。 + neg_iou_thr=0.5, # IoU < 0.5(阈值)被认为是负样本。 + min_pos_iou=0.5, # 将 box 作为正样本的最小 IoU 阈值 + match_low_quality=False, # 是否匹配低质量下的 box(有关更多详细信息,请参阅 API 文档)。 + ignore_iof_thr=-1), # 忽略 bbox 的 IoF 阈值 + sampler=dict( + type='RandomSampler', #采样器的类型,还支持 PseudoSampler 和其他采样器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8。 + num=512, # 样本数量 + pos_fraction=0.25, # 正样本占总样本的比例。. + neg_pos_ub=-1, # 基于正样本数量的负样本上限。. + add_gt_as_proposals=True + ), # 采样后是否添加 GT 作为 proposal。 + mask_size=28, # mask 的大小 + pos_weight=-1, # 训练期间正样本的权重。 + debug=False)) # 是否设置调试模式。 + test_cfg = dict( # 用于测试 rpn 和 rcnn 超参数的配置 + rpn=dict( # 测试阶段生成 proposals 的配置 + nms_across_levels=False, # 是否对跨层的 box 做 NMS。仅适用于`GARPNHead`,naive rpn 不支持做 NMS cross levels。 + nms_pre=1000, # NMS 前的 box 数 + nms_post=1000, # NMS 要保留的 box 的数量,只在`GARPNHHead`中起作用。 + max_per_img=1000, # NMS 后要保留的 box 数量 + nms=dict( # NMS 的配置 + type='nms', # NMS 的类型 + iou_threshold=0.7 # NMS 阈值 + ), + min_bbox_size=0), # box 允许的最小尺寸 + rcnn=dict( # roi heads 的配置 + score_thr=0.05, # bbox 的分数阈值 + nms=dict( # 第二步的 NMS 配置 + type='nms', # NMS 的类型 + iou_thr=0.5), # NMS 的阈值 + max_per_img=100, # 每张图像的最大检测次数 + mask_thr_binary=0.5)) # mask 预处的阈值 +dataset_type = 'CocoDataset' # 数据集类型,这将被用来定义数据集。 +data_root = 'data/coco/' # 数据的根路径。 +img_norm_cfg = dict( # 图像归一化配置,用来归一化输入的图像。 + mean=[123.675, 116.28, 103.53], # 预训练里用于预训练主干网络模型的平均值。 + std=[58.395, 57.12, 57.375], # 预训练里用于预训练主干网络模型的标准差。 + to_rgb=True +) # 预训练里用于预训练主干网络的图像的通道顺序。 +train_pipeline = [ # 训练流程 + dict(type='LoadImageFromFile'), # 第 1 个流程,从文件路径里加载图像。 + dict( + type='LoadAnnotations', # 第 2 个流程,对于当前图像,加载它的注释信息。 + with_bbox=True, # 是否使用标注框(bounding box), 目标检测需要设置为 True。 + with_mask=True, # 是否使用 instance mask,实例分割需要设置为 True。 + poly2mask=False), # 是否将 polygon mask 转化为 instance mask, 设置为 False 以加速和节省内存。 + dict( + type='Resize', # 变化图像和其注释大小的数据增广的流程。 + img_scale=(1333, 800), # 图像的最大规模。 + keep_ratio=True + ), # 是否保持图像的长宽比。 + dict( + type='RandomFlip', # 翻转图像和其注释大小的数据增广的流程。 + flip_ratio=0.5), # 翻转图像的概率。 + dict( + type='Normalize', # 归一化当前图像的数据增广的流程。 + mean=[123.675, 116.28, 103.53], # 这些键与 img_norm_cfg 一致,因为 img_norm_cfg 被 + std=[58.395, 57.12, 57.375], # 用作参数。 + to_rgb=True), + dict( + type='Pad', # 填充当前图像到指定大小的数据增广的流程。 + size_divisor=32), # 填充图像可以被当前值整除。 + dict(type='DefaultFormatBundle'), # 流程里收集数据的默认格式捆。 + dict( + type='Collect', # 
决定数据中哪些键应该传递给检测器的流程 + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), # 第 1 个流程,从文件路径里加载图像。 + dict( + type='MultiScaleFlipAug', # 封装测试时数据增广(test time augmentations)。 + img_scale=(1333, 800), # 决定测试时可改变图像的最大规模。用于改变图像大小的流程。 + flip=False, # 测试时是否翻转图像。 + transforms=[ + dict(type='Resize', # 使用改变图像大小的数据增广。 + keep_ratio=True), # 是否保持宽和高的比例,这里的图像比例设置将覆盖上面的图像规模大小的设置。 + dict(type='RandomFlip'), # 考虑到 RandomFlip 已经被添加到流程里,当 flip=False 时它将不被使用。 + dict( + type='Normalize', # 归一化配置项,值来自 img_norm_cfg。 + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict( + type='Pad', # 将配置传递给可被 32 整除的图像。 + size_divisor=32), + dict( + type='ImageToTensor', # 将图像转为张量 + keys=['img']), + dict( + type='Collect', # 收集测试时必须的键的收集流程。 + keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, # 单个 GPU 的 Batch size + workers_per_gpu=2, # 单个 GPU 分配的数据加载线程数 + train=dict( # 训练数据集配置 + type='CocoDataset', # 数据集的类别, 更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py#L19。 + ann_file='data/coco/annotations/instances_train2017.json', # 注释文件路径 + img_prefix='data/coco/train2017/', # 图片路径前缀 + pipeline=[ # 流程, 这是由之前创建的 train_pipeline 传递的。 + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + poly2mask=False), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) + ]), + val=dict( # 验证数据集的配置 + type='CocoDataset', + ann_file='data/coco/annotations/instances_val2017.json', + img_prefix='data/coco/val2017/', + pipeline=[ # 由之前创建的 test_pipeline 传递的流程。 + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ]), + test=dict( # 测试数据集配置,修改测试开发/测试(test-dev/test)提交的 ann_file + type='CocoDataset', + ann_file='data/coco/annotations/instances_val2017.json', + img_prefix='data/coco/val2017/', + pipeline=[ # 由之前创建的 test_pipeline 传递的流程。 + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + samples_per_gpu=2 # 单个 GPU 测试时的 Batch size + )) +evaluation = dict( # evaluation hook 的配置,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7。 + interval=1, # 验证的间隔。 + metric=['bbox', 'segm']) # 验证期间使用的指标。 +optimizer = dict( # 用于构建优化器的配置文件。支持 PyTorch 中的所有优化器,同时它们的参数与 PyTorch 里的优化器参数一致。 + type='SGD', # 优化器种类,更多细节可参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/optimizer/default_constructor.py#L13。 + lr=0.02, # 优化器的学习率,参数的使用细节请参照对应的 PyTorch 文档。 + momentum=0.9, # 动量(Momentum) + 
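# 注:默认的 lr=0.02 通常对应 8 块 GPU、每块 GPU 2 张图(总 batch size 为 16)的设置,
+    # 若实际使用的总 batch size 不同,一般需要按线性比例相应调整学习率。
+    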
weight_decay=0.0001) # SGD 的衰减权重(weight decay)。 +optimizer_config = dict( # optimizer hook 的配置文件,执行细节请参考 https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8。 + grad_clip=None) # 大多数方法不使用梯度限制(grad_clip)。 +lr_config = dict( # 学习率调整配置,用于注册 LrUpdater hook。 + policy='step', # 调度流程(scheduler)的策略,也支持 CosineAnnealing, Cyclic, 等。请从 https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9 参考 LrUpdater 的细节。 + warmup='linear', # 预热(warmup)策略,也支持 `exp` 和 `constant`。 + warmup_iters=500, # 预热的迭代次数 + warmup_ratio= + 0.001, # 用于热身的起始学习率的比率 + step=[8, 11]) # 衰减学习率的起止回合数 +runner = dict( + type='EpochBasedRunner', # 将使用的 runner 的类别 (例如 IterBasedRunner 或 EpochBasedRunner)。 + max_epochs=12) # runner 总回合数, 对于 IterBasedRunner 使用 `max_iters` +checkpoint_config = dict( # Checkpoint hook 的配置文件。执行时请参考 https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py。 + interval=1) # 保存的间隔是 1。 +log_config = dict( # register logger hook 的配置文件。 + interval=50, # 打印日志的间隔 + hooks=[ + # dict(type='TensorboardLoggerHook') # 同样支持 Tensorboard 日志 + dict(type='TextLoggerHook') + ]) # 用于记录训练过程的记录器(logger)。 +dist_params = dict(backend='nccl') # 用于设置分布式训练的参数,端口也同样可被设置。 +log_level = 'INFO' # 日志的级别。 +load_from = None # 从一个给定路径里加载模型作为预训练模型,它并不会消耗训练时间。 +resume_from = None # 从给定路径里恢复检查点(checkpoints),训练模式将从检查点保存的轮次开始恢复训练。 +workflow = [('train', 1)] # runner 的工作流程,[('train', 1)] 表示只有一个工作流且工作流仅执行一次。根据 total_epochs 工作流训练 12个回合。 +work_dir = 'work_dir' # 用于保存当前实验的模型检查点和日志的目录。 +``` + +## 常问问题 (FAQ) + +### 忽略基础配置文件里的部分内容 + +有时,您也许会设置 `_delete_=True` 去忽略基础配置文件里的一些域内容。 您也许可以参照 [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html#inherit-from-base-config-with-ignored-fields) 来获得一些简单的指导。 + +在 MMDetection里,例如为了改变 Mask R-CNN 的主干网络的某些内容: + +```python +model = dict( + type='MaskRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict(...), + rpn_head=dict(...), + roi_head=dict(...)) +``` + +基础配置的 `Mask R-CNN` 使用 `ResNet-50`,在需要将主干网络改成 `HRNet` 的时候,因为 `HRNet` 和 `ResNet` 中有不同的字段,需要使用 `_delete_=True` 将新的键去替换 `backbone` 域内所有老的键。 + +```python +_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w32', + backbone=dict( + _delete_=True, + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256)))), + neck=dict(...)) +``` + +### 使用配置文件里的中间变量 + +配置文件里会使用一些中间变量,例如数据集里的 `train_pipeline`/`test_pipeline`。我们在定义新的 `train_pipeline`/`test_pipeline` 之后,需要将它们传递到 `data` 里。例如,我们想在训练或测试时,改变 Mask R-CNN 的多尺度策略 (multi scale strategy),`train_pipeline`/`test_pipeline` 是我们想要修改的中间变量。 + +```python +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 640), (1333, 
672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + multiscale_mode="value", + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +``` + +我们首先定义新的 `train_pipeline`/`test_pipeline` 然后传递到 `data` 里。 + +同样的,如果我们想从 `SyncBN` 切换到 `BN` 或者 `MMSyncBN`,我们需要修改配置文件里的每一个 `norm_cfg`。 + +```python +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + backbone=dict(norm_cfg=norm_cfg), + neck=dict(norm_cfg=norm_cfg), + ...) +``` diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/customize_dataset.md b/downstream/mmdetection/docs/zh_cn/tutorials/customize_dataset.md new file mode 100644 index 0000000..8468e40 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/customize_dataset.md @@ -0,0 +1,456 @@ +# 教程 2: 自定义数据集 + +## 支持新的数据格式 + +为了支持新的数据格式,可以选择将数据转换成现成的格式(COCO 或者 PASCAL)或将其转换成中间格式。当然也可以选择以离线的形式(在训练之前使用脚本转换)或者在线的形式(实现一个新的 dataset 在训练中进行转换)来转换数据。 + +在 MMDetection 中,建议将数据转换成 COCO 格式并以离线的方式进行,因此在完成数据转换后只需修改配置文件中的标注数据的路径和类别即可。 + +### 将新的数据格式转换为现有的数据格式 + +最简单的方法就是将你的数据集转换成现有的数据格式(COCO 或者 PASCAL VOC) + +COCO 格式的 json 标注文件有如下必要的字段: + +```python +'images': [ + { + 'file_name': 'COCO_val2014_000000001268.jpg', + 'height': 427, + 'width': 640, + 'id': 1268 + }, + ... +], + +'annotations': [ + { + 'segmentation': [[192.81, + 247.09, + ... + 219.03, + 249.06]], # 如果有 mask 标签 + 'area': 1035.749, + 'iscrowd': 0, + 'image_id': 1268, + 'bbox': [192.81, 224.8, 74.73, 33.43], + 'category_id': 16, + 'id': 42986 + }, + ... +], + +'categories': [ + {'id': 0, 'name': 'car'}, + ] +``` + +在 json 文件中有三个必要的键: + +- `images`: 包含多个图片以及它们的信息的数组,例如 `file_name`、`height`、`width` 和 `id`。 +- `annotations`: 包含多个实例标注信息的数组。 +- `categories`: 包含多个类别名字和 ID 的数组。 + +在数据预处理之后,使用现有的数据格式来训练自定义的新数据集有如下两步(以 COCO 为例): + +1. 为自定义数据集修改配置文件。 +2. 检查自定义数据集的标注。 + +这里我们举一个例子来展示上面的两个步骤,这个例子使用包括 5 个类别的 COCO 格式的数据集来训练一个现有的 Cascade Mask R-CNN R50-FPN 检测器 + +#### 1. 为自定义数据集修改配置文件 + +配置文件的修改涉及两个方面: + +1. `data` 部分。需要在 `data.train`、`data.val` 和 `data.test` 中添加 `classes`。 +2. `model` 部分中的 `num_classes`。需要将默认值(COCO 数据集中为 80)修改为自定义数据集中的类别数。 + +`configs/my_custom_config.py` 内容如下: + +```python + +# 新的配置来自基础的配置以更好地说明需要修改的地方 +_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' + +# 1. 
数据集设定 +dataset_type = 'CocoDataset' +classes = ('a', 'b', 'c', 'd', 'e') +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + # 将类别名字添加至 `classes` 字段中 + classes=classes, + ann_file='path/to/your/train/annotation_data', + img_prefix='path/to/your/train/image_data'), + val=dict( + type=dataset_type, + # 将类别名字添加至 `classes` 字段中 + classes=classes, + ann_file='path/to/your/val/annotation_data', + img_prefix='path/to/your/val/image_data'), + test=dict( + type=dataset_type, + # 将类别名字添加至 `classes` 字段中 + classes=classes, + ann_file='path/to/your/test/annotation_data', + img_prefix='path/to/your/test/image_data')) + +# 2. 模型设置 + +# 将所有的 `num_classes` 默认值修改为5(原来为80) +model = dict( + roi_head=dict( + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + # 将所有的 `num_classes` 默认值修改为 5(原来为 80) + num_classes=5), + dict( + type='Shared2FCBBoxHead', + # 将所有的 `num_classes` 默认值修改为 5(原来为 80) + num_classes=5), + dict( + type='Shared2FCBBoxHead', + # 将所有的 `num_classes` 默认值修改为 5(原来为 80) + num_classes=5)], + # 将所有的 `num_classes` 默认值修改为 5(原来为 80) + mask_head=dict(num_classes=5))) +``` + +#### 2. 检查自定义数据集的标注 + +假设你自己的数据集是 COCO 格式,那么需要保证数据的标注没有问题: + +1. 标注文件中 `categories` 的长度要与配置中的 `classes` 元组长度相匹配,它们都表示有几类。(如例子中有 5 个类别) +2. 配置文件中 `classes` 字段应与标注文件里 `categories` 下的 `name` 有相同的元素且顺序一致。MMDetection 会自动将 `categories` 中不连续的 `id` 映射成连续的索引,因此 `categories` 下的 `name`的字符串顺序会影响标签的索引。同时,配置文件中的 `classes` 的字符串顺序也会影响到预测框可视化时的标签。 +3. `annotations` 中的 `category_id` 必须是有效的值。比如所有 `category_id` 的值都应该属于 `categories` 中的 `id`。 + +下面是一个有效标注的例子: + +```python + +'annotations': [ + { + 'segmentation': [[192.81, + 247.09, + ... + 219.03, + 249.06]], #如果有 mask 标签。 + 'area': 1035.749, + 'iscrowd': 0, + 'image_id': 1268, + 'bbox': [192.81, 224.8, 74.73, 33.43], + 'category_id': 16, + 'id': 42986 + }, + ... +], + +# MMDetection 会自动将 `categories` 中不连续的 `id` 映射成连续的索引。 +'categories': [ + {'id': 1, 'name': 'a'}, {'id': 3, 'name': 'b'}, {'id': 4, 'name': 'c'}, {'id': 16, 'name': 'd'}, {'id': 17, 'name': 'e'}, + ] +``` + +我们使用这种方式来支持 CityScapes 数据集。脚本在[cityscapes.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/cityscapes.py) 并且我们提供了微调的[configs](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes). + +**注意** + +1. 对于实例分割数据集, **MMDetection 目前只支持评估 COCO 格式的 mask AP**. +2. 推荐训练之前进行离线转换,这样就可以继续使用 `CocoDataset` 且只需修改标注文件的路径以及训练的种类。 + +### 调整新的数据格式为中间格式 + +如果不想将标注格式转换为 COCO 或者 PASCAL 格式也是可行的。实际上,我们定义了一种简单的标注格式并且与所有现有的数据格式兼容,也能进行离线或者在线转换。 + +数据集的标注是包含多个字典(dict)的列表,每个字典(dict)都与一张图片对应。测试时需要用到 `filename`(相对路径)、`width` 和 `height` 三个字段;训练时则额外需要 `ann`。`ann` 也是至少包含了两个字段的字典:`bboxes` 和 `labels`,它们都是 numpy array。有些数据集可能会提供如:crowd/difficult/ignored bboxes 标注,那么我们使用 `bboxes_ignore` 以及 `labels_ignore` 来包含它们。 + +下面给出一个例子。 + +```python + +[ + { + 'filename': 'a.jpg', + 'width': 1280, + 'height': 720, + 'ann': { + 'bboxes': (n, 4), + 'labels': (n, ), + 'bboxes_ignore': (k, 4), + 'labels_ignore': (k, ) (可选字段) + } + }, + ... +] +``` + +有两种方法处理自定义数据。 + +- 在线转换(online conversion) + + 可以新写一个继承自 `CustomDataset` 的 Dataset 类,并重写 `load_annotations(self, ann_file)` 以及 `get_ann_info(self, idx)` 这两个方法,正如[CocoDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py)与[VOCDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/voc.py). 
+ +- 离线转换(offline conversion) + + 可以将标注格式转换为上述的任意格式并将其保存为 pickle 或者 json 文件,例如[pascal_voc.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/pascal_voc.py)。 + 然后使用`CustomDataset`。 + +### 自定义数据集的例子: + +假设文本文件中表示的是一种全新的标注格式。边界框的标注信息保存在 `annotation.txt` 中,内容如下: + +``` +# +000001.jpg +1280 720 +2 +10 20 40 60 1 +20 40 50 60 2 +# +000002.jpg +1280 720 +3 +50 20 40 60 2 +20 40 30 45 2 +30 40 50 60 3 +``` + +我们可以在 `mmdet/datasets/my_dataset.py` 中创建一个新的 dataset 用以加载数据。 + +```python +import mmcv +import numpy as np + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class MyDataset(CustomDataset): + + CLASSES = ('person', 'bicycle', 'car', 'motorcycle') + + def load_annotations(self, ann_file): + ann_list = mmcv.list_from_file(ann_file) + + data_infos = [] + for i, ann_line in enumerate(ann_list): + if ann_line != '#': + continue + + img_shape = ann_list[i + 2].split(' ') + width = int(img_shape[0]) + height = int(img_shape[1]) + bbox_number = int(ann_list[i + 3]) + + anns = ann_line.split(' ') + bboxes = [] + labels = [] + for anns in ann_list[i + 4:i + 4 + bbox_number]: + bboxes.append([float(ann) for ann in anns[:4]]) + labels.append(int(anns[4])) + + data_infos.append( + dict( + filename=ann_list[i + 1], + width=width, + height=height, + ann=dict( + bboxes=np.array(bboxes).astype(np.float32), + labels=np.array(labels).astype(np.int64)) + )) + + return data_infos + + def get_ann_info(self, idx): + return self.data_infos[idx]['ann'] + +``` + +配置文件中,可以使用 `MyDataset` 进行如下修改 + +```python +dataset_A_train = dict( + type='MyDataset', + ann_file = 'image_list.txt', + pipeline=train_pipeline +) +``` + +## 使用 dataset 包装器自定义数据集 + +MMDetection 也支持非常多的数据集包装器(wrapper)来混合数据集或在训练时修改数据集的分布。 +最近 MMDetection 支持如下三种数据集包装: + +- `RepeatDataset`:将整个数据集简单地重复。 +- `ClassBalancedDataset`:以类别均衡的方式重复数据集。 +- `ConcatDataset`:合并数据集。 + +### 重复数据集(Repeat dataset) + +使用 `RepeatDataset` 包装器来重复数据集。例如,假设原始数据集为 `Dataset_A`,重复它过后,其配置如下: + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( # Dataset_A 的原始配置信息 + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +### 类别均衡数据集(Class balanced dataset) + +使用 `ClassBalancedDataset` 作为包装器在类别的出现的频率上重复数据集。数据集需要实例化 `self.get_cat_ids(idx)` 函数以支持 `ClassBalancedDataset`。 +比如,以 `oversample_thr=1e-3` 来重复数据集 `Dataset_A`,其配置如下: + +```python +dataset_A_train = dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( # Dataset_A 的原始配置信息 + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +更多细节请参考[源码](../../mmdet/datasets/dataset_wrappers.py)。 + +### 合并数据集(Concatenate dataset) + +合并数据集有三种方法: + +1. 如果要合并的数据集类型一致但有多个的标注文件,那么可以使用如下配置将其合并。 + + ```python + dataset_A_train = dict( + type='Dataset_A', + ann_file = ['anno_file_1', 'anno_file_2'], + pipeline=train_pipeline + ) + ``` + + 如果合并的数据集适用于测试或者评估,那么这种方式支持每个数据集分开进行评估。如果想要将合并的数据集作为整体用于评估,那么可以像如下一样设置 `separate_eval=False`。 + + ```python + dataset_A_train = dict( + type='Dataset_A', + ann_file = ['anno_file_1', 'anno_file_2'], + separate_eval=False, + pipeline=train_pipeline + ) + ``` + +2. 如果想要合并的是不同数据集,那么可以使用如下配置。 + + ```python + dataset_A_val = dict() + dataset_B_val = dict() + + data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dataset_A_train, + val=dict( + type='ConcatDataset', + datasets=[dataset_A_val, dataset_B_val], + separate_eval=False)) + ``` + + 只需设置 `separate_eval=False`,用户就可以将所有的数据集作为一个整体来评估。 + +**注意** + +1. 
在做评估时,`separate_eval=False` 选项是假设数据集使用了 `self.data_infos`。因此COCO数据集不支持此项操作,因为COCO数据集在做评估时并不是所有都依赖 `self.data_infos`。组合不同类型的数据集并将其作为一个整体来评估,这种做法没有得到测试,也不建议这样做。 + +2. 因为不支持评估 `ClassBalancedDataset` 和 `RepeatDataset`,所以也不支持评估它们的组合。 + +一个更复杂的例子则是分别将 `Dataset_A` 和 `Dataset_B` 重复N和M次,然后进行如下合并。 + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( + type='Dataset_A', + ... + pipeline=train_pipeline + ) +) +dataset_A_val = dict( + ... + pipeline=test_pipeline +) +dataset_A_test = dict( + ... + pipeline=test_pipeline +) +dataset_B_train = dict( + type='RepeatDataset', + times=M, + dataset=dict( + type='Dataset_B', + ... + pipeline=train_pipeline + ) +) +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train = [ + dataset_A_train, + dataset_B_train + ], + val = dataset_A_val, + test = dataset_A_test +) + +``` + +## 修改数据集的类别 + +根据现有数据集的类型,我们可以修改它们的类别名称来训练其标注的子集。 +例如,如果只想训练当前数据集中的三个类别,那么就可以修改数据集的类别元组。 +数据集就会自动屏蔽掉其他类别的真实框。 + +```python +classes = ('person', 'bicycle', 'car') +data = dict( + train=dict(classes=classes), + val=dict(classes=classes), + test=dict(classes=classes)) +``` + +MMDetection V2.0 也支持从文件中读取类别名称,这种方式在实际应用中很常见。 +假设存在文件 `classes.txt`,其包含了如下的类别名称。 + +``` +person +bicycle +car +``` + +用户可以将类别设置成文件路径,数据集就会自动将其加载并转换成一个列表。 + +```python +classes = 'path/to/classes.txt' +data = dict( + train=dict(classes=classes), + val=dict(classes=classes), + test=dict(classes=classes)) +``` + +**注意** + +- 在 MMDetection v2.5.0 之前,如果类别为集合时数据集将自动过滤掉不包含 GT 的图片,且没办法通过修改配置将其关闭。这是一种不可取的行为而且会引起混淆,因为当类别不是集合时数据集只有在 `filter_empty_gt=True` 以及 `test_mode=False` 的情况下才会过滤掉不包含 GT 的图片。在 MMDetection v2.5.0 之后,我们将图片的过滤以及类别的修改进行解耦,如,数据集只有在 `filter_empty_gt=True` 和 `test_mode=False` 的情况下才会过滤掉不包含 GT 的图片,无论类别是否为集合。设置类别只会影响用于训练的标注类别,用户可以自行决定是否过滤不包含 GT 的图片。 +- 因为中间格式只有框的标签并不包含类别的名字,所以使用 `CustomDataset` 时用户不能通过修改配置来过滤不含 GT 的图片。但是可以通过离线的方式来解决。 +- 当设置数据集中的 `classes` 时,记得修改 `num_classes`。从 v2.9.0 (PR#4508) 之后,我们实现了[NumClassCheckHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/utils.py)来检查类别数是否一致。 +- 我们在未来将会重构设置数据集类别以及数据集过滤的特性,使其更加地方便用户使用。 diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/customize_losses.md b/downstream/mmdetection/docs/zh_cn/tutorials/customize_losses.md new file mode 100644 index 0000000..f721e77 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/customize_losses.md @@ -0,0 +1,125 @@ +# 教程 6: 自定义损失函数 + +MMDetection 为用户提供了不同的损失函数。但是默认的配置可能无法适应不同的数据和模型,所以用户可能会希望修改某一个损失函数来适应新的情况。 + +本教程首先详细的解释计算损失的过程然后给出一些关于如何修改每一个步骤的指导。对损失的修改可以被分为微调和加权。 + +## 一个损失的计算过程 + +给定输入(包括预测和目标,以及权重),损失函数会把输入的张量映射到最后的损失标量。映射过程可以分为下面五个步骤: + +1. 设置采样方法为对正负样本进行采样。 + +2. 通过损失核函数获取**元素**或者**样本**损失。 + +3. 通过权重张量来给损失**逐元素**权重。 + +4. 把损失张量归纳为一个**标量**。 + +5. 
用一个**张量**给当前损失一个权重。 + +## 设置采样方法(步骤 1) + +对于一些损失函数,需要采样策略来避免正负样本之间的不平衡。 + +例如,在RPN head中使用`CrossEntropyLoss`时,我们需要在`train_cfg`中设置`RandomSampler` + +```python +train_cfg=dict( + rpn=dict( + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False)) +``` + +对于其他一些具有正负样本平衡机制的损失,例如 Focal Loss、GHMC 和 QualityFocalLoss,不再需要进行采样。 + +## 微调损失 + +微调一个损失主要与步骤 2,4,5 有关,大部分的修改可以在配置文件中指定。这里我们用 [Focal Loss (FL)](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/focal_loss.py) 作为例子。 +下面的代码分别是构建 FL 的方法和它的配置文件,他们是一一对应的。 + +```python +@LOSSES.register_module() +class FocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0): +``` + +```python +loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0) +``` + +### 微调超参数(步骤2) + +`gamma` 和 `beta` 是 Focal Loss 中的两个超参数。如果我们想把 `gamma` 的值设为 1.5,把 `alpha` 的值设为 0.5,我们可以在配置文件中按照如下指定: + +```python +loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=1.5, + alpha=0.5, + loss_weight=1.0) +``` + +### 微调归纳方式(步骤4) + +Focal Loss 默认的归纳方式是 `mean`。如果我们想把归纳方式从 `mean` 改成 `sum`,我们可以在配置文件中按照如下指定: + +```python +loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0, + reduction='sum') +``` + +### 微调损失权重(步骤5) + +这里的损失权重是一个标量,他用来控制多任务学习中不同损失的重要程度,例如,分类损失和回归损失。如果我们想把分类损失的权重设为 0.5,我们可以在配置文件中如下指定: + +```python +loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=0.5) +``` + +## 加权损失(步骤3) + +加权损失就是我们逐元素修改损失权重。更具体来说,我们给损失张量乘以一个与他有相同形状的权重张量。所以,损失中不同的元素可以被赋予不同的比例,所以这里叫做逐元素。损失的权重在不同模型中变化很大,而且与上下文相关,但是总的来说主要有两种损失权重:分类损失的 `label_weights` 和边界框的 `bbox_weights`。你可以在相应的头中的 `get_target` 方法中找到他们。这里我们使用 [ATSSHead](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/atss_head.py#L530) 作为一个例子。它继承了 [AnchorHead](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/anchor_head.py),但是我们重写它的 +`get_targets` 方法来产生不同的 `label_weights` 和 `bbox_weights`。 + +``` +class ATSSHead(AnchorHead): + + ... + + def get_targets(self, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): +``` diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/customize_models.md b/downstream/mmdetection/docs/zh_cn/tutorials/customize_models.md new file mode 100644 index 0000000..b29254a --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/customize_models.md @@ -0,0 +1,359 @@ +# 教程 4: 自定义模型 + +我们简单地把模型的各个组件分为五类: + +- 主干网络 (backbone):通常是一个用来提取特征图 (feature map) 的全卷积网络 (FCN network),例如:ResNet, MobileNet。 +- Neck:主干网络和 Head 之间的连接部分,例如:FPN, PAFPN。 +- Head:用于具体任务的组件,例如:边界框预测和掩码预测。 +- 区域提取器 (roi extractor):从特征图中提取 RoI 特征,例如:RoI Align。 +- 损失 (loss):在 Head 组件中用于计算损失的部分,例如:FocalLoss, L1Loss, GHMLoss. + +## 开发新的组件 + +### 添加一个新的主干网络 + +这里,我们以 MobileNet 为例来展示如何开发新组件。 + +#### 1. 定义一个新的主干网络(以 MobileNet 为例) + +新建一个文件 `mmdet/models/backbones/mobilenet.py` + +```python +import torch.nn as nn + +from ..builder import BACKBONES + + +@BACKBONES.register_module() +class MobileNet(nn.Module): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # should return a tuple + pass +``` + +#### 2. 
导入该模块 + +你可以添加下述代码到 `mmdet/models/backbones/__init__.py` + +```python +from .mobilenet import MobileNet +``` + +或添加: + +```python +custom_imports = dict( + imports=['mmdet.models.backbones.mobilenet'], + allow_failed_imports=False) +``` + +到配置文件以避免原始代码被修改。 + +#### 3. 在你的配置文件中使用该主干网络 + +```python +model = dict( + ... + backbone=dict( + type='MobileNet', + arg1=xxx, + arg2=xxx), + ... +``` + +### 添加新的 Neck + +#### 1. 定义一个 Neck(以 PAFPN 为例) + +新建一个文件 `mmdet/models/necks/pafpn.py` + +```python +from ..builder import NECKS + +@NECKS.register_module() +class PAFPN(nn.Module): + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False): + pass + + def forward(self, inputs): + # implementation is ignored + pass +``` + +#### 2. 导入该模块 + +你可以添加下述代码到 `mmdet/models/necks/__init__.py` + +```python +from .pafpn import PAFPN +``` + +或添加: + +```python +custom_imports = dict( + imports=['mmdet.models.necks.pafpn.py'], + allow_failed_imports=False) +``` + +到配置文件以避免原始代码被修改。 + +#### 3. 修改配置文件 + +```python +neck=dict( + type='PAFPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5) +``` + +### 添加新的 Head + +我们以 [Double Head R-CNN](https://arxiv.org/abs/1904.06493) 为例来展示如何添加一个新的 Head。 + +首先,添加一个新的 bbox head 到 `mmdet/models/roi_heads/bbox_heads/double_bbox_head.py`。 +Double Head R-CNN 在目标检测上实现了一个新的 bbox head。为了实现 bbox head,我们需要使用如下的新模块中三个函数。 + +```python +from mmdet.models.builder import HEADS +from .bbox_head import BBoxHead + +@HEADS.register_module() +class DoubleConvFCBBoxHead(BBoxHead): + r"""Bbox head used in Double-Head R-CNN + + /-> cls + /-> shared convs -> + \-> reg + roi features + /-> cls + \-> shared fc -> + \-> reg + """ # noqa: W605 + + def __init__(self, + num_convs=0, + num_fcs=0, + conv_out_channels=1024, + fc_out_channels=1024, + conv_cfg=None, + norm_cfg=dict(type='BN'), + **kwargs): + kwargs.setdefault('with_avg_pool', True) + super(DoubleConvFCBBoxHead, self).__init__(**kwargs) + + + def forward(self, x_cls, x_reg): + +``` + +然后,如有必要,实现一个新的 bbox head。我们打算从 `StandardRoIHead` 来继承新的 `DoubleHeadRoIHead`。我们可以发现 `StandardRoIHead` 已经实现了下述函数。 + +```python +import torch + +from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler +from ..builder import HEADS, build_head, build_roi_extractor +from .base_roi_head import BaseRoIHead +from .test_mixins import BBoxTestMixin, MaskTestMixin + + +@HEADS.register_module() +class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): + """Simplest base roi head including one bbox head and one mask head. 
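+    Subclasses such as ``DoubleHeadRoIHead`` below typically only need to
+    override ``_bbox_forward`` and can reuse the rest of this logic.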
+ """ + + def init_assigner_sampler(self): + + def init_bbox_head(self, bbox_roi_extractor, bbox_head): + + def init_mask_head(self, mask_roi_extractor, mask_head): + + + def forward_dummy(self, x, proposals): + + + def forward_train(self, + x, + img_metas, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None): + + def _bbox_forward(self, x, rois): + + def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, + img_metas): + + def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, + img_metas): + + def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): + + + def simple_test(self, + x, + proposal_list, + img_metas, + proposals=None, + rescale=False): + """Test without augmentation.""" + +``` + +Double Head 的修改主要在 bbox_forward 的逻辑中,且它从 `StandardRoIHead` 中继承了其他逻辑。在 `mmdet/models/roi_heads/double_roi_head.py` 中,我们用下述代码实现新的 bbox head: + +```python +from ..builder import HEADS +from .standard_roi_head import StandardRoIHead + + +@HEADS.register_module() +class DoubleHeadRoIHead(StandardRoIHead): + """RoI head for Double Head RCNN + + https://arxiv.org/abs/1904.06493 + """ + + def __init__(self, reg_roi_scale_factor, **kwargs): + super(DoubleHeadRoIHead, self).__init__(**kwargs) + self.reg_roi_scale_factor = reg_roi_scale_factor + + def _bbox_forward(self, x, rois): + bbox_cls_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + bbox_reg_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.reg_roi_scale_factor) + if self.with_shared_head: + bbox_cls_feats = self.shared_head(bbox_cls_feats) + bbox_reg_feats = self.shared_head(bbox_reg_feats) + cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) + + bbox_results = dict( + cls_score=cls_score, + bbox_pred=bbox_pred, + bbox_feats=bbox_cls_feats) + return bbox_results +``` + +最终,用户需要把该模块添加到 `mmdet/models/bbox_heads/__init__.py` 和 `mmdet/models/roi_heads/__init__.py` 以使相关的注册表可以找到并加载他们。 + +或者,用户可以添加: + +```python +custom_imports=dict( + imports=['mmdet.models.roi_heads.double_roi_head', 'mmdet.models.bbox_heads.double_bbox_head']) +``` + +到配置文件并实现相同的目的。 + +Double Head R-CNN 的配置文件如下: + +```python +_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' +model = dict( + roi_head=dict( + type='DoubleHeadRoIHead', + reg_roi_scale_factor=1.3, + bbox_head=dict( + _delete_=True, + type='DoubleConvFCBBoxHead', + num_convs=4, + num_fcs=2, + in_channels=256, + conv_out_channels=1024, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) + +``` + +从 MMDetection 2.0 版本起,配置系统支持继承配置以使用户可以专注于修改。 +Double Head R-CNN 主要使用了一个新的 DoubleHeadRoIHead 和一个新的 `DoubleConvFCBBoxHead`,参数需要根据每个模块的 `__init__` 函数来设置。 + +### 添加新的损失 + +假设你想添加一个新的损失 `MyLoss` 用于边界框回归。 +为了添加一个新的损失函数,用户需要在 `mmdet/models/losses/my_loss.py` 中实现。 +装饰器 `weighted_loss` 可以使损失每个部分加权。 + +```python +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weighted_loss + +@weighted_loss +def my_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + +@LOSSES.register_module() +class MyLoss(nn.Module): + + def __init__(self, 
reduction='mean', loss_weight=1.0): + super(MyLoss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * my_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss_bbox +``` + +然后,用户需要把它加到 `mmdet/models/losses/__init__.py`。 + +```python +from .my_loss import MyLoss, my_loss + +``` + +或者,你可以添加: + +```python +custom_imports=dict( + imports=['mmdet.models.losses.my_loss']) +``` + +到配置文件来实现相同的目的。 + +如使用,请修改 `loss_xxx` 字段。 +因为 MyLoss 是用于回归的,你需要在 Head 中修改 `loss_xxx` 字段。 + +```python +loss_bbox=dict(type='MyLoss', loss_weight=1.0)) +``` diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/customize_runtime.md b/downstream/mmdetection/docs/zh_cn/tutorials/customize_runtime.md new file mode 100644 index 0000000..8d998c3 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/customize_runtime.md @@ -0,0 +1 @@ +# 教程 5: 自定义训练配置 diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/data_pipeline.md b/downstream/mmdetection/docs/zh_cn/tutorials/data_pipeline.md new file mode 100644 index 0000000..2fd7f8f --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/data_pipeline.md @@ -0,0 +1,190 @@ +# 教程 3: 自定义数据预处理流程 + +## 数据流程的设计 + +按照惯例,我们使用 `Dataset` 和 `DataLoader` 进行多进程的数据加载。`Dataset` 返回字典类型的数据,数据内容为模型 `forward` 方法的各个参数。由于在目标检测中,输入的图像数据具有不同的大小,我们在 `MMCV` 里引入一个新的 `DataContainer` 类去收集和分发不同大小的输入数据。更多细节请参考[这里](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py)。 + +数据的准备流程和数据集是解耦的。通常一个数据集定义了如何处理标注数据(annotations)信息,而一个数据流程定义了准备一个数据字典的所有步骤。一个流程包括一系列的操作,每个操作都把一个字典作为输入,然后再输出一个新的字典给下一个变换操作。 + +我们在下图展示了一个经典的数据处理流程。蓝色块是数据处理操作,随着数据流程的处理,每个操作都可以在结果字典中加入新的键(标记为绿色)或更新现有的键(标记为橙色)。 + +![pipeline figure](../../../resources/data_pipeline.png) + +这些操作可以分为数据加载(data loading)、预处理(pre-processing)、格式变化(formatting)和测试时数据增强(test-time augmentation)。 + +下面的例子是 `Faster R-CNN` 的一个流程: + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +``` + +对于每个操作,我们列出它添加、更新、移除的相关字典域 (dict fields): + +### 数据加载 Data loading + +`LoadImageFromFile` + +- 增加:img, img_shape, ori_shape + +`LoadAnnotations` + +- 增加:gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg, bbox_fields, mask_fields + +`LoadProposals` + +- 增加:proposals + +### 预处理 Pre-processing + +`Resize` + +- 增加:scale, scale_idx, pad_shape, scale_factor, keep_ratio +- 更新:img, img_shape, \*bbox_fields, \*mask_fields, \*seg_fields + +`RandomFlip` + +- 增加:flip +- 更新:img, \*bbox_fields, 
\*mask_fields, \*seg_fields + +`Pad` + +- 增加:pad_fixed_size, pad_size_divisor +- 更新:img, pad_shape, \*mask_fields, \*seg_fields + +`RandomCrop` + +- 更新:img, pad_shape, gt_bboxes, gt_labels, gt_masks, \*bbox_fields + +`Normalize` + +- 增加:img_norm_cfg +- 更新:img + +`SegRescale` + +- 更新:gt_semantic_seg + +`PhotoMetricDistortion` + +- 更新:img + +`Expand` + +- 更新:img, gt_bboxes + +`MinIoURandomCrop` + +- 更新:img, gt_bboxes, gt_labels + +`Corrupt` + +- 更新:img + +### 格式 Formatting + +`ToTensor` + +- 更新:由 `keys` 指定 + +`ImageToTensor` + +- 更新:由 `keys` 指定 + +`Transpose` + +- 更新:由 `keys` 指定 + +`ToDataContainer` + +- 更新:由 `keys` 指定 + +`DefaultFormatBundle` + +- 更新:img, proposals, gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg + +`Collect` + +- 增加:img_metas(img_metas 的键(key)被 `meta_keys` 指定) +- 移除:除了 `keys` 指定的键(key)之外的所有其他的键(key) + +### 测试时数据增强 Test time augmentation + +`MultiScaleFlipAug` + +## 拓展和使用自定义的流程 + +1. 在任意文件里写一个新的流程,例如在 `my_pipeline.py`,它以一个字典作为输入并且输出一个字典: + + ```python + import random + from mmdet.datasets import PIPELINES + + + @PIPELINES.register_module() + class MyTransform: + """Add your transform + + Args: + p (float): Probability of shifts. Default 0.5. + """ + + def __init__(self, p=0.5): + self.p = p + + def __call__(self, results): + if random.random() > self.p: + results['dummy'] = True + return results + ``` + +2. 在配置文件里调用并使用你写的数据处理流程,需要确保你的训练脚本能够正确导入新增模块: + + ```python + custom_imports = dict(imports=['path.to.my_pipeline'], allow_failed_imports=False) + + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='MyTransform', p=0.2), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), + ] + ``` + +3. 
可视化数据增强处理流程的结果
+
+   如果想要可视化数据增强处理流程的结果,可以使用 `tools/misc/browse_dataset.py` 直观地浏览检测数据集(图像和标注信息),或将图像保存到指定目录。
+   使用方法请参考[日志分析](../useful_tools.md)。
diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/finetune.md b/downstream/mmdetection/docs/zh_cn/tutorials/finetune.md
new file mode 100644
index 0000000..349660e
--- /dev/null
+++ b/downstream/mmdetection/docs/zh_cn/tutorials/finetune.md
@@ -0,0 +1,87 @@
+# 教程 7: 模型微调
+
+在 COCO 数据集上预训练的检测器可以作为其他数据集(例如 CityScapes 和 KITTI 数据集)优质的预训练模型。
+本教程将指导用户如何把 [ModelZoo](../model_zoo.md) 中提供的模型用于其他数据集,并使训练出的模型获得更好的性能。
+
+以下是在新数据集中微调模型需要的两个步骤。
+
+- 按 [教程 2:自定义数据集](customize_dataset.md) 中的方法对新数据集添加支持
+- 按照本教程中所讨论的方法,修改配置信息
+
+接下来将会以 Cityscapes Dataset 上的微调过程作为例子,具体讲述用户需要在配置中修改的五个部分。
+
+## 继承基础配置
+
+为了减轻编写整个配置的负担并减少错误的数量,MMDetection V2.0 支持从多个现有配置中继承配置信息。微调 MaskRCNN 模型的时候,新的配置信息需要使用从 `_base_/models/mask_rcnn_r50_fpn.py` 中继承的配置信息来构建模型的基本结构。当使用 Cityscapes 数据集时,新的配置信息可以简便地从 `_base_/datasets/cityscapes_instance.py` 中继承。对于训练过程的运行设置部分,新配置需要从 `_base_/default_runtime.py` 中继承。这些配置文件存放于 `configs` 目录下,用户也可以选择重新编写全部内容,而不是使用继承方法。
+
+```python
+_base_ = [
+    '../_base_/models/mask_rcnn_r50_fpn.py',
+    '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
+]
+```
+
+## Head 的修改
+
+接下来新的配置还需要根据新数据集的类别数量对 Head 进行修改。只需要对 roi_head 中的 `num_classes` 进行修改。修改后,除了最后预测部分的 Head 之外,预训练模型的大部分权重都会被重新使用。
+
+```python
+model = dict(
+    pretrained=None,
+    roi_head=dict(
+        bbox_head=dict(
+            type='Shared2FCBBoxHead',
+            in_channels=256,
+            fc_out_channels=1024,
+            roi_feat_size=7,
+            num_classes=8,
+            bbox_coder=dict(
+                type='DeltaXYWHBBoxCoder',
+                target_means=[0., 0., 0., 0.],
+                target_stds=[0.1, 0.1, 0.2, 0.2]),
+            reg_class_agnostic=False,
+            loss_cls=dict(
+                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
+        mask_head=dict(
+            type='FCNMaskHead',
+            num_convs=4,
+            in_channels=256,
+            conv_out_channels=256,
+            num_classes=8,
+            loss_mask=dict(
+                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
+```
+
+## 数据集的修改
+
+用户可能还需要准备数据集并编写有关数据集的配置。目前 MMDetection V2.0 的配置文件已经支持 VOC、WIDER FACE、COCO 和 Cityscapes Dataset 的数据集信息。
+
+## 训练策略的修改
+
+微调的超参数与默认的训练策略不同,通常需要更小的学习率和更少的训练轮次。
+
+```python
+# 优化器
+# batch size 为 8 时的 lr 配置
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# 学习策略
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=0.001,
+    step=[7])
+# lr_config 中的 step 和 runner 中的 max_epochs 需要针对自定义数据集进行专门调整
+runner = dict(max_epochs=8)
+log_config = dict(interval=100)
+```
+
+## 使用预训练模型
+
+如果要使用预训练模型,需要在新的配置文件中通过 `load_from` 指定预训练模型权重的链接。用户需要在训练开始之前下载好所需的模型权重,从而避免在训练过程中浪费宝贵的时间。
+
+```python
+load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'  # noqa
+```
diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/how_to.md b/downstream/mmdetection/docs/zh_cn/tutorials/how_to.md
new file mode 100644
index 0000000..e6b960d
--- /dev/null
+++ b/downstream/mmdetection/docs/zh_cn/tutorials/how_to.md
@@ -0,0 +1,203 @@
+# 教程 11: How to xxx
+
+本教程收集了使用 MMDetection 完成各类任务(How to xxx)的问题答案。如果您遇到了有关`如何做`的新问题及答案,欢迎随时更新此文档!
+
+## 使用 MMClassification 的骨干网络
+
+MMDet、MMCls、MMSeg 中的模型注册表都继承自 MMCV 中的根注册表,允许这些代码库直接使用彼此已经实现的模块。因此用户可以在 MMDetection 中使用来自 MMClassification 的骨干网络,而无需重新实现 MMClassification 中已经存在的网络。
+
+### 使用在 MMClassification 中实现的骨干网络
+
+假设想将 `MobileNetV3-small` 作为 `RetinaNet` 的骨干网络,则配置文件如下。
+
+```python
+_base_ = [
+    '../_base_/models/retinanet_r50_fpn.py',
+    '../_base_/datasets/coco_detection.py',
+    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
+]
+# please install mmcls>=0.20.0
+# import mmcls.models to trigger register_module in mmcls
+custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+pretrained = 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth'
+model = dict(
+    backbone=dict(
+        _delete_=True,  # 将 _base_ 中关于 backbone 的字段删除
+        type='mmcls.MobileNetV3',  # 使用 mmcls 中的 MobileNetV3
+        arch='small',
+        out_indices=(3, 8, 11),  # 修改 out_indices
+        init_cfg=dict(
+            type='Pretrained',
+            checkpoint=pretrained,
+            prefix='backbone.')),  # MMCls 中骨干网络预训练权重的键带有 'backbone.' 前缀,为了正常加载权重,需要把这个前缀去掉。
+    # 修改 in_channels
+    neck=dict(in_channels=[24, 48, 96], start_level=0))
+```
+
+### 通过 MMClassification 使用 TIMM 中实现的骨干网络
+
+由于 MMClassification 提供了 Py**T**orch **Im**age **M**odels (`timm`) 骨干网络的封装,用户也可以通过 MMClassification 直接使用 `timm` 中的骨干网络。假设想将 [`EfficientNet-B1`](https://github.com/open-mmlab/mmdetection/blob/master/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py) 作为 `RetinaNet` 的骨干网络,则配置文件如下。
+
+```python
+# https://github.com/open-mmlab/mmdetection/blob/master/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py
+_base_ = [
+    '../_base_/models/retinanet_r50_fpn.py',
+    '../_base_/datasets/coco_detection.py',
+    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
+]
+
+# please install mmcls>=0.20.0
+# import mmcls.models to trigger register_module in mmcls
+custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
+model = dict(
+    backbone=dict(
+        _delete_=True,  # 将 _base_ 中关于 backbone 的字段删除
+        type='mmcls.TIMMBackbone',  # 使用 mmcls 中的 timm 骨干网络
+        model_name='efficientnet_b1',
+        features_only=True,
+        pretrained=True,
+        out_indices=(1, 2, 3, 4)),  # 修改 out_indices
+    neck=dict(in_channels=[24, 40, 112, 320]))  # 修改 in_channels
+
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
+```
+
+`type='mmcls.TIMMBackbone'` 表示在 MMDetection 中使用 MMClassification 中的 `TIMMBackbone` 类,并且使用的模型为 `EfficientNet-B1`,其中 `mmcls` 表示 MMClassification 库,而 `TIMMBackbone` 表示 MMClassification 中实现的 TIMMBackbone 包装器。
+
+关于层次注册器的具体原理可以参考 [MMCV 文档](https://github.com/open-mmlab/mmcv/blob/master/docs/zh_cn/understand_mmcv/registry.md#%E6%B3%A8%E5%86%8C%E5%99%A8%E5%B1%82%E7%BB%93%E6%9E%84),关于如何使用 MMClassification 中的其他骨干网络,可以参考 [MMClassification 文档](https://github.com/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/config.md)。
+
+## 使用马赛克数据增强
+
+如果你想在训练中使用 `Mosaic`,那么请确保你同时使用 `MultiImageMixDataset`。以 `Faster R-CNN` 算法为例,你可以通过如下做法实现:
+
+```python
+# 直接打开 configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py ,增添如下字段
+data_root = 'data/coco/'
+dataset_type = 'CocoDataset'
+img_scale = (1333, 800)
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='RandomAffine',
+        scaling_ratio_range=(0.1, 2),
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),  # 
图像经过马赛克处理后会放大4倍,所以我们使用仿射变换来恢复图像的大小。 + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] + +train_dataset = dict( + _delete_ = True, # 删除不必要的设置 + type='MultiImageMixDataset', + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ], + filter_empty_gt=False, + ), + pipeline=train_pipeline + ) +​ +data = dict( + train=train_dataset + ) +``` + +## 在配置文件中冻结骨干网络后在训练中解冻骨干网络 + +如果你在配置文件中已经冻结了骨干网络并希望在几个训练周期后解冻它,你可以通过 hook 来实现这个功能。以用 ResNet 为骨干网络的 Faster R-CNN 为例,你可以冻结一个骨干网络的一个层并在配置文件中添加如下 `custom_hooks`: + +```python +_base_ = [ + '../_base_/models/faster_rcnn_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + # freeze one stage of the backbone network. + backbone=dict(frozen_stages=1), +) +custom_hooks = [dict(type="UnfreezeBackboneEpochBasedHook", unfreeze_epoch=1)] +``` + +同时在 `mmdet/core/hook/unfreeze_backbone_epoch_based_hook.py` 当中书写 `UnfreezeBackboneEpochBasedHook` 类 + +```python +from mmcv.parallel import is_module_wrapper +from mmcv.runner.hooks import HOOKS, Hook + + +@HOOKS.register_module() +class UnfreezeBackboneEpochBasedHook(Hook): + """Unfreeze backbone network Hook. + + Args: + unfreeze_epoch (int): The epoch unfreezing the backbone network. + """ + + def __init__(self, unfreeze_epoch=1): + self.unfreeze_epoch = unfreeze_epoch + + def before_train_epoch(self, runner): + # Unfreeze the backbone network. + # Only valid for resnet. + if runner.epoch == self.unfreeze_epoch: + model = runner.model + if is_module_wrapper(model): + model = model.module + backbone = model.backbone + if backbone.frozen_stages >= 0: + if backbone.deep_stem: + backbone.stem.train() + for param in backbone.stem.parameters(): + param.requires_grad = True + else: + backbone.norm1.train() + for m in [backbone.conv1, backbone.norm1]: + for param in m.parameters(): + param.requires_grad = True + + for i in range(1, backbone.frozen_stages + 1): + m = getattr(backbone, f'layer{i}') + m.train() + for param in m.parameters(): + param.requires_grad = True +``` + +## 获得新的骨干网络的通道数 + +如果你想获得一个新骨干网络的通道数,你可以单独构建这个骨干网络并输入一个伪造的图片来获取每一个阶段的输出。 + +以 `ResNet` 为例: + +```python +from mmdet.models import ResNet +import torch +self = ResNet(depth=18) +self.eval() +inputs = torch.rand(1, 3, 32, 32) +level_outputs = self.forward(inputs) +for level_out in level_outputs: + print(tuple(level_out.shape)) + +``` + +以上脚本的输出为: + +```python +(1, 64, 8, 8) +(1, 128, 4, 4) +(1, 256, 2, 2) +(1, 512, 1, 1) +``` + +用户可以通过将脚本中的 `ResNet(depth=18)` 替换为自己的骨干网络配置来得到新的骨干网络的通道数。 diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/index.rst b/downstream/mmdetection/docs/zh_cn/tutorials/index.rst new file mode 100644 index 0000000..eaf4907 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/index.rst @@ -0,0 +1,14 @@ +.. 
toctree:: + :maxdepth: 2 + + config.md + customize_dataset.md + data_pipeline.md + customize_models.md + customize_runtime.md + customize_losses.md + finetune.md + pytorch2onnx.md + onnx2tensorrt.md + init_cfg.md + how_to.md diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/init_cfg.md b/downstream/mmdetection/docs/zh_cn/tutorials/init_cfg.md new file mode 100644 index 0000000..f6f5968 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/init_cfg.md @@ -0,0 +1,161 @@ +# 教程 10: 权重初始化 + +在训练过程中,适当的初始化策略有利于加快训练速度或获得更⾼的性能。 [MMCV](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/weight_init.py) 提供了一些常⽤的初始化模块的⽅法,如 `nn.Conv2d`。 MMdetection 中的模型初始化主要使⽤ `init_cfg`。⽤⼾可以通过以下两个步骤来初始化模型: + +1. 在 `model_cfg` 中为模型或其组件定义 `init_cfg`,但⼦组件的 `init_cfg` 优先级更⾼,会覆盖⽗模块的 `init_cfg` 。 +2. 像往常一样构建模型,然后显式调⽤ `model.init_weights()` ⽅法,此时模型参数将会被按照配置文件写法进行初始化。 + +MMdetection 初始化工作流的高层 API 调用流程是: + +model_cfg(init_cfg) -> build_from_cfg -> model -> init_weight() -> initialize(self, self.init_cfg) -> children's init_weight() + +### 描述 + +它的数据类型是 dict 或者 list\[dict\],包含了下列键值: + +- `type` (str),包含 `INTIALIZERS` 中的初始化器名称,后面跟着初始化器的参数。 +- `layer`(str 或 list\[str\]),包含 Pytorch 或 MMCV 中基本层的名称,以及将被初始化的可学习参数,例如 `'Conv2d'`,`'DeformConv2d'`。 +- `override` (dict 或 list\[dict\]),包含不继承⾃ `BaseModule` 且其初始化配置与 `layer` 键中的其他层不同的⼦模块。 `type` 中定义的初始化器将适⽤于 `layer` 中定义的所有层,因此如果⼦模块不是 `BaseModule` 的派⽣类但可以与 `layer` 中的层相同的⽅式初始化,则不需要使⽤ `override`。`override` 包含了: + - `type` 后跟初始化器的参数; + - `name` 用以指⽰将被初始化的⼦模块。 + +### 初始化参数 + +从 `mmcv.runner.BaseModule` 或 `mmdet.models` 继承一个新模型。这里我们用 FooModel 来举个例子。 + +```python +import torch.nn as nn +from mmcv.runner import BaseModule + +class FooModel(BaseModule) + def __init__(self, + arg1, + arg2, + init_cfg=None): + super(FooModel, self).__init__(init_cfg) + ... +``` + +- 直接在代码中使⽤ `init_cfg` 初始化模型 + + ```python + import torch.nn as nn + from mmcv.runner import BaseModule + # or directly inherit mmdet models + + class FooModel(BaseModule) + def __init__(self, + arg1, + arg2, + init_cfg=XXX): + super(FooModel, self).__init__(init_cfg) + ... + ``` + +- 在 `mmcv.Sequential` 或 `mmcv.ModuleList` 代码中直接使⽤ `init_cfg` 初始化模型 + + ```python + from mmcv.runner import BaseModule, ModuleList + + class FooModel(BaseModule) + def __init__(self, + arg1, + arg2, + init_cfg=None): + super(FooModel, self).__init__(init_cfg) + ... + self.conv1 = ModuleList(init_cfg=XXX) + ``` + +- 使⽤配置⽂件中的 `init_cfg` 初始化模型 + + ```python + model = dict( + ... + model = dict( + type='FooModel', + arg1=XXX, + arg2=XXX, + init_cfg=XXX), + ... + ``` + +### init_cfg 的使用 + +1. 用 `layer` 键初始化模型 + + 如果我们只定义了 `layer`, 它只会在 `layer` 键中初始化网络层。 + + 注意: `layer` 键对应的值是 Pytorch 的带有 weights 和 bias 属性的类名(因此不⽀持 `MultiheadAttention` 层)。 + +- 定义⽤于初始化具有相同配置的模块的 `layer` 键。 + + ```python + init_cfg = dict(type='Constant', layer=['Conv1d', 'Conv2d', 'Linear'], val=1) + # ⽤相同的配置初始化整个模块 + ``` + +- 定义⽤于初始化具有不同配置的层的 `layer` 键。 + + ```python + init_cfg = [dict(type='Constant', layer='Conv1d', val=1), + dict(type='Constant', layer='Conv2d', val=2), + dict(type='Constant', layer='Linear', val=3)] + # nn.Conv1d 将被初始化为 dict(type='Constant', val=1) + # nn.Conv2d 将被初始化为 dict(type='Constant', val=2) + # nn.Linear 将被初始化为 dict(type='Constant', val=3) + ``` + +2. 
使⽤ `override` 键初始化模型 + +- 当使⽤属性名初始化某些特定部分时,我们可以使⽤ `override` 键, `override` 中的值将忽略 init_cfg 中的值。 + + ```python + # layers: + # self.feat = nn.Conv1d(3, 1, 3) + # self.reg = nn.Conv2d(3, 3, 3) + # self.cls = nn.Linear(1,2) + + init_cfg = dict(type='Constant', + layer=['Conv1d','Conv2d'], val=1, bias=2, + override=dict(type='Constant', name='reg', val=3, bias=4)) + # self.feat and self.cls 将被初始化为 dict(type='Constant', val=1, bias=2) + # 叫 'reg' 的模块将被初始化为 dict(type='Constant', val=3, bias=4) + ``` + +- 如果 init_cfg 中的 `layer` 为 None,则只会初始化 override 中有 name 的⼦模块,⽽ override 中的 type 和其他参数可以省略。 + + ```python + # layers: + # self.feat = nn.Conv1d(3, 1, 3) + # self.reg = nn.Conv2d(3, 3, 3) + # self.cls = nn.Linear(1,2) + + init_cfg = dict(type='Constant', val=1, bias=2, override=dict(name='reg')) + + # self.feat and self.cls 将被 Pytorch 初始化 + # 叫 'reg' 的模块将被 dict(type='Constant', val=1, bias=2) 初始化 + ``` + +- 如果我们不定义 `layer` 或 `override` 键,它不会初始化任何东西。 + +- 无效的使用 + + ```python + # override 没有 name 键的话是无效的 + init_cfg = dict(type='Constant', layer=['Conv1d','Conv2d'], val=1, bias=2, + override=dict(type='Constant', val=3, bias=4)) + + # override 有 name 键和其他参数但是没有 type 键也是无效的 + init_cfg = dict(type='Constant', layer=['Conv1d','Conv2d'], val=1, bias=2, + override=dict(name='reg', val=3, bias=4)) + ``` + +3. 使⽤预训练模型初始化模型 + + ```python + init_cfg = dict(type='Pretrained', + checkpoint='torchvision://resnet50') + ``` + +更多细节可以参考 [MMCV](https://mmcv.readthedocs.io/en/latest/cnn.html#weight-initialization) 的文档和 MMCV [PR #780](https://github.com/open-mmlab/mmcv/pull/780) diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/onnx2tensorrt.md b/downstream/mmdetection/docs/zh_cn/tutorials/onnx2tensorrt.md new file mode 100644 index 0000000..678a131 --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/onnx2tensorrt.md @@ -0,0 +1,106 @@ +# 教程 9: ONNX 到 TensorRT 的模型转换(实验性支持) + +> ## [尝试使用新的 MMDeploy 来部署你的模型](https://mmdeploy.readthedocs.io/) + + + +- [教程 9: ONNX 到 TensorRT 的模型转换(实验性支持)](#%E6%95%99%E7%A8%8B-9-onnx-%E5%88%B0-tensorrt-%E7%9A%84%E6%A8%A1%E5%9E%8B%E8%BD%AC%E6%8D%A2%E5%AE%9E%E9%AA%8C%E6%80%A7%E6%94%AF%E6%8C%81) + - [如何将模型从 ONNX 转换为 TensorRT](#%E5%A6%82%E4%BD%95%E5%B0%86%E6%A8%A1%E5%9E%8B%E4%BB%8E-onnx-%E8%BD%AC%E6%8D%A2%E4%B8%BA-tensorrt) + - [先决条件](#%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6) + - [用法](#%E7%94%A8%E6%B3%95) + - [如何评估导出的模型](#%E5%A6%82%E4%BD%95%E8%AF%84%E4%BC%B0%E5%AF%BC%E5%87%BA%E7%9A%84%E6%A8%A1%E5%9E%8B) + - [支持转换为 TensorRT 的模型列表](#%E6%94%AF%E6%8C%81%E8%BD%AC%E6%8D%A2%E4%B8%BA-tensorrt-%E7%9A%84%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8) + - [提醒](#%E6%8F%90%E9%86%92) + - [常见问题](#%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98) + + + +## 如何将模型从 ONNX 转换为 TensorRT + +### 先决条件 + +1. 请参考 [get_started.md](https://mmdetection.readthedocs.io/en/latest/get_started.html) 从源码安装 MMCV 和 MMDetection。 +2. 请参考 [ONNXRuntime in mmcv](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) 和 [TensorRT plugin in mmcv](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/tensorrt_plugin.md/) 安装支持 ONNXRuntime 自定义操作和 TensorRT 插件的 `mmcv-full`。 +3. 
使用工具 [pytorch2onnx](https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html) 将模型从 PyTorch 转换为 ONNX。 + +### 用法 + +```bash +python tools/deployment/onnx2tensorrt.py \ + ${CONFIG} \ + ${MODEL} \ + --trt-file ${TRT_FILE} \ + --input-img ${INPUT_IMAGE_PATH} \ + --shape ${INPUT_IMAGE_SHAPE} \ + --min-shape ${MIN_IMAGE_SHAPE} \ + --max-shape ${MAX_IMAGE_SHAPE} \ + --workspace-size {WORKSPACE_SIZE} \ + --show \ + --verify \ +``` + +所有参数的说明: + +- `config`: 模型配置文件的路径。 +- `model`: ONNX 模型文件的路径。 +- `--trt-file`: 输出 TensorRT 引擎文件的路径。如果未指定,它将被设置为 `tmp.trt`。 +- `--input-img`: 用于追踪和转换的输入图像的路径。默认情况下,它将设置为 `demo/demo.jpg`。 +- `--shape`: 模型输入的高度和宽度。如果未指定,它将设置为 `400 600`。 +- `--min-shape`: 模型输入的最小高度和宽度。如果未指定,它将被设置为与 `--shape` 相同。 +- `--max-shape`: 模型输入的最大高度和宽度。如果未指定,它将被设置为与 `--shape` 相同。 +- `--workspace-size`: 构建 TensorRT 引擎所需的 GPU 工作空间大小(以 GiB 为单位)。如果未指定,它将设置为 `1` GiB。 +- `--show`: 确定是否显示模型的输出。如果未指定,它将设置为 `False`。 +- `--verify`: 确定是否在 ONNXRuntime 和 TensorRT 之间验证模型的正确性。如果未指定,它将设置为 `False`。 +- `--verbose`: 确定是否打印日志消息。它对调试很有用。如果未指定,它将设置为 `False`。 + +例子: + +```bash +python tools/deployment/onnx2tensorrt.py \ + configs/retinanet/retinanet_r50_fpn_1x_coco.py \ + checkpoints/retinanet_r50_fpn_1x_coco.onnx \ + --trt-file checkpoints/retinanet_r50_fpn_1x_coco.trt \ + --input-img demo/demo.jpg \ + --shape 400 600 \ + --show \ + --verify \ +``` + +## 如何评估导出的模型 + +我们准备了一个工具 `tools/deplopyment/test.py` 来评估 TensorRT 模型。 + +请参阅以下链接以获取更多信息。 + +- [如何评估导出的模型](pytorch2onnx.md#how-to-evaluate-the-exported-models) +- [结果和模型](pytorch2onnx.md#results-and-models) + +## 支持转换为 TensorRT 的模型列表 + +下表列出了确定可转换为 TensorRT 的模型。 + +| Model | Config | Dynamic Shape | Batch Inference | Note | +| :----------------: | :--------------------------------------------------------------: | :-----------: | :-------------: | :--: | +| SSD | `configs/ssd/ssd300_coco.py` | Y | Y | | +| FSAF | `configs/fsaf/fsaf_r50_fpn_1x_coco.py` | Y | Y | | +| FCOS | `configs/fcos/fcos_r50_caffe_fpn_4x4_1x_coco.py` | Y | Y | | +| YOLOv3 | `configs/yolo/yolov3_d53_mstrain-608_273e_coco.py` | Y | Y | | +| RetinaNet | `configs/retinanet/retinanet_r50_fpn_1x_coco.py` | Y | Y | | +| Faster R-CNN | `configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py` | Y | Y | | +| Cascade R-CNN | `configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py` | Y | Y | | +| Mask R-CNN | `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | | +| Cascade Mask R-CNN | `configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | | +| PointRend | `configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py` | Y | Y | | + +注意: + +- *以上所有模型通过 Pytorch==1.6.0, onnx==1.7.0 与 TensorRT-7.2.1.6.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0 测试* + +## 提醒 + +- 如果您在上面列出的模型中遇到任何问题,请创建 issue,我们会尽快处理。对于未包含在列表中的模型,由于资源有限,我们可能无法在此提供太多帮助。请尝试深入挖掘并自行调试。 +- 由于此功能是实验性的,并且可能会快速更改,因此请始终尝试使用最新的 `mmcv` 和 `mmdetecion`。 + +## 常见问题 + +- 空 diff --git a/downstream/mmdetection/docs/zh_cn/tutorials/pytorch2onnx.md b/downstream/mmdetection/docs/zh_cn/tutorials/pytorch2onnx.md new file mode 100644 index 0000000..93a647e --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/tutorials/pytorch2onnx.md @@ -0,0 +1,3 @@ +# 教程 8: Pytorch 到 ONNX 的模型转换(实验性支持) + +> ## [尝试使用新的 MMDeploy 來部署你的模型](https://mmdeploy.readthedocs.io/) diff --git a/downstream/mmdetection/docs/zh_cn/useful_tools.md b/downstream/mmdetection/docs/zh_cn/useful_tools.md new file mode 100644 index 0000000..922164c --- /dev/null +++ b/downstream/mmdetection/docs/zh_cn/useful_tools.md @@ -0,0 +1 @@ +## 日志分析 diff --git 
a/downstream/mmdetection/mmdet/__init__.py b/downstream/mmdetection/mmdet/__init__.py new file mode 100644 index 0000000..1f8ee16 --- /dev/null +++ b/downstream/mmdetection/mmdet/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv + +from .version import __version__, short_version + + +def digit_version(version_str): + digit_version = [] + for x in version_str.split('.'): + if x.isdigit(): + digit_version.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + digit_version.append(int(patch_version[0]) - 1) + digit_version.append(int(patch_version[1])) + return digit_version + + +mmcv_minimum_version = '1.3.17' +mmcv_maximum_version = '1.6.0' +mmcv_version = digit_version(mmcv.__version__) + + +assert (mmcv_version >= digit_version(mmcv_minimum_version) + and mmcv_version <= digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' + +__all__ = ['__version__', 'short_version'] diff --git a/downstream/mmdetection/mmdet/apis/__init__.py b/downstream/mmdetection/mmdet/apis/__init__.py new file mode 100644 index 0000000..a865e94 --- /dev/null +++ b/downstream/mmdetection/mmdet/apis/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .inference import (async_inference_detector, inference_detector, + init_detector, show_result_pyplot) +from .test import multi_gpu_test, single_gpu_test +from .train import (get_root_logger, init_random_seed, set_random_seed, + train_detector) + +__all__ = [ + 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', + 'async_inference_detector', 'inference_detector', 'show_result_pyplot', + 'multi_gpu_test', 'single_gpu_test', 'init_random_seed' +] diff --git a/downstream/mmdetection/mmdet/apis/inference.py b/downstream/mmdetection/mmdet/apis/inference.py new file mode 100644 index 0000000..795fce5 --- /dev/null +++ b/downstream/mmdetection/mmdet/apis/inference.py @@ -0,0 +1,251 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from pathlib import Path + +import mmcv +import numpy as np +import torch +from mmcv.ops import RoIPool +from mmcv.parallel import collate, scatter +from mmcv.runner import load_checkpoint + +from mmdet.core import get_classes +from mmdet.datasets import replace_ImageToTensor +from mmdet.datasets.pipelines import Compose +from mmdet.models import build_detector + + +def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None): + """Initialize a detector from config file. + + Args: + config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path, + :obj:`Path`, or the config object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. + cfg_options (dict): Options to override some settings in the used + config. + + Returns: + nn.Module: The constructed detector. 
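+
+    Examples:
+        >>> # Minimal usage sketch; the config and checkpoint paths below are
+        >>> # illustrative and must exist locally before running.
+        >>> from mmdet.apis import init_detector
+        >>> config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
+        >>> checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco.pth'
+        >>> model = init_detector(config_file, checkpoint_file, device='cuda:0')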
+ """ + if isinstance(config, (str, Path)): + config = mmcv.Config.fromfile(config) + elif not isinstance(config, mmcv.Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if cfg_options is not None: + config.merge_from_dict(cfg_options) + if 'pretrained' in config.model: + config.model.pretrained = None + elif 'init_cfg' in config.model.backbone: + config.model.backbone.init_cfg = None + config.model.train_cfg = None + model = build_detector(config.model, test_cfg=config.get('test_cfg')) + if checkpoint is not None: + checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') + if 'CLASSES' in checkpoint.get('meta', {}): + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + warnings.simplefilter('once') + warnings.warn('Class names are not saved in the checkpoint\'s ' + 'meta data, use COCO classes by default.') + model.CLASSES = get_classes('coco') + model.cfg = config # save the config in the model for convenience + model.to(device) + model.eval() + return model + + +class LoadImage: + """Deprecated. + + A simple pipeline to load image. + """ + + def __call__(self, results): + """Call function to load images into results. + + Args: + results (dict): A result dict contains the file name + of the image to be read. + Returns: + dict: ``results`` will be returned containing loaded image. + """ + warnings.simplefilter('once') + warnings.warn('`LoadImage` is deprecated and will be removed in ' + 'future releases. You may use `LoadImageFromWebcam` ' + 'from `mmdet.datasets.pipelines.` instead.') + if isinstance(results['img'], str): + results['filename'] = results['img'] + results['ori_filename'] = results['img'] + else: + results['filename'] = None + results['ori_filename'] = None + img = mmcv.imread(results['img']) + results['img'] = img + results['img_fields'] = ['img'] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + return results + + +def inference_detector(model, imgs): + """Inference image(s) with the detector. + + Args: + model (nn.Module): The loaded detector. + imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]): + Either image files or loaded images. + + Returns: + If imgs is a list or tuple, the same length list type results + will be returned, otherwise return the detection results directly. 
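+
+    Examples:
+        >>> # Minimal usage sketch; assumes ``model`` was built with
+        >>> # ``init_detector`` and that ``demo/demo.jpg`` exists.
+        >>> from mmdet.apis import inference_detector
+        >>> result = inference_detector(model, 'demo/demo.jpg')
+        >>> batch_results = inference_detector(model, ['demo/demo.jpg'] * 2)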
+ """ + + if isinstance(imgs, (list, tuple)): + is_batch = True + else: + imgs = [imgs] + is_batch = False + + cfg = model.cfg + device = next(model.parameters()).device # model device + + if isinstance(imgs[0], np.ndarray): + cfg = cfg.copy() + # set loading pipeline type + cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' + + cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) + test_pipeline = Compose(cfg.data.test.pipeline) + + datas = [] + for img in imgs: + # prepare data + if isinstance(img, np.ndarray): + # directly add img + data = dict(img=img) + else: + # add information into dict + data = dict(img_info=dict(filename=img), img_prefix=None) + # build the data pipeline + data = test_pipeline(data) + datas.append(data) + + data = collate(datas, samples_per_gpu=len(imgs)) + # just get the actual data from DataContainer + data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] + data['img'] = [img.data[0] for img in data['img']] + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + else: + for m in model.modules(): + assert not isinstance( + m, RoIPool + ), 'CPU inference with RoIPool is not supported currently.' + + # forward the model + with torch.no_grad(): + results = model(return_loss=False, rescale=True, **data) + + if not is_batch: + return results[0] + else: + return results + + +async def async_inference_detector(model, imgs): + """Async inference image(s) with the detector. + + Args: + model (nn.Module): The loaded detector. + img (str | ndarray): Either image files or loaded images. + + Returns: + Awaitable detection results. + """ + if not isinstance(imgs, (list, tuple)): + imgs = [imgs] + + cfg = model.cfg + device = next(model.parameters()).device # model device + + if isinstance(imgs[0], np.ndarray): + cfg = cfg.copy() + # set loading pipeline type + cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' + + cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) + test_pipeline = Compose(cfg.data.test.pipeline) + + datas = [] + for img in imgs: + # prepare data + if isinstance(img, np.ndarray): + # directly add img + data = dict(img=img) + else: + # add information into dict + data = dict(img_info=dict(filename=img), img_prefix=None) + # build the data pipeline + data = test_pipeline(data) + datas.append(data) + + data = collate(datas, samples_per_gpu=len(imgs)) + # just get the actual data from DataContainer + data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] + data['img'] = [img.data[0] for img in data['img']] + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + else: + for m in model.modules(): + assert not isinstance( + m, RoIPool + ), 'CPU inference with RoIPool is not supported currently.' + + # We don't restore `torch.is_grad_enabled()` value during concurrent + # inference since execution can overlap + torch.set_grad_enabled(False) + results = await model.aforward_test(rescale=True, **data) + return results + + +def show_result_pyplot(model, + img, + result, + score_thr=0.3, + title='result', + wait_time=0, + palette=None, + out_file=None): + """Visualize the detection results on the image. + + Args: + model (nn.Module): The loaded detector. + img (str or np.ndarray): Image filename or loaded image. + result (tuple[list] or list): The detection result, can be either + (bbox, segm) or just bbox. + score_thr (float): The threshold to visualize the bboxes and masks. 
+ title (str): Title of the pyplot figure. + wait_time (float): Value of waitKey param. Default: 0. + palette (str or tuple(int) or :obj:`Color`): Color. + The tuple of color should be in BGR order. + out_file (str or None): The path to write the image. + Default: None. + """ + if hasattr(model, 'module'): + model = model.module + model.show_result( + img, + result, + score_thr=score_thr, + show=True, + wait_time=wait_time, + win_name=title, + bbox_color=palette, + text_color=(200, 200, 200), + mask_color=palette, + out_file=out_file) diff --git a/downstream/mmdetection/mmdet/apis/test.py b/downstream/mmdetection/mmdet/apis/test.py new file mode 100644 index 0000000..973d362 --- /dev/null +++ b/downstream/mmdetection/mmdet/apis/test.py @@ -0,0 +1,209 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import pickle +import shutil +import tempfile +import time + +import mmcv +import torch +import torch.distributed as dist +from mmcv.image import tensor2imgs +from mmcv.runner import get_dist_info + +from mmdet.core import encode_mask_results + + +def single_gpu_test(model, + data_loader, + show=False, + out_dir=None, + show_score_thr=0.3): + model.eval() + results = [] + dataset = data_loader.dataset + PALETTE = getattr(dataset, 'PALETTE', None) + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + + batch_size = len(result) + if show or out_dir: + if batch_size == 1 and isinstance(data['img'][0], torch.Tensor): + img_tensor = data['img'][0] + else: + img_tensor = data['img'][0].data[0] + img_metas = data['img_metas'][0].data[0] + imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) + assert len(imgs) == len(img_metas) + + for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + + ori_h, ori_w = img_meta['ori_shape'][:-1] + img_show = mmcv.imresize(img_show, (ori_w, ori_h)) + + if out_dir: + out_file = osp.join(out_dir, img_meta['ori_filename']) + else: + out_file = None + + model.module.show_result( + img_show, + result[i], + bbox_color=PALETTE, + text_color=PALETTE, + mask_color=PALETTE, + show=show, + out_file=out_file, + score_thr=show_score_thr) + + # encode mask results + if isinstance(result[0], tuple): + result = [(bbox_results, encode_mask_results(mask_results)) + for bbox_results, mask_results in result] + # This logic is only used in panoptic segmentation test. + elif isinstance(result[0], dict) and 'ins_results' in result[0]: + for j in range(len(result)): + bbox_results, mask_results = result[j]['ins_results'] + result[j]['ins_results'] = (bbox_results, + encode_mask_results(mask_results)) + + results.extend(result) + + for _ in range(batch_size): + prog_bar.update() + return results + + +def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. 
+ gpu_collect (bool): Option to use either gpu or cpu to collect results. + + Returns: + list: The prediction results. + """ + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + time.sleep(2) # This line can prevent deadlock problem in some cases. + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + # encode mask results + if isinstance(result[0], tuple): + result = [(bbox_results, encode_mask_results(mask_results)) + for bbox_results, mask_results in result] + # This logic is only used in panoptic segmentation test. + elif isinstance(result[0], dict) and 'ins_results' in result[0]: + for j in range(len(result)): + bbox_results, mask_results = result[j]['ins_results'] + result[j]['ins_results'] = ( + bbox_results, encode_mask_results(mask_results)) + + results.extend(result) + + if rank == 0: + batch_size = len(result) + for _ in range(batch_size * world_size): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + results = collect_results_gpu(results, len(dataset)) + else: + results = collect_results_cpu(results, len(dataset), tmpdir) + return results + + +def collect_results_cpu(result_part, size, tmpdir=None): + rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full((MAX_LEN, ), + 32, + dtype=torch.uint8, + device='cuda') + if rank == 0: + mmcv.mkdir_or_exist('.dist_test') + tmpdir = tempfile.mkdtemp(dir='.dist_test') + tmpdir = torch.tensor( + bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mmcv.mkdir_or_exist(tmpdir) + # dump the part result to the dir + mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) + dist.barrier() + # collect all parts + if rank != 0: + return None + else: + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, f'part_{i}.pkl') + part_list.append(mmcv.load(part_file)) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + shutil.rmtree(tmpdir) + return ordered_results + + +def collect_results_gpu(result_part, size): + rank, world_size = get_dist_info() + # dump result part to tensor with pickle + part_tensor = torch.tensor( + bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') + # gather all result part tensor shape + shape_tensor = torch.tensor(part_tensor.shape, device='cuda') + shape_list = [shape_tensor.clone() for _ in range(world_size)] + dist.all_gather(shape_list, shape_tensor) + # padding result part tensor to max length + shape_max = torch.tensor(shape_list).max() + part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') + part_send[:shape_tensor[0]] = part_tensor + part_recv_list = [ + part_tensor.new_zeros(shape_max) for _ in range(world_size) + ] + # gather all result part + dist.all_gather(part_recv_list, part_send) + + if rank == 0: + part_list = [] + for recv, shape in zip(part_recv_list, shape_list): + part_list.append( + pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) + # sort the results + ordered_results = [] + for 
res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + return ordered_results diff --git a/downstream/mmdetection/mmdet/apis/train.py b/downstream/mmdetection/mmdet/apis/train.py new file mode 100644 index 0000000..ca76331 --- /dev/null +++ b/downstream/mmdetection/mmdet/apis/train.py @@ -0,0 +1,244 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import random + +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, + Fp16OptimizerHook, OptimizerHook, build_runner, + get_dist_info) + +from mmdet.core import DistEvalHook, EvalHook, build_optimizer +from mmdet.datasets import (build_dataloader, build_dataset, + replace_ImageToTensor) +from mmdet.utils import (build_ddp, build_dp, compat_cfg, + find_latest_checkpoint, get_root_logger) + + +def init_random_seed(seed=None, device='cuda'): + """Initialize random seed. + + If the seed is not set, the seed will be automatically randomized, + and then broadcast to all processes to prevent some potential bugs. + + Args: + seed (int, Optional): The seed. Default to None. + device (str): The device where the seed will be put on. + Default to 'cuda'. + + Returns: + int: Seed to be used. + """ + if seed is not None: + return seed + + # Make sure all ranks share the same random seed to prevent + # some potential bugs. Please refer to + # https://github.com/open-mmlab/mmdetection/issues/6339 + rank, world_size = get_dist_info() + seed = np.random.randint(2**31) + if world_size == 1: + return seed + + if rank == 0: + random_num = torch.tensor(seed, dtype=torch.int32, device=device) + else: + random_num = torch.tensor(0, dtype=torch.int32, device=device) + dist.broadcast(random_num, src=0) + return random_num.item() + + +def set_random_seed(seed, deterministic=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def auto_scale_lr(cfg, distributed, logger): + """Automatically scaling LR according to GPU number and sample per GPU. + + Args: + cfg (config): Training config. + distributed (bool): Using distributed or not. + logger (logging.Logger): Logger. + """ + # Get flag from config + if ('auto_scale_lr' not in cfg) or \ + (not cfg.auto_scale_lr.get('enable', False)): + logger.info('Automatic scaling of learning rate (LR)' + ' has been disabled.') + return + + # Get base batch size from config + base_batch_size = cfg.auto_scale_lr.get('base_batch_size', None) + if base_batch_size is None: + return + + # Get gpu number + if distributed: + _, world_size = get_dist_info() + num_gpus = len(range(world_size)) + else: + num_gpus = len(cfg.gpu_ids) + + # calculate the batch size + samples_per_gpu = cfg.data.train_dataloader.samples_per_gpu + batch_size = num_gpus * samples_per_gpu + logger.info(f'Training with {num_gpus} GPU(s) with {samples_per_gpu} ' + f'samples per GPU. 
The total batch size is {batch_size}.') + + if batch_size != base_batch_size: + # scale LR with + # [linear scaling rule](https://arxiv.org/abs/1706.02677) + scaled_lr = (batch_size / base_batch_size) * cfg.optimizer.lr + logger.info('LR has been automatically scaled ' + f'from {cfg.optimizer.lr} to {scaled_lr}') + cfg.optimizer.lr = scaled_lr + else: + logger.info('The batch size match the ' + f'base batch size: {base_batch_size}, ' + f'will not scaling the LR ({cfg.optimizer.lr}).') + + +def train_detector(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + meta=None): + + cfg = compat_cfg(cfg) + logger = get_root_logger(log_level=cfg.log_level) + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + + runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[ + 'type'] + + train_dataloader_default_args = dict( + samples_per_gpu=2, + workers_per_gpu=2, + # `num_gpus` will be ignored if distributed + num_gpus=len(cfg.gpu_ids), + dist=distributed, + seed=cfg.seed, + runner_type=runner_type, + persistent_workers=False) + + train_loader_cfg = { + **train_dataloader_default_args, + **cfg.data.get('train_dataloader', {}) + } + + data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] + + # put model on gpus + if distributed: + find_unused_parameters = cfg.get('find_unused_parameters', False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = build_ddp( + model, + cfg.device, + device_ids=[int(os.environ['LOCAL_RANK'])], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + else: + model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) + + # build optimizer + auto_scale_lr(cfg, distributed, logger) + optimizer = build_optimizer(model, cfg.optimizer) + + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + + # an ugly workaround to make .log and .log.json filenames the same + runner.timestamp = timestamp + + # fp16 setting + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + optimizer_config = Fp16OptimizerHook( + **cfg.optimizer_config, **fp16_cfg, distributed=distributed) + elif distributed and 'type' not in cfg.optimizer_config: + optimizer_config = OptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks( + cfg.lr_config, + optimizer_config, + cfg.checkpoint_config, + cfg.log_config, + cfg.get('momentum_config', None), + custom_hooks_config=cfg.get('custom_hooks', None)) + + if distributed: + if isinstance(runner, EpochBasedRunner): + runner.register_hook(DistSamplerSeedHook()) + + # register eval hooks + if validate: + val_dataloader_default_args = dict( + samples_per_gpu=1, + workers_per_gpu=2, + dist=distributed, + shuffle=False, + persistent_workers=False) + + val_dataloader_args = { + **val_dataloader_default_args, + **cfg.data.get('val_dataloader', {}) + } + # Support batch_size > 1 in validation + + if val_dataloader_args['samples_per_gpu'] > 1: + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.val.pipeline = replace_ImageToTensor( + cfg.data.val.pipeline) + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + + val_dataloader = build_dataloader(val_dataset, **val_dataloader_args) + eval_cfg = cfg.get('evaluation', {}) + eval_cfg['by_epoch'] = cfg.runner['type'] != 
'IterBasedRunner' + eval_hook = DistEvalHook if distributed else EvalHook + # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the + # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'. + runner.register_hook( + eval_hook(val_dataloader, **eval_cfg), priority='LOW') + + resume_from = None + if cfg.resume_from is None and cfg.get('auto_resume'): + resume_from = find_latest_checkpoint(cfg.work_dir) + if resume_from is not None: + cfg.resume_from = resume_from + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) diff --git a/downstream/mmdetection/mmdet/core/__init__.py b/downstream/mmdetection/mmdet/core/__init__.py new file mode 100644 index 0000000..2a62038 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .anchor import * # noqa: F401, F403 +from .bbox import * # noqa: F401, F403 +from .data_structures import * # noqa: F401, F403 +from .evaluation import * # noqa: F401, F403 +from .hook import * # noqa: F401, F403 +from .mask import * # noqa: F401, F403 +from .optimizers import * # noqa: F401, F403 +from .post_processing import * # noqa: F401, F403 +from .utils import * # noqa: F401, F403 diff --git a/downstream/mmdetection/mmdet/core/anchor/__init__.py b/downstream/mmdetection/mmdet/core/anchor/__init__.py new file mode 100644 index 0000000..fcc7e4a --- /dev/null +++ b/downstream/mmdetection/mmdet/core/anchor/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, + YOLOAnchorGenerator) +from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS, + build_anchor_generator, build_prior_generator) +from .point_generator import MlvlPointGenerator, PointGenerator +from .utils import anchor_inside_flags, calc_region, images_to_levels + +__all__ = [ + 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', + 'PointGenerator', 'images_to_levels', 'calc_region', + 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator', + 'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator' +] diff --git a/downstream/mmdetection/mmdet/core/anchor/anchor_generator.py b/downstream/mmdetection/mmdet/core/anchor/anchor_generator.py new file mode 100644 index 0000000..20886fb --- /dev/null +++ b/downstream/mmdetection/mmdet/core/anchor/anchor_generator.py @@ -0,0 +1,866 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +import numpy as np +import torch +from torch.nn.modules.utils import _pair + +from .builder import PRIOR_GENERATORS + + +@PRIOR_GENERATORS.register_module() +class AnchorGenerator: + """Standard anchor generator for 2D anchor-based detectors. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels in order (w, h). + ratios (list[float]): The list of ratios between the height and width + of anchors in a single level. + scales (list[int] | None): Anchor scales for anchors in a single level. + It cannot be set at the same time if `octave_base_scale` and + `scales_per_octave` are set. + base_sizes (list[int] | None): The basic sizes + of anchors in multiple levels. + If None is given, strides will be used as base_sizes. + (If strides are non square, the shortest stride is taken.) + scale_major (bool): Whether to multiply scales first when generating + base anchors. 
If true, the anchors in the same row will have the + same scales. By default it is True in V2.0 + octave_base_scale (int): The base scale of octave. + scales_per_octave (int): Number of scales for each octave. + `octave_base_scale` and `scales_per_octave` are usually used in + retinanet and the `scales` should be None when they are set. + centers (list[tuple[float, float]] | None): The centers of the anchor + relative to the feature grid center in multiple feature levels. + By default it is set to be None and not used. If a list of tuple of + float is given, they will be used to shift the centers of anchors. + center_offset (float): The offset of center in proportion to anchors' + width and height. By default it is 0 in V2.0. + + Examples: + >>> from mmdet.core import AnchorGenerator + >>> self = AnchorGenerator([16], [1.], [1.], [9]) + >>> all_anchors = self.grid_priors([(2, 2)], device='cpu') + >>> print(all_anchors) + [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], + [11.5000, -4.5000, 20.5000, 4.5000], + [-4.5000, 11.5000, 4.5000, 20.5000], + [11.5000, 11.5000, 20.5000, 20.5000]])] + >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18]) + >>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu') + >>> print(all_anchors) + [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], + [11.5000, -4.5000, 20.5000, 4.5000], + [-4.5000, 11.5000, 4.5000, 20.5000], + [11.5000, 11.5000, 20.5000, 20.5000]]), \ + tensor([[-9., -9., 9., 9.]])] + """ + + def __init__(self, + strides, + ratios, + scales=None, + base_sizes=None, + scale_major=True, + octave_base_scale=None, + scales_per_octave=None, + centers=None, + center_offset=0.): + # check center and center_offset + if center_offset != 0: + assert centers is None, 'center cannot be set when center_offset' \ + f'!=0, {centers} is given.' 
+ if not (0 <= center_offset <= 1): + raise ValueError('center_offset should be in range [0, 1], ' + f'{center_offset} is given.') + if centers is not None: + assert len(centers) == len(strides), \ + 'The number of strides should be the same as centers, got ' \ + f'{strides} and {centers}' + + # calculate base sizes of anchors + self.strides = [_pair(stride) for stride in strides] + self.base_sizes = [min(stride) for stride in self.strides + ] if base_sizes is None else base_sizes + assert len(self.base_sizes) == len(self.strides), \ + 'The number of strides should be the same as base sizes, got ' \ + f'{self.strides} and {self.base_sizes}' + + # calculate scales of anchors + assert ((octave_base_scale is not None + and scales_per_octave is not None) ^ (scales is not None)), \ + 'scales and octave_base_scale with scales_per_octave cannot' \ + ' be set at the same time' + if scales is not None: + self.scales = torch.Tensor(scales) + elif octave_base_scale is not None and scales_per_octave is not None: + octave_scales = np.array( + [2**(i / scales_per_octave) for i in range(scales_per_octave)]) + scales = octave_scales * octave_base_scale + self.scales = torch.Tensor(scales) + else: + raise ValueError('Either scales or octave_base_scale with ' + 'scales_per_octave should be set') + + self.octave_base_scale = octave_base_scale + self.scales_per_octave = scales_per_octave + self.ratios = torch.Tensor(ratios) + self.scale_major = scale_major + self.centers = centers + self.center_offset = center_offset + self.base_anchors = self.gen_base_anchors() + + @property + def num_base_anchors(self): + """list[int]: total number of base anchors in a feature grid""" + return self.num_base_priors + + @property + def num_base_priors(self): + """list[int]: The number of priors (anchors) at a point + on the feature grid""" + return [base_anchors.size(0) for base_anchors in self.base_anchors] + + @property + def num_levels(self): + """int: number of feature levels that the generator will be applied""" + return len(self.strides) + + def gen_base_anchors(self): + """Generate base anchors. + + Returns: + list(torch.Tensor): Base anchors of a feature grid in multiple \ + feature levels. + """ + multi_level_base_anchors = [] + for i, base_size in enumerate(self.base_sizes): + center = None + if self.centers is not None: + center = self.centers[i] + multi_level_base_anchors.append( + self.gen_single_level_base_anchors( + base_size, + scales=self.scales, + ratios=self.ratios, + center=center)) + return multi_level_base_anchors + + def gen_single_level_base_anchors(self, + base_size, + scales, + ratios, + center=None): + """Generate base anchors of a single level. + + Args: + base_size (int | float): Basic size of an anchor. + scales (torch.Tensor): Scales of the anchor. + ratios (torch.Tensor): The ratio between between the height + and width of anchors in a single level. + center (tuple[float], optional): The center of the base anchor + related to a single feature grid. Defaults to None. + + Returns: + torch.Tensor: Anchors in a single-level feature maps. 
+ """ + w = base_size + h = base_size + if center is None: + x_center = self.center_offset * w + y_center = self.center_offset * h + else: + x_center, y_center = center + + h_ratios = torch.sqrt(ratios) + w_ratios = 1 / h_ratios + if self.scale_major: + ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) + hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) + else: + ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) + hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) + + # use float anchor and the anchor's center is aligned with the + # pixel center + base_anchors = [ + x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, + y_center + 0.5 * hs + ] + base_anchors = torch.stack(base_anchors, dim=-1) + + return base_anchors + + def _meshgrid(self, x, y, row_major=True): + """Generate mesh grid of x and y. + + Args: + x (torch.Tensor): Grids of x dimension. + y (torch.Tensor): Grids of y dimension. + row_major (bool, optional): Whether to return y grids first. + Defaults to True. + + Returns: + tuple[torch.Tensor]: The mesh grids of x and y. + """ + # use shape instead of len to keep tracing while exporting to onnx + xx = x.repeat(y.shape[0]) + yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) + if row_major: + return xx, yy + else: + return yy, xx + + def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'): + """Generate grid anchors in multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels. + dtype (:obj:`torch.dtype`): Dtype of priors. + Default: torch.float32. + device (str): The device where the anchors will be put on. + + Return: + list[torch.Tensor]: Anchors in multiple feature levels. \ + The sizes of each tensor should be [N, 4], where \ + N = width * height * num_base_anchors, width and height \ + are the sizes of the corresponding feature level, \ + num_base_anchors is the number of anchors for that level. + """ + assert self.num_levels == len(featmap_sizes) + multi_level_anchors = [] + for i in range(self.num_levels): + anchors = self.single_level_grid_priors( + featmap_sizes[i], level_idx=i, dtype=dtype, device=device) + multi_level_anchors.append(anchors) + return multi_level_anchors + + def single_level_grid_priors(self, + featmap_size, + level_idx, + dtype=torch.float32, + device='cuda'): + """Generate grid anchors of a single level. + + Note: + This function is usually called by method ``self.grid_priors``. + + Args: + featmap_size (tuple[int]): Size of the feature maps. + level_idx (int): The index of corresponding feature map level. + dtype (obj:`torch.dtype`): Date type of points.Defaults to + ``torch.float32``. + device (str, optional): The device the tensor will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors in the overall feature maps. + """ + + base_anchors = self.base_anchors[level_idx].to(device).to(dtype) + feat_h, feat_w = featmap_size + stride_w, stride_h = self.strides[level_idx] + # First create Range with the default dtype, than convert to + # target `dtype` for onnx exporting. 
+ shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w + shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h + + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) + # first feat_w elements correspond to the first row of shifts + # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get + # shifted anchors (K, A, 4), reshape to (K*A, 4) + + all_anchors = base_anchors[None, :, :] + shifts[:, None, :] + all_anchors = all_anchors.view(-1, 4) + # first A rows correspond to A anchors of (0, 0) in feature map, + # then (0, 1), (0, 2), ... + return all_anchors + + def sparse_priors(self, + prior_idxs, + featmap_size, + level_idx, + dtype=torch.float32, + device='cuda'): + """Generate sparse anchors according to the ``prior_idxs``. + + Args: + prior_idxs (Tensor): The index of corresponding anchors + in the feature map. + featmap_size (tuple[int]): feature map size arrange as (h, w). + level_idx (int): The level index of corresponding feature + map. + dtype (obj:`torch.dtype`): Date type of points.Defaults to + ``torch.float32``. + device (obj:`torch.device`): The device where the points is + located. + Returns: + Tensor: Anchor with shape (N, 4), N should be equal to + the length of ``prior_idxs``. + """ + + height, width = featmap_size + num_base_anchors = self.num_base_anchors[level_idx] + base_anchor_id = prior_idxs % num_base_anchors + x = (prior_idxs // + num_base_anchors) % width * self.strides[level_idx][0] + y = (prior_idxs // width // + num_base_anchors) % height * self.strides[level_idx][1] + priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \ + self.base_anchors[level_idx][base_anchor_id, :].to(device) + + return priors + + def grid_anchors(self, featmap_sizes, device='cuda'): + """Generate grid anchors in multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels. + device (str): Device where the anchors will be put on. + + Return: + list[torch.Tensor]: Anchors in multiple feature levels. \ + The sizes of each tensor should be [N, 4], where \ + N = width * height * num_base_anchors, width and height \ + are the sizes of the corresponding feature level, \ + num_base_anchors is the number of anchors for that level. + """ + warnings.warn('``grid_anchors`` would be deprecated soon. ' + 'Please use ``grid_priors`` ') + + assert self.num_levels == len(featmap_sizes) + multi_level_anchors = [] + for i in range(self.num_levels): + anchors = self.single_level_grid_anchors( + self.base_anchors[i].to(device), + featmap_sizes[i], + self.strides[i], + device=device) + multi_level_anchors.append(anchors) + return multi_level_anchors + + def single_level_grid_anchors(self, + base_anchors, + featmap_size, + stride=(16, 16), + device='cuda'): + """Generate grid anchors of a single level. + + Note: + This function is usually called by method ``self.grid_anchors``. + + Args: + base_anchors (torch.Tensor): The base anchors of a feature grid. + featmap_size (tuple[int]): Size of the feature maps. + stride (tuple[int], optional): Stride of the feature map in order + (w, h). Defaults to (16, 16). + device (str, optional): Device the tensor will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors in the overall feature maps. + """ + + warnings.warn( + '``single_level_grid_anchors`` would be deprecated soon. 
' + 'Please use ``single_level_grid_priors`` ') + + # keep featmap_size as Tensor instead of int, so that we + # can convert to ONNX correctly + feat_h, feat_w = featmap_size + shift_x = torch.arange(0, feat_w, device=device) * stride[0] + shift_y = torch.arange(0, feat_h, device=device) * stride[1] + + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) + shifts = shifts.type_as(base_anchors) + # first feat_w elements correspond to the first row of shifts + # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get + # shifted anchors (K, A, 4), reshape to (K*A, 4) + + all_anchors = base_anchors[None, :, :] + shifts[:, None, :] + all_anchors = all_anchors.view(-1, 4) + # first A rows correspond to A anchors of (0, 0) in feature map, + # then (0, 1), (0, 2), ... + return all_anchors + + def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): + """Generate valid flags of anchors in multiple feature levels. + + Args: + featmap_sizes (list(tuple)): List of feature map sizes in + multiple feature levels. + pad_shape (tuple): The padded shape of the image. + device (str): Device where the anchors will be put on. + + Return: + list(torch.Tensor): Valid flags of anchors in multiple levels. + """ + assert self.num_levels == len(featmap_sizes) + multi_level_flags = [] + for i in range(self.num_levels): + anchor_stride = self.strides[i] + feat_h, feat_w = featmap_sizes[i] + h, w = pad_shape[:2] + valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h) + valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w) + flags = self.single_level_valid_flags((feat_h, feat_w), + (valid_feat_h, valid_feat_w), + self.num_base_anchors[i], + device=device) + multi_level_flags.append(flags) + return multi_level_flags + + def single_level_valid_flags(self, + featmap_size, + valid_size, + num_base_anchors, + device='cuda'): + """Generate the valid flags of anchor in a single feature map. + + Args: + featmap_size (tuple[int]): The size of feature maps, arrange + as (h, w). + valid_size (tuple[int]): The valid size of the feature maps. + num_base_anchors (int): The number of base anchors. + device (str, optional): Device where the flags will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: The valid flags of each anchor in a single level \ + feature map. 
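+
+        Example:
+            A rough illustration (single base anchor per location; values
+            chosen for demonstration only):
+
+            >>> from mmdet.core import AnchorGenerator
+            >>> self = AnchorGenerator(strides=[16], ratios=[1.], scales=[1.])
+            >>> self.single_level_valid_flags((2, 2), (1, 2), 1, device='cpu')
+            tensor([ True,  True, False, False])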
+ """ + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + valid = valid[:, None].expand(valid.size(0), + num_base_anchors).contiguous().view(-1) + return valid + + def __repr__(self): + """str: a string that describes the module""" + indent_str = ' ' + repr_str = self.__class__.__name__ + '(\n' + repr_str += f'{indent_str}strides={self.strides},\n' + repr_str += f'{indent_str}ratios={self.ratios},\n' + repr_str += f'{indent_str}scales={self.scales},\n' + repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' + repr_str += f'{indent_str}scale_major={self.scale_major},\n' + repr_str += f'{indent_str}octave_base_scale=' + repr_str += f'{self.octave_base_scale},\n' + repr_str += f'{indent_str}scales_per_octave=' + repr_str += f'{self.scales_per_octave},\n' + repr_str += f'{indent_str}num_levels={self.num_levels}\n' + repr_str += f'{indent_str}centers={self.centers},\n' + repr_str += f'{indent_str}center_offset={self.center_offset})' + return repr_str + + +@PRIOR_GENERATORS.register_module() +class SSDAnchorGenerator(AnchorGenerator): + """Anchor generator for SSD. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels. + ratios (list[float]): The list of ratios between the height and width + of anchors in a single level. + min_sizes (list[float]): The list of minimum anchor sizes on each + level. + max_sizes (list[float]): The list of maximum anchor sizes on each + level. + basesize_ratio_range (tuple(float)): Ratio range of anchors. Being + used when not setting min_sizes and max_sizes. + input_size (int): Size of feature map, 300 for SSD300, 512 for + SSD512. Being used when not setting min_sizes and max_sizes. + scale_major (bool): Whether to multiply scales first when generating + base anchors. If true, the anchors in the same row will have the + same scales. It is always set to be False in SSD. + """ + + def __init__(self, + strides, + ratios, + min_sizes=None, + max_sizes=None, + basesize_ratio_range=(0.15, 0.9), + input_size=300, + scale_major=True): + assert len(strides) == len(ratios) + assert not (min_sizes is None) ^ (max_sizes is None) + self.strides = [_pair(stride) for stride in strides] + self.centers = [(stride[0] / 2., stride[1] / 2.) 
+ for stride in self.strides] + + if min_sizes is None and max_sizes is None: + # use hard code to generate SSD anchors + self.input_size = input_size + assert mmcv.is_tuple_of(basesize_ratio_range, float) + self.basesize_ratio_range = basesize_ratio_range + # calculate anchor ratios and sizes + min_ratio, max_ratio = basesize_ratio_range + min_ratio = int(min_ratio * 100) + max_ratio = int(max_ratio * 100) + step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) + min_sizes = [] + max_sizes = [] + for ratio in range(int(min_ratio), int(max_ratio) + 1, step): + min_sizes.append(int(self.input_size * ratio / 100)) + max_sizes.append(int(self.input_size * (ratio + step) / 100)) + if self.input_size == 300: + if basesize_ratio_range[0] == 0.15: # SSD300 COCO + min_sizes.insert(0, int(self.input_size * 7 / 100)) + max_sizes.insert(0, int(self.input_size * 15 / 100)) + elif basesize_ratio_range[0] == 0.2: # SSD300 VOC + min_sizes.insert(0, int(self.input_size * 10 / 100)) + max_sizes.insert(0, int(self.input_size * 20 / 100)) + else: + raise ValueError( + 'basesize_ratio_range[0] should be either 0.15' + 'or 0.2 when input_size is 300, got ' + f'{basesize_ratio_range[0]}.') + elif self.input_size == 512: + if basesize_ratio_range[0] == 0.1: # SSD512 COCO + min_sizes.insert(0, int(self.input_size * 4 / 100)) + max_sizes.insert(0, int(self.input_size * 10 / 100)) + elif basesize_ratio_range[0] == 0.15: # SSD512 VOC + min_sizes.insert(0, int(self.input_size * 7 / 100)) + max_sizes.insert(0, int(self.input_size * 15 / 100)) + else: + raise ValueError( + 'When not setting min_sizes and max_sizes,' + 'basesize_ratio_range[0] should be either 0.1' + 'or 0.15 when input_size is 512, got' + f' {basesize_ratio_range[0]}.') + else: + raise ValueError( + 'Only support 300 or 512 in SSDAnchorGenerator when ' + 'not setting min_sizes and max_sizes, ' + f'got {self.input_size}.') + + assert len(min_sizes) == len(max_sizes) == len(strides) + + anchor_ratios = [] + anchor_scales = [] + for k in range(len(self.strides)): + scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] + anchor_ratio = [1.] + for r in ratios[k]: + anchor_ratio += [1 / r, r] # 4 or 6 ratio + anchor_ratios.append(torch.Tensor(anchor_ratio)) + anchor_scales.append(torch.Tensor(scales)) + + self.base_sizes = min_sizes + self.scales = anchor_scales + self.ratios = anchor_ratios + self.scale_major = scale_major + self.center_offset = 0 + self.base_anchors = self.gen_base_anchors() + + def gen_base_anchors(self): + """Generate base anchors. + + Returns: + list(torch.Tensor): Base anchors of a feature grid in multiple \ + feature levels. 
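+
+        Example:
+            An illustrative sketch for an SSD300-style configuration (the
+            exact anchor counts follow from the ratios given per level):
+
+            >>> from mmdet.core import SSDAnchorGenerator
+            >>> self = SSDAnchorGenerator(
+            ...     strides=[8, 16, 32, 64, 100, 300],
+            ...     ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]],
+            ...     basesize_ratio_range=(0.15, 0.9))
+            >>> [anchors.size(0) for anchors in self.gen_base_anchors()]
+            [4, 6, 6, 6, 4, 4]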
+ """ + multi_level_base_anchors = [] + for i, base_size in enumerate(self.base_sizes): + base_anchors = self.gen_single_level_base_anchors( + base_size, + scales=self.scales[i], + ratios=self.ratios[i], + center=self.centers[i]) + indices = list(range(len(self.ratios[i]))) + indices.insert(1, len(indices)) + base_anchors = torch.index_select(base_anchors, 0, + torch.LongTensor(indices)) + multi_level_base_anchors.append(base_anchors) + return multi_level_base_anchors + + def __repr__(self): + """str: a string that describes the module""" + indent_str = ' ' + repr_str = self.__class__.__name__ + '(\n' + repr_str += f'{indent_str}strides={self.strides},\n' + repr_str += f'{indent_str}scales={self.scales},\n' + repr_str += f'{indent_str}scale_major={self.scale_major},\n' + repr_str += f'{indent_str}input_size={self.input_size},\n' + repr_str += f'{indent_str}scales={self.scales},\n' + repr_str += f'{indent_str}ratios={self.ratios},\n' + repr_str += f'{indent_str}num_levels={self.num_levels},\n' + repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' + repr_str += f'{indent_str}basesize_ratio_range=' + repr_str += f'{self.basesize_ratio_range})' + return repr_str + + +@PRIOR_GENERATORS.register_module() +class LegacyAnchorGenerator(AnchorGenerator): + """Legacy anchor generator used in MMDetection V1.x. + + Note: + Difference to the V2.0 anchor generator: + + 1. The center offset of V1.x anchors are set to be 0.5 rather than 0. + 2. The width/height are minused by 1 when calculating the anchors' \ + centers and corners to meet the V1.x coordinate system. + 3. The anchors' corners are quantized. + + Args: + strides (list[int] | list[tuple[int]]): Strides of anchors + in multiple feature levels. + ratios (list[float]): The list of ratios between the height and width + of anchors in a single level. + scales (list[int] | None): Anchor scales for anchors in a single level. + It cannot be set at the same time if `octave_base_scale` and + `scales_per_octave` are set. + base_sizes (list[int]): The basic sizes of anchors in multiple levels. + If None is given, strides will be used to generate base_sizes. + scale_major (bool): Whether to multiply scales first when generating + base anchors. If true, the anchors in the same row will have the + same scales. By default it is True in V2.0 + octave_base_scale (int): The base scale of octave. + scales_per_octave (int): Number of scales for each octave. + `octave_base_scale` and `scales_per_octave` are usually used in + retinanet and the `scales` should be None when they are set. + centers (list[tuple[float, float]] | None): The centers of the anchor + relative to the feature grid center in multiple feature levels. + By default it is set to be None and not used. It a list of float + is given, this list will be used to shift the centers of anchors. + center_offset (float): The offset of center in proportion to anchors' + width and height. By default it is 0.5 in V2.0 but it should be 0.5 + in v1.x models. + + Examples: + >>> from mmdet.core import LegacyAnchorGenerator + >>> self = LegacyAnchorGenerator( + >>> [16], [1.], [1.], [9], center_offset=0.5) + >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu') + >>> print(all_anchors) + [tensor([[ 0., 0., 8., 8.], + [16., 0., 24., 8.], + [ 0., 16., 8., 24.], + [16., 16., 24., 24.]])] + """ + + def gen_single_level_base_anchors(self, + base_size, + scales, + ratios, + center=None): + """Generate base anchors of a single level. 
+ + Note: + The width/height of anchors are minused by 1 when calculating \ + the centers and corners to meet the V1.x coordinate system. + + Args: + base_size (int | float): Basic size of an anchor. + scales (torch.Tensor): Scales of the anchor. + ratios (torch.Tensor): The ratio between between the height. + and width of anchors in a single level. + center (tuple[float], optional): The center of the base anchor + related to a single feature grid. Defaults to None. + + Returns: + torch.Tensor: Anchors in a single-level feature map. + """ + w = base_size + h = base_size + if center is None: + x_center = self.center_offset * (w - 1) + y_center = self.center_offset * (h - 1) + else: + x_center, y_center = center + + h_ratios = torch.sqrt(ratios) + w_ratios = 1 / h_ratios + if self.scale_major: + ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) + hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) + else: + ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) + hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) + + # use float anchor and the anchor's center is aligned with the + # pixel center + base_anchors = [ + x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1), + x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1) + ] + base_anchors = torch.stack(base_anchors, dim=-1).round() + + return base_anchors + + +@PRIOR_GENERATORS.register_module() +class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator): + """Legacy anchor generator used in MMDetection V1.x. + + The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator` + can be found in `LegacyAnchorGenerator`. + """ + + def __init__(self, + strides, + ratios, + basesize_ratio_range, + input_size=300, + scale_major=True): + super(LegacySSDAnchorGenerator, self).__init__( + strides=strides, + ratios=ratios, + basesize_ratio_range=basesize_ratio_range, + input_size=input_size, + scale_major=scale_major) + self.centers = [((stride - 1) / 2., (stride - 1) / 2.) + for stride in strides] + self.base_anchors = self.gen_base_anchors() + + +@PRIOR_GENERATORS.register_module() +class YOLOAnchorGenerator(AnchorGenerator): + """Anchor generator for YOLO. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels. + base_sizes (list[list[tuple[int, int]]]): The basic sizes + of anchors in multiple levels. + """ + + def __init__(self, strides, base_sizes): + self.strides = [_pair(stride) for stride in strides] + self.centers = [(stride[0] / 2., stride[1] / 2.) + for stride in self.strides] + self.base_sizes = [] + num_anchor_per_level = len(base_sizes[0]) + for base_sizes_per_level in base_sizes: + assert num_anchor_per_level == len(base_sizes_per_level) + self.base_sizes.append( + [_pair(base_size) for base_size in base_sizes_per_level]) + self.base_anchors = self.gen_base_anchors() + + @property + def num_levels(self): + """int: number of feature levels that the generator will be applied""" + return len(self.base_sizes) + + def gen_base_anchors(self): + """Generate base anchors. + + Returns: + list(torch.Tensor): Base anchors of a feature grid in multiple \ + feature levels. 
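+
+        Example:
+            A minimal sketch with a single feature level (the anchor sizes
+            are illustrative only):
+
+            >>> from mmdet.core import YOLOAnchorGenerator
+            >>> self = YOLOAnchorGenerator(
+            ...     strides=[32],
+            ...     base_sizes=[[(116, 90), (156, 198), (373, 326)]])
+            >>> self.gen_base_anchors()[0].shape
+            torch.Size([3, 4])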
+ """ + multi_level_base_anchors = [] + for i, base_sizes_per_level in enumerate(self.base_sizes): + center = None + if self.centers is not None: + center = self.centers[i] + multi_level_base_anchors.append( + self.gen_single_level_base_anchors(base_sizes_per_level, + center)) + return multi_level_base_anchors + + def gen_single_level_base_anchors(self, base_sizes_per_level, center=None): + """Generate base anchors of a single level. + + Args: + base_sizes_per_level (list[tuple[int, int]]): Basic sizes of + anchors. + center (tuple[float], optional): The center of the base anchor + related to a single feature grid. Defaults to None. + + Returns: + torch.Tensor: Anchors in a single-level feature maps. + """ + x_center, y_center = center + base_anchors = [] + for base_size in base_sizes_per_level: + w, h = base_size + + # use float anchor and the anchor's center is aligned with the + # pixel center + base_anchor = torch.Tensor([ + x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, + y_center + 0.5 * h + ]) + base_anchors.append(base_anchor) + base_anchors = torch.stack(base_anchors, dim=0) + + return base_anchors + + def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'): + """Generate responsible anchor flags of grid cells in multiple scales. + + Args: + featmap_sizes (list(tuple)): List of feature map sizes in multiple + feature levels. + gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). + device (str): Device where the anchors will be put on. + + Return: + list(torch.Tensor): responsible flags of anchors in multiple level + """ + assert self.num_levels == len(featmap_sizes) + multi_level_responsible_flags = [] + for i in range(self.num_levels): + anchor_stride = self.strides[i] + flags = self.single_level_responsible_flags( + featmap_sizes[i], + gt_bboxes, + anchor_stride, + self.num_base_anchors[i], + device=device) + multi_level_responsible_flags.append(flags) + return multi_level_responsible_flags + + def single_level_responsible_flags(self, + featmap_size, + gt_bboxes, + stride, + num_base_anchors, + device='cuda'): + """Generate the responsible flags of anchor in a single feature map. + + Args: + featmap_size (tuple[int]): The size of feature maps. + gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). + stride (tuple(int)): stride of current level + num_base_anchors (int): The number of base anchors. + device (str, optional): Device where the flags will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: The valid flags of each anchor in a single level \ + feature map. + """ + feat_h, feat_w = featmap_size + gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) + gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) + gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long() + gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long() + + # row major indexing + gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x + + responsible_grid = torch.zeros( + feat_h * feat_w, dtype=torch.uint8, device=device) + responsible_grid[gt_bboxes_grid_idx] = 1 + + responsible_grid = responsible_grid[:, None].expand( + responsible_grid.size(0), num_base_anchors).contiguous().view(-1) + return responsible_grid diff --git a/downstream/mmdetection/mmdet/core/anchor/builder.py b/downstream/mmdetection/mmdet/core/anchor/builder.py new file mode 100644 index 0000000..ddb25ad --- /dev/null +++ b/downstream/mmdetection/mmdet/core/anchor/builder.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
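+# A minimal usage sketch (illustrative only): generators are registered in
+# ``PRIOR_GENERATORS`` below and are usually instantiated from a config dict,
+# e.g.
+#   from mmdet.core.anchor import build_prior_generator
+#   prior_generator = build_prior_generator(
+#       dict(type='AnchorGenerator', strides=[16], ratios=[1.0], scales=[8]))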
+import warnings + +from mmcv.utils import Registry, build_from_cfg + +PRIOR_GENERATORS = Registry('Generator for anchors and points') + +ANCHOR_GENERATORS = PRIOR_GENERATORS + + +def build_prior_generator(cfg, default_args=None): + return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) + + +def build_anchor_generator(cfg, default_args=None): + warnings.warn( + '``build_anchor_generator`` would be deprecated soon, please use ' + '``build_prior_generator`` ') + return build_prior_generator(cfg, default_args=default_args) diff --git a/downstream/mmdetection/mmdet/core/anchor/point_generator.py b/downstream/mmdetection/mmdet/core/anchor/point_generator.py new file mode 100644 index 0000000..cc9c388 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/anchor/point_generator.py @@ -0,0 +1,263 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from torch.nn.modules.utils import _pair + +from .builder import PRIOR_GENERATORS + + +@PRIOR_GENERATORS.register_module() +class PointGenerator: + + def _meshgrid(self, x, y, row_major=True): + xx = x.repeat(len(y)) + yy = y.view(-1, 1).repeat(1, len(x)).view(-1) + if row_major: + return xx, yy + else: + return yy, xx + + def grid_points(self, featmap_size, stride=16, device='cuda'): + feat_h, feat_w = featmap_size + shift_x = torch.arange(0., feat_w, device=device) * stride + shift_y = torch.arange(0., feat_h, device=device) * stride + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + stride = shift_x.new_full((shift_xx.shape[0], ), stride) + shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) + all_points = shifts.to(device) + return all_points + + def valid_flags(self, featmap_size, valid_size, device='cuda'): + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + return valid + + +@PRIOR_GENERATORS.register_module() +class MlvlPointGenerator: + """Standard points generator for multi-level (Mlvl) feature maps in 2D + points-based detectors. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels in order (w, h). + offset (float): The offset of points, the value is normalized with + corresponding stride. Defaults to 0.5. + """ + + def __init__(self, strides, offset=0.5): + self.strides = [_pair(stride) for stride in strides] + self.offset = offset + + @property + def num_levels(self): + """int: number of feature levels that the generator will be applied""" + return len(self.strides) + + @property + def num_base_priors(self): + """list[int]: The number of priors (points) at a point + on the feature grid""" + return [1 for _ in range(len(self.strides))] + + def _meshgrid(self, x, y, row_major=True): + yy, xx = torch.meshgrid(y, x) + if row_major: + # warning .flatten() would cause error in ONNX exporting + # have to use reshape here + return xx.reshape(-1), yy.reshape(-1) + + else: + return yy.reshape(-1), xx.reshape(-1) + + def grid_priors(self, + featmap_sizes, + dtype=torch.float32, + device='cuda', + with_stride=False): + """Generate grid points of multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels, each size arrange as + as (h, w). 
+ dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. + device (str): The device where the anchors will be put on. + with_stride (bool): Whether to concatenate the stride to + the last dimension of points. + + Return: + list[torch.Tensor]: Points of multiple feature levels. + The sizes of each tensor should be (N, 2) when with stride is + ``False``, where N = width * height, width and height + are the sizes of the corresponding feature level, + and the last dimension 2 represent (coord_x, coord_y), + otherwise the shape should be (N, 4), + and the last dimension 4 represent + (coord_x, coord_y, stride_w, stride_h). + """ + + assert self.num_levels == len(featmap_sizes) + multi_level_priors = [] + for i in range(self.num_levels): + priors = self.single_level_grid_priors( + featmap_sizes[i], + level_idx=i, + dtype=dtype, + device=device, + with_stride=with_stride) + multi_level_priors.append(priors) + return multi_level_priors + + def single_level_grid_priors(self, + featmap_size, + level_idx, + dtype=torch.float32, + device='cuda', + with_stride=False): + """Generate grid Points of a single level. + + Note: + This function is usually called by method ``self.grid_priors``. + + Args: + featmap_size (tuple[int]): Size of the feature maps, arrange as + (h, w). + level_idx (int): The index of corresponding feature map level. + dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. + device (str, optional): The device the tensor will be put on. + Defaults to 'cuda'. + with_stride (bool): Concatenate the stride to the last dimension + of points. + + Return: + Tensor: Points of single feature levels. + The shape of tensor should be (N, 2) when with stride is + ``False``, where N = width * height, width and height + are the sizes of the corresponding feature level, + and the last dimension 2 represent (coord_x, coord_y), + otherwise the shape should be (N, 4), + and the last dimension 4 represent + (coord_x, coord_y, stride_w, stride_h). + """ + feat_h, feat_w = featmap_size + stride_w, stride_h = self.strides[level_idx] + shift_x = (torch.arange(0, feat_w, device=device) + + self.offset) * stride_w + # keep featmap_size as Tensor instead of int, so that we + # can convert to ONNX correctly + shift_x = shift_x.to(dtype) + + shift_y = (torch.arange(0, feat_h, device=device) + + self.offset) * stride_h + # keep featmap_size as Tensor instead of int, so that we + # can convert to ONNX correctly + shift_y = shift_y.to(dtype) + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + if not with_stride: + shifts = torch.stack([shift_xx, shift_yy], dim=-1) + else: + # use `shape[0]` instead of `len(shift_xx)` for ONNX export + stride_w = shift_xx.new_full((shift_xx.shape[0], ), + stride_w).to(dtype) + stride_h = shift_xx.new_full((shift_yy.shape[0], ), + stride_h).to(dtype) + shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], + dim=-1) + all_points = shifts.to(device) + return all_points + + def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): + """Generate valid flags of points of multiple feature levels. + + Args: + featmap_sizes (list(tuple)): List of feature map sizes in + multiple feature levels, each size arrange as + as (h, w). + pad_shape (tuple(int)): The padded shape of the image, + arrange as (h, w). + device (str): The device where the anchors will be put on. + + Return: + list(torch.Tensor): Valid flags of points of multiple levels. 
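+
+        Example:
+            A rough single-level illustration (values chosen for
+            demonstration only):
+
+            >>> from mmdet.core import MlvlPointGenerator
+            >>> self = MlvlPointGenerator(strides=[16])
+            >>> self.valid_flags([(2, 2)], (16, 20), device='cpu')
+            [tensor([ True,  True, False, False])]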
+ """ + assert self.num_levels == len(featmap_sizes) + multi_level_flags = [] + for i in range(self.num_levels): + point_stride = self.strides[i] + feat_h, feat_w = featmap_sizes[i] + h, w = pad_shape[:2] + valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h) + valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w) + flags = self.single_level_valid_flags((feat_h, feat_w), + (valid_feat_h, valid_feat_w), + device=device) + multi_level_flags.append(flags) + return multi_level_flags + + def single_level_valid_flags(self, + featmap_size, + valid_size, + device='cuda'): + """Generate the valid flags of points of a single feature map. + + Args: + featmap_size (tuple[int]): The size of feature maps, arrange as + as (h, w). + valid_size (tuple[int]): The valid size of the feature maps. + The size arrange as as (h, w). + device (str, optional): The device where the flags will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: The valid flags of each points in a single level \ + feature map. + """ + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + return valid + + def sparse_priors(self, + prior_idxs, + featmap_size, + level_idx, + dtype=torch.float32, + device='cuda'): + """Generate sparse points according to the ``prior_idxs``. + + Args: + prior_idxs (Tensor): The index of corresponding anchors + in the feature map. + featmap_size (tuple[int]): feature map size arrange as (w, h). + level_idx (int): The level index of corresponding feature + map. + dtype (obj:`torch.dtype`): Date type of points. Defaults to + ``torch.float32``. + device (obj:`torch.device`): The device where the points is + located. + Returns: + Tensor: Anchor with shape (N, 2), N should be equal to + the length of ``prior_idxs``. And last dimension + 2 represent (coord_x, coord_y). + """ + height, width = featmap_size + x = (prior_idxs % width + self.offset) * self.strides[level_idx][0] + y = ((prior_idxs // width) % height + + self.offset) * self.strides[level_idx][1] + prioris = torch.stack([x, y], 1).to(dtype) + prioris = prioris.to(device) + return prioris diff --git a/downstream/mmdetection/mmdet/core/anchor/utils.py b/downstream/mmdetection/mmdet/core/anchor/utils.py new file mode 100644 index 0000000..c2f2024 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/anchor/utils.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def images_to_levels(target, num_levels): + """Convert targets by image to targets by feature level. + + [target_img0, target_img1] -> [target_level0, target_level1, ...] + """ + target = torch.stack(target, 0) + level_targets = [] + start = 0 + for n in num_levels: + end = start + n + # level_targets.append(target[:, start:end].squeeze(0)) + level_targets.append(target[:, start:end]) + start = end + return level_targets + + +def anchor_inside_flags(flat_anchors, + valid_flags, + img_shape, + allowed_border=0): + """Check whether the anchors are inside the border. + + Args: + flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4). + valid_flags (torch.Tensor): An existing valid flags of anchors. + img_shape (tuple(int)): Shape of current image. + allowed_border (int, optional): The border to allow the valid anchor. 
+ Defaults to 0. + + Returns: + torch.Tensor: Flags indicating whether the anchors are inside a \ + valid range. + """ + img_h, img_w = img_shape[:2] + if allowed_border >= 0: + inside_flags = valid_flags & \ + (flat_anchors[:, 0] >= -allowed_border) & \ + (flat_anchors[:, 1] >= -allowed_border) & \ + (flat_anchors[:, 2] < img_w + allowed_border) & \ + (flat_anchors[:, 3] < img_h + allowed_border) + else: + inside_flags = valid_flags + return inside_flags + + +def calc_region(bbox, ratio, featmap_size=None): + """Calculate a proportional bbox region. + + The bbox center are fixed and the new h' and w' is h * ratio and w * ratio. + + Args: + bbox (Tensor): Bboxes to calculate regions, shape (n, 4). + ratio (float): Ratio of the output region. + featmap_size (tuple): Feature map size used for clipping the boundary. + + Returns: + tuple: x1, y1, x2, y2 + """ + x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long() + y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long() + x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long() + y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long() + if featmap_size is not None: + x1 = x1.clamp(min=0, max=featmap_size[1]) + y1 = y1.clamp(min=0, max=featmap_size[0]) + x2 = x2.clamp(min=0, max=featmap_size[1]) + y2 = y2.clamp(min=0, max=featmap_size[0]) + return (x1, y1, x2, y2) diff --git a/downstream/mmdetection/mmdet/core/bbox/__init__.py b/downstream/mmdetection/mmdet/core/bbox/__init__.py new file mode 100644 index 0000000..371eba1 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner, + MaxIoUAssigner, RegionAssigner) +from .builder import build_assigner, build_bbox_coder, build_sampler +from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder, + PseudoBBoxCoder, TBLRBBoxCoder) +from .iou_calculators import BboxOverlaps2D, bbox_overlaps +from .samplers import (BaseSampler, CombinedSampler, + InstanceBalancedPosSampler, IoUBalancedNegSampler, + OHEMSampler, PseudoSampler, RandomSampler, + SamplingResult, ScoreHLRSampler) +from .transforms import (bbox2distance, bbox2result, bbox2roi, + bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping, + bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh, + distance2bbox, find_inside_bboxes, roi2bbox) + +__all__ = [ + 'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner', + 'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler', + 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', + 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner', + 'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back', + 'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance', + 'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder', + 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder', + 'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy', + 'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes' +] diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/__init__.py b/downstream/mmdetection/mmdet/core/bbox/assigners/__init__.py new file mode 100644 index 0000000..5eaf7fa --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
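+# Illustrative note: assigners are normally built from a config dict through
+# ``build_assigner`` (defined in ``mmdet.core.bbox.builder``), e.g.
+#   from mmdet.core.bbox import build_assigner
+#   assigner = build_assigner(
+#       dict(type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3))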
+from .approx_max_iou_assigner import ApproxMaxIoUAssigner +from .assign_result import AssignResult +from .atss_assigner import ATSSAssigner +from .base_assigner import BaseAssigner +from .center_region_assigner import CenterRegionAssigner +from .grid_assigner import GridAssigner +from .hungarian_assigner import HungarianAssigner +from .mask_hungarian_assigner import MaskHungarianAssigner +from .max_iou_assigner import MaxIoUAssigner +from .point_assigner import PointAssigner +from .region_assigner import RegionAssigner +from .sim_ota_assigner import SimOTAAssigner +from .task_aligned_assigner import TaskAlignedAssigner +from .uniform_assigner import UniformAssigner + +__all__ = [ + 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', + 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner', + 'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner', + 'TaskAlignedAssigner', 'MaskHungarianAssigner' +] diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py new file mode 100644 index 0000000..304d09c --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_ASSIGNERS +from ..iou_calculators import build_iou_calculator +from .max_iou_assigner import MaxIoUAssigner + + +@BBOX_ASSIGNERS.register_module() +class ApproxMaxIoUAssigner(MaxIoUAssigner): + """Assign a corresponding gt bbox or background to each bbox. + + Each proposals will be assigned with an integer indicating the ground truth + index. (semi-positive index: gt label (0-based), -1: background) + + - -1: negative sample, no assigned gt + - semi-positive integer: positive sample, index (0-based) of assigned gt + + Args: + pos_iou_thr (float): IoU threshold for positive bboxes. + neg_iou_thr (float or tuple): IoU threshold for negative bboxes. + min_pos_iou (float): Minimum iou for a bbox to be considered as a + positive bbox. Positive samples can have smaller IoU than + pos_iou_thr due to the 4th step (assign max IoU sample to each gt). + gt_max_assign_all (bool): Whether to assign all bboxes with the same + highest overlap with some gt to that gt. + ignore_iof_thr (float): IoF threshold for ignoring bboxes (if + `gt_bboxes_ignore` is specified). Negative values mean not + ignoring any bboxes. + ignore_wrt_candidates (bool): Whether to compute the iof between + `bboxes` and `gt_bboxes_ignore`, or the contrary. + match_low_quality (bool): Whether to allow quality matches. This is + usually allowed for RPN and single stage detectors, but not allowed + in the second stage. + gpu_assign_thr (int): The upper bound of the number of GT for GPU + assign. When the number of gt is above this threshold, will assign + on CPU device. Negative values mean not assign on CPU. 
+ """ + + def __init__(self, + pos_iou_thr, + neg_iou_thr, + min_pos_iou=.0, + gt_max_assign_all=True, + ignore_iof_thr=-1, + ignore_wrt_candidates=True, + match_low_quality=True, + gpu_assign_thr=-1, + iou_calculator=dict(type='BboxOverlaps2D')): + self.pos_iou_thr = pos_iou_thr + self.neg_iou_thr = neg_iou_thr + self.min_pos_iou = min_pos_iou + self.gt_max_assign_all = gt_max_assign_all + self.ignore_iof_thr = ignore_iof_thr + self.ignore_wrt_candidates = ignore_wrt_candidates + self.gpu_assign_thr = gpu_assign_thr + self.match_low_quality = match_low_quality + self.iou_calculator = build_iou_calculator(iou_calculator) + + def assign(self, + approxs, + squares, + approxs_per_octave, + gt_bboxes, + gt_bboxes_ignore=None, + gt_labels=None): + """Assign gt to approxs. + + This method assign a gt bbox to each group of approxs (bboxes), + each group of approxs is represent by a base approx (bbox) and + will be assigned with -1, or a semi-positive number. + background_label (-1) means negative sample, + semi-positive number is the index (0-based) of assigned gt. + The assignment is done in following steps, the order matters. + + 1. assign every bbox to background_label (-1) + 2. use the max IoU of each group of approxs to assign + 2. assign proposals whose iou with all gts < neg_iou_thr to background + 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, + assign it to that bbox + 4. for each gt bbox, assign its nearest proposals (may be more than + one) to itself + + Args: + approxs (Tensor): Bounding boxes to be assigned, + shape(approxs_per_octave*n, 4). + squares (Tensor): Base Bounding boxes to be assigned, + shape(n, 4). + approxs_per_octave (int): number of approxs per octave + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). + + Returns: + :obj:`AssignResult`: The assign result. 
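+
+        Example:
+            An illustrative sketch with ``approxs_per_octave=1``, so approxs
+            and squares coincide (values are for demonstration only):
+
+            >>> self = ApproxMaxIoUAssigner(0.5, 0.5)
+            >>> approxs = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
+            >>> squares = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
+            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
+            >>> assign_result = self.assign(approxs, squares, 1, gt_bboxes)
+            >>> assign_result.gt_inds
+            tensor([1, 0])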
+ """ + num_squares = squares.size(0) + num_gts = gt_bboxes.size(0) + + if num_squares == 0 or num_gts == 0: + # No predictions and/or truth, return empty assignment + overlaps = approxs.new(num_gts, num_squares) + assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) + return assign_result + + # re-organize anchors by approxs_per_octave x num_squares + approxs = torch.transpose( + approxs.view(num_squares, approxs_per_octave, 4), 0, + 1).contiguous().view(-1, 4) + assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( + num_gts > self.gpu_assign_thr) else False + # compute overlap and assign gt on CPU when number of GT is large + if assign_on_cpu: + device = approxs.device + approxs = approxs.cpu() + gt_bboxes = gt_bboxes.cpu() + if gt_bboxes_ignore is not None: + gt_bboxes_ignore = gt_bboxes_ignore.cpu() + if gt_labels is not None: + gt_labels = gt_labels.cpu() + all_overlaps = self.iou_calculator(approxs, gt_bboxes) + + overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, + num_gts).max(dim=0) + overlaps = torch.transpose(overlaps, 0, 1) + + if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None + and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0): + if self.ignore_wrt_candidates: + ignore_overlaps = self.iou_calculator( + squares, gt_bboxes_ignore, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) + else: + ignore_overlaps = self.iou_calculator( + gt_bboxes_ignore, squares, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) + overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 + + assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) + if assign_on_cpu: + assign_result.gt_inds = assign_result.gt_inds.to(device) + assign_result.max_overlaps = assign_result.max_overlaps.to(device) + if assign_result.labels is not None: + assign_result.labels = assign_result.labels.to(device) + return assign_result diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/assign_result.py b/downstream/mmdetection/mmdet/core/bbox/assigners/assign_result.py new file mode 100644 index 0000000..488010b --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/assign_result.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.utils import util_mixins + + +class AssignResult(util_mixins.NiceRepr): + """Stores assignments between predicted and truth boxes. + + Attributes: + num_gts (int): the number of truth boxes considered when computing this + assignment + + gt_inds (LongTensor): for each predicted box indicates the 1-based + index of the assigned truth box. 0 means unassigned and -1 means + ignore. + + max_overlaps (FloatTensor): the iou between the predicted box and its + assigned truth box. + + labels (None | LongTensor): If specified, for each predicted box + indicates the category label of the assigned truth box. + + Example: + >>> # An assign result between 4 predicted boxes and 9 true boxes + >>> # where only two boxes were assigned. 
+ >>> num_gts = 9 + >>> max_overlaps = torch.LongTensor([0, .5, .9, 0]) + >>> gt_inds = torch.LongTensor([-1, 1, 2, 0]) + >>> labels = torch.LongTensor([0, 3, 4, 0]) + >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels) + >>> print(str(self)) # xdoctest: +IGNORE_WANT + + >>> # Force addition of gt labels (when adding gt as proposals) + >>> new_labels = torch.LongTensor([3, 4, 5]) + >>> self.add_gt_(new_labels) + >>> print(str(self)) # xdoctest: +IGNORE_WANT + + """ + + def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): + self.num_gts = num_gts + self.gt_inds = gt_inds + self.max_overlaps = max_overlaps + self.labels = labels + # Interface for possible user-defined properties + self._extra_properties = {} + + @property + def num_preds(self): + """int: the number of predictions in this assignment""" + return len(self.gt_inds) + + def set_extra_property(self, key, value): + """Set user-defined new property.""" + assert key not in self.info + self._extra_properties[key] = value + + def get_extra_property(self, key): + """Get user-defined property.""" + return self._extra_properties.get(key, None) + + @property + def info(self): + """dict: a dictionary of info about the object""" + basic_info = { + 'num_gts': self.num_gts, + 'num_preds': self.num_preds, + 'gt_inds': self.gt_inds, + 'max_overlaps': self.max_overlaps, + 'labels': self.labels, + } + basic_info.update(self._extra_properties) + return basic_info + + def __nice__(self): + """str: a "nice" summary string describing this assign result""" + parts = [] + parts.append(f'num_gts={self.num_gts!r}') + if self.gt_inds is None: + parts.append(f'gt_inds={self.gt_inds!r}') + else: + parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}') + if self.max_overlaps is None: + parts.append(f'max_overlaps={self.max_overlaps!r}') + else: + parts.append('max_overlaps.shape=' + f'{tuple(self.max_overlaps.shape)!r}') + if self.labels is None: + parts.append(f'labels={self.labels!r}') + else: + parts.append(f'labels.shape={tuple(self.labels.shape)!r}') + return ', '.join(parts) + + @classmethod + def random(cls, **kwargs): + """Create random AssignResult for tests or debugging. + + Args: + num_preds: number of predicted boxes + num_gts: number of true boxes + p_ignore (float): probability of a predicted box assigned to an + ignored truth + p_assigned (float): probability of a predicted box not being + assigned + p_use_label (float | bool): with labels or not + rng (None | int | numpy.random.RandomState): seed or state + + Returns: + :obj:`AssignResult`: Randomly generated assign results. 
+ + Example: + >>> from mmdet.core.bbox.assigners.assign_result import * # NOQA + >>> self = AssignResult.random() + >>> print(self.info) + """ + from mmdet.core.bbox import demodata + rng = demodata.ensure_rng(kwargs.get('rng', None)) + + num_gts = kwargs.get('num_gts', None) + num_preds = kwargs.get('num_preds', None) + p_ignore = kwargs.get('p_ignore', 0.3) + p_assigned = kwargs.get('p_assigned', 0.7) + p_use_label = kwargs.get('p_use_label', 0.5) + num_classes = kwargs.get('p_use_label', 3) + + if num_gts is None: + num_gts = rng.randint(0, 8) + if num_preds is None: + num_preds = rng.randint(0, 16) + + if num_gts == 0: + max_overlaps = torch.zeros(num_preds, dtype=torch.float32) + gt_inds = torch.zeros(num_preds, dtype=torch.int64) + if p_use_label is True or p_use_label < rng.rand(): + labels = torch.zeros(num_preds, dtype=torch.int64) + else: + labels = None + else: + import numpy as np + + # Create an overlap for each predicted box + max_overlaps = torch.from_numpy(rng.rand(num_preds)) + + # Construct gt_inds for each predicted box + is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned) + # maximum number of assignments constraints + n_assigned = min(num_preds, min(num_gts, is_assigned.sum())) + + assigned_idxs = np.where(is_assigned)[0] + rng.shuffle(assigned_idxs) + assigned_idxs = assigned_idxs[0:n_assigned] + assigned_idxs.sort() + + is_assigned[:] = 0 + is_assigned[assigned_idxs] = True + + is_ignore = torch.from_numpy( + rng.rand(num_preds) < p_ignore) & is_assigned + + gt_inds = torch.zeros(num_preds, dtype=torch.int64) + + true_idxs = np.arange(num_gts) + rng.shuffle(true_idxs) + true_idxs = torch.from_numpy(true_idxs) + gt_inds[is_assigned] = true_idxs[:n_assigned].long() + + gt_inds = torch.from_numpy( + rng.randint(1, num_gts + 1, size=num_preds)) + gt_inds[is_ignore] = -1 + gt_inds[~is_assigned] = 0 + max_overlaps[~is_assigned] = 0 + + if p_use_label is True or p_use_label < rng.rand(): + if num_classes == 0: + labels = torch.zeros(num_preds, dtype=torch.int64) + else: + labels = torch.from_numpy( + # remind that we set FG labels to [0, num_class-1] + # since mmdet v2.0 + # BG cat_id: num_class + rng.randint(0, num_classes, size=num_preds)) + labels[~is_assigned] = 0 + else: + labels = None + + self = cls(num_gts, gt_inds, max_overlaps, labels) + return self + + def add_gt_(self, gt_labels): + """Add ground truth as assigned results. + + Args: + gt_labels (torch.Tensor): Labels of gt boxes + """ + self_inds = torch.arange( + 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) + self.gt_inds = torch.cat([self_inds, self.gt_inds]) + + self.max_overlaps = torch.cat( + [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]) + + if self.labels is not None: + self.labels = torch.cat([gt_labels, self.labels]) diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/atss_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/atss_assigner.py new file mode 100644 index 0000000..79c8281 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/atss_assigner.py @@ -0,0 +1,234 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch + +from ..builder import BBOX_ASSIGNERS +from ..iou_calculators import build_iou_calculator +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + + +@BBOX_ASSIGNERS.register_module() +class ATSSAssigner(BaseAssigner): + """Assign a corresponding gt bbox or background to each bbox. 
+ + Each proposals will be assigned with `0` or a positive integer + indicating the ground truth index. + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + If ``alpha`` is not None, it means that the dynamic cost + ATSSAssigner is adopted, which is currently only used in the DDOD. + + Args: + topk (float): number of bbox selected in each level + """ + + def __init__(self, + topk, + alpha=None, + iou_calculator=dict(type='BboxOverlaps2D'), + ignore_iof_thr=-1): + self.topk = topk + self.alpha = alpha + self.iou_calculator = build_iou_calculator(iou_calculator) + self.ignore_iof_thr = ignore_iof_thr + + """Assign a corresponding gt bbox or background to each bbox. + + Args: + topk (int): number of bbox selected in each level. + alpha (float): param of cost rate for each proposal only in DDOD. + Default None. + iou_calculator (dict): builder of IoU calculator. + Default dict(type='BboxOverlaps2D'). + ignore_iof_thr (int): whether ignore max overlaps or not. + Default -1 (1 or -1). + """ + + # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py + def assign(self, + bboxes, + num_level_bboxes, + gt_bboxes, + gt_bboxes_ignore=None, + gt_labels=None, + cls_scores=None, + bbox_preds=None): + """Assign gt to bboxes. + + The assignment is done in following steps + + 1. compute iou between all bbox (bbox of all pyramid levels) and gt + 2. compute center distance between all bbox and gt + 3. on each pyramid level, for each gt, select k bbox whose center + are closest to the gt center, so we total select k*l bbox as + candidates for each gt + 4. get corresponding iou for the these candidates, and compute the + mean and std, set mean + std as the iou threshold + 5. select these candidates whose iou are greater than or equal to + the threshold as positive + 6. limit the positive sample's center in gt + + If ``alpha`` is not None, and ``cls_scores`` and `bbox_preds` + are not None, the overlaps calculation in the first step + will also include dynamic cost, which is currently only used in + the DDOD. + + Args: + bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). + num_level_bboxes (List): num of bboxes in each level + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. Default None. + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. Default None. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. Default None. + + Returns: + :obj:`AssignResult`: The assign result. + """ + INF = 100000000 + bboxes = bboxes[:, :4] + num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) + + message = 'Invalid alpha parameter because cls_scores or ' \ + 'bbox_preds are None. If you want to use the ' \ + 'cost-based ATSSAssigner, please set cls_scores, ' \ + 'bbox_preds and self.alpha at the same time. 
' + + if self.alpha is None: + # ATSSAssigner + overlaps = self.iou_calculator(bboxes, gt_bboxes) + if cls_scores is not None or bbox_preds is not None: + warnings.warn(message) + else: + # Dynamic cost ATSSAssigner in DDOD + assert cls_scores is not None and bbox_preds is not None, message + + # compute cls cost for bbox and GT + cls_cost = torch.sigmoid(cls_scores[:, gt_labels]) + + # compute iou between all bbox and gt + overlaps = self.iou_calculator(bbox_preds, gt_bboxes) + + # make sure that we are in element-wise multiplication + assert cls_cost.shape == overlaps.shape + + # overlaps is actually a cost matrix + overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha + + # assign 0 by default + assigned_gt_inds = overlaps.new_full((num_bboxes, ), + 0, + dtype=torch.long) + + if num_gt == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = overlaps.new_zeros((num_bboxes, )) + if num_gt == 0: + # No truth, assign everything to background + assigned_gt_inds[:] = 0 + if gt_labels is None: + assigned_labels = None + else: + assigned_labels = overlaps.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + # compute center distance between all bbox and gt + gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 + gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 + gt_points = torch.stack((gt_cx, gt_cy), dim=1) + + bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 + bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 + bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) + + distances = (bboxes_points[:, None, :] - + gt_points[None, :, :]).pow(2).sum(-1).sqrt() + + if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None + and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): + ignore_overlaps = self.iou_calculator( + bboxes, gt_bboxes_ignore, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) + ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr + distances[ignore_idxs, :] = INF + assigned_gt_inds[ignore_idxs] = -1 + + # Selecting candidates based on the center distance + candidate_idxs = [] + start_idx = 0 + for level, bboxes_per_level in enumerate(num_level_bboxes): + # on each pyramid level, for each gt, + # select k bbox whose center are closest to the gt center + end_idx = start_idx + bboxes_per_level + distances_per_level = distances[start_idx:end_idx, :] + selectable_k = min(self.topk, bboxes_per_level) + + _, topk_idxs_per_level = distances_per_level.topk( + selectable_k, dim=0, largest=False) + candidate_idxs.append(topk_idxs_per_level + start_idx) + start_idx = end_idx + candidate_idxs = torch.cat(candidate_idxs, dim=0) + + # get corresponding iou for the these candidates, and compute the + # mean and std, set mean + std as the iou threshold + candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] + overlaps_mean_per_gt = candidate_overlaps.mean(0) + overlaps_std_per_gt = candidate_overlaps.std(0) + overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt + + is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] + + # limit the positive sample's center in gt + for gt_idx in range(num_gt): + candidate_idxs[:, gt_idx] += gt_idx * num_bboxes + ep_bboxes_cx = bboxes_cx.view(1, -1).expand( + num_gt, num_bboxes).contiguous().view(-1) + ep_bboxes_cy = bboxes_cy.view(1, -1).expand( + num_gt, num_bboxes).contiguous().view(-1) + candidate_idxs = candidate_idxs.view(-1) + + # calculate the left, top, right, 
bottom distance between positive + # bbox center and gt side + l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] + t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] + r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) + b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) + is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 + + is_pos = is_pos & is_in_gts + + # if an anchor box is assigned to multiple gts, + # the one with the highest IoU will be selected. + overlaps_inf = torch.full_like(overlaps, + -INF).t().contiguous().view(-1) + index = candidate_idxs.view(-1)[is_pos.view(-1)] + overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] + overlaps_inf = overlaps_inf.view(num_gt, -1).t() + + max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) + assigned_gt_inds[ + max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 + + if gt_labels is not None: + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + pos_inds = torch.nonzero( + assigned_gt_inds > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[ + assigned_gt_inds[pos_inds] - 1] + else: + assigned_labels = None + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/base_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/base_assigner.py new file mode 100644 index 0000000..3c2d597 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/base_assigner.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + + +class BaseAssigner(metaclass=ABCMeta): + """Base assigner that assigns boxes to ground truth boxes.""" + + @abstractmethod + def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): + """Assign boxes to either a ground truth boxes or a negative boxes.""" diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/center_region_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/center_region_assigner.py new file mode 100644 index 0000000..86e7859 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/center_region_assigner.py @@ -0,0 +1,336 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_ASSIGNERS +from ..iou_calculators import build_iou_calculator +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + + +def scale_boxes(bboxes, scale): + """Expand an array of boxes by a given scale. + + Args: + bboxes (Tensor): Shape (m, 4) + scale (float): The scale factor of bboxes + + Returns: + (Tensor): Shape (m, 4). Scaled bboxes + """ + assert bboxes.size(1) == 4 + w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5 + h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5 + x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5 + y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5 + + w_half *= scale + h_half *= scale + + boxes_scaled = torch.zeros_like(bboxes) + boxes_scaled[:, 0] = x_c - w_half + boxes_scaled[:, 2] = x_c + w_half + boxes_scaled[:, 1] = y_c - h_half + boxes_scaled[:, 3] = y_c + h_half + return boxes_scaled + + +def is_located_in(points, bboxes): + """Are points located in bboxes. + + Args: + points (Tensor): Points, shape: (m, 2). + bboxes (Tensor): Bounding boxes, shape: (n, 4). + + Return: + Tensor: Flags indicating if points are located in bboxes, shape: (m, n). 
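+
+    Example:
+        A small illustration (values for demonstration only):
+
+        >>> points = torch.Tensor([[5, 5], [15, 15]])
+        >>> bboxes = torch.Tensor([[0, 0, 10, 10]])
+        >>> is_located_in(points, bboxes)
+        tensor([[ True],
+                [False]])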
+ """ + assert points.size(1) == 2 + assert bboxes.size(1) == 4 + return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \ + (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \ + (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \ + (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0)) + + +def bboxes_area(bboxes): + """Compute the area of an array of bboxes. + + Args: + bboxes (Tensor): The coordinates ox bboxes. Shape: (m, 4) + + Returns: + Tensor: Area of the bboxes. Shape: (m, ) + """ + assert bboxes.size(1) == 4 + w = (bboxes[:, 2] - bboxes[:, 0]) + h = (bboxes[:, 3] - bboxes[:, 1]) + areas = w * h + return areas + + +@BBOX_ASSIGNERS.register_module() +class CenterRegionAssigner(BaseAssigner): + """Assign pixels at the center region of a bbox as positive. + + Each proposals will be assigned with `-1`, `0`, or a positive integer + indicating the ground truth index. + - -1: negative samples + - semi-positive numbers: positive sample, index (0-based) of assigned gt + + Args: + pos_scale (float): Threshold within which pixels are + labelled as positive. + neg_scale (float): Threshold above which pixels are + labelled as positive. + min_pos_iof (float): Minimum iof of a pixel with a gt to be + labelled as positive. Default: 1e-2 + ignore_gt_scale (float): Threshold within which the pixels + are ignored when the gt is labelled as shadowed. Default: 0.5 + foreground_dominate (bool): If True, the bbox will be assigned as + positive when a gt's kernel region overlaps with another's shadowed + (ignored) region, otherwise it is set as ignored. Default to False. + """ + + def __init__(self, + pos_scale, + neg_scale, + min_pos_iof=1e-2, + ignore_gt_scale=0.5, + foreground_dominate=False, + iou_calculator=dict(type='BboxOverlaps2D')): + self.pos_scale = pos_scale + self.neg_scale = neg_scale + self.min_pos_iof = min_pos_iof + self.ignore_gt_scale = ignore_gt_scale + self.foreground_dominate = foreground_dominate + self.iou_calculator = build_iou_calculator(iou_calculator) + + def get_gt_priorities(self, gt_bboxes): + """Get gt priorities according to their areas. + + Smaller gt has higher priority. + + Args: + gt_bboxes (Tensor): Ground truth boxes, shape (k, 4). + + Returns: + Tensor: The priority of gts so that gts with larger priority is \ + more likely to be assigned. Shape (k, ) + """ + gt_areas = bboxes_area(gt_bboxes) + # Rank all gt bbox areas. Smaller objects has larger priority + _, sort_idx = gt_areas.sort(descending=True) + sort_idx = sort_idx.argsort() + return sort_idx + + def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): + """Assign gt to bboxes. + + This method assigns gts to every bbox (proposal/anchor), each bbox \ + will be assigned with -1, or a semi-positive number. -1 means \ + negative sample, semi-positive number is the index (0-based) of \ + assigned gt. + + Args: + bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,). + + Returns: + :obj:`AssignResult`: The assigned result. Note that \ + shadowed_labels of shape (N, 2) is also added as an \ + `assign_result` attribute. 
`shadowed_labels` is a tensor \ + composed of N pairs of anchor_ind, class_label], where N \ + is the number of anchors that lie in the outer region of a \ + gt, anchor_ind is the shadowed anchor index and class_label \ + is the shadowed class label. + + Example: + >>> self = CenterRegionAssigner(0.2, 0.2) + >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) + >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]]) + >>> assign_result = self.assign(bboxes, gt_bboxes) + >>> expected_gt_inds = torch.LongTensor([1, 0]) + >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) + """ + # There are in total 5 steps in the pixel assignment + # 1. Find core (the center region, say inner 0.2) + # and shadow (the relatively ourter part, say inner 0.2-0.5) + # regions of every gt. + # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions + # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in + # the image. + # 3.1. For overlapping objects, the prior bboxes in gt_core is + # assigned with the object with smallest area + # 4. Assign prior bboxes with class label according to its gt id. + # 4.1. Assign -1 to prior bboxes lying in shadowed gts + # 4.2. Assign positive prior boxes with the corresponding label + # 5. Find pixels lying in the shadow of an object and assign them with + # background label, but set the loss weight of its corresponding + # gt to zero. + assert bboxes.size(1) == 4, 'bboxes must have size of 4' + # 1. Find core positive and shadow region of every gt + gt_core = scale_boxes(gt_bboxes, self.pos_scale) + gt_shadow = scale_boxes(gt_bboxes, self.neg_scale) + + # 2. Find prior bboxes that lie in gt_core and gt_shadow regions + bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2 + # The center points lie within the gt boxes + is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes) + # Only calculate bbox and gt_core IoF. This enables small prior bboxes + # to match large gts + bbox_and_gt_core_overlaps = self.iou_calculator( + bboxes, gt_core, mode='iof') + # The center point of effective priors should be within the gt box + is_bbox_in_gt_core = is_bbox_in_gt & ( + bbox_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k) + + is_bbox_in_gt_shadow = ( + self.iou_calculator(bboxes, gt_shadow, mode='iof') > + self.min_pos_iof) + # Rule out center effective positive pixels + is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core) + + num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) + if num_gts == 0 or num_bboxes == 0: + # If no gts exist, assign all pixels to negative + assigned_gt_ids = \ + is_bbox_in_gt_core.new_zeros((num_bboxes,), + dtype=torch.long) + pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2)) + else: + # Step 3: assign a one-hot gt id to each pixel, and smaller objects + # have high priority to assign the pixel. + sort_idx = self.get_gt_priorities(gt_bboxes) + assigned_gt_ids, pixels_in_gt_shadow = \ + self.assign_one_hot_gt_indices(is_bbox_in_gt_core, + is_bbox_in_gt_shadow, + gt_priority=sort_idx) + + if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0: + # No ground truth or boxes, return empty assignment + gt_bboxes_ignore = scale_boxes( + gt_bboxes_ignore, scale=self.ignore_gt_scale) + is_bbox_in_ignored_gts = is_located_in(bbox_centers, + gt_bboxes_ignore) + is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1) + assigned_gt_ids[is_bbox_in_ignored_gts] = -1 + + # 4. Assign prior bboxes with class label according to its gt id. 
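For intuition on the core/shadow split used above, here is a standalone copy of `scale_boxes` applied to one hypothetical gt, with example scales `pos_scale=0.2` and `neg_scale=0.5`:

```python
# Reimplementation of scale_boxes for illustration only: shrink a box
# around its center by a scale factor, then compare the core (positive)
# region with the larger shadow (ignored) region.
import torch


def scale_boxes(bboxes, scale):
    w_half = (bboxes[:, 2] - bboxes[:, 0]) * 0.5 * scale
    h_half = (bboxes[:, 3] - bboxes[:, 1]) * 0.5 * scale
    x_c = (bboxes[:, 2] + bboxes[:, 0]) * 0.5
    y_c = (bboxes[:, 3] + bboxes[:, 1]) * 0.5
    return torch.stack(
        [x_c - w_half, y_c - h_half, x_c + w_half, y_c + h_half], dim=1)


gt = torch.tensor([[0., 0., 100., 60.]])
gt_core = scale_boxes(gt, 0.2)    # inner 20%: positive pixels
gt_shadow = scale_boxes(gt, 0.5)  # inner 50%: shadowed ring around core
print(gt_core)    # tensor([[40., 24., 60., 36.]])
print(gt_shadow)  # tensor([[25., 15., 75., 45.]])
```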
+ assigned_labels = None + shadowed_pixel_labels = None + if gt_labels is not None: + # Default assigned label is the background (-1) + assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1) + pos_inds = torch.nonzero( + assigned_gt_ids > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds] + - 1] + # 5. Find pixels lying in the shadow of an object + shadowed_pixel_labels = pixels_in_gt_shadow.clone() + if pixels_in_gt_shadow.numel() > 0: + pixel_idx, gt_idx =\ + pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1] + assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \ + 'Some pixels are dually assigned to ignore and gt!' + shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1] + override = ( + assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1]) + if self.foreground_dominate: + # When a pixel is both positive and shadowed, set it as pos + shadowed_pixel_labels = shadowed_pixel_labels[~override] + else: + # When a pixel is both pos and shadowed, set it as shadowed + assigned_labels[pixel_idx[override]] = -1 + assigned_gt_ids[pixel_idx[override]] = 0 + + assign_result = AssignResult( + num_gts, assigned_gt_ids, None, labels=assigned_labels) + # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2) + assign_result.set_extra_property('shadowed_labels', + shadowed_pixel_labels) + return assign_result + + def assign_one_hot_gt_indices(self, + is_bbox_in_gt_core, + is_bbox_in_gt_shadow, + gt_priority=None): + """Assign only one gt index to each prior box. + + Gts with large gt_priority are more likely to be assigned. + + Args: + is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center + is in the core area of a gt (e.g. 0-0.2). + Shape: (num_prior, num_gt). + is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox + center is in the shadowed area of a gt (e.g. 0.2-0.5). + Shape: (num_prior, num_gt). + gt_priority (Tensor): Priorities of gts. The gt with a higher + priority is more likely to be assigned to the bbox when the bbox + match with multiple gts. Shape: (num_gt, ). + + Returns: + tuple: Returns (assigned_gt_inds, shadowed_gt_inds). + + - assigned_gt_inds: The assigned gt index of each prior bbox \ + (i.e. index from 1 to num_gts). Shape: (num_prior, ). + - shadowed_gt_inds: shadowed gt indices. It is a tensor of \ + shape (num_ignore, 2) with first column being the \ + shadowed prior bbox indices and the second column the \ + shadowed gt indices (1-based). + """ + num_bboxes, num_gts = is_bbox_in_gt_core.shape + + if gt_priority is None: + gt_priority = torch.arange( + num_gts, device=is_bbox_in_gt_core.device) + assert gt_priority.size(0) == num_gts + # The bigger gt_priority, the more preferable to be assigned + # The assigned inds are by default 0 (background) + assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ), + dtype=torch.long) + # Shadowed bboxes are assigned to be background. But the corresponding + # label is ignored during loss calculation, which is done through + # shadowed_gt_inds + shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False) + if is_bbox_in_gt_core.sum() == 0: # No gt match + shadowed_gt_inds[:, 1] += 1 # 1-based. For consistency issue + return assigned_gt_inds, shadowed_gt_inds + + # The priority of each prior box and gt pair. If one prior box is + # matched bo multiple gts. 
Only the pair with the highest priority + # is saved + pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts), + -1, + dtype=torch.long) + + # Each bbox could match with multiple gts. + # The following codes deal with this situation + # Matched bboxes (to any gt). Shape: (num_pos_anchor, ) + inds_of_match = torch.any(is_bbox_in_gt_core, dim=1) + # The matched gt index of each positive bbox. Length >= num_pos_anchor + # , since one bbox could match multiple gts + matched_bbox_gt_inds = torch.nonzero( + is_bbox_in_gt_core, as_tuple=False)[:, 1] + # Assign priority to each bbox-gt pair. + pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds] + _, argmax_priority = pair_priority[inds_of_match].max(dim=1) + assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based + # Zero-out the assigned anchor box to filter the shadowed gt indices + is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0 + # Concat the shadowed indices due to overlapping with that out side of + # effective scale. shape: (total_num_ignore, 2) + shadowed_gt_inds = torch.cat( + (shadowed_gt_inds, torch.nonzero( + is_bbox_in_gt_core, as_tuple=False)), + dim=0) + # `is_bbox_in_gt_core` should be changed back to keep arguments intact. + is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1 + # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds` + if shadowed_gt_inds.numel() > 0: + shadowed_gt_inds[:, 1] += 1 + return assigned_gt_inds, shadowed_gt_inds diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/grid_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/grid_assigner.py new file mode 100644 index 0000000..a0c814e --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/grid_assigner.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_ASSIGNERS +from ..iou_calculators import build_iou_calculator +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + + +@BBOX_ASSIGNERS.register_module() +class GridAssigner(BaseAssigner): + """Assign a corresponding gt bbox or background to each bbox. + + Each proposals will be assigned with `-1`, `0`, or a positive integer + indicating the ground truth index. + + - -1: don't care + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + pos_iou_thr (float): IoU threshold for positive bboxes. + neg_iou_thr (float or tuple): IoU threshold for negative bboxes. + min_pos_iou (float): Minimum iou for a bbox to be considered as a + positive bbox. Positive samples can have smaller IoU than + pos_iou_thr due to the 4th step (assign max IoU sample to each gt). + gt_max_assign_all (bool): Whether to assign all bboxes with the same + highest overlap with some gt to that gt. + """ + + def __init__(self, + pos_iou_thr, + neg_iou_thr, + min_pos_iou=.0, + gt_max_assign_all=True, + iou_calculator=dict(type='BboxOverlaps2D')): + self.pos_iou_thr = pos_iou_thr + self.neg_iou_thr = neg_iou_thr + self.min_pos_iou = min_pos_iou + self.gt_max_assign_all = gt_max_assign_all + self.iou_calculator = build_iou_calculator(iou_calculator) + + def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None): + """Assign gt to bboxes. The process is very much like the max iou + assigner, except that positive samples are constrained within the cell + that the gt boxes fell in. 
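A quick numeric check of the `get_gt_priorities` trick defined earlier: sorting the areas in descending order and then `argsort`-ing the indices hands the smallest gt the largest priority, so overlapping core regions resolve in favour of the smallest object. The areas below are made up:

```python
# Smaller gts receive larger priority values, so a pixel that falls in
# the core region of several gts is assigned to the smallest one.
import torch

gt_areas = torch.tensor([5000., 120., 900.])   # invented box areas
_, sort_idx = gt_areas.sort(descending=True)   # large -> small
priority = sort_idx.argsort()
print(priority)  # tensor([0, 2, 1]): the smallest box (120) wins with 2
```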
+ + This method assign a gt bbox to every bbox (proposal/anchor), each bbox + will be assigned with -1, 0, or a positive number. -1 means don't care, + 0 means negative sample, positive number is the index (1-based) of + assigned gt. + The assignment is done in following steps, the order matters. + + 1. assign every bbox to -1 + 2. assign proposals whose iou with all gts <= neg_iou_thr to 0 + 3. for each bbox within a cell, if the iou with its nearest gt > + pos_iou_thr and the center of that gt falls inside the cell, + assign it to that bbox + 4. for each gt bbox, assign its nearest proposals within the cell the + gt bbox falls in to itself. + + Args: + bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). + box_responsible_flags (Tensor): flag to indicate whether box is + responsible for prediction, shape(n, ) + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). + + Returns: + :obj:`AssignResult`: The assign result. + """ + num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) + + # compute iou between all gt and bboxes + overlaps = self.iou_calculator(gt_bboxes, bboxes) + + # 1. assign -1 by default + assigned_gt_inds = overlaps.new_full((num_bboxes, ), + -1, + dtype=torch.long) + + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = overlaps.new_zeros((num_bboxes, )) + if num_gts == 0: + # No truth, assign everything to background + assigned_gt_inds[:] = 0 + if gt_labels is None: + assigned_labels = None + else: + assigned_labels = overlaps.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gts, + assigned_gt_inds, + max_overlaps, + labels=assigned_labels) + + # 2. assign negative: below + # for each anchor, which gt best overlaps with it + # for each anchor, the max iou of all gts + # shape of max_overlaps == argmax_overlaps == num_bboxes + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + if isinstance(self.neg_iou_thr, float): + assigned_gt_inds[(max_overlaps >= 0) + & (max_overlaps <= self.neg_iou_thr)] = 0 + elif isinstance(self.neg_iou_thr, (tuple, list)): + assert len(self.neg_iou_thr) == 2 + assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0]) + & (max_overlaps <= self.neg_iou_thr[1])] = 0 + + # 3. assign positive: falls into responsible cell and above + # positive IOU threshold, the order matters. + # the prior condition of comparison is to filter out all + # unrelated anchors, i.e. not box_responsible_flags + overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1. + + # calculate max_overlaps again, but this time we only consider IOUs + # for anchors responsible for prediction + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # for each gt, which anchor best overlaps with it + # for each gt, the max iou of all proposals + # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts + gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) + + pos_inds = (max_overlaps > + self.pos_iou_thr) & box_responsible_flags.type(torch.bool) + assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 + + # 4. 
assign positive to max overlapped anchors within responsible cell + for i in range(num_gts): + if gt_max_overlaps[i] > self.min_pos_iou: + if self.gt_max_assign_all: + max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \ + box_responsible_flags.type(torch.bool) + assigned_gt_inds[max_iou_inds] = i + 1 + elif box_responsible_flags[gt_argmax_overlaps[i]]: + assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 + + # assign labels of positive anchors + if gt_labels is not None: + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + pos_inds = torch.nonzero( + assigned_gt_inds > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[ + assigned_gt_inds[pos_inds] - 1] + + else: + assigned_labels = None + + return AssignResult( + num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/hungarian_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/hungarian_assigner.py new file mode 100644 index 0000000..4105fb5 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/hungarian_assigner.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_ASSIGNERS +from ..match_costs import build_match_cost +from ..transforms import bbox_cxcywh_to_xyxy +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + + +@BBOX_ASSIGNERS.register_module() +class HungarianAssigner(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. + + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of three components: + classification cost, regression L1 cost and regression iou cost. The + targets don't include the no_object, so generally there are more + predictions than targets. After the one-to-one matching, the un-matched + are treated as backgrounds. Thus each query prediction will be assigned + with `0` or a positive integer indicating the ground truth index: + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + cls_weight (int | float, optional): The scale factor for classification + cost. Default 1.0. + bbox_weight (int | float, optional): The scale factor for regression + L1 cost. Default 1.0. + iou_weight (int | float, optional): The scale factor for regression + iou cost. Default 1.0. + iou_calculator (dict | optional): The config for the iou calculation. + Default type `BboxOverlaps2D`. + iou_mode (str | optional): "iou" (intersection over union), "iof" + (intersection over foreground), or "giou" (generalized + intersection over union). Default "giou". + """ + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=1.0), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)): + self.cls_cost = build_match_cost(cls_cost) + self.reg_cost = build_match_cost(reg_cost) + self.iou_cost = build_match_cost(iou_cost) + + def assign(self, + bbox_pred, + cls_pred, + gt_bboxes, + gt_labels, + img_meta, + gt_bboxes_ignore=None, + eps=1e-7): + """Computes one-to-one matching based on the weighted costs. + + This method assign each query prediction to a ground truth or + background. 
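The method documented here (and the mask variant further below) reduces to building a query-by-gt cost matrix and handing it to SciPy's Hungarian solver. A toy sketch of that matching step, with a made-up cost matrix and the same 1-based bookkeeping:

```python
# One-to-one matching on a hypothetical (num_query=4, num_gt=2) cost
# matrix, the same way the assigner calls linear_sum_assignment.
import torch
from scipy.optimize import linear_sum_assignment

cost = torch.tensor([[0.9, 0.2],
                     [0.3, 0.8],
                     [0.1, 0.7],
                     [0.5, 0.4]])

row_inds, col_inds = linear_sum_assignment(cost.cpu().numpy())
assigned_gt_inds = torch.zeros(cost.size(0), dtype=torch.long)  # 0 = bg
assigned_gt_inds[torch.from_numpy(row_inds)] = torch.from_numpy(col_inds) + 1
print(assigned_gt_inds)
# tensor([2, 0, 1, 0]): query 0 is matched to gt 2, query 2 to gt 1,
# the remaining queries stay background.
```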
The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + + Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + [num_query, 4]. + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + img_meta (dict): Meta information for current image. + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`. Default None. + eps (int | float, optional): A value added to the denominator for + numerical stability. Default 1e-7. + + Returns: + :obj:`AssignResult`: The assigned result. + """ + assert gt_bboxes_ignore is None, \ + 'Only case when gt_bboxes_ignore is None is supported.' + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + img_h, img_w, _ = img_meta['img_shape'] + factor = gt_bboxes.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + + # 2. compute the weighted costs + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + normalize_gt_bboxes = gt_bboxes / factor + reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) + # regression iou cost, defaultly giou is used in official DETR. + bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor + iou_cost = self.iou_cost(bboxes, gt_bboxes) + # weighted sum of above three costs + cost = cls_cost + reg_cost + iou_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. 
assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/mask_hungarian_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/mask_hungarian_assigner.py new file mode 100644 index 0000000..f5f27f3 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/mask_hungarian_assigner.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.core.bbox.builder import BBOX_ASSIGNERS +from mmdet.core.bbox.match_costs.builder import build_match_cost +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + + +@BBOX_ASSIGNERS.register_module() +class MaskHungarianAssigner(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth for + mask. + + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of three components: + classification cost, mask focal cost and mask dice cost. The + targets don't include the no_object, so generally there are more + predictions than targets. After the one-to-one matching, the un-matched + are treated as backgrounds. Thus each query prediction will be assigned + with `0` or a positive integer indicating the ground truth index: + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + cls_cost (:obj:`mmcv.ConfigDict` | dict): Classification cost config. + mask_cost (:obj:`mmcv.ConfigDict` | dict): Mask cost config. + dice_cost (:obj:`mmcv.ConfigDict` | dict): Dice cost config. + """ + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.0), + mask_cost=dict( + type='FocalLossCost', weight=1.0, binary_input=True), + dice_cost=dict(type='DiceCost', weight=1.0)): + self.cls_cost = build_match_cost(cls_cost) + self.mask_cost = build_match_cost(mask_cost) + self.dice_cost = build_match_cost(dice_cost) + + def assign(self, + cls_pred, + mask_pred, + gt_labels, + gt_mask, + img_meta, + gt_bboxes_ignore=None, + eps=1e-7): + """Computes one-to-one matching based on the weighted costs. + + Args: + cls_pred (Tensor | None): Class prediction in shape + (num_query, cls_out_channels). + mask_pred (Tensor): Mask prediction in shape (num_query, H, W). + gt_labels (Tensor): Label of 'gt_mask'in shape = (num_gt, ). + gt_mask (Tensor): Ground truth mask in shape = (num_gt, H, W). + img_meta (dict): Meta information for current image. + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`. Default None. + eps (int | float, optional): A value added to the denominator for + numerical stability. Default 1e-7. + + Returns: + :obj:`AssignResult`: The assigned result. + """ + assert gt_bboxes_ignore is None, \ + 'Only case when gt_bboxes_ignore is None is supported.' + # K-Net sometimes passes cls_pred=None to this assigner. + # So we should use the shape of mask_pred + num_gt, num_query = gt_labels.shape[0], mask_pred.shape[0] + + # 1. 
assign -1 by default + assigned_gt_inds = mask_pred.new_full((num_query, ), + -1, + dtype=torch.long) + assigned_labels = mask_pred.new_full((num_query, ), + -1, + dtype=torch.long) + if num_gt == 0 or num_query == 0: + # No ground truth or boxes, return empty assignment + if num_gt == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gt, assigned_gt_inds, None, labels=assigned_labels) + + # 2. compute the weighted costs + # classification and maskcost. + if self.cls_cost.weight != 0 and cls_pred is not None: + cls_cost = self.cls_cost(cls_pred, gt_labels) + else: + cls_cost = 0 + + if self.mask_cost.weight != 0: + # mask_pred shape = [num_query, h, w] + # gt_mask shape = [num_gt, h, w] + # mask_cost shape = [num_query, num_gt] + mask_cost = self.mask_cost(mask_pred, gt_mask) + else: + mask_cost = 0 + + if self.dice_cost.weight != 0: + dice_cost = self.dice_cost(mask_pred, gt_mask) + else: + dice_cost = 0 + cost = cls_cost + mask_cost + dice_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + mask_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + mask_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return AssignResult( + num_gt, assigned_gt_inds, None, labels=assigned_labels) diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/max_iou_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/max_iou_assigner.py new file mode 100644 index 0000000..676421f --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/max_iou_assigner.py @@ -0,0 +1,218 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_ASSIGNERS +from ..iou_calculators import build_iou_calculator +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + + +@BBOX_ASSIGNERS.register_module() +class MaxIoUAssigner(BaseAssigner): + """Assign a corresponding gt bbox or background to each bbox. + + Each proposals will be assigned with `-1`, or a semi-positive integer + indicating the ground truth index. + + - -1: negative sample, no assigned gt + - semi-positive integer: positive sample, index (0-based) of assigned gt + + Args: + pos_iou_thr (float): IoU threshold for positive bboxes. + neg_iou_thr (float or tuple): IoU threshold for negative bboxes. + min_pos_iou (float): Minimum iou for a bbox to be considered as a + positive bbox. Positive samples can have smaller IoU than + pos_iou_thr due to the 4th step (assign max IoU sample to each gt). + `min_pos_iou` is set to avoid assigning bboxes that have extremely + small iou with GT as positive samples. It brings about 0.3 mAP + improvements in 1x schedule but does not affect the performance of + 3x schedule. More comparisons can be found in + `PR #7464 `_. + gt_max_assign_all (bool): Whether to assign all bboxes with the same + highest overlap with some gt to that gt. + ignore_iof_thr (float): IoF threshold for ignoring bboxes (if + `gt_bboxes_ignore` is specified). 
Negative values mean not + ignoring any bboxes. + ignore_wrt_candidates (bool): Whether to compute the iof between + `bboxes` and `gt_bboxes_ignore`, or the contrary. + match_low_quality (bool): Whether to allow low quality matches. This is + usually allowed for RPN and single stage detectors, but not allowed + in the second stage. Details are demonstrated in Step 4. + gpu_assign_thr (int): The upper bound of the number of GT for GPU + assign. When the number of gt is above this threshold, will assign + on CPU device. Negative values mean not assign on CPU. + """ + + def __init__(self, + pos_iou_thr, + neg_iou_thr, + min_pos_iou=.0, + gt_max_assign_all=True, + ignore_iof_thr=-1, + ignore_wrt_candidates=True, + match_low_quality=True, + gpu_assign_thr=-1, + iou_calculator=dict(type='BboxOverlaps2D')): + self.pos_iou_thr = pos_iou_thr + self.neg_iou_thr = neg_iou_thr + self.min_pos_iou = min_pos_iou + self.gt_max_assign_all = gt_max_assign_all + self.ignore_iof_thr = ignore_iof_thr + self.ignore_wrt_candidates = ignore_wrt_candidates + self.gpu_assign_thr = gpu_assign_thr + self.match_low_quality = match_low_quality + self.iou_calculator = build_iou_calculator(iou_calculator) + + def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): + """Assign gt to bboxes. + + This method assign a gt bbox to every bbox (proposal/anchor), each bbox + will be assigned with -1, or a semi-positive number. -1 means negative + sample, semi-positive number is the index (0-based) of assigned gt. + The assignment is done in following steps, the order matters. + + 1. assign every bbox to the background + 2. assign proposals whose iou with all gts < neg_iou_thr to 0 + 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, + assign it to that bbox + 4. for each gt bbox, assign its nearest proposals (may be more than + one) to itself + + Args: + bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). + + Returns: + :obj:`AssignResult`: The assign result. 
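A compact sketch of the thresholding that `assign_wrt_overlaps` applies further below, using example thresholds `pos_iou_thr=0.5`, `neg_iou_thr=0.4`, `min_pos_iou=0.2`, and with the low-quality match simplified to each gt claiming only its single best anchor:

```python
# Toy (num_gt=2, num_anchor=4) IoU matrix run through the MaxIoU rules:
# background below neg_iou_thr, positive at or above pos_iou_thr, and a
# simplified low-quality pass where each gt keeps its best anchor.
import torch

overlaps = torch.tensor([[0.60, 0.10, 0.30, 0.05],
                         [0.20, 0.55, 0.25, 0.10]])
pos_iou_thr, neg_iou_thr, min_pos_iou = 0.5, 0.4, 0.2

max_overlaps, argmax_overlaps = overlaps.max(dim=0)        # per anchor
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)  # per gt

assigned = overlaps.new_full((overlaps.size(1), ), -1, dtype=torch.long)
assigned[(max_overlaps >= 0) & (max_overlaps < neg_iou_thr)] = 0
pos_inds = max_overlaps >= pos_iou_thr
assigned[pos_inds] = argmax_overlaps[pos_inds] + 1
for i in range(overlaps.size(0)):
    if gt_max_overlaps[i] >= min_pos_iou:
        assigned[gt_argmax_overlaps[i]] = i + 1
print(assigned)  # tensor([1, 2, 0, 0])
```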
+ + Example: + >>> self = MaxIoUAssigner(0.5, 0.5) + >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) + >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]]) + >>> assign_result = self.assign(bboxes, gt_bboxes) + >>> expected_gt_inds = torch.LongTensor([1, 0]) + >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) + """ + assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( + gt_bboxes.shape[0] > self.gpu_assign_thr) else False + # compute overlap and assign gt on CPU when number of GT is large + if assign_on_cpu: + device = bboxes.device + bboxes = bboxes.cpu() + gt_bboxes = gt_bboxes.cpu() + if gt_bboxes_ignore is not None: + gt_bboxes_ignore = gt_bboxes_ignore.cpu() + if gt_labels is not None: + gt_labels = gt_labels.cpu() + + overlaps = self.iou_calculator(gt_bboxes, bboxes) + + if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None + and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): + if self.ignore_wrt_candidates: + ignore_overlaps = self.iou_calculator( + bboxes, gt_bboxes_ignore, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) + else: + ignore_overlaps = self.iou_calculator( + gt_bboxes_ignore, bboxes, mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) + overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 + + assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) + if assign_on_cpu: + assign_result.gt_inds = assign_result.gt_inds.to(device) + assign_result.max_overlaps = assign_result.max_overlaps.to(device) + if assign_result.labels is not None: + assign_result.labels = assign_result.labels.to(device) + return assign_result + + def assign_wrt_overlaps(self, overlaps, gt_labels=None): + """Assign w.r.t. the overlaps of bboxes with gts. + + Args: + overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, + shape(k, n). + gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ). + + Returns: + :obj:`AssignResult`: The assign result. + """ + num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) + + # 1. assign -1 by default + assigned_gt_inds = overlaps.new_full((num_bboxes, ), + -1, + dtype=torch.long) + + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = overlaps.new_zeros((num_bboxes, )) + if num_gts == 0: + # No truth, assign everything to background + assigned_gt_inds[:] = 0 + if gt_labels is None: + assigned_labels = None + else: + assigned_labels = overlaps.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gts, + assigned_gt_inds, + max_overlaps, + labels=assigned_labels) + + # for each anchor, which gt best overlaps with it + # for each anchor, the max iou of all gts + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + # for each gt, which anchor best overlaps with it + # for each gt, the max iou of all proposals + gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) + + # 2. assign negative: below + # the negative inds are set to be 0 + if isinstance(self.neg_iou_thr, float): + assigned_gt_inds[(max_overlaps >= 0) + & (max_overlaps < self.neg_iou_thr)] = 0 + elif isinstance(self.neg_iou_thr, tuple): + assert len(self.neg_iou_thr) == 2 + assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) + & (max_overlaps < self.neg_iou_thr[1])] = 0 + + # 3. 
assign positive: above positive IoU threshold + pos_inds = max_overlaps >= self.pos_iou_thr + assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 + + if self.match_low_quality: + # Low-quality matching will overwrite the assigned_gt_inds assigned + # in Step 3. Thus, the assigned gt might not be the best one for + # prediction. + # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2, + # bbox 1 will be assigned as the best target for bbox A in step 3. + # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's + # assigned_gt_inds will be overwritten to be bbox 2. + # This might be the reason that it is not used in ROI Heads. + for i in range(num_gts): + if gt_max_overlaps[i] >= self.min_pos_iou: + if self.gt_max_assign_all: + max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] + assigned_gt_inds[max_iou_inds] = i + 1 + else: + assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 + + if gt_labels is not None: + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + pos_inds = torch.nonzero( + assigned_gt_inds > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[ + assigned_gt_inds[pos_inds] - 1] + else: + assigned_labels = None + + return AssignResult( + num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/point_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/point_assigner.py new file mode 100644 index 0000000..b0dc224 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/point_assigner.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_ASSIGNERS +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + + +@BBOX_ASSIGNERS.register_module() +class PointAssigner(BaseAssigner): + """Assign a corresponding gt bbox or background to each point. + + Each proposals will be assigned with `0`, or a positive integer + indicating the ground truth index. + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + """ + + def __init__(self, scale=4, pos_num=3): + self.scale = scale + self.pos_num = pos_num + + def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): + """Assign gt to points. + + This method assign a gt bbox to every points set, each points set + will be assigned with the background_label (-1), or a label number. + -1 is background, and semi-positive number is the index (0-based) of + assigned gt. + The assignment is done in following steps, the order matters. + + 1. assign every points to the background_label (-1) + 2. A point is assigned to some gt bbox if + (i) the point is within the k closest points to the gt bbox + (ii) the distance between this point and the gt is smaller than + other gt bboxes + + Args: + points (Tensor): points to be assigned, shape(n, 3) while last + dimension stands for (x, y, stride). + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + NOTE: currently unused. + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). + + Returns: + :obj:`AssignResult`: The assign result. 
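The body of this method (just below) starts by mapping every gt to a pyramid level using the log2 of its size relative to `scale`. A toy sketch of that level computation, assuming levels 3 to 7 are present:

```python
# Hypothetical gt boxes mapped to FPN levels the way PointAssigner does:
# lvl = int((log2(w / scale) + log2(h / scale)) / 2), clamped to the
# available levels (here 3..7, i.e. strides 8..128).
import torch

gt_bboxes = torch.tensor([[0., 0., 32., 32.],
                          [0., 0., 100., 220.],
                          [0., 0., 800., 600.]])
scale = 4
lvl_min, lvl_max = 3, 7  # example pyramid levels

gt_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
gt_lvl = ((torch.log2(gt_wh[:, 0] / scale) +
           torch.log2(gt_wh[:, 1] / scale)) / 2).int()
gt_lvl = torch.clamp(gt_lvl, min=lvl_min, max=lvl_max)
print(gt_lvl)  # tensor([3, 5, 7], dtype=torch.int32)
```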
+ """ + num_points = points.shape[0] + num_gts = gt_bboxes.shape[0] + + if num_gts == 0 or num_points == 0: + # If no truth assign everything to the background + assigned_gt_inds = points.new_full((num_points, ), + 0, + dtype=torch.long) + if gt_labels is None: + assigned_labels = None + else: + assigned_labels = points.new_full((num_points, ), + -1, + dtype=torch.long) + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + + points_xy = points[:, :2] + points_stride = points[:, 2] + points_lvl = torch.log2( + points_stride).int() # [3...,4...,5...,6...,7...] + lvl_min, lvl_max = points_lvl.min(), points_lvl.max() + + # assign gt box + gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2 + gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6) + scale = self.scale + gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) + + torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int() + gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max) + + # stores the assigned gt index of each point + assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long) + # stores the assigned gt dist (to this point) of each point + assigned_gt_dist = points.new_full((num_points, ), float('inf')) + points_range = torch.arange(points.shape[0]) + + for idx in range(num_gts): + gt_lvl = gt_bboxes_lvl[idx] + # get the index of points in this level + lvl_idx = gt_lvl == points_lvl + points_index = points_range[lvl_idx] + # get the points in this level + lvl_points = points_xy[lvl_idx, :] + # get the center point of gt + gt_point = gt_bboxes_xy[[idx], :] + # get width and height of gt + gt_wh = gt_bboxes_wh[[idx], :] + # compute the distance between gt center and + # all points in this level + points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1) + # find the nearest k points to gt center in this level + min_dist, min_dist_index = torch.topk( + points_gt_dist, self.pos_num, largest=False) + # the index of nearest k points to gt center in this level + min_dist_points_index = points_index[min_dist_index] + # The less_than_recorded_index stores the index + # of min_dist that is less then the assigned_gt_dist. Where + # assigned_gt_dist stores the dist from previous assigned gt + # (if exist) to each point. + less_than_recorded_index = min_dist < assigned_gt_dist[ + min_dist_points_index] + # The min_dist_points_index stores the index of points satisfy: + # (1) it is k nearest to current gt center in this level. + # (2) it is closer to current gt center than other gt center. + min_dist_points_index = min_dist_points_index[ + less_than_recorded_index] + # assign the result + assigned_gt_inds[min_dist_points_index] = idx + 1 + assigned_gt_dist[min_dist_points_index] = min_dist[ + less_than_recorded_index] + + if gt_labels is not None: + assigned_labels = assigned_gt_inds.new_full((num_points, ), -1) + pos_inds = torch.nonzero( + assigned_gt_inds > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[ + assigned_gt_inds[pos_inds] - 1] + else: + assigned_labels = None + + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/region_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/region_assigner.py new file mode 100644 index 0000000..1833b89 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/region_assigner.py @@ -0,0 +1,222 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmdet.core import anchor_inside_flags +from ..builder import BBOX_ASSIGNERS +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + + +def calc_region(bbox, ratio, stride, featmap_size=None): + """Calculate region of the box defined by the ratio, the ratio is from the + center of the box to every edge.""" + # project bbox on the feature + f_bbox = bbox / stride + x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2]) + y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3]) + x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2]) + y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3]) + if featmap_size is not None: + x1 = x1.clamp(min=0, max=featmap_size[1]) + y1 = y1.clamp(min=0, max=featmap_size[0]) + x2 = x2.clamp(min=0, max=featmap_size[1]) + y2 = y2.clamp(min=0, max=featmap_size[0]) + return (x1, y1, x2, y2) + + +def anchor_ctr_inside_region_flags(anchors, stride, region): + """Get the flag indicate whether anchor centers are inside regions.""" + x1, y1, x2, y2 = region + f_anchors = anchors / stride + x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5 + y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5 + flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2) + return flags + + +@BBOX_ASSIGNERS.register_module() +class RegionAssigner(BaseAssigner): + """Assign a corresponding gt bbox or background to each bbox. + + Each proposals will be assigned with `-1`, `0`, or a positive integer + indicating the ground truth index. + + - -1: don't care + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + center_ratio: ratio of the region in the center of the bbox to + define positive sample. + ignore_ratio: ratio of the region to define ignore samples. + """ + + def __init__(self, center_ratio=0.2, ignore_ratio=0.5): + self.center_ratio = center_ratio + self.ignore_ratio = ignore_ratio + + def assign(self, + mlvl_anchors, + mlvl_valid_flags, + gt_bboxes, + img_meta, + featmap_sizes, + anchor_scale, + anchor_strides, + gt_bboxes_ignore=None, + gt_labels=None, + allowed_border=0): + """Assign gt to anchors. + + This method assign a gt bbox to every bbox (proposal/anchor), each bbox + will be assigned with -1, 0, or a positive number. -1 means don't care, + 0 means negative sample, positive number is the index (1-based) of + assigned gt. + + The assignment is done in following steps, and the order matters. + + 1. Assign every anchor to 0 (negative) + 2. (For each gt_bboxes) Compute ignore flags based on ignore_region + then assign -1 to anchors w.r.t. ignore flags + 3. (For each gt_bboxes) Compute pos flags based on center_region then + assign gt_bboxes to anchors w.r.t. pos flags + 4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor + level then assign -1 to anchors w.r.t. ignore flags + 5. Assign anchor outside of image to -1 + + Args: + mlvl_anchors (list[Tensor]): Multi level anchors. + mlvl_valid_flags (list[Tensor]): Multi level valid flags. + gt_bboxes (Tensor): Ground truth bboxes of image + img_meta (dict): Meta info of image. + featmap_sizes (list[Tensor]): Feature mapsize each level + anchor_scale (int): Scale of the anchor. + anchor_strides (list[int]): Stride of the anchor. + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). 
+ allowed_border (int, optional): The border to allow the valid + anchor. Defaults to 0. + + Returns: + :obj:`AssignResult`: The assign result. + """ + if gt_bboxes_ignore is not None: + raise NotImplementedError + + num_gts = gt_bboxes.shape[0] + num_bboxes = sum(x.shape[0] for x in mlvl_anchors) + + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = gt_bboxes.new_zeros((num_bboxes, )) + assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ), + dtype=torch.long) + if gt_labels is None: + assigned_labels = None + else: + assigned_labels = gt_bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gts, + assigned_gt_inds, + max_overlaps, + labels=assigned_labels) + + num_lvls = len(mlvl_anchors) + r1 = (1 - self.center_ratio) / 2 + r2 = (1 - self.ignore_ratio) / 2 + + scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * + (gt_bboxes[:, 3] - gt_bboxes[:, 1])) + min_anchor_size = scale.new_full( + (1, ), float(anchor_scale * anchor_strides[0])) + target_lvls = torch.floor( + torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) + target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() + + # 1. assign 0 (negative) by default + mlvl_assigned_gt_inds = [] + mlvl_ignore_flags = [] + for lvl in range(num_lvls): + h, w = featmap_sizes[lvl] + assert h * w == mlvl_anchors[lvl].shape[0] + assigned_gt_inds = gt_bboxes.new_full((h * w, ), + 0, + dtype=torch.long) + ignore_flags = torch.zeros_like(assigned_gt_inds) + mlvl_assigned_gt_inds.append(assigned_gt_inds) + mlvl_ignore_flags.append(ignore_flags) + + for gt_id in range(num_gts): + lvl = target_lvls[gt_id].item() + featmap_size = featmap_sizes[lvl] + stride = anchor_strides[lvl] + anchors = mlvl_anchors[lvl] + gt_bbox = gt_bboxes[gt_id, :4] + + # Compute regions + ignore_region = calc_region(gt_bbox, r2, stride, featmap_size) + ctr_region = calc_region(gt_bbox, r1, stride, featmap_size) + + # 2. Assign -1 to ignore flags + ignore_flags = anchor_ctr_inside_region_flags( + anchors, stride, ignore_region) + mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 + + # 3. Assign gt_bboxes to pos flags + pos_flags = anchor_ctr_inside_region_flags(anchors, stride, + ctr_region) + mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1 + + # 4. Assign -1 to ignore adjacent lvl + if lvl > 0: + d_lvl = lvl - 1 + d_anchors = mlvl_anchors[d_lvl] + d_featmap_size = featmap_sizes[d_lvl] + d_stride = anchor_strides[d_lvl] + d_ignore_region = calc_region(gt_bbox, r2, d_stride, + d_featmap_size) + ignore_flags = anchor_ctr_inside_region_flags( + d_anchors, d_stride, d_ignore_region) + mlvl_ignore_flags[d_lvl][ignore_flags] = 1 + if lvl < num_lvls - 1: + u_lvl = lvl + 1 + u_anchors = mlvl_anchors[u_lvl] + u_featmap_size = featmap_sizes[u_lvl] + u_stride = anchor_strides[u_lvl] + u_ignore_region = calc_region(gt_bbox, r2, u_stride, + u_featmap_size) + ignore_flags = anchor_ctr_inside_region_flags( + u_anchors, u_stride, u_ignore_region) + mlvl_ignore_flags[u_lvl][ignore_flags] = 1 + + # 4. (cont.) Assign -1 to ignore adjacent lvl + for lvl in range(num_lvls): + ignore_flags = mlvl_ignore_flags[lvl] + mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 + + # 5. 
Assign -1 to anchor outside of image + flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds) + flat_anchors = torch.cat(mlvl_anchors) + flat_valid_flags = torch.cat(mlvl_valid_flags) + assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] == + flat_valid_flags.shape[0]) + inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags, + img_meta['img_shape'], + allowed_border) + outside_flags = ~inside_flags + flat_assigned_gt_inds[outside_flags] = -1 + + if gt_labels is not None: + assigned_labels = torch.zeros_like(flat_assigned_gt_inds) + pos_flags = assigned_gt_inds > 0 + assigned_labels[pos_flags] = gt_labels[ + flat_assigned_gt_inds[pos_flags] - 1] + else: + assigned_labels = None + + return AssignResult( + num_gts, flat_assigned_gt_inds, None, labels=assigned_labels) diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/sim_ota_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/sim_ota_assigner.py new file mode 100644 index 0000000..58bfef4 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/sim_ota_assigner.py @@ -0,0 +1,257 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn.functional as F + +from ..builder import BBOX_ASSIGNERS +from ..iou_calculators import bbox_overlaps +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + + +@BBOX_ASSIGNERS.register_module() +class SimOTAAssigner(BaseAssigner): + """Computes matching between predictions and ground truth. + + Args: + center_radius (int | float, optional): Ground truth center size + to judge whether a prior is in center. Default 2.5. + candidate_topk (int, optional): The candidate top-k which used to + get top-k ious to calculate dynamic-k. Default 10. + iou_weight (int | float, optional): The scale factor for regression + iou cost. Default 3.0. + cls_weight (int | float, optional): The scale factor for classification + cost. Default 1.0. + """ + + def __init__(self, + center_radius=2.5, + candidate_topk=10, + iou_weight=3.0, + cls_weight=1.0): + self.center_radius = center_radius + self.candidate_topk = candidate_topk + self.iou_weight = iou_weight + self.cls_weight = cls_weight + + def assign(self, + pred_scores, + priors, + decoded_bboxes, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + eps=1e-7): + """Assign gt to priors using SimOTA. It will switch to CPU mode when + GPU is out of memory. + Args: + pred_scores (Tensor): Classification scores of one image, + a 2D-Tensor with shape [num_priors, num_classes] + priors (Tensor): All priors of one image, a 2D-Tensor with shape + [num_priors, 4] in [cx, xy, stride_w, stride_y] format. + decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape + [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. + gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor + with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth labels of one image, a Tensor + with shape [num_gts]. + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + eps (float): A value added to the denominator for numerical + stability. Default 1e-7. + Returns: + assign_result (obj:`AssignResult`): The assigned result. 
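Before the SimOTA internals, a quick check of the `calc_region` helper that the RegionAssigner above builds its center and ignore regions from; the box, ratio and stride below are toy values:

```python
# Standalone copy of calc_region for illustration: project the box onto
# a stride-8 feature map and keep the part shrunk toward the center by
# `ratio` from each side (center_ratio=0.2 gives r1=(1-0.2)/2=0.4).
import torch


def calc_region(bbox, ratio, stride, featmap_size=None):
    f_bbox = bbox / stride
    x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
    y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
    x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
    y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
    if featmap_size is not None:
        x1, x2 = x1.clamp(0, featmap_size[1]), x2.clamp(0, featmap_size[1])
        y1, y2 = y1.clamp(0, featmap_size[0]), y2.clamp(0, featmap_size[0])
    return x1, y1, x2, y2


gt_bbox = torch.tensor([16., 16., 176., 96.])
print(calc_region(gt_bbox, 0.4, stride=8, featmap_size=(32, 32)))
# (tensor(10.), tensor(6.), tensor(14.), tensor(8.))
```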
+ """ + try: + assign_result = self._assign(pred_scores, priors, decoded_bboxes, + gt_bboxes, gt_labels, + gt_bboxes_ignore, eps) + return assign_result + except RuntimeError: + origin_device = pred_scores.device + warnings.warn('OOM RuntimeError is raised due to the huge memory ' + 'cost during label assignment. CPU mode is applied ' + 'in this batch. If you want to avoid this issue, ' + 'try to reduce the batch size or image size.') + torch.cuda.empty_cache() + + pred_scores = pred_scores.cpu() + priors = priors.cpu() + decoded_bboxes = decoded_bboxes.cpu() + gt_bboxes = gt_bboxes.cpu().float() + gt_labels = gt_labels.cpu() + + assign_result = self._assign(pred_scores, priors, decoded_bboxes, + gt_bboxes, gt_labels, + gt_bboxes_ignore, eps) + assign_result.gt_inds = assign_result.gt_inds.to(origin_device) + assign_result.max_overlaps = assign_result.max_overlaps.to( + origin_device) + assign_result.labels = assign_result.labels.to(origin_device) + + return assign_result + + def _assign(self, + pred_scores, + priors, + decoded_bboxes, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + eps=1e-7): + """Assign gt to priors using SimOTA. + Args: + pred_scores (Tensor): Classification scores of one image, + a 2D-Tensor with shape [num_priors, num_classes] + priors (Tensor): All priors of one image, a 2D-Tensor with shape + [num_priors, 4] in [cx, xy, stride_w, stride_y] format. + decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape + [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. + gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor + with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth labels of one image, a Tensor + with shape [num_gts]. + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + eps (float): A value added to the denominator for numerical + stability. Default 1e-7. + Returns: + :obj:`AssignResult`: The assigned result. 
+ """ + INF = 100000.0 + num_gt = gt_bboxes.size(0) + num_bboxes = decoded_bboxes.size(0) + + # assign 0 by default + assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), + 0, + dtype=torch.long) + valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( + priors, gt_bboxes) + valid_decoded_bbox = decoded_bboxes[valid_mask] + valid_pred_scores = pred_scores[valid_mask] + num_valid = valid_decoded_bbox.size(0) + + if num_gt == 0 or num_bboxes == 0 or num_valid == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) + if num_gt == 0: + # No truth, assign everything to background + assigned_gt_inds[:] = 0 + if gt_labels is None: + assigned_labels = None + else: + assigned_labels = decoded_bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + pairwise_ious = bbox_overlaps(valid_decoded_bbox, gt_bboxes) + iou_cost = -torch.log(pairwise_ious + eps) + + gt_onehot_label = ( + F.one_hot(gt_labels.to(torch.int64), + pred_scores.shape[-1]).float().unsqueeze(0).repeat( + num_valid, 1, 1)) + + valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1) + cls_cost = ( + F.binary_cross_entropy( + valid_pred_scores.to(dtype=torch.float32).sqrt_(), + gt_onehot_label, + reduction='none', + ).sum(-1).to(dtype=valid_pred_scores.dtype)) + + cost_matrix = ( + cls_cost * self.cls_weight + iou_cost * self.iou_weight + + (~is_in_boxes_and_center) * INF) + + matched_pred_ious, matched_gt_inds = \ + self.dynamic_k_matching( + cost_matrix, pairwise_ious, num_gt, valid_mask) + + # convert to AssignResult format + assigned_gt_inds[valid_mask] = matched_gt_inds + 1 + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() + max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), + -INF, + dtype=torch.float32) + max_overlaps[valid_mask] = matched_pred_ious + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + def get_in_gt_and_in_center_info(self, priors, gt_bboxes): + num_gt = gt_bboxes.size(0) + + repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt) + repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt) + repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt) + repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt) + + # is prior centers in gt bboxes, shape: [n_prior, n_gt] + l_ = repeated_x - gt_bboxes[:, 0] + t_ = repeated_y - gt_bboxes[:, 1] + r_ = gt_bboxes[:, 2] - repeated_x + b_ = gt_bboxes[:, 3] - repeated_y + + deltas = torch.stack([l_, t_, r_, b_], dim=1) + is_in_gts = deltas.min(dim=1).values > 0 + is_in_gts_all = is_in_gts.sum(dim=1) > 0 + + # is prior centers in gt centers + gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 + gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 + ct_box_l = gt_cxs - self.center_radius * repeated_stride_x + ct_box_t = gt_cys - self.center_radius * repeated_stride_y + ct_box_r = gt_cxs + self.center_radius * repeated_stride_x + ct_box_b = gt_cys + self.center_radius * repeated_stride_y + + cl_ = repeated_x - ct_box_l + ct_ = repeated_y - ct_box_t + cr_ = ct_box_r - repeated_x + cb_ = ct_box_b - repeated_y + + ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1) + is_in_cts = ct_deltas.min(dim=1).values > 0 + is_in_cts_all = is_in_cts.sum(dim=1) > 0 + + # in boxes or in centers, shape: [num_priors] + is_in_gts_or_centers = is_in_gts_all | is_in_cts_all 
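The `dynamic_k_matching` step that follows derives a per-gt k from the summed top candidate IoUs (at least 1) and then keeps that many lowest-cost priors. A sketch of just the dynamic-k computation, with invented IoUs:

```python
# Per-gt dynamic k as in SimOTA: sum the top candidate IoUs of each gt
# and use the truncated sum (clamped to at least 1) as the number of
# priors to keep for that gt.
import torch

pairwise_ious = torch.tensor([[0.90, 0.05],
                              [0.85, 0.10],
                              [0.80, 0.08],
                              [0.75, 0.02]])  # (num_valid_priors, num_gt)
candidate_topk = min(10, pairwise_ious.size(0))
topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
print(dynamic_ks)  # tensor([3, 1], dtype=torch.int32)
```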
+ + # both in boxes and centers, shape: [num_fg, num_gt] + is_in_boxes_and_centers = ( + is_in_gts[is_in_gts_or_centers, :] + & is_in_cts[is_in_gts_or_centers, :]) + return is_in_gts_or_centers, is_in_boxes_and_centers + + def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask): + matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) + # select candidate topk ious for dynamic-k calculation + candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) + topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) + # calculate dynamic k for each gt + dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) + matching_matrix[:, gt_idx][pos_idx] = 1 + + del topk_ious, dynamic_ks, pos_idx + + prior_match_gt_mask = matching_matrix.sum(1) > 1 + if prior_match_gt_mask.sum() > 0: + cost_min, cost_argmin = torch.min( + cost[prior_match_gt_mask, :], dim=1) + matching_matrix[prior_match_gt_mask, :] *= 0 + matching_matrix[prior_match_gt_mask, cost_argmin] = 1 + # get foreground mask inside box and center prior + fg_mask_inboxes = matching_matrix.sum(1) > 0 + valid_mask[valid_mask.clone()] = fg_mask_inboxes + + matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) + matched_pred_ious = (matching_matrix * + pairwise_ious).sum(1)[fg_mask_inboxes] + return matched_pred_ious, matched_gt_inds diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/task_aligned_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/task_aligned_assigner.py new file mode 100644 index 0000000..1872de4 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/task_aligned_assigner.py @@ -0,0 +1,151 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_ASSIGNERS +from ..iou_calculators import build_iou_calculator +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + +INF = 100000000 + + +@BBOX_ASSIGNERS.register_module() +class TaskAlignedAssigner(BaseAssigner): + """Task aligned assigner used in the paper: + `TOOD: Task-aligned One-stage Object Detection. + `_. + + Assign a corresponding gt bbox or background to each predicted bbox. + Each bbox will be assigned with `0` or a positive integer + indicating the ground truth index. + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + topk (int): number of bbox selected in each level + iou_calculator (dict): Config dict for iou calculator. + Default: dict(type='BboxOverlaps2D') + """ + + def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')): + assert topk >= 1 + self.topk = topk + self.iou_calculator = build_iou_calculator(iou_calculator) + + def assign(self, + pred_scores, + decode_bboxes, + anchors, + gt_bboxes, + gt_bboxes_ignore=None, + gt_labels=None, + alpha=1, + beta=6): + """Assign gt to bboxes. + + The assignment is done in following steps + + 1. compute alignment metric between all bbox (bbox of all pyramid + levels) and gt + 2. select top-k bbox as candidates for each gt + 3. limit the positive sample's center in gt (because the anchor-free + detector only can predict positive distance) + + + Args: + pred_scores (Tensor): predicted class probability, + shape(n, num_classes) + decode_bboxes (Tensor): predicted bounding boxes, shape(n, 4) + anchors (Tensor): pre-defined anchors, shape(n, 4). + gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). 
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`, e.g., crowd boxes in COCO. + gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). + + Returns: + :obj:`TaskAlignedAssignResult`: The assign result. + """ + anchors = anchors[:, :4] + num_gt, num_bboxes = gt_bboxes.size(0), anchors.size(0) + # compute alignment metric between all bbox and gt + overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach() + bbox_scores = pred_scores[:, gt_labels].detach() + # assign 0 by default + assigned_gt_inds = anchors.new_full((num_bboxes, ), + 0, + dtype=torch.long) + assign_metrics = anchors.new_zeros((num_bboxes, )) + + if num_gt == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = anchors.new_zeros((num_bboxes, )) + if num_gt == 0: + # No gt boxes, assign everything to background + assigned_gt_inds[:] = 0 + if gt_labels is None: + assigned_labels = None + else: + assigned_labels = anchors.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assign_result = AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + assign_result.assign_metrics = assign_metrics + return assign_result + + # select top-k bboxes as candidates for each gt + alignment_metrics = bbox_scores**alpha * overlaps**beta + topk = min(self.topk, alignment_metrics.size(0)) + _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True) + candidate_metrics = alignment_metrics[candidate_idxs, + torch.arange(num_gt)] + is_pos = candidate_metrics > 0 + + # limit the positive sample's center in gt + anchors_cx = (anchors[:, 0] + anchors[:, 2]) / 2.0 + anchors_cy = (anchors[:, 1] + anchors[:, 3]) / 2.0 + for gt_idx in range(num_gt): + candidate_idxs[:, gt_idx] += gt_idx * num_bboxes + ep_anchors_cx = anchors_cx.view(1, -1).expand( + num_gt, num_bboxes).contiguous().view(-1) + ep_anchors_cy = anchors_cy.view(1, -1).expand( + num_gt, num_bboxes).contiguous().view(-1) + candidate_idxs = candidate_idxs.view(-1) + + # calculate the left, top, right, bottom distance between positive + # bbox center and gt side + l_ = ep_anchors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] + t_ = ep_anchors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] + r_ = gt_bboxes[:, 2] - ep_anchors_cx[candidate_idxs].view(-1, num_gt) + b_ = gt_bboxes[:, 3] - ep_anchors_cy[candidate_idxs].view(-1, num_gt) + is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 + is_pos = is_pos & is_in_gts + + # if an anchor box is assigned to multiple gts, + # the one with the highest iou will be selected. 
+ overlaps_inf = torch.full_like(overlaps, + -INF).t().contiguous().view(-1) + index = candidate_idxs.view(-1)[is_pos.view(-1)] + overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] + overlaps_inf = overlaps_inf.view(num_gt, -1).t() + + max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) + assigned_gt_inds[ + max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 + assign_metrics[max_overlaps != -INF] = alignment_metrics[ + max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]] + + if gt_labels is not None: + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + pos_inds = torch.nonzero( + assigned_gt_inds > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[ + assigned_gt_inds[pos_inds] - 1] + else: + assigned_labels = None + assign_result = AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + assign_result.assign_metrics = assign_metrics + return assign_result diff --git a/downstream/mmdetection/mmdet/core/bbox/assigners/uniform_assigner.py b/downstream/mmdetection/mmdet/core/bbox/assigners/uniform_assigner.py new file mode 100644 index 0000000..70294fc --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/assigners/uniform_assigner.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_ASSIGNERS +from ..iou_calculators import build_iou_calculator +from ..transforms import bbox_xyxy_to_cxcywh +from .assign_result import AssignResult +from .base_assigner import BaseAssigner + + +@BBOX_ASSIGNERS.register_module() +class UniformAssigner(BaseAssigner): + """Uniform Matching between the anchors and gt boxes, which can achieve + balance in positive anchors, and gt_bboxes_ignore was not considered for + now. + + Args: + pos_ignore_thr (float): the threshold to ignore positive anchors + neg_ignore_thr (float): the threshold to ignore negative anchors + match_times(int): Number of positive anchors for each gt box. + Default 4. + iou_calculator (dict): iou_calculator config + """ + + def __init__(self, + pos_ignore_thr, + neg_ignore_thr, + match_times=4, + iou_calculator=dict(type='BboxOverlaps2D')): + self.match_times = match_times + self.pos_ignore_thr = pos_ignore_thr + self.neg_ignore_thr = neg_ignore_thr + self.iou_calculator = build_iou_calculator(iou_calculator) + + def assign(self, + bbox_pred, + anchor, + gt_bboxes, + gt_bboxes_ignore=None, + gt_labels=None): + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + 0, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + assign_result = AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + assign_result.set_extra_property( + 'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool)) + assign_result.set_extra_property('pos_predicted_boxes', + bbox_pred.new_empty((0, 4))) + assign_result.set_extra_property('target_boxes', + bbox_pred.new_empty((0, 4))) + return assign_result + + # 2. 
Compute the L1 cost between boxes + # Note that we use anchors and predict boxes both + cost_bbox = torch.cdist( + bbox_xyxy_to_cxcywh(bbox_pred), + bbox_xyxy_to_cxcywh(gt_bboxes), + p=1) + cost_bbox_anchors = torch.cdist( + bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1) + + # We found that topk function has different results in cpu and + # cuda mode. In order to ensure consistency with the source code, + # we also use cpu mode. + # TODO: Check whether the performance of cpu and cuda are the same. + C = cost_bbox.cpu() + C1 = cost_bbox_anchors.cpu() + + # self.match_times x n + index = torch.topk( + C, # c=b,n,x c[i]=n,x + k=self.match_times, + dim=0, + largest=False)[1] + + # self.match_times x n + index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1] + # (self.match_times*2) x n + indexes = torch.cat((index, index1), + dim=1).reshape(-1).to(bbox_pred.device) + + pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes) + anchor_overlaps = self.iou_calculator(anchor, gt_bboxes) + pred_max_overlaps, _ = pred_overlaps.max(dim=1) + anchor_max_overlaps, _ = anchor_overlaps.max(dim=0) + + # 3. Compute the ignore indexes use gt_bboxes and predict boxes + ignore_idx = pred_max_overlaps > self.neg_ignore_thr + assigned_gt_inds[ignore_idx] = -1 + + # 4. Compute the ignore indexes of positive sample use anchors + # and predict boxes + pos_gt_index = torch.arange( + 0, C1.size(1), + device=bbox_pred.device).repeat(self.match_times * 2) + pos_ious = anchor_overlaps[indexes, pos_gt_index] + pos_ignore_idx = pos_ious < self.pos_ignore_thr + + pos_gt_index_with_ignore = pos_gt_index + 1 + pos_gt_index_with_ignore[pos_ignore_idx] = -1 + assigned_gt_inds[indexes] = pos_gt_index_with_ignore + + if gt_labels is not None: + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + pos_inds = torch.nonzero( + assigned_gt_inds > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[ + assigned_gt_inds[pos_inds] - 1] + else: + assigned_labels = None + + assign_result = AssignResult( + num_gts, + assigned_gt_inds, + anchor_max_overlaps, + labels=assigned_labels) + assign_result.set_extra_property('pos_idx', ~pos_ignore_idx) + assign_result.set_extra_property('pos_predicted_boxes', + bbox_pred[indexes]) + assign_result.set_extra_property('target_boxes', + gt_bboxes[pos_gt_index]) + return assign_result diff --git a/downstream/mmdetection/mmdet/core/bbox/builder.py b/downstream/mmdetection/mmdet/core/bbox/builder.py new file mode 100644 index 0000000..9cfa055 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/builder.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
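The `builder.py` module whose header appears above (its body follows in the next hunk) exposes the `BBOX_ASSIGNERS`, `BBOX_SAMPLERS` and `BBOX_CODERS` registries plus their `build_*` helpers. A hedged sketch of how config dicts are turned into objects through them, using components added elsewhere in this patch; the threshold/std values are illustrative, and the import assumes the package is installed as `mmdet`:

```python
# Editorial sketch, not part of the patch.
from mmdet.core.bbox import build_assigner, build_bbox_coder

assigner = build_assigner(
    dict(type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7))
bbox_coder = build_bbox_coder(
    dict(type='DeltaXYWHBBoxCoder',
         target_means=(0., 0., 0., 0.),
         target_stds=(0.1, 0.1, 0.2, 0.2)))
```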
+from mmcv.utils import Registry, build_from_cfg + +BBOX_ASSIGNERS = Registry('bbox_assigner') +BBOX_SAMPLERS = Registry('bbox_sampler') +BBOX_CODERS = Registry('bbox_coder') + + +def build_assigner(cfg, **default_args): + """Builder of box assigner.""" + return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) + + +def build_sampler(cfg, **default_args): + """Builder of box sampler.""" + return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) + + +def build_bbox_coder(cfg, **default_args): + """Builder of box coder.""" + return build_from_cfg(cfg, BBOX_CODERS, default_args) diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/__init__.py b/downstream/mmdetection/mmdet/core/bbox/coder/__init__.py new file mode 100644 index 0000000..e12fd64 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_bbox_coder import BaseBBoxCoder +from .bucketing_bbox_coder import BucketingBBoxCoder +from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder +from .distance_point_bbox_coder import DistancePointBBoxCoder +from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder +from .pseudo_bbox_coder import PseudoBBoxCoder +from .tblr_bbox_coder import TBLRBBoxCoder +from .yolo_bbox_coder import YOLOBBoxCoder + +__all__ = [ + 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', + 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder', + 'BucketingBBoxCoder', 'DistancePointBBoxCoder' +] diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/base_bbox_coder.py b/downstream/mmdetection/mmdet/core/bbox/coder/base_bbox_coder.py new file mode 100644 index 0000000..a7ed041 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/base_bbox_coder.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + + +class BaseBBoxCoder(metaclass=ABCMeta): + """Base bounding box coder.""" + + def __init__(self, **kwargs): + pass + + @abstractmethod + def encode(self, bboxes, gt_bboxes): + """Encode deltas between bboxes and ground truth boxes.""" + + @abstractmethod + def decode(self, bboxes, bboxes_pred): + """Decode the predicted bboxes according to prediction and base + boxes.""" diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/bucketing_bbox_coder.py b/downstream/mmdetection/mmdet/core/bbox/coder/bucketing_bbox_coder.py new file mode 100644 index 0000000..4be0ada --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/bucketing_bbox_coder.py @@ -0,0 +1,351 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import torch +import torch.nn.functional as F + +from ..builder import BBOX_CODERS +from ..transforms import bbox_rescale +from .base_bbox_coder import BaseBBoxCoder + + +@BBOX_CODERS.register_module() +class BucketingBBoxCoder(BaseBBoxCoder): + """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL). + + Boundary Localization with Bucketing and Bucketing Guided Rescoring + are implemented here. + + Please refer to https://arxiv.org/abs/1912.04260 for more details. + + Args: + num_buckets (int): Number of buckets. + scale_factor (int): Scale factor of proposals to generate buckets. + offset_topk (int): Topk buckets are used to generate + bucket fine regression targets. Defaults to 2. + offset_upperbound (float): Offset upperbound to generate + bucket fine regression targets. + To avoid too large offset displacements. Defaults to 1.0. 
+ cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. + Defaults to True. + clip_border (bool, optional): Whether clip the objects outside the + border of the image. Defaults to True. + """ + + def __init__(self, + num_buckets, + scale_factor, + offset_topk=2, + offset_upperbound=1.0, + cls_ignore_neighbor=True, + clip_border=True): + super(BucketingBBoxCoder, self).__init__() + self.num_buckets = num_buckets + self.scale_factor = scale_factor + self.offset_topk = offset_topk + self.offset_upperbound = offset_upperbound + self.cls_ignore_neighbor = cls_ignore_neighbor + self.clip_border = clip_border + + def encode(self, bboxes, gt_bboxes): + """Get bucketing estimation and fine regression targets during + training. + + Args: + bboxes (torch.Tensor): source boxes, e.g., object proposals. + gt_bboxes (torch.Tensor): target of the transformation, e.g., + ground truth boxes. + + Returns: + encoded_bboxes(tuple[Tensor]): bucketing estimation + and fine regression targets and weights + """ + + assert bboxes.size(0) == gt_bboxes.size(0) + assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 + encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets, + self.scale_factor, self.offset_topk, + self.offset_upperbound, + self.cls_ignore_neighbor) + return encoded_bboxes + + def decode(self, bboxes, pred_bboxes, max_shape=None): + """Apply transformation `pred_bboxes` to `boxes`. + Args: + boxes (torch.Tensor): Basic boxes. + pred_bboxes (torch.Tensor): Predictions for bucketing estimation + and fine regression + max_shape (tuple[int], optional): Maximum shape of boxes. + Defaults to None. + + Returns: + torch.Tensor: Decoded boxes. + """ + assert len(pred_bboxes) == 2 + cls_preds, offset_preds = pred_bboxes + assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size( + 0) == bboxes.size(0) + decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds, + self.num_buckets, self.scale_factor, + max_shape, self.clip_border) + + return decoded_bboxes + + +@mmcv.jit(coderize=True) +def generat_buckets(proposals, num_buckets, scale_factor=1.0): + """Generate buckets w.r.t bucket number and scale factor of proposals. + + Args: + proposals (Tensor): Shape (n, 4) + num_buckets (int): Number of buckets. + scale_factor (float): Scale factor to rescale proposals. + + Returns: + tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets, + t_buckets, d_buckets) + + - bucket_w: Width of buckets on x-axis. Shape (n, ). + - bucket_h: Height of buckets on y-axis. Shape (n, ). + - l_buckets: Left buckets. Shape (n, ceil(side_num/2)). + - r_buckets: Right buckets. Shape (n, ceil(side_num/2)). + - t_buckets: Top buckets. Shape (n, ceil(side_num/2)). + - d_buckets: Down buckets. Shape (n, ceil(side_num/2)). 
+ """ + proposals = bbox_rescale(proposals, scale_factor) + + # number of buckets in each side + side_num = int(np.ceil(num_buckets / 2.0)) + pw = proposals[..., 2] - proposals[..., 0] + ph = proposals[..., 3] - proposals[..., 1] + px1 = proposals[..., 0] + py1 = proposals[..., 1] + px2 = proposals[..., 2] + py2 = proposals[..., 3] + + bucket_w = pw / num_buckets + bucket_h = ph / num_buckets + + # left buckets + l_buckets = px1[:, None] + (0.5 + torch.arange( + 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] + # right buckets + r_buckets = px2[:, None] - (0.5 + torch.arange( + 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] + # top buckets + t_buckets = py1[:, None] + (0.5 + torch.arange( + 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] + # down buckets + d_buckets = py2[:, None] - (0.5 + torch.arange( + 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] + return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets + + +@mmcv.jit(coderize=True) +def bbox2bucket(proposals, + gt, + num_buckets, + scale_factor, + offset_topk=2, + offset_upperbound=1.0, + cls_ignore_neighbor=True): + """Generate buckets estimation and fine regression targets. + + Args: + proposals (Tensor): Shape (n, 4) + gt (Tensor): Shape (n, 4) + num_buckets (int): Number of buckets. + scale_factor (float): Scale factor to rescale proposals. + offset_topk (int): Topk buckets are used to generate + bucket fine regression targets. Defaults to 2. + offset_upperbound (float): Offset allowance to generate + bucket fine regression targets. + To avoid too large offset displacements. Defaults to 1.0. + cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. + Defaults to True. + + Returns: + tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights). + + - offsets: Fine regression targets. \ + Shape (n, num_buckets*2). + - offsets_weights: Fine regression weights. \ + Shape (n, num_buckets*2). + - bucket_labels: Bucketing estimation labels. \ + Shape (n, num_buckets*2). + - cls_weights: Bucketing estimation weights. \ + Shape (n, num_buckets*2). 
+ """ + assert proposals.size() == gt.size() + + # generate buckets + proposals = proposals.float() + gt = gt.float() + (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, + d_buckets) = generat_buckets(proposals, num_buckets, scale_factor) + + gx1 = gt[..., 0] + gy1 = gt[..., 1] + gx2 = gt[..., 2] + gy2 = gt[..., 3] + + # generate offset targets and weights + # offsets from buckets to gts + l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None] + r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None] + t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None] + d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None] + + # select top-k nearest buckets + l_topk, l_label = l_offsets.abs().topk( + offset_topk, dim=1, largest=False, sorted=True) + r_topk, r_label = r_offsets.abs().topk( + offset_topk, dim=1, largest=False, sorted=True) + t_topk, t_label = t_offsets.abs().topk( + offset_topk, dim=1, largest=False, sorted=True) + d_topk, d_label = d_offsets.abs().topk( + offset_topk, dim=1, largest=False, sorted=True) + + offset_l_weights = l_offsets.new_zeros(l_offsets.size()) + offset_r_weights = r_offsets.new_zeros(r_offsets.size()) + offset_t_weights = t_offsets.new_zeros(t_offsets.size()) + offset_d_weights = d_offsets.new_zeros(d_offsets.size()) + inds = torch.arange(0, proposals.size(0)).to(proposals).long() + + # generate offset weights of top-k nearest buckets + for k in range(offset_topk): + if k >= 1: + offset_l_weights[inds, l_label[:, + k]] = (l_topk[:, k] < + offset_upperbound).float() + offset_r_weights[inds, r_label[:, + k]] = (r_topk[:, k] < + offset_upperbound).float() + offset_t_weights[inds, t_label[:, + k]] = (t_topk[:, k] < + offset_upperbound).float() + offset_d_weights[inds, d_label[:, + k]] = (d_topk[:, k] < + offset_upperbound).float() + else: + offset_l_weights[inds, l_label[:, k]] = 1.0 + offset_r_weights[inds, r_label[:, k]] = 1.0 + offset_t_weights[inds, t_label[:, k]] = 1.0 + offset_d_weights[inds, d_label[:, k]] = 1.0 + + offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1) + offsets_weights = torch.cat([ + offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights + ], + dim=-1) + + # generate bucket labels and weight + side_num = int(np.ceil(num_buckets / 2.0)) + labels = torch.stack( + [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1) + + batch_size = labels.size(0) + bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size, + -1).float() + bucket_cls_l_weights = (l_offsets.abs() < 1).float() + bucket_cls_r_weights = (r_offsets.abs() < 1).float() + bucket_cls_t_weights = (t_offsets.abs() < 1).float() + bucket_cls_d_weights = (d_offsets.abs() < 1).float() + bucket_cls_weights = torch.cat([ + bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights, + bucket_cls_d_weights + ], + dim=-1) + # ignore second nearest buckets for cls if necessary + if cls_ignore_neighbor: + bucket_cls_weights = (~((bucket_cls_weights == 1) & + (bucket_labels == 0))).float() + else: + bucket_cls_weights[:] = 1.0 + return offsets, offsets_weights, bucket_labels, bucket_cls_weights + + +@mmcv.jit(coderize=True) +def bucket2bbox(proposals, + cls_preds, + offset_preds, + num_buckets, + scale_factor=1.0, + max_shape=None, + clip_border=True): + """Apply bucketing estimation (cls preds) and fine regression (offset + preds) to generate det bboxes. + + Args: + proposals (Tensor): Boxes to be transformed. Shape (n, 4) + cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2). 
+ offset_preds (Tensor): fine regression. Shape (n, num_buckets*2). + num_buckets (int): Number of buckets. + scale_factor (float): Scale factor to rescale proposals. + max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) + clip_border (bool, optional): Whether clip the objects outside the + border of the image. Defaults to True. + + Returns: + tuple[Tensor]: (bboxes, loc_confidence). + + - bboxes: predicted bboxes. Shape (n, 4) + - loc_confidence: localization confidence of predicted bboxes. + Shape (n,). + """ + + side_num = int(np.ceil(num_buckets / 2.0)) + cls_preds = cls_preds.view(-1, side_num) + offset_preds = offset_preds.view(-1, side_num) + + scores = F.softmax(cls_preds, dim=1) + score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True) + + rescaled_proposals = bbox_rescale(proposals, scale_factor) + + pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0] + ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1] + px1 = rescaled_proposals[..., 0] + py1 = rescaled_proposals[..., 1] + px2 = rescaled_proposals[..., 2] + py2 = rescaled_proposals[..., 3] + + bucket_w = pw / num_buckets + bucket_h = ph / num_buckets + + score_inds_l = score_label[0::4, 0] + score_inds_r = score_label[1::4, 0] + score_inds_t = score_label[2::4, 0] + score_inds_d = score_label[3::4, 0] + l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w + r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w + t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h + d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h + + offsets = offset_preds.view(-1, 4, side_num) + inds = torch.arange(proposals.size(0)).to(proposals).long() + l_offsets = offsets[:, 0, :][inds, score_inds_l] + r_offsets = offsets[:, 1, :][inds, score_inds_r] + t_offsets = offsets[:, 2, :][inds, score_inds_t] + d_offsets = offsets[:, 3, :][inds, score_inds_d] + + x1 = l_buckets - l_offsets * bucket_w + x2 = r_buckets - r_offsets * bucket_w + y1 = t_buckets - t_offsets * bucket_h + y2 = d_buckets - d_offsets * bucket_h + + if clip_border and max_shape is not None: + x1 = x1.clamp(min=0, max=max_shape[1] - 1) + y1 = y1.clamp(min=0, max=max_shape[0] - 1) + x2 = x2.clamp(min=0, max=max_shape[1] - 1) + y2 = y2.clamp(min=0, max=max_shape[0] - 1) + bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]], + dim=-1) + + # bucketing guided rescoring + loc_confidence = score_topk[:, 0] + top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1 + loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float() + loc_confidence = loc_confidence.view(-1, 4).mean(dim=1) + + return bboxes, loc_confidence diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py b/downstream/mmdetection/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py new file mode 100644 index 0000000..a7f1c62 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py @@ -0,0 +1,392 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +import numpy as np +import torch + +from ..builder import BBOX_CODERS +from .base_bbox_coder import BaseBBoxCoder + + +@BBOX_CODERS.register_module() +class DeltaXYWHBBoxCoder(BaseBBoxCoder): + """Delta XYWH BBox coder. + + Following the practice in `R-CNN `_, + this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and + decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2). 
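The class whose docstring opens above (its definition continues below) is used as an encode/decode pair: `encode` turns proposal/gt pairs into deltas for the regression loss, `decode` maps predicted deltas back to boxes. A minimal round trip with toy boxes and the default means/stds (editorial sketch, not part of the patch; the import path follows the file layout added here):

```python
import torch
from mmdet.core.bbox.coder import DeltaXYWHBBoxCoder

coder = DeltaXYWHBBoxCoder(target_means=(0., 0., 0., 0.),
                           target_stds=(1., 1., 1., 1.))
proposals = torch.tensor([[0., 0., 10., 10.]])
gts = torch.tensor([[1., 1., 11., 11.]])
deltas = coder.encode(proposals, gts)       # (dx, dy, dw, dh) = (0.1, 0.1, 0, 0)
restored = coder.decode(proposals, deltas)  # back to (x1, y1, x2, y2)
assert torch.allclose(restored, gts, atol=1e-4)
```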
+ + Args: + target_means (Sequence[float]): Denormalizing means of target for + delta coordinates + target_stds (Sequence[float]): Denormalizing standard deviation of + target for delta coordinates + clip_border (bool, optional): Whether clip the objects outside the + border of the image. Defaults to True. + add_ctr_clamp (bool): Whether to add center clamp, when added, the + predicted box is clamped is its center is too far away from + the original anchor's center. Only used by YOLOF. Default False. + ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. + Default 32. + """ + + def __init__(self, + target_means=(0., 0., 0., 0.), + target_stds=(1., 1., 1., 1.), + clip_border=True, + add_ctr_clamp=False, + ctr_clamp=32): + super(BaseBBoxCoder, self).__init__() + self.means = target_means + self.stds = target_stds + self.clip_border = clip_border + self.add_ctr_clamp = add_ctr_clamp + self.ctr_clamp = ctr_clamp + + def encode(self, bboxes, gt_bboxes): + """Get box regression transformation deltas that can be used to + transform the ``bboxes`` into the ``gt_bboxes``. + + Args: + bboxes (torch.Tensor): Source boxes, e.g., object proposals. + gt_bboxes (torch.Tensor): Target of the transformation, e.g., + ground-truth boxes. + + Returns: + torch.Tensor: Box transformation deltas + """ + + assert bboxes.size(0) == gt_bboxes.size(0) + assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 + encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds) + return encoded_bboxes + + def decode(self, + bboxes, + pred_bboxes, + max_shape=None, + wh_ratio_clip=16 / 1000): + """Apply transformation `pred_bboxes` to `boxes`. + + Args: + bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4) + pred_bboxes (Tensor): Encoded offsets with respect to each roi. + Has shape (B, N, num_classes * 4) or (B, N, 4) or + (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H + when rois is a grid of anchors.Offset encoding follows [1]_. + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then + the max_shape should be a Sequence[Sequence[int]] + and the length of max_shape should also be B. + wh_ratio_clip (float, optional): The allowed ratio between + width and height. + + Returns: + torch.Tensor: Decoded boxes. + """ + + assert pred_bboxes.size(0) == bboxes.size(0) + if pred_bboxes.ndim == 3: + assert pred_bboxes.size(1) == bboxes.size(1) + + if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export(): + # single image decode + decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, + self.stds, max_shape, wh_ratio_clip, + self.clip_border, self.add_ctr_clamp, + self.ctr_clamp) + else: + if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export(): + warnings.warn( + 'DeprecationWarning: onnx_delta2bbox is deprecated ' + 'in the case of batch decoding and non-ONNX, ' + 'please use “delta2bbox” instead. In order to improve ' + 'the decoding speed, the batch function will no ' + 'longer be supported. ') + decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means, + self.stds, max_shape, + wh_ratio_clip, self.clip_border, + self.add_ctr_clamp, + self.ctr_clamp) + + return decoded_bboxes + + +@mmcv.jit(coderize=True) +def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)): + """Compute deltas of proposals w.r.t. gt. + + We usually compute the deltas of x, y, w, h of proposals w.r.t ground + truth bboxes to get regression target. 
+ This is the inverse function of :func:`delta2bbox`. + + Args: + proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) + gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) + means (Sequence[float]): Denormalizing means for delta coordinates + stds (Sequence[float]): Denormalizing standard deviation for delta + coordinates + + Returns: + Tensor: deltas with shape (N, 4), where columns represent dx, dy, + dw, dh. + """ + assert proposals.size() == gt.size() + + proposals = proposals.float() + gt = gt.float() + px = (proposals[..., 0] + proposals[..., 2]) * 0.5 + py = (proposals[..., 1] + proposals[..., 3]) * 0.5 + pw = proposals[..., 2] - proposals[..., 0] + ph = proposals[..., 3] - proposals[..., 1] + + gx = (gt[..., 0] + gt[..., 2]) * 0.5 + gy = (gt[..., 1] + gt[..., 3]) * 0.5 + gw = gt[..., 2] - gt[..., 0] + gh = gt[..., 3] - gt[..., 1] + + dx = (gx - px) / pw + dy = (gy - py) / ph + dw = torch.log(gw / pw) + dh = torch.log(gh / ph) + deltas = torch.stack([dx, dy, dw, dh], dim=-1) + + means = deltas.new_tensor(means).unsqueeze(0) + stds = deltas.new_tensor(stds).unsqueeze(0) + deltas = deltas.sub_(means).div_(stds) + + return deltas + + +@mmcv.jit(coderize=True) +def delta2bbox(rois, + deltas, + means=(0., 0., 0., 0.), + stds=(1., 1., 1., 1.), + max_shape=None, + wh_ratio_clip=16 / 1000, + clip_border=True, + add_ctr_clamp=False, + ctr_clamp=32): + """Apply deltas to shift/scale base boxes. + + Typically the rois are anchor or proposed bounding boxes and the deltas are + network outputs used to shift/scale those boxes. + This is the inverse function of :func:`bbox2delta`. + + Args: + rois (Tensor): Boxes to be transformed. Has shape (N, 4). + deltas (Tensor): Encoded offsets relative to each roi. + Has shape (N, num_classes * 4) or (N, 4). Note + N = num_base_anchors * W * H, when rois is a grid of + anchors. Offset encoding follows [1]_. + means (Sequence[float]): Denormalizing means for delta coordinates. + Default (0., 0., 0., 0.). + stds (Sequence[float]): Denormalizing standard deviation for delta + coordinates. Default (1., 1., 1., 1.). + max_shape (tuple[int, int]): Maximum bounds for boxes, specifies + (H, W). Default None. + wh_ratio_clip (float): Maximum aspect ratio for boxes. Default + 16 / 1000. + clip_border (bool, optional): Whether clip the objects outside the + border of the image. Default True. + add_ctr_clamp (bool): Whether to add center clamp. When set to True, + the center of the prediction bounding box will be clamped to + avoid being too far away from the center of the anchor. + Only used by YOLOF. Default False. + ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. + Default 32. + + Returns: + Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4 + represent tl_x, tl_y, br_x, br_y. + + References: + .. 
[1] https://arxiv.org/abs/1311.2524 + + Example: + >>> rois = torch.Tensor([[ 0., 0., 1., 1.], + >>> [ 0., 0., 1., 1.], + >>> [ 0., 0., 1., 1.], + >>> [ 5., 5., 5., 5.]]) + >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], + >>> [ 1., 1., 1., 1.], + >>> [ 0., 0., 2., -1.], + >>> [ 0.7, -1.9, -0.5, 0.3]]) + >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) + tensor([[0.0000, 0.0000, 1.0000, 1.0000], + [0.1409, 0.1409, 2.8591, 2.8591], + [0.0000, 0.3161, 4.1945, 0.6839], + [5.0000, 5.0000, 5.0000, 5.0000]]) + """ + num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4 + if num_bboxes == 0: + return deltas + + deltas = deltas.reshape(-1, 4) + + means = deltas.new_tensor(means).view(1, -1) + stds = deltas.new_tensor(stds).view(1, -1) + denorm_deltas = deltas * stds + means + + dxy = denorm_deltas[:, :2] + dwh = denorm_deltas[:, 2:] + + # Compute width/height of each roi + rois_ = rois.repeat(1, num_classes).reshape(-1, 4) + pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5) + pwh = (rois_[:, 2:] - rois_[:, :2]) + + dxy_wh = pwh * dxy + + max_ratio = np.abs(np.log(wh_ratio_clip)) + if add_ctr_clamp: + dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) + dwh = torch.clamp(dwh, max=max_ratio) + else: + dwh = dwh.clamp(min=-max_ratio, max=max_ratio) + + gxy = pxy + dxy_wh + gwh = pwh * dwh.exp() + x1y1 = gxy - (gwh * 0.5) + x2y2 = gxy + (gwh * 0.5) + bboxes = torch.cat([x1y1, x2y2], dim=-1) + if clip_border and max_shape is not None: + bboxes[..., 0::2].clamp_(min=0, max=max_shape[1]) + bboxes[..., 1::2].clamp_(min=0, max=max_shape[0]) + bboxes = bboxes.reshape(num_bboxes, -1) + return bboxes + + +def onnx_delta2bbox(rois, + deltas, + means=(0., 0., 0., 0.), + stds=(1., 1., 1., 1.), + max_shape=None, + wh_ratio_clip=16 / 1000, + clip_border=True, + add_ctr_clamp=False, + ctr_clamp=32): + """Apply deltas to shift/scale base boxes. + + Typically the rois are anchor or proposed bounding boxes and the deltas are + network outputs used to shift/scale those boxes. + This is the inverse function of :func:`bbox2delta`. + + Args: + rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4) + deltas (Tensor): Encoded offsets with respect to each roi. + Has shape (B, N, num_classes * 4) or (B, N, 4) or + (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H + when rois is a grid of anchors.Offset encoding follows [1]_. + means (Sequence[float]): Denormalizing means for delta coordinates. + Default (0., 0., 0., 0.). + stds (Sequence[float]): Denormalizing standard deviation for delta + coordinates. Default (1., 1., 1., 1.). + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). If rois shape is (B, N, 4), then + the max_shape should be a Sequence[Sequence[int]] + and the length of max_shape should also be B. Default None. + wh_ratio_clip (float): Maximum aspect ratio for boxes. + Default 16 / 1000. + clip_border (bool, optional): Whether clip the objects outside the + border of the image. Default True. + add_ctr_clamp (bool): Whether to add center clamp, when added, the + predicted box is clamped is its center is too far away from + the original anchor's center. Only used by YOLOF. Default False. + ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. + Default 32. + + Returns: + Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or + (N, num_classes * 4) or (N, 4), where 4 represent + tl_x, tl_y, br_x, br_y. + + References: + .. 
[1] https://arxiv.org/abs/1311.2524 + + Example: + >>> rois = torch.Tensor([[ 0., 0., 1., 1.], + >>> [ 0., 0., 1., 1.], + >>> [ 0., 0., 1., 1.], + >>> [ 5., 5., 5., 5.]]) + >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], + >>> [ 1., 1., 1., 1.], + >>> [ 0., 0., 2., -1.], + >>> [ 0.7, -1.9, -0.5, 0.3]]) + >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) + tensor([[0.0000, 0.0000, 1.0000, 1.0000], + [0.1409, 0.1409, 2.8591, 2.8591], + [0.0000, 0.3161, 4.1945, 0.6839], + [5.0000, 5.0000, 5.0000, 5.0000]]) + """ + means = deltas.new_tensor(means).view(1, + -1).repeat(1, + deltas.size(-1) // 4) + stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4) + denorm_deltas = deltas * stds + means + dx = denorm_deltas[..., 0::4] + dy = denorm_deltas[..., 1::4] + dw = denorm_deltas[..., 2::4] + dh = denorm_deltas[..., 3::4] + + x1, y1 = rois[..., 0], rois[..., 1] + x2, y2 = rois[..., 2], rois[..., 3] + # Compute center of each roi + px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx) + py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy) + # Compute width/height of each roi + pw = (x2 - x1).unsqueeze(-1).expand_as(dw) + ph = (y2 - y1).unsqueeze(-1).expand_as(dh) + + dx_width = pw * dx + dy_height = ph * dy + + max_ratio = np.abs(np.log(wh_ratio_clip)) + if add_ctr_clamp: + dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp) + dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp) + dw = torch.clamp(dw, max=max_ratio) + dh = torch.clamp(dh, max=max_ratio) + else: + dw = dw.clamp(min=-max_ratio, max=max_ratio) + dh = dh.clamp(min=-max_ratio, max=max_ratio) + # Use exp(network energy) to enlarge/shrink each roi + gw = pw * dw.exp() + gh = ph * dh.exp() + # Use network energy to shift the center of each roi + gx = px + dx_width + gy = py + dy_height + # Convert center-xy/width/height to top-left, bottom-right + x1 = gx - gw * 0.5 + y1 = gy - gh * 0.5 + x2 = gx + gw * 0.5 + y2 = gy + gh * 0.5 + + bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) + + if clip_border and max_shape is not None: + # clip bboxes with dynamic `min` and `max` for onnx + if torch.onnx.is_in_onnx_export(): + from mmdet.core.export import dynamic_clip_for_onnx + x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) + bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) + return bboxes + if not isinstance(max_shape, torch.Tensor): + max_shape = x1.new_tensor(max_shape) + max_shape = max_shape[..., :2].type_as(x1) + if max_shape.ndim == 2: + assert bboxes.ndim == 3 + assert max_shape.size(0) == bboxes.size(0) + + min_xy = x1.new_tensor(0) + max_xy = torch.cat( + [max_shape] * (deltas.size(-1) // 2), + dim=-1).flip(-1).unsqueeze(-2) + bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) + bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) + + return bboxes diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/distance_point_bbox_coder.py b/downstream/mmdetection/mmdet/core/bbox/coder/distance_point_bbox_coder.py new file mode 100644 index 0000000..9f308a8 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/distance_point_bbox_coder.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import BBOX_CODERS +from ..transforms import bbox2distance, distance2bbox +from .base_bbox_coder import BaseBBoxCoder + + +@BBOX_CODERS.register_module() +class DistancePointBBoxCoder(BaseBBoxCoder): + """Distance Point BBox coder. 
+ + This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, + right) and decode it back to the original. + + Args: + clip_border (bool, optional): Whether clip the objects outside the + border of the image. Defaults to True. + """ + + def __init__(self, clip_border=True): + super(BaseBBoxCoder, self).__init__() + self.clip_border = clip_border + + def encode(self, points, gt_bboxes, max_dis=None, eps=0.1): + """Encode bounding box to distances. + + Args: + points (Tensor): Shape (N, 2), The format is [x, y]. + gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy" + max_dis (float): Upper bound of the distance. Default None. + eps (float): a small value to ensure target < max_dis, instead <=. + Default 0.1. + + Returns: + Tensor: Box transformation deltas. The shape is (N, 4). + """ + assert points.size(0) == gt_bboxes.size(0) + assert points.size(-1) == 2 + assert gt_bboxes.size(-1) == 4 + return bbox2distance(points, gt_bboxes, max_dis, eps) + + def decode(self, points, pred_bboxes, max_shape=None): + """Decode distance prediction to bounding box. + + Args: + points (Tensor): Shape (B, N, 2) or (N, 2). + pred_bboxes (Tensor): Distance from the given point to 4 + boundaries (left, top, right, bottom). Shape (B, N, 4) + or (N, 4) + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). If priors shape is (B, N, 4), then + the max_shape should be a Sequence[Sequence[int]], + and the length of max_shape should also be B. + Default None. + Returns: + Tensor: Boxes with shape (N, 4) or (B, N, 4) + """ + assert points.size(0) == pred_bboxes.size(0) + assert points.size(-1) == 2 + assert pred_bboxes.size(-1) == 4 + if self.clip_border is False: + max_shape = None + return distance2bbox(points, pred_bboxes, max_shape) diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py b/downstream/mmdetection/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py new file mode 100644 index 0000000..7fa348b --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py @@ -0,0 +1,216 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import torch + +from ..builder import BBOX_CODERS +from .base_bbox_coder import BaseBBoxCoder + + +@BBOX_CODERS.register_module() +class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder): + """Legacy Delta XYWH BBox coder used in MMDet V1.x. + + Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2, + y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) + back to original bbox (x1, y1, x2, y2). + + Note: + The main difference between :class`LegacyDeltaXYWHBBoxCoder` and + :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and + height calculation. We suggest to only use this coder when testing with + MMDet V1.x models. + + References: + .. [1] https://arxiv.org/abs/1311.2524 + + Args: + target_means (Sequence[float]): denormalizing means of target for + delta coordinates + target_stds (Sequence[float]): denormalizing standard deviation of + target for delta coordinates + """ + + def __init__(self, + target_means=(0., 0., 0., 0.), + target_stds=(1., 1., 1., 1.)): + super(BaseBBoxCoder, self).__init__() + self.means = target_means + self.stds = target_stds + + def encode(self, bboxes, gt_bboxes): + """Get box regression transformation deltas that can be used to + transform the ``bboxes`` into the ``gt_bboxes``. 
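As the class note above says, the only behavioural difference from `DeltaXYWHBBoxCoder` is the `+ 1` used when measuring proposal widths and heights. A small editorial comparison with toy boxes (not part of the patch; module paths follow the file layout added here):

```python
import torch

from mmdet.core.bbox.coder.delta_xywh_bbox_coder import bbox2delta
from mmdet.core.bbox.coder.legacy_delta_xywh_bbox_coder import legacy_bbox2delta

proposals = torch.tensor([[0., 0., 9., 9.]])
gts = torch.tensor([[1., 1., 10., 10.]])
# Legacy treats the proposal as 10 px wide (x2 - x1 + 1), the new coder as 9 px,
# so the normalized center offsets differ.
print(legacy_bbox2delta(proposals, gts))  # tensor([[0.1000, 0.1000, 0.0000, 0.0000]])
print(bbox2delta(proposals, gts))         # tensor([[0.1111, 0.1111, 0.0000, 0.0000]])
```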
+ + Args: + bboxes (torch.Tensor): source boxes, e.g., object proposals. + gt_bboxes (torch.Tensor): target of the transformation, e.g., + ground-truth boxes. + + Returns: + torch.Tensor: Box transformation deltas + """ + assert bboxes.size(0) == gt_bboxes.size(0) + assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 + encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means, + self.stds) + return encoded_bboxes + + def decode(self, + bboxes, + pred_bboxes, + max_shape=None, + wh_ratio_clip=16 / 1000): + """Apply transformation `pred_bboxes` to `boxes`. + + Args: + boxes (torch.Tensor): Basic boxes. + pred_bboxes (torch.Tensor): Encoded boxes with shape + max_shape (tuple[int], optional): Maximum shape of boxes. + Defaults to None. + wh_ratio_clip (float, optional): The allowed ratio between + width and height. + + Returns: + torch.Tensor: Decoded boxes. + """ + assert pred_bboxes.size(0) == bboxes.size(0) + decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means, + self.stds, max_shape, wh_ratio_clip) + + return decoded_bboxes + + +@mmcv.jit(coderize=True) +def legacy_bbox2delta(proposals, + gt, + means=(0., 0., 0., 0.), + stds=(1., 1., 1., 1.)): + """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner. + + We usually compute the deltas of x, y, w, h of proposals w.r.t ground + truth bboxes to get regression target. + This is the inverse function of `delta2bbox()` + + Args: + proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) + gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) + means (Sequence[float]): Denormalizing means for delta coordinates + stds (Sequence[float]): Denormalizing standard deviation for delta + coordinates + + Returns: + Tensor: deltas with shape (N, 4), where columns represent dx, dy, + dw, dh. + """ + assert proposals.size() == gt.size() + + proposals = proposals.float() + gt = gt.float() + px = (proposals[..., 0] + proposals[..., 2]) * 0.5 + py = (proposals[..., 1] + proposals[..., 3]) * 0.5 + pw = proposals[..., 2] - proposals[..., 0] + 1.0 + ph = proposals[..., 3] - proposals[..., 1] + 1.0 + + gx = (gt[..., 0] + gt[..., 2]) * 0.5 + gy = (gt[..., 1] + gt[..., 3]) * 0.5 + gw = gt[..., 2] - gt[..., 0] + 1.0 + gh = gt[..., 3] - gt[..., 1] + 1.0 + + dx = (gx - px) / pw + dy = (gy - py) / ph + dw = torch.log(gw / pw) + dh = torch.log(gh / ph) + deltas = torch.stack([dx, dy, dw, dh], dim=-1) + + means = deltas.new_tensor(means).unsqueeze(0) + stds = deltas.new_tensor(stds).unsqueeze(0) + deltas = deltas.sub_(means).div_(stds) + + return deltas + + +@mmcv.jit(coderize=True) +def legacy_delta2bbox(rois, + deltas, + means=(0., 0., 0., 0.), + stds=(1., 1., 1., 1.), + max_shape=None, + wh_ratio_clip=16 / 1000): + """Apply deltas to shift/scale base boxes in the MMDet V1.x manner. + + Typically the rois are anchor or proposed bounding boxes and the deltas are + network outputs used to shift/scale those boxes. + This is the inverse function of `bbox2delta()` + + Args: + rois (Tensor): Boxes to be transformed. Has shape (N, 4) + deltas (Tensor): Encoded offsets with respect to each roi. + Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when + rois is a grid of anchors. Offset encoding follows [1]_. + means (Sequence[float]): Denormalizing means for delta coordinates + stds (Sequence[float]): Denormalizing standard deviation for delta + coordinates + max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) + wh_ratio_clip (float): Maximum aspect ratio for boxes. 
+ + Returns: + Tensor: Boxes with shape (N, 4), where columns represent + tl_x, tl_y, br_x, br_y. + + References: + .. [1] https://arxiv.org/abs/1311.2524 + + Example: + >>> rois = torch.Tensor([[ 0., 0., 1., 1.], + >>> [ 0., 0., 1., 1.], + >>> [ 0., 0., 1., 1.], + >>> [ 5., 5., 5., 5.]]) + >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], + >>> [ 1., 1., 1., 1.], + >>> [ 0., 0., 2., -1.], + >>> [ 0.7, -1.9, -0.5, 0.3]]) + >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32)) + tensor([[0.0000, 0.0000, 1.5000, 1.5000], + [0.0000, 0.0000, 5.2183, 5.2183], + [0.0000, 0.1321, 7.8891, 0.8679], + [5.3967, 2.4251, 6.0033, 3.7749]]) + """ + means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) + stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) + denorm_deltas = deltas * stds + means + dx = denorm_deltas[:, 0::4] + dy = denorm_deltas[:, 1::4] + dw = denorm_deltas[:, 2::4] + dh = denorm_deltas[:, 3::4] + max_ratio = np.abs(np.log(wh_ratio_clip)) + dw = dw.clamp(min=-max_ratio, max=max_ratio) + dh = dh.clamp(min=-max_ratio, max=max_ratio) + # Compute center of each roi + px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) + py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) + # Compute width/height of each roi + pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) + ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) + # Use exp(network energy) to enlarge/shrink each roi + gw = pw * dw.exp() + gh = ph * dh.exp() + # Use network energy to shift the center of each roi + gx = px + pw * dx + gy = py + ph * dy + # Convert center-xy/width/height to top-left, bottom-right + + # The true legacy box coder should +- 0.5 here. + # However, current implementation improves the performance when testing + # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP) + x1 = gx - gw * 0.5 + y1 = gy - gh * 0.5 + x2 = gx + gw * 0.5 + y2 = gy + gh * 0.5 + if max_shape is not None: + x1 = x1.clamp(min=0, max=max_shape[1] - 1) + y1 = y1.clamp(min=0, max=max_shape[0] - 1) + x2 = x2.clamp(min=0, max=max_shape[1] - 1) + y2 = y2.clamp(min=0, max=max_shape[0] - 1) + bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) + return bboxes diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/pseudo_bbox_coder.py b/downstream/mmdetection/mmdet/core/bbox/coder/pseudo_bbox_coder.py new file mode 100644 index 0000000..fe71f36 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/pseudo_bbox_coder.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import BBOX_CODERS +from .base_bbox_coder import BaseBBoxCoder + + +@BBOX_CODERS.register_module() +class PseudoBBoxCoder(BaseBBoxCoder): + """Pseudo bounding box coder.""" + + def __init__(self, **kwargs): + super(BaseBBoxCoder, self).__init__(**kwargs) + + def encode(self, bboxes, gt_bboxes): + """torch.Tensor: return the given ``bboxes``""" + return gt_bboxes + + def decode(self, bboxes, pred_bboxes): + """torch.Tensor: return the given ``pred_bboxes``""" + return pred_bboxes diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/tblr_bbox_coder.py b/downstream/mmdetection/mmdet/core/bbox/coder/tblr_bbox_coder.py new file mode 100644 index 0000000..cb42066 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/tblr_bbox_coder.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
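The file opened above defines `TBLRBBoxCoder`, which encodes a box as (top, bottom, left, right) distances from the prior center, optionally normalized by the prior width/height and a scalar `normalizer`. A minimal round trip with toy values (editorial sketch, not part of the patch; the import path follows the file layout added here):

```python
import torch
from mmdet.core.bbox.coder import TBLRBBoxCoder

coder = TBLRBBoxCoder(normalizer=4.0)
priors = torch.tensor([[0., 0., 8., 8.]])  # (x1, y1, x2, y2)
gts = torch.tensor([[1., 1., 7., 7.]])
targets = coder.encode(priors, gts)        # normalized (top, bottom, left, right)
decoded = coder.decode(priors, targets)
assert torch.allclose(decoded, gts, atol=1e-4)
```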
+import mmcv +import torch + +from ..builder import BBOX_CODERS +from .base_bbox_coder import BaseBBoxCoder + + +@BBOX_CODERS.register_module() +class TBLRBBoxCoder(BaseBBoxCoder): + """TBLR BBox coder. + + Following the practice in `FSAF `_, + this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, + right) and decode it back to the original. + + Args: + normalizer (list | float): Normalization factor to be + divided with when coding the coordinates. If it is a list, it should + have length of 4 indicating normalization factor in tblr dims. + Otherwise it is a unified float factor for all dims. Default: 4.0 + clip_border (bool, optional): Whether clip the objects outside the + border of the image. Defaults to True. + """ + + def __init__(self, normalizer=4.0, clip_border=True): + super(BaseBBoxCoder, self).__init__() + self.normalizer = normalizer + self.clip_border = clip_border + + def encode(self, bboxes, gt_bboxes): + """Get box regression transformation deltas that can be used to + transform the ``bboxes`` into the ``gt_bboxes`` in the (top, left, + bottom, right) order. + + Args: + bboxes (torch.Tensor): source boxes, e.g., object proposals. + gt_bboxes (torch.Tensor): target of the transformation, e.g., + ground truth boxes. + + Returns: + torch.Tensor: Box transformation deltas + """ + assert bboxes.size(0) == gt_bboxes.size(0) + assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 + encoded_bboxes = bboxes2tblr( + bboxes, gt_bboxes, normalizer=self.normalizer) + return encoded_bboxes + + def decode(self, bboxes, pred_bboxes, max_shape=None): + """Apply transformation `pred_bboxes` to `boxes`. + + Args: + bboxes (torch.Tensor): Basic boxes.Shape (B, N, 4) or (N, 4) + pred_bboxes (torch.Tensor): Encoded boxes with shape + (B, N, 4) or (N, 4) + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then + the max_shape should be a Sequence[Sequence[int]] + and the length of max_shape should also be B. + + Returns: + torch.Tensor: Decoded boxes. + """ + decoded_bboxes = tblr2bboxes( + bboxes, + pred_bboxes, + normalizer=self.normalizer, + max_shape=max_shape, + clip_border=self.clip_border) + + return decoded_bboxes + + +@mmcv.jit(coderize=True) +def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True): + """Encode ground truth boxes to tblr coordinate. + + It first convert the gt coordinate to tblr format, + (top, bottom, left, right), relative to prior box centers. + The tblr coordinate may be normalized by the side length of prior bboxes + if `normalize_by_wh` is specified as True, and it is then normalized by + the `normalizer` factor. + + Args: + priors (Tensor): Prior boxes in point form + Shape: (num_proposals,4). + gts (Tensor): Coords of ground truth for each prior in point-form + Shape: (num_proposals, 4). + normalizer (Sequence[float] | float): normalization parameter of + encoded boxes. If it is a list, it has to have length = 4. + Default: 4.0 + normalize_by_wh (bool): Whether to normalize tblr coordinate by the + side length (wh) of prior bboxes. 
+ + Return: + encoded boxes (Tensor), Shape: (num_proposals, 4) + """ + + # dist b/t match center and prior's center + if not isinstance(normalizer, float): + normalizer = torch.tensor(normalizer, device=priors.device) + assert len(normalizer) == 4, 'Normalizer must have length = 4' + assert priors.size(0) == gts.size(0) + prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2 + xmin, ymin, xmax, ymax = gts.split(1, dim=1) + top = prior_centers[:, 1].unsqueeze(1) - ymin + bottom = ymax - prior_centers[:, 1].unsqueeze(1) + left = prior_centers[:, 0].unsqueeze(1) - xmin + right = xmax - prior_centers[:, 0].unsqueeze(1) + loc = torch.cat((top, bottom, left, right), dim=1) + if normalize_by_wh: + # Normalize tblr by anchor width and height + wh = priors[:, 2:4] - priors[:, 0:2] + w, h = torch.split(wh, 1, dim=1) + loc[:, :2] /= h # tb is normalized by h + loc[:, 2:] /= w # lr is normalized by w + # Normalize tblr by the given normalization factor + return loc / normalizer + + +@mmcv.jit(coderize=True) +def tblr2bboxes(priors, + tblr, + normalizer=4.0, + normalize_by_wh=True, + max_shape=None, + clip_border=True): + """Decode tblr outputs to prediction boxes. + + The process includes 3 steps: 1) De-normalize tblr coordinates by + multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the + prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert + tblr (top, bottom, left, right) pair relative to the center of priors back + to (xmin, ymin, xmax, ymax) coordinate. + + Args: + priors (Tensor): Prior boxes in point form (x0, y0, x1, y1) + Shape: (N,4) or (B, N, 4). + tblr (Tensor): Coords of network output in tblr form + Shape: (N, 4) or (B, N, 4). + normalizer (Sequence[float] | float): Normalization parameter of + encoded boxes. By list, it represents the normalization factors at + tblr dims. By float, it is the unified normalization factor at all + dims. Default: 4.0 + normalize_by_wh (bool): Whether the tblr coordinates have been + normalized by the side length (wh) of prior bboxes. + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). If priors shape is (B, N, 4), then + the max_shape should be a Sequence[Sequence[int]] + and the length of max_shape should also be B. + clip_border (bool, optional): Whether clip the objects outside the + border of the image. Defaults to True. 
+ + Return: + encoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4) + """ + if not isinstance(normalizer, float): + normalizer = torch.tensor(normalizer, device=priors.device) + assert len(normalizer) == 4, 'Normalizer must have length = 4' + assert priors.size(0) == tblr.size(0) + if priors.ndim == 3: + assert priors.size(1) == tblr.size(1) + + loc_decode = tblr * normalizer + prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2 + if normalize_by_wh: + wh = priors[..., 2:4] - priors[..., 0:2] + w, h = torch.split(wh, 1, dim=-1) + # Inplace operation with slice would failed for exporting to ONNX + th = h * loc_decode[..., :2] # tb + tw = w * loc_decode[..., 2:] # lr + loc_decode = torch.cat([th, tw], dim=-1) + # Cannot be exported using onnx when loc_decode.split(1, dim=-1) + top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1) + xmin = prior_centers[..., 0].unsqueeze(-1) - left + xmax = prior_centers[..., 0].unsqueeze(-1) + right + ymin = prior_centers[..., 1].unsqueeze(-1) - top + ymax = prior_centers[..., 1].unsqueeze(-1) + bottom + + bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1) + + if clip_border and max_shape is not None: + # clip bboxes with dynamic `min` and `max` for onnx + if torch.onnx.is_in_onnx_export(): + from mmdet.core.export import dynamic_clip_for_onnx + xmin, ymin, xmax, ymax = dynamic_clip_for_onnx( + xmin, ymin, xmax, ymax, max_shape) + bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1) + return bboxes + if not isinstance(max_shape, torch.Tensor): + max_shape = priors.new_tensor(max_shape) + max_shape = max_shape[..., :2].type_as(priors) + if max_shape.ndim == 2: + assert bboxes.ndim == 3 + assert max_shape.size(0) == bboxes.size(0) + + min_xy = priors.new_tensor(0) + max_xy = torch.cat([max_shape, max_shape], + dim=-1).flip(-1).unsqueeze(-2) + bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) + bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) + + return bboxes diff --git a/downstream/mmdetection/mmdet/core/bbox/coder/yolo_bbox_coder.py b/downstream/mmdetection/mmdet/core/bbox/coder/yolo_bbox_coder.py new file mode 100644 index 0000000..2852eca --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/coder/yolo_bbox_coder.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch + +from ..builder import BBOX_CODERS +from .base_bbox_coder import BaseBBoxCoder + + +@BBOX_CODERS.register_module() +class YOLOBBoxCoder(BaseBBoxCoder): + """YOLO BBox coder. + + Following `YOLO `_, this coder divide + image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh). + cx, cy in [0., 1.], denotes relative center position w.r.t the center of + bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`. + + Args: + eps (float): Min value of cx, cy when encoding. + """ + + def __init__(self, eps=1e-6): + super(BaseBBoxCoder, self).__init__() + self.eps = eps + + @mmcv.jit(coderize=True) + def encode(self, bboxes, gt_bboxes, stride): + """Get box regression transformation deltas that can be used to + transform the ``bboxes`` into the ``gt_bboxes``. + + Args: + bboxes (torch.Tensor): Source boxes, e.g., anchors. + gt_bboxes (torch.Tensor): Target of the transformation, e.g., + ground-truth boxes. + stride (torch.Tensor | int): Stride of bboxes. 
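+
+        Example:
+            An illustrative sketch (values chosen by hand) with a single
+            anchor and gt box on a stride-8 grid.
+
+            >>> import torch
+            >>> anchors = torch.Tensor([[0., 0., 8., 8.]])
+            >>> gt = torch.Tensor([[2., 2., 10., 10.]])
+            >>> coder = YOLOBBoxCoder()
+            >>> deltas = coder.encode(anchors, gt, stride=8)
+            >>> # The gt center is offset by (2, 2) from the anchor center
+            >>> # and both boxes are 8x8, so
+            >>> # deltas == tensor([[0.7500, 0.7500, 0.0000, 0.0000]]).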
+ + Returns: + torch.Tensor: Box transformation deltas + """ + + assert bboxes.size(0) == gt_bboxes.size(0) + assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 + x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5 + y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5 + w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0] + h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1] + x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 + y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 + w = bboxes[..., 2] - bboxes[..., 0] + h = bboxes[..., 3] - bboxes[..., 1] + w_target = torch.log((w_gt / w).clamp(min=self.eps)) + h_target = torch.log((h_gt / h).clamp(min=self.eps)) + x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp( + self.eps, 1 - self.eps) + y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp( + self.eps, 1 - self.eps) + encoded_bboxes = torch.stack( + [x_center_target, y_center_target, w_target, h_target], dim=-1) + return encoded_bboxes + + @mmcv.jit(coderize=True) + def decode(self, bboxes, pred_bboxes, stride): + """Apply transformation `pred_bboxes` to `boxes`. + + Args: + boxes (torch.Tensor): Basic boxes, e.g. anchors. + pred_bboxes (torch.Tensor): Encoded boxes with shape + stride (torch.Tensor | int): Strides of bboxes. + + Returns: + torch.Tensor: Decoded boxes. + """ + assert pred_bboxes.size(-1) == bboxes.size(-1) == 4 + xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + ( + pred_bboxes[..., :2] - 0.5) * stride + whs = (bboxes[..., 2:] - + bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp() + decoded_bboxes = torch.stack( + (xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] - + whs[..., 1], xy_centers[..., 0] + whs[..., 0], + xy_centers[..., 1] + whs[..., 1]), + dim=-1) + return decoded_bboxes diff --git a/downstream/mmdetection/mmdet/core/bbox/demodata.py b/downstream/mmdetection/mmdet/core/bbox/demodata.py new file mode 100644 index 0000000..eb24b34 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/demodata.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet.utils.util_random import ensure_rng + + +def random_boxes(num=1, scale=1, rng=None): + """Simple version of ``kwimage.Boxes.random`` + + Returns: + Tensor: shape (n, 4) in x1, y1, x2, y2 format. + + References: + https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 + + Example: + >>> num = 3 + >>> scale = 512 + >>> rng = 0 + >>> boxes = random_boxes(num, scale, rng) + >>> print(boxes) + tensor([[280.9925, 278.9802, 308.6148, 366.1769], + [216.9113, 330.6978, 224.0446, 456.5878], + [405.3632, 196.3221, 493.3953, 270.7942]]) + """ + rng = ensure_rng(rng) + + tlbr = rng.rand(num, 4).astype(np.float32) + + tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) + tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) + br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) + br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) + + tlbr[:, 0] = tl_x * scale + tlbr[:, 1] = tl_y * scale + tlbr[:, 2] = br_x * scale + tlbr[:, 3] = br_y * scale + + boxes = torch.from_numpy(tlbr) + return boxes diff --git a/downstream/mmdetection/mmdet/core/bbox/iou_calculators/__init__.py b/downstream/mmdetection/mmdet/core/bbox/iou_calculators/__init__.py new file mode 100644 index 0000000..04ba925 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/iou_calculators/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .builder import build_iou_calculator +from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps + +__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps'] diff --git a/downstream/mmdetection/mmdet/core/bbox/iou_calculators/builder.py b/downstream/mmdetection/mmdet/core/bbox/iou_calculators/builder.py new file mode 100644 index 0000000..378ee26 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/iou_calculators/builder.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import Registry, build_from_cfg + +IOU_CALCULATORS = Registry('IoU calculator') + + +def build_iou_calculator(cfg, default_args=None): + """Builder of IoU calculator.""" + return build_from_cfg(cfg, IOU_CALCULATORS, default_args) diff --git a/downstream/mmdetection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py b/downstream/mmdetection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py new file mode 100644 index 0000000..4656d61 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py @@ -0,0 +1,261 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from .builder import IOU_CALCULATORS + + +def cast_tensor_type(x, scale=1., dtype=None): + if dtype == 'fp16': + # scale is for preventing overflows + x = (x / scale).half() + return x + + +def fp16_clamp(x, min=None, max=None): + if not x.is_cuda and x.dtype == torch.float16: + # clamp for cpu float16, tensor fp16 has no clamp implementation + return x.float().clamp(min, max).half() + + return x.clamp(min, max) + + +@IOU_CALCULATORS.register_module() +class BboxOverlaps2D: + """2D Overlaps (e.g. IoUs, GIoUs) Calculator.""" + + def __init__(self, scale=1., dtype=None): + self.scale = scale + self.dtype = dtype + + def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): + """Calculate IoU between 2D bboxes. + + Args: + bboxes1 (Tensor): bboxes have shape (m, 4) in + format, or shape (m, 5) in format. + bboxes2 (Tensor): bboxes have shape (m, 4) in + format, shape (m, 5) in format, or be + empty. If ``is_aligned `` is ``True``, then m and n must be + equal. + mode (str): "iou" (intersection over union), "iof" (intersection + over foreground), or "giou" (generalized intersection over + union). + is_aligned (bool, optional): If True, then m and n must be equal. + Default False. + + Returns: + Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,) + """ + assert bboxes1.size(-1) in [0, 4, 5] + assert bboxes2.size(-1) in [0, 4, 5] + if bboxes2.size(-1) == 5: + bboxes2 = bboxes2[..., :4] + if bboxes1.size(-1) == 5: + bboxes1 = bboxes1[..., :4] + + if self.dtype == 'fp16': + # change tensor type to save cpu and cuda memory and keep speed + bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype) + bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype) + overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) + if not overlaps.is_cuda and overlaps.dtype == torch.float16: + # resume cpu float32 + overlaps = overlaps.float() + return overlaps + + return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) + + def __repr__(self): + """str: a string describing the module""" + repr_str = self.__class__.__name__ + f'(' \ + f'scale={self.scale}, dtype={self.dtype})' + return repr_str + + +def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6): + """Calculate overlap between two set of bboxes. 
+ + FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889 + Note: + Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou', + there are some new generated variable when calculating IOU + using bbox_overlaps function: + + 1) is_aligned is False + area1: M x 1 + area2: N x 1 + lt: M x N x 2 + rb: M x N x 2 + wh: M x N x 2 + overlap: M x N x 1 + union: M x N x 1 + ious: M x N x 1 + + Total memory: + S = (9 x N x M + N + M) * 4 Byte, + + When using FP16, we can reduce: + R = (9 x N x M + N + M) * 4 / 2 Byte + R large than (N + M) * 4 * 2 is always true when N and M >= 1. + Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2, + N + 1 < 3 * N, when N or M is 1. + + Given M = 40 (ground truth), N = 400000 (three anchor boxes + in per grid, FPN, R-CNNs), + R = 275 MB (one times) + + A special case (dense detection), M = 512 (ground truth), + R = 3516 MB = 3.43 GB + + When the batch size is B, reduce: + B x R + + Therefore, CUDA memory runs out frequently. + + Experiments on GeForce RTX 2080Ti (11019 MiB): + + | dtype | M | N | Use | Real | Ideal | + |:----:|:----:|:----:|:----:|:----:|:----:| + | FP32 | 512 | 400000 | 8020 MiB | -- | -- | + | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | + | FP32 | 40 | 400000 | 1540 MiB | -- | -- | + | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB | + + 2) is_aligned is True + area1: N x 1 + area2: N x 1 + lt: N x 2 + rb: N x 2 + wh: N x 2 + overlap: N x 1 + union: N x 1 + ious: N x 1 + + Total memory: + S = 11 x N * 4 Byte + + When using FP16, we can reduce: + R = 11 x N * 4 / 2 Byte + + So do the 'giou' (large than 'iou'). + + Time-wise, FP16 is generally faster than FP32. + + When gpu_assign_thr is not -1, it takes more time on cpu + but not reduce memory. + There, we can reduce half the memory and keep the speed. + + If ``is_aligned`` is ``False``, then calculate the overlaps between each + bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned + pair of bboxes1 and bboxes2. + + Args: + bboxes1 (Tensor): shape (B, m, 4) in format or empty. + bboxes2 (Tensor): shape (B, n, 4) in format or empty. + B indicates the batch dim, in shape (B1, B2, ..., Bn). + If ``is_aligned`` is ``True``, then m and n must be equal. + mode (str): "iou" (intersection over union), "iof" (intersection over + foreground) or "giou" (generalized intersection over union). + Default "iou". + is_aligned (bool, optional): If True, then m and n must be equal. + Default False. + eps (float, optional): A value added to the denominator for numerical + stability. Default 1e-6. 
+ + Returns: + Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) + + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 10, 10], + >>> [10, 10, 20, 20], + >>> [32, 32, 38, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 10, 20], + >>> [0, 10, 10, 19], + >>> [10, 10, 20, 20], + >>> ]) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2) + >>> assert overlaps.shape == (3, 3) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) + >>> assert overlaps.shape == (3, ) + + Example: + >>> empty = torch.empty(0, 4) + >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) + >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) + >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) + >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) + """ + + assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' + # Either the boxes are empty or the length of boxes' last dimension is 4 + assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) + + # Batch dim must be the same + # Batch dim: (B1, B2, ... Bn) + assert bboxes1.shape[:-2] == bboxes2.shape[:-2] + batch_shape = bboxes1.shape[:-2] + + rows = bboxes1.size(-2) + cols = bboxes2.size(-2) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + if is_aligned: + return bboxes1.new(batch_shape + (rows, )) + else: + return bboxes1.new(batch_shape + (rows, cols)) + + area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( + bboxes1[..., 3] - bboxes1[..., 1]) + area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( + bboxes2[..., 3] - bboxes2[..., 1]) + + if is_aligned: + lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] + rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1 + area2 - overlap + else: + union = area1 + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) + enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) + else: + lt = torch.max(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) # [B, rows, cols, 2] + rb = torch.min(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1[..., None] + area2[..., None, :] - overlap + else: + union = area1[..., None] + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) + enclosed_rb = torch.max(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) + + eps = union.new_tensor([eps]) + union = torch.max(union, eps) + ious = overlap / union + if mode in ['iou', 'iof']: + return ious + # calculate gious + enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) + enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] + enclose_area = torch.max(enclose_area, eps) + gious = ious - (enclose_area - union) / enclose_area + return gious diff --git a/downstream/mmdetection/mmdet/core/bbox/match_costs/__init__.py b/downstream/mmdetection/mmdet/core/bbox/match_costs/__init__.py new file mode 100644 index 0000000..1b63679 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/match_costs/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .builder import build_match_cost +from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost, + DiceCost, FocalLossCost, IoUCost) + +__all__ = [ + 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', + 'FocalLossCost', 'DiceCost', 'CrossEntropyLossCost' +] diff --git a/downstream/mmdetection/mmdet/core/bbox/match_costs/builder.py b/downstream/mmdetection/mmdet/core/bbox/match_costs/builder.py new file mode 100644 index 0000000..ea086ad --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/match_costs/builder.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import Registry, build_from_cfg + +MATCH_COST = Registry('Match Cost') + + +def build_match_cost(cfg, default_args=None): + """Builder of IoU calculator.""" + return build_from_cfg(cfg, MATCH_COST, default_args) diff --git a/downstream/mmdetection/mmdet/core/bbox/match_costs/match_cost.py b/downstream/mmdetection/mmdet/core/bbox/match_costs/match_cost.py new file mode 100644 index 0000000..4342b02 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/match_costs/match_cost.py @@ -0,0 +1,359 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F + +from mmdet.core.bbox.iou_calculators import bbox_overlaps +from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh +from .builder import MATCH_COST + + +@MATCH_COST.register_module() +class BBoxL1Cost: + """BBoxL1Cost. + + Args: + weight (int | float, optional): loss_weight + box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN + + Examples: + >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost + >>> import torch + >>> self = BBoxL1Cost() + >>> bbox_pred = torch.rand(1, 4) + >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(bbox_pred, gt_bboxes, factor) + tensor([[1.6172, 1.6422]]) + """ + + def __init__(self, weight=1., box_format='xyxy'): + self.weight = weight + assert box_format in ['xyxy', 'xywh'] + self.box_format = box_format + + def __call__(self, bbox_pred, gt_bboxes): + """ + Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + (num_query, 4). + gt_bboxes (Tensor): Ground truth boxes with normalized + coordinates (x1, y1, x2, y2). Shape (num_gt, 4). + + Returns: + torch.Tensor: bbox_cost value with weight + """ + if self.box_format == 'xywh': + gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) + elif self.box_format == 'xyxy': + bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred) + bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) + return bbox_cost * self.weight + + +@MATCH_COST.register_module() +class FocalLossCost: + """FocalLossCost. + + Args: + weight (int | float, optional): loss_weight + alpha (int | float, optional): focal_loss alpha + gamma (int | float, optional): focal_loss gamma + eps (float, optional): default 1e-12 + binary_input (bool, optional): Whether the input is binary, + default False. 
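+
+    Note:
+        With ``p`` the sigmoid score of the ground-truth class, the cost
+        returned by ``_focal_loss_cost`` for one (prediction, gt) pair is
+        ``alpha * (1 - p)**gamma * (-log(p + eps))
+        - (1 - alpha) * p**gamma * (-log(1 - p + eps))``,
+        scaled by ``weight``.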
+ + Examples: + >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost + >>> import torch + >>> self = FocalLossCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3236, -0.3364, -0.2699], + [-0.3439, -0.3209, -0.4807], + [-0.4099, -0.3795, -0.2929], + [-0.1950, -0.1207, -0.2626]]) + """ + + def __init__(self, + weight=1., + alpha=0.25, + gamma=2, + eps=1e-12, + binary_input=False): + self.weight = weight + self.alpha = alpha + self.gamma = gamma + self.eps = eps + self.binary_input = binary_input + + def _focal_loss_cost(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + (num_query, num_class). + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + + Returns: + torch.Tensor: cls_cost value with weight + """ + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + + cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] + return cls_cost * self.weight + + def _mask_focal_loss_cost(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classfication logits + in shape (num_query, d1, ..., dn), dtype=torch.float32. + gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn), + dtype=torch.long. Labels should be binary. + + Returns: + Tensor: Focal cost matrix with weight in shape\ + (num_query, num_gt). + """ + cls_pred = cls_pred.flatten(1) + gt_labels = gt_labels.flatten(1).float() + n = cls_pred.shape[1] + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + + cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ + torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) + return cls_cost / n * self.weight + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classfication logits. + gt_labels (Tensor)): Labels. + + Returns: + Tensor: Focal cost matrix with weight in shape\ + (num_query, num_gt). + """ + if self.binary_input: + return self._mask_focal_loss_cost(cls_pred, gt_labels) + else: + return self._focal_loss_cost(cls_pred, gt_labels) + + +@MATCH_COST.register_module() +class ClassificationCost: + """ClsSoftmaxCost. + + Args: + weight (int | float, optional): loss_weight + + Examples: + >>> from mmdet.core.bbox.match_costs.match_cost import \ + ... ClassificationCost + >>> import torch + >>> self = ClassificationCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3430, -0.3525, -0.3045], + [-0.3077, -0.2931, -0.3992], + [-0.3664, -0.3455, -0.2881], + [-0.3343, -0.2701, -0.3956]]) + """ + + def __init__(self, weight=1.): + self.weight = weight + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + (num_query, num_class). + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + + Returns: + torch.Tensor: cls_cost value with weight + """ + # Following the official DETR repo, contrary to the loss that + # NLL is used, we approximate it in 1 - cls_score[gt_label]. 
+ # The 1 is a constant that doesn't change the matching, + # so it can be omitted. + cls_score = cls_pred.softmax(-1) + cls_cost = -cls_score[:, gt_labels] + return cls_cost * self.weight + + +@MATCH_COST.register_module() +class IoUCost: + """IoUCost. + + Args: + iou_mode (str, optional): iou mode such as 'iou' | 'giou' + weight (int | float, optional): loss weight + + Examples: + >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost + >>> import torch + >>> self = IoUCost() + >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) + >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) + >>> self(bboxes, gt_bboxes) + tensor([[-0.1250, 0.1667], + [ 0.1667, -0.5000]]) + """ + + def __init__(self, iou_mode='giou', weight=1.): + self.weight = weight + self.iou_mode = iou_mode + + def __call__(self, bboxes, gt_bboxes): + """ + Args: + bboxes (Tensor): Predicted boxes with unnormalized coordinates + (x1, y1, x2, y2). Shape (num_query, 4). + gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape (num_gt, 4). + + Returns: + torch.Tensor: iou_cost value with weight + """ + # overlaps: [num_bboxes, num_gt] + overlaps = bbox_overlaps( + bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False) + # The 1 is a constant that doesn't change the matching, so omitted. + iou_cost = -overlaps + return iou_cost * self.weight + + +@MATCH_COST.register_module() +class DiceCost: + """Cost of mask assignments based on dice losses. + + Args: + weight (int | float, optional): loss_weight. Defaults to 1. + pred_act (bool, optional): Whether to apply sigmoid to mask_pred. + Defaults to False. + eps (float, optional): default 1e-12. + naive_dice (bool, optional): If True, use the naive dice loss + in which the power of the number in the denominator is + the first power. If Flase, use the second power that + is adopted by K-Net and SOLO. + Defaults to True. + """ + + def __init__(self, weight=1., pred_act=False, eps=1e-3, naive_dice=True): + self.weight = weight + self.pred_act = pred_act + self.eps = eps + self.naive_dice = naive_dice + + def binary_mask_dice_loss(self, mask_preds, gt_masks): + """ + Args: + mask_preds (Tensor): Mask prediction in shape (num_query, *). + gt_masks (Tensor): Ground truth in shape (num_gt, *) + store 0 or 1, 0 for negative class and 1 for + positive class. + + Returns: + Tensor: Dice cost matrix in shape (num_query, num_gt). + """ + mask_preds = mask_preds.flatten(1) + gt_masks = gt_masks.flatten(1).float() + numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks) + if self.naive_dice: + denominator = mask_preds.sum(-1)[:, None] + \ + gt_masks.sum(-1)[None, :] + else: + denominator = mask_preds.pow(2).sum(1)[:, None] + \ + gt_masks.pow(2).sum(1)[None, :] + loss = 1 - (numerator + self.eps) / (denominator + self.eps) + return loss + + def __call__(self, mask_preds, gt_masks): + """ + Args: + mask_preds (Tensor): Mask prediction logits in shape (num_query, *) + gt_masks (Tensor): Ground truth in shape (num_gt, *) + + Returns: + Tensor: Dice cost matrix with weight in shape (num_query, num_gt). + """ + if self.pred_act: + mask_preds = mask_preds.sigmoid() + dice_cost = self.binary_mask_dice_loss(mask_preds, gt_masks) + return dice_cost * self.weight + + +@MATCH_COST.register_module() +class CrossEntropyLossCost: + """CrossEntropyLossCost. + + Args: + weight (int | float, optional): loss weight. Defaults to 1. + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + of softmax. Defaults to True. 
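+
+    Note:
+        With sigmoid activation, the cost between a prediction and a
+        ground-truth target is the binary cross entropy of the prediction
+        against that target, averaged over the flattened elements (see
+        ``_binary_cross_entropy`` below) and scaled by ``weight``.
+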
+ Examples: + >>> from mmdet.core.bbox.match_costs import CrossEntropyLossCost + >>> import torch + >>> bce = CrossEntropyLossCost(use_sigmoid=True) + >>> cls_pred = torch.tensor([[7.6, 1.2], [-1.3, 10]]) + >>> gt_labels = torch.tensor([[1, 1], [1, 0]]) + >>> print(bce(cls_pred, gt_labels)) + """ + + def __init__(self, weight=1., use_sigmoid=True): + assert use_sigmoid, 'use_sigmoid = False is not supported yet.' + self.weight = weight + self.use_sigmoid = use_sigmoid + + def _binary_cross_entropy(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): The prediction with shape (num_query, 1, *) or + (num_query, *). + gt_labels (Tensor): The learning label of prediction with + shape (num_gt, *). + + Returns: + Tensor: Cross entropy cost matrix in shape (num_query, num_gt). + """ + cls_pred = cls_pred.flatten(1).float() + gt_labels = gt_labels.flatten(1).float() + n = cls_pred.shape[1] + pos = F.binary_cross_entropy_with_logits( + cls_pred, torch.ones_like(cls_pred), reduction='none') + neg = F.binary_cross_entropy_with_logits( + cls_pred, torch.zeros_like(cls_pred), reduction='none') + cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \ + torch.einsum('nc,mc->nm', neg, 1 - gt_labels) + cls_cost = cls_cost / n + + return cls_cost + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits. + gt_labels (Tensor): Labels. + + Returns: + Tensor: Cross entropy cost matrix with weight in + shape (num_query, num_gt). + """ + if self.use_sigmoid: + cls_cost = self._binary_cross_entropy(cls_pred, gt_labels) + else: + raise NotImplementedError + + return cls_cost * self.weight diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/__init__.py b/downstream/mmdetection/mmdet/core/bbox/samplers/__init__.py new file mode 100644 index 0000000..f58505b --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_sampler import BaseSampler +from .combined_sampler import CombinedSampler +from .instance_balanced_pos_sampler import InstanceBalancedPosSampler +from .iou_balanced_neg_sampler import IoUBalancedNegSampler +from .mask_pseudo_sampler import MaskPseudoSampler +from .mask_sampling_result import MaskSamplingResult +from .ohem_sampler import OHEMSampler +from .pseudo_sampler import PseudoSampler +from .random_sampler import RandomSampler +from .sampling_result import SamplingResult +from .score_hlr_sampler import ScoreHLRSampler + +__all__ = [ + 'BaseSampler', 'PseudoSampler', 'RandomSampler', + 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', + 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler', + 'MaskSamplingResult' +] diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/base_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/base_sampler.py new file mode 100644 index 0000000..bd15c7c --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/base_sampler.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from abc import ABCMeta, abstractmethod + +import torch + +from .sampling_result import SamplingResult + + +class BaseSampler(metaclass=ABCMeta): + """Base class of samplers.""" + + def __init__(self, + num, + pos_fraction, + neg_pos_ub=-1, + add_gt_as_proposals=True, + **kwargs): + self.num = num + self.pos_fraction = pos_fraction + self.neg_pos_ub = neg_pos_ub + self.add_gt_as_proposals = add_gt_as_proposals + self.pos_sampler = self + self.neg_sampler = self + + @abstractmethod + def _sample_pos(self, assign_result, num_expected, **kwargs): + """Sample positive samples.""" + pass + + @abstractmethod + def _sample_neg(self, assign_result, num_expected, **kwargs): + """Sample negative samples.""" + pass + + def sample(self, + assign_result, + bboxes, + gt_bboxes, + gt_labels=None, + **kwargs): + """Sample positive and negative bboxes. + + This is a simple implementation of bbox sampling given candidates, + assigning results and ground truth bboxes. + + Args: + assign_result (:obj:`AssignResult`): Bbox assigning results. + bboxes (Tensor): Boxes to be sampled from. + gt_bboxes (Tensor): Ground truth bboxes. + gt_labels (Tensor, optional): Class labels of ground truth bboxes. + + Returns: + :obj:`SamplingResult`: Sampling result. + + Example: + >>> from mmdet.core.bbox import RandomSampler + >>> from mmdet.core.bbox import AssignResult + >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes + >>> rng = ensure_rng(None) + >>> assign_result = AssignResult.random(rng=rng) + >>> bboxes = random_boxes(assign_result.num_preds, rng=rng) + >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) + >>> gt_labels = None + >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, + >>> add_gt_as_proposals=False) + >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels) + """ + if len(bboxes.shape) < 2: + bboxes = bboxes[None, :] + + bboxes = bboxes[:, :4] + + gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) + if self.add_gt_as_proposals and len(gt_bboxes) > 0: + if gt_labels is None: + raise ValueError( + 'gt_labels must be given when add_gt_as_proposals is True') + bboxes = torch.cat([gt_bboxes, bboxes], dim=0) + assign_result.add_gt_(gt_labels) + gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) + gt_flags = torch.cat([gt_ones, gt_flags]) + + num_expected_pos = int(self.num * self.pos_fraction) + pos_inds = self.pos_sampler._sample_pos( + assign_result, num_expected_pos, bboxes=bboxes, **kwargs) + # We found that sampled indices have duplicated items occasionally. + # (may be a bug of PyTorch) + pos_inds = pos_inds.unique() + num_sampled_pos = pos_inds.numel() + num_expected_neg = self.num - num_sampled_pos + if self.neg_pos_ub >= 0: + _pos = max(1, num_sampled_pos) + neg_upper_bound = int(self.neg_pos_ub * _pos) + if num_expected_neg > neg_upper_bound: + num_expected_neg = neg_upper_bound + neg_inds = self.neg_sampler._sample_neg( + assign_result, num_expected_neg, bboxes=bboxes, **kwargs) + neg_inds = neg_inds.unique() + + sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags) + return sampling_result diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/combined_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/combined_sampler.py new file mode 100644 index 0000000..4f6d86f --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/combined_sampler.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from ..builder import BBOX_SAMPLERS, build_sampler +from .base_sampler import BaseSampler + + +@BBOX_SAMPLERS.register_module() +class CombinedSampler(BaseSampler): + """A sampler that combines positive sampler and negative sampler.""" + + def __init__(self, pos_sampler, neg_sampler, **kwargs): + super(CombinedSampler, self).__init__(**kwargs) + self.pos_sampler = build_sampler(pos_sampler, **kwargs) + self.neg_sampler = build_sampler(neg_sampler, **kwargs) + + def _sample_pos(self, **kwargs): + """Sample positive samples.""" + raise NotImplementedError + + def _sample_neg(self, **kwargs): + """Sample negative samples.""" + raise NotImplementedError diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py new file mode 100644 index 0000000..5e0d9cc --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from ..builder import BBOX_SAMPLERS +from .random_sampler import RandomSampler + + +@BBOX_SAMPLERS.register_module() +class InstanceBalancedPosSampler(RandomSampler): + """Instance balanced sampler that samples equal number of positive samples + for each instance.""" + + def _sample_pos(self, assign_result, num_expected, **kwargs): + """Sample positive boxes. + + Args: + assign_result (:obj:`AssignResult`): The assigned results of boxes. + num_expected (int): The number of expected positive samples + + Returns: + Tensor or ndarray: sampled indices. + """ + pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + unique_gt_inds = assign_result.gt_inds[pos_inds].unique() + num_gts = len(unique_gt_inds) + num_per_gt = int(round(num_expected / float(num_gts)) + 1) + sampled_inds = [] + for i in unique_gt_inds: + inds = torch.nonzero( + assign_result.gt_inds == i.item(), as_tuple=False) + if inds.numel() != 0: + inds = inds.squeeze(1) + else: + continue + if len(inds) > num_per_gt: + inds = self.random_choice(inds, num_per_gt) + sampled_inds.append(inds) + sampled_inds = torch.cat(sampled_inds) + if len(sampled_inds) < num_expected: + num_extra = num_expected - len(sampled_inds) + extra_inds = np.array( + list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) + if len(extra_inds) > num_extra: + extra_inds = self.random_choice(extra_inds, num_extra) + extra_inds = torch.from_numpy(extra_inds).to( + assign_result.gt_inds.device).long() + sampled_inds = torch.cat([sampled_inds, extra_inds]) + elif len(sampled_inds) > num_expected: + sampled_inds = self.random_choice(sampled_inds, num_expected) + return sampled_inds diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py new file mode 100644 index 0000000..56e2874 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from ..builder import BBOX_SAMPLERS +from .random_sampler import RandomSampler + + +@BBOX_SAMPLERS.register_module() +class IoUBalancedNegSampler(RandomSampler): + """IoU Balanced Sampling. 
+ + arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) + + Sampling proposals according to their IoU. `floor_fraction` of needed RoIs + are sampled from proposals whose IoU are lower than `floor_thr` randomly. + The others are sampled from proposals whose IoU are higher than + `floor_thr`. These proposals are sampled from some bins evenly, which are + split by `num_bins` via IoU evenly. + + Args: + num (int): number of proposals. + pos_fraction (float): fraction of positive proposals. + floor_thr (float): threshold (minimum) IoU for IoU balanced sampling, + set to -1 if all using IoU balanced sampling. + floor_fraction (float): sampling fraction of proposals under floor_thr. + num_bins (int): number of bins in IoU balanced sampling. + """ + + def __init__(self, + num, + pos_fraction, + floor_thr=-1, + floor_fraction=0, + num_bins=3, + **kwargs): + super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, + **kwargs) + assert floor_thr >= 0 or floor_thr == -1 + assert 0 <= floor_fraction <= 1 + assert num_bins >= 1 + + self.floor_thr = floor_thr + self.floor_fraction = floor_fraction + self.num_bins = num_bins + + def sample_via_interval(self, max_overlaps, full_set, num_expected): + """Sample according to the iou interval. + + Args: + max_overlaps (torch.Tensor): IoU between bounding boxes and ground + truth boxes. + full_set (set(int)): A full set of indices of boxes。 + num_expected (int): Number of expected samples。 + + Returns: + np.ndarray: Indices of samples + """ + max_iou = max_overlaps.max() + iou_interval = (max_iou - self.floor_thr) / self.num_bins + per_num_expected = int(num_expected / self.num_bins) + + sampled_inds = [] + for i in range(self.num_bins): + start_iou = self.floor_thr + i * iou_interval + end_iou = self.floor_thr + (i + 1) * iou_interval + tmp_set = set( + np.where( + np.logical_and(max_overlaps >= start_iou, + max_overlaps < end_iou))[0]) + tmp_inds = list(tmp_set & full_set) + if len(tmp_inds) > per_num_expected: + tmp_sampled_set = self.random_choice(tmp_inds, + per_num_expected) + else: + tmp_sampled_set = np.array(tmp_inds, dtype=np.int) + sampled_inds.append(tmp_sampled_set) + + sampled_inds = np.concatenate(sampled_inds) + if len(sampled_inds) < num_expected: + num_extra = num_expected - len(sampled_inds) + extra_inds = np.array(list(full_set - set(sampled_inds))) + if len(extra_inds) > num_extra: + extra_inds = self.random_choice(extra_inds, num_extra) + sampled_inds = np.concatenate([sampled_inds, extra_inds]) + + return sampled_inds + + def _sample_neg(self, assign_result, num_expected, **kwargs): + """Sample negative boxes. + + Args: + assign_result (:obj:`AssignResult`): The assigned results of boxes. + num_expected (int): The number of expected negative samples + + Returns: + Tensor or ndarray: sampled indices. 
+ """ + neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) + if neg_inds.numel() != 0: + neg_inds = neg_inds.squeeze(1) + if len(neg_inds) <= num_expected: + return neg_inds + else: + max_overlaps = assign_result.max_overlaps.cpu().numpy() + # balance sampling for negative samples + neg_set = set(neg_inds.cpu().numpy()) + + if self.floor_thr > 0: + floor_set = set( + np.where( + np.logical_and(max_overlaps >= 0, + max_overlaps < self.floor_thr))[0]) + iou_sampling_set = set( + np.where(max_overlaps >= self.floor_thr)[0]) + elif self.floor_thr == 0: + floor_set = set(np.where(max_overlaps == 0)[0]) + iou_sampling_set = set( + np.where(max_overlaps > self.floor_thr)[0]) + else: + floor_set = set() + iou_sampling_set = set( + np.where(max_overlaps > self.floor_thr)[0]) + # for sampling interval calculation + self.floor_thr = 0 + + floor_neg_inds = list(floor_set & neg_set) + iou_sampling_neg_inds = list(iou_sampling_set & neg_set) + num_expected_iou_sampling = int(num_expected * + (1 - self.floor_fraction)) + if len(iou_sampling_neg_inds) > num_expected_iou_sampling: + if self.num_bins >= 2: + iou_sampled_inds = self.sample_via_interval( + max_overlaps, set(iou_sampling_neg_inds), + num_expected_iou_sampling) + else: + iou_sampled_inds = self.random_choice( + iou_sampling_neg_inds, num_expected_iou_sampling) + else: + iou_sampled_inds = np.array( + iou_sampling_neg_inds, dtype=np.int) + num_expected_floor = num_expected - len(iou_sampled_inds) + if len(floor_neg_inds) > num_expected_floor: + sampled_floor_inds = self.random_choice( + floor_neg_inds, num_expected_floor) + else: + sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int) + sampled_inds = np.concatenate( + (sampled_floor_inds, iou_sampled_inds)) + if len(sampled_inds) < num_expected: + num_extra = num_expected - len(sampled_inds) + extra_inds = np.array(list(neg_set - set(sampled_inds))) + if len(extra_inds) > num_extra: + extra_inds = self.random_choice(extra_inds, num_extra) + sampled_inds = np.concatenate((sampled_inds, extra_inds)) + sampled_inds = torch.from_numpy(sampled_inds).long().to( + assign_result.gt_inds.device) + return sampled_inds diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/mask_pseudo_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/mask_pseudo_sampler.py new file mode 100644 index 0000000..b5f6965 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/mask_pseudo_sampler.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""copy from +https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" + +import torch + +from mmdet.core.bbox.builder import BBOX_SAMPLERS +from .base_sampler import BaseSampler +from .mask_sampling_result import MaskSamplingResult + + +@BBOX_SAMPLERS.register_module() +class MaskPseudoSampler(BaseSampler): + """A pseudo sampler that does not do sampling actually.""" + + def __init__(self, **kwargs): + pass + + def _sample_pos(self, **kwargs): + """Sample positive samples.""" + raise NotImplementedError + + def _sample_neg(self, **kwargs): + """Sample negative samples.""" + raise NotImplementedError + + def sample(self, assign_result, masks, gt_masks, **kwargs): + """Directly returns the positive and negative indices of samples. 
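+        The behaviour mirrors :obj:`PseudoSampler`, but it operates on mask
+        tensors and wraps the result in a :obj:`MaskSamplingResult`.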
+ + Args: + assign_result (:obj:`AssignResult`): Assigned results + masks (torch.Tensor): Bounding boxes + gt_masks (torch.Tensor): Ground truth boxes + Returns: + :obj:`SamplingResult`: sampler results + """ + pos_inds = torch.nonzero( + assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero( + assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() + gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8) + sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks, + gt_masks, assign_result, gt_flags) + return sampling_result diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/mask_sampling_result.py b/downstream/mmdetection/mmdet/core/bbox/samplers/mask_sampling_result.py new file mode 100644 index 0000000..3d10943 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/mask_sampling_result.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""copy from +https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" + +import torch + +from .sampling_result import SamplingResult + + +class MaskSamplingResult(SamplingResult): + """Mask sampling result.""" + + def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result, + gt_flags): + self.pos_inds = pos_inds + self.neg_inds = neg_inds + self.pos_masks = masks[pos_inds] + self.neg_masks = masks[neg_inds] + self.pos_is_gt = gt_flags[pos_inds] + + self.num_gts = gt_masks.shape[0] + self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + + if gt_masks.numel() == 0: + # hack for index error case + assert self.pos_assigned_gt_inds.numel() == 0 + self.pos_gt_masks = torch.empty_like(gt_masks) + else: + self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :] + + if assign_result.labels is not None: + self.pos_gt_labels = assign_result.labels[pos_inds] + else: + self.pos_gt_labels = None + + @property + def masks(self): + """torch.Tensor: concatenated positive and negative boxes""" + return torch.cat([self.pos_masks, self.neg_masks]) + + def __nice__(self): + data = self.info.copy() + data['pos_masks'] = data.pop('pos_masks').shape + data['neg_masks'] = data.pop('neg_masks').shape + parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] + body = ' ' + ',\n '.join(parts) + return '{\n' + body + '\n}' + + @property + def info(self): + """Returns a dictionary of info about the object.""" + return { + 'pos_inds': self.pos_inds, + 'neg_inds': self.neg_inds, + 'pos_masks': self.pos_masks, + 'neg_masks': self.neg_masks, + 'pos_is_gt': self.pos_is_gt, + 'num_gts': self.num_gts, + 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, + } diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/ohem_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/ohem_sampler.py new file mode 100644 index 0000000..7eb0666 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/ohem_sampler.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_SAMPLERS +from ..transforms import bbox2roi +from .base_sampler import BaseSampler + + +@BBOX_SAMPLERS.register_module() +class OHEMSampler(BaseSampler): + r"""Online Hard Example Mining Sampler described in `Training Region-based + Object Detectors with Online Hard Example Mining + `_. 
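+
+    Args:
+        num (int): Number of samples.
+        pos_fraction (float): Fraction of positive samples.
+        context (:class:`BaseRoIHead`): RoI head that the sampler belongs
+            to; its ``bbox_head`` is used to compute per-sample losses for
+            hard mining.
+        neg_pos_ub (int, optional): Upper bound of the ratio of negative to
+            positive samples. Defaults to -1 (no upper bound).
+        add_gt_as_proposals (bool, optional): Whether to add ground truth
+            boxes as proposals. Defaults to True.
+        loss_key (str, optional): Key of the loss dict returned by
+            ``bbox_head.loss`` that is used to rank hard examples.
+            Defaults to 'loss_cls'.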
+ """ + + def __init__(self, + num, + pos_fraction, + context, + neg_pos_ub=-1, + add_gt_as_proposals=True, + loss_key='loss_cls', + **kwargs): + super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, + add_gt_as_proposals) + self.context = context + if not hasattr(self.context, 'num_stages'): + self.bbox_head = self.context.bbox_head + else: + self.bbox_head = self.context.bbox_head[self.context.current_stage] + + self.loss_key = loss_key + + def hard_mining(self, inds, num_expected, bboxes, labels, feats): + with torch.no_grad(): + rois = bbox2roi([bboxes]) + if not hasattr(self.context, 'num_stages'): + bbox_results = self.context._bbox_forward(feats, rois) + else: + bbox_results = self.context._bbox_forward( + self.context.current_stage, feats, rois) + cls_score = bbox_results['cls_score'] + loss = self.bbox_head.loss( + cls_score=cls_score, + bbox_pred=None, + rois=rois, + labels=labels, + label_weights=cls_score.new_ones(cls_score.size(0)), + bbox_targets=None, + bbox_weights=None, + reduction_override='none')[self.loss_key] + _, topk_loss_inds = loss.topk(num_expected) + return inds[topk_loss_inds] + + def _sample_pos(self, + assign_result, + num_expected, + bboxes=None, + feats=None, + **kwargs): + """Sample positive boxes. + + Args: + assign_result (:obj:`AssignResult`): Assigned results + num_expected (int): Number of expected positive samples + bboxes (torch.Tensor, optional): Boxes. Defaults to None. + feats (list[torch.Tensor], optional): Multi-level features. + Defaults to None. + + Returns: + torch.Tensor: Indices of positive samples + """ + # Sample some hard positive samples + pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], + assign_result.labels[pos_inds], feats) + + def _sample_neg(self, + assign_result, + num_expected, + bboxes=None, + feats=None, + **kwargs): + """Sample negative boxes. + + Args: + assign_result (:obj:`AssignResult`): Assigned results + num_expected (int): Number of expected negative samples + bboxes (torch.Tensor, optional): Boxes. Defaults to None. + feats (list[torch.Tensor], optional): Multi-level features. + Defaults to None. + + Returns: + torch.Tensor: Indices of negative samples + """ + # Sample some hard negative samples + neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) + if neg_inds.numel() != 0: + neg_inds = neg_inds.squeeze(1) + if len(neg_inds) <= num_expected: + return neg_inds + else: + neg_labels = assign_result.labels.new_empty( + neg_inds.size(0)).fill_(self.bbox_head.num_classes) + return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], + neg_labels, feats) diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/pseudo_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/pseudo_sampler.py new file mode 100644 index 0000000..b5ce298 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/pseudo_sampler.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from ..builder import BBOX_SAMPLERS +from .base_sampler import BaseSampler +from .sampling_result import SamplingResult + + +@BBOX_SAMPLERS.register_module() +class PseudoSampler(BaseSampler): + """A pseudo sampler that does not do sampling actually.""" + + def __init__(self, **kwargs): + pass + + def _sample_pos(self, **kwargs): + """Sample positive samples.""" + raise NotImplementedError + + def _sample_neg(self, **kwargs): + """Sample negative samples.""" + raise NotImplementedError + + def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs): + """Directly returns the positive and negative indices of samples. + + Args: + assign_result (:obj:`AssignResult`): Assigned results + bboxes (torch.Tensor): Bounding boxes + gt_bboxes (torch.Tensor): Ground truth boxes + + Returns: + :obj:`SamplingResult`: sampler results + """ + pos_inds = torch.nonzero( + assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero( + assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() + gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) + sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags) + return sampling_result diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/random_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/random_sampler.py new file mode 100644 index 0000000..d09207e --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/random_sampler.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import BBOX_SAMPLERS +from .base_sampler import BaseSampler + + +@BBOX_SAMPLERS.register_module() +class RandomSampler(BaseSampler): + """Random sampler. + + Args: + num (int): Number of samples + pos_fraction (float): Fraction of positive samples + neg_pos_up (int, optional): Upper bound number of negative and + positive samples. Defaults to -1. + add_gt_as_proposals (bool, optional): Whether to add ground truth + boxes as proposals. Defaults to True. + """ + + def __init__(self, + num, + pos_fraction, + neg_pos_ub=-1, + add_gt_as_proposals=True, + **kwargs): + from mmdet.core.bbox import demodata + super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, + add_gt_as_proposals) + self.rng = demodata.ensure_rng(kwargs.get('rng', None)) + + def random_choice(self, gallery, num): + """Random select some elements from the gallery. + + If `gallery` is a Tensor, the returned indices will be a Tensor; + If `gallery` is a ndarray or list, the returned indices will be a + ndarray. + + Args: + gallery (Tensor | ndarray | list): indices pool. + num (int): expected sample num. + + Returns: + Tensor or ndarray: sampled indices. + """ + assert len(gallery) >= num + + is_tensor = isinstance(gallery, torch.Tensor) + if not is_tensor: + if torch.cuda.is_available(): + device = torch.cuda.current_device() + else: + device = 'cpu' + gallery = torch.tensor(gallery, dtype=torch.long, device=device) + # This is a temporary fix. We can revert the following code + # when PyTorch fixes the abnormal return of torch.randperm. 
+ # See: https://github.com/open-mmlab/mmdetection/pull/5014 + perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device) + rand_inds = gallery[perm] + if not is_tensor: + rand_inds = rand_inds.cpu().numpy() + return rand_inds + + def _sample_pos(self, assign_result, num_expected, **kwargs): + """Randomly sample some positive samples.""" + pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + return self.random_choice(pos_inds, num_expected) + + def _sample_neg(self, assign_result, num_expected, **kwargs): + """Randomly sample some negative samples.""" + neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) + if neg_inds.numel() != 0: + neg_inds = neg_inds.squeeze(1) + if len(neg_inds) <= num_expected: + return neg_inds + else: + return self.random_choice(neg_inds, num_expected) diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/sampling_result.py b/downstream/mmdetection/mmdet/core/bbox/samplers/sampling_result.py new file mode 100644 index 0000000..50676d0 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/sampling_result.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.utils import util_mixins + + +class SamplingResult(util_mixins.NiceRepr): + """Bbox sampling result. + + Example: + >>> # xdoctest: +IGNORE_WANT + >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA + >>> self = SamplingResult.random(rng=10) + >>> print(f'self = {self}') + self = + """ + + def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, + gt_flags): + self.pos_inds = pos_inds + self.neg_inds = neg_inds + self.pos_bboxes = bboxes[pos_inds] + self.neg_bboxes = bboxes[neg_inds] + self.pos_is_gt = gt_flags[pos_inds] + + self.num_gts = gt_bboxes.shape[0] + self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + + if gt_bboxes.numel() == 0: + # hack for index error case + assert self.pos_assigned_gt_inds.numel() == 0 + self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4) + else: + if len(gt_bboxes.shape) < 2: + gt_bboxes = gt_bboxes.view(-1, 4) + + self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long(), :] + + if assign_result.labels is not None: + self.pos_gt_labels = assign_result.labels[pos_inds] + else: + self.pos_gt_labels = None + + @property + def bboxes(self): + """torch.Tensor: concatenated positive and negative boxes""" + return torch.cat([self.pos_bboxes, self.neg_bboxes]) + + def to(self, device): + """Change the device of the data inplace. 
+ + Example: + >>> self = SamplingResult.random() + >>> print(f'self = {self.to(None)}') + >>> # xdoctest: +REQUIRES(--gpu) + >>> print(f'self = {self.to(0)}') + """ + _dict = self.__dict__ + for key, value in _dict.items(): + if isinstance(value, torch.Tensor): + _dict[key] = value.to(device) + return self + + def __nice__(self): + data = self.info.copy() + data['pos_bboxes'] = data.pop('pos_bboxes').shape + data['neg_bboxes'] = data.pop('neg_bboxes').shape + parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] + body = ' ' + ',\n '.join(parts) + return '{\n' + body + '\n}' + + @property + def info(self): + """Returns a dictionary of info about the object.""" + return { + 'pos_inds': self.pos_inds, + 'neg_inds': self.neg_inds, + 'pos_bboxes': self.pos_bboxes, + 'neg_bboxes': self.neg_bboxes, + 'pos_is_gt': self.pos_is_gt, + 'num_gts': self.num_gts, + 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, + } + + @classmethod + def random(cls, rng=None, **kwargs): + """ + Args: + rng (None | int | numpy.random.RandomState): seed or state. + kwargs (keyword arguments): + - num_preds: number of predicted boxes + - num_gts: number of true boxes + - p_ignore (float): probability of a predicted box assigned to \ + an ignored truth. + - p_assigned (float): probability of a predicted box not being \ + assigned. + - p_use_label (float | bool): with labels or not. + + Returns: + :obj:`SamplingResult`: Randomly generated sampling result. + + Example: + >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA + >>> self = SamplingResult.random() + >>> print(self.__dict__) + """ + from mmdet.core.bbox import demodata + from mmdet.core.bbox.assigners.assign_result import AssignResult + from mmdet.core.bbox.samplers.random_sampler import RandomSampler + rng = demodata.ensure_rng(rng) + + # make probabalistic? + num = 32 + pos_fraction = 0.5 + neg_pos_ub = -1 + + assign_result = AssignResult.random(rng=rng, **kwargs) + + # Note we could just compute an assignment + bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng) + gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng) + + if rng.rand() > 0.2: + # sometimes algorithms squeeze their data, be robust to that + gt_bboxes = gt_bboxes.squeeze() + bboxes = bboxes.squeeze() + + if assign_result.labels is None: + gt_labels = None + else: + gt_labels = None # todo + + if gt_labels is None: + add_gt_as_proposals = False + else: + add_gt_as_proposals = True # make probabalistic? + + sampler = RandomSampler( + num, + pos_fraction, + neg_pos_ub=neg_pos_ub, + add_gt_as_proposals=add_gt_as_proposals, + rng=rng) + self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) + return self diff --git a/downstream/mmdetection/mmdet/core/bbox/samplers/score_hlr_sampler.py b/downstream/mmdetection/mmdet/core/bbox/samplers/score_hlr_sampler.py new file mode 100644 index 0000000..f4be9b8 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/samplers/score_hlr_sampler.py @@ -0,0 +1,265 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.ops import nms_match + +from ..builder import BBOX_SAMPLERS +from ..transforms import bbox2roi +from .base_sampler import BaseSampler +from .sampling_result import SamplingResult + + +@BBOX_SAMPLERS.register_module() +class ScoreHLRSampler(BaseSampler): + r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample + Attention in Object Detection `_. + + Score hierarchical local rank (HLR) differentiates with RandomSampler in + negative part. 
It firstly computes Score-HLR in a two-step way, + then linearly maps score hlr to the loss weights. + + Args: + num (int): Total number of sampled RoIs. + pos_fraction (float): Fraction of positive samples. + context (:class:`BaseRoIHead`): RoI head that the sampler belongs to. + neg_pos_ub (int): Upper bound of the ratio of num negative to num + positive, -1 means no upper bound. + add_gt_as_proposals (bool): Whether to add ground truth as proposals. + k (float): Power of the non-linear mapping. + bias (float): Shift of the non-linear mapping. + score_thr (float): Minimum score that a negative sample is to be + considered as valid bbox. + """ + + def __init__(self, + num, + pos_fraction, + context, + neg_pos_ub=-1, + add_gt_as_proposals=True, + k=0.5, + bias=0, + score_thr=0.05, + iou_thr=0.5, + **kwargs): + super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) + self.k = k + self.bias = bias + self.score_thr = score_thr + self.iou_thr = iou_thr + self.context = context + # context of cascade detectors is a list, so distinguish them here. + if not hasattr(context, 'num_stages'): + self.bbox_roi_extractor = context.bbox_roi_extractor + self.bbox_head = context.bbox_head + self.with_shared_head = context.with_shared_head + if self.with_shared_head: + self.shared_head = context.shared_head + else: + self.bbox_roi_extractor = context.bbox_roi_extractor[ + context.current_stage] + self.bbox_head = context.bbox_head[context.current_stage] + + @staticmethod + def random_choice(gallery, num): + """Randomly select some elements from the gallery. + + If `gallery` is a Tensor, the returned indices will be a Tensor; + If `gallery` is a ndarray or list, the returned indices will be a + ndarray. + + Args: + gallery (Tensor | ndarray | list): indices pool. + num (int): expected sample num. + + Returns: + Tensor or ndarray: sampled indices. + """ + assert len(gallery) >= num + + is_tensor = isinstance(gallery, torch.Tensor) + if not is_tensor: + if torch.cuda.is_available(): + device = torch.cuda.current_device() + else: + device = 'cpu' + gallery = torch.tensor(gallery, dtype=torch.long, device=device) + perm = torch.randperm(gallery.numel(), device=gallery.device)[:num] + rand_inds = gallery[perm] + if not is_tensor: + rand_inds = rand_inds.cpu().numpy() + return rand_inds + + def _sample_pos(self, assign_result, num_expected, **kwargs): + """Randomly sample some positive samples.""" + pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten() + if pos_inds.numel() <= num_expected: + return pos_inds + else: + return self.random_choice(pos_inds, num_expected) + + def _sample_neg(self, + assign_result, + num_expected, + bboxes, + feats=None, + img_meta=None, + **kwargs): + """Sample negative samples. + + Score-HLR sampler is done in the following steps: + 1. Take the maximum positive score prediction of each negative samples + as s_i. + 2. Filter out negative samples whose s_i <= score_thr, the left samples + are called valid samples. + 3. Use NMS-Match to divide valid samples into different groups, + samples in the same group will greatly overlap with each other + 4. Rank the matched samples in two-steps to get Score-HLR. + (1) In the same group, rank samples with their scores. + (2) In the same score rank across different groups, + rank samples with their scores again. + 5. Linearly map Score-HLR to the final label weights. + + Args: + assign_result (:obj:`AssignResult`): result of assigner. + num_expected (int): Expected number of samples. + bboxes (Tensor): bbox to be sampled. 
+ feats (Tensor): Features come from FPN. + img_meta (dict): Meta information dictionary. + """ + neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten() + num_neg = neg_inds.size(0) + if num_neg == 0: + return neg_inds, None + with torch.no_grad(): + neg_bboxes = bboxes[neg_inds] + neg_rois = bbox2roi([neg_bboxes]) + bbox_result = self.context._bbox_forward(feats, neg_rois) + cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[ + 'bbox_pred'] + + ori_loss = self.bbox_head.loss( + cls_score=cls_score, + bbox_pred=None, + rois=None, + labels=neg_inds.new_full((num_neg, ), + self.bbox_head.num_classes), + label_weights=cls_score.new_ones(num_neg), + bbox_targets=None, + bbox_weights=None, + reduction_override='none')['loss_cls'] + + # filter out samples with the max score lower than score_thr + max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1) + valid_inds = (max_score > self.score_thr).nonzero().view(-1) + invalid_inds = (max_score <= self.score_thr).nonzero().view(-1) + num_valid = valid_inds.size(0) + num_invalid = invalid_inds.size(0) + + num_expected = min(num_neg, num_expected) + num_hlr = min(num_valid, num_expected) + num_rand = num_expected - num_hlr + if num_valid > 0: + valid_rois = neg_rois[valid_inds] + valid_max_score = max_score[valid_inds] + valid_argmax_score = argmax_score[valid_inds] + valid_bbox_pred = bbox_pred[valid_inds] + + # valid_bbox_pred shape: [num_valid, #num_classes, 4] + valid_bbox_pred = valid_bbox_pred.view( + valid_bbox_pred.size(0), -1, 4) + selected_bbox_pred = valid_bbox_pred[range(num_valid), + valid_argmax_score] + pred_bboxes = self.bbox_head.bbox_coder.decode( + valid_rois[:, 1:], selected_bbox_pred) + pred_bboxes_with_score = torch.cat( + [pred_bboxes, valid_max_score[:, None]], -1) + group = nms_match(pred_bboxes_with_score, self.iou_thr) + + # imp: importance + imp = cls_score.new_zeros(num_valid) + for g in group: + g_score = valid_max_score[g] + # g_score has already sorted + rank = g_score.new_tensor(range(g_score.size(0))) + imp[g] = num_valid - rank + g_score + _, imp_rank_inds = imp.sort(descending=True) + _, imp_rank = imp_rank_inds.sort() + hlr_inds = imp_rank_inds[:num_expected] + + if num_rand > 0: + rand_inds = torch.randperm(num_invalid)[:num_rand] + select_inds = torch.cat( + [valid_inds[hlr_inds], invalid_inds[rand_inds]]) + else: + select_inds = valid_inds[hlr_inds] + + neg_label_weights = cls_score.new_ones(num_expected) + + up_bound = max(num_expected, num_valid) + imp_weights = (up_bound - + imp_rank[hlr_inds].float()) / up_bound + neg_label_weights[:num_hlr] = imp_weights + neg_label_weights[num_hlr:] = imp_weights.min() + neg_label_weights = (self.bias + + (1 - self.bias) * neg_label_weights).pow( + self.k) + ori_selected_loss = ori_loss[select_inds] + new_loss = ori_selected_loss * neg_label_weights + norm_ratio = ori_selected_loss.sum() / new_loss.sum() + neg_label_weights *= norm_ratio + else: + neg_label_weights = cls_score.new_ones(num_expected) + select_inds = torch.randperm(num_neg)[:num_expected] + + return neg_inds[select_inds], neg_label_weights + + def sample(self, + assign_result, + bboxes, + gt_bboxes, + gt_labels=None, + img_meta=None, + **kwargs): + """Sample positive and negative bboxes. + + This is a simple implementation of bbox sampling given candidates, + assigning results and ground truth bboxes. + + Args: + assign_result (:obj:`AssignResult`): Bbox assigning results. + bboxes (Tensor): Boxes to be sampled from. + gt_bboxes (Tensor): Ground truth bboxes. 
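The weighting branch above implements step 5 of the docstring: the hierarchical rank is linearly mapped into [0, 1], passed through the `bias`/`k` non-linearity, and rescaled so the weighted loss keeps the original loss sum. A condensed sketch of just that mapping, with illustrative numbers:

```python
# Hedged sketch of the Score-HLR weight mapping above; bias/k and the
# importance values here are illustrative only.
import torch

k, bias = 0.5, 0.0
imp = torch.tensor([3.2, 0.5, 7.1, 1.8])        # importance per valid sample
num_expected = 3

_, imp_rank_inds = imp.sort(descending=True)    # sample indices ordered by rank
_, imp_rank = imp_rank_inds.sort()              # rank of each sample (0 = best)
hlr_inds = imp_rank_inds[:num_expected]         # keep the top-ranked samples

up_bound = max(num_expected, imp.numel())
weights = (up_bound - imp_rank[hlr_inds].float()) / up_bound
weights = (bias + (1 - bias) * weights).pow(k)  # non-linear mapping

ori_loss = torch.tensor([0.9, 0.4, 1.3, 0.7])[hlr_inds]
weights *= ori_loss.sum() / (ori_loss * weights).sum()  # preserve loss scale
print(weights)
```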
+ gt_labels (Tensor, optional): Class labels of ground truth bboxes. + + Returns: + tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative + label weights. + """ + bboxes = bboxes[:, :4] + + gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) + if self.add_gt_as_proposals: + bboxes = torch.cat([gt_bboxes, bboxes], dim=0) + assign_result.add_gt_(gt_labels) + gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) + gt_flags = torch.cat([gt_ones, gt_flags]) + + num_expected_pos = int(self.num * self.pos_fraction) + pos_inds = self.pos_sampler._sample_pos( + assign_result, num_expected_pos, bboxes=bboxes, **kwargs) + num_sampled_pos = pos_inds.numel() + num_expected_neg = self.num - num_sampled_pos + if self.neg_pos_ub >= 0: + _pos = max(1, num_sampled_pos) + neg_upper_bound = int(self.neg_pos_ub * _pos) + if num_expected_neg > neg_upper_bound: + num_expected_neg = neg_upper_bound + neg_inds, neg_label_weights = self.neg_sampler._sample_neg( + assign_result, + num_expected_neg, + bboxes, + img_meta=img_meta, + **kwargs) + + return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags), neg_label_weights diff --git a/downstream/mmdetection/mmdet/core/bbox/transforms.py b/downstream/mmdetection/mmdet/core/bbox/transforms.py new file mode 100644 index 0000000..6d72076 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/bbox/transforms.py @@ -0,0 +1,270 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + + +def find_inside_bboxes(bboxes, img_h, img_w): + """Find bboxes as long as a part of bboxes is inside the image. + + Args: + bboxes (Tensor): Shape (N, 4). + img_h (int): Image height. + img_w (int): Image width. + + Returns: + Tensor: Index of the remaining bboxes. + """ + inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \ + & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0) + return inside_inds + + +def bbox_flip(bboxes, img_shape, direction='horizontal'): + """Flip bboxes horizontally or vertically. + + Args: + bboxes (Tensor): Shape (..., 4*k) + img_shape (tuple): Image shape. + direction (str): Flip direction, options are "horizontal", "vertical", + "diagonal". Default: "horizontal" + + Returns: + Tensor: Flipped bboxes. 
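As a quick usage note for `find_inside_bboxes` defined above: it returns a boolean mask keeping every box with at least partial overlap with the image. A minimal sketch (the import path is assumed from this file's location):

```python
# Hedged sketch: keep only boxes that overlap the image at all.
import torch
from mmdet.core.bbox.transforms import find_inside_bboxes  # assumed module path

bboxes = torch.tensor([[-5., -5., 3., 3.],     # partially inside -> kept
                       [50., 50., 60., 60.],   # fully outside a 32x32 image -> dropped
                       [10., 10., 20., 20.]])  # fully inside -> kept
mask = find_inside_bboxes(bboxes, img_h=32, img_w=32)
print(bboxes[mask])                            # keeps rows 0 and 2
```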
+ """ + assert bboxes.shape[-1] % 4 == 0 + assert direction in ['horizontal', 'vertical', 'diagonal'] + flipped = bboxes.clone() + if direction == 'horizontal': + flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] + flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] + elif direction == 'vertical': + flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] + flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] + else: + flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] + flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] + flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] + flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] + return flipped + + +def bbox_mapping(bboxes, + img_shape, + scale_factor, + flip, + flip_direction='horizontal'): + """Map bboxes from the original image scale to testing scale.""" + new_bboxes = bboxes * bboxes.new_tensor(scale_factor) + if flip: + new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) + return new_bboxes + + +def bbox_mapping_back(bboxes, + img_shape, + scale_factor, + flip, + flip_direction='horizontal'): + """Map bboxes from testing scale to original image scale.""" + new_bboxes = bbox_flip(bboxes, img_shape, + flip_direction) if flip else bboxes + new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) + return new_bboxes.view(bboxes.shape) + + +def bbox2roi(bbox_list): + """Convert a list of bboxes to roi format. + + Args: + bbox_list (list[Tensor]): a list of bboxes corresponding to a batch + of images. + + Returns: + Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] + """ + rois_list = [] + for img_id, bboxes in enumerate(bbox_list): + if bboxes.size(0) > 0: + img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) + rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) + else: + rois = bboxes.new_zeros((0, 5)) + rois_list.append(rois) + rois = torch.cat(rois_list, 0) + return rois + + +def roi2bbox(rois): + """Convert rois to bounding box format. + + Args: + rois (torch.Tensor): RoIs with the shape (n, 5) where the first + column indicates batch id of each RoI. + + Returns: + list[torch.Tensor]: Converted boxes of corresponding rois. + """ + bbox_list = [] + img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) + for img_id in img_ids: + inds = (rois[:, 0] == img_id.item()) + bbox = rois[inds, 1:] + bbox_list.append(bbox) + return bbox_list + + +def bbox2result(bboxes, labels, num_classes): + """Convert detection results to a list of numpy arrays. + + Args: + bboxes (torch.Tensor | np.ndarray): shape (n, 5) + labels (torch.Tensor | np.ndarray): shape (n, ) + num_classes (int): class number, including background class + + Returns: + list(ndarray): bbox results of each class + """ + if bboxes.shape[0] == 0: + return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] + else: + if isinstance(bboxes, torch.Tensor): + bboxes = bboxes.detach().cpu().numpy() + labels = labels.detach().cpu().numpy() + return [bboxes[labels == i, :] for i in range(num_classes)] + + +def distance2bbox(points, distance, max_shape=None): + """Decode distance prediction to bounding box. + + Args: + points (Tensor): Shape (B, N, 2) or (N, 2). + distance (Tensor): Distance from the given point to 4 + boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). 
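`bbox2roi` and `roi2bbox` above are inverse bookkeeping steps around the batch-index column. A short round-trip sketch, assuming the module is importable under this file's path:

```python
# Hedged sketch: round-tripping boxes through the RoI format used above.
import torch
from mmdet.core.bbox.transforms import bbox2roi, roi2bbox  # assumed module path

boxes_img0 = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])
boxes_img1 = torch.tensor([[1., 2., 3., 4.]])

rois = bbox2roi([boxes_img0, boxes_img1])  # shape (3, 5): [batch_ind, x1, y1, x2, y2]
print(rois[:, 0])                          # tensor([0., 0., 1.])
recovered = roi2bbox(rois)                 # list of per-image (n, 4) tensors
assert torch.allclose(recovered[0], boxes_img0)
```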
If priors shape is (B, N, 4), then + the max_shape should be a Sequence[Sequence[int]] + and the length of max_shape should also be B. + + Returns: + Tensor: Boxes with shape (N, 4) or (B, N, 4) + """ + + x1 = points[..., 0] - distance[..., 0] + y1 = points[..., 1] - distance[..., 1] + x2 = points[..., 0] + distance[..., 2] + y2 = points[..., 1] + distance[..., 3] + + bboxes = torch.stack([x1, y1, x2, y2], -1) + + if max_shape is not None: + if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export(): + # speed up + bboxes[:, 0::2].clamp_(min=0, max=max_shape[1]) + bboxes[:, 1::2].clamp_(min=0, max=max_shape[0]) + return bboxes + + # clip bboxes with dynamic `min` and `max` for onnx + if torch.onnx.is_in_onnx_export(): + from mmdet.core.export import dynamic_clip_for_onnx + x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) + bboxes = torch.stack([x1, y1, x2, y2], dim=-1) + return bboxes + if not isinstance(max_shape, torch.Tensor): + max_shape = x1.new_tensor(max_shape) + max_shape = max_shape[..., :2].type_as(x1) + if max_shape.ndim == 2: + assert bboxes.ndim == 3 + assert max_shape.size(0) == bboxes.size(0) + + min_xy = x1.new_tensor(0) + max_xy = torch.cat([max_shape, max_shape], + dim=-1).flip(-1).unsqueeze(-2) + bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) + bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) + + return bboxes + + +def bbox2distance(points, bbox, max_dis=None, eps=0.1): + """Decode bounding box based on distances. + + Args: + points (Tensor): Shape (n, 2), [x, y]. + bbox (Tensor): Shape (n, 4), "xyxy" format + max_dis (float): Upper bound of the distance. + eps (float): a small value to ensure target < max_dis, instead <= + + Returns: + Tensor: Decoded distances. + """ + left = points[:, 0] - bbox[:, 0] + top = points[:, 1] - bbox[:, 1] + right = bbox[:, 2] - points[:, 0] + bottom = bbox[:, 3] - points[:, 1] + if max_dis is not None: + left = left.clamp(min=0, max=max_dis - eps) + top = top.clamp(min=0, max=max_dis - eps) + right = right.clamp(min=0, max=max_dis - eps) + bottom = bottom.clamp(min=0, max=max_dis - eps) + return torch.stack([left, top, right, bottom], -1) + + +def bbox_rescale(bboxes, scale_factor=1.0): + """Rescale bounding box w.r.t. scale_factor. + + Args: + bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois + scale_factor (float): rescale factor + + Returns: + Tensor: Rescaled bboxes. + """ + if bboxes.size(1) == 5: + bboxes_ = bboxes[:, 1:] + inds_ = bboxes[:, 0] + else: + bboxes_ = bboxes + cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5 + cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5 + w = bboxes_[:, 2] - bboxes_[:, 0] + h = bboxes_[:, 3] - bboxes_[:, 1] + w = w * scale_factor + h = h * scale_factor + x1 = cx - 0.5 * w + x2 = cx + 0.5 * w + y1 = cy - 0.5 * h + y2 = cy + 0.5 * h + if bboxes.size(1) == 5: + rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1) + else: + rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) + return rescaled_bboxes + + +def bbox_cxcywh_to_xyxy(bbox): + """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). + + Args: + bbox (Tensor): Shape (n, 4) for bboxes. + + Returns: + Tensor: Converted bboxes. + """ + cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) + bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] + return torch.cat(bbox_new, dim=-1) + + +def bbox_xyxy_to_cxcywh(bbox): + """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). + + Args: + bbox (Tensor): Shape (n, 4) for bboxes. + + Returns: + Tensor: Converted bboxes. 
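When no clamping is applied (`max_dis` and `max_shape` left as `None`), `bbox2distance` and `distance2bbox` above invert each other exactly. A small sketch, again assuming the module path from this file:

```python
# Hedged sketch: point-to-boundary distances and back, with no clamping.
import torch
from mmdet.core.bbox.transforms import bbox2distance, distance2bbox  # assumed path

points = torch.tensor([[4., 4.], [12., 9.]])
boxes = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])

dists = bbox2distance(points, boxes)      # (left, top, right, bottom) per point
recovered = distance2bbox(points, dists)  # back to (x1, y1, x2, y2)
assert torch.allclose(recovered, boxes)
```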
+ """ + x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) + bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] + return torch.cat(bbox_new, dim=-1) diff --git a/downstream/mmdetection/mmdet/core/data_structures/__init__.py b/downstream/mmdetection/mmdet/core/data_structures/__init__.py new file mode 100644 index 0000000..11ab96c --- /dev/null +++ b/downstream/mmdetection/mmdet/core/data_structures/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .general_data import GeneralData +from .instance_data import InstanceData + +__all__ = ['GeneralData', 'InstanceData'] diff --git a/downstream/mmdetection/mmdet/core/data_structures/general_data.py b/downstream/mmdetection/mmdet/core/data_structures/general_data.py new file mode 100644 index 0000000..99316e4 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/data_structures/general_data.py @@ -0,0 +1,326 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import numpy as np +import torch + +from mmdet.utils.util_mixins import NiceRepr + + +class GeneralData(NiceRepr): + """A general data structure of OpenMMlab. + + A data structure that stores the meta information, + the annotations of the images or the model predictions, + which can be used in communication between components. + + The attributes in `GeneralData` are divided into two parts, + the `meta_info_fields` and the `data_fields` respectively. + + - `meta_info_fields`: Usually contains the + information about the image such as filename, + image_shape, pad_shape, etc. All attributes in + it are immutable once set, + but the user can add new meta information with + `set_meta_info` function, all information can be accessed + with methods `meta_info_keys`, `meta_info_values`, + `meta_info_items`. + + - `data_fields`: Annotations or model predictions are + stored. The attributes can be accessed or modified by + dict-like or object-like operations, such as + `.` , `[]`, `in`, `del`, `pop(str)` `get(str)`, `keys()`, + `values()`, `items()`. Users can also apply tensor-like methods + to all obj:`torch.Tensor` in the `data_fileds`, + such as `.cuda()`, `.cpu()`, `.numpy()`, `device`, `.to()` + `.detach()`, `.numpy()` + + Args: + meta_info (dict, optional): A dict contains the meta information + of single image. such as `img_shape`, `scale_factor`, etc. + Default: None. + data (dict, optional): A dict contains annotations of single image or + model predictions. Default: None. 
+ + Examples: + >>> from mmdet.core import GeneralData + >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) + >>> instance_data = GeneralData(meta_info=img_meta) + >>> img_shape in instance_data + True + >>> instance_data.det_labels = torch.LongTensor([0, 1, 2, 3]) + >>> instance_data["det_scores"] = torch.Tensor([0.01, 0.1, 0.2, 0.3]) + >>> print(results) + + >>> instance_data.det_scores + tensor([0.0100, 0.1000, 0.2000, 0.3000]) + >>> instance_data.det_labels + tensor([0, 1, 2, 3]) + >>> instance_data['det_labels'] + tensor([0, 1, 2, 3]) + >>> 'det_labels' in instance_data + True + >>> instance_data.img_shape + (800, 1196, 3) + >>> 'det_scores' in instance_data + True + >>> del instance_data.det_scores + >>> 'det_scores' in instance_data + False + >>> det_labels = instance_data.pop('det_labels', None) + >>> det_labels + tensor([0, 1, 2, 3]) + >>> 'det_labels' in instance_data + >>> False + """ + + def __init__(self, meta_info=None, data=None): + + self._meta_info_fields = set() + self._data_fields = set() + + if meta_info is not None: + self.set_meta_info(meta_info=meta_info) + if data is not None: + self.set_data(data) + + def set_meta_info(self, meta_info): + """Add meta information. + + Args: + meta_info (dict): A dict contains the meta information + of image. such as `img_shape`, `scale_factor`, etc. + Default: None. + """ + assert isinstance(meta_info, + dict), f'meta should be a `dict` but get {meta_info}' + meta = copy.deepcopy(meta_info) + for k, v in meta.items(): + # should be consistent with original meta_info + if k in self._meta_info_fields: + ori_value = getattr(self, k) + if isinstance(ori_value, (torch.Tensor, np.ndarray)): + if (ori_value == v).all(): + continue + else: + raise KeyError( + f'img_meta_info {k} has been set as ' + f'{getattr(self, k)} before, which is immutable ') + elif ori_value == v: + continue + else: + raise KeyError( + f'img_meta_info {k} has been set as ' + f'{getattr(self, k)} before, which is immutable ') + else: + self._meta_info_fields.add(k) + self.__dict__[k] = v + + def set_data(self, data): + """Update a dict to `data_fields`. + + Args: + data (dict): A dict contains annotations of image or + model predictions. Default: None. + """ + assert isinstance(data, + dict), f'meta should be a `dict` but get {data}' + for k, v in data.items(): + self.__setattr__(k, v) + + def new(self, meta_info=None, data=None): + """Return a new results with same image meta information. + + Args: + meta_info (dict, optional): A dict contains the meta information + of image. such as `img_shape`, `scale_factor`, etc. + Default: None. + data (dict, optional): A dict contains annotations of image or + model predictions. Default: None. + """ + new_data = self.__class__() + new_data.set_meta_info(dict(self.meta_info_items())) + if meta_info is not None: + new_data.set_meta_info(meta_info) + if data is not None: + new_data.set_data(data) + return new_data + + def keys(self): + """ + Returns: + list: Contains all keys in data_fields. + """ + return [key for key in self._data_fields] + + def meta_info_keys(self): + """ + Returns: + list: Contains all keys in meta_info_fields. + """ + return [key for key in self._meta_info_fields] + + def values(self): + """ + Returns: + list: Contains all values in data_fields. + """ + return [getattr(self, k) for k in self.keys()] + + def meta_info_values(self): + """ + Returns: + list: Contains all values in meta_info_fields. 
+ """ + return [getattr(self, k) for k in self.meta_info_keys()] + + def items(self): + for k in self.keys(): + yield (k, getattr(self, k)) + + def meta_info_items(self): + for k in self.meta_info_keys(): + yield (k, getattr(self, k)) + + def __setattr__(self, name, val): + if name in ('_meta_info_fields', '_data_fields'): + if not hasattr(self, name): + super().__setattr__(name, val) + else: + raise AttributeError( + f'{name} has been used as a ' + f'private attribute, which is immutable. ') + else: + if name in self._meta_info_fields: + raise AttributeError(f'`{name}` is used in meta information,' + f'which is immutable') + + self._data_fields.add(name) + super().__setattr__(name, val) + + def __delattr__(self, item): + + if item in ('_meta_info_fields', '_data_fields'): + raise AttributeError(f'{item} has been used as a ' + f'private attribute, which is immutable. ') + + if item in self._meta_info_fields: + raise KeyError(f'{item} is used in meta information, ' + f'which is immutable.') + super().__delattr__(item) + if item in self._data_fields: + self._data_fields.remove(item) + + # dict-like methods + __setitem__ = __setattr__ + __delitem__ = __delattr__ + + def __getitem__(self, name): + return getattr(self, name) + + def get(self, *args): + assert len(args) < 3, '`get` get more than 2 arguments' + return self.__dict__.get(*args) + + def pop(self, *args): + assert len(args) < 3, '`pop` get more than 2 arguments' + name = args[0] + if name in self._meta_info_fields: + raise KeyError(f'{name} is a key in meta information, ' + f'which is immutable') + + if args[0] in self._data_fields: + self._data_fields.remove(args[0]) + return self.__dict__.pop(*args) + + # with default value + elif len(args) == 2: + return args[1] + else: + raise KeyError(f'{args[0]}') + + def __contains__(self, item): + return item in self._data_fields or \ + item in self._meta_info_fields + + # Tensor-like methods + def to(self, *args, **kwargs): + """Apply same name function to all tensors in data_fields.""" + new_data = self.new() + for k, v in self.items(): + if hasattr(v, 'to'): + v = v.to(*args, **kwargs) + new_data[k] = v + return new_data + + # Tensor-like methods + def cpu(self): + """Apply same name function to all tensors in data_fields.""" + new_data = self.new() + for k, v in self.items(): + if isinstance(v, torch.Tensor): + v = v.cpu() + new_data[k] = v + return new_data + + # Tensor-like methods + def mlu(self): + """Apply same name function to all tensors in data_fields.""" + new_data = self.new() + for k, v in self.items(): + if isinstance(v, torch.Tensor): + v = v.mlu() + new_data[k] = v + return new_data + + # Tensor-like methods + def cuda(self): + """Apply same name function to all tensors in data_fields.""" + new_data = self.new() + for k, v in self.items(): + if isinstance(v, torch.Tensor): + v = v.cuda() + new_data[k] = v + return new_data + + # Tensor-like methods + def detach(self): + """Apply same name function to all tensors in data_fields.""" + new_data = self.new() + for k, v in self.items(): + if isinstance(v, torch.Tensor): + v = v.detach() + new_data[k] = v + return new_data + + # Tensor-like methods + def numpy(self): + """Apply same name function to all tensors in data_fields.""" + new_data = self.new() + for k, v in self.items(): + if isinstance(v, torch.Tensor): + v = v.detach().cpu().numpy() + new_data[k] = v + return new_data + + def __nice__(self): + repr = '\n \n META INFORMATION \n' + for k, v in self.meta_info_items(): + repr += f'{k}: {v} \n' + repr += '\n DATA FIELDS \n' 
+ for k, v in self.items(): + if isinstance(v, (torch.Tensor, np.ndarray)): + repr += f'shape of {k}: {v.shape} \n' + else: + repr += f'{k}: {v} \n' + return repr + '\n' diff --git a/downstream/mmdetection/mmdet/core/data_structures/instance_data.py b/downstream/mmdetection/mmdet/core/data_structures/instance_data.py new file mode 100644 index 0000000..eef2065 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/data_structures/instance_data.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools + +import numpy as np +import torch + +from .general_data import GeneralData + + +class InstanceData(GeneralData): + """Data structure for instance-level annnotations or predictions. + + Subclass of :class:`GeneralData`. All value in `data_fields` + should have the same length. This design refer to + https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501 + + Examples: + >>> from mmdet.core import InstanceData + >>> import numpy as np + >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) + >>> results = InstanceData(img_meta) + >>> img_shape in results + True + >>> results.det_labels = torch.LongTensor([0, 1, 2, 3]) + >>> results["det_scores"] = torch.Tensor([0.01, 0.7, 0.6, 0.3]) + >>> results["det_masks"] = np.ndarray(4, 2, 2) + >>> len(results) + 4 + >>> print(resutls) + + >>> sorted_results = results[results.det_scores.sort().indices] + >>> sorted_results.det_scores + tensor([0.0100, 0.3000, 0.6000, 0.7000]) + >>> sorted_results.det_labels + tensor([0, 3, 2, 1]) + >>> print(results[results.scores > 0.5]) + + >>> results[results.det_scores > 0.5].det_labels + tensor([1, 2]) + >>> results[results.det_scores > 0.5].det_scores + tensor([0.7000, 0.6000]) + """ + + def __setattr__(self, name, value): + + if name in ('_meta_info_fields', '_data_fields'): + if not hasattr(self, name): + super().__setattr__(name, value) + else: + raise AttributeError( + f'{name} has been used as a ' + f'private attribute, which is immutable. ') + + else: + assert isinstance(value, (torch.Tensor, np.ndarray, list)), \ + f'Can set {type(value)}, only support' \ + f' {(torch.Tensor, np.ndarray, list)}' + + if self._data_fields: + assert len(value) == len(self), f'the length of ' \ + f'values {len(value)} is ' \ + f'not consistent with' \ + f' the length ' \ + f'of this :obj:`InstanceData` ' \ + f'{len(self)} ' + super().__setattr__(name, value) + + def __getitem__(self, item): + """ + Args: + item (str, obj:`slice`, + obj`torch.LongTensor`, obj:`torch.BoolTensor`): + get the corresponding values according to item. + + Returns: + obj:`InstanceData`: Corresponding values. + """ + assert len(self), ' This is a empty instance' + + assert isinstance( + item, (str, slice, int, torch.LongTensor, torch.BoolTensor)) + + if isinstance(item, str): + return getattr(self, item) + + if type(item) == int: + if item >= len(self) or item < -len(self): + raise IndexError(f'Index {item} out of range!') + else: + # keep the dimension + item = slice(item, None, len(self)) + + new_data = self.new() + if isinstance(item, (torch.Tensor)): + assert item.dim() == 1, 'Only support to get the' \ + ' values along the first dimension.' + if isinstance(item, torch.BoolTensor): + assert len(item) == len(self), f'The shape of the' \ + f' input(BoolTensor)) ' \ + f'{len(item)} ' \ + f' does not match the shape ' \ + f'of the indexed tensor ' \ + f'in results_filed ' \ + f'{len(self)} at ' \ + f'first dimension. 
' + + for k, v in self.items(): + if isinstance(v, torch.Tensor): + new_data[k] = v[item] + elif isinstance(v, np.ndarray): + new_data[k] = v[item.cpu().numpy()] + elif isinstance(v, list): + r_list = [] + # convert to indexes from boolTensor + if isinstance(item, torch.BoolTensor): + indexes = torch.nonzero(item).view(-1) + else: + indexes = item + for index in indexes: + r_list.append(v[index]) + new_data[k] = r_list + else: + # item is a slice + for k, v in self.items(): + new_data[k] = v[item] + return new_data + + @staticmethod + def cat(instances_list): + """Concat the predictions of all :obj:`InstanceData` in the list. + + Args: + instances_list (list[:obj:`InstanceData`]): A list + of :obj:`InstanceData`. + + Returns: + obj:`InstanceData` + """ + assert all( + isinstance(results, InstanceData) for results in instances_list) + assert len(instances_list) > 0 + if len(instances_list) == 1: + return instances_list[0] + + new_data = instances_list[0].new() + for k in instances_list[0]._data_fields: + values = [results[k] for results in instances_list] + v0 = values[0] + if isinstance(v0, torch.Tensor): + values = torch.cat(values, dim=0) + elif isinstance(v0, np.ndarray): + values = np.concatenate(values, axis=0) + elif isinstance(v0, list): + values = list(itertools.chain(*values)) + else: + raise ValueError( + f'Can not concat the {k} which is a {type(v0)}') + new_data[k] = values + return new_data + + def __len__(self): + if len(self._data_fields): + for v in self.values(): + return len(v) + else: + raise AssertionError('This is an empty `InstanceData`.') diff --git a/downstream/mmdetection/mmdet/core/evaluation/__init__.py b/downstream/mmdetection/mmdet/core/evaluation/__init__.py new file mode 100644 index 0000000..67e7c55 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/evaluation/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, + get_classes, imagenet_det_classes, + imagenet_vid_classes, oid_challenge_classes, + oid_v6_classes, voc_classes) +from .eval_hooks import DistEvalHook, EvalHook +from .mean_ap import average_precision, eval_map, print_map_summary +from .panoptic_utils import INSTANCE_OFFSET +from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, + print_recall_summary) + +__all__ = [ + 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', + 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', + 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', + 'print_map_summary', 'eval_recalls', 'print_recall_summary', + 'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes', + 'oid_challenge_classes', 'INSTANCE_OFFSET' +] diff --git a/downstream/mmdetection/mmdet/core/evaluation/bbox_overlaps.py b/downstream/mmdetection/mmdet/core/evaluation/bbox_overlaps.py new file mode 100644 index 0000000..5d6eb82 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/evaluation/bbox_overlaps.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np + + +def bbox_overlaps(bboxes1, + bboxes2, + mode='iou', + eps=1e-6, + use_legacy_coordinate=False): + """Calculate the ious between each bbox of bboxes1 and bboxes2. + + Args: + bboxes1 (ndarray): Shape (n, 4) + bboxes2 (ndarray): Shape (k, 4) + mode (str): IOU (intersection over union) or IOF (intersection + over foreground) + use_legacy_coordinate (bool): Whether to use coordinate system in + mmdet v1.x. 
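For reference, the boolean-mask `__getitem__` and `cat` implemented above combine as follows; the import mirrors the docstring example and the tensors are illustrative:

```python
# Hedged sketch: filtering and concatenating InstanceData objects.
import torch
from mmdet.core import InstanceData  # import used in the docstring example above

results = InstanceData(dict(img_shape=(800, 1196, 3)))
results.det_scores = torch.tensor([0.01, 0.7, 0.6, 0.3])
results.det_labels = torch.tensor([0, 1, 2, 3])

kept = results[results.det_scores > 0.5]  # boolean mask keeps entries 1 and 2
print(kept.det_labels)                    # tensor([1, 2])

merged = InstanceData.cat([kept, kept])
print(len(merged))                        # 4
```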
which means width, height should be + calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. + Note when function is used in `VOCDataset`, it should be + True to align with the official implementation + `http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar` + Default: False. + + Returns: + ious (ndarray): Shape (n, k) + """ + + assert mode in ['iou', 'iof'] + if not use_legacy_coordinate: + extra_length = 0. + else: + extra_length = 1. + bboxes1 = bboxes1.astype(np.float32) + bboxes2 = bboxes2.astype(np.float32) + rows = bboxes1.shape[0] + cols = bboxes2.shape[0] + ious = np.zeros((rows, cols), dtype=np.float32) + if rows * cols == 0: + return ious + exchange = False + if bboxes1.shape[0] > bboxes2.shape[0]: + bboxes1, bboxes2 = bboxes2, bboxes1 + ious = np.zeros((cols, rows), dtype=np.float32) + exchange = True + area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * ( + bboxes1[:, 3] - bboxes1[:, 1] + extra_length) + area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * ( + bboxes2[:, 3] - bboxes2[:, 1] + extra_length) + for i in range(bboxes1.shape[0]): + x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) + y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) + x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) + y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) + overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum( + y_end - y_start + extra_length, 0) + if mode == 'iou': + union = area1[i] + area2 - overlap + else: + union = area1[i] if not exchange else area2 + union = np.maximum(union, eps) + ious[i, :] = overlap / union + if exchange: + ious = ious.T + return ious diff --git a/downstream/mmdetection/mmdet/core/evaluation/class_names.py b/downstream/mmdetection/mmdet/core/evaluation/class_names.py new file mode 100644 index 0000000..7379711 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/evaluation/class_names.py @@ -0,0 +1,332 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
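A short usage sketch for the NumPy `bbox_overlaps` above (module path assumed from this file's location); the second detection overlaps the ground truth by 25 px² out of a 175 px² union:

```python
# Hedged sketch: pairwise IoU matrix between detections and ground truths.
import numpy as np
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps  # assumed module path

dets = np.array([[0., 0., 10., 10.],
                 [5., 5., 15., 15.]], dtype=np.float32)
gts = np.array([[0., 0., 10., 10.]], dtype=np.float32)

ious = bbox_overlaps(dets, gts, mode='iou')
print(ious)  # [[1.0], [~0.143]] -> shape (2, 1)
```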
+import mmcv + + +def wider_face_classes(): + return ['face'] + + +def voc_classes(): + return [ + 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', + 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', + 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' + ] + + +def imagenet_det_classes(): + return [ + 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', + 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', + 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', + 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', + 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', + 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', + 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', + 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', + 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', + 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', + 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', + 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', + 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', + 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', + 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', + 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', + 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', + 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', + 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', + 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', + 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', + 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', + 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', + 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', + 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', + 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', + 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', + 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', + 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', + 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', + 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', + 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', + 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', + 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', + 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', + 'whale', 'wine_bottle', 'zebra' + ] + + +def imagenet_vid_classes(): + return [ + 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', + 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', + 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', + 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', + 'watercraft', 'whale', 'zebra' + ] + + +def coco_classes(): + return [ + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', + 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', + 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', + 
'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', + 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', + 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', + 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', + 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', + 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' + ] + + +def cityscapes_classes(): + return [ + 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle' + ] + + +def oid_challenge_classes(): + return [ + 'Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle', + 'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl', + 'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert', + 'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee', + 'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink', + 'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table', + 'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light', + 'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum', + 'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat', + 'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt', + 'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear', + 'Vehicle registration plate', 'Microphone', 'Musical keyboard', + 'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable', + 'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries', + 'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane', + 'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail', + 'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle', + 'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat', + 'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame', + 'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet', + 'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag', + 'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree', + 'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine', + 'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance', + 'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard', + 'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf', + 'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch', + 'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster', + 'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal', + 'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer', + 'Platter', 'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer', + 'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace', + 'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry', + 'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot', + 'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite', + 'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper', + 'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft', + 'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter', + 'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra', + 'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard', + 'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building', + 'Sandwich', 'Swimming pool', 'Headphones', 'Tin 
can', 'Crown', 'Doll', + 'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon', + 'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock', + 'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance', + 'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair', + 'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat', + 'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen', + 'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust', + 'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot', + 'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken', + 'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod', + 'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet', + 'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture', + 'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat', + 'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep', + 'Tablet computer', 'Pillow', 'Kitchen & dining room table', + 'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree', + 'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread', + 'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope', + 'Broccoli', 'Ice cream', 'Racket', 'Banana', 'Cookie', 'Cucumber', + 'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies', + 'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch', + 'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags', + 'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock', + 'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza', + 'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store', + 'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry', + 'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase', + 'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft', + 'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer', + 'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon', + 'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger', + 'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball', + 'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin', + 'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle', + 'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot', + 'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle', + 'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman', + 'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper', + 'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone', + 'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear', + 'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail', + 'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn', + 'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango', + 'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell', + 'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase', + 'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup', + 'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula', + 'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon' + ] + + +def oid_v6_classes(): + return [ + 'Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football', + 'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy', + 'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye', + 'Cosmetics', 'Paddle', 
'Snowman', 'Beer', 'Chopsticks', 'Human beard', + 'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber', + 'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick', + 'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle', + 'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot', + 'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy', + 'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt', + 'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear', + 'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot', + 'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee', + 'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw', + 'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern', + 'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace', + 'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer', + 'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock', + 'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft', + 'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile', + 'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel', + 'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola', + 'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building', + 'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor', + 'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment', + 'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini', + 'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur', + 'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula', + 'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser', + 'Fedora', 'Guacamole', 'Dagger', 'Scarf', 'Dolphin', 'Sombrero', + 'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener', + 'Goggles', 'Human body', 'Roller skates', 'Coffee cup', + 'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign', + 'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker', + 'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food', + 'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove', + 'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax', + 'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart', + 'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind', + 'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light', + 'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear', + 'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle', + 'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat', + 'Baseball bat', 'Baseball glove', 'Mixing bowl', + 'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House', + 'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed', + 'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer', + 'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster', + 'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw', + 'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate', + 'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove', + 'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)', + 'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet', + 'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife', + 'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse', + 'Dumbbell', 'Human head', 
'Bowl', 'Humidifier', 'Porch', 'Lizard', + 'Billiard table', 'Mammal', 'Mouse', 'Motorcycle', + 'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow', + 'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk', + 'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom', + 'Crutch', 'Pitcher (Container)', 'Mirror', 'Personal flotation device', + 'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard', + 'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball', + 'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl', + 'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta', + 'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer', + 'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile', + 'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda', + 'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood', + 'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi', + 'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine', + 'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table', + 'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco', + 'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree', + 'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray', + 'Trousers', 'Bowling equipment', 'Football helmet', 'Truck', + 'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag', + 'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale', + 'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion', + 'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck', + 'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper', + 'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog', + 'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer', + 'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark', + 'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser', + 'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger', + 'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus', + 'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull', + 'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench', + 'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange', + 'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet', + 'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut', + 'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera', + 'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable', + 'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish', + 'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple', + 'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower', + 'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug', + 'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow', + 'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone', + 'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray', + 'Kitchen & dining room table', 'Dog bed', 'Cake stand', + 'Cat furniture', 'Bathroom accessory', 'Facial tissue holder', + 'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler', + 'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry', + 'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily', + 'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant', + 'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy 
bear', 'Watermelon', + 'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich', + 'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod', + 'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume', + 'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair', + 'Rugby ball', 'Armadillo', 'Maracas', 'Helmet' + ] + + +dataset_aliases = { + 'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'], + 'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'], + 'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'], + 'coco': ['coco', 'mscoco', 'ms_coco'], + 'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'], + 'cityscapes': ['cityscapes'], + 'oid_challenge': ['oid_challenge', 'openimages_challenge'], + 'oid_v6': ['oid_v6', 'openimages_v6'] +} + + +def get_classes(dataset): + """Get class names of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if mmcv.is_str(dataset): + if dataset in alias2name: + labels = eval(alias2name[dataset] + '_classes()') + else: + raise ValueError(f'Unrecognized dataset: {dataset}') + else: + raise TypeError(f'dataset must a str, but got {type(dataset)}') + return labels diff --git a/downstream/mmdetection/mmdet/core/evaluation/eval_hooks.py b/downstream/mmdetection/mmdet/core/evaluation/eval_hooks.py new file mode 100644 index 0000000..98856c1 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/evaluation/eval_hooks.py @@ -0,0 +1,140 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import bisect +import os.path as osp + +import mmcv +import torch.distributed as dist +from mmcv.runner import DistEvalHook as BaseDistEvalHook +from mmcv.runner import EvalHook as BaseEvalHook +from torch.nn.modules.batchnorm import _BatchNorm + + +def _calc_dynamic_intervals(start_interval, dynamic_interval_list): + assert mmcv.is_list_of(dynamic_interval_list, tuple) + + dynamic_milestones = [0] + dynamic_milestones.extend( + [dynamic_interval[0] for dynamic_interval in dynamic_interval_list]) + dynamic_intervals = [start_interval] + dynamic_intervals.extend( + [dynamic_interval[1] for dynamic_interval in dynamic_interval_list]) + return dynamic_milestones, dynamic_intervals + + +class EvalHook(BaseEvalHook): + + def __init__(self, *args, dynamic_intervals=None, **kwargs): + super(EvalHook, self).__init__(*args, **kwargs) + self.latest_results = None + + self.use_dynamic_intervals = dynamic_intervals is not None + if self.use_dynamic_intervals: + self.dynamic_milestones, self.dynamic_intervals = \ + _calc_dynamic_intervals(self.interval, dynamic_intervals) + + def _decide_interval(self, runner): + if self.use_dynamic_intervals: + progress = runner.epoch if self.by_epoch else runner.iter + step = bisect.bisect(self.dynamic_milestones, (progress + 1)) + # Dynamically modify the evaluation interval + self.interval = self.dynamic_intervals[step - 1] + + def before_train_epoch(self, runner): + """Evaluate the model only at the start of training by epoch.""" + self._decide_interval(runner) + super().before_train_epoch(runner) + + def before_train_iter(self, runner): + self._decide_interval(runner) + super().before_train_iter(runner) + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + if not self._should_evaluate(runner): + return + + from mmdet.apis import single_gpu_test + + # Changed results to self.results so that MMDetWandbHook can access + # the evaluation results and log them to wandb. 
+ results = single_gpu_test(runner.model, self.dataloader, show=False) + self.latest_results = results + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + # the key_score may be `None` so it needs to skip the action to save + # the best checkpoint + if self.save_best and key_score: + self._save_ckpt(runner, key_score) + + +# Note: Considering that MMCV's EvalHook updated its interface in V1.3.16, +# in order to avoid strong version dependency, we did not directly +# inherit EvalHook but BaseDistEvalHook. +class DistEvalHook(BaseDistEvalHook): + + def __init__(self, *args, dynamic_intervals=None, **kwargs): + super(DistEvalHook, self).__init__(*args, **kwargs) + self.latest_results = None + + self.use_dynamic_intervals = dynamic_intervals is not None + if self.use_dynamic_intervals: + self.dynamic_milestones, self.dynamic_intervals = \ + _calc_dynamic_intervals(self.interval, dynamic_intervals) + + def _decide_interval(self, runner): + if self.use_dynamic_intervals: + progress = runner.epoch if self.by_epoch else runner.iter + step = bisect.bisect(self.dynamic_milestones, (progress + 1)) + # Dynamically modify the evaluation interval + self.interval = self.dynamic_intervals[step - 1] + + def before_train_epoch(self, runner): + """Evaluate the model only at the start of training by epoch.""" + self._decide_interval(runner) + super().before_train_epoch(runner) + + def before_train_iter(self, runner): + self._decide_interval(runner) + super().before_train_iter(runner) + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + # Synchronization of BatchNorm's buffer (running_mean + # and running_var) is not supported in the DDP of pytorch, + # which may cause the inconsistent performance of models in + # different ranks, so we broadcast BatchNorm's buffers + # of rank 0 to other ranks to avoid this. + if self.broadcast_bn_buffer: + model = runner.model + for name, module in model.named_modules(): + if isinstance(module, + _BatchNorm) and module.track_running_stats: + dist.broadcast(module.running_var, 0) + dist.broadcast(module.running_mean, 0) + + if not self._should_evaluate(runner): + return + + tmpdir = self.tmpdir + if tmpdir is None: + tmpdir = osp.join(runner.work_dir, '.eval_hook') + + from mmdet.apis import multi_gpu_test + + # Changed results to self.results so that MMDetWandbHook can access + # the evaluation results and log them to wandb. + results = multi_gpu_test( + runner.model, + self.dataloader, + tmpdir=tmpdir, + gpu_collect=self.gpu_collect) + self.latest_results = results + if runner.rank == 0: + print('\n') + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + + # the key_score may be `None` so it needs to skip + # the action to save the best checkpoint + if self.save_best and key_score: + self._save_ckpt(runner, key_score) diff --git a/downstream/mmdetection/mmdet/core/evaluation/mean_ap.py b/downstream/mmdetection/mmdet/core/evaluation/mean_ap.py new file mode 100644 index 0000000..a293b80 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/evaluation/mean_ap.py @@ -0,0 +1,782 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
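Both hooks resolve the dynamic evaluation interval the same way: `bisect` finds the last milestone that has been passed, and the matching interval is used from then on. A minimal sketch of that lookup with illustrative milestones:

```python
# Hedged sketch of the dynamic-interval lookup used by the eval hooks above;
# the milestone/interval values are illustrative only.
import bisect

start_interval = 12
dynamic_interval_list = [(280, 5), (285, 1)]  # e.g. evaluate more often near the end

milestones = [0] + [m for m, _ in dynamic_interval_list]
intervals = [start_interval] + [i for _, i in dynamic_interval_list]

for epoch in (0, 100, 280, 290):
    step = bisect.bisect(milestones, epoch + 1)
    print(epoch, '->', intervals[step - 1])   # 12, 12, 5, 1
```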
+from multiprocessing import Pool + +import mmcv +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable + +from .bbox_overlaps import bbox_overlaps +from .class_names import get_classes + + +def average_precision(recalls, precisions, mode='area'): + """Calculate average precision (for single or multiple scales). + + Args: + recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) + precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) + mode (str): 'area' or '11points', 'area' means calculating the area + under precision-recall curve, '11points' means calculating + the average precision of recalls at [0, 0.1, ..., 1] + + Returns: + float or ndarray: calculated average precision + """ + no_scale = False + if recalls.ndim == 1: + no_scale = True + recalls = recalls[np.newaxis, :] + precisions = precisions[np.newaxis, :] + assert recalls.shape == precisions.shape and recalls.ndim == 2 + num_scales = recalls.shape[0] + ap = np.zeros(num_scales, dtype=np.float32) + if mode == 'area': + zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) + ones = np.ones((num_scales, 1), dtype=recalls.dtype) + mrec = np.hstack((zeros, recalls, ones)) + mpre = np.hstack((zeros, precisions, zeros)) + for i in range(mpre.shape[1] - 1, 0, -1): + mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) + for i in range(num_scales): + ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] + ap[i] = np.sum( + (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) + elif mode == '11points': + for i in range(num_scales): + for thr in np.arange(0, 1 + 1e-3, 0.1): + precs = precisions[i, recalls[i, :] >= thr] + prec = precs.max() if precs.size > 0 else 0 + ap[i] += prec + ap /= 11 + else: + raise ValueError( + 'Unrecognized mode, only "area" and "11points" are supported') + if no_scale: + ap = ap[0] + return ap + + +def tpfp_imagenet(det_bboxes, + gt_bboxes, + gt_bboxes_ignore=None, + default_iou_thr=0.5, + area_ranges=None, + use_legacy_coordinate=False, + **kwargs): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). + gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). + gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, + of shape (k, 4). Default: None + default_iou_thr (float): IoU threshold to be considered as matched for + medium and large bboxes (small ones have special rules). + Default: 0.5. + area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, + in the format [(min1, max1), (min2, max2), ...]. Default: None. + use_legacy_coordinate (bool): Whether to use coordinate system in + mmdet v1.x. which means width, height should be + calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. + Default: False. + + Returns: + tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of + each array is (num_scales, m). + """ + + if not use_legacy_coordinate: + extra_length = 0. + else: + extra_length = 1. + + # an indicator of ignored gts + gt_ignore_inds = np.concatenate( + (np.zeros(gt_bboxes.shape[0], dtype=np.bool), + np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) + # stack gt_bboxes and gt_bboxes_ignore for convenience + gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) + + num_dets = det_bboxes.shape[0] + num_gts = gt_bboxes.shape[0] + if area_ranges is None: + area_ranges = [(None, None)] + num_scales = len(area_ranges) + # tp and fp are of shape (num_scales, num_gts), each row is tp or fp + # of a certain scale. 
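+    # i.e. one row per area-range scale and one column per detected box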
+ tp = np.zeros((num_scales, num_dets), dtype=np.float32) + fp = np.zeros((num_scales, num_dets), dtype=np.float32) + if gt_bboxes.shape[0] == 0: + if area_ranges == [(None, None)]: + fp[...] = 1 + else: + det_areas = ( + det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( + det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) + for i, (min_area, max_area) in enumerate(area_ranges): + fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 + return tp, fp + ious = bbox_overlaps( + det_bboxes, gt_bboxes - 1, use_legacy_coordinate=use_legacy_coordinate) + gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length + gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length + iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)), + default_iou_thr) + # sort all detections by scores in descending order + sort_inds = np.argsort(-det_bboxes[:, -1]) + for k, (min_area, max_area) in enumerate(area_ranges): + gt_covered = np.zeros(num_gts, dtype=bool) + # if no area range is specified, gt_area_ignore is all False + if min_area is None: + gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) + else: + gt_areas = gt_w * gt_h + gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) + for i in sort_inds: + max_iou = -1 + matched_gt = -1 + # find best overlapped available gt + for j in range(num_gts): + # different from PASCAL VOC: allow finding other gts if the + # best overlapped ones are already matched by other det bboxes + if gt_covered[j]: + continue + elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou: + max_iou = ious[i, j] + matched_gt = j + # there are 4 cases for a det bbox: + # 1. it matches a gt, tp = 1, fp = 0 + # 2. it matches an ignored gt, tp = 0, fp = 0 + # 3. it matches no gt and within area range, tp = 0, fp = 1 + # 4. it matches no gt but is beyond area range, tp = 0, fp = 0 + if matched_gt >= 0: + gt_covered[matched_gt] = 1 + if not (gt_ignore_inds[matched_gt] + or gt_area_ignore[matched_gt]): + tp[k, i] = 1 + elif min_area is None: + fp[k, i] = 1 + else: + bbox = det_bboxes[i, :4] + area = (bbox[2] - bbox[0] + extra_length) * ( + bbox[3] - bbox[1] + extra_length) + if area >= min_area and area < max_area: + fp[k, i] = 1 + return tp, fp + + +def tpfp_default(det_bboxes, + gt_bboxes, + gt_bboxes_ignore=None, + iou_thr=0.5, + area_ranges=None, + use_legacy_coordinate=False, + **kwargs): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). + gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). + gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, + of shape (k, 4). Default: None + iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + area_ranges (list[tuple] | None): Range of bbox areas to be + evaluated, in the format [(min1, max1), (min2, max2), ...]. + Default: None. + use_legacy_coordinate (bool): Whether to use coordinate system in + mmdet v1.x. which means width, height should be + calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. + Default: False. + + Returns: + tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of + each array is (num_scales, m). + """ + + if not use_legacy_coordinate: + extra_length = 0. + else: + extra_length = 1. 
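+    # `extra_length` reproduces the +1 width/height convention of mmdet v1.x
+    # when `use_legacy_coordinate` is True.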
+ + # an indicator of ignored gts + gt_ignore_inds = np.concatenate( + (np.zeros(gt_bboxes.shape[0], dtype=np.bool), + np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) + # stack gt_bboxes and gt_bboxes_ignore for convenience + gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) + + num_dets = det_bboxes.shape[0] + num_gts = gt_bboxes.shape[0] + if area_ranges is None: + area_ranges = [(None, None)] + num_scales = len(area_ranges) + # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of + # a certain scale + tp = np.zeros((num_scales, num_dets), dtype=np.float32) + fp = np.zeros((num_scales, num_dets), dtype=np.float32) + + # if there is no gt bboxes in this image, then all det bboxes + # within area range are false positives + if gt_bboxes.shape[0] == 0: + if area_ranges == [(None, None)]: + fp[...] = 1 + else: + det_areas = ( + det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( + det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) + for i, (min_area, max_area) in enumerate(area_ranges): + fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 + return tp, fp + + ious = bbox_overlaps( + det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate) + # for each det, the max iou with all gts + ious_max = ious.max(axis=1) + # for each det, which gt overlaps most with it + ious_argmax = ious.argmax(axis=1) + # sort all dets in descending order by scores + sort_inds = np.argsort(-det_bboxes[:, -1]) + for k, (min_area, max_area) in enumerate(area_ranges): + gt_covered = np.zeros(num_gts, dtype=bool) + # if no area range is specified, gt_area_ignore is all False + if min_area is None: + gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) + else: + gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length) + gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) + for i in sort_inds: + if ious_max[i] >= iou_thr: + matched_gt = ious_argmax[i] + if not (gt_ignore_inds[matched_gt] + or gt_area_ignore[matched_gt]): + if not gt_covered[matched_gt]: + gt_covered[matched_gt] = True + tp[k, i] = 1 + else: + fp[k, i] = 1 + # otherwise ignore this detected bbox, tp = 0, fp = 0 + elif min_area is None: + fp[k, i] = 1 + else: + bbox = det_bboxes[i, :4] + area = (bbox[2] - bbox[0] + extra_length) * ( + bbox[3] - bbox[1] + extra_length) + if area >= min_area and area < max_area: + fp[k, i] = 1 + return tp, fp + + +def tpfp_openimages(det_bboxes, + gt_bboxes, + gt_bboxes_ignore=None, + iou_thr=0.5, + area_ranges=None, + use_legacy_coordinate=False, + gt_bboxes_group_of=None, + use_group_of=True, + ioa_thr=0.5, + **kwargs): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). + gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). + gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, + of shape (k, 4). Default: None + iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + area_ranges (list[tuple] | None): Range of bbox areas to be + evaluated, in the format [(min1, max1), (min2, max2), ...]. + Default: None. + use_legacy_coordinate (bool): Whether to use coordinate system in + mmdet v1.x. which means width, height should be + calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. + Default: False. + gt_bboxes_group_of (ndarray): GT group_of of this image, of shape + (k, 1). 
Default: None + use_group_of (bool): Whether to use group of when calculate TP and FP, + which only used in OpenImages evaluation. Default: True. + ioa_thr (float | None): IoA threshold to be considered as matched, + which only used in OpenImages evaluation. Default: 0.5. + + Returns: + tuple[np.ndarray]: Returns a tuple (tp, fp, det_bboxes), where + (tp, fp) whose elements are 0 and 1. The shape of each array is + (num_scales, m). (det_bboxes) whose will filter those are not + matched by group of gts when processing Open Images evaluation. + The shape is (num_scales, m). + """ + + if not use_legacy_coordinate: + extra_length = 0. + else: + extra_length = 1. + + # an indicator of ignored gts + gt_ignore_inds = np.concatenate( + (np.zeros(gt_bboxes.shape[0], dtype=np.bool), + np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) + # stack gt_bboxes and gt_bboxes_ignore for convenience + gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) + + num_dets = det_bboxes.shape[0] + num_gts = gt_bboxes.shape[0] + if area_ranges is None: + area_ranges = [(None, None)] + num_scales = len(area_ranges) + # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of + # a certain scale + tp = np.zeros((num_scales, num_dets), dtype=np.float32) + fp = np.zeros((num_scales, num_dets), dtype=np.float32) + + # if there is no gt bboxes in this image, then all det bboxes + # within area range are false positives + if gt_bboxes.shape[0] == 0: + if area_ranges == [(None, None)]: + fp[...] = 1 + else: + det_areas = ( + det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( + det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) + for i, (min_area, max_area) in enumerate(area_ranges): + fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 + return tp, fp, det_bboxes + + if gt_bboxes_group_of is not None and use_group_of: + # if handle group-of boxes, divided gt boxes into two parts: + # non-group-of and group-of.Then calculate ious and ioas through + # non-group-of group-of gts respectively. This only used in + # OpenImages evaluation. 
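+        # IoU is computed against ordinary (non-group-of) gts, while IoA
+        # (mode='iof', intersection over the detection's own area) is
+        # computed against group-of gts.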
+ assert gt_bboxes_group_of.shape[0] == gt_bboxes.shape[0] + non_group_gt_bboxes = gt_bboxes[~gt_bboxes_group_of] + group_gt_bboxes = gt_bboxes[gt_bboxes_group_of] + num_gts_group = group_gt_bboxes.shape[0] + ious = bbox_overlaps(det_bboxes, non_group_gt_bboxes) + ioas = bbox_overlaps(det_bboxes, group_gt_bboxes, mode='iof') + else: + # if not consider group-of boxes, only calculate ious through gt boxes + ious = bbox_overlaps( + det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate) + ioas = None + + if ious.shape[1] > 0: + # for each det, the max iou with all gts + ious_max = ious.max(axis=1) + # for each det, which gt overlaps most with it + ious_argmax = ious.argmax(axis=1) + # sort all dets in descending order by scores + sort_inds = np.argsort(-det_bboxes[:, -1]) + for k, (min_area, max_area) in enumerate(area_ranges): + gt_covered = np.zeros(num_gts, dtype=bool) + # if no area range is specified, gt_area_ignore is all False + if min_area is None: + gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) + else: + gt_areas = ( + gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length) + gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) + for i in sort_inds: + if ious_max[i] >= iou_thr: + matched_gt = ious_argmax[i] + if not (gt_ignore_inds[matched_gt] + or gt_area_ignore[matched_gt]): + if not gt_covered[matched_gt]: + gt_covered[matched_gt] = True + tp[k, i] = 1 + else: + fp[k, i] = 1 + # otherwise ignore this detected bbox, tp = 0, fp = 0 + elif min_area is None: + fp[k, i] = 1 + else: + bbox = det_bboxes[i, :4] + area = (bbox[2] - bbox[0] + extra_length) * ( + bbox[3] - bbox[1] + extra_length) + if area >= min_area and area < max_area: + fp[k, i] = 1 + else: + # if there is no no-group-of gt bboxes in this image, + # then all det bboxes within area range are false positives. + # Only used in OpenImages evaluation. + if area_ranges == [(None, None)]: + fp[...] = 1 + else: + det_areas = ( + det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( + det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) + for i, (min_area, max_area) in enumerate(area_ranges): + fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 + + if ioas is None or ioas.shape[1] <= 0: + return tp, fp, det_bboxes + else: + # The evaluation of group-of TP and FP are done in two stages: + # 1. All detections are first matched to non group-of boxes; true + # positives are determined. + # 2. Detections that are determined as false positives are matched + # against group-of boxes and calculated group-of TP and FP. + # Only used in OpenImages evaluation. 
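+        # Each group-of gt contributes at most one TP; `det_bboxes_group`
+        # keeps the highest-scoring detection matched to each group-of gt.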
+ det_bboxes_group = np.zeros( + (num_scales, ioas.shape[1], det_bboxes.shape[1]), dtype=float) + match_group_of = np.zeros((num_scales, num_dets), dtype=bool) + tp_group = np.zeros((num_scales, num_gts_group), dtype=np.float32) + ioas_max = ioas.max(axis=1) + # for each det, which gt overlaps most with it + ioas_argmax = ioas.argmax(axis=1) + # sort all dets in descending order by scores + sort_inds = np.argsort(-det_bboxes[:, -1]) + for k, (min_area, max_area) in enumerate(area_ranges): + box_is_covered = tp[k] + # if no area range is specified, gt_area_ignore is all False + if min_area is None: + gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) + else: + gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1]) + gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) + for i in sort_inds: + matched_gt = ioas_argmax[i] + if not box_is_covered[i]: + if ioas_max[i] >= ioa_thr: + if not (gt_ignore_inds[matched_gt] + or gt_area_ignore[matched_gt]): + if not tp_group[k, matched_gt]: + tp_group[k, matched_gt] = 1 + match_group_of[k, i] = True + else: + match_group_of[k, i] = True + + if det_bboxes_group[k, matched_gt, -1] < \ + det_bboxes[i, -1]: + det_bboxes_group[k, matched_gt] = \ + det_bboxes[i] + + fp_group = (tp_group <= 0).astype(float) + tps = [] + fps = [] + # concatenate tp, fp, and det-boxes which not matched group of + # gt boxes and tp_group, fp_group, and det_bboxes_group which + # matched group of boxes respectively. + for i in range(num_scales): + tps.append( + np.concatenate((tp[i][~match_group_of[i]], tp_group[i]))) + fps.append( + np.concatenate((fp[i][~match_group_of[i]], fp_group[i]))) + det_bboxes = np.concatenate( + (det_bboxes[~match_group_of[i]], det_bboxes_group[i])) + + tp = np.vstack(tps) + fp = np.vstack(fps) + return tp, fp, det_bboxes + + +def get_cls_results(det_results, annotations, class_id): + """Get det results and gt information of a certain class. + + Args: + det_results (list[list]): Same as `eval_map()`. + annotations (list[dict]): Same as `eval_map()`. + class_id (int): ID of a specific class. + + Returns: + tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes + """ + cls_dets = [img_res[class_id] for img_res in det_results] + cls_gts = [] + cls_gts_ignore = [] + for ann in annotations: + gt_inds = ann['labels'] == class_id + cls_gts.append(ann['bboxes'][gt_inds, :]) + + if ann.get('labels_ignore', None) is not None: + ignore_inds = ann['labels_ignore'] == class_id + cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :]) + else: + cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32)) + + return cls_dets, cls_gts, cls_gts_ignore + + +def get_cls_group_ofs(annotations, class_id): + """Get `gt_group_of` of a certain class, which is used in Open Images. + + Args: + annotations (list[dict]): Same as `eval_map()`. + class_id (int): ID of a specific class. + + Returns: + list[np.ndarray]: `gt_group_of` of a certain class. + """ + gt_group_ofs = [] + for ann in annotations: + gt_inds = ann['labels'] == class_id + if ann.get('gt_is_group_ofs', None) is not None: + gt_group_ofs.append(ann['gt_is_group_ofs'][gt_inds]) + else: + gt_group_ofs.append(np.empty((0, 1), dtype=np.bool)) + + return gt_group_ofs + + +def eval_map(det_results, + annotations, + scale_ranges=None, + iou_thr=0.5, + ioa_thr=None, + dataset=None, + logger=None, + tpfp_fn=None, + nproc=4, + use_legacy_coordinate=False, + use_group_of=False): + """Evaluate mAP of a dataset. 
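+
+    Detections are matched to ground truth boxes per class by one of the
+    ``tpfp_*`` functions above; AP is then computed per class and averaged.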
+ + Args: + det_results (list[list]): [[cls1_det, cls2_det, ...], ...]. + The outer list indicates images, and the inner list indicates + per-class detected bboxes. + annotations (list[dict]): Ground truth annotations where each item of + the list indicates an image. Keys of annotations are: + + - `bboxes`: numpy array of shape (n, 4) + - `labels`: numpy array of shape (n, ) + - `bboxes_ignore` (optional): numpy array of shape (k, 4) + - `labels_ignore` (optional): numpy array of shape (k, ) + scale_ranges (list[tuple] | None): Range of scales to be evaluated, + in the format [(min1, max1), (min2, max2), ...]. A range of + (32, 64) means the area range between (32**2, 64**2). + Default: None. + iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + ioa_thr (float | None): IoA threshold to be considered as matched, + which only used in OpenImages evaluation. Default: None. + dataset (list[str] | str | None): Dataset name or dataset classes, + there are minor differences in metrics for different datasets, e.g. + "voc07", "imagenet_det", etc. Default: None. + logger (logging.Logger | str | None): The way to print the mAP + summary. See `mmcv.utils.print_log()` for details. Default: None. + tpfp_fn (callable | None): The function used to determine true/ + false positives. If None, :func:`tpfp_default` is used as default + unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this + case). If it is given as a function, then this function is used + to evaluate tp & fp. Default None. + nproc (int): Processes used for computing TP and FP. + Default: 4. + use_legacy_coordinate (bool): Whether to use coordinate system in + mmdet v1.x. which means width, height should be + calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. + Default: False. + use_group_of (bool): Whether to use group of when calculate TP and FP, + which only used in OpenImages evaluation. Default: False. + + Returns: + tuple: (mAP, [dict, dict, ...]) + """ + assert len(det_results) == len(annotations) + if not use_legacy_coordinate: + extra_length = 0. + else: + extra_length = 1. + + num_imgs = len(det_results) + num_scales = len(scale_ranges) if scale_ranges is not None else 1 + num_classes = len(det_results[0]) # positive class num + area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] + if scale_ranges is not None else None) + + # There is no need to use multi processes to process + # when num_imgs = 1 . + if num_imgs > 1: + assert nproc > 0, 'nproc must be at least one.' 
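+        # Never spawn more worker processes than there are images.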
+ nproc = min(nproc, num_imgs) + pool = Pool(nproc) + + eval_results = [] + for i in range(num_classes): + # get gt and det bboxes of this class + cls_dets, cls_gts, cls_gts_ignore = get_cls_results( + det_results, annotations, i) + # choose proper function according to datasets to compute tp and fp + if tpfp_fn is None: + if dataset in ['det', 'vid']: + tpfp_fn = tpfp_imagenet + elif dataset in ['oid_challenge', 'oid_v6'] \ + or use_group_of is True: + tpfp_fn = tpfp_openimages + else: + tpfp_fn = tpfp_default + if not callable(tpfp_fn): + raise ValueError( + f'tpfp_fn has to be a function or None, but got {tpfp_fn}') + + if num_imgs > 1: + # compute tp and fp for each image with multiple processes + args = [] + if use_group_of: + # used in Open Images Dataset evaluation + gt_group_ofs = get_cls_group_ofs(annotations, i) + args.append(gt_group_ofs) + args.append([use_group_of for _ in range(num_imgs)]) + if ioa_thr is not None: + args.append([ioa_thr for _ in range(num_imgs)]) + + tpfp = pool.starmap( + tpfp_fn, + zip(cls_dets, cls_gts, cls_gts_ignore, + [iou_thr for _ in range(num_imgs)], + [area_ranges for _ in range(num_imgs)], + [use_legacy_coordinate for _ in range(num_imgs)], *args)) + else: + tpfp = tpfp_fn( + cls_dets[0], + cls_gts[0], + cls_gts_ignore[0], + iou_thr, + area_ranges, + use_legacy_coordinate, + gt_bboxes_group_of=(get_cls_group_ofs(annotations, i)[0] + if use_group_of else None), + use_group_of=use_group_of, + ioa_thr=ioa_thr) + tpfp = [tpfp] + + if use_group_of: + tp, fp, cls_dets = tuple(zip(*tpfp)) + else: + tp, fp = tuple(zip(*tpfp)) + # calculate gt number of each scale + # ignored gts or gts beyond the specific scale are not counted + num_gts = np.zeros(num_scales, dtype=int) + for j, bbox in enumerate(cls_gts): + if area_ranges is None: + num_gts[0] += bbox.shape[0] + else: + gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * ( + bbox[:, 3] - bbox[:, 1] + extra_length) + for k, (min_area, max_area) in enumerate(area_ranges): + num_gts[k] += np.sum((gt_areas >= min_area) + & (gt_areas < max_area)) + # sort all det bboxes by score, also sort tp and fp + cls_dets = np.vstack(cls_dets) + num_dets = cls_dets.shape[0] + sort_inds = np.argsort(-cls_dets[:, -1]) + tp = np.hstack(tp)[:, sort_inds] + fp = np.hstack(fp)[:, sort_inds] + # calculate recall and precision with tp and fp + tp = np.cumsum(tp, axis=1) + fp = np.cumsum(fp, axis=1) + eps = np.finfo(np.float32).eps + recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) + precisions = tp / np.maximum((tp + fp), eps) + # calculate AP + if scale_ranges is None: + recalls = recalls[0, :] + precisions = precisions[0, :] + num_gts = num_gts.item() + mode = 'area' if dataset != 'voc07' else '11points' + ap = average_precision(recalls, precisions, mode) + eval_results.append({ + 'num_gts': num_gts, + 'num_dets': num_dets, + 'recall': recalls, + 'precision': precisions, + 'ap': ap + }) + + if num_imgs > 1: + pool.close() + + if scale_ranges is not None: + # shape (num_classes, num_scales) + all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) + all_num_gts = np.vstack( + [cls_result['num_gts'] for cls_result in eval_results]) + mean_ap = [] + for i in range(num_scales): + if np.any(all_num_gts[:, i] > 0): + mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) + else: + mean_ap.append(0.0) + else: + aps = [] + for cls_result in eval_results: + if cls_result['num_gts'] > 0: + aps.append(cls_result['ap']) + mean_ap = np.array(aps).mean().item() if aps else 0.0 + + print_map_summary( + mean_ap, 
eval_results, dataset, area_ranges, logger=logger) + + return mean_ap, eval_results + + +def print_map_summary(mean_ap, + results, + dataset=None, + scale_ranges=None, + logger=None): + """Print mAP and results of each class. + + A table will be printed to show the gts/dets/recall/AP of each class and + the mAP. + + Args: + mean_ap (float): Calculated from `eval_map()`. + results (list[dict]): Calculated from `eval_map()`. + dataset (list[str] | str | None): Dataset name or dataset classes. + scale_ranges (list[tuple] | None): Range of scales to be evaluated. + logger (logging.Logger | str | None): The way to print the mAP + summary. See `mmcv.utils.print_log()` for details. Default: None. + """ + + if logger == 'silent': + return + + if isinstance(results[0]['ap'], np.ndarray): + num_scales = len(results[0]['ap']) + else: + num_scales = 1 + + if scale_ranges is not None: + assert len(scale_ranges) == num_scales + + num_classes = len(results) + + recalls = np.zeros((num_scales, num_classes), dtype=np.float32) + aps = np.zeros((num_scales, num_classes), dtype=np.float32) + num_gts = np.zeros((num_scales, num_classes), dtype=int) + for i, cls_result in enumerate(results): + if cls_result['recall'].size > 0: + recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] + aps[:, i] = cls_result['ap'] + num_gts[:, i] = cls_result['num_gts'] + + if dataset is None: + label_names = [str(i) for i in range(num_classes)] + elif mmcv.is_str(dataset): + label_names = get_classes(dataset) + else: + label_names = dataset + + if not isinstance(mean_ap, list): + mean_ap = [mean_ap] + + header = ['class', 'gts', 'dets', 'recall', 'ap'] + for i in range(num_scales): + if scale_ranges is not None: + print_log(f'Scale range {scale_ranges[i]}', logger=logger) + table_data = [header] + for j in range(num_classes): + row_data = [ + label_names[j], num_gts[i, j], results[j]['num_dets'], + f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}' + ] + table_data.append(row_data) + table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}']) + table = AsciiTable(table_data) + table.inner_footing_row_border = True + print_log('\n' + table.table, logger=logger) diff --git a/downstream/mmdetection/mmdet/core/evaluation/panoptic_utils.py b/downstream/mmdetection/mmdet/core/evaluation/panoptic_utils.py new file mode 100644 index 0000000..10c9ad9 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/evaluation/panoptic_utils.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# A custom value to distinguish instance ID and category ID; need to +# be greater than the number of categories. +# For a pixel in the panoptic result map: +# pan_id = ins_id * INSTANCE_OFFSET + cat_id +INSTANCE_OFFSET = 1000 diff --git a/downstream/mmdetection/mmdet/core/evaluation/recall.py b/downstream/mmdetection/mmdet/core/evaluation/recall.py new file mode 100644 index 0000000..82b3c90 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/evaluation/recall.py @@ -0,0 +1,197 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
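Aside (not part of the diff): a minimal sketch of calling the `eval_map` utility defined above on toy inputs. The box coordinates and scores are made up for illustration, the import path follows the module added in this diff, and running it assumes the mmcv/numpy versions this code base targets (the helpers still use the deprecated `np.bool` alias):

```python
import numpy as np
from mmdet.core.evaluation.mean_ap import eval_map  # module added in this diff

# One image, one class; each detection row is [x1, y1, x2, y2, score].
det_results = [[np.array([[10., 10., 50., 50., 0.9],
                          [12., 12., 48., 48., 0.3]], dtype=np.float32)]]
annotations = [{
    'bboxes': np.array([[11., 11., 49., 49.]], dtype=np.float32),
    'labels': np.array([0]),
}]
mean_ap, per_class_results = eval_map(det_results, annotations, iou_thr=0.5)
```

With a single image the multiprocessing pool is skipped and `tpfp_default` is applied directly; `print_map_summary` then prints the per-class gts/dets/recall/AP table.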
+from collections.abc import Sequence + +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable + +from .bbox_overlaps import bbox_overlaps + + +def _recalls(all_ious, proposal_nums, thrs): + + img_num = all_ious.shape[0] + total_gt_num = sum([ious.shape[0] for ious in all_ious]) + + _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) + for k, proposal_num in enumerate(proposal_nums): + tmp_ious = np.zeros(0) + for i in range(img_num): + ious = all_ious[i][:, :proposal_num].copy() + gt_ious = np.zeros((ious.shape[0])) + if ious.size == 0: + tmp_ious = np.hstack((tmp_ious, gt_ious)) + continue + for j in range(ious.shape[0]): + gt_max_overlaps = ious.argmax(axis=1) + max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] + gt_idx = max_ious.argmax() + gt_ious[j] = max_ious[gt_idx] + box_idx = gt_max_overlaps[gt_idx] + ious[gt_idx, :] = -1 + ious[:, box_idx] = -1 + tmp_ious = np.hstack((tmp_ious, gt_ious)) + _ious[k, :] = tmp_ious + + _ious = np.fliplr(np.sort(_ious, axis=1)) + recalls = np.zeros((proposal_nums.size, thrs.size)) + for i, thr in enumerate(thrs): + recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) + + return recalls + + +def set_recall_param(proposal_nums, iou_thrs): + """Check proposal_nums and iou_thrs and set correct format.""" + if isinstance(proposal_nums, Sequence): + _proposal_nums = np.array(proposal_nums) + elif isinstance(proposal_nums, int): + _proposal_nums = np.array([proposal_nums]) + else: + _proposal_nums = proposal_nums + + if iou_thrs is None: + _iou_thrs = np.array([0.5]) + elif isinstance(iou_thrs, Sequence): + _iou_thrs = np.array(iou_thrs) + elif isinstance(iou_thrs, float): + _iou_thrs = np.array([iou_thrs]) + else: + _iou_thrs = iou_thrs + + return _proposal_nums, _iou_thrs + + +def eval_recalls(gts, + proposals, + proposal_nums=None, + iou_thrs=0.5, + logger=None, + use_legacy_coordinate=False): + """Calculate recalls. + + Args: + gts (list[ndarray]): a list of arrays of shape (n, 4) + proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5) + proposal_nums (int | Sequence[int]): Top N proposals to be evaluated. + iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5. + logger (logging.Logger | str | None): The way to print the recall + summary. See `mmcv.utils.print_log()` for details. Default: None. + use_legacy_coordinate (bool): Whether use coordinate system + in mmdet v1.x. "1" was added to both height and width + which means w, h should be + computed as 'x2 - x1 + 1` and 'y2 - y1 + 1'. Default: False. 
+ + + Returns: + ndarray: recalls of different ious and proposal nums + """ + + img_num = len(gts) + assert img_num == len(proposals) + proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) + all_ious = [] + for i in range(img_num): + if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: + scores = proposals[i][:, 4] + sort_idx = np.argsort(scores)[::-1] + img_proposal = proposals[i][sort_idx, :] + else: + img_proposal = proposals[i] + prop_num = min(img_proposal.shape[0], proposal_nums[-1]) + if gts[i] is None or gts[i].shape[0] == 0: + ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) + else: + ious = bbox_overlaps( + gts[i], + img_proposal[:prop_num, :4], + use_legacy_coordinate=use_legacy_coordinate) + all_ious.append(ious) + all_ious = np.array(all_ious) + recalls = _recalls(all_ious, proposal_nums, iou_thrs) + + print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger) + return recalls + + +def print_recall_summary(recalls, + proposal_nums, + iou_thrs, + row_idxs=None, + col_idxs=None, + logger=None): + """Print recalls in a table. + + Args: + recalls (ndarray): calculated from `bbox_recalls` + proposal_nums (ndarray or list): top N proposals + iou_thrs (ndarray or list): iou thresholds + row_idxs (ndarray): which rows(proposal nums) to print + col_idxs (ndarray): which cols(iou thresholds) to print + logger (logging.Logger | str | None): The way to print the recall + summary. See `mmcv.utils.print_log()` for details. Default: None. + """ + proposal_nums = np.array(proposal_nums, dtype=np.int32) + iou_thrs = np.array(iou_thrs) + if row_idxs is None: + row_idxs = np.arange(proposal_nums.size) + if col_idxs is None: + col_idxs = np.arange(iou_thrs.size) + row_header = [''] + iou_thrs[col_idxs].tolist() + table_data = [row_header] + for i, num in enumerate(proposal_nums[row_idxs]): + row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()] + row.insert(0, num) + table_data.append(row) + table = AsciiTable(table_data) + print_log('\n' + table.table, logger=logger) + + +def plot_num_recall(recalls, proposal_nums): + """Plot Proposal_num-Recalls curve. + + Args: + recalls(ndarray or list): shape (k,) + proposal_nums(ndarray or list): same shape as `recalls` + """ + if isinstance(proposal_nums, np.ndarray): + _proposal_nums = proposal_nums.tolist() + else: + _proposal_nums = proposal_nums + if isinstance(recalls, np.ndarray): + _recalls = recalls.tolist() + else: + _recalls = recalls + + import matplotlib.pyplot as plt + f = plt.figure() + plt.plot([0] + _proposal_nums, [0] + _recalls) + plt.xlabel('Proposal num') + plt.ylabel('Recall') + plt.axis([0, proposal_nums.max(), 0, 1]) + f.show() + + +def plot_iou_recall(recalls, iou_thrs): + """Plot IoU-Recalls curve. + + Args: + recalls(ndarray or list): shape (k,) + iou_thrs(ndarray or list): same shape as `recalls` + """ + if isinstance(iou_thrs, np.ndarray): + _iou_thrs = iou_thrs.tolist() + else: + _iou_thrs = iou_thrs + if isinstance(recalls, np.ndarray): + _recalls = recalls.tolist() + else: + _recalls = recalls + + import matplotlib.pyplot as plt + f = plt.figure() + plt.plot(_iou_thrs + [1.0], _recalls + [0.]) + plt.xlabel('IoU') + plt.ylabel('Recall') + plt.axis([iou_thrs.min(), 1, 0, 1]) + f.show() diff --git a/downstream/mmdetection/mmdet/core/export/__init__.py b/downstream/mmdetection/mmdet/core/export/__init__.py new file mode 100644 index 0000000..a8179c9 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/export/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx, + get_k_for_topk) +from .pytorch2onnx import (build_model_from_cfg, + generate_inputs_and_wrap_model, + preprocess_example_input) + +__all__ = [ + 'build_model_from_cfg', 'generate_inputs_and_wrap_model', + 'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx', + 'dynamic_clip_for_onnx' +] diff --git a/downstream/mmdetection/mmdet/core/export/model_wrappers.py b/downstream/mmdetection/mmdet/core/export/model_wrappers.py new file mode 100644 index 0000000..2f62bb0 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/export/model_wrappers.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings + +import numpy as np +import torch + +from mmdet.core import bbox2result +from mmdet.models import BaseDetector + + +class DeployBaseDetector(BaseDetector): + """DeployBaseDetector.""" + + def __init__(self, class_names, device_id): + super(DeployBaseDetector, self).__init__() + self.CLASSES = class_names + self.device_id = device_id + + def simple_test(self, img, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def aug_test(self, imgs, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def extract_feat(self, imgs): + raise NotImplementedError('This method is not implemented.') + + def forward_train(self, imgs, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def val_step(self, data, optimizer): + raise NotImplementedError('This method is not implemented.') + + def train_step(self, data, optimizer): + raise NotImplementedError('This method is not implemented.') + + def forward_test(self, *, img, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def async_simple_test(self, img, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def forward(self, img, img_metas, return_loss=True, **kwargs): + outputs = self.forward_test(img, img_metas, **kwargs) + batch_dets, batch_labels = outputs[:2] + batch_masks = outputs[2] if len(outputs) == 3 else None + batch_size = img[0].shape[0] + img_metas = img_metas[0] + results = [] + rescale = kwargs.get('rescale', True) + for i in range(batch_size): + dets, labels = batch_dets[i], batch_labels[i] + if rescale: + scale_factor = img_metas[i]['scale_factor'] + + if isinstance(scale_factor, (list, tuple, np.ndarray)): + assert len(scale_factor) == 4 + scale_factor = np.array(scale_factor)[None, :] # [1,4] + dets[:, :4] /= scale_factor + + if 'border' in img_metas[i]: + # offset pixel of the top-left corners between original image + # and padded/enlarged image, 'border' is used when exporting + # CornerNet and CentripetalNet to onnx + x_off = img_metas[i]['border'][2] + y_off = img_metas[i]['border'][0] + dets[:, [0, 2]] -= x_off + dets[:, [1, 3]] -= y_off + dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype) + + dets_results = bbox2result(dets, labels, len(self.CLASSES)) + + if batch_masks is not None: + masks = batch_masks[i] + img_h, img_w = img_metas[i]['img_shape'][:2] + ori_h, ori_w = img_metas[i]['ori_shape'][:2] + masks = masks[:, :img_h, :img_w] + if rescale: + masks = masks.astype(np.float32) + masks = torch.from_numpy(masks) + masks = torch.nn.functional.interpolate( + masks.unsqueeze(0), size=(ori_h, ori_w)) + masks = masks.squeeze(0).detach().numpy() + if masks.dtype != np.bool: + masks = masks >= 0.5 + 
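+                # Group masks by their predicted label, mirroring the
+                # per-class layout returned by bbox2result.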
segms_results = [[] for _ in range(len(self.CLASSES))] + for j in range(len(dets)): + segms_results[labels[j]].append(masks[j]) + results.append((dets_results, segms_results)) + else: + results.append(dets_results) + return results + + +class ONNXRuntimeDetector(DeployBaseDetector): + """Wrapper for detector's inference with ONNXRuntime.""" + + def __init__(self, onnx_file, class_names, device_id): + super(ONNXRuntimeDetector, self).__init__(class_names, device_id) + import onnxruntime as ort + + # get the custom op path + ort_custom_op_path = '' + try: + from mmcv.ops import get_onnxruntime_op_path + ort_custom_op_path = get_onnxruntime_op_path() + except (ImportError, ModuleNotFoundError): + warnings.warn('If input model has custom op from mmcv, \ + you may have to build mmcv with ONNXRuntime from source.') + session_options = ort.SessionOptions() + # register custom op for onnxruntime + if osp.exists(ort_custom_op_path): + session_options.register_custom_ops_library(ort_custom_op_path) + sess = ort.InferenceSession(onnx_file, session_options) + providers = ['CPUExecutionProvider'] + options = [{}] + is_cuda_available = ort.get_device() == 'GPU' + if is_cuda_available: + providers.insert(0, 'CUDAExecutionProvider') + options.insert(0, {'device_id': device_id}) + + sess.set_providers(providers, options) + + self.sess = sess + self.io_binding = sess.io_binding() + self.output_names = [_.name for _ in sess.get_outputs()] + self.is_cuda_available = is_cuda_available + + def forward_test(self, imgs, img_metas, **kwargs): + input_data = imgs[0] + # set io binding for inputs/outputs + device_type = 'cuda' if self.is_cuda_available else 'cpu' + if not self.is_cuda_available: + input_data = input_data.cpu() + self.io_binding.bind_input( + name='input', + device_type=device_type, + device_id=self.device_id, + element_type=np.float32, + shape=input_data.shape, + buffer_ptr=input_data.data_ptr()) + + for name in self.output_names: + self.io_binding.bind_output(name) + # run session to get outputs + self.sess.run_with_iobinding(self.io_binding) + ort_outputs = self.io_binding.copy_outputs_to_cpu() + return ort_outputs + + +class TensorRTDetector(DeployBaseDetector): + """Wrapper for detector's inference with TensorRT.""" + + def __init__(self, engine_file, class_names, device_id, output_names=None): + super(TensorRTDetector, self).__init__(class_names, device_id) + warnings.warn('`output_names` is deprecated and will be removed in ' + 'future releases.') + from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin + try: + load_tensorrt_plugin() + except (ImportError, ModuleNotFoundError): + warnings.warn('If input model has custom op from mmcv, \ + you may have to build mmcv with TensorRT from source.') + + output_names = ['dets', 'labels'] + model = TRTWraper(engine_file, ['input'], output_names) + with_masks = False + # if TensorRT has totally 4 inputs/outputs, then + # the detector should have `mask` output. 
+ if len(model.engine) == 4: + model.output_names = output_names + ['masks'] + with_masks = True + self.model = model + self.with_masks = with_masks + + def forward_test(self, imgs, img_metas, **kwargs): + input_data = imgs[0].contiguous() + with torch.cuda.device(self.device_id), torch.no_grad(): + outputs = self.model({'input': input_data}) + outputs = [outputs[name] for name in self.model.output_names] + outputs = [out.detach().cpu().numpy() for out in outputs] + return outputs diff --git a/downstream/mmdetection/mmdet/core/export/onnx_helper.py b/downstream/mmdetection/mmdet/core/export/onnx_helper.py new file mode 100644 index 0000000..9f6b9a0 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/export/onnx_helper.py @@ -0,0 +1,223 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os + +import torch + + +def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape): + """Clip boxes dynamically for onnx. + + Since torch.clamp cannot have dynamic `min` and `max`, we scale the + boxes by 1/max_shape and clamp in the range [0, 1]. + + Args: + x1 (Tensor): The x1 for bounding boxes. + y1 (Tensor): The y1 for bounding boxes. + x2 (Tensor): The x2 for bounding boxes. + y2 (Tensor): The y2 for bounding boxes. + max_shape (Tensor or torch.Size): The (H,W) of original image. + Returns: + tuple(Tensor): The clipped x1, y1, x2, y2. + """ + assert isinstance( + max_shape, + torch.Tensor), '`max_shape` should be tensor of (h,w) for onnx' + + # scale by 1/max_shape + x1 = x1 / max_shape[1] + y1 = y1 / max_shape[0] + x2 = x2 / max_shape[1] + y2 = y2 / max_shape[0] + + # clamp [0, 1] + x1 = torch.clamp(x1, 0, 1) + y1 = torch.clamp(y1, 0, 1) + x2 = torch.clamp(x2, 0, 1) + y2 = torch.clamp(y2, 0, 1) + + # scale back + x1 = x1 * max_shape[1] + y1 = y1 * max_shape[0] + x2 = x2 * max_shape[1] + y2 = y2 * max_shape[0] + return x1, y1, x2, y2 + + +def get_k_for_topk(k, size): + """Get k of TopK for onnx exporting. + + The K of TopK in TensorRT should not be a Tensor, while in ONNX Runtime + it could be a Tensor.Due to dynamic shape feature, we have to decide + whether to do TopK and what K it should be while exporting to ONNX. + If returned K is less than zero, it means we do not have to do + TopK operation. + + Args: + k (int or Tensor): The set k value for nms from config file. + size (Tensor or torch.Size): The number of elements of \ + TopK's input tensor + Returns: + tuple: (int or Tensor): The final K for TopK. + """ + ret_k = -1 + if k <= 0 or size <= 0: + return ret_k + if torch.onnx.is_in_onnx_export(): + is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' + if is_trt_backend: + # TensorRT does not support dynamic K with TopK op + if 0 < k < size: + ret_k = k + else: + # Always keep topk op for dynamic input in onnx for ONNX Runtime + ret_k = torch.where(k < size, k, size) + elif k < size: + ret_k = k + else: + # ret_k is -1 + pass + return ret_k + + +def add_dummy_nms_for_onnx(boxes, + scores, + max_output_boxes_per_class=1000, + iou_threshold=0.5, + score_threshold=0.05, + pre_top_k=-1, + after_top_k=-1, + labels=None): + """Create a dummy onnx::NonMaxSuppression op while exporting to ONNX. + + This function helps exporting to onnx with batch and multiclass NMS op. + It only supports class-agnostic detection results. That is, the scores + is of shape (N, num_bboxes, num_classes) and the boxes is of shape + (N, num_boxes, 4). 
+ + Args: + boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4] + scores (Tensor): The detection scores of shape + [N, num_boxes, num_classes] + max_output_boxes_per_class (int): Maximum number of output + boxes per class of nms. Defaults to 1000. + iou_threshold (float): IOU threshold of nms. Defaults to 0.5 + score_threshold (float): score threshold of nms. + Defaults to 0.05. + pre_top_k (bool): Number of top K boxes to keep before nms. + Defaults to -1. + after_top_k (int): Number of top K boxes to keep after nms. + Defaults to -1. + labels (Tensor, optional): It not None, explicit labels would be used. + Otherwise, labels would be automatically generated using + num_classed. Defaults to None. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. + """ + max_output_boxes_per_class = torch.LongTensor([max_output_boxes_per_class]) + iou_threshold = torch.tensor([iou_threshold], dtype=torch.float32) + score_threshold = torch.tensor([score_threshold], dtype=torch.float32) + batch_size = scores.shape[0] + num_class = scores.shape[2] + + nms_pre = torch.tensor(pre_top_k, device=scores.device, dtype=torch.long) + nms_pre = get_k_for_topk(nms_pre, boxes.shape[1]) + + if nms_pre > 0: + max_scores, _ = scores.max(-1) + _, topk_inds = max_scores.topk(nms_pre) + batch_inds = torch.arange(batch_size).view( + -1, 1).expand_as(topk_inds).long() + # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 + transformed_inds = boxes.shape[1] * batch_inds + topk_inds + boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( + batch_size, -1, 4) + scores = scores.reshape(-1, num_class)[transformed_inds, :].reshape( + batch_size, -1, num_class) + if labels is not None: + labels = labels.reshape(-1, 1)[transformed_inds].reshape( + batch_size, -1) + + scores = scores.permute(0, 2, 1) + num_box = boxes.shape[1] + # turn off tracing to create a dummy output of nms + state = torch._C._get_tracing_state() + # dummy indices of nms's output + num_fake_det = 2 + batch_inds = torch.randint(batch_size, (num_fake_det, 1)) + cls_inds = torch.randint(num_class, (num_fake_det, 1)) + box_inds = torch.randint(num_box, (num_fake_det, 1)) + indices = torch.cat([batch_inds, cls_inds, box_inds], dim=1) + output = indices + setattr(DummyONNXNMSop, 'output', output) + + # open tracing + torch._C._set_tracing_state(state) + selected_indices = DummyONNXNMSop.apply(boxes, scores, + max_output_boxes_per_class, + iou_threshold, score_threshold) + + batch_inds, cls_inds = selected_indices[:, 0], selected_indices[:, 1] + box_inds = selected_indices[:, 2] + if labels is None: + labels = torch.arange(num_class, dtype=torch.long).to(scores.device) + labels = labels.view(1, num_class, 1).expand_as(scores) + scores = scores.reshape(-1, 1) + boxes = boxes.reshape(batch_size, -1).repeat(1, num_class).reshape(-1, 4) + pos_inds = (num_class * batch_inds + cls_inds) * num_box + box_inds + mask = scores.new_zeros(scores.shape) + # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 + # PyTorch style code: mask[batch_inds, box_inds] += 1 + mask[pos_inds, :] += 1 + scores = scores * mask + boxes = boxes * mask + + scores = scores.reshape(batch_size, -1) + boxes = boxes.reshape(batch_size, -1, 4) + labels = labels.reshape(batch_size, -1) + + nms_after = torch.tensor( + after_top_k, device=scores.device, dtype=torch.long) + nms_after = get_k_for_topk(nms_after, num_box * num_class) + + if nms_after > 0: + _, 
topk_inds = scores.topk(nms_after) + batch_inds = torch.arange(batch_size).view(-1, 1).expand_as(topk_inds) + # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 + transformed_inds = scores.shape[1] * batch_inds + topk_inds + scores = scores.reshape(-1, 1)[transformed_inds, :].reshape( + batch_size, -1) + boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( + batch_size, -1, 4) + labels = labels.reshape(-1, 1)[transformed_inds, :].reshape( + batch_size, -1) + + scores = scores.unsqueeze(2) + dets = torch.cat([boxes, scores], dim=2) + return dets, labels + + +class DummyONNXNMSop(torch.autograd.Function): + """DummyONNXNMSop. + + This class is only for creating onnx::NonMaxSuppression. + """ + + @staticmethod + def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_threshold, + score_threshold): + + return DummyONNXNMSop.output + + @staticmethod + def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, + score_threshold): + return g.op( + 'NonMaxSuppression', + boxes, + scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold, + outputs=1) diff --git a/downstream/mmdetection/mmdet/core/export/pytorch2onnx.py b/downstream/mmdetection/mmdet/core/export/pytorch2onnx.py new file mode 100644 index 0000000..b8261ee --- /dev/null +++ b/downstream/mmdetection/mmdet/core/export/pytorch2onnx.py @@ -0,0 +1,159 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial + +import mmcv +import numpy as np +import torch +from mmcv.runner import load_checkpoint + + +def generate_inputs_and_wrap_model(config_path, + checkpoint_path, + input_config, + cfg_options=None): + """Prepare sample input and wrap model for ONNX export. + + The ONNX export API only accept args, and all inputs should be + torch.Tensor or corresponding types (such as tuple of tensor). + So we should call this function before exporting. This function will: + + 1. generate corresponding inputs which are used to execute the model. + 2. Wrap the model's forward function. + + For example, the MMDet models' forward function has a parameter + ``return_loss:bool``. As we want to set it as False while export API + supports neither bool type or kwargs. So we have to replace the forward + method like ``model.forward = partial(model.forward, return_loss=False)``. + + Args: + config_path (str): the OpenMMLab config for the model we want to + export to ONNX + checkpoint_path (str): Path to the corresponding checkpoint + input_config (dict): the exactly data in this dict depends on the + framework. For MMSeg, we can just declare the input shape, + and generate the dummy data accordingly. However, for MMDet, + we may pass the real img path, or the NMS will return None + as there is no legal bbox. + + Returns: + tuple: (model, tensor_data) wrapped model which can be called by + ``model(*tensor_data)`` and a list of inputs which are used to + execute the model while exporting. 
+ """ + + model = build_model_from_cfg( + config_path, checkpoint_path, cfg_options=cfg_options) + one_img, one_meta = preprocess_example_input(input_config) + tensor_data = [one_img] + model.forward = partial( + model.forward, img_metas=[[one_meta]], return_loss=False) + + # pytorch has some bug in pytorch1.3, we have to fix it + # by replacing these existing op + opset_version = 11 + # put the import within the function thus it will not cause import error + # when not using this function + try: + from mmcv.onnx.symbolic import register_extra_symbolics + except ModuleNotFoundError: + raise NotImplementedError('please update mmcv to version>=v1.0.4') + register_extra_symbolics(opset_version) + + return model, tensor_data + + +def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None): + """Build a model from config and load the given checkpoint. + + Args: + config_path (str): the OpenMMLab config for the model we want to + export to ONNX + checkpoint_path (str): Path to the corresponding checkpoint + + Returns: + torch.nn.Module: the built model + """ + from mmdet.models import build_detector + + cfg = mmcv.Config.fromfile(config_path) + if cfg_options is not None: + cfg.merge_from_dict(cfg_options) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + # build the model + cfg.model.train_cfg = None + model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) + checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu') + if 'CLASSES' in checkpoint.get('meta', {}): + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + from mmdet.datasets import DATASETS + dataset = DATASETS.get(cfg.data.test['type']) + assert (dataset is not None) + model.CLASSES = dataset.CLASSES + model.cpu().eval() + return model + + +def preprocess_example_input(input_config): + """Prepare an example input image for ``generate_inputs_and_wrap_model``. + + Args: + input_config (dict): customized config describing the example input. + + Returns: + tuple: (one_img, one_meta), tensor of the example input image and \ + meta information for the example input image. 
+ + Examples: + >>> from mmdet.core.export import preprocess_example_input + >>> input_config = { + >>> 'input_shape': (1,3,224,224), + >>> 'input_path': 'demo/demo.jpg', + >>> 'normalize_cfg': { + >>> 'mean': (123.675, 116.28, 103.53), + >>> 'std': (58.395, 57.12, 57.375) + >>> } + >>> } + >>> one_img, one_meta = preprocess_example_input(input_config) + >>> print(one_img.shape) + torch.Size([1, 3, 224, 224]) + >>> print(one_meta) + {'img_shape': (224, 224, 3), + 'ori_shape': (224, 224, 3), + 'pad_shape': (224, 224, 3), + 'filename': '.png', + 'scale_factor': 1.0, + 'flip': False} + """ + input_path = input_config['input_path'] + input_shape = input_config['input_shape'] + one_img = mmcv.imread(input_path) + one_img = mmcv.imresize(one_img, input_shape[2:][::-1]) + show_img = one_img.copy() + if 'normalize_cfg' in input_config.keys(): + normalize_cfg = input_config['normalize_cfg'] + mean = np.array(normalize_cfg['mean'], dtype=np.float32) + std = np.array(normalize_cfg['std'], dtype=np.float32) + to_rgb = normalize_cfg.get('to_rgb', True) + one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb) + one_img = one_img.transpose(2, 0, 1) + one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_( + True) + (_, C, H, W) = input_shape + one_meta = { + 'img_shape': (H, W, C), + 'ori_shape': (H, W, C), + 'pad_shape': (H, W, C), + 'filename': '.png', + 'scale_factor': np.ones(4, dtype=np.float32), + 'flip': False, + 'show_img': show_img, + 'flip_direction': None + } + + return one_img, one_meta diff --git a/downstream/mmdetection/mmdet/core/hook/__init__.py b/downstream/mmdetection/mmdet/core/hook/__init__.py new file mode 100644 index 0000000..7b9ac9f --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .checkloss_hook import CheckInvalidLossHook +from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook +from .memory_profiler_hook import MemoryProfilerHook +from .set_epoch_info_hook import SetEpochInfoHook +from .sync_norm_hook import SyncNormHook +from .sync_random_size_hook import SyncRandomSizeHook +from .wandblogger_hook import MMDetWandbHook +from .yolox_lrupdater_hook import YOLOXLrUpdaterHook +from .yolox_mode_switch_hook import YOLOXModeSwitchHook + +__all__ = [ + 'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook', + 'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook', + 'CheckInvalidLossHook', 'SetEpochInfoHook', 'MemoryProfilerHook', + 'MMDetWandbHook' +] diff --git a/downstream/mmdetection/mmdet/core/hook/checkloss_hook.py b/downstream/mmdetection/mmdet/core/hook/checkloss_hook.py new file mode 100644 index 0000000..754e61b --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/checkloss_hook.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.runner.hooks import HOOKS, Hook + + +@HOOKS.register_module() +class CheckInvalidLossHook(Hook): + """Check invalid loss hook. + + This hook will regularly check whether the loss is valid + during training. + + Args: + interval (int): Checking interval (every k iterations). + Default: 50. 
+ """ + + def __init__(self, interval=50): + self.interval = interval + + def after_train_iter(self, runner): + if self.every_n_iters(runner, self.interval): + assert torch.isfinite(runner.outputs['loss']), \ + runner.logger.info('loss become infinite or NaN!') diff --git a/downstream/mmdetection/mmdet/core/hook/ema.py b/downstream/mmdetection/mmdet/core/hook/ema.py new file mode 100644 index 0000000..ff7bfba --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/ema.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +from mmcv.parallel import is_module_wrapper +from mmcv.runner.hooks import HOOKS, Hook + + +class BaseEMAHook(Hook): + """Exponential Moving Average Hook. + + Use Exponential Moving Average on all parameters of model in training + process. All parameters have a ema backup, which update by the formula + as below. EMAHook takes priority over EvalHook and CheckpointHook. Note, + the original model parameters are actually saved in ema field after train. + + Args: + momentum (float): The momentum used for updating ema parameter. + Ema's parameter are updated with the formula: + `ema_param = (1-momentum) * ema_param + momentum * cur_param`. + Defaults to 0.0002. + skip_buffers (bool): Whether to skip the model buffers, such as + batchnorm running stats (running_mean, running_var), it does not + perform the ema operation. Default to False. + interval (int): Update ema parameter every interval iteration. + Defaults to 1. + resume_from (str, optional): The checkpoint path. Defaults to None. + momentum_fun (func, optional): The function to change momentum + during early iteration (also warmup) to help early training. + It uses `momentum` as a constant. Defaults to None. + """ + + def __init__(self, + momentum=0.0002, + interval=1, + skip_buffers=False, + resume_from=None, + momentum_fun=None): + assert 0 < momentum < 1 + self.momentum = momentum + self.skip_buffers = skip_buffers + self.interval = interval + self.checkpoint = resume_from + self.momentum_fun = momentum_fun + + def before_run(self, runner): + """To resume model with it's ema parameters more friendly. + + Register ema parameter as ``named_buffer`` to model. + """ + model = runner.model + if is_module_wrapper(model): + model = model.module + self.param_ema_buffer = {} + if self.skip_buffers: + self.model_parameters = dict(model.named_parameters()) + else: + self.model_parameters = model.state_dict() + for name, value in self.model_parameters.items(): + # "." 
is not allowed in module's buffer name + buffer_name = f"ema_{name.replace('.', '_')}" + self.param_ema_buffer[name] = buffer_name + model.register_buffer(buffer_name, value.data.clone()) + self.model_buffers = dict(model.named_buffers()) + if self.checkpoint is not None: + runner.resume(self.checkpoint) + + def get_momentum(self, runner): + return self.momentum_fun(runner.iter) if self.momentum_fun else \ + self.momentum + + def after_train_iter(self, runner): + """Update ema parameter every self.interval iterations.""" + if (runner.iter + 1) % self.interval != 0: + return + momentum = self.get_momentum(runner) + for name, parameter in self.model_parameters.items(): + # exclude num_tracking + if parameter.dtype.is_floating_point: + buffer_name = self.param_ema_buffer[name] + buffer_parameter = self.model_buffers[buffer_name] + buffer_parameter.mul_(1 - momentum).add_( + parameter.data, alpha=momentum) + + def after_train_epoch(self, runner): + """We load parameter values from ema backup to model before the + EvalHook.""" + self._swap_ema_parameters() + + def before_train_epoch(self, runner): + """We recover model's parameter from ema backup after last epoch's + EvalHook.""" + self._swap_ema_parameters() + + def _swap_ema_parameters(self): + """Swap the parameter of model with parameter in ema_buffer.""" + for name, value in self.model_parameters.items(): + temp = value.data.clone() + ema_buffer = self.model_buffers[self.param_ema_buffer[name]] + value.data.copy_(ema_buffer.data) + ema_buffer.data.copy_(temp) + + +@HOOKS.register_module() +class ExpMomentumEMAHook(BaseEMAHook): + """EMAHook using exponential momentum strategy. + + Args: + total_iter (int): The total number of iterations of EMA momentum. + Defaults to 2000. + """ + + def __init__(self, total_iter=2000, **kwargs): + super(ExpMomentumEMAHook, self).__init__(**kwargs) + self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-( + 1 + x) / total_iter) + self.momentum + + +@HOOKS.register_module() +class LinearMomentumEMAHook(BaseEMAHook): + """EMAHook using linear momentum strategy. + + Args: + warm_up (int): During first warm_up steps, we may use smaller decay + to update ema parameters more slowly. Defaults to 100. + """ + + def __init__(self, warm_up=100, **kwargs): + super(LinearMomentumEMAHook, self).__init__(**kwargs) + self.momentum_fun = lambda x: min(self.momentum**self.interval, + (1 + x) / (warm_up + x)) diff --git a/downstream/mmdetection/mmdet/core/hook/memory_profiler_hook.py b/downstream/mmdetection/mmdet/core/hook/memory_profiler_hook.py new file mode 100644 index 0000000..a473061 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/memory_profiler_hook.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.runner.hooks import HOOKS, Hook + + +@HOOKS.register_module() +class MemoryProfilerHook(Hook): + """Memory profiler hook recording memory information including virtual + memory, swap memory, and the memory of the current process. + + Args: + interval (int): Checking interval (every k iterations). + Default: 50. 
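To make the quantities this hook reports easier to interpret, here is a small standalone sketch (not part of the diff) of the `psutil`/`memory_profiler` calls it combines; the hook formats exactly these values into one log line.

```python
# Requires `pip install psutil memory_profiler`.
from memory_profiler import memory_usage
from psutil import swap_memory, virtual_memory

vm = virtual_memory()        # system RAM, reported in bytes
sm = swap_memory()           # swap usage, reported in bytes
proc_mb = memory_usage()[0]  # resident memory of this process, in MB

factor = 1024 * 1024
print(f'available: {round(vm.available / factor)} MB ({vm.percent}% used), '
      f'swap used: {round(sm.used / factor)} MB, '
      f'process: {round(proc_mb)} MB')
```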
+ """ + + def __init__(self, interval=50): + try: + from psutil import swap_memory, virtual_memory + self._swap_memory = swap_memory + self._virtual_memory = virtual_memory + except ImportError: + raise ImportError('psutil is not installed, please install it by: ' + 'pip install psutil') + + try: + from memory_profiler import memory_usage + self._memory_usage = memory_usage + except ImportError: + raise ImportError( + 'memory_profiler is not installed, please install it by: ' + 'pip install memory_profiler') + + self.interval = interval + + def after_iter(self, runner): + if self.every_n_iters(runner, self.interval): + # in Byte + virtual_memory = self._virtual_memory() + swap_memory = self._swap_memory() + # in MB + process_memory = self._memory_usage()[0] + factor = 1024 * 1024 + runner.logger.info( + 'Memory information ' + 'available_memory: ' + f'{round(virtual_memory.available / factor)} MB, ' + 'used_memory: ' + f'{round(virtual_memory.used / factor)} MB, ' + f'memory_utilization: {virtual_memory.percent} %, ' + 'available_swap_memory: ' + f'{round((swap_memory.total - swap_memory.used) / factor)}' + ' MB, ' + f'used_swap_memory: {round(swap_memory.used / factor)} MB, ' + f'swap_memory_utilization: {swap_memory.percent} %, ' + 'current_process_memory: ' + f'{round(process_memory)} MB') diff --git a/downstream/mmdetection/mmdet/core/hook/set_epoch_info_hook.py b/downstream/mmdetection/mmdet/core/hook/set_epoch_info_hook.py new file mode 100644 index 0000000..c2b134c --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/set_epoch_info_hook.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.parallel import is_module_wrapper +from mmcv.runner import HOOKS, Hook + + +@HOOKS.register_module() +class SetEpochInfoHook(Hook): + """Set runner's epoch information to the model.""" + + def before_train_epoch(self, runner): + epoch = runner.epoch + model = runner.model + if is_module_wrapper(model): + model = model.module + model.set_epoch(epoch) diff --git a/downstream/mmdetection/mmdet/core/hook/sync_norm_hook.py b/downstream/mmdetection/mmdet/core/hook/sync_norm_hook.py new file mode 100644 index 0000000..82931ce --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/sync_norm_hook.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict + +from mmcv.runner import get_dist_info +from mmcv.runner.hooks import HOOKS, Hook +from torch import nn + +from ..utils.dist_utils import all_reduce_dict + + +def get_norm_states(module): + async_norm_states = OrderedDict() + for name, child in module.named_modules(): + if isinstance(child, nn.modules.batchnorm._NormBase): + for k, v in child.state_dict().items(): + async_norm_states['.'.join([name, k])] = v + return async_norm_states + + +@HOOKS.register_module() +class SyncNormHook(Hook): + """Synchronize Norm states after training epoch, currently used in YOLOX. + + Args: + num_last_epochs (int): The number of latter epochs in the end of the + training to switch to synchronizing norm interval. Default: 15. + interval (int): Synchronizing norm interval. Default: 1. + """ + + def __init__(self, num_last_epochs=15, interval=1): + self.interval = interval + self.num_last_epochs = num_last_epochs + + def before_train_epoch(self, runner): + epoch = runner.epoch + if (epoch + 1) == runner.max_epochs - self.num_last_epochs: + # Synchronize norm every epoch. 
+ self.interval = 1 + + def after_train_epoch(self, runner): + """Synchronizing norm.""" + epoch = runner.epoch + module = runner.model + if (epoch + 1) % self.interval == 0: + _, world_size = get_dist_info() + if world_size == 1: + return + norm_states = get_norm_states(module) + if len(norm_states) == 0: + return + norm_states = all_reduce_dict(norm_states, op='mean') + module.load_state_dict(norm_states, strict=False) diff --git a/downstream/mmdetection/mmdet/core/hook/sync_random_size_hook.py b/downstream/mmdetection/mmdet/core/hook/sync_random_size_hook.py new file mode 100644 index 0000000..6d7e96c --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/sync_random_size_hook.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +import warnings + +import torch +from mmcv.runner import get_dist_info +from mmcv.runner.hooks import HOOKS, Hook +from torch import distributed as dist + + +@HOOKS.register_module() +class SyncRandomSizeHook(Hook): + """Change and synchronize the random image size across ranks. + SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve + similar functions. Such as `dict(type='Resize', img_scale=[(448, 448), + (832, 832)], multiscale_mode='range', keep_ratio=True)`. + + Note: Due to the multi-process dataloader, its behavior is different + from YOLOX's official implementation, the official is to change the + size every fixed iteration interval and what we achieved is a fixed + epoch interval. + + Args: + ratio_range (tuple[int]): Random ratio range. It will be multiplied + by 32, and then change the dataset output image size. + Default: (14, 26). + img_scale (tuple[int]): Size of input image. Default: (640, 640). + interval (int): The epoch interval of change image size. Default: 1. + device (torch.device | str): device for returned tensors. + Default: 'cuda'. + """ + + def __init__(self, + ratio_range=(14, 26), + img_scale=(640, 640), + interval=1, + device='cuda'): + warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. ' + 'Please use Resize pipeline to achieve similar ' + 'functions. Due to the multi-process dataloader, ' + 'its behavior is different from YOLOX\'s official ' + 'implementation, the official is to change the size ' + 'every fixed iteration interval and what we achieved ' + 'is a fixed epoch interval.') + self.rank, world_size = get_dist_info() + self.is_distributed = world_size > 1 + self.ratio_range = ratio_range + self.img_scale = img_scale + self.interval = interval + self.device = device + + def after_train_epoch(self, runner): + """Change the dataset output image size.""" + if self.ratio_range is not None and (runner.epoch + + 1) % self.interval == 0: + # Due to DDP and DP get the device behavior inconsistent, + # so we did not get the device from runner.model. + tensor = torch.LongTensor(2).to(self.device) + + if self.rank == 0: + size_factor = self.img_scale[1] * 1. 
/ self.img_scale[0] + size = random.randint(*self.ratio_range) + size = (int(32 * size), 32 * int(size * size_factor)) + tensor[0] = size[0] + tensor[1] = size[1] + + if self.is_distributed: + dist.barrier() + dist.broadcast(tensor, 0) + + runner.data_loader.dataset.update_dynamic_scale( + (tensor[0].item(), tensor[1].item())) diff --git a/downstream/mmdetection/mmdet/core/hook/wandblogger_hook.py b/downstream/mmdetection/mmdet/core/hook/wandblogger_hook.py new file mode 100644 index 0000000..f094b04 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/wandblogger_hook.py @@ -0,0 +1,586 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import importlib +import os.path as osp +import sys +import warnings + +import mmcv +import numpy as np +import pycocotools.mask as mask_util +from mmcv.runner import HOOKS +from mmcv.runner.dist_utils import master_only +from mmcv.runner.hooks.checkpoint import CheckpointHook +from mmcv.runner.hooks.logger.wandb import WandbLoggerHook +from mmcv.utils import digit_version + +from mmdet.core import DistEvalHook, EvalHook +from mmdet.core.mask.structures import polygon_to_bitmap + + +@HOOKS.register_module() +class MMDetWandbHook(WandbLoggerHook): + """Enhanced Wandb logger hook for MMDetection. + + Comparing with the :cls:`mmcv.runner.WandbLoggerHook`, this hook can not + only automatically log all the metrics but also log the following extra + information - saves model checkpoints as W&B Artifact, and + logs model prediction as interactive W&B Tables. + + - Metrics: The MMDetWandbHook will automatically log training + and validation metrics along with system metrics (CPU/GPU). + + - Checkpointing: If `log_checkpoint` is True, the checkpoint saved at + every checkpoint interval will be saved as W&B Artifacts. + This depends on the : class:`mmcv.runner.CheckpointHook` whose priority + is higher than this hook. Please refer to + https://docs.wandb.ai/guides/artifacts/model-versioning + to learn more about model versioning with W&B Artifacts. + + - Checkpoint Metadata: If evaluation results are available for a given + checkpoint artifact, it will have a metadata associated with it. + The metadata contains the evaluation metrics computed on validation + data with that checkpoint along with the current epoch. It depends + on `EvalHook` whose priority is more than MMDetWandbHook. + + - Evaluation: At every evaluation interval, the `MMDetWandbHook` logs the + model prediction as interactive W&B Tables. The number of samples + logged is given by `num_eval_images`. Currently, the `MMDetWandbHook` + logs the predicted bounding boxes along with the ground truth at every + evaluation interval. This depends on the `EvalHook` whose priority is + more than `MMDetWandbHook`. Also note that the data is just logged once + and subsequent evaluation tables uses reference to the logged data + to save memory usage. Please refer to + https://docs.wandb.ai/guides/data-vis to learn more about W&B Tables. + + For more details check out W&B's MMDetection docs: + https://docs.wandb.ai/guides/integrations/mmdetection + + ``` + Example: + log_config = dict( + ... + hooks=[ + ..., + dict(type='MMDetWandbHook', + init_kwargs={ + 'entity': "YOUR_ENTITY", + 'project': "YOUR_PROJECT_NAME" + }, + interval=50, + log_checkpoint=True, + log_checkpoint_metadata=True, + num_eval_images=100, + bbox_score_thr=0.3) + ]) + ``` + + Args: + init_kwargs (dict): A dict passed to wandb.init to initialize + a W&B run. 
Please refer to https://docs.wandb.ai/ref/python/init + for possible key-value pairs. + interval (int): Logging interval (every k iterations). Defaults to 50. + log_checkpoint (bool): Save the checkpoint at every checkpoint interval + as W&B Artifacts. Use this for model versioning where each version + is a checkpoint. Defaults to False. + log_checkpoint_metadata (bool): Log the evaluation metrics computed + on the validation data with the checkpoint, along with current + epoch as a metadata to that checkpoint. + Defaults to True. + num_eval_images (int): The number of validation images to be logged. + If zero, the evaluation won't be logged. Defaults to 100. + bbox_score_thr (float): Threshold for bounding box scores. + Defaults to 0.3. + """ + + def __init__(self, + init_kwargs=None, + interval=50, + log_checkpoint=False, + log_checkpoint_metadata=False, + num_eval_images=100, + bbox_score_thr=0.3, + **kwargs): + super(MMDetWandbHook, self).__init__(init_kwargs, interval, **kwargs) + + self.log_checkpoint = log_checkpoint + self.log_checkpoint_metadata = ( + log_checkpoint and log_checkpoint_metadata) + self.num_eval_images = num_eval_images + self.bbox_score_thr = bbox_score_thr + self.log_evaluation = (num_eval_images > 0) + self.ckpt_hook: CheckpointHook = None + self.eval_hook: EvalHook = None + + def import_wandb(self): + try: + import wandb + from wandb import init # noqa + + # Fix ResourceWarning when calling wandb.log in wandb v0.12.10. + # https://github.com/wandb/client/issues/2837 + if digit_version(wandb.__version__) < digit_version('0.12.10'): + warnings.warn( + f'The current wandb {wandb.__version__} is ' + f'lower than v0.12.10 will cause ResourceWarning ' + f'when calling wandb.log, Please run ' + f'"pip install --upgrade wandb"') + + except ImportError: + raise ImportError( + 'Please run "pip install "wandb>=0.12.10"" to install wandb') + self.wandb = wandb + + @master_only + def before_run(self, runner): + super(MMDetWandbHook, self).before_run(runner) + + # Save and Log config. + if runner.meta is not None: + src_cfg_path = osp.join(runner.work_dir, + runner.meta.get('exp_name', None)) + if osp.exists(src_cfg_path): + self.wandb.save(src_cfg_path, base_path=runner.work_dir) + self._update_wandb_config(runner) + else: + runner.logger.warning('No meta information found in the runner. ') + + # Inspect CheckpointHook and EvalHook + for hook in runner.hooks: + if isinstance(hook, CheckpointHook): + self.ckpt_hook = hook + if isinstance(hook, (EvalHook, DistEvalHook)): + self.eval_hook = hook + + # Check conditions to log checkpoint + if self.log_checkpoint: + if self.ckpt_hook is None: + self.log_checkpoint = False + self.log_checkpoint_metadata = False + runner.logger.warning( + 'To log checkpoint in MMDetWandbHook, `CheckpointHook` is' + 'required, please check hooks in the runner.') + else: + self.ckpt_interval = self.ckpt_hook.interval + + # Check conditions to log evaluation + if self.log_evaluation or self.log_checkpoint_metadata: + if self.eval_hook is None: + self.log_evaluation = False + self.log_checkpoint_metadata = False + runner.logger.warning( + 'To log evaluation or checkpoint metadata in ' + 'MMDetWandbHook, `EvalHook` or `DistEvalHook` in mmdet ' + 'is required, please check whether the validation ' + 'is enabled.') + else: + self.eval_interval = self.eval_hook.interval + self.val_dataset = self.eval_hook.dataloader.dataset + # Determine the number of samples to be logged. 
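+ # self.log_evaluation was set from `num_eval_images > 0` in __init__, so a
+ # value of 0 disables table logging entirely; below, the requested number
+ # of images is additionally clamped to the validation set size.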
+ if self.num_eval_images > len(self.val_dataset): + self.num_eval_images = len(self.val_dataset) + runner.logger.warning( + f'The num_eval_images ({self.num_eval_images}) is ' + 'greater than the total number of validation samples ' + f'({len(self.val_dataset)}). The complete validation ' + 'dataset will be logged.') + + # Check conditions to log checkpoint metadata + if self.log_checkpoint_metadata: + assert self.ckpt_interval % self.eval_interval == 0, \ + 'To log checkpoint metadata in MMDetWandbHook, the interval ' \ + f'of checkpoint saving ({self.ckpt_interval}) should be ' \ + 'divisible by the interval of evaluation ' \ + f'({self.eval_interval}).' + + # Initialize evaluation table + if self.log_evaluation: + # Initialize data table + self._init_data_table() + # Add data to the data table + self._add_ground_truth(runner) + # Log ground truth data + self._log_data_table() + + @master_only + def after_train_epoch(self, runner): + super(MMDetWandbHook, self).after_train_epoch(runner) + + if not self.by_epoch: + return + + # Log checkpoint and metadata. + if (self.log_checkpoint + and self.every_n_epochs(runner, self.ckpt_interval) + or (self.ckpt_hook.save_last and self.is_last_epoch(runner))): + if self.log_checkpoint_metadata and self.eval_hook: + metadata = { + 'epoch': runner.epoch + 1, + **self._get_eval_results() + } + else: + metadata = None + aliases = [f'epoch_{runner.epoch + 1}', 'latest'] + model_path = osp.join(self.ckpt_hook.out_dir, + f'epoch_{runner.epoch + 1}.pth') + self._log_ckpt_as_artifact(model_path, aliases, metadata) + + # Save prediction table + if self.log_evaluation and self.eval_hook._should_evaluate(runner): + results = self.eval_hook.latest_results + # Initialize evaluation table + self._init_pred_table() + # Log predictions + self._log_predictions(results) + # Log the table + self._log_eval_table(runner.epoch + 1) + + @master_only + def after_train_iter(self, runner): + if self.get_mode(runner) == 'train': + # An ugly patch. The iter-based eval hook will call the + # `after_train_iter` method of all logger hooks before evaluation. + # Use this trick to skip that call. + # Don't call super method at first, it will clear the log_buffer + return super(MMDetWandbHook, self).after_train_iter(runner) + else: + super(MMDetWandbHook, self).after_train_iter(runner) + + if self.by_epoch: + return + + # Save checkpoint and metadata + if (self.log_checkpoint + and self.every_n_iters(runner, self.ckpt_interval) + or (self.ckpt_hook.save_last and self.is_last_iter(runner))): + if self.log_checkpoint_metadata and self.eval_hook: + metadata = { + 'iter': runner.iter + 1, + **self._get_eval_results() + } + else: + metadata = None + aliases = [f'iter_{runner.iter + 1}', 'latest'] + model_path = osp.join(self.ckpt_hook.out_dir, + f'iter_{runner.iter + 1}.pth') + self._log_ckpt_as_artifact(model_path, aliases, metadata) + + # Save prediction table + if self.log_evaluation and self.eval_hook._should_evaluate(runner): + results = self.eval_hook.latest_results + # Initialize evaluation table + self._init_pred_table() + # Log predictions + self._log_predictions(results) + # Log the table + self._log_eval_table(runner.iter + 1) + + @master_only + def after_run(self, runner): + self.wandb.finish() + + def _update_wandb_config(self, runner): + """Update wandb config.""" + # Import the config file. + sys.path.append(runner.work_dir) + config_filename = runner.meta['exp_name'][:-3] + configs = importlib.import_module(config_filename) + # Prepare a nested dict of config variables. 
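+ # The experiment config was imported above as a plain Python module, so
+ # every top-level attribute that is not a dunder becomes one key of the
+ # W&B run config.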
+ config_keys = [key for key in dir(configs) if not key.startswith('__')] + config_dict = {key: getattr(configs, key) for key in config_keys} + # Update the W&B config. + self.wandb.config.update(config_dict) + + def _log_ckpt_as_artifact(self, model_path, aliases, metadata=None): + """Log model checkpoint as W&B Artifact. + + Args: + model_path (str): Path of the checkpoint to log. + aliases (list): List of the aliases associated with this artifact. + metadata (dict, optional): Metadata associated with this artifact. + """ + model_artifact = self.wandb.Artifact( + f'run_{self.wandb.run.id}_model', type='model', metadata=metadata) + model_artifact.add_file(model_path) + self.wandb.log_artifact(model_artifact, aliases=aliases) + + def _get_eval_results(self): + """Get model evaluation results.""" + results = self.eval_hook.latest_results + eval_results = self.val_dataset.evaluate( + results, logger='silent', **self.eval_hook.eval_kwargs) + return eval_results + + def _init_data_table(self): + """Initialize the W&B Tables for validation data.""" + columns = ['image_name', 'image'] + self.data_table = self.wandb.Table(columns=columns) + + def _init_pred_table(self): + """Initialize the W&B Tables for model evaluation.""" + columns = ['image_name', 'ground_truth', 'prediction'] + self.eval_table = self.wandb.Table(columns=columns) + + def _add_ground_truth(self, runner): + # Get image loading pipeline + from mmdet.datasets.pipelines import LoadImageFromFile + img_loader = None + for t in self.val_dataset.pipeline.transforms: + if isinstance(t, LoadImageFromFile): + img_loader = t + + if img_loader is None: + self.log_evaluation = False + runner.logger.warning( + 'LoadImageFromFile is required to add images ' + 'to W&B Tables.') + return + + # Select the images to be logged. + self.eval_image_indexs = np.arange(len(self.val_dataset)) + # Set seed so that same validation set is logged each time. + np.random.seed(42) + np.random.shuffle(self.eval_image_indexs) + self.eval_image_indexs = self.eval_image_indexs[:self.num_eval_images] + + CLASSES = self.val_dataset.CLASSES + self.class_id_to_label = { + id + 1: name + for id, name in enumerate(CLASSES) + } + self.class_set = self.wandb.Classes([{ + 'id': id, + 'name': name + } for id, name in self.class_id_to_label.items()]) + + img_prefix = self.val_dataset.img_prefix + + for idx in self.eval_image_indexs: + img_info = self.val_dataset.data_infos[idx] + image_name = img_info.get('filename', f'img_{idx}') + img_height, img_width = img_info['height'], img_info['width'] + + img_meta = img_loader( + dict(img_info=img_info, img_prefix=img_prefix)) + + # Get image and convert from BGR to RGB + image = mmcv.bgr2rgb(img_meta['img']) + + data_ann = self.val_dataset.get_ann_info(idx) + bboxes = data_ann['bboxes'] + labels = data_ann['labels'] + masks = data_ann.get('masks', None) + + # Get dict of bounding boxes to be logged. + assert len(bboxes) == len(labels) + wandb_boxes = self._get_wandb_bboxes(bboxes, labels) + + # Get dict of masks to be logged. + if masks is not None: + wandb_masks = self._get_wandb_masks( + masks, + labels, + is_poly_mask=True, + height=img_height, + width=img_width) + else: + wandb_masks = None + # TODO: Panoramic segmentation visualization. + + # Log a row to the data table. 
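+ # Each row pairs the image name with a wandb.Image carrying the
+ # ground-truth boxes/masks and the class set defined above; prediction
+ # tables later reference these rows instead of re-uploading the images.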
+ self.data_table.add_data( + image_name, + self.wandb.Image( + image, + boxes=wandb_boxes, + masks=wandb_masks, + classes=self.class_set)) + + def _log_predictions(self, results): + table_idxs = self.data_table_ref.get_index() + assert len(table_idxs) == len(self.eval_image_indexs) + + for ndx, eval_image_index in enumerate(self.eval_image_indexs): + # Get the result + result = results[eval_image_index] + if isinstance(result, tuple): + bbox_result, segm_result = result + if isinstance(segm_result, tuple): + segm_result = segm_result[0] # ms rcnn + else: + bbox_result, segm_result = result, None + assert len(bbox_result) == len(self.class_id_to_label) + + # Get labels + bboxes = np.vstack(bbox_result) + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + + # Get segmentation mask if available. + segms = None + if segm_result is not None and len(labels) > 0: + segms = mmcv.concat_list(segm_result) + segms = mask_util.decode(segms) + segms = segms.transpose(2, 0, 1) + assert len(segms) == len(labels) + # TODO: Panoramic segmentation visualization. + + # Remove bounding boxes and masks with score lower than threshold. + if self.bbox_score_thr > 0: + assert bboxes is not None and bboxes.shape[1] == 5 + scores = bboxes[:, -1] + inds = scores > self.bbox_score_thr + bboxes = bboxes[inds, :] + labels = labels[inds] + if segms is not None: + segms = segms[inds, ...] + + # Get dict of bounding boxes to be logged. + wandb_boxes = self._get_wandb_bboxes(bboxes, labels, log_gt=False) + # Get dict of masks to be logged. + if segms is not None: + wandb_masks = self._get_wandb_masks(segms, labels) + else: + wandb_masks = None + + # Log a row to the eval table. + self.eval_table.add_data( + self.data_table_ref.data[ndx][0], + self.data_table_ref.data[ndx][1], + self.wandb.Image( + self.data_table_ref.data[ndx][1], + boxes=wandb_boxes, + masks=wandb_masks, + classes=self.class_set)) + + def _get_wandb_bboxes(self, bboxes, labels, log_gt=True): + """Get list of structured dict for logging bounding boxes to W&B. + + Args: + bboxes (list): List of bounding box coordinates in + (minX, minY, maxX, maxY) format. + labels (int): List of label ids. + log_gt (bool): Whether to log ground truth or prediction boxes. + + Returns: + Dictionary of bounding boxes to be logged. + """ + wandb_boxes = {} + + box_data = [] + for bbox, label in zip(bboxes, labels): + if not isinstance(label, int): + label = int(label) + label = label + 1 + + if len(bbox) == 5: + confidence = float(bbox[4]) + class_name = self.class_id_to_label[label] + box_caption = f'{class_name} {confidence:.2f}' + else: + box_caption = str(self.class_id_to_label[label]) + + position = dict( + minX=int(bbox[0]), + minY=int(bbox[1]), + maxX=int(bbox[2]), + maxY=int(bbox[3])) + + box_data.append({ + 'position': position, + 'class_id': label, + 'box_caption': box_caption, + 'domain': 'pixel' + }) + + wandb_bbox_dict = { + 'box_data': box_data, + 'class_labels': self.class_id_to_label + } + + if log_gt: + wandb_boxes['ground_truth'] = wandb_bbox_dict + else: + wandb_boxes['predictions'] = wandb_bbox_dict + + return wandb_boxes + + def _get_wandb_masks(self, + masks, + labels, + is_poly_mask=False, + height=None, + width=None): + """Get list of structured dict for logging masks to W&B. + + Args: + masks (list): List of masks. + labels (int): List of label ids. + is_poly_mask (bool): Whether the mask is polygonal or not. + This is true for CocoDataset. 
+ height (int): Height of the image. + width (int): Width of the image. + + Returns: + Dictionary of masks to be logged. + """ + mask_label_dict = dict() + for mask, label in zip(masks, labels): + label = label + 1 + # Get bitmap mask from polygon. + if is_poly_mask: + if height is not None and width is not None: + mask = polygon_to_bitmap(mask, height, width) + # Create composite masks for each class. + if label not in mask_label_dict.keys(): + mask_label_dict[label] = mask + else: + mask_label_dict[label] = np.logical_or(mask_label_dict[label], + mask) + + wandb_masks = dict() + for key, value in mask_label_dict.items(): + # Create mask for that class. + value = value.astype(np.uint8) + value[value > 0] = key + + # Create dict of masks for logging. + class_name = self.class_id_to_label[key] + wandb_masks[class_name] = { + 'mask_data': value, + 'class_labels': self.class_id_to_label + } + + return wandb_masks + + def _log_data_table(self): + """Log the W&B Tables for validation data as artifact and calls + `use_artifact` on it so that the evaluation table can use the reference + of already uploaded images. + + This allows the data to be uploaded just once. + """ + data_artifact = self.wandb.Artifact('val', type='dataset') + data_artifact.add(self.data_table, 'val_data') + + self.wandb.run.use_artifact(data_artifact) + data_artifact.wait() + + self.data_table_ref = data_artifact.get('val_data') + + def _log_eval_table(self, idx): + """Log the W&B Tables for model evaluation. + + The table will be logged multiple times creating new version. Use this + to compare models at different intervals interactively. + """ + pred_artifact = self.wandb.Artifact( + f'run_{self.wandb.run.id}_pred', type='evaluation') + pred_artifact.add(self.eval_table, 'eval_data') + if self.by_epoch: + aliases = ['latest', f'epoch_{idx}'] + else: + aliases = ['latest', f'iter_{idx}'] + self.wandb.run.log_artifact(pred_artifact, aliases=aliases) diff --git a/downstream/mmdetection/mmdet/core/hook/yolox_lrupdater_hook.py b/downstream/mmdetection/mmdet/core/hook/yolox_lrupdater_hook.py new file mode 100644 index 0000000..ecb028e --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/yolox_lrupdater_hook.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.runner.hooks import HOOKS +from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook, + annealing_cos) + + +@HOOKS.register_module() +class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook): + """YOLOX learning rate scheme. + + There are two main differences between YOLOXLrUpdaterHook + and CosineAnnealingLrUpdaterHook. + + 1. When the current running epoch is greater than + `max_epoch-last_epoch`, a fixed learning rate will be used + 2. The exp warmup scheme is different with LrUpdaterHook in MMCV + + Args: + num_last_epochs (int): The number of epochs with a fixed learning rate + before the end of the training. 
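For orientation (not part of this diff, values are illustrative), a config fragment along the lines of the YOLOX configs in MMDetection that would select this scheduler; mmcv should resolve `policy='YOLOX'` to `YOLOXLrUpdaterHook`.

```python
# Assumed usage sketch: exp warmup first, cosine annealing afterwards, and a
# fixed learning rate for the last `num_last_epochs` epochs.
lr_config = dict(
    policy='YOLOX',
    warmup='exp',
    by_epoch=False,
    warmup_by_epoch=True,
    warmup_ratio=1,
    warmup_iters=5,  # interpreted as 5 epochs because warmup_by_epoch=True
    num_last_epochs=15,
    min_lr_ratio=0.05)
```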
+ """ + + def __init__(self, num_last_epochs, **kwargs): + self.num_last_epochs = num_last_epochs + super(YOLOXLrUpdaterHook, self).__init__(**kwargs) + + def get_warmup_lr(self, cur_iters): + + def _get_warmup_lr(cur_iters, regular_lr): + # exp warmup scheme + k = self.warmup_ratio * pow( + (cur_iters + 1) / float(self.warmup_iters), 2) + warmup_lr = [_lr * k for _lr in regular_lr] + return warmup_lr + + if isinstance(self.base_lr, dict): + lr_groups = {} + for key, base_lr in self.base_lr.items(): + lr_groups[key] = _get_warmup_lr(cur_iters, base_lr) + return lr_groups + else: + return _get_warmup_lr(cur_iters, self.base_lr) + + def get_lr(self, runner, base_lr): + last_iter = len(runner.data_loader) * self.num_last_epochs + + if self.by_epoch: + progress = runner.epoch + max_progress = runner.max_epochs + else: + progress = runner.iter + max_progress = runner.max_iters + + progress += 1 + + if self.min_lr_ratio is not None: + target_lr = base_lr * self.min_lr_ratio + else: + target_lr = self.min_lr + + if progress >= max_progress - last_iter: + # fixed learning rate + return target_lr + else: + return annealing_cos( + base_lr, target_lr, (progress - self.warmup_iters) / + (max_progress - self.warmup_iters - last_iter)) diff --git a/downstream/mmdetection/mmdet/core/hook/yolox_mode_switch_hook.py b/downstream/mmdetection/mmdet/core/hook/yolox_mode_switch_hook.py new file mode 100644 index 0000000..10834e6 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/hook/yolox_mode_switch_hook.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.parallel import is_module_wrapper +from mmcv.runner.hooks import HOOKS, Hook + + +@HOOKS.register_module() +class YOLOXModeSwitchHook(Hook): + """Switch the mode of YOLOX during training. + + This hook turns off the mosaic and mixup data augmentation and switches + to use L1 loss in bbox_head. + + Args: + num_last_epochs (int): The number of latter epochs in the end of the + training to close the data augmentation and switch to L1 loss. + Default: 15. + skip_type_keys (list[str], optional): Sequence of type string to be + skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp') + """ + + def __init__(self, + num_last_epochs=15, + skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')): + self.num_last_epochs = num_last_epochs + self.skip_type_keys = skip_type_keys + self._restart_dataloader = False + + def before_train_epoch(self, runner): + """Close mosaic and mixup augmentation and switches to use L1 loss.""" + epoch = runner.epoch + train_loader = runner.data_loader + model = runner.model + if is_module_wrapper(model): + model = model.module + if (epoch + 1) == runner.max_epochs - self.num_last_epochs: + runner.logger.info('No mosaic and mixup aug now!') + # The dataset pipeline cannot be updated when persistent_workers + # is True, so we need to force the dataloader's multi-process + # restart. This is a very hacky approach. + train_loader.dataset.update_skip_type_keys(self.skip_type_keys) + if hasattr(train_loader, 'persistent_workers' + ) and train_loader.persistent_workers is True: + train_loader._DataLoader__initialized = False + train_loader._iterator = None + self._restart_dataloader = True + runner.logger.info('Add additional L1 loss now!') + model.bbox_head.use_l1 = True + else: + # Once the restart is complete, we need to restore + # the initialization flag. 
+ if self._restart_dataloader: + train_loader._DataLoader__initialized = True diff --git a/downstream/mmdetection/mmdet/core/mask/__init__.py b/downstream/mmdetection/mmdet/core/mask/__init__.py new file mode 100644 index 0000000..644a9b1 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/mask/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .mask_target import mask_target +from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks +from .utils import encode_mask_results, mask2bbox, split_combined_polys + +__all__ = [ + 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', + 'PolygonMasks', 'encode_mask_results', 'mask2bbox' +] diff --git a/downstream/mmdetection/mmdet/core/mask/mask_target.py b/downstream/mmdetection/mmdet/core/mask/mask_target.py new file mode 100644 index 0000000..273e767 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/mask/mask_target.py @@ -0,0 +1,127 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from torch.nn.modules.utils import _pair + + +def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, + cfg): + """Compute mask target for positive proposals in multiple images. + + Args: + pos_proposals_list (list[Tensor]): Positive proposals in multiple + images. + pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each + positive proposals. + gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of + each image. + cfg (dict): Config dict that specifies the mask size. + + Returns: + list[Tensor]: Mask target of each image. + + Example: + >>> import mmcv + >>> import mmdet + >>> from mmdet.core.mask import BitmapMasks + >>> from mmdet.core.mask.mask_target import * + >>> H, W = 17, 18 + >>> cfg = mmcv.Config({'mask_size': (13, 14)}) + >>> rng = np.random.RandomState(0) + >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image + >>> pos_proposals_list = [ + >>> torch.Tensor([ + >>> [ 7.2425, 5.5929, 13.9414, 14.9541], + >>> [ 7.3241, 3.6170, 16.3850, 15.3102], + >>> ]), + >>> torch.Tensor([ + >>> [ 4.8448, 6.4010, 7.0314, 9.7681], + >>> [ 5.9790, 2.6989, 7.4416, 4.8580], + >>> [ 0.0000, 0.0000, 0.1398, 9.8232], + >>> ]), + >>> ] + >>> # Corresponding class index for each proposal for each image + >>> pos_assigned_gt_inds_list = [ + >>> torch.LongTensor([7, 0]), + >>> torch.LongTensor([5, 4, 1]), + >>> ] + >>> # Ground truth mask for each true object for each image + >>> gt_masks_list = [ + >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W), + >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W), + >>> ] + >>> mask_targets = mask_target( + >>> pos_proposals_list, pos_assigned_gt_inds_list, + >>> gt_masks_list, cfg) + >>> assert mask_targets.shape == (5,) + cfg['mask_size'] + """ + cfg_list = [cfg for _ in range(len(pos_proposals_list))] + mask_targets = map(mask_target_single, pos_proposals_list, + pos_assigned_gt_inds_list, gt_masks_list, cfg_list) + mask_targets = list(mask_targets) + if len(mask_targets) > 0: + mask_targets = torch.cat(mask_targets) + return mask_targets + + +def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): + """Compute mask target for each positive proposal in the image. + + Args: + pos_proposals (Tensor): Positive proposals. + pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals. + gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap + or Polygon. + cfg (dict): Config dict that indicate the mask size. 
+ + Returns: + Tensor: Mask target of each positive proposals in the image. + + Example: + >>> import mmcv + >>> import mmdet + >>> from mmdet.core.mask import BitmapMasks + >>> from mmdet.core.mask.mask_target import * # NOQA + >>> H, W = 32, 32 + >>> cfg = mmcv.Config({'mask_size': (7, 11)}) + >>> rng = np.random.RandomState(0) + >>> # Masks for each ground truth box (relative to the image) + >>> gt_masks_data = rng.rand(3, H, W) + >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W) + >>> # Predicted positive boxes in one image + >>> pos_proposals = torch.FloatTensor([ + >>> [ 16.2, 5.5, 19.9, 20.9], + >>> [ 17.3, 13.6, 19.3, 19.3], + >>> [ 14.8, 16.4, 17.0, 23.7], + >>> [ 0.0, 0.0, 16.0, 16.0], + >>> [ 4.0, 0.0, 20.0, 16.0], + >>> ]) + >>> # For each predicted proposal, its assignment to a gt mask + >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1]) + >>> mask_targets = mask_target_single( + >>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg) + >>> assert mask_targets.shape == (5,) + cfg['mask_size'] + """ + device = pos_proposals.device + mask_size = _pair(cfg.mask_size) + binarize = not cfg.get('soft_mask_target', False) + num_pos = pos_proposals.size(0) + if num_pos > 0: + proposals_np = pos_proposals.cpu().numpy() + maxh, maxw = gt_masks.height, gt_masks.width + proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw) + proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh) + pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() + + mask_targets = gt_masks.crop_and_resize( + proposals_np, + mask_size, + device=device, + inds=pos_assigned_gt_inds, + binarize=binarize).to_ndarray() + + mask_targets = torch.from_numpy(mask_targets).float().to(device) + else: + mask_targets = pos_proposals.new_zeros((0, ) + mask_size) + + return mask_targets diff --git a/downstream/mmdetection/mmdet/core/mask/structures.py b/downstream/mmdetection/mmdet/core/mask/structures.py new file mode 100644 index 0000000..a9d0ebb --- /dev/null +++ b/downstream/mmdetection/mmdet/core/mask/structures.py @@ -0,0 +1,1102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import cv2 +import mmcv +import numpy as np +import pycocotools.mask as maskUtils +import torch +from mmcv.ops.roi_align import roi_align + + +class BaseInstanceMasks(metaclass=ABCMeta): + """Base class for instance masks.""" + + @abstractmethod + def rescale(self, scale, interpolation='nearest'): + """Rescale masks as large as possible while keeping the aspect ratio. + For details can refer to `mmcv.imrescale`. + + Args: + scale (tuple[int]): The maximum size (h, w) of rescaled mask. + interpolation (str): Same as :func:`mmcv.imrescale`. + + Returns: + BaseInstanceMasks: The rescaled masks. + """ + + @abstractmethod + def resize(self, out_shape, interpolation='nearest'): + """Resize masks to the given out_shape. + + Args: + out_shape: Target (h, w) of resized mask. + interpolation (str): See :func:`mmcv.imresize`. + + Returns: + BaseInstanceMasks: The resized masks. + """ + + @abstractmethod + def flip(self, flip_direction='horizontal'): + """Flip masks alone the given direction. + + Args: + flip_direction (str): Either 'horizontal' or 'vertical'. + + Returns: + BaseInstanceMasks: The flipped masks. + """ + + @abstractmethod + def pad(self, out_shape, pad_val): + """Pad masks to the given size of (h, w). + + Args: + out_shape (tuple[int]): Target (h, w) of padded mask. + pad_val (int): The padded value. + + Returns: + BaseInstanceMasks: The padded masks. 
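Since `pad`, `resize` and `flip` are all part of the same abstract interface, here is a minimal sketch (illustrative only, not from this diff) of how the concrete mask classes are used interchangeably by pipeline transforms.

```python
import numpy as np

from mmdet.core.mask.structures import BitmapMasks

# Two empty 32x32 instance masks; PolygonMasks exposes the same methods,
# so callers can treat both mask types uniformly.
masks = BitmapMasks(np.zeros((2, 32, 32), dtype=np.uint8), height=32, width=32)
masks = masks.resize((64, 64)).flip('horizontal').pad((80, 80), pad_val=0)
print(masks)  # BitmapMasks(num_masks=2, height=80, width=80)
```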
+ """ + + @abstractmethod + def crop(self, bbox): + """Crop each mask by the given bbox. + + Args: + bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). + + Return: + BaseInstanceMasks: The cropped masks. + """ + + @abstractmethod + def crop_and_resize(self, + bboxes, + out_shape, + inds, + device, + interpolation='bilinear', + binarize=True): + """Crop and resize masks by the given bboxes. + + This function is mainly used in mask targets computation. + It firstly align mask to bboxes by assigned_inds, then crop mask by the + assigned bbox and resize to the size of (mask_h, mask_w) + + Args: + bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) + out_shape (tuple[int]): Target (h, w) of resized mask + inds (ndarray): Indexes to assign masks to each bbox, + shape (N,) and values should be between [0, num_masks - 1]. + device (str): Device of bboxes + interpolation (str): See `mmcv.imresize` + binarize (bool): if True fractional values are rounded to 0 or 1 + after the resize operation. if False and unsupported an error + will be raised. Defaults to True. + + Return: + BaseInstanceMasks: the cropped and resized masks. + """ + + @abstractmethod + def expand(self, expanded_h, expanded_w, top, left): + """see :class:`Expand`.""" + + @property + @abstractmethod + def areas(self): + """ndarray: areas of each instance.""" + + @abstractmethod + def to_ndarray(self): + """Convert masks to the format of ndarray. + + Return: + ndarray: Converted masks in the format of ndarray. + """ + + @abstractmethod + def to_tensor(self, dtype, device): + """Convert masks to the format of Tensor. + + Args: + dtype (str): Dtype of converted mask. + device (torch.device): Device of converted masks. + + Returns: + Tensor: Converted masks in the format of Tensor. + """ + + @abstractmethod + def translate(self, + out_shape, + offset, + direction='horizontal', + fill_val=0, + interpolation='bilinear'): + """Translate the masks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + offset (int | float): The offset for translate. + direction (str): The translate direction, either "horizontal" + or "vertical". + fill_val (int | float): Border value. Default 0. + interpolation (str): Same as :func:`mmcv.imtranslate`. + + Returns: + Translated masks. + """ + + def shear(self, + out_shape, + magnitude, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """Shear the masks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + magnitude (int | float): The magnitude used for shear. + direction (str): The shear direction, either "horizontal" + or "vertical". + border_value (int | tuple[int]): Value used in case of a + constant border. Default 0. + interpolation (str): Same as in :func:`mmcv.imshear`. + + Returns: + ndarray: Sheared masks. + """ + + @abstractmethod + def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): + """Rotate the masks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + angle (int | float): Rotation angle in degrees. Positive values + mean counter-clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the + rotation in source image. If not specified, the center of + the image will be used. + scale (int | float): Isotropic scale factor. + fill_val (int | float): Border value. Default 0 for masks. + + Returns: + Rotated masks. + """ + + +class BitmapMasks(BaseInstanceMasks): + """This class represents masks in the form of bitmaps. 
+ + Args: + masks (ndarray): ndarray of masks in shape (N, H, W), where N is + the number of objects. + height (int): height of masks + width (int): width of masks + + Example: + >>> from mmdet.core.mask.structures import * # NOQA + >>> num_masks, H, W = 3, 32, 32 + >>> rng = np.random.RandomState(0) + >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int) + >>> self = BitmapMasks(masks, height=H, width=W) + + >>> # demo crop_and_resize + >>> num_boxes = 5 + >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) + >>> out_shape = (14, 14) + >>> inds = torch.randint(0, len(self), size=(num_boxes,)) + >>> device = 'cpu' + >>> interpolation = 'bilinear' + >>> new = self.crop_and_resize( + ... bboxes, out_shape, inds, device, interpolation) + >>> assert len(new) == num_boxes + >>> assert new.height, new.width == out_shape + """ + + def __init__(self, masks, height, width): + self.height = height + self.width = width + if len(masks) == 0: + self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) + else: + assert isinstance(masks, (list, np.ndarray)) + if isinstance(masks, list): + assert isinstance(masks[0], np.ndarray) + assert masks[0].ndim == 2 # (H, W) + else: + assert masks.ndim == 3 # (N, H, W) + + self.masks = np.stack(masks).reshape(-1, height, width) + assert self.masks.shape[1] == self.height + assert self.masks.shape[2] == self.width + + def __getitem__(self, index): + """Index the BitmapMask. + + Args: + index (int | ndarray): Indices in the format of integer or ndarray. + + Returns: + :obj:`BitmapMasks`: Indexed bitmap masks. + """ + masks = self.masks[index].reshape(-1, self.height, self.width) + return BitmapMasks(masks, self.height, self.width) + + def __iter__(self): + return iter(self.masks) + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += f'num_masks={len(self.masks)}, ' + s += f'height={self.height}, ' + s += f'width={self.width})' + return s + + def __len__(self): + """Number of masks.""" + return len(self.masks) + + def rescale(self, scale, interpolation='nearest'): + """See :func:`BaseInstanceMasks.rescale`.""" + if len(self.masks) == 0: + new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) + rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) + else: + rescaled_masks = np.stack([ + mmcv.imrescale(mask, scale, interpolation=interpolation) + for mask in self.masks + ]) + height, width = rescaled_masks.shape[1:] + return BitmapMasks(rescaled_masks, height, width) + + def resize(self, out_shape, interpolation='nearest'): + """See :func:`BaseInstanceMasks.resize`.""" + if len(self.masks) == 0: + resized_masks = np.empty((0, *out_shape), dtype=np.uint8) + else: + resized_masks = np.stack([ + mmcv.imresize( + mask, out_shape[::-1], interpolation=interpolation) + for mask in self.masks + ]) + return BitmapMasks(resized_masks, *out_shape) + + def flip(self, flip_direction='horizontal'): + """See :func:`BaseInstanceMasks.flip`.""" + assert flip_direction in ('horizontal', 'vertical', 'diagonal') + + if len(self.masks) == 0: + flipped_masks = self.masks + else: + flipped_masks = np.stack([ + mmcv.imflip(mask, direction=flip_direction) + for mask in self.masks + ]) + return BitmapMasks(flipped_masks, self.height, self.width) + + def pad(self, out_shape, pad_val=0): + """See :func:`BaseInstanceMasks.pad`.""" + if len(self.masks) == 0: + padded_masks = np.empty((0, *out_shape), dtype=np.uint8) + else: + padded_masks = np.stack([ + mmcv.impad(mask, shape=out_shape, pad_val=pad_val) + for mask in self.masks + ]) + return 
BitmapMasks(padded_masks, *out_shape) + + def crop(self, bbox): + """See :func:`BaseInstanceMasks.crop`.""" + assert isinstance(bbox, np.ndarray) + assert bbox.ndim == 1 + + # clip the boundary + bbox = bbox.copy() + bbox[0::2] = np.clip(bbox[0::2], 0, self.width) + bbox[1::2] = np.clip(bbox[1::2], 0, self.height) + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1, 1) + h = np.maximum(y2 - y1, 1) + + if len(self.masks) == 0: + cropped_masks = np.empty((0, h, w), dtype=np.uint8) + else: + cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] + return BitmapMasks(cropped_masks, h, w) + + def crop_and_resize(self, + bboxes, + out_shape, + inds, + device='cpu', + interpolation='bilinear', + binarize=True): + """See :func:`BaseInstanceMasks.crop_and_resize`.""" + if len(self.masks) == 0: + empty_masks = np.empty((0, *out_shape), dtype=np.uint8) + return BitmapMasks(empty_masks, *out_shape) + + # convert bboxes to tensor + if isinstance(bboxes, np.ndarray): + bboxes = torch.from_numpy(bboxes).to(device=device) + if isinstance(inds, np.ndarray): + inds = torch.from_numpy(inds).to(device=device) + + num_bbox = bboxes.shape[0] + fake_inds = torch.arange( + num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] + rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 + rois = rois.to(device=device) + if num_bbox > 0: + gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( + 0, inds).to(dtype=rois.dtype) + targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, + 1.0, 0, 'avg', True).squeeze(1) + if binarize: + resized_masks = (targets >= 0.5).cpu().numpy() + else: + resized_masks = targets.cpu().numpy() + else: + resized_masks = [] + return BitmapMasks(resized_masks, *out_shape) + + def expand(self, expanded_h, expanded_w, top, left): + """See :func:`BaseInstanceMasks.expand`.""" + if len(self.masks) == 0: + expanded_mask = np.empty((0, expanded_h, expanded_w), + dtype=np.uint8) + else: + expanded_mask = np.zeros((len(self), expanded_h, expanded_w), + dtype=np.uint8) + expanded_mask[:, top:top + self.height, + left:left + self.width] = self.masks + return BitmapMasks(expanded_mask, expanded_h, expanded_w) + + def translate(self, + out_shape, + offset, + direction='horizontal', + fill_val=0, + interpolation='bilinear'): + """Translate the BitmapMasks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + offset (int | float): The offset for translate. + direction (str): The translate direction, either "horizontal" + or "vertical". + fill_val (int | float): Border value. Default 0 for masks. + interpolation (str): Same as :func:`mmcv.imtranslate`. + + Returns: + BitmapMasks: Translated BitmapMasks. 
+ + Example: + >>> from mmdet.core.mask.structures import BitmapMasks + >>> self = BitmapMasks.random(dtype=np.uint8) + >>> out_shape = (32, 32) + >>> offset = 4 + >>> direction = 'horizontal' + >>> fill_val = 0 + >>> interpolation = 'bilinear' + >>> # Note, There seem to be issues when: + >>> # * out_shape is different than self's shape + >>> # * the mask dtype is not supported by cv2.AffineWarp + >>> new = self.translate(out_shape, offset, direction, fill_val, + >>> interpolation) + >>> assert len(new) == len(self) + >>> assert new.height, new.width == out_shape + """ + if len(self.masks) == 0: + translated_masks = np.empty((0, *out_shape), dtype=np.uint8) + else: + translated_masks = mmcv.imtranslate( + self.masks.transpose((1, 2, 0)), + offset, + direction, + border_value=fill_val, + interpolation=interpolation) + if translated_masks.ndim == 2: + translated_masks = translated_masks[:, :, None] + translated_masks = translated_masks.transpose( + (2, 0, 1)).astype(self.masks.dtype) + return BitmapMasks(translated_masks, *out_shape) + + def shear(self, + out_shape, + magnitude, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """Shear the BitmapMasks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + magnitude (int | float): The magnitude used for shear. + direction (str): The shear direction, either "horizontal" + or "vertical". + border_value (int | tuple[int]): Value used in case of a + constant border. + interpolation (str): Same as in :func:`mmcv.imshear`. + + Returns: + BitmapMasks: The sheared masks. + """ + if len(self.masks) == 0: + sheared_masks = np.empty((0, *out_shape), dtype=np.uint8) + else: + sheared_masks = mmcv.imshear( + self.masks.transpose((1, 2, 0)), + magnitude, + direction, + border_value=border_value, + interpolation=interpolation) + if sheared_masks.ndim == 2: + sheared_masks = sheared_masks[:, :, None] + sheared_masks = sheared_masks.transpose( + (2, 0, 1)).astype(self.masks.dtype) + return BitmapMasks(sheared_masks, *out_shape) + + def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): + """Rotate the BitmapMasks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + angle (int | float): Rotation angle in degrees. Positive values + mean counter-clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the + rotation in source image. If not specified, the center of + the image will be used. + scale (int | float): Isotropic scale factor. + fill_val (int | float): Border value. Default 0 for masks. + + Returns: + BitmapMasks: Rotated BitmapMasks. 
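A short usage sketch (illustrative, not part of the diff) of the rotation documented above, keeping the original canvas size.

```python
import numpy as np

from mmdet.core.mask.structures import BitmapMasks

masks = BitmapMasks(
    (np.random.rand(2, 32, 32) > 0.5).astype(np.uint8), height=32, width=32)
# Rotate by 30 degrees counter-clockwise about the image centre.
rotated = masks.rotate(out_shape=(32, 32), angle=30.)
assert rotated.masks.shape == (2, 32, 32)
```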
+ """ + if len(self.masks) == 0: + rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) + else: + rotated_masks = mmcv.imrotate( + self.masks.transpose((1, 2, 0)), + angle, + center=center, + scale=scale, + border_value=fill_val) + if rotated_masks.ndim == 2: + # case when only one mask, (h, w) + rotated_masks = rotated_masks[:, :, None] # (h, w, 1) + rotated_masks = rotated_masks.transpose( + (2, 0, 1)).astype(self.masks.dtype) + return BitmapMasks(rotated_masks, *out_shape) + + @property + def areas(self): + """See :py:attr:`BaseInstanceMasks.areas`.""" + return self.masks.sum((1, 2)) + + def to_ndarray(self): + """See :func:`BaseInstanceMasks.to_ndarray`.""" + return self.masks + + def to_tensor(self, dtype, device): + """See :func:`BaseInstanceMasks.to_tensor`.""" + return torch.tensor(self.masks, dtype=dtype, device=device) + + @classmethod + def random(cls, + num_masks=3, + height=32, + width=32, + dtype=np.uint8, + rng=None): + """Generate random bitmap masks for demo / testing purposes. + + Example: + >>> from mmdet.core.mask.structures import BitmapMasks + >>> self = BitmapMasks.random() + >>> print('self = {}'.format(self)) + self = BitmapMasks(num_masks=3, height=32, width=32) + """ + from mmdet.utils.util_random import ensure_rng + rng = ensure_rng(rng) + masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) + self = cls(masks, height=height, width=width) + return self + + def get_bboxes(self): + num_masks = len(self) + boxes = np.zeros((num_masks, 4), dtype=np.float32) + x_any = self.masks.any(axis=1) + y_any = self.masks.any(axis=2) + for idx in range(num_masks): + x = np.where(x_any[idx, :])[0] + y = np.where(y_any[idx, :])[0] + if len(x) > 0 and len(y) > 0: + # use +1 for x_max and y_max so that the right and bottom + # boundary of instance masks are fully included by the box + boxes[idx, :] = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1], + dtype=np.float32) + return boxes + + +class PolygonMasks(BaseInstanceMasks): + """This class represents masks in the form of polygons. + + Polygons is a list of three levels. The first level of the list + corresponds to objects, the second level to the polys that compose the + object, the third level to the poly coordinates + + Args: + masks (list[list[ndarray]]): The first level of the list + corresponds to objects, the second level to the polys that + compose the object, the third level to the poly coordinates + height (int): height of masks + width (int): width of masks + + Example: + >>> from mmdet.core.mask.structures import * # NOQA + >>> masks = [ + >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] + >>> ] + >>> height, width = 16, 16 + >>> self = PolygonMasks(masks, height, width) + + >>> # demo translate + >>> new = self.translate((16, 16), 4., direction='horizontal') + >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) + >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) + + >>> # demo crop_and_resize + >>> num_boxes = 3 + >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) + >>> out_shape = (16, 16) + >>> inds = torch.randint(0, len(self), size=(num_boxes,)) + >>> device = 'cpu' + >>> interpolation = 'bilinear' + >>> new = self.crop_and_resize( + ... 
bboxes, out_shape, inds, device, interpolation) + >>> assert len(new) == num_boxes + >>> assert new.height, new.width == out_shape + """ + + def __init__(self, masks, height, width): + assert isinstance(masks, list) + if len(masks) > 0: + assert isinstance(masks[0], list) + assert isinstance(masks[0][0], np.ndarray) + + self.height = height + self.width = width + self.masks = masks + + def __getitem__(self, index): + """Index the polygon masks. + + Args: + index (ndarray | List): The indices. + + Returns: + :obj:`PolygonMasks`: The indexed polygon masks. + """ + if isinstance(index, np.ndarray): + index = index.tolist() + if isinstance(index, list): + masks = [self.masks[i] for i in index] + else: + try: + masks = self.masks[index] + except Exception: + raise ValueError( + f'Unsupported input of type {type(index)} for indexing!') + if len(masks) and isinstance(masks[0], np.ndarray): + masks = [masks] # ensure a list of three levels + return PolygonMasks(masks, self.height, self.width) + + def __iter__(self): + return iter(self.masks) + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += f'num_masks={len(self.masks)}, ' + s += f'height={self.height}, ' + s += f'width={self.width})' + return s + + def __len__(self): + """Number of masks.""" + return len(self.masks) + + def rescale(self, scale, interpolation=None): + """see :func:`BaseInstanceMasks.rescale`""" + new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) + if len(self.masks) == 0: + rescaled_masks = PolygonMasks([], new_h, new_w) + else: + rescaled_masks = self.resize((new_h, new_w)) + return rescaled_masks + + def resize(self, out_shape, interpolation=None): + """see :func:`BaseInstanceMasks.resize`""" + if len(self.masks) == 0: + resized_masks = PolygonMasks([], *out_shape) + else: + h_scale = out_shape[0] / self.height + w_scale = out_shape[1] / self.width + resized_masks = [] + for poly_per_obj in self.masks: + resized_poly = [] + for p in poly_per_obj: + p = p.copy() + p[0::2] = p[0::2] * w_scale + p[1::2] = p[1::2] * h_scale + resized_poly.append(p) + resized_masks.append(resized_poly) + resized_masks = PolygonMasks(resized_masks, *out_shape) + return resized_masks + + def flip(self, flip_direction='horizontal'): + """see :func:`BaseInstanceMasks.flip`""" + assert flip_direction in ('horizontal', 'vertical', 'diagonal') + if len(self.masks) == 0: + flipped_masks = PolygonMasks([], self.height, self.width) + else: + flipped_masks = [] + for poly_per_obj in self.masks: + flipped_poly_per_obj = [] + for p in poly_per_obj: + p = p.copy() + if flip_direction == 'horizontal': + p[0::2] = self.width - p[0::2] + elif flip_direction == 'vertical': + p[1::2] = self.height - p[1::2] + else: + p[0::2] = self.width - p[0::2] + p[1::2] = self.height - p[1::2] + flipped_poly_per_obj.append(p) + flipped_masks.append(flipped_poly_per_obj) + flipped_masks = PolygonMasks(flipped_masks, self.height, + self.width) + return flipped_masks + + def crop(self, bbox): + """see :func:`BaseInstanceMasks.crop`""" + assert isinstance(bbox, np.ndarray) + assert bbox.ndim == 1 + + # clip the boundary + bbox = bbox.copy() + bbox[0::2] = np.clip(bbox[0::2], 0, self.width) + bbox[1::2] = np.clip(bbox[1::2], 0, self.height) + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1, 1) + h = np.maximum(y2 - y1, 1) + + if len(self.masks) == 0: + cropped_masks = PolygonMasks([], h, w) + else: + cropped_masks = [] + for poly_per_obj in self.masks: + cropped_poly_per_obj = [] + for p in poly_per_obj: + # pycocotools will clip the boundary + p = 
p.copy() + p[0::2] = p[0::2] - bbox[0] + p[1::2] = p[1::2] - bbox[1] + cropped_poly_per_obj.append(p) + cropped_masks.append(cropped_poly_per_obj) + cropped_masks = PolygonMasks(cropped_masks, h, w) + return cropped_masks + + def pad(self, out_shape, pad_val=0): + """padding has no effect on polygons`""" + return PolygonMasks(self.masks, *out_shape) + + def expand(self, *args, **kwargs): + """TODO: Add expand for polygon""" + raise NotImplementedError + + def crop_and_resize(self, + bboxes, + out_shape, + inds, + device='cpu', + interpolation='bilinear', + binarize=True): + """see :func:`BaseInstanceMasks.crop_and_resize`""" + out_h, out_w = out_shape + if len(self.masks) == 0: + return PolygonMasks([], out_h, out_w) + + if not binarize: + raise ValueError('Polygons are always binary, ' + 'setting binarize=False is unsupported') + + resized_masks = [] + for i in range(len(bboxes)): + mask = self.masks[inds[i]] + bbox = bboxes[i, :] + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1, 1) + h = np.maximum(y2 - y1, 1) + h_scale = out_h / max(h, 0.1) # avoid too large scale + w_scale = out_w / max(w, 0.1) + + resized_mask = [] + for p in mask: + p = p.copy() + # crop + # pycocotools will clip the boundary + p[0::2] = p[0::2] - bbox[0] + p[1::2] = p[1::2] - bbox[1] + + # resize + p[0::2] = p[0::2] * w_scale + p[1::2] = p[1::2] * h_scale + resized_mask.append(p) + resized_masks.append(resized_mask) + return PolygonMasks(resized_masks, *out_shape) + + def translate(self, + out_shape, + offset, + direction='horizontal', + fill_val=None, + interpolation=None): + """Translate the PolygonMasks. + + Example: + >>> self = PolygonMasks.random(dtype=np.int) + >>> out_shape = (self.height, self.width) + >>> new = self.translate(out_shape, 4., direction='horizontal') + >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2]) + >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501 + """ + assert fill_val is None or fill_val == 0, 'Here fill_val is not '\ + f'used, and defaultly should be None or 0. got {fill_val}.' 
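+        # The offset is added to every vertex coordinate in place and the
+        # shifted values are clipped to the output shape below, e.g. a vertex
+        # at x=30 translated by +4 in a 32-pixel-wide output ends up at x=32.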
+ if len(self.masks) == 0: + translated_masks = PolygonMasks([], *out_shape) + else: + translated_masks = [] + for poly_per_obj in self.masks: + translated_poly_per_obj = [] + for p in poly_per_obj: + p = p.copy() + if direction == 'horizontal': + p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) + elif direction == 'vertical': + p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) + translated_poly_per_obj.append(p) + translated_masks.append(translated_poly_per_obj) + translated_masks = PolygonMasks(translated_masks, *out_shape) + return translated_masks + + def shear(self, + out_shape, + magnitude, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """See :func:`BaseInstanceMasks.shear`.""" + if len(self.masks) == 0: + sheared_masks = PolygonMasks([], *out_shape) + else: + sheared_masks = [] + if direction == 'horizontal': + shear_matrix = np.stack([[1, magnitude], + [0, 1]]).astype(np.float32) + elif direction == 'vertical': + shear_matrix = np.stack([[1, 0], [magnitude, + 1]]).astype(np.float32) + for poly_per_obj in self.masks: + sheared_poly = [] + for p in poly_per_obj: + p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n] + new_coords = np.matmul(shear_matrix, p) # [2, n] + new_coords[0, :] = np.clip(new_coords[0, :], 0, + out_shape[1]) + new_coords[1, :] = np.clip(new_coords[1, :], 0, + out_shape[0]) + sheared_poly.append( + new_coords.transpose((1, 0)).reshape(-1)) + sheared_masks.append(sheared_poly) + sheared_masks = PolygonMasks(sheared_masks, *out_shape) + return sheared_masks + + def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): + """See :func:`BaseInstanceMasks.rotate`.""" + if len(self.masks) == 0: + rotated_masks = PolygonMasks([], *out_shape) + else: + rotated_masks = [] + rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale) + for poly_per_obj in self.masks: + rotated_poly = [] + for p in poly_per_obj: + p = p.copy() + coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2] + # pad 1 to convert from format [x, y] to homogeneous + # coordinates format [x, y, 1] + coords = np.concatenate( + (coords, np.ones((coords.shape[0], 1), coords.dtype)), + axis=1) # [n, 3] + rotated_coords = np.matmul( + rotate_matrix[None, :, :], + coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2] + rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0, + out_shape[1]) + rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0, + out_shape[0]) + rotated_poly.append(rotated_coords.reshape(-1)) + rotated_masks.append(rotated_poly) + rotated_masks = PolygonMasks(rotated_masks, *out_shape) + return rotated_masks + + def to_bitmap(self): + """convert polygon masks to bitmap masks.""" + bitmap_masks = self.to_ndarray() + return BitmapMasks(bitmap_masks, self.height, self.width) + + @property + def areas(self): + """Compute areas of masks. + + This func is modified from `detectron2 + `_. + The function only works with Polygons using the shoelace formula. + + Return: + ndarray: areas of each instance + """ # noqa: W501 + area = [] + for polygons_per_obj in self.masks: + area_per_obj = 0 + for p in polygons_per_obj: + area_per_obj += self._polygon_area(p[0::2], p[1::2]) + area.append(area_per_obj) + return np.asarray(area) + + def _polygon_area(self, x, y): + """Compute the area of a component of a polygon. 
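+
+        A worked illustration (values made up): the unit square with
+        ``x = [0, 1, 1, 0]`` and ``y = [0, 0, 1, 1]`` gives
+        ``0.5 * |0 - 2| = 1.0``.
+
+            >>> import numpy as np
+            >>> from mmdet.core.mask.structures import PolygonMasks
+            >>> pm = PolygonMasks([], 1, 1)
+            >>> x, y = np.array([0., 1, 1, 0]), np.array([0., 0, 1, 1])
+            >>> float(pm._polygon_area(x, y))
+            1.0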
+ + Using the shoelace formula: + https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + + Args: + x (ndarray): x coordinates of the component + y (ndarray): y coordinates of the component + + Return: + float: the are of the component + """ # noqa: 501 + return 0.5 * np.abs( + np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + def to_ndarray(self): + """Convert masks to the format of ndarray.""" + if len(self.masks) == 0: + return np.empty((0, self.height, self.width), dtype=np.uint8) + bitmap_masks = [] + for poly_per_obj in self.masks: + bitmap_masks.append( + polygon_to_bitmap(poly_per_obj, self.height, self.width)) + return np.stack(bitmap_masks) + + def to_tensor(self, dtype, device): + """See :func:`BaseInstanceMasks.to_tensor`.""" + if len(self.masks) == 0: + return torch.empty((0, self.height, self.width), + dtype=dtype, + device=device) + ndarray_masks = self.to_ndarray() + return torch.tensor(ndarray_masks, dtype=dtype, device=device) + + @classmethod + def random(cls, + num_masks=3, + height=32, + width=32, + n_verts=5, + dtype=np.float32, + rng=None): + """Generate random polygon masks for demo / testing purposes. + + Adapted from [1]_ + + References: + .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501 + + Example: + >>> from mmdet.core.mask.structures import PolygonMasks + >>> self = PolygonMasks.random() + >>> print('self = {}'.format(self)) + """ + from mmdet.utils.util_random import ensure_rng + rng = ensure_rng(rng) + + def _gen_polygon(n, irregularity, spikeyness): + """Creates the polygon by sampling points on a circle around the + centre. Random noise is added by varying the angular spacing + between sequential points, and by varying the radial distance of + each point from the centre. + + Based on original code by Mike Ounsworth + + Args: + n (int): number of vertices + irregularity (float): [0,1] indicating how much variance there + is in the angular spacing of vertices. [0,1] will map to + [0, 2pi/numberOfVerts] + spikeyness (float): [0,1] indicating how much variance there is + in each vertex from the circle of radius aveRadius. [0,1] + will map to [0, aveRadius] + + Returns: + a list of vertices, in CCW order. 
+ """ + from scipy.stats import truncnorm + + # Generate around the unit circle + cx, cy = (0.0, 0.0) + radius = 1 + + tau = np.pi * 2 + + irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n + spikeyness = np.clip(spikeyness, 1e-9, 1) + + # generate n angle steps + lower = (tau / n) - irregularity + upper = (tau / n) + irregularity + angle_steps = rng.uniform(lower, upper, n) + + # normalize the steps so that point 0 and point n+1 are the same + k = angle_steps.sum() / (2 * np.pi) + angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) + + # Convert high and low values to be wrt the standard normal range + # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html + low = 0 + high = 2 * radius + mean = radius + std = spikeyness + a = (low - mean) / std + b = (high - mean) / std + tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) + + # now generate the points + radii = tnorm.rvs(n, random_state=rng) + x_pts = cx + radii * np.cos(angles) + y_pts = cy + radii * np.sin(angles) + + points = np.hstack([x_pts[:, None], y_pts[:, None]]) + + # Scale to 0-1 space + points = points - points.min(axis=0) + points = points / points.max(axis=0) + + # Randomly place within 0-1 space + points = points * (rng.rand() * .8 + .2) + min_pt = points.min(axis=0) + max_pt = points.max(axis=0) + + high = (1 - max_pt) + low = (0 - min_pt) + offset = (rng.rand(2) * (high - low)) + low + points = points + offset + return points + + def _order_vertices(verts): + """ + References: + https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise + """ + mlat = verts.T[0].sum() / len(verts) + mlng = verts.T[1].sum() / len(verts) + + tau = np.pi * 2 + angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + + tau) % tau + sortx = angle.argsort() + verts = verts.take(sortx, axis=0) + return verts + + # Generate a random exterior for each requested mask + masks = [] + for _ in range(num_masks): + exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) + exterior = (exterior * [(width, height)]).astype(dtype) + masks.append([exterior.ravel()]) + + self = cls(masks, height, width) + return self + + def get_bboxes(self): + num_masks = len(self) + boxes = np.zeros((num_masks, 4), dtype=np.float32) + for idx, poly_per_obj in enumerate(self.masks): + # simply use a number that is big enough for comparison with + # coordinates + xy_min = np.array([self.width * 2, self.height * 2], + dtype=np.float32) + xy_max = np.zeros(2, dtype=np.float32) + for p in poly_per_obj: + xy = np.array(p).reshape(-1, 2).astype(np.float32) + xy_min = np.minimum(xy_min, np.min(xy, axis=0)) + xy_max = np.maximum(xy_max, np.max(xy, axis=0)) + boxes[idx, :2] = xy_min + boxes[idx, 2:] = xy_max + + return boxes + + +def polygon_to_bitmap(polygons, height, width): + """Convert masks from the form of polygons to bitmaps. + + Args: + polygons (list[ndarray]): masks in polygon representation + height (int): mask height + width (int): mask width + + Return: + ndarray: the converted masks in bitmap representation + """ + rles = maskUtils.frPyObjects(polygons, height, width) + rle = maskUtils.merge(rles) + bitmap_mask = maskUtils.decode(rle).astype(np.bool) + return bitmap_mask + + +def bitmap_to_polygon(bitmap): + """Convert masks from the form of bitmaps to polygons. + + Args: + bitmap (ndarray): masks in bitmap representation. + + Return: + list[ndarray]: the converted mask in polygon representation. + bool: whether the mask has holes. 
+ """ + bitmap = np.ascontiguousarray(bitmap).astype(np.uint8) + # cv2.RETR_CCOMP: retrieves all of the contours and organizes them + # into a two-level hierarchy. At the top level, there are external + # boundaries of the components. At the second level, there are + # boundaries of the holes. If there is another contour inside a hole + # of a connected component, it is still put at the top level. + # cv2.CHAIN_APPROX_NONE: stores absolutely all the contour points. + outs = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) + contours = outs[-2] + hierarchy = outs[-1] + if hierarchy is None: + return [], False + # hierarchy[i]: 4 elements, for the indexes of next, previous, + # parent, or nested contours. If there is no corresponding contour, + # it will be -1. + with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any() + contours = [c.reshape(-1, 2) for c in contours] + return contours, with_hole diff --git a/downstream/mmdetection/mmdet/core/mask/utils.py b/downstream/mmdetection/mmdet/core/mask/utils.py new file mode 100644 index 0000000..90544b3 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/mask/utils.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import pycocotools.mask as mask_util +import torch + + +def split_combined_polys(polys, poly_lens, polys_per_mask): + """Split the combined 1-D polys into masks. + + A mask is represented as a list of polys, and a poly is represented as + a 1-D array. In dataset, all masks are concatenated into a single 1-D + tensor. Here we need to split the tensor into original representations. + + Args: + polys (list): a list (length = image num) of 1-D tensors + poly_lens (list): a list (length = image num) of poly length + polys_per_mask (list): a list (length = image num) of poly number + of each mask + + Returns: + list: a list (length = image num) of list (length = mask num) of \ + list (length = poly num) of numpy array. + """ + mask_polys_list = [] + for img_id in range(len(polys)): + polys_single = polys[img_id] + polys_lens_single = poly_lens[img_id].tolist() + polys_per_mask_single = polys_per_mask[img_id].tolist() + + split_polys = mmcv.slice_list(polys_single, polys_lens_single) + mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) + mask_polys_list.append(mask_polys) + return mask_polys_list + + +# TODO: move this function to more proper place +def encode_mask_results(mask_results): + """Encode bitmap mask to RLE code. + + Args: + mask_results (list | tuple[list]): bitmap mask results. + In mask scoring rcnn, mask_results is a tuple of (segm_results, + segm_cls_score). + + Returns: + list | tuple: RLE encoded mask. + """ + if isinstance(mask_results, tuple): # mask scoring + cls_segms, cls_mask_scores = mask_results + else: + cls_segms = mask_results + num_classes = len(cls_segms) + encoded_mask_results = [[] for _ in range(num_classes)] + for i in range(len(cls_segms)): + for cls_segm in cls_segms[i]: + encoded_mask_results[i].append( + mask_util.encode( + np.array( + cls_segm[:, :, np.newaxis], order='F', + dtype='uint8'))[0]) # encoded with RLE + if isinstance(mask_results, tuple): + return encoded_mask_results, cls_mask_scores + else: + return encoded_mask_results + + +def mask2bbox(masks): + """Obtain tight bounding boxes of binary masks. + + Args: + masks (Tensor): Binary mask of shape (n, h, w). + + Returns: + Tensor: Bboxe with shape (n, 4) of \ + positive region in binary mask. 
+ """ + N = masks.shape[0] + bboxes = masks.new_zeros((N, 4), dtype=torch.float32) + x_any = torch.any(masks, dim=1) + y_any = torch.any(masks, dim=2) + for i in range(N): + x = torch.where(x_any[i, :])[0] + y = torch.where(y_any[i, :])[0] + if len(x) > 0 and len(y) > 0: + bboxes[i, :] = bboxes.new_tensor( + [x[0], y[0], x[-1] + 1, y[-1] + 1]) + + return bboxes diff --git a/downstream/mmdetection/mmdet/core/optimizers/__init__.py b/downstream/mmdetection/mmdet/core/optimizers/__init__.py new file mode 100644 index 0000000..e867d07 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/optimizers/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import OPTIMIZER_BUILDERS, build_optimizer +from .layer_decay_optimizer_constructor import \ + LearningRateDecayOptimizerConstructor + +__all__ = [ + 'LearningRateDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS', + 'build_optimizer' +] diff --git a/downstream/mmdetection/mmdet/core/optimizers/builder.py b/downstream/mmdetection/mmdet/core/optimizers/builder.py new file mode 100644 index 0000000..406dd9b --- /dev/null +++ b/downstream/mmdetection/mmdet/core/optimizers/builder.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +from mmcv.runner.optimizer import OPTIMIZER_BUILDERS as MMCV_OPTIMIZER_BUILDERS +from mmcv.utils import Registry, build_from_cfg + +OPTIMIZER_BUILDERS = Registry( + 'optimizer builder', parent=MMCV_OPTIMIZER_BUILDERS) + + +def build_optimizer_constructor(cfg): + constructor_type = cfg.get('type') + if constructor_type in OPTIMIZER_BUILDERS: + return build_from_cfg(cfg, OPTIMIZER_BUILDERS) + elif constructor_type in MMCV_OPTIMIZER_BUILDERS: + return build_from_cfg(cfg, MMCV_OPTIMIZER_BUILDERS) + else: + raise KeyError(f'{constructor_type} is not registered ' + 'in the optimizer builder registry.') + + +def build_optimizer(model, cfg): + optimizer_cfg = copy.deepcopy(cfg) + constructor_type = optimizer_cfg.pop('constructor', + 'DefaultOptimizerConstructor') + paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) + optim_constructor = build_optimizer_constructor( + dict( + type=constructor_type, + optimizer_cfg=optimizer_cfg, + paramwise_cfg=paramwise_cfg)) + optimizer = optim_constructor(model) + return optimizer diff --git a/downstream/mmdetection/mmdet/core/optimizers/layer_decay_optimizer_constructor.py b/downstream/mmdetection/mmdet/core/optimizers/layer_decay_optimizer_constructor.py new file mode 100644 index 0000000..1bc3469 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/optimizers/layer_decay_optimizer_constructor.py @@ -0,0 +1,154 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json + +from mmcv.runner import DefaultOptimizerConstructor, get_dist_info + +from mmdet.utils import get_root_logger +from .builder import OPTIMIZER_BUILDERS + + +def get_layer_id_for_convnext(var_name, max_layer_id): + """Get the layer id to set the different learning rates in ``layer_wise`` + decay_type. + + Args: + var_name (str): The key of the model. + max_layer_id (int): Maximum layer id. + + Returns: + int: The id number corresponding to different learning rate in + ``LearningRateDecayOptimizerConstructor``. 
+ """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.downsample_layers'): + stage_id = int(var_name.split('.')[2]) + if stage_id == 0: + layer_id = 0 + elif stage_id == 1: + layer_id = 2 + elif stage_id == 2: + layer_id = 3 + elif stage_id == 3: + layer_id = max_layer_id + return layer_id + elif var_name.startswith('backbone.stages'): + stage_id = int(var_name.split('.')[2]) + block_id = int(var_name.split('.')[3]) + if stage_id == 0: + layer_id = 1 + elif stage_id == 1: + layer_id = 2 + elif stage_id == 2: + layer_id = 3 + block_id // 3 + elif stage_id == 3: + layer_id = max_layer_id + return layer_id + else: + return max_layer_id + 1 + + +def get_stage_id_for_convnext(var_name, max_stage_id): + """Get the stage id to set the different learning rates in ``stage_wise`` + decay_type. + + Args: + var_name (str): The key of the model. + max_stage_id (int): Maximum stage id. + + Returns: + int: The id number corresponding to different learning rate in + ``LearningRateDecayOptimizerConstructor``. + """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.downsample_layers'): + return 0 + elif var_name.startswith('backbone.stages'): + stage_id = int(var_name.split('.')[2]) + return stage_id + 1 + else: + return max_stage_id - 1 + + +@OPTIMIZER_BUILDERS.register_module() +class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor): + # Different learning rates are set for different layers of backbone. + # Note: Currently, this optimizer constructor is built for ConvNeXt. + + def add_params(self, params, module, **kwargs): + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + """ + logger = get_root_logger() + + parameter_groups = {} + logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}') + num_layers = self.paramwise_cfg.get('num_layers') + 2 + decay_rate = self.paramwise_cfg.get('decay_rate') + decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise') + logger.info('Build LearningRateDecayOptimizerConstructor ' + f'{decay_type} {decay_rate} - {num_layers}') + weight_decay = self.base_wd + for name, param in module.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith('.bias') or name in ( + 'pos_embed', 'cls_token'): + group_name = 'no_decay' + this_weight_decay = 0. 
+ else: + group_name = 'decay' + this_weight_decay = weight_decay + if 'layer_wise' in decay_type: + if 'ConvNeXt' in module.backbone.__class__.__name__: + layer_id = get_layer_id_for_convnext( + name, self.paramwise_cfg.get('num_layers')) + logger.info(f'set param {name} as id {layer_id}') + else: + raise NotImplementedError() + elif decay_type == 'stage_wise': + if 'ConvNeXt' in module.backbone.__class__.__name__: + layer_id = get_stage_id_for_convnext(name, num_layers) + logger.info(f'set param {name} as id {layer_id}') + else: + raise NotImplementedError() + group_name = f'layer_{layer_id}_{group_name}' + + if group_name not in parameter_groups: + scale = decay_rate**(num_layers - layer_id - 1) + + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr, + } + + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + rank, _ = get_dist_info() + if rank == 0: + to_display = {} + for key in parameter_groups: + to_display[key] = { + 'param_names': parameter_groups[key]['param_names'], + 'lr_scale': parameter_groups[key]['lr_scale'], + 'lr': parameter_groups[key]['lr'], + 'weight_decay': parameter_groups[key]['weight_decay'], + } + logger.info(f'Param groups = {json.dumps(to_display, indent=2)}') + params.extend(parameter_groups.values()) diff --git a/downstream/mmdetection/mmdet/core/post_processing/__init__.py b/downstream/mmdetection/mmdet/core/post_processing/__init__.py new file mode 100644 index 0000000..00376bd --- /dev/null +++ b/downstream/mmdetection/mmdet/core/post_processing/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .bbox_nms import fast_nms, multiclass_nms +from .matrix_nms import mask_matrix_nms +from .merge_augs import (merge_aug_bboxes, merge_aug_masks, + merge_aug_proposals, merge_aug_scores) + +__all__ = [ + 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', + 'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms' +] diff --git a/downstream/mmdetection/mmdet/core/post_processing/bbox_nms.py b/downstream/mmdetection/mmdet/core/post_processing/bbox_nms.py new file mode 100644 index 0000000..4fcf57b --- /dev/null +++ b/downstream/mmdetection/mmdet/core/post_processing/bbox_nms.py @@ -0,0 +1,171 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.ops.nms import batched_nms + +from mmdet.core.bbox.iou_calculators import bbox_overlaps + + +def multiclass_nms(multi_bboxes, + multi_scores, + score_thr, + nms_cfg, + max_num=-1, + score_factors=None, + return_inds=False): + """NMS for multi-class bboxes. + + Args: + multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) + multi_scores (Tensor): shape (n, #class), where the last column + contains scores of the background class, but this will be ignored. + score_thr (float): bbox threshold, bboxes with scores lower than it + will not be considered. + nms_cfg (dict): a dict that contains the arguments of nms operations + max_num (int, optional): if there are more than max_num bboxes after + NMS, only top max_num will be kept. Default to -1. + score_factors (Tensor, optional): The factors multiplied to scores + before applying NMS. Default to None. + return_inds (bool, optional): Whether return the indices of kept + bboxes. Default to False. + + Returns: + tuple: (dets, labels, indices (optional)), tensors of shape (k, 5), + (k), and (k). 
Dets are boxes with scores. Labels are 0-based. + """ + num_classes = multi_scores.size(1) - 1 + # exclude background category + if multi_bboxes.shape[1] > 4: + bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4) + else: + bboxes = multi_bboxes[:, None].expand( + multi_scores.size(0), num_classes, 4) + + scores = multi_scores[:, :-1] + + labels = torch.arange(num_classes, dtype=torch.long, device=scores.device) + labels = labels.view(1, -1).expand_as(scores) + + bboxes = bboxes.reshape(-1, 4) + scores = scores.reshape(-1) + labels = labels.reshape(-1) + + if not torch.onnx.is_in_onnx_export(): + # NonZero not supported in TensorRT + # remove low scoring boxes + valid_mask = scores > score_thr + # multiply score_factor after threshold to preserve more bboxes, improve + # mAP by 1% for YOLOv3 + if score_factors is not None: + # expand the shape to match original shape of score + score_factors = score_factors.view(-1, 1).expand( + multi_scores.size(0), num_classes) + score_factors = score_factors.reshape(-1) + scores = scores * score_factors + + if not torch.onnx.is_in_onnx_export(): + # NonZero not supported in TensorRT + inds = valid_mask.nonzero(as_tuple=False).squeeze(1) + bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds] + else: + # TensorRT NMS plugin has invalid output filled with -1 + # add dummy data to make detection output correct. + bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0) + scores = torch.cat([scores, scores.new_zeros(1)], dim=0) + labels = torch.cat([labels, labels.new_zeros(1)], dim=0) + + if bboxes.numel() == 0: + if torch.onnx.is_in_onnx_export(): + raise RuntimeError('[ONNX Error] Can not record NMS ' + 'as it has not been executed this time') + dets = torch.cat([bboxes, scores[:, None]], -1) + if return_inds: + return dets, labels, inds + else: + return dets, labels + + dets, keep = batched_nms(bboxes, scores, labels, nms_cfg) + + if max_num > 0: + dets = dets[:max_num] + keep = keep[:max_num] + + if return_inds: + return dets, labels[keep], inds[keep] + else: + return dets, labels[keep] + + +def fast_nms(multi_bboxes, + multi_scores, + multi_coeffs, + score_thr, + iou_thr, + top_k, + max_num=-1): + """Fast NMS in `YOLACT `_. + + Fast NMS allows already-removed detections to suppress other detections so + that every instance can be decided to be kept or discarded in parallel, + which is not possible in traditional NMS. This relaxation allows us to + implement Fast NMS entirely in standard GPU-accelerated matrix operations. + + Args: + multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) + multi_scores (Tensor): shape (n, #class+1), where the last column + contains scores of the background class, but this will be ignored. + multi_coeffs (Tensor): shape (n, #class*coeffs_dim). + score_thr (float): bbox threshold, bboxes with scores lower than it + will not be considered. + iou_thr (float): IoU threshold to be considered as conflicted. + top_k (int): if there are more than top_k bboxes before NMS, + only top top_k will be kept. + max_num (int): if there are more than max_num bboxes after NMS, + only top max_num will be kept. If -1, keep all the bboxes. + Default: -1. + + Returns: + tuple: (dets, labels, coefficients), tensors of shape (k, 5), (k, 1), + and (k, coeffs_dim). Dets are boxes with scores. + Labels are 0-based. 
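+
+    Example (a minimal sketch with a single foreground class; all tensor
+    values below are made up for illustration):
+        >>> import torch
+        >>> from mmdet.core.post_processing.bbox_nms import fast_nms
+        >>> bboxes = torch.tensor([[0., 0., 10., 10.],
+        ...                        [0., 0., 10., 10.],
+        ...                        [20., 20., 30., 30.]])
+        >>> scores = torch.tensor([[0.9, 0.], [0.8, 0.], [0.7, 0.]])
+        >>> coeffs = torch.zeros(3, 4)
+        >>> dets, labels, kept_coeffs = fast_nms(
+        ...     bboxes, scores, coeffs, score_thr=0.05, iou_thr=0.5, top_k=5)
+        >>> # the duplicate box is suppressed, the distant one is kept
+        >>> assert dets.shape == (2, 5) and labels.tolist() == [0, 0]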
+ """ + + scores = multi_scores[:, :-1].t() # [#class, n] + scores, idx = scores.sort(1, descending=True) + + idx = idx[:, :top_k].contiguous() + scores = scores[:, :top_k] # [#class, topk] + num_classes, num_dets = idx.size() + boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4) + coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1) + + iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk] + iou.triu_(diagonal=1) + iou_max, _ = iou.max(dim=1) + + # Now just filter out the ones higher than the threshold + keep = iou_max <= iou_thr + + # Second thresholding introduces 0.2 mAP gain at negligible time cost + keep *= scores > score_thr + + # Assign each kept detection to its corresponding class + classes = torch.arange( + num_classes, device=boxes.device)[:, None].expand_as(keep) + classes = classes[keep] + + boxes = boxes[keep] + coeffs = coeffs[keep] + scores = scores[keep] + + # Only keep the top max_num highest scores across all classes + scores, idx = scores.sort(0, descending=True) + if max_num > 0: + idx = idx[:max_num] + scores = scores[:max_num] + + classes = classes[idx] + boxes = boxes[idx] + coeffs = coeffs[idx] + + cls_dets = torch.cat([boxes, scores[:, None]], dim=1) + return cls_dets, classes, coeffs diff --git a/downstream/mmdetection/mmdet/core/post_processing/matrix_nms.py b/downstream/mmdetection/mmdet/core/post_processing/matrix_nms.py new file mode 100644 index 0000000..9dc8c4f --- /dev/null +++ b/downstream/mmdetection/mmdet/core/post_processing/matrix_nms.py @@ -0,0 +1,121 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def mask_matrix_nms(masks, + labels, + scores, + filter_thr=-1, + nms_pre=-1, + max_num=-1, + kernel='gaussian', + sigma=2.0, + mask_area=None): + """Matrix NMS for multi-class masks. + + Args: + masks (Tensor): Has shape (num_instances, h, w) + labels (Tensor): Labels of corresponding masks, + has shape (num_instances,). + scores (Tensor): Mask scores of corresponding masks, + has shape (num_instances). + filter_thr (float): Score threshold to filter the masks + after matrix nms. Default: -1, which means do not + use filter_thr. + nms_pre (int): The max number of instances to do the matrix nms. + Default: -1, which means do not use nms_pre. + max_num (int, optional): If there are more than max_num masks after + matrix, only top max_num will be kept. Default: -1, which means + do not use max_num. + kernel (str): 'linear' or 'gaussian'. + sigma (float): std in gaussian method. + mask_area (Tensor): The sum of seg_masks. + + Returns: + tuple(Tensor): Processed mask results. + + - scores (Tensor): Updated scores, has shape (n,). + - labels (Tensor): Remained labels, has shape (n,). + - masks (Tensor): Remained masks, has shape (n, w, h). + - keep_inds (Tensor): The indices number of + the remaining mask in the input mask, has shape (n,). 
+ """ + assert len(labels) == len(masks) == len(scores) + if len(labels) == 0: + return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( + 0, *masks.shape[-2:]), labels.new_zeros(0) + if mask_area is None: + mask_area = masks.sum((1, 2)).float() + else: + assert len(masks) == len(mask_area) + + # sort and keep top nms_pre + scores, sort_inds = torch.sort(scores, descending=True) + + keep_inds = sort_inds + if nms_pre > 0 and len(sort_inds) > nms_pre: + sort_inds = sort_inds[:nms_pre] + keep_inds = keep_inds[:nms_pre] + scores = scores[:nms_pre] + masks = masks[sort_inds] + mask_area = mask_area[sort_inds] + labels = labels[sort_inds] + + num_masks = len(labels) + flatten_masks = masks.reshape(num_masks, -1).float() + # inter. + inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0)) + expanded_mask_area = mask_area.expand(num_masks, num_masks) + # Upper triangle iou matrix. + iou_matrix = (inter_matrix / + (expanded_mask_area + expanded_mask_area.transpose(1, 0) - + inter_matrix)).triu(diagonal=1) + # label_specific matrix. + expanded_labels = labels.expand(num_masks, num_masks) + # Upper triangle label matrix. + label_matrix = (expanded_labels == expanded_labels.transpose( + 1, 0)).triu(diagonal=1) + + # IoU compensation + compensate_iou, _ = (iou_matrix * label_matrix).max(0) + compensate_iou = compensate_iou.expand(num_masks, + num_masks).transpose(1, 0) + + # IoU decay + decay_iou = iou_matrix * label_matrix + + # Calculate the decay_coefficient + if kernel == 'gaussian': + decay_matrix = torch.exp(-1 * sigma * (decay_iou**2)) + compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2)) + decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0) + elif kernel == 'linear': + decay_matrix = (1 - decay_iou) / (1 - compensate_iou) + decay_coefficient, _ = decay_matrix.min(0) + else: + raise NotImplementedError( + f'{kernel} kernel is not supported in matrix nms!') + # update the score. + scores = scores * decay_coefficient + + if filter_thr > 0: + keep = scores >= filter_thr + keep_inds = keep_inds[keep] + if not keep.any(): + return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( + 0, *masks.shape[-2:]), labels.new_zeros(0) + masks = masks[keep] + scores = scores[keep] + labels = labels[keep] + + # sort and keep top max_num + scores, sort_inds = torch.sort(scores, descending=True) + keep_inds = keep_inds[sort_inds] + if max_num > 0 and len(sort_inds) > max_num: + sort_inds = sort_inds[:max_num] + keep_inds = keep_inds[:max_num] + scores = scores[:max_num] + masks = masks[sort_inds] + labels = labels[sort_inds] + + return scores, labels, masks, keep_inds diff --git a/downstream/mmdetection/mmdet/core/post_processing/merge_augs.py b/downstream/mmdetection/mmdet/core/post_processing/merge_augs.py new file mode 100644 index 0000000..2ac4603 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/post_processing/merge_augs.py @@ -0,0 +1,154 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +import numpy as np +import torch +from mmcv import ConfigDict +from mmcv.ops import nms + +from ..bbox import bbox_mapping_back + + +def merge_aug_proposals(aug_proposals, img_metas, cfg): + """Merge augmented proposals (multiscale, flip, etc.) + + Args: + aug_proposals (list[Tensor]): proposals from different testing + schemes, shape (n, 5). Note that they are not rescaled to the + original image size. 
+ + img_metas (list[dict]): list of image info dict where each dict has: + 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + + cfg (dict): rpn test config. + + Returns: + Tensor: shape (n, 4), proposals corresponding to original image scale. + """ + + cfg = copy.deepcopy(cfg) + + # deprecate arguments warning + if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: + warnings.warn( + 'In rpn_proposal or test_cfg, ' + 'nms_thr has been moved to a dict named nms as ' + 'iou_threshold, max_num has been renamed as max_per_img, ' + 'name of original arguments and the way to specify ' + 'iou_threshold of NMS will be deprecated.') + if 'nms' not in cfg: + cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) + if 'max_num' in cfg: + if 'max_per_img' in cfg: + assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ + f'max_per_img at the same time, but get {cfg.max_num} ' \ + f'and {cfg.max_per_img} respectively' \ + f'Please delete max_num which will be deprecated.' + else: + cfg.max_per_img = cfg.max_num + if 'nms_thr' in cfg: + assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ + f'iou_threshold in nms and ' \ + f'nms_thr at the same time, but get ' \ + f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ + f' respectively. Please delete the nms_thr ' \ + f'which will be deprecated.' + + recovered_proposals = [] + for proposals, img_info in zip(aug_proposals, img_metas): + img_shape = img_info['img_shape'] + scale_factor = img_info['scale_factor'] + flip = img_info['flip'] + flip_direction = img_info['flip_direction'] + _proposals = proposals.clone() + _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, + scale_factor, flip, + flip_direction) + recovered_proposals.append(_proposals) + aug_proposals = torch.cat(recovered_proposals, dim=0) + merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(), + aug_proposals[:, -1].contiguous(), + cfg.nms.iou_threshold) + scores = merged_proposals[:, 4] + _, order = scores.sort(0, descending=True) + num = min(cfg.max_per_img, merged_proposals.shape[0]) + order = order[:num] + merged_proposals = merged_proposals[order, :] + return merged_proposals + + +def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): + """Merge augmented detection bboxes and scores. + + Args: + aug_bboxes (list[Tensor]): shape (n, 4*#class) + aug_scores (list[Tensor] or None): shape (n, #class) + img_shapes (list[Tensor]): shape (3, ). + rcnn_test_cfg (dict): rcnn test config. 
+ + Returns: + tuple: (bboxes, scores) + """ + recovered_bboxes = [] + for bboxes, img_info in zip(aug_bboxes, img_metas): + img_shape = img_info[0]['img_shape'] + scale_factor = img_info[0]['scale_factor'] + flip = img_info[0]['flip'] + flip_direction = img_info[0]['flip_direction'] + bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, + flip_direction) + recovered_bboxes.append(bboxes) + bboxes = torch.stack(recovered_bboxes).mean(dim=0) + if aug_scores is None: + return bboxes + else: + scores = torch.stack(aug_scores).mean(dim=0) + return bboxes, scores + + +def merge_aug_scores(aug_scores): + """Merge augmented bbox scores.""" + if isinstance(aug_scores[0], torch.Tensor): + return torch.mean(torch.stack(aug_scores), dim=0) + else: + return np.mean(aug_scores, axis=0) + + +def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None): + """Merge augmented mask prediction. + + Args: + aug_masks (list[ndarray]): shape (n, #class, h, w) + img_shapes (list[ndarray]): shape (3, ). + rcnn_test_cfg (dict): rcnn test config. + + Returns: + tuple: (bboxes, scores) + """ + recovered_masks = [] + for mask, img_info in zip(aug_masks, img_metas): + flip = img_info[0]['flip'] + if flip: + flip_direction = img_info[0]['flip_direction'] + if flip_direction == 'horizontal': + mask = mask[:, :, :, ::-1] + elif flip_direction == 'vertical': + mask = mask[:, :, ::-1, :] + elif flip_direction == 'diagonal': + mask = mask[:, :, :, ::-1] + mask = mask[:, :, ::-1, :] + else: + raise ValueError( + f"Invalid flipping direction '{flip_direction}'") + recovered_masks.append(mask) + + if weights is None: + merged_masks = np.mean(recovered_masks, axis=0) + else: + merged_masks = np.average( + np.array(recovered_masks), axis=0, weights=np.array(weights)) + return merged_masks diff --git a/downstream/mmdetection/mmdet/core/utils/__init__.py b/downstream/mmdetection/mmdet/core/utils/__init__.py new file mode 100644 index 0000000..3f0d070 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/utils/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, + reduce_mean, sync_random_seed) +from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, + generate_coordinate, mask2ndarray, multi_apply, + select_single_mlvl, unmap) + +__all__ = [ + 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', + 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict', + 'center_of_mass', 'generate_coordinate', 'select_single_mlvl', + 'filter_scores_and_topk', 'sync_random_seed' +] diff --git a/downstream/mmdetection/mmdet/core/utils/dist_utils.py b/downstream/mmdetection/mmdet/core/utils/dist_utils.py new file mode 100644 index 0000000..8760774 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/utils/dist_utils.py @@ -0,0 +1,193 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
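+# Usage sketch for the helpers defined in this module (illustrative only;
+# it assumes a torch.distributed process group has already been initialized
+# by the training launcher):
+#
+#     loss = reduce_mean(loss)     # average a scalar tensor across GPUs
+#     seed = sync_random_seed()    # share one random seed among all ranks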
+import functools +import pickle +import warnings +from collections import OrderedDict + +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import OptimizerHook, get_dist_info +from torch._utils import (_flatten_dense_tensors, _take_tensors, + _unflatten_dense_tensors) + + +def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): + if bucket_size_mb > 0: + bucket_size_bytes = bucket_size_mb * 1024 * 1024 + buckets = _take_tensors(tensors, bucket_size_bytes) + else: + buckets = OrderedDict() + for tensor in tensors: + tp = tensor.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(tensor) + buckets = buckets.values() + + for bucket in buckets: + flat_tensors = _flatten_dense_tensors(bucket) + dist.all_reduce(flat_tensors) + flat_tensors.div_(world_size) + for tensor, synced in zip( + bucket, _unflatten_dense_tensors(flat_tensors, bucket)): + tensor.copy_(synced) + + +def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): + """Allreduce gradients. + + Args: + params (list[torch.Parameters]): List of parameters of a model + coalesce (bool, optional): Whether allreduce parameters as a whole. + Defaults to True. + bucket_size_mb (int, optional): Size of bucket, the unit is MB. + Defaults to -1. + """ + grads = [ + param.grad.data for param in params + if param.requires_grad and param.grad is not None + ] + world_size = dist.get_world_size() + if coalesce: + _allreduce_coalesced(grads, world_size, bucket_size_mb) + else: + for tensor in grads: + dist.all_reduce(tensor.div_(world_size)) + + +class DistOptimizerHook(OptimizerHook): + """Deprecated optimizer hook for distributed training.""" + + def __init__(self, *args, **kwargs): + warnings.warn('"DistOptimizerHook" is deprecated, please switch to' + '"mmcv.runner.OptimizerHook".') + super().__init__(*args, **kwargs) + + +def reduce_mean(tensor): + """"Obtain the mean of tensor on different GPUs.""" + if not (dist.is_available() and dist.is_initialized()): + return tensor + tensor = tensor.clone() + dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) + return tensor + + +def obj2tensor(pyobj, device='cuda'): + """Serialize picklable python object to tensor.""" + storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj)) + return torch.ByteTensor(storage).to(device=device) + + +def tensor2obj(tensor): + """Deserialize tensor to picklable python object.""" + return pickle.loads(tensor.cpu().numpy().tobytes()) + + +@functools.lru_cache() +def _get_global_gloo_group(): + """Return a process group based on gloo backend, containing all the ranks + The result is cached.""" + if dist.get_backend() == 'nccl': + return dist.new_group(backend='gloo') + else: + return dist.group.WORLD + + +def all_reduce_dict(py_dict, op='sum', group=None, to_float=True): + """Apply all reduce function for python dict object. + + The code is modified from https://github.com/Megvii- + BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py. + + NOTE: make sure that py_dict in different ranks has the same keys and + the values should be in the same shape. Currently only supports + nccl backend. + + Args: + py_dict (dict): Dict to be applied all reduce op. + op (str): Operator, could be 'sum' or 'mean'. Default: 'sum' + group (:obj:`torch.distributed.group`, optional): Distributed group, + Default: None. + to_float (bool): Whether to convert all values of dict to float. + Default: True. + + Returns: + OrderedDict: reduced python dict object. 
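+
+    Example (single-process sketch; with ``world_size == 1`` the input dict
+    is returned unchanged, so this only illustrates the interface):
+        >>> import torch
+        >>> from mmdet.core.utils.dist_utils import all_reduce_dict
+        >>> stats = {'num_pos': torch.tensor(3.), 'num_neg': torch.tensor(5.)}
+        >>> out = all_reduce_dict(stats, op='mean')
+        >>> assert out['num_pos'] == 3. and out['num_neg'] == 5.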
+ """ + warnings.warn( + 'group` is deprecated. Currently only supports NCCL backend.') + _, world_size = get_dist_info() + if world_size == 1: + return py_dict + + # all reduce logic across different devices. + py_key = list(py_dict.keys()) + if not isinstance(py_dict, OrderedDict): + py_key_tensor = obj2tensor(py_key) + dist.broadcast(py_key_tensor, src=0) + py_key = tensor2obj(py_key_tensor) + + tensor_shapes = [py_dict[k].shape for k in py_key] + tensor_numels = [py_dict[k].numel() for k in py_key] + + if to_float: + warnings.warn('Note: the "to_float" is True, you need to ' + 'ensure that the behavior is reasonable.') + flatten_tensor = torch.cat( + [py_dict[k].flatten().float() for k in py_key]) + else: + flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key]) + + dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM) + if op == 'mean': + flatten_tensor /= world_size + + split_tensors = [ + x.reshape(shape) for x, shape in zip( + torch.split(flatten_tensor, tensor_numels), tensor_shapes) + ] + out_dict = {k: v for k, v in zip(py_key, split_tensors)} + if isinstance(py_dict, OrderedDict): + out_dict = OrderedDict(out_dict) + return out_dict + + +def sync_random_seed(seed=None, device='cuda'): + """Make sure different ranks share the same seed. + + All workers must call this function, otherwise it will deadlock. + This method is generally used in `DistributedSampler`, + because the seed should be identical across all processes + in the distributed group. + + In distributed sampling, different ranks should sample non-overlapped + data in the dataset. Therefore, this function is used to make sure that + each rank shuffles the data indices in the same order based + on the same seed. Then different ranks could use different indices + to select non-overlapped data from the same data list. + + Args: + seed (int, Optional): The seed. Default to None. + device (str): The device where the seed will be put on. + Default to 'cuda'. + + Returns: + int: Seed to be used. + """ + if seed is None: + seed = np.random.randint(2**31) + assert isinstance(seed, int) + + rank, world_size = get_dist_info() + + if world_size == 1: + return seed + + if rank == 0: + random_num = torch.tensor(seed, dtype=torch.int32, device=device) + else: + random_num = torch.tensor(0, dtype=torch.int32, device=device) + dist.broadcast(random_num, src=0) + return random_num.item() diff --git a/downstream/mmdetection/mmdet/core/utils/misc.py b/downstream/mmdetection/mmdet/core/utils/misc.py new file mode 100644 index 0000000..14cb745 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/utils/misc.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial + +import numpy as np +import torch +from six.moves import map, zip + +from ..mask.structures import BitmapMasks, PolygonMasks + + +def multi_apply(func, *args, **kwargs): + """Apply function to a list of arguments. + + Note: + This function applies the ``func`` to multiple inputs and + map the multiple outputs of the ``func`` into different + list. Each list contains the same type of outputs corresponding + to different inputs. 
+ + Args: + func (Function): A function that will be applied to a list of + arguments + + Returns: + tuple(list): A tuple containing multiple list, each list contains \ + a kind of returned results by the function + """ + pfunc = partial(func, **kwargs) if kwargs else func + map_results = map(pfunc, *args) + return tuple(map(list, zip(*map_results))) + + +def unmap(data, count, inds, fill=0): + """Unmap a subset of item (data) back to the original set of items (of size + count)""" + if data.dim() == 1: + ret = data.new_full((count, ), fill) + ret[inds.type(torch.bool)] = data + else: + new_size = (count, ) + data.size()[1:] + ret = data.new_full(new_size, fill) + ret[inds.type(torch.bool), :] = data + return ret + + +def mask2ndarray(mask): + """Convert Mask to ndarray.. + + Args: + mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or + torch.Tensor or np.ndarray): The mask to be converted. + + Returns: + np.ndarray: Ndarray mask of shape (n, h, w) that has been converted + """ + if isinstance(mask, (BitmapMasks, PolygonMasks)): + mask = mask.to_ndarray() + elif isinstance(mask, torch.Tensor): + mask = mask.detach().cpu().numpy() + elif not isinstance(mask, np.ndarray): + raise TypeError(f'Unsupported {type(mask)} data type') + return mask + + +def flip_tensor(src_tensor, flip_direction): + """flip tensor base on flip_direction. + + Args: + src_tensor (Tensor): input feature map, shape (B, C, H, W). + flip_direction (str): The flipping direction. Options are + 'horizontal', 'vertical', 'diagonal'. + + Returns: + out_tensor (Tensor): Flipped tensor. + """ + assert src_tensor.ndim == 4 + valid_directions = ['horizontal', 'vertical', 'diagonal'] + assert flip_direction in valid_directions + if flip_direction == 'horizontal': + out_tensor = torch.flip(src_tensor, [3]) + elif flip_direction == 'vertical': + out_tensor = torch.flip(src_tensor, [2]) + else: + out_tensor = torch.flip(src_tensor, [2, 3]) + return out_tensor + + +def select_single_mlvl(mlvl_tensors, batch_id, detach=True): + """Extract a multi-scale single image tensor from a multi-scale batch + tensor based on batch index. + + Note: The default value of detach is True, because the proposal gradient + needs to be detached during the training of the two-stage model. E.g + Cascade Mask R-CNN. + + Args: + mlvl_tensors (list[Tensor]): Batch tensor for all scale levels, + each is a 4D-tensor. + batch_id (int): Batch index. + detach (bool): Whether detach gradient. Default True. + + Returns: + list[Tensor]: Multi-scale single image tensor. + """ + assert isinstance(mlvl_tensors, (list, tuple)) + num_levels = len(mlvl_tensors) + + if detach: + mlvl_tensor_list = [ + mlvl_tensors[i][batch_id].detach() for i in range(num_levels) + ] + else: + mlvl_tensor_list = [ + mlvl_tensors[i][batch_id] for i in range(num_levels) + ] + return mlvl_tensor_list + + +def filter_scores_and_topk(scores, score_thr, topk, results=None): + """Filter results using score threshold and topk candidates. + + Args: + scores (Tensor): The scores, shape (num_bboxes, K). + score_thr (float): The score filter threshold. + topk (int): The number of topk candidates. + results (dict or list or Tensor, Optional): The results to + which the filtering rule is to be applied. The shape + of each item is (num_bboxes, N). + + Returns: + tuple: Filtered results + + - scores (Tensor): The scores after being filtered, \ + shape (num_bboxes_filtered, ). + - labels (Tensor): The class labels, shape \ + (num_bboxes_filtered, ). 
+ - anchor_idxs (Tensor): The anchor indexes, shape \ + (num_bboxes_filtered, ). + - filtered_results (dict or list or Tensor, Optional): \ + The filtered results. The shape of each item is \ + (num_bboxes_filtered, N). + """ + valid_mask = scores > score_thr + scores = scores[valid_mask] + valid_idxs = torch.nonzero(valid_mask) + + num_topk = min(topk, valid_idxs.size(0)) + # torch.sort is actually faster than .topk (at least on GPUs) + scores, idxs = scores.sort(descending=True) + scores = scores[:num_topk] + topk_idxs = valid_idxs[idxs[:num_topk]] + keep_idxs, labels = topk_idxs.unbind(dim=1) + + filtered_results = None + if results is not None: + if isinstance(results, dict): + filtered_results = {k: v[keep_idxs] for k, v in results.items()} + elif isinstance(results, list): + filtered_results = [result[keep_idxs] for result in results] + elif isinstance(results, torch.Tensor): + filtered_results = results[keep_idxs] + else: + raise NotImplementedError(f'Only supports dict or list or Tensor, ' + f'but get {type(results)}.') + return scores, labels, keep_idxs, filtered_results + + +def center_of_mass(mask, esp=1e-6): + """Calculate the centroid coordinates of the mask. + + Args: + mask (Tensor): The mask to be calculated, shape (h, w). + esp (float): Avoid dividing by zero. Default: 1e-6. + + Returns: + tuple[Tensor]: the coordinates of the center point of the mask. + + - center_h (Tensor): the center point of the height. + - center_w (Tensor): the center point of the width. + """ + h, w = mask.shape + grid_h = torch.arange(h, device=mask.device)[:, None] + grid_w = torch.arange(w, device=mask.device) + normalizer = mask.sum().float().clamp(min=esp) + center_h = (mask * grid_h).sum() / normalizer + center_w = (mask * grid_w).sum() / normalizer + return center_h, center_w + + +def generate_coordinate(featmap_sizes, device='cuda'): + """Generate the coordinate. + + Args: + featmap_sizes (tuple): The feature to be calculated, + of shape (N, C, W, H). + device (str): The device where the feature will be put on. + Returns: + coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H). + """ + + x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) + y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) + y, x = torch.meshgrid(y_range, x_range) + y = y.expand([featmap_sizes[0], 1, -1, -1]) + x = x.expand([featmap_sizes[0], 1, -1, -1]) + coord_feat = torch.cat([x, y], 1) + + return coord_feat diff --git a/downstream/mmdetection/mmdet/core/visualization/__init__.py b/downstream/mmdetection/mmdet/core/visualization/__init__.py new file mode 100644 index 0000000..2eb17c4 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/visualization/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .image import (color_val_matplotlib, imshow_det_bboxes, + imshow_gt_det_bboxes) +from .palette import get_palette, palette_val + +__all__ = [ + 'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib', + 'palette_val', 'get_palette' +] diff --git a/downstream/mmdetection/mmdet/core/visualization/image.py b/downstream/mmdetection/mmdet/core/visualization/image.py new file mode 100644 index 0000000..43bebf9 --- /dev/null +++ b/downstream/mmdetection/mmdet/core/visualization/image.py @@ -0,0 +1,559 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
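+# Quick sanity check for the color helper defined below (illustrative;
+# it relies on mmcv.color_val returning colors in BGR order):
+#
+#     >>> color_val_matplotlib('red')
+#     (1.0, 0.0, 0.0)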
+import cv2 +import matplotlib.pyplot as plt +import mmcv +import numpy as np +import pycocotools.mask as mask_util +from matplotlib.collections import PatchCollection +from matplotlib.patches import Polygon + +from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET +from ..mask.structures import bitmap_to_polygon +from ..utils import mask2ndarray +from .palette import get_palette, palette_val + +__all__ = [ + 'color_val_matplotlib', 'draw_masks', 'draw_bboxes', 'draw_labels', + 'imshow_det_bboxes', 'imshow_gt_det_bboxes' +] + +EPS = 1e-2 + + +def color_val_matplotlib(color): + """Convert various input in BGR order to normalized RGB matplotlib color + tuples. + + Args: + color (:obj`Color` | str | tuple | int | ndarray): Color inputs. + + Returns: + tuple[float]: A tuple of 3 normalized floats indicating RGB channels. + """ + color = mmcv.color_val(color) + color = [color / 255 for color in color[::-1]] + return tuple(color) + + +def _get_adaptive_scales(areas, min_area=800, max_area=30000): + """Get adaptive scales according to areas. + + The scale range is [0.5, 1.0]. When the area is less than + ``'min_area'``, the scale is 0.5 while the area is larger than + ``'max_area'``, the scale is 1.0. + + Args: + areas (ndarray): The areas of bboxes or masks with the + shape of (n, ). + min_area (int): Lower bound areas for adaptive scales. + Default: 800. + max_area (int): Upper bound areas for adaptive scales. + Default: 30000. + + Returns: + ndarray: The adaotive scales with the shape of (n, ). + """ + scales = 0.5 + (areas - min_area) / (max_area - min_area) + scales = np.clip(scales, 0.5, 1.0) + return scales + + +def _get_bias_color(base, max_dist=30): + """Get different colors for each masks. + + Get different colors for each masks by adding a bias + color to the base category color. + Args: + base (ndarray): The base category color with the shape + of (3, ). + max_dist (int): The max distance of bias. Default: 30. + + Returns: + ndarray: The new color for a mask with the shape of (3, ). + """ + new_color = base + np.random.randint( + low=-max_dist, high=max_dist + 1, size=3) + return np.clip(new_color, 0, 255, new_color) + + +def draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2): + """Draw bounding boxes on the axes. + + Args: + ax (matplotlib.Axes): The input axes. + bboxes (ndarray): The input bounding boxes with the shape + of (n, 4). + color (list[tuple] | matplotlib.color): the colors for each + bounding boxes. + alpha (float): Transparency of bounding boxes. Default: 0.8. + thickness (int): Thickness of lines. Default: 2. + + Returns: + matplotlib.Axes: The result axes. + """ + polygons = [] + for i, bbox in enumerate(bboxes): + bbox_int = bbox.astype(np.int32) + poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]], + [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]] + np_poly = np.array(poly).reshape((4, 2)) + polygons.append(Polygon(np_poly)) + p = PatchCollection( + polygons, + facecolor='none', + edgecolors=color, + linewidths=thickness, + alpha=alpha) + ax.add_collection(p) + + return ax + + +def draw_labels(ax, + labels, + positions, + scores=None, + class_names=None, + color='w', + font_size=8, + scales=None, + horizontal_alignment='left'): + """Draw labels on the axes. + + Args: + ax (matplotlib.Axes): The input axes. + labels (ndarray): The labels with the shape of (n, ). + positions (ndarray): The positions to draw each labels. + scores (ndarray): The scores for each labels. + class_names (list[str]): The class names. 
+ color (list[tuple] | matplotlib.color): The colors for labels. + font_size (int): Font size of texts. Default: 8. + scales (list[float]): Scales of texts. Default: None. + horizontal_alignment (str): The horizontal alignment method of + texts. Default: 'left'. + + Returns: + matplotlib.Axes: The result axes. + """ + for i, (pos, label) in enumerate(zip(positions, labels)): + label_text = class_names[ + label] if class_names is not None else f'class {label}' + if scores is not None: + label_text += f'|{scores[i]:.02f}' + text_color = color[i] if isinstance(color, list) else color + + font_size_mask = font_size if scales is None else font_size * scales[i] + ax.text( + pos[0], + pos[1], + f'{label_text}', + bbox={ + 'facecolor': 'black', + 'alpha': 0.8, + 'pad': 0.7, + 'edgecolor': 'none' + }, + color=text_color, + fontsize=font_size_mask, + verticalalignment='top', + horizontalalignment=horizontal_alignment) + + return ax + + +def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8): + """Draw masks on the image and their edges on the axes. + + Args: + ax (matplotlib.Axes): The input axes. + img (ndarray): The image with the shape of (3, h, w). + masks (ndarray): The masks with the shape of (n, h, w). + color (ndarray): The colors for each masks with the shape + of (n, 3). + with_edge (bool): Whether to draw edges. Default: True. + alpha (float): Transparency of bounding boxes. Default: 0.8. + + Returns: + matplotlib.Axes: The result axes. + ndarray: The result image. + """ + taken_colors = set([0, 0, 0]) + if color is None: + random_colors = np.random.randint(0, 255, (masks.size(0), 3)) + color = [tuple(c) for c in random_colors] + color = np.array(color, dtype=np.uint8) + polygons = [] + for i, mask in enumerate(masks): + if with_edge: + contours, _ = bitmap_to_polygon(mask) + polygons += [Polygon(c) for c in contours] + + color_mask = color[i] + while tuple(color_mask) in taken_colors: + color_mask = _get_bias_color(color_mask) + taken_colors.add(tuple(color_mask)) + + mask = mask.astype(bool) + img[mask] = img[mask] * (1 - alpha) + color_mask * alpha + + p = PatchCollection( + polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8) + ax.add_collection(p) + + return ax, img + + +def imshow_det_bboxes(img, + bboxes=None, + labels=None, + segms=None, + class_names=None, + score_thr=0, + bbox_color='green', + text_color='green', + mask_color=None, + thickness=2, + font_size=8, + win_name='', + show=True, + wait_time=0, + out_file=None): + """Draw bboxes and class labels (with scores) on an image. + + Args: + img (str | ndarray): The image to be displayed. + bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or + (n, 5). + labels (ndarray): Labels of bboxes. + segms (ndarray | None): Masks, shaped (n,h,w) or None. + class_names (list[str]): Names of each classes. + score_thr (float): Minimum score of bboxes to be shown. Default: 0. + bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines. + If a single color is given, it will be applied to all classes. + The tuple of color should be in RGB order. Default: 'green'. + text_color (list[tuple] | tuple | str | None): Colors of texts. + If a single color is given, it will be applied to all classes. + The tuple of color should be in RGB order. Default: 'green'. + mask_color (list[tuple] | tuple | str | None, optional): Colors of + masks. If a single color is given, it will be applied to all + classes. The tuple of color should be in RGB order. + Default: None. + thickness (int): Thickness of lines. 
Default: 2. + font_size (int): Font size of texts. Default: 13. + show (bool): Whether to show the image. Default: True. + win_name (str): The window name. Default: ''. + wait_time (float): Value of waitKey param. Default: 0. + out_file (str, optional): The filename to write the image. + Default: None. + + Returns: + ndarray: The image with bboxes drawn on it. + """ + assert bboxes is None or bboxes.ndim == 2, \ + f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.' + assert labels.ndim == 1, \ + f' labels ndim should be 1, but its ndim is {labels.ndim}.' + assert bboxes is None or bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \ + f' bboxes.shape[1] should be 4 or 5, but its {bboxes.shape[1]}.' + assert bboxes is None or bboxes.shape[0] <= labels.shape[0], \ + 'labels.shape[0] should not be less than bboxes.shape[0].' + assert segms is None or segms.shape[0] == labels.shape[0], \ + 'segms.shape[0] and labels.shape[0] should have the same length.' + assert segms is not None or bboxes is not None, \ + 'segms and bboxes should not be None at the same time.' + + img = mmcv.imread(img).astype(np.uint8) + + if score_thr > 0: + assert bboxes is not None and bboxes.shape[1] == 5 + scores = bboxes[:, -1] + inds = scores > score_thr + bboxes = bboxes[inds, :] + labels = labels[inds] + if segms is not None: + segms = segms[inds, ...] + + img = mmcv.bgr2rgb(img) + width, height = img.shape[1], img.shape[0] + img = np.ascontiguousarray(img) + + fig = plt.figure(win_name, frameon=False) + plt.title(win_name) + canvas = fig.canvas + dpi = fig.get_dpi() + # add a small EPS to avoid precision lost due to matplotlib's truncation + # (https://github.com/matplotlib/matplotlib/issues/15363) + fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi) + + # remove white edges by set subplot margin + plt.subplots_adjust(left=0, right=1, bottom=0, top=1) + ax = plt.gca() + ax.axis('off') + + max_label = int(max(labels) if len(labels) > 0 else 0) + text_palette = palette_val(get_palette(text_color, max_label + 1)) + text_colors = [text_palette[label] for label in labels] + + num_bboxes = 0 + if bboxes is not None: + num_bboxes = bboxes.shape[0] + bbox_palette = palette_val(get_palette(bbox_color, max_label + 1)) + colors = [bbox_palette[label] for label in labels[:num_bboxes]] + draw_bboxes(ax, bboxes, colors, alpha=0.8, thickness=thickness) + + horizontal_alignment = 'left' + positions = bboxes[:, :2].astype(np.int32) + thickness + areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0]) + scales = _get_adaptive_scales(areas) + scores = bboxes[:, 4] if bboxes.shape[1] == 5 else None + draw_labels( + ax, + labels[:num_bboxes], + positions, + scores=scores, + class_names=class_names, + color=text_colors, + font_size=font_size, + scales=scales, + horizontal_alignment=horizontal_alignment) + + if segms is not None: + mask_palette = get_palette(mask_color, max_label + 1) + colors = [mask_palette[label] for label in labels] + colors = np.array(colors, dtype=np.uint8) + draw_masks(ax, img, segms, colors, with_edge=True) + + if num_bboxes < segms.shape[0]: + segms = segms[num_bboxes:] + horizontal_alignment = 'center' + areas = [] + positions = [] + for mask in segms: + _, _, stats, centroids = cv2.connectedComponentsWithStats( + mask.astype(np.uint8), connectivity=8) + largest_id = np.argmax(stats[1:, -1]) + 1 + positions.append(centroids[largest_id]) + areas.append(stats[largest_id, -1]) + areas = np.stack(areas, axis=0) + scales = _get_adaptive_scales(areas) + draw_labels( + ax, + 
labels[num_bboxes:], + positions, + class_names=class_names, + color=text_colors, + font_size=font_size, + scales=scales, + horizontal_alignment=horizontal_alignment) + + plt.imshow(img) + + stream, _ = canvas.print_to_buffer() + buffer = np.frombuffer(stream, dtype='uint8') + img_rgba = buffer.reshape(height, width, 4) + rgb, alpha = np.split(img_rgba, [3], axis=2) + img = rgb.astype('uint8') + img = mmcv.rgb2bgr(img) + + if show: + # We do not use cv2 for display because in some cases, opencv will + # conflict with Qt, it will output a warning: Current thread + # is not the object's thread. You can refer to + # https://github.com/opencv/opencv-python/issues/46 for details + if wait_time == 0: + plt.show() + else: + plt.show(block=False) + plt.pause(wait_time) + if out_file is not None: + mmcv.imwrite(img, out_file) + + plt.close() + + return img + + +def imshow_gt_det_bboxes(img, + annotation, + result, + class_names=None, + score_thr=0, + gt_bbox_color=(61, 102, 255), + gt_text_color=(200, 200, 200), + gt_mask_color=(61, 102, 255), + det_bbox_color=(241, 101, 72), + det_text_color=(200, 200, 200), + det_mask_color=(241, 101, 72), + thickness=2, + font_size=13, + win_name='', + show=True, + wait_time=0, + out_file=None, + overlay_gt_pred=True): + """General visualization GT and result function. + + Args: + img (str | ndarray): The image to be displayed. + annotation (dict): Ground truth annotations where contain keys of + 'gt_bboxes' and 'gt_labels' or 'gt_masks'. + result (tuple[list] | list): The detection result, can be either + (bbox, segm) or just bbox. + class_names (list[str]): Names of each classes. + score_thr (float): Minimum score of bboxes to be shown. Default: 0. + gt_bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines. + If a single color is given, it will be applied to all classes. + The tuple of color should be in RGB order. Default: (61, 102, 255). + gt_text_color (list[tuple] | tuple | str | None): Colors of texts. + If a single color is given, it will be applied to all classes. + The tuple of color should be in RGB order. Default: (200, 200, 200). + gt_mask_color (list[tuple] | tuple | str | None, optional): Colors of + masks. If a single color is given, it will be applied to all classes. + The tuple of color should be in RGB order. Default: (61, 102, 255). + det_bbox_color (list[tuple] | tuple | str | None):Colors of bbox lines. + If a single color is given, it will be applied to all classes. + The tuple of color should be in RGB order. Default: (241, 101, 72). + det_text_color (list[tuple] | tuple | str | None):Colors of texts. + If a single color is given, it will be applied to all classes. + The tuple of color should be in RGB order. Default: (200, 200, 200). + det_mask_color (list[tuple] | tuple | str | None, optional): Color of + masks. If a single color is given, it will be applied to all classes. + The tuple of color should be in RGB order. Default: (241, 101, 72). + thickness (int): Thickness of lines. Default: 2. + font_size (int): Font size of texts. Default: 13. + win_name (str): The window name. Default: ''. + show (bool): Whether to show the image. Default: True. + wait_time (float): Value of waitKey param. Default: 0. + out_file (str, optional): The filename to write the image. + Default: None. + overlay_gt_pred (bool): Whether to plot gts and predictions on the + same image. If False, predictions and gts will be plotted on two same + image which will be concatenated in vertical direction. 
The image + above is drawn with gt, and the image below is drawn with the + prediction result. Default: True. + + Returns: + ndarray: The image with bboxes or masks drawn on it. + """ + assert 'gt_bboxes' in annotation + assert 'gt_labels' in annotation + assert isinstance(result, (tuple, list, dict)), 'Expected ' \ + f'tuple or list or dict, but get {type(result)}' + + gt_bboxes = annotation['gt_bboxes'] + gt_labels = annotation['gt_labels'] + gt_masks = annotation.get('gt_masks', None) + if gt_masks is not None: + gt_masks = mask2ndarray(gt_masks) + + gt_seg = annotation.get('gt_semantic_seg', None) + if gt_seg is not None: + pad_value = 255 # the padding value of gt_seg + sem_labels = np.unique(gt_seg) + all_labels = np.concatenate((gt_labels, sem_labels), axis=0) + all_labels, counts = np.unique(all_labels, return_counts=True) + stuff_labels = all_labels[np.logical_and(counts < 2, + all_labels != pad_value)] + stuff_masks = gt_seg[None] == stuff_labels[:, None, None] + gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0) + gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)), + axis=0) + # If you need to show the bounding boxes, + # please comment the following line + # gt_bboxes = None + + img = mmcv.imread(img) + + img_with_gt = imshow_det_bboxes( + img, + gt_bboxes, + gt_labels, + gt_masks, + class_names=class_names, + bbox_color=gt_bbox_color, + text_color=gt_text_color, + mask_color=gt_mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=False) + + if not isinstance(result, dict): + if isinstance(result, tuple): + bbox_result, segm_result = result + if isinstance(segm_result, tuple): + segm_result = segm_result[0] # ms rcnn + else: + bbox_result, segm_result = result, None + + bboxes = np.vstack(bbox_result) + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + + segms = None + if segm_result is not None and len(labels) > 0: # non empty + segms = mmcv.concat_list(segm_result) + segms = mask_util.decode(segms) + segms = segms.transpose(2, 0, 1) + else: + assert class_names is not None, 'We need to know the number ' \ + 'of classes.' 
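+        # A dict result is treated as a panoptic segmentation output:
+        # 'pan_results' is a 2-D id map, the class label of each segment is
+        # recovered with `id % INSTANCE_OFFSET`, each binary mask comes from
+        # comparing the map against one of its unique ids, and
+        # `len(class_names)` serves as the VOID id for unlabeled pixels.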
+ VOID = len(class_names) + bboxes = None + pan_results = result['pan_results'] + # keep objects ahead + ids = np.unique(pan_results)[::-1] + legal_indices = ids != VOID + ids = ids[legal_indices] + labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) + segms = (pan_results[None] == ids[:, None, None]) + + if overlay_gt_pred: + img = imshow_det_bboxes( + img_with_gt, + bboxes, + labels, + segms=segms, + class_names=class_names, + score_thr=score_thr, + bbox_color=det_bbox_color, + text_color=det_text_color, + mask_color=det_mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + else: + img_with_det = imshow_det_bboxes( + img, + bboxes, + labels, + segms=segms, + class_names=class_names, + score_thr=score_thr, + bbox_color=det_bbox_color, + text_color=det_text_color, + mask_color=det_mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=False) + img = np.concatenate([img_with_gt, img_with_det], axis=0) + + plt.imshow(img) + if show: + if wait_time == 0: + plt.show() + else: + plt.show(block=False) + plt.pause(wait_time) + if out_file is not None: + mmcv.imwrite(img, out_file) + plt.close() + + return img diff --git a/downstream/mmdetection/mmdet/core/visualization/palette.py b/downstream/mmdetection/mmdet/core/visualization/palette.py new file mode 100644 index 0000000..11692cd --- /dev/null +++ b/downstream/mmdetection/mmdet/core/visualization/palette.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np + + +def palette_val(palette): + """Convert palette to matplotlib palette. + + Args: + palette List[tuple]: A list of color tuples. + + Returns: + List[tuple[float]]: A list of RGB matplotlib color tuples. + """ + new_palette = [] + for color in palette: + color = [c / 255 for c in color] + new_palette.append(tuple(color)) + return new_palette + + +def get_palette(palette, num_classes): + """Get palette from various inputs. + + Args: + palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs. + num_classes (int): the number of classes. + + Returns: + list[tuple[int]]: A list of color tuples. + """ + assert isinstance(num_classes, int) + + if isinstance(palette, list): + dataset_palette = palette + elif isinstance(palette, tuple): + dataset_palette = [palette] * num_classes + elif palette == 'random' or palette is None: + state = np.random.get_state() + # random color + np.random.seed(42) + palette = np.random.randint(0, 256, size=(num_classes, 3)) + np.random.set_state(state) + dataset_palette = [tuple(c) for c in palette] + elif palette == 'coco': + from mmdet.datasets import CocoDataset, CocoPanopticDataset + dataset_palette = CocoDataset.PALETTE + if len(dataset_palette) < num_classes: + dataset_palette = CocoPanopticDataset.PALETTE + elif palette == 'citys': + from mmdet.datasets import CityscapesDataset + dataset_palette = CityscapesDataset.PALETTE + elif palette == 'voc': + from mmdet.datasets import VOCDataset + dataset_palette = VOCDataset.PALETTE + elif mmcv.is_str(palette): + dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes + else: + raise TypeError(f'Invalid type for palette: {type(palette)}') + + assert len(dataset_palette) >= num_classes, \ + 'The length of palette should not be less than `num_classes`.' 
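+    # Illustrative behaviour (examples, not exhaustive):
+    #   get_palette('coco', 80)     -> CocoDataset.PALETTE colors
+    #   get_palette((255, 0, 0), 3) -> [(255, 0, 0)] * 3
+    #   get_palette('random', 10)   -> 10 reproducible random RGB tuples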
+ return dataset_palette diff --git a/downstream/mmdetection/mmdet/datasets/__init__.py b/downstream/mmdetection/mmdet/datasets/__init__.py new file mode 100644 index 0000000..f251d07 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset +from .cityscapes import CityscapesDataset +from .coco import CocoDataset +from .coco_panoptic import CocoPanopticDataset +from .custom import CustomDataset +from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, + MultiImageMixDataset, RepeatDataset) +from .deepfashion import DeepFashionDataset +from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset +from .openimages import OpenImagesChallengeDataset, OpenImagesDataset +from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler +from .utils import (NumClassCheckHook, get_loading_pipeline, + replace_ImageToTensor) +from .voc import VOCDataset +from .wider_face import WIDERFaceDataset +from .xml_style import XMLDataset + +__all__ = [ + 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset', + 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', + 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler', + 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', + 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES', + 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline', + 'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset', + 'OpenImagesDataset', 'OpenImagesChallengeDataset' +] diff --git a/downstream/mmdetection/mmdet/datasets/api_wrappers/__init__.py b/downstream/mmdetection/mmdet/datasets/api_wrappers/__init__.py new file mode 100644 index 0000000..af85575 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/api_wrappers/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .coco_api import COCO, COCOeval +from .panoptic_evaluation import pq_compute_multi_core, pq_compute_single_core + +__all__ = [ + 'COCO', 'COCOeval', 'pq_compute_multi_core', 'pq_compute_single_core' +] diff --git a/downstream/mmdetection/mmdet/datasets/api_wrappers/coco_api.py b/downstream/mmdetection/mmdet/datasets/api_wrappers/coco_api.py new file mode 100644 index 0000000..eef6341 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/api_wrappers/coco_api.py @@ -0,0 +1,47 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This file add snake case alias for coco api + +import warnings + +import pycocotools +from pycocotools.coco import COCO as _COCO +from pycocotools.cocoeval import COCOeval as _COCOeval + + +class COCO(_COCO): + """This class is almost the same as official pycocotools package. + + It implements some snake case function aliases. So that the COCO class has + the same interface as LVIS class. + """ + + def __init__(self, annotation_file=None): + if getattr(pycocotools, '__version__', '0') >= '12.0.2': + warnings.warn( + 'mmpycocotools is deprecated. 
Please install official pycocotools by "pip install pycocotools"', # noqa: E501 + UserWarning) + super().__init__(annotation_file=annotation_file) + self.img_ann_map = self.imgToAnns + self.cat_img_map = self.catToImgs + + def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None): + return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd) + + def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]): + return self.getCatIds(cat_names, sup_names, cat_ids) + + def get_img_ids(self, img_ids=[], cat_ids=[]): + return self.getImgIds(img_ids, cat_ids) + + def load_anns(self, ids): + return self.loadAnns(ids) + + def load_cats(self, ids): + return self.loadCats(ids) + + def load_imgs(self, ids): + return self.loadImgs(ids) + + +# just for the ease of import +COCOeval = _COCOeval diff --git a/downstream/mmdetection/mmdet/datasets/api_wrappers/panoptic_evaluation.py b/downstream/mmdetection/mmdet/datasets/api_wrappers/panoptic_evaluation.py new file mode 100644 index 0000000..55f57bf --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/api_wrappers/panoptic_evaluation.py @@ -0,0 +1,228 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +# Copyright (c) 2018, Alexander Kirillov +# This file supports `file_client` for `panopticapi`, +# the source code is copied from `panopticapi`, +# only the way to load the gt images is modified. +import multiprocessing +import os + +import mmcv +import numpy as np + +try: + from panopticapi.evaluation import OFFSET, VOID, PQStat + from panopticapi.utils import rgb2id +except ImportError: + PQStat = None + rgb2id = None + VOID = 0 + OFFSET = 256 * 256 * 256 + + +def pq_compute_single_core(proc_id, + annotation_set, + gt_folder, + pred_folder, + categories, + file_client=None, + print_log=False): + """The single core function to evaluate the metric of Panoptic + Segmentation. + + Same as the function with the same name in `panopticapi`. Only the function + to load the images is changed to use the file client. + + Args: + proc_id (int): The id of the mini process. + gt_folder (str): The path of the ground truth images. + pred_folder (str): The path of the prediction images. + categories (str): The categories of the dataset. + file_client (object): The file client of the dataset. If None, + the backend will be set to `disk`. + print_log (bool): Whether to print the log. Defaults to False. + """ + if PQStat is None: + raise RuntimeError( + 'panopticapi is not installed, please install it by: ' + 'pip install git+https://github.com/cocodataset/' + 'panopticapi.git.') + + if file_client is None: + file_client_args = dict(backend='disk') + file_client = mmcv.FileClient(**file_client_args) + + pq_stat = PQStat() + + idx = 0 + for gt_ann, pred_ann in annotation_set: + if print_log and idx % 100 == 0: + print('Core: {}, {} from {} images processed'.format( + proc_id, idx, len(annotation_set))) + idx += 1 + # The gt images can be on the local disk or `ceph`, so we use + # file_client here. + img_bytes = file_client.get( + os.path.join(gt_folder, gt_ann['file_name'])) + pan_gt = mmcv.imfrombytes(img_bytes, flag='color', channel_order='rgb') + pan_gt = rgb2id(pan_gt) + + # The predictions can only be on the local dist now. 
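+        # Both the gt and prediction PNGs store the segment id packed into
+        # the RGB channels; panopticapi's rgb2id decodes it as
+        # id = R + 256 * G + 256 ** 2 * B.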
+ pan_pred = mmcv.imread( + os.path.join(pred_folder, pred_ann['file_name']), + flag='color', + channel_order='rgb') + pan_pred = rgb2id(pan_pred) + + gt_segms = {el['id']: el for el in gt_ann['segments_info']} + pred_segms = {el['id']: el for el in pred_ann['segments_info']} + + # predicted segments area calculation + prediction sanity checks + pred_labels_set = set(el['id'] for el in pred_ann['segments_info']) + labels, labels_cnt = np.unique(pan_pred, return_counts=True) + for label, label_cnt in zip(labels, labels_cnt): + if label not in pred_segms: + if label == VOID: + continue + raise KeyError( + 'In the image with ID {} segment with ID {} is ' + 'presented in PNG and not presented in JSON.'.format( + gt_ann['image_id'], label)) + pred_segms[label]['area'] = label_cnt + pred_labels_set.remove(label) + if pred_segms[label]['category_id'] not in categories: + raise KeyError( + 'In the image with ID {} segment with ID {} has ' + 'unknown category_id {}.'.format( + gt_ann['image_id'], label, + pred_segms[label]['category_id'])) + if len(pred_labels_set) != 0: + raise KeyError( + 'In the image with ID {} the following segment IDs {} ' + 'are presented in JSON and not presented in PNG.'.format( + gt_ann['image_id'], list(pred_labels_set))) + + # confusion matrix calculation + pan_gt_pred = pan_gt.astype(np.uint64) * OFFSET + pan_pred.astype( + np.uint64) + gt_pred_map = {} + labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True) + for label, intersection in zip(labels, labels_cnt): + gt_id = label // OFFSET + pred_id = label % OFFSET + gt_pred_map[(gt_id, pred_id)] = intersection + + # count all matched pairs + gt_matched = set() + pred_matched = set() + for label_tuple, intersection in gt_pred_map.items(): + gt_label, pred_label = label_tuple + if gt_label not in gt_segms: + continue + if pred_label not in pred_segms: + continue + if gt_segms[gt_label]['iscrowd'] == 1: + continue + if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][ + 'category_id']: + continue + + union = pred_segms[pred_label]['area'] + gt_segms[gt_label][ + 'area'] - intersection - gt_pred_map.get((VOID, pred_label), 0) + iou = intersection / union + if iou > 0.5: + pq_stat[gt_segms[gt_label]['category_id']].tp += 1 + pq_stat[gt_segms[gt_label]['category_id']].iou += iou + gt_matched.add(gt_label) + pred_matched.add(pred_label) + + # count false positives + crowd_labels_dict = {} + for gt_label, gt_info in gt_segms.items(): + if gt_label in gt_matched: + continue + # crowd segments are ignored + if gt_info['iscrowd'] == 1: + crowd_labels_dict[gt_info['category_id']] = gt_label + continue + pq_stat[gt_info['category_id']].fn += 1 + + # count false positives + for pred_label, pred_info in pred_segms.items(): + if pred_label in pred_matched: + continue + # intersection of the segment with VOID + intersection = gt_pred_map.get((VOID, pred_label), 0) + # plus intersection with corresponding CROWD region if it exists + if pred_info['category_id'] in crowd_labels_dict: + intersection += gt_pred_map.get( + (crowd_labels_dict[pred_info['category_id']], pred_label), + 0) + # predicted segment is ignored if more than half of + # the segment correspond to VOID and CROWD regions + if intersection / pred_info['area'] > 0.5: + continue + pq_stat[pred_info['category_id']].fp += 1 + + if print_log: + print('Core: {}, all {} images processed'.format( + proc_id, len(annotation_set))) + return pq_stat + + +def pq_compute_multi_core(matched_annotations_list, + gt_folder, + pred_folder, + categories, + 
file_client=None, + nproc=32): + """Evaluate the metrics of Panoptic Segmentation with multithreading. + + Same as the function with the same name in `panopticapi`. + + Args: + matched_annotations_list (list): The matched annotation list. Each + element is a tuple of annotations of the same image with the + format (gt_anns, pred_anns). + gt_folder (str): The path of the ground truth images. + pred_folder (str): The path of the prediction images. + categories (str): The categories of the dataset. + file_client (object): The file client of the dataset. If None, + the backend will be set to `disk`. + nproc (int): Number of processes for panoptic quality computing. + Defaults to 32. When `nproc` exceeds the number of cpu cores, + the number of cpu cores is used. + """ + if PQStat is None: + raise RuntimeError( + 'panopticapi is not installed, please install it by: ' + 'pip install git+https://github.com/cocodataset/' + 'panopticapi.git.') + + if file_client is None: + file_client_args = dict(backend='disk') + file_client = mmcv.FileClient(**file_client_args) + + cpu_num = min(nproc, multiprocessing.cpu_count()) + + annotations_split = np.array_split(matched_annotations_list, cpu_num) + print('Number of cores: {}, images per core: {}'.format( + cpu_num, len(annotations_split[0]))) + workers = multiprocessing.Pool(processes=cpu_num) + processes = [] + for proc_id, annotation_set in enumerate(annotations_split): + p = workers.apply_async(pq_compute_single_core, + (proc_id, annotation_set, gt_folder, + pred_folder, categories, file_client)) + processes.append(p) + + # Close the process pool, otherwise it will lead to memory + # leaking problems. + workers.close() + workers.join() + + pq_stat = PQStat() + for p in processes: + pq_stat += p.get() + + return pq_stat diff --git a/downstream/mmdetection/mmdet/datasets/builder.py b/downstream/mmdetection/mmdet/datasets/builder.py new file mode 100644 index 0000000..1936296 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/builder.py @@ -0,0 +1,215 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import platform +import random +import warnings +from functools import partial + +import numpy as np +import torch +from mmcv.parallel import collate +from mmcv.runner import get_dist_info +from mmcv.utils import TORCH_VERSION, Registry, build_from_cfg, digit_version +from torch.utils.data import DataLoader + +from .samplers import (ClassAwareSampler, DistributedGroupSampler, + DistributedSampler, GroupSampler, InfiniteBatchSampler, + InfiniteGroupBatchSampler) + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + base_soft_limit = rlimit[0] + hard_limit = rlimit[1] + soft_limit = min(max(4096, base_soft_limit), hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + +DATASETS = Registry('dataset') +PIPELINES = Registry('pipeline') + + +def _concat_dataset(cfg, default_args=None): + from .dataset_wrappers import ConcatDataset + ann_files = cfg['ann_file'] + img_prefixes = cfg.get('img_prefix', None) + seg_prefixes = cfg.get('seg_prefix', None) + proposal_files = cfg.get('proposal_file', None) + separate_eval = cfg.get('separate_eval', True) + + datasets = [] + num_dset = len(ann_files) + for i in range(num_dset): + data_cfg = copy.deepcopy(cfg) + # pop 'separate_eval' since it is not a valid key for common datasets. 
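+        # (Illustrative: with e.g. ann_file=['a.json', 'b.json'] this loop
+        # builds one dataset per annotation file, and ConcatDataset wraps
+        # them below.)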
+ if 'separate_eval' in data_cfg: + data_cfg.pop('separate_eval') + data_cfg['ann_file'] = ann_files[i] + if isinstance(img_prefixes, (list, tuple)): + data_cfg['img_prefix'] = img_prefixes[i] + if isinstance(seg_prefixes, (list, tuple)): + data_cfg['seg_prefix'] = seg_prefixes[i] + if isinstance(proposal_files, (list, tuple)): + data_cfg['proposal_file'] = proposal_files[i] + datasets.append(build_dataset(data_cfg, default_args)) + + return ConcatDataset(datasets, separate_eval) + + +def build_dataset(cfg, default_args=None): + from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, + MultiImageMixDataset, RepeatDataset) + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'ConcatDataset': + dataset = ConcatDataset( + [build_dataset(c, default_args) for c in cfg['datasets']], + cfg.get('separate_eval', True)) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + elif cfg['type'] == 'ClassBalancedDataset': + dataset = ClassBalancedDataset( + build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) + elif cfg['type'] == 'MultiImageMixDataset': + cp_cfg = copy.deepcopy(cfg) + cp_cfg['dataset'] = build_dataset(cp_cfg['dataset']) + cp_cfg.pop('type') + dataset = MultiImageMixDataset(**cp_cfg) + elif isinstance(cfg.get('ann_file'), (list, tuple)): + dataset = _concat_dataset(cfg, default_args) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + + return dataset + + +def build_dataloader(dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + shuffle=True, + seed=None, + runner_type='EpochBasedRunner', + persistent_workers=False, + class_aware_sampler=None, + **kwargs): + """Build PyTorch DataLoader. + + In distributed training, each GPU/process has a dataloader. + In non-distributed training, there is only one dataloader for all GPUs. + + Args: + dataset (Dataset): A PyTorch dataset. + samples_per_gpu (int): Number of training samples on each GPU, i.e., + batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data loading + for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed training. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: True. + seed (int, Optional): Seed to be used. Default: None. + runner_type (str): Type of runner. Default: `EpochBasedRunner` + persistent_workers (bool): If True, the data loader will not shutdown + the worker processes after a dataset has been consumed once. + This allows to maintain the workers `Dataset` instances alive. + This argument is only valid when PyTorch>=1.7.0. Default: False. + class_aware_sampler (dict): Whether to use `ClassAwareSampler` + during training. Default: None. + kwargs: any keyword argument to be used to initialize DataLoader + + Returns: + DataLoader: A PyTorch dataloader. + """ + rank, world_size = get_dist_info() + + if dist: + # When model is :obj:`DistributedDataParallel`, + # `batch_size` of :obj:`dataloader` is the + # number of training samples on each GPU. 
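+        # The effective (global) batch size is therefore
+        # samples_per_gpu * world_size.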
+ batch_size = samples_per_gpu + num_workers = workers_per_gpu + else: + # When model is obj:`DataParallel` + # the batch size is samples on all the GPUS + batch_size = num_gpus * samples_per_gpu + num_workers = num_gpus * workers_per_gpu + + if runner_type == 'IterBasedRunner': + # this is a batch sampler, which can yield + # a mini-batch indices each time. + # it can be used in both `DataParallel` and + # `DistributedDataParallel` + if shuffle: + batch_sampler = InfiniteGroupBatchSampler( + dataset, batch_size, world_size, rank, seed=seed) + else: + batch_sampler = InfiniteBatchSampler( + dataset, + batch_size, + world_size, + rank, + seed=seed, + shuffle=False) + batch_size = 1 + sampler = None + else: + if class_aware_sampler is not None: + # ClassAwareSampler can be used in both distributed and + # non-distributed training. + num_sample_class = class_aware_sampler.get('num_sample_class', 1) + sampler = ClassAwareSampler( + dataset, + samples_per_gpu, + world_size, + rank, + seed=seed, + num_sample_class=num_sample_class) + elif dist: + # DistributedGroupSampler will definitely shuffle the data to + # satisfy that images on each GPU are in the same group + if shuffle: + sampler = DistributedGroupSampler( + dataset, samples_per_gpu, world_size, rank, seed=seed) + else: + sampler = DistributedSampler( + dataset, world_size, rank, shuffle=False, seed=seed) + else: + sampler = GroupSampler(dataset, + samples_per_gpu) if shuffle else None + batch_sampler = None + + init_fn = partial( + worker_init_fn, num_workers=num_workers, rank=rank, + seed=seed) if seed is not None else None + + if (TORCH_VERSION != 'parrots' + and digit_version(TORCH_VERSION) >= digit_version('1.7.0')): + kwargs['persistent_workers'] = persistent_workers + elif persistent_workers is True: + warnings.warn('persistent_workers is invalid because your pytorch ' + 'version is lower than 1.7.0') + + data_loader = DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + batch_sampler=batch_sampler, + collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), + pin_memory=kwargs.pop('pin_memory', False), + worker_init_fn=init_fn, + **kwargs) + + return data_loader + + +def worker_init_fn(worker_id, num_workers, rank, seed): + # The seed of each worker equals to + # num_worker * rank + worker_id + user_seed + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) + torch.manual_seed(worker_seed) diff --git a/downstream/mmdetection/mmdet/datasets/cityscapes.py b/downstream/mmdetection/mmdet/datasets/cityscapes.py new file mode 100644 index 0000000..da6a2ad --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/cityscapes.py @@ -0,0 +1,338 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa +# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa + +import glob +import os +import os.path as osp +import tempfile +from collections import OrderedDict + +import mmcv +import numpy as np +import pycocotools.mask as maskUtils +from mmcv.utils import print_log + +from .builder import DATASETS +from .coco import CocoDataset + + +@DATASETS.register_module() +class CityscapesDataset(CocoDataset): + + CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle') + + PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), + (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)] + + def _filter_imgs(self, min_size=32): + """Filter images too small or without ground truths.""" + valid_inds = [] + # obtain images that contain annotation + ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) + # obtain images that contain annotations of the required categories + ids_in_cat = set() + for i, class_id in enumerate(self.cat_ids): + ids_in_cat |= set(self.coco.cat_img_map[class_id]) + # merge the image id sets of the two conditions and use the merged set + # to filter out images if self.filter_empty_gt=True + ids_in_cat &= ids_with_ann + + valid_img_ids = [] + for i, img_info in enumerate(self.data_infos): + img_id = img_info['id'] + ann_ids = self.coco.getAnnIds(imgIds=[img_id]) + ann_info = self.coco.loadAnns(ann_ids) + all_iscrowd = all([_['iscrowd'] for _ in ann_info]) + if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat + or all_iscrowd): + continue + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + valid_img_ids.append(img_id) + self.img_ids = valid_img_ids + return valid_inds + + def _parse_ann_info(self, img_info, ann_info): + """Parse bbox and mask annotation. + + Args: + img_info (dict): Image info of an image. + ann_info (list[dict]): Annotation info of an image. + + Returns: + dict: A dict containing the following keys: bboxes, \ + bboxes_ignore, labels, masks, seg_map. \ + "masks" are already decoded into binary masks. + """ + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + gt_masks_ann = [] + + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + if ann['area'] <= 0 or w < 1 or h < 1: + continue + if ann['category_id'] not in self.cat_ids: + continue + bbox = [x1, y1, x1 + w, y1 + h] + if ann.get('iscrowd', False): + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_labels.append(self.cat2label[ann['category_id']]) + gt_masks_ann.append(ann['segmentation']) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + ann = dict( + bboxes=gt_bboxes, + labels=gt_labels, + bboxes_ignore=gt_bboxes_ignore, + masks=gt_masks_ann, + seg_map=img_info['segm_file']) + + return ann + + def results2txt(self, results, outfile_prefix): + """Dump the detection results to a txt file. + + Args: + results (list[list | tuple]): Testing results of the + dataset. 
+ outfile_prefix (str): The filename prefix of the json files. + If the prefix is "somepath/xxx", + the txt files will be named "somepath/xxx.txt". + + Returns: + list[str]: Result txt files which contains corresponding \ + instance segmentation images. + """ + try: + import cityscapesscripts.helpers.labels as CSLabels + except ImportError: + raise ImportError('Please run "pip install citscapesscripts" to ' + 'install cityscapesscripts first.') + result_files = [] + os.makedirs(outfile_prefix, exist_ok=True) + prog_bar = mmcv.ProgressBar(len(self)) + for idx in range(len(self)): + result = results[idx] + filename = self.data_infos[idx]['filename'] + basename = osp.splitext(osp.basename(filename))[0] + pred_txt = osp.join(outfile_prefix, basename + '_pred.txt') + + bbox_result, segm_result = result + bboxes = np.vstack(bbox_result) + # segm results + if isinstance(segm_result, tuple): + # Some detectors use different scores for bbox and mask, + # like Mask Scoring R-CNN. Score of segm will be used instead + # of bbox score. + segms = mmcv.concat_list(segm_result[0]) + mask_score = segm_result[1] + else: + # use bbox score for mask score + segms = mmcv.concat_list(segm_result) + mask_score = [bbox[-1] for bbox in bboxes] + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + + assert len(bboxes) == len(segms) == len(labels) + num_instances = len(bboxes) + prog_bar.update() + with open(pred_txt, 'w') as fout: + for i in range(num_instances): + pred_class = labels[i] + classes = self.CLASSES[pred_class] + class_id = CSLabels.name2label[classes].id + score = mask_score[i] + mask = maskUtils.decode(segms[i]).astype(np.uint8) + png_filename = osp.join(outfile_prefix, + basename + f'_{i}_{classes}.png') + mmcv.imwrite(mask, png_filename) + fout.write(f'{osp.basename(png_filename)} {class_id} ' + f'{score}\n') + result_files.append(pred_txt) + + return result_files + + def format_results(self, results, txtfile_prefix=None): + """Format the results to txt (standard format for Cityscapes + evaluation). + + Args: + results (list): Testing results of the dataset. + txtfile_prefix (str | None): The prefix of txt files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: (result_files, tmp_dir), result_files is a dict containing \ + the json filepaths, tmp_dir is the temporal directory created \ + for saving txt/png files when txtfile_prefix is not specified. + """ + assert isinstance(results, list), 'results must be a list' + assert len(results) == len(self), ( + 'The length of results is not equal to the dataset len: {} != {}'. + format(len(results), len(self))) + + assert isinstance(results, list), 'results must be a list' + assert len(results) == len(self), ( + 'The length of results is not equal to the dataset len: {} != {}'. + format(len(results), len(self))) + + if txtfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + txtfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + result_files = self.results2txt(results, txtfile_prefix) + + return result_files, tmp_dir + + def evaluate(self, + results, + metric='bbox', + logger=None, + outfile_prefix=None, + classwise=False, + proposal_nums=(100, 300, 1000), + iou_thrs=np.arange(0.5, 0.96, 0.05)): + """Evaluation in Cityscapes/COCO protocol. + + Args: + results (list[list | tuple]): Testing results of the dataset. 
+ metric (str | list[str]): Metrics to be evaluated. Options are + 'bbox', 'segm', 'proposal', 'proposal_fast'. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + outfile_prefix (str | None): The prefix of output file. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If results are evaluated with COCO protocol, it would be the + prefix of output json file. For example, the metric is 'bbox' + and 'segm', then json files would be "a/b/prefix.bbox.json" and + "a/b/prefix.segm.json". + If results are evaluated with cityscapes protocol, it would be + the prefix of output txt/png files. The output files would be + png images under folder "a/b/prefix/xxx/" and the file name of + images would be written into a txt file + "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of + cityscapes. If not specified, a temp file will be created. + Default: None. + classwise (bool): Whether to evaluating the AP for each class. + proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thrs (Sequence[float]): IoU threshold used for evaluating + recalls. If set to a list, the average recall of all IoUs will + also be computed. Default: 0.5. + + Returns: + dict[str, float]: COCO style evaluation metric or cityscapes mAP \ + and AP@50. + """ + eval_results = dict() + + metrics = metric.copy() if isinstance(metric, list) else [metric] + + if 'cityscapes' in metrics: + eval_results.update( + self._evaluate_cityscapes(results, outfile_prefix, logger)) + metrics.remove('cityscapes') + + # left metrics are all coco metric + if len(metrics) > 0: + # create CocoDataset with CityscapesDataset annotation + self_coco = CocoDataset(self.ann_file, self.pipeline.transforms, + None, self.data_root, self.img_prefix, + self.seg_prefix, self.proposal_file, + self.test_mode, self.filter_empty_gt) + # TODO: remove this in the future + # reload annotations of correct class + self_coco.CLASSES = self.CLASSES + self_coco.data_infos = self_coco.load_annotations(self.ann_file) + eval_results.update( + self_coco.evaluate(results, metrics, logger, outfile_prefix, + classwise, proposal_nums, iou_thrs)) + + return eval_results + + def _evaluate_cityscapes(self, results, txtfile_prefix, logger): + """Evaluation in Cityscapes protocol. + + Args: + results (list): Testing results of the dataset. + txtfile_prefix (str | None): The prefix of output txt file + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + + Returns: + dict[str: float]: Cityscapes evaluation results, contains 'mAP' \ + and 'AP@50'. 
+ """ + + try: + import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa + except ImportError: + raise ImportError('Please run "pip install citscapesscripts" to ' + 'install cityscapesscripts first.') + msg = 'Evaluating in Cityscapes style' + if logger is None: + msg = '\n' + msg + print_log(msg, logger=logger) + + result_files, tmp_dir = self.format_results(results, txtfile_prefix) + + if tmp_dir is None: + result_dir = osp.join(txtfile_prefix, 'results') + else: + result_dir = osp.join(tmp_dir.name, 'results') + + eval_results = OrderedDict() + print_log(f'Evaluating results under {result_dir} ...', logger=logger) + + # set global states in cityscapes evaluation API + CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..') + CSEval.args.predictionPath = os.path.abspath(result_dir) + CSEval.args.predictionWalk = None + CSEval.args.JSONOutput = False + CSEval.args.colorized = False + CSEval.args.gtInstancesFile = os.path.join(result_dir, + 'gtInstances.json') + CSEval.args.groundTruthSearch = os.path.join( + self.img_prefix.replace('leftImg8bit', 'gtFine'), + '*/*_gtFine_instanceIds.png') + + groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch) + assert len(groundTruthImgList), 'Cannot find ground truth images' \ + f' in {CSEval.args.groundTruthSearch}.' + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(CSEval.getPrediction(gt, CSEval.args)) + CSEval_results = CSEval.evaluateImgLists(predictionImgList, + groundTruthImgList, + CSEval.args)['averages'] + + eval_results['mAP'] = CSEval_results['allAp'] + eval_results['AP@50'] = CSEval_results['allAp50%'] + if tmp_dir is not None: + tmp_dir.cleanup() + return eval_results diff --git a/downstream/mmdetection/mmdet/datasets/coco.py b/downstream/mmdetection/mmdet/datasets/coco.py new file mode 100644 index 0000000..bcdd4df --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/coco.py @@ -0,0 +1,649 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import contextlib +import io +import itertools +import logging +import os.path as osp +import tempfile +import warnings +from collections import OrderedDict + +import mmcv +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable + +from mmdet.core import eval_recalls +from .api_wrappers import COCO, COCOeval +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class CocoDataset(CustomDataset): + + CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', + 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', + 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', + 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', + 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', + 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', + 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', + 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', + 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') + + PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), + (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70), + (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0), + (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), + (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), + (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), + (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), + (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), + (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), + (134, 134, 103), (145, 148, 174), (255, 208, 186), + (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), + (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), + (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149), + (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205), + (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0), + (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88), + (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118), + (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15), + (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0), + (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122), + (191, 162, 208)] + + def load_annotations(self, ann_file): + """Load annotation from COCO style annotation file. + + Args: + ann_file (str): Path of annotation file. + + Returns: + list[dict]: Annotation info from COCO api. + """ + + self.coco = COCO(ann_file) + # The order of returned `cat_ids` will not + # change with the order of the CLASSES + self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) + + self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} + self.img_ids = self.coco.get_img_ids() + data_infos = [] + total_ann_ids = [] + for i in self.img_ids: + info = self.coco.load_imgs([i])[0] + info['filename'] = info['file_name'] + data_infos.append(info) + ann_ids = self.coco.get_ann_ids(img_ids=[i]) + total_ann_ids.extend(ann_ids) + assert len(set(total_ann_ids)) == len( + total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" 
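+        # Each entry of `data_infos` is the raw COCO image record (id, width,
+        # height, file_name, ...) plus a 'filename' alias consumed by the
+        # data loading pipeline.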
+ return data_infos + + def get_ann_info(self, idx): + """Get COCO annotation by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + + img_id = self.data_infos[idx]['id'] + ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) + ann_info = self.coco.load_anns(ann_ids) + return self._parse_ann_info(self.data_infos[idx], ann_info) + + def get_cat_ids(self, idx): + """Get COCO category ids by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. + """ + + img_id = self.data_infos[idx]['id'] + ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) + ann_info = self.coco.load_anns(ann_ids) + return [ann['category_id'] for ann in ann_info] + + def _filter_imgs(self, min_size=32): + """Filter images too small or without ground truths.""" + valid_inds = [] + # obtain images that contain annotation + ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) + # obtain images that contain annotations of the required categories + ids_in_cat = set() + for i, class_id in enumerate(self.cat_ids): + ids_in_cat |= set(self.coco.cat_img_map[class_id]) + # merge the image id sets of the two conditions and use the merged set + # to filter out images if self.filter_empty_gt=True + ids_in_cat &= ids_with_ann + + valid_img_ids = [] + for i, img_info in enumerate(self.data_infos): + img_id = self.img_ids[i] + if self.filter_empty_gt and img_id not in ids_in_cat: + continue + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + valid_img_ids.append(img_id) + self.img_ids = valid_img_ids + return valid_inds + + def _parse_ann_info(self, img_info, ann_info): + """Parse bbox and mask annotation. + + Args: + ann_info (list[dict]): Annotation info of an image. + with_mask (bool): Whether to parse mask annotations. + + Returns: + dict: A dict containing the following keys: bboxes, bboxes_ignore,\ + labels, masks, seg_map. "masks" are raw annotations and not \ + decoded into binary masks. + """ + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + gt_masks_ann = [] + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) + inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) + if inter_w * inter_h == 0: + continue + if ann['area'] <= 0 or w < 1 or h < 1: + continue + if ann['category_id'] not in self.cat_ids: + continue + bbox = [x1, y1, x1 + w, y1 + h] + if ann.get('iscrowd', False): + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_labels.append(self.cat2label[ann['category_id']]) + gt_masks_ann.append(ann.get('segmentation', None)) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + seg_map = img_info['filename'].replace('jpg', 'png') + + ann = dict( + bboxes=gt_bboxes, + labels=gt_labels, + bboxes_ignore=gt_bboxes_ignore, + masks=gt_masks_ann, + seg_map=seg_map) + + return ann + + def xyxy2xywh(self, bbox): + """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO + evaluation. + + Args: + bbox (numpy.ndarray): The bounding boxes, shape (4, ), in + ``xyxy`` order. 
+ + Returns: + list[float]: The converted bounding boxes, in ``xywh`` order. + """ + + _bbox = bbox.tolist() + return [ + _bbox[0], + _bbox[1], + _bbox[2] - _bbox[0], + _bbox[3] - _bbox[1], + ] + + def _proposal2json(self, results): + """Convert proposal results to COCO json style.""" + json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + bboxes = results[idx] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = 1 + json_results.append(data) + return json_results + + def _det2json(self, results): + """Convert detection results to COCO json style.""" + json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + result = results[idx] + for label in range(len(result)): + bboxes = result[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = self.cat_ids[label] + json_results.append(data) + return json_results + + def _segm2json(self, results): + """Convert instance segmentation results to COCO json style.""" + bbox_json_results = [] + segm_json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + det, seg = results[idx] + for label in range(len(det)): + # bbox results + bboxes = det[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = self.cat_ids[label] + bbox_json_results.append(data) + + # segm results + # some detectors use different scores for bbox and mask + if isinstance(seg, tuple): + segms = seg[0][label] + mask_score = seg[1][label] + else: + segms = seg[label] + mask_score = [bbox[4] for bbox in bboxes] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(mask_score[i]) + data['category_id'] = self.cat_ids[label] + if isinstance(segms[i]['counts'], bytes): + segms[i]['counts'] = segms[i]['counts'].decode() + data['segmentation'] = segms[i] + segm_json_results.append(data) + return bbox_json_results, segm_json_results + + def results2json(self, results, outfile_prefix): + """Dump the detection results to a COCO style json file. + + There are 3 types of results: proposals, bbox predictions, mask + predictions, and they have different data types. This method will + automatically recognize the type, and dump them to json files. + + Args: + results (list[list | tuple | ndarray]): Testing results of the + dataset. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.bbox.json", "somepath/xxx.segm.json", + "somepath/xxx.proposal.json". + + Returns: + dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ + values are corresponding filenames. 
+ """ + result_files = dict() + if isinstance(results[0], list): + json_results = self._det2json(results) + result_files['bbox'] = f'{outfile_prefix}.bbox.json' + result_files['proposal'] = f'{outfile_prefix}.bbox.json' + mmcv.dump(json_results, result_files['bbox']) + elif isinstance(results[0], tuple): + json_results = self._segm2json(results) + result_files['bbox'] = f'{outfile_prefix}.bbox.json' + result_files['proposal'] = f'{outfile_prefix}.bbox.json' + result_files['segm'] = f'{outfile_prefix}.segm.json' + mmcv.dump(json_results[0], result_files['bbox']) + mmcv.dump(json_results[1], result_files['segm']) + elif isinstance(results[0], np.ndarray): + json_results = self._proposal2json(results) + result_files['proposal'] = f'{outfile_prefix}.proposal.json' + mmcv.dump(json_results, result_files['proposal']) + else: + raise TypeError('invalid type of results') + return result_files + + def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): + gt_bboxes = [] + for i in range(len(self.img_ids)): + ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) + ann_info = self.coco.load_anns(ann_ids) + if len(ann_info) == 0: + gt_bboxes.append(np.zeros((0, 4))) + continue + bboxes = [] + for ann in ann_info: + if ann.get('ignore', False) or ann['iscrowd']: + continue + x1, y1, w, h = ann['bbox'] + bboxes.append([x1, y1, x1 + w, y1 + h]) + bboxes = np.array(bboxes, dtype=np.float32) + if bboxes.shape[0] == 0: + bboxes = np.zeros((0, 4)) + gt_bboxes.append(bboxes) + + recalls = eval_recalls( + gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) + ar = recalls.mean(axis=1) + return ar + + def format_results(self, results, jsonfile_prefix=None, **kwargs): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[tuple | numpy.ndarray]): Testing results of the + dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: (result_files, tmp_dir), result_files is a dict containing \ + the json filepaths, tmp_dir is the temporal directory created \ + for saving json files when jsonfile_prefix is not specified. + """ + assert isinstance(results, list), 'results must be a list' + assert len(results) == len(self), ( + 'The length of results is not equal to the dataset len: {} != {}'. + format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + result_files = self.results2json(results, jsonfile_prefix) + return result_files, tmp_dir + + def evaluate_det_segm(self, + results, + result_files, + coco_gt, + metrics, + logger=None, + classwise=False, + proposal_nums=(100, 300, 1000), + iou_thrs=None, + metric_items=None): + """Instance segmentation and object detection evaluation in COCO + protocol. + + Args: + results (list[list | tuple | dict]): Testing results of the + dataset. + result_files (dict[str, str]): a dict contains json file path. + coco_gt (COCO): COCO API object with ground truth annotation. + metric (str | list[str]): Metrics to be evaluated. Options are + 'bbox', 'segm', 'proposal', 'proposal_fast'. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + classwise (bool): Whether to evaluating the AP for each class. 
+ proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thrs (Sequence[float], optional): IoU threshold used for + evaluating recalls/mAPs. If set to a list, the average of all + IoUs will also be computed. If not specified, [0.50, 0.55, + 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. + Default: None. + metric_items (list[str] | str, optional): Metric items that will + be returned. If not specified, ``['AR@100', 'AR@300', + 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be + used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', + 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when + ``metric=='bbox' or metric=='segm'``. + + Returns: + dict[str, float]: COCO style evaluation metric. + """ + if iou_thrs is None: + iou_thrs = np.linspace( + .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) + if metric_items is not None: + if not isinstance(metric_items, list): + metric_items = [metric_items] + + eval_results = OrderedDict() + for metric in metrics: + msg = f'Evaluating {metric}...' + if logger is None: + msg = '\n' + msg + print_log(msg, logger=logger) + + if metric == 'proposal_fast': + if isinstance(results[0], tuple): + raise KeyError('proposal_fast is not supported for ' + 'instance segmentation result.') + ar = self.fast_eval_recall( + results, proposal_nums, iou_thrs, logger='silent') + log_msg = [] + for i, num in enumerate(proposal_nums): + eval_results[f'AR@{num}'] = ar[i] + log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') + log_msg = ''.join(log_msg) + print_log(log_msg, logger=logger) + continue + + iou_type = 'bbox' if metric == 'proposal' else metric + if metric not in result_files: + raise KeyError(f'{metric} is not in results') + try: + predictions = mmcv.load(result_files[metric]) + if iou_type == 'segm': + # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa + # When evaluating mask AP, if the results contain bbox, + # cocoapi will use the box area instead of the mask area + # for calculating the instance area. Though the overall AP + # is not affected, this leads to different + # small/medium/large mask AP results. + for x in predictions: + x.pop('bbox') + warnings.simplefilter('once') + warnings.warn( + 'The key "bbox" is deleted for more accurate mask AP ' + 'of small/medium/large instances since v2.12.0. 
This ' + 'does not change the overall mAP calculation.', + UserWarning) + coco_det = coco_gt.loadRes(predictions) + except IndexError: + print_log( + 'The testing results of the whole dataset is empty.', + logger=logger, + level=logging.ERROR) + break + + cocoEval = COCOeval(coco_gt, coco_det, iou_type) + cocoEval.params.catIds = self.cat_ids + cocoEval.params.imgIds = self.img_ids + cocoEval.params.maxDets = list(proposal_nums) + cocoEval.params.iouThrs = iou_thrs + # mapping of cocoEval.stats + coco_metric_names = { + 'mAP': 0, + 'mAP_50': 1, + 'mAP_75': 2, + 'mAP_s': 3, + 'mAP_m': 4, + 'mAP_l': 5, + 'AR@100': 6, + 'AR@300': 7, + 'AR@1000': 8, + 'AR_s@1000': 9, + 'AR_m@1000': 10, + 'AR_l@1000': 11 + } + if metric_items is not None: + for metric_item in metric_items: + if metric_item not in coco_metric_names: + raise KeyError( + f'metric item {metric_item} is not supported') + + if metric == 'proposal': + cocoEval.params.useCats = 0 + cocoEval.evaluate() + cocoEval.accumulate() + + # Save coco summarize print information to logger + redirect_string = io.StringIO() + with contextlib.redirect_stdout(redirect_string): + cocoEval.summarize() + print_log('\n' + redirect_string.getvalue(), logger=logger) + + if metric_items is None: + metric_items = [ + 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', + 'AR_m@1000', 'AR_l@1000' + ] + + for item in metric_items: + val = float( + f'{cocoEval.stats[coco_metric_names[item]]:.3f}') + eval_results[item] = val + else: + cocoEval.evaluate() + cocoEval.accumulate() + + # Save coco summarize print information to logger + redirect_string = io.StringIO() + with contextlib.redirect_stdout(redirect_string): + cocoEval.summarize() + print_log('\n' + redirect_string.getvalue(), logger=logger) + + if classwise: # Compute per-category AP + # Compute per-category AP + # from https://github.com/facebookresearch/detectron2/ + precisions = cocoEval.eval['precision'] + # precision: (iou, recall, cls, area range, max dets) + assert len(self.cat_ids) == precisions.shape[2] + + results_per_category = [] + for idx, catId in enumerate(self.cat_ids): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + nm = self.coco.loadCats(catId)[0] + precision = precisions[:, :, idx, 0, -1] + precision = precision[precision > -1] + if precision.size: + ap = np.mean(precision) + else: + ap = float('nan') + results_per_category.append( + (f'{nm["name"]}', f'{float(ap):0.3f}')) + + num_columns = min(6, len(results_per_category) * 2) + results_flatten = list( + itertools.chain(*results_per_category)) + headers = ['category', 'AP'] * (num_columns // 2) + results_2d = itertools.zip_longest(*[ + results_flatten[i::num_columns] + for i in range(num_columns) + ]) + table_data = [headers] + table_data += [result for result in results_2d] + table = AsciiTable(table_data) + print_log('\n' + table.table, logger=logger) + + if metric_items is None: + metric_items = [ + 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' + ] + + for metric_item in metric_items: + key = f'{metric}_{metric_item}' + val = float( + f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}' + ) + eval_results[key] = val + ap = cocoEval.stats[:6] + eval_results[f'{metric}_mAP_copypaste'] = ( + f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' + f'{ap[4]:.3f} {ap[5]:.3f}') + + return eval_results + + def evaluate(self, + results, + metric='bbox', + logger=None, + jsonfile_prefix=None, + classwise=False, + proposal_nums=(100, 300, 1000), + iou_thrs=None, + metric_items=None): + """Evaluation in 
COCO protocol. + + Args: + results (list[list | tuple]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. Options are + 'bbox', 'segm', 'proposal', 'proposal_fast'. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + classwise (bool): Whether to evaluating the AP for each class. + proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thrs (Sequence[float], optional): IoU threshold used for + evaluating recalls/mAPs. If set to a list, the average of all + IoUs will also be computed. If not specified, [0.50, 0.55, + 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. + Default: None. + metric_items (list[str] | str, optional): Metric items that will + be returned. If not specified, ``['AR@100', 'AR@300', + 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be + used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', + 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when + ``metric=='bbox' or metric=='segm'``. + + Returns: + dict[str, float]: COCO style evaluation metric. + """ + + metrics = metric if isinstance(metric, list) else [metric] + allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] + for metric in metrics: + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + + coco_gt = self.coco + self.cat_ids = coco_gt.get_cat_ids(cat_names=self.CLASSES) + + result_files, tmp_dir = self.format_results(results, jsonfile_prefix) + eval_results = self.evaluate_det_segm(results, result_files, coco_gt, + metrics, logger, classwise, + proposal_nums, iou_thrs, + metric_items) + + if tmp_dir is not None: + tmp_dir.cleanup() + return eval_results diff --git a/downstream/mmdetection/mmdet/datasets/coco_panoptic.py b/downstream/mmdetection/mmdet/datasets/coco_panoptic.py new file mode 100644 index 0000000..53ef594 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/coco_panoptic.py @@ -0,0 +1,692 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +import os +from collections import defaultdict + +import mmcv +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable + +from mmdet.core import INSTANCE_OFFSET +from .api_wrappers import COCO, pq_compute_multi_core +from .builder import DATASETS +from .coco import CocoDataset + +try: + import panopticapi + from panopticapi.evaluation import VOID + from panopticapi.utils import id2rgb +except ImportError: + panopticapi = None + id2rgb = None + VOID = None + +__all__ = ['CocoPanopticDataset'] + + +class COCOPanoptic(COCO): + """This wrapper is for loading the panoptic style annotation file. + + The format is shown in the CocoPanopticDataset class. + + Args: + annotation_file (str): Path of annotation file. 
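+
+    A minimal loading sketch (the annotation path is hypothetical)::
+
+        >>> coco = COCOPanoptic('annotations/panoptic_val2017.json')
+        >>> img_ids = coco.get_img_ids()
+        >>> anns = coco.load_anns(coco.get_ann_ids(img_ids=img_ids[:1]))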
+ """ + + def __init__(self, annotation_file=None): + if panopticapi is None: + raise RuntimeError( + 'panopticapi is not installed, please install it by: ' + 'pip install git+https://github.com/cocodataset/' + 'panopticapi.git.') + + super(COCOPanoptic, self).__init__(annotation_file) + + def createIndex(self): + # create index + print('creating index...') + # anns stores 'segment_id -> annotation' + anns, cats, imgs = {}, {}, {} + img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list) + if 'annotations' in self.dataset: + for ann, img_info in zip(self.dataset['annotations'], + self.dataset['images']): + img_info['segm_file'] = ann['file_name'] + for seg_ann in ann['segments_info']: + # to match with instance.json + seg_ann['image_id'] = ann['image_id'] + seg_ann['height'] = img_info['height'] + seg_ann['width'] = img_info['width'] + img_to_anns[ann['image_id']].append(seg_ann) + # segment_id is not unique in coco dataset orz... + if seg_ann['id'] in anns.keys(): + anns[seg_ann['id']].append(seg_ann) + else: + anns[seg_ann['id']] = [seg_ann] + + if 'images' in self.dataset: + for img in self.dataset['images']: + imgs[img['id']] = img + + if 'categories' in self.dataset: + for cat in self.dataset['categories']: + cats[cat['id']] = cat + + if 'annotations' in self.dataset and 'categories' in self.dataset: + for ann in self.dataset['annotations']: + for seg_ann in ann['segments_info']: + cat_to_imgs[seg_ann['category_id']].append(ann['image_id']) + + print('index created!') + + self.anns = anns + self.imgToAnns = img_to_anns + self.catToImgs = cat_to_imgs + self.imgs = imgs + self.cats = cats + + def load_anns(self, ids=[]): + """Load anns with the specified ids. + + self.anns is a list of annotation lists instead of a + list of annotations. + + Args: + ids (int array): integer ids specifying anns + + Returns: + anns (object array): loaded ann objects + """ + anns = [] + + if hasattr(ids, '__iter__') and hasattr(ids, '__len__'): + # self.anns is a list of annotation lists instead of + # a list of annotations + for id in ids: + anns += self.anns[id] + return anns + elif type(ids) == int: + return self.anns[ids] + + +@DATASETS.register_module() +class CocoPanopticDataset(CocoDataset): + """Coco dataset for Panoptic segmentation. + + The annotation format is shown as follows. The `ann` field is optional + for testing. + + .. code-block:: none + + [ + { + 'filename': f'{image_id:012}.png', + 'image_id':9 + 'segments_info': { + [ + { + 'id': 8345037, (segment_id in panoptic png, + convert from rgb) + 'category_id': 51, + 'iscrowd': 0, + 'bbox': (x1, y1, w, h), + 'area': 24315, + 'segmentation': list,(coded mask) + }, + ... + } + } + }, + ... + ] + + Args: + ann_file (str): Panoptic segmentation annotation file path. + pipeline (list[dict]): Processing pipeline. + ins_ann_file (str): Instance segmentation annotation file path. + Defaults to None. + classes (str | Sequence[str], optional): Specify classes to load. + If is None, ``cls.CLASSES`` will be used. Defaults to None. + data_root (str, optional): Data root for ``ann_file``, + ``ins_ann_file`` ``img_prefix``, ``seg_prefix``, ``proposal_file`` + if specified. Defaults to None. + img_prefix (str, optional): Prefix of path to images. Defaults to ''. + seg_prefix (str, optional): Prefix of path to segmentation files. + Defaults to None. + proposal_file (str, optional): Path to proposal file. Defaults to None. + test_mode (bool, optional): If set True, annotation will not be loaded. + Defaults to False. 
+        filter_empty_gt (bool, optional): If set true, images without bounding
+            boxes of the dataset's classes will be filtered out. This option
+            only works when `test_mode=False`, i.e., we never filter images
+            during tests. Defaults to True.
+        file_client_args (:obj:`mmcv.ConfigDict` | dict): file client args.
+            Defaults to dict(backend='disk').
+    """
+    CLASSES = [
+        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
+        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
+        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
+        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
+        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
+        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
+        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
+        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
+        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
+        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
+        'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
+        'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',
+        'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',
+        'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
+        'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
+        'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
+        'wall-wood', 'water-other', 'window-blind', 'window-other',
+        'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
+        'cabinet-merged', 'table-merged', 'floor-other-merged',
+        'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
+        'paper-merged', 'food-other-merged', 'building-other-merged',
+        'rock-merged', 'wall-other-merged', 'rug-merged'
+    ]
+    THING_CLASSES = [
+        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
+        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
+        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
+        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
+        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
+        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
+        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
+        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
+        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
+        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
+        'scissors', 'teddy bear', 'hair drier', 'toothbrush'
+    ]
+    STUFF_CLASSES = [
+        'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain',
+        'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house',
+        'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
+        'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
+        'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
+        'wall-wood', 'water-other', 'window-blind', 'window-other',
+        'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
+        'cabinet-merged', 'table-merged', 'floor-other-merged',
+        'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
+        'paper-merged',
'food-other-merged', 'building-other-merged', + 'rock-merged', 'wall-other-merged', 'rug-merged' + ] + + PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), + (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70), + (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0), + (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), + (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), + (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), + (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), + (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), + (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), + (134, 134, 103), (145, 148, 174), (255, 208, 186), + (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), + (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), + (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149), + (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205), + (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0), + (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88), + (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118), + (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15), + (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0), + (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122), + (191, 162, 208), (255, 255, 128), (147, 211, 203), + (150, 100, 100), (168, 171, 172), (146, 112, 198), + (210, 170, 100), (92, 136, 89), (218, 88, 184), (241, 129, 0), + (217, 17, 255), (124, 74, 181), (70, 70, 70), (255, 228, 255), + (154, 208, 0), (193, 0, 92), (76, 91, 113), (255, 180, 195), + (106, 154, 176), + (230, 150, 140), (60, 143, 255), (128, 64, 128), (92, 82, 55), + (254, 212, 124), (73, 77, 174), (255, 160, 98), (255, 255, 255), + (104, 84, 109), (169, 164, 131), (225, 199, 255), (137, 54, 74), + (135, 158, 223), (7, 246, 231), (107, 255, 200), (58, 41, 149), + (183, 121, 142), (255, 73, 97), (107, 142, 35), (190, 153, 153), + (146, 139, 141), + (70, 130, 180), (134, 199, 156), (209, 226, 140), (96, 36, 108), + (96, 96, 96), (64, 170, 64), (152, 251, 152), (208, 229, 228), + (206, 186, 171), (152, 161, 64), (116, 112, 0), (0, 114, 143), + (102, 102, 156), (250, 141, 255)] + + def __init__(self, + ann_file, + pipeline, + ins_ann_file=None, + classes=None, + data_root=None, + img_prefix='', + seg_prefix=None, + proposal_file=None, + test_mode=False, + filter_empty_gt=True, + file_client_args=dict(backend='disk')): + super().__init__( + ann_file, + pipeline, + classes=classes, + data_root=data_root, + img_prefix=img_prefix, + seg_prefix=seg_prefix, + proposal_file=proposal_file, + test_mode=test_mode, + filter_empty_gt=filter_empty_gt, + file_client_args=file_client_args) + self.ins_ann_file = ins_ann_file + + def load_annotations(self, ann_file): + """Load annotation from COCO Panoptic style annotation file. + + Args: + ann_file (str): Path of annotation file. + + Returns: + list[dict]: Annotation info from COCO api. + """ + self.coco = COCOPanoptic(ann_file) + self.cat_ids = self.coco.get_cat_ids() + self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} + self.categories = self.coco.cats + self.img_ids = self.coco.get_img_ids() + data_infos = [] + for i in self.img_ids: + info = self.coco.load_imgs([i])[0] + info['filename'] = info['file_name'] + info['segm_file'] = info['filename'].replace('jpg', 'png') + data_infos.append(info) + return data_infos + + def get_ann_info(self, idx): + """Get COCO annotation by index. 
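+
+        Only annotations whose ``image_id`` matches the requested image are
+        kept, because segment ids in the panoptic annotation file are not
+        guaranteed to be unique across images.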
+ + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + img_id = self.data_infos[idx]['id'] + ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) + ann_info = self.coco.load_anns(ann_ids) + # filter out unmatched images + ann_info = [i for i in ann_info if i['image_id'] == img_id] + return self._parse_ann_info(self.data_infos[idx], ann_info) + + def _parse_ann_info(self, img_info, ann_info): + """Parse annotations and load panoptic ground truths. + + Args: + img_info (int): Image info of an image. + ann_info (list[dict]): Annotation info of an image. + + Returns: + dict: A dict containing the following keys: bboxes, bboxes_ignore, + labels, masks, seg_map. + """ + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + gt_mask_infos = [] + + for i, ann in enumerate(ann_info): + x1, y1, w, h = ann['bbox'] + if ann['area'] <= 0 or w < 1 or h < 1: + continue + bbox = [x1, y1, x1 + w, y1 + h] + + category_id = ann['category_id'] + contiguous_cat_id = self.cat2label[category_id] + + is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] + if is_thing: + is_crowd = ann.get('iscrowd', False) + if not is_crowd: + gt_bboxes.append(bbox) + gt_labels.append(contiguous_cat_id) + else: + gt_bboxes_ignore.append(bbox) + is_thing = False + + mask_info = { + 'id': ann['id'], + 'category': contiguous_cat_id, + 'is_thing': is_thing + } + gt_mask_infos.append(mask_info) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + ann = dict( + bboxes=gt_bboxes, + labels=gt_labels, + bboxes_ignore=gt_bboxes_ignore, + masks=gt_mask_infos, + seg_map=img_info['segm_file']) + + return ann + + def _filter_imgs(self, min_size=32): + """Filter images too small or without ground truths.""" + ids_with_ann = [] + # check whether images have legal thing annotations. 
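+        # ``self.coco.anns`` maps a segment id to a *list* of segment
+        # annotations (ids can repeat across images), hence the nested loop.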
+ for lists in self.coco.anns.values(): + for item in lists: + category_id = item['category_id'] + is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] + if not is_thing: + continue + ids_with_ann.append(item['image_id']) + ids_with_ann = set(ids_with_ann) + + valid_inds = [] + valid_img_ids = [] + for i, img_info in enumerate(self.data_infos): + img_id = self.img_ids[i] + if self.filter_empty_gt and img_id not in ids_with_ann: + continue + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + valid_img_ids.append(img_id) + self.img_ids = valid_img_ids + return valid_inds + + def _pan2json(self, results, outfile_prefix): + """Convert panoptic results to COCO panoptic json style.""" + label2cat = dict((v, k) for (k, v) in self.cat2label.items()) + pred_annotations = [] + outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') + + for idx in range(len(self)): + img_id = self.img_ids[idx] + segm_file = self.data_infos[idx]['segm_file'] + pan = results[idx] + + pan_labels = np.unique(pan) + segm_info = [] + for pan_label in pan_labels: + sem_label = pan_label % INSTANCE_OFFSET + # We reserve the length of self.CLASSES for VOID label + if sem_label == len(self.CLASSES): + continue + # convert sem_label to json label + cat_id = label2cat[sem_label] + is_thing = self.categories[cat_id]['isthing'] + mask = pan == pan_label + area = mask.sum() + segm_info.append({ + 'id': int(pan_label), + 'category_id': cat_id, + 'isthing': is_thing, + 'area': int(area) + }) + # evaluation script uses 0 for VOID label. + pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID + pan = id2rgb(pan).astype(np.uint8) + mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file)) + record = { + 'image_id': img_id, + 'segments_info': segm_info, + 'file_name': segm_file + } + pred_annotations.append(record) + pan_json_results = dict(annotations=pred_annotations) + return pan_json_results + + def results2json(self, results, outfile_prefix): + """Dump the results to a COCO style json file. + + There are 4 types of results: proposals, bbox predictions, mask + predictions, panoptic segmentation predictions, and they have + different data types. This method will automatically recognize + the type, and dump them to json files. + + .. code-block:: none + + [ + { + 'pan_results': np.array, # shape (h, w) + # ins_results which includes bboxes and RLE encoded masks + # is optional. + 'ins_results': (list[np.array], list[list[str]]) + }, + ... + ] + + Args: + results (list[dict]): Testing results of the dataset. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.panoptic.json", "somepath/xxx.bbox.json", + "somepath/xxx.segm.json" + + Returns: + dict[str: str]: Possible keys are "panoptic", "bbox", "segm", \ + "proposal", and values are corresponding filenames. 
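+
+        A brief sketch (``dataset`` and the prefix are hypothetical)::
+
+            >>> result_files = dataset.results2json(results, 'tmp/res')
+            >>> result_files['panoptic']
+            'tmp/res.panoptic.json'
+            >>> # the panoptic PNGs are written under 'tmp/panoptic/'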
+ """ + result_files = dict() + # panoptic segmentation results + if 'pan_results' in results[0]: + pan_results = [result['pan_results'] for result in results] + pan_json_results = self._pan2json(pan_results, outfile_prefix) + result_files['panoptic'] = f'{outfile_prefix}.panoptic.json' + mmcv.dump(pan_json_results, result_files['panoptic']) + + # instance segmentation results + if 'ins_results' in results[0]: + ins_results = [result['ins_results'] for result in results] + bbox_json_results, segm_json_results = self._segm2json(ins_results) + result_files['bbox'] = f'{outfile_prefix}.bbox.json' + result_files['proposal'] = f'{outfile_prefix}.bbox.json' + result_files['segm'] = f'{outfile_prefix}.segm.json' + mmcv.dump(bbox_json_results, result_files['bbox']) + mmcv.dump(segm_json_results, result_files['segm']) + + return result_files + + def evaluate_pan_json(self, + result_files, + outfile_prefix, + logger=None, + classwise=False, + nproc=32): + """Evaluate PQ according to the panoptic results json file.""" + imgs = self.coco.imgs + gt_json = self.coco.img_ann_map # image to annotations + gt_json = [{ + 'image_id': k, + 'segments_info': v, + 'file_name': imgs[k]['segm_file'] + } for k, v in gt_json.items()] + pred_json = mmcv.load(result_files['panoptic']) + pred_json = dict( + (el['image_id'], el) for el in pred_json['annotations']) + + # match the gt_anns and pred_anns in the same image + matched_annotations_list = [] + for gt_ann in gt_json: + img_id = gt_ann['image_id'] + if img_id not in pred_json.keys(): + raise Exception('no prediction for the image' + ' with id: {}'.format(img_id)) + matched_annotations_list.append((gt_ann, pred_json[img_id])) + + gt_folder = self.seg_prefix + pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') + + pq_stat = pq_compute_multi_core( + matched_annotations_list, + gt_folder, + pred_folder, + self.categories, + self.file_client, + nproc=nproc) + + metrics = [('All', None), ('Things', True), ('Stuff', False)] + pq_results = {} + + for name, isthing in metrics: + pq_results[name], classwise_results = pq_stat.pq_average( + self.categories, isthing=isthing) + if name == 'All': + pq_results['classwise'] = classwise_results + + classwise_results = None + if classwise: + classwise_results = { + k: v + for k, v in zip(self.CLASSES, pq_results['classwise'].values()) + } + print_panoptic_table(pq_results, classwise_results, logger=logger) + results = parse_pq_results(pq_results) + results['PQ_copypaste'] = ( + f'{results["PQ"]:.3f} {results["SQ"]:.3f} ' + f'{results["RQ"]:.3f} ' + f'{results["PQ_th"]:.3f} {results["SQ_th"]:.3f} ' + f'{results["RQ_th"]:.3f} ' + f'{results["PQ_st"]:.3f} {results["SQ_st"]:.3f} ' + f'{results["RQ_st"]:.3f}') + + return results + + def evaluate(self, + results, + metric='PQ', + logger=None, + jsonfile_prefix=None, + classwise=False, + nproc=32, + **kwargs): + """Evaluation in COCO Panoptic protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. 'PQ', 'bbox', + 'segm', 'proposal' are supported. 'pq' will be regarded as 'PQ. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + classwise (bool): Whether to print classwise evaluation results. + Default: False. 
+            nproc (int): Number of processes for panoptic quality computing.
+                Defaults to 32. When `nproc` exceeds the number of cpu cores,
+                the number of cpu cores is used.
+
+        Returns:
+            dict[str, float]: COCO Panoptic style evaluation metric.
+        """
+        metrics = metric if isinstance(metric, list) else [metric]
+        # Compatible with lowercase 'pq'
+        metrics = ['PQ' if metric == 'pq' else metric for metric in metrics]
+        allowed_metrics = ['PQ', 'bbox', 'segm', 'proposal']
+        for metric in metrics:
+            if metric not in allowed_metrics:
+                raise KeyError(f'metric {metric} is not supported')
+
+        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
+        eval_results = {}
+
+        outfile_prefix = os.path.join(tmp_dir.name, 'results') \
+            if tmp_dir is not None else jsonfile_prefix
+        if 'PQ' in metrics:
+            eval_pan_results = self.evaluate_pan_json(
+                result_files, outfile_prefix, logger, classwise, nproc=nproc)
+
+            eval_results.update(eval_pan_results)
+            metrics.remove('PQ')
+
+        if (('bbox' in metrics) or ('segm' in metrics)
+                or ('proposal' in metrics)):
+
+            assert 'ins_results' in results[0], 'instance segmentation ' \
+                'results are absent from results'
+
+            assert self.ins_ann_file is not None, 'Annotation '\
+                'file for instance segmentation or object detection ' \
+                'should not be None'
+
+            coco_gt = COCO(self.ins_ann_file)
+            panoptic_cat_ids = self.cat_ids
+            self.cat_ids = coco_gt.get_cat_ids(cat_names=self.THING_CLASSES)
+
+            eval_ins_results = self.evaluate_det_segm(results, result_files,
+                                                      coco_gt, metrics, logger,
+                                                      classwise, **kwargs)
+            self.cat_ids = panoptic_cat_ids
+            eval_results.update(eval_ins_results)
+
+        if tmp_dir is not None:
+            tmp_dir.cleanup()
+        return eval_results
+
+
+def parse_pq_results(pq_results):
+    """Parse the Panoptic Quality results."""
+    result = dict()
+    result['PQ'] = 100 * pq_results['All']['pq']
+    result['SQ'] = 100 * pq_results['All']['sq']
+    result['RQ'] = 100 * pq_results['All']['rq']
+    result['PQ_th'] = 100 * pq_results['Things']['pq']
+    result['SQ_th'] = 100 * pq_results['Things']['sq']
+    result['RQ_th'] = 100 * pq_results['Things']['rq']
+    result['PQ_st'] = 100 * pq_results['Stuff']['pq']
+    result['SQ_st'] = 100 * pq_results['Stuff']['sq']
+    result['RQ_st'] = 100 * pq_results['Stuff']['rq']
+    return result
+
+
+def print_panoptic_table(pq_results, classwise_results=None, logger=None):
+    """Print the panoptic evaluation results table.
+
+    Args:
+        pq_results(dict): The Panoptic Quality results.
+        classwise_results(dict | None): The classwise Panoptic Quality results.
+            The keys are class names and the values are metrics.
+        logger (logging.Logger | str | None): Logger used for printing
+            related information during evaluation. Default: None.
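+
+    The expected input looks roughly like this (numbers are made up)::
+
+        >>> pq_results = {
+        ...     'All': {'pq': 0.40, 'sq': 0.77, 'rq': 0.49, 'n': 133},
+        ...     'Things': {'pq': 0.46, 'sq': 0.80, 'rq': 0.55, 'n': 80},
+        ...     'Stuff': {'pq': 0.31, 'sq': 0.73, 'rq': 0.40, 'n': 53},
+        ... }
+        >>> print_panoptic_table(pq_results)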
+ """ + + headers = ['', 'PQ', 'SQ', 'RQ', 'categories'] + data = [headers] + for name in ['All', 'Things', 'Stuff']: + numbers = [ + f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq'] + ] + row = [name] + numbers + [pq_results[name]['n']] + data.append(row) + table = AsciiTable(data) + print_log('Panoptic Evaluation Results:\n' + table.table, logger=logger) + + if classwise_results is not None: + class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}' + for k in ['pq', 'sq', 'rq']) + for name, metrics in classwise_results.items()] + num_columns = min(8, len(class_metrics) * 4) + results_flatten = list(itertools.chain(*class_metrics)) + headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4) + results_2d = itertools.zip_longest( + *[results_flatten[i::num_columns] for i in range(num_columns)]) + data = [headers] + data += [result for result in results_2d] + table = AsciiTable(data) + print_log( + 'Classwise Panoptic Evaluation Results:\n' + table.table, + logger=logger) diff --git a/downstream/mmdetection/mmdet/datasets/custom.py b/downstream/mmdetection/mmdet/datasets/custom.py new file mode 100644 index 0000000..a4d8258 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/custom.py @@ -0,0 +1,410 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings +from collections import OrderedDict + +import mmcv +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable +from torch.utils.data import Dataset + +from mmdet.core import eval_map, eval_recalls +from .builder import DATASETS +from .pipelines import Compose + + +@DATASETS.register_module() +class CustomDataset(Dataset): + """Custom dataset for detection. + + The annotation format is shown as follows. The `ann` field is optional for + testing. + + .. code-block:: none + + [ + { + 'filename': 'a.jpg', + 'width': 1280, + 'height': 720, + 'ann': { + 'bboxes': (n, 4) in (x1, y1, x2, y2) order. + 'labels': (n, ), + 'bboxes_ignore': (k, 4), (optional field) + 'labels_ignore': (k, 4) (optional field) + } + }, + ... + ] + + Args: + ann_file (str): Annotation file path. + pipeline (list[dict]): Processing pipeline. + classes (str | Sequence[str], optional): Specify classes to load. + If is None, ``cls.CLASSES`` will be used. Default: None. + data_root (str, optional): Data root for ``ann_file``, + ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified. + test_mode (bool, optional): If set True, annotation will not be loaded. + filter_empty_gt (bool, optional): If set true, images without bounding + boxes of the dataset's classes will be filtered out. This option + only works when `test_mode=False`, i.e., we never filter images + during tests. 
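+
+    A minimal instantiation sketch (paths and class names are hypothetical;
+    the annotation file is expected to hold a list in the format above)::
+
+        >>> dataset = CustomDataset(
+        ...     ann_file='data/annotations.pkl',
+        ...     pipeline=[dict(type='LoadImageFromFile')],
+        ...     classes=('cat', 'dog'),
+        ...     img_prefix='data/images/')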
+ """ + + CLASSES = None + + PALETTE = None + + def __init__(self, + ann_file, + pipeline, + classes=None, + data_root=None, + img_prefix='', + seg_prefix=None, + proposal_file=None, + test_mode=False, + filter_empty_gt=True, + file_client_args=dict(backend='disk')): + self.ann_file = ann_file + self.data_root = data_root + self.img_prefix = img_prefix + self.seg_prefix = seg_prefix + self.proposal_file = proposal_file + self.test_mode = test_mode + self.filter_empty_gt = filter_empty_gt + self.file_client = mmcv.FileClient(**file_client_args) + self.CLASSES = self.get_classes(classes) + + # join paths if data_root is specified + if self.data_root is not None: + if not osp.isabs(self.ann_file): + self.ann_file = osp.join(self.data_root, self.ann_file) + if not (self.img_prefix is None or osp.isabs(self.img_prefix)): + self.img_prefix = osp.join(self.data_root, self.img_prefix) + if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)): + self.seg_prefix = osp.join(self.data_root, self.seg_prefix) + if not (self.proposal_file is None + or osp.isabs(self.proposal_file)): + self.proposal_file = osp.join(self.data_root, + self.proposal_file) + # load annotations (and proposals) + if hasattr(self.file_client, 'get_local_path'): + with self.file_client.get_local_path(self.ann_file) as local_path: + self.data_infos = self.load_annotations(local_path) + else: + warnings.warn( + 'The used MMCV version does not have get_local_path. ' + f'We treat the {self.ann_file} as local paths and it ' + 'might cause errors if the path is not a local path. ' + 'Please use MMCV>= 1.3.16 if you meet errors.') + self.data_infos = self.load_annotations(self.ann_file) + + if self.proposal_file is not None: + if hasattr(self.file_client, 'get_local_path'): + with self.file_client.get_local_path( + self.proposal_file) as local_path: + self.proposals = self.load_proposals(local_path) + else: + warnings.warn( + 'The used MMCV version does not have get_local_path. ' + f'We treat the {self.ann_file} as local paths and it ' + 'might cause errors if the path is not a local path. ' + 'Please use MMCV>= 1.3.16 if you meet errors.') + self.proposals = self.load_proposals(self.proposal_file) + else: + self.proposals = None + + # filter images too small and containing no annotations + if not test_mode: + valid_inds = self._filter_imgs() + self.data_infos = [self.data_infos[i] for i in valid_inds] + if self.proposals is not None: + self.proposals = [self.proposals[i] for i in valid_inds] + # set group flag for the sampler + self._set_group_flag() + + # processing pipeline + self.pipeline = Compose(pipeline) + + def __len__(self): + """Total number of samples of data.""" + return len(self.data_infos) + + def load_annotations(self, ann_file): + """Load annotation from annotation file.""" + return mmcv.load(ann_file) + + def load_proposals(self, proposal_file): + """Load proposal from proposal file.""" + return mmcv.load(proposal_file) + + def get_ann_info(self, idx): + """Get annotation by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + + return self.data_infos[idx]['ann'] + + def get_cat_ids(self, idx): + """Get category ids by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. 
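+
+        For example, an image annotated with one cat and two dogs (with
+        ``CLASSES = ('cat', 'dog')``) would return ``[0, 1, 1]``.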
+ """ + + return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist() + + def pre_pipeline(self, results): + """Prepare results dict for pipeline.""" + results['img_prefix'] = self.img_prefix + results['seg_prefix'] = self.seg_prefix + results['proposal_file'] = self.proposal_file + results['bbox_fields'] = [] + results['mask_fields'] = [] + results['seg_fields'] = [] + + def _filter_imgs(self, min_size=32): + """Filter images too small.""" + if self.filter_empty_gt: + warnings.warn( + 'CustomDataset does not support filtering empty gt images.') + valid_inds = [] + for i, img_info in enumerate(self.data_infos): + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + return valid_inds + + def _set_group_flag(self): + """Set flag according to image aspect ratio. + + Images with aspect ratio greater than 1 will be set as group 1, + otherwise group 0. + """ + self.flag = np.zeros(len(self), dtype=np.uint8) + for i in range(len(self)): + img_info = self.data_infos[i] + if img_info['width'] / img_info['height'] > 1: + self.flag[i] = 1 + + def _rand_another(self, idx): + """Get another random index from the same group as the given index.""" + pool = np.where(self.flag == self.flag[idx])[0] + return np.random.choice(pool) + + def __getitem__(self, idx): + """Get training/test data after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Training/test data (with annotation if `test_mode` is set \ + True). + """ + + if self.test_mode: + return self.prepare_test_img(idx) + while True: + data = self.prepare_train_img(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def prepare_train_img(self, idx): + """Get training data and annotations after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Training data and annotation after pipeline with new keys \ + introduced by pipeline. + """ + + img_info = self.data_infos[idx] + ann_info = self.get_ann_info(idx) + results = dict(img_info=img_info, ann_info=ann_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + def prepare_test_img(self, idx): + """Get testing data after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Testing data after pipeline with new keys introduced by \ + pipeline. + """ + + img_info = self.data_infos[idx] + results = dict(img_info=img_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + @classmethod + def get_classes(cls, classes=None): + """Get class names of current dataset. + + Args: + classes (Sequence[str] | str | None): If classes is None, use + default CLASSES defined by builtin dataset. If classes is a + string, take it as a file name. The file contains the name of + classes where each line contains one class name. If classes is + a tuple or list, override the CLASSES defined by the dataset. + + Returns: + tuple[str] or list[str]: Names of categories of the dataset. 
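+
+        Example (the file path is hypothetical)::
+
+            >>> CustomDataset.get_classes(('person', 'car'))
+            ('person', 'car')
+            >>> # or load one class name per line from a text file
+            >>> CustomDataset.get_classes('data/classes.txt')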
+ """ + if classes is None: + return cls.CLASSES + + if isinstance(classes, str): + # take it as a file path + class_names = mmcv.list_from_file(classes) + elif isinstance(classes, (tuple, list)): + class_names = classes + else: + raise ValueError(f'Unsupported type {type(classes)} of classes.') + + return class_names + + def get_cat2imgs(self): + """Get a dict with class as key and img_ids as values, which will be + used in :class:`ClassAwareSampler`. + + Returns: + dict[list]: A dict of per-label image list, + the item of the dict indicates a label index, + corresponds to the image index that contains the label. + """ + if self.CLASSES is None: + raise ValueError('self.CLASSES can not be None') + # sort the label index + cat2imgs = {i: [] for i in range(len(self.CLASSES))} + for i in range(len(self)): + cat_ids = set(self.get_cat_ids(i)) + for cat in cat_ids: + cat2imgs[cat].append(i) + return cat2imgs + + def format_results(self, results, **kwargs): + """Place holder to format result to dataset specific output.""" + + def evaluate(self, + results, + metric='mAP', + logger=None, + proposal_nums=(100, 300, 1000), + iou_thr=0.5, + scale_ranges=None): + """Evaluate the dataset. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | None | str): Logger used for printing + related information during evaluation. Default: None. + proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thr (float | list[float]): IoU threshold. Default: 0.5. + scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP. + Default: None. + """ + + if not isinstance(metric, str): + assert len(metric) == 1 + metric = metric[0] + allowed_metrics = ['mAP', 'recall'] + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + annotations = [self.get_ann_info(i) for i in range(len(self))] + eval_results = OrderedDict() + iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr + if metric == 'mAP': + assert isinstance(iou_thrs, list) + mean_aps = [] + for iou_thr in iou_thrs: + print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') + mean_ap, _ = eval_map( + results, + annotations, + scale_ranges=scale_ranges, + iou_thr=iou_thr, + dataset=self.CLASSES, + logger=logger) + mean_aps.append(mean_ap) + eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) + eval_results['mAP'] = sum(mean_aps) / len(mean_aps) + elif metric == 'recall': + gt_bboxes = [ann['bboxes'] for ann in annotations] + recalls = eval_recalls( + gt_bboxes, results, proposal_nums, iou_thr, logger=logger) + for i, num in enumerate(proposal_nums): + for j, iou in enumerate(iou_thrs): + eval_results[f'recall@{num}@{iou}'] = recalls[i, j] + if recalls.shape[1] > 1: + ar = recalls.mean(axis=1) + for i, num in enumerate(proposal_nums): + eval_results[f'AR@{num}'] = ar[i] + return eval_results + + def __repr__(self): + """Print the number of instance number.""" + dataset_type = 'Test' if self.test_mode else 'Train' + result = (f'\n{self.__class__.__name__} {dataset_type} dataset ' + f'with number of images {len(self)}, ' + f'and instance counts: \n') + if self.CLASSES is None: + result += 'Category names are not provided. 
\n' + return result + instance_count = np.zeros(len(self.CLASSES) + 1).astype(int) + # count the instance number in each image + for idx in range(len(self)): + label = self.get_ann_info(idx)['labels'] + unique, counts = np.unique(label, return_counts=True) + if len(unique) > 0: + # add the occurrence number to each class + instance_count[unique] += counts + else: + # background is the last index + instance_count[-1] += 1 + # create a table with category count + table_data = [['category', 'count'] * 5] + row_data = [] + for cls, count in enumerate(instance_count): + if cls < len(self.CLASSES): + row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}'] + else: + # add the background number + row_data += ['-1 background', f'{count}'] + if len(row_data) == 10: + table_data.append(row_data) + row_data = [] + if len(row_data) >= 2: + if row_data[-1] == '0': + row_data = row_data[:-2] + if len(row_data) >= 2: + table_data.append([]) + table_data.append(row_data) + + table = AsciiTable(table_data) + result += table.table + return result diff --git a/downstream/mmdetection/mmdet/datasets/dataset_wrappers.py b/downstream/mmdetection/mmdet/datasets/dataset_wrappers.py new file mode 100644 index 0000000..e62b88e --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/dataset_wrappers.py @@ -0,0 +1,456 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import bisect +import collections +import copy +import math +from collections import defaultdict + +import numpy as np +from mmcv.utils import build_from_cfg, print_log +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + +from .builder import DATASETS, PIPELINES +from .coco import CocoDataset + + +@DATASETS.register_module() +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + concat the group flag for image aspect ratio. + + Args: + datasets (list[:obj:`Dataset`]): A list of datasets. + separate_eval (bool): Whether to evaluate the results + separately if it is used as validation dataset. + Defaults to True. + """ + + def __init__(self, datasets, separate_eval=True): + super(ConcatDataset, self).__init__(datasets) + self.CLASSES = datasets[0].CLASSES + self.PALETTE = getattr(datasets[0], 'PALETTE', None) + self.separate_eval = separate_eval + if not separate_eval: + if any([isinstance(ds, CocoDataset) for ds in datasets]): + raise NotImplementedError( + 'Evaluating concatenated CocoDataset as a whole is not' + ' supported! Please set "separate_eval=True"') + elif len(set([type(ds) for ds in datasets])) != 1: + raise NotImplementedError( + 'All the datasets should have same types') + + if hasattr(datasets[0], 'flag'): + flags = [] + for i in range(0, len(datasets)): + flags.append(datasets[i].flag) + self.flag = np.concatenate(flags) + + def get_cat_ids(self, idx): + """Get category ids of concatenated dataset by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. + """ + + if idx < 0: + if -idx > len(self): + raise ValueError( + 'absolute value of index should not exceed dataset length') + idx = len(self) + idx + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx].get_cat_ids(sample_idx) + + def get_ann_info(self, idx): + """Get annotation of concatenated dataset by index. + + Args: + idx (int): Index of data. 
+ + Returns: + dict: Annotation info of specified index. + """ + + if idx < 0: + if -idx > len(self): + raise ValueError( + 'absolute value of index should not exceed dataset length') + idx = len(self) + idx + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx].get_ann_info(sample_idx) + + def evaluate(self, results, logger=None, **kwargs): + """Evaluate the results. + + Args: + results (list[list | tuple]): Testing results of the dataset. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + + Returns: + dict[str: float]: AP results of the total dataset or each separate + dataset if `self.separate_eval=True`. + """ + assert len(results) == self.cumulative_sizes[-1], \ + ('Dataset and results have different sizes: ' + f'{self.cumulative_sizes[-1]} v.s. {len(results)}') + + # Check whether all the datasets support evaluation + for dataset in self.datasets: + assert hasattr(dataset, 'evaluate'), \ + f'{type(dataset)} does not implement evaluate function' + + if self.separate_eval: + dataset_idx = -1 + total_eval_results = dict() + for size, dataset in zip(self.cumulative_sizes, self.datasets): + start_idx = 0 if dataset_idx == -1 else \ + self.cumulative_sizes[dataset_idx] + end_idx = self.cumulative_sizes[dataset_idx + 1] + + results_per_dataset = results[start_idx:end_idx] + print_log( + f'\nEvaluateing {dataset.ann_file} with ' + f'{len(results_per_dataset)} images now', + logger=logger) + + eval_results_per_dataset = dataset.evaluate( + results_per_dataset, logger=logger, **kwargs) + dataset_idx += 1 + for k, v in eval_results_per_dataset.items(): + total_eval_results.update({f'{dataset_idx}_{k}': v}) + + return total_eval_results + elif any([isinstance(ds, CocoDataset) for ds in self.datasets]): + raise NotImplementedError( + 'Evaluating concatenated CocoDataset as a whole is not' + ' supported! Please set "separate_eval=True"') + elif len(set([type(ds) for ds in self.datasets])) != 1: + raise NotImplementedError( + 'All the datasets should have same types') + else: + original_data_infos = self.datasets[0].data_infos + self.datasets[0].data_infos = sum( + [dataset.data_infos for dataset in self.datasets], []) + eval_results = self.datasets[0].evaluate( + results, logger=logger, **kwargs) + self.datasets[0].data_infos = original_data_infos + return eval_results + + +@DATASETS.register_module() +class RepeatDataset: + """A wrapper of repeated dataset. + + The length of repeated dataset will be `times` larger than the original + dataset. This is useful when the data loading time is long but the dataset + is small. Using RepeatDataset can reduce the data loading time between + epochs. + + Args: + dataset (:obj:`Dataset`): The dataset to be repeated. + times (int): Repeat times. + """ + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + self.CLASSES = dataset.CLASSES + self.PALETTE = getattr(dataset, 'PALETTE', None) + if hasattr(self.dataset, 'flag'): + self.flag = np.tile(self.dataset.flag, times) + + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + return self.dataset[idx % self._ori_len] + + def get_cat_ids(self, idx): + """Get category ids of repeat dataset by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. 
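+
+        Indices wrap around the original dataset, e.g. with an original
+        length of 100, index 150 maps to ``150 % 100 == 50``.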
+ """ + + return self.dataset.get_cat_ids(idx % self._ori_len) + + def get_ann_info(self, idx): + """Get annotation of repeat dataset by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + + return self.dataset.get_ann_info(idx % self._ori_len) + + def __len__(self): + """Length after repetition.""" + return self.times * self._ori_len + + +# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa +@DATASETS.register_module() +class ClassBalancedDataset: + """A wrapper of repeated dataset with repeat factor. + + Suitable for training on class imbalanced datasets like LVIS. Following + the sampling strategy in the `paper `_, + in each epoch, an image may appear multiple times based on its + "repeat factor". + The repeat factor for an image is a function of the frequency the rarest + category labeled in that image. The "frequency of category c" in [0, 1] + is defined by the fraction of images in the training set (without repeats) + in which category c appears. + The dataset needs to instantiate :func:`self.get_cat_ids` to support + ClassBalancedDataset. + + The repeat factor is computed as followed. + + 1. For each category c, compute the fraction # of images + that contain it: :math:`f(c)` + 2. For each category c, compute the category-level repeat factor: + :math:`r(c) = max(1, sqrt(t/f(c)))` + 3. For each image I, compute the image-level repeat factor: + :math:`r(I) = max_{c in I} r(c)` + + Args: + dataset (:obj:`CustomDataset`): The dataset to be repeated. + oversample_thr (float): frequency threshold below which data is + repeated. For categories with ``f_c >= oversample_thr``, there is + no oversampling. For categories with ``f_c < oversample_thr``, the + degree of oversampling following the square-root inverse frequency + heuristic above. + filter_empty_gt (bool, optional): If set true, images without bounding + boxes will not be oversampled. Otherwise, they will be categorized + as the pure background class and involved into the oversampling. + Default: True. + """ + + def __init__(self, dataset, oversample_thr, filter_empty_gt=True): + self.dataset = dataset + self.oversample_thr = oversample_thr + self.filter_empty_gt = filter_empty_gt + self.CLASSES = dataset.CLASSES + self.PALETTE = getattr(dataset, 'PALETTE', None) + + repeat_factors = self._get_repeat_factors(dataset, oversample_thr) + repeat_indices = [] + for dataset_idx, repeat_factor in enumerate(repeat_factors): + repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor)) + self.repeat_indices = repeat_indices + + flags = [] + if hasattr(self.dataset, 'flag'): + for flag, repeat_factor in zip(self.dataset.flag, repeat_factors): + flags.extend([flag] * int(math.ceil(repeat_factor))) + assert len(flags) == len(repeat_indices) + self.flag = np.asarray(flags, dtype=np.uint8) + + def _get_repeat_factors(self, dataset, repeat_thr): + """Get repeat factor for each images in the dataset. + + Args: + dataset (:obj:`CustomDataset`): The dataset + repeat_thr (float): The threshold of frequency. If an image + contains the categories whose frequency below the threshold, + it would be repeated. + + Returns: + list[float]: The repeat factors for each images in the dataset. + """ + + # 1. 
+        #    that contain it: f(c)
+        category_freq = defaultdict(int)
+        num_images = len(dataset)
+        for idx in range(num_images):
+            cat_ids = set(self.dataset.get_cat_ids(idx))
+            if len(cat_ids) == 0 and not self.filter_empty_gt:
+                cat_ids = set([len(self.CLASSES)])
+            for cat_id in cat_ids:
+                category_freq[cat_id] += 1
+        for k, v in category_freq.items():
+            category_freq[k] = v / num_images
+
+        # 2. For each category c, compute the category-level repeat factor:
+        #    r(c) = max(1, sqrt(t/f(c)))
+        category_repeat = {
+            cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
+            for cat_id, cat_freq in category_freq.items()
+        }
+
+        # 3. For each image I, compute the image-level repeat factor:
+        #    r(I) = max_{c in I} r(c)
+        repeat_factors = []
+        for idx in range(num_images):
+            cat_ids = set(self.dataset.get_cat_ids(idx))
+            if len(cat_ids) == 0 and not self.filter_empty_gt:
+                cat_ids = set([len(self.CLASSES)])
+            repeat_factor = 1
+            if len(cat_ids) > 0:
+                repeat_factor = max(
+                    {category_repeat[cat_id]
+                     for cat_id in cat_ids})
+            repeat_factors.append(repeat_factor)
+
+        return repeat_factors
+
+    def __getitem__(self, idx):
+        ori_index = self.repeat_indices[idx]
+        return self.dataset[ori_index]
+
+    def get_ann_info(self, idx):
+        """Get annotation of dataset by index.
+
+        Args:
+            idx (int): Index of data.
+
+        Returns:
+            dict: Annotation info of specified index.
+        """
+        ori_index = self.repeat_indices[idx]
+        return self.dataset.get_ann_info(ori_index)
+
+    def __len__(self):
+        """Length after repetition."""
+        return len(self.repeat_indices)
+
+
+@DATASETS.register_module()
+class MultiImageMixDataset:
+    """A wrapper of multiple images mixed dataset.
+
+    Suitable for training on multiple images mixed data augmentation like
+    mosaic and mixup. For the augmentation pipeline of mixed image data,
+    the `get_indexes` method needs to be provided to obtain the image
+    indexes, and you can set `skip_type_keys` to change the pipeline running
+    process. The `dynamic_scale` parameter is deprecated; use a Resize
+    pipeline to change the output image size instead.
+
+    Args:
+        dataset (:obj:`CustomDataset`): The dataset to be mixed.
+        pipeline (Sequence[dict]): Sequence of transform objects or
+            config dicts to be composed.
+        dynamic_scale (tuple[int], optional): Deprecated. Passing any value
+            raises a RuntimeError. Defaults to None.
+        skip_type_keys (list[str], optional): Sequence of transform type
+            strings to be skipped in the pipeline. Defaults to None.
+        max_refetch (int): The maximum number of retry iterations for getting
+            valid results from the pipeline. If the number of iterations
+            exceeds `max_refetch` and the results are still None, the
+            iteration is terminated and an error is raised. Default: 15.
+    """
+
+    def __init__(self,
+                 dataset,
+                 pipeline,
+                 dynamic_scale=None,
+                 skip_type_keys=None,
+                 max_refetch=15):
+        if dynamic_scale is not None:
+            raise RuntimeError(
+                'dynamic_scale is deprecated. Please use Resize pipeline '
+                'to achieve similar functions')
+        assert isinstance(pipeline, collections.abc.Sequence)
+        if skip_type_keys is not None:
+            assert all([
+                isinstance(skip_type_key, str)
+                for skip_type_key in skip_type_keys
+            ])
+        self._skip_type_keys = skip_type_keys
+
+        self.pipeline = []
+        self.pipeline_types = []
+        for transform in pipeline:
+            if isinstance(transform, dict):
+                self.pipeline_types.append(transform['type'])
+                transform = build_from_cfg(transform, PIPELINES)
+                self.pipeline.append(transform)
+            else:
+                raise TypeError('pipeline must be a dict')
+
+        self.dataset = dataset
+        self.CLASSES = dataset.CLASSES
+        self.PALETTE = getattr(dataset, 'PALETTE', None)
+        if hasattr(self.dataset, 'flag'):
+            self.flag = dataset.flag
+        self.num_samples = len(dataset)
+        self.max_refetch = max_refetch
+
+    def __len__(self):
+        return self.num_samples
+
+    def __getitem__(self, idx):
+        results = copy.deepcopy(self.dataset[idx])
+        for (transform, transform_type) in zip(self.pipeline,
+                                               self.pipeline_types):
+            if self._skip_type_keys is not None and \
+                    transform_type in self._skip_type_keys:
+                continue
+
+            if hasattr(transform, 'get_indexes'):
+                for i in range(self.max_refetch):
+                    # Make sure the results passed through the loading
+                    # pipeline of the original dataset are not None.
+                    indexes = transform.get_indexes(self.dataset)
+                    if not isinstance(indexes, collections.abc.Sequence):
+                        indexes = [indexes]
+                    mix_results = [
+                        copy.deepcopy(self.dataset[index]) for index in indexes
+                    ]
+                    if None not in mix_results:
+                        results['mix_results'] = mix_results
+                        break
+                else:
+                    raise RuntimeError(
+                        'The loading pipeline of the original dataset'
+                        ' always returns None. Please check the correctness '
+                        'of the dataset and its pipeline.')
+
+            for i in range(self.max_refetch):
+                # Confirm that the results passed through the training
+                # pipeline of the wrapper are not None.
+                updated_results = transform(copy.deepcopy(results))
+                if updated_results is not None:
+                    results = updated_results
+                    break
+            else:
+                raise RuntimeError(
+                    'The training pipeline of the dataset wrapper'
+                    ' always returns None. Please check the correctness '
+                    'of the dataset and its pipeline.')
+
+            if 'mix_results' in results:
+                results.pop('mix_results')
+
+        return results
+
+    def update_skip_type_keys(self, skip_type_keys):
+        """Update skip_type_keys. It is called by an external hook.
+
+        Args:
+            skip_type_keys (list[str], optional): Sequence of transform type
+                strings to be skipped in the pipeline.
+        """
+        assert all([
+            isinstance(skip_type_key, str) for skip_type_key in skip_type_keys
+        ])
+        self._skip_type_keys = skip_type_keys
diff --git a/downstream/mmdetection/mmdet/datasets/deepfashion.py b/downstream/mmdetection/mmdet/datasets/deepfashion.py
new file mode 100644
index 0000000..609f809
--- /dev/null
+++ b/downstream/mmdetection/mmdet/datasets/deepfashion.py
@@ -0,0 +1,16 @@
+# Copyright (c) OpenMMLab. All rights reserved.
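+# The DeepFashion annotations used by this dataset are expected in COCO json
+# format, so it only overrides the class names and palette of CocoDataset;
+# annotation loading and evaluation are inherited unchanged.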
+from .builder import DATASETS +from .coco import CocoDataset + + +@DATASETS.register_module() +class DeepFashionDataset(CocoDataset): + + CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', + 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', + 'skin', 'face') + + PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64), + (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96), + (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192), + (128, 0, 96), (128, 0, 192), (0, 32, 192)] diff --git a/downstream/mmdetection/mmdet/datasets/lvis.py b/downstream/mmdetection/mmdet/datasets/lvis.py new file mode 100644 index 0000000..511e31a --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/lvis.py @@ -0,0 +1,742 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +import logging +import os.path as osp +import tempfile +import warnings +from collections import OrderedDict + +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable + +from .builder import DATASETS +from .coco import CocoDataset + + +@DATASETS.register_module() +class LVISV05Dataset(CocoDataset): + + CLASSES = ( + 'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', + 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', + 'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron', + 'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke', + 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award', + 'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack', + 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball', + 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage', + 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel', + 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat', + 'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop', + 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel', + 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead', + 'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed', + 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can', + 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench', + 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars', + 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse', + 'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag', + 'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp', + 'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin', + 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', + 'book', 'book_bag', 'bookcase', 'booklet', 'bookmark', + 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet', + 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', + 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin', + 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', + 'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase', + 'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie', + 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull', + 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board', + 'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed', + 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife', + 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', + 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 
'calf', + 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', + 'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder', + 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon', + 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap', + 'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)', + 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan', + 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag', + 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast', + 'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player', + 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue', + 'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard', + 'cherry', 'chessboard', 'chest_of_drawers_(furniture)', + 'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua', + 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)', + 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk', + 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick', + 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette', + 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent', + 'clementine', 'clip', 'clipboard', 'clock', 'clock_tower', + 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat', + 'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter', + 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin', + 'colander', 'coleslaw', 'coloring_material', 'combination_lock', + 'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer', + 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie', + 'cookie_jar', 'cooking_utensil', 'cooler_(for_food)', + 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn', + 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset', + 'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell', + 'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon', + 'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot', + 'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship', + 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube', + 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler', + 'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool', + 'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard', + 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', + 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', + 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', + 'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog', + 'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask', + 'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', + 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', + 'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper', + 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling', + 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', + 'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel', + 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', + 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', + 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', + 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', + 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', + 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', + 
'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat', + 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flash', + 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)', + 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair', + 'food_processor', 'football_(American)', 'football_helmet', + 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast', + 'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad', + 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', + 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', + 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda', + 'gift_wrap', 'ginger', 'giraffe', 'cincture', + 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', + 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', + 'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater', + 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle', + 'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag', + 'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush', + 'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock', + 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', + 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', + 'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil', + 'headband', 'headboard', 'headlight', 'headscarf', 'headset', + 'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater', + 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus', + 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood', + 'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', + 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', + 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', + 'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod', + 'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean', + 'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick', + 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard', + 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', + 'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)', + 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat', + 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp', + 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer', + 'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)', + 'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy', + 'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine', + 'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard', + 'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion', + 'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine', + 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth', + 'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini', + 'mascot', 'mashed_potato', 'masher', 'mask', 'mast', + 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup', + 'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone', + 'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan', + 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money', + 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', + 'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle', + 'mound_(baseball)', 'mouse_(animal_rodent)', + 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 
'mushroom', + 'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin', + 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand', + 'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)', + 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)', + 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion', + 'orange_(fruit)', 'orange_juice', 'oregano', 'ostrich', 'ottoman', + 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle', + 'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette', + 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose', + 'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book', + 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', + 'parchment', 'parka', 'parking_meter', 'parrot', + 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', + 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', + 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard', + 'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener', + 'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper', + 'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood', + 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', + 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', + 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', + 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', + 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', + 'plate', 'platter', 'playing_card', 'playpen', 'pliers', + 'plow_(farm_equipment)', 'pocket_watch', 'pocketknife', + 'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt', + 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait', + 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato', + 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer', + 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding', + 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet', + 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car', + 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft', + 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', + 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', + 'recliner', 'record_player', 'red_cabbage', 'reflector', + 'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring', + 'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate', + 'Rollerblade', 'rolling_pin', 'root_beer', + 'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)', + 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', + 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami', + 'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker', + 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer', + 'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)', + 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard', + 'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver', + 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', + 'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker', + 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)', + 'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog', + 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart', + 'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head', + 'shower_curtain', 
'shredder_(for_paper)', 'sieve', 'signboard', 'silo', + 'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', + 'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)', + 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman', + 'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain', + 'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero', + 'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk', + 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear', + 'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear', + 'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish', + 'statue_(sculpture)', 'steak_(food)', 'steak_knife', + 'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil', + 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', + 'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light', + 'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry', + 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer', + 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', + 'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop', + 'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', + 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', + 'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', + 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)', + 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', + 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', + 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', + 'telephone_pole', 'telephoto_lens', 'television_camera', + 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', + 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', + 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', + 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', + 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', + 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel', + 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', + 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', + 'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)', + 'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', + 'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip', + 'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella', + 'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve', + 'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin', + 'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon', + 'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet', + 'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch', + 'water_bottle', 'water_cooler', 'water_faucet', 'water_filter', + 'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski', + 'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam', + 'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', + 'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime', + 'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock', + 'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair', + 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath', + 'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt', + 
'yoke_(animal_equipment)', 'zebra', 'zucchini') + + PALETTE = None + + def load_annotations(self, ann_file): + """Load annotation from lvis style annotation file. + + Args: + ann_file (str): Path of annotation file. + + Returns: + list[dict]: Annotation info from LVIS api. + """ + + try: + import lvis + if getattr(lvis, '__version__', '0') >= '10.5.3': + warnings.warn( + 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 + UserWarning) + from lvis import LVIS + except ImportError: + raise ImportError( + 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501 + ) + self.coco = LVIS(ann_file) + self.cat_ids = self.coco.get_cat_ids() + self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} + self.img_ids = self.coco.get_img_ids() + data_infos = [] + for i in self.img_ids: + info = self.coco.load_imgs([i])[0] + if info['file_name'].startswith('COCO'): + # Convert form the COCO 2014 file naming convention of + # COCO_[train/val/test]2014_000000000000.jpg to the 2017 + # naming convention of 000000000000.jpg + # (LVIS v1 will fix this naming issue) + info['filename'] = info['file_name'][-16:] + else: + info['filename'] = info['file_name'] + data_infos.append(info) + return data_infos + + def evaluate(self, + results, + metric='bbox', + logger=None, + jsonfile_prefix=None, + classwise=False, + proposal_nums=(100, 300, 1000), + iou_thrs=np.arange(0.5, 0.96, 0.05)): + """Evaluation in LVIS protocol. + + Args: + results (list[list | tuple]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. Options are + 'bbox', 'segm', 'proposal', 'proposal_fast'. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): + classwise (bool): Whether to evaluating the AP for each class. + proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thrs (Sequence[float]): IoU threshold used for evaluating + recalls. If set to a list, the average recall of all IoUs will + also be computed. Default: 0.5. + + Returns: + dict[str, float]: LVIS style metrics. + """ + + try: + import lvis + if getattr(lvis, '__version__', '0') >= '10.5.3': + warnings.warn( + 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 + UserWarning) + from lvis import LVISEval, LVISResults + except ImportError: + raise ImportError( + 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501 + ) + assert isinstance(results, list), 'results must be a list' + assert len(results) == len(self), ( + 'The length of results is not equal to the dataset len: {} != {}'. 
+ format(len(results), len(self))) + + metrics = metric if isinstance(metric, list) else [metric] + allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] + for metric in metrics: + if metric not in allowed_metrics: + raise KeyError('metric {} is not supported'.format(metric)) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + result_files = self.results2json(results, jsonfile_prefix) + + eval_results = OrderedDict() + # get original api + lvis_gt = self.coco + for metric in metrics: + msg = 'Evaluating {}...'.format(metric) + if logger is None: + msg = '\n' + msg + print_log(msg, logger=logger) + + if metric == 'proposal_fast': + ar = self.fast_eval_recall( + results, proposal_nums, iou_thrs, logger='silent') + log_msg = [] + for i, num in enumerate(proposal_nums): + eval_results['AR@{}'.format(num)] = ar[i] + log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i])) + log_msg = ''.join(log_msg) + print_log(log_msg, logger=logger) + continue + + if metric not in result_files: + raise KeyError('{} is not in results'.format(metric)) + try: + lvis_dt = LVISResults(lvis_gt, result_files[metric]) + except IndexError: + print_log( + 'The testing results of the whole dataset is empty.', + logger=logger, + level=logging.ERROR) + break + + iou_type = 'bbox' if metric == 'proposal' else metric + lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type) + lvis_eval.params.imgIds = self.img_ids + if metric == 'proposal': + lvis_eval.params.useCats = 0 + lvis_eval.params.maxDets = list(proposal_nums) + lvis_eval.evaluate() + lvis_eval.accumulate() + lvis_eval.summarize() + for k, v in lvis_eval.get_results().items(): + if k.startswith('AR'): + val = float('{:.3f}'.format(float(v))) + eval_results[k] = val + else: + lvis_eval.evaluate() + lvis_eval.accumulate() + lvis_eval.summarize() + lvis_results = lvis_eval.get_results() + if classwise: # Compute per-category AP + # Compute per-category AP + # from https://github.com/facebookresearch/detectron2/ + precisions = lvis_eval.eval['precision'] + # precision: (iou, recall, cls, area range, max dets) + assert len(self.cat_ids) == precisions.shape[2] + + results_per_category = [] + for idx, catId in enumerate(self.cat_ids): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + # the dimensions of precisions are + # [num_thrs, num_recalls, num_cats, num_area_rngs] + nm = self.coco.load_cats([catId])[0] + precision = precisions[:, :, idx, 0] + precision = precision[precision > -1] + if precision.size: + ap = np.mean(precision) + else: + ap = float('nan') + results_per_category.append( + (f'{nm["name"]}', f'{float(ap):0.3f}')) + + num_columns = min(6, len(results_per_category) * 2) + results_flatten = list( + itertools.chain(*results_per_category)) + headers = ['category', 'AP'] * (num_columns // 2) + results_2d = itertools.zip_longest(*[ + results_flatten[i::num_columns] + for i in range(num_columns) + ]) + table_data = [headers] + table_data += [result for result in results_2d] + table = AsciiTable(table_data) + print_log('\n' + table.table, logger=logger) + + for k, v in lvis_results.items(): + if k.startswith('AP'): + key = '{}_{}'.format(metric, k) + val = float('{:.3f}'.format(float(v))) + eval_results[key] = val + ap_summary = ' '.join([ + '{}:{:.3f}'.format(k, float(v)) + for k, v in lvis_results.items() if k.startswith('AP') + ]) + eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary + lvis_eval.print_results() 
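+        # Remove the temporary directory that was created to hold the
+        # intermediate json result files when no `jsonfile_prefix` was given.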
+ if tmp_dir is not None: + tmp_dir.cleanup() + return eval_results + + +LVISDataset = LVISV05Dataset +DATASETS.register_module(name='LVISDataset', module=LVISDataset) + + +@DATASETS.register_module() +class LVISV1Dataset(LVISDataset): + + CLASSES = ( + 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', + 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna', + 'apple', 'applesauce', 'apricot', 'apron', 'aquarium', + 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor', + 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', + 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy', + 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel', + 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon', + 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo', + 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', + 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap', + 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)', + 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)', + 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie', + 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper', + 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', + 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', + 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath', + 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', + 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket', + 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry', + 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg', + 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase', + 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle', + 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', + 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box', + 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', + 'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase', + 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', + 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer', + 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', + 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', + 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', + 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', + 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', + 'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar', + 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup', + 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino', + 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', + 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship', + 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', + 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower', + 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', + 'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier', + 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard', + 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime', + 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', + 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', + 'chopping_board', 'chopstick', 
'Christmas_tree', 'slide', 'cider', + 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', + 'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine', + 'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock', + 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', + 'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach', + 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table', + 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', + 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', + 'compass', 'computer_keyboard', 'condiment', 'cone', 'control', + 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie', + 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)', + 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', + 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall', + 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker', + 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib', + 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown', + 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', + 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', + 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', + 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard', + 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', + 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', + 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', + 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup', + 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin', + 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', + 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', + 'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)', + 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', + 'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring', + 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', + 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', + 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', + 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', + 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', + 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', + 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', + 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap', + 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', + 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', + 'folding_chair', 'food_processor', 'football_(American)', + 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', + 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice', + 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', + 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', + 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator', + 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', + 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', + 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', + 'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat', + 'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly', + 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet', + 'hairpin', 'halter_top', 'ham', 
'hamburger', 'hammer', 'hammock', + 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', + 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', + 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband', + 'headboard', 'headlight', 'headscarf', 'headset', + 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet', + 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog', + 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah', + 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', + 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', + 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', + 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board', + 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey', + 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak', + 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono', + 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit', + 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)', + 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', + 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard', + 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather', + 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce', + 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb', + 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor', + 'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat', + 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)', + 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger', + 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato', + 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox', + 'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine', + 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone', + 'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror', + 'mitten', 'mixer_(kitchen_tool)', 'money', + 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', + 'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)', + 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', + 'music_stool', 'musical_instrument', 'nailfile', 'napkin', + 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper', + 'newsstand', 'nightshirt', 'nosebag_(for_animals)', + 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker', + 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil', + 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich', + 'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad', + 'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas', + 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', + 'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book', + 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol', + 'parchment', 'parka', 'parking_meter', 'parrot', + 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', + 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', + 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg', + 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box', + 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)', + 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet', + 
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', + 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', + 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', + 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', + 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', + 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)', + 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)', + 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)', + 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato', + 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel', + 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune', + 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', + 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', + 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', + 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', + 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', + 'recliner', 'record_player', 'reflector', 'remote_control', + 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map', + 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade', + 'rolling_pin', 'root_beer', 'router_(computer_equipment)', + 'rubber_band', 'runner_(carpet)', 'plastic_bag', + 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin', + 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)', + 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)', + 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse', + 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf', + 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver', + 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', + 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark', + 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl', + 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt', + 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass', + 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap', + 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink', + 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole', + 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)', + 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman', + 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball', + 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon', + 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)', + 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish', + 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)', + 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish', + 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel', + 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', + 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer', + 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign', + 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl', + 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses', + 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband', + 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword', + 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table', + 'table_lamp', 
'tablecloth', 'tachometer', 'taco', 'tag', 'taillight', + 'tambourine', 'army_tank', 'tank_(storage_vessel)', + 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', + 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', + 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', + 'telephone_pole', 'telephoto_lens', 'television_camera', + 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', + 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', + 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', + 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', + 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', + 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel', + 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', + 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', + 'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle', + 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', + 'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)', + 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn', + 'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest', + 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture', + 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick', + 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe', + 'washbasin', 'automatic_washer', 'watch', 'water_bottle', + 'water_cooler', 'water_faucet', 'water_heater', 'water_jug', + 'water_gun', 'water_scooter', 'water_ski', 'water_tower', + 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake', + 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream', + 'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)', + 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket', + 'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', + 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt', + 'yoke_(animal_equipment)', 'zebra', 'zucchini') + + def load_annotations(self, ann_file): + try: + import lvis + if getattr(lvis, '__version__', '0') >= '10.5.3': + warnings.warn( + 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 + UserWarning) + from lvis import LVIS + except ImportError: + raise ImportError( + 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501 + ) + self.coco = LVIS(ann_file) + self.cat_ids = self.coco.get_cat_ids() + self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} + self.img_ids = self.coco.get_img_ids() + data_infos = [] + for i in self.img_ids: + info = self.coco.load_imgs([i])[0] + # coco_url is used in LVISv1 instead of file_name + # e.g. http://images.cocodataset.org/train2017/000000391895.jpg + # train/val split in specified in url + info['filename'] = info['coco_url'].replace( + 'http://images.cocodataset.org/', '') + data_infos.append(info) + return data_infos diff --git a/downstream/mmdetection/mmdet/datasets/openimages.py b/downstream/mmdetection/mmdet/datasets/openimages.py new file mode 100644 index 0000000..fba660c --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/openimages.py @@ -0,0 +1,891 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
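+#
+# This module reads Open Images detection annotations from CSV files: box
+# annotations, optional image-level labels, and a JSON class hierarchy that
+# is used to expand labels to their parent classes during evaluation.
+# A rough config sketch (the annotation file names below are placeholders
+# modelled on the standard Open Images V6 downloads, not files shipped with
+# this repository):
+#
+#     dataset = dict(
+#         type='OpenImagesDataset',
+#         ann_file='annotations/oidv6-train-annotations-bbox.csv',
+#         label_file='annotations/class-descriptions-boxable.csv',
+#         hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
+#         meta_file='annotations/train-image-metas.pkl',
+#         img_prefix='OpenImages/',
+#         pipeline=train_pipeline)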
+import copy +import csv +import json +import os.path as osp +import warnings +from collections import OrderedDict, defaultdict + +import mmcv +import numpy as np +import torch.distributed as dist +from mmcv.runner import get_dist_info +from mmcv.utils import print_log + +from mmdet.core import eval_map +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class OpenImagesDataset(CustomDataset): + """Open Images dataset for detection. + + Args: + ann_file (str): Annotation file path. + label_file (str): File path of the label description file that + maps the classes names in MID format to their short + descriptions. + image_level_ann_file (str): Image level annotation, which is used + in evaluation. + get_supercategory (bool): Whether to get parent class of the + current class. Default: True. + hierarchy_file (str): The file path of the class hierarchy. + Default: None. + get_metas (bool): Whether to get image metas in testing or + validation time. This should be `True` during evaluation. + Default: True. The OpenImages annotations do not have image + metas (width and height of the image), which will be used + during evaluation. We provide two ways to get image metas + in `OpenImagesDataset`: + + - 1. `load from file`: Load image metas from pkl file, which + is suggested to use. We provided a script to get image metas: + `tools/misc/get_image_metas.py`, which need to run + this script before training/testing. Please refer to + `config/openimages/README.md` for more details. + + - 2. `load from pipeline`, which will get image metas during + test time. However, this may reduce the inference speed, + especially when using distribution. + + load_from_file (bool): Whether to get image metas from pkl file. + meta_file (str): File path to get image metas. + filter_labels (bool): Whether filter unannotated classes. + Default: True. + load_image_level_labels (bool): Whether load and consider image + level labels during evaluation. Default: True. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + ann_file, + label_file='', + image_level_ann_file='', + get_supercategory=True, + hierarchy_file=None, + get_metas=True, + load_from_file=True, + meta_file='', + filter_labels=True, + load_image_level_labels=True, + file_client_args=dict(backend='disk'), + **kwargs): + # may get error if use other file_client + self.file_client_args = file_client_args + + self.cat2label = defaultdict(str) + self.index_dict = {} + + # Although it will init file_client in `CustomDataset`, + # it needs to be init here. 
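+        # The label CSV has to be parsed before `CustomDataset.__init__`
+        # runs, because `load_annotations` maps MID-format label ids to
+        # contiguous indices through `self.index_dict`, which is filled by
+        # `get_classes_from_csv` below.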
+ file_client = mmcv.FileClient(**file_client_args) + # need get `index_dict` before load annotations + assert label_file.endswith('csv') + if hasattr(file_client, 'get_local_path'): + with file_client.get_local_path(label_file) as local_path: + class_names = self.get_classes_from_csv(local_path) + else: + class_names = self.get_classes_from_csv(label_file) + super(OpenImagesDataset, self).__init__( + ann_file=ann_file, file_client_args=file_client_args, **kwargs) + self.CLASSES = class_names + self.image_level_ann_file = image_level_ann_file + self.load_image_level_labels = load_image_level_labels + if get_supercategory is True: + assert hierarchy_file is not None + if self.__class__.__name__ == 'OpenImagesDataset': + assert hierarchy_file.endswith('json') + elif self.__class__.__name__ == 'OpenImagesChallengeDataset': + assert hierarchy_file.endswith('np') + else: + raise NotImplementedError + if hasattr(self.file_client, 'get_local_path'): + with self.file_client.get_local_path( + hierarchy_file) as local_path: + self.class_label_tree = self.get_relation_matrix( + local_path) + else: + self.class_label_tree = self.get_relation_matrix( + hierarchy_file) + self.get_supercategory = get_supercategory + self.get_metas = get_metas + self.load_from_file = load_from_file + self.meta_file = meta_file + if self.data_root is not None: + if not osp.isabs(self.meta_file): + self.meta_file = osp.join(self.data_root, self.meta_file) + self.filter_labels = filter_labels + self.rank, self.world_size = get_dist_info() + self.temp_img_metas = [] + self.test_img_metas = [] + self.test_img_shapes = [] + self.load_from_pipeline = False if load_from_file else True + + def get_classes_from_csv(self, label_file): + """Get classes name from file. + + Args: + label_file (str): File path of the label description file that + maps the classes names in MID format to their short + descriptions. + + Returns: + list[str]: Class name of OpenImages. + """ + + index_list = [] + classes_names = [] + with open(label_file, 'r') as f: + reader = csv.reader(f) + for line in reader: + self.cat2label[line[0]] = line[1] + classes_names.append(line[1]) + index_list.append(line[0]) + self.index_dict = {index: i for i, index in enumerate(index_list)} + return classes_names + + def load_annotations(self, ann_file): + """Load annotation from annotation file. + + Special described `self.data_infos` (defaultdict[list[dict]]) + in this function: Annotations where item of the defaultdict + indicates an image, each of which has (n) dicts. Keys of dicts are: + + - `bbox` (list): coordinates of the box, in normalized image + coordinates, of shape 4. + - `label` (int): the label id. + - `is_group_of` (bool): Indicates that the box spans a group + of objects (e.g., a bed of flowers or a crowd of people). + - `is_occluded` (bool): Indicates that the object is occluded + by another object in the image. + - `is_truncated` (bool): Indicates that the object extends + beyond the boundary of the image. + - `is_depiction` (bool): Indicates that the object is a + depiction. + - `is_inside` (bool): Indicates a picture taken from the + inside of the object. + + Args: + ann_file (str): CSV style annotation file path. + + Returns: + list[dict]: Data infos where each item of the list + indicates an image. Keys of annotations are: + + - `img_id` (str): Image name. + - `filename` (str): Image name with suffix. 
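+
+        Note:
+            The csv reader below assumes the official Open Images box
+            annotation column order, i.e. ``ImageID, Source, LabelName,
+            Confidence, XMin, XMax, YMin, YMax, IsOccluded, IsTruncated,
+            IsGroupOf, IsDepiction, IsInside``.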
+ """ + self.ann_infos = defaultdict(list) + data_infos = [] + cp_filename = None + with open(ann_file, 'r') as f: + reader = csv.reader(f) + for i, line in enumerate(reader): + if i == 0: + continue + img_id = line[0] + filename = f'{img_id}.jpg' + label_id = line[2] + assert label_id in self.index_dict + label = int(self.index_dict[label_id]) + bbox = [ + float(line[4]), # xmin + float(line[6]), # ymin + float(line[5]), # xmax + float(line[7]) # ymax + ] + is_occluded = True if int(line[8]) == 1 else False + is_truncated = True if int(line[9]) == 1 else False + is_group_of = True if int(line[10]) == 1 else False + is_depiction = True if int(line[11]) == 1 else False + is_inside = True if int(line[12]) == 1 else False + + self.ann_infos[img_id].append( + dict( + bbox=bbox, + label=label, + is_occluded=is_occluded, + is_truncated=is_truncated, + is_group_of=is_group_of, + is_depiction=is_depiction, + is_inside=is_inside)) + if filename != cp_filename: + data_infos.append(dict(img_id=img_id, filename=filename)) + cp_filename = filename + return data_infos + + def get_ann_info(self, idx): + """Get OpenImages annotation by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + img_id = self.data_infos[idx]['img_id'] + bboxes = [] + labels = [] + bboxes_ignore = [] + labels_ignore = [] + is_occludeds = [] + is_truncateds = [] + is_group_ofs = [] + is_depictions = [] + is_insides = [] + for obj in self.ann_infos[img_id]: + label = int(obj['label']) + bbox = [ + float(obj['bbox'][0]), + float(obj['bbox'][1]), + float(obj['bbox'][2]), + float(obj['bbox'][3]) + ] + bboxes.append(bbox) + labels.append(label) + + # Other parameters + is_occludeds.append(obj['is_occluded']) + is_truncateds.append(obj['is_truncated']) + is_group_ofs.append(obj['is_group_of']) + is_depictions.append(obj['is_depiction']) + is_insides.append(obj['is_inside']) + if not bboxes: + bboxes = np.zeros((0, 4)) + labels = np.zeros((0, )) + else: + bboxes = np.array(bboxes) + labels = np.array(labels) + if not bboxes_ignore: + bboxes_ignore = np.zeros((0, 4)) + labels_ignore = np.zeros((0, )) + else: + bboxes_ignore = np.array(bboxes_ignore) + labels_ignore = np.array(labels_ignore) + + assert len(is_group_ofs) == len(labels) == len(bboxes) + gt_is_group_ofs = np.array(is_group_ofs, dtype=np.bool) + + # These parameters is not used yet. 
+ is_occludeds = np.array(is_occludeds, dtype=np.bool) + is_truncateds = np.array(is_truncateds, dtype=np.bool) + is_depictions = np.array(is_depictions, dtype=np.bool) + is_insides = np.array(is_insides, dtype=np.bool) + + ann = dict( + bboxes=bboxes.astype(np.float32), + labels=labels.astype(np.int64), + bboxes_ignore=bboxes_ignore.astype(np.float32), + labels_ignore=labels_ignore.astype(np.int64), + gt_is_group_ofs=gt_is_group_ofs, + is_occludeds=is_occludeds, + is_truncateds=is_truncateds, + is_depictions=is_depictions, + is_insides=is_insides) + + return ann + + def get_meta_from_file(self, meta_file=''): + """Get image metas from pkl file.""" + metas = mmcv.load( + meta_file, + file_format='pkl', + file_client_args=self.file_client_args) + assert len(metas) == len(self) + for i in range(len(metas)): + file_name = osp.split(metas[i]['filename'])[-1] + img_info = self.data_infos[i].get('img_info', None) + if img_info is not None: + assert file_name == osp.split(img_info['filename'])[-1] + else: + assert file_name == self.data_infos[i]['filename'] + hw = metas[i]['ori_shape'][:2] + self.test_img_shapes.append(hw) + + def get_meta_from_pipeline(self, results): + """Get image metas from pipeline.""" + self.temp_img_metas.extend(results['img_metas']) + if dist.is_available() and self.world_size > 1: + from mmdet.apis.test import collect_results_cpu + + self.test_img_metas = collect_results_cpu(self.temp_img_metas, + len(self)) + else: + self.test_img_metas = self.temp_img_metas + + def get_img_shape(self, metas): + """Set images original shape into data_infos.""" + assert len(metas) == len(self) + for i in range(len(metas)): + file_name = osp.split(metas[i].data['ori_filename'])[-1] + img_info = self.data_infos[i].get('img_info', None) + if img_info is not None: + assert file_name == osp.split(img_info['filename'])[-1] + else: + assert file_name == self.data_infos[i]['filename'] + hw = metas[i].data['ori_shape'][:2] + self.test_img_shapes.append(hw) + + def prepare_test_img(self, idx): + """Get testing data after pipeline.""" + img_info = self.data_infos[idx] + results = dict(img_info=img_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + results = self.pipeline(results) + if self.get_metas and self.load_from_pipeline: + self.get_meta_from_pipeline(results) + return results + + def _filter_imgs(self, min_size=32): + """Filter images too small.""" + if self.filter_empty_gt: + warnings.warn('OpenImageDatasets does not support ' + 'filtering empty gt images.') + valid_inds = [i for i in range(len(self))] + return valid_inds + + def _set_group_flag(self): + """Set flag according to image aspect ratio.""" + self.flag = np.zeros(len(self), dtype=np.uint8) + # TODO: set flag without width and height + + def get_relation_matrix(self, hierarchy_file): + """Get hierarchy for classes. + + Args: + hierarchy_file (sty): File path to the hierarchy for classes. + + Returns: + ndarray: The matrix of the corresponding relationship between + the parent class and the child class, of shape + (class_num, class_num). 
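+
+        Note:
+            ``class_label_tree[i, j] == 1`` means that class ``j`` is class
+            ``i`` itself or one of its ancestors in the hierarchy; the
+            diagonal is initialized by ``np.eye``.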
+ """ + + if self.data_root is not None: + if not osp.isabs(hierarchy_file): + hierarchy_file = osp.join(self.data_root, hierarchy_file) + with open(hierarchy_file, 'r') as f: + hierarchy = json.load(f) + class_num = len(self.CLASSES) + class_label_tree = np.eye(class_num, class_num) + class_label_tree = self._convert_hierarchy_tree( + hierarchy, class_label_tree) + return class_label_tree + + def _convert_hierarchy_tree(self, + hierarchy_map, + class_label_tree, + parents=[], + get_all_parents=True): + """Get matrix of the corresponding relationship between the parent + class and the child class. + + Args: + hierarchy_map (dict): Including label name and corresponding + subcategory. Keys of dicts are: + + - `LabeName` (str): Name of the label. + - `Subcategory` (dict | list): Corresponding subcategory(ies). + class_label_tree (ndarray): The matrix of the corresponding + relationship between the parent class and the child class, + of shape (class_num, class_num). + parents (list): Corresponding parent class. + get_all_parents (bool): Whether get all parent names. + Default: True + + Returns: + ndarray: The matrix of the corresponding relationship between + the parent class and the child class, of shape + (class_num, class_num). + """ + + if 'Subcategory' in hierarchy_map: + for node in hierarchy_map['Subcategory']: + if 'LabelName' in node: + children_name = node['LabelName'] + children_index = self.index_dict[children_name] + children = [children_index] + else: + continue + if len(parents) > 0: + for parent_index in parents: + if get_all_parents: + children.append(parent_index) + class_label_tree[children_index, parent_index] = 1 + + class_label_tree = self._convert_hierarchy_tree( + node, class_label_tree, parents=children) + + return class_label_tree + + def add_supercategory_ann(self, annotations): + """Add parent classes of the corresponding class of the ground truth + bboxes.""" + for i, ann in enumerate(annotations): + assert len(ann['labels']) == len(ann['bboxes']) == \ + len(ann['gt_is_group_ofs']) + gt_bboxes = [] + gt_is_group_ofs = [] + gt_labels = [] + for j in range(len(ann['labels'])): + label = ann['labels'][j] + bbox = ann['bboxes'][j] + is_group = ann['gt_is_group_ofs'][j] + label = np.where(self.class_label_tree[label])[0] + if len(label) > 1: + for k in range(len(label)): + gt_bboxes.append(bbox) + gt_is_group_ofs.append(is_group) + gt_labels.append(label[k]) + else: + gt_bboxes.append(bbox) + gt_is_group_ofs.append(is_group) + gt_labels.append(label[0]) + annotations[i] = dict( + bboxes=np.array(gt_bboxes).astype(np.float32), + labels=np.array(gt_labels).astype(np.int64), + bboxes_ignore=ann['bboxes_ignore'], + gt_is_group_ofs=np.array(gt_is_group_ofs).astype(np.bool)) + + return annotations + + def process_results(self, det_results, annotations, + image_level_annotations): + """Process results of the corresponding class of the detection bboxes. + + Note: It will choose to do the following two processing according to + the parameters: + + 1. Whether to add parent classes of the corresponding class of the + detection bboxes. + + 2. Whether to ignore the classes that unannotated on that image. 
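+
+        Both steps look up related classes in ``self.class_label_tree`` and
+        are switched by ``self.get_supercategory`` and ``self.filter_labels``
+        respectively.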
+ """ + if image_level_annotations is not None: + assert len(annotations) == \ + len(image_level_annotations) == \ + len(det_results) + else: + assert len(annotations) == len(det_results) + for i in range(len(det_results)): + results = copy.deepcopy(det_results[i]) + valid_classes = np.where( + np.array([[bbox.shape[0]] for bbox in det_results[i]]) != 0)[0] + if image_level_annotations is not None: + labels = annotations[i]['labels'] + image_level_labels = \ + image_level_annotations[i]['image_level_labels'] + allowed_labeles = np.unique( + np.append(labels, image_level_labels)) + else: + allowed_labeles = np.unique(annotations[i]['labels']) + + for valid_class in valid_classes: + det_cls = np.where(self.class_label_tree[valid_class])[0] + for index in det_cls: + if index in allowed_labeles and \ + index != valid_class and \ + self.get_supercategory: + det_results[i][index] = \ + np.concatenate((det_results[i][index], + results[valid_class])) + elif index not in allowed_labeles and self.filter_labels: + # Remove useless parts + det_results[i][index] = np.empty( + (0, 5)).astype(np.float32) + return det_results + + def load_image_label_from_csv(self, image_level_ann_file): + """Load image level annotations from csv style ann_file. + + Args: + image_level_ann_file (str): CSV style image level annotation + file path. + + Returns: + defaultdict[list[dict]]: Annotations where item of the defaultdict + indicates an image, each of which has (n) dicts. + Keys of dicts are: + + - `image_level_label` (int): Label id. + - `confidence` (float): Labels that are human-verified to be + present in an image have confidence = 1 (positive labels). + Labels that are human-verified to be absent from an image + have confidence = 0 (negative labels). Machine-generated + labels have fractional confidences, generally >= 0.5. + The higher the confidence, the smaller the chance for + the label to be a false positive. + """ + + item_lists = defaultdict(list) + with open(image_level_ann_file, 'r') as f: + reader = csv.reader(f) + for i, line in enumerate(reader): + if i == 0: + continue + img_id = line[0] + item_lists[img_id].append( + dict( + image_level_label=int(self.index_dict[line[2]]), + confidence=float(line[3]))) + return item_lists + + def get_image_level_ann(self, image_level_ann_file): + """Get OpenImages annotation by index. + + Args: + image_level_ann_file (str): CSV style image level annotation + file path. + + Returns: + dict: Annotation info of specified index. 
+ """ + + if hasattr(self.file_client, 'get_local_path'): + with self.file_client.get_local_path(image_level_ann_file) \ + as local_path: + item_lists = self.load_image_label_from_csv(local_path) + else: + item_lists = self.load_image_label_from_csv(image_level_ann_file) + image_level_annotations = [] + for i in range(len(self)): + img_info = self.data_infos[i].get('img_info', None) + if img_info is not None: + # for Open Images Challenges + img_id = osp.split(img_info['filename'])[-1][:-4] + else: + # for Open Images v6 + img_id = self.data_infos[i]['img_id'] + item_list = item_lists.get(img_id, None) + if item_list is not None: + image_level_labels = [] + confidences = [] + for obj in item_list: + image_level_label = int(obj['image_level_label']) + confidence = float(obj['confidence']) + + image_level_labels.append(image_level_label) + confidences.append(confidence) + + if not image_level_labels: + image_level_labels = np.zeros((0, )) + confidences = np.zeros((0, )) + else: + image_level_labels = np.array(image_level_labels) + confidences = np.array(confidences) + else: + image_level_labels = np.zeros((0, )) + confidences = np.zeros((0, )) + ann = dict( + image_level_labels=image_level_labels.astype(np.int64), + confidences=confidences.astype(np.float32)) + image_level_annotations.append(ann) + + return image_level_annotations + + def denormalize_gt_bboxes(self, annotations): + """Convert ground truth bboxes from relative position to absolute + position. + + Only used in evaluating time. + """ + assert len(self.test_img_shapes) == len(annotations) + for i in range(len(annotations)): + h, w = self.test_img_shapes[i] + annotations[i]['bboxes'][:, 0::2] *= w + annotations[i]['bboxes'][:, 1::2] *= h + return annotations + + def get_cat_ids(self, idx): + """Get category ids by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. + """ + return self.get_ann_info(idx)['labels'].astype(np.int).tolist() + + def evaluate(self, + results, + metric='mAP', + logger=None, + iou_thr=0.5, + ioa_thr=0.5, + scale_ranges=None, + denorm_gt_bbox=True, + use_group_of=True): + """Evaluate in OpenImages. + + Args: + results (list[list | tuple]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. Option is + 'mAP'. Default: 'mAP'. + logger (logging.Logger | str, optional): Logger used for printing + related information during evaluation. Default: None. + iou_thr (float | list[float]): IoU threshold. Default: 0.5. + ioa_thr (float | list[float]): IoA threshold. Default: 0.5. + scale_ranges (list[tuple], optional): Scale ranges for evaluating + mAP. If not specified, all bounding boxes would be included in + evaluation. Default: None + denorm_gt_bbox (bool): Whether to denorm ground truth bboxes from + relative position to absolute position. Default: True + use_group_of (bool): Whether consider group of groud truth bboxes + during evaluating. Default: True. + + Returns: + dict[str, float]: AP metrics. 
+ """ + + if not isinstance(metric, str): + assert len(metric) == 1 + metric = metric[0] + allowed_metrics = ['mAP'] + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + annotations = [self.get_ann_info(i) for i in range(len(self))] + + if self.load_image_level_labels: + image_level_annotations = \ + self.get_image_level_ann(self.image_level_ann_file) + else: + image_level_annotations = None + + # load metas from file + if self.get_metas and self.load_from_file: + assert self.meta_file.endswith( + 'pkl'), 'File name must be pkl suffix' + self.get_meta_from_file(self.meta_file) + # load metas from pipeline + else: + self.get_img_shape(self.test_img_metas) + + if len(self.test_img_shapes) > len(self): + self.test_img_shapes = self.test_img_shapes[:len(self)] + + if denorm_gt_bbox: + annotations = self.denormalize_gt_bboxes(annotations) + + # Reset test_image_metas, temp_image_metas and test_img_shapes + # to avoid potential error + self.temp_img_metas = [] + self.test_img_shapes = [] + self.test_img_metas = [] + if self.get_supercategory: + annotations = self.add_supercategory_ann(annotations) + + results = self.process_results(results, annotations, + image_level_annotations) + if use_group_of: + assert ioa_thr is not None, \ + 'ioa_thr must have value when using group_of in evaluation.' + + eval_results = OrderedDict() + iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr + ioa_thrs = [ioa_thr] if isinstance(ioa_thr, float) or ioa_thr is None \ + else ioa_thr + + # get dataset type + if len(self.CLASSES) == 500: + ds_name = 'oid_challenge' + elif len(self.CLASSES) == 601: + ds_name = 'oid_v6' + else: + ds_name = self.CLASSES + warnings.warn('Cannot infer dataset type from the length of the ' + 'classes. Set `oid_v6` as dataset type.') + + if metric == 'mAP': + assert isinstance(iou_thrs, list) and isinstance(ioa_thrs, list) + assert len(ioa_thrs) == len(iou_thrs) + mean_aps = [] + for iou_thr, ioa_thr in zip(iou_thrs, ioa_thrs): + print_log(f'\n{"-" * 15}iou_thr, ioa_thr: {iou_thr}, {ioa_thr}' + f'{"-" * 15}') + mean_ap, _ = eval_map( + results, + annotations, + scale_ranges=scale_ranges, + iou_thr=iou_thr, + ioa_thr=ioa_thr, + dataset=ds_name, + logger=logger, + use_group_of=use_group_of) + mean_aps.append(mean_ap) + eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) + eval_results['mAP'] = sum(mean_aps) / len(mean_aps) + return eval_results + + +@DATASETS.register_module() +class OpenImagesChallengeDataset(OpenImagesDataset): + """Open Images Challenge dataset for detection.""" + + def __init__(self, ann_file, **kwargs): + assert ann_file.endswith('txt') + super(OpenImagesChallengeDataset, self).__init__( + ann_file=ann_file, **kwargs) + + def get_classes_from_csv(self, label_file): + """Get classes name from file. + + Args: + label_file (str): File path of the label description file that + maps the classes names in MID format to their short + descriptions. + + Returns: + list: Class name of OpenImages. 
+ """ + + label_list = [] + id_list = [] + with open(label_file, 'r') as f: + reader = csv.reader(f) + for line in reader: + label_name = line[0] + label_id = int(line[2]) + + label_list.append(line[1]) + id_list.append(label_id) + self.index_dict[label_name] = label_id - 1 + + indexes = np.argsort(id_list) + classes_names = [] + for index in indexes: + classes_names.append(label_list[index]) + return classes_names + + def load_annotations(self, ann_file): + """Load annotation from annotation file.""" + with open(ann_file) as f: + lines = f.readlines() + i = 0 + ann_infos = [] + while i < len(lines): + bboxes = [] + labels = [] + is_group_ofs = [] + filename = lines[i].rstrip() + i += 2 + img_gt_size = int(lines[i]) + i += 1 + for j in range(img_gt_size): + sp = lines[i + j].split() + bboxes.append( + [float(sp[1]), + float(sp[2]), + float(sp[3]), + float(sp[4])]) + labels.append(int(sp[0]) - 1) # labels begin from 1 + is_group_ofs.append(True if int(sp[5]) == 1 else False) + i += img_gt_size + + gt_bboxes = np.array(bboxes, dtype=np.float32) + gt_labels = np.array(labels, dtype=np.int64) + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + gt_is_group_ofs = np.array(is_group_ofs, dtype=np.bool) + + img_info = dict(filename=filename) + ann_info = dict( + bboxes=gt_bboxes, + labels=gt_labels, + bboxes_ignore=gt_bboxes_ignore, + gt_is_group_ofs=gt_is_group_ofs) + ann_infos.append(dict(img_info=img_info, ann_info=ann_info)) + + return ann_infos + + def prepare_train_img(self, idx): + """Get training data and annotations after pipeline.""" + ann_info = self.data_infos[idx] + results = dict( + img_info=ann_info['img_info'], + ann_info=ann_info['ann_info'], + ) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + def prepare_test_img(self, idx): + """Get testing data after pipeline.""" + ann_info = self.data_infos[idx] + results = dict(img_info=ann_info['img_info']) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + + results = self.pipeline(results) + if self.get_metas and self.load_from_pipeline: + self.get_meta_from_pipeline(results) + return results + + def get_relation_matrix(self, hierarchy_file): + """Get hierarchy for classes. + + Args: + hierarchy_file (str): File path to the hierarchy for classes. + + Returns: + ndarray: The matrix of the corresponding + relationship between the parent class and the child class, + of shape (class_num, class_num). + """ + class_label_tree = np.load(hierarchy_file, allow_pickle=True) + return class_label_tree[1:, 1:] + + def get_ann_info(self, idx): + """Get OpenImages annotation by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + # avoid some potential error + data_infos = copy.deepcopy(self.data_infos[idx]['ann_info']) + return data_infos + + def load_image_label_from_csv(self, image_level_ann_file): + """Load image level annotations from csv style ann_file. + + Args: + image_level_ann_file (str): CSV style image level annotation + file path. + + Returns: + defaultdict[list[dict]]: Annotations where item of the defaultdict + indicates an image, each of which has (n) dicts. + Keys of dicts are: + + - `image_level_label` (int): of shape 1. + - `confidence` (float): of shape 1. 
+ """ + + item_lists = defaultdict(list) + with open(image_level_ann_file, 'r') as f: + reader = csv.reader(f) + i = -1 + for line in reader: + i += 1 + if i == 0: + continue + else: + img_id = line[0] + label_id = line[1] + assert label_id in self.index_dict + image_level_label = int(self.index_dict[label_id]) + confidence = float(line[2]) + item_lists[img_id].append( + dict( + image_level_label=image_level_label, + confidence=confidence)) + return item_lists diff --git a/downstream/mmdetection/mmdet/datasets/pipelines/__init__.py b/downstream/mmdetection/mmdet/datasets/pipelines/__init__.py new file mode 100644 index 0000000..8260da6 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/__init__.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, + ContrastTransform, EqualizeTransform, Rotate, Shear, + Translate) +from .compose import Compose +from .formatting import (Collect, DefaultFormatBundle, ImageToTensor, + ToDataContainer, ToTensor, Transpose, to_tensor) +from .instaboost import InstaBoost +from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromFile, + LoadImageFromWebcam, LoadMultiChannelImageFromFiles, + LoadPanopticAnnotations, LoadProposals) +from .test_time_aug import MultiScaleFlipAug +from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop, + MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion, + RandomAffine, RandomCenterCropPad, RandomCrop, + RandomFlip, RandomShift, Resize, SegRescale, + YOLOXHSVRandomAug) + +__all__ = [ + 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', + 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations', + 'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations', + 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'FilterAnnotations', + 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', + 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand', + 'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad', + 'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform', + 'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform', + 'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine', + 'YOLOXHSVRandomAug', 'CopyPaste' +] diff --git a/downstream/mmdetection/mmdet/datasets/pipelines/auto_augment.py b/downstream/mmdetection/mmdet/datasets/pipelines/auto_augment.py new file mode 100644 index 0000000..b0ff67d --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/auto_augment.py @@ -0,0 +1,894 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy + +import cv2 +import mmcv +import numpy as np + +from ..builder import PIPELINES +from .compose import Compose + +_MAX_LEVEL = 10 + + +def level_to_value(level, max_value): + """Map from level to values based on max_value.""" + return (level / _MAX_LEVEL) * max_value + + +def enhance_level_to_value(level, a=1.8, b=0.1): + """Map from level to values.""" + return (level / _MAX_LEVEL) * a + b + + +def random_negative(value, random_negative_prob): + """Randomly negate value based on random_negative_prob.""" + return -value if np.random.rand() < random_negative_prob else value + + +def bbox2fields(): + """The key correspondence from bboxes to labels, masks and + segmentations.""" + bbox2label = { + 'gt_bboxes': 'gt_labels', + 'gt_bboxes_ignore': 'gt_labels_ignore' + } + bbox2mask = { + 'gt_bboxes': 'gt_masks', + 'gt_bboxes_ignore': 'gt_masks_ignore' + } + bbox2seg = { + 'gt_bboxes': 'gt_semantic_seg', + } + return bbox2label, bbox2mask, bbox2seg + + +@PIPELINES.register_module() +class AutoAugment: + """Auto augmentation. + + This data augmentation is proposed in `Learning Data Augmentation + Strategies for Object Detection `_. + + TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms + + Args: + policies (list[list[dict]]): The policies of auto augmentation. Each + policy in ``policies`` is a specific augmentation policy, and is + composed by several augmentations (dict). When AutoAugment is + called, a random policy in ``policies`` will be selected to + augment images. + + Examples: + >>> replace = (104, 116, 124) + >>> policies = [ + >>> [ + >>> dict(type='Sharpness', prob=0.0, level=8), + >>> dict( + >>> type='Shear', + >>> prob=0.4, + >>> level=0, + >>> replace=replace, + >>> axis='x') + >>> ], + >>> [ + >>> dict( + >>> type='Rotate', + >>> prob=0.6, + >>> level=10, + >>> replace=replace), + >>> dict(type='Color', prob=1.0, level=6) + >>> ] + >>> ] + >>> augmentation = AutoAugment(policies) + >>> img = np.ones(100, 100, 3) + >>> gt_bboxes = np.ones(10, 4) + >>> results = dict(img=img, gt_bboxes=gt_bboxes) + >>> results = augmentation(results) + """ + + def __init__(self, policies): + assert isinstance(policies, list) and len(policies) > 0, \ + 'Policies must be a non-empty list.' + for policy in policies: + assert isinstance(policy, list) and len(policy) > 0, \ + 'Each policy in policies must be a non-empty list.' + for augment in policy: + assert isinstance(augment, dict) and 'type' in augment, \ + 'Each specific augmentation must be a dict with key' \ + ' "type".' + + self.policies = copy.deepcopy(policies) + self.transforms = [Compose(policy) for policy in self.policies] + + def __call__(self, results): + transform = np.random.choice(self.transforms) + return transform(results) + + def __repr__(self): + return f'{self.__class__.__name__}(policies={self.policies})' + + +@PIPELINES.register_module() +class Shear: + """Apply Shear Transformation to image (and its corresponding bbox, mask, + segmentation). + + Args: + level (int | float): The level should be in range [0,_MAX_LEVEL]. + img_fill_val (int | float | tuple): The filled values for image border. + If float, the same fill value will be used for all the three + channels of image. If tuple, the should be 3 elements. + seg_ignore_label (int): The fill value used for segmentation map. + Note this value must equals ``ignore_label`` in ``semantic_head`` + of the corresponding config. Default 255. + prob (float): The probability for performing Shear and should be in + range [0, 1]. 
+ direction (str): The direction for shear, either "horizontal" + or "vertical". + max_shear_magnitude (float): The maximum magnitude for Shear + transformation. + random_negative_prob (float): The probability that turns the + offset negative. Should be in range [0,1] + interpolation (str): Same as in :func:`mmcv.imshear`. + """ + + def __init__(self, + level, + img_fill_val=128, + seg_ignore_label=255, + prob=0.5, + direction='horizontal', + max_shear_magnitude=0.3, + random_negative_prob=0.5, + interpolation='bilinear'): + assert isinstance(level, (int, float)), 'The level must be type ' \ + f'int or float, got {type(level)}.' + assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \ + f'[0,{_MAX_LEVEL}], got {level}.' + if isinstance(img_fill_val, (float, int)): + img_fill_val = tuple([float(img_fill_val)] * 3) + elif isinstance(img_fill_val, tuple): + assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \ + f'have 3 elements. got {len(img_fill_val)}.' + img_fill_val = tuple([float(val) for val in img_fill_val]) + else: + raise ValueError( + 'img_fill_val must be float or tuple with 3 elements.') + assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \ + 'elements of img_fill_val should between range [0,255].' \ + f'got {img_fill_val}.' + assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \ + f'range [0,1]. got {prob}.' + assert direction in ('horizontal', 'vertical'), 'direction must ' \ + f'in be either "horizontal" or "vertical". got {direction}.' + assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \ + f'should be type float. got {type(max_shear_magnitude)}.' + assert 0. <= max_shear_magnitude <= 1., 'Defaultly ' \ + 'max_shear_magnitude should be in range [0,1]. ' \ + f'got {max_shear_magnitude}.' + self.level = level + self.magnitude = level_to_value(level, max_shear_magnitude) + self.img_fill_val = img_fill_val + self.seg_ignore_label = seg_ignore_label + self.prob = prob + self.direction = direction + self.max_shear_magnitude = max_shear_magnitude + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def _shear_img(self, + results, + magnitude, + direction='horizontal', + interpolation='bilinear'): + """Shear the image. + + Args: + results (dict): Result dict from loading pipeline. + magnitude (int | float): The magnitude used for shear. + direction (str): The direction for shear, either "horizontal" + or "vertical". + interpolation (str): Same as in :func:`mmcv.imshear`. 
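# Editor's note: a quick numeric sketch, not part of the patch, of the level
# mappings defined at the top of this module: ``level_to_value`` scales
# linearly with the level, while ``enhance_level_to_value`` maps levels 0..10
# onto the factor range 0.1..1.9 used by the colour, brightness and contrast
# transforms.
_MAX_LEVEL = 10

def level_to_value(level, max_value):
    return (level / _MAX_LEVEL) * max_value

def enhance_level_to_value(level, a=1.8, b=0.1):
    return (level / _MAX_LEVEL) * a + b

print(level_to_value(5, 0.3))                # 0.15, the Shear magnitude at level 5
print(enhance_level_to_value(10))            # 1.9, the strongest enhancement factor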
+ """ + for key in results.get('img_fields', ['img']): + img = results[key] + img_sheared = mmcv.imshear( + img, + magnitude, + direction, + border_value=self.img_fill_val, + interpolation=interpolation) + results[key] = img_sheared.astype(img.dtype) + results['img_shape'] = results[key].shape + + def _shear_bboxes(self, results, magnitude): + """Shear the bboxes.""" + h, w, c = results['img_shape'] + if self.direction == 'horizontal': + shear_matrix = np.stack([[1, magnitude], + [0, 1]]).astype(np.float32) # [2, 2] + else: + shear_matrix = np.stack([[1, 0], [magnitude, + 1]]).astype(np.float32) + for key in results.get('bbox_fields', []): + min_x, min_y, max_x, max_y = np.split( + results[key], results[key].shape[-1], axis=-1) + coordinates = np.stack([[min_x, min_y], [max_x, min_y], + [min_x, max_y], + [max_x, max_y]]) # [4, 2, nb_box, 1] + coordinates = coordinates[..., 0].transpose( + (2, 1, 0)).astype(np.float32) # [nb_box, 2, 4] + new_coords = np.matmul(shear_matrix[None, :, :], + coordinates) # [nb_box, 2, 4] + min_x = np.min(new_coords[:, 0, :], axis=-1) + min_y = np.min(new_coords[:, 1, :], axis=-1) + max_x = np.max(new_coords[:, 0, :], axis=-1) + max_y = np.max(new_coords[:, 1, :], axis=-1) + min_x = np.clip(min_x, a_min=0, a_max=w) + min_y = np.clip(min_y, a_min=0, a_max=h) + max_x = np.clip(max_x, a_min=min_x, a_max=w) + max_y = np.clip(max_y, a_min=min_y, a_max=h) + results[key] = np.stack([min_x, min_y, max_x, max_y], + axis=-1).astype(results[key].dtype) + + def _shear_masks(self, + results, + magnitude, + direction='horizontal', + fill_val=0, + interpolation='bilinear'): + """Shear the masks.""" + h, w, c = results['img_shape'] + for key in results.get('mask_fields', []): + masks = results[key] + results[key] = masks.shear((h, w), + magnitude, + direction, + border_value=fill_val, + interpolation=interpolation) + + def _shear_seg(self, + results, + magnitude, + direction='horizontal', + fill_val=255, + interpolation='bilinear'): + """Shear the segmentation maps.""" + for key in results.get('seg_fields', []): + seg = results[key] + results[key] = mmcv.imshear( + seg, + magnitude, + direction, + border_value=fill_val, + interpolation=interpolation).astype(seg.dtype) + + def _filter_invalid(self, results, min_bbox_size=0): + """Filter bboxes and corresponding masks too small after shear + augmentation.""" + bbox2label, bbox2mask, _ = bbox2fields() + for key in results.get('bbox_fields', []): + bbox_w = results[key][:, 2] - results[key][:, 0] + bbox_h = results[key][:, 3] - results[key][:, 1] + valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size) + valid_inds = np.nonzero(valid_inds)[0] + results[key] = results[key][valid_inds] + # label fields. e.g. gt_labels and gt_labels_ignore + label_key = bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + # mask fields, e.g. gt_masks and gt_masks_ignore + mask_key = bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][valid_inds] + + def __call__(self, results): + """Call function to shear images, bounding boxes, masks and semantic + segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Sheared results. + """ + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + self._shear_img(results, magnitude, self.direction, self.interpolation) + self._shear_bboxes(results, magnitude) + # fill_val set to 0 for background of mask. 
+ self._shear_masks( + results, + magnitude, + self.direction, + fill_val=0, + interpolation=self.interpolation) + self._shear_seg( + results, + magnitude, + self.direction, + fill_val=self.seg_ignore_label, + interpolation=self.interpolation) + self._filter_invalid(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(level={self.level}, ' + repr_str += f'img_fill_val={self.img_fill_val}, ' + repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class Rotate: + """Apply Rotate Transformation to image (and its corresponding bbox, mask, + segmentation). + + Args: + level (int | float): The level should be in range (0,_MAX_LEVEL]. + scale (int | float): Isotropic scale factor. Same in + ``mmcv.imrotate``. + center (int | float | tuple[float]): Center point (w, h) of the + rotation in the source image. If None, the center of the + image will be used. Same in ``mmcv.imrotate``. + img_fill_val (int | float | tuple): The fill value for image border. + If float, the same value will be used for all the three + channels of image. If tuple, the should be 3 elements (e.g. + equals the number of channels for image). + seg_ignore_label (int): The fill value used for segmentation map. + Note this value must equals ``ignore_label`` in ``semantic_head`` + of the corresponding config. Default 255. + prob (float): The probability for perform transformation and + should be in range 0 to 1. + max_rotate_angle (int | float): The maximum angles for rotate + transformation. + random_negative_prob (float): The probability that turns the + offset negative. + """ + + def __init__(self, + level, + scale=1, + center=None, + img_fill_val=128, + seg_ignore_label=255, + prob=0.5, + max_rotate_angle=30, + random_negative_prob=0.5): + assert isinstance(level, (int, float)), \ + f'The level must be type int or float. got {type(level)}.' + assert 0 <= level <= _MAX_LEVEL, \ + f'The level should be in range (0,{_MAX_LEVEL}]. got {level}.' + assert isinstance(scale, (int, float)), \ + f'The scale must be type int or float. got type {type(scale)}.' + if isinstance(center, (int, float)): + center = (center, center) + elif isinstance(center, tuple): + assert len(center) == 2, 'center with type tuple must have '\ + f'2 elements. got {len(center)} elements.' + else: + assert center is None, 'center must be None or type int, '\ + f'float or tuple, got type {type(center)}.' + if isinstance(img_fill_val, (float, int)): + img_fill_val = tuple([float(img_fill_val)] * 3) + elif isinstance(img_fill_val, tuple): + assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\ + f'have 3 elements. got {len(img_fill_val)}.' + img_fill_val = tuple([float(val) for val in img_fill_val]) + else: + raise ValueError( + 'img_fill_val must be float or tuple with 3 elements.') + assert np.all([0 <= val <= 255 for val in img_fill_val]), \ + 'all elements of img_fill_val should between range [0,255]. '\ + f'got {img_fill_val}.' + assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\ + f'got {prob}.' + assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\ + f'should be type int or float. got type {type(max_rotate_angle)}.' 
+ self.level = level + self.scale = scale + # Rotation angle in degrees. Positive values mean + # clockwise rotation. + self.angle = level_to_value(level, max_rotate_angle) + self.center = center + self.img_fill_val = img_fill_val + self.seg_ignore_label = seg_ignore_label + self.prob = prob + self.max_rotate_angle = max_rotate_angle + self.random_negative_prob = random_negative_prob + + def _rotate_img(self, results, angle, center=None, scale=1.0): + """Rotate the image. + + Args: + results (dict): Result dict from loading pipeline. + angle (float): Rotation angle in degrees, positive values + mean clockwise rotation. Same in ``mmcv.imrotate``. + center (tuple[float], optional): Center point (w, h) of the + rotation. Same in ``mmcv.imrotate``. + scale (int | float): Isotropic scale factor. Same in + ``mmcv.imrotate``. + """ + for key in results.get('img_fields', ['img']): + img = results[key].copy() + img_rotated = mmcv.imrotate( + img, angle, center, scale, border_value=self.img_fill_val) + results[key] = img_rotated.astype(img.dtype) + results['img_shape'] = results[key].shape + + def _rotate_bboxes(self, results, rotate_matrix): + """Rotate the bboxes.""" + h, w, c = results['img_shape'] + for key in results.get('bbox_fields', []): + min_x, min_y, max_x, max_y = np.split( + results[key], results[key].shape[-1], axis=-1) + coordinates = np.stack([[min_x, min_y], [max_x, min_y], + [min_x, max_y], + [max_x, max_y]]) # [4, 2, nb_bbox, 1] + # pad 1 to convert from format [x, y] to homogeneous + # coordinates format [x, y, 1] + coordinates = np.concatenate( + (coordinates, + np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)), + axis=1) # [4, 3, nb_bbox, 1] + coordinates = coordinates.transpose( + (2, 0, 1, 3)) # [nb_bbox, 4, 3, 1] + rotated_coords = np.matmul(rotate_matrix, + coordinates) # [nb_bbox, 4, 2, 1] + rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2] + min_x, min_y = np.min( + rotated_coords[:, :, 0], axis=1), np.min( + rotated_coords[:, :, 1], axis=1) + max_x, max_y = np.max( + rotated_coords[:, :, 0], axis=1), np.max( + rotated_coords[:, :, 1], axis=1) + min_x, min_y = np.clip( + min_x, a_min=0, a_max=w), np.clip( + min_y, a_min=0, a_max=h) + max_x, max_y = np.clip( + max_x, a_min=min_x, a_max=w), np.clip( + max_y, a_min=min_y, a_max=h) + results[key] = np.stack([min_x, min_y, max_x, max_y], + axis=-1).astype(results[key].dtype) + + def _rotate_masks(self, + results, + angle, + center=None, + scale=1.0, + fill_val=0): + """Rotate the masks.""" + h, w, c = results['img_shape'] + for key in results.get('mask_fields', []): + masks = results[key] + results[key] = masks.rotate((h, w), angle, center, scale, fill_val) + + def _rotate_seg(self, + results, + angle, + center=None, + scale=1.0, + fill_val=255): + """Rotate the segmentation map.""" + for key in results.get('seg_fields', []): + seg = results[key].copy() + results[key] = mmcv.imrotate( + seg, angle, center, scale, + border_value=fill_val).astype(seg.dtype) + + def _filter_invalid(self, results, min_bbox_size=0): + """Filter bboxes and corresponding masks too small after rotate + augmentation.""" + bbox2label, bbox2mask, _ = bbox2fields() + for key in results.get('bbox_fields', []): + bbox_w = results[key][:, 2] - results[key][:, 0] + bbox_h = results[key][:, 3] - results[key][:, 1] + valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size) + valid_inds = np.nonzero(valid_inds)[0] + results[key] = results[key][valid_inds] + # label fields. e.g. 
gt_labels and gt_labels_ignore + label_key = bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + # mask fields, e.g. gt_masks and gt_masks_ignore + mask_key = bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][valid_inds] + + def __call__(self, results): + """Call function to rotate images, bounding boxes, masks and semantic + segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Rotated results. + """ + if np.random.rand() > self.prob: + return results + h, w = results['img'].shape[:2] + center = self.center + if center is None: + center = ((w - 1) * 0.5, (h - 1) * 0.5) + angle = random_negative(self.angle, self.random_negative_prob) + self._rotate_img(results, angle, center, self.scale) + rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale) + self._rotate_bboxes(results, rotate_matrix) + self._rotate_masks(results, angle, center, self.scale, fill_val=0) + self._rotate_seg( + results, angle, center, self.scale, fill_val=self.seg_ignore_label) + self._filter_invalid(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(level={self.level}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'center={self.center}, ' + repr_str += f'img_fill_val={self.img_fill_val}, ' + repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'max_rotate_angle={self.max_rotate_angle}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class Translate: + """Translate the images, bboxes, masks and segmentation maps horizontally + or vertically. + + Args: + level (int | float): The level for Translate and should be in + range [0,_MAX_LEVEL]. + prob (float): The probability for performing translation and + should be in range [0, 1]. + img_fill_val (int | float | tuple): The filled value for image + border. If float, the same fill value will be used for all + the three channels of image. If tuple, the should be 3 + elements (e.g. equals the number of channels for image). + seg_ignore_label (int): The fill value used for segmentation map. + Note this value must equals ``ignore_label`` in ``semantic_head`` + of the corresponding config. Default 255. + direction (str): The translate direction, either "horizontal" + or "vertical". + max_translate_offset (int | float): The maximum pixel's offset for + Translate. + random_negative_prob (float): The probability that turns the + offset negative. + min_size (int | float): The minimum pixel for filtering + invalid bboxes after the translation. + """ + + def __init__(self, + level, + prob=0.5, + img_fill_val=128, + seg_ignore_label=255, + direction='horizontal', + max_translate_offset=250., + random_negative_prob=0.5, + min_size=0): + assert isinstance(level, (int, float)), \ + 'The level must be type int or float.' + assert 0 <= level <= _MAX_LEVEL, \ + 'The level used for calculating Translate\'s offset should be ' \ + 'in range [0,_MAX_LEVEL]' + assert 0 <= prob <= 1.0, \ + 'The probability of translation should be in range [0, 1].' + if isinstance(img_fill_val, (float, int)): + img_fill_val = tuple([float(img_fill_val)] * 3) + elif isinstance(img_fill_val, tuple): + assert len(img_fill_val) == 3, \ + 'img_fill_val as tuple must have 3 elements.' 
+ img_fill_val = tuple([float(val) for val in img_fill_val]) + else: + raise ValueError('img_fill_val must be type float or tuple.') + assert np.all([0 <= val <= 255 for val in img_fill_val]), \ + 'all elements of img_fill_val should between range [0,255].' + assert direction in ('horizontal', 'vertical'), \ + 'direction should be "horizontal" or "vertical".' + assert isinstance(max_translate_offset, (int, float)), \ + 'The max_translate_offset must be type int or float.' + # the offset used for translation + self.offset = int(level_to_value(level, max_translate_offset)) + self.level = level + self.prob = prob + self.img_fill_val = img_fill_val + self.seg_ignore_label = seg_ignore_label + self.direction = direction + self.max_translate_offset = max_translate_offset + self.random_negative_prob = random_negative_prob + self.min_size = min_size + + def _translate_img(self, results, offset, direction='horizontal'): + """Translate the image. + + Args: + results (dict): Result dict from loading pipeline. + offset (int | float): The offset for translate. + direction (str): The translate direction, either "horizontal" + or "vertical". + """ + for key in results.get('img_fields', ['img']): + img = results[key].copy() + results[key] = mmcv.imtranslate( + img, offset, direction, self.img_fill_val).astype(img.dtype) + results['img_shape'] = results[key].shape + + def _translate_bboxes(self, results, offset): + """Shift bboxes horizontally or vertically, according to offset.""" + h, w, c = results['img_shape'] + for key in results.get('bbox_fields', []): + min_x, min_y, max_x, max_y = np.split( + results[key], results[key].shape[-1], axis=-1) + if self.direction == 'horizontal': + min_x = np.maximum(0, min_x + offset) + max_x = np.minimum(w, max_x + offset) + elif self.direction == 'vertical': + min_y = np.maximum(0, min_y + offset) + max_y = np.minimum(h, max_y + offset) + + # the boxes translated outside of image will be filtered along with + # the corresponding masks, by invoking ``_filter_invalid``. + results[key] = np.concatenate([min_x, min_y, max_x, max_y], + axis=-1) + + def _translate_masks(self, + results, + offset, + direction='horizontal', + fill_val=0): + """Translate masks horizontally or vertically.""" + h, w, c = results['img_shape'] + for key in results.get('mask_fields', []): + masks = results[key] + results[key] = masks.translate((h, w), offset, direction, fill_val) + + def _translate_seg(self, + results, + offset, + direction='horizontal', + fill_val=255): + """Translate segmentation maps horizontally or vertically.""" + for key in results.get('seg_fields', []): + seg = results[key].copy() + results[key] = mmcv.imtranslate(seg, offset, direction, + fill_val).astype(seg.dtype) + + def _filter_invalid(self, results, min_size=0): + """Filter bboxes and masks too small or translated out of image.""" + bbox2label, bbox2mask, _ = bbox2fields() + for key in results.get('bbox_fields', []): + bbox_w = results[key][:, 2] - results[key][:, 0] + bbox_h = results[key][:, 3] - results[key][:, 1] + valid_inds = (bbox_w > min_size) & (bbox_h > min_size) + valid_inds = np.nonzero(valid_inds)[0] + results[key] = results[key][valid_inds] + # label fields. e.g. gt_labels and gt_labels_ignore + label_key = bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + # mask fields, e.g. 
gt_masks and gt_masks_ignore + mask_key = bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][valid_inds] + return results + + def __call__(self, results): + """Call function to translate images, bounding boxes, masks and + semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Translated results. + """ + if np.random.rand() > self.prob: + return results + offset = random_negative(self.offset, self.random_negative_prob) + self._translate_img(results, offset, self.direction) + self._translate_bboxes(results, offset) + # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks. + self._translate_masks(results, offset, self.direction) + # fill_val set to ``seg_ignore_label`` for the ignored value + # of segmentation map. + self._translate_seg( + results, offset, self.direction, fill_val=self.seg_ignore_label) + self._filter_invalid(results, min_size=self.min_size) + return results + + +@PIPELINES.register_module() +class ColorTransform: + """Apply Color transformation to image. The bboxes, masks, and + segmentations are not modified. + + Args: + level (int | float): Should be in range [0,_MAX_LEVEL]. + prob (float): The probability for performing Color transformation. + """ + + def __init__(self, level, prob=0.5): + assert isinstance(level, (int, float)), \ + 'The level must be type int or float.' + assert 0 <= level <= _MAX_LEVEL, \ + 'The level should be in range [0,_MAX_LEVEL].' + assert 0 <= prob <= 1.0, \ + 'The probability should be in range [0,1].' + self.level = level + self.prob = prob + self.factor = enhance_level_to_value(level) + + def _adjust_color_img(self, results, factor=1.0): + """Apply Color transformation to image.""" + for key in results.get('img_fields', ['img']): + # NOTE defaultly the image should be BGR format + img = results[key] + results[key] = mmcv.adjust_color(img, factor).astype(img.dtype) + + def __call__(self, results): + """Call function for Color transformation. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Colored results. + """ + if np.random.rand() > self.prob: + return results + self._adjust_color_img(results, self.factor) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(level={self.level}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class EqualizeTransform: + """Apply Equalize transformation to image. The bboxes, masks and + segmentations are not modified. + + Args: + prob (float): The probability for performing Equalize transformation. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, \ + 'The probability should be in range [0,1].' + self.prob = prob + + def _imequalize(self, results): + """Equalizes the histogram of one image.""" + for key in results.get('img_fields', ['img']): + img = results[key] + results[key] = mmcv.imequalize(img).astype(img.dtype) + + def __call__(self, results): + """Call function for Equalize transformation. + + Args: + results (dict): Results dict from loading pipeline. + + Returns: + dict: Results after the transformation. + """ + if np.random.rand() > self.prob: + return results + self._imequalize(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + + +@PIPELINES.register_module() +class BrightnessTransform: + """Apply Brightness transformation to image. The bboxes, masks and + segmentations are not modified. 
+ + Args: + level (int | float): Should be in range [0,_MAX_LEVEL]. + prob (float): The probability for performing Brightness transformation. + """ + + def __init__(self, level, prob=0.5): + assert isinstance(level, (int, float)), \ + 'The level must be type int or float.' + assert 0 <= level <= _MAX_LEVEL, \ + 'The level should be in range [0,_MAX_LEVEL].' + assert 0 <= prob <= 1.0, \ + 'The probability should be in range [0,1].' + self.level = level + self.prob = prob + self.factor = enhance_level_to_value(level) + + def _adjust_brightness_img(self, results, factor=1.0): + """Adjust the brightness of image.""" + for key in results.get('img_fields', ['img']): + img = results[key] + results[key] = mmcv.adjust_brightness(img, + factor).astype(img.dtype) + + def __call__(self, results): + """Call function for Brightness transformation. + + Args: + results (dict): Results dict from loading pipeline. + + Returns: + dict: Results after the transformation. + """ + if np.random.rand() > self.prob: + return results + self._adjust_brightness_img(results, self.factor) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(level={self.level}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class ContrastTransform: + """Apply Contrast transformation to image. The bboxes, masks and + segmentations are not modified. + + Args: + level (int | float): Should be in range [0,_MAX_LEVEL]. + prob (float): The probability for performing Contrast transformation. + """ + + def __init__(self, level, prob=0.5): + assert isinstance(level, (int, float)), \ + 'The level must be type int or float.' + assert 0 <= level <= _MAX_LEVEL, \ + 'The level should be in range [0,_MAX_LEVEL].' + assert 0 <= prob <= 1.0, \ + 'The probability should be in range [0,1].' + self.level = level + self.prob = prob + self.factor = enhance_level_to_value(level) + + def _adjust_contrast_img(self, results, factor=1.0): + """Adjust the image contrast.""" + for key in results.get('img_fields', ['img']): + img = results[key] + results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype) + + def __call__(self, results): + """Call function for Contrast transformation. + + Args: + results (dict): Results dict from loading pipeline. + + Returns: + dict: Results after the transformation. + """ + if np.random.rand() > self.prob: + return results + self._adjust_contrast_img(results, self.factor) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(level={self.level}, ' + repr_str += f'prob={self.prob})' + return repr_str diff --git a/downstream/mmdetection/mmdet/datasets/pipelines/compose.py b/downstream/mmdetection/mmdet/datasets/pipelines/compose.py new file mode 100644 index 0000000..d759220 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/compose.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import collections + +from mmcv.utils import build_from_cfg + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class Compose: + """Compose multiple transforms sequentially. + + Args: + transforms (Sequence[dict | callable]): Sequence of transform object or + config dict to be composed. 
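# Editor's note: a small sketch, not part of the patch, of the two transform
# forms ``Compose`` accepts, as described above: a config dict built through
# the PIPELINES registry, and a plain callable that is used as-is. It assumes
# mmdet is importable; ``add_flag`` is a made-up helper.
from mmdet.datasets.pipelines import Compose

def add_flag(results):
    results['custom_flag'] = True
    return results

pipeline = Compose([dict(type='LoadImageFromFile'), add_flag])
print(pipeline)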
+ """ + + def __init__(self, transforms): + assert isinstance(transforms, collections.abc.Sequence) + self.transforms = [] + for transform in transforms: + if isinstance(transform, dict): + transform = build_from_cfg(transform, PIPELINES) + self.transforms.append(transform) + elif callable(transform): + self.transforms.append(transform) + else: + raise TypeError('transform must be callable or a dict') + + def __call__(self, data): + """Call function to apply transforms sequentially. + + Args: + data (dict): A result dict contains the data to transform. + + Returns: + dict: Transformed data. + """ + + for t in self.transforms: + data = t(data) + if data is None: + return None + return data + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + str_ = t.__repr__() + if 'Compose(' in str_: + str_ = str_.replace('\n', '\n ') + format_string += '\n' + format_string += f' {str_}' + format_string += '\n)' + return format_string diff --git a/downstream/mmdetection/mmdet/datasets/pipelines/formating.py b/downstream/mmdetection/mmdet/datasets/pipelines/formating.py new file mode 100644 index 0000000..3b3e45a --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/formating.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# flake8: noqa +import warnings + +from .formatting import * + +warnings.warn('DeprecationWarning: mmdet.datasets.pipelines.formating will be ' + 'deprecated, please replace it with ' + 'mmdet.datasets.pipelines.formatting.') diff --git a/downstream/mmdetection/mmdet/datasets/pipelines/formatting.py b/downstream/mmdetection/mmdet/datasets/pipelines/formatting.py new file mode 100644 index 0000000..45ca69c --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/formatting.py @@ -0,0 +1,392 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections.abc import Sequence + +import mmcv +import numpy as np +import torch +from mmcv.parallel import DataContainer as DC + +from ..builder import PIPELINES + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. + + Args: + data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to + be converted. + """ + + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not mmcv.is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError(f'type {type(data)} cannot be converted to tensor.') + + +@PIPELINES.register_module() +class ToTensor: + """Convert some results to :obj:`torch.Tensor` by given keys. + + Args: + keys (Sequence[str]): Keys that need to be converted to Tensor. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Call function to convert data in results to :obj:`torch.Tensor`. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data converted + to :obj:`torch.Tensor`. 
+ """ + for key in self.keys: + results[key] = to_tensor(results[key]) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class ImageToTensor: + """Convert image to :obj:`torch.Tensor` by given keys. + + The dimension order of input image is (H, W, C). The pipeline will convert + it to (C, H, W). If only 2 dimension (H, W) is given, the output would be + (1, H, W). + + Args: + keys (Sequence[str]): Key of images to be converted to Tensor. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Call function to convert image in results to :obj:`torch.Tensor` and + transpose the channel order. + + Args: + results (dict): Result dict contains the image data to convert. + + Returns: + dict: The result dict contains the image converted + to :obj:`torch.Tensor` and transposed to (C, H, W) order. + """ + for key in self.keys: + img = results[key] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + results[key] = (to_tensor(img.transpose(2, 0, 1))).contiguous() + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class Transpose: + """Transpose some results by given keys. + + Args: + keys (Sequence[str]): Keys of results to be transposed. + order (Sequence[int]): Order of transpose. + """ + + def __init__(self, keys, order): + self.keys = keys + self.order = order + + def __call__(self, results): + """Call function to transpose the channel order of data in results. + + Args: + results (dict): Result dict contains the data to transpose. + + Returns: + dict: The result dict contains the data transposed to \ + ``self.order``. + """ + for key in self.keys: + results[key] = results[key].transpose(self.order) + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, order={self.order})' + + +@PIPELINES.register_module() +class ToDataContainer: + """Convert results to :obj:`mmcv.DataContainer` by given fields. + + Args: + fields (Sequence[dict]): Each field is a dict like + ``dict(key='xxx', **kwargs)``. The ``key`` in result will + be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. + Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'), + dict(key='gt_labels'))``. + """ + + def __init__(self, + fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), + dict(key='gt_labels'))): + self.fields = fields + + def __call__(self, results): + """Call function to convert data in results to + :obj:`mmcv.DataContainer`. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data converted to \ + :obj:`mmcv.DataContainer`. + """ + + for field in self.fields: + field = field.copy() + key = field.pop('key') + results[key] = DC(results[key], **field) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(fields={self.fields})' + + +@PIPELINES.register_module() +class DefaultFormatBundle: + """Default formatting bundle. + + It simplifies the pipeline of formatting common fields, including "img", + "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". + These fields are formatted as follows. 
+ + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - proposals: (1)to tensor, (2)to DataContainer + - gt_bboxes: (1)to tensor, (2)to DataContainer + - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer + - gt_labels: (1)to tensor, (2)to DataContainer + - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) + - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \ + (3)to DataContainer (stack=True) + + Args: + img_to_float (bool): Whether to force the image to be converted to + float type. Default: True. + pad_val (dict): A dict for padding value in batch collating, + the default value is `dict(img=0, masks=0, seg=255)`. + Without this argument, the padding value of "gt_semantic_seg" + will be set to 0 by default, which should be 255. + """ + + def __init__(self, + img_to_float=True, + pad_val=dict(img=0, masks=0, seg=255)): + self.img_to_float = img_to_float + self.pad_val = pad_val + + def __call__(self, results): + """Call function to transform and format common fields in results. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data that is formatted with \ + default bundle. + """ + + if 'img' in results: + img = results['img'] + if self.img_to_float is True and img.dtype == np.uint8: + # Normally, image is of uint8 type without normalization. + # At this time, it needs to be forced to be converted to + # flot32, otherwise the model training and inference + # will be wrong. Only used for YOLOX currently . + img = img.astype(np.float32) + # add default meta keys + results = self._add_default_meta_keys(results) + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 1)) + results['img'] = DC( + to_tensor(img), padding_value=self.pad_val['img'], stack=True) + for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: + if key not in results: + continue + results[key] = DC(to_tensor(results[key])) + if 'gt_masks' in results: + results['gt_masks'] = DC( + results['gt_masks'], + padding_value=self.pad_val['masks'], + cpu_only=True) + if 'gt_semantic_seg' in results: + results['gt_semantic_seg'] = DC( + to_tensor(results['gt_semantic_seg'][None, ...]), + padding_value=self.pad_val['seg'], + stack=True) + return results + + def _add_default_meta_keys(self, results): + """Add default meta keys. + + We set default meta keys including `pad_shape`, `scale_factor` and + `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and + `Pad` are implemented during the whole pipeline. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + results (dict): Updated result dict contains the data to convert. + """ + img = results['img'] + results.setdefault('pad_shape', img.shape) + results.setdefault('scale_factor', 1.0) + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results.setdefault( + 'img_norm_cfg', + dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False)) + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(img_to_float={self.img_to_float})' + + +@PIPELINES.register_module() +class Collect: + """Collect data from the loader relevant to the specific task. + + This is usually the last stage of the data loader pipeline. Typically keys + is set to some subset of "img", "proposals", "gt_bboxes", + "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". + + The "img_meta" item is always populated. 
The contents of the "img_meta" + dictionary depends on "meta_keys". By default this includes: + + - "img_shape": shape of the image input to the network as a tuple \ + (h, w, c). Note that images may be zero padded on the \ + bottom/right if the batch tensor is larger than this shape. + + - "scale_factor": a float indicating the preprocessing scale + + - "flip": a boolean indicating if image flip transform was used + + - "filename": path to the image file + + - "ori_shape": original shape of the image as a tuple (h, w, c) + + - "pad_shape": image shape after padding + + - "img_norm_cfg": a dict of normalization information: + + - mean - per channel mean subtraction + - std - per channel std divisor + - to_rgb - bool indicating if bgr was converted to rgb + + Args: + keys (Sequence[str]): Keys of results to be collected in ``data``. + meta_keys (Sequence[str], optional): Meta keys to be converted to + ``mmcv.DataContainer`` and collected in ``data[img_metas]``. + Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', + 'pad_shape', 'scale_factor', 'flip', 'flip_direction', + 'img_norm_cfg')`` + """ + + def __init__(self, + keys, + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction', 'img_norm_cfg')): + self.keys = keys + self.meta_keys = meta_keys + + def __call__(self, results): + """Call function to collect keys in results. The keys in ``meta_keys`` + will be converted to :obj:mmcv.DataContainer. + + Args: + results (dict): Result dict contains the data to collect. + + Returns: + dict: The result dict contains the following keys + + - keys in``self.keys`` + - ``img_metas`` + """ + + data = {} + img_meta = {} + for key in self.meta_keys: + img_meta[key] = results[key] + data['img_metas'] = DC(img_meta, cpu_only=True) + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, meta_keys={self.meta_keys})' + + +@PIPELINES.register_module() +class WrapFieldsToLists: + """Wrap fields of the data dictionary into lists for evaluation. + + This class can be used as a last step of a test or validation + pipeline for single image evaluation or inference. + + Example: + >>> test_pipeline = [ + >>> dict(type='LoadImageFromFile'), + >>> dict(type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + >>> dict(type='Pad', size_divisor=32), + >>> dict(type='ImageToTensor', keys=['img']), + >>> dict(type='Collect', keys=['img']), + >>> dict(type='WrapFieldsToLists') + >>> ] + """ + + def __call__(self, results): + """Call function to wrap fields into lists. + + Args: + results (dict): Result dict contains the data to wrap. + + Returns: + dict: The result dict where value of ``self.keys`` are wrapped \ + into list. + """ + + # Wrap dict fields into lists + for key, val in results.items(): + results[key] = [val] + return results + + def __repr__(self): + return f'{self.__class__.__name__}()' diff --git a/downstream/mmdetection/mmdet/datasets/pipelines/instaboost.py b/downstream/mmdetection/mmdet/datasets/pipelines/instaboost.py new file mode 100644 index 0000000..ca10c4c --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/instaboost.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
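# Editor's note: an illustrative sketch, not part of the patch, of the output
# structure produced by ``Collect`` above: the requested keys plus an
# ``img_metas`` DataContainer holding the selected meta keys. It assumes mmcv
# and mmdet are importable; the inputs are dummy values and only a single meta
# key is collected to keep the example short.
import numpy as np
from mmdet.datasets.pipelines import Collect

results = dict(img=np.zeros((4, 4, 3), dtype=np.float32), img_shape=(4, 4, 3))
data = Collect(keys=['img'], meta_keys=('img_shape',))(results)
print(sorted(data.keys()))                   # ['img', 'img_metas']
print(data['img_metas'].data)                # {'img_shape': (4, 4, 3)}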
+import numpy as np + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class InstaBoost: + r"""Data augmentation method in `InstaBoost: Boosting Instance + Segmentation Via Probability Map Guided Copy-Pasting + `_. + + Refer to https://github.com/GothicAi/Instaboost for implementation details. + + Args: + action_candidate (tuple): Action candidates. "normal", "horizontal", \ + "vertical", "skip" are supported. Default: ('normal', \ + 'horizontal', 'skip'). + action_prob (tuple): Corresponding action probabilities. Should be \ + the same length as action_candidate. Default: (1, 0, 0). + scale (tuple): (min scale, max scale). Default: (0.8, 1.2). + dx (int): The maximum x-axis shift will be (instance width) / dx. + Default 15. + dy (int): The maximum y-axis shift will be (instance height) / dy. + Default 15. + theta (tuple): (min rotation degree, max rotation degree). \ + Default: (-1, 1). + color_prob (float): Probability of images for color augmentation. + Default 0.5. + heatmap_flag (bool): Whether to use heatmap guided. Default False. + aug_ratio (float): Probability of applying this transformation. \ + Default 0.5. + """ + + def __init__(self, + action_candidate=('normal', 'horizontal', 'skip'), + action_prob=(1, 0, 0), + scale=(0.8, 1.2), + dx=15, + dy=15, + theta=(-1, 1), + color_prob=0.5, + hflag=False, + aug_ratio=0.5): + try: + import instaboostfast as instaboost + except ImportError: + raise ImportError( + 'Please run "pip install instaboostfast" ' + 'to install instaboostfast first for instaboost augmentation.') + self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, + scale, dx, dy, theta, + color_prob, hflag) + self.aug_ratio = aug_ratio + + def _load_anns(self, results): + labels = results['ann_info']['labels'] + masks = results['ann_info']['masks'] + bboxes = results['ann_info']['bboxes'] + n = len(labels) + + anns = [] + for i in range(n): + label = labels[i] + bbox = bboxes[i] + mask = masks[i] + x1, y1, x2, y2 = bbox + # assert (x2 - x1) >= 1 and (y2 - y1) >= 1 + bbox = [x1, y1, x2 - x1, y2 - y1] + anns.append({ + 'category_id': label, + 'segmentation': mask, + 'bbox': bbox + }) + + return anns + + def _parse_anns(self, results, anns, img): + gt_bboxes = [] + gt_labels = [] + gt_masks_ann = [] + for ann in anns: + x1, y1, w, h = ann['bbox'] + # TODO: more essential bug need to be fixed in instaboost + if w <= 0 or h <= 0: + continue + bbox = [x1, y1, x1 + w, y1 + h] + gt_bboxes.append(bbox) + gt_labels.append(ann['category_id']) + gt_masks_ann.append(ann['segmentation']) + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + results['ann_info']['labels'] = gt_labels + results['ann_info']['bboxes'] = gt_bboxes + results['ann_info']['masks'] = gt_masks_ann + results['img'] = img + return results + + def __call__(self, results): + img = results['img'] + ori_type = img.dtype + anns = self._load_anns(results) + if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]): + try: + import instaboostfast as instaboost + except ImportError: + raise ImportError('Please run "pip install instaboostfast" ' + 'to install instaboostfast first.') + anns, img = instaboost.get_new_data( + anns, img.astype(np.uint8), self.cfg, background=None) + + results = self._parse_anns(results, anns, img.astype(ori_type)) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})' + return repr_str diff --git 
a/downstream/mmdetection/mmdet/datasets/pipelines/loading.py b/downstream/mmdetection/mmdet/datasets/pipelines/loading.py new file mode 100644 index 0000000..79bbf80 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/loading.py @@ -0,0 +1,643 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import mmcv +import numpy as np +import pycocotools.mask as maskUtils + +from mmdet.core import BitmapMasks, PolygonMasks +from ..builder import PIPELINES + +try: + from panopticapi.utils import rgb2id +except ImportError: + rgb2id = None + + +@PIPELINES.register_module() +class LoadImageFromFile: + """Load an image from file. + + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename"). Added or updated keys are "filename", "img", "img_shape", + "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), + "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:`mmcv.imfrombytes`. + Defaults to 'color'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + to_float32=False, + color_type='color', + channel_order='bgr', + file_client_args=dict(backend='disk')): + self.to_float32 = to_float32 + self.color_type = color_type + self.channel_order = channel_order + self.file_client_args = file_client_args.copy() + self.file_client = None + + def __call__(self, results): + """Call functions to load image and get image meta information. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded image and meta information. + """ + + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + if results['img_prefix'] is not None: + filename = osp.join(results['img_prefix'], + results['img_info']['filename']) + else: + filename = results['img_info']['filename'] + + img_bytes = self.file_client.get(filename) + img = mmcv.imfrombytes( + img_bytes, flag=self.color_type, channel_order=self.channel_order) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + results['ori_filename'] = results['img_info']['filename'] + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img'] + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'to_float32={self.to_float32}, ' + f"color_type='{self.color_type}', " + f"channel_order='{self.channel_order}', " + f'file_client_args={self.file_client_args})') + return repr_str + + +@PIPELINES.register_module() +class LoadImageFromWebcam(LoadImageFromFile): + """Load an image from webcam. + + Similar with :obj:`LoadImageFromFile`, but the image read from webcam is in + ``results['img']``. + """ + + def __call__(self, results): + """Call functions to add image meta information. + + Args: + results (dict): Result dict with Webcam read image in + ``results['img']``. + + Returns: + dict: The dict contains loaded image and meta information. 
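        A doctest-style sketch of running this transform on a pre-read
        frame (the synthetic frame below is an illustrative assumption):

        >>> import numpy as np
        >>> frame = np.zeros((480, 640, 3), dtype=np.uint8)
        >>> results = LoadImageFromWebcam()(dict(img=frame))
        >>> results['img_shape']
        (480, 640, 3)
        >>> results['filename'] is None
        True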
+ """ + + img = results['img'] + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = None + results['ori_filename'] = None + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img'] + return results + + +@PIPELINES.register_module() +class LoadMultiChannelImageFromFiles: + """Load multi-channel images from a list of separate channel files. + + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename", which is expected to be a list of filenames). + Added or updated keys are "filename", "img", "img_shape", + "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), + "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:`mmcv.imfrombytes`. + Defaults to 'color'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + to_float32=False, + color_type='unchanged', + file_client_args=dict(backend='disk')): + self.to_float32 = to_float32 + self.color_type = color_type + self.file_client_args = file_client_args.copy() + self.file_client = None + + def __call__(self, results): + """Call functions to load multiple images and get images meta + information. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded images and meta information. + """ + + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + if results['img_prefix'] is not None: + filename = [ + osp.join(results['img_prefix'], fname) + for fname in results['img_info']['filename'] + ] + else: + filename = results['img_info']['filename'] + + img = [] + for name in filename: + img_bytes = self.file_client.get(name) + img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type)) + img = np.stack(img, axis=-1) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + results['ori_filename'] = results['img_info']['filename'] + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'to_float32={self.to_float32}, ' + f"color_type='{self.color_type}', " + f'file_client_args={self.file_client_args})') + return repr_str + + +@PIPELINES.register_module() +class LoadAnnotations: + """Load multiple types of annotations. + + Args: + with_bbox (bool): Whether to parse and load the bbox annotation. + Default: True. + with_label (bool): Whether to parse and load the label annotation. + Default: True. + with_mask (bool): Whether to parse and load the mask annotation. + Default: False. + with_seg (bool): Whether to parse and load the semantic segmentation + annotation. Default: False. + poly2mask (bool): Whether to convert the instance masks from polygons + to bitmaps. Default: True. 
+ denorm_bbox (bool): Whether to convert bbox from relative value to + absolute value. Only used in OpenImage Dataset. + Default: False. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + with_bbox=True, + with_label=True, + with_mask=False, + with_seg=False, + poly2mask=True, + denorm_bbox=False, + file_client_args=dict(backend='disk')): + self.with_bbox = with_bbox + self.with_label = with_label + self.with_mask = with_mask + self.with_seg = with_seg + self.poly2mask = poly2mask + self.denorm_bbox = denorm_bbox + self.file_client_args = file_client_args.copy() + self.file_client = None + + def _load_bboxes(self, results): + """Private function to load bounding box annotations. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded bounding box annotations. + """ + + ann_info = results['ann_info'] + results['gt_bboxes'] = ann_info['bboxes'].copy() + + if self.denorm_bbox: + bbox_num = results['gt_bboxes'].shape[0] + if bbox_num != 0: + h, w = results['img_shape'][:2] + results['gt_bboxes'][:, 0::2] *= w + results['gt_bboxes'][:, 1::2] *= h + + gt_bboxes_ignore = ann_info.get('bboxes_ignore', None) + if gt_bboxes_ignore is not None: + results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy() + results['bbox_fields'].append('gt_bboxes_ignore') + results['bbox_fields'].append('gt_bboxes') + + gt_is_group_ofs = ann_info.get('gt_is_group_ofs', None) + if gt_is_group_ofs is not None: + results['gt_is_group_ofs'] = gt_is_group_ofs.copy() + + return results + + def _load_labels(self, results): + """Private function to load label annotations. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded label annotations. + """ + + results['gt_labels'] = results['ann_info']['labels'].copy() + return results + + def _poly2mask(self, mask_ann, img_h, img_w): + """Private function to convert masks represented with polygon to + bitmaps. + + Args: + mask_ann (list | dict): Polygon mask annotation input. + img_h (int): The height of output mask. + img_w (int): The width of output mask. + + Returns: + numpy.ndarray: The decode bitmap mask of shape (img_h, img_w). + """ + + if isinstance(mask_ann, list): + # polygon -- a single object might consist of multiple parts + # we merge all parts into one mask rle code + rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) + rle = maskUtils.merge(rles) + elif isinstance(mask_ann['counts'], list): + # uncompressed RLE + rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) + else: + # rle + rle = mask_ann + mask = maskUtils.decode(rle) + return mask + + def process_polygons(self, polygons): + """Convert polygons to list of ndarray and filter invalid polygons. + + Args: + polygons (list[list]): Polygons of one instance. + + Returns: + list[numpy.ndarray]: Processed polygons. + """ + + polygons = [np.array(p) for p in polygons] + valid_polygons = [] + for polygon in polygons: + if len(polygon) % 2 == 0 and len(polygon) >= 6: + valid_polygons.append(polygon) + return valid_polygons + + def _load_masks(self, results): + """Private function to load mask annotations. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded mask annotations. + If ``self.poly2mask`` is set ``True``, `gt_mask` will contain + :obj:`PolygonMasks`. 
Otherwise, :obj:`BitmapMasks` is used. + """ + + h, w = results['img_info']['height'], results['img_info']['width'] + gt_masks = results['ann_info']['masks'] + if self.poly2mask: + gt_masks = BitmapMasks( + [self._poly2mask(mask, h, w) for mask in gt_masks], h, w) + else: + gt_masks = PolygonMasks( + [self.process_polygons(polygons) for polygons in gt_masks], h, + w) + results['gt_masks'] = gt_masks + results['mask_fields'].append('gt_masks') + return results + + def _load_semantic_seg(self, results): + """Private function to load semantic segmentation annotations. + + Args: + results (dict): Result dict from :obj:`dataset`. + + Returns: + dict: The dict contains loaded semantic segmentation annotations. + """ + + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + filename = osp.join(results['seg_prefix'], + results['ann_info']['seg_map']) + img_bytes = self.file_client.get(filename) + results['gt_semantic_seg'] = mmcv.imfrombytes( + img_bytes, flag='unchanged').squeeze() + results['seg_fields'].append('gt_semantic_seg') + return results + + def __call__(self, results): + """Call function to load multiple types annotations. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded bounding box, label, mask and + semantic segmentation annotations. + """ + + if self.with_bbox: + results = self._load_bboxes(results) + if results is None: + return None + if self.with_label: + results = self._load_labels(results) + if self.with_mask: + results = self._load_masks(results) + if self.with_seg: + results = self._load_semantic_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(with_bbox={self.with_bbox}, ' + repr_str += f'with_label={self.with_label}, ' + repr_str += f'with_mask={self.with_mask}, ' + repr_str += f'with_seg={self.with_seg}, ' + repr_str += f'poly2mask={self.poly2mask}, ' + repr_str += f'poly2mask={self.file_client_args})' + return repr_str + + +@PIPELINES.register_module() +class LoadPanopticAnnotations(LoadAnnotations): + """Load multiple types of panoptic annotations. + + Args: + with_bbox (bool): Whether to parse and load the bbox annotation. + Default: True. + with_label (bool): Whether to parse and load the label annotation. + Default: True. + with_mask (bool): Whether to parse and load the mask annotation. + Default: True. + with_seg (bool): Whether to parse and load the semantic segmentation + annotation. Default: True. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + with_bbox=True, + with_label=True, + with_mask=True, + with_seg=True, + file_client_args=dict(backend='disk')): + if rgb2id is None: + raise RuntimeError( + 'panopticapi is not installed, please install it by: ' + 'pip install git+https://github.com/cocodataset/' + 'panopticapi.git.') + + super(LoadPanopticAnnotations, self).__init__( + with_bbox=with_bbox, + with_label=with_label, + with_mask=with_mask, + with_seg=with_seg, + poly2mask=True, + denorm_bbox=False, + file_client_args=file_client_args) + + def _load_masks_and_semantic_segs(self, results): + """Private function to load mask and semantic segmentation annotations. + + In gt_semantic_seg, the foreground label is from `0` to + `num_things - 1`, the background label is from `num_things` to + `num_things + num_stuff - 1`, 255 means the ignored label (`VOID`). 
+ + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded mask and semantic segmentation + annotations. `BitmapMasks` is used for mask annotations. + """ + + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + filename = osp.join(results['seg_prefix'], + results['ann_info']['seg_map']) + img_bytes = self.file_client.get(filename) + pan_png = mmcv.imfrombytes( + img_bytes, flag='color', channel_order='rgb').squeeze() + pan_png = rgb2id(pan_png) + + gt_masks = [] + gt_seg = np.zeros_like(pan_png) + 255 # 255 as ignore + + for mask_info in results['ann_info']['masks']: + mask = (pan_png == mask_info['id']) + gt_seg = np.where(mask, mask_info['category'], gt_seg) + + # The legal thing masks + if mask_info.get('is_thing'): + gt_masks.append(mask.astype(np.uint8)) + + if self.with_mask: + h, w = results['img_info']['height'], results['img_info']['width'] + gt_masks = BitmapMasks(gt_masks, h, w) + results['gt_masks'] = gt_masks + results['mask_fields'].append('gt_masks') + + if self.with_seg: + results['gt_semantic_seg'] = gt_seg + results['seg_fields'].append('gt_semantic_seg') + return results + + def __call__(self, results): + """Call function to load multiple types panoptic annotations. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded bounding box, label, mask and + semantic segmentation annotations. + """ + + if self.with_bbox: + results = self._load_bboxes(results) + if results is None: + return None + if self.with_label: + results = self._load_labels(results) + if self.with_mask or self.with_seg: + # The tasks completed by '_load_masks' and '_load_semantic_segs' + # in LoadAnnotations are merged to one function. + results = self._load_masks_and_semantic_segs(results) + + return results + + +@PIPELINES.register_module() +class LoadProposals: + """Load proposal pipeline. + + Required key is "proposals". Updated keys are "proposals", "bbox_fields". + + Args: + num_max_proposals (int, optional): Maximum number of proposals to load. + If not specified, all proposals will be loaded. + """ + + def __init__(self, num_max_proposals=None): + self.num_max_proposals = num_max_proposals + + def __call__(self, results): + """Call function to load proposals from file. + + Args: + results (dict): Result dict from :obj:`mmdet.CustomDataset`. + + Returns: + dict: The dict contains loaded proposal annotations. + """ + + proposals = results['proposals'] + if proposals.shape[1] not in (4, 5): + raise AssertionError( + 'proposals should have shapes (n, 4) or (n, 5), ' + f'but found {proposals.shape}') + proposals = proposals[:, :4] + + if self.num_max_proposals is not None: + proposals = proposals[:self.num_max_proposals] + + if len(proposals) == 0: + proposals = np.array([[0, 0, 0, 0]], dtype=np.float32) + results['proposals'] = proposals + results['bbox_fields'].append('proposals') + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(num_max_proposals={self.num_max_proposals})' + + +@PIPELINES.register_module() +class FilterAnnotations: + """Filter invalid annotations. + + Args: + min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth + boxes. Default: (1., 1.) + min_gt_mask_area (int): Minimum foreground area of ground truth masks. + Default: 1 + by_box (bool): Filter instances with bounding boxes not meeting the + min_gt_bbox_wh threshold. 
Default: True + by_mask (bool): Filter instances with masks not meeting + min_gt_mask_area threshold. Default: False + keep_empty (bool): Whether to return None when it + becomes an empty bbox after filtering. Default: True + """ + + def __init__(self, + min_gt_bbox_wh=(1., 1.), + min_gt_mask_area=1, + by_box=True, + by_mask=False, + keep_empty=True): + # TODO: add more filter options + assert by_box or by_mask + self.min_gt_bbox_wh = min_gt_bbox_wh + self.min_gt_mask_area = min_gt_mask_area + self.by_box = by_box + self.by_mask = by_mask + self.keep_empty = keep_empty + + def __call__(self, results): + if self.by_box: + assert 'gt_bboxes' in results + gt_bboxes = results['gt_bboxes'] + instance_num = gt_bboxes.shape[0] + if self.by_mask: + assert 'gt_masks' in results + gt_masks = results['gt_masks'] + instance_num = len(gt_masks) + + if instance_num == 0: + return results + + tests = [] + if self.by_box: + w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + tests.append((w > self.min_gt_bbox_wh[0]) + & (h > self.min_gt_bbox_wh[1])) + if self.by_mask: + gt_masks = results['gt_masks'] + tests.append(gt_masks.areas >= self.min_gt_mask_area) + + keep = tests[0] + for t in tests[1:]: + keep = keep & t + + keys = ('gt_bboxes', 'gt_labels', 'gt_masks') + for key in keys: + if key in results: + results[key] = results[key][keep] + if not keep.any(): + if self.keep_empty: + return None + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(min_gt_bbox_wh={self.min_gt_bbox_wh},' \ + f'(min_gt_mask_area={self.min_gt_mask_area},' \ + f'(by_box={self.by_box},' \ + f'(by_mask={self.by_mask},' \ + f'always_keep={self.always_keep})' diff --git a/downstream/mmdetection/mmdet/datasets/pipelines/test_time_aug.py b/downstream/mmdetection/mmdet/datasets/pipelines/test_time_aug.py new file mode 100644 index 0000000..5f1ab7b --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/test_time_aug.py @@ -0,0 +1,121 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv + +from ..builder import PIPELINES +from .compose import Compose + + +@PIPELINES.register_module() +class MultiScaleFlipAug: + """Test-time augmentation with multiple scales and flipping. + + An example configuration is as followed: + + .. code-block:: + + img_scale=[(1333, 400), (1333, 800)], + flip=True, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ] + + After MultiScaleFLipAug with above configuration, the results are wrapped + into lists of the same length as followed: + + .. code-block:: + + dict( + img=[...], + img_shape=[...], + scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)] + flip=[False, True, False, True] + ... + ) + + Args: + transforms (list[dict]): Transforms to apply in each augmentation. + img_scale (tuple | list[tuple] | None): Images scales for resizing. + scale_factor (float | list[float] | None): Scale factors for resizing. + flip (bool): Whether apply flip augmentation. Default: False. + flip_direction (str | list[str]): Flip augmentation directions, + options are "horizontal", "vertical" and "diagonal". If + flip_direction is a list, multiple flip augmentations will be + applied. It has no effect when flip == False. Default: + "horizontal". 
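    When ``scale_factor`` is given instead of ``img_scale``, each factor is
    forwarded to the resize step via ``results['scale_factor']``. A minimal
    sketch (the factor values below are illustrative assumptions):

    .. code-block::

        tta = MultiScaleFlipAug(
            transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ],
            scale_factor=[0.5, 1.0, 2.0],
            flip=False)
        # yields 3 augmented copies per image (one per factor); with
        # flip=True and the default horizontal direction it would be 6.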
+ """ + + def __init__(self, + transforms, + img_scale=None, + scale_factor=None, + flip=False, + flip_direction='horizontal'): + self.transforms = Compose(transforms) + assert (img_scale is None) ^ (scale_factor is None), ( + 'Must have but only one variable can be set') + if img_scale is not None: + self.img_scale = img_scale if isinstance(img_scale, + list) else [img_scale] + self.scale_key = 'scale' + assert mmcv.is_list_of(self.img_scale, tuple) + else: + self.img_scale = scale_factor if isinstance( + scale_factor, list) else [scale_factor] + self.scale_key = 'scale_factor' + + self.flip = flip + self.flip_direction = flip_direction if isinstance( + flip_direction, list) else [flip_direction] + assert mmcv.is_list_of(self.flip_direction, str) + if not self.flip and self.flip_direction != ['horizontal']: + warnings.warn( + 'flip_direction has no effect when flip is set to False') + if (self.flip + and not any([t['type'] == 'RandomFlip' for t in transforms])): + warnings.warn( + 'flip has no effect when RandomFlip is not in transforms') + + def __call__(self, results): + """Call function to apply test time augment transforms on results. + + Args: + results (dict): Result dict contains the data to transform. + + Returns: + dict[str: list]: The augmented data, where each value is wrapped + into a list. + """ + + aug_data = [] + flip_args = [(False, None)] + if self.flip: + flip_args += [(True, direction) + for direction in self.flip_direction] + for scale in self.img_scale: + for flip, direction in flip_args: + _results = results.copy() + _results[self.scale_key] = scale + _results['flip'] = flip + _results['flip_direction'] = direction + data = self.transforms(_results) + aug_data.append(data) + # list of dict to dict of list + aug_data_dict = {key: [] for key in aug_data[0]} + for data in aug_data: + for key, val in data.items(): + aug_data_dict[key].append(val) + return aug_data_dict + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(transforms={self.transforms}, ' + repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' + repr_str += f'flip_direction={self.flip_direction})' + return repr_str diff --git a/downstream/mmdetection/mmdet/datasets/pipelines/transforms.py b/downstream/mmdetection/mmdet/datasets/pipelines/transforms.py new file mode 100644 index 0000000..0a1b389 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/pipelines/transforms.py @@ -0,0 +1,2919 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import inspect +import math +import warnings + +import cv2 +import mmcv +import numpy as np +from numpy import random + +from mmdet.core import BitmapMasks, PolygonMasks, find_inside_bboxes +from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps +from mmdet.utils import log_img_scale +from ..builder import PIPELINES + +try: + from imagecorruptions import corrupt +except ImportError: + corrupt = None + +try: + import albumentations + from albumentations import Compose +except ImportError: + albumentations = None + Compose = None + + +@PIPELINES.register_module() +class Resize: + """Resize images & bbox & mask. + + This transform resizes the input image to some scale. Bboxes and masks are + then resized with the same scale factor. If the input dict contains the key + "scale", then the scale in the input dict is used, otherwise the specified + scale in the init method is used. 
If the input dict contains the key + "scale_factor" (if MultiScaleFlipAug does not give img_scale but + scale_factor), the actual scale will be computed by image shape and + scale_factor. + + `img_scale` can either be a tuple (single-scale) or a list of tuple + (multi-scale). There are 3 multiscale modes: + + - ``ratio_range is not None``: randomly sample a ratio from the ratio \ + range and multiply it with the image scale. + - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \ + sample a scale from the multiscale range. + - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \ + sample a scale from multiple scales. + + Args: + img_scale (tuple or list[tuple]): Images scales for resizing. + multiscale_mode (str): Either "range" or "value". + ratio_range (tuple[float]): (min_ratio, max_ratio) + keep_ratio (bool): Whether to keep the aspect ratio when resizing the + image. + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + backend (str): Image resize backend, choices are 'cv2' and 'pillow'. + These two backends generates slightly different results. Defaults + to 'cv2'. + interpolation (str): Interpolation method, accepted values are + "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' + backend, "nearest", "bilinear" for 'pillow' backend. + override (bool, optional): Whether to override `scale` and + `scale_factor` so as to call resize twice. Default False. If True, + after the first resizing, the existed `scale` and `scale_factor` + will be ignored so the second resizing can be allowed. + This option is a work-around for multiple times of resize in DETR. + Defaults to False. + """ + + def __init__(self, + img_scale=None, + multiscale_mode='range', + ratio_range=None, + keep_ratio=True, + bbox_clip_border=True, + backend='cv2', + interpolation='bilinear', + override=False): + if img_scale is None: + self.img_scale = None + else: + if isinstance(img_scale, list): + self.img_scale = img_scale + else: + self.img_scale = [img_scale] + assert mmcv.is_list_of(self.img_scale, tuple) + + if ratio_range is not None: + # mode 1: given a scale and a range of image ratio + assert len(self.img_scale) == 1 + else: + # mode 2: given multiple scales or a range of scales + assert multiscale_mode in ['value', 'range'] + + self.backend = backend + self.multiscale_mode = multiscale_mode + self.ratio_range = ratio_range + self.keep_ratio = keep_ratio + # TODO: refactor the override option in Resize + self.interpolation = interpolation + self.override = override + self.bbox_clip_border = bbox_clip_border + + @staticmethod + def random_select(img_scales): + """Randomly select an img_scale from given candidates. + + Args: + img_scales (list[tuple]): Images scales for selection. + + Returns: + (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \ + where ``img_scale`` is the selected image scale and \ + ``scale_idx`` is the selected index in the given candidates. + """ + + assert mmcv.is_list_of(img_scales, tuple) + scale_idx = np.random.randint(len(img_scales)) + img_scale = img_scales[scale_idx] + return img_scale, scale_idx + + @staticmethod + def random_sample(img_scales): + """Randomly sample an img_scale when ``multiscale_mode=='range'``. + + Args: + img_scales (list[tuple]): Images scale range for sampling. 
+ There must be two tuples in img_scales, which specify the lower + and upper bound of image scales. + + Returns: + (tuple, None): Returns a tuple ``(img_scale, None)``, where \ + ``img_scale`` is sampled scale and None is just a placeholder \ + to be consistent with :func:`random_select`. + """ + + assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 + img_scale_long = [max(s) for s in img_scales] + img_scale_short = [min(s) for s in img_scales] + long_edge = np.random.randint( + min(img_scale_long), + max(img_scale_long) + 1) + short_edge = np.random.randint( + min(img_scale_short), + max(img_scale_short) + 1) + img_scale = (long_edge, short_edge) + return img_scale, None + + @staticmethod + def random_sample_ratio(img_scale, ratio_range): + """Randomly sample an img_scale when ``ratio_range`` is specified. + + A ratio will be randomly sampled from the range specified by + ``ratio_range``. Then it would be multiplied with ``img_scale`` to + generate sampled scale. + + Args: + img_scale (tuple): Images scale base to multiply with ratio. + ratio_range (tuple[float]): The minimum and maximum ratio to scale + the ``img_scale``. + + Returns: + (tuple, None): Returns a tuple ``(scale, None)``, where \ + ``scale`` is sampled ratio multiplied with ``img_scale`` and \ + None is just a placeholder to be consistent with \ + :func:`random_select`. + """ + + assert isinstance(img_scale, tuple) and len(img_scale) == 2 + min_ratio, max_ratio = ratio_range + assert min_ratio <= max_ratio + ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio + scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) + return scale, None + + def _random_scale(self, results): + """Randomly sample an img_scale according to ``ratio_range`` and + ``multiscale_mode``. + + If ``ratio_range`` is specified, a ratio will be sampled and be + multiplied with ``img_scale``. + If multiple scales are specified by ``img_scale``, a scale will be + sampled according to ``multiscale_mode``. + Otherwise, single scale will be used. + + Args: + results (dict): Result dict from :obj:`dataset`. + + Returns: + dict: Two new keys 'scale` and 'scale_idx` are added into \ + ``results``, which would be used by subsequent pipelines. 
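        For instance (the numbers are illustrative, not from the original
        documentation), with ``img_scale=[(1333, 800)]`` and
        ``ratio_range=(0.8, 1.2)`` the sampled scale always lies between
        ``(1066, 640)`` and ``(1599, 960)``:

        >>> scale, _ = Resize.random_sample_ratio((1333, 800), (0.8, 1.2))
        >>> 1066 <= scale[0] <= 1599 and 640 <= scale[1] <= 960
        True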
+ """ + + if self.ratio_range is not None: + scale, scale_idx = self.random_sample_ratio( + self.img_scale[0], self.ratio_range) + elif len(self.img_scale) == 1: + scale, scale_idx = self.img_scale[0], 0 + elif self.multiscale_mode == 'range': + scale, scale_idx = self.random_sample(self.img_scale) + elif self.multiscale_mode == 'value': + scale, scale_idx = self.random_select(self.img_scale) + else: + raise NotImplementedError + + results['scale'] = scale + results['scale_idx'] = scale_idx + + def _resize_img(self, results): + """Resize images with ``results['scale']``.""" + for key in results.get('img_fields', ['img']): + if self.keep_ratio: + img, scale_factor = mmcv.imrescale( + results[key], + results['scale'], + return_scale=True, + interpolation=self.interpolation, + backend=self.backend) + # the w_scale and h_scale has minor difference + # a real fix should be done in the mmcv.imrescale in the future + new_h, new_w = img.shape[:2] + h, w = results[key].shape[:2] + w_scale = new_w / w + h_scale = new_h / h + else: + img, w_scale, h_scale = mmcv.imresize( + results[key], + results['scale'], + return_scale=True, + interpolation=self.interpolation, + backend=self.backend) + results[key] = img + + scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], + dtype=np.float32) + results['img_shape'] = img.shape + # in case that there is no padding + results['pad_shape'] = img.shape + results['scale_factor'] = scale_factor + results['keep_ratio'] = self.keep_ratio + + def _resize_bboxes(self, results): + """Resize bounding boxes with ``results['scale_factor']``.""" + for key in results.get('bbox_fields', []): + bboxes = results[key] * results['scale_factor'] + if self.bbox_clip_border: + img_shape = results['img_shape'] + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) + results[key] = bboxes + + def _resize_masks(self, results): + """Resize masks with ``results['scale']``""" + for key in results.get('mask_fields', []): + if results[key] is None: + continue + if self.keep_ratio: + results[key] = results[key].rescale(results['scale']) + else: + results[key] = results[key].resize(results['img_shape'][:2]) + + def _resize_seg(self, results): + """Resize semantic segmentation map with ``results['scale']``.""" + for key in results.get('seg_fields', []): + if self.keep_ratio: + gt_seg = mmcv.imrescale( + results[key], + results['scale'], + interpolation='nearest', + backend=self.backend) + else: + gt_seg = mmcv.imresize( + results[key], + results['scale'], + interpolation='nearest', + backend=self.backend) + results[key] = gt_seg + + def __call__(self, results): + """Call function to resize images, bounding boxes, masks, semantic + segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \ + 'keep_ratio' keys are added into result dict. 
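        A small sketch of the ``scale_factor``-only path (the shape and
        factor below are illustrative assumptions):

        .. code-block::

            # results carries only a float scale_factor, e.g. 0.5, and an
            # image of shape (800, 1333, 3); the resize scale is then
            # derived from the image shape before the usual resizing of
            # image, bboxes, masks and segmentation maps:
            img_shape = (800, 1333)
            scale_factor = 0.5
            scale = tuple([int(x * scale_factor) for x in img_shape][::-1])
            # scale == (666, 400)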
+ """ + + if 'scale' not in results: + if 'scale_factor' in results: + img_shape = results['img'].shape[:2] + scale_factor = results['scale_factor'] + assert isinstance(scale_factor, float) + results['scale'] = tuple( + [int(x * scale_factor) for x in img_shape][::-1]) + else: + self._random_scale(results) + else: + if not self.override: + assert 'scale_factor' not in results, ( + 'scale and scale_factor cannot be both set.') + else: + results.pop('scale') + if 'scale_factor' in results: + results.pop('scale_factor') + self._random_scale(results) + + self._resize_img(results) + self._resize_bboxes(results) + self._resize_masks(results) + self._resize_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'multiscale_mode={self.multiscale_mode}, ' + repr_str += f'ratio_range={self.ratio_range}, ' + repr_str += f'keep_ratio={self.keep_ratio}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str + + +@PIPELINES.register_module() +class RandomFlip: + """Flip the image & bbox & mask. + + If the input dict contains the key "flip", then the flag will be used, + otherwise it will be randomly decided by a ratio specified in the init + method. + + When random flip is enabled, ``flip_ratio``/``direction`` can either be a + float/string or tuple of float/string. There are 3 flip modes: + + - ``flip_ratio`` is float, ``direction`` is string: the image will be + ``direction``ly flipped with probability of ``flip_ratio`` . + E.g., ``flip_ratio=0.5``, ``direction='horizontal'``, + then image will be horizontally flipped with probability of 0.5. + - ``flip_ratio`` is float, ``direction`` is list of string: the image will + be ``direction[i]``ly flipped with probability of + ``flip_ratio/len(direction)``. + E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``, + then image will be horizontally flipped with probability of 0.25, + vertically with probability of 0.25. + - ``flip_ratio`` is list of float, ``direction`` is list of string: + given ``len(flip_ratio) == len(direction)``, the image will + be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``. + E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal', + 'vertical']``, then image will be horizontally flipped with probability + of 0.3, vertically with probability of 0.5. + + Args: + flip_ratio (float | list[float], optional): The flipping probability. + Default: None. + direction(str | list[str], optional): The flipping direction. Options + are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'. + If input is a list, the length must equal ``flip_ratio``. Each + element in ``flip_ratio`` indicates the flip probability of + corresponding direction. 
+ """ + + def __init__(self, flip_ratio=None, direction='horizontal'): + if isinstance(flip_ratio, list): + assert mmcv.is_list_of(flip_ratio, float) + assert 0 <= sum(flip_ratio) <= 1 + elif isinstance(flip_ratio, float): + assert 0 <= flip_ratio <= 1 + elif flip_ratio is None: + pass + else: + raise ValueError('flip_ratios must be None, float, ' + 'or list of float') + self.flip_ratio = flip_ratio + + valid_directions = ['horizontal', 'vertical', 'diagonal'] + if isinstance(direction, str): + assert direction in valid_directions + elif isinstance(direction, list): + assert mmcv.is_list_of(direction, str) + assert set(direction).issubset(set(valid_directions)) + else: + raise ValueError('direction must be either str or list of str') + self.direction = direction + + if isinstance(flip_ratio, list): + assert len(self.flip_ratio) == len(self.direction) + + def bbox_flip(self, bboxes, img_shape, direction): + """Flip bboxes horizontally. + + Args: + bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k) + img_shape (tuple[int]): Image shape (height, width) + direction (str): Flip direction. Options are 'horizontal', + 'vertical'. + + Returns: + numpy.ndarray: Flipped bounding boxes. + """ + + assert bboxes.shape[-1] % 4 == 0 + flipped = bboxes.copy() + if direction == 'horizontal': + w = img_shape[1] + flipped[..., 0::4] = w - bboxes[..., 2::4] + flipped[..., 2::4] = w - bboxes[..., 0::4] + elif direction == 'vertical': + h = img_shape[0] + flipped[..., 1::4] = h - bboxes[..., 3::4] + flipped[..., 3::4] = h - bboxes[..., 1::4] + elif direction == 'diagonal': + w = img_shape[1] + h = img_shape[0] + flipped[..., 0::4] = w - bboxes[..., 2::4] + flipped[..., 1::4] = h - bboxes[..., 3::4] + flipped[..., 2::4] = w - bboxes[..., 0::4] + flipped[..., 3::4] = h - bboxes[..., 1::4] + else: + raise ValueError(f"Invalid flipping direction '{direction}'") + return flipped + + def __call__(self, results): + """Call function to flip bounding boxes, masks, semantic segmentation + maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction' keys are added \ + into result dict. 
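        A small sketch of how the flip decision is drawn when ``flip_ratio``
        is a float and ``direction`` is a list (the values are illustrative
        assumptions):

        .. code-block::

            flip_ratio = 0.5
            direction = ['horizontal', 'vertical']
            direction_list = direction + [None]   # None means no flip
            single_ratio = flip_ratio / (len(direction_list) - 1)
            flip_ratio_list = \
                [single_ratio] * (len(direction_list) - 1) + [1 - flip_ratio]
            # flip_ratio_list == [0.25, 0.25, 0.5]: each direction is chosen
            # with probability 0.25 and no flip with probability 0.5.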
+ """ + + if 'flip' not in results: + if isinstance(self.direction, list): + # None means non-flip + direction_list = self.direction + [None] + else: + # None means non-flip + direction_list = [self.direction, None] + + if isinstance(self.flip_ratio, list): + non_flip_ratio = 1 - sum(self.flip_ratio) + flip_ratio_list = self.flip_ratio + [non_flip_ratio] + else: + non_flip_ratio = 1 - self.flip_ratio + # exclude non-flip + single_ratio = self.flip_ratio / (len(direction_list) - 1) + flip_ratio_list = [single_ratio] * (len(direction_list) - + 1) + [non_flip_ratio] + + cur_dir = np.random.choice(direction_list, p=flip_ratio_list) + + results['flip'] = cur_dir is not None + if 'flip_direction' not in results: + results['flip_direction'] = cur_dir + if results['flip']: + # flip image + for key in results.get('img_fields', ['img']): + results[key] = mmcv.imflip( + results[key], direction=results['flip_direction']) + # flip bboxes + for key in results.get('bbox_fields', []): + results[key] = self.bbox_flip(results[key], + results['img_shape'], + results['flip_direction']) + # flip masks + for key in results.get('mask_fields', []): + results[key] = results[key].flip(results['flip_direction']) + + # flip segs + for key in results.get('seg_fields', []): + results[key] = mmcv.imflip( + results[key], direction=results['flip_direction']) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})' + + +@PIPELINES.register_module() +class RandomShift: + """Shift the image and box given shift pixels and probability. + + Args: + shift_ratio (float): Probability of shifts. Default 0.5. + max_shift_px (int): The max pixels for shifting. Default 32. + filter_thr_px (int): The width and height threshold for filtering. + The bbox and the rest of the targets below the width and + height threshold will be filtered. Default 1. + """ + + def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1): + assert 0 <= shift_ratio <= 1 + assert max_shift_px >= 0 + self.shift_ratio = shift_ratio + self.max_shift_px = max_shift_px + self.filter_thr_px = int(filter_thr_px) + # The key correspondence from bboxes to labels. + self.bbox2label = { + 'gt_bboxes': 'gt_labels', + 'gt_bboxes_ignore': 'gt_labels_ignore' + } + + def __call__(self, results): + """Call function to random shift images, bounding boxes. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Shift results. + """ + if random.random() < self.shift_ratio: + img_shape = results['img'].shape[:2] + + random_shift_x = random.randint(-self.max_shift_px, + self.max_shift_px) + random_shift_y = random.randint(-self.max_shift_px, + self.max_shift_px) + new_x = max(0, random_shift_x) + ori_x = max(0, -random_shift_x) + new_y = max(0, random_shift_y) + ori_y = max(0, -random_shift_y) + + # TODO: support mask and semantic segmentation maps. + for key in results.get('bbox_fields', []): + bboxes = results[key].copy() + bboxes[..., 0::2] += random_shift_x + bboxes[..., 1::2] += random_shift_y + + # clip border + bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1]) + bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0]) + + # remove invalid bboxes + bbox_w = bboxes[..., 2] - bboxes[..., 0] + bbox_h = bboxes[..., 3] - bboxes[..., 1] + valid_inds = (bbox_w > self.filter_thr_px) & ( + bbox_h > self.filter_thr_px) + # If the shift does not contain any gt-bbox area, skip this + # image. 
+ if key == 'gt_bboxes' and not valid_inds.any(): + return results + bboxes = bboxes[valid_inds] + results[key] = bboxes + + # label fields. e.g. gt_labels and gt_labels_ignore + label_key = self.bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + + for key in results.get('img_fields', ['img']): + img = results[key] + new_img = np.zeros_like(img) + img_h, img_w = img.shape[:2] + new_h = img_h - np.abs(random_shift_y) + new_w = img_w - np.abs(random_shift_x) + new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ + = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w] + results[key] = new_img + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(max_shift_px={self.max_shift_px}, ' + return repr_str + + +@PIPELINES.register_module() +class Pad: + """Pad the image & masks & segmentation map. + + There are two padding modes: (1) pad to a fixed size and (2) pad to the + minimum size that is divisible by some number. + Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", + + Args: + size (tuple, optional): Fixed padding size. + size_divisor (int, optional): The divisor of padded size. + pad_to_square (bool): Whether to pad the image into a square. + Currently only used for YOLOX. Default: False. + pad_val (dict, optional): A dict for padding value, the default + value is `dict(img=0, masks=0, seg=255)`. + """ + + def __init__(self, + size=None, + size_divisor=None, + pad_to_square=False, + pad_val=dict(img=0, masks=0, seg=255)): + self.size = size + self.size_divisor = size_divisor + if isinstance(pad_val, float) or isinstance(pad_val, int): + warnings.warn( + 'pad_val of float type is deprecated now, ' + f'please use pad_val=dict(img={pad_val}, ' + f'masks={pad_val}, seg=255) instead.', DeprecationWarning) + pad_val = dict(img=pad_val, masks=pad_val, seg=255) + assert isinstance(pad_val, dict) + self.pad_val = pad_val + self.pad_to_square = pad_to_square + + if pad_to_square: + assert size is None and size_divisor is None, \ + 'The size and size_divisor must be None ' \ + 'when pad2square is True' + else: + assert size is not None or size_divisor is not None, \ + 'only one of size and size_divisor should be valid' + assert size is None or size_divisor is None + + def _pad_img(self, results): + """Pad images according to ``self.size``.""" + pad_val = self.pad_val.get('img', 0) + for key in results.get('img_fields', ['img']): + if self.pad_to_square: + max_size = max(results[key].shape[:2]) + self.size = (max_size, max_size) + if self.size is not None: + padded_img = mmcv.impad( + results[key], shape=self.size, pad_val=pad_val) + elif self.size_divisor is not None: + padded_img = mmcv.impad_to_multiple( + results[key], self.size_divisor, pad_val=pad_val) + results[key] = padded_img + results['pad_shape'] = padded_img.shape + results['pad_fixed_size'] = self.size + results['pad_size_divisor'] = self.size_divisor + + def _pad_masks(self, results): + """Pad masks according to ``results['pad_shape']``.""" + pad_shape = results['pad_shape'][:2] + pad_val = self.pad_val.get('masks', 0) + for key in results.get('mask_fields', []): + results[key] = results[key].pad(pad_shape, pad_val=pad_val) + + def _pad_seg(self, results): + """Pad semantic segmentation map according to + ``results['pad_shape']``.""" + pad_val = self.pad_val.get('seg', 255) + for key in results.get('seg_fields', []): + results[key] = mmcv.impad( + results[key], shape=results['pad_shape'][:2], pad_val=pad_val) + + def __call__(self, 
results): + """Call function to pad images, masks, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Updated result dict. + """ + self._pad_img(results) + self._pad_masks(results) + self._pad_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, ' + repr_str += f'size_divisor={self.size_divisor}, ' + repr_str += f'pad_to_square={self.pad_to_square}, ' + repr_str += f'pad_val={self.pad_val})' + return repr_str + + +@PIPELINES.register_module() +class Normalize: + """Normalize the image. + + Added key is "img_norm_cfg". + + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + def __call__(self, results): + """Call function to normalize images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Normalized results, 'img_norm_cfg' key is added into + result dict. + """ + for key in results.get('img_fields', ['img']): + results[key] = mmcv.imnormalize(results[key], self.mean, self.std, + self.to_rgb) + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class RandomCrop: + """Random crop the image & bboxes & masks. + + The absolute `crop_size` is sampled based on `crop_type` and `image_size`, + then the cropped results are generated. + + Args: + crop_size (tuple): The relative ratio or absolute pixels of + height and width. + crop_type (str, optional): one of "relative_range", "relative", + "absolute", "absolute_range". "relative" randomly crops + (h * crop_size[0], w * crop_size[1]) part from an input of size + (h, w). "relative_range" uniformly samples relative crop size from + range [crop_size[0], 1] and [crop_size[1], 1] for height and width + respectively. "absolute" crops from an input with absolute size + (crop_size[0], crop_size[1]). "absolute_range" uniformly samples + crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w + in range [crop_size[0], min(w, crop_size[1])]. Default "absolute". + allow_negative_crop (bool, optional): Whether to allow a crop that does + not contain any bbox area. Default False. + recompute_bbox (bool, optional): Whether to re-compute the boxes based + on cropped instance masks. Default False. + bbox_clip_border (bool, optional): Whether clip the objects outside + the border of the image. Defaults to True. + + Note: + - If the image is smaller than the absolute crop size, return the + original image. + - The keys for bboxes, labels and masks must be aligned. That is, + `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and + `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and + `gt_masks_ignore`. + - If the crop does not contain any gt-bbox region and + `allow_negative_crop` is set to False, skip this image. 
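    A short sketch of how the absolute crop size is derived for each
    ``crop_type`` (the (h, w) = (400, 600) input size and the crop sizes
    below are illustrative assumptions):

    .. code-block::

        RandomCrop(crop_size=(0.5, 0.5), crop_type='relative')
        # -> crops a (200, 300) patch from a (400, 600) image
        RandomCrop(crop_size=(0.5, 0.5), crop_type='relative_range')
        # -> height/width ratios each sampled uniformly from [0.5, 1]
        RandomCrop(crop_size=(256, 256), crop_type='absolute')
        # -> crops a (256, 256) patch (clipped to the image size)
        RandomCrop(crop_size=(256, 512), crop_type='absolute_range')
        # -> crop height sampled from [256, 400], width from [256, 512]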
+ """ + + def __init__(self, + crop_size, + crop_type='absolute', + allow_negative_crop=False, + recompute_bbox=False, + bbox_clip_border=True): + if crop_type not in [ + 'relative_range', 'relative', 'absolute', 'absolute_range' + ]: + raise ValueError(f'Invalid crop_type {crop_type}.') + if crop_type in ['absolute', 'absolute_range']: + assert crop_size[0] > 0 and crop_size[1] > 0 + assert isinstance(crop_size[0], int) and isinstance( + crop_size[1], int) + else: + assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 + self.crop_size = crop_size + self.crop_type = crop_type + self.allow_negative_crop = allow_negative_crop + self.bbox_clip_border = bbox_clip_border + self.recompute_bbox = recompute_bbox + # The key correspondence from bboxes to labels and masks. + self.bbox2label = { + 'gt_bboxes': 'gt_labels', + 'gt_bboxes_ignore': 'gt_labels_ignore' + } + self.bbox2mask = { + 'gt_bboxes': 'gt_masks', + 'gt_bboxes_ignore': 'gt_masks_ignore' + } + + def _crop_data(self, results, crop_size, allow_negative_crop): + """Function to randomly crop images, bounding boxes, masks, semantic + segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + crop_size (tuple): Expected absolute size after cropping, (h, w). + allow_negative_crop (bool): Whether to allow a crop that does not + contain any bbox area. Default to False. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. + """ + assert crop_size[0] > 0 and crop_size[1] > 0 + for key in results.get('img_fields', ['img']): + img = results[key] + margin_h = max(img.shape[0] - crop_size[0], 0) + margin_w = max(img.shape[1] - crop_size[1], 0) + offset_h = np.random.randint(0, margin_h + 1) + offset_w = np.random.randint(0, margin_w + 1) + crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] + crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] + + # crop the image + img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] + img_shape = img.shape + results[key] = img + results['img_shape'] = img_shape + + # crop bboxes accordingly and clip to the image boundary + for key in results.get('bbox_fields', []): + # e.g. gt_bboxes and gt_bboxes_ignore + bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h], + dtype=np.float32) + bboxes = results[key] - bbox_offset + if self.bbox_clip_border: + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) + valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & ( + bboxes[:, 3] > bboxes[:, 1]) + # If the crop does not contain any gt-bbox area and + # allow_negative_crop is False, skip this image. + if (key == 'gt_bboxes' and not valid_inds.any() + and not allow_negative_crop): + return None + results[key] = bboxes[valid_inds, :] + # label fields. e.g. gt_labels and gt_labels_ignore + label_key = self.bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + + # mask fields, e.g. 
gt_masks and gt_masks_ignore + mask_key = self.bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][ + valid_inds.nonzero()[0]].crop( + np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) + if self.recompute_bbox: + results[key] = results[mask_key].get_bboxes() + + # crop semantic seg + for key in results.get('seg_fields', []): + results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2] + + return results + + def _get_crop_size(self, image_size): + """Randomly generates the absolute crop size based on `crop_type` and + `image_size`. + + Args: + image_size (tuple): (h, w). + + Returns: + crop_size (tuple): (crop_h, crop_w) in absolute pixels. + """ + h, w = image_size + if self.crop_type == 'absolute': + return (min(self.crop_size[0], h), min(self.crop_size[1], w)) + elif self.crop_type == 'absolute_range': + assert self.crop_size[0] <= self.crop_size[1] + crop_h = np.random.randint( + min(h, self.crop_size[0]), + min(h, self.crop_size[1]) + 1) + crop_w = np.random.randint( + min(w, self.crop_size[0]), + min(w, self.crop_size[1]) + 1) + return crop_h, crop_w + elif self.crop_type == 'relative': + crop_h, crop_w = self.crop_size + return int(h * crop_h + 0.5), int(w * crop_w + 0.5) + elif self.crop_type == 'relative_range': + crop_size = np.asarray(self.crop_size, dtype=np.float32) + crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) + return int(h * crop_h + 0.5), int(w * crop_w + 0.5) + + def __call__(self, results): + """Call function to randomly crop images, bounding boxes, masks, + semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. + """ + image_size = results['img'].shape[:2] + crop_size = self._get_crop_size(image_size) + results = self._crop_data(results, crop_size, self.allow_negative_crop) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(crop_size={self.crop_size}, ' + repr_str += f'crop_type={self.crop_type}, ' + repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str + + +@PIPELINES.register_module() +class SegRescale: + """Rescale semantic segmentation maps. + + Args: + scale_factor (float): The scale factor of the final output. + backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. + These two backends generates slightly different results. Defaults + to 'cv2'. + """ + + def __init__(self, scale_factor=1, backend='cv2'): + self.scale_factor = scale_factor + self.backend = backend + + def __call__(self, results): + """Call function to scale the semantic segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with semantic segmentation map scaled. + """ + + for key in results.get('seg_fields', []): + if self.scale_factor != 1: + results[key] = mmcv.imrescale( + results[key], + self.scale_factor, + interpolation='nearest', + backend=self.backend) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' + + +@PIPELINES.register_module() +class PhotoMetricDistortion: + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + 1. random brightness + 2. random contrast (mode 0) + 3. 
convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + 8. randomly swap channels + + Args: + brightness_delta (int): delta of brightness. + contrast_range (tuple): range of contrast. + saturation_range (tuple): range of saturation. + hue_delta (int): delta of hue. + """ + + def __init__(self, + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18): + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + def __call__(self, results): + """Call function to perform photometric distortion on images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + if 'img_fields' in results: + assert results['img_fields'] == ['img'], \ + 'Only single img_fields is allowed' + img = results['img'] + img = img.astype(np.float32) + # random brightness + if random.randint(2): + delta = random.uniform(-self.brightness_delta, + self.brightness_delta) + img += delta + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + mode = random.randint(2) + if mode == 1: + if random.randint(2): + alpha = random.uniform(self.contrast_lower, + self.contrast_upper) + img *= alpha + + # convert color from BGR to HSV + img = mmcv.bgr2hsv(img) + + # random saturation + if random.randint(2): + img[..., 1] *= random.uniform(self.saturation_lower, + self.saturation_upper) + + # random hue + if random.randint(2): + img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta) + img[..., 0][img[..., 0] > 360] -= 360 + img[..., 0][img[..., 0] < 0] += 360 + + # convert color from HSV to BGR + img = mmcv.hsv2bgr(img) + + # random contrast + if mode == 0: + if random.randint(2): + alpha = random.uniform(self.contrast_lower, + self.contrast_upper) + img *= alpha + + # randomly swap channels + if random.randint(2): + img = img[..., random.permutation(3)] + + results['img'] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(\nbrightness_delta={self.brightness_delta},\n' + repr_str += 'contrast_range=' + repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n' + repr_str += 'saturation_range=' + repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n' + repr_str += f'hue_delta={self.hue_delta})' + return repr_str + + +@PIPELINES.register_module() +class Expand: + """Random expand the image & bboxes. + + Randomly place the original image on a canvas of 'ratio' x original image + size filled with mean values. The ratio is in the range of ratio_range. + + Args: + mean (tuple): mean value of dataset. + to_rgb (bool): if need to convert the order of mean to align with RGB. + ratio_range (tuple): range of expand ratio. + prob (float): probability of applying this transformation + """ + + def __init__(self, + mean=(0, 0, 0), + to_rgb=True, + ratio_range=(1, 4), + seg_ignore_label=None, + prob=0.5): + self.to_rgb = to_rgb + self.ratio_range = ratio_range + if to_rgb: + self.mean = mean[::-1] + else: + self.mean = mean + self.min_ratio, self.max_ratio = ratio_range + self.seg_ignore_label = seg_ignore_label + self.prob = prob + + def __call__(self, results): + """Call function to expand images, bounding boxes. + + Args: + results (dict): Result dict from loading pipeline. 
+ + Returns: + dict: Result dict with images, bounding boxes expanded + """ + + if random.uniform(0, 1) > self.prob: + return results + + if 'img_fields' in results: + assert results['img_fields'] == ['img'], \ + 'Only single img_fields is allowed' + img = results['img'] + + h, w, c = img.shape + ratio = random.uniform(self.min_ratio, self.max_ratio) + # speedup expand when meets large image + if np.all(self.mean == self.mean[0]): + expand_img = np.empty((int(h * ratio), int(w * ratio), c), + img.dtype) + expand_img.fill(self.mean[0]) + else: + expand_img = np.full((int(h * ratio), int(w * ratio), c), + self.mean, + dtype=img.dtype) + left = int(random.uniform(0, w * ratio - w)) + top = int(random.uniform(0, h * ratio - h)) + expand_img[top:top + h, left:left + w] = img + + results['img'] = expand_img + # expand bboxes + for key in results.get('bbox_fields', []): + results[key] = results[key] + np.tile( + (left, top), 2).astype(results[key].dtype) + + # expand masks + for key in results.get('mask_fields', []): + results[key] = results[key].expand( + int(h * ratio), int(w * ratio), top, left) + + # expand segs + for key in results.get('seg_fields', []): + gt_seg = results[key] + expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), + self.seg_ignore_label, + dtype=gt_seg.dtype) + expand_gt_seg[top:top + h, left:left + w] = gt_seg + results[key] = expand_gt_seg + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' + repr_str += f'ratio_range={self.ratio_range}, ' + repr_str += f'seg_ignore_label={self.seg_ignore_label})' + return repr_str + + +@PIPELINES.register_module() +class MinIoURandomCrop: + """Random crop the image & bboxes, the cropped patches have minimum IoU + requirement with original image & bboxes, the IoU threshold is randomly + selected from min_ious. + + Args: + min_ious (tuple): minimum IoU threshold for all intersections with + bounding boxes + min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, + where a >= min_crop_size). + bbox_clip_border (bool, optional): Whether clip the objects outside + the border of the image. Defaults to True. + + Note: + The keys for bboxes, labels and masks should be paired. That is, \ + `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \ + `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`. + """ + + def __init__(self, + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3, + bbox_clip_border=True): + # 1: return ori img + self.min_ious = min_ious + self.sample_mode = (1, *min_ious, 0) + self.min_crop_size = min_crop_size + self.bbox_clip_border = bbox_clip_border + self.bbox2label = { + 'gt_bboxes': 'gt_labels', + 'gt_bboxes_ignore': 'gt_labels_ignore' + } + self.bbox2mask = { + 'gt_bboxes': 'gt_masks', + 'gt_bboxes_ignore': 'gt_masks_ignore' + } + + def __call__(self, results): + """Call function to crop images and bounding boxes with minimum IoU + constraint. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images and bounding boxes cropped, \ + 'img_shape' key is updated. 
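+
+ For illustration only, this transform is commonly paired with ``Expand``
+ in SSD-style training pipelines; the numbers below are example settings,
+ not recommended defaults::
+
+     dict(type='Expand', mean=[123.675, 116.28, 103.53], to_rgb=True,
+          ratio_range=(1, 4)),
+     dict(type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+          min_crop_size=0.3),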
+ """ + + if 'img_fields' in results: + assert results['img_fields'] == ['img'], \ + 'Only single img_fields is allowed' + img = results['img'] + assert 'bbox_fields' in results + boxes = [results[key] for key in results['bbox_fields']] + boxes = np.concatenate(boxes, 0) + h, w, c = img.shape + while True: + mode = random.choice(self.sample_mode) + self.mode = mode + if mode == 1: + return results + + min_iou = mode + for i in range(50): + new_w = random.uniform(self.min_crop_size * w, w) + new_h = random.uniform(self.min_crop_size * h, h) + + # h / w in [0.5, 2] + if new_h / new_w < 0.5 or new_h / new_w > 2: + continue + + left = random.uniform(w - new_w) + top = random.uniform(h - new_h) + + patch = np.array( + (int(left), int(top), int(left + new_w), int(top + new_h))) + # Line or point crop is not allowed + if patch[2] == patch[0] or patch[3] == patch[1]: + continue + overlaps = bbox_overlaps( + patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1) + if len(overlaps) > 0 and overlaps.min() < min_iou: + continue + + # center of boxes should inside the crop img + # only adjust boxes and instance masks when the gt is not empty + if len(overlaps) > 0: + # adjust boxes + def is_center_of_bboxes_in_patch(boxes, patch): + center = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask = ((center[:, 0] > patch[0]) * + (center[:, 1] > patch[1]) * + (center[:, 0] < patch[2]) * + (center[:, 1] < patch[3])) + return mask + + mask = is_center_of_bboxes_in_patch(boxes, patch) + if not mask.any(): + continue + for key in results.get('bbox_fields', []): + boxes = results[key].copy() + mask = is_center_of_bboxes_in_patch(boxes, patch) + boxes = boxes[mask] + if self.bbox_clip_border: + boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:]) + boxes[:, :2] = boxes[:, :2].clip(min=patch[:2]) + boxes -= np.tile(patch[:2], 2) + + results[key] = boxes + # labels + label_key = self.bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][mask] + + # mask fields + mask_key = self.bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][ + mask.nonzero()[0]].crop(patch) + # adjust the img no matter whether the gt is empty before crop + img = img[patch[1]:patch[3], patch[0]:patch[2]] + results['img'] = img + results['img_shape'] = img.shape + + # seg fields + for key in results.get('seg_fields', []): + results[key] = results[key][patch[1]:patch[3], + patch[0]:patch[2]] + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(min_ious={self.min_ious}, ' + repr_str += f'min_crop_size={self.min_crop_size}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str + + +@PIPELINES.register_module() +class Corrupt: + """Corruption augmentation. + + Corruption transforms implemented based on + `imagecorruptions `_. + + Args: + corruption (str): Corruption name. + severity (int, optional): The severity of corruption. Default: 1. + """ + + def __init__(self, corruption, severity=1): + self.corruption = corruption + self.severity = severity + + def __call__(self, results): + """Call function to corrupt image. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images corrupted. 
+ """ + + if corrupt is None: + raise RuntimeError('imagecorruptions is not installed') + if 'img_fields' in results: + assert results['img_fields'] == ['img'], \ + 'Only single img_fields is allowed' + results['img'] = corrupt( + results['img'].astype(np.uint8), + corruption_name=self.corruption, + severity=self.severity) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(corruption={self.corruption}, ' + repr_str += f'severity={self.severity})' + return repr_str + + +@PIPELINES.register_module() +class Albu: + """Albumentation augmentation. + + Adds custom transformations from Albumentations library. + Please, visit `https://albumentations.readthedocs.io` + to get more information. + + An example of ``transforms`` is as followed: + + .. code-block:: + + [ + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=0.5), + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), + ] + + Args: + transforms (list[dict]): A list of albu transformations + bbox_params (dict): Bbox_params for albumentation `Compose` + keymap (dict): Contains {'input key':'albumentation-style key'} + skip_img_without_anno (bool): Whether to skip the image if no ann left + after aug + """ + + def __init__(self, + transforms, + bbox_params=None, + keymap=None, + update_pad_shape=False, + skip_img_without_anno=False): + if Compose is None: + raise RuntimeError('albumentations is not installed') + + # Args will be modified later, copying it will be safer + transforms = copy.deepcopy(transforms) + if bbox_params is not None: + bbox_params = copy.deepcopy(bbox_params) + if keymap is not None: + keymap = copy.deepcopy(keymap) + self.transforms = transforms + self.filter_lost_elements = False + self.update_pad_shape = update_pad_shape + self.skip_img_without_anno = skip_img_without_anno + + # A simple workaround to remove masks without boxes + if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params + and 'filter_lost_elements' in bbox_params): + self.filter_lost_elements = True + self.origin_label_fields = bbox_params['label_fields'] + bbox_params['label_fields'] = ['idx_mapper'] + del bbox_params['filter_lost_elements'] + + self.bbox_params = ( + self.albu_builder(bbox_params) if bbox_params else None) + self.aug = Compose([self.albu_builder(t) for t in self.transforms], + bbox_params=self.bbox_params) + + if not keymap: + self.keymap_to_albu = { + 'img': 'image', + 'gt_masks': 'masks', + 'gt_bboxes': 'bboxes' + } + else: + self.keymap_to_albu = keymap + self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} + + def albu_builder(self, cfg): + """Import a module from albumentations. + + It inherits some of :func:`build_from_cfg` logic. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + + Returns: + obj: The constructed object. 
+ """ + + assert isinstance(cfg, dict) and 'type' in cfg + args = cfg.copy() + + obj_type = args.pop('type') + if mmcv.is_str(obj_type): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + obj_cls = getattr(albumentations, obj_type) + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError( + f'type must be a str or valid type, but got {type(obj_type)}') + + if 'transforms' in args: + args['transforms'] = [ + self.albu_builder(transform) + for transform in args['transforms'] + ] + + return obj_cls(**args) + + @staticmethod + def mapper(d, keymap): + """Dictionary mapper. Renames keys according to keymap provided. + + Args: + d (dict): old dict + keymap (dict): {'old_key':'new_key'} + Returns: + dict: new dict. + """ + + updated_dict = {} + for k, v in zip(d.keys(), d.values()): + new_k = keymap.get(k, k) + updated_dict[new_k] = d[k] + return updated_dict + + def __call__(self, results): + # dict to albumentations format + results = self.mapper(results, self.keymap_to_albu) + # TODO: add bbox_fields + if 'bboxes' in results: + # to list of boxes + if isinstance(results['bboxes'], np.ndarray): + results['bboxes'] = [x for x in results['bboxes']] + # add pseudo-field for filtration + if self.filter_lost_elements: + results['idx_mapper'] = np.arange(len(results['bboxes'])) + + # TODO: Support mask structure in albu + if 'masks' in results: + if isinstance(results['masks'], PolygonMasks): + raise NotImplementedError( + 'Albu only supports BitMap masks now') + ori_masks = results['masks'] + if albumentations.__version__ < '0.5': + results['masks'] = results['masks'].masks + else: + results['masks'] = [mask for mask in results['masks'].masks] + + results = self.aug(**results) + + if 'bboxes' in results: + if isinstance(results['bboxes'], list): + results['bboxes'] = np.array( + results['bboxes'], dtype=np.float32) + results['bboxes'] = results['bboxes'].reshape(-1, 4) + + # filter label_fields + if self.filter_lost_elements: + + for label in self.origin_label_fields: + results[label] = np.array( + [results[label][i] for i in results['idx_mapper']]) + if 'masks' in results: + results['masks'] = np.array( + [results['masks'][i] for i in results['idx_mapper']]) + results['masks'] = ori_masks.__class__( + results['masks'], results['image'].shape[0], + results['image'].shape[1]) + + if (not len(results['idx_mapper']) + and self.skip_img_without_anno): + return None + + if 'gt_labels' in results: + if isinstance(results['gt_labels'], list): + results['gt_labels'] = np.array(results['gt_labels']) + results['gt_labels'] = results['gt_labels'].astype(np.int64) + + # back to the original format + results = self.mapper(results, self.keymap_back) + + # update final shape + if self.update_pad_shape: + results['pad_shape'] = results['img'].shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' + return repr_str + + +@PIPELINES.register_module() +class RandomCenterCropPad: + """Random center crop and random around padding for CornerNet. + + This operation generates randomly cropped image from the original image and + pads it simultaneously. Different from :class:`RandomCrop`, the output + shape may not equal to ``crop_size`` strictly. We choose a random value + from ``ratios`` and the output shape could be larger or smaller than + ``crop_size``. The padding operation is also different from :class:`Pad`, + here we use around padding instead of right-bottom padding. 
+ + The relation between output image (padding image) and original image: + + .. code:: text + + output image + + +----------------------------+ + | padded area | + +------|----------------------------|----------+ + | | cropped area | | + | | +---------------+ | | + | | | . center | | | original image + | | | range | | | + | | +---------------+ | | + +------|----------------------------|----------+ + | padded area | + +----------------------------+ + + There are 5 main areas in the figure: + + - output image: output image of this operation, also called padding + image in following instruction. + - original image: input image of this operation. + - padded area: non-intersect area of output image and original image. + - cropped area: the overlap of output image and original image. + - center range: a smaller area where random center chosen from. + center range is computed by ``border`` and original image's shape + to avoid our random center is too close to original image's border. + + Also this operation act differently in train and test mode, the summary + pipeline is listed below. + + Train pipeline: + + 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image + will be ``random_ratio * crop_size``. + 2. Choose a ``random_center`` in center range. + 3. Generate padding image with center matches the ``random_center``. + 4. Initialize the padding image with pixel value equals to ``mean``. + 5. Copy the cropped area to padding image. + 6. Refine annotations. + + Test pipeline: + + 1. Compute output shape according to ``test_pad_mode``. + 2. Generate padding image with center matches the original image + center. + 3. Initialize the padding image with pixel value equals to ``mean``. + 4. Copy the ``cropped area`` to padding image. + + Args: + crop_size (tuple | None): expected size after crop, final size will + computed according to ratio. Requires (h, w) in train mode, and + None in test mode. + ratios (tuple): random select a ratio from tuple and crop image to + (crop_size[0] * ratio) * (crop_size[1] * ratio). + Only available in train mode. + border (int): max distance from center select area to image border. + Only available in train mode. + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB. + test_mode (bool): whether involve random variables in transform. + In train mode, crop_size is fixed, center coords and ratio is + random selected from predefined lists. In test mode, crop_size + is image's original shape, center coords and ratio is fixed. + test_pad_mode (tuple): padding method and padding shape value, only + available in test mode. Default is using 'logical_or' with + 127 as padding shape value. + + - 'logical_or': final_shape = input_shape | padding_shape_value + - 'size_divisor': final_shape = int( + ceil(input_shape / padding_shape_value) * padding_shape_value) + test_pad_add_pix (int): Extra padding pixel in test mode. Default 0. + bbox_clip_border (bool, optional): Whether clip the objects outside + the border of the image. Defaults to True. 
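+
+ For illustration only, a train-mode config could look like the following.
+ The numbers are example values, but note that ``mean``, ``std`` and
+ ``to_rgb`` must always be given, and ``test_pad_mode`` must be ``None`` in
+ train mode:
+
+ .. code-block::
+
+     dict(
+         type='RandomCenterCropPad',
+         crop_size=(511, 511),
+         ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
+         test_mode=False,
+         test_pad_mode=None,
+         mean=[123.675, 116.28, 103.53],
+         std=[58.395, 57.12, 57.375],
+         to_rgb=True)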
+ """ + + def __init__(self, + crop_size=None, + ratios=(0.9, 1.0, 1.1), + border=128, + mean=None, + std=None, + to_rgb=None, + test_mode=False, + test_pad_mode=('logical_or', 127), + test_pad_add_pix=0, + bbox_clip_border=True): + if test_mode: + assert crop_size is None, 'crop_size must be None in test mode' + assert ratios is None, 'ratios must be None in test mode' + assert border is None, 'border must be None in test mode' + assert isinstance(test_pad_mode, (list, tuple)) + assert test_pad_mode[0] in ['logical_or', 'size_divisor'] + else: + assert isinstance(crop_size, (list, tuple)) + assert crop_size[0] > 0 and crop_size[1] > 0, ( + 'crop_size must > 0 in train mode') + assert isinstance(ratios, (list, tuple)) + assert test_pad_mode is None, ( + 'test_pad_mode must be None in train mode') + + self.crop_size = crop_size + self.ratios = ratios + self.border = border + # We do not set default value to mean, std and to_rgb because these + # hyper-parameters are easy to forget but could affect the performance. + # Please use the same setting as Normalize for performance assurance. + assert mean is not None and std is not None and to_rgb is not None + self.to_rgb = to_rgb + self.input_mean = mean + self.input_std = std + if to_rgb: + self.mean = mean[::-1] + self.std = std[::-1] + else: + self.mean = mean + self.std = std + self.test_mode = test_mode + self.test_pad_mode = test_pad_mode + self.test_pad_add_pix = test_pad_add_pix + self.bbox_clip_border = bbox_clip_border + + def _get_border(self, border, size): + """Get final border for the target size. + + This function generates a ``final_border`` according to image's shape. + The area between ``final_border`` and ``size - final_border`` is the + ``center range``. We randomly choose center from the ``center range`` + to avoid our random center is too close to original image's border. + Also ``center range`` should be larger than 0. + + Args: + border (int): The initial border, default is 128. + size (int): The width or height of original image. + Returns: + int: The final border. + """ + k = 2 * border / size + i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k))) + return border // i + + def _filter_boxes(self, patch, boxes): + """Check whether the center of each box is in the patch. + + Args: + patch (list[int]): The cropped area, [left, top, right, bottom]. + boxes (numpy array, (N x 4)): Ground truth boxes. + + Returns: + mask (numpy array, (N,)): Each box is inside or outside the patch. + """ + center = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * ( + center[:, 0] < patch[2]) * ( + center[:, 1] < patch[3]) + return mask + + def _crop_image_and_paste(self, image, center, size): + """Crop image with a given center and size, then paste the cropped + image to a blank image with two centers align. + + This function is equivalent to generating a blank image with ``size`` + as its shape. Then cover it on the original image with two centers ( + the center of blank image and the random center of original image) + aligned. The overlap area is paste from the original image and the + outside area is filled with ``mean pixel``. + + Args: + image (np array, H x W x C): Original image. + center (list[int]): Target crop center coord. + size (list[int]): Target crop size. [target_h, target_w] + + Returns: + cropped_img (np array, target_h x target_w x C): Cropped image. 
+ border (np array, 4): The distance of four border of + ``cropped_img`` to the original image area, [top, bottom, + left, right] + patch (list[int]): The cropped area, [left, top, right, bottom]. + """ + center_y, center_x = center + target_h, target_w = size + img_h, img_w, img_c = image.shape + + x0 = max(0, center_x - target_w // 2) + x1 = min(center_x + target_w // 2, img_w) + y0 = max(0, center_y - target_h // 2) + y1 = min(center_y + target_h // 2, img_h) + patch = np.array((int(x0), int(y0), int(x1), int(y1))) + + left, right = center_x - x0, x1 - center_x + top, bottom = center_y - y0, y1 - center_y + + cropped_center_y, cropped_center_x = target_h // 2, target_w // 2 + cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype) + for i in range(img_c): + cropped_img[:, :, i] += self.mean[i] + y_slice = slice(cropped_center_y - top, cropped_center_y + bottom) + x_slice = slice(cropped_center_x - left, cropped_center_x + right) + cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :] + + border = np.array([ + cropped_center_y - top, cropped_center_y + bottom, + cropped_center_x - left, cropped_center_x + right + ], + dtype=np.float32) + + return cropped_img, border, patch + + def _train_aug(self, results): + """Random crop and around padding the original image. + + Args: + results (dict): Image infomations in the augment pipeline. + + Returns: + results (dict): The updated dict. + """ + img = results['img'] + h, w, c = img.shape + boxes = results['gt_bboxes'] + while True: + scale = random.choice(self.ratios) + new_h = int(self.crop_size[0] * scale) + new_w = int(self.crop_size[1] * scale) + h_border = self._get_border(self.border, h) + w_border = self._get_border(self.border, w) + + for i in range(50): + center_x = random.randint(low=w_border, high=w - w_border) + center_y = random.randint(low=h_border, high=h - h_border) + + cropped_img, border, patch = self._crop_image_and_paste( + img, [center_y, center_x], [new_h, new_w]) + + mask = self._filter_boxes(patch, boxes) + # if image do not have valid bbox, any crop patch is valid. + if not mask.any() and len(boxes) > 0: + continue + + results['img'] = cropped_img + results['img_shape'] = cropped_img.shape + results['pad_shape'] = cropped_img.shape + + x0, y0, x1, y1 = patch + + left_w, top_h = center_x - x0, center_y - y0 + cropped_center_x, cropped_center_y = new_w // 2, new_h // 2 + + # crop bboxes accordingly and clip to the image boundary + for key in results.get('bbox_fields', []): + mask = self._filter_boxes(patch, results[key]) + bboxes = results[key][mask] + bboxes[:, 0:4:2] += cropped_center_x - left_w - x0 + bboxes[:, 1:4:2] += cropped_center_y - top_h - y0 + if self.bbox_clip_border: + bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w) + bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h) + keep = (bboxes[:, 2] > bboxes[:, 0]) & ( + bboxes[:, 3] > bboxes[:, 1]) + bboxes = bboxes[keep] + results[key] = bboxes + if key in ['gt_bboxes']: + if 'gt_labels' in results: + labels = results['gt_labels'][mask] + labels = labels[keep] + results['gt_labels'] = labels + if 'gt_masks' in results: + raise NotImplementedError( + 'RandomCenterCropPad only supports bbox.') + + # crop semantic seg + for key in results.get('seg_fields', []): + raise NotImplementedError( + 'RandomCenterCropPad only supports bbox.') + return results + + def _test_aug(self, results): + """Around padding the original image without cropping. + + The padding mode and value are from ``test_pad_mode``. 
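+
+ For example, with ``test_pad_mode=('logical_or', 127)`` an input height of
+ 500 is padded to ``(500 | 127) + test_pad_add_pix = 511 + test_pad_add_pix``,
+ while ``('size_divisor', 32)`` would pad it to ``ceil(500 / 32) * 32 = 512``.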
+ + Args: + results (dict): Image infomations in the augment pipeline. + + Returns: + results (dict): The updated dict. + """ + img = results['img'] + h, w, c = img.shape + results['img_shape'] = img.shape + if self.test_pad_mode[0] in ['logical_or']: + # self.test_pad_add_pix is only used for centernet + target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix + target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix + elif self.test_pad_mode[0] in ['size_divisor']: + divisor = self.test_pad_mode[1] + target_h = int(np.ceil(h / divisor)) * divisor + target_w = int(np.ceil(w / divisor)) * divisor + else: + raise NotImplementedError( + 'RandomCenterCropPad only support two testing pad mode:' + 'logical-or and size_divisor.') + + cropped_img, border, _ = self._crop_image_and_paste( + img, [h // 2, w // 2], [target_h, target_w]) + results['img'] = cropped_img + results['pad_shape'] = cropped_img.shape + results['border'] = border + return results + + def __call__(self, results): + img = results['img'] + assert img.dtype == np.float32, ( + 'RandomCenterCropPad needs the input image of dtype np.float32,' + ' please set "to_float32=True" in "LoadImageFromFile" pipeline') + h, w, c = img.shape + assert c == len(self.mean) + if self.test_mode: + return self._test_aug(results) + else: + return self._train_aug(results) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(crop_size={self.crop_size}, ' + repr_str += f'ratios={self.ratios}, ' + repr_str += f'border={self.border}, ' + repr_str += f'mean={self.input_mean}, ' + repr_str += f'std={self.input_std}, ' + repr_str += f'to_rgb={self.to_rgb}, ' + repr_str += f'test_mode={self.test_mode}, ' + repr_str += f'test_pad_mode={self.test_pad_mode}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str + + +@PIPELINES.register_module() +class CutOut: + """CutOut operation. + + Randomly drop some regions of image used in + `Cutout `_. + + Args: + n_holes (int | tuple[int, int]): Number of regions to be dropped. + If it is given as a list, number of holes will be randomly + selected from the closed interval [`n_holes[0]`, `n_holes[1]`]. + cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate + shape of dropped regions. It can be `tuple[int, int]` to use a + fixed cutout shape, or `list[tuple[int, int]]` to randomly choose + shape from the list. + cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The + candidate ratio of dropped regions. It can be `tuple[float, float]` + to use a fixed ratio or `list[tuple[float, float]]` to randomly + choose ratio from the list. Please note that `cutout_shape` + and `cutout_ratio` cannot be both given at the same time. + fill_in (tuple[float, float, float] | tuple[int, int, int]): The value + of pixel to fill in the dropped regions. Default: (0, 0, 0). + """ + + def __init__(self, + n_holes, + cutout_shape=None, + cutout_ratio=None, + fill_in=(0, 0, 0)): + + assert (cutout_shape is None) ^ (cutout_ratio is None), \ + 'Either cutout_shape or cutout_ratio should be specified.' 
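+ # The candidate sizes must be given as a tuple or a list of tuples; below,
+ # n_holes is also normalised to a closed (min, max) interval.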
+ assert (isinstance(cutout_shape, (list, tuple)) + or isinstance(cutout_ratio, (list, tuple))) + if isinstance(n_holes, tuple): + assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1] + else: + n_holes = (n_holes, n_holes) + self.n_holes = n_holes + self.fill_in = fill_in + self.with_ratio = cutout_ratio is not None + self.candidates = cutout_ratio if self.with_ratio else cutout_shape + if not isinstance(self.candidates, list): + self.candidates = [self.candidates] + + def __call__(self, results): + """Call function to drop some regions of image.""" + h, w, c = results['img'].shape + n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1) + for _ in range(n_holes): + x1 = np.random.randint(0, w) + y1 = np.random.randint(0, h) + index = np.random.randint(0, len(self.candidates)) + if not self.with_ratio: + cutout_w, cutout_h = self.candidates[index] + else: + cutout_w = int(self.candidates[index][0] * w) + cutout_h = int(self.candidates[index][1] * h) + + x2 = np.clip(x1 + cutout_w, 0, w) + y2 = np.clip(y1 + cutout_h, 0, h) + results['img'][y1:y2, x1:x2, :] = self.fill_in + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(n_holes={self.n_holes}, ' + repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio + else f'cutout_shape={self.candidates}, ') + repr_str += f'fill_in={self.fill_in})' + return repr_str + + +@PIPELINES.register_module() +class Mosaic: + """Mosaic augmentation. + + Given 4 images, mosaic transform combines them into + one output image. The output image is composed of the parts from each sub- + image. + + .. code:: text + + mosaic transform + center_x + +------------------------------+ + | pad | pad | + | +-----------+ | + | | | | + | | image1 |--------+ | + | | | | | + | | | image2 | | + center_y |----+-------------+-----------| + | | cropped | | + |pad | image3 | image4 | + | | | | + +----|-------------+-----------+ + | | + +-------------+ + + The mosaic transform steps are as follows: + + 1. Choose the mosaic center as the intersections of 4 images + 2. Get the left top image according to the index, and randomly + sample another 3 images from the custom dataset. + 3. Sub image will be cropped if image is larger than mosaic patch + + Args: + img_scale (Sequence[int]): Image size after mosaic pipeline of single + image. The shape order should be (height, width). + Default to (640, 640). + center_ratio_range (Sequence[float]): Center ratio range of mosaic + output. Default to (0.5, 1.5). + min_bbox_size (int | float): The minimum pixel for filtering + invalid bboxes after the mosaic pipeline. Default to 0. + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + skip_filter (bool): Whether to skip filtering rules. If it + is True, the filter rule will not be applied, and the + `min_bbox_size` is invalid. Default to True. + pad_val (int): Pad value. Default to 114. + prob (float): Probability of applying this transformation. + Default to 1.0. + """ + + def __init__(self, + img_scale=(640, 640), + center_ratio_range=(0.5, 1.5), + min_bbox_size=0, + bbox_clip_border=True, + skip_filter=True, + pad_val=114, + prob=1.0): + assert isinstance(img_scale, tuple) + assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\ + f'got {prob}.' 
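+
+ # Note: `_mosaic_transform` pastes the four sub-images onto a canvas of
+ # shape (2 * img_scale[0], 2 * img_scale[1]); each sub-image is resized
+ # with keep-ratio before pasting.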
+ + log_img_scale(img_scale, skip_square=True) + self.img_scale = img_scale + self.center_ratio_range = center_ratio_range + self.min_bbox_size = min_bbox_size + self.bbox_clip_border = bbox_clip_border + self.skip_filter = skip_filter + self.pad_val = pad_val + self.prob = prob + + def __call__(self, results): + """Call function to make a mosaic of image. + + Args: + results (dict): Result dict. + + Returns: + dict: Result dict with mosaic transformed. + """ + + if random.uniform(0, 1) > self.prob: + return results + + results = self._mosaic_transform(results) + return results + + def get_indexes(self, dataset): + """Call function to collect indexes. + + Args: + dataset (:obj:`MultiImageMixDataset`): The dataset. + + Returns: + list: indexes. + """ + + indexes = [random.randint(0, len(dataset)) for _ in range(3)] + return indexes + + def _mosaic_transform(self, results): + """Mosaic transform function. + + Args: + results (dict): Result dict. + + Returns: + dict: Updated result dict. + """ + + assert 'mix_results' in results + mosaic_labels = [] + mosaic_bboxes = [] + if len(results['img'].shape) == 3: + mosaic_img = np.full( + (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3), + self.pad_val, + dtype=results['img'].dtype) + else: + mosaic_img = np.full( + (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)), + self.pad_val, + dtype=results['img'].dtype) + + # mosaic center x, y + center_x = int( + random.uniform(*self.center_ratio_range) * self.img_scale[1]) + center_y = int( + random.uniform(*self.center_ratio_range) * self.img_scale[0]) + center_position = (center_x, center_y) + + loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') + for i, loc in enumerate(loc_strs): + if loc == 'top_left': + results_patch = copy.deepcopy(results) + else: + results_patch = copy.deepcopy(results['mix_results'][i - 1]) + + img_i = results_patch['img'] + h_i, w_i = img_i.shape[:2] + # keep_ratio resize + scale_ratio_i = min(self.img_scale[0] / h_i, + self.img_scale[1] / w_i) + img_i = mmcv.imresize( + img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) + + # compute the combine parameters + paste_coord, crop_coord = self._mosaic_combine( + loc, center_position, img_i.shape[:2][::-1]) + x1_p, y1_p, x2_p, y2_p = paste_coord + x1_c, y1_c, x2_c, y2_c = crop_coord + + # crop and paste image + mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] + + # adjust coordinate + gt_bboxes_i = results_patch['gt_bboxes'] + gt_labels_i = results_patch['gt_labels'] + + if gt_bboxes_i.shape[0] > 0: + padw = x1_p - x1_c + padh = y1_p - y1_c + gt_bboxes_i[:, 0::2] = \ + scale_ratio_i * gt_bboxes_i[:, 0::2] + padw + gt_bboxes_i[:, 1::2] = \ + scale_ratio_i * gt_bboxes_i[:, 1::2] + padh + + mosaic_bboxes.append(gt_bboxes_i) + mosaic_labels.append(gt_labels_i) + + if len(mosaic_labels) > 0: + mosaic_bboxes = np.concatenate(mosaic_bboxes, 0) + mosaic_labels = np.concatenate(mosaic_labels, 0) + + if self.bbox_clip_border: + mosaic_bboxes[:, 0::2] = np.clip(mosaic_bboxes[:, 0::2], 0, + 2 * self.img_scale[1]) + mosaic_bboxes[:, 1::2] = np.clip(mosaic_bboxes[:, 1::2], 0, + 2 * self.img_scale[0]) + + if not self.skip_filter: + mosaic_bboxes, mosaic_labels = \ + self._filter_box_candidates(mosaic_bboxes, mosaic_labels) + + # remove outside bboxes + inside_inds = find_inside_bboxes(mosaic_bboxes, 2 * self.img_scale[0], + 2 * self.img_scale[1]) + mosaic_bboxes = mosaic_bboxes[inside_inds] + mosaic_labels = mosaic_labels[inside_inds] + + results['img'] = mosaic_img + results['img_shape'] = 
mosaic_img.shape + results['gt_bboxes'] = mosaic_bboxes + results['gt_labels'] = mosaic_labels + + return results + + def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): + """Calculate global coordinate of mosaic image and local coordinate of + cropped sub-image. + + Args: + loc (str): Index for the sub-image, loc in ('top_left', + 'top_right', 'bottom_left', 'bottom_right'). + center_position_xy (Sequence[float]): Mixing center for 4 images, + (x, y). + img_shape_wh (Sequence[int]): Width and height of sub-image + + Returns: + tuple[tuple[float]]: Corresponding coordinate of pasting and + cropping + - paste_coord (tuple): paste corner coordinate in mosaic image. + - crop_coord (tuple): crop corner coordinate in mosaic image. + """ + assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') + if loc == 'top_left': + # index0 to top left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + center_position_xy[0], \ + center_position_xy[1] + crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( + y2 - y1), img_shape_wh[0], img_shape_wh[1] + + elif loc == 'top_right': + # index1 to top right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[1] * 2), \ + center_position_xy[1] + crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( + img_shape_wh[0], x2 - x1), img_shape_wh[1] + + elif loc == 'bottom_left': + # index2 to bottom left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + center_position_xy[1], \ + center_position_xy[0], \ + min(self.img_scale[0] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( + y2 - y1, img_shape_wh[1]) + + else: + # index3 to bottom right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + center_position_xy[1], \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[1] * 2), \ + min(self.img_scale[0] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = 0, 0, min(img_shape_wh[0], + x2 - x1), min(y2 - y1, img_shape_wh[1]) + + paste_coord = x1, y1, x2, y2 + return paste_coord, crop_coord + + def _filter_box_candidates(self, bboxes, labels): + """Filter out bboxes too small after Mosaic.""" + bbox_w = bboxes[:, 2] - bboxes[:, 0] + bbox_h = bboxes[:, 3] - bboxes[:, 1] + valid_inds = (bbox_w > self.min_bbox_size) & \ + (bbox_h > self.min_bbox_size) + valid_inds = np.nonzero(valid_inds)[0] + return bboxes[valid_inds], labels[valid_inds] + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'img_scale={self.img_scale}, ' + repr_str += f'center_ratio_range={self.center_ratio_range}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'min_bbox_size={self.min_bbox_size}, ' + repr_str += f'skip_filter={self.skip_filter})' + return repr_str + + +@PIPELINES.register_module() +class MixUp: + """MixUp data augmentation. + + .. code:: text + + mixup transform + +------------------------------+ + | mixup image | | + | +--------|--------+ | + | | | | | + |---------------+ | | + | | | | + | | image | | + | | | | + | | | | + | |-----------------+ | + | pad | + +------------------------------+ + + The mixup transform steps are as follows: + + 1. Another random image is picked by dataset and embedded in + the top left patch(after padding and resizing) + 2. 
The target of mixup transform is the weighted average of mixup + image and origin image. + + Args: + img_scale (Sequence[int]): Image output size after mixup pipeline. + The shape order should be (height, width). Default: (640, 640). + ratio_range (Sequence[float]): Scale ratio of mixup image. + Default: (0.5, 1.5). + flip_ratio (float): Horizontal flip ratio of mixup image. + Default: 0.5. + pad_val (int): Pad value. Default: 114. + max_iters (int): The maximum number of iterations. If the number of + iterations is greater than `max_iters`, but gt_bbox is still + empty, then the iteration is terminated. Default: 15. + min_bbox_size (float): Width and height threshold to filter bboxes. + If the height or width of a box is smaller than this value, it + will be removed. Default: 5. + min_area_ratio (float): Threshold of area ratio between + original bboxes and wrapped bboxes. If smaller than this value, + the box will be removed. Default: 0.2. + max_aspect_ratio (float): Aspect ratio of width and height + threshold to filter bboxes. If max(h/w, w/h) larger than this + value, the box will be removed. Default: 20. + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + skip_filter (bool): Whether to skip filtering rules. If it + is True, the filter rule will not be applied, and the + `min_bbox_size` and `min_area_ratio` and `max_aspect_ratio` + is invalid. Default to True. + """ + + def __init__(self, + img_scale=(640, 640), + ratio_range=(0.5, 1.5), + flip_ratio=0.5, + pad_val=114, + max_iters=15, + min_bbox_size=5, + min_area_ratio=0.2, + max_aspect_ratio=20, + bbox_clip_border=True, + skip_filter=True): + assert isinstance(img_scale, tuple) + log_img_scale(img_scale, skip_square=True) + self.dynamic_scale = img_scale + self.ratio_range = ratio_range + self.flip_ratio = flip_ratio + self.pad_val = pad_val + self.max_iters = max_iters + self.min_bbox_size = min_bbox_size + self.min_area_ratio = min_area_ratio + self.max_aspect_ratio = max_aspect_ratio + self.bbox_clip_border = bbox_clip_border + self.skip_filter = skip_filter + + def __call__(self, results): + """Call function to make a mixup of image. + + Args: + results (dict): Result dict. + + Returns: + dict: Result dict with mixup transformed. + """ + + results = self._mixup_transform(results) + return results + + def get_indexes(self, dataset): + """Call function to collect indexes. + + Args: + dataset (:obj:`MultiImageMixDataset`): The dataset. + + Returns: + list: indexes. + """ + + for i in range(self.max_iters): + index = random.randint(0, len(dataset)) + gt_bboxes_i = dataset.get_ann_info(index)['bboxes'] + if len(gt_bboxes_i) != 0: + break + + return index + + def _mixup_transform(self, results): + """MixUp transform function. + + Args: + results (dict): Result dict. + + Returns: + dict: Updated result dict. + """ + + assert 'mix_results' in results + assert len( + results['mix_results']) == 1, 'MixUp only support 2 images now !' 
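+ # `mix_results` is filled by `MultiImageMixDataset` from the index
+ # returned by `get_indexes` above.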
+ + if results['mix_results'][0]['gt_bboxes'].shape[0] == 0: + # empty bbox + return results + + retrieve_results = results['mix_results'][0] + retrieve_img = retrieve_results['img'] + + jit_factor = random.uniform(*self.ratio_range) + is_filp = random.uniform(0, 1) > self.flip_ratio + + if len(retrieve_img.shape) == 3: + out_img = np.ones( + (self.dynamic_scale[0], self.dynamic_scale[1], 3), + dtype=retrieve_img.dtype) * self.pad_val + else: + out_img = np.ones( + self.dynamic_scale, dtype=retrieve_img.dtype) * self.pad_val + + # 1. keep_ratio resize + scale_ratio = min(self.dynamic_scale[0] / retrieve_img.shape[0], + self.dynamic_scale[1] / retrieve_img.shape[1]) + retrieve_img = mmcv.imresize( + retrieve_img, (int(retrieve_img.shape[1] * scale_ratio), + int(retrieve_img.shape[0] * scale_ratio))) + + # 2. paste + out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img + + # 3. scale jit + scale_ratio *= jit_factor + out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor), + int(out_img.shape[0] * jit_factor))) + + # 4. flip + if is_filp: + out_img = out_img[:, ::-1, :] + + # 5. random crop + ori_img = results['img'] + origin_h, origin_w = out_img.shape[:2] + target_h, target_w = ori_img.shape[:2] + padded_img = np.zeros( + (max(origin_h, target_h), max(origin_w, + target_w), 3)).astype(np.uint8) + padded_img[:origin_h, :origin_w] = out_img + + x_offset, y_offset = 0, 0 + if padded_img.shape[0] > target_h: + y_offset = random.randint(0, padded_img.shape[0] - target_h) + if padded_img.shape[1] > target_w: + x_offset = random.randint(0, padded_img.shape[1] - target_w) + padded_cropped_img = padded_img[y_offset:y_offset + target_h, + x_offset:x_offset + target_w] + + # 6. adjust bbox + retrieve_gt_bboxes = retrieve_results['gt_bboxes'] + retrieve_gt_bboxes[:, 0::2] = retrieve_gt_bboxes[:, 0::2] * scale_ratio + retrieve_gt_bboxes[:, 1::2] = retrieve_gt_bboxes[:, 1::2] * scale_ratio + if self.bbox_clip_border: + retrieve_gt_bboxes[:, 0::2] = np.clip(retrieve_gt_bboxes[:, 0::2], + 0, origin_w) + retrieve_gt_bboxes[:, 1::2] = np.clip(retrieve_gt_bboxes[:, 1::2], + 0, origin_h) + + if is_filp: + retrieve_gt_bboxes[:, 0::2] = ( + origin_w - retrieve_gt_bboxes[:, 0::2][:, ::-1]) + + # 7. filter + cp_retrieve_gt_bboxes = retrieve_gt_bboxes.copy() + cp_retrieve_gt_bboxes[:, 0::2] = \ + cp_retrieve_gt_bboxes[:, 0::2] - x_offset + cp_retrieve_gt_bboxes[:, 1::2] = \ + cp_retrieve_gt_bboxes[:, 1::2] - y_offset + if self.bbox_clip_border: + cp_retrieve_gt_bboxes[:, 0::2] = np.clip( + cp_retrieve_gt_bboxes[:, 0::2], 0, target_w) + cp_retrieve_gt_bboxes[:, 1::2] = np.clip( + cp_retrieve_gt_bboxes[:, 1::2], 0, target_h) + + # 8. 
mix up + ori_img = ori_img.astype(np.float32) + mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32) + + retrieve_gt_labels = retrieve_results['gt_labels'] + if not self.skip_filter: + keep_list = self._filter_box_candidates(retrieve_gt_bboxes.T, + cp_retrieve_gt_bboxes.T) + + retrieve_gt_labels = retrieve_gt_labels[keep_list] + cp_retrieve_gt_bboxes = cp_retrieve_gt_bboxes[keep_list] + + mixup_gt_bboxes = np.concatenate( + (results['gt_bboxes'], cp_retrieve_gt_bboxes), axis=0) + mixup_gt_labels = np.concatenate( + (results['gt_labels'], retrieve_gt_labels), axis=0) + + # remove outside bbox + inside_inds = find_inside_bboxes(mixup_gt_bboxes, target_h, target_w) + mixup_gt_bboxes = mixup_gt_bboxes[inside_inds] + mixup_gt_labels = mixup_gt_labels[inside_inds] + + results['img'] = mixup_img.astype(np.uint8) + results['img_shape'] = mixup_img.shape + results['gt_bboxes'] = mixup_gt_bboxes + results['gt_labels'] = mixup_gt_labels + + return results + + def _filter_box_candidates(self, bbox1, bbox2): + """Compute candidate boxes which include following 5 things: + + bbox1 before augment, bbox2 after augment, min_bbox_size (pixels), + min_area_ratio, max_aspect_ratio. + """ + + w1, h1 = bbox1[2] - bbox1[0], bbox1[3] - bbox1[1] + w2, h2 = bbox2[2] - bbox2[0], bbox2[3] - bbox2[1] + ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) + return ((w2 > self.min_bbox_size) + & (h2 > self.min_bbox_size) + & (w2 * h2 / (w1 * h1 + 1e-16) > self.min_area_ratio) + & (ar < self.max_aspect_ratio)) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'dynamic_scale={self.dynamic_scale}, ' + repr_str += f'ratio_range={self.ratio_range}, ' + repr_str += f'flip_ratio={self.flip_ratio}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'max_iters={self.max_iters}, ' + repr_str += f'min_bbox_size={self.min_bbox_size}, ' + repr_str += f'min_area_ratio={self.min_area_ratio}, ' + repr_str += f'max_aspect_ratio={self.max_aspect_ratio}, ' + repr_str += f'skip_filter={self.skip_filter})' + return repr_str + + +@PIPELINES.register_module() +class RandomAffine: + """Random affine transform data augmentation. + + This operation randomly generates affine transform matrix which including + rotation, translation, shear and scaling transforms. + + Args: + max_rotate_degree (float): Maximum degrees of rotation transform. + Default: 10. + max_translate_ratio (float): Maximum ratio of translation. + Default: 0.1. + scaling_ratio_range (tuple[float]): Min and max ratio of + scaling transform. Default: (0.5, 1.5). + max_shear_degree (float): Maximum degrees of shear + transform. Default: 2. + border (tuple[int]): Distance from height and width sides of input + image to adjust output shape. Only used in mosaic dataset. + Default: (0, 0). + border_val (tuple[int]): Border padding values of 3 channels. + Default: (114, 114, 114). + min_bbox_size (float): Width and height threshold to filter bboxes. + If the height or width of a box is smaller than this value, it + will be removed. Default: 2. + min_area_ratio (float): Threshold of area ratio between + original bboxes and wrapped bboxes. If smaller than this value, + the box will be removed. Default: 0.2. + max_aspect_ratio (float): Aspect ratio of width and height + threshold to filter bboxes. If max(h/w, w/h) larger than this + value, the box will be removed. + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. 
In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + skip_filter (bool): Whether to skip filtering rules. If it + is True, the filter rule will not be applied, and the + `min_bbox_size` and `min_area_ratio` and `max_aspect_ratio` + is invalid. Default to True. + """ + + def __init__(self, + max_rotate_degree=10.0, + max_translate_ratio=0.1, + scaling_ratio_range=(0.5, 1.5), + max_shear_degree=2.0, + border=(0, 0), + border_val=(114, 114, 114), + min_bbox_size=2, + min_area_ratio=0.2, + max_aspect_ratio=20, + bbox_clip_border=True, + skip_filter=True): + assert 0 <= max_translate_ratio <= 1 + assert scaling_ratio_range[0] <= scaling_ratio_range[1] + assert scaling_ratio_range[0] > 0 + self.max_rotate_degree = max_rotate_degree + self.max_translate_ratio = max_translate_ratio + self.scaling_ratio_range = scaling_ratio_range + self.max_shear_degree = max_shear_degree + self.border = border + self.border_val = border_val + self.min_bbox_size = min_bbox_size + self.min_area_ratio = min_area_ratio + self.max_aspect_ratio = max_aspect_ratio + self.bbox_clip_border = bbox_clip_border + self.skip_filter = skip_filter + + def __call__(self, results): + img = results['img'] + height = img.shape[0] + self.border[0] * 2 + width = img.shape[1] + self.border[1] * 2 + + # Rotation + rotation_degree = random.uniform(-self.max_rotate_degree, + self.max_rotate_degree) + rotation_matrix = self._get_rotation_matrix(rotation_degree) + + # Scaling + scaling_ratio = random.uniform(self.scaling_ratio_range[0], + self.scaling_ratio_range[1]) + scaling_matrix = self._get_scaling_matrix(scaling_ratio) + + # Shear + x_degree = random.uniform(-self.max_shear_degree, + self.max_shear_degree) + y_degree = random.uniform(-self.max_shear_degree, + self.max_shear_degree) + shear_matrix = self._get_shear_matrix(x_degree, y_degree) + + # Translation + trans_x = random.uniform(-self.max_translate_ratio, + self.max_translate_ratio) * width + trans_y = random.uniform(-self.max_translate_ratio, + self.max_translate_ratio) * height + translate_matrix = self._get_translation_matrix(trans_x, trans_y) + + warp_matrix = ( + translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix) + + img = cv2.warpPerspective( + img, + warp_matrix, + dsize=(width, height), + borderValue=self.border_val) + results['img'] = img + results['img_shape'] = img.shape + + for key in results.get('bbox_fields', []): + bboxes = results[key] + num_bboxes = len(bboxes) + if num_bboxes: + # homogeneous coordinates + xs = bboxes[:, [0, 0, 2, 2]].reshape(num_bboxes * 4) + ys = bboxes[:, [1, 3, 3, 1]].reshape(num_bboxes * 4) + ones = np.ones_like(xs) + points = np.vstack([xs, ys, ones]) + + warp_points = warp_matrix @ points + warp_points = warp_points[:2] / warp_points[2] + xs = warp_points[0].reshape(num_bboxes, 4) + ys = warp_points[1].reshape(num_bboxes, 4) + + warp_bboxes = np.vstack( + (xs.min(1), ys.min(1), xs.max(1), ys.max(1))).T + + if self.bbox_clip_border: + warp_bboxes[:, [0, 2]] = \ + warp_bboxes[:, [0, 2]].clip(0, width) + warp_bboxes[:, [1, 3]] = \ + warp_bboxes[:, [1, 3]].clip(0, height) + + # remove outside bbox + valid_index = find_inside_bboxes(warp_bboxes, height, width) + if not self.skip_filter: + # filter bboxes + filter_index = self.filter_gt_bboxes( + bboxes * scaling_ratio, warp_bboxes) + valid_index = valid_index & filter_index + + results[key] = warp_bboxes[valid_index] + if key in ['gt_bboxes']: + if 
'gt_labels' in results: + results['gt_labels'] = results['gt_labels'][ + valid_index] + + if 'gt_masks' in results: + raise NotImplementedError( + 'RandomAffine only supports bbox.') + return results + + def filter_gt_bboxes(self, origin_bboxes, wrapped_bboxes): + origin_w = origin_bboxes[:, 2] - origin_bboxes[:, 0] + origin_h = origin_bboxes[:, 3] - origin_bboxes[:, 1] + wrapped_w = wrapped_bboxes[:, 2] - wrapped_bboxes[:, 0] + wrapped_h = wrapped_bboxes[:, 3] - wrapped_bboxes[:, 1] + aspect_ratio = np.maximum(wrapped_w / (wrapped_h + 1e-16), + wrapped_h / (wrapped_w + 1e-16)) + + wh_valid_idx = (wrapped_w > self.min_bbox_size) & \ + (wrapped_h > self.min_bbox_size) + area_valid_idx = wrapped_w * wrapped_h / (origin_w * origin_h + + 1e-16) > self.min_area_ratio + aspect_ratio_valid_idx = aspect_ratio < self.max_aspect_ratio + return wh_valid_idx & area_valid_idx & aspect_ratio_valid_idx + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(max_rotate_degree={self.max_rotate_degree}, ' + repr_str += f'max_translate_ratio={self.max_translate_ratio}, ' + repr_str += f'scaling_ratio={self.scaling_ratio_range}, ' + repr_str += f'max_shear_degree={self.max_shear_degree}, ' + repr_str += f'border={self.border}, ' + repr_str += f'border_val={self.border_val}, ' + repr_str += f'min_bbox_size={self.min_bbox_size}, ' + repr_str += f'min_area_ratio={self.min_area_ratio}, ' + repr_str += f'max_aspect_ratio={self.max_aspect_ratio}, ' + repr_str += f'skip_filter={self.skip_filter})' + return repr_str + + @staticmethod + def _get_rotation_matrix(rotate_degrees): + radian = math.radians(rotate_degrees) + rotation_matrix = np.array( + [[np.cos(radian), -np.sin(radian), 0.], + [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]], + dtype=np.float32) + return rotation_matrix + + @staticmethod + def _get_scaling_matrix(scale_ratio): + scaling_matrix = np.array( + [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]], + dtype=np.float32) + return scaling_matrix + + @staticmethod + def _get_share_matrix(scale_ratio): + scaling_matrix = np.array( + [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]], + dtype=np.float32) + return scaling_matrix + + @staticmethod + def _get_shear_matrix(x_shear_degrees, y_shear_degrees): + x_radian = math.radians(x_shear_degrees) + y_radian = math.radians(y_shear_degrees) + shear_matrix = np.array([[1, np.tan(x_radian), 0.], + [np.tan(y_radian), 1, 0.], [0., 0., 1.]], + dtype=np.float32) + return shear_matrix + + @staticmethod + def _get_translation_matrix(x, y): + translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]], + dtype=np.float32) + return translation_matrix + + +@PIPELINES.register_module() +class YOLOXHSVRandomAug: + """Apply HSV augmentation to image sequentially. It is referenced from + https://github.com/Megvii- + BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21. + + Args: + hue_delta (int): delta of hue. Default: 5. + saturation_delta (int): delta of saturation. Default: 30. + value_delta (int): delat of value. Default: 30. 
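+
+ For illustration, in a YOLOX-style pipeline this transform is typically
+ placed after ``Mosaic``/``MixUp``; with the documented defaults the entry
+ is simply::
+
+     dict(type='YOLOXHSVRandomAug', hue_delta=5, saturation_delta=30,
+          value_delta=30)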
+ """ + + def __init__(self, hue_delta=5, saturation_delta=30, value_delta=30): + self.hue_delta = hue_delta + self.saturation_delta = saturation_delta + self.value_delta = value_delta + + def __call__(self, results): + img = results['img'] + hsv_gains = np.random.uniform(-1, 1, 3) * [ + self.hue_delta, self.saturation_delta, self.value_delta + ] + # random selection of h, s, v + hsv_gains *= np.random.randint(0, 2, 3) + # prevent overflow + hsv_gains = hsv_gains.astype(np.int16) + img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16) + + img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180 + img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255) + img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255) + cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img) + + results['img'] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(hue_delta={self.hue_delta}, ' + repr_str += f'saturation_delta={self.saturation_delta}, ' + repr_str += f'value_delta={self.value_delta})' + return repr_str + + +@PIPELINES.register_module() +class CopyPaste: + """Simple Copy-Paste is a Strong Data Augmentation Method for Instance + Segmentation The simple copy-paste transform steps are as follows: + + 1. The destination image is already resized with aspect ratio kept, + cropped and padded. + 2. Randomly select a source image, which is also already resized + with aspect ratio kept, cropped and padded in a similar way + as the destination image. + 3. Randomly select some objects from the source image. + 4. Paste these source objects to the destination image directly, + due to the source and destination image have the same size. + 5. Update object masks of the destination image, for some origin objects + may be occluded. + 6. Generate bboxes from the updated destination masks and + filter some objects which are totally occluded, and adjust bboxes + which are partly occluded. + 7. Append selected source bboxes, masks, and labels. + + Args: + max_num_pasted (int): The maximum number of pasted objects. + Default: 100. + bbox_occluded_thr (int): The threshold of occluded bbox. + Default: 10. + mask_occluded_thr (int): The threshold of occluded mask. + Default: 300. + selected (bool): Whether select objects or not. If select is False, + all objects of the source image will be pasted to the + destination image. + Default: True. + """ + + def __init__( + self, + max_num_pasted=100, + bbox_occluded_thr=10, + mask_occluded_thr=300, + selected=True, + ): + self.max_num_pasted = max_num_pasted + self.bbox_occluded_thr = bbox_occluded_thr + self.mask_occluded_thr = mask_occluded_thr + self.selected = selected + + def get_indexes(self, dataset): + """Call function to collect indexes.s. + + Args: + dataset (:obj:`MultiImageMixDataset`): The dataset. + Returns: + list: Indexes. + """ + return random.randint(0, len(dataset)) + + def __call__(self, results): + """Call function to make a copy-paste of image. + + Args: + results (dict): Result dict. + Returns: + dict: Result dict with copy-paste transformed. 
+ """ + + assert 'mix_results' in results + num_images = len(results['mix_results']) + assert num_images == 1, \ + f'CopyPaste only supports processing 2 images, got {num_images}' + if self.selected: + selected_results = self._select_object(results['mix_results'][0]) + else: + selected_results = results['mix_results'][0] + return self._copy_paste(results, selected_results) + + def _select_object(self, results): + """Select some objects from the source results.""" + bboxes = results['gt_bboxes'] + labels = results['gt_labels'] + masks = results['gt_masks'] + max_num_pasted = min(bboxes.shape[0] + 1, self.max_num_pasted) + num_pasted = np.random.randint(0, max_num_pasted) + selected_inds = np.random.choice( + bboxes.shape[0], size=num_pasted, replace=False) + + selected_bboxes = bboxes[selected_inds] + selected_labels = labels[selected_inds] + selected_masks = masks[selected_inds] + + results['gt_bboxes'] = selected_bboxes + results['gt_labels'] = selected_labels + results['gt_masks'] = selected_masks + return results + + def _copy_paste(self, dst_results, src_results): + """CopyPaste transform function. + + Args: + dst_results (dict): Result dict of the destination image. + src_results (dict): Result dict of the source image. + Returns: + dict: Updated result dict. + """ + dst_img = dst_results['img'] + dst_bboxes = dst_results['gt_bboxes'] + dst_labels = dst_results['gt_labels'] + dst_masks = dst_results['gt_masks'] + + src_img = src_results['img'] + src_bboxes = src_results['gt_bboxes'] + src_labels = src_results['gt_labels'] + src_masks = src_results['gt_masks'] + + if len(src_bboxes) == 0: + return dst_results + + # update masks and generate bboxes from updated masks + composed_mask = np.where(np.any(src_masks.masks, axis=0), 1, 0) + updated_dst_masks = self.get_updated_masks(dst_masks, composed_mask) + updated_dst_bboxes = updated_dst_masks.get_bboxes() + assert len(updated_dst_bboxes) == len(updated_dst_masks) + + # filter totally occluded objects + bboxes_inds = np.all( + np.abs( + (updated_dst_bboxes - dst_bboxes)) <= self.bbox_occluded_thr, + axis=-1) + masks_inds = updated_dst_masks.masks.sum( + axis=(1, 2)) > self.mask_occluded_thr + valid_inds = bboxes_inds | masks_inds + + # Paste source objects to destination image directly + img = dst_img * (1 - composed_mask[..., np.newaxis] + ) + src_img * composed_mask[..., np.newaxis] + bboxes = np.concatenate([updated_dst_bboxes[valid_inds], src_bboxes]) + labels = np.concatenate([dst_labels[valid_inds], src_labels]) + masks = np.concatenate( + [updated_dst_masks.masks[valid_inds], src_masks.masks]) + + dst_results['img'] = img + dst_results['gt_bboxes'] = bboxes + dst_results['gt_labels'] = labels + dst_results['gt_masks'] = BitmapMasks(masks, masks.shape[1], + masks.shape[2]) + + return dst_results + + def get_updated_masks(self, masks, composed_mask): + assert masks.masks.shape[-2:] == composed_mask.shape[-2:], \ + 'Cannot compare two arrays of different size' + masks.masks = np.where(composed_mask, 0, masks.masks) + return masks + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'max_num_pasted={self.max_num_pasted}, ' + repr_str += f'bbox_occluded_thr={self.bbox_occluded_thr}, ' + repr_str += f'mask_occluded_thr={self.mask_occluded_thr}, ' + repr_str += f'selected={self.selected}, ' + return repr_str diff --git a/downstream/mmdetection/mmdet/datasets/samplers/__init__.py b/downstream/mmdetection/mmdet/datasets/samplers/__init__.py new file mode 100644 index 0000000..a4c7ea1 --- /dev/null +++ 
b/downstream/mmdetection/mmdet/datasets/samplers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .class_aware_sampler import ClassAwareSampler +from .distributed_sampler import DistributedSampler +from .group_sampler import DistributedGroupSampler, GroupSampler +from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler + +__all__ = [ + 'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler', + 'InfiniteGroupBatchSampler', 'InfiniteBatchSampler', 'ClassAwareSampler' +] diff --git a/downstream/mmdetection/mmdet/datasets/samplers/class_aware_sampler.py b/downstream/mmdetection/mmdet/datasets/samplers/class_aware_sampler.py new file mode 100644 index 0000000..c52708e --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/samplers/class_aware_sampler.py @@ -0,0 +1,176 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +from mmcv.runner import get_dist_info +from torch.utils.data import Sampler + +from mmdet.core.utils import sync_random_seed + + +class ClassAwareSampler(Sampler): + r"""Sampler that restricts data loading to the label of the dataset. + + A class-aware sampling strategy to effectively tackle the + non-uniform class distribution. The length of the training data is + consistent with source data. Simple improvements based on `Relay + Backpropagation for Effective Learning of Deep Convolutional + Neural Networks `_ + + The implementation logic is referred to + https://github.com/Sense-X/TSD/blob/master/mmdet/datasets/samplers/distributed_classaware_sampler.py + + Args: + dataset: Dataset used for sampling. + samples_per_gpu (int): When model is :obj:`DistributedDataParallel`, + it is the number of training samples on each GPU. + When model is :obj:`DataParallel`, it is + `num_gpus * samples_per_gpu`. + Default : 1. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + seed (int, optional): random seed used to shuffle the sampler if + ``shuffle=True``. This number should be identical across all + processes in the distributed group. Default: 0. + num_sample_class (int): The number of samples taken from each + per-label list. Default: 1 + """ + + def __init__(self, + dataset, + samples_per_gpu=1, + num_replicas=None, + rank=None, + seed=0, + num_sample_class=1): + _rank, _num_replicas = get_dist_info() + if num_replicas is None: + num_replicas = _num_replicas + if rank is None: + rank = _rank + + self.dataset = dataset + self.num_replicas = num_replicas + self.samples_per_gpu = samples_per_gpu + self.rank = rank + self.epoch = 0 + # Must be the same across all workers. 
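Looking back at `_copy_paste` in the `CopyPaste` transform above, its core compositing and occlusion filtering can be illustrated with plain NumPy arrays standing in for `BitmapMasks`; the shapes and the occlusion threshold below are toy values chosen only for the demo.

```python
import numpy as np

h, w = 6, 6
dst_img = np.zeros((h, w, 3), dtype=np.uint8)
src_img = np.full((h, w, 3), 255, dtype=np.uint8)

# one destination mask and one pasted source mask (overlapping)
dst_masks = np.zeros((1, h, w), dtype=np.uint8)
dst_masks[0, 1:4, 1:4] = 1
src_masks = np.zeros((1, h, w), dtype=np.uint8)
src_masks[0, 2:5, 2:5] = 1

# union of all source objects, as in `_copy_paste`
composed = np.where(np.any(src_masks, axis=0), 1, 0)

# destination masks lose whatever the pasted objects cover
updated_dst = np.where(composed, 0, dst_masks)

# keep destination objects whose remaining mask area exceeds a toy threshold
mask_occluded_thr = 2
valid = updated_dst.sum(axis=(1, 2)) > mask_occluded_thr

# paste source pixels on top of the destination image
out = dst_img * (1 - composed[..., None]) + src_img * composed[..., None]
print(valid, out[..., 0])
```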
If None, will use a + # random seed shared among workers + # (require synchronization among all workers) + self.seed = sync_random_seed(seed) + + # The number of samples taken from each per-label list + assert num_sample_class > 0 and isinstance(num_sample_class, int) + self.num_sample_class = num_sample_class + # Get per-label image list from dataset + assert hasattr(dataset, 'get_cat2imgs'), \ + 'dataset must have `get_cat2imgs` function' + self.cat_dict = dataset.get_cat2imgs() + + self.num_samples = int( + math.ceil( + len(self.dataset) * 1.0 / self.num_replicas / + self.samples_per_gpu)) * self.samples_per_gpu + self.total_size = self.num_samples * self.num_replicas + + # get number of images containing each category + self.num_cat_imgs = [len(x) for x in self.cat_dict.values()] + # filter labels without images + self.valid_cat_inds = [ + i for i, length in enumerate(self.num_cat_imgs) if length != 0 + ] + self.num_classes = len(self.valid_cat_inds) + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch + self.seed) + + # initialize label list + label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g) + # initialize each per-label image list + data_iter_dict = dict() + for i in self.valid_cat_inds: + data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g) + + def gen_cat_img_inds(cls_list, data_dict, num_sample_cls): + """Traverse the categories and extract `num_sample_cls` image + indexes of the corresponding categories one by one.""" + id_indices = [] + for _ in range(len(cls_list)): + cls_idx = next(cls_list) + for _ in range(num_sample_cls): + id = next(data_dict[cls_idx]) + id_indices.append(id) + return id_indices + + # deterministically shuffle based on epoch + num_bins = int( + math.ceil(self.total_size * 1.0 / self.num_classes / + self.num_sample_class)) + indices = [] + for i in range(num_bins): + indices += gen_cat_img_inds(label_iter_list, data_iter_dict, + self.num_sample_class) + + # fix extra samples to make it evenly divisible + if len(indices) >= self.total_size: + indices = indices[:self.total_size] + else: + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset:offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class RandomCycleIter: + """Shuffle the list and do it again after the list have traversed. + + The implementation logic is referred to + https://github.com/wutong16/DistributionBalancedLoss/blob/master/mllt/datasets/loader/sampler.py + + Example: + >>> label_list = [0, 1, 2, 4, 5] + >>> g = torch.Generator() + >>> g.manual_seed(0) + >>> label_iter_list = RandomCycleIter(label_list, generator=g) + >>> index = next(label_iter_list) + Args: + data (list or ndarray): The data that needs to be shuffled. + generator: An torch.Generator object, which is used in setting the seed + for generating random numbers. 
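The inner `gen_cat_img_inds` loop above cycles over classes and draws `num_sample_class` image indices from each per-class list. A dependency-free sketch of the same idea is shown below, using `random.shuffle`-style resampling in place of `RandomCycleIter`; the toy `cat2imgs` dict stands in for what `dataset.get_cat2imgs()` would return.

```python
import random

# toy per-class image index lists, e.g. from dataset.get_cat2imgs()
cat2imgs = {0: [0, 1, 2], 1: [3], 2: [4, 5]}


def class_aware_indices(cat2imgs, total, num_sample_class=1, seed=0):
    rng = random.Random(seed)
    classes = list(cat2imgs)
    rng.shuffle(classes)
    pools = {c: rng.sample(v, len(v)) for c, v in cat2imgs.items()}
    cursors = {c: 0 for c in classes}
    out = []
    while len(out) < total:
        for c in classes:
            for _ in range(num_sample_class):
                if cursors[c] == len(pools[c]):      # reshuffle an exhausted class
                    pools[c] = rng.sample(cat2imgs[c], len(cat2imgs[c]))
                    cursors[c] = 0
                out.append(pools[c][cursors[c]])
                cursors[c] += 1
    return out[:total]


print(class_aware_indices(cat2imgs, total=9, num_sample_class=1))
# the rare class (only image 3) appears as often as the frequent ones
```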
+ """ # noqa: W605 + + def __init__(self, data, generator=None): + self.data = data + self.length = len(data) + self.index = torch.randperm(self.length, generator=generator).numpy() + self.i = 0 + self.generator = generator + + def __iter__(self): + return self + + def __len__(self): + return len(self.data) + + def __next__(self): + if self.i == self.length: + self.index = torch.randperm( + self.length, generator=self.generator).numpy() + self.i = 0 + idx = self.data[self.index[self.i]] + self.i += 1 + return idx diff --git a/downstream/mmdetection/mmdet/datasets/samplers/distributed_sampler.py b/downstream/mmdetection/mmdet/datasets/samplers/distributed_sampler.py new file mode 100644 index 0000000..1bc8b7c --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/samplers/distributed_sampler.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +from torch.utils.data import DistributedSampler as _DistributedSampler + +from mmdet.core.utils import sync_random_seed +from mmdet.utils import get_device + + +class DistributedSampler(_DistributedSampler): + + def __init__(self, + dataset, + num_replicas=None, + rank=None, + shuffle=True, + seed=0): + super().__init__( + dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) + + # In distributed sampling, different ranks should sample + # non-overlapped data in the dataset. Therefore, this function + # is used to make sure that each rank shuffles the data indices + # in the same order based on the same seed. Then different ranks + # could use different indices to select non-overlapped data from the + # same data list. + device = get_device() + self.seed = sync_random_seed(seed, device) + + def __iter__(self): + # deterministically shuffle based on epoch + if self.shuffle: + g = torch.Generator() + # When :attr:`shuffle=True`, this ensures all replicas + # use a different random ordering for each epoch. + # Otherwise, the next iteration of this sampler will + # yield the same ordering. + g.manual_seed(self.epoch + self.seed) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + # in case that indices is shorter than half of total_size + indices = (indices * + math.ceil(self.total_size / len(indices)))[:self.total_size] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) diff --git a/downstream/mmdetection/mmdet/datasets/samplers/group_sampler.py b/downstream/mmdetection/mmdet/datasets/samplers/group_sampler.py new file mode 100644 index 0000000..783d2b2 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/samplers/group_sampler.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import math + +import numpy as np +import torch +from mmcv.runner import get_dist_info +from torch.utils.data import Sampler + + +class GroupSampler(Sampler): + + def __init__(self, dataset, samples_per_gpu=1): + assert hasattr(dataset, 'flag') + self.dataset = dataset + self.samples_per_gpu = samples_per_gpu + self.flag = dataset.flag.astype(np.int64) + self.group_sizes = np.bincount(self.flag) + self.num_samples = 0 + for i, size in enumerate(self.group_sizes): + self.num_samples += int(np.ceil( + size / self.samples_per_gpu)) * self.samples_per_gpu + + def __iter__(self): + indices = [] + for i, size in enumerate(self.group_sizes): + if size == 0: + continue + indice = np.where(self.flag == i)[0] + assert len(indice) == size + np.random.shuffle(indice) + num_extra = int(np.ceil(size / self.samples_per_gpu) + ) * self.samples_per_gpu - len(indice) + indice = np.concatenate( + [indice, np.random.choice(indice, num_extra)]) + indices.append(indice) + indices = np.concatenate(indices) + indices = [ + indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu] + for i in np.random.permutation( + range(len(indices) // self.samples_per_gpu)) + ] + indices = np.concatenate(indices) + indices = indices.astype(np.int64).tolist() + assert len(indices) == self.num_samples + return iter(indices) + + def __len__(self): + return self.num_samples + + +class DistributedGroupSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + + .. note:: + Dataset is assumed to be of constant size. + + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + seed (int, optional): random seed used to shuffle the sampler if + ``shuffle=True``. This number should be identical across all + processes in the distributed group. Default: 0. + """ + + def __init__(self, + dataset, + samples_per_gpu=1, + num_replicas=None, + rank=None, + seed=0): + _rank, _num_replicas = get_dist_info() + if num_replicas is None: + num_replicas = _num_replicas + if rank is None: + rank = _rank + self.dataset = dataset + self.samples_per_gpu = samples_per_gpu + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.seed = seed if seed is not None else 0 + + assert hasattr(self.dataset, 'flag') + self.flag = self.dataset.flag + self.group_sizes = np.bincount(self.flag) + + self.num_samples = 0 + for i, j in enumerate(self.group_sizes): + self.num_samples += int( + math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu / + self.num_replicas)) * self.samples_per_gpu + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch + self.seed) + + indices = [] + for i, size in enumerate(self.group_sizes): + if size > 0: + indice = np.where(self.flag == i)[0] + assert len(indice) == size + # add .numpy() to avoid bug when selecting indice in parrots. + # TODO: check whether torch.randperm() can be replaced by + # numpy.random.permutation(). 
+ indice = indice[list( + torch.randperm(int(size), generator=g).numpy())].tolist() + extra = int( + math.ceil( + size * 1.0 / self.samples_per_gpu / self.num_replicas) + ) * self.samples_per_gpu * self.num_replicas - len(indice) + # pad indice + tmp = indice.copy() + for _ in range(extra // size): + indice.extend(tmp) + indice.extend(tmp[:extra % size]) + indices.extend(indice) + + assert len(indices) == self.total_size + + indices = [ + indices[j] for i in list( + torch.randperm( + len(indices) // self.samples_per_gpu, generator=g)) + for j in range(i * self.samples_per_gpu, (i + 1) * + self.samples_per_gpu) + ] + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset:offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/downstream/mmdetection/mmdet/datasets/samplers/infinite_sampler.py b/downstream/mmdetection/mmdet/datasets/samplers/infinite_sampler.py new file mode 100644 index 0000000..d42487e --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/samplers/infinite_sampler.py @@ -0,0 +1,186 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools + +import numpy as np +import torch +from mmcv.runner import get_dist_info +from torch.utils.data.sampler import Sampler + +from mmdet.core.utils import sync_random_seed + + +class InfiniteGroupBatchSampler(Sampler): + """Similar to `BatchSampler` warping a `GroupSampler. It is designed for + iteration-based runners like `IterBasedRunner` and yields a mini-batch + indices each time, all indices in a batch should be in the same group. + + The implementation logic is referred to + https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py + + Args: + dataset (object): The dataset. + batch_size (int): When model is :obj:`DistributedDataParallel`, + it is the number of training samples on each GPU. + When model is :obj:`DataParallel`, it is + `num_gpus * samples_per_gpu`. + Default : 1. + world_size (int, optional): Number of processes participating in + distributed training. Default: None. + rank (int, optional): Rank of current process. Default: None. + seed (int): Random seed. Default: 0. + shuffle (bool): Whether shuffle the indices of a dummy `epoch`, it + should be noted that `shuffle` can not guarantee that you can + generate sequential indices because it need to ensure + that all indices in a batch is in a group. Default: True. + """ # noqa: W605 + + def __init__(self, + dataset, + batch_size=1, + world_size=None, + rank=None, + seed=0, + shuffle=True): + _rank, _world_size = get_dist_info() + if world_size is None: + world_size = _world_size + if rank is None: + rank = _rank + self.rank = rank + self.world_size = world_size + self.dataset = dataset + self.batch_size = batch_size + # In distributed sampling, different ranks should sample + # non-overlapped data in the dataset. Therefore, this function + # is used to make sure that each rank shuffles the data indices + # in the same order based on the same seed. Then different ranks + # could use different indices to select non-overlapped data from the + # same data list. 
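Both group samplers above size their epochs from `np.bincount` over the aspect-ratio `flag`. The arithmetic below reproduces, for toy numbers, how each group is padded up so that every GPU batch is drawn from a single group.

```python
import math

import numpy as np

# flag[i] is 0 or 1 depending on whether image i is wider than tall
flag = np.array([0, 0, 0, 1, 1, 0, 1, 0, 0, 1])
samples_per_gpu, num_replicas = 4, 2

group_sizes = np.bincount(flag)                 # e.g. [6 4]
num_samples = 0
for size in group_sizes:
    # pad each group to a multiple of samples_per_gpu * num_replicas,
    # then count only this rank's share
    num_samples += int(
        math.ceil(size / samples_per_gpu / num_replicas)) * samples_per_gpu
total_size = num_samples * num_replicas
print(group_sizes, num_samples, total_size)     # [6 4] 8 16
```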
+ self.seed = sync_random_seed(seed) + self.shuffle = shuffle + + assert hasattr(self.dataset, 'flag') + self.flag = self.dataset.flag + self.group_sizes = np.bincount(self.flag) + # buffer used to save indices of each group + self.buffer_per_group = {k: [] for k in range(len(self.group_sizes))} + + self.size = len(dataset) + self.indices = self._indices_of_rank() + + def _infinite_indices(self): + """Infinitely yield a sequence of indices.""" + g = torch.Generator() + g.manual_seed(self.seed) + while True: + if self.shuffle: + yield from torch.randperm(self.size, generator=g).tolist() + + else: + yield from torch.arange(self.size).tolist() + + def _indices_of_rank(self): + """Slice the infinite indices by rank.""" + yield from itertools.islice(self._infinite_indices(), self.rank, None, + self.world_size) + + def __iter__(self): + # once batch size is reached, yield the indices + for idx in self.indices: + flag = self.flag[idx] + group_buffer = self.buffer_per_group[flag] + group_buffer.append(idx) + if len(group_buffer) == self.batch_size: + yield group_buffer[:] + del group_buffer[:] + + def __len__(self): + """Length of base dataset.""" + return self.size + + def set_epoch(self, epoch): + """Not supported in `IterationBased` runner.""" + raise NotImplementedError + + +class InfiniteBatchSampler(Sampler): + """Similar to `BatchSampler` warping a `DistributedSampler. It is designed + iteration-based runners like `IterBasedRunner` and yields a mini-batch + indices each time. + + The implementation logic is referred to + https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py + + Args: + dataset (object): The dataset. + batch_size (int): When model is :obj:`DistributedDataParallel`, + it is the number of training samples on each GPU, + When model is :obj:`DataParallel`, it is + `num_gpus * samples_per_gpu`. + Default : 1. + world_size (int, optional): Number of processes participating in + distributed training. Default: None. + rank (int, optional): Rank of current process. Default: None. + seed (int): Random seed. Default: 0. + shuffle (bool): Whether shuffle the dataset or not. Default: True. + """ # noqa: W605 + + def __init__(self, + dataset, + batch_size=1, + world_size=None, + rank=None, + seed=0, + shuffle=True): + _rank, _world_size = get_dist_info() + if world_size is None: + world_size = _world_size + if rank is None: + rank = _rank + self.rank = rank + self.world_size = world_size + self.dataset = dataset + self.batch_size = batch_size + # In distributed sampling, different ranks should sample + # non-overlapped data in the dataset. Therefore, this function + # is used to make sure that each rank shuffles the data indices + # in the same order based on the same seed. Then different ranks + # could use different indices to select non-overlapped data from the + # same data list. 
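`_indices_of_rank` above turns a single infinite permutation stream into per-rank shards via `itertools.islice`. The short sketch below shows the resulting interleaving for a world size of 2, using toy size and seed values.

```python
import itertools

import torch


def infinite_indices(size, seed=0, shuffle=True):
    """Infinitely yield indices, reshuffling after each pass (as in the samplers above)."""
    g = torch.Generator()
    g.manual_seed(seed)
    while True:
        if shuffle:
            yield from torch.randperm(size, generator=g).tolist()
        else:
            yield from torch.arange(size).tolist()


size, world_size = 6, 2
streams = [
    itertools.islice(infinite_indices(size, seed=0), rank, None, world_size)
    for rank in range(world_size)
]
# each rank takes every world_size-th element of the same seeded stream
print([list(itertools.islice(s, 6)) for s in streams])
```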
+ self.seed = sync_random_seed(seed) + self.shuffle = shuffle + self.size = len(dataset) + self.indices = self._indices_of_rank() + + def _infinite_indices(self): + """Infinitely yield a sequence of indices.""" + g = torch.Generator() + g.manual_seed(self.seed) + while True: + if self.shuffle: + yield from torch.randperm(self.size, generator=g).tolist() + + else: + yield from torch.arange(self.size).tolist() + + def _indices_of_rank(self): + """Slice the infinite indices by rank.""" + yield from itertools.islice(self._infinite_indices(), self.rank, None, + self.world_size) + + def __iter__(self): + # once batch size is reached, yield the indices + batch_buffer = [] + for idx in self.indices: + batch_buffer.append(idx) + if len(batch_buffer) == self.batch_size: + yield batch_buffer + batch_buffer = [] + + def __len__(self): + """Length of base dataset.""" + return self.size + + def set_epoch(self, epoch): + """Not supported in `IterationBased` runner.""" + raise NotImplementedError diff --git a/downstream/mmdetection/mmdet/datasets/utils.py b/downstream/mmdetection/mmdet/datasets/utils.py new file mode 100644 index 0000000..26e922d --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/utils.py @@ -0,0 +1,166 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +from mmcv.cnn import VGG +from mmcv.runner.hooks import HOOKS, Hook + +from mmdet.datasets.builder import PIPELINES +from mmdet.datasets.pipelines import (LoadAnnotations, LoadImageFromFile, + LoadPanopticAnnotations) +from mmdet.models.dense_heads import GARPNHead, RPNHead +from mmdet.models.roi_heads.mask_heads import FusedSemanticHead + + +def replace_ImageToTensor(pipelines): + """Replace the ImageToTensor transform in a data pipeline to + DefaultFormatBundle, which is normally useful in batch inference. + + Args: + pipelines (list[dict]): Data pipeline configs. + + Returns: + list: The new pipeline list with all ImageToTensor replaced by + DefaultFormatBundle. + + Examples: + >>> pipelines = [ + ... dict(type='LoadImageFromFile'), + ... dict( + ... type='MultiScaleFlipAug', + ... img_scale=(1333, 800), + ... flip=False, + ... transforms=[ + ... dict(type='Resize', keep_ratio=True), + ... dict(type='RandomFlip'), + ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), + ... dict(type='Pad', size_divisor=32), + ... dict(type='ImageToTensor', keys=['img']), + ... dict(type='Collect', keys=['img']), + ... ]) + ... ] + >>> expected_pipelines = [ + ... dict(type='LoadImageFromFile'), + ... dict( + ... type='MultiScaleFlipAug', + ... img_scale=(1333, 800), + ... flip=False, + ... transforms=[ + ... dict(type='Resize', keep_ratio=True), + ... dict(type='RandomFlip'), + ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), + ... dict(type='Pad', size_divisor=32), + ... dict(type='DefaultFormatBundle'), + ... dict(type='Collect', keys=['img']), + ... ]) + ... ] + >>> assert expected_pipelines == replace_ImageToTensor(pipelines) + """ + pipelines = copy.deepcopy(pipelines) + for i, pipeline in enumerate(pipelines): + if pipeline['type'] == 'MultiScaleFlipAug': + assert 'transforms' in pipeline + pipeline['transforms'] = replace_ImageToTensor( + pipeline['transforms']) + elif pipeline['type'] == 'ImageToTensor': + warnings.warn( + '"ImageToTensor" pipeline is replaced by ' + '"DefaultFormatBundle" for batch inference. 
It is ' + 'recommended to manually replace it in the test ' + 'data pipeline in your config file.', UserWarning) + pipelines[i] = {'type': 'DefaultFormatBundle'} + return pipelines + + +def get_loading_pipeline(pipeline): + """Only keep loading image and annotations related configuration. + + Args: + pipeline (list[dict]): Data pipeline configs. + + Returns: + list[dict]: The new pipeline list with only keep + loading image and annotations related configuration. + + Examples: + >>> pipelines = [ + ... dict(type='LoadImageFromFile'), + ... dict(type='LoadAnnotations', with_bbox=True), + ... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + ... dict(type='RandomFlip', flip_ratio=0.5), + ... dict(type='Normalize', **img_norm_cfg), + ... dict(type='Pad', size_divisor=32), + ... dict(type='DefaultFormatBundle'), + ... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) + ... ] + >>> expected_pipelines = [ + ... dict(type='LoadImageFromFile'), + ... dict(type='LoadAnnotations', with_bbox=True) + ... ] + >>> assert expected_pipelines ==\ + ... get_loading_pipeline(pipelines) + """ + loading_pipeline_cfg = [] + for cfg in pipeline: + obj_cls = PIPELINES.get(cfg['type']) + # TODO:use more elegant way to distinguish loading modules + if obj_cls is not None and obj_cls in (LoadImageFromFile, + LoadAnnotations, + LoadPanopticAnnotations): + loading_pipeline_cfg.append(cfg) + assert len(loading_pipeline_cfg) == 2, \ + 'The data pipeline in your config file must include ' \ + 'loading image and annotations related pipeline.' + return loading_pipeline_cfg + + +@HOOKS.register_module() +class NumClassCheckHook(Hook): + + def _check_head(self, runner): + """Check whether the `num_classes` in head matches the length of + `CLASSES` in `dataset`. + + Args: + runner (obj:`EpochBasedRunner`): Epoch based Runner. + """ + model = runner.model + dataset = runner.data_loader.dataset + if dataset.CLASSES is None: + runner.logger.warning( + f'Please set `CLASSES` ' + f'in the {dataset.__class__.__name__} and' + f'check if it is consistent with the `num_classes` ' + f'of head') + else: + assert type(dataset.CLASSES) is not str, \ + (f'`CLASSES` in {dataset.__class__.__name__}' + f'should be a tuple of str.' + f'Add comma if number of classes is 1 as ' + f'CLASSES = ({dataset.CLASSES},)') + for name, module in model.named_modules(): + if hasattr(module, 'num_classes') and not isinstance( + module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)): + assert module.num_classes == len(dataset.CLASSES), \ + (f'The `num_classes` ({module.num_classes}) in ' + f'{module.__class__.__name__} of ' + f'{model.__class__.__name__} does not matches ' + f'the length of `CLASSES` ' + f'{len(dataset.CLASSES)}) in ' + f'{dataset.__class__.__name__}') + + def before_train_epoch(self, runner): + """Check whether the training dataset is compatible with head. + + Args: + runner (obj:`EpochBasedRunner`): Epoch based Runner. + """ + self._check_head(runner) + + def before_val_epoch(self, runner): + """Check whether the dataset in val epoch is compatible with head. + + Args: + runner (obj:`EpochBasedRunner`): Epoch based Runner. + """ + self._check_head(runner) diff --git a/downstream/mmdetection/mmdet/datasets/voc.py b/downstream/mmdetection/mmdet/datasets/voc.py new file mode 100644 index 0000000..0a3ea7a --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/voc.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from collections import OrderedDict + +from mmcv.utils import print_log + +from mmdet.core import eval_map, eval_recalls +from .builder import DATASETS +from .xml_style import XMLDataset + + +@DATASETS.register_module() +class VOCDataset(XMLDataset): + + CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') + + PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192), + (197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255), + (153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252), + (182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0), + (0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)] + + def __init__(self, **kwargs): + super(VOCDataset, self).__init__(**kwargs) + if 'VOC2007' in self.img_prefix: + self.year = 2007 + elif 'VOC2012' in self.img_prefix: + self.year = 2012 + else: + raise ValueError('Cannot infer dataset year from img_prefix') + + def evaluate(self, + results, + metric='mAP', + logger=None, + proposal_nums=(100, 300, 1000), + iou_thr=0.5, + scale_ranges=None): + """Evaluate in VOC protocol. + + Args: + results (list[list | tuple]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. Options are + 'mAP', 'recall'. + logger (logging.Logger | str, optional): Logger used for printing + related information during evaluation. Default: None. + proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thr (float | list[float]): IoU threshold. Default: 0.5. + scale_ranges (list[tuple], optional): Scale ranges for evaluating + mAP. If not specified, all bounding boxes would be included in + evaluation. Default: None. + + Returns: + dict[str, float]: AP/recall metrics. 
+ """ + + if not isinstance(metric, str): + assert len(metric) == 1 + metric = metric[0] + allowed_metrics = ['mAP', 'recall'] + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + annotations = [self.get_ann_info(i) for i in range(len(self))] + eval_results = OrderedDict() + iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr + if metric == 'mAP': + assert isinstance(iou_thrs, list) + if self.year == 2007: + ds_name = 'voc07' + else: + ds_name = self.CLASSES + mean_aps = [] + for iou_thr in iou_thrs: + print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') + # Follow the official implementation, + # http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar + # we should use the legacy coordinate system in mmdet 1.x, + # which means w, h should be computed as 'x2 - x1 + 1` and + # `y2 - y1 + 1` + mean_ap, _ = eval_map( + results, + annotations, + scale_ranges=None, + iou_thr=iou_thr, + dataset=ds_name, + logger=logger, + use_legacy_coordinate=True) + mean_aps.append(mean_ap) + eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) + eval_results['mAP'] = sum(mean_aps) / len(mean_aps) + eval_results.move_to_end('mAP', last=False) + elif metric == 'recall': + gt_bboxes = [ann['bboxes'] for ann in annotations] + recalls = eval_recalls( + gt_bboxes, + results, + proposal_nums, + iou_thrs, + logger=logger, + use_legacy_coordinate=True) + for i, num in enumerate(proposal_nums): + for j, iou_thr in enumerate(iou_thrs): + eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j] + if recalls.shape[1] > 1: + ar = recalls.mean(axis=1) + for i, num in enumerate(proposal_nums): + eval_results[f'AR@{num}'] = ar[i] + return eval_results diff --git a/downstream/mmdetection/mmdet/datasets/wider_face.py b/downstream/mmdetection/mmdet/datasets/wider_face.py new file mode 100644 index 0000000..85a5fdc --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/wider_face.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv + +from .builder import DATASETS +from .xml_style import XMLDataset + + +@DATASETS.register_module() +class WIDERFaceDataset(XMLDataset): + """Reader for the WIDER Face dataset in PASCAL VOC format. + + Conversion scripts can be found in + https://github.com/sovrasov/wider-face-pascal-voc-annotations + """ + CLASSES = ('face', ) + + PALETTE = [(0, 255, 0)] + + def __init__(self, **kwargs): + super(WIDERFaceDataset, self).__init__(**kwargs) + + def load_annotations(self, ann_file): + """Load annotation from WIDERFace XML style annotation file. + + Args: + ann_file (str): Path of XML file. + + Returns: + list[dict]: Annotation info from XML file. + """ + + data_infos = [] + img_ids = mmcv.list_from_file(ann_file) + for img_id in img_ids: + filename = f'{img_id}.jpg' + xml_path = osp.join(self.img_prefix, 'Annotations', + f'{img_id}.xml') + tree = ET.parse(xml_path) + root = tree.getroot() + size = root.find('size') + width = int(size.find('width').text) + height = int(size.find('height').text) + folder = root.find('folder').text + data_infos.append( + dict( + id=img_id, + filename=osp.join(folder, filename), + width=width, + height=height)) + + return data_infos diff --git a/downstream/mmdetection/mmdet/datasets/xml_style.py b/downstream/mmdetection/mmdet/datasets/xml_style.py new file mode 100644 index 0000000..039d5d7 --- /dev/null +++ b/downstream/mmdetection/mmdet/datasets/xml_style.py @@ -0,0 +1,178 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv +import numpy as np +from PIL import Image + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class XMLDataset(CustomDataset): + """XML dataset for detection. + + Args: + min_size (int | float, optional): The minimum size of bounding + boxes in the images. If the size of a bounding box is less than + ``min_size``, it would be add to ignored field. + img_subdir (str): Subdir where images are stored. Default: JPEGImages. + ann_subdir (str): Subdir where annotations are. Default: Annotations. + """ + + def __init__(self, + min_size=None, + img_subdir='JPEGImages', + ann_subdir='Annotations', + **kwargs): + assert self.CLASSES or kwargs.get( + 'classes', None), 'CLASSES in `XMLDataset` can not be None.' + self.img_subdir = img_subdir + self.ann_subdir = ann_subdir + super(XMLDataset, self).__init__(**kwargs) + self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)} + self.min_size = min_size + + def load_annotations(self, ann_file): + """Load annotation from XML style ann_file. + + Args: + ann_file (str): Path of XML file. + + Returns: + list[dict]: Annotation info from XML file. + """ + + data_infos = [] + img_ids = mmcv.list_from_file(ann_file) + for img_id in img_ids: + filename = osp.join(self.img_subdir, f'{img_id}.jpg') + xml_path = osp.join(self.img_prefix, self.ann_subdir, + f'{img_id}.xml') + tree = ET.parse(xml_path) + root = tree.getroot() + size = root.find('size') + if size is not None: + width = int(size.find('width').text) + height = int(size.find('height').text) + else: + img_path = osp.join(self.img_prefix, filename) + img = Image.open(img_path) + width, height = img.size + data_infos.append( + dict(id=img_id, filename=filename, width=width, height=height)) + + return data_infos + + def _filter_imgs(self, min_size=32): + """Filter images too small or without annotation.""" + valid_inds = [] + for i, img_info in enumerate(self.data_infos): + if min(img_info['width'], img_info['height']) < min_size: + continue + if self.filter_empty_gt: + img_id = img_info['id'] + xml_path = osp.join(self.img_prefix, self.ann_subdir, + f'{img_id}.xml') + tree = ET.parse(xml_path) + root = tree.getroot() + for obj in root.findall('object'): + name = obj.find('name').text + if name in self.CLASSES: + valid_inds.append(i) + break + else: + valid_inds.append(i) + return valid_inds + + def get_ann_info(self, idx): + """Get annotation from XML file by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. 
+ """ + + img_id = self.data_infos[idx]['id'] + xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml') + tree = ET.parse(xml_path) + root = tree.getroot() + bboxes = [] + labels = [] + bboxes_ignore = [] + labels_ignore = [] + for obj in root.findall('object'): + name = obj.find('name').text + if name not in self.CLASSES: + continue + label = self.cat2label[name] + difficult = obj.find('difficult') + difficult = 0 if difficult is None else int(difficult.text) + bnd_box = obj.find('bndbox') + # TODO: check whether it is necessary to use int + # Coordinates may be float type + bbox = [ + int(float(bnd_box.find('xmin').text)), + int(float(bnd_box.find('ymin').text)), + int(float(bnd_box.find('xmax').text)), + int(float(bnd_box.find('ymax').text)) + ] + ignore = False + if self.min_size: + assert not self.test_mode + w = bbox[2] - bbox[0] + h = bbox[3] - bbox[1] + if w < self.min_size or h < self.min_size: + ignore = True + if difficult or ignore: + bboxes_ignore.append(bbox) + labels_ignore.append(label) + else: + bboxes.append(bbox) + labels.append(label) + if not bboxes: + bboxes = np.zeros((0, 4)) + labels = np.zeros((0, )) + else: + bboxes = np.array(bboxes, ndmin=2) - 1 + labels = np.array(labels) + if not bboxes_ignore: + bboxes_ignore = np.zeros((0, 4)) + labels_ignore = np.zeros((0, )) + else: + bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 + labels_ignore = np.array(labels_ignore) + ann = dict( + bboxes=bboxes.astype(np.float32), + labels=labels.astype(np.int64), + bboxes_ignore=bboxes_ignore.astype(np.float32), + labels_ignore=labels_ignore.astype(np.int64)) + return ann + + def get_cat_ids(self, idx): + """Get category ids in XML file by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. + """ + + cat_ids = [] + img_id = self.data_infos[idx]['id'] + xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml') + tree = ET.parse(xml_path) + root = tree.getroot() + for obj in root.findall('object'): + name = obj.find('name').text + if name not in self.CLASSES: + continue + label = self.cat2label[name] + cat_ids.append(label) + + return cat_ids diff --git a/downstream/mmdetection/mmdet/models/__init__.py b/downstream/mmdetection/mmdet/models/__init__.py new file mode 100644 index 0000000..12efb01 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .backbones import * # noqa: F401,F403 +from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, + ROI_EXTRACTORS, SHARED_HEADS, build_backbone, + build_detector, build_head, build_loss, build_neck, + build_roi_extractor, build_shared_head) +from .dense_heads import * # noqa: F401,F403 +from .detectors import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 +from .plugins import * # noqa: F401,F403 +from .roi_heads import * # noqa: F401,F403 +from .seg_heads import * # noqa: F401,F403 + +__all__ = [ + 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', + 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', + 'build_shared_head', 'build_head', 'build_loss', 'build_detector' +] diff --git a/downstream/mmdetection/mmdet/models/backbones/__init__.py b/downstream/mmdetection/mmdet/models/backbones/__init__.py new file mode 100644 index 0000000..07d7be7 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .csp_darknet import CSPDarknet +from .darknet import Darknet +from .detectors_resnet import DetectoRS_ResNet +from .detectors_resnext import DetectoRS_ResNeXt +from .efficientnet import EfficientNet +from .hourglass import HourglassNet +from .hrnet import HRNet +from .mobilenet_v2 import MobileNetV2 +from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2 +from .regnet import RegNet +from .res2net import Res2Net +from .resnest import ResNeSt +from .resnet import ResNet, ResNetV1d +from .resnext import ResNeXt +from .ssd_vgg import SSDVGG +from .swin import SwinTransformer +from .trident_resnet import TridentResNet +from .gpvit import GPViTDet +from .gpvit_adapter import GPViTAdapter, GPViTAdapterSingleStage + +__all__ = [ + 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', + 'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet', + 'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet', + 'SwinTransformer', 'PyramidVisionTransformer', + 'PyramidVisionTransformerV2', 'EfficientNet', 'GPViTDet', 'GPViTAdapter', 'GPViTAdapterSingleStage' +] diff --git a/downstream/mmdetection/mmdet/models/backbones/adapter_modules.py b/downstream/mmdetection/mmdet/models/backbones/adapter_modules.py new file mode 100644 index 0000000..ad2da66 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/adapter_modules.py @@ -0,0 +1,386 @@ +""" +Reference: https://github.com/czczup/ViT-Adapter + +Modified: use mmcv version MultiScaleDeformableAttnFunction +""" + +from __future__ import absolute_import, division, print_function +import logging +from functools import partial +import math +import warnings + +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn.init import constant_, xavier_uniform_ +from mmcv.runner import force_fp32 +from timm.models.layers import DropPath + + +from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttnFunction + + +_logger = logging.getLogger(__name__) + + +def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError('invalid input for _is_power_of_2: {} (type: {})'.format(n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + +class MSDeformAttn(nn.Module): + def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4, ratio=1.0): + """Multi-Scale Deformable Attention Module. 
+ + :param d_model hidden dimension + :param n_levels number of feature levels + :param n_heads number of attention heads + :param n_points number of sampling points per attention head per feature level + """ + super().__init__() + if d_model % n_heads != 0: + raise ValueError('d_model must be divisible by n_heads, ' + 'but got {} and {}'.format(d_model, n_heads)) + _d_per_head = d_model // n_heads + # you'd better set _d_per_head to a power of 2 + # which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_head): + warnings.warn( + "You'd better set d_model in MSDeformAttn to make " + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = 64 + + self.d_model = d_model + self.n_levels = n_levels + self.n_heads = n_heads + self.n_points = n_points + self.ratio = ratio + self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2) + self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points) + self.value_proj = nn.Linear(d_model, int(d_model * ratio)) + self.output_proj = nn.Linear(int(d_model * ratio), d_model) + + self._reset_parameters() + + def _reset_parameters(self): + constant_(self.sampling_offsets.weight.data, 0.) + thetas = torch.arange( + self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view( + self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1) + for i in range(self.n_points): + grid_init[:, :, i, :] *= i + 1 + + with torch.no_grad(): + self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + constant_(self.attention_weights.weight.data, 0.) + constant_(self.attention_weights.bias.data, 0.) + xavier_uniform_(self.value_proj.weight.data) + constant_(self.value_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) 
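The `_reset_parameters` above seeds `sampling_offsets.bias` so that each attention head starts by looking in a distinct direction, with farther sampling points for larger point indices. A small sketch of just that grid construction:

```python
import math

import torch

n_heads, n_levels, n_points = 8, 1, 4
thetas = torch.arange(n_heads, dtype=torch.float32) * (2.0 * math.pi / n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(
    n_heads, 1, 1, 2).repeat(1, n_levels, n_points, 1)
for i in range(n_points):
    grid_init[:, :, i, :] *= i + 1   # point i starts (i + 1) "units" out

print(grid_init.shape)    # torch.Size([8, 1, 4, 2])
print(grid_init[0, 0])    # head 0: offsets along +x at radii 1..4
```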
+ + @force_fp32(apply_to=('query', 'reference_points', 'input_flatten', 'input_padding_mask')) + def forward(self, + query, + reference_points, + input_flatten, + input_spatial_shapes, + input_level_start_index, + input_padding_mask=None): + """ + :param query (N, Length_{query}, C) + :param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area + or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes + :param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C) + :param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})] + :param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}] + :param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements + + :return output (N, Length_{query}, C) + """ + + N, Len_q, _ = query.shape + N, Len_in, _ = input_flatten.shape + assert (input_spatial_shapes[:, 0] * + input_spatial_shapes[:, 1]).sum() == Len_in + + value = self.value_proj(input_flatten) + if input_padding_mask is not None: + value = value.masked_fill(input_padding_mask[..., None], float(0)) + + value = value.view(N, Len_in, self.n_heads, + int(self.ratio * self.d_model) // self.n_heads) + sampling_offsets = self.sampling_offsets(query).view( + N, Len_q, self.n_heads, self.n_levels, self.n_points, 2) + attention_weights = self.attention_weights(query).view( + N, Len_q, self.n_heads, self.n_levels * self.n_points) + attention_weights = F.softmax(attention_weights, -1).\ + view(N, Len_q, self.n_heads, self.n_levels, self.n_points) + + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack( + [input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1) + sampling_locations = reference_points[:, :, None, :, None, :] \ + + sampling_offsets / offset_normalizer[None, None, None, :, None, :] + elif reference_points.shape[-1] == 4: + sampling_locations = reference_points[:, :, None, :, None, :2] \ + + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 + else: + raise ValueError( + 'Last dim of reference_points must be 2 or 4, but get {} instead.' 
+ .format(reference_points.shape[-1])) + + output = MultiScaleDeformableAttnFunction.apply(value.to(dtype=torch.float32), input_spatial_shapes, input_level_start_index, + sampling_locations, attention_weights, self.im2col_step) + + output = self.output_proj(output) + return output + + +def get_reference_points(spatial_shapes, device): + reference_points_list = [] + for lvl, (H_, W_) in enumerate(spatial_shapes): + ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device), + torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device)) + ref_y = ref_y.reshape(-1)[None] / H_ + ref_x = ref_x.reshape(-1)[None] / W_ + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] + return reference_points + + +def deform_inputs(x): + bs, c, h, w = x.shape + spatial_shapes = torch.as_tensor([(h // 8, w // 8), + (h // 16, w // 16), + (h // 32, w // 32)], + dtype=torch.long, device=x.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + reference_points = get_reference_points([(h // 16, w // 16)], x.device) + deform_inputs1 = [reference_points, spatial_shapes, level_start_index] + + spatial_shapes = torch.as_tensor([(h // 16, w // 16)], dtype=torch.long, device=x.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + reference_points = get_reference_points([(h // 8, w // 8), + (h // 16, w // 16), + (h // 32, w // 32)], x.device) + deform_inputs2 = [reference_points, spatial_shapes, level_start_index] + + return deform_inputs1, deform_inputs2 + + +class ConvFFN(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, + act_layer=nn.GELU, drop=0., down_stride=16): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.dwconv = DWConv(hidden_features, down_stride) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x, H, W): + x = self.fc1(x) + x = self.dwconv(x, H, W) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class DWConv(nn.Module): + def __init__(self, dim=768, down_stride=16): + super().__init__() + self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) + + self.down_stride = down_stride + + def forward(self, x, H, W): + if self.down_stride == 16: + B, N, C = x.shape + n = N // 21 + x1 = x[:, 0:16 * n, :].transpose(1, 2).view(B, C, H * 2, W * 2).contiguous() + x2 = x[:, 16 * n:20 * n, :].transpose(1, 2).view(B, C, H, W).contiguous() + x3 = x[:, 20 * n:, :].transpose(1, 2).view(B, C, H // 2, W // 2).contiguous() + x1 = self.dwconv(x1).flatten(2).transpose(1, 2) + x2 = self.dwconv(x2).flatten(2).transpose(1, 2) + x3 = self.dwconv(x3).flatten(2).transpose(1, 2) + x = torch.cat([x1, x2, x3], dim=1) + return x + elif self.down_stride == 8: + B, N, C = x.shape + n1 = H * W + n2 = n1 + H * W // 4 + n3 = n2 + H * W // 16 + + x1 = x[:, 0:n1, :].transpose(1, 2).view(B, C, H, W).contiguous() + x2 = x[:, n1:n2, :].transpose(1, 2).view(B, C, H // 2, W // 2).contiguous() + x3 = x[:, n2:, :].transpose(1, 2).view(B, C, H // 4, W // 4).contiguous() + x1 = self.dwconv(x1).flatten(2).transpose(1, 2) + x2 = 
self.dwconv(x2).flatten(2).transpose(1, 2) + x3 = self.dwconv(x3).flatten(2).transpose(1, 2) + x = torch.cat([x1, x2, x3], dim=1) + return x + else: + raise NotImplementedError + + +class Extractor(nn.Module): + def __init__(self, dim, num_heads=6, n_points=4, n_levels=1, deform_ratio=1.0, + with_cffn=True, cffn_ratio=0.25, drop=0., drop_path=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), down_stride=16): + super().__init__() + self.query_norm = norm_layer(dim) + self.feat_norm = norm_layer(dim) + self.attn = MSDeformAttn(d_model=dim, n_levels=n_levels, n_heads=num_heads, + n_points=n_points, ratio=deform_ratio) + self.with_cffn = with_cffn + if with_cffn: + self.ffn = ConvFFN(in_features=dim, hidden_features=int(dim * cffn_ratio), drop=drop, down_stride=down_stride) + self.ffn_norm = norm_layer(dim) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, query, reference_points, feat, spatial_shapes, level_start_index, H, W): + attn = self.attn(self.query_norm(query), reference_points, + self.feat_norm(feat), spatial_shapes, + level_start_index, None) + query = query + attn + + if self.with_cffn: + query = query + self.drop_path(self.ffn(self.ffn_norm(query), H, W)) + return query + + +class Injector(nn.Module): + def __init__(self, dim, num_heads=6, n_points=4, n_levels=1, deform_ratio=1.0, + norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=0.): + + super().__init__() + + self.query_norm = norm_layer(dim) + self.feat_norm = norm_layer(dim) + self.attn = MSDeformAttn(d_model=dim, n_levels=n_levels, n_heads=num_heads, + n_points=n_points, ratio=deform_ratio) + self.gamma = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + + def forward(self, query, reference_points, feat, spatial_shapes, level_start_index): + attn = self.attn(self.query_norm(query), reference_points, + self.feat_norm(feat), spatial_shapes, + level_start_index, None) + return query + self.gamma * attn + + +class InteractionBlock(nn.Module): + def __init__(self, dim, num_heads=6, n_points=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), + drop=0., drop_path=0., with_cffn=True, cffn_ratio=0.25, init_values=0., + deform_ratio=1.0, extra_extractor=False, down_stride=16): + + super().__init__() + + self.injector = Injector(dim=dim, n_levels=3, num_heads=num_heads, init_values=init_values, + n_points=n_points, norm_layer=norm_layer, deform_ratio=deform_ratio) + self.extractor = Extractor(dim=dim, n_levels=1, num_heads=num_heads, n_points=n_points, + norm_layer=norm_layer, deform_ratio=deform_ratio, with_cffn=with_cffn, + cffn_ratio=cffn_ratio, drop=drop, drop_path=drop_path, down_stride=down_stride) + if extra_extractor: + self.extra_extractors = nn.Sequential(*[ + Extractor(dim=dim, num_heads=num_heads, n_points=n_points, norm_layer=norm_layer, + with_cffn=with_cffn, cffn_ratio=cffn_ratio, deform_ratio=deform_ratio, + drop=drop, drop_path=drop_path, down_stride=down_stride) + for _ in range(2) + ]) + else: + self.extra_extractors = None + + def forward(self, x, c, blocks, deform_inputs1, deform_inputs2, H, W): + x = self.injector(query=x, reference_points=deform_inputs1[0], + feat=c, spatial_shapes=deform_inputs1[1], + level_start_index=deform_inputs1[2]) + for idx, blk in enumerate(blocks): + x = blk(x, H, W) + c = self.extractor(query=c, reference_points=deform_inputs2[0], + feat=x, spatial_shapes=deform_inputs2[1], + level_start_index=deform_inputs2[2], H=H, W=W) + if self.extra_extractors is not None: + for extractor in self.extra_extractors: + c = 
extractor(query=c, reference_points=deform_inputs2[0], + feat=x, spatial_shapes=deform_inputs2[1], + level_start_index=deform_inputs2[2], H=H, W=W) + return x, c + + +class SpatialPriorModule(nn.Module): + def __init__(self, inplanes=64, embed_dim=384, out_c1=True): + super().__init__() + + self.stem = nn.Sequential(*[ + nn.Conv2d(3, inplanes, kernel_size=3, stride=2, padding=1, bias=False), + nn.SyncBatchNorm(inplanes), + nn.ReLU(inplace=True), + nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=1, padding=1, bias=False), + nn.SyncBatchNorm(inplanes), + nn.ReLU(inplace=True), + nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=1, padding=1, bias=False), + nn.SyncBatchNorm(inplanes), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + ]) # s4 + self.conv2 = nn.Sequential(*[ + nn.Conv2d(inplanes, 2 * inplanes, kernel_size=3, stride=2, padding=1, bias=False), + nn.SyncBatchNorm(2 * inplanes), + nn.ReLU(inplace=True) + ]) # s8 + self.conv3 = nn.Sequential(*[ + nn.Conv2d(2 * inplanes, 4 * inplanes, kernel_size=3, stride=2, padding=1, bias=False), + nn.SyncBatchNorm(4 * inplanes), + nn.ReLU(inplace=True) + ]) # s16 + self.conv4 = nn.Sequential(*[ + nn.Conv2d(4 * inplanes, 4 * inplanes, kernel_size=3, stride=2, padding=1, bias=False), + nn.SyncBatchNorm(4 * inplanes), + nn.ReLU(inplace=True) + ]) # s32 + if out_c1: + self.fc1 = nn.Conv2d(inplanes, embed_dim, kernel_size=1, stride=1, padding=0, bias=True) + self.fc2 = nn.Conv2d(2 * inplanes, embed_dim, kernel_size=1, stride=1, padding=0, bias=True) + self.fc3 = nn.Conv2d(4 * inplanes, embed_dim, kernel_size=1, stride=1, padding=0, bias=True) + self.fc4 = nn.Conv2d(4 * inplanes, embed_dim, kernel_size=1, stride=1, padding=0, bias=True) + self.out_c1 = out_c1 + self.embed_dim = embed_dim + + def forward(self, x): + c1 = self.stem(x) + c2 = self.conv2(c1) + c3 = self.conv3(c2) + c4 = self.conv4(c3) + + if self.out_c1: + c1 = self.fc1(c1) + c2 = self.fc2(c2) + c3 = self.fc3(c3) + c4 = self.fc4(c4) + + bs, dim, _, _ = c1.shape + # c1 = c1.view(bs, dim, -1).transpose(1, 2) # 4s + c2 = c2.view(bs, self.embed_dim, -1).transpose(1, 2) # 8s + c3 = c3.view(bs, self.embed_dim, -1).transpose(1, 2) # 16s + c4 = c4.view(bs, self.embed_dim, -1).transpose(1, 2) # 32s + + if self.out_c1: + return c1, c2, c3, c4 + else: + return c2, c3, c4 diff --git a/downstream/mmdetection/mmdet/models/backbones/csp_darknet.py b/downstream/mmdetection/mmdet/models/backbones/csp_darknet.py new file mode 100644 index 0000000..2bbf396 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/csp_darknet.py @@ -0,0 +1,284 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import CSPLayer + + +class Focus(nn.Module): + """Focus width and height information into channel space. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + kernel_size (int): The kernel size of the convolution. Default: 1 + stride (int): The stride of the convolution. Default: 1 + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. 
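A quick way to sanity-check the token bookkeeping used by `deform_inputs` and `DWConv` above: for an input whose height and width are divisible by 32, the three extra feature levels at strides 8/16/32 contribute H/8*W/8 + H/16*W/16 + H/32*W/32 tokens, which is exactly the 16n + 4n + n = 21n split that the stride-16 branch of `DWConv.forward` unpacks. The sketch below uses a toy input size.

```python
import torch

h, w = 256, 192                                   # toy input size, divisible by 32
spatial_shapes = torch.as_tensor(
    [(h // 8, w // 8), (h // 16, w // 16), (h // 32, w // 32)], dtype=torch.long)
level_start_index = torch.cat(
    (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))

tokens_per_level = spatial_shapes.prod(1)
n = (h // 16) * (w // 16) // 4                    # the "n" in DWConv's 21n split
print(tokens_per_level.tolist())                  # [768, 192, 48]
print(level_start_index.tolist())                 # [0, 768, 960]
print(tokens_per_level.sum().item(), 21 * n)      # 1008 1008
```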
+ Default: dict(type='Swish'). + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish')): + super().__init__() + self.conv = ConvModule( + in_channels * 4, + out_channels, + kernel_size, + stride, + padding=(kernel_size - 1) // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) + patch_top_left = x[..., ::2, ::2] + patch_top_right = x[..., ::2, 1::2] + patch_bot_left = x[..., 1::2, ::2] + patch_bot_right = x[..., 1::2, 1::2] + x = torch.cat( + ( + patch_top_left, + patch_bot_left, + patch_top_right, + patch_bot_right, + ), + dim=1, + ) + return self.conv(x) + + +class SPPBottleneck(BaseModule): + """Spatial pyramid pooling layer used in YOLOv3-SPP. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling + layers. Default: (5, 9, 13). + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='Swish'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_sizes=(5, 9, 13), + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=None): + super().__init__(init_cfg) + mid_channels = in_channels // 2 + self.conv1 = ConvModule( + in_channels, + mid_channels, + 1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.poolings = nn.ModuleList([ + nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) + for ks in kernel_sizes + ]) + conv2_channels = mid_channels * (len(kernel_sizes) + 1) + self.conv2 = ConvModule( + conv2_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + x = self.conv1(x) + x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1) + x = self.conv2(x) + return x + + +@BACKBONES.register_module() +class CSPDarknet(BaseModule): + """CSP-Darknet backbone used in YOLOv5 and YOLOX. + + Args: + arch (str): Architecture of CSP-Darknet, from {P5, P6}. + Default: P5. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Default: 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Default: -1. + use_depthwise (bool): Whether to use depthwise separable convolution. + Default: False. + arch_ovewrite(list): Overwrite default arch settings. Default: None. + spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP + layers. Default: (5, 9, 13). + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). 
+ norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Example: + >>> from mmdet.models import CSPDarknet + >>> import torch + >>> self = CSPDarknet(depth=53) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + # From left to right: + # in_channels, out_channels, num_blocks, add_identity, use_spp + arch_settings = { + 'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False], + [256, 512, 9, True, False], [512, 1024, 3, False, True]], + 'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False], + [256, 512, 9, True, False], [512, 768, 3, True, False], + [768, 1024, 3, False, True]] + } + + def __init__(self, + arch='P5', + deepen_factor=1.0, + widen_factor=1.0, + out_indices=(2, 3, 4), + frozen_stages=-1, + use_depthwise=False, + arch_ovewrite=None, + spp_kernal_sizes=(5, 9, 13), + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + norm_eval=False, + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu')): + super().__init__(init_cfg) + arch_setting = self.arch_settings[arch] + if arch_ovewrite: + arch_setting = arch_ovewrite + assert set(out_indices).issubset( + i for i in range(len(arch_setting) + 1)) + if frozen_stages not in range(-1, len(arch_setting) + 1): + raise ValueError('frozen_stages must be in range(-1, ' + 'len(arch_setting) + 1). 
But received ' + f'{frozen_stages}') + + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.use_depthwise = use_depthwise + self.norm_eval = norm_eval + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + + self.stem = Focus( + 3, + int(arch_setting[0][0] * widen_factor), + kernel_size=3, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.layers = ['stem'] + + for i, (in_channels, out_channels, num_blocks, add_identity, + use_spp) in enumerate(arch_setting): + in_channels = int(in_channels * widen_factor) + out_channels = int(out_channels * widen_factor) + num_blocks = max(round(num_blocks * deepen_factor), 1) + stage = [] + conv_layer = conv( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(conv_layer) + if use_spp: + spp = SPPBottleneck( + out_channels, + out_channels, + kernel_sizes=spp_kernal_sizes, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(spp) + csp_layer = CSPLayer( + out_channels, + out_channels, + num_blocks=num_blocks, + add_identity=add_identity, + use_depthwise=use_depthwise, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + stage.append(csp_layer) + self.add_module(f'stage{i + 1}', nn.Sequential(*stage)) + self.layers.append(f'stage{i + 1}') + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for i in range(self.frozen_stages + 1): + m = getattr(self, self.layers[i]) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(CSPDarknet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/backbones/darknet.py b/downstream/mmdetection/mmdet/models/backbones/darknet.py new file mode 100644 index 0000000..adfb115 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/darknet.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Copyright (c) 2019 Western Digital Corporation or its affiliates. + +import warnings + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES + + +class ResBlock(BaseModule): + """The basic residual block used in Darknet. Each ResBlock consists of two + ConvModules and the input is added to the final output. Each ConvModule is + composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer + has half of the number of the filters as much as the second convLayer. The + first convLayer has filter size of 1x1 and the second one has the filter + size of 3x3. + + Args: + in_channels (int): The input channels. Must be even. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True) + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + init_cfg=None): + super(ResBlock, self).__init__(init_cfg) + assert in_channels % 2 == 0 # ensure the in_channels is even + half_in_channels = in_channels // 2 + + # shortcut + cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) + + self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg) + self.conv2 = ConvModule( + half_in_channels, in_channels, 3, padding=1, **cfg) + + def forward(self, x): + residual = x + out = self.conv1(x) + out = self.conv2(out) + out = out + residual + + return out + + +@BACKBONES.register_module() +class Darknet(BaseModule): + """Darknet backbone. + + Args: + depth (int): Depth of Darknet. Currently only support 53. + out_indices (Sequence[int]): Output from which stages. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True) + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Example: + >>> from mmdet.models import Darknet + >>> import torch + >>> self = Darknet(depth=53) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... 
+ (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + + # Dict(depth: (layers, channels)) + arch_settings = { + 53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512), + (512, 1024))) + } + + def __init__(self, + depth=53, + out_indices=(3, 4, 5), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + norm_eval=True, + pretrained=None, + init_cfg=None): + super(Darknet, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for darknet') + + self.depth = depth + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.layers, self.channels = self.arch_settings[depth] + + cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) + + self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg) + + self.cr_blocks = ['conv1'] + for i, n_layers in enumerate(self.layers): + layer_name = f'conv_res_block{i + 1}' + in_c, out_c = self.channels[i] + self.add_module( + layer_name, + self.make_conv_res_block(in_c, out_c, n_layers, **cfg)) + self.cr_blocks.append(layer_name) + + self.norm_eval = norm_eval + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.cr_blocks): + cr_block = getattr(self, layer_name) + x = cr_block(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for i in range(self.frozen_stages): + m = getattr(self, self.cr_blocks[i]) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(Darknet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + @staticmethod + def make_conv_res_block(in_channels, + out_channels, + res_repeat, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='LeakyReLU', + negative_slope=0.1)): + """In Darknet backbone, ConvLayer is usually followed by ResBlock. This + function will make that. The Conv layers always have 3x3 filters with + stride=2. The number of the filters in Conv layer is the same as the + out channels of the ResBlock. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + res_repeat (int): The number of ResBlocks. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True) + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). 
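+
+        Returns:
+            nn.Sequential: One 3x3 stride-2 ``ConvModule`` followed by
+                ``res_repeat`` ``ResBlock`` modules.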
+ """ + + cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) + + model = nn.Sequential() + model.add_module( + 'conv', + ConvModule( + in_channels, out_channels, 3, stride=2, padding=1, **cfg)) + for idx in range(res_repeat): + model.add_module('res{}'.format(idx), + ResBlock(out_channels, **cfg)) + return model diff --git a/downstream/mmdetection/mmdet/models/backbones/detectors_resnet.py b/downstream/mmdetection/mmdet/models/backbones/detectors_resnet.py new file mode 100644 index 0000000..a3c0d40 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/detectors_resnet.py @@ -0,0 +1,353 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, + kaiming_init) +from mmcv.runner import Sequential, load_checkpoint +from torch.nn.modules.batchnorm import _BatchNorm + +from mmdet.utils import get_root_logger +from ..builder import BACKBONES +from .resnet import BasicBlock +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet + + +class Bottleneck(_Bottleneck): + r"""Bottleneck for the ResNet backbone in `DetectoRS + `_. + + This bottleneck allows the users to specify whether to use + SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid). + + Args: + inplanes (int): The number of input channels. + planes (int): The number of output channels before expansion. + rfp_inplanes (int, optional): The number of channels from RFP. + Default: None. If specified, an additional conv layer will be + added for ``rfp_feat``. Otherwise, the structure is the same as + base class. + sac (dict, optional): Dictionary to construct SAC. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + expansion = 4 + + def __init__(self, + inplanes, + planes, + rfp_inplanes=None, + sac=None, + init_cfg=None, + **kwargs): + super(Bottleneck, self).__init__( + inplanes, planes, init_cfg=init_cfg, **kwargs) + + assert sac is None or isinstance(sac, dict) + self.sac = sac + self.with_sac = sac is not None + if self.with_sac: + self.conv2 = build_conv_layer( + self.sac, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + bias=False) + + self.rfp_inplanes = rfp_inplanes + if self.rfp_inplanes: + self.rfp_conv = build_conv_layer( + None, + self.rfp_inplanes, + planes * self.expansion, + 1, + stride=1, + bias=True) + if init_cfg is None: + self.init_cfg = dict( + type='Constant', val=0, override=dict(name='rfp_conv')) + + def rfp_forward(self, x, rfp_feat): + """The forward function that also takes the RFP features as input.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + if self.rfp_inplanes: + rfp_feat = self.rfp_conv(rfp_feat) + out = out + rfp_feat + + out = self.relu(out) + + return out + + +class ResLayer(Sequential): + """ResLayer to build ResNet style backbone for RPF in detectoRS. + + The difference between this module and base class is that we pass + ``rfp_inplanes`` to the first block. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. Default: True + rfp_inplanes (int, optional): The number of channels from RFP. + Default: None. If specified, an additional conv layer will be + added for ``rfp_feat``. Otherwise, the structure is the same as + base class. 
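+
+    Example:
+        >>> # Minimal sketch: stack two DetectoRS bottlenecks without
+        >>> # SAC or RFP, just to show how a stage is constructed.
+        >>> from mmdet.models.backbones.detectors_resnet import Bottleneck
+        >>> from mmdet.models.backbones.detectors_resnet import ResLayer
+        >>> import torch
+        >>> layer = ResLayer(Bottleneck, inplanes=64, planes=16, num_blocks=2)
+        >>> out = layer(torch.rand(1, 64, 32, 32))
+        >>> tuple(out.shape)
+        (1, 64, 32, 32)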
+ """ + + def __init__(self, + block, + inplanes, + planes, + num_blocks, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + rfp_inplanes=None, + **kwargs): + self.block = block + assert downsample_first, f'downsample_first={downsample_first} is ' \ + 'not supported in DetectoRS' + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + rfp_inplanes=rfp_inplanes, + **kwargs)) + inplanes = planes * block.expansion + for _ in range(1, num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + + super(ResLayer, self).__init__(*layers) + + +@BACKBONES.register_module() +class DetectoRS_ResNet(ResNet): + """ResNet backbone for DetectoRS. + + Args: + sac (dict, optional): Dictionary to construct SAC (Switchable Atrous + Convolution). Default: None. + stage_with_sac (list): Which stage to use sac. Default: (False, False, + False, False). + rfp_inplanes (int, optional): The number of channels from RFP. + Default: None. If specified, an additional conv layer will be + added for ``rfp_feat``. Otherwise, the structure is the same as + base class. + output_img (bool): If ``True``, the input image will be inserted into + the starting position of output. Default: False. 
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + sac=None, + stage_with_sac=(False, False, False, False), + rfp_inplanes=None, + output_img=False, + pretrained=None, + init_cfg=None, + **kwargs): + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + self.pretrained = pretrained + if init_cfg is not None: + assert isinstance(init_cfg, dict), \ + f'init_cfg must be a dict, but got {type(init_cfg)}' + if 'type' in init_cfg: + assert init_cfg.get('type') == 'Pretrained', \ + 'Only can initialize module by loading a pretrained model' + else: + raise KeyError('`init_cfg` must contain the key "type"') + self.pretrained = init_cfg.get('checkpoint') + self.sac = sac + self.stage_with_sac = stage_with_sac + self.rfp_inplanes = rfp_inplanes + self.output_img = output_img + super(DetectoRS_ResNet, self).__init__(**kwargs) + + self.inplanes = self.stem_channels + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + sac = self.sac if self.stage_with_sac[i] else None + if self.plugins is not None: + stage_plugins = self.make_stage_plugins(self.plugins, i) + else: + stage_plugins = None + planes = self.base_channels * 2**i + res_layer = self.make_res_layer( + block=self.block, + inplanes=self.inplanes, + planes=planes, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn, + sac=sac, + rfp_inplanes=rfp_inplanes if i > 0 else None, + plugins=stage_plugins) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + # In order to be properly initialized by RFP + def init_weights(self): + # Calling this method will cause parameter initialization exception + # super(DetectoRS_ResNet, self).init_weights() + + if isinstance(self.pretrained, str): + logger = get_root_logger() + load_checkpoint(self, self.pretrained, strict=False, logger=logger) + elif self.pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + if self.dcn is not None: + for m in self.modules(): + if isinstance(m, Bottleneck) and hasattr( + m.conv2, 'conv_offset'): + constant_init(m.conv2.conv_offset, 0) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + else: + raise TypeError('pretrained must be a str or None') + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS.""" + return ResLayer(**kwargs) + + def forward(self, x): + """Forward function.""" + outs = list(super(DetectoRS_ResNet, self).forward(x)) + if self.output_img: + outs.insert(0, x) + return tuple(outs) + + def rfp_forward(self, x, rfp_feats): + """Forward function for RFP.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + rfp_feat = rfp_feats[i] 
if i > 0 else None + for layer in res_layer: + x = layer.rfp_forward(x, rfp_feat) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/backbones/detectors_resnext.py b/downstream/mmdetection/mmdet/models/backbones/detectors_resnext.py new file mode 100644 index 0000000..5e8b20a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/detectors_resnext.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .detectors_resnet import Bottleneck as _Bottleneck +from .detectors_resnet import DetectoRS_ResNet + + +class Bottleneck(_Bottleneck): + expansion = 4 + + def __init__(self, + inplanes, + planes, + groups=1, + base_width=4, + base_channels=64, + **kwargs): + """Bottleneck block for ResNeXt. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. + """ + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * + (base_width / base_channels)) * groups + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, width, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if self.with_sac: + self.conv2 = build_conv_layer( + self.sac, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + elif not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + self.dcn, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@BACKBONES.register_module() +class DetectoRS_ResNeXt(DetectoRS_ResNet): + """ResNeXt backbone for DetectoRS. + + Args: + groups (int): The number of groups in ResNeXt. + base_width (int): The base width of ResNeXt. 
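+
+    Example:
+        >>> # Minimal sketch: the 32x4d grouped-convolution variant of
+        >>> # DetectoRS_ResNet, without SAC or RFP.
+        >>> from mmdet.models import DetectoRS_ResNeXt
+        >>> import torch
+        >>> self = DetectoRS_ResNeXt(depth=50, groups=32, base_width=4)
+        >>> self.eval()
+        >>> outs = self.forward(torch.rand(1, 3, 32, 32))
+        >>> len(outs)
+        4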
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, groups=1, base_width=4, **kwargs): + self.groups = groups + self.base_width = base_width + super(DetectoRS_ResNeXt, self).__init__(**kwargs) + + def make_res_layer(self, **kwargs): + return super().make_res_layer( + groups=self.groups, + base_width=self.base_width, + base_channels=self.base_channels, + **kwargs) diff --git a/downstream/mmdetection/mmdet/models/backbones/efficientnet.py b/downstream/mmdetection/mmdet/models/backbones/efficientnet.py new file mode 100644 index 0000000..7ee3595 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/efficientnet.py @@ -0,0 +1,417 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn.bricks import ConvModule, DropPath +from mmcv.runner import BaseModule, Sequential + +from ..builder import BACKBONES +from ..utils import InvertedResidual, SELayer, make_divisible + + +class EdgeResidual(BaseModule): + """Edge Residual Block. + + Args: + in_channels (int): The input channels of this module. + out_channels (int): The output channels of this module. + mid_channels (int): The input channels of the second convolution. + kernel_size (int): The kernel size of the first convolution. + Defaults to 3. + stride (int): The stride of the first convolution. Defaults to 1. + se_cfg (dict, optional): Config dict for se layer. Defaults to None, + which means no se layer. + with_residual (bool): Use residual connection. Defaults to True. + conv_cfg (dict, optional): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='ReLU')``. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict | list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + with_residual=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_path_rate=0., + with_cp=False, + init_cfg=None, + **kwargs): + super(EdgeResidual, self).__init__(init_cfg=init_cfg) + assert stride in [1, 2] + self.with_cp = with_cp + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.with_se = se_cfg is not None + self.with_residual = ( + stride == 1 and in_channels == out_channels and with_residual) + + if self.with_se: + assert isinstance(se_cfg, dict) + + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=1, + padding=kernel_size // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if self.with_se: + self.se = SELayer(**se_cfg) + + self.conv2 = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + out = self.conv1(out) + + if self.with_se: + out = self.se(out) + + out = self.conv2(out) + + if self.with_residual: + return x + self.drop_path(out) + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +def model_scaling(layer_setting, arch_setting): + """Scaling operation to the layer's parameters according to the + arch_setting.""" + # scale width + new_layer_setting = copy.deepcopy(layer_setting) + for layer_cfg in new_layer_setting: + for block_cfg in layer_cfg: + block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8) + + # scale depth + split_layer_setting = [new_layer_setting[0]] + for layer_cfg in new_layer_setting[1:-1]: + tmp_index = [0] + for i in range(len(layer_cfg) - 1): + if layer_cfg[i + 1][1] != layer_cfg[i][1]: + tmp_index.append(i + 1) + tmp_index.append(len(layer_cfg)) + for i in range(len(tmp_index) - 1): + split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i + + 1]]) + split_layer_setting.append(new_layer_setting[-1]) + + num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]] + new_layers = [ + int(math.ceil(arch_setting[1] * num)) for num in num_of_layers + ] + + merge_layer_setting = [split_layer_setting[0]] + for i, layer_cfg in enumerate(split_layer_setting[1:-1]): + if new_layers[i] <= num_of_layers[i]: + tmp_layer_cfg = layer_cfg[:new_layers[i]] + else: + tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * ( + new_layers[i] - num_of_layers[i]) + if tmp_layer_cfg[0][3] == 1 and i != 0: + merge_layer_setting[-1] += tmp_layer_cfg.copy() + else: + merge_layer_setting.append(tmp_layer_cfg.copy()) + merge_layer_setting.append(split_layer_setting[-1]) + + return merge_layer_setting + + +@BACKBONES.register_module() +class EfficientNet(BaseModule): + """EfficientNet backbone. + + Args: + arch (str): Architecture of efficientnet. Defaults to b0. + out_indices (Sequence[int]): Output from which stages. + Defaults to (6, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN'). 
+ act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='Swish'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + """ + + # Parameters to build layers. + # 'b' represents the architecture of normal EfficientNet family includes + # 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'. + # 'e' represents the architecture of EfficientNet-EdgeTPU including 'es', + # 'em', 'el'. + # 6 parameters are needed to construct a layer, From left to right: + # - kernel_size: The kernel size of the block + # - out_channel: The number of out_channels of the block + # - se_ratio: The sequeeze ratio of SELayer. + # - stride: The stride of the block + # - expand_ratio: The expand_ratio of the mid_channels + # - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual + layer_settings = { + 'b': [[[3, 32, 0, 2, 0, -1]], + [[3, 16, 4, 1, 1, 0]], + [[3, 24, 4, 2, 6, 0], + [3, 24, 4, 1, 6, 0]], + [[5, 40, 4, 2, 6, 0], + [5, 40, 4, 1, 6, 0]], + [[3, 80, 4, 2, 6, 0], + [3, 80, 4, 1, 6, 0], + [3, 80, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0]], + [[5, 192, 4, 2, 6, 0], + [5, 192, 4, 1, 6, 0], + [5, 192, 4, 1, 6, 0], + [5, 192, 4, 1, 6, 0], + [3, 320, 4, 1, 6, 0]], + [[1, 1280, 0, 1, 0, -1]] + ], + 'e': [[[3, 32, 0, 2, 0, -1]], + [[3, 24, 0, 1, 3, 1]], + [[3, 32, 0, 2, 8, 1], + [3, 32, 0, 1, 8, 1]], + [[3, 48, 0, 2, 8, 1], + [3, 48, 0, 1, 8, 1], + [3, 48, 0, 1, 8, 1], + [3, 48, 0, 1, 8, 1]], + [[5, 96, 0, 2, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0]], + [[5, 192, 0, 2, 8, 0], + [5, 192, 0, 1, 8, 0]], + [[1, 1280, 0, 1, 0, -1]] + ] + } # yapf: disable + + # Parameters to build different kinds of architecture. + # From left to right: scaling factor for width, scaling factor for depth, + # resolution. + arch_settings = { + 'b0': (1.0, 1.0, 224), + 'b1': (1.0, 1.1, 240), + 'b2': (1.1, 1.2, 260), + 'b3': (1.2, 1.4, 300), + 'b4': (1.4, 1.8, 380), + 'b5': (1.6, 2.2, 456), + 'b6': (1.8, 2.6, 528), + 'b7': (2.0, 3.1, 600), + 'b8': (2.2, 3.6, 672), + 'es': (1.0, 1.0, 224), + 'em': (1.0, 1.1, 240), + 'el': (1.2, 1.4, 300) + } + + def __init__(self, + arch='b0', + drop_path_rate=0., + out_indices=(6, ), + frozen_stages=0, + conv_cfg=dict(type='Conv2dAdaptivePadding'), + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='Swish'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + layer=['_BatchNorm', 'GroupNorm'], + val=1) + ]): + super(EfficientNet, self).__init__(init_cfg) + assert arch in self.arch_settings, \ + f'"{arch}" is not one of the arch_settings ' \ + f'({", ".join(self.arch_settings.keys())})' + self.arch_setting = self.arch_settings[arch] + self.layer_setting = self.layer_settings[arch[:1]] + for index in out_indices: + if index not in range(0, len(self.layer_setting)): + raise ValueError('the item in out_indices must in ' + f'range(0, {len(self.layer_setting)}). ' + f'But received {index}') + + if frozen_stages not in range(len(self.layer_setting) + 1): + raise ValueError('frozen_stages must be in range(0, ' + f'{len(self.layer_setting) + 1}). 
' + f'But received {frozen_stages}') + self.drop_path_rate = drop_path_rate + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.layer_setting = model_scaling(self.layer_setting, + self.arch_setting) + block_cfg_0 = self.layer_setting[0][0] + block_cfg_last = self.layer_setting[-1][0] + self.in_channels = make_divisible(block_cfg_0[1], 8) + self.out_channels = block_cfg_last[1] + self.layers = nn.ModuleList() + self.layers.append( + ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=block_cfg_0[0], + stride=block_cfg_0[3], + padding=block_cfg_0[0] // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.make_layer() + # Avoid building unused layers in mmdetection. + if len(self.layers) < max(self.out_indices) + 1: + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=block_cfg_last[0], + stride=block_cfg_last[3], + padding=block_cfg_last[0] // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def make_layer(self): + # Without the first and the final conv block. + layer_setting = self.layer_setting[1:-1] + + total_num_blocks = sum([len(x) for x in layer_setting]) + block_idx = 0 + dpr = [ + x.item() + for x in torch.linspace(0, self.drop_path_rate, total_num_blocks) + ] # stochastic depth decay rule + + for i, layer_cfg in enumerate(layer_setting): + # Avoid building unused layers in mmdetection. + if i > max(self.out_indices) - 1: + break + layer = [] + for i, block_cfg in enumerate(layer_cfg): + (kernel_size, out_channels, se_ratio, stride, expand_ratio, + block_type) = block_cfg + + mid_channels = int(self.in_channels * expand_ratio) + out_channels = make_divisible(out_channels, 8) + if se_ratio <= 0: + se_cfg = None + else: + # In mmdetection, the `divisor` is deleted to align + # the logic of SELayer with mmcls. + se_cfg = dict( + channels=mid_channels, + ratio=expand_ratio * se_ratio, + act_cfg=(self.act_cfg, dict(type='Sigmoid'))) + if block_type == 1: # edge tpu + if i > 0 and expand_ratio == 3: + with_residual = False + expand_ratio = 4 + else: + with_residual = True + mid_channels = int(self.in_channels * expand_ratio) + if se_cfg is not None: + # In mmdetection, the `divisor` is deleted to align + # the logic of SELayer with mmcls. + se_cfg = dict( + channels=mid_channels, + ratio=se_ratio * expand_ratio, + act_cfg=(self.act_cfg, dict(type='Sigmoid'))) + block = partial(EdgeResidual, with_residual=with_residual) + else: + block = InvertedResidual + layer.append( + block( + in_channels=self.in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + drop_path_rate=dpr[block_idx], + with_cp=self.with_cp, + # In mmdetection, `with_expand_conv` is set to align + # the logic of InvertedResidual with mmcls. 
+ with_expand_conv=(mid_channels != self.in_channels))) + self.in_channels = out_channels + block_idx += 1 + self.layers.append(Sequential(*layer)) + + def forward(self, x): + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(EfficientNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/downstream/mmdetection/mmdet/models/backbones/gpvit.py b/downstream/mmdetection/mmdet/models/backbones/gpvit.py new file mode 100644 index 0000000..5228406 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/gpvit.py @@ -0,0 +1,51 @@ +from mmcls.models.backbones import GPViT + +from ..builder import BACKBONES + + + +@BACKBONES.register_module() +class GPViTDet(GPViT): + def __init__(self, + arch='', + img_size=224, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None, + test_cfg=dict(vis_group=False), + convert_syncbn=False, + freeze_patch_embed=False, + **kwargs): + + self.att_with_cp = False + self.group_with_cp = False + + super(GPViTDet, self).__init__( + arch, + img_size, + in_channels, + out_indices, + drop_rate, + drop_path_rate, + qkv_bias, + norm_cfg, + final_norm, + interpolate_mode, + patch_cfg, + layer_cfgs, + init_cfg, + test_cfg, + convert_syncbn, + freeze_patch_embed) + + def dummy(self): + pass + diff --git a/downstream/mmdetection/mmdet/models/backbones/gpvit_adapter.py b/downstream/mmdetection/mmdet/models/backbones/gpvit_adapter.py new file mode 100644 index 0000000..ae3c401 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/gpvit_adapter.py @@ -0,0 +1,320 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. 
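+# GPViT adapter backbones for detection: GPViTAdapter wraps GPViT with a
+# convolutional SpatialPriorModule and InteractionBlocks (multi-scale
+# deformable attention) that exchange features between the ViT tokens and
+# the convolutional prior, producing a 4-level feature pyramid. The
+# GPViTAdapterSingleStage variant drops the stride-4 level and returns
+# 3 levels.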
+import logging +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmdet.models.builder import BACKBONES +from timm.models.layers import DropPath, trunc_normal_ +from torch.nn.init import normal_ + +from mmcls.gpvit_dev.models.backbones.gpvit import GPViT, resize_pos_embed +from .adapter_modules import SpatialPriorModule, InteractionBlock, get_reference_points, MSDeformAttn + +_logger = logging.getLogger(__name__) + + +@BACKBONES.register_module() +class GPViTAdapter(GPViT): + def __init__(self, + pretrain_size=224, + conv_inplane=64, + n_points=4, + deform_num_heads=6, + init_values=0., + interaction_indexes=None, + with_cffn=True, + cffn_ratio=0.25, + deform_ratio=1.0, + add_vit_feature=True, + use_extra_extractor=True, + att_with_cp=False, + group_with_cp=False, + *args, + **kwargs): + + self.att_with_cp = att_with_cp + self.group_with_cp = group_with_cp + + super().__init__(*args, **kwargs) + + self.num_classes = 80 + self.cls_token = None + self.num_block = len(self.layers) + self.pretrain_size = (pretrain_size, pretrain_size) + self.interaction_indexes = interaction_indexes + self.add_vit_feature = add_vit_feature + embed_dim = self.embed_dims + + self.level_embed = nn.Parameter(torch.zeros(3, embed_dim)) + self.spm = SpatialPriorModule(inplanes=conv_inplane,embed_dim=embed_dim) + self.interactions = nn.Sequential(*[ + InteractionBlock_GPViT( + dim=embed_dim, + num_heads=deform_num_heads, + n_points=n_points, + init_values=init_values, + drop_path=self.drop_path_rate, + # norm_layer=self.norm1, + with_cffn=with_cffn, + cffn_ratio=cffn_ratio, + deform_ratio=deform_ratio, + extra_extractor=((True if i == len(interaction_indexes) - 1 else False) and use_extra_extractor), + down_stride=8 + ) + for i in range(len(interaction_indexes)) + ]) + self.up = nn.ConvTranspose2d(embed_dim, embed_dim, 2, 2) + self.ad_norm1 = nn.SyncBatchNorm(embed_dim) + self.ad_norm2 = nn.SyncBatchNorm(embed_dim) + self.ad_norm3 = nn.SyncBatchNorm(embed_dim) + self.ad_norm4 = nn.SyncBatchNorm(embed_dim) + + self.up.apply(self._init_weights) + self.spm.apply(self._init_weights) + self.interactions.apply(self._init_weights) + self.apply(self._init_deform_weights) + normal_(self.level_embed) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm) or isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def _init_deform_weights(self, m): + if isinstance(m, MSDeformAttn): + m._reset_parameters() + + def _get_pos_embed(self, pos_embed, H, W): + pos_embed = pos_embed.reshape( + 1, self.pretrain_size[0] // 16, self.pretrain_size[1] // 16, -1).permute(0, 3, 1, 2) + pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\ + reshape(1, -1, H * W).permute(0, 2, 1) + return pos_embed + + def _add_level_embed(self, c2, c3, c4): + c2 = c2 + self.level_embed[0] + c3 = c3 + self.level_embed[1] + c4 = c4 + self.level_embed[2] + return c2, c3, c4 + + def forward(self, x): + deform_inputs1, deform_inputs2 = deform_inputs(x) + + # SPM forward + c1, c2, c3, c4 = self.spm(x) # 
s4, s8, s16, s32 + c2, c3, c4 = self._add_level_embed(c2, c3, c4) + c = torch.cat([c2, c3, c4], dim=1) + + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + H, W = patch_resolution + bs, n, dim = x.shape + pos_embed = resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=0) + + x = x + pos_embed + x = self.drop_after_pos(x) + + # Interaction + for i, layer in enumerate(self.interactions): + indexes = self.interaction_indexes[i] + x, c = layer(x, c, self.layers[indexes[0]:indexes[-1] + 1], + deform_inputs1, deform_inputs2, patch_resolution) + + # Split & Reshape + c2 = c[:, 0:c2.size(1), :] + c3 = c[:, c2.size(1):c2.size(1) + c3.size(1), :] + c4 = c[:, c2.size(1) + c3.size(1):, :] + + c2 = c2.transpose(1, 2).view(bs, dim, H, W).contiguous() + c3 = c3.transpose(1, 2).view(bs, dim, H // 2, W // 2).contiguous() + c4 = c4.transpose(1, 2).view(bs, dim, H // 4, W // 4).contiguous() + c1 = self.up(c2) + c1 + + if self.add_vit_feature: + x2 = x.transpose(1, 2).view(bs, dim, H, W).contiguous() + x1 = F.interpolate(x2, scale_factor=2, mode='bilinear', align_corners=False) + x3 = F.interpolate(x2, scale_factor=0.5, mode='bilinear', align_corners=False) + x4 = F.interpolate(x2, scale_factor=0.25, mode='bilinear', align_corners=False) + c1, c2, c3, c4 = c1 + x1, c2 + x2, c3 + x3, c4 + x4 + + # Final Norm + f1 = self.ad_norm1(c1) + f2 = self.ad_norm2(c2) + f3 = self.ad_norm3(c3) + f4 = self.ad_norm4(c4) + return [f1, f2, f3, f4] + + +@BACKBONES.register_module() +class GPViTAdapterSingleStage(GPViTAdapter): + def __init__(self, + pretrain_size=224, + conv_inplane=64, + n_points=4, + deform_num_heads=6, + init_values=0., + interaction_indexes=None, + with_cffn=True, + cffn_ratio=0.25, + deform_ratio=1.0, + add_vit_feature=True, + use_extra_extractor=True, + att_with_cp=False, + group_with_cp=False, + *args, + **kwargs): + self.att_with_cp = att_with_cp + self.group_with_cp = group_with_cp + + super(GPViTAdapter, self).__init__(*args, **kwargs) + + self.num_classes = 80 + self.cls_token = None + self.num_block = len(self.layers) + self.pretrain_size = (pretrain_size, pretrain_size) + self.interaction_indexes = interaction_indexes + self.add_vit_feature = add_vit_feature + embed_dim = self.embed_dims + + self.level_embed = nn.Parameter(torch.zeros(3, embed_dim)) + self.spm = SpatialPriorModule(inplanes=conv_inplane, embed_dim=embed_dim, out_c1=False) + self.interactions = nn.Sequential(*[ + InteractionBlock_GPViT( + dim=embed_dim, + num_heads=deform_num_heads, + n_points=n_points, + init_values=init_values, + drop_path=self.drop_path_rate, + # norm_layer=self.norm1, + with_cffn=with_cffn, + cffn_ratio=cffn_ratio, + deform_ratio=deform_ratio, + extra_extractor=((True if i == len(interaction_indexes) - 1 else False) and use_extra_extractor), + down_stride=8 + ) + for i in range(len(interaction_indexes)) + ]) + self.ad_norm2 = nn.SyncBatchNorm(embed_dim) + self.ad_norm3 = nn.SyncBatchNorm(embed_dim) + self.ad_norm4 = nn.SyncBatchNorm(embed_dim) + + self.spm.apply(self._init_weights) + self.interactions.apply(self._init_weights) + self.apply(self._init_deform_weights) + normal_(self.level_embed) + + def forward(self, x): + deform_inputs1, deform_inputs2 = deform_inputs(x) + + # SPM forward + c2, c3, c4 = self.spm(x) # s4, s8, s16, s32 + c2, c3, c4 = self._add_level_embed(c2, c3, c4) + c = torch.cat([c2, c3, c4], dim=1) + + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + H, W = patch_resolution + bs, n, dim = 
x.shape + pos_embed = resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=0) + + x = x + pos_embed + x = self.drop_after_pos(x) + + # Interaction + for i, layer in enumerate(self.interactions): + indexes = self.interaction_indexes[i] + x, c = layer(x, c, self.layers[indexes[0]:indexes[-1] + 1], + deform_inputs1, deform_inputs2, patch_resolution) + + # Split & Reshape + c2 = c[:, 0:c2.size(1), :] + c3 = c[:, c2.size(1):c2.size(1) + c3.size(1), :] + c4 = c[:, c2.size(1) + c3.size(1):, :] + + c2 = c2.transpose(1, 2).view(bs, dim, H, W).contiguous() + c3 = c3.transpose(1, 2).view(bs, dim, H // 2, W // 2).contiguous() + c4 = c4.transpose(1, 2).view(bs, dim, H // 4, W // 4).contiguous() + + if self.add_vit_feature: + x2 = x.transpose(1, 2).view(bs, dim, H, W).contiguous() + x3 = F.interpolate(x2, scale_factor=0.5, mode='bilinear', align_corners=False) + x4 = F.interpolate(x2, scale_factor=0.25, mode='bilinear', align_corners=False) + c2, c3, c4 = c2 + x2, c3 + x3, c4 + x4 + + # Final Norm + f2 = self.ad_norm2(c2) + f3 = self.ad_norm3(c3) + f4 = self.ad_norm4(c4) + return [f2, f3, f4] + +class InteractionBlock_GPViT(InteractionBlock): + def forward(self, x, c, blocks, deform_inputs1, deform_inputs2, patch_resolution): + H, W = patch_resolution + + x = self.injector(query=x, + reference_points=deform_inputs1[0], + feat=c, + spatial_shapes=deform_inputs1[1], + level_start_index=deform_inputs1[2]) + for idx, blk in enumerate(blocks): + x = blk(x, patch_resolution) + + c = self.extractor(query=c, + reference_points=deform_inputs2[0], + feat=x, + spatial_shapes=deform_inputs2[1], + level_start_index=deform_inputs2[2], + H=H, W=W) + if self.extra_extractors is not None: + for extractor in self.extra_extractors: + c = extractor(query=c, + reference_points=deform_inputs2[0], + feat=x, + spatial_shapes=deform_inputs2[1], + level_start_index=deform_inputs2[2], + H=H, W=W) + return x, c + + +def deform_inputs(x): + bs, c, h, w = x.shape + spatial_shapes = torch.as_tensor([(h // 8, w // 8), + (h // 16, w // 16), + (h // 32, w // 32)], + dtype=torch.long, device=x.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + reference_points = get_reference_points([(h // 8, w // 8)], x.device) + deform_inputs1 = [reference_points, spatial_shapes, level_start_index] + + spatial_shapes = torch.as_tensor([(h // 8, w // 8)], dtype=torch.long, device=x.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + reference_points = get_reference_points([(h // 8, w // 8), + (h // 16, w // 16), + (h // 32, w // 32)], x.device) + deform_inputs2 = [reference_points, spatial_shapes, level_start_index] + return deform_inputs1, deform_inputs2 diff --git a/downstream/mmdetection/mmdet/models/backbones/hourglass.py b/downstream/mmdetection/mmdet/models/backbones/hourglass.py new file mode 100644 index 0000000..f0dfb43 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/hourglass.py @@ -0,0 +1,222 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from ..builder import BACKBONES +from ..utils import ResLayer +from .resnet import BasicBlock + + +class HourglassModule(BaseModule): + """Hourglass Module for HourglassNet backbone. + + Generate module recursively and use BasicBlock as the base unit. 
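+    Each recursion level halves the spatial resolution with a stride-2
+    ``ResLayer`` and restores it with ``F.interpolate``, so ``depth``
+    controls how many times the input is downsampled inside the module.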
+ + Args: + depth (int): Depth of current HourglassModule. + stage_channels (list[int]): Feature channels of sub-modules in current + and follow-up HourglassModule. + stage_blocks (list[int]): Number of sub-modules stacked in current and + follow-up HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + upsample_cfg (dict, optional): Config dict for interpolate layer. + Default: `dict(mode='nearest')` + """ + + def __init__(self, + depth, + stage_channels, + stage_blocks, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=None, + upsample_cfg=dict(mode='nearest')): + super(HourglassModule, self).__init__(init_cfg) + + self.depth = depth + + cur_block = stage_blocks[0] + next_block = stage_blocks[1] + + cur_channel = stage_channels[0] + next_channel = stage_channels[1] + + self.up1 = ResLayer( + BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg) + + self.low1 = ResLayer( + BasicBlock, + cur_channel, + next_channel, + cur_block, + stride=2, + norm_cfg=norm_cfg) + + if self.depth > 1: + self.low2 = HourglassModule(depth - 1, stage_channels[1:], + stage_blocks[1:]) + else: + self.low2 = ResLayer( + BasicBlock, + next_channel, + next_channel, + next_block, + norm_cfg=norm_cfg) + + self.low3 = ResLayer( + BasicBlock, + next_channel, + cur_channel, + cur_block, + norm_cfg=norm_cfg, + downsample_first=False) + + self.up2 = F.interpolate + self.upsample_cfg = upsample_cfg + + def forward(self, x): + """Forward function.""" + up1 = self.up1(x) + low1 = self.low1(x) + low2 = self.low2(low1) + low3 = self.low3(low2) + # Fixing `scale factor` (e.g. 2) is common for upsampling, but + # in some cases the spatial size is mismatched and error will arise. + if 'scale_factor' in self.upsample_cfg: + up2 = self.up2(low3, **self.upsample_cfg) + else: + shape = up1.shape[2:] + up2 = self.up2(low3, size=shape, **self.upsample_cfg) + return up1 + up2 + + +@BACKBONES.register_module() +class HourglassNet(BaseModule): + """HourglassNet backbone. + + Stacked Hourglass Networks for Human Pose Estimation. + More details can be found in the `paper + `_ . + + Args: + downsample_times (int): Downsample times in a HourglassModule. + num_stacks (int): Number of HourglassModule modules stacked, + 1 for Hourglass-52, 2 for Hourglass-104. + stage_channels (list[int]): Feature channel of each sub-module in a + HourglassModule. + stage_blocks (list[int]): Number of sub-modules stacked in a + HourglassModule. + feat_channel (int): Feature channel of conv after a HourglassModule. + norm_cfg (dict): Dictionary to construct and config norm layer. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Example: + >>> from mmdet.models import HourglassNet + >>> import torch + >>> self = HourglassNet() + >>> self.eval() + >>> inputs = torch.rand(1, 3, 511, 511) + >>> level_outputs = self.forward(inputs) + >>> for level_output in level_outputs: + ... 
print(tuple(level_output.shape)) + (1, 256, 128, 128) + (1, 256, 128, 128) + """ + + def __init__(self, + downsample_times=5, + num_stacks=2, + stage_channels=(256, 256, 384, 384, 384, 512), + stage_blocks=(2, 2, 2, 2, 2, 4), + feat_channel=256, + norm_cfg=dict(type='BN', requires_grad=True), + pretrained=None, + init_cfg=None): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(HourglassNet, self).__init__(init_cfg) + + self.num_stacks = num_stacks + assert self.num_stacks >= 1 + assert len(stage_channels) == len(stage_blocks) + assert len(stage_channels) > downsample_times + + cur_channel = stage_channels[0] + + self.stem = nn.Sequential( + ConvModule( + 3, cur_channel // 2, 7, padding=3, stride=2, + norm_cfg=norm_cfg), + ResLayer( + BasicBlock, + cur_channel // 2, + cur_channel, + 1, + stride=2, + norm_cfg=norm_cfg)) + + self.hourglass_modules = nn.ModuleList([ + HourglassModule(downsample_times, stage_channels, stage_blocks) + for _ in range(num_stacks) + ]) + + self.inters = ResLayer( + BasicBlock, + cur_channel, + cur_channel, + num_stacks - 1, + norm_cfg=norm_cfg) + + self.conv1x1s = nn.ModuleList([ + ConvModule( + cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) + for _ in range(num_stacks - 1) + ]) + + self.out_convs = nn.ModuleList([ + ConvModule( + cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) + for _ in range(num_stacks) + ]) + + self.remap_convs = nn.ModuleList([ + ConvModule( + feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) + for _ in range(num_stacks - 1) + ]) + + self.relu = nn.ReLU(inplace=True) + + def init_weights(self): + """Init module weights.""" + # Training Centripetal Model needs to reset parameters for Conv2d + super(HourglassNet, self).init_weights() + for m in self.modules(): + if isinstance(m, nn.Conv2d): + m.reset_parameters() + + def forward(self, x): + """Forward function.""" + inter_feat = self.stem(x) + out_feats = [] + + for ind in range(self.num_stacks): + single_hourglass = self.hourglass_modules[ind] + out_conv = self.out_convs[ind] + + hourglass_feat = single_hourglass(inter_feat) + out_feat = out_conv(hourglass_feat) + out_feats.append(out_feat) + + if ind < self.num_stacks - 1: + inter_feat = self.conv1x1s[ind]( + inter_feat) + self.remap_convs[ind]( + out_feat) + inter_feat = self.inters[ind](self.relu(inter_feat)) + + return out_feats diff --git a/downstream/mmdetection/mmdet/models/backbones/hrnet.py b/downstream/mmdetection/mmdet/models/backbones/hrnet.py new file mode 100644 index 0000000..06c210a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/hrnet.py @@ -0,0 +1,589 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule, ModuleList, Sequential +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from .resnet import BasicBlock, Bottleneck + + +class HRModule(BaseModule): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange + is in this module. 
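+
+    Example:
+        >>> # Minimal sketch: two branches fused back to two resolutions.
+        >>> from mmdet.models.backbones.hrnet import HRModule
+        >>> from mmdet.models.backbones.resnet import BasicBlock
+        >>> import torch
+        >>> module = HRModule(
+        ...     num_branches=2,
+        ...     blocks=BasicBlock,
+        ...     num_blocks=(4, 4),
+        ...     in_channels=[32, 64],
+        ...     num_channels=(32, 64))
+        >>> feats = [torch.rand(1, 32, 64, 64), torch.rand(1, 64, 32, 32)]
+        >>> outs = module(feats)
+        >>> [tuple(o.shape) for o in outs]
+        [(1, 32, 64, 64), (1, 64, 32, 32)]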
+ """ + + def __init__(self, + num_branches, + blocks, + num_blocks, + in_channels, + num_channels, + multiscale_output=True, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + block_init_cfg=None, + init_cfg=None): + super(HRModule, self).__init__(init_cfg) + self.block_init_cfg = block_init_cfg + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, blocks, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=False) + + def _check_branches(self, num_branches, num_blocks, in_channels, + num_channels): + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_BLOCKS({len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_CHANNELS({len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + downsample = None + if stride != 1 or \ + self.in_channels[branch_index] != \ + num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.in_channels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, num_channels[branch_index] * + block.expansion)[1]) + + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + init_cfg=self.block_init_cfg)) + self.in_channels[branch_index] = \ + num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + init_cfg=self.block_init_cfg)) + + return Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + 
stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=False))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@BACKBONES.register_module() +class HRNet(BaseModule): + """HRNet backbone. + + `High-Resolution Representations for Labeling Pixels and Regions + arXiv: `_. + + Args: + extra (dict): Detailed configuration for each stage of HRNet. + There must be 4 stages, the configuration for each stage must have + 5 keys: + + - num_modules(int): The number of HRModule in this stage. + - num_branches(int): The number of branches in the HRModule. + - block(str): The type of convolution block. + - num_blocks(tuple): The number of blocks in each branch. + The length must be equal to num_branches. + - num_channels(tuple): The number of channels in each branch. + The length must be equal to num_branches. + in_channels (int): Number of input image channels. Default: 3. + conv_cfg (dict): Dictionary to construct and config conv layer. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: True. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: False. + multiscale_output (bool): Whether to output multi-level features + produced by multiple branches. If False, only the first level + feature will be output. Default: True. + pretrained (str, optional): Model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Example: + >>> from mmdet.models import HRNet + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 32, 8, 8) + (1, 64, 4, 4) + (1, 128, 2, 2) + (1, 256, 1, 1) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + + def __init__(self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=True, + with_cp=False, + zero_init_residual=False, + multiscale_output=True, + pretrained=None, + init_cfg=None): + super(HRNet, self).__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + # Assert configurations of 4 stages are in extra + assert 'stage1' in extra and 'stage2' in extra \ + and 'stage3' in extra and 'stage4' in extra + # Assert whether the length of `num_blocks` and `num_channels` are + # equal to `num_branches` + for i in range(4): + cfg = extra[f'stage{i + 1}'] + assert len(cfg['num_blocks']) == cfg['num_branches'] and \ + len(cfg['num_channels']) == cfg['num_branches'] + + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + + # stem net + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + 64, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + # stage 1 + self.stage1_cfg = self.extra['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'][0] + + block = self.blocks_dict[block_type] + stage1_out_channels = num_channels * block.expansion + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + + # stage 2 + self.stage2_cfg = self.extra['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = self.stage2_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition1 = self._make_transition_layer([stage1_out_channels], + num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = self.extra['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = self.stage3_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition2 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = self.extra['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type = self.stage4_cfg['block'] + + block = self.blocks_dict[block_type] + 
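The fuse layers built in `HRModule._make_fuse_layers` above follow a simple rule: for output branch `i`, every lower-resolution branch `j > i` is upsampled by `2**(j - i)` after a 1x1 projection, every higher-resolution branch is brought down with repeated stride-2 convs, and all contributions are summed before a ReLU. A functional sketch of that rule (equal channels on every branch and average pooling in place of strided convs, purely to keep the example short):

```python
import torch
import torch.nn.functional as F


def fuse_to_branch(i, feats):
    """Fuse multi-resolution features into the resolution of branch i.

    Sketch of the rule built by HRModule._make_fuse_layers: lower-resolution
    branches are upsampled by 2**(j - i); higher-resolution branches are
    downsampled by repeated factor-2 reductions (stride-2 convs in the real
    module); channel projections are omitted here for brevity.
    """
    target = feats[i].shape[2:]
    y = feats[i].clone()
    for j, x in enumerate(feats):
        if j == i:
            continue
        if j > i:
            y = y + F.interpolate(x, size=target, mode='nearest')
        else:
            y = y + F.avg_pool2d(x, kernel_size=2 ** (i - j))
    return F.relu(y)


# Three branches at 1/4, 1/8 and 1/16 resolution, equal channels for the demo.
feats = [torch.rand(1, 32, 32, 32), torch.rand(1, 32, 16, 16), torch.rand(1, 32, 8, 8)]
print([tuple(fuse_to_branch(i, feats).shape) for i in range(3)])
# [(1, 32, 32, 32), (1, 32, 16, 16), (1, 32, 8, 8)]
```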
num_channels = [channel * block.expansion for channel in num_channels] + self.transition3 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, num_channels, multiscale_output=multiscale_output) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) + + layers = [] + block_init_cfg = None + if self.pretrained is None and not hasattr( + self, 'init_cfg') and self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm3')) + layers.append( + block( + inplanes, + planes, + stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + init_cfg=block_init_cfg, + )) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + init_cfg=block_init_cfg)) + + return Sequential(*layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + block_init_cfg = None + if self.pretrained is None and not hasattr( + self, 'init_cfg') and self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', 
val=0, override=dict(name='norm3')) + + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + block_init_cfg=block_init_cfg)) + + return Sequential(*hr_modules), in_channels + + def forward(self, x): + """Forward function.""" + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + return y_list + + def train(self, mode=True): + """Convert the model into training mode will keeping the normalization + layer freezed.""" + super(HRNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/downstream/mmdetection/mmdet/models/backbones/mobilenet_v2.py b/downstream/mmdetection/mmdet/models/backbones/mobilenet_v2.py new file mode 100644 index 0000000..8c6fcfa --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/mobilenet_v2.py @@ -0,0 +1,197 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import InvertedResidual, make_divisible + + +@BACKBONES.register_module() +class MobileNetV2(BaseModule): + """MobileNetV2 backbone. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int], optional): Output from which stages. + Default: (1, 2, 4, 7). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + # Parameters to build layers. 
4 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks, stride. + arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], + [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], + [6, 320, 1, 1]] + + def __init__(self, + widen_factor=1., + out_indices=(1, 2, 4, 7), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + super(MobileNetV2, self).__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + self.widen_factor = widen_factor + self.out_indices = out_indices + if not set(out_indices).issubset(set(range(0, 8))): + raise ValueError('out_indices must be a subset of range' + f'(0, 8). But received {out_indices}') + + if frozen_stages not in range(-1, 8): + raise ValueError('frozen_stages must be in range(-1, 8). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks, stride = layer_cfg + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + if widen_factor > 1.0: + self.out_channel = int(1280 * widen_factor) + else: + self.out_channel = 1280 + + layer = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channel, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.add_module('conv2', layer) + self.layers.append('conv2') + + def make_layer(self, out_channels, num_blocks, stride, expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. Default: 6. 
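Each row of `arch_settings` above is expanded with `make_divisible` so that channel counts stay multiples of 8 under any `widen_factor`. The helper is imported from `..utils`; the version below is a commonly used reference implementation and only an assumption about its exact behaviour:

```python
def make_divisible(value, divisor=8, min_value=None, min_ratio=0.9):
    """Round `value` to the nearest multiple of `divisor`, never dropping
    more than (1 - min_ratio) of the original value (assumed reference
    implementation; the mmdet util may differ in detail)."""
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < min_ratio * value:
        new_value += divisor
    return new_value


arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                 [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                 [6, 320, 1, 1]]

widen_factor = 0.5
stem = make_divisible(32 * widen_factor, 8)
stage_channels = [make_divisible(c * widen_factor, 8) for _, c, _, _ in arch_settings]
print(stem, stage_channels)  # 16 [8, 16, 16, 32, 48, 80, 160]
```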
+ """ + layers = [] + for i in range(num_blocks): + if i >= 1: + stride = 1 + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + mid_channels=int(round(self.in_channels * expand_ratio)), + stride=stride, + with_expand_conv=expand_ratio != 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + x = self.conv1(x) + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + frozen.""" + super(MobileNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/downstream/mmdetection/mmdet/models/backbones/pvt.py b/downstream/mmdetection/mmdet/models/backbones/pvt.py new file mode 100644 index 0000000..8b7d5d5 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/pvt.py @@ -0,0 +1,591 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (Conv2d, build_activation_layer, build_norm_layer, + constant_init, normal_init, trunc_normal_init) +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import MultiheadAttention +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner import (BaseModule, ModuleList, Sequential, _load_checkpoint, + load_state_dict) +from torch.nn.modules.utils import _pair as to_2tuple + +from ...utils import get_root_logger +from ..builder import BACKBONES +from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw, pvt_convert + + +class MixFFN(BaseModule): + """An implementation of MixFFN of PVT. + + The differences between MixFFN & FFN: + 1. Use 1X1 Conv to replace Linear layer. + 2. Introduce 3X3 Depth-wise Conv to encode positional information. + + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. + feedforward_channels (int): The hidden dimension of FFNs. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='GELU'). + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + Default: None. + use_conv (bool): If True, add 3x3 DWConv between two Linear layers. + Defaults: False. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
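Because MixFFN swaps Linear layers for 1x1 convs (plus the optional depth-wise 3x3), it has to round-trip the token sequence between (B, L, C) and (B, C, H, W). The real `nlc_to_nchw`/`nchw_to_nlc` helpers live in `mmdet.models.utils`; the stand-ins below only assume the standard reshape/transpose:

```python
import torch


def nlc_to_nchw(x, hw_shape):
    """(B, H*W, C) -> (B, C, H, W); minimal stand-in for the mmdet helper."""
    H, W = hw_shape
    B, L, C = x.shape
    assert L == H * W, 'sequence length must match the spatial shape'
    return x.transpose(1, 2).reshape(B, C, H, W).contiguous()


def nchw_to_nlc(x):
    """(B, C, H, W) -> (B, H*W, C)."""
    return x.flatten(2).transpose(1, 2).contiguous()


tokens = torch.rand(2, 56 * 56, 64)
feat = nlc_to_nchw(tokens, (56, 56))  # ready for the Conv2d layers in MixFFN
back = nchw_to_nlc(feat)
assert torch.equal(back, tokens)
```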
+ """ + + def __init__(self, + embed_dims, + feedforward_channels, + act_cfg=dict(type='GELU'), + ffn_drop=0., + dropout_layer=None, + use_conv=False, + init_cfg=None): + super(MixFFN, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.act_cfg = act_cfg + activate = build_activation_layer(act_cfg) + + in_channels = embed_dims + fc1 = Conv2d( + in_channels=in_channels, + out_channels=feedforward_channels, + kernel_size=1, + stride=1, + bias=True) + if use_conv: + # 3x3 depth wise conv to provide positional encode information + dw_conv = Conv2d( + in_channels=feedforward_channels, + out_channels=feedforward_channels, + kernel_size=3, + stride=1, + padding=(3 - 1) // 2, + bias=True, + groups=feedforward_channels) + fc2 = Conv2d( + in_channels=feedforward_channels, + out_channels=in_channels, + kernel_size=1, + stride=1, + bias=True) + drop = nn.Dropout(ffn_drop) + layers = [fc1, activate, drop, fc2, drop] + if use_conv: + layers.insert(1, dw_conv) + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + + def forward(self, x, hw_shape, identity=None): + out = nlc_to_nchw(x, hw_shape) + out = self.layers(out) + out = nchw_to_nlc(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +class SpatialReductionAttention(MultiheadAttention): + """An implementation of Spatial Reduction Attention of PVT. + + This module is modified from MultiheadAttention which is a module from + mmcv.cnn.bricks.transformer. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. Default: None. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: False. + qkv_bias (bool): enable bias for qkv if True. Default: True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of Spatial Reduction + Attention of PVT. Default: 1. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=None, + batch_first=True, + qkv_bias=True, + norm_cfg=dict(type='LN'), + sr_ratio=1, + init_cfg=None): + super().__init__( + embed_dims, + num_heads, + attn_drop, + proj_drop, + batch_first=batch_first, + dropout_layer=dropout_layer, + bias=qkv_bias, + init_cfg=init_cfg) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=sr_ratio, + stride=sr_ratio) + # The ret[0] of build_norm_layer is norm name. + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + + # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa + from mmdet import digit_version, mmcv_version + if mmcv_version < digit_version('1.3.17'): + warnings.warn('The legacy version of forward function in' + 'SpatialReductionAttention is deprecated in' + 'mmcv>=1.3.17 and will no longer support in the' + 'future. 
Please upgrade your mmcv.') + self.forward = self.legacy_forward + + def forward(self, x, hw_shape, identity=None): + + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + x_q = x_q.transpose(0, 1) + x_kv = x_kv.transpose(0, 1) + + out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + def legacy_forward(self, x, hw_shape, identity=None): + """multi head attention forward in mmcv version < 1.3.17.""" + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] + + return identity + self.dropout_layer(self.proj_drop(out)) + + +class PVTEncoderLayer(BaseModule): + """Implements one encoder layer in PVT. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed. + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): stochastic depth rate. Default: 0.0. + qkv_bias (bool): enable bias for qkv if True. + Default: True. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of Spatial Reduction + Attention of PVT. Default: 1. + use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. + Default: False. + init_cfg (dict, optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=1, + use_conv_ffn=False, + init_cfg=None): + super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg) + + # The ret[0] of build_norm_layer is norm name. + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.attn = SpatialReductionAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + # The ret[0] of build_norm_layer is norm name. 
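Spatial-reduction attention shrinks only the key/value sequence: a conv with kernel and stride `sr_ratio` turns H x W tokens into (H/sr) x (W/sr), cutting attention cost by roughly `sr_ratio**2` while leaving the query length untouched. A hedged sketch of that reduction with plain PyTorch modules (torch's `nn.MultiheadAttention` stands in for the mmcv wrapper used above):

```python
import torch
import torch.nn as nn

embed_dims, sr_ratio, hw_shape = 64, 8, (56, 56)
sr = nn.Conv2d(embed_dims, embed_dims, kernel_size=sr_ratio, stride=sr_ratio)
norm = nn.LayerNorm(embed_dims)

x = torch.rand(2, hw_shape[0] * hw_shape[1], embed_dims)    # queries: 3136 tokens
x_kv = x.transpose(1, 2).reshape(2, embed_dims, *hw_shape)  # NLC -> NCHW
x_kv = sr(x_kv)                                             # 56x56 -> 7x7
x_kv = norm(x_kv.flatten(2).transpose(1, 2))                # NCHW -> NLC, then LN
print(x.shape, x_kv.shape)  # torch.Size([2, 3136, 64]) torch.Size([2, 49, 64])

attn = nn.MultiheadAttention(embed_dims, num_heads=1, batch_first=True)
out, _ = attn(query=x, key=x_kv, value=x_kv)                # out keeps the query length
print(out.shape)            # torch.Size([2, 3136, 64])
```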
+ self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.ffn = MixFFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + use_conv=use_conv_ffn, + act_cfg=act_cfg) + + def forward(self, x, hw_shape): + x = self.attn(self.norm1(x), hw_shape, identity=x) + x = self.ffn(self.norm2(x), hw_shape, identity=x) + + return x + + +class AbsolutePositionEmbedding(BaseModule): + """An implementation of the absolute position embedding in PVT. + + Args: + pos_shape (int): The shape of the absolute position embedding. + pos_dim (int): The dimension of the absolute position embedding. + drop_rate (float): Probability of an element to be zeroed. + Default: 0.0. + """ + + def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(pos_shape, int): + pos_shape = to_2tuple(pos_shape) + elif isinstance(pos_shape, tuple): + if len(pos_shape) == 1: + pos_shape = to_2tuple(pos_shape[0]) + assert len(pos_shape) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pos_shape)}' + self.pos_shape = pos_shape + self.pos_dim = pos_dim + + self.pos_embed = nn.Parameter( + torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim)) + self.drop = nn.Dropout(p=drop_rate) + + def init_weights(self): + trunc_normal_(self.pos_embed, std=0.02) + + def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'): + """Resize pos_embed weights. + + Resize pos_embed using bilinear interpolate method. + + Args: + pos_embed (torch.Tensor): Position embedding weights. + input_shape (tuple): Tuple for (downsampled input image height, + downsampled input image width). + mode (str): Algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. Default: ``'bilinear'``. + + Return: + torch.Tensor: The resized pos_embed of shape [B, L_new, C]. + """ + assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' + pos_h, pos_w = self.pos_shape + pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] + pos_embed_weight = pos_embed_weight.reshape( + 1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous() + pos_embed_weight = F.interpolate( + pos_embed_weight, size=input_shape, mode=mode) + pos_embed_weight = torch.flatten(pos_embed_weight, + 2).transpose(1, 2).contiguous() + pos_embed = pos_embed_weight + + return pos_embed + + def forward(self, x, hw_shape, mode='bilinear'): + pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode) + return self.drop(x + pos_embed) + + +@BACKBONES.register_module() +class PyramidVisionTransformer(BaseModule): + """Pyramid Vision Transformer (PVT) + + Implementation of `Pyramid Vision Transformer: A Versatile Backbone for + Dense Prediction without Convolutions + `_. + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): Embedding dimension. Default: 64. + num_stags (int): The num of stages. Default: 4. + num_layers (Sequence[int]): The layer number of each transformer encode + layer. Default: [3, 4, 6, 3]. + num_heads (Sequence[int]): The attention heads of each transformer + encode layer. Default: [1, 2, 5, 8]. + patch_sizes (Sequence[int]): The patch_size of each patch embedding. + Default: [4, 2, 2, 2]. + strides (Sequence[int]): The stride of each patch embedding. + Default: [4, 2, 2, 2]. 
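`AbsolutePositionEmbedding.resize_pos_embed` above boils down to reshaping the learned (1, H*W, C) table to a grid, bilinearly interpolating it to the runtime feature size, and flattening it back. The same steps outside the class (shapes are illustrative):

```python
import torch
import torch.nn.functional as F

pos_h = pos_w = 56           # grid the embedding was trained for
pos_dim = 64
pos_embed = torch.zeros(1, pos_h * pos_w, pos_dim)

new_hw = (64, 48)            # grid of the current input
weight = pos_embed.reshape(1, pos_h, pos_w, pos_dim).permute(0, 3, 1, 2)
weight = F.interpolate(weight, size=new_hw, mode='bilinear', align_corners=False)
resized = weight.flatten(2).transpose(1, 2)
print(resized.shape)         # torch.Size([1, 3072, 64]) == (1, 64 * 48, pos_dim)
```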
+ paddings (Sequence[int]): The padding of each patch embedding. + Default: [0, 0, 0, 0]. + sr_ratios (Sequence[int]): The spatial reduction rate of each + transformer encode layer. Default: [8, 4, 2, 1]. + out_indices (Sequence[int] | int): Output from which stages. + Default: (0, 1, 2, 3). + mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the + embedding dim of each transformer encode layer. + Default: [8, 8, 4, 4]. + qkv_bias (bool): Enable bias for qkv if True. Default: True. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0. + drop_path_rate (float): stochastic depth rate. Default 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults: True. + use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. + Default: False. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + pretrained (str, optional): model pretrained path. Default: None. + convert_weights (bool): The flag indicates whether the + pre-trained model is from the original repo. We may need + to convert some keys to make it compatible. + Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + pretrain_img_size=224, + in_channels=3, + embed_dims=64, + num_stages=4, + num_layers=[3, 4, 6, 3], + num_heads=[1, 2, 5, 8], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + paddings=[0, 0, 0, 0], + sr_ratios=[8, 4, 2, 1], + out_indices=(0, 1, 2, 3), + mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + use_abs_pos_embed=True, + norm_after_stage=False, + use_conv_ffn=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN', eps=1e-6), + pretrained=None, + convert_weights=True, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.convert_weights = convert_weights + if isinstance(pretrain_img_size, int): + pretrain_img_size = to_2tuple(pretrain_img_size) + elif isinstance(pretrain_img_size, tuple): + if len(pretrain_img_size) == 1: + pretrain_img_size = to_2tuple(pretrain_img_size[0]) + assert len(pretrain_img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pretrain_img_size)}' + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + self.init_cfg = init_cfg + else: + raise TypeError('pretrained must be a str or None') + + self.embed_dims = embed_dims + + self.num_stages = num_stages + self.num_layers = num_layers + self.num_heads = num_heads + self.patch_sizes = patch_sizes + self.strides = strides + self.sr_ratios = sr_ratios + assert num_stages == len(num_layers) == len(num_heads) \ + == len(patch_sizes) == len(strides) == len(sr_ratios) + + self.out_indices = out_indices + assert max(out_indices) < self.num_stages + self.pretrained = pretrained + + # transformer encoder + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(num_layers)) + ] # stochastic num_layer decay rule + + cur = 0 + self.layers = ModuleList() + for i, num_layer in enumerate(num_layers): + embed_dims_i 
= embed_dims * num_heads[i] + patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims_i, + kernel_size=patch_sizes[i], + stride=strides[i], + padding=paddings[i], + bias=True, + norm_cfg=norm_cfg) + + layers = ModuleList() + if use_abs_pos_embed: + pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1]) + pos_embed = AbsolutePositionEmbedding( + pos_shape=pos_shape, + pos_dim=embed_dims_i, + drop_rate=drop_rate) + layers.append(pos_embed) + layers.extend([ + PVTEncoderLayer( + embed_dims=embed_dims_i, + num_heads=num_heads[i], + feedforward_channels=mlp_ratios[i] * embed_dims_i, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[cur + idx], + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + sr_ratio=sr_ratios[i], + use_conv_ffn=use_conv_ffn) for idx in range(num_layer) + ]) + in_channels = embed_dims_i + # The ret[0] of build_norm_layer is norm name. + if norm_after_stage: + norm = build_norm_layer(norm_cfg, embed_dims_i)[1] + else: + norm = nn.Identity() + self.layers.append(ModuleList([patch_embed, layers, norm])) + cur += num_layer + + def init_weights(self): + logger = get_root_logger() + if self.init_cfg is None: + logger.warn(f'No pre-trained weights for ' + f'{self.__class__.__name__}, ' + f'training start from scratch') + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, nn.LayerNorm): + constant_init(m, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[ + 1] * m.out_channels + fan_out //= m.groups + normal_init(m, 0, math.sqrt(2.0 / fan_out)) + elif isinstance(m, AbsolutePositionEmbedding): + m.init_weights() + else: + assert 'checkpoint' in self.init_cfg, f'Only support ' \ + f'specify `Pretrained` in ' \ + f'`init_cfg` in ' \ + f'{self.__class__.__name__} ' + checkpoint = _load_checkpoint( + self.init_cfg.checkpoint, logger=logger, map_location='cpu') + logger.warn(f'Load pre-trained model for ' + f'{self.__class__.__name__} from original repo') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + if self.convert_weights: + # Because pvt backbones are not supported by mmcls, + # so we need to convert pre-trained weights to match this + # implementation. + state_dict = pvt_convert(state_dict) + load_state_dict(self, state_dict, strict=False, logger=logger) + + def forward(self, x): + outs = [] + + for i, layer in enumerate(self.layers): + x, hw_shape = layer[0](x) + + for block in layer[1]: + x = block(x, hw_shape) + x = layer[2](x) + x = nlc_to_nchw(x, hw_shape) + if i in self.out_indices: + outs.append(x) + + return outs + + +@BACKBONES.register_module() +class PyramidVisionTransformerV2(PyramidVisionTransformer): + """Implementation of `PVTv2: Improved Baselines with Pyramid Vision + Transformer `_.""" + + def __init__(self, **kwargs): + super(PyramidVisionTransformerV2, self).__init__( + patch_sizes=[7, 3, 3, 3], + paddings=[3, 1, 1, 1], + use_abs_pos_embed=False, + norm_after_stage=True, + use_conv_ffn=True, + **kwargs) diff --git a/downstream/mmdetection/mmdet/models/backbones/regnet.py b/downstream/mmdetection/mmdet/models/backbones/regnet.py new file mode 100644 index 0000000..63adc3c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/regnet.py @@ -0,0 +1,356 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
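The `dpr` list built in `PyramidVisionTransformer.__init__` above schedules stochastic depth linearly over all encoder layers and slices it per stage with the running `cur` offset, so deeper layers are dropped more often. A quick check of that schedule with the default depths:

```python
import torch

num_layers = [3, 4, 6, 3]    # default PVT stage depths
drop_path_rate = 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(num_layers))]

cur = 0
for i, num_layer in enumerate(num_layers):
    stage_rates = dpr[cur:cur + num_layer]
    print(f'stage {i}: {[round(r, 3) for r in stage_rates]}')
    cur += num_layer
# stage 0: [0.0, 0.007, 0.013]
# stage 1: [0.02, 0.027, 0.033, 0.04]
# stage 2: [0.047, 0.053, 0.06, 0.067, 0.073, 0.08]
# stage 3: [0.087, 0.093, 0.1]
```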
+import warnings + +import numpy as np +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import ResNet +from .resnext import Bottleneck + + +@BACKBONES.register_module() +class RegNet(ResNet): + """RegNet backbone. + + More details can be found in `paper `_ . + + Args: + arch (dict): The parameter of RegNets. + + - w0 (int): initial width + - wa (float): slope of width + - wm (float): quantization parameter to quantize the width + - depth (int): depth of the backbone + - group_w (int): width of group + - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. + strides (Sequence[int]): Strides of the first block of each stage. + base_channels (int): Base channels after stem layer. + in_channels (int): Number of input image channels. Default: 3. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Example: + >>> from mmdet.models import RegNet + >>> import torch + >>> self = RegNet( + arch=dict( + w0=88, + wa=26.31, + wm=2.25, + group_w=48, + depth=25, + bot_mul=1.0)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 96, 8, 8) + (1, 192, 4, 4) + (1, 432, 2, 2) + (1, 1008, 1, 1) + """ + arch_settings = { + 'regnetx_400mf': + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + 'regnetx_800mf': + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), + 'regnetx_1.6gf': + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), + 'regnetx_3.2gf': + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), + 'regnetx_4.0gf': + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), + 'regnetx_6.4gf': + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), + 'regnetx_8.0gf': + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), + 'regnetx_12gf': + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), + } + + def __init__(self, + arch, + in_channels=3, + stem_channels=32, + base_channels=32, + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + plugins=None, + with_cp=False, + zero_init_residual=True, + pretrained=None, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + + # Generate RegNet parameters first + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": "{arch}" is not one of the' \ + ' arch_settings' + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise ValueError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + widths, num_stages = self.generate_regnet( + arch['w0'], + arch['wa'], + arch['wm'], + arch['depth'], + ) + # Convert to per stage format + stage_widths, stage_blocks = self.get_stages_from_blocks(widths) + # Generate group widths and bot muls + group_widths = [arch['group_w'] for _ in range(num_stages)] + self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] + # Adjust the compatibility of stage_widths and group_widths + stage_widths, group_widths = self.adjust_width_group( + stage_widths, self.bottleneck_ratio, group_widths) + + # Group params by stage + self.stage_widths = stage_widths + self.group_widths = group_widths + self.depth = sum(stage_blocks) + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.plugins = plugins + self.zero_init_residual = zero_init_residual + self.block = Bottleneck + expansion_bak = self.block.expansion + self.block.expansion = 1 + self.stage_blocks = stage_blocks[:num_stages] + + self._make_stem_layer(in_channels, stem_channels) + + block_init_cfg = None + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use 
"init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + if self.zero_init_residual: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm3')) + else: + raise TypeError('pretrained must be a str or None') + + self.inplanes = stem_channels + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + group_width = self.group_widths[i] + width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) + stage_groups = width // group_width + + dcn = self.dcn if self.stage_with_dcn[i] else None + if self.plugins is not None: + stage_plugins = self.make_stage_plugins(self.plugins, i) + else: + stage_plugins = None + + res_layer = self.make_res_layer( + block=self.block, + inplanes=self.inplanes, + planes=self.stage_widths[i], + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn, + plugins=stage_plugins, + groups=stage_groups, + base_width=group_width, + base_channels=self.stage_widths[i], + init_cfg=block_init_cfg) + self.inplanes = self.stage_widths[i] + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = stage_widths[-1] + self.block.expansion = expansion_bak + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def generate_regnet(self, + initial_width, + width_slope, + width_parameter, + depth, + divisor=8): + """Generates per block width from RegNet parameters. + + Args: + initial_width ([int]): Initial width of the backbone + width_slope ([float]): Slope of the quantized linear function + width_parameter ([int]): Parameter used to quantize the width. + depth ([int]): Depth of the backbone. + divisor (int, optional): The divisor of channels. Defaults to 8. + + Returns: + list, int: return a list of widths of each stage and the number \ + of stages + """ + assert width_slope >= 0 + assert initial_width > 0 + assert width_parameter > 1 + assert initial_width % divisor == 0 + widths_cont = np.arange(depth) * width_slope + initial_width + ks = np.round( + np.log(widths_cont / initial_width) / np.log(width_parameter)) + widths = initial_width * np.power(width_parameter, ks) + widths = np.round(np.divide(widths, divisor)) * divisor + num_stages = len(np.unique(widths)) + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages + + @staticmethod + def quantize_float(number, divisor): + """Converts a float to closest non-zero int divisible by divisor. + + Args: + number (int): Original number to be quantized. + divisor (int): Divisor used to quantize the number. + + Returns: + int: quantized number that is divisible by devisor. 
+ """ + return int(round(number / divisor) * divisor) + + def adjust_width_group(self, widths, bottleneck_ratio, groups): + """Adjusts the compatibility of widths and groups. + + Args: + widths (list[int]): Width of each stage. + bottleneck_ratio (float): Bottleneck ratio. + groups (int): number of groups in each stage + + Returns: + tuple(list): The adjusted widths and groups of each stage. + """ + bottleneck_width = [ + int(w * b) for w, b in zip(widths, bottleneck_ratio) + ] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] + bottleneck_width = [ + self.quantize_float(w_bot, g) + for w_bot, g in zip(bottleneck_width, groups) + ] + widths = [ + int(w_bot / b) + for w_bot, b in zip(bottleneck_width, bottleneck_ratio) + ] + return widths, groups + + def get_stages_from_blocks(self, widths): + """Gets widths/stage_blocks of network at each stage. + + Args: + widths (list[int]): Width in each stage. + + Returns: + tuple(list): width and depth of each stage + """ + width_diff = [ + width != width_prev + for width, width_prev in zip(widths + [0], [0] + widths) + ] + stage_widths = [ + width for width, diff in zip(widths, width_diff[:-1]) if diff + ] + stage_blocks = np.diff([ + depth for depth, diff in zip(range(len(width_diff)), width_diff) + if diff + ]).tolist() + return stage_widths, stage_blocks + + def forward(self, x): + """Forward function.""" + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/backbones/res2net.py b/downstream/mmdetection/mmdet/models/backbones/res2net.py new file mode 100644 index 0000000..96afb2f --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/res2net.py @@ -0,0 +1,327 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import Sequential + +from ..builder import BACKBONES +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet + + +class Bottle2neck(_Bottleneck): + expansion = 4 + + def __init__(self, + inplanes, + planes, + scales=4, + base_width=26, + base_channels=64, + stage_type='normal', + **kwargs): + """Bottle2neck block for Res2Net. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. + """ + super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) + assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' 
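Taken together, `generate_regnet`, `get_stages_from_blocks` and `adjust_width_group` above turn the four `regnetx_3.2gf` scalars into the stage widths seen in the RegNet docstring example. The standalone sketch below mirrors those three methods outside the class so the intermediate values are visible:

```python
import numpy as np


def generate_regnet(w0, wa, wm, depth, divisor=8):
    """Quantized linear width rule, as in RegNet.generate_regnet above."""
    widths_cont = np.arange(depth) * wa + w0              # linear ramp
    ks = np.round(np.log(widths_cont / w0) / np.log(wm))  # snap to powers of wm
    widths = np.round(w0 * np.power(wm, ks) / divisor) * divisor
    return widths.astype(int).tolist(), len(np.unique(widths))


def get_stages_from_blocks(widths):
    """Collapse the per-block width list into (stage_widths, stage_blocks)."""
    diff = [w != w_prev for w, w_prev in zip(widths + [0], [0] + widths)]
    stage_widths = [w for w, d in zip(widths, diff[:-1]) if d]
    stage_blocks = np.diff([i for i, d in enumerate(diff) if d]).tolist()
    return stage_widths, stage_blocks


def adjust_width_group(widths, bottleneck_ratio, groups):
    """Snap each stage width to a multiple of its group width."""
    bot = [int(w * b) for w, b in zip(widths, bottleneck_ratio)]
    groups = [min(g, w_bot) for g, w_bot in zip(groups, bot)]
    bot = [int(round(w_bot / g) * g) for w_bot, g in zip(bot, groups)]
    widths = [int(w_bot / b) for w_bot, b in zip(bot, bottleneck_ratio)]
    return widths, groups


# regnetx_3.2gf: w0=88, wa=26.31, wm=2.25, depth=25, group_w=48, bot_mul=1.0
widths, num_stages = generate_regnet(w0=88, wa=26.31, wm=2.25, depth=25)
stage_widths, stage_blocks = get_stages_from_blocks(widths)
print(stage_widths, stage_blocks)  # [88, 200, 448, 1000] [2, 6, 15, 2]

stage_widths, groups = adjust_width_group(
    stage_widths, [1.0] * num_stages, [48] * num_stages)
print(stage_widths, groups)        # [96, 192, 432, 1008] [48, 48, 48, 48]
```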
+ width = int(math.floor(self.planes * (base_width / base_channels))) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width * scales, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width * scales, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + + if stage_type == 'stage' and self.conv2_stride != 1: + self.pool = nn.AvgPool2d( + kernel_size=3, stride=self.conv2_stride, padding=1) + convs = [] + bns = [] + + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + for i in range(scales - 1): + convs.append( + build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + bias=False)) + bns.append( + build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + for i in range(scales - 1): + convs.append( + build_conv_layer( + self.dcn, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + bias=False)) + bns.append( + build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + + self.conv3 = build_conv_layer( + self.conv_cfg, + width * scales, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.stage_type = stage_type + self.scales = scales + self.width = width + delattr(self, 'conv2') + delattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + spx = torch.split(out, self.width, 1) + sp = self.convs[0](spx[0].contiguous()) + sp = self.relu(self.bns[0](sp)) + out = sp + for i in range(1, self.scales - 1): + if self.stage_type == 'stage': + sp = spx[i] + else: + sp = sp + spx[i] + sp = self.convs[i](sp.contiguous()) + sp = self.relu(self.bns[i](sp)) + out = torch.cat((out, sp), 1) + + if self.stage_type == 'normal' or self.conv2_stride == 1: + out = torch.cat((out, spx[self.scales - 1]), 1) + elif self.stage_type == 'stage': + out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Res2Layer(Sequential): + """Res2Layer to build Res2Net style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. 
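The heart of `Bottle2neck.forward` above is the hierarchical split: the 1x1 output is chopped into `scales` chunks, each chunk after the first is added to the previous branch's output before its own 3x3 conv, and the last chunk is passed through untouched (in 'normal' blocks). A stripped-down functional sketch with plain convs and no norm layers:

```python
import torch
import torch.nn as nn

scales, width = 4, 26
convs = nn.ModuleList(
    [nn.Conv2d(width, width, 3, padding=1, bias=False) for _ in range(scales - 1)])

x = torch.rand(1, width * scales, 32, 32)       # output of the first 1x1 conv
spx = torch.split(x, width, dim=1)              # `scales` chunks of `width` channels

sp = convs[0](spx[0])
out = sp
for i in range(1, scales - 1):
    sp = convs[i](sp + spx[i])                  # cascade: reuse the previous branch
    out = torch.cat((out, sp), dim=1)
out = torch.cat((out, spx[scales - 1]), dim=1)  # last chunk is passed through
print(out.shape)                                # torch.Size([1, 104, 32, 32])
```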
Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottle2neck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + scales (int): Scales used in Res2Net. Default: 4 + base_width (int): Basic width of each scale. Default: 26 + """ + + def __init__(self, + block, + inplanes, + planes, + num_blocks, + stride=1, + avg_down=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + scales=4, + base_width=26, + **kwargs): + self.block = block + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False), + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=1, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + scales=scales, + base_width=base_width, + stage_type='stage', + **kwargs)) + inplanes = planes * block.expansion + for i in range(1, num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + scales=scales, + base_width=base_width, + **kwargs)) + super(Res2Layer, self).__init__(*layers) + + +@BACKBONES.register_module() +class Res2Net(ResNet): + """Res2Net backbone. + + Args: + scales (int): Scales used in Res2Net. Default: 4 + base_width (int): Basic width of each scale. Default: 26 + depth (int): Depth of res2net, from {50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Res2net stages. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottle2neck. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + - position (str, required): Position inside block to insert + plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. 
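With `avg_down=True` (the Res2Net default), the shortcut built in `Res2Layer` downsamples with an `AvgPool2d` followed by a stride-1 1x1 conv instead of a stride-2 1x1 conv, so no activations are simply skipped over. A minimal comparison of the two shortcut styles (plain modules, norm layers omitted):

```python
import torch
import torch.nn as nn

inplanes, planes, expansion, stride = 64, 64, 4, 2

# avg_down=True: pool first, then project channels without striding.
avg_shortcut = nn.Sequential(
    nn.AvgPool2d(kernel_size=stride, stride=stride,
                 ceil_mode=True, count_include_pad=False),
    nn.Conv2d(inplanes, planes * expansion, kernel_size=1, stride=1, bias=False))

# avg_down=False (plain ResNet style): a stride-2 1x1 conv drops 3 of every 4 pixels.
strided_shortcut = nn.Conv2d(
    inplanes, planes * expansion, kernel_size=1, stride=stride, bias=False)

x = torch.rand(1, inplanes, 32, 32)
print(avg_shortcut(x).shape, strided_shortcut(x).shape)
# torch.Size([1, 256, 16, 16]) torch.Size([1, 256, 16, 16])
```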
+ Default: None + + Example: + >>> from mmdet.models import Res2Net + >>> import torch + >>> self = Res2Net(depth=50, scales=4, base_width=26) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 8, 8) + (1, 512, 4, 4) + (1, 1024, 2, 2) + (1, 2048, 1, 1) + """ + + arch_settings = { + 50: (Bottle2neck, (3, 4, 6, 3)), + 101: (Bottle2neck, (3, 4, 23, 3)), + 152: (Bottle2neck, (3, 8, 36, 3)) + } + + def __init__(self, + scales=4, + base_width=26, + style='pytorch', + deep_stem=True, + avg_down=True, + pretrained=None, + init_cfg=None, + **kwargs): + self.scales = scales + self.base_width = base_width + super(Res2Net, self).__init__( + style='pytorch', + deep_stem=True, + avg_down=True, + pretrained=pretrained, + init_cfg=init_cfg, + **kwargs) + + def make_res_layer(self, **kwargs): + return Res2Layer( + scales=self.scales, + base_width=self.base_width, + base_channels=self.base_channels, + **kwargs) diff --git a/downstream/mmdetection/mmdet/models/backbones/resnest.py b/downstream/mmdetection/mmdet/models/backbones/resnest.py new file mode 100644 index 0000000..69629b9 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/resnest.py @@ -0,0 +1,322 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule + +from ..builder import BACKBONES +from ..utils import ResLayer +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. + """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(BaseModule): + """Split-Attention Conv2d in ResNeSt. + + Args: + in_channels (int): Number of channels in the input feature map. + channels (int): Number of intermediate channels. + kernel_size (int | tuple[int]): Size of the convolution kernel. + stride (int | tuple[int]): Stride of the convolution. + padding (int | tuple[int]): Zero-padding added to both sides of + dilation (int | tuple[int]): Spacing between kernel elements. + groups (int): Number of blocked connections from input channels to + output channels. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels. Default: 4. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + dcn (dict): Config dict for DCN. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + init_cfg=None): + super(SplitAttentionConv2d, self).__init__(init_cfg) + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.with_dcn = dcn is not None + self.dcn = dcn + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if self.with_dcn and not fallback_on_stride: + assert conv_cfg is None, 'conv_cfg must be None for DCN' + conv_cfg = dcn + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + # To be consistent with original implementation, starting from 0 + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + """nn.Module: the normalization layer named "norm0" """ + return getattr(self, self.norm0_name) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + batch = x.size(0) + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + inplane (int): Input planes of this block. + planes (int): Middle planes of this block. + groups (int): Groups of conv2. + base_width (int): Base of width in terms of base channels. Default: 4. + base_channels (int): Base of channels for calculating width. + Default: 64. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels in + SplitAttentionConv2d. Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + kwargs (dict): Key word arguments for base class. 
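+
+    Note:
+        When ``avg_down_stride`` is True and this block's ``conv2_stride``
+        is greater than 1, the split-attention convolution is built with
+        stride 1 and the spatial downsampling is instead performed by a 3x3
+        average pooling layer (``avd_layer``) applied right after it.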
+ """ + expansion = 4 + + def __init__(self, + inplanes, + planes, + groups=1, + base_width=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + """Bottleneck block for ResNeSt.""" + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * + (base_width / base_channels)) * groups + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.with_modulated_dcn = False + self.conv2 = SplitAttentionConv2d( + width, + width, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=self.dcn) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + Args: + groups (int): Number of groups of Bottleneck. Default: 1 + base_width (int): Base width of Bottleneck. Default: 4 + radix (int): Radix of SplitAttentionConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels in + SplitAttentionConv2d. Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + kwargs (dict): Keyword arguments for ResNet. 
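+
+    Example:
+        >>> # An illustrative sketch mirroring the ResNet/Res2Net examples;
+        >>> # the output channels below assume the default base_channels=64.
+        >>> from mmdet.models import ResNeSt
+        >>> import torch
+        >>> self = ResNeSt(depth=50, radix=2, reduction_factor=4)
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 256, 8, 8)
+        (1, 512, 4, 4)
+        (1, 1024, 2, 2)
+        (1, 2048, 1, 1)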
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)) + } + + def __init__(self, + groups=1, + base_width=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.base_width = base_width + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super(ResNeSt, self).__init__(**kwargs) + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer( + groups=self.groups, + base_width=self.base_width, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/downstream/mmdetection/mmdet/models/backbones/resnet.py b/downstream/mmdetection/mmdet/models/backbones/resnet.py new file mode 100644 index 0000000..1eaaae6 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/resnet.py @@ -0,0 +1,672 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import ResLayer + + +class BasicBlock(BaseModule): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg) + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + """Bottleneck block for ResNet. 
+ + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. + """ + super(Bottleneck, self).__init__(init_cfg) + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert plugins is None or isinstance(plugins, list) + if plugins is not None: + allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] + assert all(p['position'] in allowed_position for p in plugins) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.plugins = plugins + self.with_plugins = plugins is not None + + if self.with_plugins: + # collect plugins for conv1/conv2/conv3 + self.after_conv1_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv1' + ] + self.after_conv2_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv2' + ] + self.after_conv3_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv3' + ] + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + dcn, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + if self.with_plugins: + self.after_conv1_plugin_names = self.make_block_plugins( + planes, self.after_conv1_plugins) + self.after_conv2_plugin_names = self.make_block_plugins( + planes, self.after_conv2_plugins) + self.after_conv3_plugin_names = self.make_block_plugins( + planes * self.expansion, self.after_conv3_plugins) + + def make_block_plugins(self, in_channels, plugins): + """make plugins for block. + + Args: + in_channels (int): Input channels of plugin. + plugins (list[dict]): List of plugins cfg to build. + + Returns: + list[str]: List of the names of plugin. 
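+
+        Examples:
+            >>> # An illustrative plugin cfg (assuming mmcv registers the
+            >>> # ``ContextBlock`` plugin used by GCNet):
+            >>> plugins = [dict(type='ContextBlock', ratio=1. / 16)]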
+ """ + assert isinstance(plugins, list) + plugin_names = [] + for plugin in plugins: + plugin = plugin.copy() + name, layer = build_plugin_layer( + plugin, + in_channels=in_channels, + postfix=plugin.pop('postfix', '')) + assert not hasattr(self, name), f'duplicate plugin {name}' + self.add_module(name, layer) + plugin_names.append(name) + return plugin_names + + def forward_plugin(self, x, plugin_names): + out = x + for name in plugin_names: + out = getattr(self, name)(out) + return out + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: normalization layer after the third convolution layer""" + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNet(BaseModule): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + stem_channels (int | None): Number of stem channels. If not specified, + it will be the same as `base_channels`. Default: None. + base_channels (int): Number of base channels of res layer. Default: 64. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Resnet stages. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + - position (str, required): Position inside block to insert + plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
+ zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Example: + >>> from mmdet.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=None, + base_channels=64, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + plugins=None, + with_cp=False, + zero_init_residual=True, + pretrained=None, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + self.zero_init_residual = zero_init_residual + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + + block_init_cfg = None + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + block = self.arch_settings[depth][0] + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm3')) + else: + raise TypeError('pretrained must be a str or None') + + self.depth = depth + if stem_channels is None: + stem_channels = base_channels + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.plugins = plugins + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = stem_channels + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + if plugins is not None: + 
stage_plugins = self.make_stage_plugins(plugins, i) + else: + stage_plugins = None + planes = base_channels * 2**i + res_layer = self.make_res_layer( + block=self.block, + inplanes=self.inplanes, + planes=planes, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=stage_plugins, + init_cfg=block_init_cfg) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * base_channels * 2**( + len(self.stage_blocks) - 1) + + def make_stage_plugins(self, plugins, stage_idx): + """Make plugins for ResNet ``stage_idx`` th stage. + + Currently we support to insert ``context_block``, + ``empirical_attention_block``, ``nonlocal_block`` into the backbone + like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of + Bottleneck. + + An example of plugins format could be: + + Examples: + >>> plugins=[ + ... dict(cfg=dict(type='xxx', arg1='xxx'), + ... stages=(False, True, True, True), + ... position='after_conv2'), + ... dict(cfg=dict(type='yyy'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='1'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='2'), + ... stages=(True, True, True, True), + ... position='after_conv3') + ... ] + >>> self = ResNet(depth=18) + >>> stage_plugins = self.make_stage_plugins(plugins, 0) + >>> assert len(stage_plugins) == 3 + + Suppose ``stage_idx=0``, the structure of blocks in the stage would be: + + .. code-block:: none + + conv1-> conv2->conv3->yyy->zzz1->zzz2 + + Suppose 'stage_idx=1', the structure of blocks in the stage would be: + + .. code-block:: none + + conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 + + If stages is missing, the plugin would be applied to all stages. + + Args: + plugins (list[dict]): List of plugins cfg to build. The postfix is + required if multiple same type plugins are inserted. 
+ stage_idx (int): Index of stage to build + + Returns: + list[dict]: Plugins for current stage + """ + stage_plugins = [] + for plugin in plugins: + plugin = plugin.copy() + stages = plugin.pop('stages', None) + assert stages is None or len(stages) == self.num_stages + # whether to insert plugin into current stage + if stages is None or stages[stage_idx]: + stage_plugins.append(plugin) + + return stage_plugins + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + if self.deep_stem: + self.stem = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels)[1], + nn.ReLU(inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@BACKBONES.register_module() +class ResNetV1d(ResNet): + r"""ResNetV1d variant described in `Bag of Tricks + `_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. 
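+
+    .. _Bag of Tricks: https://arxiv.org/abs/1812.01187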
+ """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) diff --git a/downstream/mmdetection/mmdet/models/backbones/resnext.py b/downstream/mmdetection/mmdet/models/backbones/resnext.py new file mode 100644 index 0000000..8675d7c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/resnext.py @@ -0,0 +1,154 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from ..utils import ResLayer +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet + + +class Bottleneck(_Bottleneck): + expansion = 4 + + def __init__(self, + inplanes, + planes, + groups=1, + base_width=4, + base_channels=64, + **kwargs): + """Bottleneck block for ResNeXt. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. + """ + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * + (base_width / base_channels)) * groups + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, width, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + self.dcn, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + if self.with_plugins: + self._del_block_plugins(self.after_conv1_plugin_names + + self.after_conv2_plugin_names + + self.after_conv3_plugin_names) + self.after_conv1_plugin_names = self.make_block_plugins( + width, self.after_conv1_plugins) + self.after_conv2_plugin_names = self.make_block_plugins( + width, self.after_conv2_plugins) + self.after_conv3_plugin_names = self.make_block_plugins( + self.planes * self.expansion, self.after_conv3_plugins) + + def _del_block_plugins(self, plugin_names): + """delete plugins for block if exist. + + Args: + plugin_names (list[str]): List of plugins name to delete. + """ + assert isinstance(plugin_names, list) + for plugin_name in plugin_names: + del self._modules[plugin_name] + + +@BACKBONES.register_module() +class ResNeXt(ResNet): + """ResNeXt backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Resnet stages. Default: 4. + groups (int): Group of resnext. + base_width (int): Base width of resnext. 
+ strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, groups=1, base_width=4, **kwargs): + self.groups = groups + self.base_width = base_width + super(ResNeXt, self).__init__(**kwargs) + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``""" + return ResLayer( + groups=self.groups, + base_width=self.base_width, + base_channels=self.base_channels, + **kwargs) diff --git a/downstream/mmdetection/mmdet/models/backbones/ssd_vgg.py b/downstream/mmdetection/mmdet/models/backbones/ssd_vgg.py new file mode 100644 index 0000000..c15aeac --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/ssd_vgg.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +from mmcv.cnn import VGG +from mmcv.runner import BaseModule + +from ..builder import BACKBONES +from ..necks import ssd_neck + + +@BACKBONES.register_module() +class SSDVGG(VGG, BaseModule): + """VGG Backbone network for single-shot-detection. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_last_pool (bool): Whether to add a pooling layer at the last + of the model + ceil_mode (bool): When True, will use `ceil` instead of `floor` + to compute the output shape. + out_indices (Sequence[int]): Output from which stages. + out_feature_indices (Sequence[int]): Output from which feature map. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + input_size (int, optional): Deprecated argumment. + Width and height of input, from {300, 512}. + l2_norm_scale (float, optional) : Deprecated argumment. + L2 normalization layer init scale. + + Example: + >>> self = SSDVGG(input_size=300, depth=11) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 300, 300) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 1024, 19, 19) + (1, 512, 10, 10) + (1, 256, 5, 5) + (1, 256, 3, 3) + (1, 256, 1, 1) + """ + extra_setting = { + 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), + 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128), + } + + def __init__(self, + depth, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + pretrained=None, + init_cfg=None, + input_size=None, + l2_norm_scale=None): + # TODO: in_channels for mmcv.VGG + super(SSDVGG, self).__init__( + depth, + with_last_pool=with_last_pool, + ceil_mode=ceil_mode, + out_indices=out_indices) + + self.features.add_module( + str(len(self.features)), + nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + self.features.add_module( + str(len(self.features)), + nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)) + self.features.add_module( + str(len(self.features)), nn.ReLU(inplace=True)) + self.features.add_module( + str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1)) + self.features.add_module( + str(len(self.features)), nn.ReLU(inplace=True)) + self.out_feature_indices = out_feature_indices + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + + if init_cfg is not None: + self.init_cfg = init_cfg + elif isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict(type='Constant', val=1, layer='BatchNorm2d'), + dict(type='Normal', std=0.01, layer='Linear'), + ] + else: + raise TypeError('pretrained must be a str or None') + + if input_size is not None: + warnings.warn('DeprecationWarning: input_size is deprecated') + if l2_norm_scale is not None: + warnings.warn('DeprecationWarning: l2_norm_scale in VGG is ' + 'deprecated, it has been moved to SSDNeck.') + + def init_weights(self, pretrained=None): + super(VGG, self).init_weights() + + def forward(self, x): + """Forward function.""" + outs = [] + for i, layer in enumerate(self.features): + x = layer(x) + if i in self.out_feature_indices: + outs.append(x) + + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + +class L2Norm(ssd_neck.L2Norm): + + def __init__(self, **kwargs): + super(L2Norm, self).__init__(**kwargs) + warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py ' + 'is deprecated, please use L2Norm in ' + 'mmdet/models/necks/ssd_neck.py instead') diff --git a/downstream/mmdetection/mmdet/models/backbones/swin.py b/downstream/mmdetection/mmdet/models/backbones/swin.py new file mode 100644 index 0000000..efbd586 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/swin.py @@ -0,0 +1,763 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
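+#
+# For reference, with the default configuration below (224x224 input,
+# patch_size=4, embed_dims=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
+# i.e. a Swin-T style setting), the four stages output feature maps of shape
+# (B, 96, 56, 56), (B, 192, 28, 28), (B, 384, 14, 14) and (B, 768, 7, 7).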
+import warnings +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer, constant_init, trunc_normal_init +from mmcv.cnn.bricks.transformer import FFN, build_dropout +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner import BaseModule, ModuleList, _load_checkpoint +from mmcv.utils import to_2tuple + +from ...utils import get_root_logger +from ..builder import BACKBONES +from ..utils.ckpt_convert import swin_converter +from ..utils.transformer import PatchEmbed, PatchMerging + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + init_cfg (dict | None, optional): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None): + + super().__init__() + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + self.init_cfg = init_cfg + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor | None, Optional): mask with shape of (num_windows, + Wh*Ww, Wh*Ww), value should be between (-inf, 0]. 
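+
+        Returns:
+            tensor: Output features with the same shape as the input,
+                i.e. (num_windows*B, N, C).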
+ """ + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + # make torchscript happy (cannot use tensor as tuple) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class ShiftWindowMSA(BaseModule): + """Shifted Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Defaults: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Defaults: 0. + proj_drop_rate (float, optional): Dropout ratio of output. + Defaults: 0. + dropout_layer (dict, optional): The dropout_layer used before output. + Defaults: dict(type='DropPath', drop_prob=0.). + init_cfg (dict, optional): The extra config for initialization. + Default: None. 
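+
+    Note:
+        The input feature map is padded on its right and bottom so that both
+        height and width become multiples of ``window_size`` before window
+        partitioning; the padded region is cropped away again after the
+        attention is computed.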
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + shift_size=0, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0, + proj_drop_rate=0, + dropout_layer=dict(type='DropPath', drop_prob=0.), + init_cfg=None): + super().__init__(init_cfg) + + self.window_size = window_size + self.shift_size = shift_size + assert 0 <= self.shift_size < self.window_size + + self.w_msa = WindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=to_2tuple(window_size), + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate, + init_cfg=None) + + self.drop = build_dropout(dropout_layer) + + def forward(self, query, hw_shape): + B, L, C = query.shape + H, W = hw_shape + assert L == H * W, 'input feature has wrong size' + query = query.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) + H_pad, W_pad = query.shape[1], query.shape[2] + + # cyclic shift + if self.shift_size > 0: + shifted_query = torch.roll( + query, + shifts=(-self.shift_size, -self.shift_size), + dims=(1, 2)) + + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = self.window_partition(img_mask) + mask_windows = mask_windows.view( + -1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-100.0)).masked_fill( + attn_mask == 0, float(0.0)) + else: + shifted_query = query + attn_mask = None + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(shifted_query) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, self.window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, + self.window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + return x + + def window_reverse(self, windows, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + window_size = self.window_size + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + def window_partition(self, x): + """ + Args: + x: (B, H, W, C) + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + window_size = self.window_size + x = 
x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + +class SwinBlock(BaseModule): + """" + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + window_size (int, optional): The local window scale. Default: 7. + shift (bool, optional): whether to shift window or not. Default False. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float, optional): Stochastic depth rate. Default: 0. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict | list | None, optional): The init config. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + window_size=7, + shift=False, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super(SwinBlock, self).__init__() + + self.init_cfg = init_cfg + self.with_cp = with_cp + + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ShiftWindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + shift_size=window_size // 2 if shift else 0, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + init_cfg=None) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=2, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=True, + init_cfg=None) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockSequence(BaseModule): + """Implements one stage in Swin Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + depth (int): The number of blocks in this stage. + window_size (int, optional): The local window scale. Default: 7. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float | list[float], optional): Stochastic depth + rate. Default: 0. 
+ downsample (BaseModule | None, optional): The downsample operation + module. Default: None. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict | list | None, optional): The init config. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + depth, + window_size=7, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + downsample=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(drop_path_rate, list): + drop_path_rates = drop_path_rate + assert len(drop_path_rates) == depth + else: + drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] + + self.blocks = ModuleList() + for i in range(depth): + block = SwinBlock( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + window_size=window_size, + shift=False if i % 2 == 0 else True, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rates[i], + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + init_cfg=None) + self.blocks.append(block) + + self.downsample = downsample + + def forward(self, x, hw_shape): + for block in self.blocks: + x = block(x, hw_shape) + + if self.downsample: + x_down, down_hw_shape = self.downsample(x, hw_shape) + return x_down, down_hw_shape, x, hw_shape + else: + return x, hw_shape, x, hw_shape + + +@BACKBONES.register_module() +class SwinTransformer(BaseModule): + """ Swin Transformer + A PyTorch implement of : `Swin Transformer: + Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/abs/2103.14030 + + Inspiration from + https://github.com/microsoft/Swin-Transformer + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): The num of input channels. + Defaults: 3. + embed_dims (int): The feature dimension. Default: 96. + patch_size (int | tuple[int]): Patch size. Default: 4. + window_size (int): Window size. Default: 7. + mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. + Default: 4. + depths (tuple[int]): Depths of each Swin Transformer stage. + Default: (2, 2, 6, 2). + num_heads (tuple[int]): Parallel attention heads of each Swin + Transformer stage. Default: (3, 6, 12, 24). + strides (tuple[int]): The patch merging or patch embedding stride of + each Swin Transformer stage. (In swin, we set kernel size equal to + stride.) Default: (4, 2, 2, 2). + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool, optional): If True, add a learnable bias to query, key, + value. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + patch_norm (bool): If add a norm layer for patch embed and patch + merging. Default: True. + drop_rate (float): Dropout rate. Defaults: 0. + attn_drop_rate (float): Attention dropout rate. Default: 0. + drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults: False. 
+ act_cfg (dict): Config dict for activation layer. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer at + output of backone. Defaults: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + pretrained (str, optional): model pretrained path. Default: None. + convert_weights (bool): The flag indicates whether the + pre-trained model is from the original repo. We may need + to convert some keys to make it compatible. + Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + Default: -1 (-1 means not freezing any parameters). + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + pretrain_img_size=224, + in_channels=3, + embed_dims=96, + patch_size=4, + window_size=7, + mlp_ratio=4, + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + strides=(4, 2, 2, 2), + out_indices=(0, 1, 2, 3), + qkv_bias=True, + qk_scale=None, + patch_norm=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + use_abs_pos_embed=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + pretrained=None, + convert_weights=False, + frozen_stages=-1, + init_cfg=None): + self.convert_weights = convert_weights + self.frozen_stages = frozen_stages + if isinstance(pretrain_img_size, int): + pretrain_img_size = to_2tuple(pretrain_img_size) + elif isinstance(pretrain_img_size, tuple): + if len(pretrain_img_size) == 1: + pretrain_img_size = to_2tuple(pretrain_img_size[0]) + assert len(pretrain_img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pretrain_img_size)}' + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + self.init_cfg = init_cfg + else: + raise TypeError('pretrained must be a str or None') + + super(SwinTransformer, self).__init__(init_cfg=init_cfg) + + num_layers = len(depths) + self.out_indices = out_indices + self.use_abs_pos_embed = use_abs_pos_embed + + assert strides[0] == patch_size, 'Use non-overlapping patch embed.' 
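+        # strides[0] is reused as the patch embedding stride below, so the
+        # patches are non-overlapping by construction.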
+ + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=strides[0], + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + + if self.use_abs_pos_embed: + patch_row = pretrain_img_size[0] // patch_size + patch_col = pretrain_img_size[1] // patch_size + num_patches = patch_row * patch_col + self.absolute_pos_embed = nn.Parameter( + torch.zeros((1, num_patches, embed_dims))) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + # set stochastic depth decay rule + total_depth = sum(depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] + + self.stages = ModuleList() + in_channels = embed_dims + for i in range(num_layers): + if i < num_layers - 1: + downsample = PatchMerging( + in_channels=in_channels, + out_channels=2 * in_channels, + stride=strides[i + 1], + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + else: + downsample = None + + stage = SwinBlockSequence( + embed_dims=in_channels, + num_heads=num_heads[i], + feedforward_channels=mlp_ratio * in_channels, + depth=depths[i], + window_size=window_size, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], + downsample=downsample, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + init_cfg=None) + self.stages.append(stage) + if downsample: + in_channels = downsample.out_channels + + self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] + # Add a norm layer for each output + for i in out_indices: + layer = build_norm_layer(norm_cfg, self.num_features[i])[1] + layer_name = f'norm{i}' + self.add_module(layer_name, layer) + + def train(self, mode=True): + """Convert the model into training mode while keep layers freezed.""" + super(SwinTransformer, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + if self.use_abs_pos_embed: + self.absolute_pos_embed.requires_grad = False + self.drop_after_pos.eval() + + for i in range(1, self.frozen_stages + 1): + + if (i - 1) in self.out_indices: + norm_layer = getattr(self, f'norm{i-1}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + m = self.stages[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + logger = get_root_logger() + if self.init_cfg is None: + logger.warn(f'No pre-trained weights for ' + f'{self.__class__.__name__}, ' + f'training start from scratch') + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) 
+ elif isinstance(m, nn.LayerNorm): + constant_init(m, 1.0) + else: + assert 'checkpoint' in self.init_cfg, f'Only support ' \ + f'specify `Pretrained` in ' \ + f'`init_cfg` in ' \ + f'{self.__class__.__name__} ' + ckpt = _load_checkpoint( + self.init_cfg.checkpoint, logger=logger, map_location='cpu') + if 'state_dict' in ckpt: + _state_dict = ckpt['state_dict'] + elif 'model' in ckpt: + _state_dict = ckpt['model'] + else: + _state_dict = ckpt + if self.convert_weights: + # supported loading weight from original repo, + _state_dict = swin_converter(_state_dict) + + state_dict = OrderedDict() + for k, v in _state_dict.items(): + if k.startswith('backbone.'): + state_dict[k[9:]] = v + + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + # reshape absolute position embedding + if state_dict.get('absolute_pos_embed') is not None: + absolute_pos_embed = state_dict['absolute_pos_embed'] + N1, L, C1 = absolute_pos_embed.size() + N2, C2, H, W = self.absolute_pos_embed.size() + if N1 != N2 or C1 != C2 or L != H * W: + logger.warning('Error in loading absolute_pos_embed, pass') + else: + state_dict['absolute_pos_embed'] = absolute_pos_embed.view( + N2, H, W, C2).permute(0, 3, 1, 2).contiguous() + + # interpolate position bias table if needed + relative_position_bias_table_keys = [ + k for k in state_dict.keys() + if 'relative_position_bias_table' in k + ] + for table_key in relative_position_bias_table_keys: + table_pretrained = state_dict[table_key] + table_current = self.state_dict()[table_key] + L1, nH1 = table_pretrained.size() + L2, nH2 = table_current.size() + if nH1 != nH2: + logger.warning(f'Error in loading {table_key}, pass') + elif L1 != L2: + S1 = int(L1**0.5) + S2 = int(L2**0.5) + table_pretrained_resized = F.interpolate( + table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), + size=(S2, S2), + mode='bicubic') + state_dict[table_key] = table_pretrained_resized.view( + nH2, L2).permute(1, 0).contiguous() + + # load state_dict + self.load_state_dict(state_dict, False) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape, out, out_hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(out) + out = out.view(-1, *out_hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return outs diff --git a/downstream/mmdetection/mmdet/models/backbones/trident_resnet.py b/downstream/mmdetection/mmdet/models/backbones/trident_resnet.py new file mode 100644 index 0000000..013ba64 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/backbones/trident_resnet.py @@ -0,0 +1,298 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule +from torch.nn.modules.utils import _pair + +from mmdet.models.backbones.resnet import Bottleneck, ResNet +from mmdet.models.builder import BACKBONES + + +class TridentConv(BaseModule): + """Trident Convolution Module. + + Args: + in_channels (int): Number of channels in input. + out_channels (int): Number of channels in output. + kernel_size (int): Size of convolution kernel. + stride (int, optional): Convolution stride. 
Default: 1. + trident_dilations (tuple[int, int, int], optional): Dilations of + different trident branch. Default: (1, 2, 3). + test_branch_idx (int, optional): In inference, all 3 branches will + be used if `test_branch_idx==-1`, otherwise only branch with + index `test_branch_idx` will be used. Default: 1. + bias (bool, optional): Whether to use bias in convolution or not. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + trident_dilations=(1, 2, 3), + test_branch_idx=1, + bias=False, + init_cfg=None): + super(TridentConv, self).__init__(init_cfg) + self.num_branch = len(trident_dilations) + self.with_bias = bias + self.test_branch_idx = test_branch_idx + self.stride = _pair(stride) + self.kernel_size = _pair(kernel_size) + self.paddings = _pair(trident_dilations) + self.dilations = trident_dilations + self.in_channels = in_channels + self.out_channels = out_channels + self.bias = bias + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels, *self.kernel_size)) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + def extra_repr(self): + tmpstr = f'in_channels={self.in_channels}' + tmpstr += f', out_channels={self.out_channels}' + tmpstr += f', kernel_size={self.kernel_size}' + tmpstr += f', num_branch={self.num_branch}' + tmpstr += f', test_branch_idx={self.test_branch_idx}' + tmpstr += f', stride={self.stride}' + tmpstr += f', paddings={self.paddings}' + tmpstr += f', dilations={self.dilations}' + tmpstr += f', bias={self.bias}' + return tmpstr + + def forward(self, inputs): + if self.training or self.test_branch_idx == -1: + outputs = [ + F.conv2d(input, self.weight, self.bias, self.stride, padding, + dilation) for input, dilation, padding in zip( + inputs, self.dilations, self.paddings) + ] + else: + assert len(inputs) == 1 + outputs = [ + F.conv2d(inputs[0], self.weight, self.bias, self.stride, + self.paddings[self.test_branch_idx], + self.dilations[self.test_branch_idx]) + ] + + return outputs + + +# Since TridentNet is defined over ResNet50 and ResNet101, here we +# only support TridentBottleneckBlock. +class TridentBottleneck(Bottleneck): + """BottleBlock for TridentResNet. + + Args: + trident_dilations (tuple[int, int, int]): Dilations of different + trident branch. + test_branch_idx (int): In inference, all 3 branches will be used + if `test_branch_idx==-1`, otherwise only branch with index + `test_branch_idx` will be used. + concat_output (bool): Whether to concat the output list to a Tensor. + `True` only in the last Block. 
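+
+    The example below is an illustrative, shape-only sketch with arbitrary
+    sizes and untrained weights; it is not taken from a TridentNet config.
+
+    Example:
+        >>> import torch
+        >>> block = TridentBottleneck(
+        ...     trident_dilations=(1, 2, 3),
+        ...     test_branch_idx=1,
+        ...     concat_output=True,
+        ...     inplanes=64,
+        ...     planes=16)
+        >>> out = block(torch.rand(1, 64, 8, 8))
+        >>> # the three branches are concatenated along the batch dimension
+        >>> assert out.shape == (3, 64, 8, 8)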
+ """ + + def __init__(self, trident_dilations, test_branch_idx, concat_output, + **kwargs): + + super(TridentBottleneck, self).__init__(**kwargs) + self.trident_dilations = trident_dilations + self.num_branch = len(trident_dilations) + self.concat_output = concat_output + self.test_branch_idx = test_branch_idx + self.conv2 = TridentConv( + self.planes, + self.planes, + kernel_size=3, + stride=self.conv2_stride, + bias=False, + trident_dilations=self.trident_dilations, + test_branch_idx=test_branch_idx, + init_cfg=dict( + type='Kaiming', + distribution='uniform', + mode='fan_in', + override=dict(name='conv2'))) + + def forward(self, x): + + def _inner_forward(x): + num_branch = ( + self.num_branch + if self.training or self.test_branch_idx == -1 else 1) + identity = x + if not isinstance(x, list): + x = (x, ) * num_branch + identity = x + if self.downsample is not None: + identity = [self.downsample(b) for b in x] + + out = [self.conv1(b) for b in x] + out = [self.norm1(b) for b in out] + out = [self.relu(b) for b in out] + + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv1_plugin_names) + + out = self.conv2(out) + out = [self.norm2(b) for b in out] + out = [self.relu(b) for b in out] + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv2_plugin_names) + + out = [self.conv3(b) for b in out] + out = [self.norm3(b) for b in out] + + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv3_plugin_names) + + out = [ + out_b + identity_b for out_b, identity_b in zip(out, identity) + ] + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = [self.relu(b) for b in out] + if self.concat_output: + out = torch.cat(out, dim=0) + return out + + +def make_trident_res_layer(block, + inplanes, + planes, + num_blocks, + stride=1, + trident_dilations=(1, 2, 3), + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + test_branch_idx=-1): + """Build Trident Res Layers.""" + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + for i in range(num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride if i == 0 else 1, + trident_dilations=trident_dilations, + downsample=downsample if i == 0 else None, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=plugins, + test_branch_idx=test_branch_idx, + concat_output=True if i == num_blocks - 1 else False)) + inplanes = planes * block.expansion + return nn.Sequential(*layers) + + +@BACKBONES.register_module() +class TridentResNet(ResNet): + """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to + ResNet, while in stage 3, Trident BottleBlock is utilized to replace the + normal BottleBlock to yield trident output. Different branch shares the + convolution weight but uses different dilations to achieve multi-scale + output. 
+ + / stage3(b0) \ + x - stem - stage1 - stage2 - stage3(b1) - output + \ stage3(b2) / + + Args: + depth (int): Depth of resnet, from {50, 101, 152}. + num_branch (int): Number of branches in TridentNet. + test_branch_idx (int): In inference, all 3 branches will be used + if `test_branch_idx==-1`, otherwise only branch with index + `test_branch_idx` will be used. + trident_dilations (tuple[int]): Dilations of different trident branch. + len(trident_dilations) should be equal to num_branch. + """ # noqa + + def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, + **kwargs): + + assert num_branch == len(trident_dilations) + assert depth in (50, 101, 152) + super(TridentResNet, self).__init__(depth, **kwargs) + assert self.num_stages == 3 + self.test_branch_idx = test_branch_idx + self.num_branch = num_branch + + last_stage_idx = self.num_stages - 1 + stride = self.strides[last_stage_idx] + dilation = trident_dilations + dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None + if self.plugins is not None: + stage_plugins = self.make_stage_plugins(self.plugins, + last_stage_idx) + else: + stage_plugins = None + planes = self.base_channels * 2**last_stage_idx + res_layer = make_trident_res_layer( + TridentBottleneck, + inplanes=(self.block.expansion * self.base_channels * + 2**(last_stage_idx - 1)), + planes=planes, + num_blocks=self.stage_blocks[last_stage_idx], + stride=stride, + trident_dilations=dilation, + style=self.style, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn, + plugins=stage_plugins, + test_branch_idx=self.test_branch_idx) + + layer_name = f'layer{last_stage_idx + 1}' + + self.__setattr__(layer_name, res_layer) + self.res_layers.pop(last_stage_idx) + self.res_layers.insert(last_stage_idx, layer_name) + + self._freeze_stages() diff --git a/downstream/mmdetection/mmdet/models/builder.py b/downstream/mmdetection/mmdet/models/builder.py new file mode 100644 index 0000000..ace6209 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/builder.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
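+# Usage sketch (illustrative): each config dict handed to the build_* helpers
+# below selects a registered class via its ``type`` key and forwards the
+# remaining keys as keyword arguments, e.g.
+#
+#     backbone = build_backbone(dict(type='ResNet', depth=50))
+#
+# behaves roughly like ``ResNet(depth=50)`` for a ``ResNet`` class that was
+# registered with ``@BACKBONES.register_module()``.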
+import warnings + +from mmcv.cnn import MODELS as MMCV_MODELS +from mmcv.utils import Registry + +MODELS = Registry('models', parent=MMCV_MODELS) + +BACKBONES = MODELS +NECKS = MODELS +ROI_EXTRACTORS = MODELS +SHARED_HEADS = MODELS +HEADS = MODELS +LOSSES = MODELS +DETECTORS = MODELS + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_roi_extractor(cfg): + """Build roi extractor.""" + return ROI_EXTRACTORS.build(cfg) + + +def build_shared_head(cfg): + """Build shared head.""" + return SHARED_HEADS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_detector(cfg, train_cfg=None, test_cfg=None): + """Build detector.""" + if train_cfg is not None or test_cfg is not None: + warnings.warn( + 'train_cfg and test_cfg is deprecated, ' + 'please specify them in model', UserWarning) + assert cfg.get('train_cfg') is None or train_cfg is None, \ + 'train_cfg specified in both outer field and model field ' + assert cfg.get('test_cfg') is None or test_cfg is None, \ + 'test_cfg specified in both outer field and model field ' + return DETECTORS.build( + cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/__init__.py b/downstream/mmdetection/mmdet/models/dense_heads/__init__.py new file mode 100644 index 0000000..bc26ca3 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/__init__.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .anchor_free_head import AnchorFreeHead +from .anchor_head import AnchorHead +from .atss_head import ATSSHead +from .autoassign_head import AutoAssignHead +from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead +from .centernet_head import CenterNetHead +from .centripetal_head import CentripetalHead +from .corner_head import CornerHead +from .ddod_head import DDODHead +from .deformable_detr_head import DeformableDETRHead +from .detr_head import DETRHead +from .embedding_rpn_head import EmbeddingRPNHead +from .fcos_head import FCOSHead +from .fovea_head import FoveaHead +from .free_anchor_retina_head import FreeAnchorRetinaHead +from .fsaf_head import FSAFHead +from .ga_retina_head import GARetinaHead +from .ga_rpn_head import GARPNHead +from .gfl_head import GFLHead +from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead +from .lad_head import LADHead +from .ld_head import LDHead +from .mask2former_head import Mask2FormerHead +from .maskformer_head import MaskFormerHead +from .nasfcos_head import NASFCOSHead +from .paa_head import PAAHead +from .pisa_retinanet_head import PISARetinaHead +from .pisa_ssd_head import PISASSDHead +from .reppoints_head import RepPointsHead +from .retina_head import RetinaHead +from .retina_sepbn_head import RetinaSepBNHead +from .rpn_head import RPNHead +from .sabl_retina_head import SABLRetinaHead +from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead +from .solov2_head import SOLOV2Head +from .ssd_head import SSDHead +from .tood_head import TOODHead +from .vfnet_head import VFNetHead +from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead +from .yolo_head import YOLOV3Head +from .yolof_head import YOLOFHead +from .yolox_head import YOLOXHead + +__all__ = [ + 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', + 'RPNHead', 
'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', + 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead', + 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead', + 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', + 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', + 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', + 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead', + 'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead', + 'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead', + 'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead', + 'Mask2FormerHead', 'SOLOV2Head', 'DDODHead' +] diff --git a/downstream/mmdetection/mmdet/models/dense_heads/anchor_free_head.py b/downstream/mmdetection/mmdet/models/dense_heads/anchor_free_head.py new file mode 100644 index 0000000..b0460b9 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/anchor_free_head.py @@ -0,0 +1,350 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from abc import abstractmethod + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import force_fp32 + +from mmdet.core import build_bbox_coder, multi_apply +from mmdet.core.anchor.point_generator import MlvlPointGenerator +from ..builder import HEADS, build_loss +from .base_dense_head import BaseDenseHead +from .dense_test_mixins import BBoxTestMixin + + +@HEADS.register_module() +class AnchorFreeHead(BaseDenseHead, BBoxTestMixin): + """Anchor-free head (FCOS, Fovea, RepPoints, etc.). + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels. Used in child classes. + stacked_convs (int): Number of stacking convs of the head. + strides (tuple): Downsample factor of each feature map. + dcn_on_last_conv (bool): If true, use dcn in the last layer of + towers. Default: False. + conv_bias (bool | str): If specified as `auto`, it will be decided by + the norm_cfg. Bias of conv will be set as True if `norm_cfg` is + None, otherwise False. Default: "auto". + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + bbox_coder (dict): Config of bbox coder. Defaults + 'DistancePointBBoxCoder'. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + train_cfg (dict): Training config of anchor head. + test_cfg (dict): Testing config of anchor head. + init_cfg (dict or list[dict], optional): Initialization config dict. 
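+
+    Example:
+        >>> # Illustrative forward pass. ``AnchorFreeHead`` keeps ``loss``
+        >>> # and ``get_targets`` abstract, so a concrete subclass
+        >>> # (here FCOSHead) is used.
+        >>> import torch
+        >>> from mmdet.models.dense_heads import FCOSHead
+        >>> self = FCOSHead(num_classes=11, in_channels=7)
+        >>> feats = [torch.rand(1, 7, s, s) for s in [64, 32, 16, 8, 4]]
+        >>> cls_scores, bbox_preds, centernesses = self.forward(feats)
+        >>> assert len(cls_scores) == len(self.strides)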
+ """ # noqa: W605 + + _version = 1 + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + stacked_convs=4, + strides=(4, 8, 16, 32, 64), + dcn_on_last_conv=False, + conv_bias='auto', + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + bbox_coder=dict(type='DistancePointBBoxCoder'), + conv_cfg=None, + norm_cfg=None, + train_cfg=None, + test_cfg=None, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='conv_cls', + std=0.01, + bias_prob=0.01))): + super(AnchorFreeHead, self).__init__(init_cfg) + self.num_classes = num_classes + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + if self.use_sigmoid_cls: + self.cls_out_channels = num_classes + else: + self.cls_out_channels = num_classes + 1 + self.in_channels = in_channels + self.feat_channels = feat_channels + self.stacked_convs = stacked_convs + self.strides = strides + self.dcn_on_last_conv = dcn_on_last_conv + assert conv_bias == 'auto' or isinstance(conv_bias, bool) + self.conv_bias = conv_bias + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.bbox_coder = build_bbox_coder(bbox_coder) + + self.prior_generator = MlvlPointGenerator(strides) + + # In order to keep a more general interface and be consistent with + # anchor_head. We can think of point like one anchor + self.num_base_priors = self.prior_generator.num_base_priors[0] + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.fp16_enabled = False + + self._init_layers() + + def _init_layers(self): + """Initialize layers of the head.""" + self._init_cls_convs() + self._init_reg_convs() + self._init_predictor() + + def _init_cls_convs(self): + """Initialize classification conv layers of the head.""" + self.cls_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + if self.dcn_on_last_conv and i == self.stacked_convs - 1: + conv_cfg = dict(type='DCNv2') + else: + conv_cfg = self.conv_cfg + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.conv_bias)) + + def _init_reg_convs(self): + """Initialize bbox regression conv layers of the head.""" + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + if self.dcn_on_last_conv and i == self.stacked_convs - 1: + conv_cfg = dict(type='DCNv2') + else: + conv_cfg = self.conv_cfg + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.conv_bias)) + + def _init_predictor(self): + """Initialize predictor layers of the head.""" + self.conv_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """Hack some keys of the model state dict so that can load checkpoints + of previous version.""" + version = local_metadata.get('version', None) + if version is None: + # the key is different in early versions + # for example, 'fcos_cls' become 'conv_cls' now + bbox_head_keys = [ + k for k in state_dict.keys() if 
k.startswith(prefix) + ] + ori_predictor_keys = [] + new_predictor_keys = [] + # e.g. 'fcos_cls' or 'fcos_reg' + for key in bbox_head_keys: + ori_predictor_keys.append(key) + key = key.split('.') + conv_name = None + if key[1].endswith('cls'): + conv_name = 'conv_cls' + elif key[1].endswith('reg'): + conv_name = 'conv_reg' + elif key[1].endswith('centerness'): + conv_name = 'conv_centerness' + else: + assert NotImplementedError + if conv_name is not None: + key[1] = conv_name + new_predictor_keys.append('.'.join(key)) + else: + ori_predictor_keys.pop(-1) + for i in range(len(new_predictor_keys)): + state_dict[new_predictor_keys[i]] = state_dict.pop( + ori_predictor_keys[i]) + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, + error_msgs) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually contain classification scores and bbox predictions. + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * 4. + """ + return multi_apply(self.forward_single, feats)[:2] + + def forward_single(self, x): + """Forward features of a single scale level. + + Args: + x (Tensor): FPN feature maps of the specified stride. + + Returns: + tuple: Scores for each class, bbox predictions, features + after classification and regression conv layers, some + models needs these features like FCOS. + """ + cls_feat = x + reg_feat = x + + for cls_layer in self.cls_convs: + cls_feat = cls_layer(cls_feat) + cls_score = self.conv_cls(cls_feat) + + for reg_layer in self.reg_convs: + reg_feat = reg_layer(reg_feat) + bbox_pred = self.conv_reg(reg_feat) + return cls_score, bbox_pred, cls_feat, reg_feat + + @abstractmethod + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * 4. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + """ + + raise NotImplementedError + + @abstractmethod + def get_targets(self, points, gt_bboxes_list, gt_labels_list): + """Compute regression, classification and centerness targets for points + in multiple images. + + Args: + points (list[Tensor]): Points of each fpn level, each has shape + (num_points, 2). + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, + each has shape (num_gt, 4). + gt_labels_list (list[Tensor]): Ground truth labels of each box, + each has shape (num_gt,). 
+ """ + raise NotImplementedError + + def _get_points_single(self, + featmap_size, + stride, + dtype, + device, + flatten=False): + """Get points of a single scale level. + + This function will be deprecated soon. + """ + + warnings.warn( + '`_get_points_single` in `AnchorFreeHead` will be ' + 'deprecated soon, we support a multi level point generator now' + 'you can get points of a single level feature map ' + 'with `self.prior_generator.single_level_grid_priors` ') + + h, w = featmap_size + # First create Range with the default dtype, than convert to + # target `dtype` for onnx exporting. + x_range = torch.arange(w, device=device).to(dtype) + y_range = torch.arange(h, device=device).to(dtype) + y, x = torch.meshgrid(y_range, x_range) + if flatten: + y = y.flatten() + x = x.flatten() + return y, x + + def get_points(self, featmap_sizes, dtype, device, flatten=False): + """Get points according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + dtype (torch.dtype): Type of points. + device (torch.device): Device of points. + + Returns: + tuple: points of each image. + """ + warnings.warn( + '`get_points` in `AnchorFreeHead` will be ' + 'deprecated soon, we support a multi level point generator now' + 'you can get points of all levels ' + 'with `self.prior_generator.grid_priors` ') + + mlvl_points = [] + for i in range(len(featmap_sizes)): + mlvl_points.append( + self._get_points_single(featmap_sizes[i], self.strides[i], + dtype, device, flatten)) + return mlvl_points + + def aug_test(self, feats, img_metas, rescale=False): + """Test function with test time augmentation. + + Args: + feats (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains features for all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[ndarray]: bbox results of each class + """ + return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/anchor_head.py b/downstream/mmdetection/mmdet/models/dense_heads/anchor_head.py new file mode 100644 index 0000000..d1bfab6 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/anchor_head.py @@ -0,0 +1,542 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +from mmcv.runner import force_fp32 + +from mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder, + build_prior_generator, build_sampler, images_to_levels, + multi_apply, unmap) +from ..builder import HEADS, build_loss +from .base_dense_head import BaseDenseHead +from .dense_test_mixins import BBoxTestMixin + + +@HEADS.register_module() +class AnchorHead(BaseDenseHead, BBoxTestMixin): + """Anchor-based head (RPN, RetinaNet, SSD, etc.). + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels. Used in child classes. + anchor_generator (dict): Config dict for anchor generator + bbox_coder (dict): Config of bounding box coder. 
+ reg_decoded_bbox (bool): If true, the regression loss would be + applied directly on decoded bounding boxes, converting both + the predicted boxes and regression targets to absolute + coordinates format. Default False. It should be `True` when + using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + train_cfg (dict): Training config of anchor head. + test_cfg (dict): Testing config of anchor head. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ # noqa: W605 + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=True, + target_means=(.0, .0, .0, .0), + target_stds=(1.0, 1.0, 1.0, 1.0)), + reg_decoded_bbox=False, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + train_cfg=None, + test_cfg=None, + init_cfg=dict(type='Normal', layer='Conv2d', std=0.01)): + super(AnchorHead, self).__init__(init_cfg) + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + if self.use_sigmoid_cls: + self.cls_out_channels = num_classes + else: + self.cls_out_channels = num_classes + 1 + + if self.cls_out_channels <= 0: + raise ValueError(f'num_classes={num_classes} is too small') + self.reg_decoded_bbox = reg_decoded_bbox + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + if hasattr(self.train_cfg, + 'sampler') and self.train_cfg.sampler.type.split( + '.')[-1] != 'PseudoSampler': + self.sampling = True + sampler_cfg = self.train_cfg.sampler + # avoid BC-breaking + if loss_cls['type'] in [ + 'FocalLoss', 'GHMC', 'QualityFocalLoss' + ]: + warnings.warn( + 'DeprecationWarning: Determining whether to sampling' + 'by loss type is deprecated, please delete sampler in' + 'your config when using `FocalLoss`, `GHMC`, ' + '`QualityFocalLoss` or other FocalLoss variant.') + self.sampling = False + sampler_cfg = dict(type='PseudoSampler') + else: + self.sampling = False + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.fp16_enabled = False + + self.prior_generator = build_prior_generator(anchor_generator) + + # Usually the numbers of anchors for each level are the same + # except SSD detectors. 
So it is an int in the most dense + # heads but a list of int in SSDHead + self.num_base_priors = self.prior_generator.num_base_priors[0] + self._init_layers() + + @property + def num_anchors(self): + warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' + 'for consistency or also use ' + '`num_base_priors` instead') + return self.prior_generator.num_base_priors[0] + + @property + def anchor_generator(self): + warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' + 'please use "prior_generator" instead') + return self.prior_generator + + def _init_layers(self): + """Initialize layers of the head.""" + self.conv_cls = nn.Conv2d(self.in_channels, + self.num_base_priors * self.cls_out_channels, + 1) + self.conv_reg = nn.Conv2d(self.in_channels, self.num_base_priors * 4, + 1) + + def forward_single(self, x): + """Forward feature of a single scale level. + + Args: + x (Tensor): Features of a single scale level. + + Returns: + tuple: + cls_score (Tensor): Cls scores for a single scale level \ + the channels number is num_base_priors * num_classes. + bbox_pred (Tensor): Box energies / deltas for a single scale \ + level, the channels number is num_base_priors * 4. + """ + cls_score = self.conv_cls(x) + bbox_pred = self.conv_reg(x) + return cls_score, bbox_pred + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: A tuple of classification scores and bbox prediction. + + - cls_scores (list[Tensor]): Classification scores for all \ + scale levels, each is a 4D-tensor, the channels number \ + is num_base_priors * num_classes. + - bbox_preds (list[Tensor]): Box energies / deltas for all \ + scale levels, each is a 4D-tensor, the channels number \ + is num_base_priors * 4. + """ + return multi_apply(self.forward_single, feats) + + def get_anchors(self, featmap_sizes, img_metas, device='cuda'): + """Get anchors according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. + device (torch.device | str): Device for returned tensors + + Returns: + tuple: + anchor_list (list[Tensor]): Anchors of each image. + valid_flag_list (list[Tensor]): Valid flags of each image. + """ + num_imgs = len(img_metas) + + # since feature map sizes of all images are the same, we only compute + # anchors for one time + multi_level_anchors = self.prior_generator.grid_priors( + featmap_sizes, device=device) + anchor_list = [multi_level_anchors for _ in range(num_imgs)] + + # for each image, we compute valid flags of multi level anchors + valid_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = self.prior_generator.valid_flags( + featmap_sizes, img_meta['pad_shape'], device) + valid_flag_list.append(multi_level_flags) + + return anchor_list, valid_flag_list + + def _get_targets_single(self, + flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True): + """Compute regression and classification targets for anchors in a + single image. + + Args: + flat_anchors (Tensor): Multi-level anchors of the image, which are + concatenated into a single tensor of shape (num_anchors ,4) + valid_flags (Tensor): Multi level valid flags of the image, + which are concatenated into a single tensor of + shape (num_anchors,). 
+ gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + img_meta (dict): Meta info of the image. + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: + labels_list (list[Tensor]): Labels of each level + label_weights_list (list[Tensor]): Label weights of each level + bbox_targets_list (list[Tensor]): BBox targets of each level + bbox_weights_list (list[Tensor]): BBox weights of each level + num_total_pos (int): Number of positive samples in all images + num_total_neg (int): Number of negative samples in all images + """ + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 7 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + + assign_result = self.assigner.assign( + anchors, gt_bboxes, gt_bboxes_ignore, + None if self.sampling else gt_labels) + sampling_result = self.sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + labels = anchors.new_full((num_valid_anchors, ), + self.num_classes, + dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + if not self.reg_decoded_bbox: + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + else: + pos_bbox_targets = sampling_result.pos_gt_bboxes + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class since v2.5.0 + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + labels = unmap( + labels, num_total_anchors, inside_flags, + fill=self.num_classes) # fill bg label + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds, sampling_result) + + def get_targets(self, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True, + return_sampling_results=False): + """Compute regression and classification targets for anchors in + multiple images. + + Args: + anchor_list (list[list[Tensor]]): Multi level anchors of each + image. The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_anchors, 4). + valid_flag_list (list[list[Tensor]]): Multi level valid flags of + each image. 
The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_anchors, ) + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be + ignored. + gt_labels_list (list[Tensor]): Ground truth labels of each box. + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: Usually returns a tuple containing learning targets. + + - labels_list (list[Tensor]): Labels of each level. + - label_weights_list (list[Tensor]): Label weights of each + level. + - bbox_targets_list (list[Tensor]): BBox targets of each level. + - bbox_weights_list (list[Tensor]): BBox weights of each level. + - num_total_pos (int): Number of positive samples in all + images. + - num_total_neg (int): Number of negative samples in all + images. + + additional_returns: This function enables user-defined returns from + `self._get_targets_single`. These returns are currently refined + to properties at each feature map (i.e. having HxW dimension). + The results will be concatenated after the end + """ + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + # concat all level anchors to a single tensor + concat_anchor_list = [] + concat_valid_flag_list = [] + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + concat_anchor_list.append(torch.cat(anchor_list[i])) + concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + results = multi_apply( + self._get_targets_single, + concat_anchor_list, + concat_valid_flag_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs) + (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, + pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] + rest_results = list(results[7:]) # user-added return values + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. 
multiple levels + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors) + res = (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + if return_sampling_results: + res = res + (sampling_results_list, ) + for i, r in enumerate(rest_results): # user-added return values + rest_results[i] = images_to_levels(r, num_level_anchors) + + return res + tuple(rest_results) + + def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, + bbox_targets, bbox_weights, num_total_samples): + """Compute loss of a single scale level. + + Args: + cls_score (Tensor): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W). + bbox_pred (Tensor): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W). + anchors (Tensor): Box reference for each scale level with shape + (N, num_total_anchors, 4). + labels (Tensor): Labels of each anchors with shape + (N, num_total_anchors). + label_weights (Tensor): Label weights of each anchor with shape + (N, num_total_anchors) + bbox_targets (Tensor): BBox regression targets of each anchor + weight shape (N, num_total_anchors, 4). + bbox_weights (Tensor): BBox regression loss weights of each anchor + with shape (N, num_total_anchors, 4). + num_total_samples (int): If sampling, num total samples equal to + the number of total anchors; Otherwise, it is the number of + positive anchors. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + # classification loss + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = cls_score.permute(0, 2, 3, + 1).reshape(-1, self.cls_out_channels) + loss_cls = self.loss_cls( + cls_score, labels, label_weights, avg_factor=num_total_samples) + # regression loss + bbox_targets = bbox_targets.reshape(-1, 4) + bbox_weights = bbox_weights.reshape(-1, 4) + bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + if self.reg_decoded_bbox: + # When the regression loss (e.g. `IouLoss`, `GIouLoss`) + # is applied directly on the decoded bounding boxes, it + # decodes the already encoded coordinates to absolute format. + anchors = anchors.reshape(-1, 4) + bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) + loss_bbox = self.loss_bbox( + bbox_pred, + bbox_targets, + bbox_weights, + avg_factor=num_total_samples) + return loss_cls, loss_bbox + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. Default: None + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
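+
+        Example (illustrative only; uses a minimal, hypothetical train_cfg):
+            >>> import torch
+            >>> from mmcv import Config
+            >>> train_cfg = Config(
+            ...     dict(
+            ...         assigner=dict(
+            ...             type='MaxIoUAssigner',
+            ...             pos_iou_thr=0.7,
+            ...             neg_iou_thr=0.3),
+            ...         allowed_border=-1,
+            ...         pos_weight=-1))
+            >>> self = AnchorHead(num_classes=2, in_channels=7,
+            ...                   train_cfg=train_cfg)
+            >>> feats = [torch.rand(1, 7, s, s) for s in [64, 32, 16, 8, 4]]
+            >>> cls_scores, bbox_preds = self.forward(feats)
+            >>> img_metas = [dict(img_shape=(256, 256, 3),
+            ...                   pad_shape=(256, 256, 3))]
+            >>> gt_bboxes = [torch.Tensor([[10., 10., 60., 60.]])]
+            >>> gt_labels = [torch.LongTensor([1])]
+            >>> losses = self.loss(cls_scores, bbox_preds, gt_bboxes,
+            ...                    gt_labels, img_metas)
+            >>> assert set(losses) == {'loss_cls', 'loss_bbox'}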
+ """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + # concat all level anchors and flags to a single tensor + concat_anchor_list = [] + for i in range(len(anchor_list)): + concat_anchor_list.append(torch.cat(anchor_list[i])) + all_anchor_list = images_to_levels(concat_anchor_list, + num_level_anchors) + + losses_cls, losses_bbox = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + all_anchor_list, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples) + return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) + + def aug_test(self, feats, img_metas, rescale=False): + """Test function with test time augmentation. + + Args: + feats (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains features for all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), where + 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n,), The length of list should always be 1. + """ + return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/atss_head.py b/downstream/mmdetection/mmdet/models/dense_heads/atss_head.py new file mode 100644 index 0000000..e8f401c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/atss_head.py @@ -0,0 +1,501 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, Scale +from mmcv.runner import force_fp32 + +from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, + images_to_levels, multi_apply, reduce_mean, unmap) +from ..builder import HEADS, build_loss +from .anchor_head import AnchorHead + + +@HEADS.register_module() +class ATSSHead(AnchorHead): + """Bridging the Gap Between Anchor-based and Anchor-free Detection via + Adaptive Training Sample Selection. + + ATSS head structure is similar with FCOS, however ATSS use anchor boxes + and assign label by Adaptive Training Sample Selection instead max-iou. 
+ + https://arxiv.org/abs/1912.02424 + """ + + def __init__(self, + num_classes, + in_channels, + pred_kernel_size=3, + stacked_convs=4, + conv_cfg=None, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + reg_decoded_bbox=True, + loss_centerness=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='atss_cls', + std=0.01, + bias_prob=0.01)), + **kwargs): + self.pred_kernel_size = pred_kernel_size + self.stacked_convs = stacked_convs + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + super(ATSSHead, self).__init__( + num_classes, + in_channels, + reg_decoded_bbox=reg_decoded_bbox, + init_cfg=init_cfg, + **kwargs) + + self.sampling = False + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # SSD sampling=False so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.loss_centerness = build_loss(loss_centerness) + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + pred_pad_size = self.pred_kernel_size // 2 + self.atss_cls = nn.Conv2d( + self.feat_channels, + self.num_anchors * self.cls_out_channels, + self.pred_kernel_size, + padding=pred_pad_size) + self.atss_reg = nn.Conv2d( + self.feat_channels, + self.num_base_priors * 4, + self.pred_kernel_size, + padding=pred_pad_size) + self.atss_centerness = nn.Conv2d( + self.feat_channels, + self.num_base_priors * 1, + self.pred_kernel_size, + padding=pred_pad_size) + self.scales = nn.ModuleList( + [Scale(1.0) for _ in self.prior_generator.strides]) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of classification scores and bbox prediction + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * 4. + """ + return multi_apply(self.forward_single, feats, self.scales) + + def forward_single(self, x, scale): + """Forward feature of a single scale level. + + Args: + x (Tensor): Features of a single scale level. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + + Returns: + tuple: + cls_score (Tensor): Cls scores for a single scale level + the channels number is num_anchors * num_classes. + bbox_pred (Tensor): Box energies / deltas for a single scale + level, the channels number is num_anchors * 4. + centerness (Tensor): Centerness for a single scale level, the + channel number is (N, num_anchors * 1, H, W). 
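+
+        Example (illustrative shape check with the default configs):
+            >>> import torch
+            >>> self = ATSSHead(num_classes=11, in_channels=7)
+            >>> x = torch.rand(1, 7, 32, 32)
+            >>> cls_score, bbox_pred, centerness = self.forward_single(
+            ...     x, self.scales[0])
+            >>> assert cls_score.size(1) == self.num_base_priors * 11
+            >>> assert bbox_pred.size(1) == self.num_base_priors * 4
+            >>> assert centerness.size(1) == self.num_base_priors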
+ """ + cls_feat = x + reg_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + cls_score = self.atss_cls(cls_feat) + # we just follow atss, not apply exp in bbox_pred + bbox_pred = scale(self.atss_reg(reg_feat)).float() + centerness = self.atss_centerness(reg_feat) + return cls_score, bbox_pred, centerness + + def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels, + label_weights, bbox_targets, num_total_samples): + """Compute loss of a single scale level. + + Args: + cls_score (Tensor): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W). + bbox_pred (Tensor): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W). + anchors (Tensor): Box reference for each scale level with shape + (N, num_total_anchors, 4). + labels (Tensor): Labels of each anchors with shape + (N, num_total_anchors). + label_weights (Tensor): Label weights of each anchor with shape + (N, num_total_anchors) + bbox_targets (Tensor): BBox regression targets of each anchor + weight shape (N, num_total_anchors, 4). + num_total_samples (int): Number os positive samples that is + reduced over all GPUs. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + + anchors = anchors.reshape(-1, 4) + cls_score = cls_score.permute(0, 2, 3, 1).reshape( + -1, self.cls_out_channels).contiguous() + bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + centerness = centerness.permute(0, 2, 3, 1).reshape(-1) + bbox_targets = bbox_targets.reshape(-1, 4) + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + + # classification loss + loss_cls = self.loss_cls( + cls_score, labels, label_weights, avg_factor=num_total_samples) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero().squeeze(1) + + if len(pos_inds) > 0: + pos_bbox_targets = bbox_targets[pos_inds] + pos_bbox_pred = bbox_pred[pos_inds] + pos_anchors = anchors[pos_inds] + pos_centerness = centerness[pos_inds] + + centerness_targets = self.centerness_target( + pos_anchors, pos_bbox_targets) + pos_decode_bbox_pred = self.bbox_coder.decode( + pos_anchors, pos_bbox_pred) + + # regression loss + loss_bbox = self.loss_bbox( + pos_decode_bbox_pred, + pos_bbox_targets, + weight=centerness_targets, + avg_factor=1.0) + + # centerness loss + loss_centerness = self.loss_centerness( + pos_centerness, + centerness_targets, + avg_factor=num_total_samples) + + else: + loss_bbox = bbox_pred.sum() * 0 + loss_centerness = centerness.sum() * 0 + centerness_targets = bbox_targets.new_tensor(0.) + + return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum() + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) + def loss(self, + cls_scores, + bbox_preds, + centernesses, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + centernesses (list[Tensor]): Centerness for each scale + level with shape (N, num_anchors * 1, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
+ gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor] | None): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + + (anchor_list, labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets + + num_total_samples = reduce_mean( + torch.tensor(num_total_pos, dtype=torch.float, + device=device)).item() + num_total_samples = max(num_total_samples, 1.0) + + losses_cls, losses_bbox, loss_centerness,\ + bbox_avg_factor = multi_apply( + self.loss_single, + anchor_list, + cls_scores, + bbox_preds, + centernesses, + labels_list, + label_weights_list, + bbox_targets_list, + num_total_samples=num_total_samples) + + bbox_avg_factor = sum(bbox_avg_factor) + bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() + losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) + return dict( + loss_cls=losses_cls, + loss_bbox=losses_bbox, + loss_centerness=loss_centerness) + + def centerness_target(self, anchors, gts): + # only calculate pos centerness targets, otherwise there may be nan + anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 + anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 + l_ = anchors_cx - gts[:, 0] + t_ = anchors_cy - gts[:, 1] + r_ = gts[:, 2] - anchors_cx + b_ = gts[:, 3] - anchors_cy + + left_right = torch.stack([l_, r_], dim=1) + top_bottom = torch.stack([t_, b_], dim=1) + centerness = torch.sqrt( + (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * + (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) + assert not torch.isnan(centerness).any() + return centerness + + def get_targets(self, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): + """Get targets for ATSS head. + + This method is almost the same as `AnchorHead.get_targets()`. Besides + returning the targets as the parent method does, it also returns the + anchors as the first element of the returned tuple. 
+ """ + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + num_level_anchors_list = [num_level_anchors] * num_imgs + + # concat all level anchors and flags to a single tensor + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + anchor_list[i] = torch.cat(anchor_list[i]) + valid_flag_list[i] = torch.cat(valid_flag_list[i]) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + (all_anchors, all_labels, all_label_weights, all_bbox_targets, + all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, + anchor_list, + valid_flag_list, + num_level_anchors_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs) + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + anchors_list = images_to_levels(all_anchors, num_level_anchors) + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors) + return (anchors_list, labels_list, label_weights_list, + bbox_targets_list, bbox_weights_list, num_total_pos, + num_total_neg) + + def _get_target_single(self, + flat_anchors, + valid_flags, + num_level_anchors, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True): + """Compute regression, classification targets for anchors in a single + image. + + Args: + flat_anchors (Tensor): Multi-level anchors of the image, which are + concatenated into a single tensor of shape (num_anchors ,4) + valid_flags (Tensor): Multi level valid flags of the image, + which are concatenated into a single tensor of + shape (num_anchors,). + num_level_anchors Tensor): Number of anchors of each scale level. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + img_meta (dict): Meta info of the image. + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: N is the number of total anchors in the image. + labels (Tensor): Labels of all anchors in the image with shape + (N,). + label_weights (Tensor): Label weights of all anchor in the + image with shape (N,). + bbox_targets (Tensor): BBox targets of all anchors in the + image with shape (N, 4). + bbox_weights (Tensor): BBox weights of all anchors in the + image with shape (N, 4) + pos_inds (Tensor): Indices of positive anchor with shape + (num_pos,). + neg_inds (Tensor): Indices of negative anchor with shape + (num_neg,). 
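+
+        Note:
+            When ``unmap_outputs`` is True, all of the above are mapped back
+            to the full set of ``flat_anchors`` via ``unmap`` (see the code
+            below): anchors outside the image get the background label
+            ``self.num_classes`` and zero label/bbox weights.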
+ """ + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 7 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + + num_level_anchors_inside = self.get_num_level_anchors_inside( + num_level_anchors, inside_flags) + assign_result = self.assigner.assign(anchors, num_level_anchors_inside, + gt_bboxes, gt_bboxes_ignore, + gt_labels) + + sampling_result = self.sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + labels = anchors.new_full((num_valid_anchors, ), + self.num_classes, + dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + if self.reg_decoded_bbox: + pos_bbox_targets = sampling_result.pos_gt_bboxes + else: + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class since v2.5.0 + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + anchors = unmap(anchors, num_total_anchors, inside_flags) + labels = unmap( + labels, num_total_anchors, inside_flags, fill=self.num_classes) + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (anchors, labels, label_weights, bbox_targets, bbox_weights, + pos_inds, neg_inds) + + def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): + split_inside_flags = torch.split(inside_flags, num_level_anchors) + num_level_anchors_inside = [ + int(flags.sum()) for flags in split_inside_flags + ] + return num_level_anchors_inside diff --git a/downstream/mmdetection/mmdet/models/dense_heads/autoassign_head.py b/downstream/mmdetection/mmdet/models/dense_heads/autoassign_head.py new file mode 100644 index 0000000..446da24 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/autoassign_head.py @@ -0,0 +1,527 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import bias_init_with_prob, normal_init +from mmcv.runner import force_fp32 + +from mmdet.core import multi_apply +from mmdet.core.anchor.point_generator import MlvlPointGenerator +from mmdet.core.bbox import bbox_overlaps +from mmdet.models import HEADS +from mmdet.models.dense_heads.atss_head import reduce_mean +from mmdet.models.dense_heads.fcos_head import FCOSHead +from mmdet.models.dense_heads.paa_head import levels_to_images + +EPS = 1e-12 + + +class CenterPrior(nn.Module): + """Center Weighting module to adjust the category-specific prior + distributions. 
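+
+    It keeps one learnable 2-D Gaussian per category (``self.mean`` and
+    ``self.sigma`` below) over the stride-normalized offset between a point
+    and the gt center, and uses it to down-weight points that are far from
+    the category-specific prior location.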
+
+    Args:
+        force_topk (bool): When no point falls into gt_bbox, forcibly
+            select the k points closest to the center to calculate
+            the center prior. Defaults to False.
+        topk (int): The number of points used to calculate the
+            center prior when no point falls in gt_bbox. Only used when
+            force_topk is True. Defaults to 9.
+        num_classes (int): The number of classes in the dataset.
+            Defaults to 80.
+        strides (tuple[int]): The stride of each input feature map. Defaults
+            to (8, 16, 32, 64, 128).
+    """
+
+    def __init__(self,
+                 force_topk=False,
+                 topk=9,
+                 num_classes=80,
+                 strides=(8, 16, 32, 64, 128)):
+        super(CenterPrior, self).__init__()
+        self.mean = nn.Parameter(torch.zeros(num_classes, 2))
+        self.sigma = nn.Parameter(torch.ones(num_classes, 2))
+        self.strides = strides
+        self.force_topk = force_topk
+        self.topk = topk
+
+    def forward(self, anchor_points_list, gt_bboxes, labels,
+                inside_gt_bbox_mask):
+        """Get the center prior of each point on the feature map for each
+        instance.
+
+        Args:
+            anchor_points_list (list[Tensor]): List of point coordinates on
+                the feature maps. Each has shape (num_points, 2).
+            gt_bboxes (Tensor): The gt_bboxes with shape of
+                (num_gt, 4).
+            labels (Tensor): The gt_labels with shape of (num_gt,).
+            inside_gt_bbox_mask (Tensor): Tensor of bool type,
+                with shape of (num_points, num_gt), each
+                value is used to mark whether this point falls
+                within a certain gt.
+
+        Returns:
+            tuple(Tensor):
+
+                - center_prior_weights(Tensor): Float tensor with shape \
+                    of (num_points, num_gt). Each value represents \
+                    the center weighting coefficient.
+                - inside_gt_bbox_mask (Tensor): Tensor of bool type, \
+                    with shape of (num_points, num_gt), each \
+                    value is used to mark whether this point falls \
+                    within a certain gt or is the topk nearest points for \
+                    a specific gt_bbox.
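+
+        Note:
+            Paraphrasing the implementation below: for a point ``p`` on a
+            level with stride ``s`` and a gt of category ``c`` centered at
+            ``g``, the (pre-masking) weight is::
+
+                exp(-((p - g) / s - mean[c]) ** 2 / (2 * sigma[c] ** 2))
+
+            taken as a product over the x and y coordinates; points outside
+            the (possibly top-k augmented) gt box are then zeroed out.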
+ """ + inside_gt_bbox_mask = inside_gt_bbox_mask.clone() + num_gts = len(labels) + num_points = sum([len(item) for item in anchor_points_list]) + if num_gts == 0: + return gt_bboxes.new_zeros(num_points, + num_gts), inside_gt_bbox_mask + center_prior_list = [] + for slvl_points, stride in zip(anchor_points_list, self.strides): + # slvl_points: points from single level in FPN, has shape (h*w, 2) + # single_level_points has shape (h*w, num_gt, 2) + single_level_points = slvl_points[:, None, :].expand( + (slvl_points.size(0), len(gt_bboxes), 2)) + gt_center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2) + gt_center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2) + gt_center = torch.stack((gt_center_x, gt_center_y), dim=1) + gt_center = gt_center[None] + # instance_center has shape (1, num_gt, 2) + instance_center = self.mean[labels][None] + # instance_sigma has shape (1, num_gt, 2) + instance_sigma = self.sigma[labels][None] + # distance has shape (num_points, num_gt, 2) + distance = (((single_level_points - gt_center) / float(stride) - + instance_center)**2) + center_prior = torch.exp(-distance / + (2 * instance_sigma**2)).prod(dim=-1) + center_prior_list.append(center_prior) + center_prior_weights = torch.cat(center_prior_list, dim=0) + + if self.force_topk: + gt_inds_no_points_inside = torch.nonzero( + inside_gt_bbox_mask.sum(0) == 0).reshape(-1) + if gt_inds_no_points_inside.numel(): + topk_center_index = \ + center_prior_weights[:, gt_inds_no_points_inside].topk( + self.topk, + dim=0)[1] + temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside] + inside_gt_bbox_mask[:, gt_inds_no_points_inside] = \ + torch.scatter(temp_mask, + dim=0, + index=topk_center_index, + src=torch.ones_like( + topk_center_index, + dtype=torch.bool)) + + center_prior_weights[~inside_gt_bbox_mask] = 0 + return center_prior_weights, inside_gt_bbox_mask + + +@HEADS.register_module() +class AutoAssignHead(FCOSHead): + """AutoAssignHead head used in AutoAssign. + + More details can be found in the `paper + `_ . + + Args: + force_topk (bool): Used in center prior initialization to + handle extremely small gt. Default is False. + topk (int): The number of points used to calculate the + center prior when no point falls in gt_bbox. Only work when + force_topk if True. Defaults to 9. + pos_loss_weight (float): The loss weight of positive loss + and with default value 0.25. + neg_loss_weight (float): The loss weight of negative loss + and with default value 0.75. + center_loss_weight (float): The loss weight of center prior + loss and with default value 0.75. + """ + + def __init__(self, + *args, + force_topk=False, + topk=9, + pos_loss_weight=0.25, + neg_loss_weight=0.75, + center_loss_weight=0.75, + **kwargs): + super().__init__(*args, conv_bias=True, **kwargs) + self.center_prior = CenterPrior( + force_topk=force_topk, + topk=topk, + num_classes=self.num_classes, + strides=self.strides) + self.pos_loss_weight = pos_loss_weight + self.neg_loss_weight = neg_loss_weight + self.center_loss_weight = center_loss_weight + self.prior_generator = MlvlPointGenerator(self.strides, offset=0) + + def init_weights(self): + """Initialize weights of the head. 
+ + In particular, we have special initialization for classified conv's and + regression conv's bias + """ + + super(AutoAssignHead, self).init_weights() + bias_cls = bias_init_with_prob(0.02) + normal_init(self.conv_cls, std=0.01, bias=bias_cls) + normal_init(self.conv_reg, std=0.01, bias=4.0) + + def forward_single(self, x, scale, stride): + """Forward features of a single scale level. + + Args: + x (Tensor): FPN feature maps of the specified stride. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + stride (int): The corresponding stride for feature maps, only + used to normalize the bbox prediction when self.norm_on_bbox + is True. + + Returns: + tuple: scores for each class, bbox predictions and centerness \ + predictions of input feature maps. + """ + cls_score, bbox_pred, cls_feat, reg_feat = super( + FCOSHead, self).forward_single(x) + centerness = self.conv_centerness(reg_feat) + # scale the bbox_pred of different level + # float to avoid overflow when enabling FP16 + bbox_pred = scale(bbox_pred).float() + # bbox_pred needed for gradient computation has been modified + # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace + # F.relu(bbox_pred) with bbox_pred.clamp(min=0) + bbox_pred = bbox_pred.clamp(min=0) + bbox_pred *= stride + return cls_score, bbox_pred, centerness + + def get_pos_loss_single(self, cls_score, objectness, reg_loss, gt_labels, + center_prior_weights): + """Calculate the positive loss of all points in gt_bboxes. + + Args: + cls_score (Tensor): All category scores for each point on + the feature map. The shape is (num_points, num_class). + objectness (Tensor): Foreground probability of all points, + has shape (num_points, 1). + reg_loss (Tensor): The regression loss of each gt_bbox and each + prediction box, has shape of (num_points, num_gt). + gt_labels (Tensor): The zeros based gt_labels of all gt + with shape of (num_gt,). + center_prior_weights (Tensor): Float tensor with shape + of (num_points, num_gt). Each value represents + the center weighting coefficient. + + Returns: + tuple[Tensor]: + + - pos_loss (Tensor): The positive loss of all points + in the gt_bboxes. + """ + # p_loc: localization confidence + p_loc = torch.exp(-reg_loss) + # p_cls: classification confidence + p_cls = (cls_score * objectness)[:, gt_labels] + # p_pos: joint confidence indicator + p_pos = p_cls * p_loc + + # 3 is a hyper-parameter to control the contributions of high and + # low confidence locations towards positive losses. + confidence_weight = torch.exp(p_pos * 3) + p_pos_weight = (confidence_weight * center_prior_weights) / ( + (confidence_weight * center_prior_weights).sum( + 0, keepdim=True)).clamp(min=EPS) + reweighted_p_pos = (p_pos * p_pos_weight).sum(0) + pos_loss = F.binary_cross_entropy( + reweighted_p_pos, + torch.ones_like(reweighted_p_pos), + reduction='none') + pos_loss = pos_loss.sum() * self.pos_loss_weight + return pos_loss, + + def get_neg_loss_single(self, cls_score, objectness, gt_labels, ious, + inside_gt_bbox_mask): + """Calculate the negative loss of all points in feature map. + + Args: + cls_score (Tensor): All category scores for each point on + the feature map. The shape is (num_points, num_class). + objectness (Tensor): Foreground probability of all points + and is shape of (num_points, 1). + gt_labels (Tensor): The zeros based label of all gt with shape of + (num_gt). + ious (Tensor): Float tensor with shape of (num_points, num_gt). + Each value represent the iou of pred_bbox and gt_bboxes. 
+ inside_gt_bbox_mask (Tensor): Tensor of bool type, + with shape of (num_points, num_gt), each + value is used to mark whether this point falls + within a certain gt. + + Returns: + tuple[Tensor]: + + - neg_loss (Tensor): The negative loss of all points + in the feature map. + """ + num_gts = len(gt_labels) + joint_conf = (cls_score * objectness) + p_neg_weight = torch.ones_like(joint_conf) + if num_gts > 0: + # the order of dinmension would affect the value of + # p_neg_weight, we strictly follow the original + # implementation. + inside_gt_bbox_mask = inside_gt_bbox_mask.permute(1, 0) + ious = ious.permute(1, 0) + + foreground_idxs = torch.nonzero(inside_gt_bbox_mask, as_tuple=True) + temp_weight = (1 / (1 - ious[foreground_idxs]).clamp_(EPS)) + + def normalize(x): + return (x - x.min() + EPS) / (x.max() - x.min() + EPS) + + for instance_idx in range(num_gts): + idxs = foreground_idxs[0] == instance_idx + if idxs.any(): + temp_weight[idxs] = normalize(temp_weight[idxs]) + + p_neg_weight[foreground_idxs[1], + gt_labels[foreground_idxs[0]]] = 1 - temp_weight + + logits = (joint_conf * p_neg_weight) + neg_loss = ( + logits**2 * F.binary_cross_entropy( + logits, torch.zeros_like(logits), reduction='none')) + neg_loss = neg_loss.sum() * self.neg_loss_weight + return neg_loss, + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) + def loss(self, + cls_scores, + bbox_preds, + objectnesses, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * 4. + objectnesses (list[Tensor]): objectness for each scale level, each + is a 4D-tensor, the channel number is num_points * 1. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
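+
+        Note:
+            Paraphrasing the implementation below: ``loss_pos`` is averaged
+            over the total number of gt boxes (reduced across GPUs),
+            ``loss_neg`` over the summed center-prior weights, and
+            ``loss_center`` is the per-image mean of
+            ``num_gt / sum(center_prior_weights)`` scaled by
+            ``self.center_loss_weight``.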
+ """ + + assert len(cls_scores) == len(bbox_preds) == len(objectnesses) + all_num_gt = sum([len(item) for item in gt_bboxes]) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + all_level_points = self.prior_generator.grid_priors( + featmap_sizes, + dtype=bbox_preds[0].dtype, + device=bbox_preds[0].device) + inside_gt_bbox_mask_list, bbox_targets_list = self.get_targets( + all_level_points, gt_bboxes) + + center_prior_weight_list = [] + temp_inside_gt_bbox_mask_list = [] + for gt_bboxe, gt_label, inside_gt_bbox_mask in zip( + gt_bboxes, gt_labels, inside_gt_bbox_mask_list): + center_prior_weight, inside_gt_bbox_mask = \ + self.center_prior(all_level_points, gt_bboxe, gt_label, + inside_gt_bbox_mask) + center_prior_weight_list.append(center_prior_weight) + temp_inside_gt_bbox_mask_list.append(inside_gt_bbox_mask) + inside_gt_bbox_mask_list = temp_inside_gt_bbox_mask_list + mlvl_points = torch.cat(all_level_points, dim=0) + bbox_preds = levels_to_images(bbox_preds) + cls_scores = levels_to_images(cls_scores) + objectnesses = levels_to_images(objectnesses) + + reg_loss_list = [] + ious_list = [] + num_points = len(mlvl_points) + + for bbox_pred, encoded_targets, inside_gt_bbox_mask in zip( + bbox_preds, bbox_targets_list, inside_gt_bbox_mask_list): + temp_num_gt = encoded_targets.size(1) + expand_mlvl_points = mlvl_points[:, None, :].expand( + num_points, temp_num_gt, 2).reshape(-1, 2) + encoded_targets = encoded_targets.reshape(-1, 4) + expand_bbox_pred = bbox_pred[:, None, :].expand( + num_points, temp_num_gt, 4).reshape(-1, 4) + decoded_bbox_preds = self.bbox_coder.decode( + expand_mlvl_points, expand_bbox_pred) + decoded_target_preds = self.bbox_coder.decode( + expand_mlvl_points, encoded_targets) + with torch.no_grad(): + ious = bbox_overlaps( + decoded_bbox_preds, decoded_target_preds, is_aligned=True) + ious = ious.reshape(num_points, temp_num_gt) + if temp_num_gt: + ious = ious.max( + dim=-1, keepdim=True).values.repeat(1, temp_num_gt) + else: + ious = ious.new_zeros(num_points, temp_num_gt) + ious[~inside_gt_bbox_mask] = 0 + ious_list.append(ious) + loss_bbox = self.loss_bbox( + decoded_bbox_preds, + decoded_target_preds, + weight=None, + reduction_override='none') + reg_loss_list.append(loss_bbox.reshape(num_points, temp_num_gt)) + + cls_scores = [item.sigmoid() for item in cls_scores] + objectnesses = [item.sigmoid() for item in objectnesses] + pos_loss_list, = multi_apply(self.get_pos_loss_single, cls_scores, + objectnesses, reg_loss_list, gt_labels, + center_prior_weight_list) + pos_avg_factor = reduce_mean( + bbox_pred.new_tensor(all_num_gt)).clamp_(min=1) + pos_loss = sum(pos_loss_list) / pos_avg_factor + + neg_loss_list, = multi_apply(self.get_neg_loss_single, cls_scores, + objectnesses, gt_labels, ious_list, + inside_gt_bbox_mask_list) + neg_avg_factor = sum(item.data.sum() + for item in center_prior_weight_list) + neg_avg_factor = reduce_mean(neg_avg_factor).clamp_(min=1) + neg_loss = sum(neg_loss_list) / neg_avg_factor + + center_loss = [] + for i in range(len(img_metas)): + + if inside_gt_bbox_mask_list[i].any(): + center_loss.append( + len(gt_bboxes[i]) / + center_prior_weight_list[i].sum().clamp_(min=EPS)) + # when width or height of gt_bbox is smaller than stride of p3 + else: + center_loss.append(center_prior_weight_list[i].sum() * 0) + + center_loss = torch.stack(center_loss).mean() * self.center_loss_weight + + # avoid dead lock in DDP + if all_num_gt == 0: + pos_loss = bbox_preds[0].sum() * 0 + dummy_center_prior_loss = self.center_prior.mean.sum( + 
) * 0 + self.center_prior.sigma.sum() * 0 + center_loss = objectnesses[0].sum() * 0 + dummy_center_prior_loss + + loss = dict( + loss_pos=pos_loss, loss_neg=neg_loss, loss_center=center_loss) + + return loss + + def get_targets(self, points, gt_bboxes_list): + """Compute regression targets and each point inside or outside gt_bbox + in multiple images. + + Args: + points (list[Tensor]): Points of all fpn level, each has shape + (num_points, 2). + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, + each has shape (num_gt, 4). + + Returns: + tuple(list[Tensor]): + + - inside_gt_bbox_mask_list (list[Tensor]): Each + Tensor is with bool type and shape of + (num_points, num_gt), each value + is used to mark whether this point falls + within a certain gt. + - concat_lvl_bbox_targets (list[Tensor]): BBox + targets of each level. Each tensor has shape + (num_points, num_gt, 4). + """ + + concat_points = torch.cat(points, dim=0) + # the number of points per img, per lvl + inside_gt_bbox_mask_list, bbox_targets_list = multi_apply( + self._get_target_single, gt_bboxes_list, points=concat_points) + return inside_gt_bbox_mask_list, bbox_targets_list + + def _get_target_single(self, gt_bboxes, points): + """Compute regression targets and each point inside or outside gt_bbox + for a single image. + + Args: + gt_bboxes (Tensor): gt_bbox of single image, has shape + (num_gt, 4). + points (Tensor): Points of all fpn level, has shape + (num_points, 2). + + Returns: + tuple[Tensor]: Containing the following Tensors: + + - inside_gt_bbox_mask (Tensor): Bool tensor with shape + (num_points, num_gt), each value is used to mark + whether this point falls within a certain gt. + - bbox_targets (Tensor): BBox targets of each points with + each gt_bboxes, has shape (num_points, num_gt, 4). + """ + num_points = points.size(0) + num_gts = gt_bboxes.size(0) + gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) + xs, ys = points[:, 0], points[:, 1] + xs = xs[:, None] + ys = ys[:, None] + left = xs - gt_bboxes[..., 0] + right = gt_bboxes[..., 2] - xs + top = ys - gt_bboxes[..., 1] + bottom = gt_bboxes[..., 3] - ys + bbox_targets = torch.stack((left, top, right, bottom), -1) + if num_gts: + inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 + else: + inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts), + dtype=torch.bool) + + return inside_gt_bbox_mask, bbox_targets + + def _get_points_single(self, + featmap_size, + stride, + dtype, + device, + flatten=False): + """Almost the same as the implementation in fcos, we remove half stride + offset to align with the original implementation. + + This function will be deprecated soon. + """ + warnings.warn( + '`_get_points_single` in `AutoAssignHead` will be ' + 'deprecated soon, we support a multi level point generator now' + 'you can get points of a single level feature map ' + 'with `self.prior_generator.single_level_grid_priors` ') + y, x = super(FCOSHead, + self)._get_points_single(featmap_size, stride, dtype, + device) + points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), + dim=-1) + return points diff --git a/downstream/mmdetection/mmdet/models/dense_heads/base_dense_head.py b/downstream/mmdetection/mmdet/models/dense_heads/base_dense_head.py new file mode 100644 index 0000000..0c7abb7 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/base_dense_head.py @@ -0,0 +1,526 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
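+# Overview of the single-image test path implemented below: for every FPN
+# level the scores are thresholded and top-k filtered with
+# `filter_scores_and_topk`, the kept box deltas are decoded through
+# `self.bbox_coder.decode`, and the per-level results are finally merged
+# with `batched_nms` inside `_bbox_post_process`.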
+from abc import ABCMeta, abstractmethod + +import torch +from mmcv.cnn.utils.weight_init import constant_init +from mmcv.ops import batched_nms +from mmcv.runner import BaseModule, force_fp32 + +from mmdet.core.utils import filter_scores_and_topk, select_single_mlvl + + +class BaseDenseHead(BaseModule, metaclass=ABCMeta): + """Base class for DenseHeads.""" + + def __init__(self, init_cfg=None): + super(BaseDenseHead, self).__init__(init_cfg) + + def init_weights(self): + super(BaseDenseHead, self).init_weights() + # avoid init_cfg overwrite the initialization of `conv_offset` + for m in self.modules(): + # DeformConv2dPack, ModulatedDeformConv2dPack + if hasattr(m, 'conv_offset'): + constant_init(m.conv_offset, 0) + + @abstractmethod + def loss(self, **kwargs): + """Compute losses of the head.""" + pass + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + score_factors=None, + img_metas=None, + cfg=None, + rescale=False, + with_nms=True, + **kwargs): + """Transform network outputs of a batch into bbox results. + + Note: When score_factors is not None, the cls_scores are + usually multiplied by it then obtain the real score used in NMS, + such as CenterNess in FCOS, IoU branch in ATSS. + + Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + score_factors (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, num_priors * 1, H, W). Default None. + img_metas (list[dict], Optional): Image meta info. Default None. + cfg (mmcv.Config, Optional): Test / postprocessing configuration, + if None, test_cfg would be used. Default None. + rescale (bool): If True, return boxes in original image space. + Default False. + with_nms (bool): If True, do nms before return boxes. + Default True. + + Returns: + list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is an (n, 5) tensor, where the first 4 columns + are bounding box positions (tl_x, tl_y, br_x, br_y) and the + 5-th column is a score between 0 and 1. The second item is a + (n,) tensor where each item is the predicted class label of + the corresponding box. + """ + assert len(cls_scores) == len(bbox_preds) + + if score_factors is None: + # e.g. Retina, FreeAnchor, Foveabox, etc. + with_score_factors = False + else: + # e.g. FCOS, PAA, ATSS, AutoAssign, etc. 
+ with_score_factors = True + assert len(cls_scores) == len(score_factors) + + num_levels = len(cls_scores) + + featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device) + + result_list = [] + + for img_id in range(len(img_metas)): + img_meta = img_metas[img_id] + cls_score_list = select_single_mlvl(cls_scores, img_id) + bbox_pred_list = select_single_mlvl(bbox_preds, img_id) + if with_score_factors: + score_factor_list = select_single_mlvl(score_factors, img_id) + else: + score_factor_list = [None for _ in range(num_levels)] + + results = self._get_bboxes_single(cls_score_list, bbox_pred_list, + score_factor_list, mlvl_priors, + img_meta, cfg, rescale, with_nms, + **kwargs) + result_list.append(results) + return result_list + + def _get_bboxes_single(self, + cls_score_list, + bbox_pred_list, + score_factor_list, + mlvl_priors, + img_meta, + cfg, + rescale=False, + with_nms=True, + **kwargs): + """Transform outputs of a single image into bbox predictions. + + Args: + cls_score_list (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_priors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas from + all scale levels of a single image, each item has shape + (num_priors * 4, H, W). + score_factor_list (list[Tensor]): Score factor from all scale + levels of a single image, each item has shape + (num_priors * 1, H, W). + mlvl_priors (list[Tensor]): Each element in the list is + the priors of a single level in feature pyramid. In all + anchor-based methods, it has shape (num_priors, 4). In + all anchor-free methods, it has shape (num_priors, 2) + when `with_stride=True`, otherwise it still has shape + (num_priors, 4). + img_meta (dict): Image meta info. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. If with_nms + is False and mlvl_score_factor is None, return mlvl_bboxes and + mlvl_scores, else return mlvl_bboxes, mlvl_scores and + mlvl_score_factor. Usually with_nms is False is used for aug + test. If with_nms is True, then return the following format + + - det_bboxes (Tensor): Predicted bboxes with shape \ + [num_bboxes, 5], where the first 4 columns are bounding \ + box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ + column are scores between 0 and 1. + - det_labels (Tensor): Predicted labels of the corresponding \ + box with shape [num_bboxes]. + """ + if score_factor_list[0] is None: + # e.g. Retina, FreeAnchor, etc. + with_score_factors = False + else: + # e.g. FCOS, PAA, ATSS, etc. 
+ with_score_factors = True + + cfg = self.test_cfg if cfg is None else cfg + img_shape = img_meta['img_shape'] + nms_pre = cfg.get('nms_pre', -1) + + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_labels = [] + if with_score_factors: + mlvl_score_factors = [] + else: + mlvl_score_factors = None + for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ + enumerate(zip(cls_score_list, bbox_pred_list, + score_factor_list, mlvl_priors)): + + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + if with_score_factors: + score_factor = score_factor.permute(1, 2, + 0).reshape(-1).sigmoid() + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + # remind that we set FG labels to [0, num_class-1] + # since mmdet v2.0 + # BG cat_id: num_class + scores = cls_score.softmax(-1)[:, :-1] + + # After https://github.com/open-mmlab/mmdetection/pull/6268/, + # this operation keeps fewer bboxes under the same `nms_pre`. + # There is no difference in performance for most models. If you + # find a slight drop in performance, you can set a larger + # `nms_pre` than before. + results = filter_scores_and_topk( + scores, cfg.score_thr, nms_pre, + dict(bbox_pred=bbox_pred, priors=priors)) + scores, labels, keep_idxs, filtered_results = results + + bbox_pred = filtered_results['bbox_pred'] + priors = filtered_results['priors'] + + if with_score_factors: + score_factor = score_factor[keep_idxs] + + bboxes = self.bbox_coder.decode( + priors, bbox_pred, max_shape=img_shape) + + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_labels.append(labels) + if with_score_factors: + mlvl_score_factors.append(score_factor) + + return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, + img_meta['scale_factor'], cfg, rescale, + with_nms, mlvl_score_factors, **kwargs) + + def _bbox_post_process(self, + mlvl_scores, + mlvl_labels, + mlvl_bboxes, + scale_factor, + cfg, + rescale=False, + with_nms=True, + mlvl_score_factors=None, + **kwargs): + """bbox post-processing method. + + The boxes would be rescaled to the original image scale and do + the nms operation. Usually `with_nms` is False is used for aug test. + + Args: + mlvl_scores (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_bboxes, ). + mlvl_labels (list[Tensor]): Box class labels from all scale + levels of a single image, each item has shape + (num_bboxes, ). + mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale + levels of a single image, each item has shape (num_bboxes, 4). + scale_factor (ndarray, optional): Scale factor of the image arange + as (w_scale, h_scale, w_scale, h_scale). + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + mlvl_score_factors (list[Tensor], optional): Score factor from + all scale levels of a single image, each item has shape + (num_bboxes, ). Default: None. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. If with_nms + is False and mlvl_score_factor is None, return mlvl_bboxes and + mlvl_scores, else return mlvl_bboxes, mlvl_scores and + mlvl_score_factor. Usually with_nms is False is used for aug + test. 
If with_nms is True, then return the following format + + - det_bboxes (Tensor): Predicted bboxes with shape \ + [num_bboxes, 5], where the first 4 columns are bounding \ + box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ + column are scores between 0 and 1. + - det_labels (Tensor): Predicted labels of the corresponding \ + box with shape [num_bboxes]. + """ + assert len(mlvl_scores) == len(mlvl_bboxes) == len(mlvl_labels) + + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + mlvl_labels = torch.cat(mlvl_labels) + + if mlvl_score_factors is not None: + # TODO: Add sqrt operation in order to be consistent with + # the paper. + mlvl_score_factors = torch.cat(mlvl_score_factors) + mlvl_scores = mlvl_scores * mlvl_score_factors + + if with_nms: + if mlvl_bboxes.numel() == 0: + det_bboxes = torch.cat([mlvl_bboxes, mlvl_scores[:, None]], -1) + return det_bboxes, mlvl_labels + + det_bboxes, keep_idxs = batched_nms(mlvl_bboxes, mlvl_scores, + mlvl_labels, cfg.nms) + det_bboxes = det_bboxes[:cfg.max_per_img] + det_labels = mlvl_labels[keep_idxs][:cfg.max_per_img] + return det_bboxes, det_labels + else: + return mlvl_bboxes, mlvl_scores, mlvl_labels + + def forward_train(self, + x, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=None, + proposal_cfg=None, + **kwargs): + """ + Args: + x (list[Tensor]): Features from FPN. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + proposal_cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used + + Returns: + tuple: + losses: (dict[str, Tensor]): A dictionary of loss components. + proposal_list (list[Tensor]): Proposals of each image. + """ + outs = self(x) + if gt_labels is None: + loss_inputs = outs + (gt_bboxes, img_metas) + else: + loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) + losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + if proposal_cfg is None: + return losses + else: + proposal_list = self.get_bboxes( + *outs, img_metas=img_metas, cfg=proposal_cfg) + return losses, proposal_list + + def simple_test(self, feats, img_metas, rescale=False): + """Test function without test-time augmentation. + + Args: + feats (tuple[torch.Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n, ). + """ + return self.simple_test_bboxes(feats, img_metas, rescale=rescale) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def onnx_export(self, + cls_scores, + bbox_preds, + score_factors=None, + img_metas=None, + with_nms=True): + """Transform network output for a batch into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + with shape (N, num_points * num_classes, H, W). 
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_points * 4, H, W). + score_factors (list[Tensor]): score_factors for each s + cale level with shape (N, num_points * 1, H, W). + Default: None. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. Default: None. + with_nms (bool): Whether apply nms to the bboxes. Default: True. + + Returns: + tuple[Tensor, Tensor] | list[tuple]: When `with_nms` is True, + it is tuple[Tensor, Tensor], first tensor bboxes with shape + [N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score) + and second element is class labels of shape [N, num_det]. + When `with_nms` is False, first tensor is bboxes with + shape [N, num_det, 4], second tensor is raw score has + shape [N, num_det, num_classes]. + """ + assert len(cls_scores) == len(bbox_preds) + + num_levels = len(cls_scores) + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=bbox_preds[0].dtype, + device=bbox_preds[0].device) + + mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)] + mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)] + + assert len( + img_metas + ) == 1, 'Only support one input image while in exporting to ONNX' + img_shape = img_metas[0]['img_shape_for_onnx'] + + cfg = self.test_cfg + assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors) + device = cls_scores[0].device + batch_size = cls_scores[0].shape[0] + # convert to tensor to keep tracing + nms_pre_tensor = torch.tensor( + cfg.get('nms_pre', -1), device=device, dtype=torch.long) + + # e.g. Retina, FreeAnchor, etc. + if score_factors is None: + with_score_factors = False + mlvl_score_factor = [None for _ in range(num_levels)] + else: + # e.g. FCOS, PAA, ATSS, etc. + with_score_factors = True + mlvl_score_factor = [ + score_factors[i].detach() for i in range(num_levels) + ] + mlvl_score_factors = [] + + mlvl_batch_bboxes = [] + mlvl_scores = [] + + for cls_score, bbox_pred, score_factors, priors in zip( + mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor, + mlvl_priors): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + + scores = cls_score.permute(0, 2, 3, + 1).reshape(batch_size, -1, + self.cls_out_channels) + if self.use_sigmoid_cls: + scores = scores.sigmoid() + nms_pre_score = scores + else: + scores = scores.softmax(-1) + nms_pre_score = scores + + if with_score_factors: + score_factors = score_factors.permute(0, 2, 3, 1).reshape( + batch_size, -1).sigmoid() + bbox_pred = bbox_pred.permute(0, 2, 3, + 1).reshape(batch_size, -1, 4) + priors = priors.expand(batch_size, -1, priors.size(-1)) + # Get top-k predictions + from mmdet.core.export import get_k_for_topk + nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) + if nms_pre > 0: + + if with_score_factors: + nms_pre_score = (nms_pre_score * score_factors[..., None]) + else: + nms_pre_score = nms_pre_score + + # Get maximum scores for foreground classes. 
+ if self.use_sigmoid_cls: + max_scores, _ = nms_pre_score.max(-1) + else: + # remind that we set FG labels to [0, num_class-1] + # since mmdet v2.0 + # BG cat_id: num_class + max_scores, _ = nms_pre_score[..., :-1].max(-1) + _, topk_inds = max_scores.topk(nms_pre) + + batch_inds = torch.arange( + batch_size, device=bbox_pred.device).view( + -1, 1).expand_as(topk_inds).long() + # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 + transformed_inds = bbox_pred.shape[1] * batch_inds + topk_inds + priors = priors.reshape( + -1, priors.size(-1))[transformed_inds, :].reshape( + batch_size, -1, priors.size(-1)) + bbox_pred = bbox_pred.reshape(-1, + 4)[transformed_inds, :].reshape( + batch_size, -1, 4) + scores = scores.reshape( + -1, self.cls_out_channels)[transformed_inds, :].reshape( + batch_size, -1, self.cls_out_channels) + if with_score_factors: + score_factors = score_factors.reshape( + -1, 1)[transformed_inds].reshape(batch_size, -1) + + bboxes = self.bbox_coder.decode( + priors, bbox_pred, max_shape=img_shape) + + mlvl_batch_bboxes.append(bboxes) + mlvl_scores.append(scores) + if with_score_factors: + mlvl_score_factors.append(score_factors) + + batch_bboxes = torch.cat(mlvl_batch_bboxes, dim=1) + batch_scores = torch.cat(mlvl_scores, dim=1) + if with_score_factors: + batch_score_factors = torch.cat(mlvl_score_factors, dim=1) + + # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment + + from mmdet.core.export import add_dummy_nms_for_onnx + + if not self.use_sigmoid_cls: + batch_scores = batch_scores[..., :self.num_classes] + + if with_score_factors: + batch_scores = batch_scores * (batch_score_factors.unsqueeze(2)) + + if with_nms: + max_output_boxes_per_class = cfg.nms.get( + 'max_output_boxes_per_class', 200) + iou_threshold = cfg.nms.get('iou_threshold', 0.5) + score_threshold = cfg.score_thr + nms_pre = cfg.get('deploy_nms_pre', -1) + return add_dummy_nms_for_onnx(batch_bboxes, batch_scores, + max_output_boxes_per_class, + iou_threshold, score_threshold, + nms_pre, cfg.max_per_img) + else: + return batch_bboxes, batch_scores diff --git a/downstream/mmdetection/mmdet/models/dense_heads/base_mask_head.py b/downstream/mmdetection/mmdet/models/dense_heads/base_mask_head.py new file mode 100644 index 0000000..5eb94fb --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/base_mask_head.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from mmcv.runner import BaseModule + + +class BaseMaskHead(BaseModule, metaclass=ABCMeta): + """Base class for mask heads used in One-Stage Instance Segmentation.""" + + def __init__(self, init_cfg): + super(BaseMaskHead, self).__init__(init_cfg) + + @abstractmethod + def loss(self, **kwargs): + pass + + @abstractmethod + def get_results(self, **kwargs): + """Get precessed :obj:`InstanceData` of multiple images.""" + pass + + def forward_train(self, + x, + gt_labels, + gt_masks, + img_metas, + gt_bboxes=None, + gt_bboxes_ignore=None, + positive_infos=None, + **kwargs): + """ + Args: + x (list[Tensor] | tuple[Tensor]): Features from FPN. + Each has a shape (B, C, H, W). + gt_labels (list[Tensor]): Ground truth labels of all images. + each has a shape (num_gts,). + gt_masks (list[Tensor]) : Masks for each bbox, has a shape + (num_gts, h , w). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. 
+ gt_bboxes (list[Tensor]): Ground truth bboxes of the image, + each item has a shape (num_gts, 4). + gt_bboxes_ignore (list[Tensor], None): Ground truth bboxes to be + ignored, each item has a shape (num_ignored_gts, 4). + positive_infos (list[:obj:`InstanceData`], optional): Information + of positive samples. Used when the label assignment is + done outside the MaskHead, e.g., in BboxHead in + YOLACT or CondInst, etc. When the label assignment is done in + MaskHead, it would be None, like SOLO. All values + in it should have shape (num_positive_samples, *). + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + if positive_infos is None: + outs = self(x) + else: + outs = self(x, positive_infos) + + assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \ + 'even if only one item is returned' + loss = self.loss( + *outs, + gt_labels=gt_labels, + gt_masks=gt_masks, + img_metas=img_metas, + gt_bboxes=gt_bboxes, + gt_bboxes_ignore=gt_bboxes_ignore, + positive_infos=positive_infos, + **kwargs) + return loss + + def simple_test(self, + feats, + img_metas, + rescale=False, + instances_list=None, + **kwargs): + """Test function without test-time augmentation. + + Args: + feats (tuple[torch.Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + instances_list (list[obj:`InstanceData`], optional): Detection + results of each image after the post process. Only exist + if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc. + + Returns: + list[obj:`InstanceData`]: Instance segmentation \ + results of each image after the post process. \ + Each item usually contains following keys. \ + + - scores (Tensor): Classification scores, has a shape + (num_instance,) + - labels (Tensor): Has a shape (num_instances,). + - masks (Tensor): Processed mask results, has a + shape (num_instances, h, w). + """ + if instances_list is None: + outs = self(feats) + else: + outs = self(feats, instances_list=instances_list) + mask_inputs = outs + (img_metas, ) + results_list = self.get_results( + *mask_inputs, + rescale=rescale, + instances_list=instances_list, + **kwargs) + return results_list + + def onnx_export(self, img, img_metas): + raise NotImplementedError(f'{self.__class__.__name__} does ' + f'not support ONNX EXPORT') diff --git a/downstream/mmdetection/mmdet/models/dense_heads/cascade_rpn_head.py b/downstream/mmdetection/mmdet/models/dense_heads/cascade_rpn_head.py new file mode 100644 index 0000000..69347e0 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/cascade_rpn_head.py @@ -0,0 +1,801 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from __future__ import division +import copy +import warnings + +import torch +import torch.nn as nn +from mmcv import ConfigDict +from mmcv.ops import DeformConv2d, batched_nms +from mmcv.runner import BaseModule, ModuleList + +from mmdet.core import (RegionAssigner, build_assigner, build_sampler, + images_to_levels, multi_apply) +from mmdet.core.utils import select_single_mlvl +from ..builder import HEADS, build_head +from .base_dense_head import BaseDenseHead +from .rpn_head import RPNHead + + +class AdaptiveConv(BaseModule): + """AdaptiveConv used to adapt the sampling location with the anchors. 
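+
+    In 'offset' mode the 3x3 sampling locations are shifted per anchor via
+    :obj:`DeformConv2d`, so the receptive field can follow arbitrary anchor
+    shapes; in 'dilation' mode a plain dilated 3x3 convolution is used and
+    every location shares the same uniform receptive field (see ``forward``
+    below).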
+ + Args: + in_channels (int): Number of channels in the input image + out_channels (int): Number of channels produced by the convolution + kernel_size (int or tuple): Size of the conv kernel. Default: 3 + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of + the input. Default: 1 + dilation (int or tuple, optional): Spacing between kernel elements. + Default: 3 + groups (int, optional): Number of blocked connections from input + channels to output channels. Default: 1 + bias (bool, optional): If set True, adds a learnable bias to the + output. Default: False. + type (str, optional): Type of adaptive conv, can be either 'offset' + (arbitrary anchors) or 'dilation' (uniform anchor). + Default: 'dilation'. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + dilation=3, + groups=1, + bias=False, + type='dilation', + init_cfg=dict( + type='Normal', std=0.01, override=dict(name='conv'))): + super(AdaptiveConv, self).__init__(init_cfg) + assert type in ['offset', 'dilation'] + self.adapt_type = type + + assert kernel_size == 3, 'Adaptive conv only supports kernels 3' + if self.adapt_type == 'offset': + assert stride == 1 and padding == 1 and groups == 1, \ + 'Adaptive conv offset mode only supports padding: {1}, ' \ + f'stride: {1}, groups: {1}' + self.conv = DeformConv2d( + in_channels, + out_channels, + kernel_size, + padding=padding, + stride=stride, + groups=groups, + bias=bias) + else: + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size, + padding=dilation, + dilation=dilation) + + def forward(self, x, offset): + """Forward function.""" + if self.adapt_type == 'offset': + N, _, H, W = x.shape + assert offset is not None + assert H * W == offset.shape[1] + # reshape [N, NA, 18] to (N, 18, H, W) + offset = offset.permute(0, 2, 1).reshape(N, -1, H, W) + offset = offset.contiguous() + x = self.conv(x, offset) + else: + assert offset is None + x = self.conv(x) + return x + + +@HEADS.register_module() +class StageCascadeRPNHead(RPNHead): + """Stage of CascadeRPNHead. + + Args: + in_channels (int): Number of channels in the input feature map. + anchor_generator (dict): anchor generator config. + adapt_cfg (dict): adaptation config. + bridged_feature (bool, optional): whether update rpn feature. + Default: False. + with_cls (bool, optional): whether use classification branch. + Default: True. + sampling (bool, optional): whether use sampling. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[1.0], + strides=[4, 8, 16, 32, 64]), + adapt_cfg=dict(type='dilation', dilation=3), + bridged_feature=False, + with_cls=True, + sampling=True, + init_cfg=None, + **kwargs): + self.with_cls = with_cls + self.anchor_strides = anchor_generator['strides'] + self.anchor_scales = anchor_generator['scales'] + self.bridged_feature = bridged_feature + self.adapt_cfg = adapt_cfg + super(StageCascadeRPNHead, self).__init__( + in_channels, + anchor_generator=anchor_generator, + init_cfg=init_cfg, + **kwargs) + + # override sampling and sampler + self.sampling = sampling + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # use PseudoSampler when sampling is False + if self.sampling and hasattr(self.train_cfg, 'sampler'): + sampler_cfg = self.train_cfg.sampler + else: + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + + if init_cfg is None: + self.init_cfg = dict( + type='Normal', std=0.01, override=[dict(name='rpn_reg')]) + if self.with_cls: + self.init_cfg['override'].append(dict(name='rpn_cls')) + + def _init_layers(self): + """Init layers of a CascadeRPN stage.""" + self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels, + **self.adapt_cfg) + if self.with_cls: + self.rpn_cls = nn.Conv2d(self.feat_channels, + self.num_anchors * self.cls_out_channels, + 1) + self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) + self.relu = nn.ReLU(inplace=True) + + def forward_single(self, x, offset): + """Forward function of single scale.""" + bridged_x = x + x = self.relu(self.rpn_conv(x, offset)) + if self.bridged_feature: + bridged_x = x # update feature + cls_score = self.rpn_cls(x) if self.with_cls else None + bbox_pred = self.rpn_reg(x) + return bridged_x, cls_score, bbox_pred + + def forward(self, feats, offset_list=None): + """Forward function.""" + if offset_list is None: + offset_list = [None for _ in range(len(feats))] + return multi_apply(self.forward_single, feats, offset_list) + + def _region_targets_single(self, + anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + featmap_sizes, + label_channels=1): + """Get anchor targets based on region for single level.""" + assign_result = self.assigner.assign( + anchors, + valid_flags, + gt_bboxes, + img_meta, + featmap_sizes, + self.anchor_scales[0], + self.anchor_strides, + gt_bboxes_ignore=gt_bboxes_ignore, + gt_labels=None, + allowed_border=self.train_cfg.allowed_border) + flat_anchors = torch.cat(anchors) + sampling_result = self.sampler.sample(assign_result, flat_anchors, + gt_bboxes) + + num_anchors = flat_anchors.shape[0] + bbox_targets = torch.zeros_like(flat_anchors) + bbox_weights = torch.zeros_like(flat_anchors) + labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long) + label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + if not self.reg_decoded_bbox: + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + else: + pos_bbox_targets = sampling_result.pos_gt_bboxes + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + if gt_labels is None: + labels[pos_inds] = 1 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 
0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds) + + def region_targets(self, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + featmap_sizes, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): + """See :func:`StageCascadeRPNHead.get_targets`.""" + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, + pos_inds_list, neg_inds_list) = multi_apply( + self._region_targets_single, + anchor_list, + valid_flag_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + featmap_sizes=featmap_sizes, + label_channels=label_channels) + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + + def get_targets(self, + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + featmap_sizes, + gt_bboxes_ignore=None, + label_channels=1): + """Compute regression and classification targets for anchors. + + Args: + anchor_list (list[list]): Multi level anchors of each image. + valid_flag_list (list[list]): Multi level valid flags of each + image. + gt_bboxes (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + featmap_sizes (list[Tensor]): Feature mapsize each level + gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each images + label_channels (int): Channel of label. 
+ + Returns: + cls_reg_targets (tuple) + """ + if isinstance(self.assigner, RegionAssigner): + cls_reg_targets = self.region_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + featmap_sizes, + gt_bboxes_ignore_list=gt_bboxes_ignore, + label_channels=label_channels) + else: + cls_reg_targets = super(StageCascadeRPNHead, self).get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + label_channels=label_channels) + return cls_reg_targets + + def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes): + """ Get offset for deformable conv based on anchor shape + NOTE: currently support deformable kernel_size=3 and dilation=1 + + Args: + anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of + multi-level anchors + anchor_strides (list[int]): anchor stride of each level + + Returns: + offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv + kernel. + """ + + def _shape_offset(anchors, stride, ks=3, dilation=1): + # currently support kernel_size=3 and dilation=1 + assert ks == 3 and dilation == 1 + pad = (ks - 1) // 2 + idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device) + yy, xx = torch.meshgrid(idx, idx) # return order matters + xx = xx.reshape(-1) + yy = yy.reshape(-1) + w = (anchors[:, 2] - anchors[:, 0]) / stride + h = (anchors[:, 3] - anchors[:, 1]) / stride + w = w / (ks - 1) - dilation + h = h / (ks - 1) - dilation + offset_x = w[:, None] * xx # (NA, ks**2) + offset_y = h[:, None] * yy # (NA, ks**2) + return offset_x, offset_y + + def _ctr_offset(anchors, stride, featmap_size): + feat_h, feat_w = featmap_size + assert len(anchors) == feat_h * feat_w + + x = (anchors[:, 0] + anchors[:, 2]) * 0.5 + y = (anchors[:, 1] + anchors[:, 3]) * 0.5 + # compute centers on feature map + x = x / stride + y = y / stride + # compute predefine centers + xx = torch.arange(0, feat_w, device=anchors.device) + yy = torch.arange(0, feat_h, device=anchors.device) + yy, xx = torch.meshgrid(yy, xx) + xx = xx.reshape(-1).type_as(x) + yy = yy.reshape(-1).type_as(y) + + offset_x = x - xx # (NA, ) + offset_y = y - yy # (NA, ) + return offset_x, offset_y + + num_imgs = len(anchor_list) + num_lvls = len(anchor_list[0]) + dtype = anchor_list[0][0].dtype + device = anchor_list[0][0].device + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + + offset_list = [] + for i in range(num_imgs): + mlvl_offset = [] + for lvl in range(num_lvls): + c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl], + anchor_strides[lvl], + featmap_sizes[lvl]) + s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl], + anchor_strides[lvl]) + + # offset = ctr_offset + shape_offset + offset_x = s_offset_x + c_offset_x[:, None] + offset_y = s_offset_y + c_offset_y[:, None] + + # offset order (y0, x0, y1, x2, .., y8, x8, y9, x9) + offset = torch.stack([offset_y, offset_x], dim=-1) + offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2] + mlvl_offset.append(offset) + offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2] + offset_list = images_to_levels(offset_list, num_level_anchors) + return offset_list + + def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, + bbox_targets, bbox_weights, num_total_samples): + """Loss function on single scale.""" + # classification loss + if self.with_cls: + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = cls_score.permute(0, 2, 3, + 1).reshape(-1, self.cls_out_channels) + loss_cls = self.loss_cls( 
+ cls_score, labels, label_weights, avg_factor=num_total_samples) + # regression loss + bbox_targets = bbox_targets.reshape(-1, 4) + bbox_weights = bbox_weights.reshape(-1, 4) + bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + if self.reg_decoded_bbox: + # When the regression loss (e.g. `IouLoss`, `GIouLoss`) + # is applied directly on the decoded bounding boxes, it + # decodes the already encoded coordinates to absolute format. + anchors = anchors.reshape(-1, 4) + bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) + loss_reg = self.loss_bbox( + bbox_pred, + bbox_targets, + bbox_weights, + avg_factor=num_total_samples) + if self.with_cls: + return loss_cls, loss_reg + return None, loss_reg + + def loss(self, + anchor_list, + valid_flag_list, + cls_scores, + bbox_preds, + gt_bboxes, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + anchor_list (list[list]): Multi level anchors of each image. + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. Default: None + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds] + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + featmap_sizes, + gt_bboxes_ignore=gt_bboxes_ignore, + label_channels=label_channels) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + if self.sampling: + num_total_samples = num_total_pos + num_total_neg + else: + # 200 is hard-coded average factor, + # which follows guided anchoring. + num_total_samples = sum([label.numel() + for label in labels_list]) / 200.0 + + # change per image, per level anchor_list to per_level, per_image + mlvl_anchor_list = list(zip(*anchor_list)) + # concat mlvl_anchor_list + mlvl_anchor_list = [ + torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list + ] + + losses = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + mlvl_anchor_list, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples) + if self.with_cls: + return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1]) + return dict(loss_rpn_reg=losses[1]) + + def get_bboxes(self, + anchor_list, + cls_scores, + bbox_preds, + img_metas, + cfg, + rescale=False): + """Get proposal predict. + + Args: + anchor_list (list[list]): Multi level anchors of each image. + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + img_metas (list[dict], Optional): Image meta info. Default None. 
+ cfg (mmcv.Config, Optional): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + + Returns: + Tensor: Labeled boxes in shape (n, 5), where the first 4 columns + are bounding box positions (tl_x, tl_y, br_x, br_y) and the + 5-th column is a score between 0 and 1. + """ + assert len(cls_scores) == len(bbox_preds) + + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = select_single_mlvl(cls_scores, img_id) + bbox_pred_list = select_single_mlvl(bbox_preds, img_id) + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, + anchor_list[img_id], img_shape, + scale_factor, cfg, rescale) + result_list.append(proposals) + return result_list + + def _get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + img_shape, + scale_factor, + cfg, + rescale=False): + """Transform outputs of a single image into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_anchors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas from + all scale levels of a single image, each item has + shape (num_anchors * 4, H, W). + mlvl_anchors (list[Tensor]): Box reference from all scale + levels of a single image, each item has shape + (num_total_anchors, 4). + img_shape (tuple[int]): Shape of the input image, + (height, width, 3). + scale_factor (ndarray): Scale factor of the image arange as + (w_scale, h_scale, w_scale, h_scale). + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default False. + + Returns: + Tensor: Labeled boxes in shape (n, 5), where the first 4 columns + are bounding box positions (tl_x, tl_y, br_x, br_y) and the + 5-th column is a score between 0 and 1. + """ + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + # bboxes from different level should be independent during NMS, + # level_ids are used as labels for batched NMS to separate them + level_ids = [] + mlvl_scores = [] + mlvl_bbox_preds = [] + mlvl_valid_anchors = [] + nms_pre = cfg.get('nms_pre', -1) + for idx in range(len(cls_scores)): + rpn_cls_score = cls_scores[idx] + rpn_bbox_pred = bbox_preds[idx] + assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] + rpn_cls_score = rpn_cls_score.permute(1, 2, 0) + if self.use_sigmoid_cls: + rpn_cls_score = rpn_cls_score.reshape(-1) + scores = rpn_cls_score.sigmoid() + else: + rpn_cls_score = rpn_cls_score.reshape(-1, 2) + # We set FG labels to [0, num_class-1] and BG label to + # num_class in RPN head since mmdet v2.5, which is unified to + # be consistent with other head since mmdet v2.0. In mmdet v2.0 + # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. 
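+ # Under that convention the single foreground class sits at index 0,
+ # so column 0 of the softmax output below is the foreground probability.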
+ scores = rpn_cls_score.softmax(dim=1)[:, 0] + rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) + anchors = mlvl_anchors[idx] + + if 0 < nms_pre < scores.shape[0]: + # sort is faster than topk + # _, topk_inds = scores.topk(cfg.nms_pre) + ranked_scores, rank_inds = scores.sort(descending=True) + topk_inds = rank_inds[:nms_pre] + scores = ranked_scores[:nms_pre] + rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] + anchors = anchors[topk_inds, :] + mlvl_scores.append(scores) + mlvl_bbox_preds.append(rpn_bbox_pred) + mlvl_valid_anchors.append(anchors) + level_ids.append( + scores.new_full((scores.size(0), ), idx, dtype=torch.long)) + + scores = torch.cat(mlvl_scores) + anchors = torch.cat(mlvl_valid_anchors) + rpn_bbox_pred = torch.cat(mlvl_bbox_preds) + proposals = self.bbox_coder.decode( + anchors, rpn_bbox_pred, max_shape=img_shape) + ids = torch.cat(level_ids) + + if cfg.min_bbox_size >= 0: + w = proposals[:, 2] - proposals[:, 0] + h = proposals[:, 3] - proposals[:, 1] + valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) + if not valid_mask.all(): + proposals = proposals[valid_mask] + scores = scores[valid_mask] + ids = ids[valid_mask] + + # deprecate arguments warning + if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: + warnings.warn( + 'In rpn_proposal or test_cfg, ' + 'nms_thr has been moved to a dict named nms as ' + 'iou_threshold, max_num has been renamed as max_per_img, ' + 'name of original arguments and the way to specify ' + 'iou_threshold of NMS will be deprecated.') + if 'nms' not in cfg: + cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) + if 'max_num' in cfg: + if 'max_per_img' in cfg: + assert cfg.max_num == cfg.max_per_img, f'You ' \ + f'set max_num and ' \ + f'max_per_img at the same time, but get {cfg.max_num} ' \ + f'and {cfg.max_per_img} respectively' \ + 'Please delete max_num which will be deprecated.' + else: + cfg.max_per_img = cfg.max_num + if 'nms_thr' in cfg: + assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \ + f' iou_threshold in nms and ' \ + f'nms_thr at the same time, but get' \ + f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \ + f' respectively. Please delete the nms_thr ' \ + f'which will be deprecated.' + + if proposals.numel() > 0: + dets, _ = batched_nms(proposals, scores, ids, cfg.nms) + else: + return proposals.new_zeros(0, 5) + + return dets[:cfg.max_per_img] + + def refine_bboxes(self, anchor_list, bbox_preds, img_metas): + """Refine bboxes through stages.""" + num_levels = len(bbox_preds) + new_anchor_list = [] + for img_id in range(len(img_metas)): + mlvl_anchors = [] + for i in range(num_levels): + bbox_pred = bbox_preds[i][img_id].detach() + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + img_shape = img_metas[img_id]['img_shape'] + bboxes = self.bbox_coder.decode(anchor_list[img_id][i], + bbox_pred, img_shape) + mlvl_anchors.append(bboxes) + new_anchor_list.append(mlvl_anchors) + return new_anchor_list + + +@HEADS.register_module() +class CascadeRPNHead(BaseDenseHead): + """The CascadeRPNHead will predict more accurate region proposals, which is + required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN + consists of a sequence of RPNStage to progressively improve the accuracy of + the detected proposals. + + More details can be found in ``https://arxiv.org/abs/1909.06720``. + + Args: + num_stages (int): number of CascadeRPN stages. + stages (list[dict]): list of configs to build the stages. + train_cfg (list[dict]): list of configs at training time each stage. 
+ test_cfg (dict): config at testing time. + """ + + def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=None): + super(CascadeRPNHead, self).__init__(init_cfg) + assert num_stages == len(stages) + self.num_stages = num_stages + # Be careful! Pretrained weights cannot be loaded when use + # nn.ModuleList + self.stages = ModuleList() + for i in range(len(stages)): + train_cfg_i = train_cfg[i] if train_cfg is not None else None + stages[i].update(train_cfg=train_cfg_i) + stages[i].update(test_cfg=test_cfg) + self.stages.append(build_head(stages[i])) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def loss(self): + """loss() is implemented in StageCascadeRPNHead.""" + pass + + def get_bboxes(self): + """get_bboxes() is implemented in StageCascadeRPNHead.""" + pass + + def forward_train(self, + x, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=None, + proposal_cfg=None): + """Forward train function.""" + assert gt_labels is None, 'RPN does not require gt_labels' + + featmap_sizes = [featmap.size()[-2:] for featmap in x] + device = x[0].device + anchor_list, valid_flag_list = self.stages[0].get_anchors( + featmap_sizes, img_metas, device=device) + + losses = dict() + + for i in range(self.num_stages): + stage = self.stages[i] + + if stage.adapt_cfg['type'] == 'offset': + offset_list = stage.anchor_offset(anchor_list, + stage.anchor_strides, + featmap_sizes) + else: + offset_list = None + x, cls_score, bbox_pred = stage(x, offset_list) + rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, + bbox_pred, gt_bboxes, img_metas) + stage_loss = stage.loss(*rpn_loss_inputs) + for name, value in stage_loss.items(): + losses['s{}.{}'.format(i, name)] = value + + # refine boxes + if i < self.num_stages - 1: + anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, + img_metas) + if proposal_cfg is None: + return losses + else: + proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, + bbox_pred, img_metas, + self.test_cfg) + return losses, proposal_list + + def simple_test_rpn(self, x, img_metas): + """Simple forward test function.""" + featmap_sizes = [featmap.size()[-2:] for featmap in x] + device = x[0].device + anchor_list, _ = self.stages[0].get_anchors( + featmap_sizes, img_metas, device=device) + + for i in range(self.num_stages): + stage = self.stages[i] + if stage.adapt_cfg['type'] == 'offset': + offset_list = stage.anchor_offset(anchor_list, + stage.anchor_strides, + featmap_sizes) + else: + offset_list = None + x, cls_score, bbox_pred = stage(x, offset_list) + if i < self.num_stages - 1: + anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, + img_metas) + + proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, + bbox_pred, img_metas, + self.test_cfg) + return proposal_list + + def aug_test_rpn(self, x, img_metas): + """Augmented forward test function.""" + raise NotImplementedError( + 'CascadeRPNHead does not support test-time augmentation') diff --git a/downstream/mmdetection/mmdet/models/dense_heads/centernet_head.py b/downstream/mmdetection/mmdet/models/dense_heads/centernet_head.py new file mode 100644 index 0000000..b9d5d2f --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/centernet_head.py @@ -0,0 +1,412 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
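+# Note: ``forward`` runs on every input feature level, but ``loss`` and
+# ``get_bboxes`` below assume a single output level (both assert that the
+# prediction lists have length 1), in line with the CenterNet design.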
+import torch +import torch.nn as nn +from mmcv.cnn import bias_init_with_prob, normal_init +from mmcv.ops import batched_nms +from mmcv.runner import force_fp32 + +from mmdet.core import multi_apply +from mmdet.models import HEADS, build_loss +from mmdet.models.utils import gaussian_radius, gen_gaussian_target +from ..utils.gaussian_target import (get_local_maximum, get_topk_from_heatmap, + transpose_and_gather_feat) +from .base_dense_head import BaseDenseHead +from .dense_test_mixins import BBoxTestMixin + + +@HEADS.register_module() +class CenterNetHead(BaseDenseHead, BBoxTestMixin): + """Objects as Points Head. CenterHead use center_point to indicate object's + position. Paper link + + Args: + in_channel (int): Number of channel in the input feature map. + feat_channel (int): Number of channel in the intermediate feature map. + num_classes (int): Number of categories excluding the background + category. + loss_center_heatmap (dict | None): Config of center heatmap loss. + Default: GaussianFocalLoss. + loss_wh (dict | None): Config of wh loss. Default: L1Loss. + loss_offset (dict | None): Config of offset loss. Default: L1Loss. + train_cfg (dict | None): Training config. Useless in CenterNet, + but we keep this variable for SingleStageDetector. Default: None. + test_cfg (dict | None): Testing config of CenterNet. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channel, + feat_channel, + num_classes, + loss_center_heatmap=dict( + type='GaussianFocalLoss', loss_weight=1.0), + loss_wh=dict(type='L1Loss', loss_weight=0.1), + loss_offset=dict(type='L1Loss', loss_weight=1.0), + train_cfg=None, + test_cfg=None, + init_cfg=None): + super(CenterNetHead, self).__init__(init_cfg) + self.num_classes = num_classes + self.heatmap_head = self._build_head(in_channel, feat_channel, + num_classes) + self.wh_head = self._build_head(in_channel, feat_channel, 2) + self.offset_head = self._build_head(in_channel, feat_channel, 2) + + self.loss_center_heatmap = build_loss(loss_center_heatmap) + self.loss_wh = build_loss(loss_wh) + self.loss_offset = build_loss(loss_offset) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.fp16_enabled = False + + def _build_head(self, in_channel, feat_channel, out_channel): + """Build head for each branch.""" + layer = nn.Sequential( + nn.Conv2d(in_channel, feat_channel, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(feat_channel, out_channel, kernel_size=1)) + return layer + + def init_weights(self): + """Initialize weights of the head.""" + bias_init = bias_init_with_prob(0.1) + self.heatmap_head[-1].bias.data.fill_(bias_init) + for head in [self.wh_head, self.offset_head]: + for m in head.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, std=0.001) + + def forward(self, feats): + """Forward features. Notice CenterNet head does not use FPN. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + center_heatmap_preds (List[Tensor]): center predict heatmaps for + all levels, the channels number is num_classes. + wh_preds (List[Tensor]): wh predicts for all levels, the channels + number is 2. + offset_preds (List[Tensor]): offset predicts for all levels, the + channels number is 2. + """ + return multi_apply(self.forward_single, feats) + + def forward_single(self, feat): + """Forward feature of a single level. + + Args: + feat (Tensor): Feature of a single level. 
+ + Returns: + center_heatmap_pred (Tensor): center predict heatmaps, the + channels number is num_classes. + wh_pred (Tensor): wh predicts, the channels number is 2. + offset_pred (Tensor): offset predicts, the channels number is 2. + """ + center_heatmap_pred = self.heatmap_head(feat).sigmoid() + wh_pred = self.wh_head(feat) + offset_pred = self.offset_head(feat) + return center_heatmap_pred, wh_pred, offset_pred + + @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds')) + def loss(self, + center_heatmap_preds, + wh_preds, + offset_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + center_heatmap_preds (list[Tensor]): center predict heatmaps for + all levels with shape (B, num_classes, H, W). + wh_preds (list[Tensor]): wh predicts for all levels with + shape (B, 2, H, W). + offset_preds (list[Tensor]): offset predicts for all levels + with shape (B, 2, H, W). + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. Default: None + + Returns: + dict[str, Tensor]: which has components below: + - loss_center_heatmap (Tensor): loss of center heatmap. + - loss_wh (Tensor): loss of hw heatmap + - loss_offset (Tensor): loss of offset heatmap. + """ + assert len(center_heatmap_preds) == len(wh_preds) == len( + offset_preds) == 1 + center_heatmap_pred = center_heatmap_preds[0] + wh_pred = wh_preds[0] + offset_pred = offset_preds[0] + + target_result, avg_factor = self.get_targets(gt_bboxes, gt_labels, + center_heatmap_pred.shape, + img_metas[0]['pad_shape']) + + center_heatmap_target = target_result['center_heatmap_target'] + wh_target = target_result['wh_target'] + offset_target = target_result['offset_target'] + wh_offset_target_weight = target_result['wh_offset_target_weight'] + + # Since the channel of wh_target and offset_target is 2, the avg_factor + # of loss_center_heatmap is always 1/2 of loss_wh and loss_offset. + loss_center_heatmap = self.loss_center_heatmap( + center_heatmap_pred, center_heatmap_target, avg_factor=avg_factor) + loss_wh = self.loss_wh( + wh_pred, + wh_target, + wh_offset_target_weight, + avg_factor=avg_factor * 2) + loss_offset = self.loss_offset( + offset_pred, + offset_target, + wh_offset_target_weight, + avg_factor=avg_factor * 2) + return dict( + loss_center_heatmap=loss_center_heatmap, + loss_wh=loss_wh, + loss_offset=loss_offset) + + def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape): + """Compute regression and classification targets in multiple images. + + Args: + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box. + feat_shape (list[int]): feature map shape with value [B, _, H, W] + img_shape (list[int]): image shape in [h, w] format. + + Returns: + tuple[dict,float]: The float value is mean avg_factor, the dict has + components below: + - center_heatmap_target (Tensor): targets of center heatmap, \ + shape (B, num_classes, H, W). + - wh_target (Tensor): targets of wh predict, shape \ + (B, 2, H, W). + - offset_target (Tensor): targets of offset predict, shape \ + (B, 2, H, W). 
+ - wh_offset_target_weight (Tensor): weights of wh and offset \ + predict, shape (B, 2, H, W). + """ + img_h, img_w = img_shape[:2] + bs, _, feat_h, feat_w = feat_shape + + width_ratio = float(feat_w / img_w) + height_ratio = float(feat_h / img_h) + + center_heatmap_target = gt_bboxes[-1].new_zeros( + [bs, self.num_classes, feat_h, feat_w]) + wh_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) + offset_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) + wh_offset_target_weight = gt_bboxes[-1].new_zeros( + [bs, 2, feat_h, feat_w]) + + for batch_id in range(bs): + gt_bbox = gt_bboxes[batch_id] + gt_label = gt_labels[batch_id] + center_x = (gt_bbox[:, [0]] + gt_bbox[:, [2]]) * width_ratio / 2 + center_y = (gt_bbox[:, [1]] + gt_bbox[:, [3]]) * height_ratio / 2 + gt_centers = torch.cat((center_x, center_y), dim=1) + + for j, ct in enumerate(gt_centers): + ctx_int, cty_int = ct.int() + ctx, cty = ct + scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio + scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio + radius = gaussian_radius([scale_box_h, scale_box_w], + min_overlap=0.3) + radius = max(0, int(radius)) + ind = gt_label[j] + gen_gaussian_target(center_heatmap_target[batch_id, ind], + [ctx_int, cty_int], radius) + + wh_target[batch_id, 0, cty_int, ctx_int] = scale_box_w + wh_target[batch_id, 1, cty_int, ctx_int] = scale_box_h + + offset_target[batch_id, 0, cty_int, ctx_int] = ctx - ctx_int + offset_target[batch_id, 1, cty_int, ctx_int] = cty - cty_int + + wh_offset_target_weight[batch_id, :, cty_int, ctx_int] = 1 + + avg_factor = max(1, center_heatmap_target.eq(1).sum()) + target_result = dict( + center_heatmap_target=center_heatmap_target, + wh_target=wh_target, + offset_target=offset_target, + wh_offset_target_weight=wh_offset_target_weight) + return target_result, avg_factor + + @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds')) + def get_bboxes(self, + center_heatmap_preds, + wh_preds, + offset_preds, + img_metas, + rescale=True, + with_nms=False): + """Transform network output for a batch into bbox predictions. + + Args: + center_heatmap_preds (list[Tensor]): Center predict heatmaps for + all levels with shape (B, num_classes, H, W). + wh_preds (list[Tensor]): WH predicts for all levels with + shape (B, 2, H, W). + offset_preds (list[Tensor]): Offset predicts for all levels + with shape (B, 2, H, W). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Default: True. + with_nms (bool): If True, do nms before return boxes. + Default: False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is an (n, 5) tensor, where 5 represent + (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. + The shape of the second tensor in the tuple is (n,), and + each element represents the class label of the corresponding + box. 
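+
+ Example:
+ A minimal illustrative call (all sizes and ``test_cfg`` values below
+ are arbitrary assumptions, not defaults):
+
+ >>> import torch
+ >>> from mmcv import ConfigDict
+ >>> self = CenterNetHead(
+ ... in_channel=16, feat_channel=16, num_classes=2,
+ ... test_cfg=ConfigDict(topk=4, local_maximum_kernel=3))
+ >>> img_metas = [dict(batch_input_shape=(32, 32),
+ ... border=(0, 0, 0, 0),
+ ... scale_factor=(1.0, 1.0, 1.0, 1.0))]
+ >>> hm = torch.rand(1, 2, 8, 8) # (B, num_classes, H, W)
+ >>> wh = torch.rand(1, 2, 8, 8) # (B, 2, H, W)
+ >>> offset = torch.rand(1, 2, 8, 8) # (B, 2, H, W)
+ >>> results = self.get_bboxes([hm], [wh], [offset], img_metas)
+ >>> det_bboxes, det_labels = results[0]
+ >>> det_bboxes.shape, det_labels.shape
+ (torch.Size([4, 5]), torch.Size([4]))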
+ """ + assert len(center_heatmap_preds) == len(wh_preds) == len( + offset_preds) == 1 + result_list = [] + for img_id in range(len(img_metas)): + result_list.append( + self._get_bboxes_single( + center_heatmap_preds[0][img_id:img_id + 1, ...], + wh_preds[0][img_id:img_id + 1, ...], + offset_preds[0][img_id:img_id + 1, ...], + img_metas[img_id], + rescale=rescale, + with_nms=with_nms)) + return result_list + + def _get_bboxes_single(self, + center_heatmap_pred, + wh_pred, + offset_pred, + img_meta, + rescale=False, + with_nms=True): + """Transform outputs of a single image into bbox results. + + Args: + center_heatmap_pred (Tensor): Center heatmap for current level with + shape (1, num_classes, H, W). + wh_pred (Tensor): WH heatmap for current level with shape + (1, num_classes, H, W). + offset_pred (Tensor): Offset for current level with shape + (1, corner_offset_channels, H, W). + img_meta (dict): Meta information of current image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + tuple[Tensor, Tensor]: The first item is an (n, 5) tensor, where + 5 represent (tl_x, tl_y, br_x, br_y, score) and the score + between 0 and 1. The shape of the second tensor in the tuple + is (n,), and each element represents the class label of the + corresponding box. + """ + batch_det_bboxes, batch_labels = self.decode_heatmap( + center_heatmap_pred, + wh_pred, + offset_pred, + img_meta['batch_input_shape'], + k=self.test_cfg.topk, + kernel=self.test_cfg.local_maximum_kernel) + + det_bboxes = batch_det_bboxes.view([-1, 5]) + det_labels = batch_labels.view(-1) + + batch_border = det_bboxes.new_tensor(img_meta['border'])[..., + [2, 0, 2, 0]] + det_bboxes[..., :4] -= batch_border + + if rescale: + det_bboxes[..., :4] /= det_bboxes.new_tensor( + img_meta['scale_factor']) + + if with_nms: + det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels, + self.test_cfg) + return det_bboxes, det_labels + + def decode_heatmap(self, + center_heatmap_pred, + wh_pred, + offset_pred, + img_shape, + k=100, + kernel=3): + """Transform outputs into detections raw bbox prediction. + + Args: + center_heatmap_pred (Tensor): center predict heatmap, + shape (B, num_classes, H, W). + wh_pred (Tensor): wh predict, shape (B, 2, H, W). + offset_pred (Tensor): offset predict, shape (B, 2, H, W). + img_shape (list[int]): image shape in [h, w] format. + k (int): Get top k center keypoints from heatmap. Default 100. + kernel (int): Max pooling kernel for extract local maximum pixels. + Default 3. 
+ + Returns: + tuple[torch.Tensor]: Decoded output of CenterNetHead, containing + the following Tensors: + + - batch_bboxes (Tensor): Coords of each box with shape (B, k, 5) + - batch_topk_labels (Tensor): Categories of each box with \ + shape (B, k) + """ + height, width = center_heatmap_pred.shape[2:] + inp_h, inp_w = img_shape + + center_heatmap_pred = get_local_maximum( + center_heatmap_pred, kernel=kernel) + + *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap( + center_heatmap_pred, k=k) + batch_scores, batch_index, batch_topk_labels = batch_dets + + wh = transpose_and_gather_feat(wh_pred, batch_index) + offset = transpose_and_gather_feat(offset_pred, batch_index) + topk_xs = topk_xs + offset[..., 0] + topk_ys = topk_ys + offset[..., 1] + tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width) + tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height) + br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width) + br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height) + + batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2) + batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]), + dim=-1) + return batch_bboxes, batch_topk_labels + + def _bboxes_nms(self, bboxes, labels, cfg): + if labels.numel() > 0: + max_num = cfg.max_per_img + bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, + -1].contiguous(), + labels, cfg.nms) + if max_num > 0: + bboxes = bboxes[:max_num] + labels = labels[keep][:max_num] + + return bboxes, labels diff --git a/downstream/mmdetection/mmdet/models/dense_heads/centripetal_head.py b/downstream/mmdetection/mmdet/models/dense_heads/centripetal_head.py new file mode 100644 index 0000000..ebc721b --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/centripetal_head.py @@ -0,0 +1,430 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule, normal_init +from mmcv.ops import DeformConv2d +from mmcv.runner import force_fp32 + +from mmdet.core import multi_apply +from ..builder import HEADS, build_loss +from .corner_head import CornerHead + + +@HEADS.register_module() +class CentripetalHead(CornerHead): + """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object + Detection. + + CentripetalHead inherits from :class:`CornerHead`. It removes the + embedding branch and adds guiding shift and centripetal shift branches. + More details can be found in the `paper + `_ . + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + num_feat_levels (int): Levels of feature from the previous module. 2 + for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104 + outputs the final feature and intermediate supervision feature and + HourglassNet-52 only outputs the final feature. Default: 2. + corner_emb_channels (int): Channel of embedding vector. Default: 1. + train_cfg (dict | None): Training config. Useless in CornerHead, + but we keep this variable for SingleStageDetector. Default: None. + test_cfg (dict | None): Testing config of CornerHead. Default: None. + loss_heatmap (dict | None): Config of corner heatmap loss. Default: + GaussianFocalLoss. + loss_embedding (dict | None): Config of corner embedding loss. Default: + AssociativeEmbeddingLoss. + loss_offset (dict | None): Config of corner offset loss. Default: + SmoothL1Loss. + loss_guiding_shift (dict): Config of guiding shift loss. Default: + SmoothL1Loss. + loss_centripetal_shift (dict): Config of centripetal shift loss. 
+ Default: SmoothL1Loss. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + *args, + centripetal_shift_channels=2, + guiding_shift_channels=2, + feat_adaption_conv_kernel=3, + loss_guiding_shift=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=0.05), + loss_centripetal_shift=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=1), + init_cfg=None, + **kwargs): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + assert centripetal_shift_channels == 2, ( + 'CentripetalHead only support centripetal_shift_channels == 2') + self.centripetal_shift_channels = centripetal_shift_channels + assert guiding_shift_channels == 2, ( + 'CentripetalHead only support guiding_shift_channels == 2') + self.guiding_shift_channels = guiding_shift_channels + self.feat_adaption_conv_kernel = feat_adaption_conv_kernel + super(CentripetalHead, self).__init__( + *args, init_cfg=init_cfg, **kwargs) + self.loss_guiding_shift = build_loss(loss_guiding_shift) + self.loss_centripetal_shift = build_loss(loss_centripetal_shift) + + def _init_centripetal_layers(self): + """Initialize centripetal layers. + + Including feature adaption deform convs (feat_adaption), deform offset + prediction convs (dcn_off), guiding shift (guiding_shift) and + centripetal shift ( centripetal_shift). Each branch has two parts: + prefix `tl_` for top-left and `br_` for bottom-right. + """ + self.tl_feat_adaption = nn.ModuleList() + self.br_feat_adaption = nn.ModuleList() + self.tl_dcn_offset = nn.ModuleList() + self.br_dcn_offset = nn.ModuleList() + self.tl_guiding_shift = nn.ModuleList() + self.br_guiding_shift = nn.ModuleList() + self.tl_centripetal_shift = nn.ModuleList() + self.br_centripetal_shift = nn.ModuleList() + + for _ in range(self.num_feat_levels): + self.tl_feat_adaption.append( + DeformConv2d(self.in_channels, self.in_channels, + self.feat_adaption_conv_kernel, 1, 1)) + self.br_feat_adaption.append( + DeformConv2d(self.in_channels, self.in_channels, + self.feat_adaption_conv_kernel, 1, 1)) + + self.tl_guiding_shift.append( + self._make_layers( + out_channels=self.guiding_shift_channels, + in_channels=self.in_channels)) + self.br_guiding_shift.append( + self._make_layers( + out_channels=self.guiding_shift_channels, + in_channels=self.in_channels)) + + self.tl_dcn_offset.append( + ConvModule( + self.guiding_shift_channels, + self.feat_adaption_conv_kernel**2 * + self.guiding_shift_channels, + 1, + bias=False, + act_cfg=None)) + self.br_dcn_offset.append( + ConvModule( + self.guiding_shift_channels, + self.feat_adaption_conv_kernel**2 * + self.guiding_shift_channels, + 1, + bias=False, + act_cfg=None)) + + self.tl_centripetal_shift.append( + self._make_layers( + out_channels=self.centripetal_shift_channels, + in_channels=self.in_channels)) + self.br_centripetal_shift.append( + self._make_layers( + out_channels=self.centripetal_shift_channels, + in_channels=self.in_channels)) + + def _init_layers(self): + """Initialize layers for CentripetalHead. 
+ + Including two parts: CornerHead layers and CentripetalHead layers + """ + super()._init_layers() # using _init_layers in CornerHead + self._init_centripetal_layers() + + def init_weights(self): + super(CentripetalHead, self).init_weights() + for i in range(self.num_feat_levels): + normal_init(self.tl_feat_adaption[i], std=0.01) + normal_init(self.br_feat_adaption[i], std=0.01) + normal_init(self.tl_dcn_offset[i].conv, std=0.1) + normal_init(self.br_dcn_offset[i].conv, std=0.1) + _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]] + _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]] + _ = [ + x.conv.reset_parameters() for x in self.tl_centripetal_shift[i] + ] + _ = [ + x.conv.reset_parameters() for x in self.br_centripetal_shift[i] + ] + + def forward_single(self, x, lvl_ind): + """Forward feature of a single level. + + Args: + x (Tensor): Feature of a single level. + lvl_ind (int): Level index of current feature. + + Returns: + tuple[Tensor]: A tuple of CentripetalHead's output for current + feature level. Containing the following Tensors: + + - tl_heat (Tensor): Predicted top-left corner heatmap. + - br_heat (Tensor): Predicted bottom-right corner heatmap. + - tl_off (Tensor): Predicted top-left offset heatmap. + - br_off (Tensor): Predicted bottom-right offset heatmap. + - tl_guiding_shift (Tensor): Predicted top-left guiding shift + heatmap. + - br_guiding_shift (Tensor): Predicted bottom-right guiding + shift heatmap. + - tl_centripetal_shift (Tensor): Predicted top-left centripetal + shift heatmap. + - br_centripetal_shift (Tensor): Predicted bottom-right + centripetal shift heatmap. + """ + tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super( + ).forward_single( + x, lvl_ind, return_pool=True) + + tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool) + br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool) + + tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach()) + br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach()) + + tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool, + tl_dcn_offset) + br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool, + br_dcn_offset) + + tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind]( + tl_feat_adaption) + br_centripetal_shift = self.br_centripetal_shift[lvl_ind]( + br_feat_adaption) + + result_list = [ + tl_heat, br_heat, tl_off, br_off, tl_guiding_shift, + br_guiding_shift, tl_centripetal_shift, br_centripetal_shift + ] + return result_list + + @force_fp32() + def loss(self, + tl_heats, + br_heats, + tl_offs, + br_offs, + tl_guiding_shifts, + br_guiding_shifts, + tl_centripetal_shifts, + br_centripetal_shifts, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + tl_heats (list[Tensor]): Top-left corner heatmaps for each level + with shape (N, num_classes, H, W). + br_heats (list[Tensor]): Bottom-right corner heatmaps for each + level with shape (N, num_classes, H, W). + tl_offs (list[Tensor]): Top-left corner offsets for each level + with shape (N, corner_offset_channels, H, W). + br_offs (list[Tensor]): Bottom-right corner offsets for each level + with shape (N, corner_offset_channels, H, W). + tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each + level with shape (N, guiding_shift_channels, H, W). + br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for + each level with shape (N, guiding_shift_channels, H, W). 
+ tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts + for each level with shape (N, centripetal_shift_channels, H, + W). + br_centripetal_shifts (list[Tensor]): Bottom-right centripetal + shifts for each level with shape (N, + centripetal_shift_channels, H, W). + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [left, top, right, bottom] format. + gt_labels (list[Tensor]): Class indices corresponding to each box. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor] | None): Specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. Containing the + following losses: + + - det_loss (list[Tensor]): Corner keypoint losses of all + feature levels. + - off_loss (list[Tensor]): Corner offset losses of all feature + levels. + - guiding_loss (list[Tensor]): Guiding shift losses of all + feature levels. + - centripetal_loss (list[Tensor]): Centripetal shift losses of + all feature levels. + """ + targets = self.get_targets( + gt_bboxes, + gt_labels, + tl_heats[-1].shape, + img_metas[0]['pad_shape'], + with_corner_emb=self.with_corner_emb, + with_guiding_shift=True, + with_centripetal_shift=True) + mlvl_targets = [targets for _ in range(self.num_feat_levels)] + [det_losses, off_losses, guiding_losses, centripetal_losses + ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs, + br_offs, tl_guiding_shifts, br_guiding_shifts, + tl_centripetal_shifts, br_centripetal_shifts, + mlvl_targets) + loss_dict = dict( + det_loss=det_losses, + off_loss=off_losses, + guiding_loss=guiding_losses, + centripetal_loss=centripetal_losses) + return loss_dict + + def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift, + br_guiding_shift, tl_centripetal_shift, + br_centripetal_shift, targets): + """Compute losses for single level. + + Args: + tl_hmp (Tensor): Top-left corner heatmap for current level with + shape (N, num_classes, H, W). + br_hmp (Tensor): Bottom-right corner heatmap for current level with + shape (N, num_classes, H, W). + tl_off (Tensor): Top-left corner offset for current level with + shape (N, corner_offset_channels, H, W). + br_off (Tensor): Bottom-right corner offset for current level with + shape (N, corner_offset_channels, H, W). + tl_guiding_shift (Tensor): Top-left guiding shift for current level + with shape (N, guiding_shift_channels, H, W). + br_guiding_shift (Tensor): Bottom-right guiding shift for current + level with shape (N, guiding_shift_channels, H, W). + tl_centripetal_shift (Tensor): Top-left centripetal shift for + current level with shape (N, centripetal_shift_channels, H, W). + br_centripetal_shift (Tensor): Bottom-right centripetal shift for + current level with shape (N, centripetal_shift_channels, H, W). + targets (dict): Corner target generated by `get_targets`. + + Returns: + tuple[torch.Tensor]: Losses of the head's different branches + containing the following losses: + + - det_loss (Tensor): Corner keypoint loss. + - off_loss (Tensor): Corner offset loss. + - guiding_loss (Tensor): Guiding shift loss. + - centripetal_loss (Tensor): Centripetal shift loss. 
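+
+ Note:
+ The guiding and centripetal shift losses are supervised only at
+ ground-truth corner locations, using a class-agnostic mask derived
+ from the corner heatmaps, and each is averaged over the top-left
+ and bottom-right branches (see the implementation below).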
+ """ + targets['corner_embedding'] = None + + det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None, + None, tl_off, br_off, + targets) + + gt_tl_guiding_shift = targets['topleft_guiding_shift'] + gt_br_guiding_shift = targets['bottomright_guiding_shift'] + gt_tl_centripetal_shift = targets['topleft_centripetal_shift'] + gt_br_centripetal_shift = targets['bottomright_centripetal_shift'] + + gt_tl_heatmap = targets['topleft_heatmap'] + gt_br_heatmap = targets['bottomright_heatmap'] + # We only compute the offset loss at the real corner position. + # The value of real corner would be 1 in heatmap ground truth. + # The mask is computed in class agnostic mode and its shape is + # batch * 1 * width * height. + tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( + gt_tl_heatmap) + br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( + gt_br_heatmap) + + # Guiding shift loss + tl_guiding_loss = self.loss_guiding_shift( + tl_guiding_shift, + gt_tl_guiding_shift, + tl_mask, + avg_factor=tl_mask.sum()) + br_guiding_loss = self.loss_guiding_shift( + br_guiding_shift, + gt_br_guiding_shift, + br_mask, + avg_factor=br_mask.sum()) + guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0 + # Centripetal shift loss + tl_centripetal_loss = self.loss_centripetal_shift( + tl_centripetal_shift, + gt_tl_centripetal_shift, + tl_mask, + avg_factor=tl_mask.sum()) + br_centripetal_loss = self.loss_centripetal_shift( + br_centripetal_shift, + gt_br_centripetal_shift, + br_mask, + avg_factor=br_mask.sum()) + centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0 + + return det_loss, off_loss, guiding_loss, centripetal_loss + + @force_fp32() + def get_bboxes(self, + tl_heats, + br_heats, + tl_offs, + br_offs, + tl_guiding_shifts, + br_guiding_shifts, + tl_centripetal_shifts, + br_centripetal_shifts, + img_metas, + rescale=False, + with_nms=True): + """Transform network output for a batch into bbox predictions. + + Args: + tl_heats (list[Tensor]): Top-left corner heatmaps for each level + with shape (N, num_classes, H, W). + br_heats (list[Tensor]): Bottom-right corner heatmaps for each + level with shape (N, num_classes, H, W). + tl_offs (list[Tensor]): Top-left corner offsets for each level + with shape (N, corner_offset_channels, H, W). + br_offs (list[Tensor]): Bottom-right corner offsets for each level + with shape (N, corner_offset_channels, H, W). + tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each + level with shape (N, guiding_shift_channels, H, W). Useless in + this function, we keep this arg because it's the raw output + from CentripetalHead. + br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for + each level with shape (N, guiding_shift_channels, H, W). + Useless in this function, we keep this arg because it's the + raw output from CentripetalHead. + tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts + for each level with shape (N, centripetal_shift_channels, H, + W). + br_centripetal_shifts (list[Tensor]): Bottom-right centripetal + shifts for each level with shape (N, + centripetal_shift_channels, H, W). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. 
+ """ + assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) + result_list = [] + for img_id in range(len(img_metas)): + result_list.append( + self._get_bboxes_single( + tl_heats[-1][img_id:img_id + 1, :], + br_heats[-1][img_id:img_id + 1, :], + tl_offs[-1][img_id:img_id + 1, :], + br_offs[-1][img_id:img_id + 1, :], + img_metas[img_id], + tl_emb=None, + br_emb=None, + tl_centripetal_shift=tl_centripetal_shifts[-1][ + img_id:img_id + 1, :], + br_centripetal_shift=br_centripetal_shifts[-1][ + img_id:img_id + 1, :], + rescale=rescale, + with_nms=with_nms)) + + return result_list diff --git a/downstream/mmdetection/mmdet/models/dense_heads/corner_head.py b/downstream/mmdetection/mmdet/models/dense_heads/corner_head.py new file mode 100644 index 0000000..c6a2866 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/corner_head.py @@ -0,0 +1,1086 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from logging import warning +from math import ceil, log + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, bias_init_with_prob +from mmcv.ops import CornerPool, batched_nms +from mmcv.runner import BaseModule, force_fp32 + +from mmdet.core import multi_apply +from ..builder import HEADS, build_loss +from ..utils import gaussian_radius, gen_gaussian_target +from ..utils.gaussian_target import (gather_feat, get_local_maximum, + get_topk_from_heatmap, + transpose_and_gather_feat) +from .base_dense_head import BaseDenseHead +from .dense_test_mixins import BBoxTestMixin + + +class BiCornerPool(BaseModule): + """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.) + + Args: + in_channels (int): Input channels of module. + out_channels (int): Output channels of module. + feat_channels (int): Feature channels of module. + directions (list[str]): Directions of two CornerPools. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + directions, + feat_channels=128, + out_channels=128, + norm_cfg=dict(type='BN', requires_grad=True), + init_cfg=None): + super(BiCornerPool, self).__init__(init_cfg) + self.direction1_conv = ConvModule( + in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) + self.direction2_conv = ConvModule( + in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) + + self.aftpool_conv = ConvModule( + feat_channels, + out_channels, + 3, + padding=1, + norm_cfg=norm_cfg, + act_cfg=None) + + self.conv1 = ConvModule( + in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) + self.conv2 = ConvModule( + in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg) + + self.direction1_pool = CornerPool(directions[0]) + self.direction2_pool = CornerPool(directions[1]) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Forward features from the upstream network. + + Args: + x (tensor): Input feature of BiCornerPool. + + Returns: + conv2 (tensor): Output feature of BiCornerPool. 
+ """ + direction1_conv = self.direction1_conv(x) + direction2_conv = self.direction2_conv(x) + direction1_feat = self.direction1_pool(direction1_conv) + direction2_feat = self.direction2_pool(direction2_conv) + aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat) + conv1 = self.conv1(x) + relu = self.relu(aftpool_conv + conv1) + conv2 = self.conv2(relu) + return conv2 + + +@HEADS.register_module() +class CornerHead(BaseDenseHead, BBoxTestMixin): + """Head of CornerNet: Detecting Objects as Paired Keypoints. + + Code is modified from the `official github repo + `_ . + + More details can be found in the `paper + `_ . + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + num_feat_levels (int): Levels of feature from the previous module. 2 + for HourglassNet-104 and 1 for HourglassNet-52. Because + HourglassNet-104 outputs the final feature and intermediate + supervision feature and HourglassNet-52 only outputs the final + feature. Default: 2. + corner_emb_channels (int): Channel of embedding vector. Default: 1. + train_cfg (dict | None): Training config. Useless in CornerHead, + but we keep this variable for SingleStageDetector. Default: None. + test_cfg (dict | None): Testing config of CornerHead. Default: None. + loss_heatmap (dict | None): Config of corner heatmap loss. Default: + GaussianFocalLoss. + loss_embedding (dict | None): Config of corner embedding loss. Default: + AssociativeEmbeddingLoss. + loss_offset (dict | None): Config of corner offset loss. Default: + SmoothL1Loss. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + num_classes, + in_channels, + num_feat_levels=2, + corner_emb_channels=1, + train_cfg=None, + test_cfg=None, + loss_heatmap=dict( + type='GaussianFocalLoss', + alpha=2.0, + gamma=4.0, + loss_weight=1), + loss_embedding=dict( + type='AssociativeEmbeddingLoss', + pull_weight=0.25, + push_weight=0.25), + loss_offset=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=1), + init_cfg=None): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(CornerHead, self).__init__(init_cfg) + self.num_classes = num_classes + self.in_channels = in_channels + self.corner_emb_channels = corner_emb_channels + self.with_corner_emb = self.corner_emb_channels > 0 + self.corner_offset_channels = 2 + self.num_feat_levels = num_feat_levels + self.loss_heatmap = build_loss( + loss_heatmap) if loss_heatmap is not None else None + self.loss_embedding = build_loss( + loss_embedding) if loss_embedding is not None else None + self.loss_offset = build_loss( + loss_offset) if loss_offset is not None else None + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.fp16_enabled = False + self._init_layers() + + def _make_layers(self, out_channels, in_channels=256, feat_channels=256): + """Initialize conv sequential for CornerHead.""" + return nn.Sequential( + ConvModule(in_channels, feat_channels, 3, padding=1), + ConvModule( + feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)) + + def _init_corner_kpt_layers(self): + """Initialize corner keypoint layers. + + Including corner heatmap branch and corner offset branch. Each branch + has two parts: prefix `tl_` for top-left and `br_` for bottom-right. 
+ """ + self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList() + self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList() + self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList() + + for _ in range(self.num_feat_levels): + self.tl_pool.append( + BiCornerPool( + self.in_channels, ['top', 'left'], + out_channels=self.in_channels)) + self.br_pool.append( + BiCornerPool( + self.in_channels, ['bottom', 'right'], + out_channels=self.in_channels)) + + self.tl_heat.append( + self._make_layers( + out_channels=self.num_classes, + in_channels=self.in_channels)) + self.br_heat.append( + self._make_layers( + out_channels=self.num_classes, + in_channels=self.in_channels)) + + self.tl_off.append( + self._make_layers( + out_channels=self.corner_offset_channels, + in_channels=self.in_channels)) + self.br_off.append( + self._make_layers( + out_channels=self.corner_offset_channels, + in_channels=self.in_channels)) + + def _init_corner_emb_layers(self): + """Initialize corner embedding layers. + + Only include corner embedding branch with two parts: prefix `tl_` for + top-left and `br_` for bottom-right. + """ + self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList() + + for _ in range(self.num_feat_levels): + self.tl_emb.append( + self._make_layers( + out_channels=self.corner_emb_channels, + in_channels=self.in_channels)) + self.br_emb.append( + self._make_layers( + out_channels=self.corner_emb_channels, + in_channels=self.in_channels)) + + def _init_layers(self): + """Initialize layers for CornerHead. + + Including two parts: corner keypoint layers and corner embedding layers + """ + self._init_corner_kpt_layers() + if self.with_corner_emb: + self._init_corner_emb_layers() + + def init_weights(self): + super(CornerHead, self).init_weights() + bias_init = bias_init_with_prob(0.1) + for i in range(self.num_feat_levels): + # The initialization of parameters are different between + # nn.Conv2d and ConvModule. Our experiments show that + # using the original initialization of nn.Conv2d increases + # the final mAP by about 0.2% + self.tl_heat[i][-1].conv.reset_parameters() + self.tl_heat[i][-1].conv.bias.data.fill_(bias_init) + self.br_heat[i][-1].conv.reset_parameters() + self.br_heat[i][-1].conv.bias.data.fill_(bias_init) + self.tl_off[i][-1].conv.reset_parameters() + self.br_off[i][-1].conv.reset_parameters() + if self.with_corner_emb: + self.tl_emb[i][-1].conv.reset_parameters() + self.br_emb[i][-1].conv.reset_parameters() + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of corner heatmaps, offset heatmaps and + embedding heatmaps. + - tl_heats (list[Tensor]): Top-left corner heatmaps for all + levels, each is a 4D-tensor, the channels number is + num_classes. + - br_heats (list[Tensor]): Bottom-right corner heatmaps for all + levels, each is a 4D-tensor, the channels number is + num_classes. + - tl_embs (list[Tensor] | list[None]): Top-left embedding + heatmaps for all levels, each is a 4D-tensor or None. + If not None, the channels number is corner_emb_channels. + - br_embs (list[Tensor] | list[None]): Bottom-right embedding + heatmaps for all levels, each is a 4D-tensor or None. + If not None, the channels number is corner_emb_channels. + - tl_offs (list[Tensor]): Top-left offset heatmaps for all + levels, each is a 4D-tensor. The channels number is + corner_offset_channels. 
+ - br_offs (list[Tensor]): Bottom-right offset heatmaps for all + levels, each is a 4D-tensor. The channels number is + corner_offset_channels. + """ + lvl_ind = list(range(self.num_feat_levels)) + return multi_apply(self.forward_single, feats, lvl_ind) + + def forward_single(self, x, lvl_ind, return_pool=False): + """Forward feature of a single level. + + Args: + x (Tensor): Feature of a single level. + lvl_ind (int): Level index of current feature. + return_pool (bool): Return corner pool feature or not. + + Returns: + tuple[Tensor]: A tuple of CornerHead's output for current feature + level. Containing the following Tensors: + + - tl_heat (Tensor): Predicted top-left corner heatmap. + - br_heat (Tensor): Predicted bottom-right corner heatmap. + - tl_emb (Tensor | None): Predicted top-left embedding heatmap. + None for `self.with_corner_emb == False`. + - br_emb (Tensor | None): Predicted bottom-right embedding + heatmap. None for `self.with_corner_emb == False`. + - tl_off (Tensor): Predicted top-left offset heatmap. + - br_off (Tensor): Predicted bottom-right offset heatmap. + - tl_pool (Tensor): Top-left corner pool feature. Not must + have. + - br_pool (Tensor): Bottom-right corner pool feature. Not must + have. + """ + tl_pool = self.tl_pool[lvl_ind](x) + tl_heat = self.tl_heat[lvl_ind](tl_pool) + br_pool = self.br_pool[lvl_ind](x) + br_heat = self.br_heat[lvl_ind](br_pool) + + tl_emb, br_emb = None, None + if self.with_corner_emb: + tl_emb = self.tl_emb[lvl_ind](tl_pool) + br_emb = self.br_emb[lvl_ind](br_pool) + + tl_off = self.tl_off[lvl_ind](tl_pool) + br_off = self.br_off[lvl_ind](br_pool) + + result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off] + if return_pool: + result_list.append(tl_pool) + result_list.append(br_pool) + + return result_list + + def get_targets(self, + gt_bboxes, + gt_labels, + feat_shape, + img_shape, + with_corner_emb=False, + with_guiding_shift=False, + with_centripetal_shift=False): + """Generate corner targets. + + Including corner heatmap, corner offset. + + Optional: corner embedding, corner guiding shift, centripetal shift. + + For CornerNet, we generate corner heatmap, corner offset and corner + embedding from this function. + + For CentripetalNet, we generate corner heatmap, corner offset, guiding + shift and centripetal shift from this function. + + Args: + gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each + has shape (num_gt, 4). + gt_labels (list[Tensor]): Ground truth labels of each box, each has + shape (num_gt,). + feat_shape (list[int]): Shape of output feature, + [batch, channel, height, width]. + img_shape (list[int]): Shape of input image, + [height, width, channel]. + with_corner_emb (bool): Generate corner embedding target or not. + Default: False. + with_guiding_shift (bool): Generate guiding shift target or not. + Default: False. + with_centripetal_shift (bool): Generate centripetal shift target or + not. Default: False. + + Returns: + dict: Ground truth of corner heatmap, corner offset, corner + embedding, guiding shift and centripetal shift. Containing the + following keys: + + - topleft_heatmap (Tensor): Ground truth top-left corner + heatmap. + - bottomright_heatmap (Tensor): Ground truth bottom-right + corner heatmap. + - topleft_offset (Tensor): Ground truth top-left corner offset. + - bottomright_offset (Tensor): Ground truth bottom-right corner + offset. + - corner_embedding (list[list[list[int]]]): Ground truth corner + embedding. Not must have. 
+ - topleft_guiding_shift (Tensor): Ground truth top-left corner + guiding shift. Not must have. + - bottomright_guiding_shift (Tensor): Ground truth bottom-right + corner guiding shift. Not must have. + - topleft_centripetal_shift (Tensor): Ground truth top-left + corner centripetal shift. Not must have. + - bottomright_centripetal_shift (Tensor): Ground truth + bottom-right corner centripetal shift. Not must have. + """ + batch_size, _, height, width = feat_shape + img_h, img_w = img_shape[:2] + + width_ratio = float(width / img_w) + height_ratio = float(height / img_h) + + gt_tl_heatmap = gt_bboxes[-1].new_zeros( + [batch_size, self.num_classes, height, width]) + gt_br_heatmap = gt_bboxes[-1].new_zeros( + [batch_size, self.num_classes, height, width]) + gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) + gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) + + if with_corner_emb: + match = [] + + # Guiding shift is a kind of offset, from center to corner + if with_guiding_shift: + gt_tl_guiding_shift = gt_bboxes[-1].new_zeros( + [batch_size, 2, height, width]) + gt_br_guiding_shift = gt_bboxes[-1].new_zeros( + [batch_size, 2, height, width]) + # Centripetal shift is also a kind of offset, from center to corner + # and normalized by log. + if with_centripetal_shift: + gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros( + [batch_size, 2, height, width]) + gt_br_centripetal_shift = gt_bboxes[-1].new_zeros( + [batch_size, 2, height, width]) + + for batch_id in range(batch_size): + # Ground truth of corner embedding per image is a list of coord set + corner_match = [] + for box_id in range(len(gt_labels[batch_id])): + left, top, right, bottom = gt_bboxes[batch_id][box_id] + center_x = (left + right) / 2.0 + center_y = (top + bottom) / 2.0 + label = gt_labels[batch_id][box_id] + + # Use coords in the feature level to generate ground truth + scale_left = left * width_ratio + scale_right = right * width_ratio + scale_top = top * height_ratio + scale_bottom = bottom * height_ratio + scale_center_x = center_x * width_ratio + scale_center_y = center_y * height_ratio + + # Int coords on feature map/ground truth tensor + left_idx = int(min(scale_left, width - 1)) + right_idx = int(min(scale_right, width - 1)) + top_idx = int(min(scale_top, height - 1)) + bottom_idx = int(min(scale_bottom, height - 1)) + + # Generate gaussian heatmap + scale_box_width = ceil(scale_right - scale_left) + scale_box_height = ceil(scale_bottom - scale_top) + radius = gaussian_radius((scale_box_height, scale_box_width), + min_overlap=0.3) + radius = max(0, int(radius)) + gt_tl_heatmap[batch_id, label] = gen_gaussian_target( + gt_tl_heatmap[batch_id, label], [left_idx, top_idx], + radius) + gt_br_heatmap[batch_id, label] = gen_gaussian_target( + gt_br_heatmap[batch_id, label], [right_idx, bottom_idx], + radius) + + # Generate corner offset + left_offset = scale_left - left_idx + top_offset = scale_top - top_idx + right_offset = scale_right - right_idx + bottom_offset = scale_bottom - bottom_idx + gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset + gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset + gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset + gt_br_offset[batch_id, 1, bottom_idx, + right_idx] = bottom_offset + + # Generate corner embedding + if with_corner_emb: + corner_match.append([[top_idx, left_idx], + [bottom_idx, right_idx]]) + # Generate guiding shift + if with_guiding_shift: + gt_tl_guiding_shift[batch_id, 0, top_idx, + left_idx] = 
scale_center_x - left_idx + gt_tl_guiding_shift[batch_id, 1, top_idx, + left_idx] = scale_center_y - top_idx + gt_br_guiding_shift[batch_id, 0, bottom_idx, + right_idx] = right_idx - scale_center_x + gt_br_guiding_shift[ + batch_id, 1, bottom_idx, + right_idx] = bottom_idx - scale_center_y + # Generate centripetal shift + if with_centripetal_shift: + gt_tl_centripetal_shift[batch_id, 0, top_idx, + left_idx] = log(scale_center_x - + scale_left) + gt_tl_centripetal_shift[batch_id, 1, top_idx, + left_idx] = log(scale_center_y - + scale_top) + gt_br_centripetal_shift[batch_id, 0, bottom_idx, + right_idx] = log(scale_right - + scale_center_x) + gt_br_centripetal_shift[batch_id, 1, bottom_idx, + right_idx] = log(scale_bottom - + scale_center_y) + + if with_corner_emb: + match.append(corner_match) + + target_result = dict( + topleft_heatmap=gt_tl_heatmap, + topleft_offset=gt_tl_offset, + bottomright_heatmap=gt_br_heatmap, + bottomright_offset=gt_br_offset) + + if with_corner_emb: + target_result.update(corner_embedding=match) + if with_guiding_shift: + target_result.update( + topleft_guiding_shift=gt_tl_guiding_shift, + bottomright_guiding_shift=gt_br_guiding_shift) + if with_centripetal_shift: + target_result.update( + topleft_centripetal_shift=gt_tl_centripetal_shift, + bottomright_centripetal_shift=gt_br_centripetal_shift) + + return target_result + + @force_fp32() + def loss(self, + tl_heats, + br_heats, + tl_embs, + br_embs, + tl_offs, + br_offs, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + tl_heats (list[Tensor]): Top-left corner heatmaps for each level + with shape (N, num_classes, H, W). + br_heats (list[Tensor]): Bottom-right corner heatmaps for each + level with shape (N, num_classes, H, W). + tl_embs (list[Tensor]): Top-left corner embeddings for each level + with shape (N, corner_emb_channels, H, W). + br_embs (list[Tensor]): Bottom-right corner embeddings for each + level with shape (N, corner_emb_channels, H, W). + tl_offs (list[Tensor]): Top-left corner offsets for each level + with shape (N, corner_offset_channels, H, W). + br_offs (list[Tensor]): Bottom-right corner offsets for each level + with shape (N, corner_offset_channels, H, W). + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [left, top, right, bottom] format. + gt_labels (list[Tensor]): Class indices corresponding to each box. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor] | None): Specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. Containing the + following losses: + + - det_loss (list[Tensor]): Corner keypoint losses of all + feature levels. + - pull_loss (list[Tensor]): Part one of AssociativeEmbedding + losses of all feature levels. + - push_loss (list[Tensor]): Part two of AssociativeEmbedding + losses of all feature levels. + - off_loss (list[Tensor]): Corner offset losses of all feature + levels. 
+ """ + targets = self.get_targets( + gt_bboxes, + gt_labels, + tl_heats[-1].shape, + img_metas[0]['pad_shape'], + with_corner_emb=self.with_corner_emb) + mlvl_targets = [targets for _ in range(self.num_feat_levels)] + det_losses, pull_losses, push_losses, off_losses = multi_apply( + self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs, + br_offs, mlvl_targets) + loss_dict = dict(det_loss=det_losses, off_loss=off_losses) + if self.with_corner_emb: + loss_dict.update(pull_loss=pull_losses, push_loss=push_losses) + return loss_dict + + def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off, + targets): + """Compute losses for single level. + + Args: + tl_hmp (Tensor): Top-left corner heatmap for current level with + shape (N, num_classes, H, W). + br_hmp (Tensor): Bottom-right corner heatmap for current level with + shape (N, num_classes, H, W). + tl_emb (Tensor): Top-left corner embedding for current level with + shape (N, corner_emb_channels, H, W). + br_emb (Tensor): Bottom-right corner embedding for current level + with shape (N, corner_emb_channels, H, W). + tl_off (Tensor): Top-left corner offset for current level with + shape (N, corner_offset_channels, H, W). + br_off (Tensor): Bottom-right corner offset for current level with + shape (N, corner_offset_channels, H, W). + targets (dict): Corner target generated by `get_targets`. + + Returns: + tuple[torch.Tensor]: Losses of the head's different branches + containing the following losses: + + - det_loss (Tensor): Corner keypoint loss. + - pull_loss (Tensor): Part one of AssociativeEmbedding loss. + - push_loss (Tensor): Part two of AssociativeEmbedding loss. + - off_loss (Tensor): Corner offset loss. + """ + gt_tl_hmp = targets['topleft_heatmap'] + gt_br_hmp = targets['bottomright_heatmap'] + gt_tl_off = targets['topleft_offset'] + gt_br_off = targets['bottomright_offset'] + gt_embedding = targets['corner_embedding'] + + # Detection loss + tl_det_loss = self.loss_heatmap( + tl_hmp.sigmoid(), + gt_tl_hmp, + avg_factor=max(1, + gt_tl_hmp.eq(1).sum())) + br_det_loss = self.loss_heatmap( + br_hmp.sigmoid(), + gt_br_hmp, + avg_factor=max(1, + gt_br_hmp.eq(1).sum())) + det_loss = (tl_det_loss + br_det_loss) / 2.0 + + # AssociativeEmbedding loss + if self.with_corner_emb and self.loss_embedding is not None: + pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb, + gt_embedding) + else: + pull_loss, push_loss = None, None + + # Offset loss + # We only compute the offset loss at the real corner position. + # The value of real corner would be 1 in heatmap ground truth. + # The mask is computed in class agnostic mode and its shape is + # batch * 1 * width * height. + tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( + gt_tl_hmp) + br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( + gt_br_hmp) + tl_off_loss = self.loss_offset( + tl_off, + gt_tl_off, + tl_off_mask, + avg_factor=max(1, tl_off_mask.sum())) + br_off_loss = self.loss_offset( + br_off, + gt_br_off, + br_off_mask, + avg_factor=max(1, br_off_mask.sum())) + + off_loss = (tl_off_loss + br_off_loss) / 2.0 + + return det_loss, pull_loss, push_loss, off_loss + + @force_fp32() + def get_bboxes(self, + tl_heats, + br_heats, + tl_embs, + br_embs, + tl_offs, + br_offs, + img_metas, + rescale=False, + with_nms=True): + """Transform network output for a batch into bbox predictions. + + Args: + tl_heats (list[Tensor]): Top-left corner heatmaps for each level + with shape (N, num_classes, H, W). 
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each + level with shape (N, num_classes, H, W). + tl_embs (list[Tensor]): Top-left corner embeddings for each level + with shape (N, corner_emb_channels, H, W). + br_embs (list[Tensor]): Bottom-right corner embeddings for each + level with shape (N, corner_emb_channels, H, W). + tl_offs (list[Tensor]): Top-left corner offsets for each level + with shape (N, corner_offset_channels, H, W). + br_offs (list[Tensor]): Bottom-right corner offsets for each level + with shape (N, corner_offset_channels, H, W). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + """ + assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) + result_list = [] + for img_id in range(len(img_metas)): + result_list.append( + self._get_bboxes_single( + tl_heats[-1][img_id:img_id + 1, :], + br_heats[-1][img_id:img_id + 1, :], + tl_offs[-1][img_id:img_id + 1, :], + br_offs[-1][img_id:img_id + 1, :], + img_metas[img_id], + tl_emb=tl_embs[-1][img_id:img_id + 1, :], + br_emb=br_embs[-1][img_id:img_id + 1, :], + rescale=rescale, + with_nms=with_nms)) + + return result_list + + def _get_bboxes_single(self, + tl_heat, + br_heat, + tl_off, + br_off, + img_meta, + tl_emb=None, + br_emb=None, + tl_centripetal_shift=None, + br_centripetal_shift=None, + rescale=False, + with_nms=True): + """Transform outputs for a single batch item into bbox predictions. + + Args: + tl_heat (Tensor): Top-left corner heatmap for current level with + shape (N, num_classes, H, W). + br_heat (Tensor): Bottom-right corner heatmap for current level + with shape (N, num_classes, H, W). + tl_off (Tensor): Top-left corner offset for current level with + shape (N, corner_offset_channels, H, W). + br_off (Tensor): Bottom-right corner offset for current level with + shape (N, corner_offset_channels, H, W). + img_meta (dict): Meta information of current image, e.g., + image size, scaling factor, etc. + tl_emb (Tensor): Top-left corner embedding for current level with + shape (N, corner_emb_channels, H, W). + br_emb (Tensor): Bottom-right corner embedding for current level + with shape (N, corner_emb_channels, H, W). + tl_centripetal_shift: Top-left corner's centripetal shift for + current level with shape (N, 2, H, W). + br_centripetal_shift: Bottom-right corner's centripetal shift for + current level with shape (N, 2, H, W). + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. 
+ """ + if isinstance(img_meta, (list, tuple)): + img_meta = img_meta[0] + + batch_bboxes, batch_scores, batch_clses = self.decode_heatmap( + tl_heat=tl_heat.sigmoid(), + br_heat=br_heat.sigmoid(), + tl_off=tl_off, + br_off=br_off, + tl_emb=tl_emb, + br_emb=br_emb, + tl_centripetal_shift=tl_centripetal_shift, + br_centripetal_shift=br_centripetal_shift, + img_meta=img_meta, + k=self.test_cfg.corner_topk, + kernel=self.test_cfg.local_maximum_kernel, + distance_threshold=self.test_cfg.distance_threshold) + + if rescale: + batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor']) + + bboxes = batch_bboxes.view([-1, 4]) + scores = batch_scores.view(-1) + clses = batch_clses.view(-1) + + detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1) + keepinds = (detections[:, -1] > -0.1) + detections = detections[keepinds] + labels = clses[keepinds] + + if with_nms: + detections, labels = self._bboxes_nms(detections, labels, + self.test_cfg) + + return detections, labels + + def _bboxes_nms(self, bboxes, labels, cfg): + if 'nms_cfg' in cfg: + warning.warn('nms_cfg in test_cfg will be deprecated. ' + 'Please rename it as nms') + if 'nms' not in cfg: + cfg.nms = cfg.nms_cfg + + if labels.numel() > 0: + max_num = cfg.max_per_img + bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, + -1].contiguous(), + labels, cfg.nms) + if max_num > 0: + bboxes = bboxes[:max_num] + labels = labels[keep][:max_num] + + return bboxes, labels + + def decode_heatmap(self, + tl_heat, + br_heat, + tl_off, + br_off, + tl_emb=None, + br_emb=None, + tl_centripetal_shift=None, + br_centripetal_shift=None, + img_meta=None, + k=100, + kernel=3, + distance_threshold=0.5, + num_dets=1000): + """Transform outputs for a single batch item into raw bbox predictions. + + Args: + tl_heat (Tensor): Top-left corner heatmap for current level with + shape (N, num_classes, H, W). + br_heat (Tensor): Bottom-right corner heatmap for current level + with shape (N, num_classes, H, W). + tl_off (Tensor): Top-left corner offset for current level with + shape (N, corner_offset_channels, H, W). + br_off (Tensor): Bottom-right corner offset for current level with + shape (N, corner_offset_channels, H, W). + tl_emb (Tensor | None): Top-left corner embedding for current + level with shape (N, corner_emb_channels, H, W). + br_emb (Tensor | None): Bottom-right corner embedding for current + level with shape (N, corner_emb_channels, H, W). + tl_centripetal_shift (Tensor | None): Top-left centripetal shift + for current level with shape (N, 2, H, W). + br_centripetal_shift (Tensor | None): Bottom-right centripetal + shift for current level with shape (N, 2, H, W). + img_meta (dict): Meta information of current image, e.g., + image size, scaling factor, etc. + k (int): Get top k corner keypoints from heatmap. + kernel (int): Max pooling kernel for extract local maximum pixels. + distance_threshold (float): Distance threshold. Top-left and + bottom-right corner keypoints with feature distance less than + the threshold will be regarded as keypoints from same object. + num_dets (int): Num of raw boxes before doing nms. + + Returns: + tuple[torch.Tensor]: Decoded output of CornerHead, containing the + following Tensors: + + - bboxes (Tensor): Coords of each box. + - scores (Tensor): Scores of each box. + - clses (Tensor): Categories of each box. 
+ """ + with_embedding = tl_emb is not None and br_emb is not None + with_centripetal_shift = ( + tl_centripetal_shift is not None + and br_centripetal_shift is not None) + assert with_embedding + with_centripetal_shift == 1 + batch, _, height, width = tl_heat.size() + if torch.onnx.is_in_onnx_export(): + inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2] + else: + inp_h, inp_w, _ = img_meta['pad_shape'] + + # perform nms on heatmaps + tl_heat = get_local_maximum(tl_heat, kernel=kernel) + br_heat = get_local_maximum(br_heat, kernel=kernel) + + tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap( + tl_heat, k=k) + br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap( + br_heat, k=k) + + # We use repeat instead of expand here because expand is a + # shallow-copy function. Thus it could cause unexpected testing result + # sometimes. Using expand will decrease about 10% mAP during testing + # compared to repeat. + tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k) + tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k) + br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1) + br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1) + + tl_off = transpose_and_gather_feat(tl_off, tl_inds) + tl_off = tl_off.view(batch, k, 1, 2) + br_off = transpose_and_gather_feat(br_off, br_inds) + br_off = br_off.view(batch, 1, k, 2) + + tl_xs = tl_xs + tl_off[..., 0] + tl_ys = tl_ys + tl_off[..., 1] + br_xs = br_xs + br_off[..., 0] + br_ys = br_ys + br_off[..., 1] + + if with_centripetal_shift: + tl_centripetal_shift = transpose_and_gather_feat( + tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp() + br_centripetal_shift = transpose_and_gather_feat( + br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp() + + tl_ctxs = tl_xs + tl_centripetal_shift[..., 0] + tl_ctys = tl_ys + tl_centripetal_shift[..., 1] + br_ctxs = br_xs - br_centripetal_shift[..., 0] + br_ctys = br_ys - br_centripetal_shift[..., 1] + + # all possible boxes based on top k corners (ignoring class) + tl_xs *= (inp_w / width) + tl_ys *= (inp_h / height) + br_xs *= (inp_w / width) + br_ys *= (inp_h / height) + + if with_centripetal_shift: + tl_ctxs *= (inp_w / width) + tl_ctys *= (inp_h / height) + br_ctxs *= (inp_w / width) + br_ctys *= (inp_h / height) + + x_off, y_off = 0, 0 # no crop + if not torch.onnx.is_in_onnx_export(): + # since `RandomCenterCropPad` is done on CPU with numpy and it's + # not dynamic traceable when exporting to ONNX, thus 'border' + # does not appears as key in 'img_meta'. As a tmp solution, + # we move this 'border' handle part to the postprocess after + # finished exporting to ONNX, which is handle in + # `mmdet/core/export/model_wrappers.py`. Though difference between + # pytorch and exported onnx model, it might be ignored since + # comparable performance is achieved between them (e.g. 
40.4 vs + # 40.6 on COCO val2017, for CornerNet without test-time flip) + if 'border' in img_meta: + x_off = img_meta['border'][2] + y_off = img_meta['border'][0] + + tl_xs -= x_off + tl_ys -= y_off + br_xs -= x_off + br_ys -= y_off + + zeros = tl_xs.new_zeros(*tl_xs.size()) + tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros) + tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros) + br_xs = torch.where(br_xs > 0.0, br_xs, zeros) + br_ys = torch.where(br_ys > 0.0, br_ys, zeros) + + bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3) + area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs() + + if with_centripetal_shift: + tl_ctxs -= x_off + tl_ctys -= y_off + br_ctxs -= x_off + br_ctys -= y_off + + tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs) + tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys) + br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs) + br_ctys *= br_ctys.gt(0.0).type_as(br_ctys) + + ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys), + dim=3) + area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs() + + rcentral = torch.zeros_like(ct_bboxes) + # magic nums from paper section 4.1 + mu = torch.ones_like(area_bboxes) / 2.4 + mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu + + bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2 + bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2 + rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] - + bboxes[..., 0]) / 2 + rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] - + bboxes[..., 1]) / 2 + rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] - + bboxes[..., 0]) / 2 + rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] - + bboxes[..., 1]) / 2 + area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) * + (rcentral[..., 3] - rcentral[..., 1])).abs() + dists = area_ct_bboxes / area_rcentral + + tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | ( + ct_bboxes[..., 0] >= rcentral[..., 2]) + tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | ( + ct_bboxes[..., 1] >= rcentral[..., 3]) + br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | ( + ct_bboxes[..., 2] >= rcentral[..., 2]) + br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | ( + ct_bboxes[..., 3] >= rcentral[..., 3]) + + if with_embedding: + tl_emb = transpose_and_gather_feat(tl_emb, tl_inds) + tl_emb = tl_emb.view(batch, k, 1) + br_emb = transpose_and_gather_feat(br_emb, br_inds) + br_emb = br_emb.view(batch, 1, k) + dists = torch.abs(tl_emb - br_emb) + + tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k) + br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1) + + scores = (tl_scores + br_scores) / 2 # scores for all possible boxes + + # tl and br should have same class + tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k) + br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1) + cls_inds = (tl_clses != br_clses) + + # reject boxes based on distances + dist_inds = dists > distance_threshold + + # reject boxes based on widths and heights + width_inds = (br_xs <= tl_xs) + height_inds = (br_ys <= tl_ys) + + # No use `scores[cls_inds]`, instead we use `torch.where` here. + # Since only 1-D indices with type 'tensor(bool)' are supported + # when exporting to ONNX, any other bool indices with more dimensions + # (e.g. 
2-D bool tensor) as input parameter in node is invalid + negative_scores = -1 * torch.ones_like(scores) + scores = torch.where(cls_inds, negative_scores, scores) + scores = torch.where(width_inds, negative_scores, scores) + scores = torch.where(height_inds, negative_scores, scores) + scores = torch.where(dist_inds, negative_scores, scores) + + if with_centripetal_shift: + scores[tl_ctx_inds] = -1 + scores[tl_cty_inds] = -1 + scores[br_ctx_inds] = -1 + scores[br_cty_inds] = -1 + + scores = scores.view(batch, -1) + scores, inds = torch.topk(scores, num_dets) + scores = scores.unsqueeze(2) + + bboxes = bboxes.view(batch, -1, 4) + bboxes = gather_feat(bboxes, inds) + + clses = tl_clses.contiguous().view(batch, -1, 1) + clses = gather_feat(clses, inds).float() + + return bboxes, scores, clses + + def onnx_export(self, + tl_heats, + br_heats, + tl_embs, + br_embs, + tl_offs, + br_offs, + img_metas, + rescale=False, + with_nms=True): + """Transform network output for a batch into bbox predictions. + + Args: + tl_heats (list[Tensor]): Top-left corner heatmaps for each level + with shape (N, num_classes, H, W). + br_heats (list[Tensor]): Bottom-right corner heatmaps for each + level with shape (N, num_classes, H, W). + tl_embs (list[Tensor]): Top-left corner embeddings for each level + with shape (N, corner_emb_channels, H, W). + br_embs (list[Tensor]): Bottom-right corner embeddings for each + level with shape (N, corner_emb_channels, H, W). + tl_offs (list[Tensor]): Top-left corner offsets for each level + with shape (N, corner_offset_channels, H, W). + br_offs (list[Tensor]): Bottom-right corner offsets for each level + with shape (N, corner_offset_channels, H, W). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + tuple[Tensor, Tensor]: First tensor bboxes with shape + [N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score) + and second element is class labels of shape [N, num_det]. + """ + assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len( + img_metas) == 1 + result_list = [] + for img_id in range(len(img_metas)): + result_list.append( + self._get_bboxes_single( + tl_heats[-1][img_id:img_id + 1, :], + br_heats[-1][img_id:img_id + 1, :], + tl_offs[-1][img_id:img_id + 1, :], + br_offs[-1][img_id:img_id + 1, :], + img_metas[img_id], + tl_emb=tl_embs[-1][img_id:img_id + 1, :], + br_emb=br_embs[-1][img_id:img_id + 1, :], + rescale=rescale, + with_nms=with_nms)) + + detections, labels = result_list[0] + # batch_size 1 here, [1, num_det, 5], [1, num_det] + return detections.unsqueeze(0), labels.unsqueeze(0) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/ddod_head.py b/downstream/mmdetection/mmdet/models/dense_heads/ddod_head.py new file mode 100644 index 0000000..b2ff223 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/ddod_head.py @@ -0,0 +1,778 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init +from mmcv.runner import force_fp32 + +from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, + images_to_levels, multi_apply, reduce_mean, unmap) +from mmdet.core.bbox import bbox_overlaps +from ..builder import HEADS, build_loss +from .anchor_head import AnchorHead + +EPS = 1e-12 + + +@HEADS.register_module() +class DDODHead(AnchorHead): + """DDOD head decomposes conjunctions lying in most current one-stage + detectors via label assignment disentanglement, spatial feature + disentanglement, and pyramid supervision disentanglement. + + https://arxiv.org/abs/2107.02963 + + Args: + num_classes (int): Number of categories excluding the + background category. + in_channels (int): Number of channels in the input feature map. + stacked_convs (int): The number of stacked Conv. Default: 4. + conv_cfg (dict): Conv config of ddod head. Default: None. + use_dcn (bool): Use dcn, Same as ATSS when False. Default: True. + norm_cfg (dict): Normal config of ddod head. Default: + dict(type='GN', num_groups=32, requires_grad=True). + loss_iou (dict): Config of IoU loss. Default: + dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0). + """ + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + conv_cfg=None, + use_dcn=True, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + loss_iou=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + **kwargs): + self.stacked_convs = stacked_convs + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.use_dcn = use_dcn + super(DDODHead, self).__init__(num_classes, in_channels, **kwargs) + + self.sampling = False + if self.train_cfg: + self.cls_assigner = build_assigner(self.train_cfg.assigner) + self.reg_assigner = build_assigner(self.train_cfg.reg_assigner) + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.loss_iou = build_loss(loss_iou) + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=dict(type='DCN', deform_groups=1) + if i == 0 and self.use_dcn else self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=dict(type='DCN', deform_groups=1) + if i == 0 and self.use_dcn else self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.atss_cls = nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.cls_out_channels, + 3, + padding=1) + self.atss_reg = nn.Conv2d( + self.feat_channels, self.num_base_priors * 4, 3, padding=1) + self.atss_iou = nn.Conv2d( + self.feat_channels, self.num_base_priors * 1, 3, padding=1) + self.scales = nn.ModuleList( + [Scale(1.0) for _ in self.prior_generator.strides]) + + # we use the global list in loss + self.cls_num_pos_samples_per_level = [ + 0. for _ in range(len(self.prior_generator.strides)) + ] + self.reg_num_pos_samples_per_level = [ + 0. 
for _ in range(len(self.prior_generator.strides)) + ] + + def init_weights(self): + """Initialize weights of the head.""" + for m in self.cls_convs: + normal_init(m.conv, std=0.01) + for m in self.reg_convs: + normal_init(m.conv, std=0.01) + normal_init(self.atss_reg, std=0.01) + normal_init(self.atss_iou, std=0.01) + bias_cls = bias_init_with_prob(0.01) + normal_init(self.atss_cls, std=0.01, bias=bias_cls) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of classification scores and bbox prediction + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. + iou_preds (list[Tensor]): IoU scores for all scale levels, + each is a 4D-tensor, the channels number is + num_base_priors * 1. + """ + return multi_apply(self.forward_single, feats, self.scales) + + def forward_single(self, x, scale): + """Forward feature of a single scale level. + + Args: + x (Tensor): Features of a single scale level. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + + Returns: + tuple: + - cls_score (Tensor): Cls scores for a single scale level \ + the channels number is num_base_priors * num_classes. + - bbox_pred (Tensor): Box energies / deltas for a single \ + scale level, the channels number is num_base_priors * 4. + - iou_pred (Tensor): Iou for a single scale level, the \ + channel number is (N, num_base_priors * 1, H, W). + """ + cls_feat = x + reg_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + cls_score = self.atss_cls(cls_feat) + # we just follow atss, not apply exp in bbox_pred + bbox_pred = scale(self.atss_reg(reg_feat)).float() + iou_pred = self.atss_iou(reg_feat) + return cls_score, bbox_pred, iou_pred + + def loss_cls_single(self, cls_score, labels, label_weights, + reweight_factor, num_total_samples): + """Compute cls loss of a single scale level. + + Args: + cls_score (Tensor): Box scores for each scale level + Has shape (N, num_base_priors * num_classes, H, W). + labels (Tensor): Labels of each anchors with shape + (N, num_total_anchors). + label_weights (Tensor): Label weights of each anchor with shape + (N, num_total_anchors) + reweight_factor (list[int]): Reweight factor for cls and reg + loss. + num_total_samples (int): Number of positive samples that is + reduced over all GPUs. + + Returns: + tuple[Tensor]: A tuple of loss components. + """ + cls_score = cls_score.permute(0, 2, 3, 1).reshape( + -1, self.cls_out_channels).contiguous() + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + loss_cls = self.loss_cls( + cls_score, labels, label_weights, avg_factor=num_total_samples) + return reweight_factor * loss_cls, + + def loss_reg_single(self, anchors, bbox_pred, iou_pred, labels, + label_weights, bbox_targets, bbox_weights, + reweight_factor, num_total_samples): + """Compute reg loss of a single scale level. + + Args: + anchors (Tensor): Box reference for each scale level with shape + (N, num_total_anchors, 4). + bbox_pred (Tensor): Box energies / deltas for each scale + level with shape (N, num_base_priors * 4, H, W). 
+ iou_pred (Tensor): Iou for a single scale level, the + channel number is (N, num_base_priors * 1, H, W). + labels (Tensor): Labels of each anchors with shape + (N, num_total_anchors). + label_weights (Tensor): Label weights of each anchor with shape + (N, num_total_anchors) + bbox_targets (Tensor): BBox regression targets of each anchor + weight shape (N, num_total_anchors, 4). + bbox_weights (Tensor): BBox weights of all anchors in the + image with shape (N, 4) + reweight_factor (list[int]): Reweight factor for cls and reg + loss. + num_total_samples (int): Number of positive samples that is + reduced over all GPUs. + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + anchors = anchors.reshape(-1, 4) + bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1, ) + bbox_targets = bbox_targets.reshape(-1, 4) + bbox_weights = bbox_weights.reshape(-1, 4) + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + + iou_targets = label_weights.new_zeros(labels.shape) + iou_weights = label_weights.new_zeros(labels.shape) + iou_weights[(bbox_weights.sum(axis=1) > 0).nonzero( + as_tuple=False)] = 1. + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & + (labels < bg_class_ind)).nonzero(as_tuple=False).squeeze(1) + + if len(pos_inds) > 0: + pos_bbox_targets = bbox_targets[pos_inds] + pos_bbox_pred = bbox_pred[pos_inds] + pos_anchors = anchors[pos_inds] + + pos_decode_bbox_pred = self.bbox_coder.decode( + pos_anchors, pos_bbox_pred) + pos_decode_bbox_targets = self.bbox_coder.decode( + pos_anchors, pos_bbox_targets) + + # regression loss + loss_bbox = self.loss_bbox( + pos_decode_bbox_pred, + pos_decode_bbox_targets, + avg_factor=num_total_samples) + + iou_targets[pos_inds] = bbox_overlaps( + pos_decode_bbox_pred.detach(), + pos_decode_bbox_targets, + is_aligned=True) + loss_iou = self.loss_iou( + iou_pred, + iou_targets, + iou_weights, + avg_factor=num_total_samples) + else: + loss_bbox = bbox_pred.sum() * 0 + loss_iou = iou_pred.sum() * 0 + + return reweight_factor * loss_bbox, reweight_factor * loss_iou + + def calc_reweight_factor(self, labels_list): + """Compute reweight_factor for regression and classification loss.""" + # get pos samples for each level + bg_class_ind = self.num_classes + for ii, each_level_label in enumerate(labels_list): + pos_inds = ((each_level_label >= 0) & + (each_level_label < bg_class_ind)).nonzero( + as_tuple=False).squeeze(1) + self.cls_num_pos_samples_per_level[ii] += len(pos_inds) + # get reweight factor from 1 ~ 2 with bilinear interpolation + min_pos_samples = min(self.cls_num_pos_samples_per_level) + max_pos_samples = max(self.cls_num_pos_samples_per_level) + interval = 1. / (max_pos_samples - min_pos_samples + 1e-10) + reweight_factor_per_level = [] + for pos_samples in self.cls_num_pos_samples_per_level: + factor = 2. - (pos_samples - min_pos_samples) * interval + reweight_factor_per_level.append(factor) + return reweight_factor_per_level + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) + def loss(self, + cls_scores, + bbox_preds, + iou_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. 
+ + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_base_priors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_base_priors * 4, H, W) + iou_preds (list[Tensor]): Score factor for all scale level, + each is a 4D-tensor, has shape (batch_size, 1, H, W). + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor] | None): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + + # calculate common vars for cls and reg assigners at once + targets_com = self.process_predictions_and_anchors( + anchor_list, valid_flag_list, cls_scores, bbox_preds, img_metas, + gt_bboxes_ignore) + (anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, + bbox_pred_list, gt_bboxes_ignore_list) = targets_com + + # classification branch assigner + cls_targets = self.get_cls_targets( + anchor_list, + valid_flag_list, + num_level_anchors_list, + cls_score_list, + bbox_pred_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore_list, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_targets is None: + return None + + (cls_anchor_list, labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) = cls_targets + + num_total_samples = reduce_mean( + torch.tensor(num_total_pos, dtype=torch.float, + device=device)).item() + num_total_samples = max(num_total_samples, 1.0) + + reweight_factor_per_level = self.calc_reweight_factor(labels_list) + + cls_losses_cls, = multi_apply( + self.loss_cls_single, + cls_scores, + labels_list, + label_weights_list, + reweight_factor_per_level, + num_total_samples=num_total_samples) + + # regression branch assigner + reg_targets = self.get_reg_targets( + anchor_list, + valid_flag_list, + num_level_anchors_list, + cls_score_list, + bbox_pred_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore_list, + gt_labels_list=gt_labels, + label_channels=label_channels) + if reg_targets is None: + return None + + (reg_anchor_list, labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) = reg_targets + + num_total_samples = reduce_mean( + torch.tensor(num_total_pos, dtype=torch.float, + device=device)).item() + num_total_samples = max(num_total_samples, 1.0) + + reweight_factor_per_level = self.calc_reweight_factor(labels_list) + + reg_losses_bbox, reg_losses_iou = multi_apply( + self.loss_reg_single, + reg_anchor_list, + bbox_preds, + iou_preds, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + reweight_factor_per_level, + num_total_samples=num_total_samples) + + return dict( + loss_cls=cls_losses_cls, + loss_bbox=reg_losses_bbox, + loss_iou=reg_losses_iou) + + def process_predictions_and_anchors(self, anchor_list, valid_flag_list, + 
cls_scores, bbox_preds, img_metas, + gt_bboxes_ignore_list): + """Compute common vars for regression and classification targets. + + Args: + anchor_list (list[Tensor]): anchors of each image. + valid_flag_list (list[Tensor]): Valid flags of each image. + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding + boxes can be ignored when computing the loss. + + Return: + tuple[Tensor]: A tuple of common loss vars. + """ + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + num_level_anchors_list = [num_level_anchors] * num_imgs + + anchor_list_ = [] + valid_flag_list_ = [] + # concat all level anchors and flags to a single tensor + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + anchor_list_.append(torch.cat(anchor_list[i])) + valid_flag_list_.append(torch.cat(valid_flag_list[i])) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + + num_levels = len(cls_scores) + cls_score_list = [] + bbox_pred_list = [] + + mlvl_cls_score_list = [ + cls_score.permute(0, 2, 3, 1).reshape( + num_imgs, -1, self.num_base_priors * self.cls_out_channels) + for cls_score in cls_scores + ] + mlvl_bbox_pred_list = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_base_priors * 4) + for bbox_pred in bbox_preds + ] + + for i in range(num_imgs): + mlvl_cls_tensor_list = [ + mlvl_cls_score_list[j][i] for j in range(num_levels) + ] + mlvl_bbox_tensor_list = [ + mlvl_bbox_pred_list[j][i] for j in range(num_levels) + ] + cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0) + cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0) + cls_score_list.append(cat_mlvl_cls_score) + bbox_pred_list.append(cat_mlvl_bbox_pred) + return (anchor_list_, valid_flag_list_, num_level_anchors_list, + cls_score_list, bbox_pred_list, gt_bboxes_ignore_list) + + def get_cls_targets(self, + anchor_list, + valid_flag_list, + num_level_anchors_list, + cls_score_list, + bbox_pred_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): + """Get cls targets for DDOD head. + + This method is almost the same as `AnchorHead.get_targets()`. + Besides returning the targets as the parent method does, + it also returns the anchors as the first element of the + returned tuple. + + Args: + anchor_list (list[Tensor]): anchors of each image. + valid_flag_list (list[Tensor]): Valid flags of each image. + num_level_anchors_list (list[Tensor]): Number of anchors of each + scale level of all image. + cls_score_list (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. + bbox_pred_list (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. 
+ img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding + boxes can be ignored when computing the loss. + gt_labels_list (list[Tensor]): class indices corresponding to + each box. + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Return: + tuple[Tensor]: A tuple of cls targets components. + """ + (all_anchors, all_labels, all_label_weights, all_bbox_targets, + all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, + anchor_list, + valid_flag_list, + cls_score_list, + bbox_pred_list, + num_level_anchors_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs, + is_cls_assigner=True) + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) + labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors_list[0]) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors_list[0]) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors_list[0]) + return (anchors_list, labels_list, label_weights_list, + bbox_targets_list, bbox_weights_list, num_total_pos, + num_total_neg) + + def get_reg_targets(self, + anchor_list, + valid_flag_list, + num_level_anchors_list, + cls_score_list, + bbox_pred_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): + """Get reg targets for DDOD head. + + This method is almost the same as `AnchorHead.get_targets()` when + is_cls_assigner is False. Besides returning the targets as the parent + method does, it also returns the anchors as the first element of the + returned tuple. + + Args: + anchor_list (list[Tensor]): anchors of each image. + valid_flag_list (list[Tensor]): Valid flags of each image. + num_level_anchors (int): Number of anchors of each scale level. + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. + gt_labels_list (list[Tensor]): class indices corresponding to + each box. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding + boxes can be ignored when computing the loss. + + Return: + tuple[Tensor]: A tuple of reg targets components. 
+ """ + (all_anchors, all_labels, all_label_weights, all_bbox_targets, + all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, + anchor_list, + valid_flag_list, + cls_score_list, + bbox_pred_list, + num_level_anchors_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs, + is_cls_assigner=False) + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) + labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors_list[0]) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors_list[0]) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors_list[0]) + return (anchors_list, labels_list, label_weights_list, + bbox_targets_list, bbox_weights_list, num_total_pos, + num_total_neg) + + def _get_target_single(self, + flat_anchors, + valid_flags, + cls_scores, + bbox_preds, + num_level_anchors, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True, + is_cls_assigner=True): + """Compute regression, classification targets for anchors in a single + image. + + Args: + flat_anchors (Tensor): Multi-level anchors of the image, + which are concatenated into a single tensor of shape + (num_base_priors, 4). + valid_flags (Tensor): Multi level valid flags of the image, + which are concatenated into a single tensor of + shape (num_base_priors,). + cls_scores (Tensor): Classification scores for all scale + levels of the image. + bbox_preds (Tensor): Box energies / deltas for all scale + levels of the image. + num_level_anchors (list[int]): Number of anchors of each + scale level. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, ). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts, ). + img_meta (dict): Meta info of the image. + label_channels (int): Channel of label. Default: 1. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. Default: True. + is_cls_assigner (bool): Classification or regression. + Default: True. + + Returns: + tuple: N is the number of total anchors in the image. + - labels (Tensor): Labels of all anchors in the image with \ + shape (N, ). + - label_weights (Tensor): Label weights of all anchor in the \ + image with shape (N, ). + - bbox_targets (Tensor): BBox targets of all anchors in the \ + image with shape (N, 4). + - bbox_weights (Tensor): BBox weights of all anchors in the \ + image with shape (N, 4) + - pos_inds (Tensor): Indices of positive anchor with shape \ + (num_pos, ). + - neg_inds (Tensor): Indices of negative anchor with shape \ + (num_neg, ). 
+ """ + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 7 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + + num_level_anchors_inside = self.get_num_level_anchors_inside( + num_level_anchors, inside_flags) + bbox_preds_valid = bbox_preds[inside_flags, :] + cls_scores_valid = cls_scores[inside_flags, :] + + assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner + + # decode prediction out of assigner + bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid) + assign_result = assigner.assign(anchors, num_level_anchors_inside, + gt_bboxes, gt_bboxes_ignore, gt_labels, + cls_scores_valid, bbox_preds_valid) + sampling_result = self.sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + labels = anchors.new_full((num_valid_anchors, ), + self.num_classes, + dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + if hasattr(self, 'bbox_coder'): + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + else: + # used in VFNetHead + pos_bbox_targets = sampling_result.pos_gt_bboxes + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class since v2.5.0 + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + anchors = unmap(anchors, num_total_anchors, inside_flags) + labels = unmap( + labels, num_total_anchors, inside_flags, fill=self.num_classes) + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (anchors, labels, label_weights, bbox_targets, bbox_weights, + pos_inds, neg_inds) + + def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): + """Get the anchors of each scale level inside. + + Args: + num_level_anchors (list[int]): Number of anchors of each + scale level. + inside_flags (Tensor): Multi level inside flags of the image, + which are concatenated into a single tensor of + shape (num_base_priors,). + + Returns: + list[int]: Number of anchors of each scale level inside. + """ + split_inside_flags = torch.split(inside_flags, num_level_anchors) + num_level_anchors_inside = [ + int(flags.sum()) for flags in split_inside_flags + ] + return num_level_anchors_inside diff --git a/downstream/mmdetection/mmdet/models/dense_heads/deformable_detr_head.py b/downstream/mmdetection/mmdet/models/dense_heads/deformable_detr_head.py new file mode 100644 index 0000000..71c2785 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/deformable_detr_head.py @@ -0,0 +1,318 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import Linear, bias_init_with_prob, constant_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import multi_apply
+from mmdet.models.utils.transformer import inverse_sigmoid
+from ..builder import HEADS
+from .detr_head import DETRHead
+
+
+@HEADS.register_module()
+class DeformableDETRHead(DETRHead):
+    """Head of DeformDETR: Deformable DETR: Deformable Transformers for End-to-
+    End Object Detection.
+
+    Code is modified from the `official github repo
+    <https://github.com/fundamentalvision/Deformable-DETR>`_.
+
+    More details can be found in the `paper
+    <https://arxiv.org/abs/2010.04159>`_ .
+
+    Args:
+        with_box_refine (bool): Whether to refine the reference points
+            in the decoder. Defaults to False.
+        as_two_stage (bool): Whether to generate the proposal from
+            the outputs of encoder.
+        transformer (:obj:`ConfigDict`): ConfigDict is used for building
+            the Encoder and Decoder.
+    """
+
+    def __init__(self,
+                 *args,
+                 with_box_refine=False,
+                 as_two_stage=False,
+                 transformer=None,
+                 **kwargs):
+        self.with_box_refine = with_box_refine
+        self.as_two_stage = as_two_stage
+        if self.as_two_stage:
+            transformer['as_two_stage'] = self.as_two_stage
+
+        super(DeformableDETRHead, self).__init__(
+            *args, transformer=transformer, **kwargs)
+
+    def _init_layers(self):
+        """Initialize classification branch and regression branch of head."""
+
+        fc_cls = Linear(self.embed_dims, self.cls_out_channels)
+        reg_branch = []
+        for _ in range(self.num_reg_fcs):
+            reg_branch.append(Linear(self.embed_dims, self.embed_dims))
+            reg_branch.append(nn.ReLU())
+        reg_branch.append(Linear(self.embed_dims, 4))
+        reg_branch = nn.Sequential(*reg_branch)
+
+        def _get_clones(module, N):
+            return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+        # last reg_branch is used to generate proposal from
+        # encode feature map when as_two_stage is True.
+        num_pred = (self.transformer.decoder.num_layers + 1) if \
+            self.as_two_stage else self.transformer.decoder.num_layers
+
+        if self.with_box_refine:
+            self.cls_branches = _get_clones(fc_cls, num_pred)
+            self.reg_branches = _get_clones(reg_branch, num_pred)
+        else:
+            self.cls_branches = nn.ModuleList(
+                [fc_cls for _ in range(num_pred)])
+            self.reg_branches = nn.ModuleList(
+                [reg_branch for _ in range(num_pred)])
+
+        if not self.as_two_stage:
+            self.query_embedding = nn.Embedding(self.num_query,
+                                                self.embed_dims * 2)
+
+    def init_weights(self):
+        """Initialize weights of the DeformDETR head."""
+        self.transformer.init_weights()
+        if self.loss_cls.use_sigmoid:
+            bias_init = bias_init_with_prob(0.01)
+            for m in self.cls_branches:
+                nn.init.constant_(m.bias, bias_init)
+        for m in self.reg_branches:
+            constant_init(m[-1], 0, bias=0)
+        nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0)
+        if self.as_two_stage:
+            for m in self.reg_branches:
+                nn.init.constant_(m[-1].bias.data[2:], 0.0)
+
+    def forward(self, mlvl_feats, img_metas):
+        """Forward function.
+
+        Args:
+            mlvl_feats (tuple[Tensor]): Features from the upstream
+                network, each is a 4D-tensor with shape
+                (N, C, H, W).
+            img_metas (list[dict]): List of image information.
+
+        Returns:
+            all_cls_scores (Tensor): Outputs from the classification head, \
+                shape [nb_dec, bs, num_query, cls_out_channels]. Note \
+                cls_out_channels should include background.
+            all_bbox_preds (Tensor): Sigmoid outputs from the regression \
+                head with normalized coordinate format (cx, cy, w, h). \
+                Shape [nb_dec, bs, num_query, 4].
+ enc_outputs_class (Tensor): The score of each point on encode \ + feature map, has shape (N, h*w, num_class). Only when \ + as_two_stage is True it would be returned, otherwise \ + `None` would be returned. + enc_outputs_coord (Tensor): The proposal generate from the \ + encode feature map, has shape (N, h*w, 4). Only when \ + as_two_stage is True it would be returned, otherwise \ + `None` would be returned. + """ + + batch_size = mlvl_feats[0].size(0) + input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + img_masks = mlvl_feats[0].new_ones( + (batch_size, input_img_h, input_img_w)) + for img_id in range(batch_size): + img_h, img_w, _ = img_metas[img_id]['img_shape'] + img_masks[img_id, :img_h, :img_w] = 0 + + mlvl_masks = [] + mlvl_positional_encodings = [] + for feat in mlvl_feats: + mlvl_masks.append( + F.interpolate(img_masks[None], + size=feat.shape[-2:]).to(torch.bool).squeeze(0)) + mlvl_positional_encodings.append( + self.positional_encoding(mlvl_masks[-1])) + + query_embeds = None + if not self.as_two_stage: + query_embeds = self.query_embedding.weight + hs, init_reference, inter_references, \ + enc_outputs_class, enc_outputs_coord = self.transformer( + mlvl_feats, + mlvl_masks, + query_embeds, + mlvl_positional_encodings, + reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 + cls_branches=self.cls_branches if self.as_two_stage else None # noqa:E501 + ) + hs = hs.permute(0, 2, 1, 3) + outputs_classes = [] + outputs_coords = [] + + for lvl in range(hs.shape[0]): + if lvl == 0: + reference = init_reference + else: + reference = inter_references[lvl - 1] + reference = inverse_sigmoid(reference) + outputs_class = self.cls_branches[lvl](hs[lvl]) + tmp = self.reg_branches[lvl](hs[lvl]) + if reference.shape[-1] == 4: + tmp += reference + else: + assert reference.shape[-1] == 2 + tmp[..., :2] += reference + outputs_coord = tmp.sigmoid() + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + + outputs_classes = torch.stack(outputs_classes) + outputs_coords = torch.stack(outputs_coords) + if self.as_two_stage: + return outputs_classes, outputs_coords, \ + enc_outputs_class, \ + enc_outputs_coord.sigmoid() + else: + return outputs_classes, outputs_coords, \ + None, None + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) + def loss(self, + all_cls_scores, + all_bbox_preds, + enc_cls_scores, + enc_bbox_preds, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore=None): + """"Loss function. + + Args: + all_cls_scores (Tensor): Classification score of all + decoder layers, has shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds (Tensor): Sigmoid regression + outputs of all decode layers. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + enc_cls_scores (Tensor): Classification scores of + points on encode feature map , has shape + (N, h*w, num_classes). Only be passed when as_two_stage is + True, otherwise is None. + enc_bbox_preds (Tensor): Regression results of each points + on the encode feature map, has shape (N, h*w, 4). Only be + passed when as_two_stage is True, otherwise is None. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. 
+ gt_bboxes_ignore (list[Tensor], optional): Bounding boxes + which can be ignored for each image. Default None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert gt_bboxes_ignore is None, \ + f'{self.__class__.__name__} only supports ' \ + f'for gt_bboxes_ignore setting to None.' + + num_dec_layers = len(all_cls_scores) + all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + img_metas_list = [img_metas for _ in range(num_dec_layers)] + + losses_cls, losses_bbox, losses_iou = multi_apply( + self.loss_single, all_cls_scores, all_bbox_preds, + all_gt_bboxes_list, all_gt_labels_list, img_metas_list, + all_gt_bboxes_ignore_list) + + loss_dict = dict() + # loss of proposal generated from encode feature map. + if enc_cls_scores is not None: + binary_labels_list = [ + torch.zeros_like(gt_labels_list[i]) + for i in range(len(img_metas)) + ] + enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ + self.loss_single(enc_cls_scores, enc_bbox_preds, + gt_bboxes_list, binary_labels_list, + img_metas, gt_bboxes_ignore) + loss_dict['enc_loss_cls'] = enc_loss_cls + loss_dict['enc_loss_bbox'] = enc_losses_bbox + loss_dict['enc_loss_iou'] = enc_losses_iou + + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_bbox'] = losses_bbox[-1] + loss_dict['loss_iou'] = losses_iou[-1] + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], + losses_bbox[:-1], + losses_iou[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i + num_dec_layer += 1 + return loss_dict + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) + def get_bboxes(self, + all_cls_scores, + all_bbox_preds, + enc_cls_scores, + enc_bbox_preds, + img_metas, + rescale=False): + """Transform network outputs for a batch into bbox predictions. + + Args: + all_cls_scores (Tensor): Classification score of all + decoder layers, has shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds (Tensor): Sigmoid regression + outputs of all decode layers. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + enc_cls_scores (Tensor): Classification scores of + points on encode feature map , has shape + (N, h*w, num_classes). Only be passed when as_two_stage is + True, otherwise is None. + enc_bbox_preds (Tensor): Regression results of each points + on the encode feature map, has shape (N, h*w, 4). Only be + passed when as_two_stage is True, otherwise is None. + img_metas (list[dict]): Meta information of each image. + rescale (bool, optional): If True, return boxes in original + image space. Default False. + + Returns: + list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ + The first item is an (n, 5) tensor, where the first 4 columns \ + are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ + 5-th column is a score between 0 and 1. The second item is a \ + (n,) tensor where each item is the predicted class label of \ + the corresponding box. 
+ """ + cls_scores = all_cls_scores[-1] + bbox_preds = all_bbox_preds[-1] + + result_list = [] + for img_id in range(len(img_metas)): + cls_score = cls_scores[img_id] + bbox_pred = bbox_preds[img_id] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single(cls_score, bbox_pred, + img_shape, scale_factor, + rescale) + result_list.append(proposals) + return result_list diff --git a/downstream/mmdetection/mmdet/models/dense_heads/dense_test_mixins.py b/downstream/mmdetection/mmdet/models/dense_heads/dense_test_mixins.py new file mode 100644 index 0000000..3421548 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/dense_test_mixins.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import sys +from inspect import signature + +import torch +from mmcv.ops import batched_nms + +from mmdet.core import bbox_mapping_back, merge_aug_proposals + +if sys.version_info >= (3, 7): + from mmdet.utils.contextmanagers import completed + + +class BBoxTestMixin(object): + """Mixin class for testing det bboxes via DenseHead.""" + + def simple_test_bboxes(self, feats, img_metas, rescale=False): + """Test det bboxes without test-time augmentation, can be applied in + DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, + etc. + + Args: + feats (tuple[torch.Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n,) + """ + outs = self.forward(feats) + results_list = self.get_bboxes( + *outs, img_metas=img_metas, rescale=rescale) + return results_list + + def aug_test_bboxes(self, feats, img_metas, rescale=False): + """Test det bboxes with test time augmentation, can be applied in + DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, + etc. + + Args: + feats (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains features for all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n,). The length of list should always be 1. 
+ """ + # check with_nms argument + gb_sig = signature(self.get_bboxes) + gb_args = [p.name for p in gb_sig.parameters.values()] + gbs_sig = signature(self._get_bboxes_single) + gbs_args = [p.name for p in gbs_sig.parameters.values()] + assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ + f'{self.__class__.__name__}' \ + ' does not support test-time augmentation' + + aug_bboxes = [] + aug_scores = [] + aug_labels = [] + for x, img_meta in zip(feats, img_metas): + # only one image in the batch + outs = self.forward(x) + bbox_outputs = self.get_bboxes( + *outs, + img_metas=img_meta, + cfg=self.test_cfg, + rescale=False, + with_nms=False)[0] + aug_bboxes.append(bbox_outputs[0]) + aug_scores.append(bbox_outputs[1]) + if len(bbox_outputs) >= 3: + aug_labels.append(bbox_outputs[2]) + + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = self.merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas) + merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None + + if merged_bboxes.numel() == 0: + det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) + return [ + (det_bboxes, merged_labels), + ] + + det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, + merged_labels, self.test_cfg.nms) + det_bboxes = det_bboxes[:self.test_cfg.max_per_img] + det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] + + if rescale: + _det_bboxes = det_bboxes + else: + _det_bboxes = det_bboxes.clone() + _det_bboxes[:, :4] *= det_bboxes.new_tensor( + img_metas[0][0]['scale_factor']) + + return [ + (_det_bboxes, det_labels), + ] + + def simple_test_rpn(self, x, img_metas): + """Test without augmentation, only for ``RPNHead`` and its variants, + e.g., ``GARPNHead``, etc. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): Meta info of each image. + + Returns: + list[Tensor]: Proposals of each image, each item has shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + """ + rpn_outs = self(x) + proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas) + return proposal_list + + def aug_test_rpn(self, feats, img_metas): + """Test with augmentation for only for ``RPNHead`` and its variants, + e.g., ``GARPNHead``, etc. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): Meta info of each image. + + Returns: + list[Tensor]: Proposals of each image, each item has shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). 
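`aug_test_bboxes` above collects raw (pre-NMS) predictions from each test-time augmentation and merges them by mapping every set of boxes back to the original image frame before running `batched_nms` (see `merge_aug_bboxes` further below). A simplified stand-in for that mapping, covering only the horizontal-flip case and a hypothetical scale factor:

```python
import torch

def map_back_hflip(bboxes, img_shape, scale_factor):
    """Undo a horizontal flip and the resize scaling for TTA boxes.

    Simplified sketch of the ``bbox_mapping_back`` behaviour for h-flip only;
    ``bboxes`` are (n, 4) in (x1, y1, x2, y2) on the flipped, resized input.
    """
    h, w = img_shape[:2]
    flipped = bboxes.clone()
    flipped[:, 0] = w - bboxes[:, 2]
    flipped[:, 2] = w - bboxes[:, 0]
    return flipped / flipped.new_tensor(scale_factor)

boxes = torch.tensor([[10., 20., 110., 220.]])
orig = map_back_hflip(boxes, img_shape=(400, 600, 3),
                      scale_factor=(2.0, 2.0, 2.0, 2.0))
print(orig)  # box expressed in the original (un-flipped, un-resized) image
```

Once every augmentation's boxes live in the same coordinate frame, concatenating them and applying a single NMS is enough to produce the merged result.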
+ """ + samples_per_gpu = len(img_metas[0]) + aug_proposals = [[] for _ in range(samples_per_gpu)] + for x, img_meta in zip(feats, img_metas): + proposal_list = self.simple_test_rpn(x, img_meta) + for i, proposals in enumerate(proposal_list): + aug_proposals[i].append(proposals) + # reorganize the order of 'img_metas' to match the dimensions + # of 'aug_proposals' + aug_img_metas = [] + for i in range(samples_per_gpu): + aug_img_meta = [] + for j in range(len(img_metas)): + aug_img_meta.append(img_metas[j][i]) + aug_img_metas.append(aug_img_meta) + # after merging, proposals will be rescaled to the original image size + merged_proposals = [ + merge_aug_proposals(proposals, aug_img_meta, self.test_cfg) + for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas) + ] + return merged_proposals + + if sys.version_info >= (3, 7): + + async def async_simple_test_rpn(self, x, img_metas): + sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025) + async with completed( + __name__, 'rpn_head_forward', + sleep_interval=sleep_interval): + rpn_outs = self(x) + + proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas) + return proposal_list + + def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): + """Merge augmented detection bboxes and scores. + + Args: + aug_bboxes (list[Tensor]): shape (n, 4*#class) + aug_scores (list[Tensor] or None): shape (n, #class) + img_shapes (list[Tensor]): shape (3, ). + + Returns: + tuple[Tensor]: ``bboxes`` with shape (n,4), where + 4 represent (tl_x, tl_y, br_x, br_y) + and ``scores`` with shape (n,). + """ + recovered_bboxes = [] + for bboxes, img_info in zip(aug_bboxes, img_metas): + img_shape = img_info[0]['img_shape'] + scale_factor = img_info[0]['scale_factor'] + flip = img_info[0]['flip'] + flip_direction = img_info[0]['flip_direction'] + bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, + flip_direction) + recovered_bboxes.append(bboxes) + bboxes = torch.cat(recovered_bboxes, dim=0) + if aug_scores is None: + return bboxes + else: + scores = torch.cat(aug_scores, dim=0) + return bboxes, scores diff --git a/downstream/mmdetection/mmdet/models/dense_heads/detr_head.py b/downstream/mmdetection/mmdet/models/dense_heads/detr_head.py new file mode 100644 index 0000000..de1913c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/detr_head.py @@ -0,0 +1,844 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, Linear, build_activation_layer +from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding +from mmcv.runner import force_fp32 + +from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, + build_assigner, build_sampler, multi_apply, + reduce_mean) +from mmdet.models.utils import build_transformer +from ..builder import HEADS, build_loss +from .anchor_free_head import AnchorFreeHead + + +@HEADS.register_module() +class DETRHead(AnchorFreeHead): + """Implements the DETR transformer head. + + See `paper: End-to-End Object Detection with Transformers + `_ for details. + + Args: + num_classes (int): Number of categories excluding the background. + in_channels (int): Number of channels in the input feature map. + num_query (int): Number of query in Transformer. + num_reg_fcs (int, optional): Number of fully-connected layers used in + `FFN`, which is then used for the regression head. Default 2. + transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer. + Default: None. 
+ sync_cls_avg_factor (bool): Whether to sync the avg_factor of + all ranks. Default to False. + positional_encoding (obj:`mmcv.ConfigDict`|dict): + Config for position encoding. + loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the + classification loss. Default `CrossEntropyLoss`. + loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the + regression loss. Default `L1Loss`. + loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the + regression iou loss. Default `GIoULoss`. + tran_cfg (obj:`mmcv.ConfigDict`|dict): Training config of + transformer head. + test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of + transformer head. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + _version = 2 + + def __init__(self, + num_classes, + in_channels, + num_query=100, + num_reg_fcs=2, + transformer=None, + sync_cls_avg_factor=False, + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True), + loss_cls=dict( + type='CrossEntropyLoss', + bg_cls_weight=0.1, + use_sigmoid=False, + loss_weight=1.0, + class_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=5.0), + iou_cost=dict( + type='IoUCost', iou_mode='giou', weight=2.0))), + test_cfg=dict(max_per_img=100), + init_cfg=None, + **kwargs): + # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, + # since it brings inconvenience when the initialization of + # `AnchorFreeHead` is called. + super(AnchorFreeHead, self).__init__(init_cfg) + self.bg_cls_weight = 0 + self.sync_cls_avg_factor = sync_cls_avg_factor + class_weight = loss_cls.get('class_weight', None) + if class_weight is not None and (self.__class__ is DETRHead): + assert isinstance(class_weight, float), 'Expected ' \ + 'class_weight to have type float. Found ' \ + f'{type(class_weight)}.' + # NOTE following the official DETR rep0, bg_cls_weight means + # relative classification weight of the no-object class. + bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) + assert isinstance(bg_cls_weight, float), 'Expected ' \ + 'bg_cls_weight to have type float. Found ' \ + f'{type(bg_cls_weight)}.' + class_weight = torch.ones(num_classes + 1) * class_weight + # set background class as the last indice + class_weight[num_classes] = bg_cls_weight + loss_cls.update({'class_weight': class_weight}) + if 'bg_cls_weight' in loss_cls: + loss_cls.pop('bg_cls_weight') + self.bg_cls_weight = bg_cls_weight + + if train_cfg: + assert 'assigner' in train_cfg, 'assigner should be provided '\ + 'when train_cfg is set.' + assigner = train_cfg['assigner'] + assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ + 'The classification weight for loss and matcher should be' \ + 'exactly the same.' + assert loss_bbox['loss_weight'] == assigner['reg_cost'][ + 'weight'], 'The regression L1 weight for loss and matcher ' \ + 'should be exactly the same.' + assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \ + 'The regression iou weight for loss and matcher should be' \ + 'exactly the same.' 
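The asserts above tie each loss weight to the corresponding matcher cost, since Hungarian matching and the final loss should score a query-to-GT pair identically. A small illustration of a config that satisfies them; the numbers simply mirror the defaults of this head, and any other values work as long as each pair stays in sync:

```python
# Hypothetical user config: each (loss weight, matcher cost) pair must match.
train_cfg = dict(
    assigner=dict(
        type='HungarianAssigner',
        cls_cost=dict(type='ClassificationCost', weight=1.0),
        reg_cost=dict(type='BBoxL1Cost', weight=5.0),
        iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0)))
loss_cls = dict(type='CrossEntropyLoss', loss_weight=1.0, class_weight=1.0)
loss_bbox = dict(type='L1Loss', loss_weight=5.0)
loss_iou = dict(type='GIoULoss', loss_weight=2.0)

# These are the same consistency checks the head enforces at build time.
assert loss_cls['loss_weight'] == train_cfg['assigner']['cls_cost']['weight']
assert loss_bbox['loss_weight'] == train_cfg['assigner']['reg_cost']['weight']
assert loss_iou['loss_weight'] == train_cfg['assigner']['iou_cost']['weight']
```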
+ self.assigner = build_assigner(assigner) + # DETR sampling=False, so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.num_query = num_query + self.num_classes = num_classes + self.in_channels = in_channels + self.num_reg_fcs = num_reg_fcs + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.fp16_enabled = False + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.loss_iou = build_loss(loss_iou) + + if self.loss_cls.use_sigmoid: + self.cls_out_channels = num_classes + else: + self.cls_out_channels = num_classes + 1 + self.act_cfg = transformer.get('act_cfg', + dict(type='ReLU', inplace=True)) + self.activate = build_activation_layer(self.act_cfg) + self.positional_encoding = build_positional_encoding( + positional_encoding) + self.transformer = build_transformer(transformer) + self.embed_dims = self.transformer.embed_dims + assert 'num_feats' in positional_encoding + num_feats = positional_encoding['num_feats'] + assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ + f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ + f' and {num_feats}.' + self._init_layers() + + def _init_layers(self): + """Initialize layers of the transformer head.""" + self.input_proj = Conv2d( + self.in_channels, self.embed_dims, kernel_size=1) + self.fc_cls = Linear(self.embed_dims, self.cls_out_channels) + self.reg_ffn = FFN( + self.embed_dims, + self.embed_dims, + self.num_reg_fcs, + self.act_cfg, + dropout=0.0, + add_residual=False) + self.fc_reg = Linear(self.embed_dims, 4) + self.query_embedding = nn.Embedding(self.num_query, self.embed_dims) + + def init_weights(self): + """Initialize weights of the transformer head.""" + # The initialization for transformer is important + self.transformer.init_weights() + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """load checkpoints.""" + # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, + # since `AnchorFreeHead._load_from_state_dict` should not be + # called here. Invoking the default `Module._load_from_state_dict` + # is enough. + + # Names of some parameters in has been changed. + version = local_metadata.get('version', None) + if (version is None or version < 2) and self.__class__ is DETRHead: + convert_dict = { + '.self_attn.': '.attentions.0.', + '.ffn.': '.ffns.0.', + '.multihead_attn.': '.attentions.1.', + '.decoder.norm.': '.decoder.post_norm.' + } + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + for ori_key, convert_key in convert_dict.items(): + if ori_key in k: + convert_key = k.replace(ori_key, convert_key) + state_dict[convert_key] = state_dict[k] + del state_dict[k] + + super(AnchorFreeHead, + self)._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, + unexpected_keys, error_msgs) + + def forward(self, feats, img_metas): + """Forward function. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. + + - all_cls_scores_list (list[Tensor]): Classification scores \ + for each scale level. Each is a 4D-tensor with shape \ + [nb_dec, bs, num_query, cls_out_channels]. Note \ + `cls_out_channels` should includes background. 
+ - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ + outputs for each scale level. Each is a 4D-tensor with \ + normalized coordinate format (cx, cy, w, h) and shape \ + [nb_dec, bs, num_query, 4]. + """ + num_levels = len(feats) + img_metas_list = [img_metas for _ in range(num_levels)] + return multi_apply(self.forward_single, feats, img_metas_list) + + def forward_single(self, x, img_metas): + """"Forward function for a single feature level. + + Args: + x (Tensor): Input feature from backbone's single stage, shape + [bs, c, h, w]. + img_metas (list[dict]): List of image information. + + Returns: + all_cls_scores (Tensor): Outputs from the classification head, + shape [nb_dec, bs, num_query, cls_out_channels]. Note + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression + head with normalized coordinate format (cx, cy, w, h). + Shape [nb_dec, bs, num_query, 4]. + """ + # construct binary masks which used for the transformer. + # NOTE following the official DETR repo, non-zero values representing + # ignored positions, while zero values means valid positions. + batch_size = x.size(0) + input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + masks = x.new_ones((batch_size, input_img_h, input_img_w)) + for img_id in range(batch_size): + img_h, img_w, _ = img_metas[img_id]['img_shape'] + masks[img_id, :img_h, :img_w] = 0 + + x = self.input_proj(x) + # interpolate masks to have the same spatial shape with x + masks = F.interpolate( + masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) + # position encoding + pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w] + # outs_dec: [nb_dec, bs, num_query, embed_dim] + outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, + pos_embed) + + all_cls_scores = self.fc_cls(outs_dec) + all_bbox_preds = self.fc_reg(self.activate( + self.reg_ffn(outs_dec))).sigmoid() + return all_cls_scores, all_bbox_preds + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) + def loss(self, + all_cls_scores_list, + all_bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore=None): + """"Loss function. + + Only outputs from the last feature level are used for computing + losses by default. + + Args: + all_cls_scores_list (list[Tensor]): Classification outputs + for each feature level. Each is a 4D-tensor with shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds_list (list[Tensor]): Sigmoid regression + outputs for each feature level. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore (list[Tensor], optional): Bounding boxes + which can be ignored for each image. Default None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + # NOTE defaultly only the outputs from the last feature scale is used. + all_cls_scores = all_cls_scores_list[-1] + all_bbox_preds = all_bbox_preds_list[-1] + assert gt_bboxes_ignore is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' 
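The loss below fans the per-decoder-layer predictions out with `multi_apply` (imported from `mmdet.core` at the top of this file) and regroups the per-layer results into separate lists. A sketch of the usual implementation of that helper, shown only to clarify the data flow; the toy `fake_loss_single` below is a stand-in, not the real `loss_single`:

```python
from functools import partial

def multi_apply_sketch(func, *args, **kwargs):
    """Apply ``func`` to each tuple of per-layer arguments and regroup results.

    Returns a tuple of lists: one list per return value of ``func``, each with
    one entry per decoder layer.
    """
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))

# Toy stand-in for loss_single: returns (loss_cls, loss_bbox, loss_iou).
def fake_loss_single(cls_score, bbox_pred):
    return cls_score * 0.1, bbox_pred * 5.0, bbox_pred * 2.0

losses_cls, losses_bbox, losses_iou = multi_apply_sketch(
    fake_loss_single, [1.0, 2.0, 3.0], [0.5, 0.5, 0.5])
print(losses_cls)  # one classification loss per decoder layer
```

This is what makes the auxiliary supervision on intermediate decoder layers cheap to express: the last entry of each list becomes `loss_*`, and the earlier ones become the `d{i}.loss_*` terms.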
+ + num_dec_layers = len(all_cls_scores) + all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + img_metas_list = [img_metas for _ in range(num_dec_layers)] + + losses_cls, losses_bbox, losses_iou = multi_apply( + self.loss_single, all_cls_scores, all_bbox_preds, + all_gt_bboxes_list, all_gt_labels_list, img_metas_list, + all_gt_bboxes_ignore_list) + + loss_dict = dict() + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_bbox'] = losses_bbox[-1] + loss_dict['loss_iou'] = losses_iou[-1] + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], + losses_bbox[:-1], + losses_iou[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i + num_dec_layer += 1 + return loss_dict + + def loss_single(self, + cls_scores, + bbox_preds, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore_list=None): + """"Loss function for outputs from a single decoder layer of a single + feature level. + + Args: + cls_scores (Tensor): Box score logits from a single decoder layer + for all images. Shape [bs, num_query, cls_out_channels]. + bbox_preds (Tensor): Sigmoid outputs from a single decoder layer + for all images, with normalized coordinate (cx, cy, w, h) and + shape [bs, num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + + Returns: + dict[str, Tensor]: A dictionary of loss components for outputs from + a single decoder layer. 
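`loss_single` below normalizes by an `avg_factor` that is synchronized across GPUs through `reduce_mean` (imported from `mmdet.core`). A sketch of that helper, assuming it follows the standard all-reduce pattern; on a single process it is an identity:

```python
import torch
import torch.distributed as dist

def reduce_mean_sketch(tensor):
    """Average a scalar tensor across ranks; no-op when not distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor

# Single-process example: without a process group this returns the input,
# but under DDP it keeps the positive-sample count consistent across ranks.
num_total_pos = torch.tensor([12.0])
print(torch.clamp(reduce_mean_sketch(num_total_pos), min=1).item())  # 12.0
```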
+ """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] + cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, + img_metas, gt_bboxes_ignore_list) + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + labels = torch.cat(labels_list, 0) + label_weights = torch.cat(label_weights_list, 0) + bbox_targets = torch.cat(bbox_targets_list, 0) + bbox_weights = torch.cat(bbox_weights_list, 0) + + # classification loss + cls_scores = cls_scores.reshape(-1, self.cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.bg_cls_weight + if self.sync_cls_avg_factor: + cls_avg_factor = reduce_mean( + cls_scores.new_tensor([cls_avg_factor])) + cls_avg_factor = max(cls_avg_factor, 1) + + loss_cls = self.loss_cls( + cls_scores, labels, label_weights, avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes across all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + # construct factors used for rescale bboxes + factors = [] + for img_meta, bbox_pred in zip(img_metas, bbox_preds): + img_h, img_w, _ = img_meta['img_shape'] + factor = bbox_pred.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0).repeat( + bbox_pred.size(0), 1) + factors.append(factor) + factors = torch.cat(factors, 0) + + # DETR regress the relative position of boxes (cxcywh) in the image, + # thus the learning target is normalized by the image size. So here + # we need to re-scale them for calculating IoU loss + bbox_preds = bbox_preds.reshape(-1, 4) + bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors + bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors + + # regression IoU loss, defaultly GIoU loss + loss_iou = self.loss_iou( + bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) + + # regression L1 loss + loss_bbox = self.loss_bbox( + bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) + return loss_cls, loss_bbox, loss_iou + + def get_targets(self, + cls_scores_list, + bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore_list=None): + """"Compute regression and classification targets for a batch image. + + Outputs from a single decoder layer of a single feature level are used. + + Args: + cls_scores_list (list[Tensor]): Box score logits from a single + decoder layer for each image with shape [num_query, + cls_out_channels]. + bbox_preds_list (list[Tensor]): Sigmoid outputs from a single + decoder layer for each image, with normalized coordinate + (cx, cy, w, h) and shape [num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + + Returns: + tuple: a tuple containing the following targets. + + - labels_list (list[Tensor]): Labels for all images. + - label_weights_list (list[Tensor]): Label weights for all \ + images. 
+ - bbox_targets_list (list[Tensor]): BBox targets for all \ + images. + - bbox_weights_list (list[Tensor]): BBox weights for all \ + images. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + """ + assert gt_bboxes_ignore_list is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' + num_imgs = len(cls_scores_list) + gt_bboxes_ignore_list = [ + gt_bboxes_ignore_list for _ in range(num_imgs) + ] + + (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + + def _get_target_single(self, + cls_score, + bbox_pred, + gt_bboxes, + gt_labels, + img_meta, + gt_bboxes_ignore=None): + """"Compute regression and classification targets for one image. + + Outputs from a single decoder layer of a single feature level are used. + + Args: + cls_score (Tensor): Box score logits from a single decoder layer + for one image. Shape [num_query, cls_out_channels]. + bbox_pred (Tensor): Sigmoid outputs from a single decoder layer + for one image, with normalized coordinate (cx, cy, w, h) and + shape [num_query, 4]. + gt_bboxes (Tensor): Ground truth bboxes for one image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth class indices for one image + with shape (num_gts, ). + img_meta (dict): Meta information for one image. + gt_bboxes_ignore (Tensor, optional): Bounding boxes + which can be ignored. Default None. + + Returns: + tuple[Tensor]: a tuple containing the following for one image. + + - labels (Tensor): Labels of each image. + - label_weights (Tensor]): Label weights of each image. + - bbox_targets (Tensor): BBox targets of each image. + - bbox_weights (Tensor): BBox weights of each image. + - pos_inds (Tensor): Sampled positive indices for each image. + - neg_inds (Tensor): Sampled negative indices for each image. + """ + + num_bboxes = bbox_pred.size(0) + # assigner and sampler + assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, + gt_labels, img_meta, + gt_bboxes_ignore) + sampling_result = self.sampler.sample(assign_result, bbox_pred, + gt_bboxes) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label targets + labels = gt_bboxes.new_full((num_bboxes, ), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_bboxes.new_ones(num_bboxes) + + # bbox targets + bbox_targets = torch.zeros_like(bbox_pred) + bbox_weights = torch.zeros_like(bbox_pred) + bbox_weights[pos_inds] = 1.0 + img_h, img_w, _ = img_meta['img_shape'] + + # DETR regress the relative position of boxes (cxcywh) in the image. + # Thus the learning target should be normalized by the image size, also + # the box format should be converted from defaultly x1y1x2y2 to cxcywh. 
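The comment above describes the target encoding: GT boxes are normalized by the image size and converted from (x1, y1, x2, y2) to (cx, cy, w, h). A small worked example of that step, using a local re-implementation of the conversion (the real helpers `bbox_xyxy_to_cxcywh` / `bbox_cxcywh_to_xyxy` are imported from `mmdet.core` at the top of this file); image size and box values are illustrative:

```python
import torch

def xyxy_to_cxcywh_sketch(boxes):
    """(x1, y1, x2, y2) -> (cx, cy, w, h); same math as bbox_xyxy_to_cxcywh."""
    x1, y1, x2, y2 = boxes.unbind(-1)
    return torch.stack([(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1], dim=-1)

# One GT box in absolute pixels on a 480 x 640 (h, w) image.
img_h, img_w = 480, 640
gt_xyxy = torch.tensor([[100., 50., 300., 250.]])
factor = gt_xyxy.new_tensor([img_w, img_h, img_w, img_h])

# Normalize to [0, 1] first, then convert: this is the regression target format.
target_cxcywh = xyxy_to_cxcywh_sketch(gt_xyxy / factor)
print(target_cxcywh)  # approx. [[0.3125, 0.3125, 0.3125, 0.4167]]
```

The inverse path (multiply by the same factor, convert back to xyxy) is exactly what the IoU-loss computation does a few lines earlier in `loss_single`.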
+ factor = bbox_pred.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor + pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) + bbox_targets[pos_inds] = pos_gt_bboxes_targets + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds) + + # over-write because img_metas are needed as inputs for bbox_head. + def forward_train(self, + x, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=None, + proposal_cfg=None, + **kwargs): + """Forward function for training mode. + + Args: + x (list[Tensor]): Features from backbone. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + proposal_cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert proposal_cfg is None, '"proposal_cfg" must be None' + outs = self(x, img_metas) + if gt_labels is None: + loss_inputs = outs + (gt_bboxes, img_metas) + else: + loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) + losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + return losses + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) + def get_bboxes(self, + all_cls_scores_list, + all_bbox_preds_list, + img_metas, + rescale=False): + """Transform network outputs for a batch into bbox predictions. + + Args: + all_cls_scores_list (list[Tensor]): Classification outputs + for each feature level. Each is a 4D-tensor with shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds_list (list[Tensor]): Sigmoid regression + outputs for each feature level. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + img_metas (list[dict]): Meta information of each image. + rescale (bool, optional): If True, return boxes in original + image space. Default False. + + Returns: + list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ + The first item is an (n, 5) tensor, where the first 4 columns \ + are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ + 5-th column is a score between 0 and 1. The second item is a \ + (n,) tensor where each item is the predicted class label of \ + the corresponding box. + """ + # NOTE defaultly only using outputs from the last feature level, + # and only the outputs from the last decoder layer is used. + cls_scores = all_cls_scores_list[-1][-1] + bbox_preds = all_bbox_preds_list[-1][-1] + + result_list = [] + for img_id in range(len(img_metas)): + cls_score = cls_scores[img_id] + bbox_pred = bbox_preds[img_id] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single(cls_score, bbox_pred, + img_shape, scale_factor, + rescale) + result_list.append(proposals) + + return result_list + + def _get_bboxes_single(self, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False): + """Transform outputs from the last decoder layer into bbox predictions + for each image. + + Args: + cls_score (Tensor): Box score logits from the last decoder layer + for each image. Shape [num_query, cls_out_channels]. 
+ bbox_pred (Tensor): Sigmoid outputs from the last decoder layer + for each image, with coordinate format (cx, cy, w, h) and + shape [num_query, 4]. + img_shape (tuple[int]): Shape of input image, (height, width, 3). + scale_factor (ndarray, optional): Scale factor of the image arange + as (w_scale, h_scale, w_scale, h_scale). + rescale (bool, optional): If True, return boxes in original image + space. Default False. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. + + - det_bboxes: Predicted bboxes with shape [num_query, 5], \ + where the first 4 columns are bounding box positions \ + (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \ + between 0 and 1. + - det_labels: Predicted labels of the corresponding box with \ + shape [num_query]. + """ + assert len(cls_score) == len(bbox_pred) + max_per_img = self.test_cfg.get('max_per_img', self.num_query) + # exclude background + if self.loss_cls.use_sigmoid: + cls_score = cls_score.sigmoid() + scores, indexes = cls_score.view(-1).topk(max_per_img) + det_labels = indexes % self.num_classes + bbox_index = indexes // self.num_classes + bbox_pred = bbox_pred[bbox_index] + else: + scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) + scores, bbox_index = scores.topk(max_per_img) + bbox_pred = bbox_pred[bbox_index] + det_labels = det_labels[bbox_index] + + det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) + det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] + det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] + det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) + det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) + if rescale: + det_bboxes /= det_bboxes.new_tensor(scale_factor) + det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) + + return det_bboxes, det_labels + + def simple_test_bboxes(self, feats, img_metas, rescale=False): + """Test det bboxes without test-time augmentation. + + Args: + feats (tuple[torch.Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n,) + """ + # forward of this head requires img_metas + outs = self.forward(feats, img_metas) + results_list = self.get_bboxes(*outs, img_metas, rescale=rescale) + return results_list + + def forward_onnx(self, feats, img_metas): + """Forward function for exporting to ONNX. + + Over-write `forward` because: `masks` is directly created with + zero (valid position tag) and has the same spatial size as `x`. + Thus the construction of `masks` is different from that in `forward`. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. + + - all_cls_scores_list (list[Tensor]): Classification scores \ + for each scale level. Each is a 4D-tensor with shape \ + [nb_dec, bs, num_query, cls_out_channels]. Note \ + `cls_out_channels` should includes background. + - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ + outputs for each scale level. 
Each is a 4D-tensor with \ + normalized coordinate format (cx, cy, w, h) and shape \ + [nb_dec, bs, num_query, 4]. + """ + num_levels = len(feats) + img_metas_list = [img_metas for _ in range(num_levels)] + return multi_apply(self.forward_single_onnx, feats, img_metas_list) + + def forward_single_onnx(self, x, img_metas): + """"Forward function for a single feature level with ONNX exportation. + + Args: + x (Tensor): Input feature from backbone's single stage, shape + [bs, c, h, w]. + img_metas (list[dict]): List of image information. + + Returns: + all_cls_scores (Tensor): Outputs from the classification head, + shape [nb_dec, bs, num_query, cls_out_channels]. Note + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression + head with normalized coordinate format (cx, cy, w, h). + Shape [nb_dec, bs, num_query, 4]. + """ + # Note `img_shape` is not dynamically traceable to ONNX, + # since the related augmentation was done with numpy under + # CPU. Thus `masks` is directly created with zeros (valid tag) + # and the same spatial shape as `x`. + # The difference between torch and exported ONNX model may be + # ignored, since the same performance is achieved (e.g. + # 40.1 vs 40.1 for DETR) + batch_size = x.size(0) + h, w = x.size()[-2:] + masks = x.new_zeros((batch_size, h, w)) # [B,h,w] + + x = self.input_proj(x) + # interpolate masks to have the same spatial shape with x + masks = F.interpolate( + masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) + pos_embed = self.positional_encoding(masks) + outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, + pos_embed) + + all_cls_scores = self.fc_cls(outs_dec) + all_bbox_preds = self.fc_reg(self.activate( + self.reg_ffn(outs_dec))).sigmoid() + return all_cls_scores, all_bbox_preds + + def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_metas): + """Transform network outputs into bbox predictions, with ONNX + exportation. + + Args: + all_cls_scores_list (list[Tensor]): Classification outputs + for each feature level. Each is a 4D-tensor with shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds_list (list[Tensor]): Sigmoid regression + outputs for each feature level. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + img_metas (list[dict]): Meta information of each image. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. + """ + assert len(img_metas) == 1, \ + 'Only support one input image while in exporting to ONNX' + + cls_scores = all_cls_scores_list[-1][-1] + bbox_preds = all_bbox_preds_list[-1][-1] + + # Note `img_shape` is not dynamically traceable to ONNX, + # here `img_shape_for_onnx` (padded shape of image tensor) + # is used. 
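Both `_get_bboxes_single` above and the ONNX export path that follows decode sigmoid classification scores with a single flattened top-k over the (query, class) grid, then recover the class and query indices by modulo and integer division. A tiny self-contained sketch of that trick, with hypothetical sizes:

```python
import torch

# Hypothetical sizes; in the head these are num_query, num_classes, max_per_img.
num_query, num_classes, max_per_img = 6, 3, 4
cls_score = torch.rand(num_query, num_classes).sigmoid()

# Flatten query x class scores, take one top-k over both dimensions, then
# recover which class and which query each selected score came from.
scores, indexes = cls_score.view(-1).topk(max_per_img)
det_labels = indexes % num_classes   # class id of each kept prediction
bbox_index = indexes // num_classes  # query id, used to gather bbox_pred rows

bbox_pred = torch.rand(num_query, 4)  # stand-in for the decoder box outputs
kept_boxes = bbox_pred[bbox_index]
print(det_labels, bbox_index, kept_boxes.shape)  # -> torch.Size([4, 4])
```

In the batched ONNX variant below, the same idea is applied per image, with `batch_index_offset` shifting the gathered indices into the concatenated tensor.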
+ img_shape = img_metas[0]['img_shape_for_onnx'] + max_per_img = self.test_cfg.get('max_per_img', self.num_query) + batch_size = cls_scores.size(0) + # `batch_index_offset` is used for the gather of concatenated tensor + batch_index_offset = torch.arange(batch_size).to( + cls_scores.device) * max_per_img + batch_index_offset = batch_index_offset.unsqueeze(1).expand( + batch_size, max_per_img) + + # supports dynamical batch inference + if self.loss_cls.use_sigmoid: + cls_scores = cls_scores.sigmoid() + scores, indexes = cls_scores.view(batch_size, -1).topk( + max_per_img, dim=1) + det_labels = indexes % self.num_classes + bbox_index = indexes // self.num_classes + bbox_index = (bbox_index + batch_index_offset).view(-1) + bbox_preds = bbox_preds.view(-1, 4)[bbox_index] + bbox_preds = bbox_preds.view(batch_size, -1, 4) + else: + scores, det_labels = F.softmax( + cls_scores, dim=-1)[..., :-1].max(-1) + scores, bbox_index = scores.topk(max_per_img, dim=1) + bbox_index = (bbox_index + batch_index_offset).view(-1) + bbox_preds = bbox_preds.view(-1, 4)[bbox_index] + det_labels = det_labels.view(-1)[bbox_index] + bbox_preds = bbox_preds.view(batch_size, -1, 4) + det_labels = det_labels.view(batch_size, -1) + + det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds) + # use `img_shape_tensor` for dynamically exporting to ONNX + img_shape_tensor = img_shape.flip(0).repeat(2) # [w,h,w,h] + img_shape_tensor = img_shape_tensor.unsqueeze(0).unsqueeze(0).expand( + batch_size, det_bboxes.size(1), 4) + det_bboxes = det_bboxes * img_shape_tensor + # dynamically clip bboxes + x1, y1, x2, y2 = det_bboxes.split((1, 1, 1, 1), dim=-1) + from mmdet.core.export import dynamic_clip_for_onnx + x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, img_shape) + det_bboxes = torch.cat([x1, y1, x2, y2], dim=-1) + det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1) + + return det_bboxes, det_labels diff --git a/downstream/mmdetection/mmdet/models/dense_heads/embedding_rpn_head.py b/downstream/mmdetection/mmdet/models/dense_heads/embedding_rpn_head.py new file mode 100644 index 0000000..22060b9 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/embedding_rpn_head.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.runner import BaseModule + +from mmdet.models.builder import HEADS +from ...core import bbox_cxcywh_to_xyxy + + +@HEADS.register_module() +class EmbeddingRPNHead(BaseModule): + """RPNHead in the `Sparse R-CNN `_ . + + Unlike traditional RPNHead, this module does not need FPN input, but just + decode `init_proposal_bboxes` and expand the first dimension of + `init_proposal_bboxes` and `init_proposal_features` to the batch_size. + + Args: + num_proposals (int): Number of init_proposals. Default 100. + proposal_feature_channel (int): Channel number of + init_proposal_feature. Defaults to 256. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + num_proposals=100, + proposal_feature_channel=256, + init_cfg=None, + **kwargs): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(EmbeddingRPNHead, self).__init__(init_cfg) + self.num_proposals = num_proposals + self.proposal_feature_channel = proposal_feature_channel + self._init_layers() + + def _init_layers(self): + """Initialize a sparse set of proposal boxes and proposal features.""" + self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4) + self.init_proposal_features = nn.Embedding( + self.num_proposals, self.proposal_feature_channel) + + def init_weights(self): + """Initialize the init_proposal_bboxes as normalized. + + [c_x, c_y, w, h], and we initialize it to the size of the entire + image. + """ + super(EmbeddingRPNHead, self).init_weights() + nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5) + nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1) + + def _decode_init_proposals(self, imgs, img_metas): + """Decode init_proposal_bboxes according to the size of images and + expand dimension of init_proposal_features to batch_size. + + Args: + imgs (list[Tensor]): List of FPN features. + img_metas (list[dict]): List of meta-information of + images. Need the img_shape to decode the init_proposals. + + Returns: + Tuple(Tensor): + + - proposals (Tensor): Decoded proposal bboxes, + has shape (batch_size, num_proposals, 4). + - init_proposal_features (Tensor): Expanded proposal + features, has shape + (batch_size, num_proposals, proposal_feature_channel). + - imgs_whwh (Tensor): Tensor with shape + (batch_size, 4), the dimension means + [img_width, img_height, img_width, img_height]. + """ + proposals = self.init_proposal_bboxes.weight.clone() + proposals = bbox_cxcywh_to_xyxy(proposals) + num_imgs = len(imgs[0]) + imgs_whwh = [] + for meta in img_metas: + h, w, _ = meta['img_shape'] + imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]])) + imgs_whwh = torch.cat(imgs_whwh, dim=0) + imgs_whwh = imgs_whwh[:, None, :] + + # imgs_whwh has shape (batch_size, 1, 4) + # The shape of proposals change from (num_proposals, 4) + # to (batch_size ,num_proposals, 4) + proposals = proposals * imgs_whwh + + init_proposal_features = self.init_proposal_features.weight.clone() + init_proposal_features = init_proposal_features[None].expand( + num_imgs, *init_proposal_features.size()) + return proposals, init_proposal_features, imgs_whwh + + def forward_dummy(self, img, img_metas): + """Dummy forward function. + + Used in flops calculation. + """ + return self._decode_init_proposals(img, img_metas) + + def forward_train(self, img, img_metas): + """Forward function in training stage.""" + return self._decode_init_proposals(img, img_metas) + + def simple_test_rpn(self, img, img_metas): + """Forward function in testing stage.""" + return self._decode_init_proposals(img, img_metas) + + def simple_test(self, img, img_metas): + """Forward function in testing stage.""" + raise NotImplementedError + + def aug_test_rpn(self, feats, img_metas): + raise NotImplementedError( + 'EmbeddingRPNHead does not support test-time augmentation') diff --git a/downstream/mmdetection/mmdet/models/dense_heads/fcos_head.py b/downstream/mmdetection/mmdet/models/dense_heads/fcos_head.py new file mode 100644 index 0000000..d72fb56 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/fcos_head.py @@ -0,0 +1,455 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
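Before the FCOS head that follows, a quick check of what the sparse proposals defined by `EmbeddingRPNHead` above decode to at initialization: every proposal starts as a normalized (cx, cy, w, h) = (0.5, 0.5, 1, 1) box, so after scaling by `imgs_whwh` it covers the whole image. A minimal sketch with a hypothetical image size and a local copy of the cxcywh-to-xyxy math:

```python
import torch

def cxcywh_to_xyxy_sketch(boxes):
    """(cx, cy, w, h) -> (x1, y1, x2, y2); mirrors bbox_cxcywh_to_xyxy."""
    cx, cy, w, h = boxes.unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)

# Initial proposal as set by EmbeddingRPNHead.init_weights().
init_proposals = torch.tensor([[0.5, 0.5, 1.0, 1.0]])
img_h, img_w = 480, 640  # hypothetical padded image size
imgs_whwh = torch.tensor([[img_w, img_h, img_w, img_h]], dtype=torch.float)

decoded = cxcywh_to_xyxy_sketch(init_proposals) * imgs_whwh
print(decoded)  # tensor([[  0.,   0., 640., 480.]]) -> a full-image proposal
```

Training then moves these learned embeddings away from the full-image prior, which is why no FPN input is needed to produce the initial proposals.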
+import warnings + +import torch +import torch.nn as nn +from mmcv.cnn import Scale +from mmcv.runner import force_fp32 + +from mmdet.core import multi_apply, reduce_mean +from ..builder import HEADS, build_loss +from .anchor_free_head import AnchorFreeHead + +INF = 1e8 + + +@HEADS.register_module() +class FCOSHead(AnchorFreeHead): + """Anchor-free head used in `FCOS `_. + + The FCOS head does not use anchor boxes. Instead bounding boxes are + predicted at each pixel and a centerness measure is used to suppress + low-quality predictions. + Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training + tricks used in official repo, which will bring remarkable mAP gains + of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for + more detail. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + strides (list[int] | list[tuple[int, int]]): Strides of points + in multiple feature levels. Default: (4, 8, 16, 32, 64). + regress_ranges (tuple[tuple[int, int]]): Regress range of multiple + level points. + center_sampling (bool): If true, use center sampling. Default: False. + center_sample_radius (float): Radius of center sampling. Default: 1.5. + norm_on_bbox (bool): If true, normalize the regression targets + with FPN strides. Default: False. + centerness_on_reg (bool): If true, position centerness on the + regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. + Default: False. + conv_bias (bool | str): If specified as `auto`, it will be decided by the + norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise + False. Default: "auto". + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + loss_centerness (dict): Config of centerness loss. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ + Example: + >>> self = FCOSHead(11, 7) + >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] + >>> cls_score, bbox_pred, centerness = self.forward(feats) + >>> assert len(cls_score) == len(self.scales) + """ # noqa: E501 + + def __init__(self, + num_classes, + in_channels, + regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), + (512, INF)), + center_sampling=False, + center_sample_radius=1.5, + norm_on_bbox=False, + centerness_on_reg=False, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='conv_cls', + std=0.01, + bias_prob=0.01)), + **kwargs): + self.regress_ranges = regress_ranges + self.center_sampling = center_sampling + self.center_sample_radius = center_sample_radius + self.norm_on_bbox = norm_on_bbox + self.centerness_on_reg = centerness_on_reg + super().__init__( + num_classes, + in_channels, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + norm_cfg=norm_cfg, + init_cfg=init_cfg, + **kwargs) + self.loss_centerness = build_loss(loss_centerness) + + def _init_layers(self): + """Initialize layers of the head.""" + super()._init_layers() + self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) + self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: + cls_scores (list[Tensor]): Box scores for each scale level, \ + each is a 4D-tensor, the channel number is \ + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each \ + scale level, each is a 4D-tensor, the channel number is \ + num_points * 4. + centernesses (list[Tensor]): centerness for each scale level, \ + each is a 4D-tensor, the channel number is num_points * 1. + """ + return multi_apply(self.forward_single, feats, self.scales, + self.strides) + + def forward_single(self, x, scale, stride): + """Forward features of a single scale level. + + Args: + x (Tensor): FPN feature maps of the specified stride. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + stride (int): The corresponding stride for feature maps, only + used to normalize the bbox prediction when self.norm_on_bbox + is True. + + Returns: + tuple: scores for each class, bbox predictions and centerness \ + predictions of input feature maps. + """ + cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x) + if self.centerness_on_reg: + centerness = self.conv_centerness(reg_feat) + else: + centerness = self.conv_centerness(cls_feat) + # scale the bbox_pred of different level + # float to avoid overflow when enabling FP16 + bbox_pred = scale(bbox_pred).float() + if self.norm_on_bbox: + # bbox_pred needed for gradient computation has been modified + # by F.relu(bbox_pred) when run with PyTorch 1.10. 
So replace + # F.relu(bbox_pred) with bbox_pred.clamp(min=0) + bbox_pred = bbox_pred.clamp(min=0) + if not self.training: + bbox_pred *= stride + else: + bbox_pred = bbox_pred.exp() + return cls_score, bbox_pred, centerness + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) + def loss(self, + cls_scores, + bbox_preds, + centernesses, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * 4. + centernesses (list[Tensor]): centerness for each scale level, each + is a 4D-tensor, the channel number is num_points * 1. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert len(cls_scores) == len(bbox_preds) == len(centernesses) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + all_level_points = self.prior_generator.grid_priors( + featmap_sizes, + dtype=bbox_preds[0].dtype, + device=bbox_preds[0].device) + labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes, + gt_labels) + + num_imgs = cls_scores[0].size(0) + # flatten cls_scores, bbox_preds and centerness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + for bbox_pred in bbox_preds + ] + flatten_centerness = [ + centerness.permute(0, 2, 3, 1).reshape(-1) + for centerness in centernesses + ] + flatten_cls_scores = torch.cat(flatten_cls_scores) + flatten_bbox_preds = torch.cat(flatten_bbox_preds) + flatten_centerness = torch.cat(flatten_centerness) + flatten_labels = torch.cat(labels) + flatten_bbox_targets = torch.cat(bbox_targets) + # repeat points to align with bbox_preds + flatten_points = torch.cat( + [points.repeat(num_imgs, 1) for points in all_level_points]) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((flatten_labels >= 0) + & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) + num_pos = torch.tensor( + len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) + num_pos = max(reduce_mean(num_pos), 1.0) + loss_cls = self.loss_cls( + flatten_cls_scores, flatten_labels, avg_factor=num_pos) + + pos_bbox_preds = flatten_bbox_preds[pos_inds] + pos_centerness = flatten_centerness[pos_inds] + pos_bbox_targets = flatten_bbox_targets[pos_inds] + pos_centerness_targets = self.centerness_target(pos_bbox_targets) + # centerness weighted iou loss + centerness_denorm = max( + reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) + + if len(pos_inds) > 0: + pos_points = flatten_points[pos_inds] + pos_decoded_bbox_preds = self.bbox_coder.decode( + pos_points, pos_bbox_preds) + pos_decoded_target_preds = self.bbox_coder.decode( + pos_points, pos_bbox_targets) + loss_bbox = self.loss_bbox( + pos_decoded_bbox_preds, + pos_decoded_target_preds, 
+ weight=pos_centerness_targets, + avg_factor=centerness_denorm) + loss_centerness = self.loss_centerness( + pos_centerness, pos_centerness_targets, avg_factor=num_pos) + else: + loss_bbox = pos_bbox_preds.sum() + loss_centerness = pos_centerness.sum() + + return dict( + loss_cls=loss_cls, + loss_bbox=loss_bbox, + loss_centerness=loss_centerness) + + def get_targets(self, points, gt_bboxes_list, gt_labels_list): + """Compute regression, classification and centerness targets for points + in multiple images. + + Args: + points (list[Tensor]): Points of each fpn level, each has shape + (num_points, 2). + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, + each has shape (num_gt, 4). + gt_labels_list (list[Tensor]): Ground truth labels of each box, + each has shape (num_gt,). + + Returns: + tuple: + concat_lvl_labels (list[Tensor]): Labels of each level. \ + concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ + level. + """ + assert len(points) == len(self.regress_ranges) + num_levels = len(points) + # expand regress ranges to align with points + expanded_regress_ranges = [ + points[i].new_tensor(self.regress_ranges[i])[None].expand_as( + points[i]) for i in range(num_levels) + ] + # concat all levels points and regress ranges + concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) + concat_points = torch.cat(points, dim=0) + + # the number of points per img, per lvl + num_points = [center.size(0) for center in points] + + # get labels and bbox_targets of each image + labels_list, bbox_targets_list = multi_apply( + self._get_target_single, + gt_bboxes_list, + gt_labels_list, + points=concat_points, + regress_ranges=concat_regress_ranges, + num_points_per_lvl=num_points) + + # split to per img, per level + labels_list = [labels.split(num_points, 0) for labels in labels_list] + bbox_targets_list = [ + bbox_targets.split(num_points, 0) + for bbox_targets in bbox_targets_list + ] + + # concat per level image + concat_lvl_labels = [] + concat_lvl_bbox_targets = [] + for i in range(num_levels): + concat_lvl_labels.append( + torch.cat([labels[i] for labels in labels_list])) + bbox_targets = torch.cat( + [bbox_targets[i] for bbox_targets in bbox_targets_list]) + if self.norm_on_bbox: + bbox_targets = bbox_targets / self.strides[i] + concat_lvl_bbox_targets.append(bbox_targets) + return concat_lvl_labels, concat_lvl_bbox_targets + + def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges, + num_points_per_lvl): + """Compute regression and classification targets for a single image.""" + num_points = points.size(0) + num_gts = gt_labels.size(0) + if num_gts == 0: + return gt_labels.new_full((num_points,), self.num_classes), \ + gt_bboxes.new_zeros((num_points, 4)) + + areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1]) + # TODO: figure out why these two are different + # areas = areas[None].expand(num_points, num_gts) + areas = areas[None].repeat(num_points, 1) + regress_ranges = regress_ranges[:, None, :].expand( + num_points, num_gts, 2) + gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) + xs, ys = points[:, 0], points[:, 1] + xs = xs[:, None].expand(num_points, num_gts) + ys = ys[:, None].expand(num_points, num_gts) + + left = xs - gt_bboxes[..., 0] + right = gt_bboxes[..., 2] - xs + top = ys - gt_bboxes[..., 1] + bottom = gt_bboxes[..., 3] - ys + bbox_targets = torch.stack((left, top, right, bottom), -1) + + if self.center_sampling: + # condition1: inside a `center bbox` + radius = 
self.center_sample_radius + center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 + center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 + center_gts = torch.zeros_like(gt_bboxes) + stride = center_xs.new_zeros(center_xs.shape) + + # project the points on current lvl back to the `original` sizes + lvl_begin = 0 + for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): + lvl_end = lvl_begin + num_points_lvl + stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius + lvl_begin = lvl_end + + x_mins = center_xs - stride + y_mins = center_ys - stride + x_maxs = center_xs + stride + y_maxs = center_ys + stride + center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], + x_mins, gt_bboxes[..., 0]) + center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], + y_mins, gt_bboxes[..., 1]) + center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2], + gt_bboxes[..., 2], x_maxs) + center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], + gt_bboxes[..., 3], y_maxs) + + cb_dist_left = xs - center_gts[..., 0] + cb_dist_right = center_gts[..., 2] - xs + cb_dist_top = ys - center_gts[..., 1] + cb_dist_bottom = center_gts[..., 3] - ys + center_bbox = torch.stack( + (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) + inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 + else: + # condition1: inside a gt bbox + inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 + + # condition2: limit the regression range for each location + max_regress_distance = bbox_targets.max(-1)[0] + inside_regress_range = ( + (max_regress_distance >= regress_ranges[..., 0]) + & (max_regress_distance <= regress_ranges[..., 1])) + + # if there are still more than one objects for a location, + # we choose the one with minimal area + areas[inside_gt_bbox_mask == 0] = INF + areas[inside_regress_range == 0] = INF + min_area, min_area_inds = areas.min(dim=1) + + labels = gt_labels[min_area_inds] + labels[min_area == INF] = self.num_classes # set as BG + bbox_targets = bbox_targets[range(num_points), min_area_inds] + + return labels, bbox_targets + + def centerness_target(self, pos_bbox_targets): + """Compute centerness targets. + + Args: + pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape + (num_pos, 4) + + Returns: + Tensor: Centerness target. + """ + # only calculate pos centerness targets, otherwise there may be nan + left_right = pos_bbox_targets[:, [0, 2]] + top_bottom = pos_bbox_targets[:, [1, 3]] + if len(left_right) == 0: + centerness_targets = left_right[..., 0] + else: + centerness_targets = ( + left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( + top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) + return torch.sqrt(centerness_targets) + + def _get_points_single(self, + featmap_size, + stride, + dtype, + device, + flatten=False): + """Get points according to feature map size. + + This function will be deprecated soon. 
+ """ + warnings.warn( + '`_get_points_single` in `FCOSHead` will be ' + 'deprecated soon, we support a multi level point generator now' + 'you can get points of a single level feature map ' + 'with `self.prior_generator.single_level_grid_priors` ') + + y, x = super()._get_points_single(featmap_size, stride, dtype, device) + points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), + dim=-1) + stride // 2 + return points diff --git a/downstream/mmdetection/mmdet/models/dense_heads/fovea_head.py b/downstream/mmdetection/mmdet/models/dense_heads/fovea_head.py new file mode 100644 index 0000000..8be7fc9 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/fovea_head.py @@ -0,0 +1,385 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.ops import DeformConv2d +from mmcv.runner import BaseModule + +from mmdet.core import multi_apply +from mmdet.core.utils import filter_scores_and_topk +from ..builder import HEADS +from .anchor_free_head import AnchorFreeHead + +INF = 1e8 + + +class FeatureAlign(BaseModule): + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + deform_groups=4, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.1, + override=dict( + type='Normal', name='conv_adaption', std=0.01))): + super(FeatureAlign, self).__init__(init_cfg) + offset_channels = kernel_size * kernel_size * 2 + self.conv_offset = nn.Conv2d( + 4, deform_groups * offset_channels, 1, bias=False) + self.conv_adaption = DeformConv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + padding=(kernel_size - 1) // 2, + deform_groups=deform_groups) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shape): + offset = self.conv_offset(shape) + x = self.relu(self.conv_adaption(x, offset)) + return x + + +@HEADS.register_module() +class FoveaHead(AnchorFreeHead): + """FoveaBox: Beyond Anchor-based Object Detector + https://arxiv.org/abs/1904.03797 + """ + + def __init__(self, + num_classes, + in_channels, + base_edge_list=(16, 32, 64, 128, 256), + scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, + 512)), + sigma=0.4, + with_deform=False, + deform_groups=4, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='conv_cls', + std=0.01, + bias_prob=0.01)), + **kwargs): + self.base_edge_list = base_edge_list + self.scale_ranges = scale_ranges + self.sigma = sigma + self.with_deform = with_deform + self.deform_groups = deform_groups + super().__init__(num_classes, in_channels, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + # box branch + super()._init_reg_convs() + self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) + + # cls branch + if not self.with_deform: + super()._init_cls_convs() + self.conv_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + else: + self.cls_convs = nn.ModuleList() + self.cls_convs.append( + ConvModule( + self.feat_channels, (self.feat_channels * 4), + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.norm_cfg is None)) + self.cls_convs.append( + ConvModule((self.feat_channels * 4), (self.feat_channels * 4), + 1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.norm_cfg is None)) + self.feature_adaption = FeatureAlign( + self.feat_channels, + self.feat_channels, + kernel_size=3, + deform_groups=self.deform_groups) + self.conv_cls = 
nn.Conv2d( + int(self.feat_channels * 4), + self.cls_out_channels, + 3, + padding=1) + + def forward_single(self, x): + cls_feat = x + reg_feat = x + for reg_layer in self.reg_convs: + reg_feat = reg_layer(reg_feat) + bbox_pred = self.conv_reg(reg_feat) + if self.with_deform: + cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp()) + for cls_layer in self.cls_convs: + cls_feat = cls_layer(cls_feat) + cls_score = self.conv_cls(cls_feat) + return cls_score, bbox_pred + + def loss(self, + cls_scores, + bbox_preds, + gt_bbox_list, + gt_label_list, + img_metas, + gt_bboxes_ignore=None): + assert len(cls_scores) == len(bbox_preds) + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + points = self.prior_generator.grid_priors( + featmap_sizes, + dtype=bbox_preds[0].dtype, + device=bbox_preds[0].device) + num_imgs = cls_scores[0].size(0) + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + for bbox_pred in bbox_preds + ] + flatten_cls_scores = torch.cat(flatten_cls_scores) + flatten_bbox_preds = torch.cat(flatten_bbox_preds) + flatten_labels, flatten_bbox_targets = self.get_targets( + gt_bbox_list, gt_label_list, featmap_sizes, points) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + pos_inds = ((flatten_labels >= 0) + & (flatten_labels < self.num_classes)).nonzero().view(-1) + num_pos = len(pos_inds) + + loss_cls = self.loss_cls( + flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) + if num_pos > 0: + pos_bbox_preds = flatten_bbox_preds[pos_inds] + pos_bbox_targets = flatten_bbox_targets[pos_inds] + pos_weights = pos_bbox_targets.new_zeros( + pos_bbox_targets.size()) + 1.0 + loss_bbox = self.loss_bbox( + pos_bbox_preds, + pos_bbox_targets, + pos_weights, + avg_factor=num_pos) + else: + loss_bbox = torch.tensor( + 0, + dtype=flatten_bbox_preds.dtype, + device=flatten_bbox_preds.device) + return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) + + def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points): + label_list, bbox_target_list = multi_apply( + self._get_target_single, + gt_bbox_list, + gt_label_list, + featmap_size_list=featmap_sizes, + point_list=points) + flatten_labels = [ + torch.cat([ + labels_level_img.flatten() for labels_level_img in labels_level + ]) for labels_level in zip(*label_list) + ] + flatten_bbox_targets = [ + torch.cat([ + bbox_targets_level_img.reshape(-1, 4) + for bbox_targets_level_img in bbox_targets_level + ]) for bbox_targets_level in zip(*bbox_target_list) + ] + flatten_labels = torch.cat(flatten_labels) + flatten_bbox_targets = torch.cat(flatten_bbox_targets) + return flatten_labels, flatten_bbox_targets + + def _get_target_single(self, + gt_bboxes_raw, + gt_labels_raw, + featmap_size_list=None, + point_list=None): + + gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * + (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])) + label_list = [] + bbox_target_list = [] + # for each pyramid, find the cls and box target + for base_len, (lower_bound, upper_bound), stride, featmap_size, \ + points in zip(self.base_edge_list, self.scale_ranges, + self.strides, featmap_size_list, point_list): + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + points = points.view(*featmap_size, 2) + x, y = points[..., 0], points[..., 1] + labels = gt_labels_raw.new_zeros(featmap_size) + self.num_classes + bbox_targets = gt_bboxes_raw.new(featmap_size[0], 
featmap_size[1], + 4) + 1 + # scale assignment + hit_indices = ((gt_areas >= lower_bound) & + (gt_areas <= upper_bound)).nonzero().flatten() + if len(hit_indices) == 0: + label_list.append(labels) + bbox_target_list.append(torch.log(bbox_targets)) + continue + _, hit_index_order = torch.sort(-gt_areas[hit_indices]) + hit_indices = hit_indices[hit_index_order] + gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride + gt_labels = gt_labels_raw[hit_indices] + half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) + half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) + # valid fovea area: left, right, top, down + pos_left = torch.ceil( + gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \ + clamp(0, featmap_size[1] - 1) + pos_right = torch.floor( + gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \ + clamp(0, featmap_size[1] - 1) + pos_top = torch.ceil( + gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \ + clamp(0, featmap_size[0] - 1) + pos_down = torch.floor( + gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). \ + clamp(0, featmap_size[0] - 1) + for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \ + zip(pos_left, pos_top, pos_right, pos_down, gt_labels, + gt_bboxes_raw[hit_indices, :]): + labels[py1:py2 + 1, px1:px2 + 1] = label + bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \ + (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len + bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \ + (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len + bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \ + (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len + bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \ + (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len + bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.) + label_list.append(labels) + bbox_target_list.append(torch.log(bbox_targets)) + return label_list, bbox_target_list + + # Same as base_dense_head/_get_bboxes_single except self._bbox_decode + def _get_bboxes_single(self, + cls_score_list, + bbox_pred_list, + score_factor_list, + mlvl_priors, + img_meta, + cfg, + rescale=False, + with_nms=True, + **kwargs): + """Transform outputs of a single image into bbox predictions. + + Args: + cls_score_list (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_priors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas from + all scale levels of a single image, each item has shape + (num_priors * 4, H, W). + score_factor_list (list[Tensor]): Score factor from all scale + levels of a single image. Fovea head does not need this value. + mlvl_priors (list[Tensor]): Each element in the list is + the priors of a single level in feature pyramid, has shape + (num_priors, 2). + img_meta (dict): Image meta info. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. If with_nms + is False and mlvl_score_factor is None, return mlvl_bboxes and + mlvl_scores, else return mlvl_bboxes, mlvl_scores and + mlvl_score_factor. Usually with_nms is False is used for aug + test. If with_nms is True, then return the following format + + - det_bboxes (Tensor): Predicted bboxes with shape \ + [num_bboxes, 5], where the first 4 columns are bounding \ + box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ + column are scores between 0 and 1. 
+ - det_labels (Tensor): Predicted labels of the corresponding \ + box with shape [num_bboxes]. + """ + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_score_list) == len(bbox_pred_list) + img_shape = img_meta['img_shape'] + nms_pre = cfg.get('nms_pre', -1) + + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_labels = [] + for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \ + enumerate(zip(cls_score_list, bbox_pred_list, self.strides, + self.base_edge_list, mlvl_priors)): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + + scores = cls_score.permute(1, 2, 0).reshape( + -1, self.cls_out_channels).sigmoid() + + # After https://github.com/open-mmlab/mmdetection/pull/6268/, + # this operation keeps fewer bboxes under the same `nms_pre`. + # There is no difference in performance for most models. If you + # find a slight drop in performance, you can set a larger + # `nms_pre` than before. + results = filter_scores_and_topk( + scores, cfg.score_thr, nms_pre, + dict(bbox_pred=bbox_pred, priors=priors)) + scores, labels, _, filtered_results = results + + bbox_pred = filtered_results['bbox_pred'] + priors = filtered_results['priors'] + + bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape) + + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_labels.append(labels) + + return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, + img_meta['scale_factor'], cfg, rescale, + with_nms) + + def _bbox_decode(self, priors, bbox_pred, base_len, max_shape): + bbox_pred = bbox_pred.exp() + + y = priors[:, 1] + x = priors[:, 0] + x1 = (x - base_len * bbox_pred[:, 0]). \ + clamp(min=0, max=max_shape[1] - 1) + y1 = (y - base_len * bbox_pred[:, 1]). \ + clamp(min=0, max=max_shape[0] - 1) + x2 = (x + base_len * bbox_pred[:, 2]). \ + clamp(min=0, max=max_shape[1] - 1) + y2 = (y + base_len * bbox_pred[:, 3]). \ + clamp(min=0, max=max_shape[0] - 1) + decoded_bboxes = torch.stack([x1, y1, x2, y2], -1) + return decoded_bboxes + + def _get_points_single(self, *args, **kwargs): + """Get points according to feature map size. + + This function will be deprecated soon. + """ + warnings.warn( + '`_get_points_single` in `FoveaHead` will be ' + 'deprecated soon, we support a multi level point generator now' + 'you can get points of a single level feature map ' + 'with `self.prior_generator.single_level_grid_priors` ') + y, x = super()._get_points_single(*args, **kwargs) + return y + 0.5, x + 0.5 diff --git a/downstream/mmdetection/mmdet/models/dense_heads/free_anchor_retina_head.py b/downstream/mmdetection/mmdet/models/dense_heads/free_anchor_retina_head.py new file mode 100644 index 0000000..3acd25e --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/free_anchor_retina_head.py @@ -0,0 +1,272 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F + +from mmdet.core import bbox_overlaps +from ..builder import HEADS +from .retina_head import RetinaHead + +EPS = 1e-12 + + +@HEADS.register_module() +class FreeAnchorRetinaHead(RetinaHead): + """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + stacked_convs (int): Number of conv layers in cls and reg tower. + Default: 4. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None. 
+ norm_cfg (dict): dictionary to construct and config norm layer. + Default: norm_cfg=dict(type='GN', num_groups=32, + requires_grad=True). + pre_anchor_topk (int): Number of boxes that be token in each bag. + bbox_thr (float): The threshold of the saturated linear function. It is + usually the same with the IoU threshold used in NMS. + gamma (float): Gamma parameter in focal loss. + alpha (float): Alpha parameter in focal loss. + """ # noqa: W605 + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + conv_cfg=None, + norm_cfg=None, + pre_anchor_topk=50, + bbox_thr=0.6, + gamma=2.0, + alpha=0.5, + **kwargs): + super(FreeAnchorRetinaHead, + self).__init__(num_classes, in_channels, stacked_convs, conv_cfg, + norm_cfg, **kwargs) + + self.pre_anchor_topk = pre_anchor_topk + self.bbox_thr = bbox_thr + self.gamma = gamma + self.alpha = alpha + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + device = cls_scores[0].device + anchor_list, _ = self.get_anchors( + featmap_sizes, img_metas, device=device) + anchors = [torch.cat(anchor) for anchor in anchor_list] + + # concatenate each level + cls_scores = [ + cls.permute(0, 2, 3, + 1).reshape(cls.size(0), -1, self.cls_out_channels) + for cls in cls_scores + ] + bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) + for bbox_pred in bbox_preds + ] + cls_scores = torch.cat(cls_scores, dim=1) + bbox_preds = torch.cat(bbox_preds, dim=1) + + cls_prob = torch.sigmoid(cls_scores) + box_prob = [] + num_pos = 0 + positive_losses = [] + for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, + bbox_preds_) in enumerate( + zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)): + + with torch.no_grad(): + if len(gt_bboxes_) == 0: + image_box_prob = torch.zeros( + anchors_.size(0), + self.cls_out_channels).type_as(bbox_preds_) + else: + # box_localization: a_{j}^{loc}, shape: [j, 4] + pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_) + + # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] + object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes) + + # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] + t1 = self.bbox_thr + t2 = object_box_iou.max( + dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) + object_box_prob = ((object_box_iou - t1) / + (t2 - t1)).clamp( + min=0, max=1) + + # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] + num_obj = gt_labels_.size(0) + indices = torch.stack([ + torch.arange(num_obj).type_as(gt_labels_), gt_labels_ + ], + dim=0) + object_cls_box_prob = torch.sparse_coo_tensor( + indices, object_box_prob) + + # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] + """ + from 
"start" to "end" implement: + image_box_iou = torch.sparse.max(object_cls_box_prob, + dim=0).t() + + """ + # start + box_cls_prob = torch.sparse.sum( + object_cls_box_prob, dim=0).to_dense() + + indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() + if indices.numel() == 0: + image_box_prob = torch.zeros( + anchors_.size(0), + self.cls_out_channels).type_as(object_box_prob) + else: + nonzero_box_prob = torch.where( + (gt_labels_.unsqueeze(dim=-1) == indices[0]), + object_box_prob[:, indices[1]], + torch.tensor([ + 0 + ]).type_as(object_box_prob)).max(dim=0).values + + # upmap to shape [j, c] + image_box_prob = torch.sparse_coo_tensor( + indices.flip([0]), + nonzero_box_prob, + size=(anchors_.size(0), + self.cls_out_channels)).to_dense() + # end + + box_prob.append(image_box_prob) + + # construct bags for objects + match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_) + _, matched = torch.topk( + match_quality_matrix, + self.pre_anchor_topk, + dim=1, + sorted=False) + del match_quality_matrix + + # matched_cls_prob: P_{ij}^{cls} + matched_cls_prob = torch.gather( + cls_prob_[matched], 2, + gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, + 1)).squeeze(2) + + # matched_box_prob: P_{ij}^{loc} + matched_anchors = anchors_[matched] + matched_object_targets = self.bbox_coder.encode( + matched_anchors, + gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors)) + loss_bbox = self.loss_bbox( + bbox_preds_[matched], + matched_object_targets, + reduction_override='none').sum(-1) + matched_box_prob = torch.exp(-loss_bbox) + + # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} + num_pos += len(gt_bboxes_) + positive_losses.append( + self.positive_bag_loss(matched_cls_prob, matched_box_prob)) + positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) + + # box_prob: P{a_{j} \in A_{+}} + box_prob = torch.stack(box_prob, dim=0) + + # negative_loss: + # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| + negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max( + 1, num_pos * self.pre_anchor_topk) + + # avoid the absence of gradients in regression subnet + # when no ground-truth in a batch + if num_pos == 0: + positive_loss = bbox_preds.sum() * 0 + + losses = { + 'positive_bag_loss': positive_loss, + 'negative_bag_loss': negative_loss + } + return losses + + def positive_bag_loss(self, matched_cls_prob, matched_box_prob): + """Compute positive bag loss. + + :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`. + + :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples. + + :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples. + + Args: + matched_cls_prob (Tensor): Classification probability of matched + samples in shape (num_gt, pre_anchor_topk). + matched_box_prob (Tensor): BBox probability of matched samples, + in shape (num_gt, pre_anchor_topk). + + Returns: + Tensor: Positive bag loss in shape (num_gt,). + """ # noqa: E501, W605 + # bag_prob = Mean-max(matched_prob) + matched_prob = matched_cls_prob * matched_box_prob + weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) + weight /= weight.sum(dim=1).unsqueeze(dim=-1) + bag_prob = (weight * matched_prob).sum(dim=1) + # positive_bag_loss = -self.alpha * log(bag_prob) + return self.alpha * F.binary_cross_entropy( + bag_prob, torch.ones_like(bag_prob), reduction='none') + + def negative_bag_loss(self, cls_prob, box_prob): + """Compute negative bag loss. + + :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`. 
+ + :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples. + + :math:`P_{j}^{bg}`: Classification probability of negative samples. + + Args: + cls_prob (Tensor): Classification probability, in shape + (num_img, num_anchors, num_classes). + box_prob (Tensor): Box probability, in shape + (num_img, num_anchors, num_classes). + + Returns: + Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes). + """ # noqa: E501, W605 + prob = cls_prob * (1 - box_prob) + # There are some cases when neg_prob = 0. + # This will cause the neg_prob.log() to be inf without clamp. + prob = prob.clamp(min=EPS, max=1 - EPS) + negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( + prob, torch.zeros_like(prob), reduction='none') + return (1 - self.alpha) * negative_bag_loss diff --git a/downstream/mmdetection/mmdet/models/dense_heads/fsaf_head.py b/downstream/mmdetection/mmdet/models/dense_heads/fsaf_head.py new file mode 100644 index 0000000..2d2b787 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/fsaf_head.py @@ -0,0 +1,433 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmcv.runner import force_fp32 + +from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply, + unmap) +from ..builder import HEADS +from ..losses.accuracy import accuracy +from ..losses.utils import weight_reduce_loss +from .retina_head import RetinaHead + + +@HEADS.register_module() +class FSAFHead(RetinaHead): + """Anchor-free head used in `FSAF `_. + + The head contains two subnetworks. The first classifies anchor boxes and + the second regresses deltas for the anchors (num_anchors is 1 for anchor- + free methods) + + Args: + *args: Same as its base class in :class:`RetinaHead` + score_threshold (float, optional): The score_threshold to calculate + positive recall. If given, prediction scores lower than this value + is counted as incorrect prediction. Default to None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + **kwargs: Same as its base class in :class:`RetinaHead` + + Example: + >>> import torch + >>> self = FSAFHead(11, 7) + >>> x = torch.rand(1, 7, 32, 32) + >>> cls_score, bbox_pred = self.forward_single(x) + >>> # Each anchor predicts a score for each class except background + >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors + >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors + >>> assert cls_per_anchor == self.num_classes + >>> assert box_per_anchor == 4 + """ + + def __init__(self, *args, score_threshold=None, init_cfg=None, **kwargs): + # The positive bias in self.retina_reg conv is to prevent predicted \ + # bbox with 0 area + if init_cfg is None: + init_cfg = dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=[ + dict( + type='Normal', + name='retina_cls', + std=0.01, + bias_prob=0.01), + dict( + type='Normal', name='retina_reg', std=0.01, bias=0.25) + ]) + super().__init__(*args, init_cfg=init_cfg, **kwargs) + self.score_threshold = score_threshold + + def forward_single(self, x): + """Forward feature map of a single scale level. + + Args: + x (Tensor): Feature map of a single scale level. + + Returns: + tuple (Tensor): + cls_score (Tensor): Box scores for each scale level + Has shape (N, num_points * num_classes, H, W). + bbox_pred (Tensor): Box energies / deltas for each scale + level with shape (N, num_points * 4, H, W). 
+ """ + cls_score, bbox_pred = super().forward_single(x) + # relu: TBLR encoder only accepts positive bbox_pred + return cls_score, self.relu(bbox_pred) + + def _get_targets_single(self, + flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True): + """Compute regression and classification targets for anchors in a + single image. + + Most of the codes are the same with the base class + :obj: `AnchorHead`, except that it also collects and returns + the matched gt index in the image (from 0 to num_gt-1). If the + anchor bbox is not matched to any gt, the corresponding value in + pos_gt_inds is -1. + """ + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 7 + # Assign gt and sample anchors + anchors = flat_anchors[inside_flags.type(torch.bool), :] + assign_result = self.assigner.assign( + anchors, gt_bboxes, gt_bboxes_ignore, + None if self.sampling else gt_labels) + + sampling_result = self.sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + labels = anchors.new_full((num_valid_anchors, ), + self.num_classes, + dtype=torch.long) + label_weights = anchors.new_zeros((num_valid_anchors, label_channels), + dtype=torch.float) + pos_gt_inds = anchors.new_full((num_valid_anchors, ), + -1, + dtype=torch.long) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + if len(pos_inds) > 0: + if not self.reg_decoded_bbox: + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + else: + # When the regression loss (e.g. `IouLoss`, `GIouLoss`) + # is applied directly on the decoded bounding boxes, both + # the predicted boxes and regression targets should be with + # absolute coordinate format. + pos_bbox_targets = sampling_result.pos_gt_bboxes + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + # The assigned gt_index for each anchor. (0-based) + pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # shadowed_labels is a tensor composed of tuples + # (anchor_inds, class_label) that indicate those anchors lying in the + # outer region of a gt or overlapped by another gt with a smaller + # area. + # + # Therefore, only the shadowed labels are ignored for loss calculation. 
+ # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner` + shadowed_labels = assign_result.get_extra_property('shadowed_labels') + if shadowed_labels is not None and shadowed_labels.numel(): + if len(shadowed_labels.shape) == 2: + idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1] + assert (labels[idx_] != label_).all(), \ + 'One label cannot be both positive and ignored' + label_weights[idx_, label_] = 0 + else: + label_weights[shadowed_labels] = 0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + labels = unmap(labels, num_total_anchors, inside_flags) + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + pos_gt_inds = unmap( + pos_gt_inds, num_total_anchors, inside_flags, fill=-1) + + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds, sampling_result, pos_gt_inds) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_points * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_points * 4, H, W). + gt_bboxes (list[Tensor]): each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + for i in range(len(bbox_preds)): # loop over fpn level + # avoid 0 area of the predicted bbox + bbox_preds[i] = bbox_preds[i].clamp(min=1e-4) + # TODO: It may directly use the base-class loss function. 
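The shadowed-label step in `_get_targets_single` above ignores individual (anchor, class) pairs rather than whole anchors, by zeroing only the matching entries of the per-class label weights. A minimal standalone sketch of that indexing pattern (illustrative tensors only, not part of the patch):

```python
import torch

# label_weights: one weight per (anchor, class) entry, as in FSAFHead
num_anchors, num_classes = 6, 3
label_weights = torch.ones(num_anchors, num_classes)

# shadowed_labels: rows of (anchor_index, class_label) pairs reported by
# the assigner; only these exact entries are ignored in the cls loss
shadowed_labels = torch.tensor([[1, 2], [4, 0]])
idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1]
label_weights[idx_, label_] = 0  # advanced indexing zeroes just those cells

print(label_weights)
```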
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + batch_size = len(gt_bboxes) + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg, + pos_assigned_gt_inds_list) = cls_reg_targets + + num_gts = np.array(list(map(len, gt_labels))) + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + # concat all level anchors and flags to a single tensor + concat_anchor_list = [] + for i in range(len(anchor_list)): + concat_anchor_list.append(torch.cat(anchor_list[i])) + all_anchor_list = images_to_levels(concat_anchor_list, + num_level_anchors) + losses_cls, losses_bbox = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + all_anchor_list, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples) + + # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned + # gt index of each anchor bbox in each fpn level. + cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size + for i, assign in enumerate(pos_assigned_gt_inds_list): + # loop over fpn levels + for j in range(1, batch_size): + # loop over batch size + # Convert gt indices in each img to those in the batch + assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1]) + pos_assigned_gt_inds_list[i] = assign.flatten() + labels_list[i] = labels_list[i].flatten() + num_gts = sum(map(len, gt_labels)) # total number of gt in the batch + # The unique label index of each gt in the batch + label_sequence = torch.arange(num_gts, device=device) + # Collect the average loss of each gt in each level + with torch.no_grad(): + loss_levels, = multi_apply( + self.collect_loss_level_single, + losses_cls, + losses_bbox, + pos_assigned_gt_inds_list, + labels_seq=label_sequence) + # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level + loss_levels = torch.stack(loss_levels, dim=0) + # Locate the best fpn level for loss back-propagation + if loss_levels.numel() == 0: # zero gt + argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long) + else: + _, argmin = loss_levels.min(dim=0) + + # Reweight the loss of each (anchor, label) pair, so that only those + # at the best gt level are back-propagated. 
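With the per-level, per-gt losses collected into `loss_levels` of shape `(fpn_levels, num_gts)`, FSAF's online feature selection reduces to an `argmin` over the level dimension: each ground-truth box is supervised only at the pyramid level where it is currently cheapest to fit. A small numeric sketch (illustrative values, not from the patch):

```python
import torch

# Average (cls + reg) loss of each gt at each FPN level: (3 levels, 2 gts)
loss_levels = torch.tensor([[0.9, 2.1],
                            [0.4, 1.7],
                            [1.2, 0.3]])
_, argmin = loss_levels.min(dim=0)
print(argmin)  # tensor([1, 2]): gt 0 keeps level 1, gt 1 keeps level 2;
               # anchors matched to a gt at any other level are down-weighted
```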
+ losses_cls, losses_bbox, pos_inds = multi_apply( + self.reweight_loss_single, + losses_cls, + losses_bbox, + pos_assigned_gt_inds_list, + labels_list, + list(range(len(losses_cls))), + min_levels=argmin) + num_pos = torch.cat(pos_inds, 0).sum().float() + pos_recall = self.calculate_pos_recall(cls_scores, labels_list, + pos_inds) + + if num_pos == 0: # No gt + avg_factor = num_pos + float(num_total_neg) + else: + avg_factor = num_pos + for i in range(len(losses_cls)): + losses_cls[i] /= avg_factor + losses_bbox[i] /= avg_factor + return dict( + loss_cls=losses_cls, + loss_bbox=losses_bbox, + num_pos=num_pos / batch_size, + pos_recall=pos_recall) + + def calculate_pos_recall(self, cls_scores, labels_list, pos_inds): + """Calculate positive recall with score threshold. + + Args: + cls_scores (list[Tensor]): Classification scores at all fpn levels. + Each tensor is in shape (N, num_classes * num_anchors, H, W) + labels_list (list[Tensor]): The label that each anchor is assigned + to. Shape (N * H * W * num_anchors, ) + pos_inds (list[Tensor]): List of bool tensors indicating whether + the anchor is assigned to a positive label. + Shape (N * H * W * num_anchors, ) + + Returns: + Tensor: A single float number indicating the positive recall. + """ + with torch.no_grad(): + num_class = self.num_classes + scores = [ + cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos] + for cls, pos in zip(cls_scores, pos_inds) + ] + labels = [ + label.reshape(-1)[pos] + for label, pos in zip(labels_list, pos_inds) + ] + scores = torch.cat(scores, dim=0) + labels = torch.cat(labels, dim=0) + if self.use_sigmoid_cls: + scores = scores.sigmoid() + else: + scores = scores.softmax(dim=1) + + return accuracy(scores, labels, thresh=self.score_threshold) + + def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds, + labels_seq): + """Get the average loss in each FPN level w.r.t. each gt label. + + Args: + cls_loss (Tensor): Classification loss of each feature map pixel, + shape (num_anchor, num_class) + reg_loss (Tensor): Regression loss of each feature map pixel, + shape (num_anchor, 4) + assigned_gt_inds (Tensor): It indicates which gt the prior is + assigned to (0-based, -1: no assignment). shape (num_anchor), + labels_seq: The rank of labels. shape (num_gt) + + Returns: + shape: (num_gt), average loss of each gt in this level + """ + if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4) + reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims + if len(cls_loss.shape) == 2: + cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims + loss = cls_loss + reg_loss + assert loss.size(0) == assigned_gt_inds.size(0) + # Default loss value is 1e6 for a layer where no anchor is positive + # to ensure it will not be chosen to back-propagate gradient + losses_ = loss.new_full(labels_seq.shape, 1e6) + for i, l in enumerate(labels_seq): + match = assigned_gt_inds == l + if match.any(): + losses_[i] = loss[match].mean() + return losses_, + + def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds, + labels, level, min_levels): + """Reweight loss values at each level. + + Reassign loss values at each level by masking those where the + pre-calculated loss is too large. Then return the reduced losses. + + Args: + cls_loss (Tensor): Element-wise classification loss. + Shape: (num_anchors, num_classes) + reg_loss (Tensor): Element-wise regression loss. + Shape: (num_anchors, 4) + assigned_gt_inds (Tensor): The gt indices that each anchor bbox + is assigned to. 
-1 denotes a negative anchor, otherwise it is the + gt index (0-based). Shape: (num_anchors, ), + labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ). + level (int): The current level index in the pyramid + (0-4 for RetinaNet) + min_levels (Tensor): The best-matching level for each gt. + Shape: (num_gts, ), + + Returns: + tuple: + - cls_loss: Reduced corrected classification loss. Scalar. + - reg_loss: Reduced corrected regression loss. Scalar. + - pos_flags (Tensor): Corrected bool tensor indicating the + final positive anchors. Shape: (num_anchors, ). + """ + loc_weight = torch.ones_like(reg_loss) + cls_weight = torch.ones_like(cls_loss) + pos_flags = assigned_gt_inds >= 0 # positive pixel flag + pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten() + + if pos_flags.any(): # pos pixels exist + pos_assigned_gt_inds = assigned_gt_inds[pos_flags] + zeroing_indices = (min_levels[pos_assigned_gt_inds] != level) + neg_indices = pos_indices[zeroing_indices] + + if neg_indices.numel(): + pos_flags[neg_indices] = 0 + loc_weight[neg_indices] = 0 + # Only the weight corresponding to the label is + # zeroed out if not selected + zeroing_labels = labels[neg_indices] + assert (zeroing_labels >= 0).all() + cls_weight[neg_indices, zeroing_labels] = 0 + + # Weighted loss for both cls and reg loss + cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum') + reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum') + + return cls_loss, reg_loss, pos_flags diff --git a/downstream/mmdetection/mmdet/models/dense_heads/ga_retina_head.py b/downstream/mmdetection/mmdet/models/dense_heads/ga_retina_head.py new file mode 100644 index 0000000..6d9e874 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/ga_retina_head.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.ops import MaskedConv2d + +from ..builder import HEADS +from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead + + +@HEADS.register_module() +class GARetinaHead(GuidedAnchorHead): + """Guided-Anchor-based RetinaNet head.""" + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + conv_cfg=None, + norm_cfg=None, + init_cfg=None, + **kwargs): + if init_cfg is None: + init_cfg = dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=[ + dict( + type='Normal', + name='conv_loc', + std=0.01, + bias_prob=0.01), + dict( + type='Normal', + name='retina_cls', + std=0.01, + bias_prob=0.01) + ]) + self.stacked_convs = stacked_convs + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + super(GARetinaHead, self).__init__( + num_classes, in_channels, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + + self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1) + self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, + 1) + self.feature_adaption_cls = FeatureAdaption( + self.feat_channels, + self.feat_channels, + kernel_size=3, + deform_groups=self.deform_groups) + self.feature_adaption_reg = FeatureAdaption( + self.feat_channels, + self.feat_channels, + kernel_size=3, + deform_groups=self.deform_groups) + self.retina_cls = MaskedConv2d( + self.feat_channels, + self.num_base_priors * self.cls_out_channels, + 3, + padding=1) + self.retina_reg = MaskedConv2d( + self.feat_channels, self.num_base_priors * 4, 3, padding=1) + + def forward_single(self, x): + """Forward feature map of a single scale level.""" + cls_feat = x + reg_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + + loc_pred = self.conv_loc(cls_feat) + shape_pred = self.conv_shape(reg_feat) + + cls_feat = self.feature_adaption_cls(cls_feat, shape_pred) + reg_feat = self.feature_adaption_reg(reg_feat, shape_pred) + + if not self.training: + mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr + else: + mask = None + cls_score = self.retina_cls(cls_feat, mask) + bbox_pred = self.retina_reg(reg_feat, mask) + return cls_score, bbox_pred, shape_pred, loc_pred diff --git a/downstream/mmdetection/mmdet/models/dense_heads/ga_rpn_head.py b/downstream/mmdetection/mmdet/models/dense_heads/ga_rpn_head.py new file mode 100644 index 0000000..4123c8b --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/ga_rpn_head.py @@ -0,0 +1,177 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
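In `GARetinaHead.forward_single` above, the predicted location map gates the final masked convolutions at test time: only positions whose objectness passes `loc_filter_thr` are evaluated. A minimal sketch of how that mask is derived (illustrative shapes, and the threshold value is an assumption here, not taken from the patch):

```python
import torch

loc_pred = torch.randn(1, 1, 8, 8)     # (N, 1, H, W) location-branch logits
loc_filter_thr = 0.01                  # assumed small threshold for the sketch
mask = loc_pred.sigmoid()[0] >= loc_filter_thr   # (1, H, W) bool mask
print(mask.shape, mask.float().mean())           # fraction of positions kept
```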
+import copy +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv import ConfigDict +from mmcv.ops import nms + +from ..builder import HEADS +from .guided_anchor_head import GuidedAnchorHead + + +@HEADS.register_module() +class GARPNHead(GuidedAnchorHead): + """Guided-Anchor-based RPN head.""" + + def __init__(self, + in_channels, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='conv_loc', + std=0.01, + bias_prob=0.01)), + **kwargs): + super(GARPNHead, self).__init__( + 1, in_channels, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + self.rpn_conv = nn.Conv2d( + self.in_channels, self.feat_channels, 3, padding=1) + super(GARPNHead, self)._init_layers() + + def forward_single(self, x): + """Forward feature of a single scale level.""" + + x = self.rpn_conv(x) + x = F.relu(x, inplace=True) + (cls_score, bbox_pred, shape_pred, + loc_pred) = super(GARPNHead, self).forward_single(x) + return cls_score, bbox_pred, shape_pred, loc_pred + + def loss(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + img_metas, + gt_bboxes_ignore=None): + losses = super(GARPNHead, self).loss( + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + None, + img_metas, + gt_bboxes_ignore=gt_bboxes_ignore) + return dict( + loss_rpn_cls=losses['loss_cls'], + loss_rpn_bbox=losses['loss_bbox'], + loss_anchor_shape=losses['loss_shape'], + loss_anchor_loc=losses['loss_loc']) + + def _get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + mlvl_masks, + img_shape, + scale_factor, + cfg, + rescale=False): + cfg = self.test_cfg if cfg is None else cfg + + cfg = copy.deepcopy(cfg) + + # deprecate arguments warning + if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: + warnings.warn( + 'In rpn_proposal or test_cfg, ' + 'nms_thr has been moved to a dict named nms as ' + 'iou_threshold, max_num has been renamed as max_per_img, ' + 'name of original arguments and the way to specify ' + 'iou_threshold of NMS will be deprecated.') + if 'nms' not in cfg: + cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) + if 'max_num' in cfg: + if 'max_per_img' in cfg: + assert cfg.max_num == cfg.max_per_img, f'You ' \ + f'set max_num and max_per_img at the same time, ' \ + f'but get {cfg.max_num} ' \ + f'and {cfg.max_per_img} respectively' \ + 'Please delete max_num which will be deprecated.' + else: + cfg.max_per_img = cfg.max_num + if 'nms_thr' in cfg: + assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ + f'iou_threshold in nms and ' \ + f'nms_thr at the same time, but get ' \ + f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ + f' respectively. Please delete the ' \ + f'nms_thr which will be deprecated.' + + assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \ + 'naive nms.' + + mlvl_proposals = [] + for idx in range(len(cls_scores)): + rpn_cls_score = cls_scores[idx] + rpn_bbox_pred = bbox_preds[idx] + anchors = mlvl_anchors[idx] + mask = mlvl_masks[idx] + assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] + # if no location is kept, end. 
+ if mask.sum() == 0: + continue + rpn_cls_score = rpn_cls_score.permute(1, 2, 0) + if self.use_sigmoid_cls: + rpn_cls_score = rpn_cls_score.reshape(-1) + scores = rpn_cls_score.sigmoid() + else: + rpn_cls_score = rpn_cls_score.reshape(-1, 2) + # remind that we set FG labels to [0, num_class-1] + # since mmdet v2.0 + # BG cat_id: num_class + scores = rpn_cls_score.softmax(dim=1)[:, :-1] + # filter scores, bbox_pred w.r.t. mask. + # anchors are filtered in get_anchors() beforehand. + scores = scores[mask] + rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, + 4)[mask, :] + if scores.dim() == 0: + rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0) + anchors = anchors.unsqueeze(0) + scores = scores.unsqueeze(0) + # filter anchors, bbox_pred, scores w.r.t. scores + if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: + _, topk_inds = scores.topk(cfg.nms_pre) + rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] + anchors = anchors[topk_inds, :] + scores = scores[topk_inds] + # get proposals w.r.t. anchors and rpn_bbox_pred + proposals = self.bbox_coder.decode( + anchors, rpn_bbox_pred, max_shape=img_shape) + # filter out too small bboxes + if cfg.min_bbox_size >= 0: + w = proposals[:, 2] - proposals[:, 0] + h = proposals[:, 3] - proposals[:, 1] + valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) + if not valid_mask.all(): + proposals = proposals[valid_mask] + scores = scores[valid_mask] + + # NMS in current level + proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold) + proposals = proposals[:cfg.nms_post, :] + mlvl_proposals.append(proposals) + proposals = torch.cat(mlvl_proposals, 0) + if cfg.get('nms_across_levels', False): + # NMS across multi levels + proposals, _ = nms(proposals[:, :4], proposals[:, -1], + cfg.nms.iou_threshold) + proposals = proposals[:cfg.max_per_img, :] + else: + scores = proposals[:, 4] + num = min(cfg.max_per_img, proposals.shape[0]) + _, topk_inds = scores.topk(num) + proposals = proposals[topk_inds, :] + return proposals diff --git a/downstream/mmdetection/mmdet/models/dense_heads/gfl_head.py b/downstream/mmdetection/mmdet/models/dense_heads/gfl_head.py new file mode 100644 index 0000000..12eb89d --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/gfl_head.py @@ -0,0 +1,648 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, Scale +from mmcv.runner import force_fp32 + +from mmdet.core import (anchor_inside_flags, bbox_overlaps, build_assigner, + build_sampler, images_to_levels, multi_apply, + reduce_mean, unmap) +from mmdet.core.utils import filter_scores_and_topk +from ..builder import HEADS, build_loss +from .anchor_head import AnchorHead + + +class Integral(nn.Module): + """A fixed layer for calculating integral result from distribution. + + This layer calculates the target location by :math: `sum{P(y_i) * y_i}`, + P(y_i) denotes the softmax vector that represents the discrete distribution + y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max} + + Args: + reg_max (int): The maximal value of the discrete set. Default: 16. You + may want to reset it according to your new dataset or related + settings. + """ + + def __init__(self, reg_max=16): + super(Integral, self).__init__() + self.reg_max = reg_max + self.register_buffer('project', + torch.linspace(0, self.reg_max, self.reg_max + 1)) + + def forward(self, x): + """Forward feature from the regression head to get integral result of + bounding box location. 
+ + Args: + x (Tensor): Features of the regression head, shape (N, 4*(n+1)), + n is self.reg_max. + + Returns: + x (Tensor): Integral result of box locations, i.e., distance + offsets from the box center in four directions, shape (N, 4). + """ + x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) + x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) + return x + + +@HEADS.register_module() +class GFLHead(AnchorHead): + """Generalized Focal Loss: Learning Qualified and Distributed Bounding + Boxes for Dense Object Detection. + + GFL head structure is similar with ATSS, however GFL uses + 1) joint representation for classification and localization quality, and + 2) flexible General distribution for bounding box locations, + which are supervised by + Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively + + https://arxiv.org/abs/2006.04388 + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + stacked_convs (int): Number of conv layers in cls and reg tower. + Default: 4. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='GN', num_groups=32, requires_grad=True). + loss_qfl (dict): Config of Quality Focal Loss (QFL). + bbox_coder (dict): Config of bbox coder. Defaults + 'DistancePointBBoxCoder'. + reg_max (int): Max value of integral set :math: `{0, ..., reg_max}` + in QFL setting. Default: 16. + init_cfg (dict or list[dict], optional): Initialization config dict. + Example: + >>> self = GFLHead(11, 7) + >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] + >>> cls_quality_score, bbox_pred = self.forward(feats) + >>> assert len(cls_quality_score) == len(self.scales) + """ + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + conv_cfg=None, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), + bbox_coder=dict(type='DistancePointBBoxCoder'), + reg_max=16, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='gfl_cls', + std=0.01, + bias_prob=0.01)), + **kwargs): + self.stacked_convs = stacked_convs + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.reg_max = reg_max + super(GFLHead, self).__init__( + num_classes, + in_channels, + bbox_coder=bbox_coder, + init_cfg=init_cfg, + **kwargs) + + self.sampling = False + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # SSD sampling=False so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + + self.integral = Integral(self.reg_max) + self.loss_dfl = build_loss(loss_dfl) + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + assert self.num_anchors == 1, 'anchor free version' + self.gfl_cls = nn.Conv2d( + self.feat_channels, 
self.cls_out_channels, 3, padding=1) + self.gfl_reg = nn.Conv2d( + self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) + self.scales = nn.ModuleList( + [Scale(1.0) for _ in self.prior_generator.strides]) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of classification scores and bbox prediction + cls_scores (list[Tensor]): Classification and quality (IoU) + joint scores for all scale levels, each is a 4D-tensor, + the channel number is num_classes. + bbox_preds (list[Tensor]): Box distribution logits for all + scale levels, each is a 4D-tensor, the channel number is + 4*(n+1), n is max value of integral set. + """ + return multi_apply(self.forward_single, feats, self.scales) + + def forward_single(self, x, scale): + """Forward feature of a single scale level. + + Args: + x (Tensor): Features of a single scale level. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + + Returns: + tuple: + cls_score (Tensor): Cls and quality joint scores for a single + scale level the channel number is num_classes. + bbox_pred (Tensor): Box distribution logits for a single scale + level, the channel number is 4*(n+1), n is max value of + integral set. + """ + cls_feat = x + reg_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + cls_score = self.gfl_cls(cls_feat) + bbox_pred = scale(self.gfl_reg(reg_feat)).float() + return cls_score, bbox_pred + + def anchor_center(self, anchors): + """Get anchor centers from anchors. + + Args: + anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. + + Returns: + Tensor: Anchor centers with shape (N, 2), "xy" format. + """ + anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2 + anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2 + return torch.stack([anchors_cx, anchors_cy], dim=-1) + + def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, + bbox_targets, stride, num_total_samples): + """Compute loss of a single scale level. + + Args: + anchors (Tensor): Box reference for each scale level with shape + (N, num_total_anchors, 4). + cls_score (Tensor): Cls and quality joint scores for each scale + level has shape (N, num_classes, H, W). + bbox_pred (Tensor): Box distribution logits for each scale + level with shape (N, 4*(n+1), H, W), n is max value of integral + set. + labels (Tensor): Labels of each anchors with shape + (N, num_total_anchors). + label_weights (Tensor): Label weights of each anchor with shape + (N, num_total_anchors) + bbox_targets (Tensor): BBox regression targets of each anchor + weight shape (N, num_total_anchors, 4). + stride (tuple): Stride in this scale level. + num_total_samples (int): Number of positive samples that is + reduced over all GPUs. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
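Further down in `loss_single`, the positive distribution logits are turned into distances by the `Integral` module defined earlier (`self.integral(pos_bbox_pred)` below): a softmax over the `reg_max + 1` bins followed by the expectation :math:`sum{P(y_i) * y_i}`. A minimal equivalent sketch (illustrative tensors, not part of the patch):

```python
import torch
import torch.nn.functional as F

reg_max = 16
project = torch.linspace(0, reg_max, reg_max + 1)   # discrete support {0..16}

logits = torch.randn(2, 4 * (reg_max + 1))          # 2 positives, 4 sides each
prob = F.softmax(logits.reshape(-1, reg_max + 1), dim=1)
distances = (prob * project).sum(dim=1).reshape(-1, 4)
print(distances.shape)  # torch.Size([2, 4]); expected offsets on the
                        # stride-normalized grid
```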
+ anchors = anchors.reshape(-1, 4) + cls_score = cls_score.permute(0, 2, 3, + 1).reshape(-1, self.cls_out_channels) + bbox_pred = bbox_pred.permute(0, 2, 3, + 1).reshape(-1, 4 * (self.reg_max + 1)) + bbox_targets = bbox_targets.reshape(-1, 4) + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero().squeeze(1) + score = label_weights.new_zeros(labels.shape) + + if len(pos_inds) > 0: + pos_bbox_targets = bbox_targets[pos_inds] + pos_bbox_pred = bbox_pred[pos_inds] + pos_anchors = anchors[pos_inds] + pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] + + weight_targets = cls_score.detach().sigmoid() + weight_targets = weight_targets.max(dim=1)[0][pos_inds] + pos_bbox_pred_corners = self.integral(pos_bbox_pred) + pos_decode_bbox_pred = self.bbox_coder.decode( + pos_anchor_centers, pos_bbox_pred_corners) + pos_decode_bbox_targets = pos_bbox_targets / stride[0] + score[pos_inds] = bbox_overlaps( + pos_decode_bbox_pred.detach(), + pos_decode_bbox_targets, + is_aligned=True) + pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) + target_corners = self.bbox_coder.encode(pos_anchor_centers, + pos_decode_bbox_targets, + self.reg_max).reshape(-1) + + # regression loss + loss_bbox = self.loss_bbox( + pos_decode_bbox_pred, + pos_decode_bbox_targets, + weight=weight_targets, + avg_factor=1.0) + + # dfl loss + loss_dfl = self.loss_dfl( + pred_corners, + target_corners, + weight=weight_targets[:, None].expand(-1, 4).reshape(-1), + avg_factor=4.0) + else: + loss_bbox = bbox_pred.sum() * 0 + loss_dfl = bbox_pred.sum() * 0 + weight_targets = bbox_pred.new_tensor(0) + + # cls (qfl) loss + loss_cls = self.loss_cls( + cls_score, (labels, score), + weight=label_weights, + avg_factor=num_total_samples) + + return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Cls and quality scores for each scale + level has shape (N, num_classes, H, W). + bbox_preds (list[Tensor]): Box distribution logits for each scale + level with shape (N, 4*(n+1), H, W), n is max value of integral + set. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor] | None): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + + (anchor_list, labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets + + num_total_samples = reduce_mean( + torch.tensor(num_total_pos, dtype=torch.float, + device=device)).item() + num_total_samples = max(num_total_samples, 1.0) + + losses_cls, losses_bbox, losses_dfl,\ + avg_factor = multi_apply( + self.loss_single, + anchor_list, + cls_scores, + bbox_preds, + labels_list, + label_weights_list, + bbox_targets_list, + self.prior_generator.strides, + num_total_samples=num_total_samples) + + avg_factor = sum(avg_factor) + avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() + losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) + losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) + return dict( + loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) + + def _get_bboxes_single(self, + cls_score_list, + bbox_pred_list, + score_factor_list, + mlvl_priors, + img_meta, + cfg, + rescale=False, + with_nms=True, + **kwargs): + """Transform outputs of a single image into bbox predictions. + + Args: + cls_score_list (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_priors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas from + all scale levels of a single image, each item has shape + (num_priors * 4, H, W). + score_factor_list (list[Tensor]): Score factor from all scale + levels of a single image. GFL head does not need this value. + mlvl_priors (list[Tensor]): Each element in the list is + the priors of a single level in feature pyramid, has shape + (num_priors, 4). + img_meta (dict): Image meta info. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. If with_nms + is False and mlvl_score_factor is None, return mlvl_bboxes and + mlvl_scores, else return mlvl_bboxes, mlvl_scores and + mlvl_score_factor. Usually with_nms is False is used for aug + test. If with_nms is True, then return the following format + + - det_bboxes (Tensor): Predicted bboxes with shape \ + [num_bboxes, 5], where the first 4 columns are bounding \ + box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ + column are scores between 0 and 1. + - det_labels (Tensor): Predicted labels of the corresponding \ + box with shape [num_bboxes]. 
+ """ + cfg = self.test_cfg if cfg is None else cfg + img_shape = img_meta['img_shape'] + nms_pre = cfg.get('nms_pre', -1) + + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_labels = [] + for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate( + zip(cls_score_list, bbox_pred_list, + self.prior_generator.strides, mlvl_priors)): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + assert stride[0] == stride[1] + + bbox_pred = bbox_pred.permute(1, 2, 0) + bbox_pred = self.integral(bbox_pred) * stride[0] + + scores = cls_score.permute(1, 2, 0).reshape( + -1, self.cls_out_channels).sigmoid() + + # After https://github.com/open-mmlab/mmdetection/pull/6268/, + # this operation keeps fewer bboxes under the same `nms_pre`. + # There is no difference in performance for most models. If you + # find a slight drop in performance, you can set a larger + # `nms_pre` than before. + results = filter_scores_and_topk( + scores, cfg.score_thr, nms_pre, + dict(bbox_pred=bbox_pred, priors=priors)) + scores, labels, _, filtered_results = results + + bbox_pred = filtered_results['bbox_pred'] + priors = filtered_results['priors'] + + bboxes = self.bbox_coder.decode( + self.anchor_center(priors), bbox_pred, max_shape=img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_labels.append(labels) + + return self._bbox_post_process( + mlvl_scores, + mlvl_labels, + mlvl_bboxes, + img_meta['scale_factor'], + cfg, + rescale=rescale, + with_nms=with_nms) + + def get_targets(self, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): + """Get targets for GFL head. + + This method is almost the same as `AnchorHead.get_targets()`. Besides + returning the targets as the parent method does, it also returns the + anchors as the first element of the returned tuple. + """ + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + num_level_anchors_list = [num_level_anchors] * num_imgs + + # concat all level anchors and flags to a single tensor + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + anchor_list[i] = torch.cat(anchor_list[i]) + valid_flag_list[i] = torch.cat(valid_flag_list[i]) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + (all_anchors, all_labels, all_label_weights, all_bbox_targets, + all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, + anchor_list, + valid_flag_list, + num_level_anchors_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs) + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. 
multiple levels + anchors_list = images_to_levels(all_anchors, num_level_anchors) + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors) + return (anchors_list, labels_list, label_weights_list, + bbox_targets_list, bbox_weights_list, num_total_pos, + num_total_neg) + + def _get_target_single(self, + flat_anchors, + valid_flags, + num_level_anchors, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True): + """Compute regression, classification targets for anchors in a single + image. + + Args: + flat_anchors (Tensor): Multi-level anchors of the image, which are + concatenated into a single tensor of shape (num_anchors, 4) + valid_flags (Tensor): Multi level valid flags of the image, + which are concatenated into a single tensor of + shape (num_anchors,). + num_level_anchors Tensor): Number of anchors of each scale level. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + img_meta (dict): Meta info of the image. + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: N is the number of total anchors in the image. + anchors (Tensor): All anchors in the image with shape (N, 4). + labels (Tensor): Labels of all anchors in the image with shape + (N,). + label_weights (Tensor): Label weights of all anchor in the + image with shape (N,). + bbox_targets (Tensor): BBox targets of all anchors in the + image with shape (N, 4). + bbox_weights (Tensor): BBox weights of all anchors in the + image with shape (N, 4). + pos_inds (Tensor): Indices of positive anchor with shape + (num_pos,). + neg_inds (Tensor): Indices of negative anchor with shape + (num_neg,). 
+ """ + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 7 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + + num_level_anchors_inside = self.get_num_level_anchors_inside( + num_level_anchors, inside_flags) + assign_result = self.assigner.assign(anchors, num_level_anchors_inside, + gt_bboxes, gt_bboxes_ignore, + gt_labels) + + sampling_result = self.sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + labels = anchors.new_full((num_valid_anchors, ), + self.num_classes, + dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + pos_bbox_targets = sampling_result.pos_gt_bboxes + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + anchors = unmap(anchors, num_total_anchors, inside_flags) + labels = unmap( + labels, num_total_anchors, inside_flags, fill=self.num_classes) + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (anchors, labels, label_weights, bbox_targets, bbox_weights, + pos_inds, neg_inds) + + def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): + split_inside_flags = torch.split(inside_flags, num_level_anchors) + num_level_anchors_inside = [ + int(flags.sum()) for flags in split_inside_flags + ] + return num_level_anchors_inside diff --git a/downstream/mmdetection/mmdet/models/dense_heads/guided_anchor_head.py b/downstream/mmdetection/mmdet/models/dense_heads/guided_anchor_head.py new file mode 100644 index 0000000..53e8cd8 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/guided_anchor_head.py @@ -0,0 +1,868 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +from mmcv.ops import DeformConv2d, MaskedConv2d +from mmcv.runner import BaseModule, force_fp32 + +from mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder, + build_prior_generator, build_sampler, calc_region, + images_to_levels, multi_apply, multiclass_nms, unmap) +from ..builder import HEADS, build_loss +from .anchor_head import AnchorHead + + +class FeatureAdaption(BaseModule): + """Feature Adaption Module. + + Feature Adaption Module is implemented based on DCN v1. + It uses anchor shape prediction rather than feature map to + predict offsets of deform conv layer. + + Args: + in_channels (int): Number of channels in the input feature map. + out_channels (int): Number of channels in the output feature map. + kernel_size (int): Deformable conv kernel size. + deform_groups (int): Deformable conv group size. 
+ init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + deform_groups=4, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.1, + override=dict( + type='Normal', name='conv_adaption', std=0.01))): + super(FeatureAdaption, self).__init__(init_cfg) + offset_channels = kernel_size * kernel_size * 2 + self.conv_offset = nn.Conv2d( + 2, deform_groups * offset_channels, 1, bias=False) + self.conv_adaption = DeformConv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + padding=(kernel_size - 1) // 2, + deform_groups=deform_groups) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shape): + offset = self.conv_offset(shape.detach()) + x = self.relu(self.conv_adaption(x, offset)) + return x + + +@HEADS.register_module() +class GuidedAnchorHead(AnchorHead): + """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). + + This GuidedAnchorHead will predict high-quality feature guided + anchors and locations where anchors will be kept in inference. + There are mainly 3 categories of bounding-boxes. + + - Sampled 9 pairs for target assignment. (approxes) + - The square boxes where the predicted anchors are based on. (squares) + - Guided anchors. + + Please refer to https://arxiv.org/abs/1901.03278 for more details. + + Args: + num_classes (int): Number of classes. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels. + approx_anchor_generator (dict): Config dict for approx generator + square_anchor_generator (dict): Config dict for square generator + anchor_coder (dict): Config dict for anchor coder + bbox_coder (dict): Config dict for bbox coder + reg_decoded_bbox (bool): If true, the regression loss would be + applied directly on decoded bounding boxes, converting both + the predicted boxes and regression targets to absolute + coordinates format. Default False. It should be `True` when + using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. + deform_groups: (int): Group number of DCN in + FeatureAdaption module. + loc_filter_thr (float): Threshold to filter out unconcerned regions. + loss_loc (dict): Config of location loss. + loss_shape (dict): Config of anchor shape loss. + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of bbox regression loss. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__( + self, + num_classes, + in_channels, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0] + ), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0] + ), + reg_decoded_bbox=False, + deform_groups=4, + loc_filter_thr=0.01, + train_cfg=None, + test_cfg=None, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.01, + override=dict(type='Normal', + name='conv_loc', + std=0.01, + bias_prob=0.01))): # yapf: disable + super(AnchorHead, self).__init__(init_cfg) + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.deform_groups = deform_groups + self.loc_filter_thr = loc_filter_thr + + # build approx_anchor_generator and square_anchor_generator + assert (approx_anchor_generator['octave_base_scale'] == + square_anchor_generator['scales'][0]) + assert (approx_anchor_generator['strides'] == + square_anchor_generator['strides']) + self.approx_anchor_generator = build_prior_generator( + approx_anchor_generator) + self.square_anchor_generator = build_prior_generator( + square_anchor_generator) + self.approxs_per_octave = self.approx_anchor_generator \ + .num_base_priors[0] + + self.reg_decoded_bbox = reg_decoded_bbox + + # one anchor per location + self.num_base_priors = self.square_anchor_generator.num_base_priors[0] + + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] + self.sampling = loss_cls['type'] not in ['FocalLoss'] + self.ga_sampling = train_cfg is not None and hasattr( + train_cfg, 'ga_sampler') + if self.use_sigmoid_cls: + self.cls_out_channels = self.num_classes + else: + self.cls_out_channels = self.num_classes + 1 + + # build bbox_coder + self.anchor_coder = build_bbox_coder(anchor_coder) + self.bbox_coder = build_bbox_coder(bbox_coder) + + # build losses + self.loss_loc = build_loss(loss_loc) + self.loss_shape = build_loss(loss_shape) + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # use PseudoSampler when sampling is False + if self.sampling and hasattr(self.train_cfg, 'sampler'): + sampler_cfg = self.train_cfg.sampler + else: + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + + self.ga_assigner = build_assigner(self.train_cfg.ga_assigner) + if self.ga_sampling: + ga_sampler_cfg = self.train_cfg.ga_sampler + else: + ga_sampler_cfg = dict(type='PseudoSampler') + self.ga_sampler = build_sampler(ga_sampler_cfg, context=self) + + self.fp16_enabled = False + + self._init_layers() + + @property + def num_anchors(self): + warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' + 
'please use "num_base_priors" instead') + return self.square_anchor_generator.num_base_priors[0] + + def _init_layers(self): + self.relu = nn.ReLU(inplace=True) + self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) + self.conv_shape = nn.Conv2d(self.in_channels, self.num_base_priors * 2, + 1) + self.feature_adaption = FeatureAdaption( + self.in_channels, + self.feat_channels, + kernel_size=3, + deform_groups=self.deform_groups) + self.conv_cls = MaskedConv2d( + self.feat_channels, self.num_base_priors * self.cls_out_channels, + 1) + self.conv_reg = MaskedConv2d(self.feat_channels, + self.num_base_priors * 4, 1) + + def forward_single(self, x): + loc_pred = self.conv_loc(x) + shape_pred = self.conv_shape(x) + x = self.feature_adaption(x, shape_pred) + # masked conv is only used during inference for speed-up + if not self.training: + mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr + else: + mask = None + cls_score = self.conv_cls(x, mask) + bbox_pred = self.conv_reg(x, mask) + return cls_score, bbox_pred, shape_pred, loc_pred + + def forward(self, feats): + return multi_apply(self.forward_single, feats) + + def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'): + """Get sampled approxs and inside flags according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. + device (torch.device | str): device for returned tensors + + Returns: + tuple: approxes of each image, inside flags of each image + """ + num_imgs = len(img_metas) + + # since feature map sizes of all images are the same, we only compute + # approxes for one time + multi_level_approxs = self.approx_anchor_generator.grid_priors( + featmap_sizes, device=device) + approxs_list = [multi_level_approxs for _ in range(num_imgs)] + + # for each image, we compute inside flags of multi level approxes + inside_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = [] + multi_level_approxs = approxs_list[img_id] + + # obtain valid flags for each approx first + multi_level_approx_flags = self.approx_anchor_generator \ + .valid_flags(featmap_sizes, + img_meta['pad_shape'], + device=device) + + for i, flags in enumerate(multi_level_approx_flags): + approxs = multi_level_approxs[i] + inside_flags_list = [] + for i in range(self.approxs_per_octave): + split_valid_flags = flags[i::self.approxs_per_octave] + split_approxs = approxs[i::self.approxs_per_octave, :] + inside_flags = anchor_inside_flags( + split_approxs, split_valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + inside_flags_list.append(inside_flags) + # inside_flag for a position is true if any anchor in this + # position is true + inside_flags = ( + torch.stack(inside_flags_list, 0).sum(dim=0) > 0) + multi_level_flags.append(inside_flags) + inside_flag_list.append(multi_level_flags) + return approxs_list, inside_flag_list + + def get_anchors(self, + featmap_sizes, + shape_preds, + loc_preds, + img_metas, + use_loc_filter=False, + device='cuda'): + """Get squares according to feature map sizes and guided anchors. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + shape_preds (list[tensor]): Multi-level shape predictions. + loc_preds (list[tensor]): Multi-level location predictions. + img_metas (list[dict]): Image meta info. + use_loc_filter (bool): Use loc filter or not. 
+ device (torch.device | str): device for returned tensors + + Returns: + tuple: square approxs of each image, guided anchors of each image, + loc masks of each image + """ + num_imgs = len(img_metas) + num_levels = len(featmap_sizes) + + # since feature map sizes of all images are the same, we only compute + # squares for one time + multi_level_squares = self.square_anchor_generator.grid_priors( + featmap_sizes, device=device) + squares_list = [multi_level_squares for _ in range(num_imgs)] + + # for each image, we compute multi level guided anchors + guided_anchors_list = [] + loc_mask_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_guided_anchors = [] + multi_level_loc_mask = [] + for i in range(num_levels): + squares = squares_list[img_id][i] + shape_pred = shape_preds[i][img_id] + loc_pred = loc_preds[i][img_id] + guided_anchors, loc_mask = self._get_guided_anchors_single( + squares, + shape_pred, + loc_pred, + use_loc_filter=use_loc_filter) + multi_level_guided_anchors.append(guided_anchors) + multi_level_loc_mask.append(loc_mask) + guided_anchors_list.append(multi_level_guided_anchors) + loc_mask_list.append(multi_level_loc_mask) + return squares_list, guided_anchors_list, loc_mask_list + + def _get_guided_anchors_single(self, + squares, + shape_pred, + loc_pred, + use_loc_filter=False): + """Get guided anchors and loc masks for a single level. + + Args: + square (tensor): Squares of a single level. + shape_pred (tensor): Shape predictions of a single level. + loc_pred (tensor): Loc predictions of a single level. + use_loc_filter (list[tensor]): Use loc filter or not. + + Returns: + tuple: guided anchors, location masks + """ + # calculate location filtering mask + loc_pred = loc_pred.sigmoid().detach() + if use_loc_filter: + loc_mask = loc_pred >= self.loc_filter_thr + else: + loc_mask = loc_pred >= 0.0 + mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_base_priors) + mask = mask.contiguous().view(-1) + # calculate guided anchors + squares = squares[mask] + anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( + -1, 2).detach()[mask] + bbox_deltas = anchor_deltas.new_full(squares.size(), 0) + bbox_deltas[:, 2:] = anchor_deltas + guided_anchors = self.anchor_coder.decode( + squares, bbox_deltas, wh_ratio_clip=1e-6) + return guided_anchors, mask + + def ga_loc_targets(self, gt_bboxes_list, featmap_sizes): + """Compute location targets for guided anchoring. + + Each feature map is divided into positive, negative and ignore regions. + - positive regions: target 1, weight 1 + - ignore regions: target 0, weight 0 + - negative regions: target 0, weight 0.1 + + Args: + gt_bboxes_list (list[Tensor]): Gt bboxes of each image. + featmap_sizes (list[tuple]): Multi level sizes of each feature + maps. + + Returns: + tuple + """ + anchor_scale = self.approx_anchor_generator.octave_base_scale + anchor_strides = self.approx_anchor_generator.strides + # Currently only supports same stride in x and y direction. 
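+        # The loops below build per-level location targets: each GT box is
+        # assigned to a pyramid level by its scale, its center region is
+        # marked positive (target 1, weight 1), a larger surrounding region
+        # is ignored (weight 0), and the remaining locations stay negative
+        # with weight 0.1; ignore regions are also propagated to the
+        # adjacent pyramid levels.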
+ for stride in anchor_strides: + assert (stride[0] == stride[1]) + anchor_strides = [stride[0] for stride in anchor_strides] + + center_ratio = self.train_cfg.center_ratio + ignore_ratio = self.train_cfg.ignore_ratio + img_per_gpu = len(gt_bboxes_list) + num_lvls = len(featmap_sizes) + r1 = (1 - center_ratio) / 2 + r2 = (1 - ignore_ratio) / 2 + all_loc_targets = [] + all_loc_weights = [] + all_ignore_map = [] + for lvl_id in range(num_lvls): + h, w = featmap_sizes[lvl_id] + loc_targets = torch.zeros( + img_per_gpu, + 1, + h, + w, + device=gt_bboxes_list[0].device, + dtype=torch.float32) + loc_weights = torch.full_like(loc_targets, -1) + ignore_map = torch.zeros_like(loc_targets) + all_loc_targets.append(loc_targets) + all_loc_weights.append(loc_weights) + all_ignore_map.append(ignore_map) + for img_id in range(img_per_gpu): + gt_bboxes = gt_bboxes_list[img_id] + scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * + (gt_bboxes[:, 3] - gt_bboxes[:, 1])) + min_anchor_size = scale.new_full( + (1, ), float(anchor_scale * anchor_strides[0])) + # assign gt bboxes to different feature levels w.r.t. their scales + target_lvls = torch.floor( + torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) + target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() + for gt_id in range(gt_bboxes.size(0)): + lvl = target_lvls[gt_id].item() + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] + # calculate ignore regions + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[lvl]) + # calculate positive (center) regions + ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( + gt_, r1, featmap_sizes[lvl]) + all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, + ctr_x1:ctr_x2 + 1] = 1 + all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, + ignore_x1:ignore_x2 + 1] = 0 + all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, + ctr_x1:ctr_x2 + 1] = 1 + # calculate ignore map on nearby low level feature + if lvl > 0: + d_lvl = lvl - 1 + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[d_lvl]) + all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, + ignore_x1:ignore_x2 + 1] = 1 + # calculate ignore map on nearby high level feature + if lvl < num_lvls - 1: + u_lvl = lvl + 1 + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[u_lvl]) + all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, + ignore_x1:ignore_x2 + 1] = 1 + for lvl_id in range(num_lvls): + # ignore negative regions w.r.t. ignore map + all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) + & (all_ignore_map[lvl_id] > 0)] = 0 + # set negative regions with weight 0.1 + all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 + # loc average factor to balance loss + loc_avg_factor = sum( + [t.size(0) * t.size(-1) * t.size(-2) + for t in all_loc_targets]) / 200 + return all_loc_targets, all_loc_weights, loc_avg_factor + + def _ga_shape_target_single(self, + flat_approxs, + inside_flags, + flat_squares, + gt_bboxes, + gt_bboxes_ignore, + img_meta, + unmap_outputs=True): + """Compute guided anchoring targets. + + This function returns sampled anchors and gt bboxes directly + rather than calculates regression targets. 
+ + Args: + flat_approxs (Tensor): flat approxs of a single image, + shape (n, 4) + inside_flags (Tensor): inside flags of a single image, + shape (n, ). + flat_squares (Tensor): flat squares of a single image, + shape (approxs_per_octave * n, 4) + gt_bboxes (Tensor): Ground truth bboxes of a single image. + img_meta (dict): Meta info of a single image. + approxs_per_octave (int): number of approxs per octave + cfg (dict): RPN train configs. + unmap_outputs (bool): unmap outputs or not. + + Returns: + tuple + """ + if not inside_flags.any(): + return (None, ) * 5 + # assign gt and sample anchors + expand_inside_flags = inside_flags[:, None].expand( + -1, self.approxs_per_octave).reshape(-1) + approxs = flat_approxs[expand_inside_flags, :] + squares = flat_squares[inside_flags, :] + + assign_result = self.ga_assigner.assign(approxs, squares, + self.approxs_per_octave, + gt_bboxes, gt_bboxes_ignore) + sampling_result = self.ga_sampler.sample(assign_result, squares, + gt_bboxes) + + bbox_anchors = torch.zeros_like(squares) + bbox_gts = torch.zeros_like(squares) + bbox_weights = torch.zeros_like(squares) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes + bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes + bbox_weights[pos_inds, :] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_squares.size(0) + bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags) + bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds) + + def ga_shape_targets(self, + approx_list, + inside_flag_list, + square_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + unmap_outputs=True): + """Compute guided anchoring targets. + + Args: + approx_list (list[list]): Multi level approxs of each image. + inside_flag_list (list[list]): Multi level inside flags of each + image. + square_list (list[list]): Multi level squares of each image. + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. + unmap_outputs (bool): unmap outputs or not. 
+ + Returns: + tuple + """ + num_imgs = len(img_metas) + assert len(approx_list) == len(inside_flag_list) == len( + square_list) == num_imgs + # anchor number of multi levels + num_level_squares = [squares.size(0) for squares in square_list[0]] + # concat all level anchors and flags to a single tensor + inside_flag_flat_list = [] + approx_flat_list = [] + square_flat_list = [] + for i in range(num_imgs): + assert len(square_list[i]) == len(inside_flag_list[i]) + inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) + approx_flat_list.append(torch.cat(approx_list[i])) + square_flat_list.append(torch.cat(square_list[i])) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, + neg_inds_list) = multi_apply( + self._ga_shape_target_single, + approx_flat_list, + inside_flag_flat_list, + square_flat_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + img_metas, + unmap_outputs=unmap_outputs) + # no valid anchors + if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + bbox_anchors_list = images_to_levels(all_bbox_anchors, + num_level_squares) + bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_squares) + return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, + num_total_pos, num_total_neg) + + def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts, + anchor_weights, anchor_total_num): + shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) + bbox_anchors = bbox_anchors.contiguous().view(-1, 4) + bbox_gts = bbox_gts.contiguous().view(-1, 4) + anchor_weights = anchor_weights.contiguous().view(-1, 4) + bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) + bbox_deltas[:, 2:] += shape_pred + # filter out negative samples to speed-up weighted_bounded_iou_loss + inds = torch.nonzero( + anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1) + bbox_deltas_ = bbox_deltas[inds] + bbox_anchors_ = bbox_anchors[inds] + bbox_gts_ = bbox_gts[inds] + anchor_weights_ = anchor_weights[inds] + pred_anchors_ = self.anchor_coder.decode( + bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6) + loss_shape = self.loss_shape( + pred_anchors_, + bbox_gts_, + anchor_weights_, + avg_factor=anchor_total_num) + return loss_shape + + def loss_loc_single(self, loc_pred, loc_target, loc_weight, + loc_avg_factor): + loss_loc = self.loss_loc( + loc_pred.reshape(-1, 1), + loc_target.reshape(-1).long(), + loc_weight.reshape(-1), + avg_factor=loc_avg_factor) + return loss_loc + + @force_fp32( + apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) + def loss(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.approx_anchor_generator.num_levels + + device = cls_scores[0].device + + # get loc targets + loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets( + gt_bboxes, featmap_sizes) + + # get sampled approxes + approxs_list, inside_flag_list = self.get_sampled_approxs( + featmap_sizes, img_metas, device=device) + # get 
squares and guided anchors + squares_list, guided_anchors_list, _ = self.get_anchors( + featmap_sizes, shape_preds, loc_preds, img_metas, device=device) + + # get shape targets + shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list, + squares_list, gt_bboxes, + img_metas) + if shape_targets is None: + return None + (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num, + anchor_bg_num) = shape_targets + anchor_total_num = ( + anchor_fg_num if not self.ga_sampling else anchor_fg_num + + anchor_bg_num) + + # get anchor targets + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + guided_anchors_list, + inside_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + + # anchor number of multi levels + num_level_anchors = [ + anchors.size(0) for anchors in guided_anchors_list[0] + ] + # concat all level anchors to a single tensor + concat_anchor_list = [] + for i in range(len(guided_anchors_list)): + concat_anchor_list.append(torch.cat(guided_anchors_list[i])) + all_anchor_list = images_to_levels(concat_anchor_list, + num_level_anchors) + + # get classification and bbox regression losses + losses_cls, losses_bbox = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + all_anchor_list, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples) + + # get anchor location loss + losses_loc = [] + for i in range(len(loc_preds)): + loss_loc = self.loss_loc_single( + loc_preds[i], + loc_targets[i], + loc_weights[i], + loc_avg_factor=loc_avg_factor) + losses_loc.append(loss_loc) + + # get anchor shape loss + losses_shape = [] + for i in range(len(shape_preds)): + loss_shape = self.loss_shape_single( + shape_preds[i], + bbox_anchors_list[i], + bbox_gts_list[i], + anchor_weights_list[i], + anchor_total_num=anchor_total_num) + losses_shape.append(loss_shape) + + return dict( + loss_cls=losses_cls, + loss_bbox=losses_bbox, + loss_shape=losses_shape, + loss_loc=losses_loc) + + @force_fp32( + apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + img_metas, + cfg=None, + rescale=False): + assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( + loc_preds) + num_levels = len(cls_scores) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + device = cls_scores[0].device + # get guided anchors + _, guided_anchors, loc_masks = self.get_anchors( + featmap_sizes, + shape_preds, + loc_preds, + img_metas, + use_loc_filter=not self.training, + device=device) + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + guided_anchor_list = [ + guided_anchors[img_id][i].detach() for i in range(num_levels) + ] + loc_mask_list = [ + loc_masks[img_id][i].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single(cls_score_list, 
bbox_pred_list, + guided_anchor_list, + loc_mask_list, img_shape, + scale_factor, cfg, rescale) + result_list.append(proposals) + return result_list + + def _get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + mlvl_masks, + img_shape, + scale_factor, + cfg, + rescale=False): + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) + mlvl_bboxes = [] + mlvl_scores = [] + for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, + mlvl_anchors, + mlvl_masks): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + # if no location is kept, end. + if mask.sum() == 0: + continue + # reshape scores and bbox_pred + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + # filter scores, bbox_pred w.r.t. mask. + # anchors are filtered in get_anchors() beforehand. + scores = scores[mask, :] + bbox_pred = bbox_pred[mask, :] + if scores.dim() == 0: + anchors = anchors.unsqueeze(0) + scores = scores.unsqueeze(0) + bbox_pred = bbox_pred.unsqueeze(0) + # filter anchors, bbox_pred, scores w.r.t. scores + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + if self.use_sigmoid_cls: + max_scores, _ = scores.max(dim=1) + else: + # remind that we set FG labels to [0, num_class-1] + # since mmdet v2.0 + # BG cat_id: num_class + max_scores, _ = scores[:, :-1].max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + anchors = anchors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + bboxes = self.bbox_coder.decode( + anchors, bbox_pred, max_shape=img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + if self.use_sigmoid_cls: + # Add a dummy background class to the backend when using sigmoid + # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 + # BG cat_id: num_class + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) + # multi class NMS + det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, + cfg.score_thr, cfg.nms, + cfg.max_per_img) + return det_bboxes, det_labels diff --git a/downstream/mmdetection/mmdet/models/dense_heads/lad_head.py b/downstream/mmdetection/mmdet/models/dense_heads/lad_head.py new file mode 100644 index 0000000..85273bc --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/lad_head.py @@ -0,0 +1,232 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.runner import force_fp32 + +from mmdet.core import bbox_overlaps, multi_apply +from ..builder import HEADS +from .paa_head import PAAHead, levels_to_images + + +@HEADS.register_module() +class LADHead(PAAHead): + """Label Assignment Head from the paper: `Improving Object Detection by + Label Assignment Distillation `_""" + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) + def get_label_assignment(self, + cls_scores, + bbox_preds, + iou_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Get label assignment (from teacher). + + Args: + cls_scores (list[Tensor]): Box scores for each scale level. 
+ Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + iou_preds (list[Tensor]): iou_preds for each scale + level with shape (N, num_anchors * 1, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor] | None): Specify which bounding + boxes can be ignored when are computing the loss. + + Returns: + tuple: Returns a tuple containing label assignment variables. + + - labels (Tensor): Labels of all anchors, each with + shape (num_anchors,). + - labels_weight (Tensor): Label weights of all anchor. + each with shape (num_anchors,). + - bboxes_target (Tensor): BBox targets of all anchors. + each with shape (num_anchors, 4). + - bboxes_weight (Tensor): BBox weights of all anchors. + each with shape (num_anchors, 4). + - pos_inds_flatten (Tensor): Contains all index of positive + sample in all anchor. + - pos_anchors (Tensor): Positive anchors. + - num_pos (int): Number of positive anchors. + """ + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + ) + (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, + pos_gt_index) = cls_reg_targets + cls_scores = levels_to_images(cls_scores) + cls_scores = [ + item.reshape(-1, self.cls_out_channels) for item in cls_scores + ] + bbox_preds = levels_to_images(bbox_preds) + bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] + pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, + cls_scores, bbox_preds, labels, + labels_weight, bboxes_target, + bboxes_weight, pos_inds) + + with torch.no_grad(): + reassign_labels, reassign_label_weight, \ + reassign_bbox_weights, num_pos = multi_apply( + self.paa_reassign, + pos_losses_list, + labels, + labels_weight, + bboxes_weight, + pos_inds, + pos_gt_index, + anchor_list) + num_pos = sum(num_pos) + # convert all tensor list to a flatten tensor + labels = torch.cat(reassign_labels, 0).view(-1) + flatten_anchors = torch.cat( + [torch.cat(item, 0) for item in anchor_list]) + labels_weight = torch.cat(reassign_label_weight, 0).view(-1) + bboxes_target = torch.cat(bboxes_target, + 0).view(-1, bboxes_target[0].size(-1)) + + pos_inds_flatten = ((labels >= 0) + & + (labels < self.num_classes)).nonzero().reshape(-1) + + if num_pos: + pos_anchors = flatten_anchors[pos_inds_flatten] + else: + pos_anchors = None + + label_assignment_results = (labels, labels_weight, bboxes_target, + bboxes_weight, pos_inds_flatten, + pos_anchors, num_pos) + return label_assignment_results + + def forward_train(self, + x, + label_assignment_results, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=None, + **kwargs): + """Forward train with the available label assignment (student receives + from teacher). + + Args: + x (list[Tensor]): Features from FPN. 
+            label_assignment_results (tuple): As the outputs defined in the
+                function `self.get_label_assignment`.
+            img_metas (list[dict]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+            gt_bboxes (Tensor): Ground truth bboxes of the image,
+                shape (num_gts, 4).
+            gt_labels (Tensor): Ground truth labels of each box,
+                shape (num_gts,).
+            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+                ignored, shape (num_ignored_gts, 4).
+
+        Returns:
+            losses (dict[str, Tensor]): A dictionary of loss components.
+        """
+        outs = self(x)
+        if gt_labels is None:
+            loss_inputs = outs + (gt_bboxes, img_metas)
+        else:
+            loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
+        losses = self.loss(
+            *loss_inputs,
+            gt_bboxes_ignore=gt_bboxes_ignore,
+            label_assignment_results=label_assignment_results)
+        return losses
+
+    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
+    def loss(self,
+             cls_scores,
+             bbox_preds,
+             iou_preds,
+             gt_bboxes,
+             gt_labels,
+             img_metas,
+             gt_bboxes_ignore=None,
+             label_assignment_results=None):
+        """Compute losses of the head.
+
+        Args:
+            cls_scores (list[Tensor]): Box scores for each scale level
+                Has shape (N, num_anchors * num_classes, H, W)
+            bbox_preds (list[Tensor]): Box energies / deltas for each scale
+                level with shape (N, num_anchors * 4, H, W)
+            iou_preds (list[Tensor]): iou_preds for each scale
+                level with shape (N, num_anchors * 1, H, W)
+            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): class indices corresponding to each box
+            img_metas (list[dict]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
+                boxes can be ignored when computing the loss.
+            label_assignment_results (tuple): As the outputs defined in the
+                function `self.get_label_assignment`.
+
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+ """ + + (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, + pos_anchors, num_pos) = label_assignment_results + + cls_scores = levels_to_images(cls_scores) + cls_scores = [ + item.reshape(-1, self.cls_out_channels) for item in cls_scores + ] + bbox_preds = levels_to_images(bbox_preds) + bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] + iou_preds = levels_to_images(iou_preds) + iou_preds = [item.reshape(-1, 1) for item in iou_preds] + + # convert all tensor list to a flatten tensor + cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) + bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) + iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) + + losses_cls = self.loss_cls( + cls_scores, + labels, + labels_weight, + avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0 + if num_pos: + pos_bbox_pred = self.bbox_coder.decode( + pos_anchors, bbox_preds[pos_inds_flatten]) + pos_bbox_target = bboxes_target[pos_inds_flatten] + iou_target = bbox_overlaps( + pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) + losses_iou = self.loss_centerness( + iou_preds[pos_inds_flatten], + iou_target.unsqueeze(-1), + avg_factor=num_pos) + losses_bbox = self.loss_bbox( + pos_bbox_pred, pos_bbox_target, avg_factor=num_pos) + + else: + losses_iou = iou_preds.sum() * 0 + losses_bbox = bbox_preds.sum() * 0 + + return dict( + loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/ld_head.py b/downstream/mmdetection/mmdet/models/dense_heads/ld_head.py new file mode 100644 index 0000000..c5a945f --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/ld_head.py @@ -0,0 +1,261 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.runner import force_fp32 + +from mmdet.core import bbox_overlaps, multi_apply, reduce_mean +from ..builder import HEADS, build_loss +from .gfl_head import GFLHead + + +@HEADS.register_module() +class LDHead(GFLHead): + """Localization distillation Head. (Short description) + + It utilizes the learned bbox distributions to transfer the localization + dark knowledge from teacher to student. Original paper: `Localization + Distillation for Object Detection. `_ + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + loss_ld (dict): Config of Localization Distillation Loss (LD), + T is the temperature for distillation. + """ + + def __init__(self, + num_classes, + in_channels, + loss_ld=dict( + type='LocalizationDistillationLoss', + loss_weight=0.25, + T=10), + **kwargs): + + super(LDHead, self).__init__(num_classes, in_channels, **kwargs) + self.loss_ld = build_loss(loss_ld) + + def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, + bbox_targets, stride, soft_targets, num_total_samples): + """Compute loss of a single scale level. + + Args: + anchors (Tensor): Box reference for each scale level with shape + (N, num_total_anchors, 4). + cls_score (Tensor): Cls and quality joint scores for each scale + level has shape (N, num_classes, H, W). + bbox_pred (Tensor): Box distribution logits for each scale + level with shape (N, 4*(n+1), H, W), n is max value of integral + set. + labels (Tensor): Labels of each anchors with shape + (N, num_total_anchors). 
+ label_weights (Tensor): Label weights of each anchor with shape + (N, num_total_anchors) + bbox_targets (Tensor): BBox regression targets of each anchor + weight shape (N, num_total_anchors, 4). + stride (tuple): Stride in this scale level. + num_total_samples (int): Number of positive samples that is + reduced over all GPUs. + + Returns: + dict[tuple, Tensor]: Loss components and weight targets. + """ + assert stride[0] == stride[1], 'h stride is not equal to w stride!' + anchors = anchors.reshape(-1, 4) + cls_score = cls_score.permute(0, 2, 3, + 1).reshape(-1, self.cls_out_channels) + bbox_pred = bbox_pred.permute(0, 2, 3, + 1).reshape(-1, 4 * (self.reg_max + 1)) + soft_targets = soft_targets.permute(0, 2, 3, + 1).reshape(-1, + 4 * (self.reg_max + 1)) + + bbox_targets = bbox_targets.reshape(-1, 4) + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero().squeeze(1) + score = label_weights.new_zeros(labels.shape) + + if len(pos_inds) > 0: + pos_bbox_targets = bbox_targets[pos_inds] + pos_bbox_pred = bbox_pred[pos_inds] + pos_anchors = anchors[pos_inds] + pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] + + weight_targets = cls_score.detach().sigmoid() + weight_targets = weight_targets.max(dim=1)[0][pos_inds] + pos_bbox_pred_corners = self.integral(pos_bbox_pred) + pos_decode_bbox_pred = self.bbox_coder.decode( + pos_anchor_centers, pos_bbox_pred_corners) + pos_decode_bbox_targets = pos_bbox_targets / stride[0] + score[pos_inds] = bbox_overlaps( + pos_decode_bbox_pred.detach(), + pos_decode_bbox_targets, + is_aligned=True) + pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) + pos_soft_targets = soft_targets[pos_inds] + soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1) + + target_corners = self.bbox_coder.encode(pos_anchor_centers, + pos_decode_bbox_targets, + self.reg_max).reshape(-1) + + # regression loss + loss_bbox = self.loss_bbox( + pos_decode_bbox_pred, + pos_decode_bbox_targets, + weight=weight_targets, + avg_factor=1.0) + + # dfl loss + loss_dfl = self.loss_dfl( + pred_corners, + target_corners, + weight=weight_targets[:, None].expand(-1, 4).reshape(-1), + avg_factor=4.0) + + # ld loss + loss_ld = self.loss_ld( + pred_corners, + soft_corners, + weight=weight_targets[:, None].expand(-1, 4).reshape(-1), + avg_factor=4.0) + + else: + loss_ld = bbox_pred.sum() * 0 + loss_bbox = bbox_pred.sum() * 0 + loss_dfl = bbox_pred.sum() * 0 + weight_targets = bbox_pred.new_tensor(0) + + # cls (qfl) loss + loss_cls = self.loss_cls( + cls_score, (labels, score), + weight=label_weights, + avg_factor=num_total_samples) + + return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum() + + def forward_train(self, + x, + out_teacher, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=None, + proposal_cfg=None, + **kwargs): + """ + Args: + x (list[Tensor]): Features from FPN. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). 
+ proposal_cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used + + Returns: + tuple[dict, list]: The loss components and proposals of each image. + + - losses (dict[str, Tensor]): A dictionary of loss components. + - proposal_list (list[Tensor]): Proposals of each image. + """ + outs = self(x) + soft_target = out_teacher[1] + if gt_labels is None: + loss_inputs = outs + (gt_bboxes, soft_target, img_metas) + else: + loss_inputs = outs + (gt_bboxes, gt_labels, soft_target, img_metas) + losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + if proposal_cfg is None: + return losses + else: + proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg) + return losses, proposal_list + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + soft_target, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Cls and quality scores for each scale + level has shape (N, num_classes, H, W). + bbox_preds (list[Tensor]): Box distribution logits for each scale + level with shape (N, 4*(n+1), H, W), n is max value of integral + set. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor] | None): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + + (anchor_list, labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets + + num_total_samples = reduce_mean( + torch.tensor(num_total_pos, dtype=torch.float, + device=device)).item() + num_total_samples = max(num_total_samples, 1.0) + + losses_cls, losses_bbox, losses_dfl, losses_ld, \ + avg_factor = multi_apply( + self.loss_single, + anchor_list, + cls_scores, + bbox_preds, + labels_list, + label_weights_list, + bbox_targets_list, + self.prior_generator.strides, + soft_target, + num_total_samples=num_total_samples) + + avg_factor = sum(avg_factor) + 1e-6 + avg_factor = reduce_mean(avg_factor).item() + losses_bbox = [x / avg_factor for x in losses_bbox] + losses_dfl = [x / avg_factor for x in losses_dfl] + return dict( + loss_cls=losses_cls, + loss_bbox=losses_bbox, + loss_dfl=losses_dfl, + loss_ld=losses_ld) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/mask2former_head.py b/downstream/mmdetection/mmdet/models/dense_heads/mask2former_head.py new file mode 100644 index 0000000..78e4d49 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/mask2former_head.py @@ -0,0 +1,430 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
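+# Mask2Former head (see the Mask2FormerHead docstring below): a fixed set of
+# learnable queries is decoded with masked attention over multi-scale
+# pixel-decoder features into per-query class scores and mask logits.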
+import copy + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer_sequence) +from mmcv.ops import point_sample +from mmcv.runner import ModuleList + +from mmdet.core import build_assigner, build_sampler, reduce_mean +from mmdet.models.utils import get_uncertain_point_coords_with_randomness +from ..builder import HEADS, build_loss +from .anchor_free_head import AnchorFreeHead +from .maskformer_head import MaskFormerHead + + +@HEADS.register_module() +class Mask2FormerHead(MaskFormerHead): + """Implements the Mask2Former head. + + See `Masked-attention Mask Transformer for Universal Image + Segmentation `_ for details. + + Args: + in_channels (list[int]): Number of channels in the input feature map. + feat_channels (int): Number of channels for features. + out_channels (int): Number of channels for output. + num_things_classes (int): Number of things. + num_stuff_classes (int): Number of stuff. + num_queries (int): Number of query in Transformer decoder. + pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel + decoder. Defaults to None. + enforce_decoder_input_project (bool, optional): Whether to add + a layer to change the embed_dim of tranformer encoder in + pixel decoder to the embed_dim of transformer decoder. + Defaults to False. + transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for + transformer decoder. Defaults to None. + positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for + transformer decoder position encoding. Defaults to None. + loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification + loss. Defaults to None. + loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss. + Defaults to None. + loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss. + Defaults to None. + train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of + Mask2Former head. + test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of + Mask2Former head. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels, + feat_channels, + out_channels, + num_things_classes=80, + num_stuff_classes=53, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=None, + enforce_decoder_input_project=False, + transformer_decoder=None, + positional_encoding=None, + loss_cls=None, + loss_mask=None, + loss_dice=None, + train_cfg=None, + test_cfg=None, + init_cfg=None, + **kwargs): + super(AnchorFreeHead, self).__init__(init_cfg) + self.num_things_classes = num_things_classes + self.num_stuff_classes = num_stuff_classes + self.num_classes = self.num_things_classes + self.num_stuff_classes + self.num_queries = num_queries + self.num_transformer_feat_level = num_transformer_feat_level + self.num_heads = transformer_decoder.transformerlayers.\ + attn_cfgs.num_heads + self.num_transformer_decoder_layers = transformer_decoder.num_layers + assert pixel_decoder.encoder.transformerlayers.\ + attn_cfgs.num_levels == num_transformer_feat_level + pixel_decoder_ = copy.deepcopy(pixel_decoder) + pixel_decoder_.update( + in_channels=in_channels, + feat_channels=feat_channels, + out_channels=out_channels) + self.pixel_decoder = build_plugin_layer(pixel_decoder_)[1] + self.transformer_decoder = build_transformer_layer_sequence( + transformer_decoder) + self.decoder_embed_dims = self.transformer_decoder.embed_dims + + self.decoder_input_projs = ModuleList() + # from low resolution to high resolution + for _ in range(num_transformer_feat_level): + if (self.decoder_embed_dims != feat_channels + or enforce_decoder_input_project): + self.decoder_input_projs.append( + Conv2d( + feat_channels, self.decoder_embed_dims, kernel_size=1)) + else: + self.decoder_input_projs.append(nn.Identity()) + self.decoder_positional_encoding = build_positional_encoding( + positional_encoding) + self.query_embed = nn.Embedding(self.num_queries, feat_channels) + self.query_feat = nn.Embedding(self.num_queries, feat_channels) + # from low resolution to high resolution + self.level_embed = nn.Embedding(self.num_transformer_feat_level, + feat_channels) + + self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) + self.mask_embed = nn.Sequential( + nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), + nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), + nn.Linear(feat_channels, out_channels)) + + self.test_cfg = test_cfg + self.train_cfg = train_cfg + if train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + self.sampler = build_sampler(self.train_cfg.sampler, context=self) + self.num_points = self.train_cfg.get('num_points', 12544) + self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0) + self.importance_sample_ratio = self.train_cfg.get( + 'importance_sample_ratio', 0.75) + + self.class_weight = loss_cls.class_weight + self.loss_cls = build_loss(loss_cls) + self.loss_mask = build_loss(loss_mask) + self.loss_dice = build_loss(loss_dice) + + def init_weights(self): + for m in self.decoder_input_projs: + if isinstance(m, Conv2d): + caffe2_xavier_init(m, bias=0) + + self.pixel_decoder.init_weights() + + for p in self.transformer_decoder.parameters(): + if p.dim() > 1: + nn.init.xavier_normal_(p) + + def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, + img_metas): + """Compute classification and mask targets for one image. + + Args: + cls_score (Tensor): Mask score logits from a single decoder layer + for one image. Shape (num_queries, cls_out_channels). 
+ mask_pred (Tensor): Mask logits for a single decoder layer for one + image. Shape (num_queries, h, w). + gt_labels (Tensor): Ground truth class indices for one image with + shape (num_gts, ). + gt_masks (Tensor): Ground truth mask for each image, each with + shape (num_gts, h, w). + img_metas (dict): Image informtation. + + Returns: + tuple[Tensor]: A tuple containing the following for one image. + + - labels (Tensor): Labels of each image. \ + shape (num_queries, ). + - label_weights (Tensor): Label weights of each image. \ + shape (num_queries, ). + - mask_targets (Tensor): Mask targets of each image. \ + shape (num_queries, h, w). + - mask_weights (Tensor): Mask weights of each image. \ + shape (num_queries, ). + - pos_inds (Tensor): Sampled positive indices for each \ + image. + - neg_inds (Tensor): Sampled negative indices for each \ + image. + """ + # sample points + num_queries = cls_score.shape[0] + num_gts = gt_labels.shape[0] + + point_coords = torch.rand((1, self.num_points, 2), + device=cls_score.device) + # shape (num_queries, num_points) + mask_points_pred = point_sample( + mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, + 1)).squeeze(1) + # shape (num_gts, num_points) + gt_points_masks = point_sample( + gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1, + 1)).squeeze(1) + + # assign and sample + assign_result = self.assigner.assign(cls_score, mask_points_pred, + gt_labels, gt_points_masks, + img_metas) + sampling_result = self.sampler.sample(assign_result, mask_pred, + gt_masks) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label target + labels = gt_labels.new_full((self.num_queries, ), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_labels.new_ones((self.num_queries, )) + + # mask target + mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] + mask_weights = mask_pred.new_zeros((self.num_queries, )) + mask_weights[pos_inds] = 1.0 + + return (labels, label_weights, mask_targets, mask_weights, pos_inds, + neg_inds) + + def loss_single(self, cls_scores, mask_preds, gt_labels_list, + gt_masks_list, img_metas): + """Loss function for outputs from a single decoder layer. + + Args: + cls_scores (Tensor): Mask score logits from a single decoder layer + for all images. Shape (batch_size, num_queries, + cls_out_channels). Note `cls_out_channels` should includes + background. + mask_preds (Tensor): Mask logits for a pixel decoder for all + images. Shape (batch_size, num_queries, h, w). + gt_labels_list (list[Tensor]): Ground truth class indices for each + image, each with shape (num_gts, ). + gt_masks_list (list[Tensor]): Ground truth mask for each image, + each with shape (num_gts, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + tuple[Tensor]: Loss components for outputs from a single \ + decoder layer. 
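`_get_target_single` above compares predictions and ground truth only at a small set of randomly sampled points (`point_sample`), which keeps the matching cost cheap even for high-resolution masks. A rough, self-contained equivalent of that sampling op, assuming coordinates in `[0, 1]` (a conceptual sketch, not `mmcv.ops.point_sample` itself):

```python
import torch
import torch.nn.functional as F


def point_sample_sketch(masks, point_coords):
    """Sample mask values at normalized (x, y) point coordinates in [0, 1]
    by rescaling to grid_sample's [-1, 1] convention and interpolating.

    masks: (N, C, H, W); point_coords: (N, P, 2). Returns (N, C, P).
    """
    grid = 2.0 * point_coords - 1.0              # [0, 1] -> [-1, 1]
    grid = grid.unsqueeze(2)                     # (N, P, 1, 2)
    out = F.grid_sample(masks, grid, align_corners=False)
    return out.squeeze(3)                        # (N, C, P)


mask_pred = torch.randn(100, 1, 64, 64)          # one logit map per query
coords = torch.rand(100, 12544, 2)               # random points, shared layout
print(point_sample_sketch(mask_pred, coords).shape)  # torch.Size([100, 1, 12544])
```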
+ """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + mask_preds_list = [mask_preds[i] for i in range(num_imgs)] + (labels_list, label_weights_list, mask_targets_list, mask_weights_list, + num_total_pos, + num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, + gt_labels_list, gt_masks_list, + img_metas) + # shape (batch_size, num_queries) + labels = torch.stack(labels_list, dim=0) + # shape (batch_size, num_queries) + label_weights = torch.stack(label_weights_list, dim=0) + # shape (num_total_gts, h, w) + mask_targets = torch.cat(mask_targets_list, dim=0) + # shape (batch_size, num_queries) + mask_weights = torch.stack(mask_weights_list, dim=0) + + # classfication loss + # shape (batch_size * num_queries, ) + cls_scores = cls_scores.flatten(0, 1) + labels = labels.flatten(0, 1) + label_weights = label_weights.flatten(0, 1) + + class_weight = cls_scores.new_tensor(self.class_weight) + loss_cls = self.loss_cls( + cls_scores, + labels, + label_weights, + avg_factor=class_weight[labels].sum()) + + num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) + num_total_masks = max(num_total_masks, 1) + + # extract positive ones + # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) + mask_preds = mask_preds[mask_weights > 0] + + if mask_targets.shape[0] == 0: + # zero match + loss_dice = mask_preds.sum() + loss_mask = mask_preds.sum() + return loss_cls, loss_mask, loss_dice + + with torch.no_grad(): + points_coords = get_uncertain_point_coords_with_randomness( + mask_preds.unsqueeze(1), None, self.num_points, + self.oversample_ratio, self.importance_sample_ratio) + # shape (num_total_gts, h, w) -> (num_total_gts, num_points) + mask_point_targets = point_sample( + mask_targets.unsqueeze(1).float(), points_coords).squeeze(1) + # shape (num_queries, h, w) -> (num_queries, num_points) + mask_point_preds = point_sample( + mask_preds.unsqueeze(1), points_coords).squeeze(1) + + # dice loss + loss_dice = self.loss_dice( + mask_point_preds, mask_point_targets, avg_factor=num_total_masks) + + # mask loss + # shape (num_queries, num_points) -> (num_queries * num_points, ) + mask_point_preds = mask_point_preds.reshape(-1) + # shape (num_total_gts, num_points) -> (num_total_gts * num_points, ) + mask_point_targets = mask_point_targets.reshape(-1) + loss_mask = self.loss_mask( + mask_point_preds, + mask_point_targets, + avg_factor=num_total_masks * self.num_points) + + return loss_cls, loss_mask, loss_dice + + def forward_head(self, decoder_out, mask_feature, attn_mask_target_size): + """Forward for head part which is called after every decoder layer. + + Args: + decoder_out (Tensor): in shape (num_queries, batch_size, c). + mask_feature (Tensor): in shape (batch_size, c, h, w). + attn_mask_target_size (tuple[int, int]): target attention + mask size. + + Returns: + tuple: A tuple contain three elements. + + - cls_pred (Tensor): Classification scores in shape \ + (batch_size, num_queries, cls_out_channels). \ + Note `cls_out_channels` should includes background. + - mask_pred (Tensor): Mask scores in shape \ + (batch_size, num_queries,h, w). + - attn_mask (Tensor): Attention mask in shape \ + (batch_size * num_heads, num_queries, h, w). 
+ """ + decoder_out = self.transformer_decoder.post_norm(decoder_out) + decoder_out = decoder_out.transpose(0, 1) + # shape (num_queries, batch_size, c) + cls_pred = self.cls_embed(decoder_out) + # shape (num_queries, batch_size, c) + mask_embed = self.mask_embed(decoder_out) + # shape (num_queries, batch_size, h, w) + mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature) + attn_mask = F.interpolate( + mask_pred, + attn_mask_target_size, + mode='bilinear', + align_corners=False) + # shape (num_queries, batch_size, h, w) -> + # (batch_size * num_head, num_queries, h, w) + attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat( + (1, self.num_heads, 1, 1)).flatten(0, 1) + attn_mask = attn_mask.sigmoid() < 0.5 + attn_mask = attn_mask.detach() + + return cls_pred, mask_pred, attn_mask + + def forward(self, feats, img_metas): + """Forward function. + + Args: + feats (list[Tensor]): Multi scale Features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + tuple: A tuple contains two elements. + + - cls_pred_list (list[Tensor)]: Classification logits \ + for each decoder layer. Each is a 3D-tensor with shape \ + (batch_size, num_queries, cls_out_channels). \ + Note `cls_out_channels` should includes background. + - mask_pred_list (list[Tensor]): Mask logits for each \ + decoder layer. Each with shape (batch_size, num_queries, \ + h, w). + """ + batch_size = len(img_metas) + mask_features, multi_scale_memorys = self.pixel_decoder(feats) + # multi_scale_memorys (from low resolution to high resolution) + decoder_inputs = [] + decoder_positional_encodings = [] + for i in range(self.num_transformer_feat_level): + decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i]) + # shape (batch_size, c, h, w) -> (h*w, batch_size, c) + decoder_input = decoder_input.flatten(2).permute(2, 0, 1) + level_embed = self.level_embed.weight[i].view(1, 1, -1) + decoder_input = decoder_input + level_embed + # shape (batch_size, c, h, w) -> (h*w, batch_size, c) + mask = decoder_input.new_zeros( + (batch_size, ) + multi_scale_memorys[i].shape[-2:], + dtype=torch.bool) + decoder_positional_encoding = self.decoder_positional_encoding( + mask) + decoder_positional_encoding = decoder_positional_encoding.flatten( + 2).permute(2, 0, 1) + decoder_inputs.append(decoder_input) + decoder_positional_encodings.append(decoder_positional_encoding) + # shape (num_queries, c) -> (num_queries, batch_size, c) + query_feat = self.query_feat.weight.unsqueeze(1).repeat( + (1, batch_size, 1)) + query_embed = self.query_embed.weight.unsqueeze(1).repeat( + (1, batch_size, 1)) + + cls_pred_list = [] + mask_pred_list = [] + cls_pred, mask_pred, attn_mask = self.forward_head( + query_feat, mask_features, multi_scale_memorys[0].shape[-2:]) + cls_pred_list.append(cls_pred) + mask_pred_list.append(mask_pred) + + for i in range(self.num_transformer_decoder_layers): + level_idx = i % self.num_transformer_feat_level + # if a mask is all True(all background), then set it all False. 
+ attn_mask[torch.where( + attn_mask.sum(-1) == attn_mask.shape[-1])] = False + + # cross_attn + self_attn + layer = self.transformer_decoder.layers[i] + attn_masks = [attn_mask, None] + query_feat = layer( + query=query_feat, + key=decoder_inputs[level_idx], + value=decoder_inputs[level_idx], + query_pos=query_embed, + key_pos=decoder_positional_encodings[level_idx], + attn_masks=attn_masks, + query_key_padding_mask=None, + # here we do not apply masking on padded region + key_padding_mask=None) + cls_pred, mask_pred, attn_mask = self.forward_head( + query_feat, mask_features, multi_scale_memorys[ + (i + 1) % self.num_transformer_feat_level].shape[-2:]) + + cls_pred_list.append(cls_pred) + mask_pred_list.append(mask_pred) + + return cls_pred_list, mask_pred_list diff --git a/downstream/mmdetection/mmdet/models/dense_heads/maskformer_head.py b/downstream/mmdetection/mmdet/models/dense_heads/maskformer_head.py new file mode 100644 index 0000000..abb17ad --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/maskformer_head.py @@ -0,0 +1,555 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer_sequence) +from mmcv.runner import force_fp32 + +from mmdet.core import build_assigner, build_sampler, multi_apply, reduce_mean +from mmdet.models.utils import preprocess_panoptic_gt +from ..builder import HEADS, build_loss +from .anchor_free_head import AnchorFreeHead + + +@HEADS.register_module() +class MaskFormerHead(AnchorFreeHead): + """Implements the MaskFormer head. + + See `Per-Pixel Classification is Not All You Need for Semantic + Segmentation `_ for details. + + Args: + in_channels (list[int]): Number of channels in the input feature map. + feat_channels (int): Number of channels for feature. + out_channels (int): Number of channels for output. + num_things_classes (int): Number of things. + num_stuff_classes (int): Number of stuff. + num_queries (int): Number of query in Transformer. + pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel + decoder. Defaults to None. + enforce_decoder_input_project (bool, optional): Whether to add a layer + to change the embed_dim of tranformer encoder in pixel decoder to + the embed_dim of transformer decoder. Defaults to False. + transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for + transformer decoder. Defaults to None. + positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for + transformer decoder position encoding. Defaults to None. + loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification + loss. Defaults to `CrossEntropyLoss`. + loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss. + Defaults to `FocalLoss`. + loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss. + Defaults to `DiceLoss`. + train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of + Maskformer head. + test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of Maskformer + head. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels, + feat_channels, + out_channels, + num_things_classes=80, + num_stuff_classes=53, + num_queries=100, + pixel_decoder=None, + enforce_decoder_input_project=False, + transformer_decoder=None, + positional_encoding=None, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0, + class_weight=[1.0] * 133 + [0.1]), + loss_mask=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=20.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + naive_dice=True, + loss_weight=1.0), + train_cfg=None, + test_cfg=None, + init_cfg=None, + **kwargs): + super(AnchorFreeHead, self).__init__(init_cfg) + self.num_things_classes = num_things_classes + self.num_stuff_classes = num_stuff_classes + self.num_classes = self.num_things_classes + self.num_stuff_classes + self.num_queries = num_queries + + pixel_decoder.update( + in_channels=in_channels, + feat_channels=feat_channels, + out_channels=out_channels) + self.pixel_decoder = build_plugin_layer(pixel_decoder)[1] + self.transformer_decoder = build_transformer_layer_sequence( + transformer_decoder) + self.decoder_embed_dims = self.transformer_decoder.embed_dims + pixel_decoder_type = pixel_decoder.get('type') + if pixel_decoder_type == 'PixelDecoder' and ( + self.decoder_embed_dims != in_channels[-1] + or enforce_decoder_input_project): + self.decoder_input_proj = Conv2d( + in_channels[-1], self.decoder_embed_dims, kernel_size=1) + else: + self.decoder_input_proj = nn.Identity() + self.decoder_pe = build_positional_encoding(positional_encoding) + self.query_embed = nn.Embedding(self.num_queries, out_channels) + + self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) + self.mask_embed = nn.Sequential( + nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), + nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), + nn.Linear(feat_channels, out_channels)) + + self.test_cfg = test_cfg + self.train_cfg = train_cfg + if train_cfg: + self.assigner = build_assigner(train_cfg.assigner) + self.sampler = build_sampler(train_cfg.sampler, context=self) + + self.class_weight = loss_cls.class_weight + self.loss_cls = build_loss(loss_cls) + self.loss_mask = build_loss(loss_mask) + self.loss_dice = build_loss(loss_dice) + + def init_weights(self): + if isinstance(self.decoder_input_proj, Conv2d): + caffe2_xavier_init(self.decoder_input_proj, bias=0) + + self.pixel_decoder.init_weights() + + for p in self.transformer_decoder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs, + img_metas): + """Preprocess the ground truth for all images. + + Args: + gt_labels_list (list[Tensor]): Each is ground truth + labels of each bbox, with shape (num_gts, ). + gt_masks_list (list[BitmapMasks]): Each is ground truth + masks of each instances of a image, shape + (num_gts, h, w). + gt_semantic_seg (Tensor | None): Ground truth of semantic + segmentation with the shape (batch_size, n, h, w). + [0, num_thing_class - 1] means things, + [num_thing_class, num_class-1] means stuff, + 255 means VOID. It's None when training instance segmentation. + img_metas (list[dict]): List of image meta information. + + Returns: + tuple: a tuple containing the following targets. + - labels (list[Tensor]): Ground truth class indices\ + for all images. Each with shape (n, ), n is the sum of\ + number of stuff type and number of instance in a image. 
+ - masks (list[Tensor]): Ground truth mask for each\ + image, each with shape (n, h, w). + """ + num_things_list = [self.num_things_classes] * len(gt_labels_list) + num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list) + if gt_semantic_segs is None: + gt_semantic_segs = [None] * len(gt_labels_list) + + targets = multi_apply(preprocess_panoptic_gt, gt_labels_list, + gt_masks_list, gt_semantic_segs, num_things_list, + num_stuff_list, img_metas) + labels, masks = targets + return labels, masks + + def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list, + gt_masks_list, img_metas): + """Compute classification and mask targets for all images for a decoder + layer. + + Args: + cls_scores_list (list[Tensor]): Mask score logits from a single + decoder layer for all images. Each with shape (num_queries, + cls_out_channels). + mask_preds_list (list[Tensor]): Mask logits from a single decoder + layer for all images. Each with shape (num_queries, h, w). + gt_labels_list (list[Tensor]): Ground truth class indices for all + images. Each with shape (n, ), n is the sum of number of stuff + type and number of instance in a image. + gt_masks_list (list[Tensor]): Ground truth mask for each image, + each with shape (n, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + tuple[list[Tensor]]: a tuple containing the following targets. + - labels_list (list[Tensor]): Labels of all images.\ + Each with shape (num_queries, ). + - label_weights_list (list[Tensor]): Label weights\ + of all images. Each with shape (num_queries, ). + - mask_targets_list (list[Tensor]): Mask targets of\ + all images. Each with shape (num_queries, h, w). + - mask_weights_list (list[Tensor]): Mask weights of\ + all images. Each with shape (num_queries, ). + - num_total_pos (int): Number of positive samples in\ + all images. + - num_total_neg (int): Number of negative samples in\ + all images. + """ + (labels_list, label_weights_list, mask_targets_list, mask_weights_list, + pos_inds_list, + neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list, + mask_preds_list, gt_labels_list, + gt_masks_list, img_metas) + + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, mask_targets_list, + mask_weights_list, num_total_pos, num_total_neg) + + def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, + img_metas): + """Compute classification and mask targets for one image. + + Args: + cls_score (Tensor): Mask score logits from a single decoder layer + for one image. Shape (num_queries, cls_out_channels). + mask_pred (Tensor): Mask logits for a single decoder layer for one + image. Shape (num_queries, h, w). + gt_labels (Tensor): Ground truth class indices for one image with + shape (n, ). n is the sum of number of stuff type and number + of instance in a image. + gt_masks (Tensor): Ground truth mask for each image, each with + shape (n, h, w). + img_metas (dict): Image informtation. + + Returns: + tuple[Tensor]: a tuple containing the following for one image. + - labels (Tensor): Labels of each image. + shape (num_queries, ). + - label_weights (Tensor): Label weights of each image. + shape (num_queries, ). + - mask_targets (Tensor): Mask targets of each image. + shape (num_queries, h, w). + - mask_weights (Tensor): Mask weights of each image. + shape (num_queries, ). + - pos_inds (Tensor): Sampled positive indices for each image. 
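`get_targets` above delegates per-image matching to `_get_target_single`, which pairs each ground-truth instance with exactly one query via the configured assigner (a Hungarian-style bipartite matcher in the usual MaskFormer configs, with classification plus mask costs). A toy sketch of the matching idea, using random stand-in costs:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

num_queries, num_gts = 100, 5

# toy cost matrix: rows = queries, cols = ground-truth instances; in a real
# assigner the entries combine a classification cost with mask costs
cls_cost = np.random.rand(num_queries, num_gts)
mask_cost = np.random.rand(num_queries, num_gts)
cost = cls_cost + mask_cost

row_ind, col_ind = linear_sum_assignment(cost)   # one-to-one, minimum total cost
print(list(zip(row_ind, col_ind)))               # matched (query, gt) pairs
# matched queries become positives; every other query is assigned "no object"
```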
+ - neg_inds (Tensor): Sampled negative indices for each image. + """ + target_shape = mask_pred.shape[-2:] + if gt_masks.shape[0] > 0: + gt_masks_downsampled = F.interpolate( + gt_masks.unsqueeze(1).float(), target_shape, + mode='nearest').squeeze(1).long() + else: + gt_masks_downsampled = gt_masks + + # assign and sample + assign_result = self.assigner.assign(cls_score, mask_pred, gt_labels, + gt_masks_downsampled, img_metas) + sampling_result = self.sampler.sample(assign_result, mask_pred, + gt_masks) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label target + labels = gt_labels.new_full((self.num_queries, ), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_labels.new_ones(self.num_queries) + + # mask target + mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] + mask_weights = mask_pred.new_zeros((self.num_queries, )) + mask_weights[pos_inds] = 1.0 + + return (labels, label_weights, mask_targets, mask_weights, pos_inds, + neg_inds) + + @force_fp32(apply_to=('all_cls_scores', 'all_mask_preds')) + def loss(self, all_cls_scores, all_mask_preds, gt_labels_list, + gt_masks_list, img_metas): + """Loss function. + + Args: + all_cls_scores (Tensor): Classification scores for all decoder + layers with shape (num_decoder, batch_size, num_queries, + cls_out_channels). Note `cls_out_channels` should includes + background. + all_mask_preds (Tensor): Mask scores for all decoder layers with + shape (num_decoder, batch_size, num_queries, h, w). + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (n, ). n is the sum of number of stuff type + and number of instance in a image. + gt_masks_list (list[Tensor]): Ground truth mask for each image with + shape (n, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + num_dec_layers = len(all_cls_scores) + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)] + img_metas_list = [img_metas for _ in range(num_dec_layers)] + losses_cls, losses_mask, losses_dice = multi_apply( + self.loss_single, all_cls_scores, all_mask_preds, + all_gt_labels_list, all_gt_masks_list, img_metas_list) + + loss_dict = dict() + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_mask'] = losses_mask[-1] + loss_dict['loss_dice'] = losses_dice[-1] + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_mask_i, loss_dice_i in zip( + losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i + loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i + num_dec_layer += 1 + return loss_dict + + def loss_single(self, cls_scores, mask_preds, gt_labels_list, + gt_masks_list, img_metas): + """Loss function for outputs from a single decoder layer. + + Args: + cls_scores (Tensor): Mask score logits from a single decoder layer + for all images. Shape (batch_size, num_queries, + cls_out_channels). Note `cls_out_channels` should includes + background. + mask_preds (Tensor): Mask logits for a pixel decoder for all + images. Shape (batch_size, num_queries, h, w). + gt_labels_list (list[Tensor]): Ground truth class indices for each + image, each with shape (n, ). 
n is the sum of number of stuff + types and number of instances in a image. + gt_masks_list (list[Tensor]): Ground truth mask for each image, + each with shape (n, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + tuple[Tensor]: Loss components for outputs from a single decoder\ + layer. + """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + mask_preds_list = [mask_preds[i] for i in range(num_imgs)] + + (labels_list, label_weights_list, mask_targets_list, mask_weights_list, + num_total_pos, + num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, + gt_labels_list, gt_masks_list, + img_metas) + # shape (batch_size, num_queries) + labels = torch.stack(labels_list, dim=0) + # shape (batch_size, num_queries) + label_weights = torch.stack(label_weights_list, dim=0) + # shape (num_total_gts, h, w) + mask_targets = torch.cat(mask_targets_list, dim=0) + # shape (batch_size, num_queries) + mask_weights = torch.stack(mask_weights_list, dim=0) + + # classfication loss + # shape (batch_size * num_queries, ) + cls_scores = cls_scores.flatten(0, 1) + labels = labels.flatten(0, 1) + label_weights = label_weights.flatten(0, 1) + + class_weight = cls_scores.new_tensor(self.class_weight) + loss_cls = self.loss_cls( + cls_scores, + labels, + label_weights, + avg_factor=class_weight[labels].sum()) + + num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) + num_total_masks = max(num_total_masks, 1) + + # extract positive ones + # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) + mask_preds = mask_preds[mask_weights > 0] + target_shape = mask_targets.shape[-2:] + + if mask_targets.shape[0] == 0: + # zero match + loss_dice = mask_preds.sum() + loss_mask = mask_preds.sum() + return loss_cls, loss_mask, loss_dice + + # upsample to shape of target + # shape (num_total_gts, h, w) + mask_preds = F.interpolate( + mask_preds.unsqueeze(1), + target_shape, + mode='bilinear', + align_corners=False).squeeze(1) + + # dice loss + loss_dice = self.loss_dice( + mask_preds, mask_targets, avg_factor=num_total_masks) + + # mask loss + # FocalLoss support input of shape (n, num_class) + h, w = mask_preds.shape[-2:] + # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1) + mask_preds = mask_preds.reshape(-1, 1) + # shape (num_total_gts, h, w) -> (num_total_gts * h * w) + mask_targets = mask_targets.reshape(-1) + # target is (1 - mask_targets) !!! + loss_mask = self.loss_mask( + mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w) + + return loss_cls, loss_mask, loss_dice + + def forward(self, feats, img_metas): + """Forward function. + + Args: + feats (list[Tensor]): Features from the upstream network, each + is a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + tuple: a tuple contains two elements. + - all_cls_scores (Tensor): Classification scores for each\ + scale level. Each is a 4D-tensor with shape\ + (num_decoder, batch_size, num_queries, cls_out_channels).\ + Note `cls_out_channels` should includes background. + - all_mask_preds (Tensor): Mask scores for each decoder\ + layer. Each with shape (num_decoder, batch_size,\ + num_queries, h, w). 
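`loss_single` above upsamples the matched mask predictions to the target resolution and applies a dice loss plus a per-pixel focal loss (note the flipped `1 - mask_targets` target the code points out for the focal implementation). For reference, a generic soft dice term looks like this (a plain sketch, not mmdet's `DiceLoss` with its weighting and reduction options):

```python
import torch


def soft_dice_loss(mask_logits, mask_targets, eps=1e-3):
    """Per-mask soft dice loss: 1 - 2*|P*T| / (|P| + |T|), with predictions
    passed through a sigmoid."""
    pred = mask_logits.sigmoid().flatten(1)
    tgt = mask_targets.flatten(1).float()
    inter = (pred * tgt).sum(-1)
    union = pred.sum(-1) + tgt.sum(-1)
    return 1 - (2 * inter + eps) / (union + eps)


pred = torch.randn(7, 128, 128)                  # (num_matched, h, w) logits
tgt = (torch.rand(7, 128, 128) > 0.5).float()
print(soft_dice_loss(pred, tgt).shape)           # torch.Size([7])
```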
+ """ + batch_size = len(img_metas) + input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + padding_mask = feats[-1].new_ones( + (batch_size, input_img_h, input_img_w), dtype=torch.float32) + for i in range(batch_size): + img_h, img_w, _ = img_metas[i]['img_shape'] + padding_mask[i, :img_h, :img_w] = 0 + padding_mask = F.interpolate( + padding_mask.unsqueeze(1), + size=feats[-1].shape[-2:], + mode='nearest').to(torch.bool).squeeze(1) + # when backbone is swin, memory is output of last stage of swin. + # when backbone is r50, memory is output of tranformer encoder. + mask_features, memory = self.pixel_decoder(feats, img_metas) + pos_embed = self.decoder_pe(padding_mask) + memory = self.decoder_input_proj(memory) + # shape (batch_size, c, h, w) -> (h*w, batch_size, c) + memory = memory.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + # shape (batch_size, h * w) + padding_mask = padding_mask.flatten(1) + # shape = (num_queries, embed_dims) + query_embed = self.query_embed.weight + # shape = (num_queries, batch_size, embed_dims) + query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1) + target = torch.zeros_like(query_embed) + # shape (num_decoder, num_queries, batch_size, embed_dims) + out_dec = self.transformer_decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=padding_mask) + # shape (num_decoder, batch_size, num_queries, embed_dims) + out_dec = out_dec.transpose(1, 2) + + # cls_scores + all_cls_scores = self.cls_embed(out_dec) + + # mask_preds + mask_embed = self.mask_embed(out_dec) + all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, + mask_features) + + return all_cls_scores, all_mask_preds + + def forward_train(self, + feats, + img_metas, + gt_bboxes, + gt_labels, + gt_masks, + gt_semantic_seg, + gt_bboxes_ignore=None): + """Forward function for training mode. + + Args: + feats (list[Tensor]): Multi-level features from the upstream + network, each is a 4D-tensor. + img_metas (list[Dict]): List of image information. + gt_bboxes (list[Tensor]): Each element is ground truth bboxes of + the image, shape (num_gts, 4). Not used here. + gt_labels (list[Tensor]): Each element is ground truth labels of + each box, shape (num_gts,). + gt_masks (list[BitmapMasks]): Each element is masks of instances + of a image, shape (num_gts, h, w). + gt_semantic_seg (list[tensor] | None): Each element is the ground + truth of semantic segmentation with the shape (N, H, W). + [0, num_thing_class - 1] means things, + [num_thing_class, num_class-1] means stuff, + 255 means VOID. It's None when training instance segmentation. + gt_bboxes_ignore (list[Tensor]): Ground truth bboxes to be + ignored. Defaults to None. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # not consider ignoring bboxes + assert gt_bboxes_ignore is None + + # forward + all_cls_scores, all_mask_preds = self(feats, img_metas) + + # preprocess ground truth + gt_labels, gt_masks = self.preprocess_gt(gt_labels, gt_masks, + gt_semantic_seg, img_metas) + + # loss + losses = self.loss(all_cls_scores, all_mask_preds, gt_labels, gt_masks, + img_metas) + + return losses + + def simple_test(self, feats, img_metas, **kwargs): + """Test without augmentaton. + + Args: + feats (list[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + tuple: A tuple contains two tensors. 
+ + - mask_cls_results (Tensor): Mask classification logits,\ + shape (batch_size, num_queries, cls_out_channels). + Note `cls_out_channels` should includes background. + - mask_pred_results (Tensor): Mask logits, shape \ + (batch_size, num_queries, h, w). + """ + all_cls_scores, all_mask_preds = self(feats, img_metas) + mask_cls_results = all_cls_scores[-1] + mask_pred_results = all_mask_preds[-1] + + # upsample masks + img_shape = img_metas[0]['batch_input_shape'] + mask_pred_results = F.interpolate( + mask_pred_results, + size=(img_shape[0], img_shape[1]), + mode='bilinear', + align_corners=False) + + return mask_cls_results, mask_pred_results diff --git a/downstream/mmdetection/mmdet/models/dense_heads/nasfcos_head.py b/downstream/mmdetection/mmdet/models/dense_heads/nasfcos_head.py new file mode 100644 index 0000000..380c912 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/nasfcos_head.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch.nn as nn +from mmcv.cnn import ConvModule, Scale + +from mmdet.models.dense_heads.fcos_head import FCOSHead +from ..builder import HEADS + + +@HEADS.register_module() +class NASFCOSHead(FCOSHead): + """Anchor-free head used in `NASFCOS `_. + + It is quite similar with FCOS head, except for the searched structure of + classification branch and bbox regression branch, where a structure of + "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead. + """ + + def __init__(self, *args, init_cfg=None, **kwargs): + if init_cfg is None: + init_cfg = [ + dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']), + dict( + type='Normal', + std=0.01, + override=[ + dict(name='conv_reg'), + dict(name='conv_centerness'), + dict( + name='conv_cls', + type='Normal', + std=0.01, + bias_prob=0.01) + ]), + ] + super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + dconv3x3_config = dict( + type='DCNv2', + kernel_size=3, + use_bias=True, + deform_groups=2, + padding=1) + conv3x3_config = dict(type='Conv', kernel_size=3, padding=1) + conv1x1_config = dict(type='Conv', kernel_size=1) + + self.arch_config = [ + dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config + ] + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i, op_ in enumerate(self.arch_config): + op = copy.deepcopy(op_) + chn = self.in_channels if i == 0 else self.feat_channels + assert isinstance(op, dict) + use_bias = op.pop('use_bias', False) + padding = op.pop('padding', 0) + kernel_size = op.pop('kernel_size') + module = ConvModule( + chn, + self.feat_channels, + kernel_size, + stride=1, + padding=padding, + norm_cfg=self.norm_cfg, + bias=use_bias, + conv_cfg=op) + + self.cls_convs.append(copy.deepcopy(module)) + self.reg_convs.append(copy.deepcopy(module)) + + self.conv_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) + self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) + + self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/paa_head.py b/downstream/mmdetection/mmdet/models/dense_heads/paa_head.py new file mode 100644 index 0000000..d79b5b9 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/paa_head.py @@ -0,0 +1,756 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
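`_init_layers` in the NASFCOS head above assembles both towers from a list of per-op config dicts (the searched "dconv3x3, conv3x3, dconv3x3, conv1x1" pattern) via mmcv's `ConvModule`. A simplified, dependency-free sketch of the same config-driven assembly, with ordinary convolutions standing in for the deformable ones:

```python
import copy
import torch
import torch.nn as nn

# stand-ins for the searched op list; plain Conv2d + GN + ReLU instead of
# ConvModule / DCNv2
arch = [
    dict(kernel_size=3, padding=1),
    dict(kernel_size=3, padding=1),
    dict(kernel_size=3, padding=1),
    dict(kernel_size=1, padding=0),
]


def build_branch(in_channels, feat_channels, arch_config):
    layers = []
    for i, cfg in enumerate(arch_config):
        cfg = copy.deepcopy(cfg)
        chn = in_channels if i == 0 else feat_channels
        layers += [
            nn.Conv2d(chn, feat_channels, cfg['kernel_size'],
                      padding=cfg['padding'], bias=False),
            nn.GroupNorm(32, feat_channels),
            nn.ReLU(inplace=True),
        ]
    return nn.Sequential(*layers)


cls_branch = build_branch(256, 256, arch)
reg_branch = build_branch(256, 256, arch)
print(cls_branch(torch.randn(1, 256, 32, 32)).shape)   # torch.Size([1, 256, 32, 32])
```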
+import numpy as np +import torch +from mmcv.runner import force_fp32 + +from mmdet.core import multi_apply, multiclass_nms +from mmdet.core.bbox.iou_calculators import bbox_overlaps +from mmdet.models import HEADS +from mmdet.models.dense_heads import ATSSHead + +EPS = 1e-12 +try: + import sklearn.mixture as skm +except ImportError: + skm = None + + +def levels_to_images(mlvl_tensor): + """Concat multi-level feature maps by image. + + [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] + Convert the shape of each element in mlvl_tensor from (N, C, H, W) to + (N, H*W , C), then split the element to N elements with shape (H*W, C), and + concat elements in same image of all level along first dimension. + + Args: + mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from + corresponding level. Each element is of shape (N, C, H, W) + + Returns: + list[torch.Tensor]: A list that contains N tensors and each tensor is + of shape (num_elements, C) + """ + batch_size = mlvl_tensor[0].size(0) + batch_list = [[] for _ in range(batch_size)] + channels = mlvl_tensor[0].size(1) + for t in mlvl_tensor: + t = t.permute(0, 2, 3, 1) + t = t.view(batch_size, -1, channels).contiguous() + for img in range(batch_size): + batch_list[img].append(t[img]) + return [torch.cat(item, 0) for item in batch_list] + + +@HEADS.register_module() +class PAAHead(ATSSHead): + """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU + Prediction for Object Detection. + + Code is modified from the `official github repo + `_. + + More details can be found in the `paper + `_ . + + Args: + topk (int): Select topk samples with smallest loss in + each level. + score_voting (bool): Whether to use score voting in post-process. + covariance_type : String describing the type of covariance parameters + to be used in :class:`sklearn.mixture.GaussianMixture`. + It must be one of: + + - 'full': each component has its own general covariance matrix + - 'tied': all components share the same general covariance matrix + - 'diag': each component has its own diagonal covariance matrix + - 'spherical': each component has its own single variance + Default: 'diag'. From 'full' to 'spherical', the gmm fitting + process is faster yet the performance could be influenced. For most + cases, 'diag' should be a good choice. + """ + + def __init__(self, + *args, + topk=9, + score_voting=True, + covariance_type='diag', + **kwargs): + # topk used in paa reassign process + self.topk = topk + self.with_score_voting = score_voting + self.covariance_type = covariance_type + super(PAAHead, self).__init__(*args, **kwargs) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) + def loss(self, + cls_scores, + bbox_preds, + iou_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + iou_preds (list[Tensor]): iou_preds for each scale + level with shape (N, num_anchors * 1, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. 
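`levels_to_images` flips the usual per-level layout into a per-image layout so that PAA can rank all candidate anchors of one image by their loss. A compact restatement of the same behaviour with concrete shapes, to make the contract explicit:

```python
import torch


def levels_to_images(mlvl_tensor):
    # same behaviour as the helper above: (N, C, H, W) per level ->
    # one (sum_l H_l * W_l, C) tensor per image
    batch = mlvl_tensor[0].size(0)
    channels = mlvl_tensor[0].size(1)
    per_image = [[] for _ in range(batch)]
    for t in mlvl_tensor:
        t = t.permute(0, 2, 3, 1).reshape(batch, -1, channels)
        for i in range(batch):
            per_image[i].append(t[i])
    return [torch.cat(chunks, 0) for chunks in per_image]


mlvl = [torch.randn(2, 80, 32, 32), torch.randn(2, 80, 16, 16),
        torch.randn(2, 80, 8, 8)]
out = levels_to_images(mlvl)
print(len(out), out[0].shape)   # 2 torch.Size([1344, 80])  (1024 + 256 + 64)
```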
+ gt_bboxes_ignore (list[Tensor] | None): Specify which bounding + boxes can be ignored when are computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss gmm_assignment. + """ + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + ) + (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, + pos_gt_index) = cls_reg_targets + cls_scores = levels_to_images(cls_scores) + cls_scores = [ + item.reshape(-1, self.cls_out_channels) for item in cls_scores + ] + bbox_preds = levels_to_images(bbox_preds) + bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] + iou_preds = levels_to_images(iou_preds) + iou_preds = [item.reshape(-1, 1) for item in iou_preds] + pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, + cls_scores, bbox_preds, labels, + labels_weight, bboxes_target, + bboxes_weight, pos_inds) + + with torch.no_grad(): + reassign_labels, reassign_label_weight, \ + reassign_bbox_weights, num_pos = multi_apply( + self.paa_reassign, + pos_losses_list, + labels, + labels_weight, + bboxes_weight, + pos_inds, + pos_gt_index, + anchor_list) + num_pos = sum(num_pos) + # convert all tensor list to a flatten tensor + cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) + bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) + iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) + labels = torch.cat(reassign_labels, 0).view(-1) + flatten_anchors = torch.cat( + [torch.cat(item, 0) for item in anchor_list]) + labels_weight = torch.cat(reassign_label_weight, 0).view(-1) + bboxes_target = torch.cat(bboxes_target, + 0).view(-1, bboxes_target[0].size(-1)) + + pos_inds_flatten = ((labels >= 0) + & + (labels < self.num_classes)).nonzero().reshape(-1) + + losses_cls = self.loss_cls( + cls_scores, + labels, + labels_weight, + avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0 + if num_pos: + pos_bbox_pred = self.bbox_coder.decode( + flatten_anchors[pos_inds_flatten], + bbox_preds[pos_inds_flatten]) + pos_bbox_target = bboxes_target[pos_inds_flatten] + iou_target = bbox_overlaps( + pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) + losses_iou = self.loss_centerness( + iou_preds[pos_inds_flatten], + iou_target.unsqueeze(-1), + avg_factor=num_pos) + losses_bbox = self.loss_bbox( + pos_bbox_pred, + pos_bbox_target, + iou_target.clamp(min=EPS), + avg_factor=iou_target.sum()) + else: + losses_iou = iou_preds.sum() * 0 + losses_bbox = bbox_preds.sum() * 0 + + return dict( + loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) + + def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight, + bbox_target, bbox_weight, pos_inds): + """Calculate loss of all potential positive samples obtained from first + match process. + + Args: + anchors (list[Tensor]): Anchors of each scale. 
+ cls_score (Tensor): Box scores of single image with shape + (num_anchors, num_classes) + bbox_pred (Tensor): Box energies / deltas of single image + with shape (num_anchors, 4) + label (Tensor): classification target of each anchor with + shape (num_anchors,) + label_weight (Tensor): Classification loss weight of each + anchor with shape (num_anchors). + bbox_target (dict): Regression target of each anchor with + shape (num_anchors, 4). + bbox_weight (Tensor): Bbox weight of each anchor with shape + (num_anchors, 4). + pos_inds (Tensor): Index of all positive samples got from + first assign process. + + Returns: + Tensor: Losses of all positive samples in single image. + """ + if not len(pos_inds): + return cls_score.new([]), + anchors_all_level = torch.cat(anchors, 0) + pos_scores = cls_score[pos_inds] + pos_bbox_pred = bbox_pred[pos_inds] + pos_label = label[pos_inds] + pos_label_weight = label_weight[pos_inds] + pos_bbox_target = bbox_target[pos_inds] + pos_bbox_weight = bbox_weight[pos_inds] + pos_anchors = anchors_all_level[pos_inds] + pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred) + + # to keep loss dimension + loss_cls = self.loss_cls( + pos_scores, + pos_label, + pos_label_weight, + avg_factor=1.0, + reduction_override='none') + + loss_bbox = self.loss_bbox( + pos_bbox_pred, + pos_bbox_target, + pos_bbox_weight, + avg_factor=1.0, # keep same loss weight before reassign + reduction_override='none') + + loss_cls = loss_cls.sum(-1) + pos_loss = loss_bbox + loss_cls + return pos_loss, + + def paa_reassign(self, pos_losses, label, label_weight, bbox_weight, + pos_inds, pos_gt_inds, anchors): + """Fit loss to GMM distribution and separate positive, ignore, negative + samples again with GMM model. + + Args: + pos_losses (Tensor): Losses of all positive samples in + single image. + label (Tensor): classification target of each anchor with + shape (num_anchors,) + label_weight (Tensor): Classification loss weight of each + anchor with shape (num_anchors). + bbox_weight (Tensor): Bbox weight of each anchor with shape + (num_anchors, 4). + pos_inds (Tensor): Index of all positive samples got from + first assign process. + pos_gt_inds (Tensor): Gt_index of all positive samples got + from first assign process. + anchors (list[Tensor]): Anchors of each scale. + + Returns: + tuple: Usually returns a tuple containing learning targets. + + - label (Tensor): classification target of each anchor after + paa assign, with shape (num_anchors,) + - label_weight (Tensor): Classification loss weight of each + anchor after paa assign, with shape (num_anchors). + - bbox_weight (Tensor): Bbox weight of each anchor with shape + (num_anchors, 4). + - num_pos (int): The number of positive samples after paa + assign. 
+ """ + if not len(pos_inds): + return label, label_weight, bbox_weight, 0 + label = label.clone() + label_weight = label_weight.clone() + bbox_weight = bbox_weight.clone() + num_gt = pos_gt_inds.max() + 1 + num_level = len(anchors) + num_anchors_each_level = [item.size(0) for item in anchors] + num_anchors_each_level.insert(0, 0) + inds_level_interval = np.cumsum(num_anchors_each_level) + pos_level_mask = [] + for i in range(num_level): + mask = (pos_inds >= inds_level_interval[i]) & ( + pos_inds < inds_level_interval[i + 1]) + pos_level_mask.append(mask) + pos_inds_after_paa = [label.new_tensor([])] + ignore_inds_after_paa = [label.new_tensor([])] + for gt_ind in range(num_gt): + pos_inds_gmm = [] + pos_loss_gmm = [] + gt_mask = pos_gt_inds == gt_ind + for level in range(num_level): + level_mask = pos_level_mask[level] + level_gt_mask = level_mask & gt_mask + value, topk_inds = pos_losses[level_gt_mask].topk( + min(level_gt_mask.sum(), self.topk), largest=False) + pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds]) + pos_loss_gmm.append(value) + pos_inds_gmm = torch.cat(pos_inds_gmm) + pos_loss_gmm = torch.cat(pos_loss_gmm) + # fix gmm need at least two sample + if len(pos_inds_gmm) < 2: + continue + device = pos_inds_gmm.device + pos_loss_gmm, sort_inds = pos_loss_gmm.sort() + pos_inds_gmm = pos_inds_gmm[sort_inds] + pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy() + min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max() + means_init = np.array([min_loss, max_loss]).reshape(2, 1) + weights_init = np.array([0.5, 0.5]) + precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full + if self.covariance_type == 'spherical': + precisions_init = precisions_init.reshape(2) + elif self.covariance_type == 'diag': + precisions_init = precisions_init.reshape(2, 1) + elif self.covariance_type == 'tied': + precisions_init = np.array([[1.0]]) + if skm is None: + raise ImportError('Please run "pip install sklearn" ' + 'to install sklearn first.') + gmm = skm.GaussianMixture( + 2, + weights_init=weights_init, + means_init=means_init, + precisions_init=precisions_init, + covariance_type=self.covariance_type) + gmm.fit(pos_loss_gmm) + gmm_assignment = gmm.predict(pos_loss_gmm) + scores = gmm.score_samples(pos_loss_gmm) + gmm_assignment = torch.from_numpy(gmm_assignment).to(device) + scores = torch.from_numpy(scores).to(device) + + pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme( + gmm_assignment, scores, pos_inds_gmm) + pos_inds_after_paa.append(pos_inds_temp) + ignore_inds_after_paa.append(ignore_inds_temp) + + pos_inds_after_paa = torch.cat(pos_inds_after_paa) + ignore_inds_after_paa = torch.cat(ignore_inds_after_paa) + reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1) + reassign_ids = pos_inds[reassign_mask] + label[reassign_ids] = self.num_classes + label_weight[ignore_inds_after_paa] = 0 + bbox_weight[reassign_ids] = 0 + num_pos = len(pos_inds_after_paa) + return label, label_weight, bbox_weight, num_pos + + def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm): + """A general separation scheme for gmm model. + + It separates a GMM distribution of candidate samples into three + parts, 0 1 and uncertain areas, and you can implement other + separation schemes by rewriting this function. + + Args: + gmm_assignment (Tensor): The prediction of GMM which is of shape + (num_samples,). The 0/1 value indicates the distribution + that each sample comes from. + scores (Tensor): The probability of sample coming from the + fit GMM distribution. 
The tensor is of shape (num_samples,). + pos_inds_gmm (Tensor): All the indexes of samples which are used + to fit GMM model. The tensor is of shape (num_samples,) + + Returns: + tuple[Tensor]: The indices of positive and ignored samples. + + - pos_inds_temp (Tensor): Indices of positive samples. + - ignore_inds_temp (Tensor): Indices of ignore samples. + """ + # The implementation is (c) in Fig.3 in origin paper instead of (b). + # You can refer to issues such as + # https://github.com/kkhoot/PAA/issues/8 and + # https://github.com/kkhoot/PAA/issues/9. + fgs = gmm_assignment == 0 + pos_inds_temp = fgs.new_tensor([], dtype=torch.long) + ignore_inds_temp = fgs.new_tensor([], dtype=torch.long) + if fgs.nonzero().numel(): + _, pos_thr_ind = scores[fgs].topk(1) + pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1] + ignore_inds_temp = pos_inds_gmm.new_tensor([]) + return pos_inds_temp, ignore_inds_temp + + def get_targets( + self, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True, + ): + """Get targets for PAA head. + + This method is almost the same as `AnchorHead.get_targets()`. We direct + return the results from _get_targets_single instead map it to levels + by images_to_levels function. + + Args: + anchor_list (list[list[Tensor]]): Multi level anchors of each + image. The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_anchors, 4). + valid_flag_list (list[list[Tensor]]): Multi level valid flags of + each image. The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_anchors, ) + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be + ignored. + gt_labels_list (list[Tensor]): Ground truth labels of each box. + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: Usually returns a tuple containing learning targets. + + - labels (list[Tensor]): Labels of all anchors, each with + shape (num_anchors,). + - label_weights (list[Tensor]): Label weights of all anchor. + each with shape (num_anchors,). + - bbox_targets (list[Tensor]): BBox targets of all anchors. + each with shape (num_anchors, 4). + - bbox_weights (list[Tensor]): BBox weights of all anchors. + each with shape (num_anchors, 4). + - pos_inds (list[Tensor]): Contains all index of positive + sample in all anchor. + - gt_inds (list[Tensor]): Contains all gt_index of positive + sample in all anchor. 
+ """ + + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + concat_anchor_list = [] + concat_valid_flag_list = [] + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + concat_anchor_list.append(torch.cat(anchor_list[i])) + concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + results = multi_apply( + self._get_targets_single, + concat_anchor_list, + concat_valid_flag_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs) + + (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds, + valid_neg_inds, sampling_result) = results + + # Due to valid flag of anchors, we have to calculate the real pos_inds + # in origin anchor set. + pos_inds = [] + for i, single_labels in enumerate(labels): + pos_mask = (0 <= single_labels) & ( + single_labels < self.num_classes) + pos_inds.append(pos_mask.nonzero().view(-1)) + + gt_inds = [item.pos_assigned_gt_inds for item in sampling_result] + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + gt_inds) + + def _get_targets_single(self, + flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True): + """Compute regression and classification targets for anchors in a + single image. + + This method is same as `AnchorHead._get_targets_single()`. + """ + assert unmap_outputs, 'We must map outputs back to the original' \ + 'set of anchors in PAAhead' + return super(ATSSHead, self)._get_targets_single( + flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + score_factors=None, + img_metas=None, + cfg=None, + rescale=False, + with_nms=True, + **kwargs): + assert with_nms, 'PAA only supports "with_nms=True" now and it ' \ + 'means PAAHead does not support ' \ + 'test-time augmentation' + return super(ATSSHead, self).get_bboxes(cls_scores, bbox_preds, + score_factors, img_metas, cfg, + rescale, with_nms, **kwargs) + + def _get_bboxes_single(self, + cls_score_list, + bbox_pred_list, + score_factor_list, + mlvl_priors, + img_meta, + cfg, + rescale=False, + with_nms=True, + **kwargs): + """Transform outputs of a single image into bbox predictions. + + Args: + cls_score_list (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_priors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas from + all scale levels of a single image, each item has shape + (num_priors * 4, H, W). + score_factor_list (list[Tensor]): Score factors from all scale + levels of a single image, each item has shape + (num_priors * 1, H, W). + mlvl_priors (list[Tensor]): Each element in the list is + the priors of a single level in feature pyramid, has shape + (num_priors, 4). + img_meta (dict): Image meta info. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. 
+ + Returns: + tuple[Tensor]: Results of detected bboxes and labels. If with_nms + is False and mlvl_score_factor is None, return mlvl_bboxes and + mlvl_scores, else return mlvl_bboxes, mlvl_scores and + mlvl_score_factor. Usually with_nms is False is used for aug + test. If with_nms is True, then return the following format + + - det_bboxes (Tensor): Predicted bboxes with shape \ + [num_bboxes, 5], where the first 4 columns are bounding \ + box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ + column are scores between 0 and 1. + - det_labels (Tensor): Predicted labels of the corresponding \ + box with shape [num_bboxes]. + """ + cfg = self.test_cfg if cfg is None else cfg + img_shape = img_meta['img_shape'] + nms_pre = cfg.get('nms_pre', -1) + + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_score_factors = [] + for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ + enumerate(zip(cls_score_list, bbox_pred_list, + score_factor_list, mlvl_priors)): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + + scores = cls_score.permute(1, 2, 0).reshape( + -1, self.cls_out_channels).sigmoid() + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() + + if 0 < nms_pre < scores.shape[0]: + max_scores, _ = (scores * + score_factor[:, None]).sqrt().max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + priors = priors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + score_factor = score_factor[topk_inds] + + bboxes = self.bbox_coder.decode( + priors, bbox_pred, max_shape=img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_score_factors.append(score_factor) + + return self._bbox_post_process(mlvl_scores, mlvl_bboxes, + img_meta['scale_factor'], cfg, rescale, + with_nms, mlvl_score_factors, **kwargs) + + def _bbox_post_process(self, + mlvl_scores, + mlvl_bboxes, + scale_factor, + cfg, + rescale=False, + with_nms=True, + mlvl_score_factors=None, + **kwargs): + """bbox post-processing method. + + The boxes would be rescaled to the original image scale and do + the nms operation. Usually with_nms is False is used for aug test. + + Args: + mlvl_scores (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_bboxes, num_class). + mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale + levels of a single image, each item has shape (num_bboxes, 4). + scale_factor (ndarray, optional): Scale factor of the image arange + as (w_scale, h_scale, w_scale, h_scale). + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + mlvl_score_factors (list[Tensor], optional): Score factor from + all scale levels of a single image, each item has shape + (num_bboxes, ). Default: None. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. If with_nms + is False and mlvl_score_factor is None, return mlvl_bboxes and + mlvl_scores, else return mlvl_bboxes, mlvl_scores and + mlvl_score_factor. Usually with_nms is False is used for aug + test. If with_nms is True, then return the following format + + - det_bboxes (Tensor): Predicted bboxes with shape \ + [num_bboxes, 5], where the first 4 columns are bounding \ + box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ + column are scores between 0 and 1. 
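The pre-NMS selection above ranks candidates by the geometric mean of the classification score and the predicted IoU, i.e. `sqrt(cls_score * iou_pred)`, and keeps only the top `nms_pre` positions. Below is a small stand-alone sketch of that ranking; the tensor names and sizes are made up for illustration.

```python
# Toy illustration of the sqrt(cls_score * iou_pred) pre-NMS ranking.
import torch

num_priors, num_classes, nms_pre = 1000, 80, 100
cls_scores = torch.rand(num_priors, num_classes)   # sigmoid class scores
iou_preds = torch.rand(num_priors)                 # sigmoid IoU score factors

# The geometric mean rewards boxes that are both confident and well localized.
joint = (cls_scores * iou_preds[:, None]).sqrt()
max_scores, _ = joint.max(dim=1)
_, topk_inds = max_scores.topk(min(nms_pre, num_priors))

kept_scores = cls_scores[topk_inds]
kept_ious = iou_preds[topk_inds]
print(kept_scores.shape, kept_ious.shape)  # torch.Size([100, 80]) torch.Size([100])
```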
+ - det_labels (Tensor): Predicted labels of the corresponding \ + box with shape [num_bboxes]. + """ + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + # Add a dummy background class to the backend when using sigmoid + # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 + # BG cat_id: num_class + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) + + mlvl_iou_preds = torch.cat(mlvl_score_factors) + mlvl_nms_scores = (mlvl_scores * mlvl_iou_preds[:, None]).sqrt() + det_bboxes, det_labels = multiclass_nms( + mlvl_bboxes, + mlvl_nms_scores, + cfg.score_thr, + cfg.nms, + cfg.max_per_img, + score_factors=None) + if self.with_score_voting and len(det_bboxes) > 0: + det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels, + mlvl_bboxes, + mlvl_nms_scores, + cfg.score_thr) + + return det_bboxes, det_labels + + def score_voting(self, det_bboxes, det_labels, mlvl_bboxes, + mlvl_nms_scores, score_thr): + """Implementation of score voting method works on each remaining boxes + after NMS procedure. + + Args: + det_bboxes (Tensor): Remaining boxes after NMS procedure, + with shape (k, 5), each dimension means + (x1, y1, x2, y2, score). + det_labels (Tensor): The label of remaining boxes, with shape + (k, 1),Labels are 0-based. + mlvl_bboxes (Tensor): All boxes before the NMS procedure, + with shape (num_anchors,4). + mlvl_nms_scores (Tensor): The scores of all boxes which is used + in the NMS procedure, with shape (num_anchors, num_class) + score_thr (float): The score threshold of bboxes. + + Returns: + tuple: Usually returns a tuple containing voting results. + + - det_bboxes_voted (Tensor): Remaining boxes after + score voting procedure, with shape (k, 5), each + dimension means (x1, y1, x2, y2, score). + - det_labels_voted (Tensor): Label of remaining bboxes + after voting, with shape (num_anchors,). 
+ """ + candidate_mask = mlvl_nms_scores > score_thr + candidate_mask_nonzeros = candidate_mask.nonzero(as_tuple=False) + candidate_inds = candidate_mask_nonzeros[:, 0] + candidate_labels = candidate_mask_nonzeros[:, 1] + candidate_bboxes = mlvl_bboxes[candidate_inds] + candidate_scores = mlvl_nms_scores[candidate_mask] + det_bboxes_voted = [] + det_labels_voted = [] + for cls in range(self.cls_out_channels): + candidate_cls_mask = candidate_labels == cls + if not candidate_cls_mask.any(): + continue + candidate_cls_scores = candidate_scores[candidate_cls_mask] + candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask] + det_cls_mask = det_labels == cls + det_cls_bboxes = det_bboxes[det_cls_mask].view( + -1, det_bboxes.size(-1)) + det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4], + candidate_cls_bboxes) + for det_ind in range(len(det_cls_bboxes)): + single_det_ious = det_candidate_ious[det_ind] + pos_ious_mask = single_det_ious > 0.01 + pos_ious = single_det_ious[pos_ious_mask] + pos_bboxes = candidate_cls_bboxes[pos_ious_mask] + pos_scores = candidate_cls_scores[pos_ious_mask] + pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) * + pos_scores)[:, None] + voted_box = torch.sum( + pis * pos_bboxes, dim=0) / torch.sum( + pis, dim=0) + voted_score = det_cls_bboxes[det_ind][-1:][None, :] + det_bboxes_voted.append( + torch.cat((voted_box[None, :], voted_score), dim=1)) + det_labels_voted.append(cls) + + det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0) + det_labels_voted = det_labels.new_tensor(det_labels_voted) + return det_bboxes_voted, det_labels_voted diff --git a/downstream/mmdetection/mmdet/models/dense_heads/pisa_retinanet_head.py b/downstream/mmdetection/mmdet/models/dense_heads/pisa_retinanet_head.py new file mode 100644 index 0000000..8654ef4 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/pisa_retinanet_head.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.runner import force_fp32 + +from mmdet.core import images_to_levels +from ..builder import HEADS +from ..losses import carl_loss, isr_p +from .retina_head import RetinaHead + + +@HEADS.register_module() +class PISARetinaHead(RetinaHead): + """PISA Retinanet Head. + + The head owns the same structure with Retinanet Head, but differs in two + aspects: + 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to + change the positive loss weights. + 2. Classification-aware regression loss is adopted as a third loss. + """ + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes of each image + with shape (num_obj, 4). + gt_labels (list[Tensor]): Ground truth labels of each image + with shape (num_obj, 4). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. + Default: None. + + Returns: + dict: Loss dict, comprise classification loss, regression loss and + carl loss. 
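To make the voting step in `score_voting` above concrete: each detection that survives NMS is replaced by a weighted average of the same-class pre-NMS candidates, with weights `p_i = exp(-(1 - IoU_i)^2 / 0.025) * score_i`. Below is a minimal NumPy sketch of that update for a single detection; the array names are illustrative and this is not the PAAHead implementation itself.

```python
# Toy sketch of the score-voting update for one detection.
import numpy as np


def iou_one_to_many(box, boxes):
    """IoU between one (x1, y1, x2, y2) box and an (N, 4) array of boxes."""
    x1 = np.maximum(box[0], boxes[:, 0])
    y1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[2], boxes[:, 2])
    y2 = np.minimum(box[3], boxes[:, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area_box = (box[2] - box[0]) * (box[3] - box[1])
    area_boxes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area_box + area_boxes - inter)


det_box = np.array([10., 10., 50., 50.])            # a box kept by NMS
cand_boxes = np.array([[12., 11., 52., 49.],        # same-class pre-NMS boxes
                       [9., 12., 48., 51.],
                       [30., 30., 90., 90.]])
cand_scores = np.array([0.8, 0.6, 0.3])

ious = iou_one_to_many(det_box, cand_boxes)
keep = ious > 0.01                                  # same threshold as above
pis = np.exp(-(1 - ious[keep]) ** 2 / 0.025) * cand_scores[keep]
voted_box = (pis[:, None] * cand_boxes[keep]).sum(axis=0) / pis.sum()
print(voted_box)  # pulled toward well-overlapping, high-scoring candidates
```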
+ """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + return_sampling_results=True) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + # concat all level anchors and flags to a single tensor + concat_anchor_list = [] + for i in range(len(anchor_list)): + concat_anchor_list.append(torch.cat(anchor_list[i])) + all_anchor_list = images_to_levels(concat_anchor_list, + num_level_anchors) + + num_imgs = len(img_metas) + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) + for cls_score in cls_scores + ] + flatten_cls_scores = torch.cat( + flatten_cls_scores, dim=1).reshape(-1, + flatten_cls_scores[0].size(-1)) + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_bbox_preds = torch.cat( + flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) + flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) + flatten_label_weights = torch.cat( + label_weights_list, dim=1).reshape(-1) + flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) + flatten_bbox_targets = torch.cat( + bbox_targets_list, dim=1).reshape(-1, 4) + flatten_bbox_weights = torch.cat( + bbox_weights_list, dim=1).reshape(-1, 4) + + # Apply ISR-P + isr_cfg = self.train_cfg.get('isr', None) + if isr_cfg is not None: + all_targets = (flatten_labels, flatten_label_weights, + flatten_bbox_targets, flatten_bbox_weights) + with torch.no_grad(): + all_targets = isr_p( + flatten_cls_scores, + flatten_bbox_preds, + all_targets, + flatten_anchors, + sampling_results_list, + bbox_coder=self.bbox_coder, + loss_cls=self.loss_cls, + num_class=self.num_classes, + **self.train_cfg.isr) + (flatten_labels, flatten_label_weights, flatten_bbox_targets, + flatten_bbox_weights) = all_targets + + # For convenience we compute loss once instead separating by fpn level, + # so that we don't need to separate the weights by level again. 
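The `isr_p` call above applies PISA's Importance-based Sample Reweighting to positives. As a rough intuition only (this is not mmdet's `isr_p`): positives are ranked per class by how well their predicted boxes match their ground truth, and higher-ranked samples receive larger loss weights. The toy function below, with made-up inputs `pos_ious` and `pos_labels` and hypothetical `bias`/`k` parameters, sketches that rank-to-weight idea.

```python
# Toy rank-based reweighting in the spirit of ISR-P; NOT mmdet's isr_p().
import torch


def toy_isr_weights(pos_ious, pos_labels, bias=0.0, k=2.0):
    weights = torch.ones_like(pos_ious)
    for cls in pos_labels.unique():
        mask = pos_labels == cls
        ious = pos_ious[mask]
        # Rank 0 = best-localized positive of this class.
        rank = ious.argsort(descending=True).argsort().float()
        n = mask.sum().float()
        # Map rank linearly into (bias, 1], then sharpen with exponent k.
        importance = bias + (1 - bias) * (n - rank) / n
        weights[mask] = importance.pow(k)
    # Renormalize so the total positive-loss contribution stays unchanged.
    weights = weights * (weights.numel() / weights.sum())
    return weights


w = toy_isr_weights(torch.tensor([0.9, 0.6, 0.7, 0.85]),
                    torch.tensor([1, 1, 3, 3]))
print(w)  # better-localized positives receive larger weights
```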
+ # The result should be the same + losses_cls = self.loss_cls( + flatten_cls_scores, + flatten_labels, + flatten_label_weights, + avg_factor=num_total_samples) + losses_bbox = self.loss_bbox( + flatten_bbox_preds, + flatten_bbox_targets, + flatten_bbox_weights, + avg_factor=num_total_samples) + loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) + + # CARL Loss + carl_cfg = self.train_cfg.get('carl', None) + if carl_cfg is not None: + loss_carl = carl_loss( + flatten_cls_scores, + flatten_labels, + flatten_bbox_preds, + flatten_bbox_targets, + self.loss_bbox, + **self.train_cfg.carl, + avg_factor=num_total_pos, + sigmoid=True, + num_class=self.num_classes) + loss_dict.update(loss_carl) + + return loss_dict diff --git a/downstream/mmdetection/mmdet/models/dense_heads/pisa_ssd_head.py b/downstream/mmdetection/mmdet/models/dense_heads/pisa_ssd_head.py new file mode 100644 index 0000000..86b67ab --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/pisa_ssd_head.py @@ -0,0 +1,140 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.core import multi_apply +from ..builder import HEADS +from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p +from .ssd_head import SSDHead + + +# TODO: add loss evaluator for SSD +@HEADS.register_module() +class PISASSDHead(SSDHead): + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes of each image + with shape (num_obj, 4). + gt_labels (list[Tensor]): Ground truth labels of each image + with shape (num_obj, 4). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. + Default: None. + + Returns: + dict: Loss dict, comprise classification loss regression loss and + carl loss. 
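`carl_loss` above adds PISA's classification-aware regression loss: the regression loss of each positive is scaled by a function of its predicted classification score for its ground-truth class, so well-classified samples drive localization harder. The snippet below is a toy version of that weighting with invented inputs (`reg_losses`, `cls_probs`) and a hypothetical exponent `k`; it is not mmdet's `carl_loss`.

```python
# Toy classification-aware regression weighting; NOT mmdet's carl_loss().
import torch


def toy_carl(reg_losses, cls_probs, k=1.0, avg_factor=None):
    # Weight each positive's regression loss by its (detached) class score.
    carl_weights = cls_probs.detach().pow(k)
    # Normalize so the weights keep the overall loss magnitude comparable.
    carl_weights = carl_weights * (carl_weights.numel() / carl_weights.sum())
    loss = (carl_weights * reg_losses).sum()
    if avg_factor is not None:
        loss = loss / avg_factor
    return loss


reg_losses = torch.tensor([0.5, 0.8, 0.3])   # unreduced per-sample reg losses
cls_probs = torch.tensor([0.9, 0.2, 0.7])    # scores for each sample's GT class
print(toy_carl(reg_losses, cls_probs, avg_factor=3))
```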
+ """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=1, + unmap_outputs=False, + return_sampling_results=True) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets + + num_images = len(img_metas) + all_cls_scores = torch.cat([ + s.permute(0, 2, 3, 1).reshape( + num_images, -1, self.cls_out_channels) for s in cls_scores + ], 1) + all_labels = torch.cat(labels_list, -1).view(num_images, -1) + all_label_weights = torch.cat(label_weights_list, + -1).view(num_images, -1) + all_bbox_preds = torch.cat([ + b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) + for b in bbox_preds + ], -2) + all_bbox_targets = torch.cat(bbox_targets_list, + -2).view(num_images, -1, 4) + all_bbox_weights = torch.cat(bbox_weights_list, + -2).view(num_images, -1, 4) + + # concat all level anchors to a single tensor + all_anchors = [] + for i in range(num_images): + all_anchors.append(torch.cat(anchor_list[i])) + + isr_cfg = self.train_cfg.get('isr', None) + all_targets = (all_labels.view(-1), all_label_weights.view(-1), + all_bbox_targets.view(-1, + 4), all_bbox_weights.view(-1, 4)) + # apply ISR-P + if isr_cfg is not None: + all_targets = isr_p( + all_cls_scores.view(-1, all_cls_scores.size(-1)), + all_bbox_preds.view(-1, 4), + all_targets, + torch.cat(all_anchors), + sampling_results_list, + loss_cls=CrossEntropyLoss(), + bbox_coder=self.bbox_coder, + **self.train_cfg.isr, + num_class=self.num_classes) + (new_labels, new_label_weights, new_bbox_targets, + new_bbox_weights) = all_targets + all_labels = new_labels.view(all_labels.shape) + all_label_weights = new_label_weights.view(all_label_weights.shape) + all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape) + all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape) + + # add CARL loss + carl_loss_cfg = self.train_cfg.get('carl', None) + if carl_loss_cfg is not None: + loss_carl = carl_loss( + all_cls_scores.view(-1, all_cls_scores.size(-1)), + all_targets[0], + all_bbox_preds.view(-1, 4), + all_targets[2], + SmoothL1Loss(beta=1.), + **self.train_cfg.carl, + avg_factor=num_total_pos, + num_class=self.num_classes) + + # check NaN and Inf + assert torch.isfinite(all_cls_scores).all().item(), \ + 'classification scores become infinite or NaN!' + assert torch.isfinite(all_bbox_preds).all().item(), \ + 'bbox predications become infinite or NaN!' + + losses_cls, losses_bbox = multi_apply( + self.loss_single, + all_cls_scores, + all_bbox_preds, + all_anchors, + all_labels, + all_label_weights, + all_bbox_targets, + all_bbox_weights, + num_total_samples=num_total_pos) + loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) + if carl_loss_cfg is not None: + loss_dict.update(loss_carl) + return loss_dict diff --git a/downstream/mmdetection/mmdet/models/dense_heads/reppoints_head.py b/downstream/mmdetection/mmdet/models/dense_heads/reppoints_head.py new file mode 100644 index 0000000..f720414 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/reppoints_head.py @@ -0,0 +1,764 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.ops import DeformConv2d + +from mmdet.core import (build_assigner, build_sampler, images_to_levels, + multi_apply, unmap) +from mmdet.core.anchor.point_generator import MlvlPointGenerator +from mmdet.core.utils import filter_scores_and_topk +from ..builder import HEADS, build_loss +from .anchor_free_head import AnchorFreeHead + + +@HEADS.register_module() +class RepPointsHead(AnchorFreeHead): + """RepPoint head. + + Args: + point_feat_channels (int): Number of channels of points features. + gradient_mul (float): The multiplier to gradients from + points refinement and recognition. + point_strides (Iterable): points strides. + point_base_scale (int): bbox scale for assigning labels. + loss_cls (dict): Config of classification loss. + loss_bbox_init (dict): Config of initial points loss. + loss_bbox_refine (dict): Config of points loss in refinement. + use_grid_points (bool): If we use bounding box representation, the + reppoints is represented as grid points on the bounding box. + center_init (bool): Whether to use center point assignment. + transform_method (str): The methods to transform RepPoints to bbox. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ # noqa: W605 + + def __init__(self, + num_classes, + in_channels, + point_feat_channels=256, + num_points=9, + gradient_mul=0.1, + point_strides=[8, 16, 32, 64, 128], + point_base_scale=4, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_init=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5), + loss_bbox_refine=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + use_grid_points=False, + center_init=True, + transform_method='moment', + moment_mul=0.01, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='reppoints_cls_out', + std=0.01, + bias_prob=0.01)), + **kwargs): + self.num_points = num_points + self.point_feat_channels = point_feat_channels + self.use_grid_points = use_grid_points + self.center_init = center_init + + # we use deform conv to extract points features + self.dcn_kernel = int(np.sqrt(num_points)) + self.dcn_pad = int((self.dcn_kernel - 1) / 2) + assert self.dcn_kernel * self.dcn_kernel == num_points, \ + 'The points number should be a square number.' + assert self.dcn_kernel % 2 == 1, \ + 'The points number should be an odd square number.' + dcn_base = np.arange(-self.dcn_pad, + self.dcn_pad + 1).astype(np.float64) + dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) + dcn_base_x = np.tile(dcn_base, self.dcn_kernel) + dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( + (-1)) + self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) + + super().__init__( + num_classes, + in_channels, + loss_cls=loss_cls, + init_cfg=init_cfg, + **kwargs) + + self.gradient_mul = gradient_mul + self.point_base_scale = point_base_scale + self.point_strides = point_strides + self.prior_generator = MlvlPointGenerator( + self.point_strides, offset=0.) 
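The `dcn_base_offset` built in `__init__` above is simply the (y, x) grid of a 3x3 deformable-conv kernel (for the default `num_points=9`), flattened so it can later be subtracted from the predicted point offsets. The few lines below reproduce that construction and show the resulting shape.

```python
# Reproduces the dcn_base_offset construction above for num_points = 9.
import numpy as np
import torch

num_points = 9
dcn_kernel = int(np.sqrt(num_points))          # 3
dcn_pad = (dcn_kernel - 1) // 2                # 1

dcn_base = np.arange(-dcn_pad, dcn_pad + 1).astype(np.float64)  # [-1, 0, 1]
dcn_base_y = np.repeat(dcn_base, dcn_kernel)   # [-1,-1,-1, 0,0,0, 1,1,1]
dcn_base_x = np.tile(dcn_base, dcn_kernel)     # [-1, 0, 1,-1, 0, 1,-1, 0, 1]
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(-1)

# Shape (1, 2 * num_points, 1, 1), interleaved as (y1, x1, y2, x2, ...).
print(torch.tensor(dcn_base_offset).view(1, -1, 1, 1).shape)
```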
+ + self.sampling = loss_cls['type'] not in ['FocalLoss'] + if self.train_cfg: + self.init_assigner = build_assigner(self.train_cfg.init.assigner) + self.refine_assigner = build_assigner( + self.train_cfg.refine.assigner) + # use PseudoSampler when sampling is False + if self.sampling and hasattr(self.train_cfg, 'sampler'): + sampler_cfg = self.train_cfg.sampler + else: + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.transform_method = transform_method + if self.transform_method == 'moment': + self.moment_transfer = nn.Parameter( + data=torch.zeros(2), requires_grad=True) + self.moment_mul = moment_mul + + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + if self.use_sigmoid_cls: + self.cls_out_channels = self.num_classes + else: + self.cls_out_channels = self.num_classes + 1 + self.loss_bbox_init = build_loss(loss_bbox_init) + self.loss_bbox_refine = build_loss(loss_bbox_refine) + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points + self.reppoints_cls_conv = DeformConv2d(self.feat_channels, + self.point_feat_channels, + self.dcn_kernel, 1, + self.dcn_pad) + self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels, + self.cls_out_channels, 1, 1, 0) + self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels, + self.point_feat_channels, 3, + 1, 1) + self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels, + pts_out_dim, 1, 1, 0) + self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels, + self.point_feat_channels, + self.dcn_kernel, 1, + self.dcn_pad) + self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels, + pts_out_dim, 1, 1, 0) + + def points2bbox(self, pts, y_first=True): + """Converting the points set into bounding box. + + :param pts: the input points sets (fields), each points + set (fields) is represented as 2n scalar. + :param y_first: if y_first=True, the point set is represented as + [y1, x1, y2, x2 ... yn, xn], otherwise the point set is + represented as [x1, y1, x2, y2 ... xn, yn]. + :return: each points set is converting to a bbox [x1, y1, x2, y2]. + """ + pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:]) + pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1, + ...] + pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0, + ...] + if self.transform_method == 'minmax': + bbox_left = pts_x.min(dim=1, keepdim=True)[0] + bbox_right = pts_x.max(dim=1, keepdim=True)[0] + bbox_up = pts_y.min(dim=1, keepdim=True)[0] + bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] + bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], + dim=1) + elif self.transform_method == 'partial_minmax': + pts_y = pts_y[:, :4, ...] + pts_x = pts_x[:, :4, ...] 
+ bbox_left = pts_x.min(dim=1, keepdim=True)[0] + bbox_right = pts_x.max(dim=1, keepdim=True)[0] + bbox_up = pts_y.min(dim=1, keepdim=True)[0] + bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] + bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], + dim=1) + elif self.transform_method == 'moment': + pts_y_mean = pts_y.mean(dim=1, keepdim=True) + pts_x_mean = pts_x.mean(dim=1, keepdim=True) + pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True) + pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True) + moment_transfer = (self.moment_transfer * self.moment_mul) + ( + self.moment_transfer.detach() * (1 - self.moment_mul)) + moment_width_transfer = moment_transfer[0] + moment_height_transfer = moment_transfer[1] + half_width = pts_x_std * torch.exp(moment_width_transfer) + half_height = pts_y_std * torch.exp(moment_height_transfer) + bbox = torch.cat([ + pts_x_mean - half_width, pts_y_mean - half_height, + pts_x_mean + half_width, pts_y_mean + half_height + ], + dim=1) + else: + raise NotImplementedError + return bbox + + def gen_grid_from_reg(self, reg, previous_boxes): + """Base on the previous bboxes and regression values, we compute the + regressed bboxes and generate the grids on the bboxes. + + :param reg: the regression value to previous bboxes. + :param previous_boxes: previous bboxes. + :return: generate grids on the regressed bboxes. + """ + b, _, h, w = reg.shape + bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2. + bwh = (previous_boxes[:, 2:, ...] - + previous_boxes[:, :2, ...]).clamp(min=1e-6) + grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp( + reg[:, 2:, ...]) + grid_wh = bwh * torch.exp(reg[:, 2:, ...]) + grid_left = grid_topleft[:, [0], ...] + grid_top = grid_topleft[:, [1], ...] + grid_width = grid_wh[:, [0], ...] + grid_height = grid_wh[:, [1], ...] + intervel = torch.linspace(0., 1., self.dcn_kernel).view( + 1, self.dcn_kernel, 1, 1).type_as(reg) + grid_x = grid_left + grid_width * intervel + grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1) + grid_x = grid_x.view(b, -1, h, w) + grid_y = grid_top + grid_height * intervel + grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1) + grid_y = grid_y.view(b, -1, h, w) + grid_yx = torch.stack([grid_y, grid_x], dim=2) + grid_yx = grid_yx.view(b, -1, h, w) + regressed_bbox = torch.cat([ + grid_left, grid_top, grid_left + grid_width, grid_top + grid_height + ], 1) + return grid_yx, regressed_bbox + + def forward(self, feats): + return multi_apply(self.forward_single, feats) + + def forward_single(self, x): + """Forward feature map of a single FPN level.""" + dcn_base_offset = self.dcn_base_offset.type_as(x) + # If we use center_init, the initial reppoints is from center points. + # If we use bounding bbox representation, the initial reppoints is + # from regular grid placed on a pre-defined bbox. 
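The `'moment'` transform in `points2bbox` above turns a point set into a box by taking the points' mean as the center and `std * exp(moment_transfer)` as the half width/height. The short stand-alone check below applies that formula to a dummy 9-point set with a zero (untrained) transfer parameter.

```python
# Stand-alone check of the 'moment' points-to-bbox transform used above.
import torch

pts = torch.tensor([[10., 10.], [20., 14.], [30., 18.],
                    [12., 22.], [22., 26.], [32., 30.],
                    [14., 34.], [24., 38.], [34., 42.]])  # 9 points, (x, y)
moment_transfer = torch.zeros(2)  # learned (width, height) log-scale, 0 here

x_mean, y_mean = pts[:, 0].mean(), pts[:, 1].mean()
x_std, y_std = (pts[:, 0] - x_mean).std(), (pts[:, 1] - y_mean).std()
half_w = x_std * torch.exp(moment_transfer[0])
half_h = y_std * torch.exp(moment_transfer[1])

bbox = torch.stack([x_mean - half_w, y_mean - half_h,
                    x_mean + half_w, y_mean + half_h])
print(bbox)  # (x1, y1, x2, y2) covering roughly one std around the point cloud
```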
+ if self.use_grid_points or not self.center_init: + scale = self.point_base_scale / 2 + points_init = dcn_base_offset / dcn_base_offset.max() * scale + bbox_init = x.new_tensor([-scale, -scale, scale, + scale]).view(1, 4, 1, 1) + else: + points_init = 0 + cls_feat = x + pts_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + pts_feat = reg_conv(pts_feat) + # initialize reppoints + pts_out_init = self.reppoints_pts_init_out( + self.relu(self.reppoints_pts_init_conv(pts_feat))) + if self.use_grid_points: + pts_out_init, bbox_out_init = self.gen_grid_from_reg( + pts_out_init, bbox_init.detach()) + else: + pts_out_init = pts_out_init + points_init + # refine and classify reppoints + pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach( + ) + self.gradient_mul * pts_out_init + dcn_offset = pts_out_init_grad_mul - dcn_base_offset + cls_out = self.reppoints_cls_out( + self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset))) + pts_out_refine = self.reppoints_pts_refine_out( + self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset))) + if self.use_grid_points: + pts_out_refine, bbox_out_refine = self.gen_grid_from_reg( + pts_out_refine, bbox_out_init.detach()) + else: + pts_out_refine = pts_out_refine + pts_out_init.detach() + + if self.training: + return cls_out, pts_out_init, pts_out_refine + else: + return cls_out, self.points2bbox(pts_out_refine) + + def get_points(self, featmap_sizes, img_metas, device): + """Get points according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. + + Returns: + tuple: points of each image, valid flags of each image + """ + num_imgs = len(img_metas) + + # since feature map sizes of all images are the same, we only compute + # points center for one time + multi_level_points = self.prior_generator.grid_priors( + featmap_sizes, device=device, with_stride=True) + points_list = [[point.clone() for point in multi_level_points] + for _ in range(num_imgs)] + + # for each image, we compute valid flags of multi level grids + valid_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = self.prior_generator.valid_flags( + featmap_sizes, img_meta['pad_shape']) + valid_flag_list.append(multi_level_flags) + + return points_list, valid_flag_list + + def centers_to_bboxes(self, point_list): + """Get bboxes according to center points. + + Only used in :class:`MaxIoUAssigner`. 
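The `gradient_mul` trick in `forward_single` above mixes a detached copy of the initial points with the live tensor, so only a fraction (`gradient_mul`, 0.1 by default) of the gradient from the refinement branch flows back into the point-initialization branch. The snippet below verifies that scaling on a scalar.

```python
# Demonstrates the partial-gradient trick used for pts_out_init above.
import torch

gradient_mul = 0.1
x = torch.tensor([2.0], requires_grad=True)

y = (1 - gradient_mul) * x.detach() + gradient_mul * x
loss = (3.0 * y).sum()     # stand-in for the refinement loss
loss.backward()

print(x.grad)  # tensor([0.3000]) == gradient_mul * 3.0, not the full 3.0
```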
+ """ + bbox_list = [] + for i_img, point in enumerate(point_list): + bbox = [] + for i_lvl in range(len(self.point_strides)): + scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5 + bbox_shift = torch.Tensor([-scale, -scale, scale, + scale]).view(1, 4).type_as(point[0]) + bbox_center = torch.cat( + [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1) + bbox.append(bbox_center + bbox_shift) + bbox_list.append(bbox) + return bbox_list + + def offset_to_pts(self, center_list, pred_list): + """Change from point offset to point coordinate.""" + pts_list = [] + for i_lvl in range(len(self.point_strides)): + pts_lvl = [] + for i_img in range(len(center_list)): + pts_center = center_list[i_img][i_lvl][:, :2].repeat( + 1, self.num_points) + pts_shift = pred_list[i_lvl][i_img] + yx_pts_shift = pts_shift.permute(1, 2, 0).view( + -1, 2 * self.num_points) + y_pts_shift = yx_pts_shift[..., 0::2] + x_pts_shift = yx_pts_shift[..., 1::2] + xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1) + xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1) + pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center + pts_lvl.append(pts) + pts_lvl = torch.stack(pts_lvl, 0) + pts_list.append(pts_lvl) + return pts_list + + def _point_target_single(self, + flat_proposals, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + stage='init', + unmap_outputs=True): + inside_flags = valid_flags + if not inside_flags.any(): + return (None, ) * 7 + # assign gt and sample proposals + proposals = flat_proposals[inside_flags, :] + + if stage == 'init': + assigner = self.init_assigner + pos_weight = self.train_cfg.init.pos_weight + else: + assigner = self.refine_assigner + pos_weight = self.train_cfg.refine.pos_weight + assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore, + None if self.sampling else gt_labels) + sampling_result = self.sampler.sample(assign_result, proposals, + gt_bboxes) + + num_valid_proposals = proposals.shape[0] + bbox_gt = proposals.new_zeros([num_valid_proposals, 4]) + pos_proposals = torch.zeros_like(proposals) + proposals_weights = proposals.new_zeros([num_valid_proposals, 4]) + labels = proposals.new_full((num_valid_proposals, ), + self.num_classes, + dtype=torch.long) + label_weights = proposals.new_zeros( + num_valid_proposals, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + pos_gt_bboxes = sampling_result.pos_gt_bboxes + bbox_gt[pos_inds, :] = pos_gt_bboxes + pos_proposals[pos_inds, :] = proposals[pos_inds, :] + proposals_weights[pos_inds, :] = 1.0 + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of proposals + if unmap_outputs: + num_total_proposals = flat_proposals.size(0) + labels = unmap(labels, num_total_proposals, inside_flags) + label_weights = unmap(label_weights, num_total_proposals, + inside_flags) + bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags) + pos_proposals = unmap(pos_proposals, num_total_proposals, + inside_flags) + proposals_weights = unmap(proposals_weights, num_total_proposals, + inside_flags) + + return (labels, label_weights, bbox_gt, pos_proposals, + proposals_weights, pos_inds, neg_inds) + + def 
get_targets(self, + proposals_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + stage='init', + label_channels=1, + unmap_outputs=True): + """Compute corresponding GT box and classification targets for + proposals. + + Args: + proposals_list (list[list]): Multi level points/bboxes of each + image. + valid_flag_list (list[list]): Multi level valid flags of each + image. + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be + ignored. + gt_bboxes_list (list[Tensor]): Ground truth labels of each box. + stage (str): `init` or `refine`. Generate target for init stage or + refine stage + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: + - labels_list (list[Tensor]): Labels of each level. + - label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501 + - bbox_gt_list (list[Tensor]): Ground truth bbox of each level. + - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501 + - proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501 + - num_total_pos (int): Number of positive samples in all images. # noqa: E501 + - num_total_neg (int): Number of negative samples in all images. # noqa: E501 + """ + assert stage in ['init', 'refine'] + num_imgs = len(img_metas) + assert len(proposals_list) == len(valid_flag_list) == num_imgs + + # points number of multi levels + num_level_proposals = [points.size(0) for points in proposals_list[0]] + + # concat all level points and flags to a single tensor + for i in range(num_imgs): + assert len(proposals_list[i]) == len(valid_flag_list[i]) + proposals_list[i] = torch.cat(proposals_list[i]) + valid_flag_list[i] = torch.cat(valid_flag_list[i]) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + (all_labels, all_label_weights, all_bbox_gt, all_proposals, + all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply( + self._point_target_single, + proposals_list, + valid_flag_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + stage=stage, + unmap_outputs=unmap_outputs) + # no valid points + if any([labels is None for labels in all_labels]): + return None + # sampled points of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + labels_list = images_to_levels(all_labels, num_level_proposals) + label_weights_list = images_to_levels(all_label_weights, + num_level_proposals) + bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals) + proposals_list = images_to_levels(all_proposals, num_level_proposals) + proposal_weights_list = images_to_levels(all_proposal_weights, + num_level_proposals) + return (labels_list, label_weights_list, bbox_gt_list, proposals_list, + proposal_weights_list, num_total_pos, num_total_neg) + + def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels, + label_weights, bbox_gt_init, bbox_weights_init, + bbox_gt_refine, bbox_weights_refine, stride, + num_total_samples_init, num_total_samples_refine): + # classification loss + labels = labels.reshape(-1) + label_weights = 
label_weights.reshape(-1) + cls_score = cls_score.permute(0, 2, 3, + 1).reshape(-1, self.cls_out_channels) + cls_score = cls_score.contiguous() + loss_cls = self.loss_cls( + cls_score, + labels, + label_weights, + avg_factor=num_total_samples_refine) + + # points loss + bbox_gt_init = bbox_gt_init.reshape(-1, 4) + bbox_weights_init = bbox_weights_init.reshape(-1, 4) + bbox_pred_init = self.points2bbox( + pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False) + bbox_gt_refine = bbox_gt_refine.reshape(-1, 4) + bbox_weights_refine = bbox_weights_refine.reshape(-1, 4) + bbox_pred_refine = self.points2bbox( + pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False) + normalize_term = self.point_base_scale * stride + loss_pts_init = self.loss_bbox_init( + bbox_pred_init / normalize_term, + bbox_gt_init / normalize_term, + bbox_weights_init, + avg_factor=num_total_samples_init) + loss_pts_refine = self.loss_bbox_refine( + bbox_pred_refine / normalize_term, + bbox_gt_refine / normalize_term, + bbox_weights_refine, + avg_factor=num_total_samples_refine) + return loss_cls, loss_pts_init, loss_pts_refine + + def loss(self, + cls_scores, + pts_preds_init, + pts_preds_refine, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + device = cls_scores[0].device + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + + # target for initial stage + center_list, valid_flag_list = self.get_points(featmap_sizes, + img_metas, device) + pts_coordinate_preds_init = self.offset_to_pts(center_list, + pts_preds_init) + if self.train_cfg.init.assigner['type'] == 'PointAssigner': + # Assign target for center list + candidate_list = center_list + else: + # transform center list to bbox list and + # assign target for bbox list + bbox_list = self.centers_to_bboxes(center_list) + candidate_list = bbox_list + cls_reg_targets_init = self.get_targets( + candidate_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + stage='init', + label_channels=label_channels) + (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init, + num_total_pos_init, num_total_neg_init) = cls_reg_targets_init + num_total_samples_init = ( + num_total_pos_init + + num_total_neg_init if self.sampling else num_total_pos_init) + + # target for refinement stage + center_list, valid_flag_list = self.get_points(featmap_sizes, + img_metas, device) + pts_coordinate_preds_refine = self.offset_to_pts( + center_list, pts_preds_refine) + bbox_list = [] + for i_img, center in enumerate(center_list): + bbox = [] + for i_lvl in range(len(pts_preds_refine)): + bbox_preds_init = self.points2bbox( + pts_preds_init[i_lvl].detach()) + bbox_shift = bbox_preds_init * self.point_strides[i_lvl] + bbox_center = torch.cat( + [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1) + bbox.append(bbox_center + + bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4)) + bbox_list.append(bbox) + cls_reg_targets_refine = self.get_targets( + bbox_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + stage='refine', + label_channels=label_channels) + (labels_list, label_weights_list, bbox_gt_list_refine, + candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine, + num_total_neg_refine) = cls_reg_targets_refine + num_total_samples_refine = ( + num_total_pos_refine + + num_total_neg_refine if self.sampling else 
num_total_pos_refine) + + # compute loss + losses_cls, losses_pts_init, losses_pts_refine = multi_apply( + self.loss_single, + cls_scores, + pts_coordinate_preds_init, + pts_coordinate_preds_refine, + labels_list, + label_weights_list, + bbox_gt_list_init, + bbox_weights_list_init, + bbox_gt_list_refine, + bbox_weights_list_refine, + self.point_strides, + num_total_samples_init=num_total_samples_init, + num_total_samples_refine=num_total_samples_refine) + loss_dict_all = { + 'loss_cls': losses_cls, + 'loss_pts_init': losses_pts_init, + 'loss_pts_refine': losses_pts_refine + } + return loss_dict_all + + # Same as base_dense_head/_get_bboxes_single except self._bbox_decode + def _get_bboxes_single(self, + cls_score_list, + bbox_pred_list, + score_factor_list, + mlvl_priors, + img_meta, + cfg, + rescale=False, + with_nms=True, + **kwargs): + """Transform outputs of a single image into bbox predictions. + + Args: + cls_score_list (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_priors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas from + all scale levels of a single image, each item has shape + (num_priors * 4, H, W). + score_factor_list (list[Tensor]): Score factor from all scale + levels of a single image. RepPoints head does not need + this value. + mlvl_priors (list[Tensor]): Each element in the list is + the priors of a single level in feature pyramid, has shape + (num_priors, 2). + img_meta (dict): Image meta info. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. If with_nms + is False and mlvl_score_factor is None, return mlvl_bboxes and + mlvl_scores, else return mlvl_bboxes, mlvl_scores and + mlvl_score_factor. Usually with_nms is False is used for aug + test. If with_nms is True, then return the following format + + - det_bboxes (Tensor): Predicted bboxes with shape \ + [num_bboxes, 5], where the first 4 columns are bounding \ + box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ + column are scores between 0 and 1. + - det_labels (Tensor): Predicted labels of the corresponding \ + box with shape [num_bboxes]. + """ + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_score_list) == len(bbox_pred_list) + img_shape = img_meta['img_shape'] + nms_pre = cfg.get('nms_pre', -1) + + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_labels = [] + for level_idx, (cls_score, bbox_pred, priors) in enumerate( + zip(cls_score_list, bbox_pred_list, mlvl_priors)): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1)[:, :-1] + + # After https://github.com/open-mmlab/mmdetection/pull/6268/, + # this operation keeps fewer bboxes under the same `nms_pre`. + # There is no difference in performance for most models. If you + # find a slight drop in performance, you can set a larger + # `nms_pre` than before. 
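`filter_scores_and_topk`, used right after the comment above, combines a score threshold with a top-k cut over all (position, class) pairs. The helper below is a simplified stand-alone version written only to convey that idea; the real mmdet utility also threads extra per-prior results through a dict and handles more edge cases.

```python
# Simplified score-threshold + top-k selection, in the spirit of
# mmdet.core.utils.filter_scores_and_topk (details of the real helper differ).
import torch


def simple_filter_scores_and_topk(scores, score_thr, topk):
    # scores: (num_priors, num_classes) after sigmoid/softmax.
    valid_mask = scores > score_thr
    kept_scores = scores[valid_mask]
    prior_inds, labels = valid_mask.nonzero(as_tuple=True)
    num_topk = min(topk, kept_scores.numel())
    kept_scores, idxs = kept_scores.sort(descending=True)
    kept_scores = kept_scores[:num_topk]
    prior_inds = prior_inds[idxs[:num_topk]]
    labels = labels[idxs[:num_topk]]
    return kept_scores, labels, prior_inds


scores = torch.rand(1000, 80)
s, l, keep = simple_filter_scores_and_topk(scores, score_thr=0.9, topk=100)
print(s.shape, l.shape, keep.shape)  # three tensors of length <= 100
```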
+ results = filter_scores_and_topk( + scores, cfg.score_thr, nms_pre, + dict(bbox_pred=bbox_pred, priors=priors)) + scores, labels, _, filtered_results = results + + bbox_pred = filtered_results['bbox_pred'] + priors = filtered_results['priors'] + + bboxes = self._bbox_decode(priors, bbox_pred, + self.point_strides[level_idx], + img_shape) + + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_labels.append(labels) + + return self._bbox_post_process( + mlvl_scores, + mlvl_labels, + mlvl_bboxes, + img_meta['scale_factor'], + cfg, + rescale=rescale, + with_nms=with_nms) + + def _bbox_decode(self, points, bbox_pred, stride, max_shape): + bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1) + bboxes = bbox_pred * stride + bbox_pos_center + x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1]) + y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0]) + x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1]) + y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0]) + decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) + return decoded_bboxes diff --git a/downstream/mmdetection/mmdet/models/dense_heads/retina_head.py b/downstream/mmdetection/mmdet/models/dense_heads/retina_head.py new file mode 100644 index 0000000..a48720c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/retina_head.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule + +from ..builder import HEADS +from .anchor_head import AnchorHead + + +@HEADS.register_module() +class RetinaHead(AnchorHead): + r"""An anchor-based head used in `RetinaNet + `_. + + The head contains two subnetworks. The first classifies anchor boxes and + the second regresses deltas for the anchors. + + Example: + >>> import torch + >>> self = RetinaHead(11, 7) + >>> x = torch.rand(1, 7, 32, 32) + >>> cls_score, bbox_pred = self.forward_single(x) + >>> # Each anchor predicts a score for each class except background + >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors + >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors + >>> assert cls_per_anchor == (self.num_classes) + >>> assert box_per_anchor == 4 + """ + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + conv_cfg=None, + norm_cfg=None, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='retina_cls', + std=0.01, + bias_prob=0.01)), + **kwargs): + self.stacked_convs = stacked_convs + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + super(RetinaHead, self).__init__( + num_classes, + in_channels, + anchor_generator=anchor_generator, + init_cfg=init_cfg, + **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.retina_cls = nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.cls_out_channels, + 3, + padding=1) + self.retina_reg = 
nn.Conv2d( + self.feat_channels, self.num_base_priors * 4, 3, padding=1) + + def forward_single(self, x): + """Forward feature of a single scale level. + + Args: + x (Tensor): Features of a single scale level. + + Returns: + tuple: + cls_score (Tensor): Cls scores for a single scale level + the channels number is num_anchors * num_classes. + bbox_pred (Tensor): Box energies / deltas for a single scale + level, the channels number is num_anchors * 4. + """ + cls_feat = x + reg_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + cls_score = self.retina_cls(cls_feat) + bbox_pred = self.retina_reg(reg_feat) + return cls_score, bbox_pred diff --git a/downstream/mmdetection/mmdet/models/dense_heads/retina_sepbn_head.py b/downstream/mmdetection/mmdet/models/dense_heads/retina_sepbn_head.py new file mode 100644 index 0000000..b385c61 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/retina_sepbn_head.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init + +from ..builder import HEADS +from .anchor_head import AnchorHead + + +@HEADS.register_module() +class RetinaSepBNHead(AnchorHead): + """"RetinaHead with separate BN. + + In RetinaHead, conv/norm layers are shared across different FPN levels, + while in RetinaSepBNHead, conv layers are shared across different FPN + levels, but BN layers are separated. + """ + + def __init__(self, + num_classes, + num_ins, + in_channels, + stacked_convs=4, + conv_cfg=None, + norm_cfg=None, + init_cfg=None, + **kwargs): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + self.stacked_convs = stacked_convs + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.num_ins = num_ins + super(RetinaSepBNHead, self).__init__( + num_classes, in_channels, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.num_ins): + cls_convs = nn.ModuleList() + reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.cls_convs.append(cls_convs) + self.reg_convs.append(reg_convs) + for i in range(self.stacked_convs): + for j in range(1, self.num_ins): + self.cls_convs[j][i].conv = self.cls_convs[0][i].conv + self.reg_convs[j][i].conv = self.reg_convs[0][i].conv + self.retina_cls = nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.cls_out_channels, + 3, + padding=1) + self.retina_reg = nn.Conv2d( + self.feat_channels, self.num_base_priors * 4, 3, padding=1) + + def init_weights(self): + """Initialize weights of the head.""" + super(RetinaSepBNHead, self).init_weights() + for m in self.cls_convs[0]: + normal_init(m.conv, std=0.01) + for m in self.reg_convs[0]: + normal_init(m.conv, std=0.01) + bias_cls = bias_init_with_prob(0.01) + normal_init(self.retina_cls, std=0.01, bias=bias_cls) + normal_init(self.retina_reg, std=0.01) + + def forward(self, feats): + """Forward 
features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of classification scores and bbox prediction + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * 4. + """ + cls_scores = [] + bbox_preds = [] + for i, x in enumerate(feats): + cls_feat = feats[i] + reg_feat = feats[i] + for cls_conv in self.cls_convs[i]: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs[i]: + reg_feat = reg_conv(reg_feat) + cls_score = self.retina_cls(cls_feat) + bbox_pred = self.retina_reg(reg_feat) + cls_scores.append(cls_score) + bbox_preds.append(bbox_pred) + return cls_scores, bbox_preds diff --git a/downstream/mmdetection/mmdet/models/dense_heads/rpn_head.py b/downstream/mmdetection/mmdet/models/dense_heads/rpn_head.py new file mode 100644 index 0000000..f5d6a3b --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/rpn_head.py @@ -0,0 +1,265 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.ops import batched_nms + +from ..builder import HEADS +from .anchor_head import AnchorHead + + +@HEADS.register_module() +class RPNHead(AnchorHead): + """RPN head. + + Args: + in_channels (int): Number of channels in the input feature map. + init_cfg (dict or list[dict], optional): Initialization config dict. + num_convs (int): Number of convolution layers in the head. Default 1. + """ # noqa: W605 + + def __init__(self, + in_channels, + init_cfg=dict(type='Normal', layer='Conv2d', std=0.01), + num_convs=1, + **kwargs): + self.num_convs = num_convs + super(RPNHead, self).__init__( + 1, in_channels, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + if self.num_convs > 1: + rpn_convs = [] + for i in range(self.num_convs): + if i == 0: + in_channels = self.in_channels + else: + in_channels = self.feat_channels + # use ``inplace=False`` to avoid error: one of the variables + # needed for gradient computation has been modified by an + # inplace operation. + rpn_convs.append( + ConvModule( + in_channels, + self.feat_channels, + 3, + padding=1, + inplace=False)) + self.rpn_conv = nn.Sequential(*rpn_convs) + else: + self.rpn_conv = nn.Conv2d( + self.in_channels, self.feat_channels, 3, padding=1) + self.rpn_cls = nn.Conv2d(self.feat_channels, + self.num_base_priors * self.cls_out_channels, + 1) + self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_base_priors * 4, + 1) + + def forward_single(self, x): + """Forward feature map of a single scale level.""" + x = self.rpn_conv(x) + x = F.relu(x, inplace=True) + rpn_cls_score = self.rpn_cls(x) + rpn_bbox_pred = self.rpn_reg(x) + return rpn_cls_score, rpn_bbox_pred + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
+ img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + losses = super(RPNHead, self).loss( + cls_scores, + bbox_preds, + gt_bboxes, + None, + img_metas, + gt_bboxes_ignore=gt_bboxes_ignore) + return dict( + loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) + + def _get_bboxes_single(self, + cls_score_list, + bbox_pred_list, + score_factor_list, + mlvl_anchors, + img_meta, + cfg, + rescale=False, + with_nms=True, + **kwargs): + """Transform outputs of a single image into bbox predictions. + + Args: + cls_score_list (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_anchors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas from + all scale levels of a single image, each item has + shape (num_anchors * 4, H, W). + score_factor_list (list[Tensor]): Score factor from all scale + levels of a single image. RPN head does not need this value. + mlvl_anchors (list[Tensor]): Anchors of all scale level + each item has shape (num_anchors, 4). + img_meta (dict): Image meta info. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + Tensor: Labeled boxes in shape (n, 5), where the first 4 columns + are bounding box positions (tl_x, tl_y, br_x, br_y) and the + 5-th column is a score between 0 and 1. + """ + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + img_shape = img_meta['img_shape'] + + # bboxes from different level should be independent during NMS, + # level_ids are used as labels for batched NMS to separate them + level_ids = [] + mlvl_scores = [] + mlvl_bbox_preds = [] + mlvl_valid_anchors = [] + nms_pre = cfg.get('nms_pre', -1) + for level_idx in range(len(cls_score_list)): + rpn_cls_score = cls_score_list[level_idx] + rpn_bbox_pred = bbox_pred_list[level_idx] + assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] + rpn_cls_score = rpn_cls_score.permute(1, 2, 0) + if self.use_sigmoid_cls: + rpn_cls_score = rpn_cls_score.reshape(-1) + scores = rpn_cls_score.sigmoid() + else: + rpn_cls_score = rpn_cls_score.reshape(-1, 2) + # We set FG labels to [0, num_class-1] and BG label to + # num_class in RPN head since mmdet v2.5, which is unified to + # be consistent with other head since mmdet v2.0. In mmdet v2.0 + # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. 
+ scores = rpn_cls_score.softmax(dim=1)[:, 0] + rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) + + anchors = mlvl_anchors[level_idx] + if 0 < nms_pre < scores.shape[0]: + # sort is faster than topk + # _, topk_inds = scores.topk(cfg.nms_pre) + ranked_scores, rank_inds = scores.sort(descending=True) + topk_inds = rank_inds[:nms_pre] + scores = ranked_scores[:nms_pre] + rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] + anchors = anchors[topk_inds, :] + + mlvl_scores.append(scores) + mlvl_bbox_preds.append(rpn_bbox_pred) + mlvl_valid_anchors.append(anchors) + level_ids.append( + scores.new_full((scores.size(0), ), + level_idx, + dtype=torch.long)) + + return self._bbox_post_process(mlvl_scores, mlvl_bbox_preds, + mlvl_valid_anchors, level_ids, cfg, + img_shape) + + def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, mlvl_valid_anchors, + level_ids, cfg, img_shape, **kwargs): + """bbox post-processing method. + + Do the nms operation for bboxes in same level. + + Args: + mlvl_scores (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_bboxes, ). + mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale + levels of a single image, each item has shape (num_bboxes, 4). + mlvl_valid_anchors (list[Tensor]): Anchors of all scale level + each item has shape (num_bboxes, 4). + level_ids (list[Tensor]): Indexes from all scale levels of a + single image, each item has shape (num_bboxes, ). + cfg (mmcv.Config): Test / postprocessing configuration, + if None, `self.test_cfg` would be used. + img_shape (tuple(int)): The shape of model's input image. + + Returns: + Tensor: Labeled boxes in shape (n, 5), where the first 4 columns + are bounding box positions (tl_x, tl_y, br_x, br_y) and the + 5-th column is a score between 0 and 1. + """ + scores = torch.cat(mlvl_scores) + anchors = torch.cat(mlvl_valid_anchors) + rpn_bbox_pred = torch.cat(mlvl_bboxes) + proposals = self.bbox_coder.decode( + anchors, rpn_bbox_pred, max_shape=img_shape) + ids = torch.cat(level_ids) + + if cfg.min_bbox_size >= 0: + w = proposals[:, 2] - proposals[:, 0] + h = proposals[:, 3] - proposals[:, 1] + valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) + if not valid_mask.all(): + proposals = proposals[valid_mask] + scores = scores[valid_mask] + ids = ids[valid_mask] + + if proposals.numel() > 0: + dets, _ = batched_nms(proposals, scores, ids, cfg.nms) + else: + return proposals.new_zeros(0, 5) + + return dets[:cfg.max_per_img] + + def onnx_export(self, x, img_metas): + """Test without augmentation. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): Meta info of each image. + Returns: + Tensor: dets of shape [N, num_det, 5]. + """ + cls_scores, bbox_preds = self(x) + + assert len(cls_scores) == len(bbox_preds) + + batch_bboxes, batch_scores = super(RPNHead, self).onnx_export( + cls_scores, bbox_preds, img_metas=img_metas, with_nms=False) + # Use ONNX::NonMaxSuppression in deployment + from mmdet.core.export import add_dummy_nms_for_onnx + cfg = copy.deepcopy(self.test_cfg) + score_threshold = cfg.nms.get('score_thr', 0.0) + nms_pre = cfg.get('deploy_nms_pre', -1) + # Different from the normal forward doing NMS level by level, + # we do NMS across all levels when exporting ONNX. 
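The `level_ids` trick above keeps proposals from different FPN levels independent during NMS by passing the level index as the "class id" argument of batched NMS. Here is a small sketch of the same trick using torchvision's `batched_nms` for self-containment (the RPN head itself uses `mmcv.ops.batched_nms`).

```python
# Sketch of per-level NMS via "level ids as class ids", using torchvision.
import torch
from torchvision.ops import batched_nms

# Two identical boxes on different FPN levels both survive, because
# batched_nms only suppresses boxes that share the same id.
proposals = torch.tensor([[10., 10., 50., 50.],
                          [10., 10., 50., 50.],
                          [11., 11., 51., 51.]])
scores = torch.tensor([0.9, 0.8, 0.7])
level_ids = torch.tensor([0, 1, 0])

keep = batched_nms(proposals, scores, level_ids, iou_threshold=0.7)
print(keep)  # tensor([0, 1]) - box 2 is suppressed by box 0 on the same level
```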
+ dets, _ = add_dummy_nms_for_onnx(batch_bboxes, batch_scores, + cfg.max_per_img, + cfg.nms.iou_threshold, + score_threshold, nms_pre, + cfg.max_per_img) + return dets diff --git a/downstream/mmdetection/mmdet/models/dense_heads/sabl_retina_head.py b/downstream/mmdetection/mmdet/models/dense_heads/sabl_retina_head.py new file mode 100644 index 0000000..4fede71 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/sabl_retina_head.py @@ -0,0 +1,630 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import force_fp32 + +from mmdet.core import (build_assigner, build_bbox_coder, + build_prior_generator, build_sampler, images_to_levels, + multi_apply, unmap) +from mmdet.core.utils import filter_scores_and_topk +from ..builder import HEADS, build_loss +from .base_dense_head import BaseDenseHead +from .dense_test_mixins import BBoxTestMixin +from .guided_anchor_head import GuidedAnchorHead + + +@HEADS.register_module() +class SABLRetinaHead(BaseDenseHead, BBoxTestMixin): + """Side-Aware Boundary Localization (SABL) for RetinaNet. + + The anchor generation, assigning and sampling in SABLRetinaHead + are the same as GuidedAnchorHead for guided anchoring. + + Please refer to https://arxiv.org/abs/1912.04260 for more details. + + Args: + num_classes (int): Number of classes. + in_channels (int): Number of channels in the input feature map. + stacked_convs (int): Number of Convs for classification \ + and regression branches. Defaults to 4. + feat_channels (int): Number of hidden channels. \ + Defaults to 256. + approx_anchor_generator (dict): Config dict for approx generator. + square_anchor_generator (dict): Config dict for square generator. + conv_cfg (dict): Config dict for ConvModule. Defaults to None. + norm_cfg (dict): Config dict for Norm Layer. Defaults to None. + bbox_coder (dict): Config dict for bbox coder. + reg_decoded_bbox (bool): If true, the regression loss would be + applied directly on decoded bounding boxes, converting both + the predicted boxes and regression targets to absolute + coordinates format. Default False. It should be `True` when + using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. + train_cfg (dict): Training config of SABLRetinaHead. + test_cfg (dict): Testing config of SABLRetinaHead. + loss_cls (dict): Config of classification loss. + loss_bbox_cls (dict): Config of classification loss for bbox branch. + loss_bbox_reg (dict): Config of regression loss for bbox branch. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[4], + strides=[8, 16, 32, 64, 128]), + conv_cfg=None, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', + num_buckets=14, + scale_factor=3.0), + reg_decoded_bbox=False, + train_cfg=None, + test_cfg=None, + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.5), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5), + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='retina_cls', + std=0.01, + bias_prob=0.01))): + super(SABLRetinaHead, self).__init__(init_cfg) + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.num_buckets = bbox_coder['num_buckets'] + self.side_num = int(np.ceil(self.num_buckets / 2)) + + assert (approx_anchor_generator['octave_base_scale'] == + square_anchor_generator['scales'][0]) + assert (approx_anchor_generator['strides'] == + square_anchor_generator['strides']) + + self.approx_anchor_generator = build_prior_generator( + approx_anchor_generator) + self.square_anchor_generator = build_prior_generator( + square_anchor_generator) + self.approxs_per_octave = ( + self.approx_anchor_generator.num_base_priors[0]) + + # one anchor per location + self.num_base_priors = self.square_anchor_generator.num_base_priors[0] + + self.stacked_convs = stacked_convs + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.reg_decoded_bbox = reg_decoded_bbox + + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + self.sampling = loss_cls['type'] not in [ + 'FocalLoss', 'GHMC', 'QualityFocalLoss' + ] + if self.use_sigmoid_cls: + self.cls_out_channels = num_classes + else: + self.cls_out_channels = num_classes + 1 + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.loss_cls = build_loss(loss_cls) + self.loss_bbox_cls = build_loss(loss_bbox_cls) + self.loss_bbox_reg = build_loss(loss_bbox_reg) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # use PseudoSampler when sampling is False + if self.sampling and hasattr(self.train_cfg, 'sampler'): + sampler_cfg = self.train_cfg.sampler + else: + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + + self.fp16_enabled = False + self._init_layers() + + @property + def num_anchors(self): + warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' + 'please use "num_base_priors" instead') + return self.square_anchor_generator.num_base_priors[0] + + def _init_layers(self): + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) 
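The bucket heads created right after this conv stack use a fixed channel budget: each box axis is split into `num_buckets` bins and, roughly speaking, each side is localized over its own half of them, so `side_num = ceil(num_buckets / 2)` and both the bucket-classification and bucket-regression convs output `side_num * 4` channels (4 sides) per anchor. The arithmetic under the default config above, as a quick check:

```python
import numpy as np

num_buckets = 14                      # BucketingBBoxCoder default above
side_num = int(np.ceil(num_buckets / 2))
assert side_num == 7

# One group of predictions per side (left, right, top, bottom):
bbox_cls_channels = side_num * 4      # bucket classification logits
bbox_reg_channels = side_num * 4      # within-bucket offset regression
assert bbox_cls_channels == bbox_reg_channels == 28
```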
+ self.retina_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + self.retina_bbox_reg = nn.Conv2d( + self.feat_channels, self.side_num * 4, 3, padding=1) + self.retina_bbox_cls = nn.Conv2d( + self.feat_channels, self.side_num * 4, 3, padding=1) + + def forward_single(self, x): + cls_feat = x + reg_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + cls_score = self.retina_cls(cls_feat) + bbox_cls_pred = self.retina_bbox_cls(reg_feat) + bbox_reg_pred = self.retina_bbox_reg(reg_feat) + bbox_pred = (bbox_cls_pred, bbox_reg_pred) + return cls_score, bbox_pred + + def forward(self, feats): + return multi_apply(self.forward_single, feats) + + def get_anchors(self, featmap_sizes, img_metas, device='cuda'): + """Get squares according to feature map sizes and guided anchors. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. + device (torch.device | str): device for returned tensors + + Returns: + tuple: square approxs of each image + """ + num_imgs = len(img_metas) + + # since feature map sizes of all images are the same, we only compute + # squares for one time + multi_level_squares = self.square_anchor_generator.grid_priors( + featmap_sizes, device=device) + squares_list = [multi_level_squares for _ in range(num_imgs)] + + return squares_list + + def get_target(self, + approx_list, + inside_flag_list, + square_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=None, + sampling=True, + unmap_outputs=True): + """Compute bucketing targets. + Args: + approx_list (list[list]): Multi level approxs of each image. + inside_flag_list (list[list]): Multi level inside flags of each + image. + square_list (list[list]): Multi level squares of each image. + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. + gt_bboxes_list (list[Tensor]): Gt bboxes of each image. + label_channels (int): Channel of label. + sampling (bool): Sample Anchors or not. + unmap_outputs (bool): unmap outputs or not. + + Returns: + tuple: Returns a tuple containing learning targets. + + - labels_list (list[Tensor]): Labels of each level. + - label_weights_list (list[Tensor]): Label weights of each \ + level. + - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ + each level. + - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ + each level. + - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ + each level. + - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ + each level. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. 
+ """ + num_imgs = len(img_metas) + assert len(approx_list) == len(inside_flag_list) == len( + square_list) == num_imgs + # anchor number of multi levels + num_level_squares = [squares.size(0) for squares in square_list[0]] + # concat all level anchors and flags to a single tensor + inside_flag_flat_list = [] + approx_flat_list = [] + square_flat_list = [] + for i in range(num_imgs): + assert len(square_list[i]) == len(inside_flag_list[i]) + inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) + approx_flat_list.append(torch.cat(approx_list[i])) + square_flat_list.append(torch.cat(square_list[i])) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + (all_labels, all_label_weights, all_bbox_cls_targets, + all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, + pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, + approx_flat_list, + inside_flag_flat_list, + square_flat_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + sampling=sampling, + unmap_outputs=unmap_outputs) + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + labels_list = images_to_levels(all_labels, num_level_squares) + label_weights_list = images_to_levels(all_label_weights, + num_level_squares) + bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, + num_level_squares) + bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, + num_level_squares) + bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, + num_level_squares) + bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, + num_level_squares) + return (labels_list, label_weights_list, bbox_cls_targets_list, + bbox_cls_weights_list, bbox_reg_targets_list, + bbox_reg_weights_list, num_total_pos, num_total_neg) + + def _get_target_single(self, + flat_approxs, + inside_flags, + flat_squares, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=None, + sampling=True, + unmap_outputs=True): + """Compute regression and classification targets for anchors in a + single image. + + Args: + flat_approxs (Tensor): flat approxs of a single image, + shape (n, 4) + inside_flags (Tensor): inside flags of a single image, + shape (n, ). + flat_squares (Tensor): flat squares of a single image, + shape (approxs_per_octave * n, 4) + gt_bboxes (Tensor): Ground truth bboxes of a single image, \ + shape (num_gts, 4). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + img_meta (dict): Meta info of the image. + label_channels (int): Channel of label. + sampling (bool): Sample Anchors or not. + unmap_outputs (bool): unmap outputs or not. 
+ + Returns: + tuple: + + - labels_list (Tensor): Labels in a single image + - label_weights (Tensor): Label weights in a single image + - bbox_cls_targets (Tensor): BBox cls targets in a single image + - bbox_cls_weights (Tensor): BBox cls weights in a single image + - bbox_reg_targets (Tensor): BBox reg targets in a single image + - bbox_reg_weights (Tensor): BBox reg weights in a single image + - num_total_pos (int): Number of positive samples \ + in a single image + - num_total_neg (int): Number of negative samples \ + in a single image + """ + if not inside_flags.any(): + return (None, ) * 8 + # assign gt and sample anchors + expand_inside_flags = inside_flags[:, None].expand( + -1, self.approxs_per_octave).reshape(-1) + approxs = flat_approxs[expand_inside_flags, :] + squares = flat_squares[inside_flags, :] + + assign_result = self.assigner.assign(approxs, squares, + self.approxs_per_octave, + gt_bboxes, gt_bboxes_ignore) + sampling_result = self.sampler.sample(assign_result, squares, + gt_bboxes) + + num_valid_squares = squares.shape[0] + bbox_cls_targets = squares.new_zeros( + (num_valid_squares, self.side_num * 4)) + bbox_cls_weights = squares.new_zeros( + (num_valid_squares, self.side_num * 4)) + bbox_reg_targets = squares.new_zeros( + (num_valid_squares, self.side_num * 4)) + bbox_reg_weights = squares.new_zeros( + (num_valid_squares, self.side_num * 4)) + labels = squares.new_full((num_valid_squares, ), + self.num_classes, + dtype=torch.long) + label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, + pos_bbox_cls_weights) = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + + bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets + bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets + bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights + bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_squares.size(0) + labels = unmap( + labels, num_total_anchors, inside_flags, fill=self.num_classes) + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, + inside_flags) + bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, + inside_flags) + bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, + inside_flags) + bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, + inside_flags) + return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, + bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds) + + def loss_single(self, cls_score, bbox_pred, labels, label_weights, + bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, + bbox_reg_weights, num_total_samples): + # classification loss + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = cls_score.permute(0, 2, 3, + 1).reshape(-1, self.cls_out_channels) + loss_cls = self.loss_cls( + cls_score, labels, 
label_weights, avg_factor=num_total_samples) + # regression loss + bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4) + bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) + bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) + bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) + (bbox_cls_pred, bbox_reg_pred) = bbox_pred + bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( + -1, self.side_num * 4) + bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( + -1, self.side_num * 4) + loss_bbox_cls = self.loss_bbox_cls( + bbox_cls_pred, + bbox_cls_targets.long(), + bbox_cls_weights, + avg_factor=num_total_samples * 4 * self.side_num) + loss_bbox_reg = self.loss_bbox_reg( + bbox_reg_pred, + bbox_reg_targets, + bbox_reg_weights, + avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk) + return loss_cls, loss_bbox_cls, loss_bbox_reg + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.approx_anchor_generator.num_levels + + device = cls_scores[0].device + + # get sampled approxes + approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( + self, featmap_sizes, img_metas, device=device) + + square_list = self.get_anchors(featmap_sizes, img_metas, device=device) + + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + + cls_reg_targets = self.get_target( + approxs_list, + inside_flag_list, + square_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + sampling=self.sampling) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_cls_targets_list, + bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + labels_list, + label_weights_list, + bbox_cls_targets_list, + bbox_cls_weights_list, + bbox_reg_targets_list, + bbox_reg_weights_list, + num_total_samples=num_total_samples) + return dict( + loss_cls=losses_cls, + loss_bbox_cls=losses_bbox_cls, + loss_bbox_reg=losses_bbox_reg) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + img_metas, + cfg=None, + rescale=False): + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + + device = cls_scores[0].device + mlvl_anchors = self.get_anchors( + featmap_sizes, img_metas, device=device) + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_cls_pred_list = [ + bbox_preds[i][0][img_id].detach() for i in range(num_levels) + ] + bbox_reg_pred_list = [ + bbox_preds[i][1][img_id].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single( + cls_score_list, bbox_cls_pred_list, bbox_reg_pred_list, + mlvl_anchors[img_id], img_shape, scale_factor, cfg, rescale) + result_list.append(proposals) + return result_list + + def 
_get_bboxes_single(self, + cls_scores, + bbox_cls_preds, + bbox_reg_preds, + mlvl_anchors, + img_shape, + scale_factor, + cfg, + rescale=False): + cfg = self.test_cfg if cfg is None else cfg + nms_pre = cfg.get('nms_pre', -1) + + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_confids = [] + mlvl_labels = [] + assert len(cls_scores) == len(bbox_cls_preds) == len( + bbox_reg_preds) == len(mlvl_anchors) + for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip( + cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): + assert cls_score.size()[-2:] == bbox_cls_pred.size( + )[-2:] == bbox_reg_pred.size()[-2::] + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1)[:, :-1] + bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( + -1, self.side_num * 4) + bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( + -1, self.side_num * 4) + + # After https://github.com/open-mmlab/mmdetection/pull/6268/, + # this operation keeps fewer bboxes under the same `nms_pre`. + # There is no difference in performance for most models. If you + # find a slight drop in performance, you can set a larger + # `nms_pre` than before. + results = filter_scores_and_topk( + scores, cfg.score_thr, nms_pre, + dict( + anchors=anchors, + bbox_cls_pred=bbox_cls_pred, + bbox_reg_pred=bbox_reg_pred)) + scores, labels, _, filtered_results = results + + anchors = filtered_results['anchors'] + bbox_cls_pred = filtered_results['bbox_cls_pred'] + bbox_reg_pred = filtered_results['bbox_reg_pred'] + + bbox_preds = [ + bbox_cls_pred.contiguous(), + bbox_reg_pred.contiguous() + ] + bboxes, confids = self.bbox_coder.decode( + anchors.contiguous(), bbox_preds, max_shape=img_shape) + + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_confids.append(confids) + mlvl_labels.append(labels) + return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, + scale_factor, cfg, rescale, True, + mlvl_confids) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/solo_head.py b/downstream/mmdetection/mmdet/models/dense_heads/solo_head.py new file mode 100644 index 0000000..9f5719e --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/solo_head.py @@ -0,0 +1,1182 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from mmdet.core import InstanceData, mask_matrix_nms, multi_apply +from mmdet.core.utils import center_of_mass, generate_coordinate +from mmdet.models.builder import HEADS, build_loss +from .base_mask_head import BaseMaskHead + + +@HEADS.register_module() +class SOLOHead(BaseMaskHead): + """SOLO mask head used in `SOLO: Segmenting Objects by Locations. + + `_ + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels. Used in child classes. + Default: 256. + stacked_convs (int): Number of stacking convs of the head. + Default: 4. + strides (tuple): Downsample factor of each feature map. + scale_ranges (tuple[tuple[int, int]]): Area range of multiple + level masks, in the format [(min1, max1), (min2, max2), ...]. + A range of (16, 64) means the area range between (16, 64). + pos_scale (float): Constant scale factor to control the center region. 
+ num_grids (list[int]): Divided image into a uniform grids, each + feature map has a different grid value. The number of output + channels is grid ** 2. Default: [40, 36, 24, 16, 12]. + cls_down_index (int): The index of downsample operation in + classification branch. Default: 0. + loss_mask (dict): Config of mask loss. + loss_cls (dict): Config of classification loss. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: norm_cfg=dict(type='GN', num_groups=32, + requires_grad=True). + train_cfg (dict): Training config of head. + test_cfg (dict): Testing config of head. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__( + self, + num_classes, + in_channels, + feat_channels=256, + stacked_convs=4, + strides=(4, 8, 16, 32, 64), + scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=None, + loss_cls=None, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + train_cfg=None, + test_cfg=None, + init_cfg=[ + dict(type='Normal', layer='Conv2d', std=0.01), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_mask_list')), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_cls')) + ], + ): + super(SOLOHead, self).__init__(init_cfg) + self.num_classes = num_classes + self.cls_out_channels = self.num_classes + self.in_channels = in_channels + self.feat_channels = feat_channels + self.stacked_convs = stacked_convs + self.strides = strides + self.num_grids = num_grids + # number of FPN feats + self.num_levels = len(strides) + assert self.num_levels == len(scale_ranges) == len(num_grids) + self.scale_ranges = scale_ranges + self.pos_scale = pos_scale + + self.cls_down_index = cls_down_index + self.loss_cls = build_loss(loss_cls) + self.loss_mask = build_loss(loss_mask) + self.norm_cfg = norm_cfg + self.init_cfg = init_cfg + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self._init_layers() + + def _init_layers(self): + self.mask_convs = nn.ModuleList() + self.cls_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels + 2 if i == 0 else self.feat_channels + self.mask_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg)) + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg)) + self.conv_mask_list = nn.ModuleList() + for num_grid in self.num_grids: + self.conv_mask_list.append( + nn.Conv2d(self.feat_channels, num_grid**2, 1)) + + self.conv_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + + def resize_feats(self, feats): + """Downsample the first feat and upsample last feat in feats.""" + out = [] + for i in range(len(feats)): + if i == 0: + out.append( + F.interpolate( + feats[0], + size=feats[i + 1].shape[-2:], + mode='bilinear', + align_corners=False)) + elif i == len(feats) - 1: + out.append( + F.interpolate( + feats[i], + size=feats[i - 1].shape[-2:], + mode='bilinear', + align_corners=False)) + else: + out.append(feats[i]) + return out + + def forward(self, feats): + assert len(feats) == self.num_levels + feats = self.resize_feats(feats) + mlvl_mask_preds = [] + mlvl_cls_preds = [] + for i in range(self.num_levels): + x = feats[i] + mask_feat = x + cls_feat = x + # generate and concat the coordinate + coord_feat = 
generate_coordinate(mask_feat.size(), + mask_feat.device) + mask_feat = torch.cat([mask_feat, coord_feat], 1) + + for mask_layer in (self.mask_convs): + mask_feat = mask_layer(mask_feat) + + mask_feat = F.interpolate( + mask_feat, scale_factor=2, mode='bilinear') + mask_pred = self.conv_mask_list[i](mask_feat) + + # cls branch + for j, cls_layer in enumerate(self.cls_convs): + if j == self.cls_down_index: + num_grid = self.num_grids[i] + cls_feat = F.interpolate( + cls_feat, size=num_grid, mode='bilinear') + cls_feat = cls_layer(cls_feat) + + cls_pred = self.conv_cls(cls_feat) + + if not self.training: + feat_wh = feats[0].size()[-2:] + upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) + mask_pred = F.interpolate( + mask_pred.sigmoid(), size=upsampled_size, mode='bilinear') + cls_pred = cls_pred.sigmoid() + # get local maximum + local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) + keep_mask = local_max[:, :, :-1, :-1] == cls_pred + cls_pred = cls_pred * keep_mask + + mlvl_mask_preds.append(mask_pred) + mlvl_cls_preds.append(cls_pred) + return mlvl_mask_preds, mlvl_cls_preds + + def loss(self, + mlvl_mask_preds, + mlvl_cls_preds, + gt_labels, + gt_masks, + img_metas, + gt_bboxes=None, + **kwargs): + """Calculate the loss of total batch. + + Args: + mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. + Each element in the list has shape + (batch_size, num_grids**2 ,h ,w). + mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element + in the list has shape + (batch_size, num_classes, num_grids ,num_grids). + gt_labels (list[Tensor]): Labels of multiple images. + gt_masks (list[Tensor]): Ground truth masks of multiple images. + Each has shape (num_instances, h, w). + img_metas (list[dict]): Meta information of multiple images. + gt_bboxes (list[Tensor]): Ground truth bboxes of multiple + images. Default: None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
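The mask branch in `forward` above concatenates two extra coordinate channels (`generate_coordinate`) onto the features before the mask convs, a CoordConv-style trick that lets spatially shared convolutions produce location-dependent masks. A small sketch of such coordinate channels; `coord_channels` is a hypothetical stand-in, not mmdet's actual utility:

```python
import torch


def coord_channels(size, device='cpu'):
    """Two extra channels holding each pixel's normalized x / y position."""
    n, _, h, w = size
    x = torch.linspace(-1, 1, w, device=device)[None, :].expand(h, w)
    y = torch.linspace(-1, 1, h, device=device)[:, None].expand(h, w)
    x = x.expand(n, 1, h, w)
    y = y.expand(n, 1, h, w)
    return torch.cat([x, y], dim=1)


feat = torch.randn(2, 256, 32, 48)
feat_with_coords = torch.cat([feat, coord_channels(feat.size())], dim=1)
assert feat_with_coords.shape == (2, 258, 32, 48)
```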
+ """ + num_levels = self.num_levels + num_imgs = len(gt_labels) + + featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds] + + # `BoolTensor` in `pos_masks` represent + # whether the corresponding point is + # positive + pos_mask_targets, labels, pos_masks = multi_apply( + self._get_targets_single, + gt_bboxes, + gt_labels, + gt_masks, + featmap_sizes=featmap_sizes) + + # change from the outside list meaning multi images + # to the outside list meaning multi levels + mlvl_pos_mask_targets = [[] for _ in range(num_levels)] + mlvl_pos_mask_preds = [[] for _ in range(num_levels)] + mlvl_pos_masks = [[] for _ in range(num_levels)] + mlvl_labels = [[] for _ in range(num_levels)] + for img_id in range(num_imgs): + assert num_levels == len(pos_mask_targets[img_id]) + for lvl in range(num_levels): + mlvl_pos_mask_targets[lvl].append( + pos_mask_targets[img_id][lvl]) + mlvl_pos_mask_preds[lvl].append( + mlvl_mask_preds[lvl][img_id, pos_masks[img_id][lvl], ...]) + mlvl_pos_masks[lvl].append(pos_masks[img_id][lvl].flatten()) + mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) + + # cat multiple image + temp_mlvl_cls_preds = [] + for lvl in range(num_levels): + mlvl_pos_mask_targets[lvl] = torch.cat( + mlvl_pos_mask_targets[lvl], dim=0) + mlvl_pos_mask_preds[lvl] = torch.cat( + mlvl_pos_mask_preds[lvl], dim=0) + mlvl_pos_masks[lvl] = torch.cat(mlvl_pos_masks[lvl], dim=0) + mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) + temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( + 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) + + num_pos = sum(item.sum() for item in mlvl_pos_masks) + # dice loss + loss_mask = [] + for pred, target in zip(mlvl_pos_mask_preds, mlvl_pos_mask_targets): + if pred.size()[0] == 0: + loss_mask.append(pred.sum().unsqueeze(0)) + continue + loss_mask.append( + self.loss_mask(pred, target, reduction_override='none')) + if num_pos > 0: + loss_mask = torch.cat(loss_mask).sum() / num_pos + else: + loss_mask = torch.cat(loss_mask).mean() + + flatten_labels = torch.cat(mlvl_labels) + flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) + loss_cls = self.loss_cls( + flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) + return dict(loss_mask=loss_mask, loss_cls=loss_cls) + + def _get_targets_single(self, + gt_bboxes, + gt_labels, + gt_masks, + featmap_sizes=None): + """Compute targets for predictions of single image. + + Args: + gt_bboxes (Tensor): Ground truth bbox of each instance, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth label of each instance, + shape (num_gts,). + gt_masks (Tensor): Ground truth mask of each instance, + shape (num_gts, h, w). + featmap_sizes (list[:obj:`torch.size`]): Size of each + feature map from feature pyramid, each element + means (feat_h, feat_w). Default: None. + + Returns: + Tuple: Usually returns a tuple containing targets for predictions. + + - mlvl_pos_mask_targets (list[Tensor]): Each element represent + the binary mask targets for positive points in this + level, has shape (num_pos, out_h, out_w). + - mlvl_labels (list[Tensor]): Each element is + classification labels for all + points in this level, has shape + (num_grid, num_grid). + - mlvl_pos_masks (list[Tensor]): Each element is + a `BoolTensor` to represent whether the + corresponding point in single level + is positive, has shape (num_grid **2). 
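The assignment below drops each instance onto the `num_grid` x `num_grid` grid of its scale range: the mask's center of mass selects a cell, and the cells covered by a center region shrunk by `pos_scale`, clipped to the 3x3 neighbourhood of that cell, are marked positive as well. A toy sketch of the cell-index computation, with made-up numbers and the 4x-upsampled feature size used in the code below:

```python
# Toy values, not taken from a real image.
num_grid = 40
upsampled_h, upsampled_w = 800, 1333      # featmap size * 4
center_h, center_w = 412.0, 615.0         # instance center of mass (pixels)

# Grid cell containing the center (floor of normalized coordinate * num_grid).
coord_w = int((center_w / upsampled_w) // (1. / num_grid))
coord_h = int((center_h / upsampled_h) // (1. / num_grid))
assert (coord_w, coord_h) == (18, 20)

# Flattened channel index of that cell in the (num_grid ** 2) mask channels.
index = coord_h * num_grid + coord_w
assert index == 20 * 40 + 18 == 818
```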
+ """ + device = gt_labels.device + gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * + (gt_bboxes[:, 3] - gt_bboxes[:, 1])) + + mlvl_pos_mask_targets = [] + mlvl_labels = [] + mlvl_pos_masks = [] + for (lower_bound, upper_bound), stride, featmap_size, num_grid \ + in zip(self.scale_ranges, self.strides, + featmap_sizes, self.num_grids): + + mask_target = torch.zeros( + [num_grid**2, featmap_size[0], featmap_size[1]], + dtype=torch.uint8, + device=device) + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + labels = torch.zeros([num_grid, num_grid], + dtype=torch.int64, + device=device) + self.num_classes + pos_mask = torch.zeros([num_grid**2], + dtype=torch.bool, + device=device) + + gt_inds = ((gt_areas >= lower_bound) & + (gt_areas <= upper_bound)).nonzero().flatten() + if len(gt_inds) == 0: + mlvl_pos_mask_targets.append( + mask_target.new_zeros(0, featmap_size[0], featmap_size[1])) + mlvl_labels.append(labels) + mlvl_pos_masks.append(pos_mask) + continue + hit_gt_bboxes = gt_bboxes[gt_inds] + hit_gt_labels = gt_labels[gt_inds] + hit_gt_masks = gt_masks[gt_inds, ...] + + pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - + hit_gt_bboxes[:, 0]) * self.pos_scale + pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - + hit_gt_bboxes[:, 1]) * self.pos_scale + + # Make sure hit_gt_masks has a value + valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 + output_stride = stride / 2 + + for gt_mask, gt_label, pos_h_range, pos_w_range, \ + valid_mask_flag in \ + zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, + pos_w_ranges, valid_mask_flags): + if not valid_mask_flag: + continue + upsampled_size = (featmap_sizes[0][0] * 4, + featmap_sizes[0][1] * 4) + center_h, center_w = center_of_mass(gt_mask) + + coord_w = int( + (center_w / upsampled_size[1]) // (1. / num_grid)) + coord_h = int( + (center_h / upsampled_size[0]) // (1. / num_grid)) + + # left, top, right, down + top_box = max( + 0, + int(((center_h - pos_h_range) / upsampled_size[0]) // + (1. / num_grid))) + down_box = min( + num_grid - 1, + int(((center_h + pos_h_range) / upsampled_size[0]) // + (1. / num_grid))) + left_box = max( + 0, + int(((center_w - pos_w_range) / upsampled_size[1]) // + (1. / num_grid))) + right_box = min( + num_grid - 1, + int(((center_w + pos_w_range) / upsampled_size[1]) // + (1. / num_grid))) + + top = max(top_box, coord_h - 1) + down = min(down_box, coord_h + 1) + left = max(coord_w - 1, left_box) + right = min(right_box, coord_w + 1) + + labels[top:(down + 1), left:(right + 1)] = gt_label + # ins + gt_mask = np.uint8(gt_mask.cpu().numpy()) + # Follow the original implementation, F.interpolate is + # different from cv2 and opencv + gt_mask = mmcv.imrescale(gt_mask, scale=1. / output_stride) + gt_mask = torch.from_numpy(gt_mask).to(device=device) + + for i in range(top, down + 1): + for j in range(left, right + 1): + index = int(i * num_grid + j) + mask_target[index, :gt_mask.shape[0], :gt_mask. + shape[1]] = gt_mask + pos_mask[index] = True + mlvl_pos_mask_targets.append(mask_target[pos_mask]) + mlvl_labels.append(labels) + mlvl_pos_masks.append(pos_mask) + return mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks + + def get_results(self, mlvl_mask_preds, mlvl_cls_scores, img_metas, + **kwargs): + """Get multi-image mask results. + + Args: + mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. + Each element in the list has shape + (batch_size, num_grids**2 ,h ,w). + mlvl_cls_scores (list[Tensor]): Multi-level scores. 
Each element + in the list has shape + (batch_size, num_classes, num_grids ,num_grids). + img_metas (list[dict]): Meta information of all images. + + Returns: + list[:obj:`InstanceData`]: Processed results of multiple + images.Each :obj:`InstanceData` usually contains + following keys. + + - scores (Tensor): Classification scores, has shape + (num_instance,). + - labels (Tensor): Has shape (num_instances,). + - masks (Tensor): Processed mask results, has + shape (num_instances, h, w). + """ + mlvl_cls_scores = [ + item.permute(0, 2, 3, 1) for item in mlvl_cls_scores + ] + assert len(mlvl_mask_preds) == len(mlvl_cls_scores) + num_levels = len(mlvl_cls_scores) + + results_list = [] + for img_id in range(len(img_metas)): + cls_pred_list = [ + mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels) + for lvl in range(num_levels) + ] + mask_pred_list = [ + mlvl_mask_preds[lvl][img_id] for lvl in range(num_levels) + ] + + cls_pred_list = torch.cat(cls_pred_list, dim=0) + mask_pred_list = torch.cat(mask_pred_list, dim=0) + + results = self._get_results_single( + cls_pred_list, mask_pred_list, img_meta=img_metas[img_id]) + results_list.append(results) + + return results_list + + def _get_results_single(self, cls_scores, mask_preds, img_meta, cfg=None): + """Get processed mask related results of single image. + + Args: + cls_scores (Tensor): Classification score of all points + in single image, has shape (num_points, num_classes). + mask_preds (Tensor): Mask prediction of all points in + single image, has shape (num_points, feat_h, feat_w). + img_meta (dict): Meta information of corresponding image. + cfg (dict, optional): Config used in test phase. + Default: None. + + Returns: + :obj:`InstanceData`: Processed results of single image. + it usually contains following keys. + + - scores (Tensor): Classification scores, has shape + (num_instance,). + - labels (Tensor): Has shape (num_instances,). + - masks (Tensor): Processed mask results, has + shape (num_instances, h, w). 
+ """ + + def empty_results(results, cls_scores): + """Generate a empty results.""" + results.scores = cls_scores.new_ones(0) + results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) + results.labels = cls_scores.new_ones(0) + return results + + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_scores) == len(mask_preds) + results = InstanceData(img_meta) + + featmap_size = mask_preds.size()[-2:] + + img_shape = results.img_shape + ori_shape = results.ori_shape + + h, w, _ = img_shape + upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) + + score_mask = (cls_scores > cfg.score_thr) + cls_scores = cls_scores[score_mask] + if len(cls_scores) == 0: + return empty_results(results, cls_scores) + + inds = score_mask.nonzero() + cls_labels = inds[:, 1] + + # Filter the mask mask with an area is smaller than + # stride of corresponding feature level + lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) + strides = cls_scores.new_ones(lvl_interval[-1]) + strides[:lvl_interval[0]] *= self.strides[0] + for lvl in range(1, self.num_levels): + strides[lvl_interval[lvl - + 1]:lvl_interval[lvl]] *= self.strides[lvl] + strides = strides[inds[:, 0]] + mask_preds = mask_preds[inds[:, 0]] + + masks = mask_preds > cfg.mask_thr + sum_masks = masks.sum((1, 2)).float() + keep = sum_masks > strides + if keep.sum() == 0: + return empty_results(results, cls_scores) + masks = masks[keep] + mask_preds = mask_preds[keep] + sum_masks = sum_masks[keep] + cls_scores = cls_scores[keep] + cls_labels = cls_labels[keep] + + # maskness. + mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks + cls_scores *= mask_scores + + scores, labels, _, keep_inds = mask_matrix_nms( + masks, + cls_labels, + cls_scores, + mask_area=sum_masks, + nms_pre=cfg.nms_pre, + max_num=cfg.max_per_img, + kernel=cfg.kernel, + sigma=cfg.sigma, + filter_thr=cfg.filter_thr) + mask_preds = mask_preds[keep_inds] + mask_preds = F.interpolate( + mask_preds.unsqueeze(0), size=upsampled_size, + mode='bilinear')[:, :, :h, :w] + mask_preds = F.interpolate( + mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0) + masks = mask_preds > cfg.mask_thr + + results.masks = masks + results.labels = labels + results.scores = scores + + return results + + +@HEADS.register_module() +class DecoupledSOLOHead(SOLOHead): + """Decoupled SOLO mask head used in `SOLO: Segmenting Objects by Locations. + + `_ + + Args: + init_cfg (dict or list[dict], optional): Initialization config dict. 
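Rather than predicting `num_grid ** 2` full mask channels per level like the vanilla head, the decoupled head predicts `num_grid` x-maps and `num_grid` y-maps and recovers the mask of grid cell `(i, j)` as the elementwise product of the j-th x-map and the i-th y-map after sigmoid, as the loss and inference code further below do. A shape-only sketch with made-up sizes:

```python
import torch

num_grid, h, w = 12, 64, 96
mask_preds_x = torch.randn(num_grid, h, w)   # one map per grid column
mask_preds_y = torch.randn(num_grid, h, w)   # one map per grid row

# Mask of grid cell (i, j): product of the j-th x-map and the i-th y-map.
i, j = 5, 7
cell_mask = mask_preds_x[j].sigmoid() * mask_preds_y[i].sigmoid()
assert cell_mask.shape == (h, w)

# Output channels per level: 2 * num_grid instead of num_grid ** 2.
assert 2 * num_grid == 24 and num_grid ** 2 == 144
```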
+ """ + + def __init__(self, + *args, + init_cfg=[ + dict(type='Normal', layer='Conv2d', std=0.01), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_mask_list_x')), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_mask_list_y')), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_cls')) + ], + **kwargs): + super(DecoupledSOLOHead, self).__init__( + *args, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + self.mask_convs_x = nn.ModuleList() + self.mask_convs_y = nn.ModuleList() + self.cls_convs = nn.ModuleList() + + for i in range(self.stacked_convs): + chn = self.in_channels + 1 if i == 0 else self.feat_channels + self.mask_convs_x.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg)) + self.mask_convs_y.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg)) + + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg)) + + self.conv_mask_list_x = nn.ModuleList() + self.conv_mask_list_y = nn.ModuleList() + for num_grid in self.num_grids: + self.conv_mask_list_x.append( + nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) + self.conv_mask_list_y.append( + nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) + self.conv_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + + def forward(self, feats): + assert len(feats) == self.num_levels + feats = self.resize_feats(feats) + mask_preds_x = [] + mask_preds_y = [] + cls_preds = [] + for i in range(self.num_levels): + x = feats[i] + mask_feat = x + cls_feat = x + # generate and concat the coordinate + coord_feat = generate_coordinate(mask_feat.size(), + mask_feat.device) + mask_feat_x = torch.cat([mask_feat, coord_feat[:, 0:1, ...]], 1) + mask_feat_y = torch.cat([mask_feat, coord_feat[:, 1:2, ...]], 1) + + for mask_layer_x, mask_layer_y in \ + zip(self.mask_convs_x, self.mask_convs_y): + mask_feat_x = mask_layer_x(mask_feat_x) + mask_feat_y = mask_layer_y(mask_feat_y) + + mask_feat_x = F.interpolate( + mask_feat_x, scale_factor=2, mode='bilinear') + mask_feat_y = F.interpolate( + mask_feat_y, scale_factor=2, mode='bilinear') + + mask_pred_x = self.conv_mask_list_x[i](mask_feat_x) + mask_pred_y = self.conv_mask_list_y[i](mask_feat_y) + + # cls branch + for j, cls_layer in enumerate(self.cls_convs): + if j == self.cls_down_index: + num_grid = self.num_grids[i] + cls_feat = F.interpolate( + cls_feat, size=num_grid, mode='bilinear') + cls_feat = cls_layer(cls_feat) + + cls_pred = self.conv_cls(cls_feat) + + if not self.training: + feat_wh = feats[0].size()[-2:] + upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) + mask_pred_x = F.interpolate( + mask_pred_x.sigmoid(), + size=upsampled_size, + mode='bilinear') + mask_pred_y = F.interpolate( + mask_pred_y.sigmoid(), + size=upsampled_size, + mode='bilinear') + cls_pred = cls_pred.sigmoid() + # get local maximum + local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) + keep_mask = local_max[:, :, :-1, :-1] == cls_pred + cls_pred = cls_pred * keep_mask + + mask_preds_x.append(mask_pred_x) + mask_preds_y.append(mask_pred_y) + cls_preds.append(cls_pred) + return mask_preds_x, mask_preds_y, cls_preds + + def loss(self, + mlvl_mask_preds_x, + mlvl_mask_preds_y, + mlvl_cls_preds, + gt_labels, + gt_masks, + img_metas, + gt_bboxes=None, + 
**kwargs): + """Calculate the loss of total batch. + + Args: + mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction + from x branch. Each element in the list has shape + (batch_size, num_grids ,h ,w). + mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction + from y branch. Each element in the list has shape + (batch_size, num_grids ,h ,w). + mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element + in the list has shape + (batch_size, num_classes, num_grids ,num_grids). + gt_labels (list[Tensor]): Labels of multiple images. + gt_masks (list[Tensor]): Ground truth masks of multiple images. + Each has shape (num_instances, h, w). + img_metas (list[dict]): Meta information of multiple images. + gt_bboxes (list[Tensor]): Ground truth bboxes of multiple + images. Default: None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + num_levels = self.num_levels + num_imgs = len(gt_labels) + featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds_x] + + pos_mask_targets, labels, \ + xy_pos_indexes = \ + multi_apply(self._get_targets_single, + gt_bboxes, + gt_labels, + gt_masks, + featmap_sizes=featmap_sizes) + + # change from the outside list meaning multi images + # to the outside list meaning multi levels + mlvl_pos_mask_targets = [[] for _ in range(num_levels)] + mlvl_pos_mask_preds_x = [[] for _ in range(num_levels)] + mlvl_pos_mask_preds_y = [[] for _ in range(num_levels)] + mlvl_labels = [[] for _ in range(num_levels)] + for img_id in range(num_imgs): + + for lvl in range(num_levels): + mlvl_pos_mask_targets[lvl].append( + pos_mask_targets[img_id][lvl]) + mlvl_pos_mask_preds_x[lvl].append( + mlvl_mask_preds_x[lvl][img_id, + xy_pos_indexes[img_id][lvl][:, 1]]) + mlvl_pos_mask_preds_y[lvl].append( + mlvl_mask_preds_y[lvl][img_id, + xy_pos_indexes[img_id][lvl][:, 0]]) + mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) + + # cat multiple image + temp_mlvl_cls_preds = [] + for lvl in range(num_levels): + mlvl_pos_mask_targets[lvl] = torch.cat( + mlvl_pos_mask_targets[lvl], dim=0) + mlvl_pos_mask_preds_x[lvl] = torch.cat( + mlvl_pos_mask_preds_x[lvl], dim=0) + mlvl_pos_mask_preds_y[lvl] = torch.cat( + mlvl_pos_mask_preds_y[lvl], dim=0) + mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) + temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( + 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) + + num_pos = 0. + # dice loss + loss_mask = [] + for pred_x, pred_y, target in \ + zip(mlvl_pos_mask_preds_x, + mlvl_pos_mask_preds_y, mlvl_pos_mask_targets): + num_masks = pred_x.size(0) + if num_masks == 0: + # make sure can get grad + loss_mask.append((pred_x.sum() + pred_y.sum()).unsqueeze(0)) + continue + num_pos += num_masks + pred_mask = pred_y.sigmoid() * pred_x.sigmoid() + loss_mask.append( + self.loss_mask(pred_mask, target, reduction_override='none')) + if num_pos > 0: + loss_mask = torch.cat(loss_mask).sum() / num_pos + else: + loss_mask = torch.cat(loss_mask).mean() + + # cate + flatten_labels = torch.cat(mlvl_labels) + flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) + + loss_cls = self.loss_cls( + flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) + return dict(loss_mask=loss_mask, loss_cls=loss_cls) + + def _get_targets_single(self, + gt_bboxes, + gt_labels, + gt_masks, + featmap_sizes=None): + """Compute targets for predictions of single image. + + Args: + gt_bboxes (Tensor): Ground truth bbox of each instance, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth label of each instance, + shape (num_gts,). 
+ gt_masks (Tensor): Ground truth mask of each instance, + shape (num_gts, h, w). + featmap_sizes (list[:obj:`torch.size`]): Size of each + feature map from feature pyramid, each element + means (feat_h, feat_w). Default: None. + + Returns: + Tuple: Usually returns a tuple containing targets for predictions. + + - mlvl_pos_mask_targets (list[Tensor]): Each element represent + the binary mask targets for positive points in this + level, has shape (num_pos, out_h, out_w). + - mlvl_labels (list[Tensor]): Each element is + classification labels for all + points in this level, has shape + (num_grid, num_grid). + - mlvl_xy_pos_indexes (list[Tensor]): Each element + in the list contains the index of positive samples in + corresponding level, has shape (num_pos, 2), last + dimension 2 present (index_x, index_y). + """ + mlvl_pos_mask_targets, mlvl_labels, \ + mlvl_pos_masks = \ + super()._get_targets_single(gt_bboxes, gt_labels, gt_masks, + featmap_sizes=featmap_sizes) + + mlvl_xy_pos_indexes = [(item - self.num_classes).nonzero() + for item in mlvl_labels] + + return mlvl_pos_mask_targets, mlvl_labels, mlvl_xy_pos_indexes + + def get_results(self, + mlvl_mask_preds_x, + mlvl_mask_preds_y, + mlvl_cls_scores, + img_metas, + rescale=None, + **kwargs): + """Get multi-image mask results. + + Args: + mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction + from x branch. Each element in the list has shape + (batch_size, num_grids ,h ,w). + mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction + from y branch. Each element in the list has shape + (batch_size, num_grids ,h ,w). + mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element + in the list has shape + (batch_size, num_classes ,num_grids ,num_grids). + img_metas (list[dict]): Meta information of all images. + + Returns: + list[:obj:`InstanceData`]: Processed results of multiple + images.Each :obj:`InstanceData` usually contains + following keys. + + - scores (Tensor): Classification scores, has shape + (num_instance,). + - labels (Tensor): Has shape (num_instances,). + - masks (Tensor): Processed mask results, has + shape (num_instances, h, w). + """ + mlvl_cls_scores = [ + item.permute(0, 2, 3, 1) for item in mlvl_cls_scores + ] + assert len(mlvl_mask_preds_x) == len(mlvl_cls_scores) + num_levels = len(mlvl_cls_scores) + + results_list = [] + for img_id in range(len(img_metas)): + cls_pred_list = [ + mlvl_cls_scores[i][img_id].view( + -1, self.cls_out_channels).detach() + for i in range(num_levels) + ] + mask_pred_list_x = [ + mlvl_mask_preds_x[i][img_id] for i in range(num_levels) + ] + mask_pred_list_y = [ + mlvl_mask_preds_y[i][img_id] for i in range(num_levels) + ] + + cls_pred_list = torch.cat(cls_pred_list, dim=0) + mask_pred_list_x = torch.cat(mask_pred_list_x, dim=0) + mask_pred_list_y = torch.cat(mask_pred_list_y, dim=0) + + results = self._get_results_single( + cls_pred_list, + mask_pred_list_x, + mask_pred_list_y, + img_meta=img_metas[img_id], + cfg=self.test_cfg) + results_list.append(results) + return results_list + + def _get_results_single(self, cls_scores, mask_preds_x, mask_preds_y, + img_meta, cfg): + """Get processed mask related results of single image. + + Args: + cls_scores (Tensor): Classification score of all points + in single image, has shape (num_points, num_classes). + mask_preds_x (Tensor): Mask prediction of x branch of + all points in single image, has shape + (sum_num_grids, feat_h, feat_w). 
+ mask_preds_y (Tensor): Mask prediction of y branch of + all points in single image, has shape + (sum_num_grids, feat_h, feat_w). + img_meta (dict): Meta information of corresponding image. + cfg (dict): Config used in test phase. + + Returns: + :obj:`InstanceData`: Processed results of single image. + it usually contains following keys. + + - scores (Tensor): Classification scores, has shape + (num_instance,). + - labels (Tensor): Has shape (num_instances,). + - masks (Tensor): Processed mask results, has + shape (num_instances, h, w). + """ + + def empty_results(results, cls_scores): + """Generate a empty results.""" + results.scores = cls_scores.new_ones(0) + results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) + results.labels = cls_scores.new_ones(0) + return results + + cfg = self.test_cfg if cfg is None else cfg + + results = InstanceData(img_meta) + img_shape = results.img_shape + ori_shape = results.ori_shape + h, w, _ = img_shape + featmap_size = mask_preds_x.size()[-2:] + upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) + + score_mask = (cls_scores > cfg.score_thr) + cls_scores = cls_scores[score_mask] + inds = score_mask.nonzero() + lvl_interval = inds.new_tensor(self.num_grids).pow(2).cumsum(0) + num_all_points = lvl_interval[-1] + lvl_start_index = inds.new_ones(num_all_points) + num_grids = inds.new_ones(num_all_points) + seg_size = inds.new_tensor(self.num_grids).cumsum(0) + mask_lvl_start_index = inds.new_ones(num_all_points) + strides = inds.new_ones(num_all_points) + + lvl_start_index[:lvl_interval[0]] *= 0 + mask_lvl_start_index[:lvl_interval[0]] *= 0 + num_grids[:lvl_interval[0]] *= self.num_grids[0] + strides[:lvl_interval[0]] *= self.strides[0] + + for lvl in range(1, self.num_levels): + lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ + lvl_interval[lvl - 1] + mask_lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ + seg_size[lvl - 1] + num_grids[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ + self.num_grids[lvl] + strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ + self.strides[lvl] + + lvl_start_index = lvl_start_index[inds[:, 0]] + mask_lvl_start_index = mask_lvl_start_index[inds[:, 0]] + num_grids = num_grids[inds[:, 0]] + strides = strides[inds[:, 0]] + + y_lvl_offset = (inds[:, 0] - lvl_start_index) // num_grids + x_lvl_offset = (inds[:, 0] - lvl_start_index) % num_grids + y_inds = mask_lvl_start_index + y_lvl_offset + x_inds = mask_lvl_start_index + x_lvl_offset + + cls_labels = inds[:, 1] + mask_preds = mask_preds_x[x_inds, ...] * mask_preds_y[y_inds, ...] + + masks = mask_preds > cfg.mask_thr + sum_masks = masks.sum((1, 2)).float() + keep = sum_masks > strides + if keep.sum() == 0: + return empty_results(results, cls_scores) + + masks = masks[keep] + mask_preds = mask_preds[keep] + sum_masks = sum_masks[keep] + cls_scores = cls_scores[keep] + cls_labels = cls_labels[keep] + + # maskness. 
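The maskness term computed next weights every classification score by the mean predicted probability inside that instance's binary mask, so confident, well-filled masks rank ahead of diffuse ones when matrix NMS sorts candidates. A standalone sketch with dummy tensors (the `clamp` is only a guard for this toy example; the code below filters empty masks beforehand):

```python
import torch

mask_thr = 0.5
mask_preds = torch.rand(3, 32, 32)        # per-instance mask probabilities
cls_scores = torch.tensor([0.9, 0.8, 0.7])

masks = mask_preds > mask_thr             # binary masks
sum_masks = masks.sum((1, 2)).float()     # mask areas in pixels

# Maskness: mean predicted probability inside each binary mask.
maskness = (mask_preds * masks).sum((1, 2)) / sum_masks.clamp(min=1)
rescored = cls_scores * maskness          # ranking score used for matrix NMS
assert rescored.shape == (3,)
```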
+ mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks + cls_scores *= mask_scores + + scores, labels, _, keep_inds = mask_matrix_nms( + masks, + cls_labels, + cls_scores, + mask_area=sum_masks, + nms_pre=cfg.nms_pre, + max_num=cfg.max_per_img, + kernel=cfg.kernel, + sigma=cfg.sigma, + filter_thr=cfg.filter_thr) + mask_preds = mask_preds[keep_inds] + mask_preds = F.interpolate( + mask_preds.unsqueeze(0), size=upsampled_size, + mode='bilinear')[:, :, :h, :w] + mask_preds = F.interpolate( + mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0) + masks = mask_preds > cfg.mask_thr + + results.masks = masks + results.labels = labels + results.scores = scores + + return results + + +@HEADS.register_module() +class DecoupledSOLOLightHead(DecoupledSOLOHead): + """Decoupled Light SOLO mask head used in `SOLO: Segmenting Objects by + Locations `_ + + Args: + with_dcn (bool): Whether use dcn in mask_convs and cls_convs, + default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + *args, + dcn_cfg=None, + init_cfg=[ + dict(type='Normal', layer='Conv2d', std=0.01), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_mask_list_x')), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_mask_list_y')), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_cls')) + ], + **kwargs): + assert dcn_cfg is None or isinstance(dcn_cfg, dict) + self.dcn_cfg = dcn_cfg + super(DecoupledSOLOLightHead, self).__init__( + *args, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + self.mask_convs = nn.ModuleList() + self.cls_convs = nn.ModuleList() + + for i in range(self.stacked_convs): + if self.dcn_cfg is not None\ + and i == self.stacked_convs - 1: + conv_cfg = self.dcn_cfg + else: + conv_cfg = None + + chn = self.in_channels + 2 if i == 0 else self.feat_channels + self.mask_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg)) + + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg)) + + self.conv_mask_list_x = nn.ModuleList() + self.conv_mask_list_y = nn.ModuleList() + for num_grid in self.num_grids: + self.conv_mask_list_x.append( + nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) + self.conv_mask_list_y.append( + nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) + self.conv_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + + def forward(self, feats): + assert len(feats) == self.num_levels + feats = self.resize_feats(feats) + mask_preds_x = [] + mask_preds_y = [] + cls_preds = [] + for i in range(self.num_levels): + x = feats[i] + mask_feat = x + cls_feat = x + # generate and concat the coordinate + coord_feat = generate_coordinate(mask_feat.size(), + mask_feat.device) + mask_feat = torch.cat([mask_feat, coord_feat], 1) + + for mask_layer in self.mask_convs: + mask_feat = mask_layer(mask_feat) + + mask_feat = F.interpolate( + mask_feat, scale_factor=2, mode='bilinear') + + mask_pred_x = self.conv_mask_list_x[i](mask_feat) + mask_pred_y = self.conv_mask_list_y[i](mask_feat) + + # cls branch + for j, cls_layer in enumerate(self.cls_convs): + if j == self.cls_down_index: + num_grid = self.num_grids[i] + cls_feat = F.interpolate( + cls_feat, size=num_grid, mode='bilinear') + cls_feat = 
cls_layer(cls_feat) + + cls_pred = self.conv_cls(cls_feat) + + if not self.training: + feat_wh = feats[0].size()[-2:] + upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) + mask_pred_x = F.interpolate( + mask_pred_x.sigmoid(), + size=upsampled_size, + mode='bilinear') + mask_pred_y = F.interpolate( + mask_pred_y.sigmoid(), + size=upsampled_size, + mode='bilinear') + cls_pred = cls_pred.sigmoid() + # get local maximum + local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) + keep_mask = local_max[:, :, :-1, :-1] == cls_pred + cls_pred = cls_pred * keep_mask + + mask_preds_x.append(mask_pred_x) + mask_preds_y.append(mask_pred_y) + cls_preds.append(cls_pred) + return mask_preds_x, mask_preds_y, cls_preds diff --git a/downstream/mmdetection/mmdet/models/dense_heads/solov2_head.py b/downstream/mmdetection/mmdet/models/dense_heads/solov2_head.py new file mode 100644 index 0000000..df42217 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/solov2_head.py @@ -0,0 +1,749 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, auto_fp16, force_fp32 + +from mmdet.core import InstanceData, mask_matrix_nms, multi_apply +from mmdet.core.utils import center_of_mass, generate_coordinate +from mmdet.models.builder import HEADS +from .solo_head import SOLOHead + + +class MaskFeatModule(BaseModule): + """SOLOv2 mask feature map branch used in `SOLOv2: Dynamic and Fast + Instance Segmentation. `_ + + Args: + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels of the mask feature + map branch. + start_level (int): The starting feature map level from RPN that + will be used to predict the mask feature map. + end_level (int): The ending feature map level from rpn that + will be used to predict the mask feature map. + out_channels (int): Number of output channels of the mask feature + map branch. This is the channel count of the mask + feature map that to be dynamically convolved with the predicted + kernel. + mask_stride (int): Downsample factor of the mask feature map output. + Default: 4. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + in_channels, + feat_channels, + start_level, + end_level, + out_channels, + mask_stride=4, + conv_cfg=None, + norm_cfg=None, + init_cfg=[dict(type='Normal', layer='Conv2d', std=0.01)]): + super().__init__(init_cfg=init_cfg) + + self.in_channels = in_channels + self.feat_channels = feat_channels + self.start_level = start_level + self.end_level = end_level + self.mask_stride = mask_stride + assert start_level >= 0 and end_level >= start_level + self.out_channels = out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self._init_layers() + self.fp16_enabled = False + + def _init_layers(self): + self.convs_all_levels = nn.ModuleList() + for i in range(self.start_level, self.end_level + 1): + convs_per_level = nn.Sequential() + if i == 0: + convs_per_level.add_module( + f'conv{i}', + ConvModule( + self.in_channels, + self.feat_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=False)) + self.convs_all_levels.append(convs_per_level) + continue + + for j in range(i): + if j == 0: + if i == self.end_level: + chn = self.in_channels + 2 + else: + chn = self.in_channels + convs_per_level.add_module( + f'conv{j}', + ConvModule( + chn, + self.feat_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=False)) + convs_per_level.add_module( + f'upsample{j}', + nn.Upsample( + scale_factor=2, + mode='bilinear', + align_corners=False)) + continue + + convs_per_level.add_module( + f'conv{j}', + ConvModule( + self.feat_channels, + self.feat_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=False)) + convs_per_level.add_module( + f'upsample{j}', + nn.Upsample( + scale_factor=2, mode='bilinear', align_corners=False)) + + self.convs_all_levels.append(convs_per_level) + + self.conv_pred = ConvModule( + self.feat_channels, + self.out_channels, + 1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + + @auto_fp16() + def forward(self, feats): + inputs = feats[self.start_level:self.end_level + 1] + assert len(inputs) == (self.end_level - self.start_level + 1) + feature_add_all_level = self.convs_all_levels[0](inputs[0]) + for i in range(1, len(inputs)): + input_p = inputs[i] + if i == len(inputs) - 1: + coord_feat = generate_coordinate(input_p.size(), + input_p.device) + input_p = torch.cat([input_p, coord_feat], 1) + + feature_add_all_level += self.convs_all_levels[i](input_p) + + feature_pred = self.conv_pred(feature_add_all_level) + return feature_pred + + +@HEADS.register_module() +class SOLOV2Head(SOLOHead): + """SOLOv2 mask head used in `SOLOv2: Dynamic and Fast Instance + Segmentation. `_ + + Args: + mask_feature_head (dict): Config of SOLOv2MaskFeatHead. + dynamic_conv_size (int): Dynamic Conv kernel size. Default: 1. + dcn_cfg (dict): Dcn conv configurations in kernel_convs and cls_conv. + default: None. + dcn_apply_to_all_conv (bool): Whether to use dcn in every layer of + kernel_convs and cls_convs, or only the last layer. It shall be set + `True` for the normal version of SOLOv2 and `False` for the + light-weight version. default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. 
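+
+    Example:
+        A rough sketch of how ``mask_feature_head`` may be configured; the
+        values are illustrative assumptions, not the shipped SOLOv2
+        settings. Its ``in_channels`` can be omitted and is then filled in
+        from the head's own ``in_channels``:
+
+        >>> mask_feature_head = dict(
+        ...     feat_channels=128,
+        ...     start_level=0,
+        ...     end_level=3,
+        ...     out_channels=256,
+        ...     mask_stride=4,
+        ...     norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))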
+ """ + + def __init__(self, + *args, + mask_feature_head, + dynamic_conv_size=1, + dcn_cfg=None, + dcn_apply_to_all_conv=True, + init_cfg=[ + dict(type='Normal', layer='Conv2d', std=0.01), + dict( + type='Normal', + std=0.01, + bias_prob=0.01, + override=dict(name='conv_cls')) + ], + **kwargs): + assert dcn_cfg is None or isinstance(dcn_cfg, dict) + self.dcn_cfg = dcn_cfg + self.with_dcn = dcn_cfg is not None + self.dcn_apply_to_all_conv = dcn_apply_to_all_conv + self.dynamic_conv_size = dynamic_conv_size + mask_out_channels = mask_feature_head.get('out_channels') + self.kernel_out_channels = \ + mask_out_channels * self.dynamic_conv_size * self.dynamic_conv_size + + super().__init__(*args, init_cfg=init_cfg, **kwargs) + + # update the in_channels of mask_feature_head + if mask_feature_head.get('in_channels', None) is not None: + if mask_feature_head.in_channels != self.in_channels: + warnings.warn('The `in_channels` of SOLOv2MaskFeatHead and ' + 'SOLOv2Head should be same, changing ' + 'mask_feature_head.in_channels to ' + f'{self.in_channels}') + mask_feature_head.update(in_channels=self.in_channels) + else: + mask_feature_head.update(in_channels=self.in_channels) + + self.mask_feature_head = MaskFeatModule(**mask_feature_head) + self.mask_stride = self.mask_feature_head.mask_stride + self.fp16_enabled = False + + def _init_layers(self): + self.cls_convs = nn.ModuleList() + self.kernel_convs = nn.ModuleList() + conv_cfg = None + for i in range(self.stacked_convs): + if self.with_dcn: + if self.dcn_apply_to_all_conv: + conv_cfg = self.dcn_cfg + elif i == self.stacked_convs - 1: + # light head + conv_cfg = self.dcn_cfg + + chn = self.in_channels + 2 if i == 0 else self.feat_channels + self.kernel_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.norm_cfg is None)) + + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.norm_cfg is None)) + + self.conv_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + + self.conv_kernel = nn.Conv2d( + self.feat_channels, self.kernel_out_channels, 3, padding=1) + + @auto_fp16() + def forward(self, feats): + assert len(feats) == self.num_levels + mask_feats = self.mask_feature_head(feats) + feats = self.resize_feats(feats) + mlvl_kernel_preds = [] + mlvl_cls_preds = [] + for i in range(self.num_levels): + ins_kernel_feat = feats[i] + # ins branch + # concat coord + coord_feat = generate_coordinate(ins_kernel_feat.size(), + ins_kernel_feat.device) + ins_kernel_feat = torch.cat([ins_kernel_feat, coord_feat], 1) + + # kernel branch + kernel_feat = ins_kernel_feat + kernel_feat = F.interpolate( + kernel_feat, + size=self.num_grids[i], + mode='bilinear', + align_corners=False) + + cate_feat = kernel_feat[:, :-2, :, :] + + kernel_feat = kernel_feat.contiguous() + for i, kernel_conv in enumerate(self.kernel_convs): + kernel_feat = kernel_conv(kernel_feat) + kernel_pred = self.conv_kernel(kernel_feat) + + # cate branch + cate_feat = cate_feat.contiguous() + for i, cls_conv in enumerate(self.cls_convs): + cate_feat = cls_conv(cate_feat) + cate_pred = self.conv_cls(cate_feat) + + mlvl_kernel_preds.append(kernel_pred) + mlvl_cls_preds.append(cate_pred) + + return mlvl_kernel_preds, mlvl_cls_preds, mask_feats + + def _get_targets_single(self, + gt_bboxes, + gt_labels, + gt_masks, + 
featmap_size=None): + """Compute targets for predictions of single image. + + Args: + gt_bboxes (Tensor): Ground truth bbox of each instance, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth label of each instance, + shape (num_gts,). + gt_masks (Tensor): Ground truth mask of each instance, + shape (num_gts, h, w). + featmap_sizes (:obj:`torch.size`): Size of UNified mask + feature map used to generate instance segmentation + masks by dynamic convolution, each element means + (feat_h, feat_w). Default: None. + + Returns: + Tuple: Usually returns a tuple containing targets for predictions. + + - mlvl_pos_mask_targets (list[Tensor]): Each element represent + the binary mask targets for positive points in this + level, has shape (num_pos, out_h, out_w). + - mlvl_labels (list[Tensor]): Each element is + classification labels for all + points in this level, has shape + (num_grid, num_grid). + - mlvl_pos_masks (list[Tensor]): Each element is + a `BoolTensor` to represent whether the + corresponding point in single level + is positive, has shape (num_grid **2). + - mlvl_pos_indexes (list[list]): Each element + in the list contains the positive index in + corresponding level, has shape (num_pos). + """ + + device = gt_labels.device + gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * + (gt_bboxes[:, 3] - gt_bboxes[:, 1])) + + mlvl_pos_mask_targets = [] + mlvl_pos_indexes = [] + mlvl_labels = [] + mlvl_pos_masks = [] + for (lower_bound, upper_bound), num_grid \ + in zip(self.scale_ranges, self.num_grids): + mask_target = [] + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + pos_index = [] + labels = torch.zeros([num_grid, num_grid], + dtype=torch.int64, + device=device) + self.num_classes + pos_mask = torch.zeros([num_grid**2], + dtype=torch.bool, + device=device) + + gt_inds = ((gt_areas >= lower_bound) & + (gt_areas <= upper_bound)).nonzero().flatten() + if len(gt_inds) == 0: + mlvl_pos_mask_targets.append( + torch.zeros([0, featmap_size[0], featmap_size[1]], + dtype=torch.uint8, + device=device)) + mlvl_labels.append(labels) + mlvl_pos_masks.append(pos_mask) + mlvl_pos_indexes.append([]) + continue + hit_gt_bboxes = gt_bboxes[gt_inds] + hit_gt_labels = gt_labels[gt_inds] + hit_gt_masks = gt_masks[gt_inds, ...] + + pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - + hit_gt_bboxes[:, 0]) * self.pos_scale + pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - + hit_gt_bboxes[:, 1]) * self.pos_scale + + # Make sure hit_gt_masks has a value + valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 + + for gt_mask, gt_label, pos_h_range, pos_w_range, \ + valid_mask_flag in \ + zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, + pos_w_ranges, valid_mask_flags): + if not valid_mask_flag: + continue + upsampled_size = (featmap_size[0] * self.mask_stride, + featmap_size[1] * self.mask_stride) + center_h, center_w = center_of_mass(gt_mask) + + coord_w = int( + (center_w / upsampled_size[1]) // (1. / num_grid)) + coord_h = int( + (center_h / upsampled_size[0]) // (1. / num_grid)) + + # left, top, right, down + top_box = max( + 0, + int(((center_h - pos_h_range) / upsampled_size[0]) // + (1. / num_grid))) + down_box = min( + num_grid - 1, + int(((center_h + pos_h_range) / upsampled_size[0]) // + (1. / num_grid))) + left_box = max( + 0, + int(((center_w - pos_w_range) / upsampled_size[1]) // + (1. / num_grid))) + right_box = min( + num_grid - 1, + int(((center_w + pos_w_range) / upsampled_size[1]) // + (1. 
/ num_grid))) + + top = max(top_box, coord_h - 1) + down = min(down_box, coord_h + 1) + left = max(coord_w - 1, left_box) + right = min(right_box, coord_w + 1) + + labels[top:(down + 1), left:(right + 1)] = gt_label + # ins + gt_mask = np.uint8(gt_mask.cpu().numpy()) + # Follow the original implementation, F.interpolate is + # different from cv2 and opencv + gt_mask = mmcv.imrescale(gt_mask, scale=1. / self.mask_stride) + gt_mask = torch.from_numpy(gt_mask).to(device=device) + + for i in range(top, down + 1): + for j in range(left, right + 1): + index = int(i * num_grid + j) + this_mask_target = torch.zeros( + [featmap_size[0], featmap_size[1]], + dtype=torch.uint8, + device=device) + this_mask_target[:gt_mask.shape[0], :gt_mask. + shape[1]] = gt_mask + mask_target.append(this_mask_target) + pos_mask[index] = True + pos_index.append(index) + if len(mask_target) == 0: + mask_target = torch.zeros( + [0, featmap_size[0], featmap_size[1]], + dtype=torch.uint8, + device=device) + else: + mask_target = torch.stack(mask_target, 0) + mlvl_pos_mask_targets.append(mask_target) + mlvl_labels.append(labels) + mlvl_pos_masks.append(pos_mask) + mlvl_pos_indexes.append(pos_index) + return (mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks, + mlvl_pos_indexes) + + @force_fp32(apply_to=('mlvl_kernel_preds', 'mlvl_cls_preds', 'mask_feats')) + def loss(self, + mlvl_kernel_preds, + mlvl_cls_preds, + mask_feats, + gt_labels, + gt_masks, + img_metas, + gt_bboxes=None, + **kwargs): + """Calculate the loss of total batch. + + Args: + mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel + prediction. The kernel is used to generate instance + segmentation masks by dynamic convolution. Each element in the + list has shape + (batch_size, kernel_out_channels, num_grids, num_grids). + mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element + in the list has shape + (batch_size, num_classes, num_grids, num_grids). + mask_feats (Tensor): Unified mask feature map used to generate + instance segmentation masks by dynamic convolution. Has shape + (batch_size, mask_out_channels, h, w). + gt_labels (list[Tensor]): Labels of multiple images. + gt_masks (list[Tensor]): Ground truth masks of multiple images. + Each has shape (num_instances, h, w). + img_metas (list[dict]): Meta information of multiple images. + gt_bboxes (list[Tensor]): Ground truth bboxes of multiple + images. Default: None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
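+
+        Note:
+            The per-image targets returned by ``_get_targets_single`` are
+            regrouped per level before the losses are computed. A toy
+            sketch of that regrouping with made-up tensors (two images,
+            two levels):
+
+            >>> import torch
+            >>> img0 = [torch.zeros(2, 4, 4), torch.zeros(1, 4, 4)]
+            >>> img1 = [torch.zeros(3, 4, 4), torch.zeros(0, 4, 4)]
+            >>> per_lvl = [torch.cat(lvl, 0) for lvl in zip(img0, img1)]
+            >>> assert [t.shape[0] for t in per_lvl] == [5, 1]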
+ """ + featmap_size = mask_feats.size()[-2:] + + pos_mask_targets, labels, pos_masks, pos_indexes = multi_apply( + self._get_targets_single, + gt_bboxes, + gt_labels, + gt_masks, + featmap_size=featmap_size) + + mlvl_mask_targets = [ + torch.cat(lvl_mask_targets, 0) + for lvl_mask_targets in zip(*pos_mask_targets) + ] + + mlvl_pos_kernel_preds = [] + for lvl_kernel_preds, lvl_pos_indexes in zip(mlvl_kernel_preds, + zip(*pos_indexes)): + lvl_pos_kernel_preds = [] + for img_lvl_kernel_preds, img_lvl_pos_indexes in zip( + lvl_kernel_preds, lvl_pos_indexes): + img_lvl_pos_kernel_preds = img_lvl_kernel_preds.view( + img_lvl_kernel_preds.shape[0], -1)[:, img_lvl_pos_indexes] + lvl_pos_kernel_preds.append(img_lvl_pos_kernel_preds) + mlvl_pos_kernel_preds.append(lvl_pos_kernel_preds) + + # make multilevel mlvl_mask_pred + mlvl_mask_preds = [] + for lvl_pos_kernel_preds in mlvl_pos_kernel_preds: + lvl_mask_preds = [] + for img_id, img_lvl_pos_kernel_pred in enumerate( + lvl_pos_kernel_preds): + if img_lvl_pos_kernel_pred.size()[-1] == 0: + continue + img_mask_feats = mask_feats[[img_id]] + h, w = img_mask_feats.shape[-2:] + num_kernel = img_lvl_pos_kernel_pred.shape[1] + img_lvl_mask_pred = F.conv2d( + img_mask_feats, + img_lvl_pos_kernel_pred.permute(1, 0).view( + num_kernel, -1, self.dynamic_conv_size, + self.dynamic_conv_size), + stride=1).view(-1, h, w) + lvl_mask_preds.append(img_lvl_mask_pred) + if len(lvl_mask_preds) == 0: + lvl_mask_preds = None + else: + lvl_mask_preds = torch.cat(lvl_mask_preds, 0) + mlvl_mask_preds.append(lvl_mask_preds) + # dice loss + num_pos = 0 + for img_pos_masks in pos_masks: + for lvl_img_pos_masks in img_pos_masks: + num_pos += lvl_img_pos_masks.count_nonzero() + + loss_mask = [] + for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds, + mlvl_mask_targets): + if lvl_mask_preds is None: + continue + loss_mask.append( + self.loss_mask( + lvl_mask_preds, + lvl_mask_targets, + reduction_override='none')) + if num_pos > 0: + loss_mask = torch.cat(loss_mask).sum() / num_pos + else: + loss_mask = torch.cat(loss_mask).mean() + + # cate + flatten_labels = [ + torch.cat( + [img_lvl_labels.flatten() for img_lvl_labels in lvl_labels]) + for lvl_labels in zip(*labels) + ] + flatten_labels = torch.cat(flatten_labels) + + flatten_cls_preds = [ + lvl_cls_preds.permute(0, 2, 3, 1).reshape(-1, self.num_classes) + for lvl_cls_preds in mlvl_cls_preds + ] + flatten_cls_preds = torch.cat(flatten_cls_preds) + + loss_cls = self.loss_cls( + flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) + return dict(loss_mask=loss_mask, loss_cls=loss_cls) + + @force_fp32( + apply_to=('mlvl_kernel_preds', 'mlvl_cls_scores', 'mask_feats')) + def get_results(self, mlvl_kernel_preds, mlvl_cls_scores, mask_feats, + img_metas, **kwargs): + """Get multi-image mask results. + + Args: + mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel + prediction. The kernel is used to generate instance + segmentation masks by dynamic convolution. Each element in the + list has shape + (batch_size, kernel_out_channels, num_grids, num_grids). + mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element + in the list has shape + (batch_size, num_classes, num_grids, num_grids). + mask_feats (Tensor): Unified mask feature map used to generate + instance segmentation masks by dynamic convolution. Has shape + (batch_size, mask_out_channels, h, w). + img_metas (list[dict]): Meta information of all images. 
+ + Returns: + list[:obj:`InstanceData`]: Processed results of multiple + images.Each :obj:`InstanceData` usually contains + following keys. + + - scores (Tensor): Classification scores, has shape + (num_instance,). + - labels (Tensor): Has shape (num_instances,). + - masks (Tensor): Processed mask results, has + shape (num_instances, h, w). + """ + num_levels = len(mlvl_cls_scores) + assert len(mlvl_kernel_preds) == len(mlvl_cls_scores) + + for lvl in range(num_levels): + cls_scores = mlvl_cls_scores[lvl] + cls_scores = cls_scores.sigmoid() + local_max = F.max_pool2d(cls_scores, 2, stride=1, padding=1) + keep_mask = local_max[:, :, :-1, :-1] == cls_scores + cls_scores = cls_scores * keep_mask + mlvl_cls_scores[lvl] = cls_scores.permute(0, 2, 3, 1) + + result_list = [] + for img_id in range(len(img_metas)): + img_cls_pred = [ + mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels) + for lvl in range(num_levels) + ] + img_mask_feats = mask_feats[[img_id]] + img_kernel_pred = [ + mlvl_kernel_preds[lvl][img_id].permute(1, 2, 0).view( + -1, self.kernel_out_channels) for lvl in range(num_levels) + ] + img_cls_pred = torch.cat(img_cls_pred, dim=0) + img_kernel_pred = torch.cat(img_kernel_pred, dim=0) + result = self._get_results_single( + img_kernel_pred, + img_cls_pred, + img_mask_feats, + img_meta=img_metas[img_id]) + result_list.append(result) + return result_list + + def _get_results_single(self, + kernel_preds, + cls_scores, + mask_feats, + img_meta, + cfg=None): + """Get processed mask related results of single image. + + Args: + kernel_preds (Tensor): Dynamic kernel prediction of all points + in single image, has shape + (num_points, kernel_out_channels). + cls_scores (Tensor): Classification score of all points + in single image, has shape (num_points, num_classes). + mask_preds (Tensor): Mask prediction of all points in + single image, has shape (num_points, feat_h, feat_w). + img_meta (dict): Meta information of corresponding image. + cfg (dict, optional): Config used in test phase. + Default: None. + + Returns: + :obj:`InstanceData`: Processed results of single image. + it usually contains following keys. + - scores (Tensor): Classification scores, has shape + (num_instance,). + - labels (Tensor): Has shape (num_instances,). + - masks (Tensor): Processed mask results, has + shape (num_instances, h, w). + """ + + def empty_results(results, cls_scores): + """Generate a empty results.""" + results.scores = cls_scores.new_ones(0) + results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) + results.labels = cls_scores.new_ones(0) + return results + + cfg = self.test_cfg if cfg is None else cfg + assert len(kernel_preds) == len(cls_scores) + results = InstanceData(img_meta) + + featmap_size = mask_feats.size()[-2:] + + img_shape = results.img_shape + ori_shape = results.ori_shape + + # overall info + h, w, _ = img_shape + upsampled_size = (featmap_size[0] * self.mask_stride, + featmap_size[1] * self.mask_stride) + + # process. + score_mask = (cls_scores > cfg.score_thr) + cls_scores = cls_scores[score_mask] + if len(cls_scores) == 0: + return empty_results(results, cls_scores) + + # cate_labels & kernel_preds + inds = score_mask.nonzero() + cls_labels = inds[:, 1] + kernel_preds = kernel_preds[inds[:, 0]] + + # trans vector. 
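+        # Each level contributes num_grid**2 candidate points, so the
+        # cumulative sums in `lvl_interval` map every kept point back to
+        # the stride of the level it came from.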
+ lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) + strides = kernel_preds.new_ones(lvl_interval[-1]) + + strides[:lvl_interval[0]] *= self.strides[0] + for lvl in range(1, self.num_levels): + strides[lvl_interval[lvl - + 1]:lvl_interval[lvl]] *= self.strides[lvl] + strides = strides[inds[:, 0]] + + # mask encoding. + kernel_preds = kernel_preds.view( + kernel_preds.size(0), -1, self.dynamic_conv_size, + self.dynamic_conv_size) + mask_preds = F.conv2d( + mask_feats, kernel_preds, stride=1).squeeze(0).sigmoid() + # mask. + masks = mask_preds > cfg.mask_thr + sum_masks = masks.sum((1, 2)).float() + keep = sum_masks > strides + if keep.sum() == 0: + return empty_results(results, cls_scores) + masks = masks[keep] + mask_preds = mask_preds[keep] + sum_masks = sum_masks[keep] + cls_scores = cls_scores[keep] + cls_labels = cls_labels[keep] + + # maskness. + mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks + cls_scores *= mask_scores + + scores, labels, _, keep_inds = mask_matrix_nms( + masks, + cls_labels, + cls_scores, + mask_area=sum_masks, + nms_pre=cfg.nms_pre, + max_num=cfg.max_per_img, + kernel=cfg.kernel, + sigma=cfg.sigma, + filter_thr=cfg.filter_thr) + mask_preds = mask_preds[keep_inds] + mask_preds = F.interpolate( + mask_preds.unsqueeze(0), + size=upsampled_size, + mode='bilinear', + align_corners=False)[:, :, :h, :w] + mask_preds = F.interpolate( + mask_preds, + size=ori_shape[:2], + mode='bilinear', + align_corners=False).squeeze(0) + masks = mask_preds > cfg.mask_thr + + results.masks = masks + results.labels = labels + results.scores = scores + + return results diff --git a/downstream/mmdetection/mmdet/models/dense_heads/ssd_head.py b/downstream/mmdetection/mmdet/models/dense_heads/ssd_head.py new file mode 100644 index 0000000..e362fd8 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/ssd_head.py @@ -0,0 +1,357 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.runner import force_fp32 + +from mmdet.core import (build_assigner, build_bbox_coder, + build_prior_generator, build_sampler, multi_apply) +from ..builder import HEADS +from ..losses import smooth_l1_loss +from .anchor_head import AnchorHead + + +# TODO: add loss evaluator for SSD +@HEADS.register_module() +class SSDHead(AnchorHead): + """SSD head used in https://arxiv.org/abs/1512.02325. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + stacked_convs (int): Number of conv layers in cls and reg tower. + Default: 0. + feat_channels (int): Number of hidden channels when stacked_convs + > 0. Default: 256. + use_depthwise (bool): Whether to use DepthwiseSeparableConv. + Default: False. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: None. + act_cfg (dict): Dictionary to construct and config activation layer. + Default: None. + anchor_generator (dict): Config dict for anchor generator + bbox_coder (dict): Config of bounding box coder. + reg_decoded_bbox (bool): If true, the regression loss would be + applied directly on decoded bounding boxes, converting both + the predicted boxes and regression targets to absolute + coordinates format. Default False. 
It should be `True` when + using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. + train_cfg (dict): Training config of anchor head. + test_cfg (dict): Testing config of anchor head. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ # noqa: W605 + + def __init__(self, + num_classes=80, + in_channels=(512, 1024, 512, 256, 256, 256), + stacked_convs=0, + feat_channels=256, + use_depthwise=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + input_size=300, + strides=[8, 16, 32, 64, 100, 300], + ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), + basesize_ratio_range=(0.1, 0.9)), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=True, + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0], + ), + reg_decoded_bbox=False, + train_cfg=None, + test_cfg=None, + init_cfg=dict( + type='Xavier', + layer='Conv2d', + distribution='uniform', + bias=0)): + super(AnchorHead, self).__init__(init_cfg) + self.num_classes = num_classes + self.in_channels = in_channels + self.stacked_convs = stacked_convs + self.feat_channels = feat_channels + self.use_depthwise = use_depthwise + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.cls_out_channels = num_classes + 1 # add background class + self.prior_generator = build_prior_generator(anchor_generator) + + # Usually the numbers of anchors for each level are the same + # except SSD detectors. So it is an int in the most dense + # heads but a list of int in SSDHead + self.num_base_priors = self.prior_generator.num_base_priors + + self._init_layers() + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.reg_decoded_bbox = reg_decoded_bbox + self.use_sigmoid_cls = False + self.cls_focal_loss = False + self.train_cfg = train_cfg + self.test_cfg = test_cfg + # set sampling=False for archor_target + self.sampling = False + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # SSD sampling=False so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.fp16_enabled = False + + @property + def num_anchors(self): + """ + Returns: + list[int]: Number of base_anchors on each point of each level. 
+ """ + warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' + 'please use "num_base_priors" instead') + return self.num_base_priors + + def _init_layers(self): + """Initialize layers of the head.""" + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + # TODO: Use registry to choose ConvModule type + conv = DepthwiseSeparableConvModule \ + if self.use_depthwise else ConvModule + + for channel, num_base_priors in zip(self.in_channels, + self.num_base_priors): + cls_layers = [] + reg_layers = [] + in_channel = channel + # build stacked conv tower, not used in default ssd + for i in range(self.stacked_convs): + cls_layers.append( + conv( + in_channel, + self.feat_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + reg_layers.append( + conv( + in_channel, + self.feat_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + in_channel = self.feat_channels + # SSD-Lite head + if self.use_depthwise: + cls_layers.append( + ConvModule( + in_channel, + in_channel, + 3, + padding=1, + groups=in_channel, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + reg_layers.append( + ConvModule( + in_channel, + in_channel, + 3, + padding=1, + groups=in_channel, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + cls_layers.append( + nn.Conv2d( + in_channel, + num_base_priors * self.cls_out_channels, + kernel_size=1 if self.use_depthwise else 3, + padding=0 if self.use_depthwise else 1)) + reg_layers.append( + nn.Conv2d( + in_channel, + num_base_priors * 4, + kernel_size=1 if self.use_depthwise else 3, + padding=0 if self.use_depthwise else 1)) + self.cls_convs.append(nn.Sequential(*cls_layers)) + self.reg_convs.append(nn.Sequential(*reg_layers)) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * 4. + """ + cls_scores = [] + bbox_preds = [] + for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, + self.cls_convs): + cls_scores.append(cls_conv(feat)) + bbox_preds.append(reg_conv(feat)) + return cls_scores, bbox_preds + + def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights, + bbox_targets, bbox_weights, num_total_samples): + """Compute loss of a single image. + + Args: + cls_score (Tensor): Box scores for eachimage + Has shape (num_total_anchors, num_classes). + bbox_pred (Tensor): Box energies / deltas for each image + level with shape (num_total_anchors, 4). + anchors (Tensor): Box reference for each scale level with shape + (num_total_anchors, 4). + labels (Tensor): Labels of each anchors with shape + (num_total_anchors,). + label_weights (Tensor): Label weights of each anchor with shape + (num_total_anchors,) + bbox_targets (Tensor): BBox regression targets of each anchor + weight shape (num_total_anchors, 4). + bbox_weights (Tensor): BBox regression loss weights of each anchor + with shape (num_total_anchors, 4). + num_total_samples (int): If sampling, num total samples equal to + the number of total anchors; Otherwise, it is the number of + positive anchors. 
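+
+        Note:
+            Negative anchors are picked by online hard example mining: at
+            most ``neg_pos_ratio * num_pos`` negatives with the largest
+            classification losses are kept. A toy sketch with made-up
+            values:
+
+            >>> import torch
+            >>> loss_neg = torch.tensor([0.9, 0.1, 0.5, 0.7])
+            >>> num_pos, neg_pos_ratio = 1, 3
+            >>> k = min(neg_pos_ratio * num_pos, loss_neg.numel())
+            >>> topk_loss_neg, _ = loss_neg.topk(k)
+            >>> assert torch.isclose(topk_loss_neg.sum(), torch.tensor(2.1))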
+ + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + + loss_cls_all = F.cross_entropy( + cls_score, labels, reduction='none') * label_weights + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( + as_tuple=False).reshape(-1) + neg_inds = (labels == self.num_classes).nonzero( + as_tuple=False).view(-1) + + num_pos_samples = pos_inds.size(0) + num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples + if num_neg_samples > neg_inds.size(0): + num_neg_samples = neg_inds.size(0) + topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) + loss_cls_pos = loss_cls_all[pos_inds].sum() + loss_cls_neg = topk_loss_cls_neg.sum() + loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples + + if self.reg_decoded_bbox: + # When the regression loss (e.g. `IouLoss`, `GIouLoss`) + # is applied directly on the decoded bounding boxes, it + # decodes the already encoded coordinates to absolute format. + bbox_pred = self.bbox_coder.decode(anchor, bbox_pred) + + loss_bbox = smooth_l1_loss( + bbox_pred, + bbox_targets, + bbox_weights, + beta=self.train_cfg.smoothl1_beta, + avg_factor=num_total_samples) + return loss_cls[None], loss_bbox + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=1, + unmap_outputs=True) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + + num_images = len(img_metas) + all_cls_scores = torch.cat([ + s.permute(0, 2, 3, 1).reshape( + num_images, -1, self.cls_out_channels) for s in cls_scores + ], 1) + all_labels = torch.cat(labels_list, -1).view(num_images, -1) + all_label_weights = torch.cat(label_weights_list, + -1).view(num_images, -1) + all_bbox_preds = torch.cat([ + b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) + for b in bbox_preds + ], -2) + all_bbox_targets = torch.cat(bbox_targets_list, + -2).view(num_images, -1, 4) + all_bbox_weights = torch.cat(bbox_weights_list, + -2).view(num_images, -1, 4) + + # concat all level anchors to a single tensor + all_anchors = [] + for i in range(num_images): + all_anchors.append(torch.cat(anchor_list[i])) + + losses_cls, losses_bbox = multi_apply( + self.loss_single, + all_cls_scores, + all_bbox_preds, + all_anchors, + all_labels, + all_label_weights, + all_bbox_targets, + all_bbox_weights, + num_total_samples=num_total_pos) + return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/tood_head.py b/downstream/mmdetection/mmdet/models/dense_heads/tood_head.py new file mode 100644 index 0000000..c64ebf7 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/tood_head.py @@ -0,0 +1,778 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init +from mmcv.ops import deform_conv2d +from mmcv.runner import force_fp32 + +from mmdet.core import (anchor_inside_flags, build_assigner, distance2bbox, + images_to_levels, multi_apply, reduce_mean, unmap) +from mmdet.core.utils import filter_scores_and_topk +from mmdet.models.utils import sigmoid_geometric_mean +from ..builder import HEADS, build_loss +from .atss_head import ATSSHead + + +class TaskDecomposition(nn.Module): + """Task decomposition module in task-aligned predictor of TOOD. + + Args: + feat_channels (int): Number of feature channels in TOOD head. + stacked_convs (int): Number of conv layers in TOOD head. + la_down_rate (int): Downsample rate of layer attention. + conv_cfg (dict): Config dict for convolution layer. + norm_cfg (dict): Config dict for normalization layer. 
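+
+    Example:
+        A shape-check sketch; the channel count and number of convs are
+        arbitrary choices for illustration:
+
+        >>> import torch
+        >>> self = TaskDecomposition(feat_channels=64, stacked_convs=4)
+        >>> feat = torch.rand(2, 64 * 4, 8, 8)
+        >>> out = self.forward(feat)
+        >>> assert out.shape == (2, 64, 8, 8)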
+ """ + + def __init__(self, + feat_channels, + stacked_convs, + la_down_rate=8, + conv_cfg=None, + norm_cfg=None): + super(TaskDecomposition, self).__init__() + self.feat_channels = feat_channels + self.stacked_convs = stacked_convs + self.in_channels = self.feat_channels * self.stacked_convs + self.norm_cfg = norm_cfg + self.layer_attention = nn.Sequential( + nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1), + nn.ReLU(inplace=True), + nn.Conv2d( + self.in_channels // la_down_rate, + self.stacked_convs, + 1, + padding=0), nn.Sigmoid()) + + self.reduction_conv = ConvModule( + self.in_channels, + self.feat_channels, + 1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=norm_cfg is None) + + def init_weights(self): + for m in self.layer_attention.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, std=0.001) + normal_init(self.reduction_conv.conv, std=0.01) + + def forward(self, feat, avg_feat=None): + b, c, h, w = feat.shape + if avg_feat is None: + avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) + weight = self.layer_attention(avg_feat) + + # here we first compute the product between layer attention weight and + # conv weight, and then compute the convolution between new conv weight + # and feature map, in order to save memory and FLOPs. + conv_weight = weight.reshape( + b, 1, self.stacked_convs, + 1) * self.reduction_conv.conv.weight.reshape( + 1, self.feat_channels, self.stacked_convs, self.feat_channels) + conv_weight = conv_weight.reshape(b, self.feat_channels, + self.in_channels) + feat = feat.reshape(b, self.in_channels, h * w) + feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h, + w) + if self.norm_cfg is not None: + feat = self.reduction_conv.norm(feat) + feat = self.reduction_conv.activate(feat) + + return feat + + +@HEADS.register_module() +class TOODHead(ATSSHead): + """TOODHead used in `TOOD: Task-aligned One-stage Object Detection. + + `_. + + TOOD uses Task-aligned head (T-head) and is optimized by Task Alignment + Learning (TAL). + + Args: + num_dcn (int): Number of deformable convolution in the head. + Default: 0. + anchor_type (str): If set to `anchor_free`, the head will use centers + to regress bboxes. If set to `anchor_based`, the head will + regress bboxes based on anchors. Default: `anchor_free`. + initial_loss_cls (dict): Config of initial loss. + + Example: + >>> self = TOODHead(11, 7) + >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] + >>> cls_score, bbox_pred = self.forward(feats) + >>> assert len(cls_score) == len(self.scales) + """ + + def __init__(self, + num_classes, + in_channels, + num_dcn=0, + anchor_type='anchor_free', + initial_loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + activated=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + **kwargs): + assert anchor_type in ['anchor_free', 'anchor_based'] + self.num_dcn = num_dcn + self.anchor_type = anchor_type + self.epoch = 0 # which would be update in SetEpochInfoHook! 
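+        # `self.epoch` is compared with `initial_epoch` in `loss_single`
+        # and `get_targets` to switch from the initial assigner and loss
+        # to the task-aligned ones.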
+ super(TOODHead, self).__init__(num_classes, in_channels, **kwargs) + + if self.train_cfg: + self.initial_epoch = self.train_cfg.initial_epoch + self.initial_assigner = build_assigner( + self.train_cfg.initial_assigner) + self.initial_loss_cls = build_loss(initial_loss_cls) + self.assigner = self.initial_assigner + self.alignment_assigner = build_assigner(self.train_cfg.assigner) + self.alpha = self.train_cfg.alpha + self.beta = self.train_cfg.beta + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.inter_convs = nn.ModuleList() + for i in range(self.stacked_convs): + if i < self.num_dcn: + conv_cfg = dict(type='DCNv2', deform_groups=4) + else: + conv_cfg = self.conv_cfg + chn = self.in_channels if i == 0 else self.feat_channels + self.inter_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg)) + + self.cls_decomp = TaskDecomposition(self.feat_channels, + self.stacked_convs, + self.stacked_convs * 8, + self.conv_cfg, self.norm_cfg) + self.reg_decomp = TaskDecomposition(self.feat_channels, + self.stacked_convs, + self.stacked_convs * 8, + self.conv_cfg, self.norm_cfg) + + self.tood_cls = nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.cls_out_channels, + 3, + padding=1) + self.tood_reg = nn.Conv2d( + self.feat_channels, self.num_base_priors * 4, 3, padding=1) + + self.cls_prob_module = nn.Sequential( + nn.Conv2d(self.feat_channels * self.stacked_convs, + self.feat_channels // 4, 1), nn.ReLU(inplace=True), + nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1)) + self.reg_offset_module = nn.Sequential( + nn.Conv2d(self.feat_channels * self.stacked_convs, + self.feat_channels // 4, 1), nn.ReLU(inplace=True), + nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1)) + + self.scales = nn.ModuleList( + [Scale(1.0) for _ in self.prior_generator.strides]) + + def init_weights(self): + """Initialize weights of the head.""" + bias_cls = bias_init_with_prob(0.01) + for m in self.inter_convs: + normal_init(m.conv, std=0.01) + for m in self.cls_prob_module: + if isinstance(m, nn.Conv2d): + normal_init(m, std=0.01) + for m in self.reg_offset_module: + if isinstance(m, nn.Conv2d): + normal_init(m, std=0.001) + normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls) + + self.cls_decomp.init_weights() + self.reg_decomp.init_weights() + + normal_init(self.tood_cls, std=0.01, bias=bias_cls) + normal_init(self.tood_reg, std=0.01) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of classification scores and bbox prediction + cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_anchors * num_classes. + bbox_preds (list[Tensor]): Decoded box for all scale levels, + each is a 4D-tensor, the channels number is + num_anchors * 4. In [tl_x, tl_y, br_x, br_y] format. 
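+
+        Note:
+            The returned boxes are expressed in units of the corresponding
+            level stride; they are rescaled by ``stride[0]`` in ``loss``
+            and in ``_get_bboxes_single``.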
+ """ + cls_scores = [] + bbox_preds = [] + for idx, (x, scale, stride) in enumerate( + zip(feats, self.scales, self.prior_generator.strides)): + b, c, h, w = x.shape + anchor = self.prior_generator.single_level_grid_priors( + (h, w), idx, device=x.device) + anchor = torch.cat([anchor for _ in range(b)]) + # extract task interactive features + inter_feats = [] + for inter_conv in self.inter_convs: + x = inter_conv(x) + inter_feats.append(x) + feat = torch.cat(inter_feats, 1) + + # task decomposition + avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) + cls_feat = self.cls_decomp(feat, avg_feat) + reg_feat = self.reg_decomp(feat, avg_feat) + + # cls prediction and alignment + cls_logits = self.tood_cls(cls_feat) + cls_prob = self.cls_prob_module(feat) + cls_score = sigmoid_geometric_mean(cls_logits, cls_prob) + + # reg prediction and alignment + if self.anchor_type == 'anchor_free': + reg_dist = scale(self.tood_reg(reg_feat).exp()).float() + reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) + reg_bbox = distance2bbox( + self.anchor_center(anchor) / stride[0], + reg_dist).reshape(b, h, w, 4).permute(0, 3, 1, + 2) # (b, c, h, w) + elif self.anchor_type == 'anchor_based': + reg_dist = scale(self.tood_reg(reg_feat)).float() + reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) + reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape( + b, h, w, 4).permute(0, 3, 1, 2) / stride[0] + else: + raise NotImplementedError( + f'Unknown anchor type: {self.anchor_type}.' + f'Please use `anchor_free` or `anchor_based`.') + reg_offset = self.reg_offset_module(feat) + bbox_pred = self.deform_sampling(reg_bbox.contiguous(), + reg_offset.contiguous()) + + # After deform_sampling, some boxes will become invalid (The + # left-top point is at the right or bottom of the right-bottom + # point), which will make the GIoULoss negative. + invalid_bbox_idx = (bbox_pred[:, [0]] > bbox_pred[:, [2]]) | \ + (bbox_pred[:, [1]] > bbox_pred[:, [3]]) + invalid_bbox_idx = invalid_bbox_idx.expand_as(bbox_pred) + bbox_pred = torch.where(invalid_bbox_idx, reg_bbox, bbox_pred) + + cls_scores.append(cls_score) + bbox_preds.append(bbox_pred) + return tuple(cls_scores), tuple(bbox_preds) + + def deform_sampling(self, feat, offset): + """Sampling the feature x according to offset. + + Args: + feat (Tensor): Feature + offset (Tensor): Spatial offset for feature sampling + """ + # it is an equivalent implementation of bilinear interpolation + b, c, h, w = feat.shape + weight = feat.new_ones(c, 1, 1, 1) + y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) + return y + + def anchor_center(self, anchors): + """Get anchor centers from anchors. + + Args: + anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. + + Returns: + Tensor: Anchor centers with shape (N, 2), "xy" format. + """ + anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 + anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 + return torch.stack([anchors_cx, anchors_cy], dim=-1) + + def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, + bbox_targets, alignment_metrics, stride): + """Compute loss of a single scale level. + + Args: + anchors (Tensor): Box reference for each scale level with shape + (N, num_total_anchors, 4). + cls_score (Tensor): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W). + bbox_pred (Tensor): Decoded bboxes for each scale + level with shape (N, num_anchors * 4, H, W). + labels (Tensor): Labels of each anchors with shape + (N, num_total_anchors). 
+ label_weights (Tensor): Label weights of each anchor with shape + (N, num_total_anchors). + bbox_targets (Tensor): BBox regression targets of each anchor with + shape (N, num_total_anchors, 4). + alignment_metrics (Tensor): Alignment metrics with shape + (N, num_total_anchors). + stride (tuple[int]): Downsample stride of the feature map. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert stride[0] == stride[1], 'h stride is not equal to w stride!' + anchors = anchors.reshape(-1, 4) + cls_score = cls_score.permute(0, 2, 3, 1).reshape( + -1, self.cls_out_channels).contiguous() + bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + bbox_targets = bbox_targets.reshape(-1, 4) + labels = labels.reshape(-1) + alignment_metrics = alignment_metrics.reshape(-1) + label_weights = label_weights.reshape(-1) + targets = labels if self.epoch < self.initial_epoch else ( + labels, alignment_metrics) + cls_loss_func = self.initial_loss_cls \ + if self.epoch < self.initial_epoch else self.loss_cls + + loss_cls = cls_loss_func( + cls_score, targets, label_weights, avg_factor=1.0) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero().squeeze(1) + + if len(pos_inds) > 0: + pos_bbox_targets = bbox_targets[pos_inds] + pos_bbox_pred = bbox_pred[pos_inds] + pos_anchors = anchors[pos_inds] + + pos_decode_bbox_pred = pos_bbox_pred + pos_decode_bbox_targets = pos_bbox_targets / stride[0] + + # regression loss + pos_bbox_weight = self.centerness_target( + pos_anchors, pos_bbox_targets + ) if self.epoch < self.initial_epoch else alignment_metrics[ + pos_inds] + + loss_bbox = self.loss_bbox( + pos_decode_bbox_pred, + pos_decode_bbox_targets, + weight=pos_bbox_weight, + avg_factor=1.0) + else: + loss_bbox = bbox_pred.sum() * 0 + pos_bbox_weight = bbox_targets.new_tensor(0.) + + return loss_cls, loss_bbox, alignment_metrics.sum( + ), pos_bbox_weight.sum() + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Decoded box for each scale + level with shape (N, num_anchors * 4, H, W) in + [tl_x, tl_y, br_x, br_y] format. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (list[Tensor] | None): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + num_imgs = len(img_metas) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + + flatten_cls_scores = torch.cat([ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.cls_out_channels) + for cls_score in cls_scores + ], 1) + flatten_bbox_preds = torch.cat([ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) * stride[0] + for bbox_pred, stride in zip(bbox_preds, + self.prior_generator.strides) + ], 1) + + cls_reg_targets = self.get_targets( + flatten_cls_scores, + flatten_bbox_preds, + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + (anchor_list, labels_list, label_weights_list, bbox_targets_list, + alignment_metrics_list) = cls_reg_targets + + losses_cls, losses_bbox,\ + cls_avg_factors, bbox_avg_factors = multi_apply( + self.loss_single, + anchor_list, + cls_scores, + bbox_preds, + labels_list, + label_weights_list, + bbox_targets_list, + alignment_metrics_list, + self.prior_generator.strides) + + cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item() + losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls)) + + bbox_avg_factor = reduce_mean( + sum(bbox_avg_factors)).clamp_(min=1).item() + losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) + return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) + + def _get_bboxes_single(self, + cls_score_list, + bbox_pred_list, + score_factor_list, + mlvl_priors, + img_meta, + cfg, + rescale=False, + with_nms=True, + **kwargs): + """Transform outputs of a single image into bbox predictions. + + Args: + cls_score_list (list[Tensor]): Box scores from all scale + levels of a single image, each item has shape + (num_priors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas from + all scale levels of a single image, each item has shape + (num_priors * 4, H, W). + score_factor_list (list[Tensor]): Score factor from all scale + levels of a single image, each item has shape + (num_priors * 1, H, W). + mlvl_priors (list[Tensor]): Each element in the list is + the priors of a single level in feature pyramid. In all + anchor-based methods, it has shape (num_priors, 4). In + all anchor-free methods, it has shape (num_priors, 2) + when `with_stride=True`, otherwise it still has shape + (num_priors, 4). + img_meta (dict): Image meta info. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. If with_nms + is False and mlvl_score_factor is None, return mlvl_bboxes and + mlvl_scores, else return mlvl_bboxes, mlvl_scores and + mlvl_score_factor. Usually with_nms is False is used for aug + test. If with_nms is True, then return the following format + + - det_bboxes (Tensor): Predicted bboxes with shape \ + [num_bboxes, 5], where the first 4 columns are bounding \ + box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ + column are scores between 0 and 1. 
+ - det_labels (Tensor): Predicted labels of the corresponding \ + box with shape [num_bboxes]. + """ + + cfg = self.test_cfg if cfg is None else cfg + nms_pre = cfg.get('nms_pre', -1) + + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_labels = [] + for cls_score, bbox_pred, priors, stride in zip( + cls_score_list, bbox_pred_list, mlvl_priors, + self.prior_generator.strides): + + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) * stride[0] + scores = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + + # After https://github.com/open-mmlab/mmdetection/pull/6268/, + # this operation keeps fewer bboxes under the same `nms_pre`. + # There is no difference in performance for most models. If you + # find a slight drop in performance, you can set a larger + # `nms_pre` than before. + results = filter_scores_and_topk( + scores, cfg.score_thr, nms_pre, + dict(bbox_pred=bbox_pred, priors=priors)) + scores, labels, keep_idxs, filtered_results = results + + bboxes = filtered_results['bbox_pred'] + + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_labels.append(labels) + + return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, + img_meta['scale_factor'], cfg, rescale, + with_nms, None, **kwargs) + + def get_targets(self, + cls_scores, + bbox_preds, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): + """Compute regression and classification targets for anchors in + multiple images. + + Args: + cls_scores (Tensor): Classification predictions of images, + a 3D-Tensor with shape [num_imgs, num_priors, num_classes]. + bbox_preds (Tensor): Decoded bboxes predictions of one image, + a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x, + tl_y, br_x, br_y] format. + anchor_list (list[list[Tensor]]): Multi level anchors of each + image. The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_anchors, 4). + valid_flag_list (list[list[Tensor]]): Multi level valid flags of + each image. The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_anchors, ) + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be + ignored. + gt_labels_list (list[Tensor]): Ground truth labels of each box. + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: a tuple containing learning targets. + + - anchors_list (list[list[Tensor]]): Anchors of each level. + - labels_list (list[Tensor]): Labels of each level. + - label_weights_list (list[Tensor]): Label weights of each + level. + - bbox_targets_list (list[Tensor]): BBox targets of each level. + - norm_alignment_metrics_list (list[Tensor]): Normalized + alignment metrics of each level. 
+ """ + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + num_level_anchors_list = [num_level_anchors] * num_imgs + + # concat all level anchors and flags to a single tensor + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + anchor_list[i] = torch.cat(anchor_list[i]) + valid_flag_list[i] = torch.cat(valid_flag_list[i]) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + # anchor_list: list(b * [-1, 4]) + + if self.epoch < self.initial_epoch: + (all_anchors, all_labels, all_label_weights, all_bbox_targets, + all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( + super()._get_target_single, + anchor_list, + valid_flag_list, + num_level_anchors_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs) + all_assign_metrics = [ + weight[..., 0] for weight in all_bbox_weights + ] + else: + (all_anchors, all_labels, all_label_weights, all_bbox_targets, + all_assign_metrics) = multi_apply( + self._get_target_single, + cls_scores, + bbox_preds, + anchor_list, + valid_flag_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs) + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + + # split targets to a list w.r.t. multiple levels + anchors_list = images_to_levels(all_anchors, num_level_anchors) + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + norm_alignment_metrics_list = images_to_levels(all_assign_metrics, + num_level_anchors) + + return (anchors_list, labels_list, label_weights_list, + bbox_targets_list, norm_alignment_metrics_list) + + def _get_target_single(self, + cls_scores, + bbox_preds, + flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True): + """Compute regression, classification targets for anchors in a single + image. + + Args: + cls_scores (list(Tensor)): Box scores for each image. + bbox_preds (list(Tensor)): Box energies / deltas for each image. + flat_anchors (Tensor): Multi-level anchors of the image, which are + concatenated into a single tensor of shape (num_anchors ,4) + valid_flags (Tensor): Multi level valid flags of the image, + which are concatenated into a single tensor of + shape (num_anchors,). + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + img_meta (dict): Meta info of the image. + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: N is the number of total anchors in the image. + anchors (Tensor): All anchors in the image with shape (N, 4). + labels (Tensor): Labels of all anchors in the image with shape + (N,). 
+ label_weights (Tensor): Label weights of all anchor in the + image with shape (N,). + bbox_targets (Tensor): BBox targets of all anchors in the + image with shape (N, 4). + norm_alignment_metrics (Tensor): Normalized alignment metrics + of all priors in the image with shape (N,). + """ + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 7 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + assign_result = self.alignment_assigner.assign( + cls_scores[inside_flags, :], bbox_preds[inside_flags, :], anchors, + gt_bboxes, gt_bboxes_ignore, gt_labels, self.alpha, self.beta) + assign_ious = assign_result.max_overlaps + assign_metrics = assign_result.assign_metrics + + sampling_result = self.sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + labels = anchors.new_full((num_valid_anchors, ), + self.num_classes, + dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + norm_alignment_metrics = anchors.new_zeros( + num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + # point-based + pos_bbox_targets = sampling_result.pos_gt_bboxes + bbox_targets[pos_inds, :] = pos_bbox_targets + + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class since v2.5.0 + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + class_assigned_gt_inds = torch.unique( + sampling_result.pos_assigned_gt_inds) + for gt_inds in class_assigned_gt_inds: + gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds == + gt_inds] + pos_alignment_metrics = assign_metrics[gt_class_inds] + pos_ious = assign_ious[gt_class_inds] + pos_norm_alignment_metrics = pos_alignment_metrics / ( + pos_alignment_metrics.max() + 10e-8) * pos_ious.max() + norm_alignment_metrics[gt_class_inds] = pos_norm_alignment_metrics + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + anchors = unmap(anchors, num_total_anchors, inside_flags) + labels = unmap( + labels, num_total_anchors, inside_flags, fill=self.num_classes) + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + norm_alignment_metrics = unmap(norm_alignment_metrics, + num_total_anchors, inside_flags) + return (anchors, labels, label_weights, bbox_targets, + norm_alignment_metrics) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/vfnet_head.py b/downstream/mmdetection/mmdet/models/dense_heads/vfnet_head.py new file mode 100644 index 0000000..ba285e2 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/vfnet_head.py @@ -0,0 +1,740 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import warnings
+
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, Scale
+from mmcv.ops import DeformConv2d
+from mmcv.runner import force_fp32
+
+from mmdet.core import (MlvlPointGenerator, bbox_overlaps, build_assigner,
+                        build_prior_generator, build_sampler, multi_apply,
+                        reduce_mean)
+from ..builder import HEADS, build_loss
+from .atss_head import ATSSHead
+from .fcos_head import FCOSHead
+
+INF = 1e8
+
+
+@HEADS.register_module()
+class VFNetHead(ATSSHead, FCOSHead):
+    """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
+    Detector.`_.
+
+    The VFNet predicts IoU-aware classification scores which mix the
+    object presence confidence and object localization accuracy as the
+    detection score. It is built on the FCOS architecture and uses ATSS
+    for defining positive/negative training examples. The VFNet is trained
+    with Varifocal Loss and employs star-shaped deformable convolution to
+    extract features for a bbox.
+
+    Args:
+        num_classes (int): Number of categories excluding the background
+            category.
+        in_channels (int): Number of channels in the input feature map.
+        regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
+            level points.
+        center_sampling (bool): If true, use center sampling. Default: False.
+        center_sample_radius (float): Radius of center sampling. Default: 1.5.
+        sync_num_pos (bool): If true, synchronize the number of positive
+            examples across GPUs. Default: True.
+        gradient_mul (float): The multiplier to gradients from bbox refinement
+            and recognition. Default: 0.1.
+        bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
+            'stride'. Default: 'reg_denom'.
+        loss_cls_fl (dict): Config of focal loss.
+        use_vfl (bool): If true, use varifocal loss for training.
+            Default: True.
+        loss_cls (dict): Config of varifocal loss.
+        loss_bbox (dict): Config of localization loss, GIoU Loss.
+        loss_bbox_refine (dict): Config of localization refinement loss,
+            GIoU Loss.
+        norm_cfg (dict): Dictionary to construct and config norm layer.
+            Default: norm_cfg=dict(type='GN', num_groups=32,
+                requires_grad=True).
+        use_atss (bool): If true, use ATSS to define positive/negative
+            examples. Default: True.
+        reg_decoded_bbox (bool): If true, the regression loss is applied
+            directly on decoded bounding boxes. Default: True.
+        anchor_generator (dict): Config of anchor generator for ATSS.
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+ + Example: + >>> self = VFNetHead(11, 7) + >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] + >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats) + >>> assert len(cls_score) == len(self.scales) + """ # noqa: E501 + + def __init__(self, + num_classes, + in_channels, + regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), + (512, INF)), + center_sampling=False, + center_sample_radius=1.5, + sync_num_pos=True, + gradient_mul=0.1, + bbox_norm_type='reg_denom', + loss_cls_fl=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + use_vfl=True, + loss_cls=dict( + type='VarifocalLoss', + use_sigmoid=True, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.5), + loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + use_atss=True, + reg_decoded_bbox=True, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + center_offset=0.0, + strides=[8, 16, 32, 64, 128]), + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='vfnet_cls', + std=0.01, + bias_prob=0.01)), + **kwargs): + # dcn base offsets, adapted from reppoints_head.py + self.num_dconv_points = 9 + self.dcn_kernel = int(np.sqrt(self.num_dconv_points)) + self.dcn_pad = int((self.dcn_kernel - 1) / 2) + dcn_base = np.arange(-self.dcn_pad, + self.dcn_pad + 1).astype(np.float64) + dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) + dcn_base_x = np.tile(dcn_base, self.dcn_kernel) + dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( + (-1)) + self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) + + super(FCOSHead, self).__init__( + num_classes, + in_channels, + norm_cfg=norm_cfg, + init_cfg=init_cfg, + **kwargs) + self.regress_ranges = regress_ranges + self.reg_denoms = [ + regress_range[-1] for regress_range in regress_ranges + ] + self.reg_denoms[-1] = self.reg_denoms[-2] * 2 + self.center_sampling = center_sampling + self.center_sample_radius = center_sample_radius + self.sync_num_pos = sync_num_pos + self.bbox_norm_type = bbox_norm_type + self.gradient_mul = gradient_mul + self.use_vfl = use_vfl + if self.use_vfl: + self.loss_cls = build_loss(loss_cls) + else: + self.loss_cls = build_loss(loss_cls_fl) + self.loss_bbox = build_loss(loss_bbox) + self.loss_bbox_refine = build_loss(loss_bbox_refine) + + # for getting ATSS targets + self.use_atss = use_atss + self.reg_decoded_bbox = reg_decoded_bbox + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + + self.anchor_center_offset = anchor_generator['center_offset'] + + self.num_base_priors = self.prior_generator.num_base_priors[0] + + self.sampling = False + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + # only be used in `get_atss_targets` when `use_atss` is True + self.atss_prior_generator = build_prior_generator(anchor_generator) + + self.fcos_prior_generator = MlvlPointGenerator( + anchor_generator['strides'], + self.anchor_center_offset if self.use_atss else 0.5) + + # In order to reuse the `get_bboxes` in `BaseDenseHead. + # Only be used in testing phase. 
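+        # The anchors from `atss_prior_generator` are only consulted by
+        # `get_atss_targets` during training, while the point-style
+        # `fcos_prior_generator` drives prediction decoding, hence the
+        # alias below.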
+ self.prior_generator = self.fcos_prior_generator + + @property + def num_anchors(self): + """ + Returns: + int: Number of anchors on each point of feature map. + """ + warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' + 'please use "num_base_priors" instead') + return self.num_base_priors + + @property + def anchor_generator(self): + warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' + 'please use "atss_prior_generator" instead') + return self.prior_generator + + def _init_layers(self): + """Initialize layers of the head.""" + super(FCOSHead, self)._init_cls_convs() + super(FCOSHead, self)._init_reg_convs() + self.relu = nn.ReLU(inplace=True) + self.vfnet_reg_conv = ConvModule( + self.feat_channels, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.conv_bias) + self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) + self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) + + self.vfnet_reg_refine_dconv = DeformConv2d( + self.feat_channels, + self.feat_channels, + self.dcn_kernel, + 1, + padding=self.dcn_pad) + self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1) + self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides]) + + self.vfnet_cls_dconv = DeformConv2d( + self.feat_channels, + self.feat_channels, + self.dcn_kernel, + 1, + padding=self.dcn_pad) + self.vfnet_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: + cls_scores (list[Tensor]): Box iou-aware scores for each scale + level, each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box offsets for each + scale level, each is a 4D-tensor, the channel number is + num_points * 4. + bbox_preds_refine (list[Tensor]): Refined Box offsets for + each scale level, each is a 4D-tensor, the channel + number is num_points * 4. + """ + return multi_apply(self.forward_single, feats, self.scales, + self.scales_refine, self.strides, self.reg_denoms) + + def forward_single(self, x, scale, scale_refine, stride, reg_denom): + """Forward features of a single scale level. + + Args: + x (Tensor): FPN feature maps of the specified stride. + scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize + the bbox prediction. + scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to + resize the refined bbox prediction. + stride (int): The corresponding stride for feature maps, + used to normalize the bbox prediction when + bbox_norm_type = 'stride'. + reg_denom (int): The corresponding regression range for feature + maps, only used to normalize the bbox prediction when + bbox_norm_type = 'reg_denom'. + + Returns: + tuple: iou-aware cls scores for each box, bbox predictions and + refined bbox predictions of input feature maps. 
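+
+        Note:
+            During training all three outputs are returned; at test time only
+            the iou-aware cls score and the refined bbox prediction are
+            returned.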
+ """ + cls_feat = x + reg_feat = x + + for cls_layer in self.cls_convs: + cls_feat = cls_layer(cls_feat) + + for reg_layer in self.reg_convs: + reg_feat = reg_layer(reg_feat) + + # predict the bbox_pred of different level + reg_feat_init = self.vfnet_reg_conv(reg_feat) + if self.bbox_norm_type == 'reg_denom': + bbox_pred = scale( + self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom + elif self.bbox_norm_type == 'stride': + bbox_pred = scale( + self.vfnet_reg(reg_feat_init)).float().exp() * stride + else: + raise NotImplementedError + + # compute star deformable convolution offsets + # converting dcn_offset to reg_feat.dtype thus VFNet can be + # trained with FP16 + dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul, + stride).to(reg_feat.dtype) + + # refine the bbox_pred + reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset)) + bbox_pred_refine = scale_refine( + self.vfnet_reg_refine(reg_feat)).float().exp() + bbox_pred_refine = bbox_pred_refine * bbox_pred.detach() + + # predict the iou-aware cls score + cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset)) + cls_score = self.vfnet_cls(cls_feat) + + if self.training: + return cls_score, bbox_pred, bbox_pred_refine + else: + return cls_score, bbox_pred_refine + + def star_dcn_offset(self, bbox_pred, gradient_mul, stride): + """Compute the star deformable conv offsets. + + Args: + bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b). + gradient_mul (float): Gradient multiplier. + stride (int): The corresponding stride for feature maps, + used to project the bbox onto the feature map. + + Returns: + dcn_offsets (Tensor): The offsets for deformable convolution. + """ + dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred) + bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \ + gradient_mul * bbox_pred + # map to the feature map scale + bbox_pred_grad_mul = bbox_pred_grad_mul / stride + N, C, H, W = bbox_pred.size() + + x1 = bbox_pred_grad_mul[:, 0, :, :] + y1 = bbox_pred_grad_mul[:, 1, :, :] + x2 = bbox_pred_grad_mul[:, 2, :, :] + y2 = bbox_pred_grad_mul[:, 3, :, :] + bbox_pred_grad_mul_offset = bbox_pred.new_zeros( + N, 2 * self.num_dconv_points, H, W) + bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1 + bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1 + bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1 + bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1 + bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2 + bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1 + bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2 + bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2 + bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1 + bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2 + bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2 + bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2 + dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset + + return dcn_offset + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine')) + def loss(self, + cls_scores, + bbox_preds, + bbox_preds_refine, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box iou-aware scores for each scale + level, each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box offsets for each + scale level, each is a 4D-tensor, the channel number is + num_points * 4. 
+ bbox_preds_refine (list[Tensor]): Refined Box offsets for + each scale level, each is a 4D-tensor, the channel + number is num_points * 4. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + Default: None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + all_level_points = self.fcos_prior_generator.grid_priors( + featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) + labels, label_weights, bbox_targets, bbox_weights = self.get_targets( + cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas, + gt_bboxes_ignore) + + num_imgs = cls_scores[0].size(0) + # flatten cls_scores, bbox_preds and bbox_preds_refine + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, + 1).reshape(-1, + self.cls_out_channels).contiguous() + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() + for bbox_pred in bbox_preds + ] + flatten_bbox_preds_refine = [ + bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() + for bbox_pred_refine in bbox_preds_refine + ] + flatten_cls_scores = torch.cat(flatten_cls_scores) + flatten_bbox_preds = torch.cat(flatten_bbox_preds) + flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine) + flatten_labels = torch.cat(labels) + flatten_bbox_targets = torch.cat(bbox_targets) + # repeat points to align with bbox_preds + flatten_points = torch.cat( + [points.repeat(num_imgs, 1) for points in all_level_points]) + + # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = torch.where( + ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0] + num_pos = len(pos_inds) + + pos_bbox_preds = flatten_bbox_preds[pos_inds] + pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds] + pos_labels = flatten_labels[pos_inds] + + # sync num_pos across all gpus + if self.sync_num_pos: + num_pos_avg_per_gpu = reduce_mean( + pos_inds.new_tensor(num_pos).float()).item() + num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0) + else: + num_pos_avg_per_gpu = num_pos + + pos_bbox_targets = flatten_bbox_targets[pos_inds] + pos_points = flatten_points[pos_inds] + + pos_decoded_bbox_preds = self.bbox_coder.decode( + pos_points, pos_bbox_preds) + pos_decoded_target_preds = self.bbox_coder.decode( + pos_points, pos_bbox_targets) + iou_targets_ini = bbox_overlaps( + pos_decoded_bbox_preds, + pos_decoded_target_preds.detach(), + is_aligned=True).clamp(min=1e-6) + bbox_weights_ini = iou_targets_ini.clone().detach() + bbox_avg_factor_ini = reduce_mean( + bbox_weights_ini.sum()).clamp_(min=1).item() + + pos_decoded_bbox_preds_refine = \ + self.bbox_coder.decode(pos_points, pos_bbox_preds_refine) + iou_targets_rf = bbox_overlaps( + pos_decoded_bbox_preds_refine, + pos_decoded_target_preds.detach(), + is_aligned=True).clamp(min=1e-6) + bbox_weights_rf = iou_targets_rf.clone().detach() + bbox_avg_factor_rf = reduce_mean( + bbox_weights_rf.sum()).clamp_(min=1).item() + + if num_pos > 0: + loss_bbox = self.loss_bbox( + pos_decoded_bbox_preds, + 
pos_decoded_target_preds.detach(), + weight=bbox_weights_ini, + avg_factor=bbox_avg_factor_ini) + + loss_bbox_refine = self.loss_bbox_refine( + pos_decoded_bbox_preds_refine, + pos_decoded_target_preds.detach(), + weight=bbox_weights_rf, + avg_factor=bbox_avg_factor_rf) + + # build IoU-aware cls_score targets + if self.use_vfl: + pos_ious = iou_targets_rf.clone().detach() + cls_iou_targets = torch.zeros_like(flatten_cls_scores) + cls_iou_targets[pos_inds, pos_labels] = pos_ious + else: + loss_bbox = pos_bbox_preds.sum() * 0 + loss_bbox_refine = pos_bbox_preds_refine.sum() * 0 + if self.use_vfl: + cls_iou_targets = torch.zeros_like(flatten_cls_scores) + + if self.use_vfl: + loss_cls = self.loss_cls( + flatten_cls_scores, + cls_iou_targets, + avg_factor=num_pos_avg_per_gpu) + else: + loss_cls = self.loss_cls( + flatten_cls_scores, + flatten_labels, + weight=label_weights, + avg_factor=num_pos_avg_per_gpu) + + return dict( + loss_cls=loss_cls, + loss_bbox=loss_bbox, + loss_bbox_rf=loss_bbox_refine) + + def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels, + img_metas, gt_bboxes_ignore): + """A wrapper for computing ATSS and FCOS targets for points in multiple + images. + + Args: + cls_scores (list[Tensor]): Box iou-aware scores for each scale + level with shape (N, num_points * num_classes, H, W). + mlvl_points (list[Tensor]): Points of each fpn level, each has + shape (num_points, 2). + gt_bboxes (list[Tensor]): Ground truth bboxes of each image, + each has shape (num_gt, 4). + gt_labels (list[Tensor]): Ground truth labels of each box, + each has shape (num_gt,). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + + Returns: + tuple: + labels_list (list[Tensor]): Labels of each level. + label_weights (Tensor/None): Label weights of all levels. + bbox_targets_list (list[Tensor]): Regression targets of each + level, (l, t, r, b). + bbox_weights (Tensor/None): Bbox weights of all levels. + """ + if self.use_atss: + return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes, + gt_labels, img_metas, + gt_bboxes_ignore) + else: + self.norm_on_bbox = False + return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels) + + def _get_target_single(self, *args, **kwargs): + """Avoid ambiguity in multiple inheritance.""" + if self.use_atss: + return ATSSHead._get_target_single(self, *args, **kwargs) + else: + return FCOSHead._get_target_single(self, *args, **kwargs) + + def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list): + """Compute FCOS regression and classification targets for points in + multiple images. + + Args: + points (list[Tensor]): Points of each fpn level, each has shape + (num_points, 2). + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, + each has shape (num_gt, 4). + gt_labels_list (list[Tensor]): Ground truth labels of each box, + each has shape (num_gt,). + + Returns: + tuple: + labels (list[Tensor]): Labels of each level. + label_weights: None, to be compatible with ATSS targets. + bbox_targets (list[Tensor]): BBox targets of each level. + bbox_weights: None, to be compatible with ATSS targets. 
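+
+        Note:
+            The two ``None`` entries only keep the signature compatible with
+            the ATSS target path; ``loss`` re-weights the box losses with
+            IoU-based weights computed on the fly.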
+ """ + labels, bbox_targets = FCOSHead.get_targets(self, points, + gt_bboxes_list, + gt_labels_list) + label_weights = None + bbox_weights = None + return labels, label_weights, bbox_targets, bbox_weights + + def get_anchors(self, featmap_sizes, img_metas, device='cuda'): + """Get anchors according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. + device (torch.device | str): Device for returned tensors + + Returns: + tuple: + anchor_list (list[Tensor]): Anchors of each image. + valid_flag_list (list[Tensor]): Valid flags of each image. + """ + num_imgs = len(img_metas) + + # since feature map sizes of all images are the same, we only compute + # anchors for one time + multi_level_anchors = self.atss_prior_generator.grid_priors( + featmap_sizes, device=device) + anchor_list = [multi_level_anchors for _ in range(num_imgs)] + + # for each image, we compute valid flags of multi level anchors + valid_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = self.atss_prior_generator.valid_flags( + featmap_sizes, img_meta['pad_shape'], device=device) + valid_flag_list.append(multi_level_flags) + + return anchor_list, valid_flag_list + + def get_atss_targets(self, + cls_scores, + mlvl_points, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """A wrapper for computing ATSS targets for points in multiple images. + + Args: + cls_scores (list[Tensor]): Box iou-aware scores for each scale + level with shape (N, num_points * num_classes, H, W). + mlvl_points (list[Tensor]): Points of each fpn level, each has + shape (num_points, 2). + gt_bboxes (list[Tensor]): Ground truth bboxes of each image, + each has shape (num_gt, 4). + gt_labels (list[Tensor]): Ground truth labels of each box, + each has shape (num_gt,). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). Default: None. + + Returns: + tuple: + labels_list (list[Tensor]): Labels of each level. + label_weights (Tensor): Label weights of all levels. + bbox_targets_list (list[Tensor]): Regression targets of each + level, (l, t, r, b). + bbox_weights (Tensor): Bbox weights of all levels. 
+ """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len( + featmap_sizes + ) == self.atss_prior_generator.num_levels == \ + self.fcos_prior_generator.num_levels + + device = cls_scores[0].device + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + + cls_reg_targets = ATSSHead.get_targets( + self, + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + unmap_outputs=True) + if cls_reg_targets is None: + return None + + (anchor_list, labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets + + bbox_targets_list = [ + bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list + ] + + num_imgs = len(img_metas) + # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format + bbox_targets_list = self.transform_bbox_targets( + bbox_targets_list, mlvl_points, num_imgs) + + labels_list = [labels.reshape(-1) for labels in labels_list] + label_weights_list = [ + label_weights.reshape(-1) for label_weights in label_weights_list + ] + bbox_weights_list = [ + bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list + ] + label_weights = torch.cat(label_weights_list) + bbox_weights = torch.cat(bbox_weights_list) + return labels_list, label_weights, bbox_targets_list, bbox_weights + + def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs): + """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format. + + Args: + decoded_bboxes (list[Tensor]): Regression targets of each level, + in the form of (x1, y1, x2, y2). + mlvl_points (list[Tensor]): Points of each fpn level, each has + shape (num_points, 2). + num_imgs (int): the number of images in a batch. + + Returns: + bbox_targets (list[Tensor]): Regression targets of each level in + the form of (l, t, r, b). + """ + # TODO: Re-implemented in Class PointCoder + assert len(decoded_bboxes) == len(mlvl_points) + num_levels = len(decoded_bboxes) + mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points] + bbox_targets = [] + for i in range(num_levels): + bbox_target = self.bbox_coder.encode(mlvl_points[i], + decoded_bboxes[i]) + bbox_targets.append(bbox_target) + + return bbox_targets + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """Override the method in the parent class to avoid changing para's + name.""" + pass + + def _get_points_single(self, + featmap_size, + stride, + dtype, + device, + flatten=False): + """Get points according to feature map size. + + This function will be deprecated soon. 
+ """ + + warnings.warn( + '`_get_points_single` in `VFNetHead` will be ' + 'deprecated soon, we support a multi level point generator now' + 'you can get points of a single level feature map' + 'with `self.fcos_prior_generator.single_level_grid_priors` ') + + h, w = featmap_size + x_range = torch.arange( + 0, w * stride, stride, dtype=dtype, device=device) + y_range = torch.arange( + 0, h * stride, stride, dtype=dtype, device=device) + y, x = torch.meshgrid(y_range, x_range) + # to be compatible with anchor points in ATSS + if self.use_atss: + points = torch.stack( + (x.reshape(-1), y.reshape(-1)), dim=-1) + \ + stride * self.anchor_center_offset + else: + points = torch.stack( + (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2 + return points diff --git a/downstream/mmdetection/mmdet/models/dense_heads/yolact_head.py b/downstream/mmdetection/mmdet/models/dense_heads/yolact_head.py new file mode 100644 index 0000000..8f89a27 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/yolact_head.py @@ -0,0 +1,1018 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, ModuleList, force_fp32 + +from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply +from mmdet.core.utils import select_single_mlvl +from ..builder import HEADS, build_loss +from .anchor_head import AnchorHead + + +@HEADS.register_module() +class YOLACTHead(AnchorHead): + """YOLACT box head used in https://arxiv.org/abs/1904.02689. + + Note that YOLACT head is a light version of RetinaNet head. + Four differences are described as follows: + + 1. YOLACT box head has three-times fewer anchors. + 2. YOLACT box head shares the convs for box and cls branches. + 3. YOLACT box head uses OHEM instead of Focal loss. + 4. YOLACT box head predicts a set of mask coefficients for each box. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + anchor_generator (dict): Config dict for anchor generator + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + num_head_convs (int): Number of the conv layers shared by + box and cls branches. + num_protos (int): Number of the mask coefficients. + use_ohem (bool): If true, ``loss_single_OHEM`` will be used for + cls loss calculation. If false, ``loss_single`` will be used. + conv_cfg (dict): Dictionary to construct and config conv layer. + norm_cfg (dict): Dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + num_classes, + in_channels, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=3, + scales_per_octave=1, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + reduction='none', + loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=1.5), + num_head_convs=1, + num_protos=32, + use_ohem=True, + conv_cfg=None, + norm_cfg=None, + init_cfg=dict( + type='Xavier', + distribution='uniform', + bias=0, + layer='Conv2d'), + **kwargs): + self.num_head_convs = num_head_convs + self.num_protos = num_protos + self.use_ohem = use_ohem + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + super(YOLACTHead, self).__init__( + num_classes, + in_channels, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + anchor_generator=anchor_generator, + init_cfg=init_cfg, + **kwargs) + if self.use_ohem: + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.sampling = False + + def _init_layers(self): + """Initialize layers of the head.""" + self.relu = nn.ReLU(inplace=True) + self.head_convs = ModuleList() + for i in range(self.num_head_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.head_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.conv_cls = nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.cls_out_channels, + 3, + padding=1) + self.conv_reg = nn.Conv2d( + self.feat_channels, self.num_base_priors * 4, 3, padding=1) + self.conv_coeff = nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.num_protos, + 3, + padding=1) + + def forward_single(self, x): + """Forward feature of a single scale level. + + Args: + x (Tensor): Features of a single scale level. + + Returns: + tuple: + cls_score (Tensor): Cls scores for a single scale level \ + the channels number is num_anchors * num_classes. + bbox_pred (Tensor): Box energies / deltas for a single scale \ + level, the channels number is num_anchors * 4. + coeff_pred (Tensor): Mask coefficients for a single scale \ + level, the channels number is num_anchors * num_protos. + """ + for head_conv in self.head_convs: + x = head_conv(x) + cls_score = self.conv_cls(x) + bbox_pred = self.conv_reg(x) + coeff_pred = self.conv_coeff(x).tanh() + return cls_score, bbox_pred, coeff_pred + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """A combination of the func:``AnchorHead.loss`` and + func:``SSDHead.loss``. + + When ``self.use_ohem == True``, it functions like ``SSDHead.loss``, + otherwise, it follows ``AnchorHead.loss``. Besides, it additionally + returns ``sampling_results``. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): Class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): Specify which bounding + boxes can be ignored when computing the loss. 
Default: None + + Returns: + tuple: + dict[str, Tensor]: A dictionary of loss components. + List[:obj:``SamplingResult``]: Sampler results for each image. + """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + device = cls_scores[0].device + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + unmap_outputs=not self.use_ohem, + return_sampling_results=True) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg, sampling_results) = cls_reg_targets + + if self.use_ohem: + num_images = len(img_metas) + all_cls_scores = torch.cat([ + s.permute(0, 2, 3, 1).reshape( + num_images, -1, self.cls_out_channels) for s in cls_scores + ], 1) + all_labels = torch.cat(labels_list, -1).view(num_images, -1) + all_label_weights = torch.cat(label_weights_list, + -1).view(num_images, -1) + all_bbox_preds = torch.cat([ + b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) + for b in bbox_preds + ], -2) + all_bbox_targets = torch.cat(bbox_targets_list, + -2).view(num_images, -1, 4) + all_bbox_weights = torch.cat(bbox_weights_list, + -2).view(num_images, -1, 4) + + # concat all level anchors to a single tensor + all_anchors = [] + for i in range(num_images): + all_anchors.append(torch.cat(anchor_list[i])) + + # check NaN and Inf + assert torch.isfinite(all_cls_scores).all().item(), \ + 'classification scores become infinite or NaN!' + assert torch.isfinite(all_bbox_preds).all().item(), \ + 'bbox predications become infinite or NaN!' 
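+            # The per-image tensors assembled above let `loss_single_OHEM`
+            # mine the hardest negatives within each image at the
+            # `neg_pos_ratio` configured in `train_cfg` (see below).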
+ + losses_cls, losses_bbox = multi_apply( + self.loss_single_OHEM, + all_cls_scores, + all_bbox_preds, + all_anchors, + all_labels, + all_label_weights, + all_bbox_targets, + all_bbox_weights, + num_total_samples=num_total_pos) + else: + num_total_samples = ( + num_total_pos + + num_total_neg if self.sampling else num_total_pos) + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + # concat all level anchors and flags to a single tensor + concat_anchor_list = [] + for i in range(len(anchor_list)): + concat_anchor_list.append(torch.cat(anchor_list[i])) + all_anchor_list = images_to_levels(concat_anchor_list, + num_level_anchors) + losses_cls, losses_bbox = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + all_anchor_list, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples) + + return dict( + loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results + + def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels, + label_weights, bbox_targets, bbox_weights, + num_total_samples): + """"See func:``SSDHead.loss``.""" + loss_cls_all = self.loss_cls(cls_score, labels, label_weights) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( + as_tuple=False).reshape(-1) + neg_inds = (labels == self.num_classes).nonzero( + as_tuple=False).view(-1) + + num_pos_samples = pos_inds.size(0) + if num_pos_samples == 0: + num_neg_samples = neg_inds.size(0) + else: + num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples + if num_neg_samples > neg_inds.size(0): + num_neg_samples = neg_inds.size(0) + topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) + loss_cls_pos = loss_cls_all[pos_inds].sum() + loss_cls_neg = topk_loss_cls_neg.sum() + loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples + if self.reg_decoded_bbox: + # When the regression loss (e.g. `IouLoss`, `GIouLoss`) + # is applied directly on the decoded bounding boxes, it + # decodes the already encoded coordinates to absolute format. + bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) + loss_bbox = self.loss_bbox( + bbox_pred, + bbox_targets, + bbox_weights, + avg_factor=num_total_samples) + return loss_cls[None], loss_bbox + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + coeff_preds, + img_metas, + cfg=None, + rescale=False): + """"Similar to func:``AnchorHead.get_bboxes``, but additionally + processes coeff_preds. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + with shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + coeff_preds (list[Tensor]): Mask coefficients for each scale + level with shape (N, num_anchors * num_protos, H, W) + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cfg (mmcv.Config | None): Test / postprocessing configuration, + if None, test_cfg would be used + rescale (bool): If True, return boxes in original image space. + Default: False. + + Returns: + list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is + a 3-tuple. The first item is an (n, 5) tensor, where the + first 4 columns are bounding box positions + (tl_x, tl_y, br_x, br_y) and the 5-th column is a score + between 0 and 1. 
The second item is an (n,) tensor where each + item is the predicted class label of the corresponding box. + The third item is an (n, num_protos) tensor where each item + is the predicted mask coefficients of instance inside the + corresponding box. + """ + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + + device = cls_scores[0].device + featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] + mlvl_anchors = self.prior_generator.grid_priors( + featmap_sizes, device=device) + + det_bboxes = [] + det_labels = [] + det_coeffs = [] + for img_id in range(len(img_metas)): + cls_score_list = select_single_mlvl(cls_scores, img_id) + bbox_pred_list = select_single_mlvl(bbox_preds, img_id) + coeff_pred_list = select_single_mlvl(coeff_preds, img_id) + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list, + coeff_pred_list, mlvl_anchors, + img_shape, scale_factor, cfg, + rescale) + det_bboxes.append(bbox_res[0]) + det_labels.append(bbox_res[1]) + det_coeffs.append(bbox_res[2]) + return det_bboxes, det_labels, det_coeffs + + def _get_bboxes_single(self, + cls_score_list, + bbox_pred_list, + coeff_preds_list, + mlvl_anchors, + img_shape, + scale_factor, + cfg, + rescale=False): + """"Similar to func:``AnchorHead._get_bboxes_single``, but additionally + processes coeff_preds_list and uses fast NMS instead of traditional + NMS. + + Args: + cls_score_list (list[Tensor]): Box scores for a single scale level + Has shape (num_anchors * num_classes, H, W). + bbox_pred_list (list[Tensor]): Box energies / deltas for a single + scale level with shape (num_anchors * 4, H, W). + coeff_preds_list (list[Tensor]): Mask coefficients for a single + scale level with shape (num_anchors * num_protos, H, W). + mlvl_anchors (list[Tensor]): Box reference for a single scale level + with shape (num_total_anchors, 4). + img_shape (tuple[int]): Shape of the input image, + (height, width, 3). + scale_factor (ndarray): Scale factor of the image arange as + (w_scale, h_scale, w_scale, h_scale). + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + + Returns: + tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor, + where the first 4 columns are bounding box positions + (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between + 0 and 1. The second item is an (n,) tensor where each item is + the predicted class label of the corresponding box. The third + item is an (n, num_protos) tensor where each item is the + predicted mask coefficients of instance inside the + corresponding box. + """ + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) + nms_pre = cfg.get('nms_pre', -1) + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_coeffs = [] + for cls_score, bbox_pred, coeff_pred, anchors in \ + zip(cls_score_list, bbox_pred_list, + coeff_preds_list, mlvl_anchors): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + coeff_pred = coeff_pred.permute(1, 2, + 0).reshape(-1, self.num_protos) + + if 0 < nms_pre < scores.shape[0]: + # Get maximum scores for foreground classes. 
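+                # Keep only the top `nms_pre` priors per level (ranked by
+                # their best foreground score) before decoding and fast NMS.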
+ if self.use_sigmoid_cls: + max_scores, _ = scores.max(dim=1) + else: + # remind that we set FG labels to [0, num_class-1] + # since mmdet v2.0 + # BG cat_id: num_class + max_scores, _ = scores[:, :-1].max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + anchors = anchors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + coeff_pred = coeff_pred[topk_inds, :] + bboxes = self.bbox_coder.decode( + anchors, bbox_pred, max_shape=img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_coeffs.append(coeff_pred) + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + mlvl_coeffs = torch.cat(mlvl_coeffs) + if self.use_sigmoid_cls: + # Add a dummy background class to the backend when using sigmoid + # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 + # BG cat_id: num_class + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) + det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores, + mlvl_coeffs, + cfg.score_thr, + cfg.iou_thr, cfg.top_k, + cfg.max_per_img) + return det_bboxes, det_labels, det_coeffs + + +@HEADS.register_module() +class YOLACTSegmHead(BaseModule): + """YOLACT segmentation head used in https://arxiv.org/abs/1904.02689. + + Apply a semantic segmentation loss on feature space using layers that are + only evaluated during training to increase performance with no speed + penalty. + + Args: + in_channels (int): Number of channels in the input feature map. + num_classes (int): Number of categories excluding the background + category. + loss_segm (dict): Config of semantic segmentation loss. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_classes, + in_channels=256, + loss_segm=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + init_cfg=dict( + type='Xavier', + distribution='uniform', + override=dict(name='segm_conv'))): + super(YOLACTSegmHead, self).__init__(init_cfg) + self.in_channels = in_channels + self.num_classes = num_classes + self.loss_segm = build_loss(loss_segm) + self._init_layers() + self.fp16_enabled = False + + def _init_layers(self): + """Initialize layers of the head.""" + self.segm_conv = nn.Conv2d( + self.in_channels, self.num_classes, kernel_size=1) + + def forward(self, x): + """Forward feature from the upstream network. + + Args: + x (Tensor): Feature from the upstream network, which is + a 4D-tensor. + + Returns: + Tensor: Predicted semantic segmentation map with shape + (N, num_classes, H, W). + """ + return self.segm_conv(x) + + @force_fp32(apply_to=('segm_pred', )) + def loss(self, segm_pred, gt_masks, gt_labels): + """Compute loss of the head. + + Args: + segm_pred (list[Tensor]): Predicted semantic segmentation map + with shape (N, num_classes, H, W). + gt_masks (list[Tensor]): Ground truth masks for each image with + the same shape of the input image. + gt_labels (list[Tensor]): Class indices corresponding to each box. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
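+
+        Note:
+            Targets are built per image by ``get_targets``, which downsamples
+            the instance masks to the prediction size and takes a per-class
+            maximum, so the loss reduces to a binary cross-entropy over
+            ``num_classes`` semantic maps.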
+ """ + loss_segm = [] + num_imgs, num_classes, mask_h, mask_w = segm_pred.size() + for idx in range(num_imgs): + cur_segm_pred = segm_pred[idx] + cur_gt_masks = gt_masks[idx].float() + cur_gt_labels = gt_labels[idx] + segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks, + cur_gt_labels) + if segm_targets is None: + loss = self.loss_segm(cur_segm_pred, + torch.zeros_like(cur_segm_pred), + torch.zeros_like(cur_segm_pred)) + else: + loss = self.loss_segm( + cur_segm_pred, + segm_targets, + avg_factor=num_imgs * mask_h * mask_w) + loss_segm.append(loss) + return dict(loss_segm=loss_segm) + + def get_targets(self, segm_pred, gt_masks, gt_labels): + """Compute semantic segmentation targets for each image. + + Args: + segm_pred (Tensor): Predicted semantic segmentation map + with shape (num_classes, H, W). + gt_masks (Tensor): Ground truth masks for each image with + the same shape of the input image. + gt_labels (Tensor): Class indices corresponding to each box. + + Returns: + Tensor: Semantic segmentation targets with shape + (num_classes, H, W). + """ + if gt_masks.size(0) == 0: + return None + num_classes, mask_h, mask_w = segm_pred.size() + with torch.no_grad(): + downsampled_masks = F.interpolate( + gt_masks.unsqueeze(0), (mask_h, mask_w), + mode='bilinear', + align_corners=False).squeeze(0) + downsampled_masks = downsampled_masks.gt(0.5).float() + segm_targets = torch.zeros_like(segm_pred, requires_grad=False) + for obj_idx in range(downsampled_masks.size(0)): + segm_targets[gt_labels[obj_idx] - 1] = torch.max( + segm_targets[gt_labels[obj_idx] - 1], + downsampled_masks[obj_idx]) + return segm_targets + + def simple_test(self, feats, img_metas, rescale=False): + """Test function without test-time augmentation.""" + raise NotImplementedError( + 'simple_test of YOLACTSegmHead is not implemented ' + 'because this head is only evaluated during training') + + +@HEADS.register_module() +class YOLACTProtonet(BaseModule): + """YOLACT mask head used in https://arxiv.org/abs/1904.02689. + + This head outputs the mask prototypes for YOLACT. + + Args: + in_channels (int): Number of channels in the input feature map. + proto_channels (tuple[int]): Output channels of protonet convs. + proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs. + include_last_relu (Bool): If keep the last relu of protonet. + num_protos (int): Number of prototypes. + num_classes (int): Number of categories excluding the background + category. + loss_mask_weight (float): Reweight the mask loss by this factor. + max_masks_to_train (int): Maximum number of masks to train for + each image. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + num_classes, + in_channels=256, + proto_channels=(256, 256, 256, None, 256, 32), + proto_kernel_sizes=(3, 3, 3, -2, 3, 1), + include_last_relu=True, + num_protos=32, + loss_mask_weight=1.0, + max_masks_to_train=100, + init_cfg=dict( + type='Xavier', + distribution='uniform', + override=dict(name='protonet'))): + super(YOLACTProtonet, self).__init__(init_cfg) + self.in_channels = in_channels + self.proto_channels = proto_channels + self.proto_kernel_sizes = proto_kernel_sizes + self.include_last_relu = include_last_relu + self.protonet = self._init_layers() + + self.loss_mask_weight = loss_mask_weight + self.num_protos = num_protos + self.num_classes = num_classes + self.max_masks_to_train = max_masks_to_train + self.fp16_enabled = False + + def _init_layers(self): + """A helper function to take a config setting and turn it into a + network.""" + # Possible patterns: + # ( 256, 3) -> conv + # ( 256,-2) -> deconv + # (None,-2) -> bilinear interpolate + in_channels = self.in_channels + protonets = ModuleList() + for num_channels, kernel_size in zip(self.proto_channels, + self.proto_kernel_sizes): + if kernel_size > 0: + layer = nn.Conv2d( + in_channels, + num_channels, + kernel_size, + padding=kernel_size // 2) + else: + if num_channels is None: + layer = InterpolateModule( + scale_factor=-kernel_size, + mode='bilinear', + align_corners=False) + else: + layer = nn.ConvTranspose2d( + in_channels, + num_channels, + -kernel_size, + padding=kernel_size // 2) + protonets.append(layer) + protonets.append(nn.ReLU(inplace=True)) + in_channels = num_channels if num_channels is not None \ + else in_channels + if not self.include_last_relu: + protonets = protonets[:-1] + return nn.Sequential(*protonets) + + def forward_dummy(self, x): + prototypes = self.protonet(x) + return prototypes + + def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None): + """Forward feature from the upstream network to get prototypes and + linearly combine the prototypes, using masks coefficients, into + instance masks. Finally, crop the instance masks with given bboxes. + + Args: + x (Tensor): Feature from the upstream network, which is + a 4D-tensor. + coeff_pred (list[Tensor]): Mask coefficients for each scale + level with shape (N, num_anchors * num_protos, H, W). + bboxes (list[Tensor]): Box used for cropping with shape + (N, num_anchors * 4, H, W). During training, they are + ground truth boxes. During testing, they are predicted + boxes. + img_meta (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + sampling_results (List[:obj:``SamplingResult``]): Sampler results + for each image. + + Returns: + list[Tensor]: Predicted instance segmentation masks. + """ + prototypes = self.protonet(x) + prototypes = prototypes.permute(0, 2, 3, 1).contiguous() + + num_imgs = x.size(0) + + # The reason for not using self.training is that + # val workflow will have a dimension mismatch error. + # Note that this writing method is very tricky. 
+ # Fix https://github.com/open-mmlab/mmdetection/issues/5978 + is_train_or_val_workflow = (coeff_pred[0].dim() == 4) + + # Train or val workflow + if is_train_or_val_workflow: + coeff_pred_list = [] + for coeff_pred_per_level in coeff_pred: + coeff_pred_per_level = \ + coeff_pred_per_level.permute( + 0, 2, 3, 1).reshape(num_imgs, -1, self.num_protos) + coeff_pred_list.append(coeff_pred_per_level) + coeff_pred = torch.cat(coeff_pred_list, dim=1) + + mask_pred_list = [] + for idx in range(num_imgs): + cur_prototypes = prototypes[idx] + cur_coeff_pred = coeff_pred[idx] + cur_bboxes = bboxes[idx] + cur_img_meta = img_meta[idx] + + # Testing state + if not is_train_or_val_workflow: + bboxes_for_cropping = cur_bboxes + else: + cur_sampling_results = sampling_results[idx] + pos_assigned_gt_inds = \ + cur_sampling_results.pos_assigned_gt_inds + bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone() + pos_inds = cur_sampling_results.pos_inds + cur_coeff_pred = cur_coeff_pred[pos_inds] + + # Linearly combine the prototypes with the mask coefficients + mask_pred = cur_prototypes @ cur_coeff_pred.t() + mask_pred = torch.sigmoid(mask_pred) + + h, w = cur_img_meta['img_shape'][:2] + bboxes_for_cropping[:, 0] /= w + bboxes_for_cropping[:, 1] /= h + bboxes_for_cropping[:, 2] /= w + bboxes_for_cropping[:, 3] /= h + + mask_pred = self.crop(mask_pred, bboxes_for_cropping) + mask_pred = mask_pred.permute(2, 0, 1).contiguous() + mask_pred_list.append(mask_pred) + return mask_pred_list + + @force_fp32(apply_to=('mask_pred', )) + def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results): + """Compute loss of the head. + + Args: + mask_pred (list[Tensor]): Predicted prototypes with shape + (num_classes, H, W). + gt_masks (list[Tensor]): Ground truth masks for each image with + the same shape of the input image. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + img_meta (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + sampling_results (List[:obj:``SamplingResult``]): Sampler results + for each image. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + loss_mask = [] + num_imgs = len(mask_pred) + total_pos = 0 + for idx in range(num_imgs): + cur_mask_pred = mask_pred[idx] + cur_gt_masks = gt_masks[idx].float() + cur_gt_bboxes = gt_bboxes[idx] + cur_img_meta = img_meta[idx] + cur_sampling_results = sampling_results[idx] + + pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds + num_pos = pos_assigned_gt_inds.size(0) + # Since we're producing (near) full image masks, + # it'd take too much vram to backprop on every single mask. + # Thus we select only a subset. + if num_pos > self.max_masks_to_train: + perm = torch.randperm(num_pos) + select = perm[:self.max_masks_to_train] + cur_mask_pred = cur_mask_pred[select] + pos_assigned_gt_inds = pos_assigned_gt_inds[select] + num_pos = self.max_masks_to_train + total_pos += num_pos + + gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds] + + mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks, + pos_assigned_gt_inds) + if num_pos == 0: + loss = cur_mask_pred.sum() * 0. 
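+                # A zero loss that still references the prediction keeps the
+                # mask branch in the autograd graph (presumably to avoid
+                # unused-parameter issues in distributed training).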
+ elif mask_targets is None: + loss = F.binary_cross_entropy(cur_mask_pred, + torch.zeros_like(cur_mask_pred), + torch.zeros_like(cur_mask_pred)) + else: + cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1) + loss = F.binary_cross_entropy( + cur_mask_pred, mask_targets, + reduction='none') * self.loss_mask_weight + + h, w = cur_img_meta['img_shape'][:2] + gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] - + gt_bboxes_for_reweight[:, 0]) / w + gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] - + gt_bboxes_for_reweight[:, 1]) / h + loss = loss.mean(dim=(1, + 2)) / gt_bboxes_width / gt_bboxes_height + loss = torch.sum(loss) + loss_mask.append(loss) + + if total_pos == 0: + total_pos += 1 # avoid nan + loss_mask = [x / total_pos for x in loss_mask] + + return dict(loss_mask=loss_mask) + + def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds): + """Compute instance segmentation targets for each image. + + Args: + mask_pred (Tensor): Predicted prototypes with shape + (num_classes, H, W). + gt_masks (Tensor): Ground truth masks for each image with + the same shape of the input image. + pos_assigned_gt_inds (Tensor): GT indices of the corresponding + positive samples. + Returns: + Tensor: Instance segmentation targets with shape + (num_instances, H, W). + """ + if gt_masks.size(0) == 0: + return None + mask_h, mask_w = mask_pred.shape[-2:] + gt_masks = F.interpolate( + gt_masks.unsqueeze(0), (mask_h, mask_w), + mode='bilinear', + align_corners=False).squeeze(0) + gt_masks = gt_masks.gt(0.5).float() + mask_targets = gt_masks[pos_assigned_gt_inds] + return mask_targets + + def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale): + """Resize, binarize, and format the instance mask predictions. + + Args: + mask_pred (Tensor): shape (N, H, W). + label_pred (Tensor): shape (N, ). + img_meta (dict): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If rescale is False, then returned masks will + fit the scale of imgs[0]. + Returns: + list[ndarray]: Mask predictions grouped by their predicted classes. + """ + ori_shape = img_meta['ori_shape'] + scale_factor = img_meta['scale_factor'] + if rescale: + img_h, img_w = ori_shape[:2] + else: + img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32) + img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32) + + cls_segms = [[] for _ in range(self.num_classes)] + if mask_pred.size(0) == 0: + return cls_segms + + mask_pred = F.interpolate( + mask_pred.unsqueeze(0), (img_h, img_w), + mode='bilinear', + align_corners=False).squeeze(0) > 0.5 + mask_pred = mask_pred.cpu().numpy().astype(np.uint8) + + for m, l in zip(mask_pred, label_pred): + cls_segms[l].append(m) + return cls_segms + + def crop(self, masks, boxes, padding=1): + """Crop predicted masks by zeroing out everything not in the predicted + bbox. + + Args: + masks (Tensor): shape [H, W, N]. + boxes (Tensor): bbox coords in relative point form with + shape [N, 4]. + + Return: + Tensor: The cropped masks. 
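+
+        Example:
+            >>> # A minimal illustrative sketch (not from the original file):
+            >>> # crop two 8x8 masks with boxes given in relative coords.
+            >>> self = YOLACTProtonet(num_classes=80)
+            >>> masks = torch.rand(8, 8, 2)
+            >>> boxes = torch.tensor([[0., 0., .5, .5], [.25, .25, 1., 1.]])
+            >>> cropped = self.crop(masks, boxes)
+            >>> assert cropped.shape == masks.shape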
+ """ + h, w, n = masks.size() + x1, x2 = self.sanitize_coordinates( + boxes[:, 0], boxes[:, 2], w, padding, cast=False) + y1, y2 = self.sanitize_coordinates( + boxes[:, 1], boxes[:, 3], h, padding, cast=False) + + rows = torch.arange( + w, device=masks.device, dtype=x1.dtype).view(1, -1, + 1).expand(h, w, n) + cols = torch.arange( + h, device=masks.device, dtype=x1.dtype).view(-1, 1, + 1).expand(h, w, n) + + masks_left = rows >= x1.view(1, 1, -1) + masks_right = rows < x2.view(1, 1, -1) + masks_up = cols >= y1.view(1, 1, -1) + masks_down = cols < y2.view(1, 1, -1) + + crop_mask = masks_left * masks_right * masks_up * masks_down + + return masks * crop_mask.float() + + def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True): + """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, + and x2 <= image_size. Also converts from relative to absolute + coordinates and casts the results to long tensors. + + Warning: this does things in-place behind the scenes so + copy if necessary. + + Args: + _x1 (Tensor): shape (N, ). + _x2 (Tensor): shape (N, ). + img_size (int): Size of the input image. + padding (int): x1 >= padding, x2 <= image_size-padding. + cast (bool): If cast is false, the result won't be cast to longs. + + Returns: + tuple: + x1 (Tensor): Sanitized _x1. + x2 (Tensor): Sanitized _x2. + """ + x1 = x1 * img_size + x2 = x2 * img_size + if cast: + x1 = x1.long() + x2 = x2.long() + x1 = torch.min(x1, x2) + x2 = torch.max(x1, x2) + x1 = torch.clamp(x1 - padding, min=0) + x2 = torch.clamp(x2 + padding, max=img_size) + return x1, x2 + + def simple_test(self, + feats, + det_bboxes, + det_labels, + det_coeffs, + img_metas, + rescale=False): + """Test function without test-time augmentation. + + Args: + feats (tuple[torch.Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + det_bboxes (list[Tensor]): BBox results of each image. each + element is (n, 5) tensor, where 5 represent + (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. + det_labels (list[Tensor]): BBox results of each image. each + element is (n, ) tensor, each element represents the class + label of the corresponding box. + det_coeffs (list[Tensor]): BBox coefficient of each image. each + element is (n, m) tensor, m is vector length. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[list]: encoded masks. The c-th item in the outer list + corresponds to the c-th class. Given the c-th outer list, the + i-th item in that inner list is the mask for the i-th box with + class label c. + """ + num_imgs = len(img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + segm_results = [[[] for _ in range(self.num_classes)] + for _ in range(num_imgs)] + else: + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. 
+ if rescale and not isinstance(scale_factors[0], float): + scale_factors = [ + torch.from_numpy(scale_factor).to(det_bboxes[0].device) + for scale_factor in scale_factors + ] + _bboxes = [ + det_bboxes[i][:, :4] * + scale_factors[i] if rescale else det_bboxes[i][:, :4] + for i in range(len(det_bboxes)) + ] + mask_preds = self.forward(feats[0], det_coeffs, _bboxes, img_metas) + # apply mask post-processing to each image individually + segm_results = [] + for i in range(num_imgs): + if det_bboxes[i].shape[0] == 0: + segm_results.append([[] for _ in range(self.num_classes)]) + else: + segm_result = self.get_seg_masks(mask_preds[i], + det_labels[i], + img_metas[i], rescale) + segm_results.append(segm_result) + return segm_results + + +class InterpolateModule(BaseModule): + """This is a module version of F.interpolate. + + Any arguments you give it just get passed along for the ride. + """ + + def __init__(self, *args, init_cfg=None, **kwargs): + super().__init__(init_cfg) + + self.args = args + self.kwargs = kwargs + + def forward(self, x): + """Forward features from the upstream network.""" + return F.interpolate(x, *self.args, **self.kwargs) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/yolo_head.py b/downstream/mmdetection/mmdet/models/dense_heads/yolo_head.py new file mode 100644 index 0000000..b446cb7 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/yolo_head.py @@ -0,0 +1,621 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Copyright (c) 2019 Western Digital Corporation or its affiliates. + +import warnings + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm, + normal_init) +from mmcv.runner import force_fp32 + +from mmdet.core import (build_assigner, build_bbox_coder, + build_prior_generator, build_sampler, images_to_levels, + multi_apply, multiclass_nms) +from ..builder import HEADS, build_loss +from .base_dense_head import BaseDenseHead +from .dense_test_mixins import BBoxTestMixin + + +@HEADS.register_module() +class YOLOV3Head(BaseDenseHead, BBoxTestMixin): + """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767. + + Args: + num_classes (int): The number of object classes (w/o background) + in_channels (List[int]): Number of input channels per scale. + out_channels (List[int]): The number of output channels per scale + before the final 1x1 layer. Default: (1024, 512, 256). + anchor_generator (dict): Config dict for anchor generator + bbox_coder (dict): Config of bounding box coder. + featmap_strides (List[int]): The stride of each scale. + Should be in descending order. Default: (32, 16, 8). + one_hot_smoother (float): Set a non-zero value to enable label-smooth + Default: 0. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True) + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + loss_cls (dict): Config of classification loss. + loss_conf (dict): Config of confidence loss. + loss_xy (dict): Config of xy coordinate loss. + loss_wh (dict): Config of wh coordinate loss. + train_cfg (dict): Training config of YOLOV3 head. Default: None. + test_cfg (dict): Testing config of YOLOV3 head. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + num_classes, + in_channels, + out_channels=(1024, 512, 256), + anchor_generator=dict( + type='YOLOAnchorGenerator', + base_sizes=[[(116, 90), (156, 198), (373, 326)], + [(30, 61), (62, 45), (59, 119)], + [(10, 13), (16, 30), (33, 23)]], + strides=[32, 16, 8]), + bbox_coder=dict(type='YOLOBBoxCoder'), + featmap_strides=[32, 16, 8], + one_hot_smoother=0., + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_conf=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_xy=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_wh=dict(type='MSELoss', loss_weight=1.0), + train_cfg=None, + test_cfg=None, + init_cfg=dict( + type='Normal', std=0.01, + override=dict(name='convs_pred'))): + super(YOLOV3Head, self).__init__(init_cfg) + # Check params + assert (len(in_channels) == len(out_channels) == len(featmap_strides)) + + self.num_classes = num_classes + self.in_channels = in_channels + self.out_channels = out_channels + self.featmap_strides = featmap_strides + self.train_cfg = train_cfg + self.test_cfg = test_cfg + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + if hasattr(self.train_cfg, 'sampler'): + sampler_cfg = self.train_cfg.sampler + else: + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.fp16_enabled = False + + self.one_hot_smoother = one_hot_smoother + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.bbox_coder = build_bbox_coder(bbox_coder) + + self.prior_generator = build_prior_generator(anchor_generator) + + self.loss_cls = build_loss(loss_cls) + self.loss_conf = build_loss(loss_conf) + self.loss_xy = build_loss(loss_xy) + self.loss_wh = build_loss(loss_wh) + + self.num_base_priors = self.prior_generator.num_base_priors[0] + assert len( + self.prior_generator.num_base_priors) == len(featmap_strides) + self._init_layers() + + @property + def anchor_generator(self): + + warnings.warn('DeprecationWarning: `anchor_generator` is deprecated, ' + 'please use "prior_generator" instead') + return self.prior_generator + + @property + def num_anchors(self): + """ + Returns: + int: Number of anchors on each point of feature map. 
+ """ + warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' + 'please use "num_base_priors" instead') + return self.num_base_priors + + @property + def num_levels(self): + return len(self.featmap_strides) + + @property + def num_attrib(self): + """int: number of attributes in pred_map, bboxes (4) + + objectness (1) + num_classes""" + + return 5 + self.num_classes + + def _init_layers(self): + self.convs_bridge = nn.ModuleList() + self.convs_pred = nn.ModuleList() + for i in range(self.num_levels): + conv_bridge = ConvModule( + self.in_channels[i], + self.out_channels[i], + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + conv_pred = nn.Conv2d(self.out_channels[i], + self.num_base_priors * self.num_attrib, 1) + + self.convs_bridge.append(conv_bridge) + self.convs_pred.append(conv_pred) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, mean=0, std=0.01) + if is_norm(m): + constant_init(m, 1) + + # Use prior in model initialization to improve stability + for conv_pred, stride in zip(self.convs_pred, self.featmap_strides): + bias = conv_pred.bias.reshape(self.num_base_priors, -1) + # init objectness with prior of 8 objects per feature map + # refer to https://github.com/ultralytics/yolov3 + nn.init.constant_(bias.data[:, 4], + bias_init_with_prob(8 / (608 / stride)**2)) + nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01)) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple[Tensor]: A tuple of multi-level predication map, each is a + 4D-tensor of shape (batch_size, 5+num_classes, height, width). + """ + + assert len(feats) == self.num_levels + pred_maps = [] + for i in range(self.num_levels): + x = feats[i] + x = self.convs_bridge[i](x) + pred_map = self.convs_pred[i](x) + pred_maps.append(pred_map) + + return tuple(pred_maps), + + @force_fp32(apply_to=('pred_maps', )) + def get_bboxes(self, + pred_maps, + img_metas, + cfg=None, + rescale=False, + with_nms=True): + """Transform network output for a batch into bbox predictions. It has + been accelerated since PR #5991. + + Args: + pred_maps (list[Tensor]): Raw predictions for a batch of images. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cfg (mmcv.Config | None): Test / postprocessing configuration, + if None, test_cfg would be used. Default: None. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is an (n, 5) tensor, where 5 represent + (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. + The shape of the second tensor in the tuple is (n,), and + each element represents the class label of the corresponding + box. 
+ """ + assert len(pred_maps) == self.num_levels + cfg = self.test_cfg if cfg is None else cfg + scale_factors = np.array( + [img_meta['scale_factor'] for img_meta in img_metas]) + + num_imgs = len(img_metas) + featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps] + + mlvl_anchors = self.prior_generator.grid_priors( + featmap_sizes, device=pred_maps[0].device) + flatten_preds = [] + flatten_strides = [] + for pred, stride in zip(pred_maps, self.featmap_strides): + pred = pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_attrib) + pred[..., :2].sigmoid_() + flatten_preds.append(pred) + flatten_strides.append( + pred.new_tensor(stride).expand(pred.size(1))) + + flatten_preds = torch.cat(flatten_preds, dim=1) + flatten_bbox_preds = flatten_preds[..., :4] + flatten_objectness = flatten_preds[..., 4].sigmoid() + flatten_cls_scores = flatten_preds[..., 5:].sigmoid() + flatten_anchors = torch.cat(mlvl_anchors) + flatten_strides = torch.cat(flatten_strides) + flatten_bboxes = self.bbox_coder.decode(flatten_anchors, + flatten_bbox_preds, + flatten_strides.unsqueeze(-1)) + + if with_nms and (flatten_objectness.size(0) == 0): + return torch.zeros((0, 5)), torch.zeros((0, )) + + if rescale: + flatten_bboxes /= flatten_bboxes.new_tensor( + scale_factors).unsqueeze(1) + + padding = flatten_bboxes.new_zeros(num_imgs, flatten_bboxes.shape[1], + 1) + flatten_cls_scores = torch.cat([flatten_cls_scores, padding], dim=-1) + + det_results = [] + for (bboxes, scores, objectness) in zip(flatten_bboxes, + flatten_cls_scores, + flatten_objectness): + # Filtering out all predictions with conf < conf_thr + conf_thr = cfg.get('conf_thr', -1) + if conf_thr > 0: + conf_inds = objectness >= conf_thr + bboxes = bboxes[conf_inds, :] + scores = scores[conf_inds, :] + objectness = objectness[conf_inds] + + det_bboxes, det_labels = multiclass_nms( + bboxes, + scores, + cfg.score_thr, + cfg.nms, + cfg.max_per_img, + score_factors=objectness) + det_results.append(tuple([det_bboxes, det_labels])) + return det_results + + @force_fp32(apply_to=('pred_maps', )) + def loss(self, + pred_maps, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute loss of the head. + + Args: + pred_maps (list[Tensor]): Prediction map for each scale level, + shape (N, num_anchors * num_attrib, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + num_imgs = len(img_metas) + device = pred_maps[0][0].device + + featmap_sizes = [ + pred_maps[i].shape[-2:] for i in range(self.num_levels) + ] + mlvl_anchors = self.prior_generator.grid_priors( + featmap_sizes, device=device) + anchor_list = [mlvl_anchors for _ in range(num_imgs)] + + responsible_flag_list = [] + for img_id in range(len(img_metas)): + responsible_flag_list.append( + self.prior_generator.responsible_flags(featmap_sizes, + gt_bboxes[img_id], + device)) + + target_maps_list, neg_maps_list = self.get_targets( + anchor_list, responsible_flag_list, gt_bboxes, gt_labels) + + losses_cls, losses_conf, losses_xy, losses_wh = multi_apply( + self.loss_single, pred_maps, target_maps_list, neg_maps_list) + + return dict( + loss_cls=losses_cls, + loss_conf=losses_conf, + loss_xy=losses_xy, + loss_wh=losses_wh) + + def loss_single(self, pred_map, target_map, neg_map): + """Compute loss of a single image from a batch. + + Args: + pred_map (Tensor): Raw predictions for a single level. + target_map (Tensor): The Ground-Truth target for a single level. + neg_map (Tensor): The negative masks for a single level. + + Returns: + tuple: + loss_cls (Tensor): Classification loss. + loss_conf (Tensor): Confidence loss. + loss_xy (Tensor): Regression loss of x, y coordinate. + loss_wh (Tensor): Regression loss of w, h coordinate. + """ + + num_imgs = len(pred_map) + pred_map = pred_map.permute(0, 2, 3, + 1).reshape(num_imgs, -1, self.num_attrib) + neg_mask = neg_map.float() + pos_mask = target_map[..., 4] + pos_and_neg_mask = neg_mask + pos_mask + pos_mask = pos_mask.unsqueeze(dim=-1) + if torch.max(pos_and_neg_mask) > 1.: + warnings.warn('There is overlap between pos and neg sample.') + pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.) + + pred_xy = pred_map[..., :2] + pred_wh = pred_map[..., 2:4] + pred_conf = pred_map[..., 4] + pred_label = pred_map[..., 5:] + + target_xy = target_map[..., :2] + target_wh = target_map[..., 2:4] + target_conf = target_map[..., 4] + target_label = target_map[..., 5:] + + loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask) + loss_conf = self.loss_conf( + pred_conf, target_conf, weight=pos_and_neg_mask) + loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask) + loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask) + + return loss_cls, loss_conf, loss_xy, loss_wh + + def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list, + gt_labels_list): + """Compute target maps for anchors in multiple images. + + Args: + anchor_list (list[list[Tensor]]): Multi level anchors of each + image. The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_total_anchors, 4). + responsible_flag_list (list[list[Tensor]]): Multi level responsible + flags of each image. Each element is a tensor of shape + (num_total_anchors, ) + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + gt_labels_list (list[Tensor]): Ground truth labels of each box. + + Returns: + tuple: Usually returns a tuple containing learning targets. + - target_map_list (list[Tensor]): Target map of each level. + - neg_map_list (list[Tensor]): Negative map of each level. 
+ """ + num_imgs = len(anchor_list) + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + + results = multi_apply(self._get_targets_single, anchor_list, + responsible_flag_list, gt_bboxes_list, + gt_labels_list) + + all_target_maps, all_neg_maps = results + assert num_imgs == len(all_target_maps) == len(all_neg_maps) + target_maps_list = images_to_levels(all_target_maps, num_level_anchors) + neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) + + return target_maps_list, neg_maps_list + + def _get_targets_single(self, anchors, responsible_flags, gt_bboxes, + gt_labels): + """Generate matching bounding box prior and converted GT. + + Args: + anchors (list[Tensor]): Multi-level anchors of the image. + responsible_flags (list[Tensor]): Multi-level responsible flags of + anchors + gt_bboxes (Tensor): Ground truth bboxes of single image. + gt_labels (Tensor): Ground truth labels of single image. + + Returns: + tuple: + target_map (Tensor): Predication target map of each + scale level, shape (num_total_anchors, + 5+num_classes) + neg_map (Tensor): Negative map of each scale level, + shape (num_total_anchors,) + """ + + anchor_strides = [] + for i in range(len(anchors)): + anchor_strides.append( + torch.tensor(self.featmap_strides[i], + device=gt_bboxes.device).repeat(len(anchors[i]))) + concat_anchors = torch.cat(anchors) + concat_responsible_flags = torch.cat(responsible_flags) + + anchor_strides = torch.cat(anchor_strides) + assert len(anchor_strides) == len(concat_anchors) == \ + len(concat_responsible_flags) + assign_result = self.assigner.assign(concat_anchors, + concat_responsible_flags, + gt_bboxes) + sampling_result = self.sampler.sample(assign_result, concat_anchors, + gt_bboxes) + + target_map = concat_anchors.new_zeros( + concat_anchors.size(0), self.num_attrib) + + target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, + anchor_strides[sampling_result.pos_inds]) + + target_map[sampling_result.pos_inds, 4] = 1 + + gt_labels_one_hot = F.one_hot( + gt_labels, num_classes=self.num_classes).float() + if self.one_hot_smoother != 0: # label smooth + gt_labels_one_hot = gt_labels_one_hot * ( + 1 - self.one_hot_smoother + ) + self.one_hot_smoother / self.num_classes + target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ + sampling_result.pos_assigned_gt_inds] + + neg_map = concat_anchors.new_zeros( + concat_anchors.size(0), dtype=torch.uint8) + neg_map[sampling_result.neg_inds] = 1 + + return target_map, neg_map + + def aug_test(self, feats, img_metas, rescale=False): + """Test function with test time augmentation. + + Args: + feats (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains features for all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. 
+ + Returns: + list[ndarray]: bbox results of each class + """ + return self.aug_test_bboxes(feats, img_metas, rescale=rescale) + + @force_fp32(apply_to=('pred_maps')) + def onnx_export(self, pred_maps, img_metas, with_nms=True): + num_levels = len(pred_maps) + pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)] + + cfg = self.test_cfg + assert len(pred_maps_list) == self.num_levels + + device = pred_maps_list[0].device + batch_size = pred_maps_list[0].shape[0] + + featmap_sizes = [ + pred_maps_list[i].shape[-2:] for i in range(self.num_levels) + ] + mlvl_anchors = self.prior_generator.grid_priors( + featmap_sizes, device=device) + # convert to tensor to keep tracing + nms_pre_tensor = torch.tensor( + cfg.get('nms_pre', -1), device=device, dtype=torch.long) + + multi_lvl_bboxes = [] + multi_lvl_cls_scores = [] + multi_lvl_conf_scores = [] + for i in range(self.num_levels): + # get some key info for current scale + pred_map = pred_maps_list[i] + stride = self.featmap_strides[i] + # (b,h, w, num_anchors*num_attrib) -> + # (b,h*w*num_anchors, num_attrib) + pred_map = pred_map.permute(0, 2, 3, + 1).reshape(batch_size, -1, + self.num_attrib) + # Inplace operation like + # ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])``` + # would create constant tensor when exporting to onnx + pred_map_conf = torch.sigmoid(pred_map[..., :2]) + pred_map_rest = pred_map[..., 2:] + pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1) + pred_map_boxes = pred_map[..., :4] + multi_lvl_anchor = mlvl_anchors[i] + multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes) + bbox_pred = self.bbox_coder.decode(multi_lvl_anchor, + pred_map_boxes, stride) + # conf and cls + conf_pred = torch.sigmoid(pred_map[..., 4]) + cls_pred = torch.sigmoid(pred_map[..., 5:]).view( + batch_size, -1, self.num_classes) # Cls pred one-hot. 
+ + # Get top-k prediction + from mmdet.core.export import get_k_for_topk + nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) + if nms_pre > 0: + _, topk_inds = conf_pred.topk(nms_pre) + batch_inds = torch.arange(batch_size).view( + -1, 1).expand_as(topk_inds).long() + # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 + transformed_inds = ( + bbox_pred.shape[1] * batch_inds + topk_inds) + bbox_pred = bbox_pred.reshape(-1, + 4)[transformed_inds, :].reshape( + batch_size, -1, 4) + cls_pred = cls_pred.reshape( + -1, self.num_classes)[transformed_inds, :].reshape( + batch_size, -1, self.num_classes) + conf_pred = conf_pred.reshape(-1, 1)[transformed_inds].reshape( + batch_size, -1) + + # Save the result of current scale + multi_lvl_bboxes.append(bbox_pred) + multi_lvl_cls_scores.append(cls_pred) + multi_lvl_conf_scores.append(conf_pred) + + # Merge the results of different scales together + batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1) + batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1) + batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1) + + # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment + from mmdet.core.export import add_dummy_nms_for_onnx + conf_thr = cfg.get('conf_thr', -1) + score_thr = cfg.get('score_thr', -1) + # follow original pipeline of YOLOv3 + if conf_thr > 0: + mask = (batch_mlvl_conf_scores >= conf_thr).float() + batch_mlvl_conf_scores *= mask + if score_thr > 0: + mask = (batch_mlvl_scores > score_thr).float() + batch_mlvl_scores *= mask + batch_mlvl_conf_scores = batch_mlvl_conf_scores.unsqueeze(2).expand_as( + batch_mlvl_scores) + batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_conf_scores + if with_nms: + max_output_boxes_per_class = cfg.nms.get( + 'max_output_boxes_per_class', 200) + iou_threshold = cfg.nms.get('iou_threshold', 0.5) + # keep aligned with original pipeline, improve + # mAP by 1% for YOLOv3 in ONNX + score_threshold = 0 + nms_pre = cfg.get('deploy_nms_pre', -1) + return add_dummy_nms_for_onnx( + batch_mlvl_bboxes, + batch_mlvl_scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold, + nms_pre, + cfg.max_per_img, + ) + else: + return batch_mlvl_bboxes, batch_mlvl_scores diff --git a/downstream/mmdetection/mmdet/models/dense_heads/yolof_head.py b/downstream/mmdetection/mmdet/models/dense_heads/yolof_head.py new file mode 100644 index 0000000..1063524 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/yolof_head.py @@ -0,0 +1,416 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm, + normal_init) +from mmcv.runner import force_fp32 + +from mmdet.core import anchor_inside_flags, multi_apply, reduce_mean, unmap +from ..builder import HEADS +from .anchor_head import AnchorHead + +INF = 1e8 + + +def levels_to_images(mlvl_tensor): + """Concat multi-level feature maps by image. + + [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] + Convert the shape of each element in mlvl_tensor from (N, C, H, W) to + (N, H*W , C), then split the element to N elements with shape (H*W, C), and + concat elements in same image of all level along first dimension. + + Args: + mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from + corresponding level. 
Each element is of shape (N, C, H, W) + + Returns: + list[torch.Tensor]: A list that contains N tensors and each tensor is + of shape (num_elements, C) + """ + batch_size = mlvl_tensor[0].size(0) + batch_list = [[] for _ in range(batch_size)] + channels = mlvl_tensor[0].size(1) + for t in mlvl_tensor: + t = t.permute(0, 2, 3, 1) + t = t.view(batch_size, -1, channels).contiguous() + for img in range(batch_size): + batch_list[img].append(t[img]) + return [torch.cat(item, 0) for item in batch_list] + + +@HEADS.register_module() +class YOLOFHead(AnchorHead): + """YOLOFHead Paper link: https://arxiv.org/abs/2103.09460. + + Args: + num_classes (int): The number of object classes (w/o background) + in_channels (List[int]): The number of input channels per scale. + cls_num_convs (int): The number of convolutions of cls branch. + Default 2. + reg_num_convs (int): The number of convolutions of reg branch. + Default 4. + norm_cfg (dict): Dictionary to construct and config norm layer. + """ + + def __init__(self, + num_classes, + in_channels, + num_cls_convs=2, + num_reg_convs=4, + norm_cfg=dict(type='BN', requires_grad=True), + **kwargs): + self.num_cls_convs = num_cls_convs + self.num_reg_convs = num_reg_convs + self.norm_cfg = norm_cfg + super(YOLOFHead, self).__init__(num_classes, in_channels, **kwargs) + + def _init_layers(self): + cls_subnet = [] + bbox_subnet = [] + for i in range(self.num_cls_convs): + cls_subnet.append( + ConvModule( + self.in_channels, + self.in_channels, + kernel_size=3, + padding=1, + norm_cfg=self.norm_cfg)) + for i in range(self.num_reg_convs): + bbox_subnet.append( + ConvModule( + self.in_channels, + self.in_channels, + kernel_size=3, + padding=1, + norm_cfg=self.norm_cfg)) + self.cls_subnet = nn.Sequential(*cls_subnet) + self.bbox_subnet = nn.Sequential(*bbox_subnet) + self.cls_score = nn.Conv2d( + self.in_channels, + self.num_base_priors * self.num_classes, + kernel_size=3, + stride=1, + padding=1) + self.bbox_pred = nn.Conv2d( + self.in_channels, + self.num_base_priors * 4, + kernel_size=3, + stride=1, + padding=1) + self.object_pred = nn.Conv2d( + self.in_channels, + self.num_base_priors, + kernel_size=3, + stride=1, + padding=1) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, mean=0, std=0.01) + if is_norm(m): + constant_init(m, 1) + + # Use prior in model initialization to improve stability + bias_cls = bias_init_with_prob(0.01) + torch.nn.init.constant_(self.cls_score.bias, bias_cls) + + def forward_single(self, feature): + cls_score = self.cls_score(self.cls_subnet(feature)) + N, _, H, W = cls_score.shape + cls_score = cls_score.view(N, -1, self.num_classes, H, W) + + reg_feat = self.bbox_subnet(feature) + bbox_reg = self.bbox_pred(reg_feat) + objectness = self.object_pred(reg_feat) + + # implicit objectness + objectness = objectness.view(N, -1, 1, H, W) + normalized_cls_score = cls_score + objectness - torch.log( + 1. + torch.clamp(cls_score.exp(), max=INF) + + torch.clamp(objectness.exp(), max=INF)) + normalized_cls_score = normalized_cls_score.view(N, -1, H, W) + return normalized_cls_score, bbox_reg + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. 
+ + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (batch, num_anchors * num_classes, h, w) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (batch, num_anchors * 4, h, w) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. Default: None + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert len(cls_scores) == 1 + assert self.prior_generator.num_levels == 1 + + device = cls_scores[0].device + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + + # The output level is always 1 + anchor_list = [anchors[0] for anchors in anchor_list] + valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list] + + cls_scores_list = levels_to_images(cls_scores) + bbox_preds_list = levels_to_images(bbox_preds) + + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + cls_scores_list, + bbox_preds_list, + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + (batch_labels, batch_label_weights, num_total_pos, num_total_neg, + batch_bbox_weights, batch_pos_predicted_boxes, + batch_target_boxes) = cls_reg_targets + + flatten_labels = batch_labels.reshape(-1) + batch_label_weights = batch_label_weights.reshape(-1) + cls_score = cls_scores[0].permute(0, 2, 3, + 1).reshape(-1, self.cls_out_channels) + + num_total_samples = (num_total_pos + + num_total_neg) if self.sampling else num_total_pos + num_total_samples = reduce_mean( + cls_score.new_tensor(num_total_samples)).clamp_(1.0).item() + + # classification loss + loss_cls = self.loss_cls( + cls_score, + flatten_labels, + batch_label_weights, + avg_factor=num_total_samples) + + # regression loss + if batch_pos_predicted_boxes.shape[0] == 0: + # no pos sample + loss_bbox = batch_pos_predicted_boxes.sum() * 0 + else: + loss_bbox = self.loss_bbox( + batch_pos_predicted_boxes, + batch_target_boxes, + batch_bbox_weights.float(), + avg_factor=num_total_samples) + + return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) + + def get_targets(self, + cls_scores_list, + bbox_preds_list, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True): + """Compute regression and classification targets for anchors in + multiple images. + + Args: + cls_scores_list (list[Tensor]): Classification scores of + each image. each is a 4D-tensor, the shape is + (h * w, num_anchors * num_classes). + bbox_preds_list (list[Tensor]): Bbox preds of each image. + each is a 4D-tensor, the shape is (h * w, num_anchors * 4). + anchor_list (list[Tensor]): Anchors of each image. Each element of + is a tensor of shape (h * w * num_anchors, 4). + valid_flag_list (list[Tensor]): Valid flags of each image. Each + element of is a tensor of shape (h * w * num_anchors, ) + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. 
+ img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be + ignored. + gt_labels_list (list[Tensor]): Ground truth labels of each box. + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: Usually returns a tuple containing learning targets. + + - batch_labels (Tensor): Label of all images. Each element \ + of is a tensor of shape (batch, h * w * num_anchors) + - batch_label_weights (Tensor): Label weights of all images \ + of is a tensor of shape (batch, h * w * num_anchors) + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + additional_returns: This function enables user-defined returns from + `self._get_targets_single`. These returns are currently refined + to properties at each feature map (i.e. having HxW dimension). + The results will be concatenated after the end + """ + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + results = multi_apply( + self._get_targets_single, + bbox_preds_list, + anchor_list, + valid_flag_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs) + (all_labels, all_label_weights, pos_inds_list, neg_inds_list, + sampling_results_list) = results[:5] + rest_results = list(results[5:]) # user-added return values + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + + batch_labels = torch.stack(all_labels, 0) + batch_label_weights = torch.stack(all_label_weights, 0) + + res = (batch_labels, batch_label_weights, num_total_pos, num_total_neg) + for i, rests in enumerate(rest_results): # user-added return values + rest_results[i] = torch.cat(rests, 0) + + return res + tuple(rest_results) + + def _get_targets_single(self, + bbox_preds, + flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True): + """Compute regression and classification targets for anchors in a + single image. + + Args: + bbox_preds (Tensor): Bbox prediction of the image, which + shape is (h * w ,4) + flat_anchors (Tensor): Anchors of the image, which shape is + (h * w * num_anchors ,4) + valid_flags (Tensor): Valid flags of the image, which shape is + (h * w * num_anchors,). + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + img_meta (dict): Meta info of the image. + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: + labels (Tensor): Labels of image, which shape is + (h * w * num_anchors, ). + label_weights (Tensor): Label weights of image, which shape is + (h * w * num_anchors, ). + pos_inds (Tensor): Pos index of image. 
+ neg_inds (Tensor): Neg index of image. + sampling_result (obj:`SamplingResult`): Sampling result. + pos_bbox_weights (Tensor): The Weight of using to calculate + the bbox branch loss, which shape is (num, ). + pos_predicted_boxes (Tensor): boxes predicted value of + using to calculate the bbox branch loss, which shape is + (num, 4). + pos_target_boxes (Tensor): boxes target value of + using to calculate the bbox branch loss, which shape is + (num, 4). + """ + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 8 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + bbox_preds = bbox_preds.reshape(-1, 4) + bbox_preds = bbox_preds[inside_flags, :] + + # decoded bbox + decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds) + assign_result = self.assigner.assign( + decoder_bbox_preds, anchors, gt_bboxes, gt_bboxes_ignore, + None if self.sampling else gt_labels) + + pos_bbox_weights = assign_result.get_extra_property('pos_idx') + pos_predicted_boxes = assign_result.get_extra_property( + 'pos_predicted_boxes') + pos_target_boxes = assign_result.get_extra_property('target_boxes') + + sampling_result = self.sampler.sample(assign_result, anchors, + gt_bboxes) + num_valid_anchors = anchors.shape[0] + labels = anchors.new_full((num_valid_anchors, ), + self.num_classes, + dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class since v2.5.0 + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + labels = unmap( + labels, num_total_anchors, inside_flags, + fill=self.num_classes) # fill bg label + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + + return (labels, label_weights, pos_inds, neg_inds, sampling_result, + pos_bbox_weights, pos_predicted_boxes, pos_target_boxes) diff --git a/downstream/mmdetection/mmdet/models/dense_heads/yolox_head.py b/downstream/mmdetection/mmdet/models/dense_heads/yolox_head.py new file mode 100644 index 0000000..f317e14 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/dense_heads/yolox_head.py @@ -0,0 +1,493 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, + bias_init_with_prob) +from mmcv.ops.nms import batched_nms +from mmcv.runner import force_fp32 + +from mmdet.core import (MlvlPointGenerator, bbox_xyxy_to_cxcywh, + build_assigner, build_sampler, multi_apply, + reduce_mean) +from ..builder import HEADS, build_loss +from .base_dense_head import BaseDenseHead +from .dense_test_mixins import BBoxTestMixin + + +@HEADS.register_module() +class YOLOXHead(BaseDenseHead, BBoxTestMixin): + """YOLOXHead head used in `YOLOX `_. + + Args: + num_classes (int): Number of categories excluding the background + category. 
+ in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels in stacking convs. + Default: 256 + stacked_convs (int): Number of stacking convs of the head. + Default: 2. + strides (tuple): Downsample factor of each feature map. + use_depthwise (bool): Whether to depthwise separable convolution in + blocks. Default: False + dcn_on_last_conv (bool): If true, use dcn in the last layer of + towers. Default: False. + conv_bias (bool | str): If specified as `auto`, it will be decided by + the norm_cfg. Bias of conv will be set as True if `norm_cfg` is + None, otherwise False. Default: "auto". + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer. Default: None. + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + loss_obj (dict): Config of objectness loss. + loss_l1 (dict): Config of L1 loss. + train_cfg (dict): Training config of anchor head. + test_cfg (dict): Testing config of anchor head. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + stacked_convs=2, + strides=[8, 16, 32], + use_depthwise=False, + dcn_on_last_conv=False, + conv_bias='auto', + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_obj=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0), + train_cfg=None, + test_cfg=None, + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu')): + + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.cls_out_channels = num_classes + self.in_channels = in_channels + self.feat_channels = feat_channels + self.stacked_convs = stacked_convs + self.strides = strides + self.use_depthwise = use_depthwise + self.dcn_on_last_conv = dcn_on_last_conv + assert conv_bias == 'auto' or isinstance(conv_bias, bool) + self.conv_bias = conv_bias + self.use_sigmoid_cls = True + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.loss_obj = build_loss(loss_obj) + + self.use_l1 = False # This flag will be modified by hooks. 
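+        # In the reference YOLOX schedule this flag is typically switched to
+        # True by a mode-switch hook for the last few training epochs, so the
+        # L1 loss below is built up front even though it starts out disabled.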
+ self.loss_l1 = build_loss(loss_l1) + + self.prior_generator = MlvlPointGenerator(strides, offset=0) + + self.test_cfg = test_cfg + self.train_cfg = train_cfg + + self.sampling = False + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # sampling=False so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + + self.fp16_enabled = False + self._init_layers() + + def _init_layers(self): + self.multi_level_cls_convs = nn.ModuleList() + self.multi_level_reg_convs = nn.ModuleList() + self.multi_level_conv_cls = nn.ModuleList() + self.multi_level_conv_reg = nn.ModuleList() + self.multi_level_conv_obj = nn.ModuleList() + for _ in self.strides: + self.multi_level_cls_convs.append(self._build_stacked_convs()) + self.multi_level_reg_convs.append(self._build_stacked_convs()) + conv_cls, conv_reg, conv_obj = self._build_predictor() + self.multi_level_conv_cls.append(conv_cls) + self.multi_level_conv_reg.append(conv_reg) + self.multi_level_conv_obj.append(conv_obj) + + def _build_stacked_convs(self): + """Initialize conv layers of a single level head.""" + conv = DepthwiseSeparableConvModule \ + if self.use_depthwise else ConvModule + stacked_convs = [] + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + if self.dcn_on_last_conv and i == self.stacked_convs - 1: + conv_cfg = dict(type='DCNv2') + else: + conv_cfg = self.conv_cfg + stacked_convs.append( + conv( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.conv_bias)) + return nn.Sequential(*stacked_convs) + + def _build_predictor(self): + """Initialize predictor layers of a single level head.""" + conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) + conv_reg = nn.Conv2d(self.feat_channels, 4, 1) + conv_obj = nn.Conv2d(self.feat_channels, 1, 1) + return conv_cls, conv_reg, conv_obj + + def init_weights(self): + super(YOLOXHead, self).init_weights() + # Use prior in model initialization to improve stability + bias_init = bias_init_with_prob(0.01) + for conv_cls, conv_obj in zip(self.multi_level_conv_cls, + self.multi_level_conv_obj): + conv_cls.bias.data.fill_(bias_init) + conv_obj.bias.data.fill_(bias_init) + + def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg, + conv_obj): + """Forward feature of a single scale level.""" + + cls_feat = cls_convs(x) + reg_feat = reg_convs(x) + + cls_score = conv_cls(cls_feat) + bbox_pred = conv_reg(reg_feat) + objectness = conv_obj(reg_feat) + + return cls_score, bbox_pred, objectness + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + tuple[Tensor]: A tuple of multi-level predication map, each is a + 4D-tensor of shape (batch_size, 5+num_classes, height, width). + """ + + return multi_apply(self.forward_single, feats, + self.multi_level_cls_convs, + self.multi_level_reg_convs, + self.multi_level_conv_cls, + self.multi_level_conv_reg, + self.multi_level_conv_obj) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) + def get_bboxes(self, + cls_scores, + bbox_preds, + objectnesses, + img_metas=None, + cfg=None, + rescale=False, + with_nms=True): + """Transform network outputs of a batch into bbox results. 
+ Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + objectnesses (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + img_metas (list[dict], Optional): Image meta info. Default None. + cfg (mmcv.Config, Optional): Test / postprocessing configuration, + if None, test_cfg would be used. Default None. + rescale (bool): If True, return boxes in original image space. + Default False. + with_nms (bool): If True, do nms before return boxes. + Default True. + Returns: + list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is an (n, 5) tensor, where the first 4 columns + are bounding box positions (tl_x, tl_y, br_x, br_y) and the + 5-th column is a score between 0 and 1. The second item is a + (n,) tensor where each item is the predicted class label of + the corresponding box. + """ + assert len(cls_scores) == len(bbox_preds) == len(objectnesses) + cfg = self.test_cfg if cfg is None else cfg + scale_factors = np.array( + [img_meta['scale_factor'] for img_meta in img_metas]) + + num_imgs = len(img_metas) + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.cls_out_channels) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + + flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() + flatten_priors = torch.cat(mlvl_priors) + + flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) + + if rescale: + flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor( + scale_factors).unsqueeze(1) + + result_list = [] + for img_id in range(len(img_metas)): + cls_scores = flatten_cls_scores[img_id] + score_factor = flatten_objectness[img_id] + bboxes = flatten_bboxes[img_id] + + result_list.append( + self._bboxes_nms(cls_scores, bboxes, score_factor, cfg)) + + return result_list + + def _bbox_decode(self, priors, bbox_preds): + xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2] + whs = bbox_preds[..., 2:].exp() * priors[:, 2:] + + tl_x = (xys[..., 0] - whs[..., 0] / 2) + tl_y = (xys[..., 1] - whs[..., 1] / 2) + br_x = (xys[..., 0] + whs[..., 0] / 2) + br_y = (xys[..., 1] + whs[..., 1] / 2) + + decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) + return decoded_bboxes + + def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg): + max_scores, labels = torch.max(cls_scores, 1) + valid_mask = score_factor * max_scores >= cfg.score_thr + + bboxes = bboxes[valid_mask] + scores = max_scores[valid_mask] * score_factor[valid_mask] + labels = labels[valid_mask] + + if labels.numel() == 0: + return bboxes, labels + else: + dets, keep = batched_nms(bboxes, scores, labels, cfg.nms) + return dets, 
labels[keep] + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) + def loss(self, + cls_scores, + bbox_preds, + objectnesses, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute loss of the head. + Args: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + objectnesses (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + """ + num_imgs = len(img_metas) + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.cls_out_channels) + for cls_pred in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_objectness = torch.cat(flatten_objectness, dim=1) + flatten_priors = torch.cat(mlvl_priors) + flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) + + (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, + num_fg_imgs) = multi_apply( + self._get_target_single, flatten_cls_preds.detach(), + flatten_objectness.detach(), + flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1), + flatten_bboxes.detach(), gt_bboxes, gt_labels) + + # The experimental results show that ‘reduce_mean’ can improve + # performance on the COCO dataset. + num_pos = torch.tensor( + sum(num_fg_imgs), + dtype=torch.float, + device=flatten_cls_preds.device) + num_total_samples = max(reduce_mean(num_pos), 1.0) + + pos_masks = torch.cat(pos_masks, 0) + cls_targets = torch.cat(cls_targets, 0) + obj_targets = torch.cat(obj_targets, 0) + bbox_targets = torch.cat(bbox_targets, 0) + if self.use_l1: + l1_targets = torch.cat(l1_targets, 0) + + loss_bbox = self.loss_bbox( + flatten_bboxes.view(-1, 4)[pos_masks], + bbox_targets) / num_total_samples + loss_obj = self.loss_obj(flatten_objectness.view(-1, 1), + obj_targets) / num_total_samples + loss_cls = self.loss_cls( + flatten_cls_preds.view(-1, self.num_classes)[pos_masks], + cls_targets) / num_total_samples + + loss_dict = dict( + loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj) + + if self.use_l1: + loss_l1 = self.loss_l1( + flatten_bbox_preds.view(-1, 4)[pos_masks], + l1_targets) / num_total_samples + loss_dict.update(loss_l1=loss_l1) + + return loss_dict + + @torch.no_grad() + def _get_target_single(self, cls_preds, objectness, priors, decoded_bboxes, + gt_bboxes, gt_labels): + """Compute classification, regression, and objectness targets for + priors in a single image. 
+ Args: + cls_preds (Tensor): Classification predictions of one image, + a 2D-Tensor with shape [num_priors, num_classes] + objectness (Tensor): Objectness predictions of one image, + a 1D-Tensor with shape [num_priors] + priors (Tensor): All priors of one image, a 2D-Tensor with shape + [num_priors, 4] in [cx, xy, stride_w, stride_y] format. + decoded_bboxes (Tensor): Decoded bboxes predictions of one image, + a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, + br_x, br_y] format. + gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor + with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth labels of one image, a Tensor + with shape [num_gts]. + """ + + num_priors = priors.size(0) + num_gts = gt_labels.size(0) + gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype) + # No target + if num_gts == 0: + cls_target = cls_preds.new_zeros((0, self.num_classes)) + bbox_target = cls_preds.new_zeros((0, 4)) + l1_target = cls_preds.new_zeros((0, 4)) + obj_target = cls_preds.new_zeros((num_priors, 1)) + foreground_mask = cls_preds.new_zeros(num_priors).bool() + return (foreground_mask, cls_target, obj_target, bbox_target, + l1_target, 0) + + # YOLOX uses center priors with 0.5 offset to assign targets, + # but use center priors without offset to regress bboxes. + offset_priors = torch.cat( + [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1) + + assign_result = self.assigner.assign( + cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid(), + offset_priors, decoded_bboxes, gt_bboxes, gt_labels) + + sampling_result = self.sampler.sample(assign_result, priors, gt_bboxes) + pos_inds = sampling_result.pos_inds + num_pos_per_img = pos_inds.size(0) + + pos_ious = assign_result.max_overlaps[pos_inds] + # IOU aware classification score + cls_target = F.one_hot(sampling_result.pos_gt_labels, + self.num_classes) * pos_ious.unsqueeze(-1) + obj_target = torch.zeros_like(objectness).unsqueeze(-1) + obj_target[pos_inds] = 1 + bbox_target = sampling_result.pos_gt_bboxes + l1_target = cls_preds.new_zeros((num_pos_per_img, 4)) + if self.use_l1: + l1_target = self._get_l1_target(l1_target, bbox_target, + priors[pos_inds]) + foreground_mask = torch.zeros_like(objectness).to(torch.bool) + foreground_mask[pos_inds] = 1 + return (foreground_mask, cls_target, obj_target, bbox_target, + l1_target, num_pos_per_img) + + def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8): + """Convert gt bboxes to center offset and log width height.""" + gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes) + l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:] + l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps) + return l1_target diff --git a/downstream/mmdetection/mmdet/models/detectors/__init__.py b/downstream/mmdetection/mmdet/models/detectors/__init__.py new file mode 100644 index 0000000..a0a89b8 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/__init__.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
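+# Importing the detector modules below has the side effect of registering
+# each class in the DETECTORS registry (via @DETECTORS.register_module()),
+# which is what allows a detector to be built by name from a config file.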
+from .atss import ATSS
+from .autoassign import AutoAssign
+from .base import BaseDetector
+from .cascade_rcnn import CascadeRCNN
+from .centernet import CenterNet
+from .cornernet import CornerNet
+from .ddod import DDOD
+from .deformable_detr import DeformableDETR
+from .detr import DETR
+from .fast_rcnn import FastRCNN
+from .faster_rcnn import FasterRCNN
+from .fcos import FCOS
+from .fovea import FOVEA
+from .fsaf import FSAF
+from .gfl import GFL
+from .grid_rcnn import GridRCNN
+from .htc import HybridTaskCascade
+from .kd_one_stage import KnowledgeDistillationSingleStageDetector
+from .lad import LAD
+from .mask2former import Mask2Former
+from .mask_rcnn import MaskRCNN
+from .mask_scoring_rcnn import MaskScoringRCNN
+from .maskformer import MaskFormer
+from .nasfcos import NASFCOS
+from .paa import PAA
+from .panoptic_fpn import PanopticFPN
+from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
+from .point_rend import PointRend
+from .queryinst import QueryInst
+from .reppoints_detector import RepPointsDetector
+from .retinanet import RetinaNet
+from .rpn import RPN
+from .scnet import SCNet
+from .single_stage import SingleStageDetector
+from .solo import SOLO
+from .solov2 import SOLOv2
+from .sparse_rcnn import SparseRCNN
+from .tood import TOOD
+from .trident_faster_rcnn import TridentFasterRCNN
+from .two_stage import TwoStageDetector
+from .vfnet import VFNet
+from .yolact import YOLACT
+from .yolo import YOLOV3
+from .yolof import YOLOF
+from .yolox import YOLOX
+
+__all__ = [
+    'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
+    'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
+    'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
+    'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
+    'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
+    'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
+    'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
+    'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
+    'MaskFormer', 'DDOD', 'Mask2Former'
+]
diff --git a/downstream/mmdetection/mmdet/models/detectors/atss.py b/downstream/mmdetection/mmdet/models/detectors/atss.py
new file mode 100644
index 0000000..00f1acd
--- /dev/null
+++ b/downstream/mmdetection/mmdet/models/detectors/atss.py
@@ -0,0 +1,19 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class ATSS(SingleStageDetector):
+    """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
+
+    def __init__(self,
+                 backbone,
+                 neck,
+                 bbox_head,
+                 train_cfg=None,
+                 test_cfg=None,
+                 pretrained=None,
+                 init_cfg=None):
+        super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,
+                                   test_cfg, pretrained, init_cfg)
diff --git a/downstream/mmdetection/mmdet/models/detectors/autoassign.py b/downstream/mmdetection/mmdet/models/detectors/autoassign.py
new file mode 100644
index 0000000..30ab720
--- /dev/null
+++ b/downstream/mmdetection/mmdet/models/detectors/autoassign.py
@@ -0,0 +1,19 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class AutoAssign(SingleStageDetector): + """Implementation of `AutoAssign: Differentiable Label Assignment for Dense + Object Detection `_.""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained) diff --git a/downstream/mmdetection/mmdet/models/detectors/base.py b/downstream/mmdetection/mmdet/models/detectors/base.py new file mode 100644 index 0000000..bf64bce --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/base.py @@ -0,0 +1,360 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from collections import OrderedDict + +import mmcv +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import BaseModule, auto_fp16 + +from mmdet.core.visualization import imshow_det_bboxes + + +class BaseDetector(BaseModule, metaclass=ABCMeta): + """Base class for detectors.""" + + def __init__(self, init_cfg=None): + super(BaseDetector, self).__init__(init_cfg) + self.fp16_enabled = False + + @property + def with_neck(self): + """bool: whether the detector has a neck""" + return hasattr(self, 'neck') and self.neck is not None + + # TODO: these properties need to be carefully handled + # for both single stage & two stage detectors + @property + def with_shared_head(self): + """bool: whether the detector has a shared head in the RoI Head""" + return hasattr(self, 'roi_head') and self.roi_head.with_shared_head + + @property + def with_bbox(self): + """bool: whether the detector has a bbox head""" + return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) + or (hasattr(self, 'bbox_head') and self.bbox_head is not None)) + + @property + def with_mask(self): + """bool: whether the detector has a mask head""" + return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) + or (hasattr(self, 'mask_head') and self.mask_head is not None)) + + @abstractmethod + def extract_feat(self, imgs): + """Extract features from images.""" + pass + + def extract_feats(self, imgs): + """Extract features from multiple images. + + Args: + imgs (list[torch.Tensor]): A list of images. The images are + augmented from the same image but in different ways. + + Returns: + list[torch.Tensor]: Features of different images + """ + assert isinstance(imgs, list) + return [self.extract_feat(img) for img in imgs] + + def forward_train(self, imgs, img_metas, **kwargs): + """ + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. + Typically these should be mean centered and std scaled. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys, see + :class:`mmdet.datasets.pipelines.Collect`. + kwargs (keyword arguments): Specific to concrete implementation. + """ + # NOTE the batched image size information may be useful, e.g. + # in DETR, this is needed for the construction of masks, which is + # then used for the transformer_head. 
+ batch_input_shape = tuple(imgs[0].size()[-2:]) + for img_meta in img_metas: + img_meta['batch_input_shape'] = batch_input_shape + + async def async_simple_test(self, img, img_metas, **kwargs): + raise NotImplementedError + + @abstractmethod + def simple_test(self, img, img_metas, **kwargs): + pass + + @abstractmethod + def aug_test(self, imgs, img_metas, **kwargs): + """Test function with test time augmentation.""" + pass + + async def aforward_test(self, *, img, img_metas, **kwargs): + for var, name in [(img, 'img'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got {type(var)}') + + num_augs = len(img) + if num_augs != len(img_metas): + raise ValueError(f'num of augmentations ({len(img)}) ' + f'!= num of image metas ({len(img_metas)})') + # TODO: remove the restriction of samples_per_gpu == 1 when prepared + samples_per_gpu = img[0].size(0) + assert samples_per_gpu == 1 + + if num_augs == 1: + return await self.async_simple_test(img[0], img_metas[0], **kwargs) + else: + raise NotImplementedError + + def forward_test(self, imgs, img_metas, **kwargs): + """ + Args: + imgs (List[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + img_metas (List[List[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. + """ + for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got {type(var)}') + + num_augs = len(imgs) + if num_augs != len(img_metas): + raise ValueError(f'num of augmentations ({len(imgs)}) ' + f'!= num of image meta ({len(img_metas)})') + + # NOTE the batched image size information may be useful, e.g. + # in DETR, this is needed for the construction of masks, which is + # then used for the transformer_head. + for img, img_meta in zip(imgs, img_metas): + batch_size = len(img_meta) + for img_id in range(batch_size): + img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:]) + + if num_augs == 1: + # proposals (List[List[Tensor]]): the outer list indicates + # test-time augs (multiscale, flip, etc.) and the inner list + # indicates images in a batch. + # The Tensor should have a shape Px4, where P is the number of + # proposals. + if 'proposals' in kwargs: + kwargs['proposals'] = kwargs['proposals'][0] + return self.simple_test(imgs[0], img_metas[0], **kwargs) + else: + assert imgs[0].size(0) == 1, 'aug test does not support ' \ + 'inference with batch size ' \ + f'{imgs[0].size(0)}' + # TODO: support test augmentation for predefined proposals + assert 'proposals' not in kwargs + return self.aug_test(imgs, img_metas, **kwargs) + + @auto_fp16(apply_to=('img', )) + def forward(self, img, img_metas, return_loss=True, **kwargs): + """Calls either :func:`forward_train` or :func:`forward_test` depending + on whether ``return_loss`` is ``True``. + + Note this setting will change the expected inputs. When + ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor + and List[dict]), and when ``resturn_loss=False``, img and img_meta + should be double nested (i.e. List[Tensor], List[List[dict]]), with + the outer list indicating test time augmentations. 
+ """ + if torch.onnx.is_in_onnx_export(): + assert len(img_metas) == 1 + return self.onnx_export(img[0], img_metas[0]) + + if return_loss: + return self.forward_train(img, img_metas, **kwargs) + else: + return self.forward_test(img, img_metas, **kwargs) + + def _parse_losses(self, losses): + """Parse the raw outputs (losses) of the network. + + Args: + losses (dict): Raw output of the network, which usually contain + losses and other necessary information. + + Returns: + tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \ + which may be a weighted sum of all losses, log_vars contains \ + all the variables to be sent to the logger. + """ + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + else: + raise TypeError( + f'{loss_name} is not a tensor or list of tensors') + + loss = sum(_value for _key, _value in log_vars.items() + if 'loss' in _key) + + # If the loss_vars has different length, GPUs will wait infinitely + if dist.is_available() and dist.is_initialized(): + log_var_length = torch.tensor(len(log_vars), device=loss.device) + dist.all_reduce(log_var_length) + message = (f'rank {dist.get_rank()}' + + f' len(log_vars): {len(log_vars)}' + ' keys: ' + + ','.join(log_vars.keys())) + assert log_var_length == len(log_vars) * dist.get_world_size(), \ + 'loss log variables are different across GPUs!\n' + message + + log_vars['loss'] = loss + for loss_name, loss_value in log_vars.items(): + # reduce loss when distributed training + if dist.is_available() and dist.is_initialized(): + loss_value = loss_value.data.clone() + dist.all_reduce(loss_value.div_(dist.get_world_size())) + log_vars[loss_name] = loss_value.item() + + return loss, log_vars + + def train_step(self, data, optimizer): + """The iteration step during training. + + This method defines an iteration step during training, except for the + back propagation and optimizer updating, which are done in an optimizer + hook. Note that in some complicated cases or models, the whole process + including back propagation and optimizer updating is also defined in + this method, such as GAN. + + Args: + data (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of + runner is passed to ``train_step()``. This argument is unused + and reserved. + + Returns: + dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \ + ``num_samples``. + + - ``loss`` is a tensor for back propagation, which can be a + weighted sum of multiple losses. + - ``log_vars`` contains all the variables to be sent to the + logger. + - ``num_samples`` indicates the batch size (when the model is + DDP, it means the batch size on each GPU), which is used for + averaging the logs. + """ + losses = self(**data) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) + + return outputs + + def val_step(self, data, optimizer=None): + """The iteration step during validation. + + This method shares the same signature as :func:`train_step`, but used + during val epochs. Note that the evaluation after training epochs is + not implemented with this method, but an evaluation hook. 
+ """ + losses = self(**data) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) + + return outputs + + def show_result(self, + img, + result, + score_thr=0.3, + bbox_color=(72, 101, 241), + text_color=(72, 101, 241), + mask_color=None, + thickness=2, + font_size=13, + win_name='', + show=False, + wait_time=0, + out_file=None): + """Draw `result` over `img`. + + Args: + img (str or Tensor): The image to be displayed. + result (Tensor or tuple): The results to draw over `img` + bbox_result or (bbox_result, segm_result). + score_thr (float, optional): Minimum score of bboxes to be shown. + Default: 0.3. + bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. + The tuple of color should be in BGR order. Default: 'green' + text_color (str or tuple(int) or :obj:`Color`):Color of texts. + The tuple of color should be in BGR order. Default: 'green' + mask_color (None or str or tuple(int) or :obj:`Color`): + Color of masks. The tuple of color should be in BGR order. + Default: None + thickness (int): Thickness of lines. Default: 2 + font_size (int): Font size of texts. Default: 13 + win_name (str): The window name. Default: '' + wait_time (float): Value of waitKey param. + Default: 0. + show (bool): Whether to show the image. + Default: False. + out_file (str or None): The filename to write the image. + Default: None. + + Returns: + img (Tensor): Only if not `show` or `out_file` + """ + img = mmcv.imread(img) + img = img.copy() + if isinstance(result, tuple): + bbox_result, segm_result = result + if isinstance(segm_result, tuple): + segm_result = segm_result[0] # ms rcnn + else: + bbox_result, segm_result = result, None + bboxes = np.vstack(bbox_result) + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + # draw segmentation masks + segms = None + if segm_result is not None and len(labels) > 0: # non empty + segms = mmcv.concat_list(segm_result) + if isinstance(segms[0], torch.Tensor): + segms = torch.stack(segms, dim=0).detach().cpu().numpy() + else: + segms = np.stack(segms, axis=0) + # if out_file specified, do not show image in window + if out_file is not None: + show = False + # draw bounding boxes + img = imshow_det_bboxes( + img, + bboxes, + labels, + segms, + class_names=self.CLASSES, + score_thr=score_thr, + bbox_color=bbox_color, + text_color=text_color, + mask_color=mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + + if not (show or out_file): + return img + + def onnx_export(self, img, img_metas): + raise NotImplementedError(f'{self.__class__.__name__} does ' + f'not support ONNX EXPORT') diff --git a/downstream/mmdetection/mmdet/models/detectors/cascade_rcnn.py b/downstream/mmdetection/mmdet/models/detectors/cascade_rcnn.py new file mode 100644 index 0000000..d8c7382 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/cascade_rcnn.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
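`BaseDetector._parse_losses` above mean-reduces every entry of the raw loss dict, sums only the keys that contain `'loss'` into the optimized total, and keeps everything else purely for logging. The toy single-process version below (no distributed all-reduce, made-up loss values) illustrates that contract.

```python
from collections import OrderedDict

import torch


def parse_losses(losses):
    """Toy stand-in for BaseDetector._parse_losses (single GPU, no dist)."""
    log_vars = OrderedDict()
    for name, value in losses.items():
        if isinstance(value, torch.Tensor):
            log_vars[name] = value.mean()
        elif isinstance(value, list):
            log_vars[name] = sum(v.mean() for v in value)
        else:
            raise TypeError(f'{name} is not a tensor or list of tensors')
    # only keys containing 'loss' contribute to the optimized total
    loss = sum(v for k, v in log_vars.items() if 'loss' in k)
    log_vars['loss'] = loss
    return loss, {k: v.item() for k, v in log_vars.items()}


total, log_vars = parse_losses(dict(
    loss_cls=torch.tensor([0.8, 1.2]),                 # mean -> 1.0
    loss_bbox=[torch.tensor(0.3), torch.tensor(0.5)],  # sum of means -> 0.8
    acc=torch.tensor(92.0),                            # logged, not optimized
))
print(total.item(), log_vars)  # ~1.8 {'loss_cls': 1.0, 'loss_bbox': 0.8, ...}
```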
+from ..builder import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class CascadeRCNN(TwoStageDetector): + r"""Implementation of `Cascade R-CNN: Delving into High Quality Object + Detection `_""" + + def __init__(self, + backbone, + neck=None, + rpn_head=None, + roi_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(CascadeRCNN, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) + + def show_result(self, data, result, **kwargs): + """Show prediction results of the detector. + + Args: + data (str or np.ndarray): Image filename or loaded image. + result (Tensor or tuple): The results to draw over `img` + bbox_result or (bbox_result, segm_result). + + Returns: + np.ndarray: The image with bboxes drawn on it. + """ + if self.with_mask: + ms_bbox_result, ms_segm_result = result + if isinstance(ms_bbox_result, dict): + result = (ms_bbox_result['ensemble'], + ms_segm_result['ensemble']) + else: + if isinstance(result, dict): + result = result['ensemble'] + return super(CascadeRCNN, self).show_result(data, result, **kwargs) diff --git a/downstream/mmdetection/mmdet/models/detectors/centernet.py b/downstream/mmdetection/mmdet/models/detectors/centernet.py new file mode 100644 index 0000000..e1e3fd3 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/centernet.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.core import bbox2result +from mmdet.models.builder import DETECTORS +from ...core.utils import flip_tensor +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class CenterNet(SingleStageDetector): + """Implementation of CenterNet(Objects as Points) + + . + """ + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) + + def merge_aug_results(self, aug_results, with_nms): + """Merge augmented detection bboxes and score. + + Args: + aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each + image. + with_nms (bool): If True, do nms before return boxes. + + Returns: + tuple: (out_bboxes, out_labels) + """ + recovered_bboxes, aug_labels = [], [] + for single_result in aug_results: + recovered_bboxes.append(single_result[0][0]) + aug_labels.append(single_result[0][1]) + + bboxes = torch.cat(recovered_bboxes, dim=0).contiguous() + labels = torch.cat(aug_labels).contiguous() + if with_nms: + out_bboxes, out_labels = self.bbox_head._bboxes_nms( + bboxes, labels, self.bbox_head.test_cfg) + else: + out_bboxes, out_labels = bboxes, labels + + return out_bboxes, out_labels + + def aug_test(self, imgs, img_metas, rescale=True): + """Augment testing of CenterNet. Aug test must have flipped image pair, + and unlike CornerNet, it will perform an averaging operation on the + feature map instead of detecting bbox. + + Args: + imgs (list[Tensor]): Augmented images. + img_metas (list[list[dict]]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Default: True. + + Note: + ``imgs`` must including flipped image pairs. + + Returns: + list[list[np.ndarray]]: BBox results of each image and classes. + The outer list corresponds to each image. 
The inner list + corresponds to each class. + """ + img_inds = list(range(len(imgs))) + assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], ( + 'aug test must have flipped image pair') + aug_results = [] + for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]): + flip_direction = img_metas[flip_ind][0]['flip_direction'] + img_pair = torch.cat([imgs[ind], imgs[flip_ind]]) + x = self.extract_feat(img_pair) + center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x) + assert len(center_heatmap_preds) == len(wh_preds) == len( + offset_preds) == 1 + + # Feature map averaging + center_heatmap_preds[0] = ( + center_heatmap_preds[0][0:1] + + flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2 + wh_preds[0] = (wh_preds[0][0:1] + + flip_tensor(wh_preds[0][1:2], flip_direction)) / 2 + + bbox_list = self.bbox_head.get_bboxes( + center_heatmap_preds, + wh_preds, [offset_preds[0][0:1]], + img_metas[ind], + rescale=rescale, + with_nms=False) + aug_results.append(bbox_list) + + nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None) + if nms_cfg is None: + with_nms = False + else: + with_nms = True + bbox_list = [self.merge_aug_results(aug_results, with_nms)] + bbox_results = [ + bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) + for det_bboxes, det_labels in bbox_list + ] + return bbox_results diff --git a/downstream/mmdetection/mmdet/models/detectors/cornernet.py b/downstream/mmdetection/mmdet/models/detectors/cornernet.py new file mode 100644 index 0000000..ce921cc --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/cornernet.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.core import bbox2result, bbox_mapping_back +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class CornerNet(SingleStageDetector): + """CornerNet. + + This detector is the implementation of the paper `CornerNet: Detecting + Objects as Paired Keypoints `_ . + """ + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) + + def merge_aug_results(self, aug_results, img_metas): + """Merge augmented detection bboxes and score. + + Args: + aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each + image. + img_metas (list[list[dict]]): Meta information of each image, e.g., + image size, scaling factor, etc. + + Returns: + tuple: (bboxes, labels) + """ + recovered_bboxes, aug_labels = [], [] + for bboxes_labels, img_info in zip(aug_results, img_metas): + img_shape = img_info[0]['img_shape'] # using shape before padding + scale_factor = img_info[0]['scale_factor'] + flip = img_info[0]['flip'] + bboxes, labels = bboxes_labels + bboxes, scores = bboxes[:, :4], bboxes[:, -1:] + bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) + recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1)) + aug_labels.append(labels) + + bboxes = torch.cat(recovered_bboxes, dim=0) + labels = torch.cat(aug_labels) + + if bboxes.shape[0] > 0: + out_bboxes, out_labels = self.bbox_head._bboxes_nms( + bboxes, labels, self.bbox_head.test_cfg) + else: + out_bboxes, out_labels = bboxes, labels + + return out_bboxes, out_labels + + def aug_test(self, imgs, img_metas, rescale=False): + """Augment testing of CornerNet. + + Args: + imgs (list[Tensor]): Augmented images. 
+ img_metas (list[list[dict]]): Meta information of each image, e.g., + image size, scaling factor, etc. + rescale (bool): If True, return boxes in original image space. + Default: False. + + Note: + ``imgs`` must including flipped image pairs. + + Returns: + list[list[np.ndarray]]: BBox results of each image and classes. + The outer list corresponds to each image. The inner list + corresponds to each class. + """ + img_inds = list(range(len(imgs))) + + assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], ( + 'aug test must have flipped image pair') + aug_results = [] + for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]): + img_pair = torch.cat([imgs[ind], imgs[flip_ind]]) + x = self.extract_feat(img_pair) + outs = self.bbox_head(x) + bbox_list = self.bbox_head.get_bboxes( + *outs, [img_metas[ind], img_metas[flip_ind]], False, False) + aug_results.append(bbox_list[0]) + aug_results.append(bbox_list[1]) + + bboxes, labels = self.merge_aug_results(aug_results, img_metas) + bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes) + + return [bbox_results] diff --git a/downstream/mmdetection/mmdet/models/detectors/ddod.py b/downstream/mmdetection/mmdet/models/detectors/ddod.py new file mode 100644 index 0000000..2ae0a74 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/ddod.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class DDOD(SingleStageDetector): + """Implementation of `DDOD `_.""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(DDOD, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/deformable_detr.py b/downstream/mmdetection/mmdet/models/detectors/deformable_detr.py new file mode 100644 index 0000000..b1f1642 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/deformable_detr.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .detr import DETR + + +@DETECTORS.register_module() +class DeformableDETR(DETR): + + def __init__(self, *args, **kwargs): + super(DETR, self).__init__(*args, **kwargs) diff --git a/downstream/mmdetection/mmdet/models/detectors/detr.py b/downstream/mmdetection/mmdet/models/detectors/detr.py new file mode 100644 index 0000000..06d7691 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/detr.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch + +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class DETR(SingleStageDetector): + r"""Implementation of `DETR: End-to-End Object Detection with + Transformers `_""" + + def __init__(self, + backbone, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(DETR, self).__init__(backbone, None, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) + + # over-write `forward_dummy` because: + # the forward of bbox_head requires img_metas + def forward_dummy(self, img): + """Used for computing network flops. + + See `mmdetection/tools/analysis_tools/get_flops.py` + """ + warnings.warn('Warning! MultiheadAttention in DETR does not ' + 'support flops computation! 
Do not use the ' + 'results in your papers!') + + batch_size, _, height, width = img.shape + dummy_img_metas = [ + dict( + batch_input_shape=(height, width), + img_shape=(height, width, 3)) for _ in range(batch_size) + ] + x = self.extract_feat(img) + outs = self.bbox_head(x, dummy_img_metas) + return outs + + # over-write `onnx_export` because: + # (1) the forward of bbox_head requires img_metas + # (2) the different behavior (e.g. construction of `masks`) between + # torch and ONNX model, during the forward of bbox_head + def onnx_export(self, img, img_metas): + """Test function for exporting to ONNX, without test time augmentation. + + Args: + img (torch.Tensor): input images. + img_metas (list[dict]): List of image information. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. + """ + x = self.extract_feat(img) + # forward of this head requires img_metas + outs = self.bbox_head.forward_onnx(x, img_metas) + # get shape as tensor + img_shape = torch._shape_as_tensor(img)[2:] + img_metas[0]['img_shape_for_onnx'] = img_shape + + det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) + + return det_bboxes, det_labels diff --git a/downstream/mmdetection/mmdet/models/detectors/fast_rcnn.py b/downstream/mmdetection/mmdet/models/detectors/fast_rcnn.py new file mode 100644 index 0000000..7aebe15 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/fast_rcnn.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class FastRCNN(TwoStageDetector): + """Implementation of `Fast R-CNN `_""" + + def __init__(self, + backbone, + roi_head, + train_cfg, + test_cfg, + neck=None, + pretrained=None, + init_cfg=None): + super(FastRCNN, self).__init__( + backbone=backbone, + neck=neck, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) + + def forward_test(self, imgs, img_metas, proposals, **kwargs): + """ + Args: + imgs (List[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + img_metas (List[List[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. + proposals (List[List[Tensor]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. The Tensor should have a shape Px4, where + P is the number of proposals. + """ + for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got {type(var)}') + + num_augs = len(imgs) + if num_augs != len(img_metas): + raise ValueError(f'num of augmentations ({len(imgs)}) ' + f'!= num of image meta ({len(img_metas)})') + + if num_augs == 1: + return self.simple_test(imgs[0], img_metas[0], proposals[0], + **kwargs) + else: + # TODO: support test-time augmentation + assert NotImplementedError diff --git a/downstream/mmdetection/mmdet/models/detectors/faster_rcnn.py b/downstream/mmdetection/mmdet/models/detectors/faster_rcnn.py new file mode 100644 index 0000000..70fb662 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/faster_rcnn.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
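`forward_test` in `FastRCNN` above (and the identical convention in `BaseDetector`) expects double-nested inputs at test time: the outer list enumerates test-time augmentations, each tensor holds one batch of images, and the augmented path only supports a batch size of 1. The toy checker below restates those invariants on dummy data; it is a standalone sketch, not a method of the detectors.

```python
import torch


def check_test_inputs(imgs, img_metas):
    """Mirror the sanity checks BaseDetector.forward_test performs."""
    assert isinstance(imgs, list) and isinstance(img_metas, list)
    assert len(imgs) == len(img_metas), 'one meta list per augmentation'
    for img, metas in zip(imgs, img_metas):
        assert img.dim() == 4             # N x C x H x W per augmentation
        assert img.size(0) == len(metas)  # one meta dict per image
    if len(imgs) > 1:                     # aug_test path
        assert imgs[0].size(0) == 1, 'aug test only supports batch size 1'


# one augmentation ("simple test"), batch of two images
imgs = [torch.zeros(2, 3, 224, 224)]
img_metas = [[dict(img_shape=(224, 224, 3), scale_factor=1.0, flip=False)
              for _ in range(2)]]
check_test_inputs(imgs, img_metas)
```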
+from ..builder import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class FasterRCNN(TwoStageDetector): + """Implementation of `Faster R-CNN `_""" + + def __init__(self, + backbone, + rpn_head, + roi_head, + train_cfg, + test_cfg, + neck=None, + pretrained=None, + init_cfg=None): + super(FasterRCNN, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/fcos.py b/downstream/mmdetection/mmdet/models/detectors/fcos.py new file mode 100644 index 0000000..d985bd0 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/fcos.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class FCOS(SingleStageDetector): + """Implementation of `FCOS `_""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/fovea.py b/downstream/mmdetection/mmdet/models/detectors/fovea.py new file mode 100644 index 0000000..6fd908c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/fovea.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class FOVEA(SingleStageDetector): + """Implementation of `FoveaBox `_""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/fsaf.py b/downstream/mmdetection/mmdet/models/detectors/fsaf.py new file mode 100644 index 0000000..81ed1bd --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/fsaf.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class FSAF(SingleStageDetector): + """Implementation of `FSAF `_""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/gfl.py b/downstream/mmdetection/mmdet/models/detectors/gfl.py new file mode 100644 index 0000000..4628e2e --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/gfl.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
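FasterRCNN, FCOS, FOVEA, FSAF and GFL above are thin wrappers that simply forward their constructor arguments to the shared base class. Registering an additional detector of this kind follows the same pattern; the sketch below uses a hypothetical `MyDetector` name and assumes mmdet is importable.

```python
from mmdet.models.builder import DETECTORS
from mmdet.models.detectors import SingleStageDetector


@DETECTORS.register_module()
class MyDetector(SingleStageDetector):
    """Toy detector reusing the generic single-stage train/test pipeline."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None,
                 test_cfg=None, pretrained=None, init_cfg=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained, init_cfg)
```

After this import runs, `dict(type='MyDetector', ...)` becomes a valid config entry just like the built-in detectors.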
+from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class GFL(SingleStageDetector): + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/grid_rcnn.py b/downstream/mmdetection/mmdet/models/detectors/grid_rcnn.py new file mode 100644 index 0000000..bba7873 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/grid_rcnn.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class GridRCNN(TwoStageDetector): + """Grid R-CNN. + + This detector is the implementation of: + - Grid R-CNN (https://arxiv.org/abs/1811.12030) + - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688) + """ + + def __init__(self, + backbone, + rpn_head, + roi_head, + train_cfg, + test_cfg, + neck=None, + pretrained=None, + init_cfg=None): + super(GridRCNN, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/htc.py b/downstream/mmdetection/mmdet/models/detectors/htc.py new file mode 100644 index 0000000..f7c9533 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/htc.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .cascade_rcnn import CascadeRCNN + + +@DETECTORS.register_module() +class HybridTaskCascade(CascadeRCNN): + """Implementation of `HTC `_""" + + def __init__(self, **kwargs): + super(HybridTaskCascade, self).__init__(**kwargs) + + @property + def with_semantic(self): + """bool: whether the detector has a semantic head""" + return self.roi_head.with_semantic diff --git a/downstream/mmdetection/mmdet/models/detectors/kd_one_stage.py b/downstream/mmdetection/mmdet/models/detectors/kd_one_stage.py new file mode 100644 index 0000000..fb66b51 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/kd_one_stage.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from pathlib import Path + +import mmcv +import torch +from mmcv.runner import load_checkpoint + +from .. import build_detector +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class KnowledgeDistillationSingleStageDetector(SingleStageDetector): + r"""Implementation of `Distilling the Knowledge in a Neural Network. + `_. + + Args: + teacher_config (str | dict): Config file path + or the config object of teacher model. + teacher_ckpt (str, optional): Checkpoint path of teacher model. + If left as None, the model will not load any weights. 
+ """ + + def __init__(self, + backbone, + neck, + bbox_head, + teacher_config, + teacher_ckpt=None, + eval_teacher=True, + train_cfg=None, + test_cfg=None, + pretrained=None): + super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg, + pretrained) + self.eval_teacher = eval_teacher + # Build teacher model + if isinstance(teacher_config, (str, Path)): + teacher_config = mmcv.Config.fromfile(teacher_config) + self.teacher_model = build_detector(teacher_config['model']) + if teacher_ckpt is not None: + load_checkpoint( + self.teacher_model, teacher_ckpt, map_location='cpu') + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None): + """ + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): A List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmdet.datasets.pipelines.Collect`. + gt_bboxes (list[Tensor]): Each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): Class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): Specify which bounding + boxes can be ignored when computing the loss. + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + x = self.extract_feat(img) + with torch.no_grad(): + teacher_x = self.teacher_model.extract_feat(img) + out_teacher = self.teacher_model.bbox_head(teacher_x) + losses = self.bbox_head.forward_train(x, out_teacher, img_metas, + gt_bboxes, gt_labels, + gt_bboxes_ignore) + return losses + + def cuda(self, device=None): + """Since teacher_model is registered as a plain object, it is necessary + to put the teacher model to cuda when calling cuda function.""" + self.teacher_model.cuda(device=device) + return super().cuda(device=device) + + def train(self, mode=True): + """Set the same train mode for teacher and student model.""" + if self.eval_teacher: + self.teacher_model.train(False) + else: + self.teacher_model.train(mode) + super().train(mode) + + def __setattr__(self, name, value): + """Set attribute, i.e. self.name = value + + This reloading prevent the teacher model from being registered as a + nn.Module. The teacher module is registered as a plain object, so that + the teacher parameters will not show up when calling + ``self.parameters``, ``self.modules``, ``self.children`` methods. + """ + if name == 'teacher_model': + object.__setattr__(self, name, value) + else: + super().__setattr__(name, value) diff --git a/downstream/mmdetection/mmdet/models/detectors/lad.py b/downstream/mmdetection/mmdet/models/detectors/lad.py new file mode 100644 index 0000000..c6cc1e0 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/lad.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
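The `__setattr__` override in `KnowledgeDistillationSingleStageDetector` above stores the teacher with `object.__setattr__`, so it never appears in `parameters()`, `modules()` or `state_dict()`; optimizers and checkpoints therefore only ever see the student. A minimal standalone demonstration of that trick with plain `torch.nn` modules (toy classes, not the mmdet ones):

```python
import torch.nn as nn


class Student(nn.Module):
    """Attach the teacher as a plain attribute so nn.Module never registers it."""

    def __init__(self, teacher):
        super().__init__()
        self.layer = nn.Linear(4, 2)                  # registered normally
        object.__setattr__(self, 'teacher', teacher)  # bypasses registration


student = Student(teacher=nn.Linear(4, 2))
print(sum(p.numel() for p in student.parameters()))  # 10 -> student weights only
print('teacher' in dict(student.named_modules()))    # False
print(list(student.state_dict().keys()))             # ['layer.weight', 'layer.bias']
```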
+import torch +import torch.nn as nn +from mmcv.runner import load_checkpoint + +from ..builder import DETECTORS, build_backbone, build_head, build_neck +from .kd_one_stage import KnowledgeDistillationSingleStageDetector + + +@DETECTORS.register_module() +class LAD(KnowledgeDistillationSingleStageDetector): + """Implementation of `LAD `_.""" + + def __init__(self, + backbone, + neck, + bbox_head, + teacher_backbone, + teacher_neck, + teacher_bbox_head, + teacher_ckpt, + eval_teacher=True, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(KnowledgeDistillationSingleStageDetector, + self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, + pretrained) + self.eval_teacher = eval_teacher + self.teacher_model = nn.Module() + self.teacher_model.backbone = build_backbone(teacher_backbone) + if teacher_neck is not None: + self.teacher_model.neck = build_neck(teacher_neck) + teacher_bbox_head.update(train_cfg=train_cfg) + teacher_bbox_head.update(test_cfg=test_cfg) + self.teacher_model.bbox_head = build_head(teacher_bbox_head) + if teacher_ckpt is not None: + load_checkpoint( + self.teacher_model, teacher_ckpt, map_location='cpu') + + @property + def with_teacher_neck(self): + """bool: whether the detector has a teacher_neck""" + return hasattr(self.teacher_model, 'neck') and \ + self.teacher_model.neck is not None + + def extract_teacher_feat(self, img): + """Directly extract teacher features from the backbone+neck.""" + x = self.teacher_model.backbone(img) + if self.with_teacher_neck: + x = self.teacher_model.neck(x) + return x + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None): + """ + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): A List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmdet.datasets.pipelines.Collect`. + gt_bboxes (list[Tensor]): Each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): Class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): Specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + # get label assignment from the teacher + with torch.no_grad(): + x_teacher = self.extract_teacher_feat(img) + outs_teacher = self.teacher_model.bbox_head(x_teacher) + label_assignment_results = \ + self.teacher_model.bbox_head.get_label_assignment( + *outs_teacher, gt_bboxes, gt_labels, img_metas, + gt_bboxes_ignore) + + # the student use the label assignment from the teacher to learn + x = self.extract_feat(img) + losses = self.bbox_head.forward_train(x, label_assignment_results, + img_metas, gt_bboxes, gt_labels, + gt_bboxes_ignore) + return losses diff --git a/downstream/mmdetection/mmdet/models/detectors/mask2former.py b/downstream/mmdetection/mmdet/models/detectors/mask2former.py new file mode 100644 index 0000000..b9ad2ed --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/mask2former.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
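LAD above assembles its teacher as a bare `nn.Module()` that merely holds a separately built backbone, neck and head, and queries it under `torch.no_grad()` to produce the label assignment consumed by the student. The toy snippet below illustrates that container pattern with arbitrary small layers standing in for the real sub-modules.

```python
import torch
import torch.nn as nn

# an empty nn.Module used purely as a namespace, as in LAD.__init__ above
teacher = nn.Module()
teacher.backbone = nn.Conv2d(3, 8, 3, padding=1)
teacher.neck = nn.Identity()
teacher.bbox_head = nn.Conv2d(8, 4, 1)

with torch.no_grad():                     # the teacher only provides targets
    feat = teacher.neck(teacher.backbone(torch.randn(1, 3, 32, 32)))
    assignment = teacher.bbox_head(feat)  # stand-in for get_label_assignment
print(assignment.shape)                   # torch.Size([1, 4, 32, 32])
```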
+from ..builder import DETECTORS +from .maskformer import MaskFormer + + +@DETECTORS.register_module() +class Mask2Former(MaskFormer): + r"""Implementation of `Masked-attention Mask + Transformer for Universal Image Segmentation + `_.""" + + def __init__(self, + backbone, + neck=None, + panoptic_head=None, + panoptic_fusion_head=None, + train_cfg=None, + test_cfg=None, + init_cfg=None): + super().__init__( + backbone, + neck=neck, + panoptic_head=panoptic_head, + panoptic_fusion_head=panoptic_fusion_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/mask_rcnn.py b/downstream/mmdetection/mmdet/models/detectors/mask_rcnn.py new file mode 100644 index 0000000..c68489f --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/mask_rcnn.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class MaskRCNN(TwoStageDetector): + """Implementation of `Mask R-CNN `_""" + + def __init__(self, + backbone, + rpn_head, + roi_head, + train_cfg, + test_cfg, + neck=None, + pretrained=None, + init_cfg=None): + super(MaskRCNN, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/mask_scoring_rcnn.py b/downstream/mmdetection/mmdet/models/detectors/mask_scoring_rcnn.py new file mode 100644 index 0000000..5f55656 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/mask_scoring_rcnn.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class MaskScoringRCNN(TwoStageDetector): + """Mask Scoring RCNN. + + https://arxiv.org/abs/1903.00241 + """ + + def __init__(self, + backbone, + rpn_head, + roi_head, + train_cfg, + test_cfg, + neck=None, + pretrained=None, + init_cfg=None): + super(MaskScoringRCNN, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/maskformer.py b/downstream/mmdetection/mmdet/models/detectors/maskformer.py new file mode 100644 index 0000000..df8b5c2 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/maskformer.py @@ -0,0 +1,256 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import mmcv +import numpy as np + +from mmdet.core import INSTANCE_OFFSET, bbox2result +from mmdet.core.visualization import imshow_det_bboxes +from ..builder import DETECTORS, build_backbone, build_head, build_neck +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class MaskFormer(SingleStageDetector): + r"""Implementation of `Per-Pixel Classification is + NOT All You Need for Semantic Segmentation + `_.""" + + def __init__(self, + backbone, + neck=None, + panoptic_head=None, + panoptic_fusion_head=None, + train_cfg=None, + test_cfg=None, + init_cfg=None): + super(SingleStageDetector, self).__init__(init_cfg=init_cfg) + self.backbone = build_backbone(backbone) + if neck is not None: + self.neck = build_neck(neck) + + panoptic_head_ = panoptic_head.deepcopy() + panoptic_head_.update(train_cfg=train_cfg) + panoptic_head_.update(test_cfg=test_cfg) + self.panoptic_head = build_head(panoptic_head_) + + panoptic_fusion_head_ = panoptic_fusion_head.deepcopy() + panoptic_fusion_head_.update(test_cfg=test_cfg) + self.panoptic_fusion_head = build_head(panoptic_fusion_head_) + + self.num_things_classes = self.panoptic_head.num_things_classes + self.num_stuff_classes = self.panoptic_head.num_stuff_classes + self.num_classes = self.panoptic_head.num_classes + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + # BaseDetector.show_result default for instance segmentation + if self.num_stuff_classes > 0: + self.show_result = self._show_pan_result + + def forward_dummy(self, img, img_metas): + """Used for computing network flops. See + `mmdetection/tools/analysis_tools/get_flops.py` + + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. + Typically these should be mean centered and std scaled. + img_metas (list[Dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + """ + super(SingleStageDetector, self).forward_train(img, img_metas) + x = self.extract_feat(img) + outs = self.panoptic_head(x, img_metas) + return outs + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_masks, + gt_semantic_seg=None, + gt_bboxes_ignore=None, + **kargs): + """ + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. + Typically these should be mean centered and std scaled. + img_metas (list[Dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box. + gt_masks (list[BitmapMasks]): true segmentation masks for each box + used if the architecture supports a segmentation task. + gt_semantic_seg (list[tensor]): semantic segmentation mask for + images for panoptic segmentation. + Defaults to None for instance segmentation. + gt_bboxes_ignore (list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + Defaults to None. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # add batch_input_shape in img_metas + super(SingleStageDetector, self).forward_train(img, img_metas) + x = self.extract_feat(img) + losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes, + gt_labels, gt_masks, + gt_semantic_seg, + gt_bboxes_ignore) + + return losses + + def simple_test(self, imgs, img_metas, **kwargs): + """Test without augmentation. + + Args: + imgs (Tensor): A batch of images. + img_metas (list[dict]): List of image information. + + Returns: + list[dict[str, np.array | tuple[list]] | tuple[list]]: + Semantic segmentation results and panoptic segmentation \ + results of each image for panoptic segmentation, or formatted \ + bbox and mask results of each image for instance segmentation. + + .. code-block:: none + + [ + # panoptic segmentation + { + 'pan_results': np.array, # shape = [h, w] + 'ins_results': tuple[list], + # semantic segmentation results are not supported yet + 'sem_results': np.array + }, + ... + ] + + or + + .. code-block:: none + + [ + # instance segmentation + ( + bboxes, # list[np.array] + masks # list[list[np.array]] + ), + ... + ] + """ + feats = self.extract_feat(imgs) + mask_cls_results, mask_pred_results = self.panoptic_head.simple_test( + feats, img_metas, **kwargs) + results = self.panoptic_fusion_head.simple_test( + mask_cls_results, mask_pred_results, img_metas, **kwargs) + for i in range(len(results)): + if 'pan_results' in results[i]: + results[i]['pan_results'] = results[i]['pan_results'].detach( + ).cpu().numpy() + + if 'ins_results' in results[i]: + labels_per_image, bboxes, mask_pred_binary = results[i][ + 'ins_results'] + bbox_results = bbox2result(bboxes, labels_per_image, + self.num_things_classes) + mask_results = [[] for _ in range(self.num_things_classes)] + for j, label in enumerate(labels_per_image): + mask = mask_pred_binary[j].detach().cpu().numpy() + mask_results[label].append(mask) + results[i]['ins_results'] = bbox_results, mask_results + + assert 'sem_results' not in results[i], 'segmantic segmentation '\ + 'results are not supported yet.' + + if self.num_stuff_classes == 0: + results = [res['ins_results'] for res in results] + + return results + + def aug_test(self, imgs, img_metas, **kwargs): + raise NotImplementedError + + def onnx_export(self, img, img_metas): + raise NotImplementedError + + def _show_pan_result(self, + img, + result, + score_thr=0.3, + bbox_color=(72, 101, 241), + text_color=(72, 101, 241), + mask_color=None, + thickness=2, + font_size=13, + win_name='', + show=False, + wait_time=0, + out_file=None): + """Draw `panoptic result` over `img`. + + Args: + img (str or Tensor): The image to be displayed. + result (dict): The results. + + score_thr (float, optional): Minimum score of bboxes to be shown. + Default: 0.3. + bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. + The tuple of color should be in BGR order. Default: 'green'. + text_color (str or tuple(int) or :obj:`Color`):Color of texts. + The tuple of color should be in BGR order. Default: 'green'. + mask_color (None or str or tuple(int) or :obj:`Color`): + Color of masks. The tuple of color should be in BGR order. + Default: None. + thickness (int): Thickness of lines. Default: 2. + font_size (int): Font size of texts. Default: 13. + win_name (str): The window name. Default: ''. + wait_time (float): Value of waitKey param. + Default: 0. + show (bool): Whether to show the image. + Default: False. 
+ out_file (str or None): The filename to write the image. + Default: None. + + Returns: + img (Tensor): Only if not `show` or `out_file`. + """ + img = mmcv.imread(img) + img = img.copy() + pan_results = result['pan_results'] + # keep objects ahead + ids = np.unique(pan_results)[::-1] + legal_indices = ids != self.num_classes # for VOID label + ids = ids[legal_indices] + labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) + segms = (pan_results[None] == ids[:, None, None]) + + # if out_file specified, do not show image in window + if out_file is not None: + show = False + # draw bounding boxes + img = imshow_det_bboxes( + img, + segms=segms, + labels=labels, + class_names=self.CLASSES, + bbox_color=bbox_color, + text_color=text_color, + mask_color=mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + + if not (show or out_file): + return img diff --git a/downstream/mmdetection/mmdet/models/detectors/nasfcos.py b/downstream/mmdetection/mmdet/models/detectors/nasfcos.py new file mode 100644 index 0000000..a34c228 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/nasfcos.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class NASFCOS(SingleStageDetector): + """NAS-FCOS: Fast Neural Architecture Search for Object Detection. + + https://arxiv.org/abs/1906.0442 + """ + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/paa.py b/downstream/mmdetection/mmdet/models/detectors/paa.py new file mode 100644 index 0000000..f5cb837 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/paa.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class PAA(SingleStageDetector): + """Implementation of `PAA `_.""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/panoptic_fpn.py b/downstream/mmdetection/mmdet/models/detectors/panoptic_fpn.py new file mode 100644 index 0000000..f8ac751 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/panoptic_fpn.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
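`_show_pan_result` above decodes the panoptic map by treating each pixel value as `label + instance_id * INSTANCE_OFFSET`, with the value `num_classes` reserved for VOID. The sketch below reproduces that decoding on a tiny hand-made map; it assumes mmdet 2.x, where `INSTANCE_OFFSET` is 1000.

```python
import numpy as np
from mmdet.core import INSTANCE_OFFSET  # 1000 in mmdet 2.x


def decode_pan_results(pan_results, num_classes):
    """Toy decoder mirroring _show_pan_result above."""
    ids = np.unique(pan_results)[::-1]
    ids = ids[ids != num_classes]                    # drop the VOID label
    labels = np.array([i % INSTANCE_OFFSET for i in ids], dtype=np.int64)
    masks = pan_results[None] == ids[:, None, None]  # (num_segments, H, W)
    return labels, masks


# 2x3 toy map: class 2 / instance 1, a stuff class 0, and VOID (num_classes=80)
pan = np.array([[2 + 1 * INSTANCE_OFFSET, 2 + 1 * INSTANCE_OFFSET, 0],
                [0, 80, 80]])
labels, masks = decode_pan_results(pan, num_classes=80)
print(labels)       # [2 0]
print(masks.shape)  # (2, 2, 3)
```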
+from ..builder import DETECTORS +from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor + + +@DETECTORS.register_module() +class PanopticFPN(TwoStagePanopticSegmentor): + r"""Implementation of `Panoptic feature pyramid + networks `_""" + + def __init__( + self, + backbone, + neck=None, + rpn_head=None, + roi_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None, + # for panoptic segmentation + semantic_head=None, + panoptic_fusion_head=None): + super(PanopticFPN, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg, + semantic_head=semantic_head, + panoptic_fusion_head=panoptic_fusion_head) diff --git a/downstream/mmdetection/mmdet/models/detectors/panoptic_two_stage_segmentor.py b/downstream/mmdetection/mmdet/models/detectors/panoptic_two_stage_segmentor.py new file mode 100644 index 0000000..5ad49ba --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/panoptic_two_stage_segmentor.py @@ -0,0 +1,279 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import torch + +from mmdet.core import INSTANCE_OFFSET, bbox2roi, multiclass_nms +from mmdet.core.visualization import imshow_det_bboxes +from ..builder import DETECTORS, build_head +from ..roi_heads.mask_heads.fcn_mask_head import _do_paste_mask +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class TwoStagePanopticSegmentor(TwoStageDetector): + """Base class of Two-stage Panoptic Segmentor. + + As well as the components in TwoStageDetector, Panoptic Segmentor has extra + semantic_head and panoptic_fusion_head. + """ + + def __init__( + self, + backbone, + neck=None, + rpn_head=None, + roi_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None, + # for panoptic segmentation + semantic_head=None, + panoptic_fusion_head=None): + super(TwoStagePanopticSegmentor, + self).__init__(backbone, neck, rpn_head, roi_head, train_cfg, + test_cfg, pretrained, init_cfg) + if semantic_head is not None: + self.semantic_head = build_head(semantic_head) + if panoptic_fusion_head is not None: + panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None + panoptic_fusion_head_ = panoptic_fusion_head.deepcopy() + panoptic_fusion_head_.update(test_cfg=panoptic_cfg) + self.panoptic_fusion_head = build_head(panoptic_fusion_head_) + + self.num_things_classes = self.panoptic_fusion_head.\ + num_things_classes + self.num_stuff_classes = self.panoptic_fusion_head.\ + num_stuff_classes + self.num_classes = self.panoptic_fusion_head.num_classes + + @property + def with_semantic_head(self): + return hasattr(self, + 'semantic_head') and self.semantic_head is not None + + @property + def with_panoptic_fusion_head(self): + return hasattr(self, 'panoptic_fusion_heads') and \ + self.panoptic_fusion_head is not None + + def forward_dummy(self, img): + """Used for computing network flops. 
+ + See `mmdetection/tools/get_flops.py` + """ + raise NotImplementedError( + f'`forward_dummy` is not implemented in {self.__class__.__name__}') + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + gt_semantic_seg=None, + proposals=None, + **kwargs): + x = self.extract_feat(img) + losses = dict() + + # RPN forward and loss + if self.with_rpn: + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + rpn_losses, proposal_list = self.rpn_head.forward_train( + x, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=gt_bboxes_ignore, + proposal_cfg=proposal_cfg) + losses.update(rpn_losses) + else: + proposal_list = proposals + + roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, + gt_bboxes, gt_labels, + gt_bboxes_ignore, gt_masks, + **kwargs) + losses.update(roi_losses) + + semantic_loss = self.semantic_head.forward_train(x, gt_semantic_seg) + losses.update(semantic_loss) + + return losses + + def simple_test_mask(self, + x, + img_metas, + det_bboxes, + det_labels, + rescale=False): + """Simple test for mask head without augmentation.""" + img_shapes = tuple(meta['ori_shape'] + for meta in img_metas) if rescale else tuple( + meta['pad_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + masks = [] + for img_shape in img_shapes: + out_shape = (0, self.roi_head.bbox_head.num_classes) \ + + img_shape[:2] + masks.append(det_bboxes[0].new_zeros(out_shape)) + mask_pred = det_bboxes[0].new_zeros((0, 80, 28, 28)) + mask_results = dict( + masks=masks, mask_pred=mask_pred, mask_feats=None) + return mask_results + + _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))] + if rescale: + if not isinstance(scale_factors[0], float): + scale_factors = [ + det_bboxes[0].new_tensor(scale_factor) + for scale_factor in scale_factors + ] + _bboxes = [ + _bboxes[i] * scale_factors[i] for i in range(len(_bboxes)) + ] + + mask_rois = bbox2roi(_bboxes) + mask_results = self.roi_head._mask_forward(x, mask_rois) + mask_pred = mask_results['mask_pred'] + # split batch mask prediction back to each image + num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] + mask_preds = mask_pred.split(num_mask_roi_per_img, 0) + + # resize the mask_preds to (K, H, W) + masks = [] + for i in range(len(_bboxes)): + det_bbox = det_bboxes[i][:, :4] + det_label = det_labels[i] + + mask_pred = mask_preds[i].sigmoid() + + box_inds = torch.arange(mask_pred.shape[0]) + mask_pred = mask_pred[box_inds, det_label][:, None] + + img_h, img_w, _ = img_shapes[i] + mask_pred, _ = _do_paste_mask( + mask_pred, det_bbox, img_h, img_w, skip_empty=False) + masks.append(mask_pred) + + mask_results['masks'] = masks + + return mask_results + + def simple_test(self, img, img_metas, proposals=None, rescale=False): + """Test without Augmentation.""" + x = self.extract_feat(img) + + if proposals is None: + proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) + else: + proposal_list = proposals + + bboxes, scores = self.roi_head.simple_test_bboxes( + x, img_metas, proposal_list, None, rescale=rescale) + + pan_cfg = self.test_cfg.panoptic + # class-wise predictions + det_bboxes = [] + det_labels = [] + for bboxe, score in zip(bboxes, scores): + det_bbox, det_label = multiclass_nms(bboxe, score, + pan_cfg.score_thr, + pan_cfg.nms, + pan_cfg.max_per_img) + det_bboxes.append(det_bbox) + det_labels.append(det_label) + + 
mask_results = self.simple_test_mask( + x, img_metas, det_bboxes, det_labels, rescale=rescale) + masks = mask_results['masks'] + + seg_preds = self.semantic_head.simple_test(x, img_metas, rescale) + + results = [] + for i in range(len(det_bboxes)): + pan_results = self.panoptic_fusion_head.simple_test( + det_bboxes[i], det_labels[i], masks[i], seg_preds[i]) + pan_results = pan_results.int().detach().cpu().numpy() + result = dict(pan_results=pan_results) + results.append(result) + return results + + def show_result(self, + img, + result, + score_thr=0.3, + bbox_color=(72, 101, 241), + text_color=(72, 101, 241), + mask_color=None, + thickness=2, + font_size=13, + win_name='', + show=False, + wait_time=0, + out_file=None): + """Draw `result` over `img`. + + Args: + img (str or Tensor): The image to be displayed. + result (dict): The results. + + score_thr (float, optional): Minimum score of bboxes to be shown. + Default: 0.3. + bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. + The tuple of color should be in BGR order. Default: 'green'. + text_color (str or tuple(int) or :obj:`Color`):Color of texts. + The tuple of color should be in BGR order. Default: 'green'. + mask_color (None or str or tuple(int) or :obj:`Color`): + Color of masks. The tuple of color should be in BGR order. + Default: None. + thickness (int): Thickness of lines. Default: 2. + font_size (int): Font size of texts. Default: 13. + win_name (str): The window name. Default: ''. + wait_time (float): Value of waitKey param. + Default: 0. + show (bool): Whether to show the image. + Default: False. + out_file (str or None): The filename to write the image. + Default: None. + + Returns: + img (Tensor): Only if not `show` or `out_file`. + """ + img = mmcv.imread(img) + img = img.copy() + pan_results = result['pan_results'] + # keep objects ahead + ids = np.unique(pan_results)[::-1] + legal_indices = ids != self.num_classes # for VOID label + ids = ids[legal_indices] + labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) + segms = (pan_results[None] == ids[:, None, None]) + + # if out_file specified, do not show image in window + if out_file is not None: + show = False + # draw bounding boxes + img = imshow_det_bboxes( + img, + segms=segms, + labels=labels, + class_names=self.CLASSES, + bbox_color=bbox_color, + text_color=text_color, + mask_color=mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + + if not (show or out_file): + return img diff --git a/downstream/mmdetection/mmdet/models/detectors/point_rend.py b/downstream/mmdetection/mmdet/models/detectors/point_rend.py new file mode 100644 index 0000000..90eb4d4 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/point_rend.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class PointRend(TwoStageDetector): + """PointRend: Image Segmentation as Rendering + + This detector is the implementation of + `PointRend `_. 
+ + """ + + def __init__(self, + backbone, + rpn_head, + roi_head, + train_cfg, + test_cfg, + neck=None, + pretrained=None, + init_cfg=None): + super(PointRend, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/queryinst.py b/downstream/mmdetection/mmdet/models/detectors/queryinst.py new file mode 100644 index 0000000..5fc216c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/queryinst.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .sparse_rcnn import SparseRCNN + + +@DETECTORS.register_module() +class QueryInst(SparseRCNN): + r"""Implementation of + `Instances as Queries `_""" + + def __init__(self, + backbone, + rpn_head, + roi_head, + train_cfg, + test_cfg, + neck=None, + pretrained=None, + init_cfg=None): + super(QueryInst, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/reppoints_detector.py b/downstream/mmdetection/mmdet/models/detectors/reppoints_detector.py new file mode 100644 index 0000000..f1986cd --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/reppoints_detector.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class RepPointsDetector(SingleStageDetector): + """RepPoints: Point Set Representation for Object Detection. + + This detector is the implementation of: + - RepPoints detector (https://arxiv.org/pdf/1904.11490) + """ + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(RepPointsDetector, + self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, + pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/retinanet.py b/downstream/mmdetection/mmdet/models/detectors/retinanet.py new file mode 100644 index 0000000..c28545a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/retinanet.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class RetinaNet(SingleStageDetector): + """Implementation of `RetinaNet `_""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/rpn.py b/downstream/mmdetection/mmdet/models/detectors/rpn.py new file mode 100644 index 0000000..6ec326b --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/rpn.py @@ -0,0 +1,159 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import warnings + +import mmcv +import torch +from mmcv.image import tensor2imgs + +from mmdet.core import bbox_mapping +from ..builder import DETECTORS, build_backbone, build_head, build_neck +from .base import BaseDetector + + +@DETECTORS.register_module() +class RPN(BaseDetector): + """Implementation of Region Proposal Network.""" + + def __init__(self, + backbone, + neck, + rpn_head, + train_cfg, + test_cfg, + pretrained=None, + init_cfg=None): + super(RPN, self).__init__(init_cfg) + if pretrained: + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + backbone.pretrained = pretrained + self.backbone = build_backbone(backbone) + self.neck = build_neck(neck) if neck is not None else None + rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None + rpn_head.update(train_cfg=rpn_train_cfg) + rpn_head.update(test_cfg=test_cfg.rpn) + self.rpn_head = build_head(rpn_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def extract_feat(self, img): + """Extract features. + + Args: + img (torch.Tensor): Image tensor with shape (n, c, h ,w). + + Returns: + list[torch.Tensor]: Multi-level features that may have + different resolutions. + """ + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_dummy(self, img): + """Dummy forward function.""" + x = self.extract_feat(img) + rpn_outs = self.rpn_head(x) + return rpn_outs + + def forward_train(self, + img, + img_metas, + gt_bboxes=None, + gt_bboxes_ignore=None): + """ + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): A List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmdet.datasets.pipelines.Collect`. + gt_bboxes (list[Tensor]): Each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_bboxes_ignore (None | list[Tensor]): Specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + if (isinstance(self.train_cfg.rpn, dict) + and self.train_cfg.rpn.get('debug', False)): + self.rpn_head.debug_imgs = tensor2imgs(img) + + x = self.extract_feat(img) + losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None, + gt_bboxes_ignore) + return losses + + def simple_test(self, img, img_metas, rescale=False): + """Test function without test time augmentation. + + Args: + imgs (list[torch.Tensor]): List of multiple images + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[np.ndarray]: proposals + """ + x = self.extract_feat(img) + # get origin input shape to onnx dynamic input shape + if torch.onnx.is_in_onnx_export(): + img_shape = torch._shape_as_tensor(img)[2:] + img_metas[0]['img_shape_for_onnx'] = img_shape + proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) + if rescale: + for proposals, meta in zip(proposal_list, img_metas): + proposals[:, :4] /= proposals.new_tensor(meta['scale_factor']) + if torch.onnx.is_in_onnx_export(): + return proposal_list + + return [proposal.cpu().numpy() for proposal in proposal_list] + + def aug_test(self, imgs, img_metas, rescale=False): + """Test function with test time augmentation. 
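+
+        If ``rescale`` is False, the returned proposals will fit the scale
+        of ``imgs[0]``.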
+ + Args: + imgs (list[torch.Tensor]): List of multiple images + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[np.ndarray]: proposals + """ + proposal_list = self.rpn_head.aug_test_rpn( + self.extract_feats(imgs), img_metas) + if not rescale: + for proposals, img_meta in zip(proposal_list, img_metas[0]): + img_shape = img_meta['img_shape'] + scale_factor = img_meta['scale_factor'] + flip = img_meta['flip'] + flip_direction = img_meta['flip_direction'] + proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape, + scale_factor, flip, + flip_direction) + return [proposal.cpu().numpy() for proposal in proposal_list] + + def show_result(self, data, result, top_k=20, **kwargs): + """Show RPN proposals on the image. + + Args: + data (str or np.ndarray): Image filename or loaded image. + result (Tensor or tuple): The results to draw over `img` + bbox_result or (bbox_result, segm_result). + top_k (int): Plot the first k bboxes only + if set positive. Default: 20 + + Returns: + np.ndarray: The image with bboxes drawn on it. + """ + if kwargs is not None: + kwargs.pop('score_thr', None) + kwargs.pop('text_color', None) + kwargs['colors'] = kwargs.pop('bbox_color', 'green') + mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs) diff --git a/downstream/mmdetection/mmdet/models/detectors/scnet.py b/downstream/mmdetection/mmdet/models/detectors/scnet.py new file mode 100644 index 0000000..a361d81 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/scnet.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .cascade_rcnn import CascadeRCNN + + +@DETECTORS.register_module() +class SCNet(CascadeRCNN): + """Implementation of `SCNet `_""" + + def __init__(self, **kwargs): + super(SCNet, self).__init__(**kwargs) diff --git a/downstream/mmdetection/mmdet/models/detectors/single_stage.py b/downstream/mmdetection/mmdet/models/detectors/single_stage.py new file mode 100644 index 0000000..c375c72 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/single_stage.py @@ -0,0 +1,171 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch + +from mmdet.core import bbox2result +from ..builder import DETECTORS, build_backbone, build_head, build_neck +from .base import BaseDetector + + +@DETECTORS.register_module() +class SingleStageDetector(BaseDetector): + """Base class for single-stage detectors. + + Single-stage detectors directly and densely predict bounding boxes on the + output features of the backbone+neck. + """ + + def __init__(self, + backbone, + neck=None, + bbox_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(SingleStageDetector, self).__init__(init_cfg) + if pretrained: + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + backbone.pretrained = pretrained + self.backbone = build_backbone(backbone) + if neck is not None: + self.neck = build_neck(neck) + bbox_head.update(train_cfg=train_cfg) + bbox_head.update(test_cfg=test_cfg) + self.bbox_head = build_head(bbox_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def extract_feat(self, img): + """Directly extract features from the backbone+neck.""" + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_dummy(self, img): + """Used for computing network flops. 
+ + See `mmdetection/tools/analysis_tools/get_flops.py` + """ + x = self.extract_feat(img) + outs = self.bbox_head(x) + return outs + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None): + """ + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): A List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmdet.datasets.pipelines.Collect`. + gt_bboxes (list[Tensor]): Each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): Class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): Specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + super(SingleStageDetector, self).forward_train(img, img_metas) + x = self.extract_feat(img) + losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes, + gt_labels, gt_bboxes_ignore) + return losses + + def simple_test(self, img, img_metas, rescale=False): + """Test function without test-time augmentation. + + Args: + img (torch.Tensor): Images with shape (N, C, H, W). + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[list[np.ndarray]]: BBox results of each image and classes. + The outer list corresponds to each image. The inner list + corresponds to each class. + """ + feat = self.extract_feat(img) + results_list = self.bbox_head.simple_test( + feat, img_metas, rescale=rescale) + bbox_results = [ + bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) + for det_bboxes, det_labels in results_list + ] + return bbox_results + + def aug_test(self, imgs, img_metas, rescale=False): + """Test function with test time augmentation. + + Args: + imgs (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[list[np.ndarray]]: BBox results of each image and classes. + The outer list corresponds to each image. The inner list + corresponds to each class. + """ + assert hasattr(self.bbox_head, 'aug_test'), \ + f'{self.bbox_head.__class__.__name__}' \ + ' does not support test-time augmentation' + + feats = self.extract_feats(imgs) + results_list = self.bbox_head.aug_test( + feats, img_metas, rescale=rescale) + bbox_results = [ + bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) + for det_bboxes, det_labels in results_list + ] + return bbox_results + + def onnx_export(self, img, img_metas, with_nms=True): + """Test function without test time augmentation. + + Args: + img (torch.Tensor): input images. + img_metas (list[dict]): List of image information. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. 
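+
+        Note:
+            The spatial size is read back as a tensor via
+            ``torch._shape_as_tensor`` and stored in
+            ``img_metas[0]['img_shape_for_onnx']`` / ``'pad_shape_for_onnx'``,
+            so the exported graph keeps the height/width dynamic instead of
+            hard-coding the tracing resolution.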
+ """ + x = self.extract_feat(img) + outs = self.bbox_head(x) + # get origin input shape to support onnx dynamic shape + + # get shape as tensor + img_shape = torch._shape_as_tensor(img)[2:] + img_metas[0]['img_shape_for_onnx'] = img_shape + # get pad input shape to support onnx dynamic shape for exporting + # `CornerNet` and `CentripetalNet`, which 'pad_shape' is used + # for inference + img_metas[0]['pad_shape_for_onnx'] = img_shape + + if len(outs) == 2: + # add dummy score_factor + outs = (*outs, None) + # TODO Can we change to `get_bboxes` when `onnx_export` fail + det_bboxes, det_labels = self.bbox_head.onnx_export( + *outs, img_metas, with_nms=with_nms) + + return det_bboxes, det_labels diff --git a/downstream/mmdetection/mmdet/models/detectors/single_stage_instance_seg.py b/downstream/mmdetection/mmdet/models/detectors/single_stage_instance_seg.py new file mode 100644 index 0000000..239b669 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/single_stage_instance_seg.py @@ -0,0 +1,363 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +import mmcv +import numpy as np +import torch + +from mmdet.core.visualization.image import imshow_det_bboxes +from ..builder import DETECTORS, build_backbone, build_head, build_neck +from .base import BaseDetector + +INF = 1e8 + + +@DETECTORS.register_module() +class SingleStageInstanceSegmentor(BaseDetector): + """Base class for single-stage instance segmentors.""" + + def __init__(self, + backbone, + neck=None, + bbox_head=None, + mask_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + + if pretrained: + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + backbone.pretrained = pretrained + super(SingleStageInstanceSegmentor, self).__init__(init_cfg=init_cfg) + self.backbone = build_backbone(backbone) + if neck is not None: + self.neck = build_neck(neck) + else: + self.neck = None + if bbox_head is not None: + bbox_head.update(train_cfg=copy.deepcopy(train_cfg)) + bbox_head.update(test_cfg=copy.deepcopy(test_cfg)) + self.bbox_head = build_head(bbox_head) + else: + self.bbox_head = None + + assert mask_head, f'`mask_head` must ' \ + f'be implemented in {self.__class__.__name__}' + mask_head.update(train_cfg=copy.deepcopy(train_cfg)) + mask_head.update(test_cfg=copy.deepcopy(test_cfg)) + self.mask_head = build_head(mask_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def extract_feat(self, img): + """Directly extract features from the backbone and neck.""" + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_dummy(self, img): + """Used for computing network flops. + + See `mmdetection/tools/analysis_tools/get_flops.py` + """ + raise NotImplementedError( + f'`forward_dummy` is not implemented in {self.__class__.__name__}') + + def forward_train(self, + img, + img_metas, + gt_masks, + gt_labels, + gt_bboxes=None, + gt_bboxes_ignore=None, + **kwargs): + """ + Args: + img (Tensor): Input images of shape (B, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): A List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmdet.datasets.pipelines.Collect`. + gt_masks (list[:obj:`BitmapMasks`] | None) : The segmentation + masks for each box. 
+ gt_labels (list[Tensor]): Class indices corresponding to each box + gt_bboxes (list[Tensor]): Each item is the truth boxes + of each image in [tl_x, tl_y, br_x, br_y] format. + Default: None. + gt_bboxes_ignore (list[Tensor] | None): Specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + + gt_masks = [ + gt_mask.to_tensor(dtype=torch.bool, device=img.device) + for gt_mask in gt_masks + ] + x = self.extract_feat(img) + losses = dict() + + # CondInst and YOLACT have bbox_head + if self.bbox_head: + # bbox_head_preds is a tuple + bbox_head_preds = self.bbox_head(x) + # positive_infos is a list of obj:`InstanceData` + # It contains the information about the positive samples + # CondInst, YOLACT + det_losses, positive_infos = self.bbox_head.loss( + *bbox_head_preds, + gt_bboxes=gt_bboxes, + gt_labels=gt_labels, + gt_masks=gt_masks, + img_metas=img_metas, + gt_bboxes_ignore=gt_bboxes_ignore, + **kwargs) + losses.update(det_losses) + else: + positive_infos = None + + mask_loss = self.mask_head.forward_train( + x, + gt_labels, + gt_masks, + img_metas, + positive_infos=positive_infos, + gt_bboxes=gt_bboxes, + gt_bboxes_ignore=gt_bboxes_ignore, + **kwargs) + # avoid loss override + assert not set(mask_loss.keys()) & set(losses.keys()) + + losses.update(mask_loss) + return losses + + def simple_test(self, img, img_metas, rescale=False): + """Test function without test-time augmentation. + + Args: + img (torch.Tensor): Images with shape (B, C, H, W). + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list(tuple): Formatted bbox and mask results of multiple \ + images. The outer list corresponds to each image. \ + Each tuple contains two type of results of single image: + + - bbox_results (list[np.ndarray]): BBox results of + single image. The list corresponds to each class. + each ndarray has a shape (N, 5), N is the number of + bboxes with this category, and last dimension + 5 arrange as (x1, y1, x2, y2, scores). + - mask_results (list[np.ndarray]): Mask results of + single image. The list corresponds to each class. + each ndarray has a shape (N, img_h, img_w), N + is the number of masks with this category. + """ + feat = self.extract_feat(img) + if self.bbox_head: + outs = self.bbox_head(feat) + # results_list is list[obj:`InstanceData`] + results_list = self.bbox_head.get_results( + *outs, img_metas=img_metas, cfg=self.test_cfg, rescale=rescale) + else: + results_list = None + + results_list = self.mask_head.simple_test( + feat, img_metas, rescale=rescale, instances_list=results_list) + + format_results_list = [] + for results in results_list: + format_results_list.append(self.format_results(results)) + + return format_results_list + + def format_results(self, results): + """Format the model predictions according to the interface with + dataset. + + Args: + results (:obj:`InstanceData`): Processed + results of single images. Usually contains + following keys. + + - scores (Tensor): Classification scores, has shape + (num_instance,) + - labels (Tensor): Has shape (num_instances,). + - masks (Tensor): Processed mask results, has + shape (num_instances, h, w). + + Returns: + tuple: Formatted bbox and mask results.. It contains two items: + + - bbox_results (list[np.ndarray]): BBox results of + single image. The list corresponds to each class. 
+ each ndarray has a shape (N, 5), N is the number of + bboxes with this category, and last dimension + 5 arrange as (x1, y1, x2, y2, scores). + - mask_results (list[np.ndarray]): Mask results of + single image. The list corresponds to each class. + each ndarray has shape (N, img_h, img_w), N + is the number of masks with this category. + """ + data_keys = results.keys() + assert 'scores' in data_keys + assert 'labels' in data_keys + + assert 'masks' in data_keys, \ + 'results should contain ' \ + 'masks when format the results ' + mask_results = [[] for _ in range(self.mask_head.num_classes)] + + num_masks = len(results) + + if num_masks == 0: + bbox_results = [ + np.zeros((0, 5), dtype=np.float32) + for _ in range(self.mask_head.num_classes) + ] + return bbox_results, mask_results + + labels = results.labels.detach().cpu().numpy() + + if 'bboxes' not in results: + # create dummy bbox results to store the scores + results.bboxes = results.scores.new_zeros(len(results), 4) + + det_bboxes = torch.cat([results.bboxes, results.scores[:, None]], + dim=-1) + det_bboxes = det_bboxes.detach().cpu().numpy() + bbox_results = [ + det_bboxes[labels == i, :] + for i in range(self.mask_head.num_classes) + ] + + masks = results.masks.detach().cpu().numpy() + + for idx in range(num_masks): + mask = masks[idx] + mask_results[labels[idx]].append(mask) + + return bbox_results, mask_results + + def aug_test(self, imgs, img_metas, rescale=False): + raise NotImplementedError + + def show_result(self, + img, + result, + score_thr=0.3, + bbox_color=(72, 101, 241), + text_color=(72, 101, 241), + mask_color=None, + thickness=2, + font_size=13, + win_name='', + show=False, + wait_time=0, + out_file=None): + """Draw `result` over `img`. + + Args: + img (str or Tensor): The image to be displayed. + result (tuple): Format bbox and mask results. + It contains two items: + + - bbox_results (list[np.ndarray]): BBox results of + single image. The list corresponds to each class. + each ndarray has a shape (N, 5), N is the number of + bboxes with this category, and last dimension + 5 arrange as (x1, y1, x2, y2, scores). + - mask_results (list[np.ndarray]): Mask results of + single image. The list corresponds to each class. + each ndarray has shape (N, img_h, img_w), N + is the number of masks with this category. + + score_thr (float, optional): Minimum score of bboxes to be shown. + Default: 0.3. + bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. + The tuple of color should be in BGR order. Default: 'green' + text_color (str or tuple(int) or :obj:`Color`):Color of texts. + The tuple of color should be in BGR order. Default: 'green' + mask_color (None or str or tuple(int) or :obj:`Color`): + Color of masks. The tuple of color should be in BGR order. + Default: None + thickness (int): Thickness of lines. Default: 2 + font_size (int): Font size of texts. Default: 13 + win_name (str): The window name. Default: '' + wait_time (float): Value of waitKey param. + Default: 0. + show (bool): Whether to show the image. + Default: False. + out_file (str or None): The filename to write the image. + Default: None. 
+ + Returns: + img (Tensor): Only if not `show` or `out_file` + """ + + assert isinstance(result, tuple) + bbox_result, mask_result = result + bboxes = np.vstack(bbox_result) + img = mmcv.imread(img) + img = img.copy() + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + if len(labels) == 0: + bboxes = np.zeros([0, 5]) + masks = np.zeros([0, 0, 0]) + # draw segmentation masks + else: + masks = mmcv.concat_list(mask_result) + + if isinstance(masks[0], torch.Tensor): + masks = torch.stack(masks, dim=0).detach().cpu().numpy() + else: + masks = np.stack(masks, axis=0) + # dummy bboxes + if bboxes[:, :4].sum() == 0: + num_masks = len(bboxes) + x_any = masks.any(axis=1) + y_any = masks.any(axis=2) + for idx in range(num_masks): + x = np.where(x_any[idx, :])[0] + y = np.where(y_any[idx, :])[0] + if len(x) > 0 and len(y) > 0: + bboxes[idx, :4] = np.array( + [x[0], y[0], x[-1] + 1, y[-1] + 1], + dtype=np.float32) + # if out_file specified, do not show image in window + if out_file is not None: + show = False + # draw bounding boxes + img = imshow_det_bboxes( + img, + bboxes, + labels, + masks, + class_names=self.CLASSES, + score_thr=score_thr, + bbox_color=bbox_color, + text_color=text_color, + mask_color=mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + + if not (show or out_file): + return img diff --git a/downstream/mmdetection/mmdet/models/detectors/solo.py b/downstream/mmdetection/mmdet/models/detectors/solo.py new file mode 100644 index 0000000..df6f6de --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/solo.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage_instance_seg import SingleStageInstanceSegmentor + + +@DETECTORS.register_module() +class SOLO(SingleStageInstanceSegmentor): + """`SOLO: Segmenting Objects by Locations + `_ + + """ + + def __init__(self, + backbone, + neck=None, + bbox_head=None, + mask_head=None, + train_cfg=None, + test_cfg=None, + init_cfg=None, + pretrained=None): + super().__init__( + backbone=backbone, + neck=neck, + bbox_head=bbox_head, + mask_head=mask_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg, + pretrained=pretrained) diff --git a/downstream/mmdetection/mmdet/models/detectors/solov2.py b/downstream/mmdetection/mmdet/models/detectors/solov2.py new file mode 100644 index 0000000..711fcb4 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/solov2.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
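+# SOLOv2 reuses the SingleStageInstanceSegmentor skeleton unchanged; what
+# distinguishes it from SOLO (e.g. dynamically predicted mask kernels) is
+# expressed in the mask head configuration rather than in this class.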
+from ..builder import DETECTORS +from .single_stage_instance_seg import SingleStageInstanceSegmentor + + +@DETECTORS.register_module() +class SOLOv2(SingleStageInstanceSegmentor): + """`SOLOv2: Dynamic and Fast Instance Segmentation + `_ + + """ + + def __init__(self, + backbone, + neck=None, + bbox_head=None, + mask_head=None, + train_cfg=None, + test_cfg=None, + init_cfg=None, + pretrained=None): + super().__init__( + backbone=backbone, + neck=neck, + bbox_head=bbox_head, + mask_head=mask_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg, + pretrained=pretrained) diff --git a/downstream/mmdetection/mmdet/models/detectors/sparse_rcnn.py b/downstream/mmdetection/mmdet/models/detectors/sparse_rcnn.py new file mode 100644 index 0000000..e90c2a5 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/sparse_rcnn.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module() +class SparseRCNN(TwoStageDetector): + r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with + Learnable Proposals `_""" + + def __init__(self, *args, **kwargs): + super(SparseRCNN, self).__init__(*args, **kwargs) + assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \ + 'do not support external proposals' + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + proposals=None, + **kwargs): + """Forward function of SparseR-CNN and QueryInst in train stage. + + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. + Typically these should be mean centered and std scaled. + img_metas (list[dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmdet.datasets.pipelines.Collect`. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor): specify which bounding + boxes can be ignored when computing the loss. + gt_masks (List[Tensor], optional) : Segmentation masks for + each box. This is required to train QueryInst. + proposals (List[Tensor], optional): override rpn proposals with + custom proposals. Use when `with_rpn` is False. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + + assert proposals is None, 'Sparse R-CNN and QueryInst ' \ + 'do not support external proposals' + + x = self.extract_feat(img) + proposal_boxes, proposal_features, imgs_whwh = \ + self.rpn_head.forward_train(x, img_metas) + roi_losses = self.roi_head.forward_train( + x, + proposal_boxes, + proposal_features, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=gt_bboxes_ignore, + gt_masks=gt_masks, + imgs_whwh=imgs_whwh) + return roi_losses + + def simple_test(self, img, img_metas, rescale=False): + """Test function without test time augmentation. + + Args: + imgs (list[torch.Tensor]): List of multiple images + img_metas (list[dict]): List of image information. + rescale (bool): Whether to rescale the results. + Defaults to False. + + Returns: + list[list[np.ndarray]]: BBox results of each image and classes. + The outer list corresponds to each image. The inner list + corresponds to each class. 
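+
+        Note:
+            ``self.rpn_head`` is not an anchor-based RPN here: it returns a
+            fixed set of learnable proposal boxes, their proposal features
+            and the per-image ``imgs_whwh`` scales, which the RoI head then
+            refines iteratively.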
+ """ + x = self.extract_feat(img) + proposal_boxes, proposal_features, imgs_whwh = \ + self.rpn_head.simple_test_rpn(x, img_metas) + results = self.roi_head.simple_test( + x, + proposal_boxes, + proposal_features, + img_metas, + imgs_whwh=imgs_whwh, + rescale=rescale) + return results + + def forward_dummy(self, img): + """Used for computing network flops. + + See `mmdetection/tools/analysis_tools/get_flops.py` + """ + # backbone + x = self.extract_feat(img) + # rpn + num_imgs = len(img) + dummy_img_metas = [ + dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs) + ] + proposal_boxes, proposal_features, imgs_whwh = \ + self.rpn_head.simple_test_rpn(x, dummy_img_metas) + # roi_head + roi_outs = self.roi_head.forward_dummy(x, proposal_boxes, + proposal_features, + dummy_img_metas) + return roi_outs diff --git a/downstream/mmdetection/mmdet/models/detectors/tood.py b/downstream/mmdetection/mmdet/models/detectors/tood.py new file mode 100644 index 0000000..7dd18c3 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/tood.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class TOOD(SingleStageDetector): + r"""Implementation of `TOOD: Task-aligned One-stage Object Detection. + `_.""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) + + def set_epoch(self, epoch): + self.bbox_head.epoch = epoch diff --git a/downstream/mmdetection/mmdet/models/detectors/trident_faster_rcnn.py b/downstream/mmdetection/mmdet/models/detectors/trident_faster_rcnn.py new file mode 100644 index 0000000..fb26168 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/trident_faster_rcnn.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .faster_rcnn import FasterRCNN + + +@DETECTORS.register_module() +class TridentFasterRCNN(FasterRCNN): + """Implementation of `TridentNet `_""" + + def __init__(self, + backbone, + rpn_head, + roi_head, + train_cfg, + test_cfg, + neck=None, + pretrained=None, + init_cfg=None): + + super(TridentFasterRCNN, self).__init__( + backbone=backbone, + neck=neck, + rpn_head=rpn_head, + roi_head=roi_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) + assert self.backbone.num_branch == self.roi_head.num_branch + assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx + self.num_branch = self.backbone.num_branch + self.test_branch_idx = self.backbone.test_branch_idx + + def simple_test(self, img, img_metas, proposals=None, rescale=False): + """Test without augmentation.""" + assert self.with_bbox, 'Bbox head must be implemented.' + x = self.extract_feat(img) + if proposals is None: + num_branch = (self.num_branch if self.test_branch_idx == -1 else 1) + trident_img_metas = img_metas * num_branch + proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas) + else: + proposal_list = proposals + # TODO: Fix trident_img_metas undefined errors + # when proposals is specified + return self.roi_head.simple_test( + x, proposal_list, trident_img_metas, rescale=rescale) + + def aug_test(self, imgs, img_metas, rescale=False): + """Test with augmentations. + + If rescale is False, then returned bboxes and masks will fit the scale + of imgs[0]. 
+ """ + x = self.extract_feats(imgs) + num_branch = (self.num_branch if self.test_branch_idx == -1 else 1) + trident_img_metas = [img_metas * num_branch for img_metas in img_metas] + proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas) + return self.roi_head.aug_test( + x, proposal_list, img_metas, rescale=rescale) + + def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs): + """make copies of img and gts to fit multi-branch.""" + trident_gt_bboxes = tuple(gt_bboxes * self.num_branch) + trident_gt_labels = tuple(gt_labels * self.num_branch) + trident_img_metas = tuple(img_metas * self.num_branch) + + return super(TridentFasterRCNN, + self).forward_train(img, trident_img_metas, + trident_gt_bboxes, trident_gt_labels) diff --git a/downstream/mmdetection/mmdet/models/detectors/two_stage.py b/downstream/mmdetection/mmdet/models/detectors/two_stage.py new file mode 100644 index 0000000..870e2b8 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/two_stage.py @@ -0,0 +1,211 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch + +from ..builder import DETECTORS, build_backbone, build_head, build_neck +from .base import BaseDetector + + +@DETECTORS.register_module() +class TwoStageDetector(BaseDetector): + """Base class for two-stage detectors. + + Two-stage detectors typically consisting of a region proposal network and a + task-specific regression head. + """ + + def __init__(self, + backbone, + neck=None, + rpn_head=None, + roi_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(TwoStageDetector, self).__init__(init_cfg) + if pretrained: + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + backbone.pretrained = pretrained + self.backbone = build_backbone(backbone) + + if neck is not None: + self.neck = build_neck(neck) + + if rpn_head is not None: + rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None + rpn_head_ = rpn_head.copy() + rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn) + self.rpn_head = build_head(rpn_head_) + + if roi_head is not None: + # update train and test cfg here for now + # TODO: refactor assigner & sampler + rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None + roi_head.update(train_cfg=rcnn_train_cfg) + roi_head.update(test_cfg=test_cfg.rcnn) + roi_head.pretrained = pretrained + self.roi_head = build_head(roi_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + @property + def with_rpn(self): + """bool: whether the detector has RPN""" + return hasattr(self, 'rpn_head') and self.rpn_head is not None + + @property + def with_roi_head(self): + """bool: whether the detector has a RoI head""" + return hasattr(self, 'roi_head') and self.roi_head is not None + + def extract_feat(self, img): + """Directly extract features from the backbone+neck.""" + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_dummy(self, img): + """Used for computing network flops. 
+ + See `mmdetection/tools/analysis_tools/get_flops.py` + """ + outs = () + # backbone + x = self.extract_feat(img) + # rpn + if self.with_rpn: + rpn_outs = self.rpn_head(x) + outs = outs + (rpn_outs, ) + proposals = torch.randn(1000, 4).to(img.device) + # roi_head + roi_outs = self.roi_head.forward_dummy(x, proposals) + outs = outs + (roi_outs, ) + return outs + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + proposals=None, + **kwargs): + """ + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. + Typically these should be mean centered and std scaled. + + img_metas (list[dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + + gt_labels (list[Tensor]): class indices corresponding to each box + + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + gt_masks (None | Tensor) : true segmentation masks for each box + used if the architecture supports a segmentation task. + + proposals : override rpn proposals with custom proposals. Use when + `with_rpn` is False. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + x = self.extract_feat(img) + + losses = dict() + + # RPN forward and loss + if self.with_rpn: + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + rpn_losses, proposal_list = self.rpn_head.forward_train( + x, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=gt_bboxes_ignore, + proposal_cfg=proposal_cfg, + **kwargs) + losses.update(rpn_losses) + else: + proposal_list = proposals + + roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, + gt_bboxes, gt_labels, + gt_bboxes_ignore, gt_masks, + **kwargs) + losses.update(roi_losses) + + return losses + + async def async_simple_test(self, + img, + img_meta, + proposals=None, + rescale=False): + """Async test without augmentation.""" + assert self.with_bbox, 'Bbox head must be implemented.' + x = self.extract_feat(img) + + if proposals is None: + proposal_list = await self.rpn_head.async_simple_test_rpn( + x, img_meta) + else: + proposal_list = proposals + + return await self.roi_head.async_simple_test( + x, proposal_list, img_meta, rescale=rescale) + + def simple_test(self, img, img_metas, proposals=None, rescale=False): + """Test without augmentation.""" + + assert self.with_bbox, 'Bbox head must be implemented.' + x = self.extract_feat(img) + if proposals is None: + proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) + else: + proposal_list = proposals + + return self.roi_head.simple_test( + x, proposal_list, img_metas, rescale=rescale) + + def aug_test(self, imgs, img_metas, rescale=False): + """Test with augmentations. + + If rescale is False, then returned bboxes and masks will fit the scale + of imgs[0]. 
+ """ + x = self.extract_feats(imgs) + proposal_list = self.rpn_head.aug_test_rpn(x, img_metas) + return self.roi_head.aug_test( + x, proposal_list, img_metas, rescale=rescale) + + def onnx_export(self, img, img_metas): + + img_shape = torch._shape_as_tensor(img)[2:] + img_metas[0]['img_shape_for_onnx'] = img_shape + x = self.extract_feat(img) + proposals = self.rpn_head.onnx_export(x, img_metas) + if hasattr(self.roi_head, 'onnx_export'): + return self.roi_head.onnx_export(x, proposals, img_metas) + else: + raise NotImplementedError( + f'{self.__class__.__name__} can not ' + f'be exported to ONNX. Please refer to the ' + f'list of supported models,' + f'https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx' # noqa E501 + ) diff --git a/downstream/mmdetection/mmdet/models/detectors/vfnet.py b/downstream/mmdetection/mmdet/models/detectors/vfnet.py new file mode 100644 index 0000000..38ddcda --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/vfnet.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class VFNet(SingleStageDetector): + """Implementation of `VarifocalNet + (VFNet).`_""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) diff --git a/downstream/mmdetection/mmdet/models/detectors/yolact.py b/downstream/mmdetection/mmdet/models/detectors/yolact.py new file mode 100644 index 0000000..4ddea0b --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/yolact.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.core import bbox2result +from ..builder import DETECTORS, build_head +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class YOLACT(SingleStageDetector): + """Implementation of `YOLACT `_""" + + def __init__(self, + backbone, + neck, + bbox_head, + segm_head, + mask_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) + self.segm_head = build_head(segm_head) + self.mask_head = build_head(mask_head) + + def forward_dummy(self, img): + """Used for computing network flops. + + See `mmdetection/tools/analysis_tools/get_flops.py` + """ + feat = self.extract_feat(img) + bbox_outs = self.bbox_head(feat) + prototypes = self.mask_head.forward_dummy(feat[0]) + return (bbox_outs, prototypes) + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None): + """ + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. + Typically these should be mean centered and std scaled. + img_metas (list[dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
+ gt_labels (list[Tensor]): class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + gt_masks (None | Tensor) : true segmentation masks for each box + used if the architecture supports a segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # convert Bitmap mask or Polygon Mask to Tensor here + gt_masks = [ + gt_mask.to_tensor(dtype=torch.uint8, device=img.device) + for gt_mask in gt_masks + ] + + x = self.extract_feat(img) + + cls_score, bbox_pred, coeff_pred = self.bbox_head(x) + bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels, + img_metas) + losses, sampling_results = self.bbox_head.loss( + *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + + segm_head_outs = self.segm_head(x[0]) + loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels) + losses.update(loss_segm) + + mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas, + sampling_results) + loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes, + img_metas, sampling_results) + losses.update(loss_mask) + + # check NaN and Inf + for loss_name in losses.keys(): + assert torch.isfinite(torch.stack(losses[loss_name]))\ + .all().item(), '{} becomes infinite or NaN!'\ + .format(loss_name) + + return losses + + def simple_test(self, img, img_metas, rescale=False): + """Test function without test-time augmentation.""" + feat = self.extract_feat(img) + det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test( + feat, img_metas, rescale=rescale) + bbox_results = [ + bbox2result(det_bbox, det_label, self.bbox_head.num_classes) + for det_bbox, det_label in zip(det_bboxes, det_labels) + ] + + segm_results = self.mask_head.simple_test( + feat, + det_bboxes, + det_labels, + det_coeffs, + img_metas, + rescale=rescale) + + return list(zip(bbox_results, segm_results)) + + def aug_test(self, imgs, img_metas, rescale=False): + """Test with augmentations.""" + raise NotImplementedError( + 'YOLACT does not support test-time augmentation') diff --git a/downstream/mmdetection/mmdet/models/detectors/yolo.py b/downstream/mmdetection/mmdet/models/detectors/yolo.py new file mode 100644 index 0000000..0ccd417 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/yolo.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Copyright (c) 2019 Western Digital Corporation or its affiliates. +import torch + +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class YOLOV3(SingleStageDetector): + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) + + def onnx_export(self, img, img_metas): + """Test function for exporting to ONNX, without test time augmentation. + + Args: + img (torch.Tensor): input images. + img_metas (list[dict]): List of image information. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. 
+ """ + x = self.extract_feat(img) + outs = self.bbox_head.forward(x) + # get shape as tensor + img_shape = torch._shape_as_tensor(img)[2:] + img_metas[0]['img_shape_for_onnx'] = img_shape + + det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) + + return det_bboxes, det_labels diff --git a/downstream/mmdetection/mmdet/models/detectors/yolof.py b/downstream/mmdetection/mmdet/models/detectors/yolof.py new file mode 100644 index 0000000..6d08d16 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/yolof.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class YOLOF(SingleStageDetector): + r"""Implementation of `You Only Look One-level Feature + `_""" + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained) diff --git a/downstream/mmdetection/mmdet/models/detectors/yolox.py b/downstream/mmdetection/mmdet/models/detectors/yolox.py new file mode 100644 index 0000000..34d51b1 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/detectors/yolox.py @@ -0,0 +1,136 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random + +import torch +import torch.distributed as dist +import torch.nn.functional as F +from mmcv.runner import get_dist_info + +from ...utils import log_img_scale +from ..builder import DETECTORS +from .single_stage import SingleStageDetector + + +@DETECTORS.register_module() +class YOLOX(SingleStageDetector): + r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021 + `_ + + Note: Considering the trade-off between training speed and accuracy, + multi-scale training is temporarily kept. More elegant implementation + will be adopted in the future. + + Args: + backbone (nn.Module): The backbone module. + neck (nn.Module): The neck module. + bbox_head (nn.Module): The bbox head module. + train_cfg (obj:`ConfigDict`, optional): The training config + of YOLOX. Default: None. + test_cfg (obj:`ConfigDict`, optional): The testing config + of YOLOX. Default: None. + pretrained (str, optional): model pretrained path. + Default: None. + input_size (tuple): The model default input image size. The shape + order should be (height, width). Default: (640, 640). + size_multiplier (int): Image size multiplication factor. + Default: 32. + random_size_range (tuple): The multi-scale random range during + multi-scale training. The real training image size will + be multiplied by size_multiplier. Default: (15, 25). + random_size_interval (int): The iter interval of change + image size. Default: 10. + init_cfg (dict, optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None, + input_size=(640, 640), + size_multiplier=32, + random_size_range=(15, 25), + random_size_interval=10, + init_cfg=None): + super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained, init_cfg) + log_img_scale(input_size, skip_square=True) + self.rank, self.world_size = get_dist_info() + self._default_input_size = input_size + self._input_size = input_size + self._random_size_range = random_size_range + self._random_size_interval = random_size_interval + self._size_multiplier = size_multiplier + self._progress_in_iter = 0 + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None): + """ + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): A List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmdet.datasets.pipelines.Collect`. + gt_bboxes (list[Tensor]): Each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): Class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): Specify which bounding + boxes can be ignored when computing the loss. + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + # Multi-scale training + img, gt_bboxes = self._preprocess(img, gt_bboxes) + + losses = super(YOLOX, self).forward_train(img, img_metas, gt_bboxes, + gt_labels, gt_bboxes_ignore) + + # random resizing + if (self._progress_in_iter + 1) % self._random_size_interval == 0: + self._input_size = self._random_resize(device=img.device) + self._progress_in_iter += 1 + + return losses + + def _preprocess(self, img, gt_bboxes): + scale_y = self._input_size[0] / self._default_input_size[0] + scale_x = self._input_size[1] / self._default_input_size[1] + if scale_x != 1 or scale_y != 1: + img = F.interpolate( + img, + size=self._input_size, + mode='bilinear', + align_corners=False) + for gt_bbox in gt_bboxes: + gt_bbox[..., 0::2] = gt_bbox[..., 0::2] * scale_x + gt_bbox[..., 1::2] = gt_bbox[..., 1::2] * scale_y + return img, gt_bboxes + + def _random_resize(self, device): + tensor = torch.LongTensor(2).to(device) + + if self.rank == 0: + size = random.randint(*self._random_size_range) + aspect_ratio = float( + self._default_input_size[1]) / self._default_input_size[0] + size = (self._size_multiplier * size, + self._size_multiplier * int(aspect_ratio * size)) + tensor[0] = size[0] + tensor[1] = size[1] + + if self.world_size > 1: + dist.barrier() + dist.broadcast(tensor, 0) + + input_size = (tensor[0].item(), tensor[1].item()) + return input_size diff --git a/downstream/mmdetection/mmdet/models/losses/__init__.py b/downstream/mmdetection/mmdet/models/losses/__init__.py new file mode 100644 index 0000000..068a54d --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .accuracy import Accuracy, accuracy +from .ae_loss import AssociativeEmbeddingLoss +from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss +from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, + cross_entropy, mask_cross_entropy) +from .dice_loss import DiceLoss +from .focal_loss import FocalLoss, sigmoid_focal_loss +from .gaussian_focal_loss import GaussianFocalLoss +from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss +from .ghm_loss import GHMC, GHMR +from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss, + bounded_iou_loss, iou_loss) +from .kd_loss import KnowledgeDistillationKLDivLoss +from .mse_loss import MSELoss, mse_loss +from .pisa_loss import carl_loss, isr_p +from .seesaw_loss import SeesawLoss +from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss +from .utils import reduce_loss, weight_reduce_loss, weighted_loss +from .varifocal_loss import VarifocalLoss + +__all__ = [ + 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', + 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss', + 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss', + 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss', + 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC', + 'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss', + 'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss', + 'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss', + 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss' +] diff --git a/downstream/mmdetection/mmdet/models/losses/accuracy.py b/downstream/mmdetection/mmdet/models/losses/accuracy.py new file mode 100644 index 0000000..fe765a3 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/accuracy.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch.nn as nn + + +@mmcv.jit(coderize=True) +def accuracy(pred, target, topk=1, thresh=None): + """Calculate accuracy according to the prediction and target. + + Args: + pred (torch.Tensor): The model prediction, shape (N, num_class) + target (torch.Tensor): The target of each prediction, shape (N, ) + topk (int | tuple[int], optional): If the predictions in ``topk`` + matches the target, the predictions will be regarded as + correct ones. Defaults to 1. + thresh (float, optional): If not None, predictions with scores under + this threshold are considered incorrect. Default to None. + + Returns: + float | tuple[float]: If the input ``topk`` is a single integer, + the function will return a single float as accuracy. If + ``topk`` is a tuple containing multiple integers, the + function will return a tuple containing accuracies of + each ``topk`` number. + """ + assert isinstance(topk, (int, tuple)) + if isinstance(topk, int): + topk = (topk, ) + return_single = True + else: + return_single = False + + maxk = max(topk) + if pred.size(0) == 0: + accu = [pred.new_tensor(0.) 
for i in range(len(topk))] + return accu[0] if return_single else accu + assert pred.ndim == 2 and target.ndim == 1 + assert pred.size(0) == target.size(0) + assert maxk <= pred.size(1), \ + f'maxk {maxk} exceeds pred dimension {pred.size(1)}' + pred_value, pred_label = pred.topk(maxk, dim=1) + pred_label = pred_label.t() # transpose to shape (maxk, N) + correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) + if thresh is not None: + # Only prediction values larger than thresh are counted as correct + correct = correct & (pred_value > thresh).t() + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / pred.size(0))) + return res[0] if return_single else res + + +class Accuracy(nn.Module): + + def __init__(self, topk=(1, ), thresh=None): + """Module to calculate the accuracy. + + Args: + topk (tuple, optional): The criterion used to calculate the + accuracy. Defaults to (1,). + thresh (float, optional): If not None, predictions with scores + under this threshold are considered incorrect. Default to None. + """ + super().__init__() + self.topk = topk + self.thresh = thresh + + def forward(self, pred, target): + """Forward function to calculate accuracy. + + Args: + pred (torch.Tensor): Prediction of models. + target (torch.Tensor): Target for each prediction. + + Returns: + tuple[float]: The accuracies under different topk criterions. + """ + return accuracy(pred, target, self.topk, self.thresh) diff --git a/downstream/mmdetection/mmdet/models/losses/ae_loss.py b/downstream/mmdetection/mmdet/models/losses/ae_loss.py new file mode 100644 index 0000000..5c6da22 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/ae_loss.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES + + +@mmcv.jit(derivate=True, coderize=True) +def ae_loss_per_image(tl_preds, br_preds, match): + """Associative Embedding Loss in one image. + + Associative Embedding Loss including two parts: pull loss and push loss. + Pull loss makes embedding vectors from same object closer to each other. + Push loss distinguish embedding vector from different objects, and makes + the gap between them is large enough. + + During computing, usually there are 3 cases: + - no object in image: both pull loss and push loss will be 0. + - one object in image: push loss will be 0 and pull loss is computed + by the two corner of the only object. + - more than one objects in image: pull loss is computed by corner pairs + from each object, push loss is computed by each object with all + other objects. We use confusion matrix with 0 in diagonal to + compute the push loss. + + Args: + tl_preds (tensor): Embedding feature map of left-top corner. + br_preds (tensor): Embedding feature map of bottim-right corner. + match (list): Downsampled coordinates pair of each ground truth box. + """ + + tl_list, br_list, me_list = [], [], [] + if len(match) == 0: # no object in image + pull_loss = tl_preds.sum() * 0. + push_loss = tl_preds.sum() * 0. 
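The top-k computation in `accuracy` above can be traced by hand on a toy batch; the logits and labels below are made up for illustration.

```python
# Toy trace of the top-k computation in `accuracy` above (made-up logits).
import torch

pred = torch.tensor([[0.1, 0.6, 0.3],    # top-1 = class 1
                     [0.7, 0.2, 0.1],    # top-1 = class 0
                     [0.2, 0.3, 0.5]])   # top-1 = class 2
target = torch.tensor([1, 1, 2])

_, pred_label = pred.topk(2, dim=1)                   # (N, maxk)
correct = pred_label.t().eq(target.view(1, -1))       # (maxk, N)
top1 = correct[:1].reshape(-1).float().sum() * 100.0 / pred.size(0)
top2 = correct[:2].reshape(-1).float().sum() * 100.0 / pred.size(0)
print(top1.item(), top2.item())   # ~66.67, 100.0 -- same as accuracy(pred, target, topk=(1, 2))
```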
+ else: + for m in match: + [tl_y, tl_x], [br_y, br_x] = m + tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1) + br_e = br_preds[:, br_y, br_x].view(-1, 1) + tl_list.append(tl_e) + br_list.append(br_e) + me_list.append((tl_e + br_e) / 2.0) + + tl_list = torch.cat(tl_list) + br_list = torch.cat(br_list) + me_list = torch.cat(me_list) + + assert tl_list.size() == br_list.size() + + # N is object number in image, M is dimension of embedding vector + N, M = tl_list.size() + + pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2) + pull_loss = pull_loss.sum() / N + + margin = 1 # exp setting of CornerNet, details in section 3.3 of paper + + # confusion matrix of push loss + conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list + conf_weight = 1 - torch.eye(N).type_as(me_list) + conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs()) + + if N > 1: # more than one object in current image + push_loss = F.relu(conf_mat).sum() / (N * (N - 1)) + else: + push_loss = tl_preds.sum() * 0. + + return pull_loss, push_loss + + +@LOSSES.register_module() +class AssociativeEmbeddingLoss(nn.Module): + """Associative Embedding Loss. + + More details can be found in + `Associative Embedding `_ and + `CornerNet `_ . + Code is modified from `kp_utils.py `_ # noqa: E501 + + Args: + pull_weight (float): Loss weight for corners from same object. + push_weight (float): Loss weight for corners from different object. + """ + + def __init__(self, pull_weight=0.25, push_weight=0.25): + super(AssociativeEmbeddingLoss, self).__init__() + self.pull_weight = pull_weight + self.push_weight = push_weight + + def forward(self, pred, target, match): + """Forward function.""" + batch = pred.size(0) + pull_all, push_all = 0.0, 0.0 + for i in range(batch): + pull, push = ae_loss_per_image(pred[i], target[i], match[i]) + + pull_all += self.pull_weight * pull + push_all += self.push_weight * push + + return pull_all, push_all diff --git a/downstream/mmdetection/mmdet/models/losses/balanced_l1_loss.py b/downstream/mmdetection/mmdet/models/losses/balanced_l1_loss.py new file mode 100644 index 0000000..8500345 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/balanced_l1_loss.py @@ -0,0 +1,124 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weighted_loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def balanced_l1_loss(pred, + target, + beta=1.0, + alpha=0.5, + gamma=1.5, + reduction='mean'): + """Calculate balanced L1 loss. + + Please see the `Libra R-CNN `_ + + Args: + pred (torch.Tensor): The prediction with shape (N, 4). + target (torch.Tensor): The learning target of the prediction with + shape (N, 4). + beta (float): The loss is a piecewise function of prediction and target + and ``beta`` serves as a threshold for the difference between the + prediction and target. Defaults to 1.0. + alpha (float): The denominator ``alpha`` in the balanced L1 loss. + Defaults to 0.5. + gamma (float): The ``gamma`` in the balanced L1 loss. + Defaults to 1.5. + reduction (str, optional): The method that reduces the loss to a + scalar. Options are "none", "mean" and "sum". 
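A toy instance of the pull and push terms that `ae_loss_per_image` computes, using two objects with 1-D embeddings whose values are chosen by hand:

```python
# Toy instance of the pull/push terms computed by `ae_loss_per_image` above:
# two objects, 1-D embeddings, values chosen by hand.
import torch
import torch.nn.functional as F

tl = torch.tensor([[0.9], [0.1]])    # top-left corner embeddings, shape (N, M)
br = torch.tensor([[1.1], [-0.1]])   # bottom-right corner embeddings
me = (tl + br) / 2.0                 # per-object mean embedding

N, M = tl.shape
# pull: corners of the same object are drawn towards their mean
pull = ((tl - me).pow(2) + (br - me).pow(2)).sum() / N

# push: means of different objects should be at least `margin` apart
margin = 1
conf_mat = me.expand(N, N, M).permute(1, 0, 2) - me      # pairwise differences
conf_weight = 1 - torch.eye(N)                           # zero the diagonal (same object)
conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
push = F.relu(conf_mat).sum() / (N * (N - 1))
print(pull.item(), push.item())   # 0.02, 0.0 -- the means are exactly `margin` apart
```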
+ + Returns: + torch.Tensor: The calculated loss + """ + assert beta > 0 + if target.numel() == 0: + return pred.sum() * 0 + + assert pred.size() == target.size() + + diff = torch.abs(pred - target) + b = np.e**(gamma / alpha) - 1 + loss = torch.where( + diff < beta, alpha / b * + (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff, + gamma * diff + gamma / b - alpha * beta) + + return loss + + +@LOSSES.register_module() +class BalancedL1Loss(nn.Module): + """Balanced L1 Loss. + + arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) + + Args: + alpha (float): The denominator ``alpha`` in the balanced L1 loss. + Defaults to 0.5. + gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5. + beta (float, optional): The loss is a piecewise function of prediction + and target. ``beta`` serves as a threshold for the difference + between the prediction and target. Defaults to 1.0. + reduction (str, optional): The method that reduces the loss to a + scalar. Options are "none", "mean" and "sum". + loss_weight (float, optional): The weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + alpha=0.5, + gamma=1.5, + beta=1.0, + reduction='mean', + loss_weight=1.0): + super(BalancedL1Loss, self).__init__() + self.alpha = alpha + self.gamma = gamma + self.beta = beta + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function of loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, 4). + target (torch.Tensor): The learning target of the prediction with + shape (N, 4). + weight (torch.Tensor, optional): Sample-wise loss weight with + shape (N, ). + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". + + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * balanced_l1_loss( + pred, + target, + weight, + alpha=self.alpha, + gamma=self.gamma, + beta=self.beta, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_bbox diff --git a/downstream/mmdetection/mmdet/models/losses/cross_entropy_loss.py b/downstream/mmdetection/mmdet/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000..41411fc --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/cross_entropy_loss.py @@ -0,0 +1,301 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=-100, + avg_non_ignore=False): + """Calculate the CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str, optional): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. 
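A small numeric check, using the default hyper-parameters, that the two branches of `balanced_l1_loss` above meet at `|pred - target| == beta`:

```python
# Numeric check, with the default hyper-parameters, that the two branches of
# `balanced_l1_loss` above meet at |pred - target| == beta (the loss is continuous).
import numpy as np
import torch

alpha, gamma, beta = 0.5, 1.5, 1.0
b = np.e**(gamma / alpha) - 1

diff = torch.tensor([beta])
small = alpha / b * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff
large = gamma * diff + gamma / b - alpha * beta
print(small.item(), large.item())   # both ~1.0786
```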
+ class_weight (list[float], optional): The weight for each class. + ignore_index (int | None): The label index to be ignored. + If None, it will be set to default value. Default: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + + Returns: + torch.Tensor: The calculated loss + """ + # The default value of ignore_index is the same as F.cross_entropy + ignore_index = -100 if ignore_index is None else ignore_index + # element-wise losses + loss = F.cross_entropy( + pred, + label, + weight=class_weight, + reduction='none', + ignore_index=ignore_index) + + # average loss over non-ignored elements + # pytorch's official cross_entropy average loss over non-ignored elements + # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa + if (avg_factor is None) and avg_non_ignore and reduction == 'mean': + avg_factor = label.numel() - (label == ignore_index).sum().item() + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): + """Expand onehot labels to match the size of prediction.""" + bin_labels = labels.new_full((labels.size(0), label_channels), 0) + valid_mask = (labels >= 0) & (labels != ignore_index) + inds = torch.nonzero( + valid_mask & (labels < label_channels), as_tuple=False) + + if inds.numel() > 0: + bin_labels[inds, labels[inds]] = 1 + + valid_mask = valid_mask.view(-1, 1).expand(labels.size(0), + label_channels).float() + if label_weights is None: + bin_label_weights = valid_mask + else: + bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels) + bin_label_weights *= valid_mask + + return bin_labels, bin_label_weights, valid_mask + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=-100, + avg_non_ignore=False): + """Calculate the binary CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, 1) or (N, ). + When the shape of pred is (N, 1), label will be expanded to + one-hot format, and when the shape of pred is (N, ), label + will not be expanded to one-hot format. + label (torch.Tensor): The learning label of the prediction, + with shape (N, ). + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (int | None): The label index to be ignored. + If None, it will be set to default value. Default: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + + Returns: + torch.Tensor: The calculated loss. 
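What `avg_non_ignore` changes in the softmax `cross_entropy` above can be seen on a toy batch (random logits, one ignored label; the numbers are illustrative):

```python
# Toy batch showing what `avg_non_ignore` changes in the softmax `cross_entropy` above.
import torch
import torch.nn.functional as F

pred = torch.randn(4, 3)
label = torch.tensor([0, 2, -100, 1])    # the third sample is ignored

loss_none = F.cross_entropy(pred, label, reduction='none', ignore_index=-100)
# Default (avg_non_ignore=False): divide the summed loss by all 4 samples.
mean_all = loss_none.sum() / label.numel()
# avg_non_ignore=True: divide by the 3 non-ignored samples only, which matches
# PyTorch's own reduction='mean'.
mean_valid = loss_none.sum() / (label != -100).sum()
print(mean_all.item(), mean_valid.item(),
      F.cross_entropy(pred, label, ignore_index=-100).item())
```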
+ """ + # The default value of ignore_index is the same as F.cross_entropy + ignore_index = -100 if ignore_index is None else ignore_index + + if pred.dim() != label.dim(): + label, weight, valid_mask = _expand_onehot_labels( + label, weight, pred.size(-1), ignore_index) + else: + # should mask out the ignored elements + valid_mask = ((label >= 0) & (label != ignore_index)).float() + if weight is not None: + # The inplace writing method will have a mismatched broadcast + # shape error if the weight and valid_mask dimensions + # are inconsistent such as (B,N,1) and (B,N,C). + weight = weight * valid_mask + else: + weight = valid_mask + + # average loss over non-ignored elements + if (avg_factor is None) and avg_non_ignore and reduction == 'mean': + avg_factor = valid_mask.sum().item() + + # weighted element-wise losses + weight = weight.float() + loss = F.binary_cross_entropy_with_logits( + pred, label.float(), pos_weight=class_weight, reduction='none') + # do the reduction for the weighted loss + loss = weight_reduce_loss( + loss, weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def mask_cross_entropy(pred, + target, + label, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=None, + **kwargs): + """Calculate the CrossEntropy loss for masks. + + Args: + pred (torch.Tensor): The prediction with shape (N, C, *), C is the + number of classes. The trailing * indicates arbitrary shape. + target (torch.Tensor): The learning label of the prediction. + label (torch.Tensor): ``label`` indicates the class label of the mask + corresponding object. This will be used to select the mask in the + of the class which the object belongs to when the mask prediction + if not class-agnostic. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (None): Placeholder, to be consistent with other loss. + Default: None. + + Returns: + torch.Tensor: The calculated loss + + Example: + >>> N, C = 3, 11 + >>> H, W = 2, 2 + >>> pred = torch.randn(N, C, H, W) * 1000 + >>> target = torch.rand(N, H, W) + >>> label = torch.randint(0, C, size=(N,)) + >>> reduction = 'mean' + >>> avg_factor = None + >>> class_weights = None + >>> loss = mask_cross_entropy(pred, target, label, reduction, + >>> avg_factor, class_weights) + >>> assert loss.shape == (1,) + """ + assert ignore_index is None, 'BCE loss does not support ignore_index' + # TODO: handle these two reserved arguments + assert reduction == 'mean' and avg_factor is None + num_rois = pred.size()[0] + inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) + pred_slice = pred[inds, label].squeeze(1) + return F.binary_cross_entropy_with_logits( + pred_slice, target, weight=class_weight, reduction='mean')[None] + + +@LOSSES.register_module() +class CrossEntropyLoss(nn.Module): + + def __init__(self, + use_sigmoid=False, + use_mask=False, + reduction='mean', + class_weight=None, + ignore_index=None, + loss_weight=1.0, + avg_non_ignore=False): + """CrossEntropyLoss. + + Args: + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + of softmax. Defaults to False. + use_mask (bool, optional): Whether to use mask cross entropy loss. + Defaults to False. + reduction (str, optional): . Defaults to 'mean'. + Options are "none", "mean" and "sum". 
+ class_weight (list[float], optional): Weight of each class. + Defaults to None. + ignore_index (int | None): The label index to be ignored. + Defaults to None. + loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + """ + super(CrossEntropyLoss, self).__init__() + assert (use_sigmoid is False) or (use_mask is False) + self.use_sigmoid = use_sigmoid + self.use_mask = use_mask + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = class_weight + self.ignore_index = ignore_index + self.avg_non_ignore = avg_non_ignore + if ((ignore_index is not None) and not self.avg_non_ignore + and self.reduction == 'mean'): + warnings.warn( + 'Default ``avg_non_ignore`` is False, if you would like to ' + 'ignore the certain label and average loss over non-ignore ' + 'labels, which is the same with PyTorch official ' + 'cross_entropy, set ``avg_non_ignore=True``.') + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_mask: + self.cls_criterion = mask_cross_entropy + else: + self.cls_criterion = cross_entropy + + def extra_repr(self): + """Extra repr.""" + s = f'avg_non_ignore={self.avg_non_ignore}' + return s + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + ignore_index=None, + **kwargs): + """Forward function. + + Args: + cls_score (torch.Tensor): The prediction. + label (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss. Options are "none", "mean" and "sum". + ignore_index (int | None): The label index to be ignored. + If not None, it will override the default value. Default: None. + Returns: + torch.Tensor: The calculated loss. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if ignore_index is None: + ignore_index = self.ignore_index + + if self.class_weight is not None: + class_weight = cls_score.new_tensor( + self.class_weight, device=cls_score.device) + else: + class_weight = None + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + ignore_index=ignore_index, + avg_non_ignore=self.avg_non_ignore, + **kwargs) + return loss_cls diff --git a/downstream/mmdetection/mmdet/models/losses/dice_loss.py b/downstream/mmdetection/mmdet/models/losses/dice_loss.py new file mode 100644 index 0000000..585beea --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/dice_loss.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def dice_loss(pred, + target, + weight=None, + eps=1e-3, + reduction='mean', + naive_dice=False, + avg_factor=None): + """Calculate dice loss, there are two forms of dice loss is supported: + + - the one proposed in `V-Net: Fully Convolutional Neural + Networks for Volumetric Medical Image Segmentation + `_. + - the dice loss in which the power of the number in the + denominator is the first power instead of the second + power. 
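A short usage sketch of the `CrossEntropyLoss` module above, assuming this mmdet checkout is installed; the config dict is the usual registry-based way a loss is built inside a detector config.

```python
# Usage sketch of the `CrossEntropyLoss` module above (assumes mmdet is installed).
import torch
from mmdet.models import build_loss

loss_cfg = dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)
criterion = build_loss(loss_cfg)

cls_score = torch.randn(8, 4)              # logits for 8 samples, 4 classes
label = torch.randint(0, 4, (8,))
print(criterion(cls_score, label))         # scalar softmax CE, mean-reduced
```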
+ + Args: + pred (torch.Tensor): The prediction, has a shape (n, *) + target (torch.Tensor): The learning label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + eps (float): Avoid dividing by zero. Default: 1e-3. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + Options are "none", "mean" and "sum". + naive_dice (bool, optional): If false, use the dice + loss defined in the V-Net paper, otherwise, use the + naive dice loss in which the power of the number in the + denominator is the first power instead of the second + power.Defaults to False. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + + input = pred.flatten(1) + target = target.flatten(1).float() + + a = torch.sum(input * target, 1) + if naive_dice: + b = torch.sum(input, 1) + c = torch.sum(target, 1) + d = (2 * a + eps) / (b + c + eps) + else: + b = torch.sum(input * input, 1) + eps + c = torch.sum(target * target, 1) + eps + d = (2 * a) / (b + c) + + loss = 1 - d + if weight is not None: + assert weight.ndim == loss.ndim + assert len(weight) == len(pred) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class DiceLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=False, + loss_weight=1.0, + eps=1e-3): + """Compute dice loss. + + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + activate (bool): Whether to activate the predictions inside, + this will disable the inside sigmoid operation. + Defaults to True. + reduction (str, optional): The method used + to reduce the loss. Options are "none", + "mean" and "sum". Defaults to 'mean'. + naive_dice (bool, optional): If false, use the dice + loss defined in the V-Net paper, otherwise, use the + naive dice loss in which the power of the number in the + denominator is the first power instead of the second + power. Defaults to False. + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + eps (float): Avoid dividing by zero. Defaults to 1e-3. + """ + + super(DiceLoss, self).__init__() + self.use_sigmoid = use_sigmoid + self.reduction = reduction + self.naive_dice = naive_dice + self.loss_weight = loss_weight + self.eps = eps + self.activate = activate + + def forward(self, + pred, + target, + weight=None, + reduction_override=None, + avg_factor=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction, has a shape (n, *). + target (torch.Tensor): The label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". 
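A toy comparison of the two denominators supported by `dice_loss` above, on hand-picked, already-flattened mask values:

```python
# Toy comparison of the V-Net (squared) and naive (first-power) dice denominators.
import torch

pred = torch.tensor([[0.9, 0.8, 0.1, 0.0]])    # predicted mask probabilities, shape (n, *)
target = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
eps = 1e-3

a = (pred * target).sum(1)                                           # intersection term
d_vnet = (2 * a) / ((pred * pred).sum(1) + eps + (target * target).sum(1) + eps)
d_naive = (2 * a + eps) / (pred.sum(1) + target.sum(1) + eps)
print((1 - d_vnet).item(), (1 - d_naive).item())   # the loss is 1 - dice coefficient
```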
+ + Returns: + torch.Tensor: The calculated loss + """ + + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + if self.activate: + if self.use_sigmoid: + pred = pred.sigmoid() + else: + raise NotImplementedError + + loss = self.loss_weight * dice_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + naive_dice=self.naive_dice, + avg_factor=avg_factor) + + return loss diff --git a/downstream/mmdetection/mmdet/models/losses/focal_loss.py b/downstream/mmdetection/mmdet/models/losses/focal_loss.py new file mode 100644 index 0000000..6c20fdd --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/focal_loss.py @@ -0,0 +1,244 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +# This method is only for debugging +def py_sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + """PyTorch version of `Focal Loss `_. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the + number of classes + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + if weight is not None: + if weight.shape != loss.shape: + if weight.size(0) == loss.size(0): + # For most cases, weight is of shape (num_priors, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + else: + # Sometimes, weight per anchor per class is also needed. e.g. + # in FSAF. But it may be flattened of shape + # (num_priors x num_class, ), while loss is still of shape + # (num_priors, num_class). + assert weight.numel() == loss.numel() + weight = weight.view(loss.size(0), -1) + assert weight.ndim == loss.ndim + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +def py_focal_loss_with_prob(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + """PyTorch version of `Focal Loss `_. + Different from `py_sigmoid_focal_loss`, this function accepts probability + as input. + + Args: + pred (torch.Tensor): The prediction probability with shape (N, C), + C is the number of classes. + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. 
+ avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + num_classes = pred.size(1) + target = F.one_hot(target, num_classes=num_classes + 1) + target = target[:, :num_classes] + + target = target.type_as(pred) + pt = (1 - pred) * target + pred * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy( + pred, target, reduction='none') * focal_weight + if weight is not None: + if weight.shape != loss.shape: + if weight.size(0) == loss.size(0): + # For most cases, weight is of shape (num_priors, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + else: + # Sometimes, weight per anchor per class is also needed. e.g. + # in FSAF. But it may be flattened of shape + # (num_priors x num_class, ), while loss is still of shape + # (num_priors, num_class). + assert weight.numel() == loss.numel() + weight = weight.view(loss.size(0), -1) + assert weight.ndim == loss.ndim + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + r"""A warpper of cuda version `Focal Loss + `_. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + # Function.apply does not accept keyword arguments, so the decorator + # "weighted_loss" is not applicable + loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma, + alpha, None, 'none') + if weight is not None: + if weight.shape != loss.shape: + if weight.size(0) == loss.size(0): + # For most cases, weight is of shape (num_priors, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + else: + # Sometimes, weight per anchor per class is also needed. e.g. + # in FSAF. But it may be flattened of shape + # (num_priors x num_class, ), while loss is still of shape + # (num_priors, num_class). + assert weight.numel() == loss.numel() + weight = weight.view(loss.size(0), -1) + assert weight.ndim == loss.ndim + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class FocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0, + activated=False): + """`Focal Loss `_ + + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and + "sum". + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + activated (bool, optional): Whether the input is activated. 
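A toy trace of the modulating factor used by `py_sigmoid_focal_loss` above, showing how a well-classified example is down-weighted (logits chosen by hand):

```python
# Toy trace of the focal modulating factor: easy examples are down-weighted
# by (1 - p_t)^gamma.
import torch
import torch.nn.functional as F

gamma, alpha = 2.0, 0.25
pred = torch.tensor([3.0, -1.0])     # logits: an easy positive and a hard positive
target = torch.tensor([1.0, 1.0])    # binary targets (per-class one-hot in practice)

p = pred.sigmoid()
pt = (1 - p) * target + p * (1 - target)                       # = 1 - p_t
focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none') * focal_weight
print(loss)   # the easy example contributes far less than the hard one
```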
+ If True, it means the input has been activated and can be + treated as probabilities. Else, it should be treated as logits. + Defaults to False. + """ + super(FocalLoss, self).__init__() + assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' + self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + self.activated = activated + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". + + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + if self.activated: + calculate_loss_func = py_focal_loss_with_prob + else: + if torch.cuda.is_available() and pred.is_cuda: + calculate_loss_func = sigmoid_focal_loss + else: + num_classes = pred.size(1) + target = F.one_hot(target, num_classes=num_classes + 1) + target = target[:, :num_classes] + calculate_loss_func = py_sigmoid_focal_loss + + loss_cls = self.loss_weight * calculate_loss_func( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + + else: + raise NotImplementedError + return loss_cls diff --git a/downstream/mmdetection/mmdet/models/losses/gaussian_focal_loss.py b/downstream/mmdetection/mmdet/models/losses/gaussian_focal_loss.py new file mode 100644 index 0000000..7abcb69 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/gaussian_focal_loss.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weighted_loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0): + """`Focal Loss `_ for targets in gaussian + distribution. + + Args: + pred (torch.Tensor): The prediction. + gaussian_target (torch.Tensor): The learning target of the prediction + in gaussian distribution. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 2.0. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 4.0. + """ + eps = 1e-12 + pos_weights = gaussian_target.eq(1) + neg_weights = (1 - gaussian_target).pow(gamma) + pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights + neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights + return pos_loss + neg_loss + + +@LOSSES.register_module() +class GaussianFocalLoss(nn.Module): + """GaussianFocalLoss is a variant of focal loss. + + More details can be found in the `paper + `_ + Code is modified from `kp_utils.py + `_ # noqa: E501 + Please notice that the target in GaussianFocalLoss is a gaussian heatmap, + not 0/1 binary target. + + Args: + alpha (float): Power of prediction. + gamma (float): Power of target for negative samples. + reduction (str): Options are "none", "mean" and "sum". 
+ loss_weight (float): Loss weight of current loss. + """ + + def __init__(self, + alpha=2.0, + gamma=4.0, + reduction='mean', + loss_weight=1.0): + super(GaussianFocalLoss, self).__init__() + self.alpha = alpha + self.gamma = gamma + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction + in gaussian distribution. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_reg = self.loss_weight * gaussian_focal_loss( + pred, + target, + weight, + alpha=self.alpha, + gamma=self.gamma, + reduction=reduction, + avg_factor=avg_factor) + return loss_reg diff --git a/downstream/mmdetection/mmdet/models/losses/gfocal_loss.py b/downstream/mmdetection/mmdet/models/losses/gfocal_loss.py new file mode 100644 index 0000000..0e8d263 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/gfocal_loss.py @@ -0,0 +1,245 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weighted_loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def quality_focal_loss(pred, target, beta=2.0): + r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning + Qualified and Distributed Bounding Boxes for Dense Object Detection + `_. + + Args: + pred (torch.Tensor): Predicted joint representation of classification + and quality (IoU) estimation with shape (N, C), C is the number of + classes. + target (tuple([torch.Tensor])): Target category label with shape (N,) + and target quality label with shape (N,). + beta (float): The beta parameter for calculating the modulating factor. + Defaults to 2.0. + + Returns: + torch.Tensor: Loss tensor with shape (N,). 
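A toy trace of `gaussian_focal_loss` above on a tiny "heatmap", showing that only locations whose gaussian target is exactly 1 count as positives (values are illustrative):

```python
# Toy trace of gaussian_focal_loss: negatives are down-weighted by (1 - target)^gamma.
import torch

alpha, gamma, eps = 2.0, 4.0, 1e-12
pred = torch.tensor([0.7, 0.4, 0.1])             # predicted heatmap values in (0, 1)
gaussian_target = torch.tensor([1.0, 0.8, 0.0])  # peak, near-peak, background

pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
print(pos_loss + neg_loss)   # the near-peak pixel is almost ignored (0.2**4 = 0.0016)
```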
+ """ + assert len(target) == 2, """target for QFL must be a tuple of two elements, + including category label and quality label, respectively""" + # label denotes the category id, score denotes the quality score + label, score = target + + # negatives are supervised by 0 quality score + pred_sigmoid = pred.sigmoid() + scale_factor = pred_sigmoid + zerolabel = scale_factor.new_zeros(pred.shape) + loss = F.binary_cross_entropy_with_logits( + pred, zerolabel, reduction='none') * scale_factor.pow(beta) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = pred.size(1) + pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) + pos_label = label[pos].long() + # positives are supervised by bbox quality (IoU) score + scale_factor = score[pos] - pred_sigmoid[pos, pos_label] + loss[pos, pos_label] = F.binary_cross_entropy_with_logits( + pred[pos, pos_label], score[pos], + reduction='none') * scale_factor.abs().pow(beta) + + loss = loss.sum(dim=1, keepdim=False) + return loss + + +@weighted_loss +def quality_focal_loss_with_prob(pred, target, beta=2.0): + r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning + Qualified and Distributed Bounding Boxes for Dense Object Detection + `_. + Different from `quality_focal_loss`, this function accepts probability + as input. + + Args: + pred (torch.Tensor): Predicted joint representation of classification + and quality (IoU) estimation with shape (N, C), C is the number of + classes. + target (tuple([torch.Tensor])): Target category label with shape (N,) + and target quality label with shape (N,). + beta (float): The beta parameter for calculating the modulating factor. + Defaults to 2.0. + + Returns: + torch.Tensor: Loss tensor with shape (N,). + """ + assert len(target) == 2, """target for QFL must be a tuple of two elements, + including category label and quality label, respectively""" + # label denotes the category id, score denotes the quality score + label, score = target + + # negatives are supervised by 0 quality score + pred_sigmoid = pred + scale_factor = pred_sigmoid + zerolabel = scale_factor.new_zeros(pred.shape) + loss = F.binary_cross_entropy( + pred, zerolabel, reduction='none') * scale_factor.pow(beta) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = pred.size(1) + pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) + pos_label = label[pos].long() + # positives are supervised by bbox quality (IoU) score + scale_factor = score[pos] - pred_sigmoid[pos, pos_label] + loss[pos, pos_label] = F.binary_cross_entropy( + pred[pos, pos_label], score[pos], + reduction='none') * scale_factor.abs().pow(beta) + + loss = loss.sum(dim=1, keepdim=False) + return loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def distribution_focal_loss(pred, label): + r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning + Qualified and Distributed Bounding Boxes for Dense Object Detection + `_. + + Args: + pred (torch.Tensor): Predicted general distribution of bounding boxes + (before softmax) with shape (N, n+1), n is the max value of the + integral set `{0, ..., n}` in paper. + label (torch.Tensor): Target distance label for bounding boxes with + shape (N,). + + Returns: + torch.Tensor: Loss tensor with shape (N,). 
+ """ + dis_left = label.long() + dis_right = dis_left + 1 + weight_left = dis_right.float() - label + weight_right = label - dis_left.float() + loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \ + + F.cross_entropy(pred, dis_right, reduction='none') * weight_right + return loss + + +@LOSSES.register_module() +class QualityFocalLoss(nn.Module): + r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss: + Learning Qualified and Distributed Bounding Boxes for Dense Object + Detection `_. + + Args: + use_sigmoid (bool): Whether sigmoid operation is conducted in QFL. + Defaults to True. + beta (float): The beta parameter for calculating the modulating factor. + Defaults to 2.0. + reduction (str): Options are "none", "mean" and "sum". + loss_weight (float): Loss weight of current loss. + activated (bool, optional): Whether the input is activated. + If True, it means the input has been activated and can be + treated as probabilities. Else, it should be treated as logits. + Defaults to False. + """ + + def __init__(self, + use_sigmoid=True, + beta=2.0, + reduction='mean', + loss_weight=1.0, + activated=False): + super(QualityFocalLoss, self).__init__() + assert use_sigmoid is True, 'Only sigmoid in QFL supported now.' + self.use_sigmoid = use_sigmoid + self.beta = beta + self.reduction = reduction + self.loss_weight = loss_weight + self.activated = activated + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): Predicted joint representation of + classification and quality (IoU) estimation with shape (N, C), + C is the number of classes. + target (tuple([torch.Tensor])): Target category label with shape + (N,) and target quality label with shape (N,). + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + if self.activated: + calculate_loss_func = quality_focal_loss_with_prob + else: + calculate_loss_func = quality_focal_loss + loss_cls = self.loss_weight * calculate_loss_func( + pred, + target, + weight, + beta=self.beta, + reduction=reduction, + avg_factor=avg_factor) + else: + raise NotImplementedError + return loss_cls + + +@LOSSES.register_module() +class DistributionFocalLoss(nn.Module): + r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss: + Learning Qualified and Distributed Bounding Boxes for Dense Object + Detection `_. + + Args: + reduction (str): Options are `'none'`, `'mean'` and `'sum'`. + loss_weight (float): Loss weight of current loss. + """ + + def __init__(self, reduction='mean', loss_weight=1.0): + super(DistributionFocalLoss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): Predicted general distribution of bounding + boxes (before softmax) with shape (N, n+1), n is the max value + of the integral set `{0, ..., n}` in paper. 
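A toy trace of `distribution_focal_loss` above: a continuous regression target (here 2.7) is supervised as the two adjacent integer bins, 2 with weight 0.3 and 3 with weight 0.7 (values are illustrative).

```python
# Toy trace of distribution_focal_loss: the target 2.7 is split across bins 2 and 3.
import torch
import torch.nn.functional as F

pred = torch.randn(1, 8)        # logits over the integral set {0, ..., 7}
label = torch.tensor([2.7])     # continuous distance target

dis_left = label.long()                   # 2
dis_right = dis_left + 1                  # 3
weight_left = dis_right.float() - label   # 0.3
weight_right = label - dis_left.float()   # 0.7
loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
    + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
print(loss)
```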
+ target (torch.Tensor): Target distance label for bounding boxes + with shape (N,). + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_cls = self.loss_weight * distribution_focal_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss_cls diff --git a/downstream/mmdetection/mmdet/models/losses/ghm_loss.py b/downstream/mmdetection/mmdet/models/losses/ghm_loss.py new file mode 100644 index 0000000..a4df9fe --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/ghm_loss.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def _expand_onehot_labels(labels, label_weights, label_channels): + bin_labels = labels.new_full((labels.size(0), label_channels), 0) + inds = torch.nonzero( + (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() + if inds.numel() > 0: + bin_labels[inds, labels[inds]] = 1 + bin_label_weights = label_weights.view(-1, 1).expand( + label_weights.size(0), label_channels) + return bin_labels, bin_label_weights + + +# TODO: code refactoring to make it consistent with other losses +@LOSSES.register_module() +class GHMC(nn.Module): + """GHM Classification Loss. + + Details of the theorem can be viewed in the paper + `Gradient Harmonized Single-stage Detector + `_. + + Args: + bins (int): Number of the unit regions for distribution calculation. + momentum (float): The parameter for moving average. + use_sigmoid (bool): Can only be true for BCE based loss now. + loss_weight (float): The weight of the total GHM-C loss. + reduction (str): Options are "none", "mean" and "sum". + Defaults to "mean" + """ + + def __init__(self, + bins=10, + momentum=0, + use_sigmoid=True, + loss_weight=1.0, + reduction='mean'): + super(GHMC, self).__init__() + self.bins = bins + self.momentum = momentum + edges = torch.arange(bins + 1).float() / bins + self.register_buffer('edges', edges) + self.edges[-1] += 1e-6 + if momentum > 0: + acc_sum = torch.zeros(bins) + self.register_buffer('acc_sum', acc_sum) + self.use_sigmoid = use_sigmoid + if not self.use_sigmoid: + raise NotImplementedError + self.loss_weight = loss_weight + self.reduction = reduction + + def forward(self, + pred, + target, + label_weight, + reduction_override=None, + **kwargs): + """Calculate the GHM-C loss. + + Args: + pred (float tensor of size [batch_num, class_num]): + The direct prediction of classification fc layer. + target (float tensor of size [batch_num, class_num]): + Binary class target for each sample. + label_weight (float tensor of size [batch_num, class_num]): + the value is 1 if the sample is valid and 0 if ignored. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + Returns: + The gradient harmonized loss. 
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + # the target should be binary class label + if pred.dim() != target.dim(): + target, label_weight = _expand_onehot_labels( + target, label_weight, pred.size(-1)) + target, label_weight = target.float(), label_weight.float() + edges = self.edges + mmt = self.momentum + weights = torch.zeros_like(pred) + + # gradient length + g = torch.abs(pred.sigmoid().detach() - target) + + valid = label_weight > 0 + tot = max(valid.float().sum().item(), 1.0) + n = 0 # n valid bins + for i in range(self.bins): + inds = (g >= edges[i]) & (g < edges[i + 1]) & valid + num_in_bin = inds.sum().item() + if num_in_bin > 0: + if mmt > 0: + self.acc_sum[i] = mmt * self.acc_sum[i] \ + + (1 - mmt) * num_in_bin + weights[inds] = tot / self.acc_sum[i] + else: + weights[inds] = tot / num_in_bin + n += 1 + if n > 0: + weights = weights / n + + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') + loss = weight_reduce_loss( + loss, weights, reduction=reduction, avg_factor=tot) + return loss * self.loss_weight + + +# TODO: code refactoring to make it consistent with other losses +@LOSSES.register_module() +class GHMR(nn.Module): + """GHM Regression Loss. + + Details of the theorem can be viewed in the paper + `Gradient Harmonized Single-stage Detector + `_. + + Args: + mu (float): The parameter for the Authentic Smooth L1 loss. + bins (int): Number of the unit regions for distribution calculation. + momentum (float): The parameter for moving average. + loss_weight (float): The weight of the total GHM-R loss. + reduction (str): Options are "none", "mean" and "sum". + Defaults to "mean" + """ + + def __init__(self, + mu=0.02, + bins=10, + momentum=0, + loss_weight=1.0, + reduction='mean'): + super(GHMR, self).__init__() + self.mu = mu + self.bins = bins + edges = torch.arange(bins + 1).float() / bins + self.register_buffer('edges', edges) + self.edges[-1] = 1e3 + self.momentum = momentum + if momentum > 0: + acc_sum = torch.zeros(bins) + self.register_buffer('acc_sum', acc_sum) + self.loss_weight = loss_weight + self.reduction = reduction + + # TODO: support reduction parameter + def forward(self, + pred, + target, + label_weight, + avg_factor=None, + reduction_override=None): + """Calculate the GHM-R loss. + + Args: + pred (float tensor of size [batch_num, 4 (* class_num)]): + The prediction of box regression layer. Channel number can be 4 + or 4 * class_num depending on whether it is class-agnostic. + target (float tensor of size [batch_num, 4 (* class_num)]): + The target regression values with the same size of pred. + label_weight (float tensor of size [batch_num, 4 (* class_num)]): + The weight of each sample, 0 if ignored. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + Returns: + The gradient harmonized loss. 
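A simplified, single-pass sketch of the gradient-density weighting in GHM-C above (no momentum, hand-picked logits); it mirrors the binning loop in `GHMC.forward` but is not the module itself.

```python
# Simplified single-pass sketch of GHM-C's gradient-density weighting.
import torch

bins = 5
pred = torch.tensor([2.0, 1.5, -0.5, -3.0, 4.0])   # logits
target = torch.tensor([1.0, 1.0, 1.0, 0.0, 0.0])   # binary targets
valid = torch.ones_like(target)

edges = torch.arange(bins + 1).float() / bins
edges[-1] += 1e-6
g = (pred.sigmoid() - target).abs()       # gradient norm of BCE w.r.t. the logit

weights = torch.zeros_like(pred)
tot = valid.sum().item()
n = 0                                     # number of non-empty bins
for i in range(bins):
    inds = (g >= edges[i]) & (g < edges[i + 1]) & (valid > 0)
    num_in_bin = inds.sum().item()
    if num_in_bin > 0:
        weights[inds] = tot / num_in_bin  # crowded bins get small per-sample weight
        n += 1
weights = weights / max(n, 1)
print(g, weights)   # the three easy samples share one bin and are down-weighted
```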
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + mu = self.mu + edges = self.edges + mmt = self.momentum + + # ASL1 loss + diff = pred - target + loss = torch.sqrt(diff * diff + mu * mu) - mu + + # gradient length + g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() + weights = torch.zeros_like(g) + + valid = label_weight > 0 + tot = max(label_weight.float().sum().item(), 1.0) + n = 0 # n: valid bins + for i in range(self.bins): + inds = (g >= edges[i]) & (g < edges[i + 1]) & valid + num_in_bin = inds.sum().item() + if num_in_bin > 0: + n += 1 + if mmt > 0: + self.acc_sum[i] = mmt * self.acc_sum[i] \ + + (1 - mmt) * num_in_bin + weights[inds] = tot / self.acc_sum[i] + else: + weights[inds] = tot / num_in_bin + if n > 0: + weights /= n + loss = weight_reduce_loss( + loss, weights, reduction=reduction, avg_factor=tot) + return loss * self.loss_weight diff --git a/downstream/mmdetection/mmdet/models/losses/iou_loss.py b/downstream/mmdetection/mmdet/models/losses/iou_loss.py new file mode 100644 index 0000000..bf1ed04 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/iou_loss.py @@ -0,0 +1,474 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings + +import mmcv +import torch +import torch.nn as nn + +from mmdet.core import bbox_overlaps +from ..builder import LOSSES +from .utils import weighted_loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def iou_loss(pred, target, linear=False, mode='log', eps=1e-6): + """IoU loss. + + Computing the IoU loss between a set of predicted bboxes and target bboxes. + The loss is calculated as negative log of IoU. + + Args: + pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), + shape (n, 4). + target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). + linear (bool, optional): If True, use linear scale of loss instead of + log scale. Default: False. + mode (str): Loss scaling mode, including "linear", "square", and "log". + Default: 'log' + eps (float): Eps to avoid log(0). + + Return: + torch.Tensor: Loss tensor. + """ + assert mode in ['linear', 'square', 'log'] + if linear: + mode = 'linear' + warnings.warn('DeprecationWarning: Setting "linear=True" in ' + 'iou_loss is deprecated, please use "mode=`linear`" ' + 'instead.') + ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) + if mode == 'linear': + loss = 1 - ious + elif mode == 'square': + loss = 1 - ious**2 + elif mode == 'log': + loss = -ious.log() + else: + raise NotImplementedError + return loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3): + """BIoULoss. + + This is an implementation of paper + `Improving Object Localization with Fitness NMS and Bounded IoU Loss. + `_. + + Args: + pred (torch.Tensor): Predicted bboxes. + target (torch.Tensor): Target bboxes. + beta (float): beta parameter in smoothl1. + eps (float): eps to avoid NaN. 
+ """ + pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5 + pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5 + pred_w = pred[:, 2] - pred[:, 0] + pred_h = pred[:, 3] - pred[:, 1] + with torch.no_grad(): + target_ctrx = (target[:, 0] + target[:, 2]) * 0.5 + target_ctry = (target[:, 1] + target[:, 3]) * 0.5 + target_w = target[:, 2] - target[:, 0] + target_h = target[:, 3] - target[:, 1] + + dx = target_ctrx - pred_ctrx + dy = target_ctry - pred_ctry + + loss_dx = 1 - torch.max( + (target_w - 2 * dx.abs()) / + (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx)) + loss_dy = 1 - torch.max( + (target_h - 2 * dy.abs()) / + (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy)) + loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / + (target_w + eps)) + loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / + (target_h + eps)) + # view(..., -1) does not work for empty tensor + loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], + dim=-1).flatten(1) + + loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta, + loss_comb - 0.5 * beta) + return loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def giou_loss(pred, target, eps=1e-7): + r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding + Box Regression `_. + + Args: + pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), + shape (n, 4). + target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). + eps (float): Eps to avoid log(0). + + Return: + Tensor: Loss tensor. + """ + gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps) + loss = 1 - gious + return loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def diou_loss(pred, target, eps=1e-7): + r"""`Implementation of Distance-IoU Loss: Faster and Better + Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_. + + Code is modified from https://github.com/Zzh-tju/DIoU. + + Args: + pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), + shape (n, 4). + target (Tensor): Corresponding gt bboxes, shape (n, 4). + eps (float): Eps to avoid log(0). + Return: + Tensor: Loss tensor. + """ + # overlap + lt = torch.max(pred[:, :2], target[:, :2]) + rb = torch.min(pred[:, 2:], target[:, 2:]) + wh = (rb - lt).clamp(min=0) + overlap = wh[:, 0] * wh[:, 1] + + # union + ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) + ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) + union = ap + ag - overlap + eps + + # IoU + ious = overlap / union + + # enclose area + enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) + enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) + enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) + + cw = enclose_wh[:, 0] + ch = enclose_wh[:, 1] + + c2 = cw**2 + ch**2 + eps + + b1_x1, b1_y1 = pred[:, 0], pred[:, 1] + b1_x2, b1_y2 = pred[:, 2], pred[:, 3] + b2_x1, b2_y1 = target[:, 0], target[:, 1] + b2_x2, b2_y2 = target[:, 2], target[:, 3] + + left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 + right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 + rho2 = left + right + + # DIoU + dious = ious - rho2 / c2 + loss = 1 - dious + return loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def ciou_loss(pred, target, eps=1e-7): + r"""`Implementation of paper `Enhancing Geometric Factors into + Model Learning and Inference for Object Detection and Instance + Segmentation `_. + + Code is modified from https://github.com/Zzh-tju/CIoU. 
+ + Args: + pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), + shape (n, 4). + target (Tensor): Corresponding gt bboxes, shape (n, 4). + eps (float): Eps to avoid log(0). + Return: + Tensor: Loss tensor. + """ + # overlap + lt = torch.max(pred[:, :2], target[:, :2]) + rb = torch.min(pred[:, 2:], target[:, 2:]) + wh = (rb - lt).clamp(min=0) + overlap = wh[:, 0] * wh[:, 1] + + # union + ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) + ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) + union = ap + ag - overlap + eps + + # IoU + ious = overlap / union + + # enclose area + enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) + enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) + enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) + + cw = enclose_wh[:, 0] + ch = enclose_wh[:, 1] + + c2 = cw**2 + ch**2 + eps + + b1_x1, b1_y1 = pred[:, 0], pred[:, 1] + b1_x2, b1_y2 = pred[:, 2], pred[:, 3] + b2_x1, b2_y1 = target[:, 0], target[:, 1] + b2_x2, b2_y2 = target[:, 2], target[:, 3] + + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + + left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 + right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 + rho2 = left + right + + factor = 4 / math.pi**2 + v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + + with torch.no_grad(): + alpha = (ious > 0.5).float() * v / (1 - ious + v) + + # CIoU + cious = ious - (rho2 / c2 + alpha * v) + loss = 1 - cious.clamp(min=-1.0, max=1.0) + return loss + + +@LOSSES.register_module() +class IoULoss(nn.Module): + """IoULoss. + + Computing the IoU loss between a set of predicted bboxes and target bboxes. + + Args: + linear (bool): If True, use linear scale of loss else determined + by mode. Default: False. + eps (float): Eps to avoid log(0). + reduction (str): Options are "none", "mean" and "sum". + loss_weight (float): Weight of loss. + mode (str): Loss scaling mode, including "linear", "square", and "log". + Default: 'log' + """ + + def __init__(self, + linear=False, + eps=1e-6, + reduction='mean', + loss_weight=1.0, + mode='log'): + super(IoULoss, self).__init__() + assert mode in ['linear', 'square', 'log'] + if linear: + mode = 'linear' + warnings.warn('DeprecationWarning: Setting "linear=True" in ' + 'IOULoss is deprecated, please use "mode=`linear`" ' + 'instead.') + self.mode = mode + self.linear = linear + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. Options are "none", "mean" and "sum". 
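A toy computation, on two hand-picked boxes in (x1, y1, x2, y2) format, of the quantities the IoU-based losses above are built from: plain IoU, the GIoU enclosing-box correction and the DIoU centre-distance penalty.

```python
# Toy IoU / GIoU / DIoU computation on two hand-picked boxes.
import torch

pred = torch.tensor([[0., 0., 10., 10.]])
target = torch.tensor([[5., 5., 15., 15.]])

lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]                                      # 25
area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])     # 100
area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = area_p + area_t - overlap                                  # 175
iou = overlap / union

enc_lt = torch.min(pred[:, :2], target[:, :2])
enc_rb = torch.max(pred[:, 2:], target[:, 2:])
enc_wh = (enc_rb - enc_lt).clamp(min=0)                            # enclosing box, 15 x 15
enc_area = enc_wh[:, 0] * enc_wh[:, 1]
giou = iou - (enc_area - union) / enc_area                         # GIoU <= IoU

c2 = enc_wh[:, 0]**2 + enc_wh[:, 1]**2                             # squared enclosing diagonal
rho2 = ((pred[:, :2] + pred[:, 2:]) / 2 -
        (target[:, :2] + target[:, 2:]) / 2).pow(2).sum(-1)        # squared centre distance
diou = iou - rho2 / c2

print(1 - iou, 1 - giou, 1 - diou)   # the corresponding loss values
```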
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if (weight is not None) and (not torch.any(weight > 0)) and ( + reduction != 'none'): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n, 4) to (n,) to match the + # iou_loss of shape (n,) + assert weight.shape == pred.shape + weight = weight.mean(-1) + loss = self.loss_weight * iou_loss( + pred, + target, + weight, + mode=self.mode, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss + + +@LOSSES.register_module() +class BoundedIoULoss(nn.Module): + + def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0): + super(BoundedIoULoss, self).__init__() + self.beta = beta + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * bounded_iou_loss( + pred, + target, + weight, + beta=self.beta, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss + + +@LOSSES.register_module() +class GIoULoss(nn.Module): + + def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): + super(GIoULoss, self).__init__() + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n, 4) to (n,) to match the + # giou_loss of shape (n,) + assert weight.shape == pred.shape + weight = weight.mean(-1) + loss = self.loss_weight * giou_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss + + +@LOSSES.register_module() +class DIoULoss(nn.Module): + + def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): + super(DIoULoss, self).__init__() + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n, 4) to (n,) to match the + # giou_loss of shape (n,) + assert weight.shape == pred.shape + weight = weight.mean(-1) + 
loss = self.loss_weight * diou_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss + + +@LOSSES.register_module() +class CIoULoss(nn.Module): + + def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): + super(CIoULoss, self).__init__() + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n, 4) to (n,) to match the + # giou_loss of shape (n,) + assert weight.shape == pred.shape + weight = weight.mean(-1) + loss = self.loss_weight * ciou_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss diff --git a/downstream/mmdetection/mmdet/models/losses/kd_loss.py b/downstream/mmdetection/mmdet/models/losses/kd_loss.py new file mode 100644 index 0000000..75c1935 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/kd_loss.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weighted_loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def knowledge_distillation_kl_div_loss(pred, + soft_label, + T, + detach_target=True): + r"""Loss function for knowledge distilling using KL divergence. + + Args: + pred (Tensor): Predicted logits with shape (N, n + 1). + soft_label (Tensor): Target logits with shape (N, N + 1). + T (int): Temperature for distillation. + detach_target (bool): Remove soft_label from automatic differentiation + + Returns: + torch.Tensor: Loss tensor with shape (N,). + """ + assert pred.size() == soft_label.size() + target = F.softmax(soft_label / T, dim=1) + if detach_target: + target = target.detach() + + kd_loss = F.kl_div( + F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * ( + T * T) + + return kd_loss + + +@LOSSES.register_module() +class KnowledgeDistillationKLDivLoss(nn.Module): + """Loss function for knowledge distilling using KL divergence. + + Args: + reduction (str): Options are `'none'`, `'mean'` and `'sum'`. + loss_weight (float): Loss weight of current loss. + T (int): Temperature for distillation. + """ + + def __init__(self, reduction='mean', loss_weight=1.0, T=10): + super(KnowledgeDistillationKLDivLoss, self).__init__() + assert T >= 1 + self.reduction = reduction + self.loss_weight = loss_weight + self.T = T + + def forward(self, + pred, + soft_label, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (Tensor): Predicted logits with shape (N, n + 1). + soft_label (Tensor): Target logits with shape (N, N + 1). + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. 
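# --- Editorial sketch (toy logits): the temperature-scaled KL term computed by
# --- knowledge_distillation_kl_div_loss above, written with plain torch calls.
import torch
import torch.nn.functional as F

T = 10
student = torch.randn(4, 21)                 # (N, n + 1) logits
teacher = torch.randn(4, 21)

target = F.softmax(teacher / T, dim=1).detach()
kd = F.kl_div(F.log_softmax(student / T, dim=1), target,
              reduction='none').mean(1) * (T * T)
print(kd.shape)                              # torch.Size([4]), one value per sample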
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + + reduction = ( + reduction_override if reduction_override else self.reduction) + + loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss( + pred, + soft_label, + weight, + reduction=reduction, + avg_factor=avg_factor, + T=self.T) + + return loss_kd diff --git a/downstream/mmdetection/mmdet/models/losses/mse_loss.py b/downstream/mmdetection/mmdet/models/losses/mse_loss.py new file mode 100644 index 0000000..4a622f8 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/mse_loss.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weighted_loss + + +@weighted_loss +def mse_loss(pred, target): + """Warpper of mse loss.""" + return F.mse_loss(pred, target, reduction='none') + + +@LOSSES.register_module() +class MSELoss(nn.Module): + """MSELoss. + + Args: + reduction (str, optional): The method that reduces the loss to a + scalar. Options are "none", "mean" and "sum". + loss_weight (float, optional): The weight of the loss. Defaults to 1.0 + """ + + def __init__(self, reduction='mean', loss_weight=1.0): + super().__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function of loss. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + weight (torch.Tensor, optional): Weight of the loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * mse_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss diff --git a/downstream/mmdetection/mmdet/models/losses/pisa_loss.py b/downstream/mmdetection/mmdet/models/losses/pisa_loss.py new file mode 100644 index 0000000..6afea0e --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/pisa_loss.py @@ -0,0 +1,184 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch + +from mmdet.core import bbox_overlaps + + +@mmcv.jit(derivate=True, coderize=True) +def isr_p(cls_score, + bbox_pred, + bbox_targets, + rois, + sampling_results, + loss_cls, + bbox_coder, + k=2, + bias=0, + num_class=80): + """Importance-based Sample Reweighting (ISR_P), positive part. + + Args: + cls_score (Tensor): Predicted classification scores. + bbox_pred (Tensor): Predicted bbox deltas. + bbox_targets (tuple[Tensor]): A tuple of bbox targets, the are + labels, label_weights, bbox_targets, bbox_weights, respectively. + rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs + (two_stage) in shape (n, 5). + sampling_results (obj): Sampling results. + loss_cls (func): Classification loss func of the head. + bbox_coder (obj): BBox coder of the head. + k (float): Power of the non-linear mapping. + bias (float): Shift of the non-linear mapping. + num_class (int): Number of classes, default: 80. 
+ + Return: + tuple([Tensor]): labels, imp_based_label_weights, bbox_targets, + bbox_target_weights + """ + + labels, label_weights, bbox_targets, bbox_weights = bbox_targets + pos_label_inds = ((labels >= 0) & + (labels < num_class)).nonzero().reshape(-1) + pos_labels = labels[pos_label_inds] + + # if no positive samples, return the original targets + num_pos = float(pos_label_inds.size(0)) + if num_pos == 0: + return labels, label_weights, bbox_targets, bbox_weights + + # merge pos_assigned_gt_inds of per image to a single tensor + gts = list() + last_max_gt = 0 + for i in range(len(sampling_results)): + gt_i = sampling_results[i].pos_assigned_gt_inds + gts.append(gt_i + last_max_gt) + if len(gt_i) != 0: + last_max_gt = gt_i.max() + 1 + gts = torch.cat(gts) + assert len(gts) == num_pos + + cls_score = cls_score.detach() + bbox_pred = bbox_pred.detach() + + # For single stage detectors, rois here indicate anchors, in shape (N, 4) + # For two stage detectors, rois are in shape (N, 5) + if rois.size(-1) == 5: + pos_rois = rois[pos_label_inds][:, 1:] + else: + pos_rois = rois[pos_label_inds] + + if bbox_pred.size(-1) > 4: + bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) + pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4) + else: + pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4) + + # compute iou of the predicted bbox and the corresponding GT + pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4) + pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred) + target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target) + ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True) + + pos_imp_weights = label_weights[pos_label_inds] + # Two steps to compute IoU-HLR. Samples are first sorted by IoU locally, + # then sorted again within the same-rank group + max_l_num = pos_labels.bincount().max() + for label in pos_labels.unique(): + l_inds = (pos_labels == label).nonzero().view(-1) + l_gts = gts[l_inds] + for t in l_gts.unique(): + t_inds = l_inds[l_gts == t] + t_ious = ious[t_inds] + _, t_iou_rank_idx = t_ious.sort(descending=True) + _, t_iou_rank = t_iou_rank_idx.sort() + ious[t_inds] += max_l_num - t_iou_rank.float() + l_ious = ious[l_inds] + _, l_iou_rank_idx = l_ious.sort(descending=True) + _, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR + # linearly map HLR to label weights + pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num + + pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k) + + # normalize to make the new weighted loss value equal to the original loss + pos_loss_cls = loss_cls( + cls_score[pos_label_inds], pos_labels, reduction_override='none') + if pos_loss_cls.dim() > 1: + ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:, + None] + new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None] + else: + ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds] + new_pos_loss_cls = pos_loss_cls * pos_imp_weights + pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum() + pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio + label_weights[pos_label_inds] = pos_imp_weights + + bbox_targets = labels, label_weights, bbox_targets, bbox_weights + return bbox_targets + + +@mmcv.jit(derivate=True, coderize=True) +def carl_loss(cls_score, + labels, + bbox_pred, + bbox_targets, + loss_bbox, + k=1, + bias=0.2, + avg_factor=None, + sigmoid=False, + num_class=80): + """Classification-Aware Regression Loss (CARL). + + Args: + cls_score (Tensor): Predicted classification scores. 
+ labels (Tensor): Targets of classification. + bbox_pred (Tensor): Predicted bbox deltas. + bbox_targets (Tensor): Target of bbox regression. + loss_bbox (func): Regression loss func of the head. + bbox_coder (obj): BBox coder of the head. + k (float): Power of the non-linear mapping. + bias (float): Shift of the non-linear mapping. + avg_factor (int): Average factor used in regression loss. + sigmoid (bool): Activation of the classification score. + num_class (int): Number of classes, default: 80. + + Return: + dict: CARL loss dict. + """ + pos_label_inds = ((labels >= 0) & + (labels < num_class)).nonzero().reshape(-1) + if pos_label_inds.numel() == 0: + return dict(loss_carl=cls_score.sum()[None] * 0.) + pos_labels = labels[pos_label_inds] + + # multiply pos_cls_score with the corresponding bbox weight + # and remain gradient + if sigmoid: + pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels] + else: + pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels] + carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k) + + # normalize carl_loss_weight to make its sum equal to num positive + num_pos = float(pos_cls_score.size(0)) + weight_ratio = num_pos / carl_loss_weights.sum() + carl_loss_weights *= weight_ratio + + if avg_factor is None: + avg_factor = bbox_targets.size(0) + # if is class agnostic, bbox pred is in shape (N, 4) + # otherwise, bbox pred is in shape (N, #classes, 4) + if bbox_pred.size(-1) > 4: + bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) + pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels] + else: + pos_bbox_preds = bbox_pred[pos_label_inds] + ori_loss_reg = loss_bbox( + pos_bbox_preds, + bbox_targets[pos_label_inds], + reduction_override='none') / avg_factor + loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum() + return dict(loss_carl=loss_carl[None]) diff --git a/downstream/mmdetection/mmdet/models/losses/seesaw_loss.py b/downstream/mmdetection/mmdet/models/losses/seesaw_loss.py new file mode 100644 index 0000000..0104047 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/seesaw_loss.py @@ -0,0 +1,262 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .accuracy import accuracy +from .cross_entropy_loss import cross_entropy +from .utils import weight_reduce_loss + + +def seesaw_ce_loss(cls_score, + labels, + label_weights, + cum_samples, + num_classes, + p, + q, + eps, + reduction='mean', + avg_factor=None): + """Calculate the Seesaw CrossEntropy loss. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C), + C is the number of classes. + labels (torch.Tensor): The learning label of the prediction. + label_weights (torch.Tensor): Sample-wise loss weight. + cum_samples (torch.Tensor): Cumulative samples for each category. + num_classes (int): The number of classes. + p (float): The ``p`` in the mitigation factor. + q (float): The ``q`` in the compenstation factor. + eps (float): The minimal value of divisor to smooth + the computation of compensation factor + reduction (str, optional): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. 
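# --- Editorial sketch (toy scores): the classification-aware weights built by
# --- carl_loss above, renormalised so they sum to the number of positives.
import torch

pos_cls_score = torch.tensor([0.9, 0.2, 0.6])
bias, k = 0.2, 1
weights = (bias + (1 - bias) * pos_cls_score).pow(k)
weights = weights * (len(weights) / weights.sum())
print(weights, weights.sum())      # weights average to 1 over the positives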
+ + Returns: + torch.Tensor: The calculated loss + """ + assert cls_score.size(-1) == num_classes + assert len(cum_samples) == num_classes + + onehot_labels = F.one_hot(labels, num_classes) + seesaw_weights = cls_score.new_ones(onehot_labels.size()) + + # mitigation factor + if p > 0: + sample_ratio_matrix = cum_samples[None, :].clamp( + min=1) / cum_samples[:, None].clamp(min=1) + index = (sample_ratio_matrix < 1.0).float() + sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index) + mitigation_factor = sample_weights[labels.long(), :] + seesaw_weights = seesaw_weights * mitigation_factor + + # compensation factor + if q > 0: + scores = F.softmax(cls_score.detach(), dim=1) + self_scores = scores[ + torch.arange(0, len(scores)).to(scores.device).long(), + labels.long()] + score_matrix = scores / self_scores[:, None].clamp(min=eps) + index = (score_matrix > 1.0).float() + compensation_factor = score_matrix.pow(q) * index + (1 - index) + seesaw_weights = seesaw_weights * compensation_factor + + cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels)) + + loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none') + + if label_weights is not None: + label_weights = label_weights.float() + loss = weight_reduce_loss( + loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor) + return loss + + +@LOSSES.register_module() +class SeesawLoss(nn.Module): + """ + Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021) + arXiv: https://arxiv.org/abs/2008.10032 + + Args: + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + of softmax. Only False is supported. + p (float, optional): The ``p`` in the mitigation factor. + Defaults to 0.8. + q (float, optional): The ``q`` in the compenstation factor. + Defaults to 2.0. + num_classes (int, optional): The number of classes. + Default to 1203 for LVIS v1 dataset. + eps (float, optional): The minimal value of divisor to smooth + the computation of compensation factor + reduction (str, optional): The method that reduces the loss to a + scalar. Options are "none", "mean" and "sum". + loss_weight (float, optional): The weight of the loss. Defaults to 1.0 + return_dict (bool, optional): Whether return the losses as a dict. + Default to True. + """ + + def __init__(self, + use_sigmoid=False, + p=0.8, + q=2.0, + num_classes=1203, + eps=1e-2, + reduction='mean', + loss_weight=1.0, + return_dict=True): + super(SeesawLoss, self).__init__() + assert not use_sigmoid + self.use_sigmoid = False + self.p = p + self.q = q + self.num_classes = num_classes + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + self.return_dict = return_dict + + # 0 for pos, 1 for neg + self.cls_criterion = seesaw_ce_loss + + # cumulative samples for each category + self.register_buffer( + 'cum_samples', + torch.zeros(self.num_classes + 1, dtype=torch.float)) + + # custom output channels of the classifier + self.custom_cls_channels = True + # custom activation of cls_score + self.custom_activation = True + # custom accuracy of the classsifier + self.custom_accuracy = True + + def _split_cls_score(self, cls_score): + # split cls_score to cls_score_classes and cls_score_objectness + assert cls_score.size(-1) == self.num_classes + 2 + cls_score_classes = cls_score[..., :-2] + cls_score_objectness = cls_score[..., -2:] + return cls_score_classes, cls_score_objectness + + def get_cls_channels(self, num_classes): + """Get custom classification channels. 
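# --- Editorial sketch (toy class counts): the mitigation factor computed in
# --- seesaw_ce_loss above; entries < 1 appear only where class j is rarer than
# --- the labelled class i.
import torch

cum_samples = torch.tensor([1000., 100., 10.])
p = 0.8
ratio = cum_samples[None, :].clamp(min=1) / cum_samples[:, None].clamp(min=1)
index = (ratio < 1.0).float()
sample_weights = ratio.pow(p) * index + (1 - index)
print(sample_weights)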
+ + Args: + num_classes (int): The number of classes. + + Returns: + int: The custom classification channels. + """ + assert num_classes == self.num_classes + return num_classes + 2 + + def get_activation(self, cls_score): + """Get custom activation of cls_score. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C + 2). + + Returns: + torch.Tensor: The custom activation of cls_score with shape + (N, C + 1). + """ + cls_score_classes, cls_score_objectness = self._split_cls_score( + cls_score) + score_classes = F.softmax(cls_score_classes, dim=-1) + score_objectness = F.softmax(cls_score_objectness, dim=-1) + score_pos = score_objectness[..., [0]] + score_neg = score_objectness[..., [1]] + score_classes = score_classes * score_pos + scores = torch.cat([score_classes, score_neg], dim=-1) + return scores + + def get_accuracy(self, cls_score, labels): + """Get custom accuracy w.r.t. cls_score and labels. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C + 2). + labels (torch.Tensor): The learning label of the prediction. + + Returns: + Dict [str, torch.Tensor]: The accuracy for objectness and classes, + respectively. + """ + pos_inds = labels < self.num_classes + obj_labels = (labels == self.num_classes).long() + cls_score_classes, cls_score_objectness = self._split_cls_score( + cls_score) + acc_objectness = accuracy(cls_score_objectness, obj_labels) + acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds]) + acc = dict() + acc['acc_objectness'] = acc_objectness + acc['acc_classes'] = acc_classes + return acc + + def forward(self, + cls_score, + labels, + label_weights=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C + 2). + labels (torch.Tensor): The learning label of the prediction. + label_weights (torch.Tensor, optional): Sample-wise loss weight. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + Returns: + torch.Tensor | Dict [str, torch.Tensor]: + if return_dict == False: The calculated loss | + if return_dict == True: The dict of calculated losses + for objectness and classes, respectively. 
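# --- Editorial sketch: how get_activation above turns the (N, C + 2) head
# --- output into (N, C + 1) scores, gating the class scores by the "positive"
# --- objectness score. Toy shapes.
import torch
import torch.nn.functional as F

num_classes = 4
cls_score = torch.randn(2, num_classes + 2)
score_classes = F.softmax(cls_score[..., :-2], dim=-1)
score_obj = F.softmax(cls_score[..., -2:], dim=-1)
scores = torch.cat(
    [score_classes * score_obj[..., [0]], score_obj[..., [1]]], dim=-1)
print(scores.shape)        # torch.Size([2, 5])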
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + assert cls_score.size(-1) == self.num_classes + 2 + pos_inds = labels < self.num_classes + # 0 for pos, 1 for neg + obj_labels = (labels == self.num_classes).long() + + # accumulate the samples for each category + unique_labels = labels.unique() + for u_l in unique_labels: + inds_ = labels == u_l.item() + self.cum_samples[u_l] += inds_.sum() + + if label_weights is not None: + label_weights = label_weights.float() + else: + label_weights = labels.new_ones(labels.size(), dtype=torch.float) + + cls_score_classes, cls_score_objectness = self._split_cls_score( + cls_score) + # calculate loss_cls_classes (only need pos samples) + if pos_inds.sum() > 0: + loss_cls_classes = self.loss_weight * self.cls_criterion( + cls_score_classes[pos_inds], labels[pos_inds], + label_weights[pos_inds], self.cum_samples[:self.num_classes], + self.num_classes, self.p, self.q, self.eps, reduction, + avg_factor) + else: + loss_cls_classes = cls_score_classes[pos_inds].sum() + # calculate loss_cls_objectness + loss_cls_objectness = self.loss_weight * cross_entropy( + cls_score_objectness, obj_labels, label_weights, reduction, + avg_factor) + + if self.return_dict: + loss_cls = dict() + loss_cls['loss_cls_objectness'] = loss_cls_objectness + loss_cls['loss_cls_classes'] = loss_cls_classes + else: + loss_cls = loss_cls_classes + loss_cls_objectness + return loss_cls diff --git a/downstream/mmdetection/mmdet/models/losses/smooth_l1_loss.py b/downstream/mmdetection/mmdet/models/losses/smooth_l1_loss.py new file mode 100644 index 0000000..5511746 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/smooth_l1_loss.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weighted_loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def smooth_l1_loss(pred, target, beta=1.0): + """Smooth L1 loss. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + beta (float, optional): The threshold in the piecewise function. + Defaults to 1.0. + + Returns: + torch.Tensor: Calculated loss + """ + assert beta > 0 + if target.numel() == 0: + return pred.sum() * 0 + + assert pred.size() == target.size() + diff = torch.abs(pred - target) + loss = torch.where(diff < beta, 0.5 * diff * diff / beta, + diff - 0.5 * beta) + return loss + + +@mmcv.jit(derivate=True, coderize=True) +@weighted_loss +def l1_loss(pred, target): + """L1 loss. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + + Returns: + torch.Tensor: Calculated loss + """ + if target.numel() == 0: + return pred.sum() * 0 + + assert pred.size() == target.size() + loss = torch.abs(pred - target) + return loss + + +@LOSSES.register_module() +class SmoothL1Loss(nn.Module): + """Smooth L1 loss. + + Args: + beta (float, optional): The threshold in the piecewise function. + Defaults to 1.0. + reduction (str, optional): The method to reduce the loss. + Options are "none", "mean" and "sum". Defaults to "mean". + loss_weight (float, optional): The weight of loss. 
+ """ + + def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): + super(SmoothL1Loss, self).__init__() + self.beta = beta + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * smooth_l1_loss( + pred, + target, + weight, + beta=self.beta, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_bbox + + +@LOSSES.register_module() +class L1Loss(nn.Module): + """L1 loss. + + Args: + reduction (str, optional): The method to reduce the loss. + Options are "none", "mean" and "sum". + loss_weight (float, optional): The weight of loss. + """ + + def __init__(self, reduction='mean', loss_weight=1.0): + super(L1Loss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * l1_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss_bbox diff --git a/downstream/mmdetection/mmdet/models/losses/utils.py b/downstream/mmdetection/mmdet/models/losses/utils.py new file mode 100644 index 0000000..778237e --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/utils.py @@ -0,0 +1,105 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools + +import mmcv +import torch +import torch.nn.functional as F + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +@mmcv.jit(derivate=True, coderize=True) +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. 
+ avg_factor (float): Average factor when computing the mean of losses. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + # Avoid causing ZeroDivisionError when avg_factor is 0.0, + # i.e., all labels of an image belong to ignore index. + eps = torch.finfo(torch.float32).eps + loss = loss.sum() / (avg_factor + eps) + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + `loss_func(pred, target, **kwargs)`. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like `loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)`. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred, + target, + weight=None, + reduction='mean', + avg_factor=None, + **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper diff --git a/downstream/mmdetection/mmdet/models/losses/varifocal_loss.py b/downstream/mmdetection/mmdet/models/losses/varifocal_loss.py new file mode 100644 index 0000000..42f0eef --- /dev/null +++ b/downstream/mmdetection/mmdet/models/losses/varifocal_loss.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +@mmcv.jit(derivate=True, coderize=True) +def varifocal_loss(pred, + target, + weight=None, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + reduction='mean', + avg_factor=None): + """`Varifocal Loss `_ + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the + number of classes + target (torch.Tensor): The learning target of the iou-aware + classification score with shape (N, C), C is the number of classes. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + alpha (float, optional): A balance factor for the negative part of + Varifocal Loss, which is different from the alpha of Focal Loss. + Defaults to 0.75. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + iou_weighted (bool, optional): Whether to weight the loss of the + positive example with the iou target. Defaults to True. + reduction (str, optional): The method used to reduce the loss into + a scalar. 
Defaults to 'mean'. Options are "none", "mean" and + "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + # pred and target should be of the same size + assert pred.size() == target.size() + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + if iou_weighted: + focal_weight = target * (target > 0.0).float() + \ + alpha * (pred_sigmoid - target).abs().pow(gamma) * \ + (target <= 0.0).float() + else: + focal_weight = (target > 0.0).float() + \ + alpha * (pred_sigmoid - target).abs().pow(gamma) * \ + (target <= 0.0).float() + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class VarifocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + reduction='mean', + loss_weight=1.0): + """`Varifocal Loss `_ + + Args: + use_sigmoid (bool, optional): Whether the prediction is + used for sigmoid or softmax. Defaults to True. + alpha (float, optional): A balance factor for the negative part of + Varifocal Loss, which is different from the alpha of Focal + Loss. Defaults to 0.75. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + iou_weighted (bool, optional): Whether to weight the loss of the + positive examples with the iou target. Defaults to True. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and + "sum". + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + """ + super(VarifocalLoss, self).__init__() + assert use_sigmoid is True, \ + 'Only sigmoid varifocal loss supported now.' + assert alpha >= 0.0 + self.use_sigmoid = use_sigmoid + self.alpha = alpha + self.gamma = gamma + self.iou_weighted = iou_weighted + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". + + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + loss_cls = self.loss_weight * varifocal_loss( + pred, + target, + weight, + alpha=self.alpha, + gamma=self.gamma, + iou_weighted=self.iou_weighted, + reduction=reduction, + avg_factor=avg_factor) + else: + raise NotImplementedError + return loss_cls diff --git a/downstream/mmdetection/mmdet/models/necks/__init__.py b/downstream/mmdetection/mmdet/models/necks/__init__.py new file mode 100644 index 0000000..6f2fa82 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
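# --- Editorial sketch (toy values): the focal weight used by varifocal_loss
# --- above -- positives are weighted by their IoU-aware target, negatives by
# --- alpha * |p|^gamma.
import torch

pred_sigmoid = torch.tensor([0.8, 0.3])
target = torch.tensor([0.7, 0.0])            # > 0 marks a positive
alpha, gamma = 0.75, 2.0
focal_weight = target * (target > 0.0).float() + \
    alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float()
print(focal_weight)                          # tensor([0.7000, 0.0675])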
+from .bfp import BFP +from .channel_mapper import ChannelMapper +from .ct_resnet_neck import CTResNetNeck +from .dilated_encoder import DilatedEncoder +from .dyhead import DyHead +from .fpg import FPG +from .fpn import FPN +from .fpn_carafe import FPN_CARAFE +from .hrfpn import HRFPN +from .nas_fpn import NASFPN +from .nasfcos_fpn import NASFCOS_FPN +from .pafpn import PAFPN +from .rfp import RFP +from .ssd_neck import SSDNeck +from .yolo_neck import YOLOV3Neck +from .yolox_pafpn import YOLOXPAFPN + +__all__ = [ + 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', + 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder', + 'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead' +] diff --git a/downstream/mmdetection/mmdet/models/necks/bfp.py b/downstream/mmdetection/mmdet/models/necks/bfp.py new file mode 100644 index 0000000..9fdfa03 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/bfp.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks import NonLocal2d +from mmcv.runner import BaseModule + +from ..builder import NECKS + + +@NECKS.register_module() +class BFP(BaseModule): + """BFP (Balanced Feature Pyramids) + + BFP takes multi-level features as inputs and gather them into a single one, + then refine the gathered feature and scatter the refined results to + multi-level features. This module is used in Libra R-CNN (CVPR 2019), see + the paper `Libra R-CNN: Towards Balanced Learning for Object Detection + `_ for details. + + Args: + in_channels (int): Number of input channels (feature maps of all levels + should have the same channels). + num_levels (int): Number of input feature levels. + conv_cfg (dict): The config dict for convolution layers. + norm_cfg (dict): The config dict for normalization layers. + refine_level (int): Index of integration and refine level of BSF in + multi-level features from bottom to top. + refine_type (str): Type of the refine op, currently support + [None, 'conv', 'non_local']. + init_cfg (dict or list[dict], optional): Initialization config dict. 
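# --- Editorial usage sketch (assumes mmdet/mmcv from this patch are
# --- importable): the BFP neck keeps the number of levels and the channel
# --- count of its inputs.
import torch
from mmdet.models.necks import BFP

neck = BFP(in_channels=16, num_levels=4, refine_level=1, refine_type='conv')
feats = [torch.rand(1, 16, s, s) for s in (64, 32, 16, 8)]
outs = neck(feats)
print([o.shape for o in outs])     # same shapes as the inputs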
+ """ + + def __init__(self, + in_channels, + num_levels, + refine_level=2, + refine_type=None, + conv_cfg=None, + norm_cfg=None, + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(BFP, self).__init__(init_cfg) + assert refine_type in [None, 'conv', 'non_local'] + + self.in_channels = in_channels + self.num_levels = num_levels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.refine_level = refine_level + self.refine_type = refine_type + assert 0 <= self.refine_level < self.num_levels + + if self.refine_type == 'conv': + self.refine = ConvModule( + self.in_channels, + self.in_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + elif self.refine_type == 'non_local': + self.refine = NonLocal2d( + self.in_channels, + reduction=1, + use_scale=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == self.num_levels + + # step 1: gather multi-level features by resize and average + feats = [] + gather_size = inputs[self.refine_level].size()[2:] + for i in range(self.num_levels): + if i < self.refine_level: + gathered = F.adaptive_max_pool2d( + inputs[i], output_size=gather_size) + else: + gathered = F.interpolate( + inputs[i], size=gather_size, mode='nearest') + feats.append(gathered) + + bsf = sum(feats) / len(feats) + + # step 2: refine gathered features + if self.refine_type is not None: + bsf = self.refine(bsf) + + # step 3: scatter refined features to multi-levels by a residual path + outs = [] + for i in range(self.num_levels): + out_size = inputs[i].size()[2:] + if i < self.refine_level: + residual = F.interpolate(bsf, size=out_size, mode='nearest') + else: + residual = F.adaptive_max_pool2d(bsf, output_size=out_size) + outs.append(residual + inputs[i]) + + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/necks/channel_mapper.py b/downstream/mmdetection/mmdet/models/necks/channel_mapper.py new file mode 100644 index 0000000..774bdb1 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/channel_mapper.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from ..builder import NECKS + + +@NECKS.register_module() +class ChannelMapper(BaseModule): + r"""Channel Mapper to reduce/increase channels of backbone features. + + This is used to reduce/increase channels of backbone features. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + kernel_size (int, optional): kernel_size for reducing channels (used + at each scale). Default: 3. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + act_cfg (dict, optional): Config dict for activation layer in + ConvModule. Default: dict(type='ReLU'). + num_outs (int, optional): Number of output feature maps. There + would be extra_convs when num_outs larger than the length + of in_channels. + init_cfg (dict or list[dict], optional): Initialization config dict. + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = ChannelMapper(in_channels, 11, 3).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... 
print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + num_outs=None, + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(ChannelMapper, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.extra_convs = None + if num_outs is None: + num_outs = len(in_channels) + self.convs = nn.ModuleList() + for in_channel in in_channels: + self.convs.append( + ConvModule( + in_channel, + out_channels, + kernel_size, + padding=(kernel_size - 1) // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + if num_outs > len(in_channels): + self.extra_convs = nn.ModuleList() + for i in range(len(in_channels), num_outs): + if i == len(in_channels): + in_channel = in_channels[-1] + else: + in_channel = out_channels + self.extra_convs.append( + ConvModule( + in_channel, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.convs) + outs = [self.convs[i](inputs[i]) for i in range(len(inputs))] + if self.extra_convs: + for i in range(len(self.extra_convs)): + if i == 0: + outs.append(self.extra_convs[0](inputs[-1])) + else: + outs.append(self.extra_convs[i](outs[-1])) + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/necks/ct_resnet_neck.py b/downstream/mmdetection/mmdet/models/necks/ct_resnet_neck.py new file mode 100644 index 0000000..40eb268 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/ct_resnet_neck.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, auto_fp16 + +from mmdet.models.builder import NECKS + + +@NECKS.register_module() +class CTResNetNeck(BaseModule): + """The neck used in `CenterNet `_ for + object classification and box regression. + + Args: + in_channel (int): Number of input channels. + num_deconv_filters (tuple[int]): Number of filters per stage. + num_deconv_kernels (tuple[int]): Number of kernels per stage. + use_dcn (bool): If True, use DCNv2. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. 
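# --- Editorial usage sketch for CTResNetNeck (use_dcn=False so no compiled
# --- DCNv2 op is required; assumes the mmdet built from this patch is
# --- importable): each deconv stage doubles the spatial resolution.
import torch
from mmdet.models.necks import CTResNetNeck

neck = CTResNetNeck(in_channel=64, num_deconv_filters=(32, 16),
                    num_deconv_kernels=(4, 4), use_dcn=False)
neck.init_weights()
out, = neck([torch.rand(1, 64, 16, 16)])
print(out.shape)                   # expected: torch.Size([1, 16, 64, 64])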
+ """ + + def __init__(self, + in_channel, + num_deconv_filters, + num_deconv_kernels, + use_dcn=True, + init_cfg=None): + super(CTResNetNeck, self).__init__(init_cfg) + assert len(num_deconv_filters) == len(num_deconv_kernels) + self.fp16_enabled = False + self.use_dcn = use_dcn + self.in_channel = in_channel + self.deconv_layers = self._make_deconv_layer(num_deconv_filters, + num_deconv_kernels) + + def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels): + """use deconv layers to upsample backbone's output.""" + layers = [] + for i in range(len(num_deconv_filters)): + feat_channel = num_deconv_filters[i] + conv_module = ConvModule( + self.in_channel, + feat_channel, + 3, + padding=1, + conv_cfg=dict(type='DCNv2') if self.use_dcn else None, + norm_cfg=dict(type='BN')) + layers.append(conv_module) + upsample_module = ConvModule( + feat_channel, + feat_channel, + num_deconv_kernels[i], + stride=2, + padding=1, + conv_cfg=dict(type='deconv'), + norm_cfg=dict(type='BN')) + layers.append(upsample_module) + self.in_channel = feat_channel + + return nn.Sequential(*layers) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.ConvTranspose2d): + # In order to be consistent with the source code, + # reset the ConvTranspose2d initialization parameters + m.reset_parameters() + # Simulated bilinear upsampling kernel + w = m.weight.data + f = math.ceil(w.size(2) / 2) + c = (2 * f - 1 - f % 2) / (2. * f) + for i in range(w.size(2)): + for j in range(w.size(3)): + w[0, 0, i, j] = \ + (1 - math.fabs(i / f - c)) * ( + 1 - math.fabs(j / f - c)) + for c in range(1, w.size(0)): + w[c, 0, :, :] = w[0, 0, :, :] + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + # self.use_dcn is False + elif not self.use_dcn and isinstance(m, nn.Conv2d): + # In order to be consistent with the source code, + # reset the Conv2d initialization parameters + m.reset_parameters() + + @auto_fp16() + def forward(self, inputs): + assert isinstance(inputs, (list, tuple)) + outs = self.deconv_layers(inputs[-1]) + return outs, diff --git a/downstream/mmdetection/mmdet/models/necks/dilated_encoder.py b/downstream/mmdetection/mmdet/models/necks/dilated_encoder.py new file mode 100644 index 0000000..79a8f4b --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/dilated_encoder.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm, + normal_init) +from torch.nn import BatchNorm2d + +from ..builder import NECKS + + +class Bottleneck(nn.Module): + """Bottleneck block for DilatedEncoder used in `YOLOF. + + `. + + The Bottleneck contains three ConvLayers and one residual connection. + + Args: + in_channels (int): The number of input channels. + mid_channels (int): The number of middle output channels. + dilation (int): Dilation rate. + norm_cfg (dict): Dictionary to construct and config norm layer. 
+ """ + + def __init__(self, + in_channels, + mid_channels, + dilation, + norm_cfg=dict(type='BN', requires_grad=True)): + super(Bottleneck, self).__init__() + self.conv1 = ConvModule( + in_channels, mid_channels, 1, norm_cfg=norm_cfg) + self.conv2 = ConvModule( + mid_channels, + mid_channels, + 3, + padding=dilation, + dilation=dilation, + norm_cfg=norm_cfg) + self.conv3 = ConvModule( + mid_channels, in_channels, 1, norm_cfg=norm_cfg) + + def forward(self, x): + identity = x + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + out = out + identity + return out + + +@NECKS.register_module() +class DilatedEncoder(nn.Module): + """Dilated Encoder for YOLOF `. + + This module contains two types of components: + - the original FPN lateral convolution layer and fpn convolution layer, + which are 1x1 conv + 3x3 conv + - the dilated residual block + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + block_mid_channels (int): The number of middle block output channels + num_residual_blocks (int): The number of residual blocks. + block_dilations (list): The list of residual blocks dilation. + """ + + def __init__(self, in_channels, out_channels, block_mid_channels, + num_residual_blocks, block_dilations): + super(DilatedEncoder, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.block_mid_channels = block_mid_channels + self.num_residual_blocks = num_residual_blocks + self.block_dilations = block_dilations + self._init_layers() + + def _init_layers(self): + self.lateral_conv = nn.Conv2d( + self.in_channels, self.out_channels, kernel_size=1) + self.lateral_norm = BatchNorm2d(self.out_channels) + self.fpn_conv = nn.Conv2d( + self.out_channels, self.out_channels, kernel_size=3, padding=1) + self.fpn_norm = BatchNorm2d(self.out_channels) + encoder_blocks = [] + for i in range(self.num_residual_blocks): + dilation = self.block_dilations[i] + encoder_blocks.append( + Bottleneck( + self.out_channels, + self.block_mid_channels, + dilation=dilation)) + self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks) + + def init_weights(self): + caffe2_xavier_init(self.lateral_conv) + caffe2_xavier_init(self.fpn_conv) + for m in [self.lateral_norm, self.fpn_norm]: + constant_init(m, 1) + for m in self.dilated_encoder_blocks.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, mean=0, std=0.01) + if is_norm(m): + constant_init(m, 1) + + def forward(self, feature): + out = self.lateral_norm(self.lateral_conv(feature[-1])) + out = self.fpn_norm(self.fpn_conv(out)) + return self.dilated_encoder_blocks(out), diff --git a/downstream/mmdetection/mmdet/models/necks/dyhead.py b/downstream/mmdetection/mmdet/models/necks/dyhead.py new file mode 100644 index 0000000..5d752c3 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/dyhead.py @@ -0,0 +1,174 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (build_activation_layer, build_norm_layer, constant_init, + normal_init) +from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d +from mmcv.runner import BaseModule + +from ..builder import NECKS +from ..utils import DyReLU + +# Reference: +# https://github.com/microsoft/DynamicHead +# https://github.com/jshilong/SEPC + + +class DyDCNv2(nn.Module): + """ModulatedDeformConv2d with normalization layer used in DyHead. 
+ + This module cannot be configured with `conv_cfg=dict(type='DCNv2')` + because DyHead calculates offset and mask from middle-level feature. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + stride (int | tuple[int], optional): Stride of the convolution. + Default: 1. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='GN', num_groups=16, requires_grad=True). + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)): + super().__init__() + self.with_norm = norm_cfg is not None + bias = not self.with_norm + self.conv = ModulatedDeformConv2d( + in_channels, out_channels, 3, stride=stride, padding=1, bias=bias) + if self.with_norm: + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + + def forward(self, x, offset, mask): + """Forward function.""" + x = self.conv(x.contiguous(), offset, mask) + if self.with_norm: + x = self.norm(x) + return x + + +class DyHeadBlock(nn.Module): + """DyHead Block with three types of attention. + + HSigmoid arguments in default act_cfg follow official code, not paper. + https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + zero_init_offset (bool, optional): Whether to use zero init for + `spatial_conv_offset`. Default: True. + act_cfg (dict, optional): Config dict for the last activation layer of + scale-aware attention. Default: dict(type='HSigmoid', bias=3.0, + divisor=6.0). + """ + + def __init__(self, + in_channels, + out_channels, + zero_init_offset=True, + act_cfg=dict(type='HSigmoid', bias=3.0, divisor=6.0)): + super().__init__() + self.zero_init_offset = zero_init_offset + # (offset_x, offset_y, mask) * kernel_size_y * kernel_size_x + self.offset_and_mask_dim = 3 * 3 * 3 + self.offset_dim = 2 * 3 * 3 + + self.spatial_conv_high = DyDCNv2(in_channels, out_channels) + self.spatial_conv_mid = DyDCNv2(in_channels, out_channels) + self.spatial_conv_low = DyDCNv2(in_channels, out_channels, stride=2) + self.spatial_conv_offset = nn.Conv2d( + in_channels, self.offset_and_mask_dim, 3, padding=1) + self.scale_attn_module = nn.Sequential( + nn.AdaptiveAvgPool2d(1), nn.Conv2d(out_channels, 1, 1), + nn.ReLU(inplace=True), build_activation_layer(act_cfg)) + self.task_attn_module = DyReLU(out_channels) + self._init_weights() + + def _init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, 0, 0.01) + if self.zero_init_offset: + constant_init(self.spatial_conv_offset, 0) + + def forward(self, x): + """Forward function.""" + outs = [] + for level in range(len(x)): + # calculate offset and mask of DCNv2 from middle-level feature + offset_and_mask = self.spatial_conv_offset(x[level]) + offset = offset_and_mask[:, :self.offset_dim, :, :] + mask = offset_and_mask[:, self.offset_dim:, :, :].sigmoid() + + mid_feat = self.spatial_conv_mid(x[level], offset, mask) + sum_feat = mid_feat * self.scale_attn_module(mid_feat) + summed_levels = 1 + if level > 0: + low_feat = self.spatial_conv_low(x[level - 1], offset, mask) + sum_feat += low_feat * self.scale_attn_module(low_feat) + summed_levels += 1 + if level < len(x) - 1: + # this upsample order is weird, but faster than natural order + # https://github.com/microsoft/DynamicHead/issues/25 + high_feat = F.interpolate( + self.spatial_conv_high(x[level + 1], offset, mask), + size=x[level].shape[-2:], + 
mode='bilinear', + align_corners=True) + sum_feat += high_feat * self.scale_attn_module(high_feat) + summed_levels += 1 + outs.append(self.task_attn_module(sum_feat / summed_levels)) + + return outs + + +@NECKS.register_module() +class DyHead(BaseModule): + """DyHead neck consisting of multiple DyHead Blocks. + + See `Dynamic Head: Unifying Object Detection Heads with Attentions + `_ for details. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + num_blocks (int, optional): Number of DyHead Blocks. Default: 6. + zero_init_offset (bool, optional): Whether to use zero init for + `spatial_conv_offset`. Default: True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + num_blocks=6, + zero_init_offset=True, + init_cfg=None): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_blocks = num_blocks + self.zero_init_offset = zero_init_offset + + dyhead_blocks = [] + for i in range(num_blocks): + in_channels = self.in_channels if i == 0 else self.out_channels + dyhead_blocks.append( + DyHeadBlock( + in_channels, + self.out_channels, + zero_init_offset=zero_init_offset)) + self.dyhead_blocks = nn.Sequential(*dyhead_blocks) + + def forward(self, inputs): + """Forward function.""" + assert isinstance(inputs, (tuple, list)) + outs = self.dyhead_blocks(inputs) + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/necks/fpg.py b/downstream/mmdetection/mmdet/models/necks/fpg.py new file mode 100644 index 0000000..a6a2a12 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/fpg.py @@ -0,0 +1,406 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from ..builder import NECKS + + +class Transition(BaseModule): + """Base class for transition. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + """ + + def __init__(self, in_channels, out_channels, init_cfg=None): + super().__init__(init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + + def forward(x): + pass + + +class UpInterpolationConv(Transition): + """A transition used for up-sampling. + + Up-sample the input by interpolation then refines the feature by + a convolution layer. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + scale_factor (int): Up-sampling factor. Default: 2. + mode (int): Interpolation mode. Default: nearest. + align_corners (bool): Whether align corners when interpolation. + Default: None. + kernel_size (int): Kernel size for the conv. Default: 3. 
+ """ + + def __init__(self, + in_channels, + out_channels, + scale_factor=2, + mode='nearest', + align_corners=None, + kernel_size=3, + init_cfg=None, + **kwargs): + super().__init__(in_channels, out_channels, init_cfg) + self.mode = mode + self.scale_factor = scale_factor + self.align_corners = align_corners + self.conv = ConvModule( + in_channels, + out_channels, + kernel_size, + padding=(kernel_size - 1) // 2, + **kwargs) + + def forward(self, x): + x = F.interpolate( + x, + scale_factor=self.scale_factor, + mode=self.mode, + align_corners=self.align_corners) + x = self.conv(x) + return x + + +class LastConv(Transition): + """A transition used for refining the output of the last stage. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + num_inputs (int): Number of inputs of the FPN features. + kernel_size (int): Kernel size for the conv. Default: 3. + """ + + def __init__(self, + in_channels, + out_channels, + num_inputs, + kernel_size=3, + init_cfg=None, + **kwargs): + super().__init__(in_channels, out_channels, init_cfg) + self.num_inputs = num_inputs + self.conv_out = ConvModule( + in_channels, + out_channels, + kernel_size, + padding=(kernel_size - 1) // 2, + **kwargs) + + def forward(self, inputs): + assert len(inputs) == self.num_inputs + return self.conv_out(inputs[-1]) + + +@NECKS.register_module() +class FPG(BaseModule): + """FPG. + + Implementation of `Feature Pyramid Grids (FPG) + `_. + This implementation only gives the basic structure stated in the paper. + But users can implement different type of transitions to fully explore the + the potential power of the structure of FPG. + + Args: + in_channels (int): Number of input channels (feature maps of all levels + should have the same channels). + out_channels (int): Number of output channels (used at each scale) + num_outs (int): Number of output scales. + stack_times (int): The number of times the pyramid architecture will + be stacked. + paths (list[str]): Specify the path order of each stack level. + Each element in the list should be either 'bu' (bottom-up) or + 'td' (top-down). + inter_channels (int): Number of inter channels. + same_up_trans (dict): Transition that goes down at the same stage. + same_down_trans (dict): Transition that goes up at the same stage. + across_lateral_trans (dict): Across-pathway same-stage + across_down_trans (dict): Across-pathway bottom-up connection. + across_up_trans (dict): Across-pathway top-down connection. + across_skip_trans (dict): Across-pathway skip connection. + output_trans (dict): Transition that trans the output of the + last stage. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool): It decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, its actual mode is specified by `extra_convs_on_inputs`. + norm_cfg (dict): Config dict for normalization layer. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + transition_types = { + 'conv': ConvModule, + 'interpolation_conv': UpInterpolationConv, + 'last_conv': LastConv, + } + + def __init__(self, + in_channels, + out_channels, + num_outs, + stack_times, + paths, + inter_channels=None, + same_down_trans=None, + same_up_trans=dict( + type='conv', kernel_size=3, stride=2, padding=1), + across_lateral_trans=dict(type='conv', kernel_size=1), + across_down_trans=dict(type='conv', kernel_size=3), + across_up_trans=None, + across_skip_trans=dict(type='identity'), + output_trans=dict(type='last_conv', kernel_size=3), + start_level=0, + end_level=-1, + add_extra_convs=False, + norm_cfg=None, + skip_inds=None, + init_cfg=[ + dict(type='Caffe2Xavier', layer='Conv2d'), + dict( + type='Constant', + layer=[ + '_BatchNorm', '_InstanceNorm', 'GroupNorm', + 'LayerNorm' + ], + val=1.0) + ]): + super(FPG, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + if inter_channels is None: + self.inter_channels = [out_channels for _ in range(num_outs)] + elif isinstance(inter_channels, int): + self.inter_channels = [inter_channels for _ in range(num_outs)] + else: + assert isinstance(inter_channels, list) + assert len(inter_channels) == num_outs + self.inter_channels = inter_channels + self.stack_times = stack_times + self.paths = paths + assert isinstance(paths, list) and len(paths) == stack_times + for d in paths: + assert d in ('bu', 'td') + + self.same_down_trans = same_down_trans + self.same_up_trans = same_up_trans + self.across_lateral_trans = across_lateral_trans + self.across_down_trans = across_down_trans + self.across_up_trans = across_up_trans + self.output_trans = output_trans + self.across_skip_trans = across_skip_trans + + self.with_bias = norm_cfg is None + # skip inds must be specified if across skip trans is not None + if self.across_skip_trans is not None: + skip_inds is not None + self.skip_inds = skip_inds + assert len(self.skip_inds[0]) <= self.stack_times + + if end_level == -1 or end_level == self.num_ins - 1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level is not the last level, no extra level is allowed + self.backbone_end_level = end_level + 1 + assert end_level < self.num_ins + assert num_outs == end_level - start_level + 1 + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + + # build lateral 1x1 convs to reduce channels + self.lateral_convs = nn.ModuleList() + for i in range(self.start_level, self.backbone_end_level): + l_conv = nn.Conv2d(self.in_channels[i], + self.inter_channels[i - self.start_level], 1) + self.lateral_convs.append(l_conv) + + extra_levels = num_outs - self.backbone_end_level + self.start_level + self.extra_downsamples = nn.ModuleList() + for i in range(extra_levels): + if self.add_extra_convs: + fpn_idx = self.backbone_end_level - self.start_level + i + extra_conv = nn.Conv2d( + self.inter_channels[fpn_idx - 1], + self.inter_channels[fpn_idx], + 3, + stride=2, + padding=1) + self.extra_downsamples.append(extra_conv) + else: + self.extra_downsamples.append(nn.MaxPool2d(1, stride=2)) + + self.fpn_transitions = nn.ModuleList() # stack times + for s in range(self.stack_times): + stage_trans = nn.ModuleList() # num of feature levels + for i in range(self.num_outs): + # same, across_lateral, across_down, across_up + trans = nn.ModuleDict() + if s in 
self.skip_inds[i]: + stage_trans.append(trans) + continue + # build same-stage down trans (used in bottom-up paths) + if i == 0 or self.same_up_trans is None: + same_up_trans = None + else: + same_up_trans = self.build_trans( + self.same_up_trans, self.inter_channels[i - 1], + self.inter_channels[i]) + trans['same_up'] = same_up_trans + # build same-stage up trans (used in top-down paths) + if i == self.num_outs - 1 or self.same_down_trans is None: + same_down_trans = None + else: + same_down_trans = self.build_trans( + self.same_down_trans, self.inter_channels[i + 1], + self.inter_channels[i]) + trans['same_down'] = same_down_trans + # build across lateral trans + across_lateral_trans = self.build_trans( + self.across_lateral_trans, self.inter_channels[i], + self.inter_channels[i]) + trans['across_lateral'] = across_lateral_trans + # build across down trans + if i == self.num_outs - 1 or self.across_down_trans is None: + across_down_trans = None + else: + across_down_trans = self.build_trans( + self.across_down_trans, self.inter_channels[i + 1], + self.inter_channels[i]) + trans['across_down'] = across_down_trans + # build across up trans + if i == 0 or self.across_up_trans is None: + across_up_trans = None + else: + across_up_trans = self.build_trans( + self.across_up_trans, self.inter_channels[i - 1], + self.inter_channels[i]) + trans['across_up'] = across_up_trans + if self.across_skip_trans is None: + across_skip_trans = None + else: + across_skip_trans = self.build_trans( + self.across_skip_trans, self.inter_channels[i - 1], + self.inter_channels[i]) + trans['across_skip'] = across_skip_trans + # build across_skip trans + stage_trans.append(trans) + self.fpn_transitions.append(stage_trans) + + self.output_transition = nn.ModuleList() # output levels + for i in range(self.num_outs): + trans = self.build_trans( + self.output_trans, + self.inter_channels[i], + self.out_channels, + num_inputs=self.stack_times + 1) + self.output_transition.append(trans) + + self.relu = nn.ReLU(inplace=True) + + def build_trans(self, cfg, in_channels, out_channels, **extra_args): + cfg_ = cfg.copy() + trans_type = cfg_.pop('type') + trans_cls = self.transition_types[trans_type] + return trans_cls(in_channels, out_channels, **cfg_, **extra_args) + + def fuse(self, fuse_dict): + out = None + for item in fuse_dict.values(): + if item is not None: + if out is None: + out = item + else: + out = out + item + return out + + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + + # build all levels from original feature maps + feats = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + for downsample in self.extra_downsamples: + feats.append(downsample(feats[-1])) + + outs = [feats] + + for i in range(self.stack_times): + current_outs = outs[-1] + next_outs = [] + direction = self.paths[i] + for j in range(self.num_outs): + if i in self.skip_inds[j]: + next_outs.append(outs[-1][j]) + continue + # feature level + if direction == 'td': + lvl = self.num_outs - j - 1 + else: + lvl = j + # get transitions + if direction == 'td': + same_trans = self.fpn_transitions[i][lvl]['same_down'] + else: + same_trans = self.fpn_transitions[i][lvl]['same_up'] + across_lateral_trans = self.fpn_transitions[i][lvl][ + 'across_lateral'] + across_down_trans = self.fpn_transitions[i][lvl]['across_down'] + across_up_trans = self.fpn_transitions[i][lvl]['across_up'] + across_skip_trans = self.fpn_transitions[i][lvl]['across_skip'] + # init output + to_fuse = dict( 
+ same=None, lateral=None, across_up=None, across_down=None) + # same downsample/upsample + if same_trans is not None: + to_fuse['same'] = same_trans(next_outs[-1]) + # across lateral + if across_lateral_trans is not None: + to_fuse['lateral'] = across_lateral_trans( + current_outs[lvl]) + # across downsample + if lvl > 0 and across_up_trans is not None: + to_fuse['across_up'] = across_up_trans(current_outs[lvl - + 1]) + # across upsample + if (lvl < self.num_outs - 1 and across_down_trans is not None): + to_fuse['across_down'] = across_down_trans( + current_outs[lvl + 1]) + if across_skip_trans is not None: + to_fuse['across_skip'] = across_skip_trans(outs[0][lvl]) + x = self.fuse(to_fuse) + next_outs.append(x) + + if direction == 'td': + outs.append(next_outs[::-1]) + else: + outs.append(next_outs) + + # output trans + final_outs = [] + for i in range(self.num_outs): + lvl_out_list = [] + for s in range(len(outs)): + lvl_out_list.append(outs[s][i]) + lvl_out = self.output_transition[i](lvl_out_list) + final_outs.append(lvl_out) + + return final_outs diff --git a/downstream/mmdetection/mmdet/models/necks/fpn.py b/downstream/mmdetection/mmdet/models/necks/fpn.py new file mode 100644 index 0000000..4bdb5b2 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/fpn.py @@ -0,0 +1,204 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, auto_fp16 + +from ..builder import NECKS + + +@NECKS.register_module() +class FPN(BaseModule): + r"""Feature Pyramid Network. + + This is an implementation of paper `Feature Pyramid Networks for Object + Detection `_. + + Args: + in_channels (list[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, it is equivalent to `add_extra_convs='on_input'`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. + upsample_cfg (dict): Config dict for interpolate layer. + Default: dict(mode='nearest'). + init_cfg (dict or list[dict], optional): Initialization config dict. + + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = FPN(in_channels, 11, len(in_channels)).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... 
print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest'), + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(FPN, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1 or end_level == self.num_ins - 1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level is not the last level, no extra level is allowed + self.backbone_end_level = end_level + 1 + assert end_level < self.num_ins + assert num_outs == end_level - start_level + 1 + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + self.add_extra_convs = 'on_input' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + @auto_fp16() + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. 
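+            # e.g. upsample_cfg=dict(scale_factor=2, mode='nearest') takes the
+            # branch below, while the default dict(mode='nearest') falls back
+            # to interpolating to the explicit `size` of the finer-level
+            # lateral (laterals[i - 1]).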
+ if 'scale_factor' in self.upsample_cfg: + # fix runtime error of "+=" inplace operation in PyTorch 1.10 + laterals[i - 1] = laterals[i - 1] + F.interpolate( + laterals[i], **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] = laterals[i - 1] + F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/necks/fpn_carafe.py b/downstream/mmdetection/mmdet/models/necks/fpn_carafe.py new file mode 100644 index 0000000..fdd91f3 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/fpn_carafe.py @@ -0,0 +1,275 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init +from mmcv.ops.carafe import CARAFEPack +from mmcv.runner import BaseModule, ModuleList + +from ..builder import NECKS + + +@NECKS.register_module() +class FPN_CARAFE(BaseModule): + """FPN_CARAFE is a more flexible implementation of FPN. It allows more + choice for upsample methods during the top-down pathway. + + It can reproduce the performance of ICCV 2019 paper + CARAFE: Content-Aware ReAssembly of FEatures + Please refer to https://arxiv.org/abs/1905.02188 for more details. + + Args: + in_channels (list[int]): Number of channels for each input feature map. + out_channels (int): Output channels of feature pyramids. + num_outs (int): Number of output stages. + start_level (int): Start level of feature pyramids. + (Default: 0) + end_level (int): End level of feature pyramids. + (Default: -1 indicates the last level). + norm_cfg (dict): Dictionary to construct and config norm layer. + activate (str): Type of activation function in ConvModule + (Default: None indicates w/o activation). + order (dict): Order of components in ConvModule. + upsample (str): Type of upsample layer. + upsample_cfg (dict): Dictionary to construct and config upsample layer. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + norm_cfg=None, + act_cfg=None, + order=('conv', 'norm', 'act'), + upsample_cfg=dict( + type='carafe', + up_kernel=5, + up_group=1, + encoder_kernel=3, + encoder_dilation=1), + init_cfg=None): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(FPN_CARAFE, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.with_bias = norm_cfg is None + self.upsample_cfg = upsample_cfg.copy() + self.upsample = self.upsample_cfg.get('type') + self.relu = nn.ReLU(inplace=False) + + self.order = order + assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')] + + assert self.upsample in [ + 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None + ] + if self.upsample in ['deconv', 'pixel_shuffle']: + assert hasattr( + self.upsample_cfg, + 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0 + self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel') + + if end_level == -1 or end_level == self.num_ins - 1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level is not the last level, no extra level is allowed + self.backbone_end_level = end_level + 1 + assert end_level < self.num_ins + assert num_outs == end_level - start_level + 1 + self.start_level = start_level + self.end_level = end_level + + self.lateral_convs = ModuleList() + self.fpn_convs = ModuleList() + self.upsample_modules = ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + norm_cfg=norm_cfg, + bias=self.with_bias, + act_cfg=act_cfg, + inplace=False, + order=self.order) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + norm_cfg=self.norm_cfg, + bias=self.with_bias, + act_cfg=act_cfg, + inplace=False, + order=self.order) + if i != self.backbone_end_level - 1: + upsample_cfg_ = self.upsample_cfg.copy() + if self.upsample == 'deconv': + upsample_cfg_.update( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=self.upsample_kernel, + stride=2, + padding=(self.upsample_kernel - 1) // 2, + output_padding=(self.upsample_kernel - 1) // 2) + elif self.upsample == 'pixel_shuffle': + upsample_cfg_.update( + in_channels=out_channels, + out_channels=out_channels, + scale_factor=2, + upsample_kernel=self.upsample_kernel) + elif self.upsample == 'carafe': + upsample_cfg_.update(channels=out_channels, scale_factor=2) + else: + # suppress warnings + align_corners = (None + if self.upsample == 'nearest' else False) + upsample_cfg_.update( + scale_factor=2, + mode=self.upsample, + align_corners=align_corners) + upsample_module = build_upsample_layer(upsample_cfg_) + self.upsample_modules.append(upsample_module) + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_out_levels = ( + num_outs - self.backbone_end_level + self.start_level) + if extra_out_levels >= 1: + for i in range(extra_out_levels): + in_channels = ( + self.in_channels[self.backbone_end_level - + 1] if i == 0 else out_channels) + extra_l_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + 
norm_cfg=norm_cfg, + bias=self.with_bias, + act_cfg=act_cfg, + inplace=False, + order=self.order) + if self.upsample == 'deconv': + upsampler_cfg_ = dict( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=self.upsample_kernel, + stride=2, + padding=(self.upsample_kernel - 1) // 2, + output_padding=(self.upsample_kernel - 1) // 2) + elif self.upsample == 'pixel_shuffle': + upsampler_cfg_ = dict( + in_channels=out_channels, + out_channels=out_channels, + scale_factor=2, + upsample_kernel=self.upsample_kernel) + elif self.upsample == 'carafe': + upsampler_cfg_ = dict( + channels=out_channels, + scale_factor=2, + **self.upsample_cfg) + else: + # suppress warnings + align_corners = (None + if self.upsample == 'nearest' else False) + upsampler_cfg_ = dict( + scale_factor=2, + mode=self.upsample, + align_corners=align_corners) + upsampler_cfg_['type'] = self.upsample + upsample_module = build_upsample_layer(upsampler_cfg_) + extra_fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + norm_cfg=self.norm_cfg, + bias=self.with_bias, + act_cfg=act_cfg, + inplace=False, + order=self.order) + self.upsample_modules.append(upsample_module) + self.fpn_convs.append(extra_fpn_conv) + self.lateral_convs.append(extra_l_conv) + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + """Initialize the weights of module.""" + super(FPN_CARAFE, self).init_weights() + for m in self.modules(): + if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): + xavier_init(m, distribution='uniform') + for m in self.modules(): + if isinstance(m, CARAFEPack): + m.init_weights() + + def slice_as(self, src, dst): + """Slice ``src`` as ``dst`` + + Note: + ``src`` should have the same or larger size than ``dst``. + + Args: + src (torch.Tensor): Tensors to be sliced. + dst (torch.Tensor): ``src`` will be sliced to have the same + size as ``dst``. + + Returns: + torch.Tensor: Sliced tensor. + """ + assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3)) + if src.size(2) == dst.size(2) and src.size(3) == dst.size(3): + return src + else: + return src[:, :, :dst.size(2), :dst.size(3)] + + def tensor_add(self, a, b): + """Add tensors ``a`` and ``b`` that might have different sizes.""" + if a.size() == b.size(): + c = a + b + else: + c = a + self.slice_as(b, a) + return c + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [] + for i, lateral_conv in enumerate(self.lateral_convs): + if i <= self.backbone_end_level - self.start_level: + input = inputs[min(i + self.start_level, len(inputs) - 1)] + else: + input = laterals[-1] + lateral = lateral_conv(input) + laterals.append(lateral) + + # build top-down path + for i in range(len(laterals) - 1, 0, -1): + if self.upsample is not None: + upsample_feat = self.upsample_modules[i - 1](laterals[i]) + else: + upsample_feat = laterals[i] + laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat) + + # build outputs + num_conv_outs = len(self.fpn_convs) + outs = [] + for i in range(num_conv_outs): + out = self.fpn_convs[i](laterals[i]) + outs.append(out) + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/necks/hrfpn.py b/downstream/mmdetection/mmdet/models/necks/hrfpn.py new file mode 100644 index 0000000..ca15be6 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/hrfpn.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
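+# Illustrative usage sketch of the HRFPN neck defined below (channel and
+# spatial sizes are arbitrary assumptions, not defaults):
+#     neck = HRFPN(in_channels=[32, 64, 128, 256], out_channels=256)
+#     inputs = [torch.rand(1, c, 64 // 2**i, 64 // 2**i)
+#               for i, c in enumerate([32, 64, 128, 256])]
+#     outs = neck(inputs)  # 5 maps, 256 channels, at 64, 32, 16, 8, 4 pixels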
+import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule +from torch.utils.checkpoint import checkpoint + +from ..builder import NECKS + + +@NECKS.register_module() +class HRFPN(BaseModule): + """HRFPN (High Resolution Feature Pyramids) + + paper: `High-Resolution Representations for Labeling Pixels and Regions + `_. + + Args: + in_channels (list): number of channels for each branch. + out_channels (int): output channels of feature pyramids. + num_outs (int): number of output stages. + pooling_type (str): pooling for generating feature pyramids + from {MAX, AVG}. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + stride (int): stride of 3x3 convolutional layers + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + in_channels, + out_channels, + num_outs=5, + pooling_type='AVG', + conv_cfg=None, + norm_cfg=None, + with_cp=False, + stride=1, + init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): + super(HRFPN, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.reduction_conv = ConvModule( + sum(in_channels), + out_channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + act_cfg=None) + + self.fpn_convs = nn.ModuleList() + for i in range(self.num_outs): + self.fpn_convs.append( + ConvModule( + out_channels, + out_channels, + kernel_size=3, + padding=1, + stride=stride, + conv_cfg=self.conv_cfg, + act_cfg=None)) + + if pooling_type == 'MAX': + self.pooling = F.max_pool2d + else: + self.pooling = F.avg_pool2d + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == self.num_ins + outs = [inputs[0]] + for i in range(1, self.num_ins): + outs.append( + F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear')) + out = torch.cat(outs, dim=1) + if out.requires_grad and self.with_cp: + out = checkpoint(self.reduction_conv, out) + else: + out = self.reduction_conv(out) + outs = [out] + for i in range(1, self.num_outs): + outs.append(self.pooling(out, kernel_size=2**i, stride=2**i)) + outputs = [] + + for i in range(self.num_outs): + if outs[i].requires_grad and self.with_cp: + tmp_out = checkpoint(self.fpn_convs[i], outs[i]) + else: + tmp_out = self.fpn_convs[i](outs[i]) + outputs.append(tmp_out) + return tuple(outputs) diff --git a/downstream/mmdetection/mmdet/models/necks/nas_fpn.py b/downstream/mmdetection/mmdet/models/necks/nas_fpn.py new file mode 100644 index 0000000..710592e --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/nas_fpn.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell +from mmcv.runner import BaseModule, ModuleList + +from ..builder import NECKS + + +@NECKS.register_module() +class NASFPN(BaseModule): + """NAS-FPN. + + Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture + for Object Detection `_ + + Args: + in_channels (List[int]): Number of input channels per scale. 
+ out_channels (int): Number of output channels (used at each scale) + num_outs (int): Number of output scales. + stack_times (int): The number of times the pyramid architecture will + be stacked. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool): It decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, its actual mode is specified by `extra_convs_on_inputs`. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + stack_times, + start_level=0, + end_level=-1, + add_extra_convs=False, + norm_cfg=None, + init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): + super(NASFPN, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) # num of input feature levels + self.num_outs = num_outs # num of output feature levels + self.stack_times = stack_times + self.norm_cfg = norm_cfg + + if end_level == -1 or end_level == self.num_ins - 1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level is not the last level, no extra level is allowed + self.backbone_end_level = end_level + 1 + assert end_level < self.num_ins + assert num_outs == end_level - start_level + 1 + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + + # add lateral connections + self.lateral_convs = nn.ModuleList() + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=None) + self.lateral_convs.append(l_conv) + + # add extra downsample layers (stride-2 pooling or conv) + extra_levels = num_outs - self.backbone_end_level + self.start_level + self.extra_downsamples = nn.ModuleList() + for i in range(extra_levels): + extra_conv = ConvModule( + out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) + self.extra_downsamples.append( + nn.Sequential(extra_conv, nn.MaxPool2d(2, 2))) + + # add NAS FPN connections + self.fpn_stages = ModuleList() + for _ in range(self.stack_times): + stage = nn.ModuleDict() + # gp(p6, p4) -> p4_1 + stage['gp_64_4'] = GlobalPoolingCell( + in_channels=out_channels, + out_channels=out_channels, + out_norm_cfg=norm_cfg) + # sum(p4_1, p4) -> p4_2 + stage['sum_44_4'] = SumCell( + in_channels=out_channels, + out_channels=out_channels, + out_norm_cfg=norm_cfg) + # sum(p4_2, p3) -> p3_out + stage['sum_43_3'] = SumCell( + in_channels=out_channels, + out_channels=out_channels, + out_norm_cfg=norm_cfg) + # sum(p3_out, p4_2) -> p4_out + stage['sum_34_4'] = SumCell( + in_channels=out_channels, + out_channels=out_channels, + out_norm_cfg=norm_cfg) + # sum(p5, gp(p4_out, p3_out)) -> p5_out + stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False) + stage['sum_55_5'] = SumCell( + in_channels=out_channels, + out_channels=out_channels, + out_norm_cfg=norm_cfg) + # sum(p7, gp(p5_out, p4_2)) -> p7_out + stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False) + stage['sum_77_7'] = SumCell( + in_channels=out_channels, + out_channels=out_channels, + out_norm_cfg=norm_cfg) + # gp(p7_out, p5_out) -> p6_out + stage['gp_75_6'] = GlobalPoolingCell( + 
in_channels=out_channels, + out_channels=out_channels, + out_norm_cfg=norm_cfg) + self.fpn_stages.append(stage) + + def forward(self, inputs): + """Forward function.""" + # build P3-P5 + feats = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + # build P6-P7 on top of P5 + for downsample in self.extra_downsamples: + feats.append(downsample(feats[-1])) + + p3, p4, p5, p6, p7 = feats + + for stage in self.fpn_stages: + # gp(p6, p4) -> p4_1 + p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:]) + # sum(p4_1, p4) -> p4_2 + p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:]) + # sum(p4_2, p3) -> p3_out + p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:]) + # sum(p3_out, p4_2) -> p4_out + p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:]) + # sum(p5, gp(p4_out, p3_out)) -> p5_out + p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:]) + p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:]) + # sum(p7, gp(p5_out, p4_2)) -> p7_out + p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:]) + p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:]) + # gp(p7_out, p5_out) -> p6_out + p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:]) + + return p3, p4, p5, p6, p7 diff --git a/downstream/mmdetection/mmdet/models/necks/nasfcos_fpn.py b/downstream/mmdetection/mmdet/models/necks/nasfcos_fpn.py new file mode 100644 index 0000000..c4abfe7 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/nasfcos_fpn.py @@ -0,0 +1,170 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, caffe2_xavier_init +from mmcv.ops.merge_cells import ConcatCell +from mmcv.runner import BaseModule + +from ..builder import NECKS + + +@NECKS.register_module() +class NASFCOS_FPN(BaseModule): + """FPN structure in NASFPN. + + Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for + Object Detection `_ + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool): It decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, its actual mode is specified by `extra_convs_on_inputs`. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=1, + end_level=-1, + add_extra_convs=False, + conv_cfg=None, + norm_cfg=None, + init_cfg=None): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(NASFCOS_FPN, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + + if end_level == -1 or end_level == self.num_ins - 1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level is not the last level, no extra level is allowed + self.backbone_end_level = end_level + 1 + assert end_level < self.num_ins + assert num_outs == end_level - start_level + 1 + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + + self.adapt_convs = nn.ModuleList() + for i in range(self.start_level, self.backbone_end_level): + adapt_conv = ConvModule( + in_channels[i], + out_channels, + 1, + stride=1, + padding=0, + bias=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU', inplace=False)) + self.adapt_convs.append(adapt_conv) + + # C2 is omitted according to the paper + extra_levels = num_outs - self.backbone_end_level + self.start_level + + def build_concat_cell(with_input1_conv, with_input2_conv): + cell_conv_cfg = dict( + kernel_size=1, padding=0, bias=False, groups=out_channels) + return ConcatCell( + in_channels=out_channels, + out_channels=out_channels, + with_out_conv=True, + out_conv_cfg=cell_conv_cfg, + out_norm_cfg=dict(type='BN'), + out_conv_order=('norm', 'act', 'conv'), + with_input1_conv=with_input1_conv, + with_input2_conv=with_input2_conv, + input_conv_cfg=conv_cfg, + input_norm_cfg=norm_cfg, + upsample_mode='nearest') + + # Denote c3=f0, c4=f1, c5=f2 for convince + self.fpn = nn.ModuleDict() + self.fpn['c22_1'] = build_concat_cell(True, True) + self.fpn['c22_2'] = build_concat_cell(True, True) + self.fpn['c32'] = build_concat_cell(True, False) + self.fpn['c02'] = build_concat_cell(True, False) + self.fpn['c42'] = build_concat_cell(True, True) + self.fpn['c36'] = build_concat_cell(True, True) + self.fpn['c61'] = build_concat_cell(True, True) # f9 + self.extra_downsamples = nn.ModuleList() + for i in range(extra_levels): + extra_act_cfg = None if i == 0 \ + else dict(type='ReLU', inplace=False) + self.extra_downsamples.append( + ConvModule( + out_channels, + out_channels, + 3, + stride=2, + padding=1, + act_cfg=extra_act_cfg, + order=('act', 'norm', 'conv'))) + + def forward(self, inputs): + """Forward function.""" + feats = [ + adapt_conv(inputs[i + self.start_level]) + for i, adapt_conv in enumerate(self.adapt_convs) + ] + + for (i, module_name) in enumerate(self.fpn): + idx_1, idx_2 = int(module_name[1]), int(module_name[2]) + res = self.fpn[module_name](feats[idx_1], feats[idx_2]) + feats.append(res) + + ret = [] + for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5 + feats1, feats2 = feats[idx], feats[5] + feats2_resize = F.interpolate( + feats2, + size=feats1.size()[2:], + mode='bilinear', + align_corners=False) + + feats_sum = feats1 + feats2_resize + ret.append( + F.interpolate( + feats_sum, + size=inputs[input_idx].size()[2:], + mode='bilinear', + align_corners=False)) + + for submodule in self.extra_downsamples: + ret.append(submodule(ret[-1])) + + 
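+        # ret holds the three fused pyramid outputs plus the extra
+        # downsampled levels appended by the loop above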
return tuple(ret) + + def init_weights(self): + """Initialize the weights of module.""" + super(NASFCOS_FPN, self).init_weights() + for module in self.fpn.values(): + if hasattr(module, 'conv_out'): + caffe2_xavier_init(module.out_conv.conv) + + for modules in [ + self.adapt_convs.modules(), + self.extra_downsamples.modules() + ]: + for module in modules: + if isinstance(module, nn.Conv2d): + caffe2_xavier_init(module) diff --git a/downstream/mmdetection/mmdet/models/necks/pafpn.py b/downstream/mmdetection/mmdet/models/necks/pafpn.py new file mode 100644 index 0000000..8d5e32f --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/pafpn.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import auto_fp16 + +from ..builder import NECKS +from .fpn import FPN + + +@NECKS.register_module() +class PAFPN(FPN): + """Path Aggregation Network for Instance Segmentation. + + This is an implementation of the `PAFPN in Path Aggregation Network + `_. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, it is equivalent to `add_extra_convs='on_input'`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (str): Config dict for activation layer in ConvModule. + Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. 
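+
+    Example (illustrative sketch; channel sizes are arbitrary assumptions and
+        each spatial scale is twice the next so the stride-2 bottom-up convs
+        line up):
+        >>> import torch
+        >>> in_channels = [2, 3, 5, 7]
+        >>> scales = [64, 32, 16, 8]
+        >>> inputs = [torch.rand(1, c, s, s)
+        ...           for c, s in zip(in_channels, scales)]
+        >>> self = PAFPN(in_channels, 11, len(in_channels)).eval()
+        >>> outputs = self.forward(inputs)
+        >>> # each outputs[i] is expected to have shape
+        >>> # (1, 11, scales[i], scales[i])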
+ """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(PAFPN, self).__init__( + in_channels, + out_channels, + num_outs, + start_level, + end_level, + add_extra_convs, + relu_before_extra_convs, + no_norm_on_lateral, + conv_cfg, + norm_cfg, + act_cfg, + init_cfg=init_cfg) + # add extra bottom up pathway + self.downsample_convs = nn.ModuleList() + self.pafpn_convs = nn.ModuleList() + for i in range(self.start_level + 1, self.backbone_end_level): + d_conv = ConvModule( + out_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + pafpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.downsample_convs.append(d_conv) + self.pafpn_convs.append(pafpn_conv) + + @auto_fp16() + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] += F.interpolate( + laterals[i], size=prev_shape, mode='nearest') + + # build outputs + # part 1: from original levels + inter_outs = [ + self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) + ] + + # part 2: add bottom-up path + for i in range(0, used_backbone_levels - 1): + inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i]) + + outs = [] + outs.append(inter_outs[0]) + outs.extend([ + self.pafpn_convs[i - 1](inter_outs[i]) + for i in range(1, used_backbone_levels) + ]) + + # part 3: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + orig = inputs[self.backbone_end_level - 1] + outs.append(self.fpn_convs[used_backbone_levels](orig)) + elif self.add_extra_convs == 'on_lateral': + outs.append(self.fpn_convs[used_backbone_levels]( + laterals[-1])) + elif self.add_extra_convs == 'on_output': + outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) + else: + raise NotImplementedError + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/necks/rfp.py b/downstream/mmdetection/mmdet/models/necks/rfp.py new file mode 100644 index 0000000..6976f4d --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/rfp.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import constant_init, xavier_init +from mmcv.runner import BaseModule, ModuleList + +from ..builder import NECKS, build_backbone +from .fpn import FPN + + +class ASPP(BaseModule): + """ASPP (Atrous Spatial Pyramid Pooling) + + This is an implementation of the ASPP module used in DetectoRS + (https://arxiv.org/pdf/2006.02334.pdf) + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of channels produced by this module + dilations (tuple[int]): Dilations of the four branches. + Default: (1, 3, 6, 1) + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + in_channels, + out_channels, + dilations=(1, 3, 6, 1), + init_cfg=dict(type='Kaiming', layer='Conv2d')): + super().__init__(init_cfg) + assert dilations[-1] == 1 + self.aspp = nn.ModuleList() + for dilation in dilations: + kernel_size = 3 if dilation > 1 else 1 + padding = dilation if dilation > 1 else 0 + conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=1, + dilation=dilation, + padding=padding, + bias=True) + self.aspp.append(conv) + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + avg_x = self.gap(x) + out = [] + for aspp_idx in range(len(self.aspp)): + inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x + out.append(F.relu_(self.aspp[aspp_idx](inp))) + out[-1] = out[-1].expand_as(out[-2]) + out = torch.cat(out, dim=1) + return out + + +@NECKS.register_module() +class RFP(FPN): + """RFP (Recursive Feature Pyramid) + + This is an implementation of RFP in `DetectoRS + `_. Different from standard FPN, the + input of RFP should be multi level features along with origin input image + of backbone. + + Args: + rfp_steps (int): Number of unrolled steps of RFP. + rfp_backbone (dict): Configuration of the backbone for RFP. + aspp_out_channels (int): Number of output channels of ASPP module. + aspp_dilations (tuple[int]): Dilation rates of four branches. + Default: (1, 3, 6, 1) + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + rfp_steps, + rfp_backbone, + aspp_out_channels, + aspp_dilations=(1, 3, 6, 1), + init_cfg=None, + **kwargs): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super().__init__(init_cfg=init_cfg, **kwargs) + self.rfp_steps = rfp_steps + # Be careful! Pretrained weights cannot be loaded when use + # nn.ModuleList + self.rfp_modules = ModuleList() + for rfp_idx in range(1, rfp_steps): + rfp_module = build_backbone(rfp_backbone) + self.rfp_modules.append(rfp_module) + self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels, + aspp_dilations) + self.rfp_weight = nn.Conv2d( + self.out_channels, + 1, + kernel_size=1, + stride=1, + padding=0, + bias=True) + + def init_weights(self): + # Avoid using super().init_weights(), which may alter the default + # initialization of the modules in self.rfp_modules that have missing + # keys in the pretrained checkpoint. 
+ for convs in [self.lateral_convs, self.fpn_convs]: + for m in convs.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + for rfp_idx in range(self.rfp_steps - 1): + self.rfp_modules[rfp_idx].init_weights() + constant_init(self.rfp_weight, 0) + + def forward(self, inputs): + inputs = list(inputs) + assert len(inputs) == len(self.in_channels) + 1 # +1 for input image + img = inputs.pop(0) + # FPN forward + x = super().forward(tuple(inputs)) + for rfp_idx in range(self.rfp_steps - 1): + rfp_feats = [x[0]] + list( + self.rfp_aspp(x[i]) for i in range(1, len(x))) + x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats) + # FPN forward + x_idx = super().forward(x_idx) + x_new = [] + for ft_idx in range(len(x_idx)): + add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx])) + x_new.append(add_weight * x_idx[ft_idx] + + (1 - add_weight) * x[ft_idx]) + x = x_new + return x diff --git a/downstream/mmdetection/mmdet/models/necks/ssd_neck.py b/downstream/mmdetection/mmdet/models/necks/ssd_neck.py new file mode 100644 index 0000000..179d575 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/ssd_neck.py @@ -0,0 +1,129 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.runner import BaseModule + +from ..builder import NECKS + + +@NECKS.register_module() +class SSDNeck(BaseModule): + """Extra layers of SSD backbone to generate multi-scale feature maps. + + Args: + in_channels (Sequence[int]): Number of input channels per scale. + out_channels (Sequence[int]): Number of output channels per scale. + level_strides (Sequence[int]): Stride of 3x3 conv per level. + level_paddings (Sequence[int]): Padding size of 3x3 conv per level. + l2_norm_scale (float|None): L2 normalization layer init scale. + If None, not use L2 normalization on the first input feature. + last_kernel_size (int): Kernel size of the last conv layer. + Default: 3. + use_depthwise (bool): Whether to use DepthwiseSeparableConv. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: None. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. 
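+
+    Example (illustrative sketch; channel and spatial sizes are arbitrary
+        assumptions, not defaults):
+        >>> import torch
+        >>> self = SSDNeck(
+        ...     in_channels=[4, 8],
+        ...     out_channels=[4, 8, 16, 32],
+        ...     level_strides=[2, 2],
+        ...     level_paddings=[1, 1],
+        ...     l2_norm_scale=None)
+        >>> inputs = [torch.rand(1, 4, 32, 32), torch.rand(1, 8, 16, 16)]
+        >>> outputs = self(inputs)
+        >>> # 4 maps: the two inputs plus extra levels of 16 and 32 channels
+        >>> # at 8x8 and 4x4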
+ """ + + def __init__(self, + in_channels, + out_channels, + level_strides, + level_paddings, + l2_norm_scale=20., + last_kernel_size=3, + use_depthwise=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + init_cfg=[ + dict( + type='Xavier', distribution='uniform', + layer='Conv2d'), + dict(type='Constant', val=1, layer='BatchNorm2d'), + ]): + super(SSDNeck, self).__init__(init_cfg) + assert len(out_channels) > len(in_channels) + assert len(out_channels) - len(in_channels) == len(level_strides) + assert len(level_strides) == len(level_paddings) + assert in_channels == out_channels[:len(in_channels)] + + if l2_norm_scale: + self.l2_norm = L2Norm(in_channels[0], l2_norm_scale) + self.init_cfg += [ + dict( + type='Constant', + val=self.l2_norm.scale, + override=dict(name='l2_norm')) + ] + + self.extra_layers = nn.ModuleList() + extra_layer_channels = out_channels[len(in_channels):] + second_conv = DepthwiseSeparableConvModule if \ + use_depthwise else ConvModule + + for i, (out_channel, stride, padding) in enumerate( + zip(extra_layer_channels, level_strides, level_paddings)): + kernel_size = last_kernel_size \ + if i == len(extra_layer_channels) - 1 else 3 + per_lvl_convs = nn.Sequential( + ConvModule( + out_channels[len(in_channels) - 1 + i], + out_channel // 2, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + second_conv( + out_channel // 2, + out_channel, + kernel_size, + stride=stride, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.extra_layers.append(per_lvl_convs) + + def forward(self, inputs): + """Forward function.""" + outs = [feat for feat in inputs] + if hasattr(self, 'l2_norm'): + outs[0] = self.l2_norm(outs[0]) + + feat = outs[-1] + for layer in self.extra_layers: + feat = layer(feat) + outs.append(feat) + return tuple(outs) + + +class L2Norm(nn.Module): + + def __init__(self, n_dims, scale=20., eps=1e-10): + """L2 normalization layer. + + Args: + n_dims (int): Number of dimensions to be normalized + scale (float, optional): Defaults to 20.. + eps (float, optional): Used to avoid division by zero. + Defaults to 1e-10. + """ + super(L2Norm, self).__init__() + self.n_dims = n_dims + self.weight = nn.Parameter(torch.Tensor(self.n_dims)) + self.eps = eps + self.scale = scale + + def forward(self, x): + """Forward function.""" + # normalization layer convert to FP32 in FP16 training + x_float = x.float() + norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps + return (self.weight[None, :, None, None].float().expand_as(x_float) * + x_float / norm).type_as(x) diff --git a/downstream/mmdetection/mmdet/models/necks/yolo_neck.py b/downstream/mmdetection/mmdet/models/necks/yolo_neck.py new file mode 100644 index 0000000..c8eeb57 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/yolo_neck.py @@ -0,0 +1,140 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Copyright (c) 2019 Western Digital Corporation or its affiliates. + +import torch +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from ..builder import NECKS + + +class DetectionBlock(BaseModule): + """Detection block in YOLO neck. + + Let out_channels = n, the DetectionBlock contains: + Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer. + The first 6 ConvLayers are formed the following way: + 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n. + The Conv2D layer is 1x1x255. + Some block will have branch after the fifth ConvLayer. 
+ The input channel is arbitrary (in_channels) + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True) + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + init_cfg=None): + super(DetectionBlock, self).__init__(init_cfg) + double_out_channels = out_channels * 2 + + # shortcut + cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) + self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg) + self.conv2 = ConvModule( + out_channels, double_out_channels, 3, padding=1, **cfg) + self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg) + self.conv4 = ConvModule( + out_channels, double_out_channels, 3, padding=1, **cfg) + self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg) + + def forward(self, x): + tmp = self.conv1(x) + tmp = self.conv2(tmp) + tmp = self.conv3(tmp) + tmp = self.conv4(tmp) + out = self.conv5(tmp) + return out + + +@NECKS.register_module() +class YOLOV3Neck(BaseModule): + """The neck of YOLOV3. + + It can be treated as a simplified version of FPN. It + will take the result from Darknet backbone and do some upsampling and + concatenation. It will finally output the detection result. + + Note: + The input feats should be from top to bottom. + i.e., from high-lvl to low-lvl + But YOLOV3Neck will process them in reversed order. + i.e., from bottom (high-lvl) to top (low-lvl) + + Args: + num_scales (int): The number of scales / stages. + in_channels (List[int]): The number of input channels per scale. + out_channels (List[int]): The number of output channels per scale. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None. + norm_cfg (dict, optional): Dictionary to construct and config norm + layer. Default: dict(type='BN', requires_grad=True) + act_cfg (dict, optional): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + num_scales, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + init_cfg=None): + super(YOLOV3Neck, self).__init__(init_cfg) + assert (num_scales == len(in_channels) == len(out_channels)) + self.num_scales = num_scales + self.in_channels = in_channels + self.out_channels = out_channels + + # shortcut + cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) + + # To support arbitrary scales, the code looks awful, but it works. + # Better solution is welcomed. 
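+        # Submodules are registered as conv1..conv{num_scales-1} and
+        # detect1..detect{num_scales}; forward() fetches them by name
+        # via getattr.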
+ self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg) + for i in range(1, self.num_scales): + in_c, out_c = self.in_channels[i], self.out_channels[i] + inter_c = out_channels[i - 1] + self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg)) + # in_c + out_c : High-lvl feats will be cat with low-lvl feats + self.add_module(f'detect{i+1}', + DetectionBlock(in_c + out_c, out_c, **cfg)) + + def forward(self, feats): + assert len(feats) == self.num_scales + + # processed from bottom (high-lvl) to top (low-lvl) + outs = [] + out = self.detect1(feats[-1]) + outs.append(out) + + for i, x in enumerate(reversed(feats[:-1])): + conv = getattr(self, f'conv{i+1}') + tmp = conv(out) + + # Cat with low-lvl feats + tmp = F.interpolate(tmp, scale_factor=2) + tmp = torch.cat((tmp, x), 1) + + detect = getattr(self, f'detect{i+2}') + out = detect(tmp) + outs.append(out) + + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/necks/yolox_pafpn.py b/downstream/mmdetection/mmdet/models/necks/yolox_pafpn.py new file mode 100644 index 0000000..b0f6f70 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/necks/yolox_pafpn.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.runner import BaseModule + +from ..builder import NECKS +from ..utils import CSPLayer + + +@NECKS.register_module() +class YOLOXPAFPN(BaseModule): + """Path Aggregation Network used in YOLOX. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3 + use_depthwise (bool): Whether to depthwise separable convolution in + blocks. Default: False + upsample_cfg (dict): Config dict for interpolate layer. + Default: `dict(scale_factor=2, mode='nearest')` + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN') + act_cfg (dict): Config dict for activation layer. + Default: dict(type='Swish') + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
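+
+    Example:
+        A minimal usage sketch; the channel numbers and spatial sizes below
+        are arbitrary and only illustrate the expected output shapes.
+
+        >>> import torch
+        >>> in_channels = [128, 256, 512]
+        >>> scales = [40, 20, 10]
+        >>> inputs = [torch.rand(1, c, s, s)
+        ...           for c, s in zip(in_channels, scales)]
+        >>> self = YOLOXPAFPN(in_channels=in_channels, out_channels=128)
+        >>> outputs = self.forward(inputs)
+        >>> for out in outputs:
+        ...     print(tuple(out.shape))
+        (1, 128, 40, 40)
+        (1, 128, 20, 20)
+        (1, 128, 10, 10)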
+ """ + + def __init__(self, + in_channels, + out_channels, + num_csp_blocks=3, + use_depthwise=False, + upsample_cfg=dict(scale_factor=2, mode='nearest'), + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu')): + super(YOLOXPAFPN, self).__init__(init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + + # build top-down blocks + self.upsample = nn.Upsample(**upsample_cfg) + self.reduce_layers = nn.ModuleList() + self.top_down_blocks = nn.ModuleList() + for idx in range(len(in_channels) - 1, 0, -1): + self.reduce_layers.append( + ConvModule( + in_channels[idx], + in_channels[idx - 1], + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.top_down_blocks.append( + CSPLayer( + in_channels[idx - 1] * 2, + in_channels[idx - 1], + num_blocks=num_csp_blocks, + add_identity=False, + use_depthwise=use_depthwise, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + # build bottom-up blocks + self.downsamples = nn.ModuleList() + self.bottom_up_blocks = nn.ModuleList() + for idx in range(len(in_channels) - 1): + self.downsamples.append( + conv( + in_channels[idx], + in_channels[idx], + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.bottom_up_blocks.append( + CSPLayer( + in_channels[idx] * 2, + in_channels[idx + 1], + num_blocks=num_csp_blocks, + add_identity=False, + use_depthwise=use_depthwise, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.out_convs = nn.ModuleList() + for i in range(len(in_channels)): + self.out_convs.append( + ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs): + """ + Args: + inputs (tuple[Tensor]): input features. + + Returns: + tuple[Tensor]: YOLOXPAFPN features. + """ + assert len(inputs) == len(self.in_channels) + + # top-down path + inner_outs = [inputs[-1]] + for idx in range(len(self.in_channels) - 1, 0, -1): + feat_heigh = inner_outs[0] + feat_low = inputs[idx - 1] + feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx]( + feat_heigh) + inner_outs[0] = feat_heigh + + upsample_feat = self.upsample(feat_heigh) + + inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx]( + torch.cat([upsample_feat, feat_low], 1)) + inner_outs.insert(0, inner_out) + + # bottom-up path + outs = [inner_outs[0]] + for idx in range(len(self.in_channels) - 1): + feat_low = outs[-1] + feat_height = inner_outs[idx + 1] + downsample_feat = self.downsamples[idx](feat_low) + out = self.bottom_up_blocks[idx]( + torch.cat([downsample_feat, feat_height], 1)) + outs.append(out) + + # out convs + for idx, conv in enumerate(self.out_convs): + outs[idx] = conv(outs[idx]) + + return tuple(outs) diff --git a/downstream/mmdetection/mmdet/models/plugins/__init__.py b/downstream/mmdetection/mmdet/models/plugins/__init__.py new file mode 100644 index 0000000..a455c07 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/plugins/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .dropblock import DropBlock +from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder +from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder + +__all__ = [ + 'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder', + 'MSDeformAttnPixelDecoder' +] diff --git a/downstream/mmdetection/mmdet/models/plugins/dropblock.py b/downstream/mmdetection/mmdet/models/plugins/dropblock.py new file mode 100644 index 0000000..bb00ade --- /dev/null +++ b/downstream/mmdetection/mmdet/models/plugins/dropblock.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import PLUGIN_LAYERS + +eps = 1e-6 + + +@PLUGIN_LAYERS.register_module() +class DropBlock(nn.Module): + """Randomly drop some regions of feature maps. + + Please refer to the method proposed in `DropBlock + `_ for details. + + Args: + drop_prob (float): The probability of dropping each block. + block_size (int): The size of dropped blocks. + warmup_iters (int): The drop probability will linearly increase + from `0` to `drop_prob` during the first `warmup_iters` iterations. + Default: 2000. + """ + + def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs): + super(DropBlock, self).__init__() + assert block_size % 2 == 1 + assert 0 < drop_prob <= 1 + assert warmup_iters >= 0 + self.drop_prob = drop_prob + self.block_size = block_size + self.warmup_iters = warmup_iters + self.iter_cnt = 0 + + def forward(self, x): + """ + Args: + x (Tensor): Input feature map on which some areas will be randomly + dropped. + + Returns: + Tensor: The tensor after DropBlock layer. + """ + if not self.training: + return x + self.iter_cnt += 1 + N, C, H, W = list(x.shape) + gamma = self._compute_gamma((H, W)) + mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1) + mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device)) + + mask = F.pad(mask, [self.block_size // 2] * 4, value=0) + mask = F.max_pool2d( + input=mask, + stride=(1, 1), + kernel_size=(self.block_size, self.block_size), + padding=self.block_size // 2) + mask = 1 - mask + x = x * mask * mask.numel() / (eps + mask.sum()) + return x + + def _compute_gamma(self, feat_size): + """Compute the value of gamma according to paper. gamma is the + parameter of bernoulli distribution, which controls the number of + features to drop. + + gamma = (drop_prob * fm_area) / (drop_area * keep_area) + + Args: + feat_size (tuple[int, int]): The height and width of feature map. + + Returns: + float: The value of gamma. + """ + gamma = (self.drop_prob * feat_size[0] * feat_size[1]) + gamma /= ((feat_size[0] - self.block_size + 1) * + (feat_size[1] - self.block_size + 1)) + gamma /= (self.block_size**2) + factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt / + self.warmup_iters) + return gamma * factor + + def extra_repr(self): + return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, ' + f'warmup_iters={self.warmup_iters}') diff --git a/downstream/mmdetection/mmdet/models/plugins/msdeformattn_pixel_decoder.py b/downstream/mmdetection/mmdet/models/plugins/msdeformattn_pixel_decoder.py new file mode 100644 index 0000000..d553582 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/plugins/msdeformattn_pixel_decoder.py @@ -0,0 +1,269 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
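+# Overview (descriptive note): the last ``num_encoder_levels`` (coarsest)
+# backbone features are projected to ``feat_channels``, flattened and fused
+# by a multi-scale deformable-attention encoder; the remaining
+# higher-resolution features are then merged FPN-style on top of the encoder
+# outputs. The decoder returns a single ``mask_feature`` map plus
+# ``num_outs`` multi-scale features.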
+import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init, + normal_init, xavier_init) +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer_sequence) +from mmcv.runner import BaseModule, ModuleList + +from mmdet.core.anchor import MlvlPointGenerator +from mmdet.models.utils.transformer import MultiScaleDeformableAttention + + +@PLUGIN_LAYERS.register_module() +class MSDeformAttnPixelDecoder(BaseModule): + """Pixel decoder with multi-scale deformable attention. + + Args: + in_channels (list[int] | tuple[int]): Number of channels in the + input feature maps. + strides (list[int] | tuple[int]): Output strides of feature from + backbone. + feat_channels (int): Number of channels for feature. + out_channels (int): Number of channels for output. + num_outs (int): Number of output scales. + norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. + Defaults to dict(type='GN', num_groups=32). + act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. + Defaults to dict(type='ReLU'). + encoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer + encoder. Defaults to `DetrTransformerEncoder`. + positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for + transformer encoder position encoding. Defaults to + dict(type='SinePositionalEncoding', num_feats=128, + normalize=True). + init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. + """ + + def __init__(self, + in_channels=[256, 512, 1024, 2048], + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + feedforward_channels=1024, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.strides = strides + self.num_input_levels = len(in_channels) + self.num_encoder_levels = \ + encoder.transformerlayers.attn_cfgs.num_levels + assert self.num_encoder_levels >= 1, \ + 'num_levels in attn_cfgs must be at least one' + input_conv_list = [] + # from top to down (low to high resolution) + for i in range(self.num_input_levels - 1, + self.num_input_levels - self.num_encoder_levels - 1, + -1): + input_conv = ConvModule( + in_channels[i], + feat_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=None, + bias=True) + input_conv_list.append(input_conv) + self.input_convs = ModuleList(input_conv_list) + + self.encoder = build_transformer_layer_sequence(encoder) + self.postional_encoding = build_positional_encoding( + positional_encoding) + # high resolution to low resolution + self.level_encoding = nn.Embedding(self.num_encoder_levels, + feat_channels) + + # fpn-like structure + self.lateral_convs = ModuleList() + self.output_convs = ModuleList() + self.use_bias = norm_cfg is None + # from top to down (low to high resolution) + # fpn for the rest features that didn't pass in encoder + for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1, + -1): + 
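+            # Note (illustrative): with the default in_channels
+            # [256, 512, 1024, 2048] and num_levels=3 in the deformable
+            # attention, this loop only covers the stride-4 feature
+            # (in_channels[0]); the three coarser levels are handled by the
+            # encoder above.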
lateral_conv = ConvModule( + in_channels[i], + feat_channels, + kernel_size=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=None) + output_conv = ConvModule( + feat_channels, + feat_channels, + kernel_size=3, + stride=1, + padding=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.lateral_convs.append(lateral_conv) + self.output_convs.append(output_conv) + + self.mask_feature = Conv2d( + feat_channels, out_channels, kernel_size=1, stride=1, padding=0) + + self.num_outs = num_outs + self.point_generator = MlvlPointGenerator(strides) + + def init_weights(self): + """Initialize weights.""" + for i in range(0, self.num_encoder_levels): + xavier_init( + self.input_convs[i].conv, + gain=1, + bias=0, + distribution='uniform') + + for i in range(0, self.num_input_levels - self.num_encoder_levels): + caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) + caffe2_xavier_init(self.output_convs[i].conv, bias=0) + + caffe2_xavier_init(self.mask_feature, bias=0) + + normal_init(self.level_encoding, mean=0, std=1) + for p in self.encoder.parameters(): + if p.dim() > 1: + nn.init.xavier_normal_(p) + + # init_weights defined in MultiScaleDeformableAttention + for layer in self.encoder.layers: + for attn in layer.attentions: + if isinstance(attn, MultiScaleDeformableAttention): + attn.init_weights() + + def forward(self, feats): + """ + Args: + feats (list[Tensor]): Feature maps of each level. Each has + shape of (batch_size, c, h, w). + + Returns: + tuple: A tuple containing the following: + + - mask_feature (Tensor): shape (batch_size, c, h, w). + - multi_scale_features (list[Tensor]): Multi scale \ + features, each in shape (batch_size, c, h, w). + """ + # generate padding mask for each level, for each image + batch_size = feats[0].shape[0] + encoder_input_list = [] + padding_mask_list = [] + level_positional_encoding_list = [] + spatial_shapes = [] + reference_points_list = [] + for i in range(self.num_encoder_levels): + level_idx = self.num_input_levels - i - 1 + feat = feats[level_idx] + feat_projected = self.input_convs[i](feat) + h, w = feat.shape[-2:] + + # no padding + padding_mask_resized = feat.new_zeros( + (batch_size, ) + feat.shape[-2:], dtype=torch.bool) + pos_embed = self.postional_encoding(padding_mask_resized) + level_embed = self.level_encoding.weight[i] + level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed + # (h_i * w_i, 2) + reference_points = self.point_generator.single_level_grid_priors( + feat.shape[-2:], level_idx, device=feat.device) + # normalize + factor = feat.new_tensor([[w, h]]) * self.strides[level_idx] + reference_points = reference_points / factor + + # shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c) + feat_projected = feat_projected.flatten(2).permute(2, 0, 1) + level_pos_embed = level_pos_embed.flatten(2).permute(2, 0, 1) + padding_mask_resized = padding_mask_resized.flatten(1) + + encoder_input_list.append(feat_projected) + padding_mask_list.append(padding_mask_resized) + level_positional_encoding_list.append(level_pos_embed) + spatial_shapes.append(feat.shape[-2:]) + reference_points_list.append(reference_points) + # shape (batch_size, total_num_query), + # total_num_query=sum([., h_i * w_i,.]) + padding_masks = torch.cat(padding_mask_list, dim=1) + # shape (total_num_query, batch_size, c) + encoder_inputs = torch.cat(encoder_input_list, dim=0) + level_positional_encodings = torch.cat( + level_positional_encoding_list, dim=0) + device = encoder_inputs.device + # shape (num_encoder_levels, 2), from low + # resolution 
to high resolution + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=device) + # shape (0, h_0*w_0, h_0*w_0+h_1*w_1, ...) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + reference_points = torch.cat(reference_points_list, dim=0) + reference_points = reference_points[None, :, None].repeat( + batch_size, 1, self.num_encoder_levels, 1) + valid_radios = reference_points.new_ones( + (batch_size, self.num_encoder_levels, 2)) + # shape (num_total_query, batch_size, c) + memory = self.encoder( + query=encoder_inputs, + key=None, + value=None, + query_pos=level_positional_encodings, + key_pos=None, + attn_masks=None, + key_padding_mask=None, + query_key_padding_mask=padding_masks, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_radios=valid_radios) + # (num_total_query, batch_size, c) -> (batch_size, c, num_total_query) + memory = memory.permute(1, 2, 0) + + # from low resolution to high resolution + num_query_per_level = [e[0] * e[1] for e in spatial_shapes] + outs = torch.split(memory, num_query_per_level, dim=-1) + outs = [ + x.reshape(batch_size, -1, spatial_shapes[i][0], + spatial_shapes[i][1]) for i, x in enumerate(outs) + ] + + for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1, + -1): + x = feats[i] + cur_feat = self.lateral_convs[i](x) + y = cur_feat + F.interpolate( + outs[-1], + size=cur_feat.shape[-2:], + mode='bilinear', + align_corners=False) + y = self.output_convs[i](y) + outs.append(y) + multi_scale_features = outs[:self.num_outs] + + mask_feature = self.mask_feature(outs[-1]) + return mask_feature, multi_scale_features diff --git a/downstream/mmdetection/mmdet/models/plugins/pixel_decoder.py b/downstream/mmdetection/mmdet/models/plugins/pixel_decoder.py new file mode 100644 index 0000000..537a187 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/plugins/pixel_decoder.py @@ -0,0 +1,243 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer_sequence) +from mmcv.runner import BaseModule, ModuleList + + +@PLUGIN_LAYERS.register_module() +class PixelDecoder(BaseModule): + """Pixel decoder with a structure like fpn. + + Args: + in_channels (list[int] | tuple[int]): Number of channels in the + input feature maps. + feat_channels (int): Number channels for feature. + out_channels (int): Number channels for output. + norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. + Defaults to dict(type='GN', num_groups=32). + act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. + Defaults to dict(type='ReLU'). + encoder (:obj:`mmcv.ConfigDict` | dict): Config for transorformer + encoder.Defaults to None. + positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for + transformer encoder position encoding. Defaults to + dict(type='SinePositionalEncoding', num_feats=128, + normalize=True). + init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + feat_channels, + out_channels, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.num_inputs = len(in_channels) + self.lateral_convs = ModuleList() + self.output_convs = ModuleList() + self.use_bias = norm_cfg is None + for i in range(0, self.num_inputs - 1): + lateral_conv = ConvModule( + in_channels[i], + feat_channels, + kernel_size=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=None) + output_conv = ConvModule( + feat_channels, + feat_channels, + kernel_size=3, + stride=1, + padding=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.lateral_convs.append(lateral_conv) + self.output_convs.append(output_conv) + + self.last_feat_conv = ConvModule( + in_channels[-1], + feat_channels, + kernel_size=3, + padding=1, + stride=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.mask_feature = Conv2d( + feat_channels, out_channels, kernel_size=3, stride=1, padding=1) + + def init_weights(self): + """Initialize weights.""" + for i in range(0, self.num_inputs - 2): + caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) + caffe2_xavier_init(self.output_convs[i].conv, bias=0) + + caffe2_xavier_init(self.mask_feature, bias=0) + caffe2_xavier_init(self.last_feat_conv, bias=0) + + def forward(self, feats, img_metas): + """ + Args: + feats (list[Tensor]): Feature maps of each level. Each has + shape of (batch_size, c, h, w). + img_metas (list[dict]): List of image information. Pass in + for creating more accurate padding mask. Not used here. + + Returns: + tuple: a tuple containing the following: + - mask_feature (Tensor): Shape (batch_size, c, h, w). + - memory (Tensor): Output of last stage of backbone.\ + Shape (batch_size, c, h, w). + """ + y = self.last_feat_conv(feats[-1]) + for i in range(self.num_inputs - 2, -1, -1): + x = feats[i] + cur_feat = self.lateral_convs[i](x) + y = cur_feat + \ + F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest') + y = self.output_convs[i](y) + + mask_feature = self.mask_feature(y) + memory = feats[-1] + return mask_feature, memory + + +@PLUGIN_LAYERS.register_module() +class TransformerEncoderPixelDecoder(PixelDecoder): + """Pixel decoder with transormer encoder inside. + + Args: + in_channels (list[int] | tuple[int]): Number of channels in the + input feature maps. + feat_channels (int): Number channels for feature. + out_channels (int): Number channels for output. + norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. + Defaults to dict(type='GN', num_groups=32). + act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. + Defaults to dict(type='ReLU'). + encoder (:obj:`mmcv.ConfigDict` | dict): Config for transorformer + encoder.Defaults to None. + positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for + transformer encoder position encoding. Defaults to + dict(type='SinePositionalEncoding', num_feats=128, + normalize=True). + init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + in_channels, + feat_channels, + out_channels, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=None, + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True), + init_cfg=None): + super(TransformerEncoderPixelDecoder, self).__init__( + in_channels, + feat_channels, + out_channels, + norm_cfg, + act_cfg, + init_cfg=init_cfg) + self.last_feat_conv = None + + self.encoder = build_transformer_layer_sequence(encoder) + self.encoder_embed_dims = self.encoder.embed_dims + assert self.encoder_embed_dims == feat_channels, 'embed_dims({}) of ' \ + 'tranformer encoder must equal to feat_channels({})'.format( + feat_channels, self.encoder_embed_dims) + self.positional_encoding = build_positional_encoding( + positional_encoding) + self.encoder_in_proj = Conv2d( + in_channels[-1], feat_channels, kernel_size=1) + self.encoder_out_proj = ConvModule( + feat_channels, + feat_channels, + kernel_size=3, + stride=1, + padding=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def init_weights(self): + """Initialize weights.""" + for i in range(0, self.num_inputs - 2): + caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) + caffe2_xavier_init(self.output_convs[i].conv, bias=0) + + caffe2_xavier_init(self.mask_feature, bias=0) + caffe2_xavier_init(self.encoder_in_proj, bias=0) + caffe2_xavier_init(self.encoder_out_proj.conv, bias=0) + + for p in self.encoder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, feats, img_metas): + """ + Args: + feats (list[Tensor]): Feature maps of each level. Each has + shape of (batch_size, c, h, w). + img_metas (list[dict]): List of image information. Pass in + for creating more accurate padding mask. + + Returns: + tuple: a tuple containing the following: + - mask_feature (Tensor): shape (batch_size, c, h, w). + - memory (Tensor): shape (batch_size, c, h, w). 
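+                  Here ``memory`` is the last-level feature after the
+                  transformer encoder, reshaped back to
+                  (batch_size, c, h, w), rather than the raw backbone
+                  feature returned by ``PixelDecoder``.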
+ """ + feat_last = feats[-1] + bs, c, h, w = feat_last.shape + input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w), + dtype=torch.float32) + for i in range(bs): + img_h, img_w, _ = img_metas[i]['img_shape'] + padding_mask[i, :img_h, :img_w] = 0 + padding_mask = F.interpolate( + padding_mask.unsqueeze(1), + size=feat_last.shape[-2:], + mode='nearest').to(torch.bool).squeeze(1) + + pos_embed = self.positional_encoding(padding_mask) + feat_last = self.encoder_in_proj(feat_last) + # (batch_size, c, h, w) -> (num_queries, batch_size, c) + feat_last = feat_last.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + # (batch_size, h, w) -> (batch_size, h*w) + padding_mask = padding_mask.flatten(1) + memory = self.encoder( + query=feat_last, + key=None, + value=None, + query_pos=pos_embed, + query_key_padding_mask=padding_mask) + # (num_queries, batch_size, c) -> (batch_size, c, h, w) + memory = memory.permute(1, 2, 0).view(bs, self.encoder_embed_dims, h, + w) + y = self.encoder_out_proj(memory) + for i in range(self.num_inputs - 2, -1, -1): + x = feats[i] + cur_feat = self.lateral_convs[i](x) + y = cur_feat + \ + F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest') + y = self.output_convs[i](y) + + mask_feature = self.mask_feature(y) + return mask_feature, memory diff --git a/downstream/mmdetection/mmdet/models/roi_heads/__init__.py b/downstream/mmdetection/mmdet/models/roi_heads/__init__.py new file mode 100644 index 0000000..baae2a0 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_roi_head import BaseRoIHead +from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead, + DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead, + Shared2FCBBoxHead, Shared4Conv1FCBBoxHead) +from .cascade_roi_head import CascadeRoIHead +from .double_roi_head import DoubleHeadRoIHead +from .dynamic_roi_head import DynamicRoIHead +from .grid_roi_head import GridRoIHead +from .htc_roi_head import HybridTaskCascadeRoIHead +from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead, + FusedSemanticHead, GlobalContextHead, GridHead, + HTCMaskHead, MaskIoUHead, MaskPointHead, + SCNetMaskHead, SCNetSemanticHead) +from .mask_scoring_roi_head import MaskScoringRoIHead +from .pisa_roi_head import PISARoIHead +from .point_rend_roi_head import PointRendRoIHead +from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor, + SingleRoIExtractor) +from .scnet_roi_head import SCNetRoIHead +from .shared_heads import ResLayer +from .sparse_roi_head import SparseRoIHead +from .standard_roi_head import StandardRoIHead +from .trident_roi_head import TridentRoIHead + +__all__ = [ + 'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead', + 'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead', + 'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead', + 'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', + 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', + 'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor', + 'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead', + 'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead', + 'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead', + 'FeatureRelayHead', 'GlobalContextHead' +] diff --git a/downstream/mmdetection/mmdet/models/roi_heads/base_roi_head.py 
b/downstream/mmdetection/mmdet/models/roi_heads/base_roi_head.py new file mode 100644 index 0000000..4adbdef --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/base_roi_head.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from mmcv.runner import BaseModule + +from ..builder import build_shared_head + + +class BaseRoIHead(BaseModule, metaclass=ABCMeta): + """Base class for RoIHeads.""" + + def __init__(self, + bbox_roi_extractor=None, + bbox_head=None, + mask_roi_extractor=None, + mask_head=None, + shared_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(BaseRoIHead, self).__init__(init_cfg) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + if shared_head is not None: + shared_head.pretrained = pretrained + self.shared_head = build_shared_head(shared_head) + + if bbox_head is not None: + self.init_bbox_head(bbox_roi_extractor, bbox_head) + + if mask_head is not None: + self.init_mask_head(mask_roi_extractor, mask_head) + + self.init_assigner_sampler() + + @property + def with_bbox(self): + """bool: whether the RoI head contains a `bbox_head`""" + return hasattr(self, 'bbox_head') and self.bbox_head is not None + + @property + def with_mask(self): + """bool: whether the RoI head contains a `mask_head`""" + return hasattr(self, 'mask_head') and self.mask_head is not None + + @property + def with_shared_head(self): + """bool: whether the RoI head contains a `shared_head`""" + return hasattr(self, 'shared_head') and self.shared_head is not None + + @abstractmethod + def init_bbox_head(self): + """Initialize ``bbox_head``""" + pass + + @abstractmethod + def init_mask_head(self): + """Initialize ``mask_head``""" + pass + + @abstractmethod + def init_assigner_sampler(self): + """Initialize assigner and sampler.""" + pass + + @abstractmethod + def forward_train(self, + x, + img_meta, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + **kwargs): + """Forward function during training.""" + + async def async_simple_test(self, + x, + proposal_list, + img_metas, + proposals=None, + rescale=False, + **kwargs): + """Asynchronized test function.""" + raise NotImplementedError + + def simple_test(self, + x, + proposal_list, + img_meta, + proposals=None, + rescale=False, + **kwargs): + """Test without augmentation.""" + + def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs): + """Test with augmentations. + + If rescale is False, then returned bboxes and masks will fit the scale + of imgs[0]. + """ diff --git a/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/__init__.py b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/__init__.py new file mode 100644 index 0000000..d1207db --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
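+# Shape note (illustrative): with ``with_avg_pool=True``, the default
+# ``BBoxHead`` (num_classes=80, in_channels=256, roi_feat_size=7) pools a
+# (N, 256, 7, 7) RoI feature to (N, 256) and predicts ``cls_score`` of shape
+# (N, 81) (80 classes plus background) and ``bbox_pred`` of shape (N, 320)
+# (4 deltas per class, since ``reg_class_agnostic=False`` by default).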
+from .bbox_head import BBoxHead +from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead, + Shared4Conv1FCBBoxHead) +from .dii_head import DIIHead +from .double_bbox_head import DoubleConvFCBBoxHead +from .sabl_head import SABLHead +from .scnet_bbox_head import SCNetBBoxHead + +__all__ = [ + 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', + 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead', + 'SCNetBBoxHead' +] diff --git a/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/bbox_head.py b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/bbox_head.py new file mode 100644 index 0000000..461b18b --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/bbox_head.py @@ -0,0 +1,594 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.runner import BaseModule, auto_fp16, force_fp32 +from torch.nn.modules.utils import _pair + +from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms +from mmdet.models.builder import HEADS, build_loss +from mmdet.models.losses import accuracy +from mmdet.models.utils import build_linear_layer + + +@HEADS.register_module() +class BBoxHead(BaseModule): + """Simplest RoI head, with only two fc layers for classification and + regression respectively.""" + + def __init__(self, + with_avg_pool=False, + with_cls=True, + with_reg=True, + roi_feat_size=7, + in_channels=256, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=True, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + reg_decoded_bbox=False, + reg_predictor_cfg=dict(type='Linear'), + cls_predictor_cfg=dict(type='Linear'), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=1.0), + init_cfg=None): + super(BBoxHead, self).__init__(init_cfg) + assert with_cls or with_reg + self.with_avg_pool = with_avg_pool + self.with_cls = with_cls + self.with_reg = with_reg + self.roi_feat_size = _pair(roi_feat_size) + self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1] + self.in_channels = in_channels + self.num_classes = num_classes + self.reg_class_agnostic = reg_class_agnostic + self.reg_decoded_bbox = reg_decoded_bbox + self.reg_predictor_cfg = reg_predictor_cfg + self.cls_predictor_cfg = cls_predictor_cfg + self.fp16_enabled = False + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + + in_channels = self.in_channels + if self.with_avg_pool: + self.avg_pool = nn.AvgPool2d(self.roi_feat_size) + else: + in_channels *= self.roi_feat_area + if self.with_cls: + # need to add background class + if self.custom_cls_channels: + cls_channels = self.loss_cls.get_cls_channels(self.num_classes) + else: + cls_channels = num_classes + 1 + self.fc_cls = build_linear_layer( + self.cls_predictor_cfg, + in_features=in_channels, + out_features=cls_channels) + if self.with_reg: + out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes + self.fc_reg = build_linear_layer( + self.reg_predictor_cfg, + in_features=in_channels, + out_features=out_dim_reg) + self.debug_imgs = None + if init_cfg is None: + self.init_cfg = [] + if self.with_cls: + self.init_cfg += [ + dict( + type='Normal', std=0.01, override=dict(name='fc_cls')) + ] + if self.with_reg: + self.init_cfg += [ + dict( + type='Normal', std=0.001, 
override=dict(name='fc_reg')) + ] + + @property + def custom_cls_channels(self): + return getattr(self.loss_cls, 'custom_cls_channels', False) + + @property + def custom_activation(self): + return getattr(self.loss_cls, 'custom_activation', False) + + @property + def custom_accuracy(self): + return getattr(self.loss_cls, 'custom_accuracy', False) + + @auto_fp16() + def forward(self, x): + if self.with_avg_pool: + if x.numel() > 0: + x = self.avg_pool(x) + x = x.view(x.size(0), -1) + else: + # avg_pool does not support empty tensor, + # so use torch.mean instead it + x = torch.mean(x, dim=(-1, -2)) + cls_score = self.fc_cls(x) if self.with_cls else None + bbox_pred = self.fc_reg(x) if self.with_reg else None + return cls_score, bbox_pred + + def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes, + pos_gt_labels, cfg): + """Calculate the ground truth for proposals in the single image + according to the sampling results. + + Args: + pos_bboxes (Tensor): Contains all the positive boxes, + has shape (num_pos, 4), the last dimension 4 + represents [tl_x, tl_y, br_x, br_y]. + neg_bboxes (Tensor): Contains all the negative boxes, + has shape (num_neg, 4), the last dimension 4 + represents [tl_x, tl_y, br_x, br_y]. + pos_gt_bboxes (Tensor): Contains gt_boxes for + all positive samples, has shape (num_pos, 4), + the last dimension 4 + represents [tl_x, tl_y, br_x, br_y]. + pos_gt_labels (Tensor): Contains gt_labels for + all positive samples, has shape (num_pos, ). + cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. + + Returns: + Tuple[Tensor]: Ground truth for proposals + in a single image. Containing the following Tensors: + + - labels(Tensor): Gt_labels for all proposals, has + shape (num_proposals,). + - label_weights(Tensor): Labels_weights for all + proposals, has shape (num_proposals,). + - bbox_targets(Tensor):Regression target for all + proposals, has shape (num_proposals, 4), the + last dimension 4 represents [tl_x, tl_y, br_x, br_y]. + - bbox_weights(Tensor):Regression weights for all + proposals, has shape (num_proposals, 4). + """ + num_pos = pos_bboxes.size(0) + num_neg = neg_bboxes.size(0) + num_samples = num_pos + num_neg + + # original implementation uses new_zeros since BG are set to be 0 + # now use empty & fill because BG cat_id = num_classes, + # FG cat_id = [0, num_classes-1] + labels = pos_bboxes.new_full((num_samples, ), + self.num_classes, + dtype=torch.long) + label_weights = pos_bboxes.new_zeros(num_samples) + bbox_targets = pos_bboxes.new_zeros(num_samples, 4) + bbox_weights = pos_bboxes.new_zeros(num_samples, 4) + if num_pos > 0: + labels[:num_pos] = pos_gt_labels + pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight + label_weights[:num_pos] = pos_weight + if not self.reg_decoded_bbox: + pos_bbox_targets = self.bbox_coder.encode( + pos_bboxes, pos_gt_bboxes) + else: + # When the regression loss (e.g. `IouLoss`, `GIouLoss`) + # is applied directly on the decoded bounding boxes, both + # the predicted boxes and regression targets should be with + # absolute coordinate format. + pos_bbox_targets = pos_gt_bboxes + bbox_targets[:num_pos, :] = pos_bbox_targets + bbox_weights[:num_pos, :] = 1 + if num_neg > 0: + label_weights[-num_neg:] = 1.0 + + return labels, label_weights, bbox_targets, bbox_weights + + def get_targets(self, + sampling_results, + gt_bboxes, + gt_labels, + rcnn_train_cfg, + concat=True): + """Calculate the ground truth for all samples in a batch according to + the sampling_results. 
+ + Almost the same as the implementation in bbox_head, we passed + additional parameters pos_inds_list and neg_inds_list to + `_get_target_single` function. + + Args: + sampling_results (List[obj:SamplingResults]): Assign results of + all images in a batch after sampling. + gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, + each tensor has shape (num_gt, 4), the last dimension 4 + represents [tl_x, tl_y, br_x, br_y]. + gt_labels (list[Tensor]): Gt_labels of all images in a batch, + each tensor has shape (num_gt,). + rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. + concat (bool): Whether to concatenate the results of all + the images in a single batch. + + Returns: + Tuple[Tensor]: Ground truth for proposals in a single image. + Containing the following list of Tensors: + + - labels (list[Tensor],Tensor): Gt_labels for all + proposals in a batch, each tensor in list has + shape (num_proposals,) when `concat=False`, otherwise + just a single tensor has shape (num_all_proposals,). + - label_weights (list[Tensor]): Labels_weights for + all proposals in a batch, each tensor in list has + shape (num_proposals,) when `concat=False`, otherwise + just a single tensor has shape (num_all_proposals,). + - bbox_targets (list[Tensor],Tensor): Regression target + for all proposals in a batch, each tensor in list + has shape (num_proposals, 4) when `concat=False`, + otherwise just a single tensor has shape + (num_all_proposals, 4), the last dimension 4 represents + [tl_x, tl_y, br_x, br_y]. + - bbox_weights (list[tensor],Tensor): Regression weights for + all proposals in a batch, each tensor in list has shape + (num_proposals, 4) when `concat=False`, otherwise just a + single tensor has shape (num_all_proposals, 4). + """ + pos_bboxes_list = [res.pos_bboxes for res in sampling_results] + neg_bboxes_list = [res.neg_bboxes for res in sampling_results] + pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] + pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] + labels, label_weights, bbox_targets, bbox_weights = multi_apply( + self._get_target_single, + pos_bboxes_list, + neg_bboxes_list, + pos_gt_bboxes_list, + pos_gt_labels_list, + cfg=rcnn_train_cfg) + + if concat: + labels = torch.cat(labels, 0) + label_weights = torch.cat(label_weights, 0) + bbox_targets = torch.cat(bbox_targets, 0) + bbox_weights = torch.cat(bbox_weights, 0) + return labels, label_weights, bbox_targets, bbox_weights + + @force_fp32(apply_to=('cls_score', 'bbox_pred')) + def loss(self, + cls_score, + bbox_pred, + rois, + labels, + label_weights, + bbox_targets, + bbox_weights, + reduction_override=None): + losses = dict() + if cls_score is not None: + avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) + if cls_score.numel() > 0: + loss_cls_ = self.loss_cls( + cls_score, + labels, + label_weights, + avg_factor=avg_factor, + reduction_override=reduction_override) + if isinstance(loss_cls_, dict): + losses.update(loss_cls_) + else: + losses['loss_cls'] = loss_cls_ + if self.custom_activation: + acc_ = self.loss_cls.get_accuracy(cls_score, labels) + losses.update(acc_) + else: + losses['acc'] = accuracy(cls_score, labels) + if bbox_pred is not None: + bg_class_ind = self.num_classes + # 0~self.num_classes-1 are FG, self.num_classes is BG + pos_inds = (labels >= 0) & (labels < bg_class_ind) + # do not perform bounding box regression for BG anymore. + if pos_inds.any(): + if self.reg_decoded_bbox: + # When the regression loss (e.g. 
`IouLoss`, + # `GIouLoss`, `DIouLoss`) is applied directly on + # the decoded bounding boxes, it decodes the + # already encoded coordinates to absolute format. + bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred) + if self.reg_class_agnostic: + pos_bbox_pred = bbox_pred.view( + bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] + else: + pos_bbox_pred = bbox_pred.view( + bbox_pred.size(0), -1, + 4)[pos_inds.type(torch.bool), + labels[pos_inds.type(torch.bool)]] + losses['loss_bbox'] = self.loss_bbox( + pos_bbox_pred, + bbox_targets[pos_inds.type(torch.bool)], + bbox_weights[pos_inds.type(torch.bool)], + avg_factor=bbox_targets.size(0), + reduction_override=reduction_override) + else: + losses['loss_bbox'] = bbox_pred[pos_inds].sum() + return losses + + @force_fp32(apply_to=('cls_score', 'bbox_pred')) + def get_bboxes(self, + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False, + cfg=None): + """Transform network output for a batch into bbox predictions. + + Args: + rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). + last dimension 5 arrange as (batch_index, x1, y1, x2, y2). + cls_score (Tensor): Box scores, has shape + (num_boxes, num_classes + 1). + bbox_pred (Tensor, optional): Box energies / deltas. + has shape (num_boxes, num_classes * 4). + img_shape (Sequence[int], optional): Maximum bounds for boxes, + specifies (H, W, C) or (H, W). + scale_factor (ndarray): Scale factor of the + image arrange as (w_scale, h_scale, w_scale, h_scale). + rescale (bool): If True, return boxes in original image space. + Default: False. + cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None + + Returns: + tuple[Tensor, Tensor]: + First tensor is `det_bboxes`, has the shape + (num_boxes, 5) and last + dimension 5 represent (tl_x, tl_y, br_x, br_y, score). + Second tensor is the labels with shape (num_boxes, ). + """ + + # some loss (Seesaw loss..) may have custom activation + if self.custom_cls_channels: + scores = self.loss_cls.get_activation(cls_score) + else: + scores = F.softmax( + cls_score, dim=-1) if cls_score is not None else None + # bbox_pred would be None in some detector when with_reg is False, + # e.g. Grid R-CNN. + if bbox_pred is not None: + bboxes = self.bbox_coder.decode( + rois[..., 1:], bbox_pred, max_shape=img_shape) + else: + bboxes = rois[:, 1:].clone() + if img_shape is not None: + bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1]) + bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0]) + + if rescale and bboxes.size(0) > 0: + scale_factor = bboxes.new_tensor(scale_factor) + bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( + bboxes.size()[0], -1) + + if cfg is None: + return bboxes, scores + else: + det_bboxes, det_labels = multiclass_nms(bboxes, scores, + cfg.score_thr, cfg.nms, + cfg.max_per_img) + + return det_bboxes, det_labels + + @force_fp32(apply_to=('bbox_preds', )) + def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): + """Refine bboxes during training. + + Args: + rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, + and bs is the sampled RoIs per image. The first column is + the image id and the next 4 columns are x1, y1, x2, y2. + labels (Tensor): Shape (n*bs, ). + bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class). + pos_is_gts (list[Tensor]): Flags indicating if each positive bbox + is a gt bbox. + img_metas (list[dict]): Meta info of each image. + + Returns: + list[Tensor]: Refined bboxes of each image in a mini-batch. 
+ + Example: + >>> # xdoctest: +REQUIRES(module:kwarray) + >>> import kwarray + >>> import numpy as np + >>> from mmdet.core.bbox.demodata import random_boxes + >>> self = BBoxHead(reg_class_agnostic=True) + >>> n_roi = 2 + >>> n_img = 4 + >>> scale = 512 + >>> rng = np.random.RandomState(0) + >>> img_metas = [{'img_shape': (scale, scale)} + ... for _ in range(n_img)] + >>> # Create rois in the expected format + >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng) + >>> img_ids = torch.randint(0, n_img, (n_roi,)) + >>> img_ids = img_ids.float() + >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1) + >>> # Create other args + >>> labels = torch.randint(0, 2, (n_roi,)).long() + >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng) + >>> # For each image, pretend random positive boxes are gts + >>> is_label_pos = (labels.numpy() > 0).astype(np.int) + >>> lbl_per_img = kwarray.group_items(is_label_pos, + ... img_ids.numpy()) + >>> pos_per_img = [sum(lbl_per_img.get(gid, [])) + ... for gid in range(n_img)] + >>> pos_is_gts = [ + >>> torch.randint(0, 2, (npos,)).byte().sort( + >>> descending=True)[0] + >>> for npos in pos_per_img + >>> ] + >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds, + >>> pos_is_gts, img_metas) + >>> print(bboxes_list) + """ + img_ids = rois[:, 0].long().unique(sorted=True) + assert img_ids.numel() <= len(img_metas) + + bboxes_list = [] + for i in range(len(img_metas)): + inds = torch.nonzero( + rois[:, 0] == i, as_tuple=False).squeeze(dim=1) + num_rois = inds.numel() + + bboxes_ = rois[inds, 1:] + label_ = labels[inds] + bbox_pred_ = bbox_preds[inds] + img_meta_ = img_metas[i] + pos_is_gts_ = pos_is_gts[i] + + bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, + img_meta_) + + # filter gt bboxes + pos_keep = 1 - pos_is_gts_ + keep_inds = pos_is_gts_.new_ones(num_rois) + keep_inds[:len(pos_is_gts_)] = pos_keep + + bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) + + return bboxes_list + + @force_fp32(apply_to=('bbox_pred', )) + def regress_by_class(self, rois, label, bbox_pred, img_meta): + """Regress the bbox for the predicted class. Used in Cascade R-CNN. + + Args: + rois (Tensor): Rois from `rpn_head` or last stage + `bbox_head`, has shape (num_proposals, 4) or + (num_proposals, 5). + label (Tensor): Only used when `self.reg_class_agnostic` + is False, has shape (num_proposals, ). + bbox_pred (Tensor): Regression prediction of + current stage `bbox_head`. When `self.reg_class_agnostic` + is False, it has shape (n, num_classes * 4), otherwise + it has shape (n, 4). + img_meta (dict): Image meta info. + + Returns: + Tensor: Regressed bboxes, the same shape as input rois. + """ + + assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape) + + if not self.reg_class_agnostic: + label = label * 4 + inds = torch.stack((label, label + 1, label + 2, label + 3), 1) + bbox_pred = torch.gather(bbox_pred, 1, inds) + assert bbox_pred.size(1) == 4 + + max_shape = img_meta['img_shape'] + + if rois.size(1) == 4: + new_rois = self.bbox_coder.decode( + rois, bbox_pred, max_shape=max_shape) + else: + bboxes = self.bbox_coder.decode( + rois[:, 1:], bbox_pred, max_shape=max_shape) + new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) + + return new_rois + + def onnx_export(self, + rois, + cls_score, + bbox_pred, + img_shape, + cfg=None, + **kwargs): + """Transform network output for a batch into bbox predictions. + + Args: + rois (Tensor): Boxes to be transformed. + Has shape (B, num_boxes, 5) + cls_score (Tensor): Box scores. 
has shape + (B, num_boxes, num_classes + 1), 1 represent the background. + bbox_pred (Tensor, optional): Box energies / deltas for, + has shape (B, num_boxes, num_classes * 4) when. + img_shape (torch.Tensor): Shape of image. + cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. + """ + + assert rois.ndim == 3, 'Only support export two stage ' \ + 'model to ONNX ' \ + 'with batch dimension. ' + if self.custom_cls_channels: + scores = self.loss_cls.get_activation(cls_score) + else: + scores = F.softmax( + cls_score, dim=-1) if cls_score is not None else None + + if bbox_pred is not None: + bboxes = self.bbox_coder.decode( + rois[..., 1:], bbox_pred, max_shape=img_shape) + else: + bboxes = rois[..., 1:].clone() + if img_shape is not None: + max_shape = bboxes.new_tensor(img_shape)[..., :2] + min_xy = bboxes.new_tensor(0) + max_xy = torch.cat( + [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2) + bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) + bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) + + # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment + from mmdet.core.export import add_dummy_nms_for_onnx + max_output_boxes_per_class = cfg.nms.get('max_output_boxes_per_class', + cfg.max_per_img) + iou_threshold = cfg.nms.get('iou_threshold', 0.5) + score_threshold = cfg.score_thr + nms_pre = cfg.get('deploy_nms_pre', -1) + + scores = scores[..., :self.num_classes] + if self.reg_class_agnostic: + return add_dummy_nms_for_onnx( + bboxes, + scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold, + pre_top_k=nms_pre, + after_top_k=cfg.max_per_img) + else: + batch_size = scores.shape[0] + labels = torch.arange( + self.num_classes, dtype=torch.long).to(scores.device) + labels = labels.view(1, 1, -1).expand_as(scores) + labels = labels.reshape(batch_size, -1) + scores = scores.reshape(batch_size, -1) + bboxes = bboxes.reshape(batch_size, -1, 4) + + max_size = torch.max(img_shape) + # Offset bboxes of each class so that bboxes of different labels + # do not overlap. + offsets = (labels * max_size + 1).unsqueeze(2) + bboxes_for_nms = bboxes + offsets + + batch_dets, labels = add_dummy_nms_for_onnx( + bboxes_for_nms, + scores.unsqueeze(2), + max_output_boxes_per_class, + iou_threshold, + score_threshold, + pre_top_k=nms_pre, + after_top_k=cfg.max_per_img, + labels=labels) + # Offset the bboxes back after dummy nms. + offsets = (labels * max_size + 1).unsqueeze(2) + # Indexing + inplace operation fails with dynamic shape in ONNX + # original style: batch_dets[..., :4] -= offsets + bboxes, scores = batch_dets[..., 0:4], batch_dets[..., 4:5] + bboxes -= offsets + batch_dets = torch.cat([bboxes, scores], dim=2) + return batch_dets, labels diff --git a/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py new file mode 100644 index 0000000..21124b9 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py @@ -0,0 +1,229 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmdet.models.builder import HEADS +from mmdet.models.utils import build_linear_layer +from .bbox_head import BBoxHead + + +@HEADS.register_module() +class ConvFCBBoxHead(BBoxHead): + r"""More general bbox head, with shared conv and fc layers and two optional + separated branches. 
+ + .. code-block:: none + + /-> cls convs -> cls fcs -> cls + shared convs -> shared fcs + \-> reg convs -> reg fcs -> reg + """ # noqa: W605 + + def __init__(self, + num_shared_convs=0, + num_shared_fcs=0, + num_cls_convs=0, + num_cls_fcs=0, + num_reg_convs=0, + num_reg_fcs=0, + conv_out_channels=256, + fc_out_channels=1024, + conv_cfg=None, + norm_cfg=None, + init_cfg=None, + *args, + **kwargs): + super(ConvFCBBoxHead, self).__init__( + *args, init_cfg=init_cfg, **kwargs) + assert (num_shared_convs + num_shared_fcs + num_cls_convs + + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) + if num_cls_convs > 0 or num_reg_convs > 0: + assert num_shared_fcs == 0 + if not self.with_cls: + assert num_cls_convs == 0 and num_cls_fcs == 0 + if not self.with_reg: + assert num_reg_convs == 0 and num_reg_fcs == 0 + self.num_shared_convs = num_shared_convs + self.num_shared_fcs = num_shared_fcs + self.num_cls_convs = num_cls_convs + self.num_cls_fcs = num_cls_fcs + self.num_reg_convs = num_reg_convs + self.num_reg_fcs = num_reg_fcs + self.conv_out_channels = conv_out_channels + self.fc_out_channels = fc_out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + # add shared convs and fcs + self.shared_convs, self.shared_fcs, last_layer_dim = \ + self._add_conv_fc_branch( + self.num_shared_convs, self.num_shared_fcs, self.in_channels, + True) + self.shared_out_channels = last_layer_dim + + # add cls specific branch + self.cls_convs, self.cls_fcs, self.cls_last_dim = \ + self._add_conv_fc_branch( + self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) + + # add reg specific branch + self.reg_convs, self.reg_fcs, self.reg_last_dim = \ + self._add_conv_fc_branch( + self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) + + if self.num_shared_fcs == 0 and not self.with_avg_pool: + if self.num_cls_fcs == 0: + self.cls_last_dim *= self.roi_feat_area + if self.num_reg_fcs == 0: + self.reg_last_dim *= self.roi_feat_area + + self.relu = nn.ReLU(inplace=True) + # reconstruct fc_cls and fc_reg since input channels are changed + if self.with_cls: + if self.custom_cls_channels: + cls_channels = self.loss_cls.get_cls_channels(self.num_classes) + else: + cls_channels = self.num_classes + 1 + self.fc_cls = build_linear_layer( + self.cls_predictor_cfg, + in_features=self.cls_last_dim, + out_features=cls_channels) + if self.with_reg: + out_dim_reg = (4 if self.reg_class_agnostic else 4 * + self.num_classes) + self.fc_reg = build_linear_layer( + self.reg_predictor_cfg, + in_features=self.reg_last_dim, + out_features=out_dim_reg) + + if init_cfg is None: + # when init_cfg is None, + # It has been set to + # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], + # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] + # after `super(ConvFCBBoxHead, self).__init__()` + # we only need to append additional configuration + # for `shared_fcs`, `cls_fcs` and `reg_fcs` + self.init_cfg += [ + dict( + type='Xavier', + distribution='uniform', + override=[ + dict(name='shared_fcs'), + dict(name='cls_fcs'), + dict(name='reg_fcs') + ]) + ] + + def _add_conv_fc_branch(self, + num_branch_convs, + num_branch_fcs, + in_channels, + is_shared=False): + """Add shared or separable branch. 
+ + convs -> avg pool (optional) -> fcs + """ + last_layer_dim = in_channels + # add branch specific conv layers + branch_convs = nn.ModuleList() + if num_branch_convs > 0: + for i in range(num_branch_convs): + conv_in_channels = ( + last_layer_dim if i == 0 else self.conv_out_channels) + branch_convs.append( + ConvModule( + conv_in_channels, + self.conv_out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + last_layer_dim = self.conv_out_channels + # add branch specific fc layers + branch_fcs = nn.ModuleList() + if num_branch_fcs > 0: + # for shared branch, only consider self.with_avg_pool + # for separated branches, also consider self.num_shared_fcs + if (is_shared + or self.num_shared_fcs == 0) and not self.with_avg_pool: + last_layer_dim *= self.roi_feat_area + for i in range(num_branch_fcs): + fc_in_channels = ( + last_layer_dim if i == 0 else self.fc_out_channels) + branch_fcs.append( + nn.Linear(fc_in_channels, self.fc_out_channels)) + last_layer_dim = self.fc_out_channels + return branch_convs, branch_fcs, last_layer_dim + + def forward(self, x): + # shared part + if self.num_shared_convs > 0: + for conv in self.shared_convs: + x = conv(x) + + if self.num_shared_fcs > 0: + if self.with_avg_pool: + x = self.avg_pool(x) + + x = x.flatten(1) + + for fc in self.shared_fcs: + x = self.relu(fc(x)) + # separate branches + x_cls = x + x_reg = x + + for conv in self.cls_convs: + x_cls = conv(x_cls) + if x_cls.dim() > 2: + if self.with_avg_pool: + x_cls = self.avg_pool(x_cls) + x_cls = x_cls.flatten(1) + for fc in self.cls_fcs: + x_cls = self.relu(fc(x_cls)) + + for conv in self.reg_convs: + x_reg = conv(x_reg) + if x_reg.dim() > 2: + if self.with_avg_pool: + x_reg = self.avg_pool(x_reg) + x_reg = x_reg.flatten(1) + for fc in self.reg_fcs: + x_reg = self.relu(fc(x_reg)) + + cls_score = self.fc_cls(x_cls) if self.with_cls else None + bbox_pred = self.fc_reg(x_reg) if self.with_reg else None + return cls_score, bbox_pred + + +@HEADS.register_module() +class Shared2FCBBoxHead(ConvFCBBoxHead): + + def __init__(self, fc_out_channels=1024, *args, **kwargs): + super(Shared2FCBBoxHead, self).__init__( + num_shared_convs=0, + num_shared_fcs=2, + num_cls_convs=0, + num_cls_fcs=0, + num_reg_convs=0, + num_reg_fcs=0, + fc_out_channels=fc_out_channels, + *args, + **kwargs) + + +@HEADS.register_module() +class Shared4Conv1FCBBoxHead(ConvFCBBoxHead): + + def __init__(self, fc_out_channels=1024, *args, **kwargs): + super(Shared4Conv1FCBBoxHead, self).__init__( + num_shared_convs=4, + num_shared_fcs=1, + num_cls_convs=0, + num_cls_fcs=0, + num_reg_convs=0, + num_reg_fcs=0, + fc_out_channels=fc_out_channels, + *args, + **kwargs) diff --git a/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/dii_head.py b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/dii_head.py new file mode 100644 index 0000000..3777f52 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/dii_head.py @@ -0,0 +1,426 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
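+# Flow note (descriptive): DIIHead is the per-stage head of Sparse R-CNN
+# style detectors. Each proposal feature goes through self-attention over
+# the proposal set, is refined by a dynamic instance-interactive convolution
+# conditioned on its RoI feature, passes through an FFN, and is finally fed
+# to separate classification and regression fc branches.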
+import torch +import torch.nn as nn +from mmcv.cnn import (bias_init_with_prob, build_activation_layer, + build_norm_layer) +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmcv.runner import auto_fp16, force_fp32 + +from mmdet.core import multi_apply +from mmdet.models.builder import HEADS, build_loss +from mmdet.models.dense_heads.atss_head import reduce_mean +from mmdet.models.losses import accuracy +from mmdet.models.utils import build_transformer +from .bbox_head import BBoxHead + + +@HEADS.register_module() +class DIIHead(BBoxHead): + r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object + Detection with Learnable Proposals `_ + + Args: + num_classes (int): Number of class in dataset. + Defaults to 80. + num_ffn_fcs (int): The number of fully-connected + layers in FFNs. Defaults to 2. + num_heads (int): The hidden dimension of FFNs. + Defaults to 8. + num_cls_fcs (int): The number of fully-connected + layers in classification subnet. Defaults to 1. + num_reg_fcs (int): The number of fully-connected + layers in regression subnet. Defaults to 3. + feedforward_channels (int): The hidden dimension + of FFNs. Defaults to 2048 + in_channels (int): Hidden_channels of MultiheadAttention. + Defaults to 256. + dropout (float): Probability of drop the channel. + Defaults to 0.0 + ffn_act_cfg (dict): The activation config for FFNs. + dynamic_conv_cfg (dict): The convolution config + for DynamicConv. + loss_iou (dict): The config for iou or giou loss. + + """ + + def __init__(self, + num_classes=80, + num_ffn_fcs=2, + num_heads=8, + num_cls_fcs=1, + num_reg_fcs=3, + feedforward_channels=2048, + in_channels=256, + dropout=0.0, + ffn_act_cfg=dict(type='ReLU', inplace=True), + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=7, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + init_cfg=None, + **kwargs): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(DIIHead, self).__init__( + num_classes=num_classes, + reg_decoded_bbox=True, + reg_class_agnostic=True, + init_cfg=init_cfg, + **kwargs) + self.loss_iou = build_loss(loss_iou) + self.in_channels = in_channels + self.fp16_enabled = False + self.attention = MultiheadAttention(in_channels, num_heads, dropout) + self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1] + + self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) + self.instance_interactive_conv_dropout = nn.Dropout(dropout) + self.instance_interactive_conv_norm = build_norm_layer( + dict(type='LN'), in_channels)[1] + + self.ffn = FFN( + in_channels, + feedforward_channels, + num_ffn_fcs, + act_cfg=ffn_act_cfg, + dropout=dropout) + self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1] + + self.cls_fcs = nn.ModuleList() + for _ in range(num_cls_fcs): + self.cls_fcs.append( + nn.Linear(in_channels, in_channels, bias=False)) + self.cls_fcs.append( + build_norm_layer(dict(type='LN'), in_channels)[1]) + self.cls_fcs.append( + build_activation_layer(dict(type='ReLU', inplace=True))) + + # over load the self.fc_cls in BBoxHead + if self.loss_cls.use_sigmoid: + self.fc_cls = nn.Linear(in_channels, self.num_classes) + else: + self.fc_cls = nn.Linear(in_channels, self.num_classes + 1) + + self.reg_fcs = nn.ModuleList() + for _ in range(num_reg_fcs): + self.reg_fcs.append( + nn.Linear(in_channels, 
in_channels, bias=False))
+            self.reg_fcs.append(
+                build_norm_layer(dict(type='LN'), in_channels)[1])
+            self.reg_fcs.append(
+                build_activation_layer(dict(type='ReLU', inplace=True)))
+        # overload the self.fc_reg in BBoxHead
+        self.fc_reg = nn.Linear(in_channels, 4)
+
+        assert self.reg_class_agnostic, 'DIIHead only ' \
+            'supports `reg_class_agnostic=True`'
+        assert self.reg_decoded_bbox, 'DIIHead only ' \
+            'supports `reg_decoded_bbox=True`'
+
+    def init_weights(self):
+        """Use xavier initialization for all weight parameters and set the
+        classification head bias to a specific value when using focal
+        loss."""
+        super(DIIHead, self).init_weights()
+        for p in self.parameters():
+            if p.dim() > 1:
+                nn.init.xavier_uniform_(p)
+            else:
+                # adopt the default initialization for
+                # the weight and bias of the layer norm
+                pass
+        if self.loss_cls.use_sigmoid:
+            bias_init = bias_init_with_prob(0.01)
+            nn.init.constant_(self.fc_cls.bias, bias_init)
+
+    @auto_fp16()
+    def forward(self, roi_feat, proposal_feat):
+        """Forward function of Dynamic Instance Interactive Head.
+
+        Args:
+            roi_feat (Tensor): RoI-pooling features with shape
+                (batch_size*num_proposals, feature_dimensions,
+                pooling_h, pooling_w).
+            proposal_feat (Tensor): Intermediate feature obtained from
+                the DII head in the last stage, has shape
+                (batch_size, num_proposals, feature_dimensions).
+
+        Returns:
+            tuple[Tensor]: Usually a tuple of classification scores,
+                bbox predictions and an intermediate feature.
+
+                - cls_scores (Tensor): Classification scores for
+                  all proposals, has shape
+                  (batch_size, num_proposals, num_classes).
+                - bbox_preds (Tensor): Box energies / deltas for
+                  all proposals, has shape
+                  (batch_size, num_proposals, 4).
+                - obj_feat (Tensor): Object feature before classification
+                  and regression subnet, has shape
+                  (batch_size, num_proposals, feature_dimensions).
+        """
+        N, num_proposals = proposal_feat.shape[:2]
+
+        # Self attention
+        proposal_feat = proposal_feat.permute(1, 0, 2)
+        proposal_feat = self.attention_norm(self.attention(proposal_feat))
+        attn_feats = proposal_feat.permute(1, 0, 2)
+
+        # instance interactive
+        proposal_feat = attn_feats.reshape(-1, self.in_channels)
+        proposal_feat_iic = self.instance_interactive_conv(
+            proposal_feat, roi_feat)
+        proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(
+            proposal_feat_iic)
+        obj_feat = self.instance_interactive_conv_norm(proposal_feat)
+
+        # FFN
+        obj_feat = self.ffn_norm(self.ffn(obj_feat))
+
+        cls_feat = obj_feat
+        reg_feat = obj_feat
+
+        for cls_layer in self.cls_fcs:
+            cls_feat = cls_layer(cls_feat)
+        for reg_layer in self.reg_fcs:
+            reg_feat = reg_layer(reg_feat)
+
+        cls_score = self.fc_cls(cls_feat).view(
+            N, num_proposals, self.num_classes
+            if self.loss_cls.use_sigmoid else self.num_classes + 1)
+        bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4)
+
+        return cls_score, bbox_delta, obj_feat.view(
+            N, num_proposals, self.in_channels), attn_feats
+
+    @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+    def loss(self,
+             cls_score,
+             bbox_pred,
+             labels,
+             label_weights,
+             bbox_targets,
+             bbox_weights,
+             imgs_whwh=None,
+             reduction_override=None,
+             **kwargs):
+        """Loss function of DIIHead, get the loss of all images.
+
+        Args:
+            cls_score (Tensor): Classification prediction
+                results of all classes, has shape
+                (batch_size * num_proposals_single_image, num_classes).
+            bbox_pred (Tensor): Regression prediction results,
+                has shape
+                (batch_size * num_proposals_single_image, 4), the last
+                dimension 4 represents [tl_x, tl_y, br_x, br_y].
+            labels (Tensor): Labels of each proposal, has shape
+                (batch_size * num_proposals_single_image, ).
+            label_weights (Tensor): Classification loss
+                weight of each proposal, has shape
+                (batch_size * num_proposals_single_image, ).
+            bbox_targets (Tensor): Regression targets of each
+                proposal, has shape
+                (batch_size * num_proposals_single_image, 4),
+                the last dimension 4 represents
+                [tl_x, tl_y, br_x, br_y].
+            bbox_weights (Tensor): Regression loss weight of each
+                proposal's coordinates, has shape
+                (batch_size * num_proposals_single_image, 4).
+            imgs_whwh (Tensor): Tensor with
+                shape (batch_size, num_proposals, 4), the last
+                dimension means
+                [img_width, img_height, img_width, img_height].
+            reduction_override (str, optional): The reduction
+                method used to override the original reduction
+                method of the loss. Options are "none",
+                "mean" and "sum". Defaults to None.
+
+        Returns:
+            dict[str, Tensor]: Dictionary of loss components.
+        """
+        losses = dict()
+        bg_class_ind = self.num_classes
+        # note that in Sparse R-CNN num_gt == num_pos
+        pos_inds = (labels >= 0) & (labels < bg_class_ind)
+        num_pos = pos_inds.sum().float()
+        avg_factor = reduce_mean(num_pos)
+        if cls_score is not None:
+            if cls_score.numel() > 0:
+                losses['loss_cls'] = self.loss_cls(
+                    cls_score,
+                    labels,
+                    label_weights,
+                    avg_factor=avg_factor,
+                    reduction_override=reduction_override)
+                losses['pos_acc'] = accuracy(cls_score[pos_inds],
+                                             labels[pos_inds])
+        if bbox_pred is not None:
+            # 0~self.num_classes-1 are FG, self.num_classes is BG
+            # do not perform bounding box regression for BG anymore.
+            if pos_inds.any():
+                pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),
+                                                  4)[pos_inds.type(torch.bool)]
+                imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),
+                                              4)[pos_inds.type(torch.bool)]
+                losses['loss_bbox'] = self.loss_bbox(
+                    pos_bbox_pred / imgs_whwh,
+                    bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,
+                    bbox_weights[pos_inds.type(torch.bool)],
+                    avg_factor=avg_factor)
+                losses['loss_iou'] = self.loss_iou(
+                    pos_bbox_pred,
+                    bbox_targets[pos_inds.type(torch.bool)],
+                    bbox_weights[pos_inds.type(torch.bool)],
+                    avg_factor=avg_factor)
+            else:
+                losses['loss_bbox'] = bbox_pred.sum() * 0
+                losses['loss_iou'] = bbox_pred.sum() * 0
+        return losses
+
+    def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
+                           pos_gt_bboxes, pos_gt_labels, cfg):
+        """Calculate the ground truth for proposals in a single image
+        according to the sampling results.
+
+        Almost the same as the implementation in `bbox_head`; we add
+        pos_inds and neg_inds to select positive and negative samples
+        instead of selecting the first num_pos proposals as positive
+        samples.
+
+        Args:
+            pos_inds (Tensor): The length is equal to the number of
+                positive samples; contains all indices of the positive
+                samples in the original proposal set.
+            neg_inds (Tensor): The length is equal to the number of
+                negative samples; contains all indices of the negative
+                samples in the original proposal set.
+            pos_bboxes (Tensor): Contains all the positive boxes,
+                has shape (num_pos, 4), the last dimension 4
+                represents [tl_x, tl_y, br_x, br_y].
+            neg_bboxes (Tensor): Contains all the negative boxes,
+                has shape (num_neg, 4), the last dimension 4
+                represents [tl_x, tl_y, br_x, br_y].
+            pos_gt_bboxes (Tensor): Contains gt_boxes for
+                all positive samples, has shape (num_pos, 4),
+                the last dimension 4
+                represents [tl_x, tl_y, br_x, br_y].
+            pos_gt_labels (Tensor): Contains gt_labels for
+                all positive samples, has shape (num_pos, ).
+ cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. + + Returns: + Tuple[Tensor]: Ground truth for proposals in a single image. + Containing the following Tensors: + + - labels(Tensor): Gt_labels for all proposals, has + shape (num_proposals,). + - label_weights(Tensor): Labels_weights for all proposals, has + shape (num_proposals,). + - bbox_targets(Tensor):Regression target for all proposals, has + shape (num_proposals, 4), the last dimension 4 + represents [tl_x, tl_y, br_x, br_y]. + - bbox_weights(Tensor):Regression weights for all proposals, + has shape (num_proposals, 4). + """ + num_pos = pos_bboxes.size(0) + num_neg = neg_bboxes.size(0) + num_samples = num_pos + num_neg + + # original implementation uses new_zeros since BG are set to be 0 + # now use empty & fill because BG cat_id = num_classes, + # FG cat_id = [0, num_classes-1] + labels = pos_bboxes.new_full((num_samples, ), + self.num_classes, + dtype=torch.long) + label_weights = pos_bboxes.new_zeros(num_samples) + bbox_targets = pos_bboxes.new_zeros(num_samples, 4) + bbox_weights = pos_bboxes.new_zeros(num_samples, 4) + if num_pos > 0: + labels[pos_inds] = pos_gt_labels + pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight + label_weights[pos_inds] = pos_weight + if not self.reg_decoded_bbox: + pos_bbox_targets = self.bbox_coder.encode( + pos_bboxes, pos_gt_bboxes) + else: + pos_bbox_targets = pos_gt_bboxes + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1 + if num_neg > 0: + label_weights[neg_inds] = 1.0 + + return labels, label_weights, bbox_targets, bbox_weights + + def get_targets(self, + sampling_results, + gt_bboxes, + gt_labels, + rcnn_train_cfg, + concat=True): + """Calculate the ground truth for all samples in a batch according to + the sampling_results. + + Almost the same as the implementation in bbox_head, we passed + additional parameters pos_inds_list and neg_inds_list to + `_get_target_single` function. + + Args: + sampling_results (List[obj:SamplingResults]): Assign results of + all images in a batch after sampling. + gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, + each tensor has shape (num_gt, 4), the last dimension 4 + represents [tl_x, tl_y, br_x, br_y]. + gt_labels (list[Tensor]): Gt_labels of all images in a batch, + each tensor has shape (num_gt,). + rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN. + concat (bool): Whether to concatenate the results of all + the images in a single batch. + + Returns: + Tuple[Tensor]: Ground truth for proposals in a single image. + Containing the following list of Tensors: + + - labels (list[Tensor],Tensor): Gt_labels for all + proposals in a batch, each tensor in list has + shape (num_proposals,) when `concat=False`, otherwise just + a single tensor has shape (num_all_proposals,). + - label_weights (list[Tensor]): Labels_weights for + all proposals in a batch, each tensor in list has shape + (num_proposals,) when `concat=False`, otherwise just a + single tensor has shape (num_all_proposals,). + - bbox_targets (list[Tensor],Tensor): Regression target + for all proposals in a batch, each tensor in list has + shape (num_proposals, 4) when `concat=False`, otherwise + just a single tensor has shape (num_all_proposals, 4), + the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. + - bbox_weights (list[tensor],Tensor): Regression weights for + all proposals in a batch, each tensor in list has shape + (num_proposals, 4) when `concat=False`, otherwise just a + single tensor has shape (num_all_proposals, 4). 
+ """ + pos_inds_list = [res.pos_inds for res in sampling_results] + neg_inds_list = [res.neg_inds for res in sampling_results] + pos_bboxes_list = [res.pos_bboxes for res in sampling_results] + neg_bboxes_list = [res.neg_bboxes for res in sampling_results] + pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] + pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] + labels, label_weights, bbox_targets, bbox_weights = multi_apply( + self._get_target_single, + pos_inds_list, + neg_inds_list, + pos_bboxes_list, + neg_bboxes_list, + pos_gt_bboxes_list, + pos_gt_labels_list, + cfg=rcnn_train_cfg) + if concat: + labels = torch.cat(labels, 0) + label_weights = torch.cat(label_weights, 0) + bbox_targets = torch.cat(bbox_targets, 0) + bbox_weights = torch.cat(bbox_weights, 0) + return labels, label_weights, bbox_targets, bbox_weights diff --git a/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py new file mode 100644 index 0000000..2a38d59 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py @@ -0,0 +1,178 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, ModuleList + +from mmdet.models.backbones.resnet import Bottleneck +from mmdet.models.builder import HEADS +from .bbox_head import BBoxHead + + +class BasicResBlock(BaseModule): + """Basic residual block. + + This block is a little different from the block in the ResNet backbone. + The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock. + + Args: + in_channels (int): Channels of the input feature map. + out_channels (int): Channels of the output feature map. + conv_cfg (dict): The config dict for convolution layers. + norm_cfg (dict): The config dict for normalization layers. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + super(BasicResBlock, self).__init__(init_cfg) + + # main path + self.conv1 = ConvModule( + in_channels, + in_channels, + kernel_size=3, + padding=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + self.conv2 = ConvModule( + in_channels, + out_channels, + kernel_size=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + # identity path + self.conv_identity = ConvModule( + in_channels, + out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + identity = x + + x = self.conv1(x) + x = self.conv2(x) + + identity = self.conv_identity(identity) + out = x + identity + + out = self.relu(out) + return out + + +@HEADS.register_module() +class DoubleConvFCBBoxHead(BBoxHead): + r"""Bbox head used in Double-Head R-CNN + + .. 
code-block:: none + + /-> cls + /-> shared convs -> + \-> reg + roi features + /-> cls + \-> shared fc -> + \-> reg + """ # noqa: W605 + + def __init__(self, + num_convs=0, + num_fcs=0, + conv_out_channels=1024, + fc_out_channels=1024, + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=dict( + type='Normal', + override=[ + dict(type='Normal', name='fc_cls', std=0.01), + dict(type='Normal', name='fc_reg', std=0.001), + dict( + type='Xavier', + name='fc_branch', + distribution='uniform') + ]), + **kwargs): + kwargs.setdefault('with_avg_pool', True) + super(DoubleConvFCBBoxHead, self).__init__(init_cfg=init_cfg, **kwargs) + assert self.with_avg_pool + assert num_convs > 0 + assert num_fcs > 0 + self.num_convs = num_convs + self.num_fcs = num_fcs + self.conv_out_channels = conv_out_channels + self.fc_out_channels = fc_out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + # increase the channel of input features + self.res_block = BasicResBlock(self.in_channels, + self.conv_out_channels) + + # add conv heads + self.conv_branch = self._add_conv_branch() + # add fc heads + self.fc_branch = self._add_fc_branch() + + out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes + self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg) + + self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1) + self.relu = nn.ReLU(inplace=True) + + def _add_conv_branch(self): + """Add the fc branch which consists of a sequential of conv layers.""" + branch_convs = ModuleList() + for i in range(self.num_convs): + branch_convs.append( + Bottleneck( + inplanes=self.conv_out_channels, + planes=self.conv_out_channels // 4, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + return branch_convs + + def _add_fc_branch(self): + """Add the fc branch which consists of a sequential of fc layers.""" + branch_fcs = ModuleList() + for i in range(self.num_fcs): + fc_in_channels = ( + self.in_channels * + self.roi_feat_area if i == 0 else self.fc_out_channels) + branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) + return branch_fcs + + def forward(self, x_cls, x_reg): + # conv head + x_conv = self.res_block(x_reg) + + for conv in self.conv_branch: + x_conv = conv(x_conv) + + if self.with_avg_pool: + x_conv = self.avg_pool(x_conv) + + x_conv = x_conv.view(x_conv.size(0), -1) + bbox_pred = self.fc_reg(x_conv) + + # fc head + x_fc = x_cls.view(x_cls.size(0), -1) + for fc in self.fc_branch: + x_fc = self.relu(fc(x_fc)) + + cls_score = self.fc_cls(x_fc) + + return cls_score, bbox_pred diff --git a/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/sabl_head.py b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/sabl_head.py new file mode 100644 index 0000000..0ce986b --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/sabl_head.py @@ -0,0 +1,596 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, force_fp32 + +from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms +from mmdet.models.builder import HEADS, build_loss +from mmdet.models.losses import accuracy + + +@HEADS.register_module() +class SABLHead(BaseModule): + """Side-Aware Boundary Localization (SABL) for RoI-Head. + + Side-Aware features are extracted by conv layers + with an attention mechanism. + Boundary Localization with Bucketing and Bucketing Guided Rescoring + are implemented in BucketingBBoxCoder. 
+ + Please refer to https://arxiv.org/abs/1912.04260 for more details. + + Args: + cls_in_channels (int): Input channels of cls RoI feature. \ + Defaults to 256. + reg_in_channels (int): Input channels of reg RoI feature. \ + Defaults to 256. + roi_feat_size (int): Size of RoI features. Defaults to 7. + reg_feat_up_ratio (int): Upsample ratio of reg features. \ + Defaults to 2. + reg_pre_kernel (int): Kernel of 2D conv layers before \ + attention pooling. Defaults to 3. + reg_post_kernel (int): Kernel of 1D conv layers after \ + attention pooling. Defaults to 3. + reg_pre_num (int): Number of pre convs. Defaults to 2. + reg_post_num (int): Number of post convs. Defaults to 1. + num_classes (int): Number of classes in dataset. Defaults to 80. + cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024. + reg_offset_out_channels (int): Hidden and output channel \ + of reg offset branch. Defaults to 256. + reg_cls_out_channels (int): Hidden and output channel \ + of reg cls branch. Defaults to 256. + num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1. + num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0. + reg_class_agnostic (bool): Class agnostic regression or not. \ + Defaults to True. + norm_cfg (dict): Config of norm layers. Defaults to None. + bbox_coder (dict): Config of bbox coder. Defaults 'BucketingBBoxCoder'. + loss_cls (dict): Config of classification loss. + loss_bbox_cls (dict): Config of classification loss for bbox branch. + loss_bbox_reg (dict): Config of regression loss for bbox branch. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + num_classes, + cls_in_channels=256, + reg_in_channels=256, + roi_feat_size=7, + reg_feat_up_ratio=2, + reg_pre_kernel=3, + reg_post_kernel=3, + reg_pre_num=2, + reg_post_num=1, + cls_out_channels=1024, + reg_offset_out_channels=256, + reg_cls_out_channels=256, + num_cls_fcs=1, + num_reg_fcs=0, + reg_class_agnostic=True, + norm_cfg=None, + bbox_coder=dict( + type='BucketingBBoxCoder', + num_buckets=14, + scale_factor=1.7), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_bbox_reg=dict( + type='SmoothL1Loss', beta=0.1, loss_weight=1.0), + init_cfg=None): + super(SABLHead, self).__init__(init_cfg) + self.cls_in_channels = cls_in_channels + self.reg_in_channels = reg_in_channels + self.roi_feat_size = roi_feat_size + self.reg_feat_up_ratio = int(reg_feat_up_ratio) + self.num_buckets = bbox_coder['num_buckets'] + assert self.reg_feat_up_ratio // 2 >= 1 + self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio + assert self.up_reg_feat_size == bbox_coder['num_buckets'] + self.reg_pre_kernel = reg_pre_kernel + self.reg_post_kernel = reg_post_kernel + self.reg_pre_num = reg_pre_num + self.reg_post_num = reg_post_num + self.num_classes = num_classes + self.cls_out_channels = cls_out_channels + self.reg_offset_out_channels = reg_offset_out_channels + self.reg_cls_out_channels = reg_cls_out_channels + self.num_cls_fcs = num_cls_fcs + self.num_reg_fcs = num_reg_fcs + self.reg_class_agnostic = reg_class_agnostic + assert self.reg_class_agnostic + self.norm_cfg = norm_cfg + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.loss_cls = build_loss(loss_cls) + self.loss_bbox_cls = build_loss(loss_bbox_cls) + self.loss_bbox_reg = build_loss(loss_bbox_reg) + + self.cls_fcs = self._add_fc_branch(self.num_cls_fcs, + 
self.cls_in_channels, + self.roi_feat_size, + self.cls_out_channels) + + self.side_num = int(np.ceil(self.num_buckets / 2)) + + if self.reg_feat_up_ratio > 1: + self.upsample_x = nn.ConvTranspose1d( + reg_in_channels, + reg_in_channels, + self.reg_feat_up_ratio, + stride=self.reg_feat_up_ratio) + self.upsample_y = nn.ConvTranspose1d( + reg_in_channels, + reg_in_channels, + self.reg_feat_up_ratio, + stride=self.reg_feat_up_ratio) + + self.reg_pre_convs = nn.ModuleList() + for i in range(self.reg_pre_num): + reg_pre_conv = ConvModule( + reg_in_channels, + reg_in_channels, + kernel_size=reg_pre_kernel, + padding=reg_pre_kernel // 2, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')) + self.reg_pre_convs.append(reg_pre_conv) + + self.reg_post_conv_xs = nn.ModuleList() + for i in range(self.reg_post_num): + reg_post_conv_x = ConvModule( + reg_in_channels, + reg_in_channels, + kernel_size=(1, reg_post_kernel), + padding=(0, reg_post_kernel // 2), + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')) + self.reg_post_conv_xs.append(reg_post_conv_x) + self.reg_post_conv_ys = nn.ModuleList() + for i in range(self.reg_post_num): + reg_post_conv_y = ConvModule( + reg_in_channels, + reg_in_channels, + kernel_size=(reg_post_kernel, 1), + padding=(reg_post_kernel // 2, 0), + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU')) + self.reg_post_conv_ys.append(reg_post_conv_y) + + self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1) + self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1) + + self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1) + self.relu = nn.ReLU(inplace=True) + + self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs, + self.reg_in_channels, 1, + self.reg_cls_out_channels) + self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs, + self.reg_in_channels, 1, + self.reg_offset_out_channels) + self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1) + self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1) + + if init_cfg is None: + self.init_cfg = [ + dict( + type='Xavier', + layer='Linear', + distribution='uniform', + override=[ + dict(type='Normal', name='reg_conv_att_x', std=0.01), + dict(type='Normal', name='reg_conv_att_y', std=0.01), + dict(type='Normal', name='fc_reg_cls', std=0.01), + dict(type='Normal', name='fc_cls', std=0.01), + dict(type='Normal', name='fc_reg_offset', std=0.001) + ]) + ] + if self.reg_feat_up_ratio > 1: + self.init_cfg += [ + dict( + type='Kaiming', + distribution='normal', + override=[ + dict(name='upsample_x'), + dict(name='upsample_y') + ]) + ] + + @property + def custom_cls_channels(self): + return getattr(self.loss_cls, 'custom_cls_channels', False) + + @property + def custom_activation(self): + return getattr(self.loss_cls, 'custom_activation', False) + + @property + def custom_accuracy(self): + return getattr(self.loss_cls, 'custom_accuracy', False) + + def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size, + fc_out_channels): + in_channels = in_channels * roi_feat_size * roi_feat_size + branch_fcs = nn.ModuleList() + for i in range(num_branch_fcs): + fc_in_channels = (in_channels if i == 0 else fc_out_channels) + branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels)) + return branch_fcs + + def cls_forward(self, cls_x): + cls_x = cls_x.view(cls_x.size(0), -1) + for fc in self.cls_fcs: + cls_x = self.relu(fc(cls_x)) + cls_score = self.fc_cls(cls_x) + return cls_score + + def attention_pool(self, reg_x): + """Extract direction-specific features fx and fy with attention + methanism.""" + reg_fx = reg_x + reg_fy = 
reg_x + reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid() + reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid() + reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2) + reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3) + reg_fx = (reg_fx * reg_fx_att).sum(dim=2) + reg_fy = (reg_fy * reg_fy_att).sum(dim=3) + return reg_fx, reg_fy + + def side_aware_feature_extractor(self, reg_x): + """Refine and extract side-aware features without split them.""" + for reg_pre_conv in self.reg_pre_convs: + reg_x = reg_pre_conv(reg_x) + reg_fx, reg_fy = self.attention_pool(reg_x) + + if self.reg_post_num > 0: + reg_fx = reg_fx.unsqueeze(2) + reg_fy = reg_fy.unsqueeze(3) + for i in range(self.reg_post_num): + reg_fx = self.reg_post_conv_xs[i](reg_fx) + reg_fy = self.reg_post_conv_ys[i](reg_fy) + reg_fx = reg_fx.squeeze(2) + reg_fy = reg_fy.squeeze(3) + if self.reg_feat_up_ratio > 1: + reg_fx = self.relu(self.upsample_x(reg_fx)) + reg_fy = self.relu(self.upsample_y(reg_fy)) + reg_fx = torch.transpose(reg_fx, 1, 2) + reg_fy = torch.transpose(reg_fy, 1, 2) + return reg_fx.contiguous(), reg_fy.contiguous() + + def reg_pred(self, x, offset_fcs, cls_fcs): + """Predict bucketing estimation (cls_pred) and fine regression (offset + pred) with side-aware features.""" + x_offset = x.view(-1, self.reg_in_channels) + x_cls = x.view(-1, self.reg_in_channels) + + for fc in offset_fcs: + x_offset = self.relu(fc(x_offset)) + for fc in cls_fcs: + x_cls = self.relu(fc(x_cls)) + offset_pred = self.fc_reg_offset(x_offset) + cls_pred = self.fc_reg_cls(x_cls) + + offset_pred = offset_pred.view(x.size(0), -1) + cls_pred = cls_pred.view(x.size(0), -1) + + return offset_pred, cls_pred + + def side_aware_split(self, feat): + """Split side-aware features aligned with orders of bucketing + targets.""" + l_end = int(np.ceil(self.up_reg_feat_size / 2)) + r_start = int(np.floor(self.up_reg_feat_size / 2)) + feat_fl = feat[:, :l_end] + feat_fr = feat[:, r_start:].flip(dims=(1, )) + feat_fl = feat_fl.contiguous() + feat_fr = feat_fr.contiguous() + feat = torch.cat([feat_fl, feat_fr], dim=-1) + return feat + + def bbox_pred_split(self, bbox_pred, num_proposals_per_img): + """Split batch bbox prediction back to each image.""" + bucket_cls_preds, bucket_offset_preds = bbox_pred + bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0) + bucket_offset_preds = bucket_offset_preds.split( + num_proposals_per_img, 0) + bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds)) + return bbox_pred + + def reg_forward(self, reg_x): + outs = self.side_aware_feature_extractor(reg_x) + edge_offset_preds = [] + edge_cls_preds = [] + reg_fx = outs[0] + reg_fy = outs[1] + offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs, + self.reg_cls_fcs) + offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs, + self.reg_cls_fcs) + offset_pred_x = self.side_aware_split(offset_pred_x) + offset_pred_y = self.side_aware_split(offset_pred_y) + cls_pred_x = self.side_aware_split(cls_pred_x) + cls_pred_y = self.side_aware_split(cls_pred_y) + edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1) + edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1) + + return (edge_cls_preds, edge_offset_preds) + + def forward(self, x): + + bbox_pred = self.reg_forward(x) + cls_score = self.cls_forward(x) + + return cls_score, bbox_pred + + def get_targets(self, sampling_results, gt_bboxes, gt_labels, + rcnn_train_cfg): + pos_proposals = [res.pos_bboxes for res in sampling_results] + neg_proposals = 
[res.neg_bboxes for res in sampling_results] + pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] + pos_gt_labels = [res.pos_gt_labels for res in sampling_results] + cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals, + pos_gt_bboxes, pos_gt_labels, + rcnn_train_cfg) + (labels, label_weights, bucket_cls_targets, bucket_cls_weights, + bucket_offset_targets, bucket_offset_weights) = cls_reg_targets + return (labels, label_weights, (bucket_cls_targets, + bucket_offset_targets), + (bucket_cls_weights, bucket_offset_weights)) + + def bucket_target(self, + pos_proposals_list, + neg_proposals_list, + pos_gt_bboxes_list, + pos_gt_labels_list, + rcnn_train_cfg, + concat=True): + (labels, label_weights, bucket_cls_targets, bucket_cls_weights, + bucket_offset_targets, bucket_offset_weights) = multi_apply( + self._bucket_target_single, + pos_proposals_list, + neg_proposals_list, + pos_gt_bboxes_list, + pos_gt_labels_list, + cfg=rcnn_train_cfg) + + if concat: + labels = torch.cat(labels, 0) + label_weights = torch.cat(label_weights, 0) + bucket_cls_targets = torch.cat(bucket_cls_targets, 0) + bucket_cls_weights = torch.cat(bucket_cls_weights, 0) + bucket_offset_targets = torch.cat(bucket_offset_targets, 0) + bucket_offset_weights = torch.cat(bucket_offset_weights, 0) + return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, + bucket_offset_targets, bucket_offset_weights) + + def _bucket_target_single(self, pos_proposals, neg_proposals, + pos_gt_bboxes, pos_gt_labels, cfg): + """Compute bucketing estimation targets and fine regression targets for + a single image. + + Args: + pos_proposals (Tensor): positive proposals of a single image, + Shape (n_pos, 4) + neg_proposals (Tensor): negative proposals of a single image, + Shape (n_neg, 4). + pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals + of a single image, Shape (n_pos, 4). + pos_gt_labels (Tensor): gt labels assigned to positive proposals + of a single image, Shape (n_pos, ). + cfg (dict): Config of calculating targets + + Returns: + tuple: + + - labels (Tensor): Labels in a single image. \ + Shape (n,). + - label_weights (Tensor): Label weights in a single image.\ + Shape (n,) + - bucket_cls_targets (Tensor): Bucket cls targets in \ + a single image. Shape (n, num_buckets*2). + - bucket_cls_weights (Tensor): Bucket cls weights in \ + a single image. Shape (n, num_buckets*2). + - bucket_offset_targets (Tensor): Bucket offset targets \ + in a single image. Shape (n, num_buckets*2). + - bucket_offset_targets (Tensor): Bucket offset weights \ + in a single image. Shape (n, num_buckets*2). 
+ """ + num_pos = pos_proposals.size(0) + num_neg = neg_proposals.size(0) + num_samples = num_pos + num_neg + labels = pos_gt_bboxes.new_full((num_samples, ), + self.num_classes, + dtype=torch.long) + label_weights = pos_proposals.new_zeros(num_samples) + bucket_cls_targets = pos_proposals.new_zeros(num_samples, + 4 * self.side_num) + bucket_cls_weights = pos_proposals.new_zeros(num_samples, + 4 * self.side_num) + bucket_offset_targets = pos_proposals.new_zeros( + num_samples, 4 * self.side_num) + bucket_offset_weights = pos_proposals.new_zeros( + num_samples, 4 * self.side_num) + if num_pos > 0: + labels[:num_pos] = pos_gt_labels + label_weights[:num_pos] = 1.0 + (pos_bucket_offset_targets, pos_bucket_offset_weights, + pos_bucket_cls_targets, + pos_bucket_cls_weights) = self.bbox_coder.encode( + pos_proposals, pos_gt_bboxes) + bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets + bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights + bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets + bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights + if num_neg > 0: + label_weights[-num_neg:] = 1.0 + return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, + bucket_offset_targets, bucket_offset_weights) + + def loss(self, + cls_score, + bbox_pred, + rois, + labels, + label_weights, + bbox_targets, + bbox_weights, + reduction_override=None): + losses = dict() + if cls_score is not None: + avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) + losses['loss_cls'] = self.loss_cls( + cls_score, + labels, + label_weights, + avg_factor=avg_factor, + reduction_override=reduction_override) + losses['acc'] = accuracy(cls_score, labels) + + if bbox_pred is not None: + bucket_cls_preds, bucket_offset_preds = bbox_pred + bucket_cls_targets, bucket_offset_targets = bbox_targets + bucket_cls_weights, bucket_offset_weights = bbox_weights + # edge cls + bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num) + bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num) + bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num) + losses['loss_bbox_cls'] = self.loss_bbox_cls( + bucket_cls_preds, + bucket_cls_targets, + bucket_cls_weights, + avg_factor=bucket_cls_targets.size(0), + reduction_override=reduction_override) + + losses['loss_bbox_reg'] = self.loss_bbox_reg( + bucket_offset_preds, + bucket_offset_targets, + bucket_offset_weights, + avg_factor=bucket_offset_targets.size(0), + reduction_override=reduction_override) + + return losses + + @force_fp32(apply_to=('cls_score', 'bbox_pred')) + def get_bboxes(self, + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False, + cfg=None): + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + scores = F.softmax(cls_score, dim=1) if cls_score is not None else None + + if bbox_pred is not None: + bboxes, confidences = self.bbox_coder.decode( + rois[:, 1:], bbox_pred, img_shape) + else: + bboxes = rois[:, 1:].clone() + confidences = None + if img_shape is not None: + bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) + bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) + + if rescale and bboxes.size(0) > 0: + if isinstance(scale_factor, float): + bboxes /= scale_factor + else: + bboxes /= torch.from_numpy(scale_factor).to(bboxes.device) + + if cfg is None: + return bboxes, scores + else: + det_bboxes, det_labels = multiclass_nms( + bboxes, + scores, + cfg.score_thr, + cfg.nms, + cfg.max_per_img, + score_factors=confidences) + + return 
det_bboxes, det_labels + + @force_fp32(apply_to=('bbox_preds', )) + def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): + """Refine bboxes during training. + + Args: + rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, + and bs is the sampled RoIs per image. + labels (Tensor): Shape (n*bs, ). + bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \ + (n*bs, num_buckets*2)]. + pos_is_gts (list[Tensor]): Flags indicating if each positive bbox + is a gt bbox. + img_metas (list[dict]): Meta info of each image. + + Returns: + list[Tensor]: Refined bboxes of each image in a mini-batch. + """ + img_ids = rois[:, 0].long().unique(sorted=True) + assert img_ids.numel() == len(img_metas) + + bboxes_list = [] + for i in range(len(img_metas)): + inds = torch.nonzero( + rois[:, 0] == i, as_tuple=False).squeeze(dim=1) + num_rois = inds.numel() + + bboxes_ = rois[inds, 1:] + label_ = labels[inds] + edge_cls_preds, edge_offset_preds = bbox_preds + edge_cls_preds_ = edge_cls_preds[inds] + edge_offset_preds_ = edge_offset_preds[inds] + bbox_pred_ = [edge_cls_preds_, edge_offset_preds_] + img_meta_ = img_metas[i] + pos_is_gts_ = pos_is_gts[i] + + bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, + img_meta_) + # filter gt bboxes + pos_keep = 1 - pos_is_gts_ + keep_inds = pos_is_gts_.new_ones(num_rois) + keep_inds[:len(pos_is_gts_)] = pos_keep + + bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) + + return bboxes_list + + @force_fp32(apply_to=('bbox_pred', )) + def regress_by_class(self, rois, label, bbox_pred, img_meta): + """Regress the bbox for the predicted class. Used in Cascade R-CNN. + + Args: + rois (Tensor): shape (n, 4) or (n, 5) + label (Tensor): shape (n, ) + bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \ + (n, num_buckets *2)] + img_meta (dict): Image meta info. + + Returns: + Tensor: Regressed bboxes, the same shape as input rois. + """ + assert rois.size(1) == 4 or rois.size(1) == 5 + + if rois.size(1) == 4: + new_rois, _ = self.bbox_coder.decode(rois, bbox_pred, + img_meta['img_shape']) + else: + bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred, + img_meta['img_shape']) + new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) + + return new_rois diff --git a/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py new file mode 100644 index 0000000..cf39ebe --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet.models.builder import HEADS +from .convfc_bbox_head import ConvFCBBoxHead + + +@HEADS.register_module() +class SCNetBBoxHead(ConvFCBBoxHead): + """BBox head for `SCNet `_. + + This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us + to get intermediate shared feature. 
+ """ + + def _forward_shared(self, x): + """Forward function for shared part.""" + if self.num_shared_convs > 0: + for conv in self.shared_convs: + x = conv(x) + + if self.num_shared_fcs > 0: + if self.with_avg_pool: + x = self.avg_pool(x) + + x = x.flatten(1) + + for fc in self.shared_fcs: + x = self.relu(fc(x)) + + return x + + def _forward_cls_reg(self, x): + """Forward function for classification and regression parts.""" + x_cls = x + x_reg = x + + for conv in self.cls_convs: + x_cls = conv(x_cls) + if x_cls.dim() > 2: + if self.with_avg_pool: + x_cls = self.avg_pool(x_cls) + x_cls = x_cls.flatten(1) + for fc in self.cls_fcs: + x_cls = self.relu(fc(x_cls)) + + for conv in self.reg_convs: + x_reg = conv(x_reg) + if x_reg.dim() > 2: + if self.with_avg_pool: + x_reg = self.avg_pool(x_reg) + x_reg = x_reg.flatten(1) + for fc in self.reg_fcs: + x_reg = self.relu(fc(x_reg)) + + cls_score = self.fc_cls(x_cls) if self.with_cls else None + bbox_pred = self.fc_reg(x_reg) if self.with_reg else None + + return cls_score, bbox_pred + + def forward(self, x, return_shared_feat=False): + """Forward function. + + Args: + x (Tensor): input features + return_shared_feat (bool): If True, return cls-reg-shared feature. + + Return: + out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``, + if ``return_shared_feat`` is True, append ``x_shared`` to the + returned tuple. + """ + x_shared = self._forward_shared(x) + out = self._forward_cls_reg(x_shared) + + if return_shared_feat: + out += (x_shared, ) + + return out diff --git a/downstream/mmdetection/mmdet/models/roi_heads/cascade_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/cascade_roi_head.py new file mode 100644 index 0000000..e17313f --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/cascade_roi_head.py @@ -0,0 +1,631 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import torch.nn as nn +from mmcv.runner import ModuleList + +from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner, + build_sampler, merge_aug_bboxes, merge_aug_masks, + multiclass_nms) +from ..builder import HEADS, build_head, build_roi_extractor +from .base_roi_head import BaseRoIHead +from .test_mixins import BBoxTestMixin, MaskTestMixin + + +@HEADS.register_module() +class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): + """Cascade roi head including one bbox head and one mask head. + + https://arxiv.org/abs/1712.00726 + """ + + def __init__(self, + num_stages, + stage_loss_weights, + bbox_roi_extractor=None, + bbox_head=None, + mask_roi_extractor=None, + mask_head=None, + shared_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + assert bbox_roi_extractor is not None + assert bbox_head is not None + assert shared_head is None, \ + 'Shared head is not supported in Cascade RCNN anymore' + + self.num_stages = num_stages + self.stage_loss_weights = stage_loss_weights + super(CascadeRoIHead, self).__init__( + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + mask_roi_extractor=mask_roi_extractor, + mask_head=mask_head, + shared_head=shared_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) + + def init_bbox_head(self, bbox_roi_extractor, bbox_head): + """Initialize box head and box roi extractor. + + Args: + bbox_roi_extractor (dict): Config of box roi extractor. + bbox_head (dict): Config of box in box head. 
+ """ + self.bbox_roi_extractor = ModuleList() + self.bbox_head = ModuleList() + if not isinstance(bbox_roi_extractor, list): + bbox_roi_extractor = [ + bbox_roi_extractor for _ in range(self.num_stages) + ] + if not isinstance(bbox_head, list): + bbox_head = [bbox_head for _ in range(self.num_stages)] + assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages + for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): + self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor)) + self.bbox_head.append(build_head(head)) + + def init_mask_head(self, mask_roi_extractor, mask_head): + """Initialize mask head and mask roi extractor. + + Args: + mask_roi_extractor (dict): Config of mask roi extractor. + mask_head (dict): Config of mask in mask head. + """ + self.mask_head = nn.ModuleList() + if not isinstance(mask_head, list): + mask_head = [mask_head for _ in range(self.num_stages)] + assert len(mask_head) == self.num_stages + for head in mask_head: + self.mask_head.append(build_head(head)) + if mask_roi_extractor is not None: + self.share_roi_extractor = False + self.mask_roi_extractor = ModuleList() + if not isinstance(mask_roi_extractor, list): + mask_roi_extractor = [ + mask_roi_extractor for _ in range(self.num_stages) + ] + assert len(mask_roi_extractor) == self.num_stages + for roi_extractor in mask_roi_extractor: + self.mask_roi_extractor.append( + build_roi_extractor(roi_extractor)) + else: + self.share_roi_extractor = True + self.mask_roi_extractor = self.bbox_roi_extractor + + def init_assigner_sampler(self): + """Initialize assigner and sampler for each stage.""" + self.bbox_assigner = [] + self.bbox_sampler = [] + if self.train_cfg is not None: + for idx, rcnn_train_cfg in enumerate(self.train_cfg): + self.bbox_assigner.append( + build_assigner(rcnn_train_cfg.assigner)) + self.current_stage = idx + self.bbox_sampler.append( + build_sampler(rcnn_train_cfg.sampler, context=self)) + + def forward_dummy(self, x, proposals): + """Dummy forward function.""" + # bbox head + outs = () + rois = bbox2roi([proposals]) + if self.with_bbox: + for i in range(self.num_stages): + bbox_results = self._bbox_forward(i, x, rois) + outs = outs + (bbox_results['cls_score'], + bbox_results['bbox_pred']) + # mask heads + if self.with_mask: + mask_rois = rois[:100] + for i in range(self.num_stages): + mask_results = self._mask_forward(i, x, mask_rois) + outs = outs + (mask_results['mask_pred'], ) + return outs + + def _bbox_forward(self, stage, x, rois): + """Box head forward function used in both training and testing.""" + bbox_roi_extractor = self.bbox_roi_extractor[stage] + bbox_head = self.bbox_head[stage] + bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], + rois) + # do not support caffe_c4 model anymore + cls_score, bbox_pred = bbox_head(bbox_feats) + + bbox_results = dict( + cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) + return bbox_results + + def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, + gt_labels, rcnn_train_cfg): + """Run forward function and calculate loss for box head in training.""" + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_results = self._bbox_forward(stage, x, rois) + bbox_targets = self.bbox_head[stage].get_targets( + sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) + loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'], + bbox_results['bbox_pred'], rois, + *bbox_targets) + + bbox_results.update( + loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) + 
return bbox_results + + def _mask_forward(self, stage, x, rois): + """Mask head forward function used in both training and testing.""" + mask_roi_extractor = self.mask_roi_extractor[stage] + mask_head = self.mask_head[stage] + mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], + rois) + # do not support caffe_c4 model anymore + mask_pred = mask_head(mask_feats) + + mask_results = dict(mask_pred=mask_pred) + return mask_results + + def _mask_forward_train(self, + stage, + x, + sampling_results, + gt_masks, + rcnn_train_cfg, + bbox_feats=None): + """Run forward function and calculate loss for mask head in + training.""" + pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + mask_results = self._mask_forward(stage, x, pos_rois) + + mask_targets = self.mask_head[stage].get_targets( + sampling_results, gt_masks, rcnn_train_cfg) + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], + mask_targets, pos_labels) + + mask_results.update(loss_mask=loss_mask) + return mask_results + + def forward_train(self, + x, + img_metas, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None): + """ + Args: + x (list[Tensor]): list of multi-level img features. + img_metas (list[dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + proposals (list[Tensors]): list of region proposals. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + gt_masks (None | Tensor) : true segmentation masks for each box + used if the architecture supports a segmentation task. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + losses = dict() + for i in range(self.num_stages): + self.current_stage = i + rcnn_train_cfg = self.train_cfg[i] + lw = self.stage_loss_weights[i] + + # assign gts and sample proposals + sampling_results = [] + if self.with_bbox or self.with_mask: + bbox_assigner = self.bbox_assigner[i] + bbox_sampler = self.bbox_sampler[i] + num_imgs = len(img_metas) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + + for j in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], + gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + bbox_results = self._bbox_forward_train(i, x, sampling_results, + gt_bboxes, gt_labels, + rcnn_train_cfg) + + for name, value in bbox_results['loss_bbox'].items(): + losses[f's{i}.{name}'] = ( + value * lw if 'loss' in name else value) + + # mask head forward and loss + if self.with_mask: + mask_results = self._mask_forward_train( + i, x, sampling_results, gt_masks, rcnn_train_cfg, + bbox_results['bbox_feats']) + for name, value in mask_results['loss_mask'].items(): + losses[f's{i}.{name}'] = ( + value * lw if 'loss' in name else value) + + # refine bboxes + if i < self.num_stages - 1: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + # bbox_targets is a tuple + roi_labels = bbox_results['bbox_targets'][0] + with torch.no_grad(): + cls_score = bbox_results['cls_score'] + if self.bbox_head[i].custom_activation: + cls_score = self.bbox_head[i].loss_cls.get_activation( + cls_score) + + # Empty proposal. + if cls_score.numel() == 0: + break + + roi_labels = torch.where( + roi_labels == self.bbox_head[i].num_classes, + cls_score[:, :-1].argmax(1), roi_labels) + proposal_list = self.bbox_head[i].refine_bboxes( + bbox_results['rois'], roi_labels, + bbox_results['bbox_pred'], pos_is_gts, img_metas) + + return losses + + def simple_test(self, x, proposal_list, img_metas, rescale=False): + """Test without augmentation. + + Args: + x (tuple[Tensor]): Features from upstream network. Each + has shape (batch_size, c, h, w). + proposal_list (list(Tensor)): Proposals from rpn head. + Each has shape (num_proposals, 5), last dimension + 5 represent (x1, y1, x2, y2, score). + img_metas (list[dict]): Meta information of images. + rescale (bool): Whether to rescale the results to + the original image. Default: True. + + Returns: + list[list[np.ndarray]] or list[tuple]: When no mask branch, + it is bbox results of each image and classes with type + `list[list[np.ndarray]]`. The outer list + corresponds to each image. The inner list + corresponds to each class. When the model has mask branch, + it contains bbox results and mask results. + The outer list corresponds to each image, and first element + of tuple is bbox results, second element is mask results. + """ + assert self.with_bbox, 'Bbox head must be implemented.' 
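+        # The single-image test pipeline below proceeds as follows:
+        #   1. convert the per-image proposals into one batched RoI tensor;
+        #   2. run every cascade stage on the current RoIs, using each
+        #      stage's box regression to refine the RoIs fed to the next
+        #      stage;
+        #   3. average the classification scores over all stages;
+        #   4. decode and NMS the last stage's boxes per image and, if a
+        #      mask branch exists, run all mask heads on the detected boxes
+        #      and merge their predictions.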
+ num_imgs = len(proposal_list) + img_shapes = tuple(meta['img_shape'] for meta in img_metas) + ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + # "ms" in variable names means multi-stage + ms_bbox_result = {} + ms_segm_result = {} + ms_scores = [] + rcnn_test_cfg = self.test_cfg + + rois = bbox2roi(proposal_list) + + if rois.shape[0] == 0: + # There is no proposal in the whole batch + bbox_results = [[ + np.zeros((0, 5), dtype=np.float32) + for _ in range(self.bbox_head[-1].num_classes) + ]] * num_imgs + + if self.with_mask: + mask_classes = self.mask_head[-1].num_classes + segm_results = [[[] for _ in range(mask_classes)] + for _ in range(num_imgs)] + results = list(zip(bbox_results, segm_results)) + else: + results = bbox_results + + return results + + for i in range(self.num_stages): + bbox_results = self._bbox_forward(i, x, rois) + + # split batch bbox prediction back to each image + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + num_proposals_per_img = tuple( + len(proposals) for proposals in proposal_list) + rois = rois.split(num_proposals_per_img, 0) + cls_score = cls_score.split(num_proposals_per_img, 0) + if isinstance(bbox_pred, torch.Tensor): + bbox_pred = bbox_pred.split(num_proposals_per_img, 0) + else: + bbox_pred = self.bbox_head[i].bbox_pred_split( + bbox_pred, num_proposals_per_img) + ms_scores.append(cls_score) + + if i < self.num_stages - 1: + if self.bbox_head[i].custom_activation: + cls_score = [ + self.bbox_head[i].loss_cls.get_activation(s) + for s in cls_score + ] + refine_rois_list = [] + for j in range(num_imgs): + if rois[j].shape[0] > 0: + bbox_label = cls_score[j][:, :-1].argmax(dim=1) + refined_rois = self.bbox_head[i].regress_by_class( + rois[j], bbox_label, bbox_pred[j], img_metas[j]) + refine_rois_list.append(refined_rois) + rois = torch.cat(refine_rois_list) + + # average scores of each image by stages + cls_score = [ + sum([score[i] for score in ms_scores]) / float(len(ms_scores)) + for i in range(num_imgs) + ] + + # apply bbox post-processing to each image individually + det_bboxes = [] + det_labels = [] + for i in range(num_imgs): + det_bbox, det_label = self.bbox_head[-1].get_bboxes( + rois[i], + cls_score[i], + bbox_pred[i], + img_shapes[i], + scale_factors[i], + rescale=rescale, + cfg=rcnn_test_cfg) + det_bboxes.append(det_bbox) + det_labels.append(det_label) + + bbox_results = [ + bbox2result(det_bboxes[i], det_labels[i], + self.bbox_head[-1].num_classes) + for i in range(num_imgs) + ] + ms_bbox_result['ensemble'] = bbox_results + + if self.with_mask: + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + mask_classes = self.mask_head[-1].num_classes + segm_results = [[[] for _ in range(mask_classes)] + for _ in range(num_imgs)] + else: + if rescale and not isinstance(scale_factors[0], float): + scale_factors = [ + torch.from_numpy(scale_factor).to(det_bboxes[0].device) + for scale_factor in scale_factors + ] + _bboxes = [ + det_bboxes[i][:, :4] * + scale_factors[i] if rescale else det_bboxes[i][:, :4] + for i in range(len(det_bboxes)) + ] + mask_rois = bbox2roi(_bboxes) + num_mask_rois_per_img = tuple( + _bbox.size(0) for _bbox in _bboxes) + aug_masks = [] + for i in range(self.num_stages): + mask_results = self._mask_forward(i, x, mask_rois) + mask_pred = mask_results['mask_pred'] + # split batch mask prediction back to each image + mask_pred = mask_pred.split(num_mask_rois_per_img, 0) + aug_masks.append([ + 
m.sigmoid().cpu().detach().numpy() for m in mask_pred + ]) + + # apply mask post-processing to each image individually + segm_results = [] + for i in range(num_imgs): + if det_bboxes[i].shape[0] == 0: + segm_results.append( + [[] + for _ in range(self.mask_head[-1].num_classes)]) + else: + aug_mask = [mask[i] for mask in aug_masks] + merged_masks = merge_aug_masks( + aug_mask, [[img_metas[i]]] * self.num_stages, + rcnn_test_cfg) + segm_result = self.mask_head[-1].get_seg_masks( + merged_masks, _bboxes[i], det_labels[i], + rcnn_test_cfg, ori_shapes[i], scale_factors[i], + rescale) + segm_results.append(segm_result) + ms_segm_result['ensemble'] = segm_results + + if self.with_mask: + results = list( + zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) + else: + results = ms_bbox_result['ensemble'] + + return results + + def aug_test(self, features, proposal_list, img_metas, rescale=False): + """Test with augmentations. + + If rescale is False, then returned bboxes and masks will fit the scale + of imgs[0]. + """ + rcnn_test_cfg = self.test_cfg + aug_bboxes = [] + aug_scores = [] + for x, img_meta in zip(features, img_metas): + # only one image in the batch + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + flip_direction = img_meta[0]['flip_direction'] + + proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, + scale_factor, flip, flip_direction) + # "ms" in variable names means multi-stage + ms_scores = [] + + rois = bbox2roi([proposals]) + + if rois.shape[0] == 0: + # There is no proposal in the single image + aug_bboxes.append(rois.new_zeros(0, 4)) + aug_scores.append(rois.new_zeros(0, 1)) + continue + + for i in range(self.num_stages): + bbox_results = self._bbox_forward(i, x, rois) + ms_scores.append(bbox_results['cls_score']) + + if i < self.num_stages - 1: + cls_score = bbox_results['cls_score'] + if self.bbox_head[i].custom_activation: + cls_score = self.bbox_head[i].loss_cls.get_activation( + cls_score) + bbox_label = cls_score[:, :-1].argmax(dim=1) + rois = self.bbox_head[i].regress_by_class( + rois, bbox_label, bbox_results['bbox_pred'], + img_meta[0]) + + cls_score = sum(ms_scores) / float(len(ms_scores)) + bboxes, scores = self.bbox_head[-1].get_bboxes( + rois, + cls_score, + bbox_results['bbox_pred'], + img_shape, + scale_factor, + rescale=False, + cfg=None) + aug_bboxes.append(bboxes) + aug_scores.append(scores) + + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) + det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, + rcnn_test_cfg.score_thr, + rcnn_test_cfg.nms, + rcnn_test_cfg.max_per_img) + + bbox_result = bbox2result(det_bboxes, det_labels, + self.bbox_head[-1].num_classes) + + if self.with_mask: + if det_bboxes.shape[0] == 0: + segm_result = [[] + for _ in range(self.mask_head[-1].num_classes)] + else: + aug_masks = [] + aug_img_metas = [] + for x, img_meta in zip(features, img_metas): + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + flip_direction = img_meta[0]['flip_direction'] + _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, + scale_factor, flip, flip_direction) + mask_rois = bbox2roi([_bboxes]) + for i in range(self.num_stages): + mask_results = self._mask_forward(i, x, mask_rois) + aug_masks.append( + mask_results['mask_pred'].sigmoid().cpu().numpy()) + 
aug_img_metas.append(img_meta) + merged_masks = merge_aug_masks(aug_masks, aug_img_metas, + self.test_cfg) + + ori_shape = img_metas[0][0]['ori_shape'] + dummy_scale_factor = np.ones(4) + segm_result = self.mask_head[-1].get_seg_masks( + merged_masks, + det_bboxes, + det_labels, + rcnn_test_cfg, + ori_shape, + scale_factor=dummy_scale_factor, + rescale=False) + return [(bbox_result, segm_result)] + else: + return [bbox_result] + + def onnx_export(self, x, proposals, img_metas): + + assert self.with_bbox, 'Bbox head must be implemented.' + assert proposals.shape[0] == 1, 'Only support one input image ' \ + 'while in exporting to ONNX' + # remove the scores + rois = proposals[..., :-1] + batch_size = rois.shape[0] + num_proposals_per_img = rois.shape[1] + # Eliminate the batch dimension + rois = rois.view(-1, 4) + + # add dummy batch index + rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) + + max_shape = img_metas[0]['img_shape_for_onnx'] + ms_scores = [] + rcnn_test_cfg = self.test_cfg + + for i in range(self.num_stages): + bbox_results = self._bbox_forward(i, x, rois) + + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + # Recover the batch dimension + rois = rois.reshape(batch_size, num_proposals_per_img, + rois.size(-1)) + cls_score = cls_score.reshape(batch_size, num_proposals_per_img, + cls_score.size(-1)) + bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) + ms_scores.append(cls_score) + if i < self.num_stages - 1: + assert self.bbox_head[i].reg_class_agnostic + new_rois = self.bbox_head[i].bbox_coder.decode( + rois[..., 1:], bbox_pred, max_shape=max_shape) + rois = new_rois.reshape(-1, new_rois.shape[-1]) + # add dummy batch index + rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], + dim=-1) + + cls_score = sum(ms_scores) / float(len(ms_scores)) + bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) + rois = rois.reshape(batch_size, num_proposals_per_img, -1) + det_bboxes, det_labels = self.bbox_head[-1].onnx_export( + rois, cls_score, bbox_pred, max_shape, cfg=rcnn_test_cfg) + + if not self.with_mask: + return det_bboxes, det_labels + else: + batch_index = torch.arange( + det_bboxes.size(0), + device=det_bboxes.device).float().view(-1, 1, 1).expand( + det_bboxes.size(0), det_bboxes.size(1), 1) + rois = det_bboxes[..., :4] + mask_rois = torch.cat([batch_index, rois], dim=-1) + mask_rois = mask_rois.view(-1, 5) + aug_masks = [] + for i in range(self.num_stages): + mask_results = self._mask_forward(i, x, mask_rois) + mask_pred = mask_results['mask_pred'] + aug_masks.append(mask_pred) + max_shape = img_metas[0]['img_shape_for_onnx'] + # calculate the mean of masks from several stage + mask_pred = sum(aug_masks) / len(aug_masks) + segm_results = self.mask_head[-1].onnx_export( + mask_pred, rois.reshape(-1, 4), det_labels.reshape(-1), + self.test_cfg, max_shape) + segm_results = segm_results.reshape(batch_size, + det_bboxes.shape[1], + max_shape[0], max_shape[1]) + return det_bboxes, det_labels, segm_results diff --git a/downstream/mmdetection/mmdet/models/roi_heads/double_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/double_roi_head.py new file mode 100644 index 0000000..895b5d3 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/double_roi_head.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
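The cascade head's `onnx_export` path above flattens the per-image proposals into RoIs with a dummy batch index and averages the classification scores over the cascade stages before decoding. A minimal sketch of those two steps on plain tensors (the proposal count and the 81-way score width are illustrative, not taken from any specific config):

```python
import torch

# Proposals for one image, shape (num_proposals, 4) in (x1, y1, x2, y2) order.
proposals = torch.rand(8, 4) * 100

# Prepend a dummy batch index (all zeros for a single image) -> (num_proposals, 5) RoIs.
rois = torch.cat([proposals.new_zeros(proposals.size(0), 1), proposals], dim=-1)

# Per-stage classification scores from a 3-stage cascade (illustrative shapes).
ms_scores = [torch.rand(8, 81) for _ in range(3)]

# Mirror `sum(ms_scores) / float(len(ms_scores))`: average the scores across stages.
cls_score = sum(ms_scores) / float(len(ms_scores))
print(rois.shape, cls_score.shape)  # torch.Size([8, 5]) torch.Size([8, 81])
```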
+from ..builder import HEADS +from .standard_roi_head import StandardRoIHead + + +@HEADS.register_module() +class DoubleHeadRoIHead(StandardRoIHead): + """RoI head for Double Head RCNN. + + https://arxiv.org/abs/1904.06493 + """ + + def __init__(self, reg_roi_scale_factor, **kwargs): + super(DoubleHeadRoIHead, self).__init__(**kwargs) + self.reg_roi_scale_factor = reg_roi_scale_factor + + def _bbox_forward(self, x, rois): + """Box head forward function used in both training and testing time.""" + bbox_cls_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + bbox_reg_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.reg_roi_scale_factor) + if self.with_shared_head: + bbox_cls_feats = self.shared_head(bbox_cls_feats) + bbox_reg_feats = self.shared_head(bbox_reg_feats) + cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) + + bbox_results = dict( + cls_score=cls_score, + bbox_pred=bbox_pred, + bbox_feats=bbox_cls_feats) + return bbox_results diff --git a/downstream/mmdetection/mmdet/models/roi_heads/dynamic_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/dynamic_roi_head.py new file mode 100644 index 0000000..4c2b6cd --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/dynamic_roi_head.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet.core import bbox2roi +from mmdet.models.losses import SmoothL1Loss +from ..builder import HEADS +from .standard_roi_head import StandardRoIHead + +EPS = 1e-15 + + +@HEADS.register_module() +class DynamicRoIHead(StandardRoIHead): + """RoI head for `Dynamic R-CNN `_.""" + + def __init__(self, **kwargs): + super(DynamicRoIHead, self).__init__(**kwargs) + assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss) + # the IoU history of the past `update_iter_interval` iterations + self.iou_history = [] + # the beta history of the past `update_iter_interval` iterations + self.beta_history = [] + + def forward_train(self, + x, + img_metas, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None): + """Forward function for training. + + Args: + x (list[Tensor]): list of multi-level img features. + + img_metas (list[dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + + proposals (list[Tensors]): list of region proposals. + + gt_bboxes (list[Tensor]): each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + + gt_labels (list[Tensor]): class indices corresponding to each box + + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + gt_masks (None | Tensor) : true segmentation masks for each box + used if the architecture supports a segmentation task. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # assign gts and sample proposals + if self.with_bbox or self.with_mask: + num_imgs = len(img_metas) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + sampling_results = [] + cur_iou = [] + for i in range(num_imgs): + assign_result = self.bbox_assigner.assign( + proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], + gt_labels[i]) + sampling_result = self.bbox_sampler.sample( + assign_result, + proposal_list[i], + gt_bboxes[i], + gt_labels[i], + feats=[lvl_feat[i][None] for lvl_feat in x]) + # record the `iou_topk`-th largest IoU in an image + iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk, + len(assign_result.max_overlaps)) + ious, _ = torch.topk(assign_result.max_overlaps, iou_topk) + cur_iou.append(ious[-1].item()) + sampling_results.append(sampling_result) + # average the current IoUs over images + cur_iou = np.mean(cur_iou) + self.iou_history.append(cur_iou) + + losses = dict() + # bbox head forward and loss + if self.with_bbox: + bbox_results = self._bbox_forward_train(x, sampling_results, + gt_bboxes, gt_labels, + img_metas) + losses.update(bbox_results['loss_bbox']) + + # mask head forward and loss + if self.with_mask: + mask_results = self._mask_forward_train(x, sampling_results, + bbox_results['bbox_feats'], + gt_masks, img_metas) + losses.update(mask_results['loss_mask']) + + # update IoU threshold and SmoothL1 beta + update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval + if len(self.iou_history) % update_iter_interval == 0: + new_iou_thr, new_beta = self.update_hyperparameters() + + return losses + + def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, + img_metas): + num_imgs = len(img_metas) + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_results = self._bbox_forward(x, rois) + + bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, + gt_labels, self.train_cfg) + # record the `beta_topk`-th smallest target + # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets + # and bbox_weights, respectively + pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1) + num_pos = len(pos_inds) + cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1) + beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs, + num_pos) + cur_target = torch.kthvalue(cur_target, beta_topk)[0].item() + self.beta_history.append(cur_target) + loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], + bbox_results['bbox_pred'], rois, + *bbox_targets) + + bbox_results.update(loss_bbox=loss_bbox) + return bbox_results + + def update_hyperparameters(self): + """Update hyperparameters like IoU thresholds for assigner and beta for + SmoothL1 loss based on the training statistics. + + Returns: + tuple[float]: the updated ``iou_thr`` and ``beta``. 
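+
+        Example:
+            With ``initial_iou=0.4`` and an IoU history whose mean is
+            0.55, the assigner thresholds are raised to 0.55; with
+            ``initial_beta=1.0`` and a beta history whose median is
+            0.08, the SmoothL1 ``beta`` is lowered to 0.08 (values are
+            illustrative only).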
+ """ + new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou, + np.mean(self.iou_history)) + self.iou_history = [] + self.bbox_assigner.pos_iou_thr = new_iou_thr + self.bbox_assigner.neg_iou_thr = new_iou_thr + self.bbox_assigner.min_pos_iou = new_iou_thr + if (np.median(self.beta_history) < EPS): + # avoid 0 or too small value for new_beta + new_beta = self.bbox_head.loss_bbox.beta + else: + new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta, + np.median(self.beta_history)) + self.beta_history = [] + self.bbox_head.loss_bbox.beta = new_beta + return new_iou_thr, new_beta diff --git a/downstream/mmdetection/mmdet/models/roi_heads/grid_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/grid_roi_head.py new file mode 100644 index 0000000..333f629 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/grid_roi_head.py @@ -0,0 +1,170 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet.core import bbox2result, bbox2roi +from ..builder import HEADS, build_head, build_roi_extractor +from .standard_roi_head import StandardRoIHead + + +@HEADS.register_module() +class GridRoIHead(StandardRoIHead): + """Grid roi head for Grid R-CNN. + + https://arxiv.org/abs/1811.12030 + """ + + def __init__(self, grid_roi_extractor, grid_head, **kwargs): + assert grid_head is not None + super(GridRoIHead, self).__init__(**kwargs) + if grid_roi_extractor is not None: + self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor) + self.share_roi_extractor = False + else: + self.share_roi_extractor = True + self.grid_roi_extractor = self.bbox_roi_extractor + self.grid_head = build_head(grid_head) + + def _random_jitter(self, sampling_results, img_metas, amplitude=0.15): + """Ramdom jitter positive proposals for training.""" + for sampling_result, img_meta in zip(sampling_results, img_metas): + bboxes = sampling_result.pos_bboxes + random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_( + -amplitude, amplitude) + # before jittering + cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2 + wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs() + # after jittering + new_cxcy = cxcy + wh * random_offsets[:, :2] + new_wh = wh * (1 + random_offsets[:, 2:]) + # xywh to xyxy + new_x1y1 = (new_cxcy - new_wh / 2) + new_x2y2 = (new_cxcy + new_wh / 2) + new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1) + # clip bboxes + max_shape = img_meta['img_shape'] + if max_shape is not None: + new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1) + new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1) + + sampling_result.pos_bboxes = new_bboxes + return sampling_results + + def forward_dummy(self, x, proposals): + """Dummy forward function.""" + # bbox head + outs = () + rois = bbox2roi([proposals]) + if self.with_bbox: + bbox_results = self._bbox_forward(x, rois) + outs = outs + (bbox_results['cls_score'], + bbox_results['bbox_pred']) + + # grid head + grid_rois = rois[:100] + grid_feats = self.grid_roi_extractor( + x[:self.grid_roi_extractor.num_inputs], grid_rois) + if self.with_shared_head: + grid_feats = self.shared_head(grid_feats) + grid_pred = self.grid_head(grid_feats) + outs = outs + (grid_pred, ) + + # mask head + if self.with_mask: + mask_rois = rois[:100] + mask_results = self._mask_forward(x, mask_rois) + outs = outs + (mask_results['mask_pred'], ) + return outs + + def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, + img_metas): + """Run forward function and calculate loss for box head in training.""" + bbox_results = 
super(GridRoIHead, + self)._bbox_forward_train(x, sampling_results, + gt_bboxes, gt_labels, + img_metas) + + # Grid head forward and loss + sampling_results = self._random_jitter(sampling_results, img_metas) + pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + + # GN in head does not support zero shape input + if pos_rois.shape[0] == 0: + return bbox_results + + grid_feats = self.grid_roi_extractor( + x[:self.grid_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + grid_feats = self.shared_head(grid_feats) + # Accelerate training + max_sample_num_grid = self.train_cfg.get('max_num_grid', 192) + sample_idx = torch.randperm( + grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid + )] + grid_feats = grid_feats[sample_idx] + + grid_pred = self.grid_head(grid_feats) + + grid_targets = self.grid_head.get_targets(sampling_results, + self.train_cfg) + grid_targets = grid_targets[sample_idx] + + loss_grid = self.grid_head.loss(grid_pred, grid_targets) + + bbox_results['loss_bbox'].update(loss_grid) + return bbox_results + + def simple_test(self, + x, + proposal_list, + img_metas, + proposals=None, + rescale=False): + """Test without augmentation.""" + assert self.with_bbox, 'Bbox head must be implemented.' + + det_bboxes, det_labels = self.simple_test_bboxes( + x, img_metas, proposal_list, self.test_cfg, rescale=False) + # pack rois into bboxes + grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes]) + if grid_rois.shape[0] != 0: + grid_feats = self.grid_roi_extractor( + x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) + self.grid_head.test_mode = True + grid_pred = self.grid_head(grid_feats) + # split batch grid head prediction back to each image + num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes) + grid_pred = { + k: v.split(num_roi_per_img, 0) + for k, v in grid_pred.items() + } + + # apply bbox post-processing to each image individually + bbox_results = [] + num_imgs = len(det_bboxes) + for i in range(num_imgs): + if det_bboxes[i].shape[0] == 0: + bbox_results.append([ + np.zeros((0, 5), dtype=np.float32) + for _ in range(self.bbox_head.num_classes) + ]) + else: + det_bbox = self.grid_head.get_bboxes( + det_bboxes[i], grid_pred['fused'][i], [img_metas[i]]) + if rescale: + det_bbox[:, :4] /= img_metas[i]['scale_factor'] + bbox_results.append( + bbox2result(det_bbox, det_labels[i], + self.bbox_head.num_classes)) + else: + bbox_results = [[ + np.zeros((0, 5), dtype=np.float32) + for _ in range(self.bbox_head.num_classes) + ] for _ in range(len(det_bboxes))] + + if not self.with_mask: + return bbox_results + else: + segm_results = self.simple_test_mask( + x, img_metas, det_bboxes, det_labels, rescale=rescale) + return list(zip(bbox_results, segm_results)) diff --git a/downstream/mmdetection/mmdet/models/roi_heads/htc_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/htc_roi_head.py new file mode 100644 index 0000000..08bc1db --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/htc_roi_head.py @@ -0,0 +1,628 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
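GridRoIHead's `_random_jitter` helper above perturbs positive proposals in center/size space before computing grid targets. A standalone sketch of that transform on a single box (the box coordinates and amplitude are illustrative):

```python
import torch

amplitude = 0.15
bboxes = torch.tensor([[10., 20., 50., 80.]])  # (x1, y1, x2, y2)
offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(-amplitude, amplitude)

cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2    # box centers
wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()    # box widths/heights

new_cxcy = cxcy + wh * offsets[:, :2]          # shift centers by up to 15% of w/h
new_wh = wh * (1 + offsets[:, 2:])             # rescale w/h by up to +/-15%
new_bboxes = torch.cat([new_cxcy - new_wh / 2, new_cxcy + new_wh / 2], dim=1)
print(new_bboxes)  # jittered boxes; the head additionally clamps them to the image
```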
+import numpy as np +import torch +import torch.nn.functional as F + +from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, + merge_aug_masks, multiclass_nms) +from ..builder import HEADS, build_head, build_roi_extractor +from ..utils.brick_wrappers import adaptive_avg_pool2d +from .cascade_roi_head import CascadeRoIHead + + +@HEADS.register_module() +class HybridTaskCascadeRoIHead(CascadeRoIHead): + """Hybrid task cascade roi head including one bbox head and one mask head. + + https://arxiv.org/abs/1901.07518 + """ + + def __init__(self, + num_stages, + stage_loss_weights, + semantic_roi_extractor=None, + semantic_head=None, + semantic_fusion=('bbox', 'mask'), + interleaved=True, + mask_info_flow=True, + **kwargs): + super(HybridTaskCascadeRoIHead, + self).__init__(num_stages, stage_loss_weights, **kwargs) + assert self.with_bbox + assert not self.with_shared_head # shared head is not supported + + if semantic_head is not None: + self.semantic_roi_extractor = build_roi_extractor( + semantic_roi_extractor) + self.semantic_head = build_head(semantic_head) + + self.semantic_fusion = semantic_fusion + self.interleaved = interleaved + self.mask_info_flow = mask_info_flow + + @property + def with_semantic(self): + """bool: whether the head has semantic head""" + if hasattr(self, 'semantic_head') and self.semantic_head is not None: + return True + else: + return False + + def forward_dummy(self, x, proposals): + """Dummy forward function.""" + outs = () + # semantic head + if self.with_semantic: + _, semantic_feat = self.semantic_head(x) + else: + semantic_feat = None + # bbox heads + rois = bbox2roi([proposals]) + for i in range(self.num_stages): + bbox_results = self._bbox_forward( + i, x, rois, semantic_feat=semantic_feat) + outs = outs + (bbox_results['cls_score'], + bbox_results['bbox_pred']) + # mask heads + if self.with_mask: + mask_rois = rois[:100] + mask_roi_extractor = self.mask_roi_extractor[-1] + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor( + [semantic_feat], mask_rois) + mask_feats += mask_semantic_feat + last_feat = None + for i in range(self.num_stages): + mask_head = self.mask_head[i] + if self.mask_info_flow: + mask_pred, last_feat = mask_head(mask_feats, last_feat) + else: + mask_pred = mask_head(mask_feats) + outs = outs + (mask_pred, ) + return outs + + def _bbox_forward_train(self, + stage, + x, + sampling_results, + gt_bboxes, + gt_labels, + rcnn_train_cfg, + semantic_feat=None): + """Run forward function and calculate loss for box head in training.""" + bbox_head = self.bbox_head[stage] + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_results = self._bbox_forward( + stage, x, rois, semantic_feat=semantic_feat) + + bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, + gt_labels, rcnn_train_cfg) + loss_bbox = bbox_head.loss(bbox_results['cls_score'], + bbox_results['bbox_pred'], rois, + *bbox_targets) + + bbox_results.update( + loss_bbox=loss_bbox, + rois=rois, + bbox_targets=bbox_targets, + ) + return bbox_results + + def _mask_forward_train(self, + stage, + x, + sampling_results, + gt_masks, + rcnn_train_cfg, + semantic_feat=None): + """Run forward function and calculate loss for mask head in + training.""" + mask_roi_extractor = self.mask_roi_extractor[stage] + mask_head = self.mask_head[stage] + pos_rois = bbox2roi([res.pos_bboxes for res in 
sampling_results]) + mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], + pos_rois) + + # semantic feature fusion + # element-wise sum for original features and pooled semantic features + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], + pos_rois) + if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: + mask_semantic_feat = F.adaptive_avg_pool2d( + mask_semantic_feat, mask_feats.shape[-2:]) + mask_feats += mask_semantic_feat + + # mask information flow + # forward all previous mask heads to obtain last_feat, and fuse it + # with the normal mask feature + if self.mask_info_flow: + last_feat = None + for i in range(stage): + last_feat = self.mask_head[i]( + mask_feats, last_feat, return_logits=False) + mask_pred = mask_head(mask_feats, last_feat, return_feat=False) + else: + mask_pred = mask_head(mask_feats, return_feat=False) + + mask_targets = mask_head.get_targets(sampling_results, gt_masks, + rcnn_train_cfg) + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) + + mask_results = dict(loss_mask=loss_mask) + return mask_results + + def _bbox_forward(self, stage, x, rois, semantic_feat=None): + """Box head forward function used in both training and testing.""" + bbox_roi_extractor = self.bbox_roi_extractor[stage] + bbox_head = self.bbox_head[stage] + bbox_feats = bbox_roi_extractor( + x[:len(bbox_roi_extractor.featmap_strides)], rois) + if self.with_semantic and 'bbox' in self.semantic_fusion: + bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], + rois) + if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: + bbox_semantic_feat = adaptive_avg_pool2d( + bbox_semantic_feat, bbox_feats.shape[-2:]) + bbox_feats += bbox_semantic_feat + cls_score, bbox_pred = bbox_head(bbox_feats) + + bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred) + return bbox_results + + def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None): + """Mask head forward function for testing.""" + mask_roi_extractor = self.mask_roi_extractor[stage] + mask_head = self.mask_head[stage] + mask_rois = bbox2roi([bboxes]) + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], + mask_rois) + if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: + mask_semantic_feat = F.adaptive_avg_pool2d( + mask_semantic_feat, mask_feats.shape[-2:]) + mask_feats += mask_semantic_feat + if self.mask_info_flow: + last_feat = None + last_pred = None + for i in range(stage): + mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat) + if last_pred is not None: + mask_pred = mask_pred + last_pred + last_pred = mask_pred + mask_pred = mask_head(mask_feats, last_feat, return_feat=False) + if last_pred is not None: + mask_pred = mask_pred + last_pred + else: + mask_pred = mask_head(mask_feats) + return mask_pred + + def forward_train(self, + x, + img_metas, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + gt_semantic_seg=None): + """ + Args: + x (list[Tensor]): list of multi-level img features. + + img_metas (list[dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. 
+ For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + + proposal_list (list[Tensors]): list of region proposals. + + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + + gt_labels (list[Tensor]): class indices corresponding to each box + + gt_bboxes_ignore (None, list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + gt_masks (None, Tensor) : true segmentation masks for each box + used if the architecture supports a segmentation task. + + gt_semantic_seg (None, list[Tensor]): semantic segmentation masks + used if the architecture supports semantic segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # semantic segmentation part + # 2 outputs: segmentation prediction and embedded features + losses = dict() + if self.with_semantic: + semantic_pred, semantic_feat = self.semantic_head(x) + loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) + losses['loss_semantic_seg'] = loss_seg + else: + semantic_feat = None + + for i in range(self.num_stages): + self.current_stage = i + rcnn_train_cfg = self.train_cfg[i] + lw = self.stage_loss_weights[i] + + # assign gts and sample proposals + sampling_results = [] + bbox_assigner = self.bbox_assigner[i] + bbox_sampler = self.bbox_sampler[i] + num_imgs = len(img_metas) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + + for j in range(num_imgs): + assign_result = bbox_assigner.assign(proposal_list[j], + gt_bboxes[j], + gt_bboxes_ignore[j], + gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + bbox_results = \ + self._bbox_forward_train( + i, x, sampling_results, gt_bboxes, gt_labels, + rcnn_train_cfg, semantic_feat) + roi_labels = bbox_results['bbox_targets'][0] + + for name, value in bbox_results['loss_bbox'].items(): + losses[f's{i}.{name}'] = ( + value * lw if 'loss' in name else value) + + # mask head forward and loss + if self.with_mask: + # interleaved execution: use regressed bboxes by the box branch + # to train the mask branch + if self.interleaved: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + with torch.no_grad(): + proposal_list = self.bbox_head[i].refine_bboxes( + bbox_results['rois'], roi_labels, + bbox_results['bbox_pred'], pos_is_gts, img_metas) + # re-assign and sample 512 RoIs from 512 RoIs + sampling_results = [] + for j in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[j], gt_bboxes[j], + gt_bboxes_ignore[j], gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + mask_results = self._mask_forward_train( + i, x, sampling_results, gt_masks, rcnn_train_cfg, + semantic_feat) + for name, value in mask_results['loss_mask'].items(): + losses[f's{i}.{name}'] = ( + value * lw if 'loss' in name else value) + + # refine bboxes (same as Cascade R-CNN) + if i < self.num_stages - 1 and not self.interleaved: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + with torch.no_grad(): + proposal_list = self.bbox_head[i].refine_bboxes( + bbox_results['rois'], roi_labels, + bbox_results['bbox_pred'], pos_is_gts, 
img_metas) + + return losses + + def simple_test(self, x, proposal_list, img_metas, rescale=False): + """Test without augmentation. + + Args: + x (tuple[Tensor]): Features from upstream network. Each + has shape (batch_size, c, h, w). + proposal_list (list(Tensor)): Proposals from rpn head. + Each has shape (num_proposals, 5), last dimension + 5 represent (x1, y1, x2, y2, score). + img_metas (list[dict]): Meta information of images. + rescale (bool): Whether to rescale the results to + the original image. Default: True. + + Returns: + list[list[np.ndarray]] or list[tuple]: When no mask branch, + it is bbox results of each image and classes with type + `list[list[np.ndarray]]`. The outer list + corresponds to each image. The inner list + corresponds to each class. When the model has mask branch, + it contains bbox results and mask results. + The outer list corresponds to each image, and first element + of tuple is bbox results, second element is mask results. + """ + if self.with_semantic: + _, semantic_feat = self.semantic_head(x) + else: + semantic_feat = None + + num_imgs = len(proposal_list) + img_shapes = tuple(meta['img_shape'] for meta in img_metas) + ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + # "ms" in variable names means multi-stage + ms_bbox_result = {} + ms_segm_result = {} + ms_scores = [] + rcnn_test_cfg = self.test_cfg + + rois = bbox2roi(proposal_list) + + if rois.shape[0] == 0: + # There is no proposal in the whole batch + bbox_results = [[ + np.zeros((0, 5), dtype=np.float32) + for _ in range(self.bbox_head[-1].num_classes) + ]] * num_imgs + + if self.with_mask: + mask_classes = self.mask_head[-1].num_classes + segm_results = [[[] for _ in range(mask_classes)] + for _ in range(num_imgs)] + results = list(zip(bbox_results, segm_results)) + else: + results = bbox_results + + return results + + for i in range(self.num_stages): + bbox_head = self.bbox_head[i] + bbox_results = self._bbox_forward( + i, x, rois, semantic_feat=semantic_feat) + # split batch bbox prediction back to each image + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + num_proposals_per_img = tuple(len(p) for p in proposal_list) + rois = rois.split(num_proposals_per_img, 0) + cls_score = cls_score.split(num_proposals_per_img, 0) + bbox_pred = bbox_pred.split(num_proposals_per_img, 0) + ms_scores.append(cls_score) + + if i < self.num_stages - 1: + refine_rois_list = [] + for j in range(num_imgs): + if rois[j].shape[0] > 0: + bbox_label = cls_score[j][:, :-1].argmax(dim=1) + refine_rois = bbox_head.regress_by_class( + rois[j], bbox_label, bbox_pred[j], img_metas[j]) + refine_rois_list.append(refine_rois) + rois = torch.cat(refine_rois_list) + + # average scores of each image by stages + cls_score = [ + sum([score[i] for score in ms_scores]) / float(len(ms_scores)) + for i in range(num_imgs) + ] + + # apply bbox post-processing to each image individually + det_bboxes = [] + det_labels = [] + for i in range(num_imgs): + det_bbox, det_label = self.bbox_head[-1].get_bboxes( + rois[i], + cls_score[i], + bbox_pred[i], + img_shapes[i], + scale_factors[i], + rescale=rescale, + cfg=rcnn_test_cfg) + det_bboxes.append(det_bbox) + det_labels.append(det_label) + bbox_result = [ + bbox2result(det_bboxes[i], det_labels[i], + self.bbox_head[-1].num_classes) + for i in range(num_imgs) + ] + ms_bbox_result['ensemble'] = bbox_result + + if self.with_mask: + if all(det_bbox.shape[0] == 0 for det_bbox in 
det_bboxes): + mask_classes = self.mask_head[-1].num_classes + segm_results = [[[] for _ in range(mask_classes)] + for _ in range(num_imgs)] + else: + if rescale and not isinstance(scale_factors[0], float): + scale_factors = [ + torch.from_numpy(scale_factor).to(det_bboxes[0].device) + for scale_factor in scale_factors + ] + _bboxes = [ + det_bboxes[i][:, :4] * + scale_factors[i] if rescale else det_bboxes[i] + for i in range(num_imgs) + ] + mask_rois = bbox2roi(_bboxes) + aug_masks = [] + mask_roi_extractor = self.mask_roi_extractor[-1] + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor( + [semantic_feat], mask_rois) + mask_feats += mask_semantic_feat + last_feat = None + + num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) + for i in range(self.num_stages): + mask_head = self.mask_head[i] + if self.mask_info_flow: + mask_pred, last_feat = mask_head(mask_feats, last_feat) + else: + mask_pred = mask_head(mask_feats) + + # split batch mask prediction back to each image + mask_pred = mask_pred.split(num_bbox_per_img, 0) + aug_masks.append( + [mask.sigmoid().cpu().numpy() for mask in mask_pred]) + + # apply mask post-processing to each image individually + segm_results = [] + for i in range(num_imgs): + if det_bboxes[i].shape[0] == 0: + segm_results.append( + [[] + for _ in range(self.mask_head[-1].num_classes)]) + else: + aug_mask = [mask[i] for mask in aug_masks] + merged_mask = merge_aug_masks( + aug_mask, [[img_metas[i]]] * self.num_stages, + rcnn_test_cfg) + segm_result = self.mask_head[-1].get_seg_masks( + merged_mask, _bboxes[i], det_labels[i], + rcnn_test_cfg, ori_shapes[i], scale_factors[i], + rescale) + segm_results.append(segm_result) + ms_segm_result['ensemble'] = segm_results + + if self.with_mask: + results = list( + zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) + else: + results = ms_bbox_result['ensemble'] + + return results + + def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): + """Test with augmentations. + + If rescale is False, then returned bboxes and masks will fit the scale + of imgs[0]. 
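+
+        For example, with horizontal-flip test-time augmentation the
+        proposals are first mapped into each augmented frame via
+        ``bbox_mapping``; after ``merge_aug_bboxes`` the returned boxes
+        are expressed in the original image frame.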
+ """ + if self.with_semantic: + semantic_feats = [ + self.semantic_head(feat)[1] for feat in img_feats + ] + else: + semantic_feats = [None] * len(img_metas) + + rcnn_test_cfg = self.test_cfg + aug_bboxes = [] + aug_scores = [] + for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats): + # only one image in the batch + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + flip_direction = img_meta[0]['flip_direction'] + + proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, + scale_factor, flip, flip_direction) + # "ms" in variable names means multi-stage + ms_scores = [] + + rois = bbox2roi([proposals]) + + if rois.shape[0] == 0: + # There is no proposal in the single image + aug_bboxes.append(rois.new_zeros(0, 4)) + aug_scores.append(rois.new_zeros(0, 1)) + continue + + for i in range(self.num_stages): + bbox_head = self.bbox_head[i] + bbox_results = self._bbox_forward( + i, x, rois, semantic_feat=semantic) + ms_scores.append(bbox_results['cls_score']) + + if i < self.num_stages - 1: + bbox_label = bbox_results['cls_score'].argmax(dim=1) + rois = bbox_head.regress_by_class( + rois, bbox_label, bbox_results['bbox_pred'], + img_meta[0]) + + cls_score = sum(ms_scores) / float(len(ms_scores)) + bboxes, scores = self.bbox_head[-1].get_bboxes( + rois, + cls_score, + bbox_results['bbox_pred'], + img_shape, + scale_factor, + rescale=False, + cfg=None) + aug_bboxes.append(bboxes) + aug_scores.append(scores) + + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) + det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, + rcnn_test_cfg.score_thr, + rcnn_test_cfg.nms, + rcnn_test_cfg.max_per_img) + + bbox_result = bbox2result(det_bboxes, det_labels, + self.bbox_head[-1].num_classes) + + if self.with_mask: + if det_bboxes.shape[0] == 0: + segm_result = [[] + for _ in range(self.mask_head[-1].num_classes)] + else: + aug_masks = [] + aug_img_metas = [] + for x, img_meta, semantic in zip(img_feats, img_metas, + semantic_feats): + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + flip_direction = img_meta[0]['flip_direction'] + _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, + scale_factor, flip, flip_direction) + mask_rois = bbox2roi([_bboxes]) + mask_feats = self.mask_roi_extractor[-1]( + x[:len(self.mask_roi_extractor[-1].featmap_strides)], + mask_rois) + if self.with_semantic: + semantic_feat = semantic + mask_semantic_feat = self.semantic_roi_extractor( + [semantic_feat], mask_rois) + if mask_semantic_feat.shape[-2:] != mask_feats.shape[ + -2:]: + mask_semantic_feat = F.adaptive_avg_pool2d( + mask_semantic_feat, mask_feats.shape[-2:]) + mask_feats += mask_semantic_feat + last_feat = None + for i in range(self.num_stages): + mask_head = self.mask_head[i] + if self.mask_info_flow: + mask_pred, last_feat = mask_head( + mask_feats, last_feat) + else: + mask_pred = mask_head(mask_feats) + aug_masks.append(mask_pred.sigmoid().cpu().numpy()) + aug_img_metas.append(img_meta) + merged_masks = merge_aug_masks(aug_masks, aug_img_metas, + self.test_cfg) + + ori_shape = img_metas[0][0]['ori_shape'] + segm_result = self.mask_head[-1].get_seg_masks( + merged_masks, + det_bboxes, + det_labels, + rcnn_test_cfg, + ori_shape, + scale_factor=1.0, + rescale=False) + return [(bbox_result, segm_result)] + else: + return [bbox_result] diff 
--git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/__init__.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/__init__.py new file mode 100644 index 0000000..48a5d42 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .coarse_mask_head import CoarseMaskHead +from .dynamic_mask_head import DynamicMaskHead +from .fcn_mask_head import FCNMaskHead +from .feature_relay_head import FeatureRelayHead +from .fused_semantic_head import FusedSemanticHead +from .global_context_head import GlobalContextHead +from .grid_head import GridHead +from .htc_mask_head import HTCMaskHead +from .mask_point_head import MaskPointHead +from .maskiou_head import MaskIoUHead +from .scnet_mask_head import SCNetMaskHead +from .scnet_semantic_head import SCNetSemanticHead + +__all__ = [ + 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', + 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead', + 'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead', + 'DynamicMaskHead' +] diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py new file mode 100644 index 0000000..946254c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import ConvModule, Linear +from mmcv.runner import ModuleList, auto_fp16 + +from mmdet.models.builder import HEADS +from .fcn_mask_head import FCNMaskHead + + +@HEADS.register_module() +class CoarseMaskHead(FCNMaskHead): + """Coarse mask head used in PointRend. + + Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample + the input feature map instead of upsample it. + + Args: + num_convs (int): Number of conv layers in the head. Default: 0. + num_fcs (int): Number of fc layers in the head. Default: 2. + fc_out_channels (int): Number of output channels of fc layer. + Default: 1024. + downsample_factor (int): The factor that feature map is downsampled by. + Default: 2. + init_cfg (dict or list[dict], optional): Initialization config dict. 
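+
+    Example:
+        With the inherited defaults ``roi_feat_size=14`` and
+        ``downsample_factor=2``, the head predicts coarse masks on a
+        7x7 grid, i.e. ``forward`` returns a tensor of shape
+        ``(num_rois, num_classes, 7, 7)`` (shapes are illustrative).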
+ """ + + def __init__(self, + num_convs=0, + num_fcs=2, + fc_out_channels=1024, + downsample_factor=2, + init_cfg=dict( + type='Xavier', + override=[ + dict(name='fcs'), + dict(type='Constant', val=0.001, name='fc_logits') + ]), + *arg, + **kwarg): + super(CoarseMaskHead, self).__init__( + *arg, + num_convs=num_convs, + upsample_cfg=dict(type=None), + init_cfg=None, + **kwarg) + self.init_cfg = init_cfg + self.num_fcs = num_fcs + assert self.num_fcs > 0 + self.fc_out_channels = fc_out_channels + self.downsample_factor = downsample_factor + assert self.downsample_factor >= 1 + # remove conv_logit + delattr(self, 'conv_logits') + + if downsample_factor > 1: + downsample_in_channels = ( + self.conv_out_channels + if self.num_convs > 0 else self.in_channels) + self.downsample_conv = ConvModule( + downsample_in_channels, + self.conv_out_channels, + kernel_size=downsample_factor, + stride=downsample_factor, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + else: + self.downsample_conv = None + + self.output_size = (self.roi_feat_size[0] // downsample_factor, + self.roi_feat_size[1] // downsample_factor) + self.output_area = self.output_size[0] * self.output_size[1] + + last_layer_dim = self.conv_out_channels * self.output_area + + self.fcs = ModuleList() + for i in range(num_fcs): + fc_in_channels = ( + last_layer_dim if i == 0 else self.fc_out_channels) + self.fcs.append(Linear(fc_in_channels, self.fc_out_channels)) + last_layer_dim = self.fc_out_channels + output_channels = self.num_classes * self.output_area + self.fc_logits = Linear(last_layer_dim, output_channels) + + def init_weights(self): + super(FCNMaskHead, self).init_weights() + + @auto_fp16() + def forward(self, x): + for conv in self.convs: + x = conv(x) + + if self.downsample_conv is not None: + x = self.downsample_conv(x) + + x = x.flatten(1) + for fc in self.fcs: + x = self.relu(fc(x)) + mask_pred = self.fc_logits(x).view( + x.size(0), self.num_classes, *self.output_size) + return mask_pred diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py new file mode 100644 index 0000000..5bbe7ee --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py @@ -0,0 +1,147 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.runner import auto_fp16, force_fp32 + +from mmdet.core import mask_target +from mmdet.models.builder import HEADS +from mmdet.models.dense_heads.atss_head import reduce_mean +from mmdet.models.utils import build_transformer +from .fcn_mask_head import FCNMaskHead + + +@HEADS.register_module() +class DynamicMaskHead(FCNMaskHead): + r"""Dynamic Mask Head for + `Instances as Queries `_ + + Args: + num_convs (int): Number of convolution layer. + Defaults to 4. + roi_feat_size (int): The output size of RoI extractor, + Defaults to 14. + in_channels (int): Input feature channels. + Defaults to 256. + conv_kernel_size (int): Kernel size of convolution layers. + Defaults to 3. + conv_out_channels (int): Output channels of convolution layers. + Defaults to 256. + num_classes (int): Number of classes. + Defaults to 80 + class_agnostic (int): Whether generate class agnostic prediction. + Defaults to False. + dropout (float): Probability of drop the channel. + Defaults to 0.0 + upsample_cfg (dict): The config for upsample layer. + conv_cfg (dict): The convolution layer config. + norm_cfg (dict): The norm layer config. 
+ dynamic_conv_cfg (dict): The dynamic convolution layer config. + loss_mask (dict): The config for mask loss. + """ + + def __init__(self, + num_convs=4, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + conv_out_channels=256, + num_classes=80, + class_agnostic=False, + upsample_cfg=dict(type='deconv', scale_factor=2), + conv_cfg=None, + norm_cfg=None, + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=14, + with_proj=False, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + loss_mask=dict(type='DiceLoss', loss_weight=8.0), + **kwargs): + super(DynamicMaskHead, self).__init__( + num_convs=num_convs, + roi_feat_size=roi_feat_size, + in_channels=in_channels, + conv_kernel_size=conv_kernel_size, + conv_out_channels=conv_out_channels, + num_classes=num_classes, + class_agnostic=class_agnostic, + upsample_cfg=upsample_cfg, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + loss_mask=loss_mask, + **kwargs) + assert class_agnostic is False, \ + 'DynamicMaskHead only support class_agnostic=False' + self.fp16_enabled = False + + self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) + + def init_weights(self): + """Use xavier initialization for all weight parameter and set + classification head bias as a specific value when use focal loss.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + nn.init.constant_(self.conv_logits.bias, 0.) + + @auto_fp16() + def forward(self, roi_feat, proposal_feat): + """Forward function of DynamicMaskHead. + + Args: + roi_feat (Tensor): Roi-pooling features with shape + (batch_size*num_proposals, feature_dimensions, + pooling_h , pooling_w). + proposal_feat (Tensor): Intermediate feature get from + diihead in last stage, has shape + (batch_size*num_proposals, feature_dimensions) + + Returns: + mask_pred (Tensor): Predicted foreground masks with shape + (batch_size*num_proposals, num_classes, + pooling_h*2, pooling_w*2). 
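+
+            For example, with the default ``roi_feat_size=14`` and
+            ``deconv`` upsampling with ``scale_factor=2``, the predicted
+            masks are 28x28 for each proposal (shapes are illustrative).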
+ """ + + proposal_feat = proposal_feat.reshape(-1, self.in_channels) + proposal_feat_iic = self.instance_interactive_conv( + proposal_feat, roi_feat) + + x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size()) + + for conv in self.convs: + x = conv(x) + if self.upsample is not None: + x = self.upsample(x) + if self.upsample_method == 'deconv': + x = self.relu(x) + mask_pred = self.conv_logits(x) + return mask_pred + + @force_fp32(apply_to=('mask_pred', )) + def loss(self, mask_pred, mask_targets, labels): + num_pos = labels.new_ones(labels.size()).float().sum() + avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item() + loss = dict() + if mask_pred.size(0) == 0: + loss_mask = mask_pred.sum() + else: + loss_mask = self.loss_mask( + mask_pred[torch.arange(num_pos).long(), labels, ...].sigmoid(), + mask_targets, + avg_factor=avg_factor) + loss['loss_mask'] = loss_mask + return loss + + def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): + + pos_proposals = [res.pos_bboxes for res in sampling_results] + pos_assigned_gt_inds = [ + res.pos_assigned_gt_inds for res in sampling_results + ] + mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, + gt_masks, rcnn_train_cfg) + return mask_targets diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py new file mode 100644 index 0000000..c444641 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py @@ -0,0 +1,414 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from warnings import warn + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer +from mmcv.ops.carafe import CARAFEPack +from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32 +from torch.nn.modules.utils import _pair + +from mmdet.core import mask_target +from mmdet.models.builder import HEADS, build_loss + +BYTES_PER_FLOAT = 4 +# TODO: This memory limit may be too much or too little. It would be better to +# determine it based on available resources. 
+GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit + + +@HEADS.register_module() +class FCNMaskHead(BaseModule): + + def __init__(self, + num_convs=4, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + conv_out_channels=256, + num_classes=80, + class_agnostic=False, + upsample_cfg=dict(type='deconv', scale_factor=2), + conv_cfg=None, + norm_cfg=None, + predictor_cfg=dict(type='Conv'), + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), + init_cfg=None): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super(FCNMaskHead, self).__init__(init_cfg) + self.upsample_cfg = upsample_cfg.copy() + if self.upsample_cfg['type'] not in [ + None, 'deconv', 'nearest', 'bilinear', 'carafe' + ]: + raise ValueError( + f'Invalid upsample method {self.upsample_cfg["type"]}, ' + 'accepted methods are "deconv", "nearest", "bilinear", ' + '"carafe"') + self.num_convs = num_convs + # WARN: roi_feat_size is reserved and not used + self.roi_feat_size = _pair(roi_feat_size) + self.in_channels = in_channels + self.conv_kernel_size = conv_kernel_size + self.conv_out_channels = conv_out_channels + self.upsample_method = self.upsample_cfg.get('type') + self.scale_factor = self.upsample_cfg.pop('scale_factor', None) + self.num_classes = num_classes + self.class_agnostic = class_agnostic + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.predictor_cfg = predictor_cfg + self.fp16_enabled = False + self.loss_mask = build_loss(loss_mask) + + self.convs = ModuleList() + for i in range(self.num_convs): + in_channels = ( + self.in_channels if i == 0 else self.conv_out_channels) + padding = (self.conv_kernel_size - 1) // 2 + self.convs.append( + ConvModule( + in_channels, + self.conv_out_channels, + self.conv_kernel_size, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + upsample_in_channels = ( + self.conv_out_channels if self.num_convs > 0 else in_channels) + upsample_cfg_ = self.upsample_cfg.copy() + if self.upsample_method is None: + self.upsample = None + elif self.upsample_method == 'deconv': + upsample_cfg_.update( + in_channels=upsample_in_channels, + out_channels=self.conv_out_channels, + kernel_size=self.scale_factor, + stride=self.scale_factor) + self.upsample = build_upsample_layer(upsample_cfg_) + elif self.upsample_method == 'carafe': + upsample_cfg_.update( + channels=upsample_in_channels, scale_factor=self.scale_factor) + self.upsample = build_upsample_layer(upsample_cfg_) + else: + # suppress warnings + align_corners = (None + if self.upsample_method == 'nearest' else False) + upsample_cfg_.update( + scale_factor=self.scale_factor, + mode=self.upsample_method, + align_corners=align_corners) + self.upsample = build_upsample_layer(upsample_cfg_) + + out_channels = 1 if self.class_agnostic else self.num_classes + logits_in_channel = ( + self.conv_out_channels + if self.upsample_method == 'deconv' else upsample_in_channels) + self.conv_logits = build_conv_layer(self.predictor_cfg, + logits_in_channel, out_channels, 1) + self.relu = nn.ReLU(inplace=True) + self.debug_imgs = None + + def init_weights(self): + super(FCNMaskHead, self).init_weights() + for m in [self.upsample, self.conv_logits]: + if m is None: + continue + elif isinstance(m, CARAFEPack): + m.init_weights() + elif hasattr(m, 'weight') and hasattr(m, 'bias'): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + nn.init.constant_(m.bias, 0) + + @auto_fp16() + def forward(self, x): + # if x.size(0) == 0: # 
dummy x + # return x + for conv in self.convs: + x = conv(x) + if self.upsample is not None: + x = self.upsample(x) + if self.upsample_method == 'deconv': + x = self.relu(x) + mask_pred = self.conv_logits(x) + return mask_pred + + def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): + pos_proposals = [res.pos_bboxes for res in sampling_results] + pos_assigned_gt_inds = [ + res.pos_assigned_gt_inds for res in sampling_results + ] + mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, + gt_masks, rcnn_train_cfg) + return mask_targets + + @force_fp32(apply_to=('mask_pred', )) + def loss(self, mask_pred, mask_targets, labels): + """ + Example: + >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA + >>> N = 7 # N = number of extracted ROIs + >>> C, H, W = 11, 32, 32 + >>> # Create example instance of FCN Mask Head. + >>> # There are lots of variations depending on the configuration + >>> self = FCNMaskHead(num_classes=C, num_convs=1) + >>> inputs = torch.rand(N, self.in_channels, H, W) + >>> mask_pred = self.forward(inputs) + >>> sf = self.scale_factor + >>> labels = torch.randint(0, C, size=(N,)) + >>> # With the default properties the mask targets should indicate + >>> # a (potentially soft) single-class label + >>> mask_targets = torch.rand(N, H * sf, W * sf) + >>> loss = self.loss(mask_pred, mask_targets, labels) + >>> print('loss = {!r}'.format(loss)) + """ + loss = dict() + if mask_pred.size(0) == 0: + loss_mask = mask_pred.sum() + else: + if self.class_agnostic: + loss_mask = self.loss_mask(mask_pred, mask_targets, + torch.zeros_like(labels)) + else: + loss_mask = self.loss_mask(mask_pred, mask_targets, labels) + loss['loss_mask'] = loss_mask + return loss + + def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale): + """Get segmentation masks from mask_pred and bboxes. + + Args: + mask_pred (Tensor or ndarray): shape (n, #class, h, w). + For single-scale testing, mask_pred is the direct output of + model, whose type is Tensor, while for multi-scale testing, + it will be converted to numpy array outside of this method. + det_bboxes (Tensor): shape (n, 4/5) + det_labels (Tensor): shape (n, ) + rcnn_test_cfg (dict): rcnn testing config + ori_shape (Tuple): original image height and width, shape (2,) + scale_factor(ndarray | Tensor): If ``rescale is True``, box + coordinates are divided by this scale factor to fit + ``ori_shape``. + rescale (bool): If True, the resulting masks will be rescaled to + ``ori_shape``. + + Returns: + list[list]: encoded masks. The c-th item in the outer list + corresponds to the c-th class. Given the c-th outer list, the + i-th item in that inner list is the mask for the i-th box with + class label c. + + Example: + >>> import mmcv + >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA + >>> N = 7 # N = number of extracted ROIs + >>> C, H, W = 11, 32, 32 + >>> # Create example instance of FCN Mask Head. + >>> self = FCNMaskHead(num_classes=C, num_convs=0) + >>> inputs = torch.rand(N, self.in_channels, H, W) + >>> mask_pred = self.forward(inputs) + >>> # Each input is associated with some bounding box + >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N) + >>> det_labels = torch.randint(0, C, size=(N,)) + >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, }) + >>> ori_shape = (H * 4, W * 4) + >>> scale_factor = torch.FloatTensor((1, 1)) + >>> rescale = False + >>> # Encoded masks are a list for each category. 
+ >>> encoded_masks = self.get_seg_masks( + >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, + >>> scale_factor, rescale + >>> ) + >>> assert len(encoded_masks) == C + >>> assert sum(list(map(len, encoded_masks))) == N + """ + if isinstance(mask_pred, torch.Tensor): + mask_pred = mask_pred.sigmoid() + else: + # In AugTest, has been activated before + mask_pred = det_bboxes.new_tensor(mask_pred) + + device = mask_pred.device + cls_segms = [[] for _ in range(self.num_classes) + ] # BG is not included in num_classes + bboxes = det_bboxes[:, :4] + labels = det_labels + + # In most cases, scale_factor should have been + # converted to Tensor when rescale the bbox + if not isinstance(scale_factor, torch.Tensor): + if isinstance(scale_factor, float): + scale_factor = np.array([scale_factor] * 4) + warn('Scale_factor should be a Tensor or ndarray ' + 'with shape (4,), float would be deprecated. ') + assert isinstance(scale_factor, np.ndarray) + scale_factor = torch.Tensor(scale_factor) + + if rescale: + img_h, img_w = ori_shape[:2] + bboxes = bboxes / scale_factor.to(bboxes) + else: + w_scale, h_scale = scale_factor[0], scale_factor[1] + img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32) + img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32) + + N = len(mask_pred) + # The actual implementation split the input into chunks, + # and paste them chunk by chunk. + if device.type == 'cpu': + # CPU is most efficient when they are pasted one by one with + # skip_empty=True, so that it performs minimal number of + # operations. + num_chunks = N + else: + # GPU benefits from parallelism for larger chunks, + # but may have memory issue + # the types of img_w and img_h are np.int32, + # when the image resolution is large, + # the calculation of num_chunks will overflow. + # so we need to change the types of img_w and img_h to int. + # See https://github.com/open-mmlab/mmdetection/pull/5191 + num_chunks = int( + np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / + GPU_MEM_LIMIT)) + assert (num_chunks <= + N), 'Default GPU_MEM_LIMIT is too small; try increasing it' + chunks = torch.chunk(torch.arange(N, device=device), num_chunks) + + threshold = rcnn_test_cfg.mask_thr_binary + im_mask = torch.zeros( + N, + img_h, + img_w, + device=device, + dtype=torch.bool if threshold >= 0 else torch.uint8) + + if not self.class_agnostic: + mask_pred = mask_pred[range(N), labels][:, None] + + for inds in chunks: + masks_chunk, spatial_inds = _do_paste_mask( + mask_pred[inds], + bboxes[inds], + img_h, + img_w, + skip_empty=device.type == 'cpu') + + if threshold >= 0: + masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) + else: + # for visualization and debugging + masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) + + im_mask[(inds, ) + spatial_inds] = masks_chunk + + for i in range(N): + cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy()) + return cls_segms + + def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, + ori_shape, **kwargs): + """Get segmentation masks from mask_pred and bboxes. + + Args: + mask_pred (Tensor): shape (n, #class, h, w). + det_bboxes (Tensor): shape (n, 4/5) + det_labels (Tensor): shape (n, ) + rcnn_test_cfg (dict): rcnn testing config + ori_shape (Tuple): original image height and width, shape (2,) + + Returns: + Tensor: a mask of shape (N, img_h, img_w). 
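+
+            For example, exporting ``N`` detections with
+            ``ori_shape=(800, 1333)`` yields a pasted mask tensor of
+            shape ``(N, 800, 1333)`` (shape is illustrative).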
+ """ + + mask_pred = mask_pred.sigmoid() + bboxes = det_bboxes[:, :4] + labels = det_labels + # No need to consider rescale and scale_factor while exporting to ONNX + img_h, img_w = ori_shape[:2] + threshold = rcnn_test_cfg.mask_thr_binary + if not self.class_agnostic: + box_inds = torch.arange(mask_pred.shape[0]) + mask_pred = mask_pred[box_inds, labels][:, None] + masks, _ = _do_paste_mask( + mask_pred, bboxes, img_h, img_w, skip_empty=False) + if threshold >= 0: + # should convert to float to avoid problems in TRT + masks = (masks >= threshold).to(dtype=torch.float) + return masks + + +def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): + """Paste instance masks according to boxes. + + This implementation is modified from + https://github.com/facebookresearch/detectron2/ + + Args: + masks (Tensor): N, 1, H, W + boxes (Tensor): N, 4 + img_h (int): Height of the image to be pasted. + img_w (int): Width of the image to be pasted. + skip_empty (bool): Only paste masks within the region that + tightly bound all boxes, and returns the results this region only. + An important optimization for CPU. + + Returns: + tuple: (Tensor, tuple). The first item is mask tensor, the second one + is the slice object. + If skip_empty == False, the whole image will be pasted. It will + return a mask of shape (N, img_h, img_w) and an empty tuple. + If skip_empty == True, only area around the mask will be pasted. + A mask of shape (N, h', w') and its start and end coordinates + in the original image will be returned. + """ + # On GPU, paste all masks together (up to chunk size) + # by using the entire image to sample the masks + # Compared to pasting them one by one, + # this has more operations but is faster on COCO-scale dataset. + device = masks.device + if skip_empty: + x0_int, y0_int = torch.clamp( + boxes.min(dim=0).values.floor()[:2] - 1, + min=0).to(dtype=torch.int32) + x1_int = torch.clamp( + boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) + y1_int = torch.clamp( + boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) + else: + x0_int, y0_int = 0, 0 + x1_int, y1_int = img_w, img_h + x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 + + N = masks.shape[0] + + img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5 + img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5 + img_y = (img_y - y0) / (y1 - y0) * 2 - 1 + img_x = (img_x - x0) / (x1 - x0) * 2 - 1 + # img_x, img_y have shapes (N, w), (N, h) + # IsInf op is not supported with ONNX<=1.7.0 + if not torch.onnx.is_in_onnx_export(): + if torch.isinf(img_x).any(): + inds = torch.where(torch.isinf(img_x)) + img_x[inds] = 0 + if torch.isinf(img_y).any(): + inds = torch.where(torch.isinf(img_y)) + img_y[inds] = 0 + + gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) + gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) + grid = torch.stack([gx, gy], dim=3) + + img_masks = F.grid_sample( + masks.to(dtype=torch.float32), grid, align_corners=False) + + if skip_empty: + return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) + else: + return img_masks[:, 0], () diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/feature_relay_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/feature_relay_head.py new file mode 100644 index 0000000..452f37a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/feature_relay_head.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import torch.nn as nn +from mmcv.runner import BaseModule, auto_fp16 + +from mmdet.models.builder import HEADS + + +@HEADS.register_module() +class FeatureRelayHead(BaseModule): + """Feature Relay Head used in `SCNet `_. + + Args: + in_channels (int, optional): number of input channels. Default: 256. + conv_out_channels (int, optional): number of output channels before + classification layer. Default: 256. + roi_feat_size (int, optional): roi feat size at box head. Default: 7. + scale_factor (int, optional): scale factor to match roi feat size + at mask head. Default: 2. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + in_channels=1024, + out_conv_channels=256, + roi_feat_size=7, + scale_factor=2, + init_cfg=dict(type='Kaiming', layer='Linear')): + super(FeatureRelayHead, self).__init__(init_cfg) + assert isinstance(roi_feat_size, int) + + self.in_channels = in_channels + self.out_conv_channels = out_conv_channels + self.roi_feat_size = roi_feat_size + self.out_channels = (roi_feat_size**2) * out_conv_channels + self.scale_factor = scale_factor + self.fp16_enabled = False + + self.fc = nn.Linear(self.in_channels, self.out_channels) + self.upsample = nn.Upsample( + scale_factor=scale_factor, mode='bilinear', align_corners=True) + + @auto_fp16() + def forward(self, x): + """Forward function.""" + N, in_C = x.shape + if N > 0: + out_C = self.out_conv_channels + out_HW = self.roi_feat_size + x = self.fc(x) + x = x.reshape(N, out_C, out_HW, out_HW) + x = self.upsample(x) + return x + return None diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py new file mode 100644 index 0000000..c6eaa54 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, auto_fp16, force_fp32 + +from mmdet.models.builder import HEADS, build_loss + + +@HEADS.register_module() +class FusedSemanticHead(BaseModule): + r"""Multi-level fused semantic segmentation head. + + .. 
code-block:: none + + in_1 -> 1x1 conv --- + | + in_2 -> 1x1 conv -- | + || + in_3 -> 1x1 conv - || + ||| /-> 1x1 conv (mask prediction) + in_4 -> 1x1 conv -----> 3x3 convs (*4) + | \-> 1x1 conv (feature) + in_5 -> 1x1 conv --- + """ # noqa: W605 + + def __init__(self, + num_ins, + fusion_level, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + conv_cfg=None, + norm_cfg=None, + ignore_label=None, + loss_weight=None, + loss_seg=dict( + type='CrossEntropyLoss', + ignore_index=255, + loss_weight=0.2), + init_cfg=dict( + type='Kaiming', override=dict(name='conv_logits'))): + super(FusedSemanticHead, self).__init__(init_cfg) + self.num_ins = num_ins + self.fusion_level = fusion_level + self.num_convs = num_convs + self.in_channels = in_channels + self.conv_out_channels = conv_out_channels + self.num_classes = num_classes + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.fp16_enabled = False + + self.lateral_convs = nn.ModuleList() + for i in range(self.num_ins): + self.lateral_convs.append( + ConvModule( + self.in_channels, + self.in_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=False)) + + self.convs = nn.ModuleList() + for i in range(self.num_convs): + in_channels = self.in_channels if i == 0 else conv_out_channels + self.convs.append( + ConvModule( + in_channels, + conv_out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.conv_embedding = ConvModule( + conv_out_channels, + conv_out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) + if ignore_label: + loss_seg['ignore_index'] = ignore_label + if loss_weight: + loss_seg['loss_weight'] = loss_weight + if ignore_label or loss_weight: + warnings.warn('``ignore_label`` and ``loss_weight`` would be ' + 'deprecated soon. Please set ``ingore_index`` and ' + '``loss_weight`` in ``loss_seg`` instead.') + self.criterion = build_loss(loss_seg) + + @auto_fp16() + def forward(self, feats): + x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) + fused_size = tuple(x.shape[-2:]) + for i, feat in enumerate(feats): + if i != self.fusion_level: + feat = F.interpolate( + feat, size=fused_size, mode='bilinear', align_corners=True) + # fix runtime error of "+=" inplace operation in PyTorch 1.10 + x = x + self.lateral_convs[i](feat) + + for i in range(self.num_convs): + x = self.convs[i](x) + + mask_pred = self.conv_logits(x) + x = self.conv_embedding(x) + return mask_pred, x + + @force_fp32(apply_to=('mask_pred', )) + def loss(self, mask_pred, labels): + labels = labels.squeeze(1).long() + loss_semantic_seg = self.criterion(mask_pred, labels) + return loss_semantic_seg diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/global_context_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/global_context_head.py new file mode 100644 index 0000000..af76a17 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/global_context_head.py @@ -0,0 +1,101 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, auto_fp16, force_fp32 + +from mmdet.models.builder import HEADS +from mmdet.models.utils import ResLayer, SimplifiedBasicBlock + + +@HEADS.register_module() +class GlobalContextHead(BaseModule): + """Global context head used in `SCNet `_. 
+ + Args: + num_convs (int, optional): number of convolutional layer in GlbCtxHead. + Default: 4. + in_channels (int, optional): number of input channels. Default: 256. + conv_out_channels (int, optional): number of output channels before + classification layer. Default: 256. + num_classes (int, optional): number of classes. Default: 80. + loss_weight (float, optional): global context loss weight. Default: 1. + conv_cfg (dict, optional): config to init conv layer. Default: None. + norm_cfg (dict, optional): config to init norm layer. Default: None. + conv_to_res (bool, optional): if True, 2 convs will be grouped into + 1 `SimplifiedBasicBlock` using a skip connection. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_weight=1.0, + conv_cfg=None, + norm_cfg=None, + conv_to_res=False, + init_cfg=dict( + type='Normal', std=0.01, override=dict(name='fc'))): + super(GlobalContextHead, self).__init__(init_cfg) + self.num_convs = num_convs + self.in_channels = in_channels + self.conv_out_channels = conv_out_channels + self.num_classes = num_classes + self.loss_weight = loss_weight + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.conv_to_res = conv_to_res + self.fp16_enabled = False + + if self.conv_to_res: + num_res_blocks = num_convs // 2 + self.convs = ResLayer( + SimplifiedBasicBlock, + in_channels, + self.conv_out_channels, + num_res_blocks, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + self.num_convs = num_res_blocks + else: + self.convs = nn.ModuleList() + for i in range(self.num_convs): + in_channels = self.in_channels if i == 0 else conv_out_channels + self.convs.append( + ConvModule( + in_channels, + conv_out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + + self.pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(conv_out_channels, num_classes) + + self.criterion = nn.BCEWithLogitsLoss() + + @auto_fp16() + def forward(self, feats): + """Forward function.""" + x = feats[-1] + for i in range(self.num_convs): + x = self.convs[i](x) + x = self.pool(x) + + # multi-class prediction + mc_pred = x.reshape(x.size(0), -1) + mc_pred = self.fc(mc_pred) + + return mc_pred, x + + @force_fp32(apply_to=('pred', )) + def loss(self, pred, labels): + """Loss function.""" + labels = [lbl.unique() for lbl in labels] + targets = pred.new_zeros(pred.size()) + for i, label in enumerate(labels): + targets[i, label] = 1.0 + loss = self.loss_weight * self.criterion(pred, targets) + return loss diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/grid_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/grid_head.py new file mode 100644 index 0000000..0c0702d --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/grid_head.py @@ -0,0 +1,363 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
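The `GlobalContextHead.loss` above turns the per-image instance labels into a multi-hot vector (one bit per class that appears in the image) and supervises the pooled global feature with `BCEWithLogitsLoss`. A minimal standalone sketch of just that target construction, with toy shapes and an illustrative helper name (`multi_hot_targets` is not part of mmdetection):

```python
import torch
import torch.nn as nn


def multi_hot_targets(pred, labels):
    """Build (B, num_classes) multi-hot targets from per-image instance labels."""
    targets = pred.new_zeros(pred.size())
    for i, lbl in enumerate(labels):
        targets[i, lbl.unique()] = 1.0
    return targets


# toy check: 2 images, 5 classes
pred = torch.randn(2, 5)              # logits from the pooled global context feature
labels = [torch.tensor([0, 0, 3]),    # instance labels in image 0
          torch.tensor([1])]          # instance labels in image 1
targets = multi_hot_targets(pred, labels)
loss = nn.BCEWithLogitsLoss()(pred, targets)  # per-class sigmoid BCE, as in the head above
```

In effect the branch behaves as an image-level multi-label classifier, and its pooled feature is what the rest of SCNet reuses as global context.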
+import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from mmdet.models.builder import HEADS, build_loss + + +@HEADS.register_module() +class GridHead(BaseModule): + + def __init__(self, + grid_points=9, + num_convs=8, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + point_feat_channels=64, + deconv_kernel_size=4, + class_agnostic=False, + loss_grid=dict( + type='CrossEntropyLoss', use_sigmoid=True, + loss_weight=15), + conv_cfg=None, + norm_cfg=dict(type='GN', num_groups=36), + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d', 'Linear']), + dict( + type='Normal', + layer='ConvTranspose2d', + std=0.001, + override=dict( + type='Normal', + name='deconv2', + std=0.001, + bias=-np.log(0.99 / 0.01))) + ]): + super(GridHead, self).__init__(init_cfg) + self.grid_points = grid_points + self.num_convs = num_convs + self.roi_feat_size = roi_feat_size + self.in_channels = in_channels + self.conv_kernel_size = conv_kernel_size + self.point_feat_channels = point_feat_channels + self.conv_out_channels = self.point_feat_channels * self.grid_points + self.class_agnostic = class_agnostic + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN': + assert self.conv_out_channels % norm_cfg['num_groups'] == 0 + + assert self.grid_points >= 4 + self.grid_size = int(np.sqrt(self.grid_points)) + if self.grid_size * self.grid_size != self.grid_points: + raise ValueError('grid_points must be a square number') + + # the predicted heatmap is half of whole_map_size + if not isinstance(self.roi_feat_size, int): + raise ValueError('Only square RoIs are supporeted in Grid R-CNN') + self.whole_map_size = self.roi_feat_size * 4 + + # compute point-wise sub-regions + self.sub_regions = self.calc_sub_regions() + + self.convs = [] + for i in range(self.num_convs): + in_channels = ( + self.in_channels if i == 0 else self.conv_out_channels) + stride = 2 if i == 0 else 1 + padding = (self.conv_kernel_size - 1) // 2 + self.convs.append( + ConvModule( + in_channels, + self.conv_out_channels, + self.conv_kernel_size, + stride=stride, + padding=padding, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=True)) + self.convs = nn.Sequential(*self.convs) + + self.deconv1 = nn.ConvTranspose2d( + self.conv_out_channels, + self.conv_out_channels, + kernel_size=deconv_kernel_size, + stride=2, + padding=(deconv_kernel_size - 2) // 2, + groups=grid_points) + self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels) + self.deconv2 = nn.ConvTranspose2d( + self.conv_out_channels, + grid_points, + kernel_size=deconv_kernel_size, + stride=2, + padding=(deconv_kernel_size - 2) // 2, + groups=grid_points) + + # find the 4-neighbor of each grid point + self.neighbor_points = [] + grid_size = self.grid_size + for i in range(grid_size): # i-th column + for j in range(grid_size): # j-th row + neighbors = [] + if i > 0: # left: (i - 1, j) + neighbors.append((i - 1) * grid_size + j) + if j > 0: # up: (i, j - 1) + neighbors.append(i * grid_size + j - 1) + if j < grid_size - 1: # down: (i, j + 1) + neighbors.append(i * grid_size + j + 1) + if i < grid_size - 1: # right: (i + 1, j) + neighbors.append((i + 1) * grid_size + j) + self.neighbor_points.append(tuple(neighbors)) + # total edges in the grid + self.num_edges = sum([len(p) for p in self.neighbor_points]) + + self.forder_trans = nn.ModuleList() # first-order feature transition + self.sorder_trans = 
nn.ModuleList() # second-order feature transition + for neighbors in self.neighbor_points: + fo_trans = nn.ModuleList() + so_trans = nn.ModuleList() + for _ in range(len(neighbors)): + # each transition module consists of a 5x5 depth-wise conv and + # 1x1 conv. + fo_trans.append( + nn.Sequential( + nn.Conv2d( + self.point_feat_channels, + self.point_feat_channels, + 5, + stride=1, + padding=2, + groups=self.point_feat_channels), + nn.Conv2d(self.point_feat_channels, + self.point_feat_channels, 1))) + so_trans.append( + nn.Sequential( + nn.Conv2d( + self.point_feat_channels, + self.point_feat_channels, + 5, + 1, + 2, + groups=self.point_feat_channels), + nn.Conv2d(self.point_feat_channels, + self.point_feat_channels, 1))) + self.forder_trans.append(fo_trans) + self.sorder_trans.append(so_trans) + + self.loss_grid = build_loss(loss_grid) + + def forward(self, x): + assert x.shape[-1] == x.shape[-2] == self.roi_feat_size + # RoI feature transformation, downsample 2x + x = self.convs(x) + + c = self.point_feat_channels + # first-order fusion + x_fo = [None for _ in range(self.grid_points)] + for i, points in enumerate(self.neighbor_points): + x_fo[i] = x[:, i * c:(i + 1) * c] + for j, point_idx in enumerate(points): + x_fo[i] = x_fo[i] + self.forder_trans[i][j]( + x[:, point_idx * c:(point_idx + 1) * c]) + + # second-order fusion + x_so = [None for _ in range(self.grid_points)] + for i, points in enumerate(self.neighbor_points): + x_so[i] = x[:, i * c:(i + 1) * c] + for j, point_idx in enumerate(points): + x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx]) + + # predicted heatmap with fused features + x2 = torch.cat(x_so, dim=1) + x2 = self.deconv1(x2) + x2 = F.relu(self.norm1(x2), inplace=True) + heatmap = self.deconv2(x2) + + # predicted heatmap with original features (applicable during training) + if self.training: + x1 = x + x1 = self.deconv1(x1) + x1 = F.relu(self.norm1(x1), inplace=True) + heatmap_unfused = self.deconv2(x1) + else: + heatmap_unfused = heatmap + + return dict(fused=heatmap, unfused=heatmap_unfused) + + def calc_sub_regions(self): + """Compute point specific representation regions. + + See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details. + """ + # to make it consistent with the original implementation, half_size + # is computed as 2 * quarter_size, which is smaller + half_size = self.whole_map_size // 4 * 2 + sub_regions = [] + for i in range(self.grid_points): + x_idx = i // self.grid_size + y_idx = i % self.grid_size + if x_idx == 0: + sub_x1 = 0 + elif x_idx == self.grid_size - 1: + sub_x1 = half_size + else: + ratio = x_idx / (self.grid_size - 1) - 0.25 + sub_x1 = max(int(ratio * self.whole_map_size), 0) + + if y_idx == 0: + sub_y1 = 0 + elif y_idx == self.grid_size - 1: + sub_y1 = half_size + else: + ratio = y_idx / (self.grid_size - 1) - 0.25 + sub_y1 = max(int(ratio * self.whole_map_size), 0) + sub_regions.append( + (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size)) + return sub_regions + + def get_targets(self, sampling_results, rcnn_train_cfg): + # mix all samples (across images) together. 
+ pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results], + dim=0).cpu() + pos_gt_bboxes = torch.cat( + [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu() + assert pos_bboxes.shape == pos_gt_bboxes.shape + + # expand pos_bboxes to 2x of original size + x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 + y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 + x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 + y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 + pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) + pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1) + pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1) + + num_rois = pos_bboxes.shape[0] + map_size = self.whole_map_size + # this is not the final target shape + targets = torch.zeros((num_rois, self.grid_points, map_size, map_size), + dtype=torch.float) + + # pre-compute interpolation factors for all grid points. + # the first item is the factor of x-dim, and the second is y-dim. + # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1) + factors = [] + for j in range(self.grid_points): + x_idx = j // self.grid_size + y_idx = j % self.grid_size + factors.append((1 - x_idx / (self.grid_size - 1), + 1 - y_idx / (self.grid_size - 1))) + + radius = rcnn_train_cfg.pos_radius + radius2 = radius**2 + for i in range(num_rois): + # ignore small bboxes + if (pos_bbox_ws[i] <= self.grid_size + or pos_bbox_hs[i] <= self.grid_size): + continue + # for each grid point, mark a small circle as positive + for j in range(self.grid_points): + factor_x, factor_y = factors[j] + gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + ( + 1 - factor_x) * pos_gt_bboxes[i, 2] + gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + ( + 1 - factor_y) * pos_gt_bboxes[i, 3] + + cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] * + map_size) + cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] * + map_size) + + for x in range(cx - radius, cx + radius + 1): + for y in range(cy - radius, cy + radius + 1): + if x >= 0 and x < map_size and y >= 0 and y < map_size: + if (x - cx)**2 + (y - cy)**2 <= radius2: + targets[i, j, y, x] = 1 + # reduce the target heatmap size by a half + # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688). 
+ sub_targets = [] + for i in range(self.grid_points): + sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i] + sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2]) + sub_targets = torch.cat(sub_targets, dim=1) + sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device) + return sub_targets + + def loss(self, grid_pred, grid_targets): + loss_fused = self.loss_grid(grid_pred['fused'], grid_targets) + loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets) + loss_grid = loss_fused + loss_unfused + return dict(loss_grid=loss_grid) + + def get_bboxes(self, det_bboxes, grid_pred, img_metas): + # TODO: refactoring + assert det_bboxes.shape[0] == grid_pred.shape[0] + det_bboxes = det_bboxes.cpu() + cls_scores = det_bboxes[:, [4]] + det_bboxes = det_bboxes[:, :4] + grid_pred = grid_pred.sigmoid().cpu() + + R, c, h, w = grid_pred.shape + half_size = self.whole_map_size // 4 * 2 + assert h == w == half_size + assert c == self.grid_points + + # find the point with max scores in the half-sized heatmap + grid_pred = grid_pred.view(R * c, h * w) + pred_scores, pred_position = grid_pred.max(dim=1) + xs = pred_position % w + ys = pred_position // w + + # get the position in the whole heatmap instead of half-sized heatmap + for i in range(self.grid_points): + xs[i::self.grid_points] += self.sub_regions[i][0] + ys[i::self.grid_points] += self.sub_regions[i][1] + + # reshape to (num_rois, grid_points) + pred_scores, xs, ys = tuple( + map(lambda x: x.view(R, c), [pred_scores, xs, ys])) + + # get expanded pos_bboxes + widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1) + heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1) + x1 = (det_bboxes[:, 0, None] - widths / 2) + y1 = (det_bboxes[:, 1, None] - heights / 2) + # map the grid point to the absolute coordinates + abs_xs = (xs.float() + 0.5) / w * widths + x1 + abs_ys = (ys.float() + 0.5) / h * heights + y1 + + # get the grid points indices that fall on the bbox boundaries + x1_inds = [i for i in range(self.grid_size)] + y1_inds = [i * self.grid_size for i in range(self.grid_size)] + x2_inds = [ + self.grid_points - self.grid_size + i + for i in range(self.grid_size) + ] + y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)] + + # voting of all grid points on some boundary + bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum( + dim=1, keepdim=True) / ( + pred_scores[:, x1_inds].sum(dim=1, keepdim=True)) + bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum( + dim=1, keepdim=True) / ( + pred_scores[:, y1_inds].sum(dim=1, keepdim=True)) + bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum( + dim=1, keepdim=True) / ( + pred_scores[:, x2_inds].sum(dim=1, keepdim=True)) + bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum( + dim=1, keepdim=True) / ( + pred_scores[:, y2_inds].sum(dim=1, keepdim=True)) + + bbox_res = torch.cat( + [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1) + bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1]) + bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0]) + + return bbox_res diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/htc_mask_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/htc_mask_head.py new file mode 100644 index 0000000..7ad8592 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/htc_mask_head.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
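`GridHead.get_bboxes` above locates the peak of each grid-point heatmap, maps it back to absolute image coordinates, and then averages the grid points lying on each box edge, weighted by their heatmap confidence, to vote for the refined boundary. The sketch below reproduces only that voting step on made-up inputs; `vote_boundaries` and its shapes are illustrative, not mmdetection API.

```python
import torch


def vote_boundaries(abs_xs, abs_ys, scores, grid_size=3):
    """Confidence-weighted voting of the grid points on each box edge.

    abs_xs, abs_ys, scores: (R, grid_size**2); point k has x-index k // grid_size
    and y-index k % grid_size, matching the ordering used by GridHead.
    """
    gp = grid_size * grid_size
    x1_inds = list(range(grid_size))                               # x-index 0   -> left edge
    y1_inds = [i * grid_size for i in range(grid_size)]            # y-index 0   -> top edge
    x2_inds = [gp - grid_size + i for i in range(grid_size)]       # last x-index -> right edge
    y2_inds = [(i + 1) * grid_size - 1 for i in range(grid_size)]  # last y-index -> bottom edge

    def wavg(vals, inds):
        w = scores[:, inds]
        return (vals[:, inds] * w).sum(dim=1, keepdim=True) / w.sum(dim=1, keepdim=True)

    return torch.cat([wavg(abs_xs, x1_inds), wavg(abs_ys, y1_inds),
                      wavg(abs_xs, x2_inds), wavg(abs_ys, y2_inds)], dim=1)


# toy usage: 2 RoIs, 3x3 grid -> (2, 4) refined [x1, y1, x2, y2]
boxes = vote_boundaries(torch.rand(2, 9) * 100, torch.rand(2, 9) * 100, torch.rand(2, 9))
```

Averaging several correlated grid points is more robust than trusting a single corner prediction, which is the core argument of Grid R-CNN.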
+from mmcv.cnn import ConvModule + +from mmdet.models.builder import HEADS +from .fcn_mask_head import FCNMaskHead + + +@HEADS.register_module() +class HTCMaskHead(FCNMaskHead): + + def __init__(self, with_conv_res=True, *args, **kwargs): + super(HTCMaskHead, self).__init__(*args, **kwargs) + self.with_conv_res = with_conv_res + if self.with_conv_res: + self.conv_res = ConvModule( + self.conv_out_channels, + self.conv_out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + + def forward(self, x, res_feat=None, return_logits=True, return_feat=True): + if res_feat is not None: + assert self.with_conv_res + res_feat = self.conv_res(res_feat) + x = x + res_feat + for conv in self.convs: + x = conv(x) + res_feat = x + outs = [] + if return_logits: + x = self.upsample(x) + if self.upsample_method == 'deconv': + x = self.relu(x) + mask_pred = self.conv_logits(x) + outs.append(mask_pred) + if return_feat: + outs.append(res_feat) + return outs if len(outs) > 1 else outs[0] diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py new file mode 100644 index 0000000..c77c46d --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py @@ -0,0 +1,253 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point +from mmcv.runner import BaseModule + +from mmdet.models.builder import HEADS, build_loss +from mmdet.models.utils import (get_uncertain_point_coords_with_randomness, + get_uncertainty) + + +@HEADS.register_module() +class MaskPointHead(BaseModule): + """A mask point head use in PointRend. + + ``MaskPointHead`` use shared multi-layer perceptron (equivalent to + nn.Conv1d) to predict the logit of input points. The fine-grained feature + and coarse feature will be concatenate together for predication. + + Args: + num_fcs (int): Number of fc layers in the head. Default: 3. + in_channels (int): Number of input channels. Default: 256. + fc_channels (int): Number of fc channels. Default: 256. + num_classes (int): Number of classes for logits. Default: 80. + class_agnostic (bool): Whether use class agnostic classification. + If so, the output channels of logits will be 1. Default: False. + coarse_pred_each_layer (bool): Whether concatenate coarse feature with + the output of each fc layer. Default: True. + conv_cfg (dict | None): Dictionary to construct and config conv layer. + Default: dict(type='Conv1d')) + norm_cfg (dict | None): Dictionary to construct and config norm layer. + Default: None. + loss_point (dict): Dictionary to construct and config loss layer of + point head. Default: dict(type='CrossEntropyLoss', use_mask=True, + loss_weight=1.0). + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + num_classes, + num_fcs=3, + in_channels=256, + fc_channels=256, + class_agnostic=False, + coarse_pred_each_layer=True, + conv_cfg=dict(type='Conv1d'), + norm_cfg=None, + act_cfg=dict(type='ReLU'), + loss_point=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), + init_cfg=dict( + type='Normal', std=0.001, + override=dict(name='fc_logits'))): + super().__init__(init_cfg) + self.num_fcs = num_fcs + self.in_channels = in_channels + self.fc_channels = fc_channels + self.num_classes = num_classes + self.class_agnostic = class_agnostic + self.coarse_pred_each_layer = coarse_pred_each_layer + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.loss_point = build_loss(loss_point) + + fc_in_channels = in_channels + num_classes + self.fcs = nn.ModuleList() + for _ in range(num_fcs): + fc = ConvModule( + fc_in_channels, + fc_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.fcs.append(fc) + fc_in_channels = fc_channels + fc_in_channels += num_classes if self.coarse_pred_each_layer else 0 + + out_channels = 1 if self.class_agnostic else self.num_classes + self.fc_logits = nn.Conv1d( + fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, fine_grained_feats, coarse_feats): + """Classify each point base on fine grained and coarse feats. + + Args: + fine_grained_feats (Tensor): Fine grained feature sampled from FPN, + shape (num_rois, in_channels, num_points). + coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead, + shape (num_rois, num_classes, num_points). + + Returns: + Tensor: Point classification results, + shape (num_rois, num_class, num_points). + """ + + x = torch.cat([fine_grained_feats, coarse_feats], dim=1) + for fc in self.fcs: + x = fc(x) + if self.coarse_pred_each_layer: + x = torch.cat((x, coarse_feats), dim=1) + return self.fc_logits(x) + + def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks, + cfg): + """Get training targets of MaskPointHead for all images. + + Args: + rois (Tensor): Region of Interest, shape (num_rois, 5). + rel_roi_points: Points coordinates relative to RoI, shape + (num_rois, num_points, 2). + sampling_results (:obj:`SamplingResult`): Sampling result after + sampling and assignment. + gt_masks (Tensor) : Ground truth segmentation masks of + corresponding boxes, shape (num_rois, height, width). + cfg (dict): Training cfg. + + Returns: + Tensor: Point target, shape (num_rois, num_points). 
+ """ + + num_imgs = len(sampling_results) + rois_list = [] + rel_roi_points_list = [] + for batch_ind in range(num_imgs): + inds = (rois[:, 0] == batch_ind) + rois_list.append(rois[inds]) + rel_roi_points_list.append(rel_roi_points[inds]) + pos_assigned_gt_inds_list = [ + res.pos_assigned_gt_inds for res in sampling_results + ] + cfg_list = [cfg for _ in range(num_imgs)] + + point_targets = map(self._get_target_single, rois_list, + rel_roi_points_list, pos_assigned_gt_inds_list, + gt_masks, cfg_list) + point_targets = list(point_targets) + + if len(point_targets) > 0: + point_targets = torch.cat(point_targets) + + return point_targets + + def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds, + gt_masks, cfg): + """Get training target of MaskPointHead for each image.""" + num_pos = rois.size(0) + num_points = cfg.num_points + if num_pos > 0: + gt_masks_th = ( + gt_masks.to_tensor(rois.dtype, rois.device).index_select( + 0, pos_assigned_gt_inds)) + gt_masks_th = gt_masks_th.unsqueeze(1) + rel_img_points = rel_roi_point_to_rel_img_point( + rois, rel_roi_points, gt_masks_th) + point_targets = point_sample(gt_masks_th, + rel_img_points).squeeze(1) + else: + point_targets = rois.new_zeros((0, num_points)) + return point_targets + + def loss(self, point_pred, point_targets, labels): + """Calculate loss for MaskPointHead. + + Args: + point_pred (Tensor): Point predication result, shape + (num_rois, num_classes, num_points). + point_targets (Tensor): Point targets, shape (num_roi, num_points). + labels (Tensor): Class label of corresponding boxes, + shape (num_rois, ) + + Returns: + dict[str, Tensor]: a dictionary of point loss components + """ + + loss = dict() + if self.class_agnostic: + loss_point = self.loss_point(point_pred, point_targets, + torch.zeros_like(labels)) + else: + loss_point = self.loss_point(point_pred, point_targets, labels) + loss['loss_point'] = loss_point + return loss + + def get_roi_rel_points_train(self, mask_pred, labels, cfg): + """Get ``num_points`` most uncertain points with random points during + train. + + Sample points in [0, 1] x [0, 1] coordinate space based on their + uncertainty. The uncertainties are calculated for each point using + '_get_uncertainty()' function that takes point's logit prediction as + input. + + Args: + mask_pred (Tensor): A tensor of shape (num_rois, num_classes, + mask_height, mask_width) for class-specific or class-agnostic + prediction. + labels (list): The ground truth class for each instance. + cfg (dict): Training config of point head. + + Returns: + point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) + that contains the coordinates sampled points. + """ + point_coords = get_uncertain_point_coords_with_randomness( + mask_pred, labels, cfg.num_points, cfg.oversample_ratio, + cfg.importance_sample_ratio) + return point_coords + + def get_roi_rel_points_test(self, mask_pred, pred_label, cfg): + """Get ``num_points`` most uncertain points during test. + + Args: + mask_pred (Tensor): A tensor of shape (num_rois, num_classes, + mask_height, mask_width) for class-specific or class-agnostic + prediction. + pred_label (list): The predication class for each instance. + cfg (dict): Testing config of point head. + + Returns: + point_indices (Tensor): A tensor of shape (num_rois, num_points) + that contains indices from [0, mask_height x mask_width) of the + most uncertain points. 
+ point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) + that contains [0, 1] x [0, 1] normalized coordinates of the + most uncertain points from the [mask_height, mask_width] grid . + """ + num_points = cfg.subdivision_num_points + uncertainty_map = get_uncertainty(mask_pred, pred_label) + num_rois, _, mask_height, mask_width = uncertainty_map.shape + + # During ONNX exporting, the type of each elements of 'shape' is + # `Tensor(float)`, while it is `float` during PyTorch inference. + if isinstance(mask_height, torch.Tensor): + h_step = 1.0 / mask_height.float() + w_step = 1.0 / mask_width.float() + else: + h_step = 1.0 / mask_height + w_step = 1.0 / mask_width + # cast to int to avoid dynamic K for TopK op in ONNX + mask_size = int(mask_height * mask_width) + uncertainty_map = uncertainty_map.view(num_rois, mask_size) + num_points = min(mask_size, num_points) + point_indices = uncertainty_map.topk(num_points, dim=1)[1] + xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step + ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step + point_coords = torch.stack([xs, ys], dim=2) + return point_indices, point_coords diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/maskiou_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/maskiou_head.py new file mode 100644 index 0000000..a7ff7c7 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/maskiou_head.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import Conv2d, Linear, MaxPool2d +from mmcv.runner import BaseModule, force_fp32 +from torch.nn.modules.utils import _pair + +from mmdet.models.builder import HEADS, build_loss + + +@HEADS.register_module() +class MaskIoUHead(BaseModule): + """Mask IoU Head. + + This head predicts the IoU of predicted masks and corresponding gt masks. 
+ """ + + def __init__(self, + num_convs=4, + num_fcs=2, + roi_feat_size=14, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + num_classes=80, + loss_iou=dict(type='MSELoss', loss_weight=0.5), + init_cfg=[ + dict(type='Kaiming', override=dict(name='convs')), + dict(type='Caffe2Xavier', override=dict(name='fcs')), + dict( + type='Normal', + std=0.01, + override=dict(name='fc_mask_iou')) + ]): + super(MaskIoUHead, self).__init__(init_cfg) + self.in_channels = in_channels + self.conv_out_channels = conv_out_channels + self.fc_out_channels = fc_out_channels + self.num_classes = num_classes + self.fp16_enabled = False + + self.convs = nn.ModuleList() + for i in range(num_convs): + if i == 0: + # concatenation of mask feature and mask prediction + in_channels = self.in_channels + 1 + else: + in_channels = self.conv_out_channels + stride = 2 if i == num_convs - 1 else 1 + self.convs.append( + Conv2d( + in_channels, + self.conv_out_channels, + 3, + stride=stride, + padding=1)) + + roi_feat_size = _pair(roi_feat_size) + pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2) + self.fcs = nn.ModuleList() + for i in range(num_fcs): + in_channels = ( + self.conv_out_channels * + pooled_area if i == 0 else self.fc_out_channels) + self.fcs.append(Linear(in_channels, self.fc_out_channels)) + + self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes) + self.relu = nn.ReLU() + self.max_pool = MaxPool2d(2, 2) + self.loss_iou = build_loss(loss_iou) + + def forward(self, mask_feat, mask_pred): + mask_pred = mask_pred.sigmoid() + mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1)) + + x = torch.cat((mask_feat, mask_pred_pooled), 1) + + for conv in self.convs: + x = self.relu(conv(x)) + x = x.flatten(1) + for fc in self.fcs: + x = self.relu(fc(x)) + mask_iou = self.fc_mask_iou(x) + return mask_iou + + @force_fp32(apply_to=('mask_iou_pred', )) + def loss(self, mask_iou_pred, mask_iou_targets): + pos_inds = mask_iou_targets > 0 + if pos_inds.sum() > 0: + loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds], + mask_iou_targets[pos_inds]) + else: + loss_mask_iou = mask_iou_pred.sum() * 0 + return dict(loss_mask_iou=loss_mask_iou) + + @force_fp32(apply_to=('mask_pred', )) + def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets, + rcnn_train_cfg): + """Compute target of mask IoU. + + Mask IoU target is the IoU of the predicted mask (inside a bbox) and + the gt mask of corresponding gt mask (the whole instance). + The intersection area is computed inside the bbox, and the gt mask area + is computed with two steps, firstly we compute the gt area inside the + bbox, then divide it by the area ratio of gt area inside the bbox and + the gt area of the whole instance. + + Args: + sampling_results (list[:obj:`SamplingResult`]): sampling results. + gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance) + of each image, with the same shape of the input image. + mask_pred (Tensor): Predicted masks of each positive proposal, + shape (num_pos, h, w). + mask_targets (Tensor): Gt mask of each positive proposal, + binary map of the shape (num_pos, h, w). + rcnn_train_cfg (dict): Training config for R-CNN part. + + Returns: + Tensor: mask iou target (length == num positive). 
+ """ + pos_proposals = [res.pos_bboxes for res in sampling_results] + pos_assigned_gt_inds = [ + res.pos_assigned_gt_inds for res in sampling_results + ] + + # compute the area ratio of gt areas inside the proposals and + # the whole instance + area_ratios = map(self._get_area_ratio, pos_proposals, + pos_assigned_gt_inds, gt_masks) + area_ratios = torch.cat(list(area_ratios)) + assert mask_targets.size(0) == area_ratios.size(0) + + mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float() + mask_pred_areas = mask_pred.sum((-1, -2)) + + # mask_pred and mask_targets are binary maps + overlap_areas = (mask_pred * mask_targets).sum((-1, -2)) + + # compute the mask area of the whole instance + gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7) + + mask_iou_targets = overlap_areas / ( + mask_pred_areas + gt_full_areas - overlap_areas) + return mask_iou_targets + + def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks): + """Compute area ratio of the gt mask inside the proposal and the gt + mask of the corresponding instance.""" + num_pos = pos_proposals.size(0) + if num_pos > 0: + area_ratios = [] + proposals_np = pos_proposals.cpu().numpy() + pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() + # compute mask areas of gt instances (batch processing for speedup) + gt_instance_mask_area = gt_masks.areas + for i in range(num_pos): + gt_mask = gt_masks[pos_assigned_gt_inds[i]] + + # crop the gt mask inside the proposal + bbox = proposals_np[i, :].astype(np.int32) + gt_mask_in_proposal = gt_mask.crop(bbox) + + ratio = gt_mask_in_proposal.areas[0] / ( + gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7) + area_ratios.append(ratio) + area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to( + pos_proposals.device) + else: + area_ratios = pos_proposals.new_zeros((0, )) + return area_ratios + + @force_fp32(apply_to=('mask_iou_pred', )) + def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels): + """Get the mask scores. + + mask_score = bbox_score * mask_iou + """ + inds = range(det_labels.size(0)) + mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1] + mask_scores = mask_scores.cpu().numpy() + det_labels = det_labels.cpu().numpy() + return [mask_scores[det_labels == i] for i in range(self.num_classes)] diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py new file mode 100644 index 0000000..ca62486 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet.models.builder import HEADS +from mmdet.models.utils import ResLayer, SimplifiedBasicBlock +from .fcn_mask_head import FCNMaskHead + + +@HEADS.register_module() +class SCNetMaskHead(FCNMaskHead): + """Mask head for `SCNet `_. + + Args: + conv_to_res (bool, optional): if True, change the conv layers to + ``SimplifiedBasicBlock``. 
+ """ + + def __init__(self, conv_to_res=True, **kwargs): + super(SCNetMaskHead, self).__init__(**kwargs) + self.conv_to_res = conv_to_res + if conv_to_res: + assert self.conv_kernel_size == 3 + self.num_res_blocks = self.num_convs // 2 + self.convs = ResLayer( + SimplifiedBasicBlock, + self.in_channels, + self.conv_out_channels, + self.num_res_blocks, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py new file mode 100644 index 0000000..2b8c5c3 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet.models.builder import HEADS +from mmdet.models.utils import ResLayer, SimplifiedBasicBlock +from .fused_semantic_head import FusedSemanticHead + + +@HEADS.register_module() +class SCNetSemanticHead(FusedSemanticHead): + """Mask head for `SCNet `_. + + Args: + conv_to_res (bool, optional): if True, change the conv layers to + ``SimplifiedBasicBlock``. + """ + + def __init__(self, conv_to_res=True, **kwargs): + super(SCNetSemanticHead, self).__init__(**kwargs) + self.conv_to_res = conv_to_res + if self.conv_to_res: + num_res_blocks = self.num_convs // 2 + self.convs = ResLayer( + SimplifiedBasicBlock, + self.in_channels, + self.conv_out_channels, + num_res_blocks, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + self.num_convs = num_res_blocks diff --git a/downstream/mmdetection/mmdet/models/roi_heads/mask_scoring_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/mask_scoring_roi_head.py new file mode 100644 index 0000000..4617988 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/mask_scoring_roi_head.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.core import bbox2roi +from ..builder import HEADS, build_head +from .standard_roi_head import StandardRoIHead + + +@HEADS.register_module() +class MaskScoringRoIHead(StandardRoIHead): + """Mask Scoring RoIHead for Mask Scoring RCNN. 
+ + https://arxiv.org/abs/1903.00241 + """ + + def __init__(self, mask_iou_head, **kwargs): + assert mask_iou_head is not None + super(MaskScoringRoIHead, self).__init__(**kwargs) + self.mask_iou_head = build_head(mask_iou_head) + + def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, + img_metas): + """Run forward function and calculate loss for Mask head in + training.""" + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + mask_results = super(MaskScoringRoIHead, + self)._mask_forward_train(x, sampling_results, + bbox_feats, gt_masks, + img_metas) + if mask_results['loss_mask'] is None: + return mask_results + + # mask iou head forward and loss + pos_mask_pred = mask_results['mask_pred'][ + range(mask_results['mask_pred'].size(0)), pos_labels] + mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'], + pos_mask_pred) + pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), + pos_labels] + + mask_iou_targets = self.mask_iou_head.get_targets( + sampling_results, gt_masks, pos_mask_pred, + mask_results['mask_targets'], self.train_cfg) + loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred, + mask_iou_targets) + mask_results['loss_mask'].update(loss_mask_iou) + return mask_results + + def simple_test_mask(self, + x, + img_metas, + det_bboxes, + det_labels, + rescale=False): + """Obtain mask prediction without augmentation.""" + # image shapes of images in the batch + ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + num_imgs = len(det_bboxes) + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + num_classes = self.mask_head.num_classes + segm_results = [[[] for _ in range(num_classes)] + for _ in range(num_imgs)] + mask_scores = [[[] for _ in range(num_classes)] + for _ in range(num_imgs)] + else: + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. 
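For context on how the mask IoU branch is consumed at test time: `MaskIoUHead.get_mask_scores` (earlier in this patch) multiplies the predicted mask IoU of the detected class by the box score, and `MaskScoringRoIHead.simple_test_mask` groups these scores per class. A standalone sketch of that scoring rule with toy inputs (the helper name and shapes are illustrative):

```python
import torch


def mask_scores_from_iou(mask_iou_pred, det_bboxes, det_labels, num_classes):
    """mask_score = bbox_score * predicted mask IoU, grouped per class.

    mask_iou_pred: (num_det, num_classes); det_bboxes: (num_det, 5) with the box
    score in the last column; det_labels: (num_det,).
    """
    inds = torch.arange(det_labels.numel())
    scores = (mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]).cpu().numpy()
    labels = det_labels.cpu().numpy()
    return [scores[labels == c] for c in range(num_classes)]


# toy usage: 3 detections, 4 classes
iou_pred = torch.rand(3, 4)
bboxes = torch.cat([torch.rand(3, 4) * 100, torch.rand(3, 1)], dim=1)
labels = torch.tensor([0, 2, 2])
per_class_scores = mask_scores_from_iou(iou_pred, bboxes, labels, num_classes=4)
```

Re-scoring masks by their predicted IoU decouples mask quality from classification confidence, which is the main point of Mask Scoring R-CNN.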
+ if rescale and not isinstance(scale_factors[0], float): + scale_factors = [ + torch.from_numpy(scale_factor).to(det_bboxes[0].device) + for scale_factor in scale_factors + ] + _bboxes = [ + det_bboxes[i][:, :4] * + scale_factors[i] if rescale else det_bboxes[i] + for i in range(num_imgs) + ] + mask_rois = bbox2roi(_bboxes) + mask_results = self._mask_forward(x, mask_rois) + concat_det_labels = torch.cat(det_labels) + # get mask scores with mask iou head + mask_feats = mask_results['mask_feats'] + mask_pred = mask_results['mask_pred'] + mask_iou_pred = self.mask_iou_head( + mask_feats, mask_pred[range(concat_det_labels.size(0)), + concat_det_labels]) + # split batch mask prediction back to each image + num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes) + mask_preds = mask_pred.split(num_bboxes_per_img, 0) + mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0) + + # apply mask post-processing to each image individually + segm_results = [] + mask_scores = [] + for i in range(num_imgs): + if det_bboxes[i].shape[0] == 0: + segm_results.append( + [[] for _ in range(self.mask_head.num_classes)]) + mask_scores.append( + [[] for _ in range(self.mask_head.num_classes)]) + else: + segm_result = self.mask_head.get_seg_masks( + mask_preds[i], _bboxes[i], det_labels[i], + self.test_cfg, ori_shapes[i], scale_factors[i], + rescale) + # get mask scores with mask iou head + mask_score = self.mask_iou_head.get_mask_scores( + mask_iou_preds[i], det_bboxes[i], det_labels[i]) + segm_results.append(segm_result) + mask_scores.append(mask_score) + return list(zip(segm_results, mask_scores)) diff --git a/downstream/mmdetection/mmdet/models/roi_heads/pisa_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/pisa_roi_head.py new file mode 100644 index 0000000..92a5118 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/pisa_roi_head.py @@ -0,0 +1,160 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet.core import bbox2roi +from ..builder import HEADS +from ..losses.pisa_loss import carl_loss, isr_p +from .standard_roi_head import StandardRoIHead + + +@HEADS.register_module() +class PISARoIHead(StandardRoIHead): + r"""The RoI head for `Prime Sample Attention in Object Detection + `_.""" + + def forward_train(self, + x, + img_metas, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None): + """Forward function for training. + + Args: + x (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + proposals (list[Tensors]): List of region proposals. + gt_bboxes (list[Tensor]): Each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): Class indices corresponding to each box + gt_bboxes_ignore (list[Tensor], optional): Specify which bounding + boxes can be ignored when computing the loss. + gt_masks (None | Tensor) : True segmentation masks for each box + used if the architecture supports a segmentation task. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # assign gts and sample proposals + if self.with_bbox or self.with_mask: + num_imgs = len(img_metas) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + sampling_results = [] + neg_label_weights = [] + for i in range(num_imgs): + assign_result = self.bbox_assigner.assign( + proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], + gt_labels[i]) + sampling_result = self.bbox_sampler.sample( + assign_result, + proposal_list[i], + gt_bboxes[i], + gt_labels[i], + feats=[lvl_feat[i][None] for lvl_feat in x]) + # neg label weight is obtained by sampling when using ISR-N + neg_label_weight = None + if isinstance(sampling_result, tuple): + sampling_result, neg_label_weight = sampling_result + sampling_results.append(sampling_result) + neg_label_weights.append(neg_label_weight) + + losses = dict() + # bbox head forward and loss + if self.with_bbox: + bbox_results = self._bbox_forward_train( + x, + sampling_results, + gt_bboxes, + gt_labels, + img_metas, + neg_label_weights=neg_label_weights) + losses.update(bbox_results['loss_bbox']) + + # mask head forward and loss + if self.with_mask: + mask_results = self._mask_forward_train(x, sampling_results, + bbox_results['bbox_feats'], + gt_masks, img_metas) + losses.update(mask_results['loss_mask']) + + return losses + + def _bbox_forward(self, x, rois): + """Box forward function used in both training and testing.""" + # TODO: a more flexible way to decide which feature maps to use + bbox_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + cls_score, bbox_pred = self.bbox_head(bbox_feats) + + bbox_results = dict( + cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) + return bbox_results + + def _bbox_forward_train(self, + x, + sampling_results, + gt_bboxes, + gt_labels, + img_metas, + neg_label_weights=None): + """Run forward function and calculate loss for box head in training.""" + rois = bbox2roi([res.bboxes for res in sampling_results]) + + bbox_results = self._bbox_forward(x, rois) + + bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, + gt_labels, self.train_cfg) + + # neg_label_weights obtained by sampler is image-wise, mapping back to + # the corresponding location in label weights + if neg_label_weights[0] is not None: + label_weights = bbox_targets[1] + cur_num_rois = 0 + for i in range(len(sampling_results)): + num_pos = sampling_results[i].pos_inds.size(0) + num_neg = sampling_results[i].neg_inds.size(0) + label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos + + num_neg] = neg_label_weights[i] + cur_num_rois += num_pos + num_neg + + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + + # Apply ISR-P + isr_cfg = self.train_cfg.get('isr', None) + if isr_cfg is not None: + bbox_targets = isr_p( + cls_score, + bbox_pred, + bbox_targets, + rois, + sampling_results, + self.bbox_head.loss_cls, + self.bbox_head.bbox_coder, + **isr_cfg, + num_class=self.bbox_head.num_classes) + loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois, + *bbox_targets) + + # Add CARL Loss + carl_cfg = self.train_cfg.get('carl', None) + if carl_cfg is not None: + loss_carl = carl_loss( + cls_score, + bbox_targets[0], + bbox_pred, + bbox_targets[2], + self.bbox_head.loss_bbox, + **carl_cfg, + num_class=self.bbox_head.num_classes) + loss_bbox.update(loss_carl) + + 
bbox_results.update(loss_bbox=loss_bbox) + return bbox_results diff --git a/downstream/mmdetection/mmdet/models/roi_heads/point_rend_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/point_rend_roi_head.py new file mode 100644 index 0000000..9f66779 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/point_rend_roi_head.py @@ -0,0 +1,393 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa +import os +import warnings + +import numpy as np +import torch +import torch.nn.functional as F +from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point + +from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks +from .. import builder +from ..builder import HEADS +from .standard_roi_head import StandardRoIHead + + +@HEADS.register_module() +class PointRendRoIHead(StandardRoIHead): + """`PointRend `_.""" + + def __init__(self, point_head, *args, **kwargs): + super().__init__(*args, **kwargs) + assert self.with_bbox and self.with_mask + self.init_point_head(point_head) + + def init_point_head(self, point_head): + """Initialize ``point_head``""" + self.point_head = builder.build_head(point_head) + + def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, + img_metas): + """Run forward function and calculate loss for mask head and point head + in training.""" + mask_results = super()._mask_forward_train(x, sampling_results, + bbox_feats, gt_masks, + img_metas) + if mask_results['loss_mask'] is not None: + loss_point = self._mask_point_forward_train( + x, sampling_results, mask_results['mask_pred'], gt_masks, + img_metas) + mask_results['loss_mask'].update(loss_point) + + return mask_results + + def _mask_point_forward_train(self, x, sampling_results, mask_pred, + gt_masks, img_metas): + """Run forward function and calculate loss for point head in + training.""" + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + rel_roi_points = self.point_head.get_roi_rel_points_train( + mask_pred, pos_labels, cfg=self.train_cfg) + rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + + fine_grained_point_feats = self._get_fine_grained_point_feats( + x, rois, rel_roi_points, img_metas) + coarse_point_feats = point_sample(mask_pred, rel_roi_points) + mask_point_pred = self.point_head(fine_grained_point_feats, + coarse_point_feats) + mask_point_target = self.point_head.get_targets( + rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg) + loss_mask_point = self.point_head.loss(mask_point_pred, + mask_point_target, pos_labels) + + return loss_mask_point + + def _get_fine_grained_point_feats(self, x, rois, rel_roi_points, + img_metas): + """Sample fine grained feats from each level feature map and + concatenate them together. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + rois (Tensor): shape (num_rois, 5). + rel_roi_points (Tensor): A tensor of shape (num_rois, num_points, + 2) that contains [0, 1] x [0, 1] normalized coordinates of the + most uncertain points from the [mask_height, mask_width] grid. + img_metas (list[dict]): Image meta info. + + Returns: + Tensor: The fine grained features for each points, + has shape (num_rois, feats_channels, num_points). + """ + num_imgs = len(img_metas) + fine_grained_feats = [] + for idx in range(self.mask_roi_extractor.num_inputs): + feats = x[idx] + spatial_scale = 1. 
/ float( + self.mask_roi_extractor.featmap_strides[idx]) + point_feats = [] + for batch_ind in range(num_imgs): + # unravel batch dim + feat = feats[batch_ind].unsqueeze(0) + inds = (rois[:, 0].long() == batch_ind) + if inds.any(): + rel_img_points = rel_roi_point_to_rel_img_point( + rois[inds], rel_roi_points[inds], feat.shape[2:], + spatial_scale).unsqueeze(0) + point_feat = point_sample(feat, rel_img_points) + point_feat = point_feat.squeeze(0).transpose(0, 1) + point_feats.append(point_feat) + fine_grained_feats.append(torch.cat(point_feats, dim=0)) + return torch.cat(fine_grained_feats, dim=1) + + def _mask_point_forward_test(self, x, rois, label_pred, mask_pred, + img_metas): + """Mask refining process with point head in testing. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + rois (Tensor): shape (num_rois, 5). + label_pred (Tensor): The predication class for each rois. + mask_pred (Tensor): The predication coarse masks of + shape (num_rois, num_classes, small_size, small_size). + img_metas (list[dict]): Image meta info. + + Returns: + Tensor: The refined masks of shape (num_rois, num_classes, + large_size, large_size). + """ + refined_mask_pred = mask_pred.clone() + for subdivision_step in range(self.test_cfg.subdivision_steps): + refined_mask_pred = F.interpolate( + refined_mask_pred, + scale_factor=self.test_cfg.scale_factor, + mode='bilinear', + align_corners=False) + # If `subdivision_num_points` is larger or equal to the + # resolution of the next step, then we can skip this step + num_rois, channels, mask_height, mask_width = \ + refined_mask_pred.shape + if (self.test_cfg.subdivision_num_points >= + self.test_cfg.scale_factor**2 * mask_height * mask_width + and + subdivision_step < self.test_cfg.subdivision_steps - 1): + continue + point_indices, rel_roi_points = \ + self.point_head.get_roi_rel_points_test( + refined_mask_pred, label_pred, cfg=self.test_cfg) + fine_grained_point_feats = self._get_fine_grained_point_feats( + x, rois, rel_roi_points, img_metas) + coarse_point_feats = point_sample(mask_pred, rel_roi_points) + mask_point_pred = self.point_head(fine_grained_point_feats, + coarse_point_feats) + + point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) + refined_mask_pred = refined_mask_pred.reshape( + num_rois, channels, mask_height * mask_width) + refined_mask_pred = refined_mask_pred.scatter_( + 2, point_indices, mask_point_pred) + refined_mask_pred = refined_mask_pred.view(num_rois, channels, + mask_height, mask_width) + + return refined_mask_pred + + def simple_test_mask(self, + x, + img_metas, + det_bboxes, + det_labels, + rescale=False): + """Obtain mask prediction without augmentation.""" + ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + if isinstance(scale_factors[0], float): + warnings.warn( + 'Scale factor in img_metas should be a ' + 'ndarray with shape (4,) ' + 'arrange as (factor_w, factor_h, factor_w, factor_h), ' + 'The scale_factor with float type has been deprecated. ') + scale_factors = np.array([scale_factors] * 4, dtype=np.float32) + + num_imgs = len(det_bboxes) + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + segm_results = [[[] for _ in range(self.mask_head.num_classes)] + for _ in range(num_imgs)] + else: + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. 
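`_mask_point_forward_test` above implements PointRend's subdivision loop: upsample the coarse mask logits, pick the most uncertain points, re-classify only those points with the point head, and scatter the new logits back into the upsampled map. Below is a self-contained sketch of one such step, with a dummy point head and a simplified class-agnostic uncertainty (`-|logit|` averaged over classes) standing in for `get_uncertainty`; it is an illustration of the refinement pattern, not the mmdetection implementation.

```python
import torch
import torch.nn.functional as F


def refine_once(coarse, point_head_fn, num_points=4, scale=2):
    """One PointRend-style subdivision step (illustrative only).

    coarse: (R, C, h, w) mask logits. point_head_fn maps flat indices of the
    upsampled grid to new logits of shape (R, C, P); the real point head would
    instead sample fine-grained FPN features and coarse logits at those points.
    """
    up = F.interpolate(coarse, scale_factor=scale, mode='bilinear', align_corners=False)
    R, C, H, W = up.shape
    # sigmoid logits near 0 are the most uncertain -> score points by -|logit|
    uncertainty = -up.abs().mean(dim=1).view(R, H * W)
    point_indices = uncertainty.topk(min(num_points, H * W), dim=1)[1]  # (R, P)
    new_logits = point_head_fn(point_indices)                           # (R, C, P)
    flat = up.view(R, C, H * W)
    flat.scatter_(2, point_indices.unsqueeze(1).expand(-1, C, -1), new_logits)
    return flat.view(R, C, H, W)


# toy usage: 2 RoIs, 1 class, 7x7 coarse logits -> 14x14 refined logits
refined = refine_once(torch.randn(2, 1, 7, 7), lambda idx: torch.zeros(2, 1, idx.shape[1]))
```

Because only a small set of points is re-evaluated at each step, the head can produce high-resolution masks without running dense convolutions at full resolution.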
+ _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))] + if rescale: + scale_factors = [ + torch.from_numpy(scale_factor).to(det_bboxes[0].device) + for scale_factor in scale_factors + ] + _bboxes = [ + _bboxes[i] * scale_factors[i] for i in range(len(_bboxes)) + ] + + mask_rois = bbox2roi(_bboxes) + mask_results = self._mask_forward(x, mask_rois) + # split batch mask prediction back to each image + mask_pred = mask_results['mask_pred'] + num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] + mask_preds = mask_pred.split(num_mask_roi_per_img, 0) + mask_rois = mask_rois.split(num_mask_roi_per_img, 0) + + # apply mask post-processing to each image individually + segm_results = [] + for i in range(num_imgs): + if det_bboxes[i].shape[0] == 0: + segm_results.append( + [[] for _ in range(self.mask_head.num_classes)]) + else: + x_i = [xx[[i]] for xx in x] + mask_rois_i = mask_rois[i] + mask_rois_i[:, 0] = 0 # TODO: remove this hack + mask_pred_i = self._mask_point_forward_test( + x_i, mask_rois_i, det_labels[i], mask_preds[i], + [img_metas]) + segm_result = self.mask_head.get_seg_masks( + mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg, + ori_shapes[i], scale_factors[i], rescale) + segm_results.append(segm_result) + return segm_results + + def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): + """Test for mask head with test time augmentation.""" + if det_bboxes.shape[0] == 0: + segm_result = [[] for _ in range(self.mask_head.num_classes)] + else: + aug_masks = [] + for x, img_meta in zip(feats, img_metas): + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, + scale_factor, flip) + mask_rois = bbox2roi([_bboxes]) + mask_results = self._mask_forward(x, mask_rois) + mask_results['mask_pred'] = self._mask_point_forward_test( + x, mask_rois, det_labels, mask_results['mask_pred'], + img_meta) + # convert to numpy array to save memory + aug_masks.append( + mask_results['mask_pred'].sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) + + ori_shape = img_metas[0][0]['ori_shape'] + segm_result = self.mask_head.get_seg_masks( + merged_masks, + det_bboxes, + det_labels, + self.test_cfg, + ori_shape, + scale_factor=1.0, + rescale=False) + return segm_result + + def _onnx_get_fine_grained_point_feats(self, x, rois, rel_roi_points): + """Export the process of sampling fine grained feats to onnx. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + rois (Tensor): shape (num_rois, 5). + rel_roi_points (Tensor): A tensor of shape (num_rois, num_points, + 2) that contains [0, 1] x [0, 1] normalized coordinates of the + most uncertain points from the [mask_height, mask_width] grid. + + Returns: + Tensor: The fine grained features for each points, + has shape (num_rois, feats_channels, num_points). + """ + batch_size = x[0].shape[0] + num_rois = rois.shape[0] + fine_grained_feats = [] + for idx in range(self.mask_roi_extractor.num_inputs): + feats = x[idx] + spatial_scale = 1. 
/ float( + self.mask_roi_extractor.featmap_strides[idx]) + + rel_img_points = rel_roi_point_to_rel_img_point( + rois, rel_roi_points, feats, spatial_scale) + channels = feats.shape[1] + num_points = rel_img_points.shape[1] + rel_img_points = rel_img_points.reshape(batch_size, -1, num_points, + 2) + point_feats = point_sample(feats, rel_img_points) + point_feats = point_feats.transpose(1, 2).reshape( + num_rois, channels, num_points) + fine_grained_feats.append(point_feats) + return torch.cat(fine_grained_feats, dim=1) + + def _mask_point_onnx_export(self, x, rois, label_pred, mask_pred): + """Export mask refining process with point head to onnx. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + rois (Tensor): shape (num_rois, 5). + label_pred (Tensor): The predication class for each rois. + mask_pred (Tensor): The predication coarse masks of + shape (num_rois, num_classes, small_size, small_size). + + Returns: + Tensor: The refined masks of shape (num_rois, num_classes, + large_size, large_size). + """ + refined_mask_pred = mask_pred.clone() + for subdivision_step in range(self.test_cfg.subdivision_steps): + refined_mask_pred = F.interpolate( + refined_mask_pred, + scale_factor=self.test_cfg.scale_factor, + mode='bilinear', + align_corners=False) + # If `subdivision_num_points` is larger or equal to the + # resolution of the next step, then we can skip this step + num_rois, channels, mask_height, mask_width = \ + refined_mask_pred.shape + if (self.test_cfg.subdivision_num_points >= + self.test_cfg.scale_factor**2 * mask_height * mask_width + and + subdivision_step < self.test_cfg.subdivision_steps - 1): + continue + point_indices, rel_roi_points = \ + self.point_head.get_roi_rel_points_test( + refined_mask_pred, label_pred, cfg=self.test_cfg) + fine_grained_point_feats = self._onnx_get_fine_grained_point_feats( + x, rois, rel_roi_points) + coarse_point_feats = point_sample(mask_pred, rel_roi_points) + mask_point_pred = self.point_head(fine_grained_point_feats, + coarse_point_feats) + + point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) + refined_mask_pred = refined_mask_pred.reshape( + num_rois, channels, mask_height * mask_width) + + is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' + # avoid ScatterElements op in ONNX for TensorRT + if is_trt_backend: + mask_shape = refined_mask_pred.shape + point_shape = point_indices.shape + inds_dim0 = torch.arange(point_shape[0]).reshape( + point_shape[0], 1, 1).expand_as(point_indices) + inds_dim1 = torch.arange(point_shape[1]).reshape( + 1, point_shape[1], 1).expand_as(point_indices) + inds_1d = inds_dim0.reshape( + -1) * mask_shape[1] * mask_shape[2] + inds_dim1.reshape( + -1) * mask_shape[2] + point_indices.reshape(-1) + refined_mask_pred = refined_mask_pred.reshape(-1) + refined_mask_pred[inds_1d] = mask_point_pred.reshape(-1) + refined_mask_pred = refined_mask_pred.reshape(*mask_shape) + else: + refined_mask_pred = refined_mask_pred.scatter_( + 2, point_indices, mask_point_pred) + + refined_mask_pred = refined_mask_pred.view(num_rois, channels, + mask_height, mask_width) + + return refined_mask_pred + + def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs): + """Export mask branch to onnx which supports batch inference. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + img_metas (list[dict]): Image meta info. + det_bboxes (Tensor): Bboxes and corresponding scores. + has shape [N, num_bboxes, 5]. + det_labels (Tensor): class labels of + shape [N, num_bboxes]. 
+ + Returns: + Tensor: The segmentation results of shape [N, num_bboxes, + image_height, image_width]. + """ + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + raise RuntimeError('[ONNX Error] Can not record MaskHead ' + 'as it has not been executed this time') + batch_size = det_bboxes.size(0) + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. + det_bboxes = det_bboxes[..., :4] + batch_index = torch.arange( + det_bboxes.size(0), device=det_bboxes.device).float().view( + -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) + mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) + mask_rois = mask_rois.view(-1, 5) + mask_results = self._mask_forward(x, mask_rois) + mask_pred = mask_results['mask_pred'] + max_shape = img_metas[0]['img_shape_for_onnx'] + num_det = det_bboxes.shape[1] + det_bboxes = det_bboxes.reshape(-1, 4) + det_labels = det_labels.reshape(-1) + + mask_pred = self._mask_point_onnx_export(x, mask_rois, det_labels, + mask_pred) + + segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, + det_labels, self.test_cfg, + max_shape) + segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], + max_shape[1]) + return segm_results diff --git a/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/__init__.py b/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/__init__.py new file mode 100644 index 0000000..0f60214 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_roi_extractor import BaseRoIExtractor +from .generic_roi_extractor import GenericRoIExtractor +from .single_level_roi_extractor import SingleRoIExtractor + +__all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor'] diff --git a/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py b/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py new file mode 100644 index 0000000..8262975 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import torch +import torch.nn as nn +from mmcv import ops +from mmcv.runner import BaseModule + + +class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): + """Base class for RoI extractor. + + Args: + roi_layer (dict): Specify RoI layer type and arguments. + out_channels (int): Output channels of RoI layers. + featmap_strides (int): Strides of input feature maps. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + roi_layer, + out_channels, + featmap_strides, + init_cfg=None): + super(BaseRoIExtractor, self).__init__(init_cfg) + self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) + self.out_channels = out_channels + self.featmap_strides = featmap_strides + self.fp16_enabled = False + + @property + def num_inputs(self): + """int: Number of input feature maps.""" + return len(self.featmap_strides) + + def build_roi_layers(self, layer_cfg, featmap_strides): + """Build RoI operator to extract feature from each level feature map. + + Args: + layer_cfg (dict): Dictionary to construct and config RoI layer + operation. Options are modules under ``mmcv/ops`` such as + ``RoIAlign``. 
+ featmap_strides (List[int]): The stride of input feature map w.r.t + to the original image size, which would be used to scale RoI + coordinate (original image coordinate system) to feature + coordinate system. + + Returns: + nn.ModuleList: The RoI extractor modules for each level feature + map. + """ + + cfg = layer_cfg.copy() + layer_type = cfg.pop('type') + assert hasattr(ops, layer_type) + layer_cls = getattr(ops, layer_type) + roi_layers = nn.ModuleList( + [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) + return roi_layers + + def roi_rescale(self, rois, scale_factor): + """Scale RoI coordinates by scale factor. + + Args: + rois (torch.Tensor): RoI (Region of Interest), shape (n, 5) + scale_factor (float): Scale factor that RoI will be multiplied by. + + Returns: + torch.Tensor: Scaled RoI. + """ + + cx = (rois[:, 1] + rois[:, 3]) * 0.5 + cy = (rois[:, 2] + rois[:, 4]) * 0.5 + w = rois[:, 3] - rois[:, 1] + h = rois[:, 4] - rois[:, 2] + new_w = w * scale_factor + new_h = h * scale_factor + x1 = cx - new_w * 0.5 + x2 = cx + new_w * 0.5 + y1 = cy - new_h * 0.5 + y2 = cy + new_h * 0.5 + new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1) + return new_rois + + @abstractmethod + def forward(self, feats, rois, roi_scale_factor=None): + pass diff --git a/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py b/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py new file mode 100644 index 0000000..566d3de --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn.bricks import build_plugin_layer +from mmcv.runner import force_fp32 + +from mmdet.models.builder import ROI_EXTRACTORS +from .base_roi_extractor import BaseRoIExtractor + + +@ROI_EXTRACTORS.register_module() +class GenericRoIExtractor(BaseRoIExtractor): + """Extract RoI features from all level feature maps levels. + + This is the implementation of `A novel Region of Interest Extraction Layer + for Instance Segmentation `_. + + Args: + aggregation (str): The method to aggregate multiple feature maps. + Options are 'sum', 'concat'. Default: 'sum'. + pre_cfg (dict | None): Specify pre-processing modules. Default: None. + post_cfg (dict | None): Specify post-processing modules. Default: None. + kwargs (keyword arguments): Arguments that are the same + as :class:`BaseRoIExtractor`. 
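+
+    Example:
+        The following is only an illustrative sketch; the channel and stride
+        values are placeholders for a typical 4-level FPN setting rather
+        than defaults of this class.
+
+        >>> # assumes an FPN neck with strides 4/8/16/32 and 256 channels
+        >>> roi_extractor = GenericRoIExtractor(
+        ...     aggregation='sum',
+        ...     roi_layer=dict(type='RoIAlign', output_size=7,
+        ...                    sampling_ratio=2),
+        ...     out_channels=256,
+        ...     featmap_strides=[4, 8, 16, 32])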
+ """ + + def __init__(self, + aggregation='sum', + pre_cfg=None, + post_cfg=None, + **kwargs): + super(GenericRoIExtractor, self).__init__(**kwargs) + + assert aggregation in ['sum', 'concat'] + + self.aggregation = aggregation + self.with_post = post_cfg is not None + self.with_pre = pre_cfg is not None + # build pre/post processing modules + if self.with_post: + self.post_module = build_plugin_layer(post_cfg, '_post_module')[1] + if self.with_pre: + self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1] + + @force_fp32(apply_to=('feats', ), out_fp16=True) + def forward(self, feats, rois, roi_scale_factor=None): + """Forward function.""" + if len(feats) == 1: + return self.roi_layers[0](feats[0], rois) + + out_size = self.roi_layers[0].output_size + num_levels = len(feats) + roi_feats = feats[0].new_zeros( + rois.size(0), self.out_channels, *out_size) + + # some times rois is an empty tensor + if roi_feats.shape[0] == 0: + return roi_feats + + if roi_scale_factor is not None: + rois = self.roi_rescale(rois, roi_scale_factor) + + # mark the starting channels for concat mode + start_channels = 0 + for i in range(num_levels): + roi_feats_t = self.roi_layers[i](feats[i], rois) + end_channels = start_channels + roi_feats_t.size(1) + if self.with_pre: + # apply pre-processing to a RoI extracted from each layer + roi_feats_t = self.pre_module(roi_feats_t) + if self.aggregation == 'sum': + # and sum them all + roi_feats += roi_feats_t + else: + # and concat them along channel dimension + roi_feats[:, start_channels:end_channels] = roi_feats_t + # update channels starting position + start_channels = end_channels + # check if concat channels match at the end + if self.aggregation == 'concat': + assert start_channels == self.out_channels + + if self.with_post: + # apply post-processing before return the result + roi_feats = self.post_module(roi_feats) + return roi_feats diff --git a/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py b/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py new file mode 100644 index 0000000..1b569ce --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.runner import force_fp32 + +from mmdet.models.builder import ROI_EXTRACTORS +from .base_roi_extractor import BaseRoIExtractor + + +@ROI_EXTRACTORS.register_module() +class SingleRoIExtractor(BaseRoIExtractor): + """Extract RoI features from a single level feature map. + + If there are multiple input feature levels, each RoI is mapped to a level + according to its scale. The mapping rule is proposed in + `FPN `_. + + Args: + roi_layer (dict): Specify RoI layer type and arguments. + out_channels (int): Output channels of RoI layers. + featmap_strides (List[int]): Strides of input feature maps. + finest_scale (int): Scale threshold of mapping to level 0. Default: 56. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + roi_layer, + out_channels, + featmap_strides, + finest_scale=56, + init_cfg=None): + super(SingleRoIExtractor, self).__init__(roi_layer, out_channels, + featmap_strides, init_cfg) + self.finest_scale = finest_scale + + def map_roi_levels(self, rois, num_levels): + """Map rois to corresponding feature levels by scales. 
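+
+        Concretely, an RoI of scale ``sqrt(w * h)`` is mapped to level
+        ``floor(log2(scale / finest_scale))``, clamped to
+        ``[0, num_levels - 1]``, so larger RoIs are pooled from coarser
+        feature maps, which yields: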
+ + - scale < finest_scale * 2: level 0 + - finest_scale * 2 <= scale < finest_scale * 4: level 1 + - finest_scale * 4 <= scale < finest_scale * 8: level 2 + - scale >= finest_scale * 8: level 3 + + Args: + rois (Tensor): Input RoIs, shape (k, 5). + num_levels (int): Total level number. + + Returns: + Tensor: Level index (0-based) of each RoI, shape (k, ) + """ + scale = torch.sqrt( + (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2])) + target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6)) + target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long() + return target_lvls + + @force_fp32(apply_to=('feats', ), out_fp16=True) + def forward(self, feats, rois, roi_scale_factor=None): + """Forward function.""" + out_size = self.roi_layers[0].output_size + num_levels = len(feats) + expand_dims = (-1, self.out_channels * out_size[0] * out_size[1]) + if torch.onnx.is_in_onnx_export(): + # Work around to export mask-rcnn to onnx + roi_feats = rois[:, :1].clone().detach() + roi_feats = roi_feats.expand(*expand_dims) + roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size) + roi_feats = roi_feats * 0 + else: + roi_feats = feats[0].new_zeros( + rois.size(0), self.out_channels, *out_size) + # TODO: remove this when parrots supports + if torch.__version__ == 'parrots': + roi_feats.requires_grad = True + + if num_levels == 1: + if len(rois) == 0: + return roi_feats + return self.roi_layers[0](feats[0], rois) + + target_lvls = self.map_roi_levels(rois, num_levels) + + if roi_scale_factor is not None: + rois = self.roi_rescale(rois, roi_scale_factor) + + for i in range(num_levels): + mask = target_lvls == i + if torch.onnx.is_in_onnx_export(): + # To keep all roi_align nodes exported to onnx + # and skip nonzero op + mask = mask.float().unsqueeze(-1) + # select target level rois and reset the rest rois to zero. + rois_i = rois.clone().detach() + rois_i *= mask + mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape) + roi_feats_t = self.roi_layers[i](feats[i], rois_i) + roi_feats_t *= mask_exp + roi_feats += roi_feats_t + continue + inds = mask.nonzero(as_tuple=False).squeeze(1) + if inds.numel() > 0: + rois_ = rois[inds] + roi_feats_t = self.roi_layers[i](feats[i], rois_) + roi_feats[inds] = roi_feats_t + else: + # Sometimes some pyramid levels will not be used for RoI + # feature extraction and this will cause an incomplete + # computation graph in one GPU, which is different from those + # in other GPUs and will cause a hanging error. + # Therefore, we add it to ensure each feature pyramid is + # included in the computation graph to avoid runtime bugs. + roi_feats += sum( + x.view(-1)[0] + for x in self.parameters()) * 0. + feats[i].sum() * 0. + return roi_feats diff --git a/downstream/mmdetection/mmdet/models/roi_heads/scnet_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/scnet_roi_head.py new file mode 100644 index 0000000..705430a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/scnet_roi_head.py @@ -0,0 +1,605 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import torch.nn.functional as F + +from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, + merge_aug_masks, multiclass_nms) +from ..builder import HEADS, build_head, build_roi_extractor +from ..utils.brick_wrappers import adaptive_avg_pool2d +from .cascade_roi_head import CascadeRoIHead + + +@HEADS.register_module() +class SCNetRoIHead(CascadeRoIHead): + """RoIHead for `SCNet `_. 
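+
+    Compared with a plain cascade RoI head, SCNet fuses semantic segmentation
+    and global context features into the RoI features, and relays the shared
+    box-branch feature to the mask branch through ``feat_relay_head``.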
+ + Args: + num_stages (int): number of cascade stages. + stage_loss_weights (list): loss weight of cascade stages. + semantic_roi_extractor (dict): config to init semantic roi extractor. + semantic_head (dict): config to init semantic head. + feat_relay_head (dict): config to init feature_relay_head. + glbctx_head (dict): config to init global context head. + """ + + def __init__(self, + num_stages, + stage_loss_weights, + semantic_roi_extractor=None, + semantic_head=None, + feat_relay_head=None, + glbctx_head=None, + **kwargs): + super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights, + **kwargs) + assert self.with_bbox and self.with_mask + assert not self.with_shared_head # shared head is not supported + + if semantic_head is not None: + self.semantic_roi_extractor = build_roi_extractor( + semantic_roi_extractor) + self.semantic_head = build_head(semantic_head) + + if feat_relay_head is not None: + self.feat_relay_head = build_head(feat_relay_head) + + if glbctx_head is not None: + self.glbctx_head = build_head(glbctx_head) + + def init_mask_head(self, mask_roi_extractor, mask_head): + """Initialize ``mask_head``""" + if mask_roi_extractor is not None: + self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) + self.mask_head = build_head(mask_head) + + @property + def with_semantic(self): + """bool: whether the head has semantic head""" + return hasattr(self, + 'semantic_head') and self.semantic_head is not None + + @property + def with_feat_relay(self): + """bool: whether the head has feature relay head""" + return (hasattr(self, 'feat_relay_head') + and self.feat_relay_head is not None) + + @property + def with_glbctx(self): + """bool: whether the head has global context head""" + return hasattr(self, 'glbctx_head') and self.glbctx_head is not None + + def _fuse_glbctx(self, roi_feats, glbctx_feat, rois): + """Fuse global context feats with roi feats.""" + assert roi_feats.size(0) == rois.size(0) + img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long() + fused_feats = torch.zeros_like(roi_feats) + for img_id in img_inds: + inds = (rois[:, 0] == img_id.item()) + fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id] + return fused_feats + + def _slice_pos_feats(self, feats, sampling_results): + """Get features from pos rois.""" + num_rois = [res.bboxes.size(0) for res in sampling_results] + num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results] + inds = torch.zeros(sum(num_rois), dtype=torch.bool) + start = 0 + for i in range(len(num_rois)): + start = 0 if i == 0 else start + num_rois[i - 1] + stop = start + num_pos_rois[i] + inds[start:stop] = 1 + sliced_feats = feats[inds] + return sliced_feats + + def _bbox_forward(self, + stage, + x, + rois, + semantic_feat=None, + glbctx_feat=None): + """Box head forward function used in both training and testing.""" + bbox_roi_extractor = self.bbox_roi_extractor[stage] + bbox_head = self.bbox_head[stage] + bbox_feats = bbox_roi_extractor( + x[:len(bbox_roi_extractor.featmap_strides)], rois) + if self.with_semantic and semantic_feat is not None: + bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], + rois) + if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: + bbox_semantic_feat = adaptive_avg_pool2d( + bbox_semantic_feat, bbox_feats.shape[-2:]) + bbox_feats += bbox_semantic_feat + if self.with_glbctx and glbctx_feat is not None: + bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois) + cls_score, bbox_pred, relayed_feat = bbox_head( + bbox_feats, return_shared_feat=True) + 
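+        # `relayed_feat` is the extra feature returned by the bbox head when
+        # `return_shared_feat=True`; it is later transformed by
+        # `feat_relay_head` and added to the mask features in `_mask_forward`.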
+ bbox_results = dict( + cls_score=cls_score, + bbox_pred=bbox_pred, + relayed_feat=relayed_feat) + return bbox_results + + def _mask_forward(self, + x, + rois, + semantic_feat=None, + glbctx_feat=None, + relayed_feat=None): + """Mask head forward function used in both training and testing.""" + mask_feats = self.mask_roi_extractor( + x[:self.mask_roi_extractor.num_inputs], rois) + if self.with_semantic and semantic_feat is not None: + mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], + rois) + if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: + mask_semantic_feat = F.adaptive_avg_pool2d( + mask_semantic_feat, mask_feats.shape[-2:]) + mask_feats += mask_semantic_feat + if self.with_glbctx and glbctx_feat is not None: + mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois) + if self.with_feat_relay and relayed_feat is not None: + mask_feats = mask_feats + relayed_feat + mask_pred = self.mask_head(mask_feats) + mask_results = dict(mask_pred=mask_pred) + + return mask_results + + def _bbox_forward_train(self, + stage, + x, + sampling_results, + gt_bboxes, + gt_labels, + rcnn_train_cfg, + semantic_feat=None, + glbctx_feat=None): + """Run forward function and calculate loss for box head in training.""" + bbox_head = self.bbox_head[stage] + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_results = self._bbox_forward( + stage, + x, + rois, + semantic_feat=semantic_feat, + glbctx_feat=glbctx_feat) + + bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, + gt_labels, rcnn_train_cfg) + loss_bbox = bbox_head.loss(bbox_results['cls_score'], + bbox_results['bbox_pred'], rois, + *bbox_targets) + + bbox_results.update( + loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) + return bbox_results + + def _mask_forward_train(self, + x, + sampling_results, + gt_masks, + rcnn_train_cfg, + semantic_feat=None, + glbctx_feat=None, + relayed_feat=None): + """Run forward function and calculate loss for mask head in + training.""" + pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + mask_results = self._mask_forward( + x, + pos_rois, + semantic_feat=semantic_feat, + glbctx_feat=glbctx_feat, + relayed_feat=relayed_feat) + + mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, + rcnn_train_cfg) + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + loss_mask = self.mask_head.loss(mask_results['mask_pred'], + mask_targets, pos_labels) + + mask_results = loss_mask + return mask_results + + def forward_train(self, + x, + img_metas, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + gt_semantic_seg=None): + """ + Args: + x (list[Tensor]): list of multi-level img features. + img_metas (list[dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + proposal_list (list[Tensors]): list of region proposals. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + gt_bboxes_ignore (None, list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + gt_masks (None, Tensor) : true segmentation masks for each box + used if the architecture supports a segmentation task. 
+ gt_semantic_seg (None, list[Tensor]): semantic segmentation masks + used if the architecture supports semantic segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + losses = dict() + + # semantic segmentation branch + if self.with_semantic: + semantic_pred, semantic_feat = self.semantic_head(x) + loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) + losses['loss_semantic_seg'] = loss_seg + else: + semantic_feat = None + + # global context branch + if self.with_glbctx: + mc_pred, glbctx_feat = self.glbctx_head(x) + loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels) + losses['loss_glbctx'] = loss_glbctx + else: + glbctx_feat = None + + for i in range(self.num_stages): + self.current_stage = i + rcnn_train_cfg = self.train_cfg[i] + lw = self.stage_loss_weights[i] + + # assign gts and sample proposals + sampling_results = [] + bbox_assigner = self.bbox_assigner[i] + bbox_sampler = self.bbox_sampler[i] + num_imgs = len(img_metas) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + + for j in range(num_imgs): + assign_result = bbox_assigner.assign(proposal_list[j], + gt_bboxes[j], + gt_bboxes_ignore[j], + gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + bbox_results = \ + self._bbox_forward_train( + i, x, sampling_results, gt_bboxes, gt_labels, + rcnn_train_cfg, semantic_feat, glbctx_feat) + roi_labels = bbox_results['bbox_targets'][0] + + for name, value in bbox_results['loss_bbox'].items(): + losses[f's{i}.{name}'] = ( + value * lw if 'loss' in name else value) + + # refine boxes + if i < self.num_stages - 1: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + with torch.no_grad(): + proposal_list = self.bbox_head[i].refine_bboxes( + bbox_results['rois'], roi_labels, + bbox_results['bbox_pred'], pos_is_gts, img_metas) + + if self.with_feat_relay: + relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'], + sampling_results) + relayed_feat = self.feat_relay_head(relayed_feat) + else: + relayed_feat = None + + mask_results = self._mask_forward_train(x, sampling_results, gt_masks, + rcnn_train_cfg, semantic_feat, + glbctx_feat, relayed_feat) + mask_lw = sum(self.stage_loss_weights) + losses['loss_mask'] = mask_lw * mask_results['loss_mask'] + + return losses + + def simple_test(self, x, proposal_list, img_metas, rescale=False): + """Test without augmentation. + + Args: + x (tuple[Tensor]): Features from upstream network. Each + has shape (batch_size, c, h, w). + proposal_list (list(Tensor)): Proposals from rpn head. + Each has shape (num_proposals, 5), last dimension + 5 represent (x1, y1, x2, y2, score). + img_metas (list[dict]): Meta information of images. + rescale (bool): Whether to rescale the results to + the original image. Default: True. + + Returns: + list[list[np.ndarray]] or list[tuple]: When no mask branch, + it is bbox results of each image and classes with type + `list[list[np.ndarray]]`. The outer list + corresponds to each image. The inner list + corresponds to each class. When the model has mask branch, + it contains bbox results and mask results. + The outer list corresponds to each image, and first element + of tuple is bbox results, second element is mask results. 
+ """ + if self.with_semantic: + _, semantic_feat = self.semantic_head(x) + else: + semantic_feat = None + + if self.with_glbctx: + mc_pred, glbctx_feat = self.glbctx_head(x) + else: + glbctx_feat = None + + num_imgs = len(proposal_list) + img_shapes = tuple(meta['img_shape'] for meta in img_metas) + ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + # "ms" in variable names means multi-stage + ms_scores = [] + rcnn_test_cfg = self.test_cfg + + rois = bbox2roi(proposal_list) + + if rois.shape[0] == 0: + # There is no proposal in the whole batch + bbox_results = [[ + np.zeros((0, 5), dtype=np.float32) + for _ in range(self.bbox_head[-1].num_classes) + ]] * num_imgs + + if self.with_mask: + mask_classes = self.mask_head.num_classes + segm_results = [[[] for _ in range(mask_classes)] + for _ in range(num_imgs)] + results = list(zip(bbox_results, segm_results)) + else: + results = bbox_results + + return results + + for i in range(self.num_stages): + bbox_head = self.bbox_head[i] + bbox_results = self._bbox_forward( + i, + x, + rois, + semantic_feat=semantic_feat, + glbctx_feat=glbctx_feat) + # split batch bbox prediction back to each image + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + num_proposals_per_img = tuple(len(p) for p in proposal_list) + rois = rois.split(num_proposals_per_img, 0) + cls_score = cls_score.split(num_proposals_per_img, 0) + bbox_pred = bbox_pred.split(num_proposals_per_img, 0) + ms_scores.append(cls_score) + + if i < self.num_stages - 1: + refine_rois_list = [] + for j in range(num_imgs): + if rois[j].shape[0] > 0: + bbox_label = cls_score[j][:, :-1].argmax(dim=1) + refine_rois = bbox_head.regress_by_class( + rois[j], bbox_label, bbox_pred[j], img_metas[j]) + refine_rois_list.append(refine_rois) + rois = torch.cat(refine_rois_list) + + # average scores of each image by stages + cls_score = [ + sum([score[i] for score in ms_scores]) / float(len(ms_scores)) + for i in range(num_imgs) + ] + + # apply bbox post-processing to each image individually + det_bboxes = [] + det_labels = [] + for i in range(num_imgs): + det_bbox, det_label = self.bbox_head[-1].get_bboxes( + rois[i], + cls_score[i], + bbox_pred[i], + img_shapes[i], + scale_factors[i], + rescale=rescale, + cfg=rcnn_test_cfg) + det_bboxes.append(det_bbox) + det_labels.append(det_label) + det_bbox_results = [ + bbox2result(det_bboxes[i], det_labels[i], + self.bbox_head[-1].num_classes) + for i in range(num_imgs) + ] + + if self.with_mask: + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + mask_classes = self.mask_head.num_classes + det_segm_results = [[[] for _ in range(mask_classes)] + for _ in range(num_imgs)] + else: + if rescale and not isinstance(scale_factors[0], float): + scale_factors = [ + torch.from_numpy(scale_factor).to(det_bboxes[0].device) + for scale_factor in scale_factors + ] + _bboxes = [ + det_bboxes[i][:, :4] * + scale_factors[i] if rescale else det_bboxes[i] + for i in range(num_imgs) + ] + mask_rois = bbox2roi(_bboxes) + + # get relay feature on mask_rois + bbox_results = self._bbox_forward( + -1, + x, + mask_rois, + semantic_feat=semantic_feat, + glbctx_feat=glbctx_feat) + relayed_feat = bbox_results['relayed_feat'] + relayed_feat = self.feat_relay_head(relayed_feat) + + mask_results = self._mask_forward( + x, + mask_rois, + semantic_feat=semantic_feat, + glbctx_feat=glbctx_feat, + relayed_feat=relayed_feat) + mask_pred = mask_results['mask_pred'] + + # split batch 
mask prediction back to each image + num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) + mask_preds = mask_pred.split(num_bbox_per_img, 0) + + # apply mask post-processing to each image individually + det_segm_results = [] + for i in range(num_imgs): + if det_bboxes[i].shape[0] == 0: + det_segm_results.append( + [[] for _ in range(self.mask_head.num_classes)]) + else: + segm_result = self.mask_head.get_seg_masks( + mask_preds[i], _bboxes[i], det_labels[i], + self.test_cfg, ori_shapes[i], scale_factors[i], + rescale) + det_segm_results.append(segm_result) + + # return results + if self.with_mask: + return list(zip(det_bbox_results, det_segm_results)) + else: + return det_bbox_results + + def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): + if self.with_semantic: + semantic_feats = [ + self.semantic_head(feat)[1] for feat in img_feats + ] + else: + semantic_feats = [None] * len(img_metas) + + if self.with_glbctx: + glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats] + else: + glbctx_feats = [None] * len(img_metas) + + rcnn_test_cfg = self.test_cfg + aug_bboxes = [] + aug_scores = [] + for x, img_meta, semantic_feat, glbctx_feat in zip( + img_feats, img_metas, semantic_feats, glbctx_feats): + # only one image in the batch + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + + proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, + scale_factor, flip) + # "ms" in variable names means multi-stage + ms_scores = [] + + rois = bbox2roi([proposals]) + + if rois.shape[0] == 0: + # There is no proposal in the single image + aug_bboxes.append(rois.new_zeros(0, 4)) + aug_scores.append(rois.new_zeros(0, 1)) + continue + + for i in range(self.num_stages): + bbox_head = self.bbox_head[i] + bbox_results = self._bbox_forward( + i, + x, + rois, + semantic_feat=semantic_feat, + glbctx_feat=glbctx_feat) + ms_scores.append(bbox_results['cls_score']) + if i < self.num_stages - 1: + bbox_label = bbox_results['cls_score'].argmax(dim=1) + rois = bbox_head.regress_by_class( + rois, bbox_label, bbox_results['bbox_pred'], + img_meta[0]) + + cls_score = sum(ms_scores) / float(len(ms_scores)) + bboxes, scores = self.bbox_head[-1].get_bboxes( + rois, + cls_score, + bbox_results['bbox_pred'], + img_shape, + scale_factor, + rescale=False, + cfg=None) + aug_bboxes.append(bboxes) + aug_scores.append(scores) + + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) + det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, + rcnn_test_cfg.score_thr, + rcnn_test_cfg.nms, + rcnn_test_cfg.max_per_img) + + det_bbox_results = bbox2result(det_bboxes, det_labels, + self.bbox_head[-1].num_classes) + + if self.with_mask: + if det_bboxes.shape[0] == 0: + det_segm_results = [[] + for _ in range(self.mask_head.num_classes)] + else: + aug_masks = [] + for x, img_meta, semantic_feat, glbctx_feat in zip( + img_feats, img_metas, semantic_feats, glbctx_feats): + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, + scale_factor, flip) + mask_rois = bbox2roi([_bboxes]) + # get relay feature on mask_rois + bbox_results = self._bbox_forward( + -1, + x, + mask_rois, + semantic_feat=semantic_feat, + glbctx_feat=glbctx_feat) + relayed_feat = bbox_results['relayed_feat'] + relayed_feat = 
self.feat_relay_head(relayed_feat) + mask_results = self._mask_forward( + x, + mask_rois, + semantic_feat=semantic_feat, + glbctx_feat=glbctx_feat, + relayed_feat=relayed_feat) + mask_pred = mask_results['mask_pred'] + aug_masks.append(mask_pred.sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, img_metas, + self.test_cfg) + ori_shape = img_metas[0][0]['ori_shape'] + det_segm_results = self.mask_head.get_seg_masks( + merged_masks, + det_bboxes, + det_labels, + rcnn_test_cfg, + ori_shape, + scale_factor=1.0, + rescale=False) + return [(det_bbox_results, det_segm_results)] + else: + return [det_bbox_results] diff --git a/downstream/mmdetection/mmdet/models/roi_heads/shared_heads/__init__.py b/downstream/mmdetection/mmdet/models/roi_heads/shared_heads/__init__.py new file mode 100644 index 0000000..d56636a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/shared_heads/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .res_layer import ResLayer + +__all__ = ['ResLayer'] diff --git a/downstream/mmdetection/mmdet/models/roi_heads/shared_heads/res_layer.py b/downstream/mmdetection/mmdet/models/roi_heads/shared_heads/res_layer.py new file mode 100644 index 0000000..bef00a0 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/shared_heads/res_layer.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +from mmcv.runner import BaseModule, auto_fp16 + +from mmdet.models.backbones import ResNet +from mmdet.models.builder import SHARED_HEADS +from mmdet.models.utils import ResLayer as _ResLayer + + +@SHARED_HEADS.register_module() +class ResLayer(BaseModule): + + def __init__(self, + depth, + stage=3, + stride=2, + dilation=1, + style='pytorch', + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + with_cp=False, + dcn=None, + pretrained=None, + init_cfg=None): + super(ResLayer, self).__init__(init_cfg) + + self.norm_eval = norm_eval + self.norm_cfg = norm_cfg + self.stage = stage + self.fp16_enabled = False + block, stage_blocks = ResNet.arch_settings[depth] + stage_block = stage_blocks[stage] + planes = 64 * 2**stage + inplanes = 64 * 2**(stage - 1) * block.expansion + + res_layer = _ResLayer( + block, + inplanes, + planes, + stage_block, + stride=stride, + dilation=dilation, + style=style, + with_cp=with_cp, + norm_cfg=self.norm_cfg, + dcn=dcn) + self.add_module(f'layer{stage + 1}', res_layer) + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + @auto_fp16() + def forward(self, x): + res_layer = getattr(self, f'layer{self.stage + 1}') + out = res_layer(x) + return out + + def train(self, mode=True): + super(ResLayer, self).train(mode) + if self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/downstream/mmdetection/mmdet/models/roi_heads/sparse_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/sparse_roi_head.py new file mode 100644 index 0000000..2613469 --- /dev/null +++ 
b/downstream/mmdetection/mmdet/models/roi_heads/sparse_roi_head.py @@ -0,0 +1,424 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh +from mmdet.core.bbox.samplers import PseudoSampler +from ..builder import HEADS +from .cascade_roi_head import CascadeRoIHead + + +@HEADS.register_module() +class SparseRoIHead(CascadeRoIHead): + r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with + Learnable Proposals `_ + and `Instances as Queries `_ + + Args: + num_stages (int): Number of stage whole iterative process. + Defaults to 6. + stage_loss_weights (Tuple[float]): The loss + weight of each stage. By default all stages have + the same weight 1. + bbox_roi_extractor (dict): Config of box roi extractor. + mask_roi_extractor (dict): Config of mask roi extractor. + bbox_head (dict): Config of box head. + mask_head (dict): Config of mask head. + train_cfg (dict, optional): Configuration information in train stage. + Defaults to None. + test_cfg (dict, optional): Configuration information in test stage. + Defaults to None. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + """ + + def __init__(self, + num_stages=6, + stage_loss_weights=(1, 1, 1, 1, 1, 1), + proposal_feature_channel=256, + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict( + type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_roi_extractor=None, + bbox_head=dict( + type='DIIHead', + num_classes=80, + num_fcs=2, + num_heads=8, + num_cls_fcs=1, + num_reg_fcs=3, + feedforward_channels=2048, + hidden_channels=256, + dropout=0.0, + roi_feat_size=7, + ffn_act_cfg=dict(type='ReLU', inplace=True)), + mask_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + assert bbox_roi_extractor is not None + assert bbox_head is not None + assert len(stage_loss_weights) == num_stages + self.num_stages = num_stages + self.stage_loss_weights = stage_loss_weights + self.proposal_feature_channel = proposal_feature_channel + super(SparseRoIHead, self).__init__( + num_stages, + stage_loss_weights, + bbox_roi_extractor=bbox_roi_extractor, + mask_roi_extractor=mask_roi_extractor, + bbox_head=bbox_head, + mask_head=mask_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) + # train_cfg would be None when run the test.py + if train_cfg is not None: + for stage in range(num_stages): + assert isinstance(self.bbox_sampler[stage], PseudoSampler), \ + 'Sparse R-CNN and QueryInst only support `PseudoSampler`' + + def _bbox_forward(self, stage, x, rois, object_feats, img_metas): + """Box head forward function used in both training and testing. Returns + all regression, classification results and a intermediate feature. + + Args: + stage (int): The index of current stage in + iterative process. + x (List[Tensor]): List of FPN features + rois (Tensor): Rois in total batch. With shape (num_proposal, 5). + the last dimension 5 represents (img_index, x1, y1, x2, y2). + object_feats (Tensor): The object feature extracted from + the previous stage. + img_metas (dict): meta information of images. 
+ + Returns: + dict[str, Tensor]: a dictionary of bbox head outputs, + Containing the following results: + + - cls_score (Tensor): The score of each class, has + shape (batch_size, num_proposals, num_classes) + when use focal loss or + (batch_size, num_proposals, num_classes+1) + otherwise. + - decode_bbox_pred (Tensor): The regression results + with shape (batch_size, num_proposal, 4). + The last dimension 4 represents + [tl_x, tl_y, br_x, br_y]. + - object_feats (Tensor): The object feature extracted + from current stage + - detach_cls_score_list (list[Tensor]): The detached + classification results, length is batch_size, and + each tensor has shape (num_proposal, num_classes). + - detach_proposal_list (list[tensor]): The detached + regression results, length is batch_size, and each + tensor has shape (num_proposal, 4). The last + dimension 4 represents [tl_x, tl_y, br_x, br_y]. + """ + num_imgs = len(img_metas) + bbox_roi_extractor = self.bbox_roi_extractor[stage] + bbox_head = self.bbox_head[stage] + bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], + rois) + cls_score, bbox_pred, object_feats, attn_feats = bbox_head( + bbox_feats, object_feats) + proposal_list = self.bbox_head[stage].refine_bboxes( + rois, + rois.new_zeros(len(rois)), # dummy arg + bbox_pred.view(-1, bbox_pred.size(-1)), + [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)], + img_metas) + bbox_results = dict( + cls_score=cls_score, + decode_bbox_pred=torch.cat(proposal_list), + object_feats=object_feats, + attn_feats=attn_feats, + # detach then use it in label assign + detach_cls_score_list=[ + cls_score[i].detach() for i in range(num_imgs) + ], + detach_proposal_list=[item.detach() for item in proposal_list]) + + return bbox_results + + def _mask_forward(self, stage, x, rois, attn_feats): + """Mask head forward function used in both training and testing.""" + mask_roi_extractor = self.mask_roi_extractor[stage] + mask_head = self.mask_head[stage] + mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], + rois) + # do not support caffe_c4 model anymore + mask_pred = mask_head(mask_feats, attn_feats) + + mask_results = dict(mask_pred=mask_pred) + return mask_results + + def _mask_forward_train(self, stage, x, attn_feats, sampling_results, + gt_masks, rcnn_train_cfg): + """Run forward function and calculate loss for mask head in + training.""" + pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + attn_feats = torch.cat([ + feats[res.pos_inds] + for (feats, res) in zip(attn_feats, sampling_results) + ]) + mask_results = self._mask_forward(stage, x, pos_rois, attn_feats) + + mask_targets = self.mask_head[stage].get_targets( + sampling_results, gt_masks, rcnn_train_cfg) + + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + + loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], + mask_targets, pos_labels) + mask_results.update(loss_mask) + return mask_results + + def forward_train(self, + x, + proposal_boxes, + proposal_features, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + imgs_whwh=None, + gt_masks=None): + """Forward function in training stage. + + Args: + x (list[Tensor]): list of multi-level img features. 
+ proposals (Tensor): Decoded proposal bboxes, has shape + (batch_size, num_proposals, 4) + proposal_features (Tensor): Expanded proposal + features, has shape + (batch_size, num_proposals, proposal_feature_channel) + img_metas (list[dict]): list of image info dict where + each dict has: 'img_shape', 'scale_factor', 'flip', + and may also contain 'filename', 'ori_shape', + 'pad_shape', and 'img_norm_cfg'. For details on the + values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + imgs_whwh (Tensor): Tensor with shape (batch_size, 4), + the dimension means + [img_width,img_height, img_width, img_height]. + gt_masks (None | Tensor) : true segmentation masks for each box + used if the architecture supports a segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components of all stage. + """ + + num_imgs = len(img_metas) + num_proposals = proposal_boxes.size(1) + imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1) + all_stage_bbox_results = [] + proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] + object_feats = proposal_features + all_stage_loss = {} + for stage in range(self.num_stages): + rois = bbox2roi(proposal_list) + bbox_results = self._bbox_forward(stage, x, rois, object_feats, + img_metas) + all_stage_bbox_results.append(bbox_results) + if gt_bboxes_ignore is None: + # TODO support ignore + gt_bboxes_ignore = [None for _ in range(num_imgs)] + sampling_results = [] + cls_pred_list = bbox_results['detach_cls_score_list'] + proposal_list = bbox_results['detach_proposal_list'] + for i in range(num_imgs): + normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] / + imgs_whwh[i]) + assign_result = self.bbox_assigner[stage].assign( + normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i], + gt_labels[i], img_metas[i]) + sampling_result = self.bbox_sampler[stage].sample( + assign_result, proposal_list[i], gt_bboxes[i]) + sampling_results.append(sampling_result) + bbox_targets = self.bbox_head[stage].get_targets( + sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage], + True) + cls_score = bbox_results['cls_score'] + decode_bbox_pred = bbox_results['decode_bbox_pred'] + + single_stage_loss = self.bbox_head[stage].loss( + cls_score.view(-1, cls_score.size(-1)), + decode_bbox_pred.view(-1, 4), + *bbox_targets, + imgs_whwh=imgs_whwh) + + if self.with_mask: + mask_results = self._mask_forward_train( + stage, x, bbox_results['attn_feats'], sampling_results, + gt_masks, self.train_cfg[stage]) + single_stage_loss['loss_mask'] = mask_results['loss_mask'] + + for key, value in single_stage_loss.items(): + all_stage_loss[f'stage{stage}_{key}'] = value * \ + self.stage_loss_weights[stage] + object_feats = bbox_results['object_feats'] + + return all_stage_loss + + def simple_test(self, + x, + proposal_boxes, + proposal_features, + img_metas, + imgs_whwh, + rescale=False): + """Test without augmentation. + + Args: + x (list[Tensor]): list of multi-level img features. + proposal_boxes (Tensor): Decoded proposal bboxes, has shape + (batch_size, num_proposals, 4) + proposal_features (Tensor): Expanded proposal + features, has shape + (batch_size, num_proposals, proposal_feature_channel) + img_metas (dict): meta information of images. 
+ imgs_whwh (Tensor): Tensor with shape (batch_size, 4), + the dimension means + [img_width,img_height, img_width, img_height]. + rescale (bool): If True, return boxes in original image + space. Defaults to False. + + Returns: + list[list[np.ndarray]] or list[tuple]: When no mask branch, + it is bbox results of each image and classes with type + `list[list[np.ndarray]]`. The outer list + corresponds to each image. The inner list + corresponds to each class. When the model has a mask branch, + it is a list[tuple] that contains bbox results and mask results. + The outer list corresponds to each image, and first element + of tuple is bbox results, second element is mask results. + """ + assert self.with_bbox, 'Bbox head must be implemented.' + # Decode initial proposals + num_imgs = len(img_metas) + proposal_list = [proposal_boxes[i] for i in range(num_imgs)] + ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + object_feats = proposal_features + if all([proposal.shape[0] == 0 for proposal in proposal_list]): + # There is no proposal in the whole batch + bbox_results = [[ + np.zeros((0, 5), dtype=np.float32) + for i in range(self.bbox_head[-1].num_classes) + ]] * num_imgs + return bbox_results + + for stage in range(self.num_stages): + rois = bbox2roi(proposal_list) + bbox_results = self._bbox_forward(stage, x, rois, object_feats, + img_metas) + object_feats = bbox_results['object_feats'] + cls_score = bbox_results['cls_score'] + proposal_list = bbox_results['detach_proposal_list'] + + if self.with_mask: + rois = bbox2roi(proposal_list) + mask_results = self._mask_forward(stage, x, rois, + bbox_results['attn_feats']) + mask_results['mask_pred'] = mask_results['mask_pred'].reshape( + num_imgs, -1, *mask_results['mask_pred'].size()[1:]) + + num_classes = self.bbox_head[-1].num_classes + det_bboxes = [] + det_labels = [] + + if self.bbox_head[-1].loss_cls.use_sigmoid: + cls_score = cls_score.sigmoid() + else: + cls_score = cls_score.softmax(-1)[..., :-1] + + for img_id in range(num_imgs): + cls_score_per_img = cls_score[img_id] + scores_per_img, topk_indices = cls_score_per_img.flatten( + 0, 1).topk( + self.test_cfg.max_per_img, sorted=False) + labels_per_img = topk_indices % num_classes + bbox_pred_per_img = proposal_list[img_id][topk_indices // + num_classes] + if rescale: + scale_factor = img_metas[img_id]['scale_factor'] + bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor) + det_bboxes.append( + torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1)) + det_labels.append(labels_per_img) + + bbox_results = [ + bbox2result(det_bboxes[i], det_labels[i], num_classes) + for i in range(num_imgs) + ] + + if self.with_mask: + if rescale and not isinstance(scale_factors[0], float): + scale_factors = [ + torch.from_numpy(scale_factor).to(det_bboxes[0].device) + for scale_factor in scale_factors + ] + _bboxes = [ + det_bboxes[i][:, :4] * + scale_factors[i] if rescale else det_bboxes[i][:, :4] + for i in range(len(det_bboxes)) + ] + segm_results = [] + mask_pred = mask_results['mask_pred'] + for img_id in range(num_imgs): + mask_pred_per_img = mask_pred[img_id].flatten(0, + 1)[topk_indices] + mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat( + 1, num_classes, 1, 1) + segm_result = self.mask_head[-1].get_seg_masks( + mask_pred_per_img, _bboxes[img_id], det_labels[img_id], + self.test_cfg, ori_shapes[img_id], scale_factors[img_id], + rescale) + segm_results.append(segm_result) + + if 
self.with_mask: + results = list(zip(bbox_results, segm_results)) + else: + results = bbox_results + + return results + + def aug_test(self, features, proposal_list, img_metas, rescale=False): + raise NotImplementedError( + 'Sparse R-CNN and QueryInst does not support `aug_test`') + + def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas): + """Dummy forward function when do the flops computing.""" + all_stage_bbox_results = [] + proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] + object_feats = proposal_features + if self.with_bbox: + for stage in range(self.num_stages): + rois = bbox2roi(proposal_list) + bbox_results = self._bbox_forward(stage, x, rois, object_feats, + img_metas) + + all_stage_bbox_results.append((bbox_results, )) + proposal_list = bbox_results['detach_proposal_list'] + object_feats = bbox_results['object_feats'] + + if self.with_mask: + rois = bbox2roi(proposal_list) + mask_results = self._mask_forward( + stage, x, rois, bbox_results['attn_feats']) + all_stage_bbox_results[-1] += (mask_results, ) + return all_stage_bbox_results diff --git a/downstream/mmdetection/mmdet/models/roi_heads/standard_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/standard_roi_head.py new file mode 100644 index 0000000..3f33db4 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/standard_roi_head.py @@ -0,0 +1,401 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler +from ..builder import HEADS, build_head, build_roi_extractor +from .base_roi_head import BaseRoIHead +from .test_mixins import BBoxTestMixin, MaskTestMixin + + +@HEADS.register_module() +class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): + """Simplest base roi head including one bbox head and one mask head.""" + + def init_assigner_sampler(self): + """Initialize assigner and sampler.""" + self.bbox_assigner = None + self.bbox_sampler = None + if self.train_cfg: + self.bbox_assigner = build_assigner(self.train_cfg.assigner) + self.bbox_sampler = build_sampler( + self.train_cfg.sampler, context=self) + + def init_bbox_head(self, bbox_roi_extractor, bbox_head): + """Initialize ``bbox_head``""" + self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor) + self.bbox_head = build_head(bbox_head) + + def init_mask_head(self, mask_roi_extractor, mask_head): + """Initialize ``mask_head``""" + if mask_roi_extractor is not None: + self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) + self.share_roi_extractor = False + else: + self.share_roi_extractor = True + self.mask_roi_extractor = self.bbox_roi_extractor + self.mask_head = build_head(mask_head) + + def forward_dummy(self, x, proposals): + """Dummy forward function.""" + # bbox head + outs = () + rois = bbox2roi([proposals]) + if self.with_bbox: + bbox_results = self._bbox_forward(x, rois) + outs = outs + (bbox_results['cls_score'], + bbox_results['bbox_pred']) + # mask head + if self.with_mask: + mask_rois = rois[:100] + mask_results = self._mask_forward(x, mask_rois) + outs = outs + (mask_results['mask_pred'], ) + return outs + + def forward_train(self, + x, + img_metas, + proposal_list, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + **kwargs): + """ + Args: + x (list[Tensor]): list of multi-level img features. 
+ img_metas (list[dict]): list of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmdet/datasets/pipelines/formatting.py:Collect`. + proposals (list[Tensors]): list of region proposals. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + gt_masks (None | Tensor) : true segmentation masks for each box + used if the architecture supports a segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # assign gts and sample proposals + if self.with_bbox or self.with_mask: + num_imgs = len(img_metas) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + sampling_results = [] + for i in range(num_imgs): + assign_result = self.bbox_assigner.assign( + proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], + gt_labels[i]) + sampling_result = self.bbox_sampler.sample( + assign_result, + proposal_list[i], + gt_bboxes[i], + gt_labels[i], + feats=[lvl_feat[i][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + losses = dict() + # bbox head forward and loss + if self.with_bbox: + bbox_results = self._bbox_forward_train(x, sampling_results, + gt_bboxes, gt_labels, + img_metas) + losses.update(bbox_results['loss_bbox']) + + # mask head forward and loss + if self.with_mask: + mask_results = self._mask_forward_train(x, sampling_results, + bbox_results['bbox_feats'], + gt_masks, img_metas) + losses.update(mask_results['loss_mask']) + + return losses + + def _bbox_forward(self, x, rois): + """Box head forward function used in both training and testing.""" + # TODO: a more flexible way to decide which feature maps to use + bbox_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + cls_score, bbox_pred = self.bbox_head(bbox_feats) + + bbox_results = dict( + cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) + return bbox_results + + def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, + img_metas): + """Run forward function and calculate loss for box head in training.""" + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_results = self._bbox_forward(x, rois) + + bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, + gt_labels, self.train_cfg) + loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], + bbox_results['bbox_pred'], rois, + *bbox_targets) + + bbox_results.update(loss_bbox=loss_bbox) + return bbox_results + + def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, + img_metas): + """Run forward function and calculate loss for mask head in + training.""" + if not self.share_roi_extractor: + pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + mask_results = self._mask_forward(x, pos_rois) + else: + pos_inds = [] + device = bbox_feats.device + for res in sampling_results: + pos_inds.append( + torch.ones( + res.pos_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds.append( + torch.zeros( + res.neg_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds = torch.cat(pos_inds) + + 
mask_results = self._mask_forward( + x, pos_inds=pos_inds, bbox_feats=bbox_feats) + + # if x.size(0) == 0: + # mask_results.update(loss_mask=mask_results['mask_pred'].sum() * 0.) + # return mask_results + + mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, + self.train_cfg) + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + loss_mask = self.mask_head.loss(mask_results['mask_pred'], + mask_targets, pos_labels) + + mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets) + return mask_results + + def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): + """Mask head forward function used in both training and testing.""" + assert ((rois is not None) ^ + (pos_inds is not None and bbox_feats is not None)) + if rois is not None: + mask_feats = self.mask_roi_extractor( + x[:self.mask_roi_extractor.num_inputs], rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + else: + assert bbox_feats is not None + mask_feats = bbox_feats[pos_inds] + + mask_pred = self.mask_head(mask_feats) + mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats) + return mask_results + + async def async_simple_test(self, + x, + proposal_list, + img_metas, + proposals=None, + rescale=False): + """Async test without augmentation.""" + assert self.with_bbox, 'Bbox head must be implemented.' + + det_bboxes, det_labels = await self.async_test_bboxes( + x, img_metas, proposal_list, self.test_cfg, rescale=rescale) + bbox_results = bbox2result(det_bboxes, det_labels, + self.bbox_head.num_classes) + if not self.with_mask: + return bbox_results + else: + segm_results = await self.async_test_mask( + x, + img_metas, + det_bboxes, + det_labels, + rescale=rescale, + mask_test_cfg=self.test_cfg.get('mask')) + return bbox_results, segm_results + + def simple_test(self, + x, + proposal_list, + img_metas, + proposals=None, + rescale=False): + """Test without augmentation. + + Args: + x (tuple[Tensor]): Features from upstream network. Each + has shape (batch_size, c, h, w). + proposal_list (list(Tensor)): Proposals from rpn head. + Each has shape (num_proposals, 5), last dimension + 5 represent (x1, y1, x2, y2, score). + img_metas (list[dict]): Meta information of images. + rescale (bool): Whether to rescale the results to + the original image. Default: True. + + Returns: + list[list[np.ndarray]] or list[tuple]: When no mask branch, + it is bbox results of each image and classes with type + `list[list[np.ndarray]]`. The outer list + corresponds to each image. The inner list + corresponds to each class. When the model has mask branch, + it contains bbox results and mask results. + The outer list corresponds to each image, and first element + of tuple is bbox results, second element is mask results. + """ + assert self.with_bbox, 'Bbox head must be implemented.' + + det_bboxes, det_labels = self.simple_test_bboxes( + x, img_metas, proposal_list, self.test_cfg, rescale=rescale) + + bbox_results = [ + bbox2result(det_bboxes[i], det_labels[i], + self.bbox_head.num_classes) + for i in range(len(det_bboxes)) + ] + + if not self.with_mask: + return bbox_results + else: + segm_results = self.simple_test_mask( + x, img_metas, det_bboxes, det_labels, rescale=rescale) + return list(zip(bbox_results, segm_results)) + + def aug_test(self, x, proposal_list, img_metas, rescale=False): + """Test with augmentations. + + If rescale is False, then returned bboxes and masks will fit the scale + of imgs[0]. 
+ """ + det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas, + proposal_list, + self.test_cfg) + if rescale: + _det_bboxes = det_bboxes + else: + _det_bboxes = det_bboxes.clone() + _det_bboxes[:, :4] *= det_bboxes.new_tensor( + img_metas[0][0]['scale_factor']) + bbox_results = bbox2result(_det_bboxes, det_labels, + self.bbox_head.num_classes) + + # det_bboxes always keep the original scale + if self.with_mask: + segm_results = self.aug_test_mask(x, img_metas, det_bboxes, + det_labels) + return [(bbox_results, segm_results)] + else: + return [bbox_results] + + def onnx_export(self, x, proposals, img_metas, rescale=False): + """Test without augmentation.""" + assert self.with_bbox, 'Bbox head must be implemented.' + det_bboxes, det_labels = self.bbox_onnx_export( + x, img_metas, proposals, self.test_cfg, rescale=rescale) + + if not self.with_mask: + return det_bboxes, det_labels + else: + segm_results = self.mask_onnx_export( + x, img_metas, det_bboxes, det_labels, rescale=rescale) + return det_bboxes, det_labels, segm_results + + def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs): + """Export mask branch to onnx which supports batch inference. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + img_metas (list[dict]): Image meta info. + det_bboxes (Tensor): Bboxes and corresponding scores. + has shape [N, num_bboxes, 5]. + det_labels (Tensor): class labels of + shape [N, num_bboxes]. + + Returns: + Tensor: The segmentation results of shape [N, num_bboxes, + image_height, image_width]. + """ + # image shapes of images in the batch + + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + raise RuntimeError('[ONNX Error] Can not record MaskHead ' + 'as it has not been executed this time') + batch_size = det_bboxes.size(0) + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. + det_bboxes = det_bboxes[..., :4] + batch_index = torch.arange( + det_bboxes.size(0), device=det_bboxes.device).float().view( + -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) + mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) + mask_rois = mask_rois.view(-1, 5) + mask_results = self._mask_forward(x, mask_rois) + mask_pred = mask_results['mask_pred'] + max_shape = img_metas[0]['img_shape_for_onnx'] + num_det = det_bboxes.shape[1] + det_bboxes = det_bboxes.reshape(-1, 4) + det_labels = det_labels.reshape(-1) + segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, + det_labels, self.test_cfg, + max_shape) + segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], + max_shape[1]) + return segm_results + + def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg, + **kwargs): + """Export bbox branch to onnx which supports batch inference. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + img_metas (list[dict]): Image meta info. + proposals (Tensor): Region proposals with + batch dimension, has shape [N, num_bboxes, 5]. + rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. + + Returns: + tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5] + and class labels of shape [N, num_bboxes]. 
+ """ + # get origin input shape to support onnx dynamic input shape + assert len( + img_metas + ) == 1, 'Only support one input image while in exporting to ONNX' + img_shapes = img_metas[0]['img_shape_for_onnx'] + + rois = proposals + + batch_index = torch.arange( + rois.size(0), device=rois.device).float().view(-1, 1, 1).expand( + rois.size(0), rois.size(1), 1) + + rois = torch.cat([batch_index, rois[..., :4]], dim=-1) + batch_size = rois.shape[0] + num_proposals_per_img = rois.shape[1] + + # Eliminate the batch dimension + rois = rois.view(-1, 5) + bbox_results = self._bbox_forward(x, rois) + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + + # Recover the batch dimension + rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) + cls_score = cls_score.reshape(batch_size, num_proposals_per_img, + cls_score.size(-1)) + + bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, + bbox_pred.size(-1)) + det_bboxes, det_labels = self.bbox_head.onnx_export( + rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg) + + return det_bboxes, det_labels diff --git a/downstream/mmdetection/mmdet/models/roi_heads/test_mixins.py b/downstream/mmdetection/mmdet/models/roi_heads/test_mixins.py new file mode 100644 index 0000000..ae6e79a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/test_mixins.py @@ -0,0 +1,311 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import sys +import warnings + +import numpy as np +import torch + +from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes, + merge_aug_masks, multiclass_nms) + +if sys.version_info >= (3, 7): + from mmdet.utils.contextmanagers import completed + + +class BBoxTestMixin: + + if sys.version_info >= (3, 7): + + async def async_test_bboxes(self, + x, + img_metas, + proposals, + rcnn_test_cfg, + rescale=False, + **kwargs): + """Asynchronized test for box head without augmentation.""" + rois = bbox2roi(proposals) + roi_feats = self.bbox_roi_extractor( + x[:len(self.bbox_roi_extractor.featmap_strides)], rois) + if self.with_shared_head: + roi_feats = self.shared_head(roi_feats) + sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017) + + async with completed( + __name__, 'bbox_head_forward', + sleep_interval=sleep_interval): + cls_score, bbox_pred = self.bbox_head(roi_feats) + + img_shape = img_metas[0]['img_shape'] + scale_factor = img_metas[0]['scale_factor'] + det_bboxes, det_labels = self.bbox_head.get_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + return det_bboxes, det_labels + + def simple_test_bboxes(self, + x, + img_metas, + proposals, + rcnn_test_cfg, + rescale=False): + """Test only det bboxes without augmentation. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + img_metas (list[dict]): Image meta info. + proposals (List[Tensor]): Region proposals. + rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. + rescale (bool): If True, return boxes in original image space. + Default: False. + + Returns: + tuple[list[Tensor], list[Tensor]]: The first list contains + the boxes of the corresponding image in a batch, each + tensor has the shape (num_boxes, 5) and last dimension + 5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor + in the second list is the labels with shape (num_boxes, ). + The length of both lists should be equal to batch_size. 
+ """ + + rois = bbox2roi(proposals) + + if rois.shape[0] == 0: + batch_size = len(proposals) + det_bbox = rois.new_zeros(0, 5) + det_label = rois.new_zeros((0, ), dtype=torch.long) + if rcnn_test_cfg is None: + det_bbox = det_bbox[:, :4] + det_label = rois.new_zeros( + (0, self.bbox_head.fc_cls.out_features)) + # There is no proposal in the whole batch + return [det_bbox] * batch_size, [det_label] * batch_size + + bbox_results = self._bbox_forward(x, rois) + img_shapes = tuple(meta['img_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + # split batch bbox prediction back to each image + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + num_proposals_per_img = tuple(len(p) for p in proposals) + rois = rois.split(num_proposals_per_img, 0) + cls_score = cls_score.split(num_proposals_per_img, 0) + + # some detector with_reg is False, bbox_pred will be None + if bbox_pred is not None: + # TODO move this to a sabl_roi_head + # the bbox prediction of some detectors like SABL is not Tensor + if isinstance(bbox_pred, torch.Tensor): + bbox_pred = bbox_pred.split(num_proposals_per_img, 0) + else: + bbox_pred = self.bbox_head.bbox_pred_split( + bbox_pred, num_proposals_per_img) + else: + bbox_pred = (None, ) * len(proposals) + + # apply bbox post-processing to each image individually + det_bboxes = [] + det_labels = [] + for i in range(len(proposals)): + if rois[i].shape[0] == 0: + # There is no proposal in the single image + det_bbox = rois[i].new_zeros(0, 5) + det_label = rois[i].new_zeros((0, ), dtype=torch.long) + if rcnn_test_cfg is None: + det_bbox = det_bbox[:, :4] + det_label = rois[i].new_zeros( + (0, self.bbox_head.fc_cls.out_features)) + + else: + det_bbox, det_label = self.bbox_head.get_bboxes( + rois[i], + cls_score[i], + bbox_pred[i], + img_shapes[i], + scale_factors[i], + rescale=rescale, + cfg=rcnn_test_cfg) + det_bboxes.append(det_bbox) + det_labels.append(det_label) + return det_bboxes, det_labels + + def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): + """Test det bboxes with test time augmentation.""" + aug_bboxes = [] + aug_scores = [] + for x, img_meta in zip(feats, img_metas): + # only one image in the batch + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + flip_direction = img_meta[0]['flip_direction'] + # TODO more flexible + proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, + scale_factor, flip, flip_direction) + rois = bbox2roi([proposals]) + bbox_results = self._bbox_forward(x, rois) + bboxes, scores = self.bbox_head.get_bboxes( + rois, + bbox_results['cls_score'], + bbox_results['bbox_pred'], + img_shape, + scale_factor, + rescale=False, + cfg=None) + aug_bboxes.append(bboxes) + aug_scores.append(scores) + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) + if merged_bboxes.shape[0] == 0: + # There is no proposal in the single image + det_bboxes = merged_bboxes.new_zeros(0, 5) + det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long) + else: + det_bboxes, det_labels = multiclass_nms(merged_bboxes, + merged_scores, + rcnn_test_cfg.score_thr, + rcnn_test_cfg.nms, + rcnn_test_cfg.max_per_img) + return det_bboxes, det_labels + + +class MaskTestMixin: + + if sys.version_info >= (3, 7): + + async def async_test_mask(self, + x, + img_metas, + det_bboxes, + det_labels, + 
rescale=False, + mask_test_cfg=None): + """Asynchronized test for mask head without augmentation.""" + # image shape of the first image in the batch (only one) + ori_shape = img_metas[0]['ori_shape'] + scale_factor = img_metas[0]['scale_factor'] + if det_bboxes.shape[0] == 0: + segm_result = [[] for _ in range(self.mask_head.num_classes)] + else: + if rescale and not isinstance(scale_factor, + (float, torch.Tensor)): + scale_factor = det_bboxes.new_tensor(scale_factor) + _bboxes = ( + det_bboxes[:, :4] * + scale_factor if rescale else det_bboxes) + mask_rois = bbox2roi([_bboxes]) + mask_feats = self.mask_roi_extractor( + x[:len(self.mask_roi_extractor.featmap_strides)], + mask_rois) + + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'): + sleep_interval = mask_test_cfg['async_sleep_interval'] + else: + sleep_interval = 0.035 + async with completed( + __name__, + 'mask_head_forward', + sleep_interval=sleep_interval): + mask_pred = self.mask_head(mask_feats) + segm_result = self.mask_head.get_seg_masks( + mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape, + scale_factor, rescale) + return segm_result + + def simple_test_mask(self, + x, + img_metas, + det_bboxes, + det_labels, + rescale=False): + """Simple test for mask head without augmentation.""" + # image shapes of images in the batch + ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) + scale_factors = tuple(meta['scale_factor'] for meta in img_metas) + + if isinstance(scale_factors[0], float): + warnings.warn( + 'Scale factor in img_metas should be a ' + 'ndarray with shape (4,) ' + 'arrange as (factor_w, factor_h, factor_w, factor_h), ' + 'The scale_factor with float type has been deprecated. ') + scale_factors = np.array([scale_factors] * 4, dtype=np.float32) + + num_imgs = len(det_bboxes) + if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): + segm_results = [[[] for _ in range(self.mask_head.num_classes)] + for _ in range(num_imgs)] + else: + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. 
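+            # ``scale_factor`` is laid out as (factor_w, factor_h, factor_w,
+            # factor_h), so it can be multiplied element-wise with boxes in
+            # (x1, y1, x2, y2) format to map them back to the testing scale.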
+ if rescale: + scale_factors = [ + torch.from_numpy(scale_factor).to(det_bboxes[0].device) + for scale_factor in scale_factors + ] + _bboxes = [ + det_bboxes[i][:, :4] * + scale_factors[i] if rescale else det_bboxes[i][:, :4] + for i in range(len(det_bboxes)) + ] + mask_rois = bbox2roi(_bboxes) + mask_results = self._mask_forward(x, mask_rois) + mask_pred = mask_results['mask_pred'] + # split batch mask prediction back to each image + num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] + mask_preds = mask_pred.split(num_mask_roi_per_img, 0) + + # apply mask post-processing to each image individually + segm_results = [] + for i in range(num_imgs): + if det_bboxes[i].shape[0] == 0: + segm_results.append( + [[] for _ in range(self.mask_head.num_classes)]) + else: + segm_result = self.mask_head.get_seg_masks( + mask_preds[i], _bboxes[i], det_labels[i], + self.test_cfg, ori_shapes[i], scale_factors[i], + rescale) + segm_results.append(segm_result) + return segm_results + + def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): + """Test for mask head with test time augmentation.""" + if det_bboxes.shape[0] == 0: + segm_result = [[] for _ in range(self.mask_head.num_classes)] + else: + aug_masks = [] + for x, img_meta in zip(feats, img_metas): + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + flip_direction = img_meta[0]['flip_direction'] + _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, + scale_factor, flip, flip_direction) + mask_rois = bbox2roi([_bboxes]) + mask_results = self._mask_forward(x, mask_rois) + # convert to numpy array to save memory + aug_masks.append( + mask_results['mask_pred'].sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) + + ori_shape = img_metas[0][0]['ori_shape'] + scale_factor = det_bboxes.new_ones(4) + segm_result = self.mask_head.get_seg_masks( + merged_masks, + det_bboxes, + det_labels, + self.test_cfg, + ori_shape, + scale_factor=scale_factor, + rescale=False) + return segm_result diff --git a/downstream/mmdetection/mmdet/models/roi_heads/trident_roi_head.py b/downstream/mmdetection/mmdet/models/roi_heads/trident_roi_head.py new file mode 100644 index 0000000..0975879 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/roi_heads/trident_roi_head.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.ops import batched_nms + +from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, + multiclass_nms) +from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead +from ..builder import HEADS + + +@HEADS.register_module() +class TridentRoIHead(StandardRoIHead): + """Trident roi head. + + Args: + num_branch (int): Number of branches in TridentNet. + test_branch_idx (int): In inference, all 3 branches will be used + if `test_branch_idx==-1`, otherwise only branch with index + `test_branch_idx` will be used. 
+ """ + + def __init__(self, num_branch, test_branch_idx, **kwargs): + self.num_branch = num_branch + self.test_branch_idx = test_branch_idx + super(TridentRoIHead, self).__init__(**kwargs) + + def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels): + """Merge bbox predictions of each branch.""" + if trident_det_bboxes.numel() == 0: + det_bboxes = trident_det_bboxes.new_zeros((0, 5)) + det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long) + else: + nms_bboxes = trident_det_bboxes[:, :4] + nms_scores = trident_det_bboxes[:, 4].contiguous() + nms_inds = trident_det_labels + nms_cfg = self.test_cfg['nms'] + det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds, + nms_cfg) + det_labels = trident_det_labels[keep] + if self.test_cfg['max_per_img'] > 0: + det_labels = det_labels[:self.test_cfg['max_per_img']] + det_bboxes = det_bboxes[:self.test_cfg['max_per_img']] + + return det_bboxes, det_labels + + def simple_test(self, + x, + proposal_list, + img_metas, + proposals=None, + rescale=False): + """Test without augmentation as follows: + + 1. Compute prediction bbox and label per branch. + 2. Merge predictions of each branch according to scores of + bboxes, i.e., bboxes with higher score are kept to give + top-k prediction. + """ + assert self.with_bbox, 'Bbox head must be implemented.' + det_bboxes_list, det_labels_list = self.simple_test_bboxes( + x, img_metas, proposal_list, self.test_cfg, rescale=rescale) + num_branch = self.num_branch if self.test_branch_idx == -1 else 1 + for _ in range(len(det_bboxes_list)): + if det_bboxes_list[_].shape[0] == 0: + det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5)) + det_bboxes, det_labels = [], [] + for i in range(len(img_metas) // num_branch): + det_result = self.merge_trident_bboxes( + torch.cat(det_bboxes_list[i * num_branch:(i + 1) * + num_branch]), + torch.cat(det_labels_list[i * num_branch:(i + 1) * + num_branch])) + det_bboxes.append(det_result[0]) + det_labels.append(det_result[1]) + + bbox_results = [ + bbox2result(det_bboxes[i], det_labels[i], + self.bbox_head.num_classes) + for i in range(len(det_bboxes)) + ] + return bbox_results + + def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): + """Test det bboxes with test time augmentation.""" + aug_bboxes = [] + aug_scores = [] + for x, img_meta in zip(feats, img_metas): + # only one image in the batch + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + flip_direction = img_meta[0]['flip_direction'] + + trident_bboxes, trident_scores = [], [] + for branch_idx in range(len(proposal_list)): + proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, + scale_factor, flip, flip_direction) + rois = bbox2roi([proposals]) + bbox_results = self._bbox_forward(x, rois) + bboxes, scores = self.bbox_head.get_bboxes( + rois, + bbox_results['cls_score'], + bbox_results['bbox_pred'], + img_shape, + scale_factor, + rescale=False, + cfg=None) + trident_bboxes.append(bboxes) + trident_scores.append(scores) + + aug_bboxes.append(torch.cat(trident_bboxes, 0)) + aug_scores.append(torch.cat(trident_scores, 0)) + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) + det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, + rcnn_test_cfg.score_thr, + rcnn_test_cfg.nms, + rcnn_test_cfg.max_per_img) + return det_bboxes, det_labels diff --git 
a/downstream/mmdetection/mmdet/models/seg_heads/__init__.py b/downstream/mmdetection/mmdet/models/seg_heads/__init__.py new file mode 100644 index 0000000..b489a90 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/seg_heads/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .panoptic_fpn_head import PanopticFPNHead # noqa: F401,F403 +from .panoptic_fusion_heads import * # noqa: F401,F403 diff --git a/downstream/mmdetection/mmdet/models/seg_heads/base_semantic_head.py b/downstream/mmdetection/mmdet/models/seg_heads/base_semantic_head.py new file mode 100644 index 0000000..2b6ca14 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/seg_heads/base_semantic_head.py @@ -0,0 +1,86 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import torch.nn.functional as F +from mmcv.runner import BaseModule, force_fp32 + +from ..builder import build_loss +from ..utils import interpolate_as + + +class BaseSemanticHead(BaseModule, metaclass=ABCMeta): + """Base module of Semantic Head. + + Args: + num_classes (int): the number of classes. + init_cfg (dict): the initialization config. + loss_seg (dict): the loss of the semantic head. + """ + + def __init__(self, + num_classes, + init_cfg=None, + loss_seg=dict( + type='CrossEntropyLoss', + ignore_index=255, + loss_weight=1.0)): + super(BaseSemanticHead, self).__init__(init_cfg) + self.loss_seg = build_loss(loss_seg) + self.num_classes = num_classes + + @force_fp32(apply_to=('seg_preds', )) + def loss(self, seg_preds, gt_semantic_seg): + """Get the loss of semantic head. + + Args: + seg_preds (Tensor): The input logits with the shape (N, C, H, W). + gt_semantic_seg: The ground truth of semantic segmentation with + the shape (N, H, W). + label_bias: The starting number of the semantic label. + Default: 1. + + Returns: + dict: the loss of semantic head. + """ + if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]: + seg_preds = interpolate_as(seg_preds, gt_semantic_seg) + seg_preds = seg_preds.permute((0, 2, 3, 1)) + + loss_seg = self.loss_seg( + seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C] + gt_semantic_seg.reshape(-1).long()) + return dict(loss_seg=loss_seg) + + @abstractmethod + def forward(self, x): + """Placeholder of forward function. + + Returns: + dict[str, Tensor]: A dictionary, including features + and predicted scores. Required keys: 'seg_preds' + and 'feats'. + """ + pass + + def forward_train(self, x, gt_semantic_seg): + output = self.forward(x) + seg_preds = output['seg_preds'] + return self.loss(seg_preds, gt_semantic_seg) + + def simple_test(self, x, img_metas, rescale=False): + output = self.forward(x) + seg_preds = output['seg_preds'] + seg_preds = F.interpolate( + seg_preds, + size=img_metas[0]['pad_shape'][:2], + mode='bilinear', + align_corners=False) + + if rescale: + h, w, _ = img_metas[0]['img_shape'] + seg_preds = seg_preds[:, :, :h, :w] + + h, w, _ = img_metas[0]['ori_shape'] + seg_preds = F.interpolate( + seg_preds, size=(h, w), mode='bilinear', align_corners=False) + return seg_preds diff --git a/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fpn_head.py b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fpn_head.py new file mode 100644 index 0000000..f1df297 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fpn_head.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
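+# PanopticFPNHead outputs ``num_stuff_classes + 1`` channels: one per stuff
+# class plus a single channel into which all thing classes are merged
+# (e.g. 53 stuff classes + 1 merged thing channel = 54 outputs by default).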
+import warnings + +import torch +import torch.nn as nn +from mmcv.runner import ModuleList + +from ..builder import HEADS +from ..utils import ConvUpsample +from .base_semantic_head import BaseSemanticHead + + +@HEADS.register_module() +class PanopticFPNHead(BaseSemanticHead): + """PanopticFPNHead used in Panoptic FPN. + + In this head, the number of output channels is ``num_stuff_classes + + 1``, including all stuff classes and one thing class. The stuff + classes will be reset from ``0`` to ``num_stuff_classes - 1``, the + thing classes will be merged to ``num_stuff_classes``-th channel. + + Arg: + num_things_classes (int): Number of thing classes. Default: 80. + num_stuff_classes (int): Number of stuff classes. Default: 53. + num_classes (int): Number of classes, including all stuff + classes and one thing class. This argument is deprecated, + please use ``num_things_classes`` and ``num_stuff_classes``. + The module will automatically infer the num_classes by + ``num_stuff_classes + 1``. + in_channels (int): Number of channels in the input feature + map. + inner_channels (int): Number of channels in inner features. + start_level (int): The start level of the input features + used in PanopticFPN. + end_level (int): The end level of the used features, the + ``end_level``-th layer will not be used. + fg_range (tuple): Range of the foreground classes. It starts + from ``0`` to ``num_things_classes-1``. Deprecated, please use + ``num_things_classes`` directly. + bg_range (tuple): Range of the background classes. It starts + from ``num_things_classes`` to ``num_things_classes + + num_stuff_classes - 1``. Deprecated, please use + ``num_stuff_classes`` and ``num_things_classes`` directly. + conv_cfg (dict): Dictionary to construct and config + conv layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Use ``GN`` by default. + init_cfg (dict or list[dict], optional): Initialization config dict. + loss_seg (dict): the loss of the semantic head. + """ + + def __init__(self, + num_things_classes=80, + num_stuff_classes=53, + num_classes=None, + in_channels=256, + inner_channels=128, + start_level=0, + end_level=4, + fg_range=None, + bg_range=None, + conv_cfg=None, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + init_cfg=None, + loss_seg=dict( + type='CrossEntropyLoss', ignore_index=-1, + loss_weight=1.0)): + if num_classes is not None: + warnings.warn( + '`num_classes` is deprecated now, please set ' + '`num_stuff_classes` directly, the `num_classes` will be ' + 'set to `num_stuff_classes + 1`') + # num_classes = num_stuff_classes + 1 for PanopticFPN. 
+ assert num_classes == num_stuff_classes + 1 + super(PanopticFPNHead, self).__init__(num_stuff_classes + 1, init_cfg, + loss_seg) + self.num_things_classes = num_things_classes + self.num_stuff_classes = num_stuff_classes + if fg_range is not None and bg_range is not None: + self.fg_range = fg_range + self.bg_range = bg_range + self.num_things_classes = fg_range[1] - fg_range[0] + 1 + self.num_stuff_classes = bg_range[1] - bg_range[0] + 1 + warnings.warn( + '`fg_range` and `bg_range` are deprecated now, ' + f'please use `num_things_classes`={self.num_things_classes} ' + f'and `num_stuff_classes`={self.num_stuff_classes} instead.') + + # Used feature layers are [start_level, end_level) + self.start_level = start_level + self.end_level = end_level + self.num_stages = end_level - start_level + self.inner_channels = inner_channels + + self.conv_upsample_layers = ModuleList() + for i in range(start_level, end_level): + self.conv_upsample_layers.append( + ConvUpsample( + in_channels, + inner_channels, + num_layers=i if i > 0 else 1, + num_upsample=i if i > 0 else 0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + )) + self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1) + + def _set_things_to_void(self, gt_semantic_seg): + """Merge thing classes to one class. + + In PanopticFPN, the background labels will be reset from `0` to + `self.num_stuff_classes-1`, the foreground labels will be merged to + `self.num_stuff_classes`-th channel. + """ + gt_semantic_seg = gt_semantic_seg.int() + fg_mask = gt_semantic_seg < self.num_things_classes + bg_mask = (gt_semantic_seg >= self.num_things_classes) * ( + gt_semantic_seg < self.num_things_classes + self.num_stuff_classes) + + new_gt_seg = torch.clone(gt_semantic_seg) + new_gt_seg = torch.where(bg_mask, + gt_semantic_seg - self.num_things_classes, + new_gt_seg) + new_gt_seg = torch.where(fg_mask, + fg_mask.int() * self.num_stuff_classes, + new_gt_seg) + return new_gt_seg + + def loss(self, seg_preds, gt_semantic_seg): + """The loss of PanopticFPN head. + + Things classes will be merged to one class in PanopticFPN. + """ + gt_semantic_seg = self._set_things_to_void(gt_semantic_seg) + return super().loss(seg_preds, gt_semantic_seg) + + def init_weights(self): + super().init_weights() + nn.init.normal_(self.conv_logits.weight.data, 0, 0.01) + self.conv_logits.bias.data.zero_() + + def forward(self, x): + # the number of subnets must be not more than + # the length of features. + assert self.num_stages <= len(x) + + feats = [] + for i, layer in enumerate(self.conv_upsample_layers): + f = layer(x[self.start_level + i]) + feats.append(f) + + feats = torch.sum(torch.stack(feats, dim=0), dim=0) + seg_preds = self.conv_logits(feats) + out = dict(seg_preds=seg_preds, feats=feats) + return out diff --git a/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py new file mode 100644 index 0000000..41625a6 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
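+# Fusion heads that combine instance predictions with stuff segmentation
+# into a single panoptic segmentation result.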
+from .base_panoptic_fusion_head import \ + BasePanopticFusionHead # noqa: F401,F403 +from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403 +from .maskformer_fusion_head import MaskFormerFusionHead # noqa: F401,F403 diff --git a/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py new file mode 100644 index 0000000..a38ac1c --- /dev/null +++ b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from mmcv.runner import BaseModule + +from ...builder import build_loss + + +class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta): + """Base class for panoptic heads.""" + + def __init__(self, + num_things_classes=80, + num_stuff_classes=53, + test_cfg=None, + loss_panoptic=None, + init_cfg=None, + **kwargs): + super(BasePanopticFusionHead, self).__init__(init_cfg) + self.num_things_classes = num_things_classes + self.num_stuff_classes = num_stuff_classes + self.num_classes = num_things_classes + num_stuff_classes + self.test_cfg = test_cfg + + if loss_panoptic: + self.loss_panoptic = build_loss(loss_panoptic) + else: + self.loss_panoptic = None + + @property + def with_loss(self): + """bool: whether the panoptic head contains loss function.""" + return self.loss_panoptic is not None + + @abstractmethod + def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs): + """Forward function during training.""" + + @abstractmethod + def simple_test(self, + img_metas, + det_labels, + mask_preds, + seg_preds, + det_bboxes, + cfg=None, + **kwargs): + """Test without augmentation.""" diff --git a/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py new file mode 100644 index 0000000..06c1de2 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py @@ -0,0 +1,126 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET +from mmdet.models.builder import HEADS +from .base_panoptic_fusion_head import BasePanopticFusionHead + + +@HEADS.register_module() +class HeuristicFusionHead(BasePanopticFusionHead): + """Fusion Head with Heuristic method.""" + + def __init__(self, + num_things_classes=80, + num_stuff_classes=53, + test_cfg=None, + init_cfg=None, + **kwargs): + super(HeuristicFusionHead, + self).__init__(num_things_classes, num_stuff_classes, test_cfg, + None, init_cfg, **kwargs) + + def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs): + """HeuristicFusionHead has no training loss.""" + return dict() + + def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5): + """Lay instance masks to a result map. + + Args: + bboxes: The bboxes results, (K, 4). + labels: The labels of bboxes, (K, ). + masks: The instance masks, (K, H, W). + overlap_thr: Threshold to determine whether two masks overlap. + default: 0.5. + + Returns: + Tensor: The result map, (H, W). 
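+            Tensor: Labels of the instances kept in the result map, (K', ).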
+ """ + num_insts = bboxes.shape[0] + id_map = torch.zeros( + masks.shape[-2:], device=bboxes.device, dtype=torch.long) + if num_insts == 0: + return id_map, labels + + scores, bboxes = bboxes[:, -1], bboxes[:, :4] + + # Sort by score to use heuristic fusion + order = torch.argsort(-scores) + bboxes = bboxes[order] + labels = labels[order] + segm_masks = masks[order] + + instance_id = 1 + left_labels = [] + for idx in range(bboxes.shape[0]): + _cls = labels[idx] + _mask = segm_masks[idx] + instance_id_map = torch.ones_like( + _mask, dtype=torch.long) * instance_id + area = _mask.sum() + if area == 0: + continue + + pasted = id_map > 0 + intersect = (_mask * pasted).sum() + if (intersect / (area + 1e-5)) > overlap_thr: + continue + + _part = _mask * (~pasted) + id_map = torch.where(_part, instance_id_map, id_map) + left_labels.append(_cls) + instance_id += 1 + + if len(left_labels) > 0: + instance_labels = torch.stack(left_labels) + else: + instance_labels = bboxes.new_zeros((0, ), dtype=torch.long) + assert instance_id == (len(instance_labels) + 1) + return id_map, instance_labels + + def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds, + **kwargs): + """Fuse the results of instance and semantic segmentations. + + Args: + det_bboxes: The bboxes results, (K, 4). + det_labels: The labels of bboxes, (K,). + mask_preds: The masks results, (K, H, W). + seg_preds: The semantic segmentation results, + (K, num_stuff + 1, H, W). + + Returns: + Tensor : The panoptic segmentation result, (H, W). + """ + mask_preds = mask_preds >= self.test_cfg.mask_thr_binary + id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds, + self.test_cfg.mask_overlap) + + seg_results = seg_preds.argmax(dim=0) + seg_results = seg_results + self.num_things_classes + + pan_results = seg_results + instance_id = 1 + for idx in range(det_labels.shape[0]): + _mask = id_map == (idx + 1) + if _mask.sum() == 0: + continue + _cls = labels[idx] + # simply trust detection + segment_id = _cls + instance_id * INSTANCE_OFFSET + pan_results[_mask] = segment_id + instance_id += 1 + + ids, counts = torch.unique( + pan_results % INSTANCE_OFFSET, return_counts=True) + stuff_ids = ids[ids >= self.num_things_classes] + stuff_counts = counts[ids >= self.num_things_classes] + ignore_stuff_ids = stuff_ids[ + stuff_counts < self.test_cfg.stuff_area_limit] + + assert pan_results.ndim == 2 + pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape( + 1, 1, -1)).any(dim=2)] = self.num_classes + + return pan_results diff --git a/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py new file mode 100644 index 0000000..5b59ce4 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
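+# Keys read from ``test_cfg`` by this head, shown with the defaults used by
+# the ``.get`` calls below (the dict itself is only illustrative, not a config
+# shipped in this repo):
+#   test_cfg = dict(
+#       panoptic_on=True, instance_on=False, semantic_on=False,
+#       object_mask_thr=0.8, iou_thr=0.8, filter_low_score=False,
+#       max_per_image=100)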
+import torch +import torch.nn.functional as F + +from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET +from mmdet.core.mask import mask2bbox +from mmdet.models.builder import HEADS +from .base_panoptic_fusion_head import BasePanopticFusionHead + + +@HEADS.register_module() +class MaskFormerFusionHead(BasePanopticFusionHead): + + def __init__(self, + num_things_classes=80, + num_stuff_classes=53, + test_cfg=None, + loss_panoptic=None, + init_cfg=None, + **kwargs): + super().__init__(num_things_classes, num_stuff_classes, test_cfg, + loss_panoptic, init_cfg, **kwargs) + + def forward_train(self, **kwargs): + """MaskFormerFusionHead has no training loss.""" + return dict() + + def panoptic_postprocess(self, mask_cls, mask_pred): + """Panoptic segmengation inference. + + Args: + mask_cls (Tensor): Classfication outputs of shape + (num_queries, cls_out_channels) for a image. + Note `cls_out_channels` should includes + background. + mask_pred (Tensor): Mask outputs of shape + (num_queries, h, w) for a image. + + Returns: + Tensor: Panoptic segment result of shape \ + (h, w), each element in Tensor means: \ + ``segment_id = _cls + instance_id * INSTANCE_OFFSET``. + """ + object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8) + iou_thr = self.test_cfg.get('iou_thr', 0.8) + filter_low_score = self.test_cfg.get('filter_low_score', False) + + scores, labels = F.softmax(mask_cls, dim=-1).max(-1) + mask_pred = mask_pred.sigmoid() + + keep = labels.ne(self.num_classes) & (scores > object_mask_thr) + cur_scores = scores[keep] + cur_classes = labels[keep] + cur_masks = mask_pred[keep] + + cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks + + h, w = cur_masks.shape[-2:] + panoptic_seg = torch.full((h, w), + self.num_classes, + dtype=torch.int32, + device=cur_masks.device) + if cur_masks.shape[0] == 0: + # We didn't detect any mask :( + pass + else: + cur_mask_ids = cur_prob_masks.argmax(0) + instance_id = 1 + for k in range(cur_classes.shape[0]): + pred_class = int(cur_classes[k].item()) + isthing = pred_class < self.num_things_classes + mask = cur_mask_ids == k + mask_area = mask.sum().item() + original_area = (cur_masks[k] >= 0.5).sum().item() + + if filter_low_score: + mask = mask & (cur_masks[k] >= 0.5) + + if mask_area > 0 and original_area > 0: + if mask_area / original_area < iou_thr: + continue + + if not isthing: + # different stuff regions of same class will be + # merged here, and stuff share the instance_id 0. + panoptic_seg[mask] = pred_class + else: + panoptic_seg[mask] = ( + pred_class + instance_id * INSTANCE_OFFSET) + instance_id += 1 + + return panoptic_seg + + def semantic_postprocess(self, mask_cls, mask_pred): + """Semantic segmengation postprocess. + + Args: + mask_cls (Tensor): Classfication outputs of shape + (num_queries, cls_out_channels) for a image. + Note `cls_out_channels` should includes + background. + mask_pred (Tensor): Mask outputs of shape + (num_queries, h, w) for a image. + + Returns: + Tensor: Semantic segment result of shape \ + (cls_out_channels, h, w). + """ + # TODO add semantic segmentation result + raise NotImplementedError + + def instance_postprocess(self, mask_cls, mask_pred): + """Instance segmengation postprocess. + + Args: + mask_cls (Tensor): Classfication outputs of shape + (num_queries, cls_out_channels) for a image. + Note `cls_out_channels` should includes + background. + mask_pred (Tensor): Mask outputs of shape + (num_queries, h, w) for a image. + + Returns: + tuple[Tensor]: Instance segmentation results. 
+ + - labels_per_image (Tensor): Predicted labels,\ + shape (n, ). + - bboxes (Tensor): Bboxes and scores with shape (n, 5) of \ + positive region in binary mask, the last column is scores. + - mask_pred_binary (Tensor): Instance masks of \ + shape (n, h, w). + """ + max_per_image = self.test_cfg.get('max_per_image', 100) + num_queries = mask_cls.shape[0] + # shape (num_queries, num_class) + scores = F.softmax(mask_cls, dim=-1)[:, :-1] + # shape (num_queries * num_class, ) + labels = torch.arange(self.num_classes, device=mask_cls.device).\ + unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) + scores_per_image, top_indices = scores.flatten(0, 1).topk( + max_per_image, sorted=False) + labels_per_image = labels[top_indices] + + query_indices = top_indices // self.num_classes + mask_pred = mask_pred[query_indices] + + # extract things + is_thing = labels_per_image < self.num_things_classes + scores_per_image = scores_per_image[is_thing] + labels_per_image = labels_per_image[is_thing] + mask_pred = mask_pred[is_thing] + + mask_pred_binary = (mask_pred > 0).float() + mask_scores_per_image = (mask_pred.sigmoid() * + mask_pred_binary).flatten(1).sum(1) / ( + mask_pred_binary.flatten(1).sum(1) + 1e-6) + det_scores = scores_per_image * mask_scores_per_image + mask_pred_binary = mask_pred_binary.bool() + bboxes = mask2bbox(mask_pred_binary) + bboxes = torch.cat([bboxes, det_scores[:, None]], dim=-1) + + return labels_per_image, bboxes, mask_pred_binary + + def simple_test(self, + mask_cls_results, + mask_pred_results, + img_metas, + rescale=False, + **kwargs): + """Test segment without test-time aumengtation. + + Only the output of last decoder layers was used. + + Args: + mask_cls_results (Tensor): Mask classification logits, + shape (batch_size, num_queries, cls_out_channels). + Note `cls_out_channels` should includes background. + mask_pred_results (Tensor): Mask logits, shape + (batch_size, num_queries, h, w). + img_metas (list[dict]): List of image information. + rescale (bool, optional): If True, return boxes in + original image space. Default False. + + Returns: + list[dict[str, Tensor | tuple[Tensor]]]: Semantic segmentation \ + results and panoptic segmentation results for each \ + image. + + .. code-block:: none + + [ + { + 'pan_results': Tensor, # shape = [h, w] + 'ins_results': tuple[Tensor], + # semantic segmentation results are not supported yet + 'sem_results': Tensor + }, + ... + ] + """ + panoptic_on = self.test_cfg.get('panoptic_on', True) + semantic_on = self.test_cfg.get('semantic_on', False) + instance_on = self.test_cfg.get('instance_on', False) + assert not semantic_on, 'segmantic segmentation '\ + 'results are not supported yet.' 
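+        # Post-process each image independently: crop away the padded region,
+        # optionally resize the masks back to the original resolution, then run
+        # the requested panoptic / instance post-processing.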
+ + results = [] + for mask_cls_result, mask_pred_result, meta in zip( + mask_cls_results, mask_pred_results, img_metas): + # remove padding + img_height, img_width = meta['img_shape'][:2] + mask_pred_result = mask_pred_result[:, :img_height, :img_width] + + if rescale: + # return result in original resolution + ori_height, ori_width = meta['ori_shape'][:2] + mask_pred_result = F.interpolate( + mask_pred_result[:, None], + size=(ori_height, ori_width), + mode='bilinear', + align_corners=False)[:, 0] + + result = dict() + if panoptic_on: + pan_results = self.panoptic_postprocess( + mask_cls_result, mask_pred_result) + result['pan_results'] = pan_results + + if instance_on: + ins_results = self.instance_postprocess( + mask_cls_result, mask_pred_result) + result['ins_results'] = ins_results + + if semantic_on: + sem_results = self.semantic_postprocess( + mask_cls_result, mask_pred_result) + result['sem_results'] = sem_results + + results.append(result) + + return results diff --git a/downstream/mmdetection/mmdet/models/utils/__init__.py b/downstream/mmdetection/mmdet/models/utils/__init__.py new file mode 100644 index 0000000..e74ba89 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/__init__.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d +from .builder import build_linear_layer, build_transformer +from .ckpt_convert import pvt_convert +from .conv_upsample import ConvUpsample +from .csp_layer import CSPLayer +from .gaussian_target import gaussian_radius, gen_gaussian_target +from .inverted_residual import InvertedResidual +from .make_divisible import make_divisible +from .misc import interpolate_as, sigmoid_geometric_mean +from .normed_predictor import NormedConv2d, NormedLinear +from .panoptic_gt_processing import preprocess_panoptic_gt +from .point_sample import (get_uncertain_point_coords_with_randomness, + get_uncertainty) +from .positional_encoding import (LearnedPositionalEncoding, + SinePositionalEncoding) +from .res_layer import ResLayer, SimplifiedBasicBlock +from .se_layer import DyReLU, SELayer +from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer, + DynamicConv, PatchEmbed, Transformer, nchw_to_nlc, + nlc_to_nchw) + +__all__ = [ + 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', + 'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer', + 'build_transformer', 'build_linear_layer', 'SinePositionalEncoding', + 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock', + 'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual', + 'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer', + 'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', + 'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean', + 'preprocess_panoptic_gt', 'DyReLU', + 'get_uncertain_point_coords_with_randomness', 'get_uncertainty' +] diff --git a/downstream/mmdetection/mmdet/models/utils/brick_wrappers.py b/downstream/mmdetection/mmdet/models/utils/brick_wrappers.py new file mode 100644 index 0000000..fa0279a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/brick_wrappers.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
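+# Wrappers around adaptive average pooling that also handle an empty batch
+# dimension on PyTorch < 1.9, e.g. (illustrative):
+#   pool = AdaptiveAvgPool2d((7, 7))
+#   out = pool(torch.zeros(0, 256, 14, 14))  # -> shape (0, 256, 7, 7)
+# Non-empty inputs (or newer PyTorch) fall back to the builtin ops.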
+import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version + +if torch.__version__ == 'parrots': + TORCH_VERSION = torch.__version__ +else: + # torch.__version__ could be 1.3.1+cu92, we only need the first two + # for comparison + TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2]) + + +def adaptive_avg_pool2d(input, output_size): + """Handle empty batch dimension to adaptive_avg_pool2d. + + Args: + input (tensor): 4D tensor. + output_size (int, tuple[int,int]): the target output size. + """ + if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): + if isinstance(output_size, int): + output_size = [output_size, output_size] + output_size = [*input.shape[:2], *output_size] + empty = NewEmptyTensorOp.apply(input, output_size) + return empty + else: + return F.adaptive_avg_pool2d(input, output_size) + + +class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d): + """Handle empty batch dimension to AdaptiveAvgPool2d.""" + + def forward(self, x): + # PyTorch 1.9 does not support empty tensor inference yet + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): + output_size = self.output_size + if isinstance(output_size, int): + output_size = [output_size, output_size] + else: + output_size = [ + v if v is not None else d + for v, d in zip(output_size, + x.size()[-2:]) + ] + output_size = [*x.shape[:2], *output_size] + empty = NewEmptyTensorOp.apply(x, output_size) + return empty + + return super().forward(x) diff --git a/downstream/mmdetection/mmdet/models/utils/builder.py b/downstream/mmdetection/mmdet/models/utils/builder.py new file mode 100644 index 0000000..20fe7a6 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/builder.py @@ -0,0 +1,47 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.utils import Registry, build_from_cfg + +TRANSFORMER = Registry('Transformer') +LINEAR_LAYERS = Registry('linear layers') + + +def build_transformer(cfg, default_args=None): + """Builder for Transformer.""" + return build_from_cfg(cfg, TRANSFORMER, default_args) + + +LINEAR_LAYERS.register_module('Linear', module=nn.Linear) + + +def build_linear_layer(cfg, *args, **kwargs): + """Build linear layer. + Args: + cfg (None or dict): The linear layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate an linear layer. + args (argument list): Arguments passed to the `__init__` + method of the corresponding linear layer. + kwargs (keyword arguments): Keyword arguments passed to the `__init__` + method of the corresponding linear layer. + Returns: + nn.Module: Created linear layer. + """ + if cfg is None: + cfg_ = dict(type='Linear') + else: + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in LINEAR_LAYERS: + raise KeyError(f'Unrecognized linear type {layer_type}') + else: + linear_layer = LINEAR_LAYERS.get(layer_type) + + layer = linear_layer(*args, **kwargs, **cfg_) + + return layer diff --git a/downstream/mmdetection/mmdet/models/utils/ckpt_convert.py b/downstream/mmdetection/mmdet/models/utils/ckpt_convert.py new file mode 100644 index 0000000..4d660c4 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/ckpt_convert.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+ +# This script consists of several convert functions which +# can modify the weights of model in original repo to be +# pre-trained weights. + +from collections import OrderedDict + +import torch + + +def pvt_convert(ckpt): + new_ckpt = OrderedDict() + # Process the concat between q linear weights and kv linear weights + use_abs_pos_embed = False + use_conv_ffn = False + for k in ckpt.keys(): + if k.startswith('pos_embed'): + use_abs_pos_embed = True + if k.find('dwconv') >= 0: + use_conv_ffn = True + for k, v in ckpt.items(): + if k.startswith('head'): + continue + if k.startswith('norm.'): + continue + if k.startswith('cls_token'): + continue + if k.startswith('pos_embed'): + stage_i = int(k.replace('pos_embed', '')) + new_k = k.replace(f'pos_embed{stage_i}', + f'layers.{stage_i - 1}.1.0.pos_embed') + if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7 + new_v = v[:, 1:, :] # remove cls token + else: + new_v = v + elif k.startswith('patch_embed'): + stage_i = int(k.split('.')[0].replace('patch_embed', '')) + new_k = k.replace(f'patch_embed{stage_i}', + f'layers.{stage_i - 1}.0') + new_v = v + if 'proj.' in new_k: + new_k = new_k.replace('proj.', 'projection.') + elif k.startswith('block'): + stage_i = int(k.split('.')[0].replace('block', '')) + layer_i = int(k.split('.')[1]) + new_layer_i = layer_i + use_abs_pos_embed + new_k = k.replace(f'block{stage_i}.{layer_i}', + f'layers.{stage_i - 1}.1.{new_layer_i}') + new_v = v + if 'attn.q.' in new_k: + sub_item_k = k.replace('q.', 'kv.') + new_k = new_k.replace('q.', 'attn.in_proj_') + new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) + elif 'attn.kv.' in new_k: + continue + elif 'attn.proj.' in new_k: + new_k = new_k.replace('proj.', 'attn.out_proj.') + elif 'attn.sr.' in new_k: + new_k = new_k.replace('sr.', 'sr.') + elif 'mlp.' in new_k: + string = f'{new_k}-' + new_k = new_k.replace('mlp.', 'ffn.layers.') + if 'fc1.weight' in new_k or 'fc2.weight' in new_k: + new_v = v.reshape((*v.shape, 1, 1)) + new_k = new_k.replace('fc1.', '0.') + new_k = new_k.replace('dwconv.dwconv.', '1.') + if use_conv_ffn: + new_k = new_k.replace('fc2.', '4.') + else: + new_k = new_k.replace('fc2.', '3.') + string += f'{new_k} {v.shape}-{new_v.shape}' + elif k.startswith('norm'): + stage_i = int(k[4]) + new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2') + new_v = v + else: + new_k = k + new_v = v + new_ckpt[new_k] = new_v + + return new_ckpt + + +def swin_converter(ckpt): + + new_ckpt = OrderedDict() + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + for k, v in ckpt.items(): + if k.startswith('head'): + continue + elif k.startswith('layers'): + new_v = v + if 'attn.' in k: + new_k = k.replace('attn.', 'attn.w_msa.') + elif 'mlp.' in k: + if 'mlp.fc1.' in k: + new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') + elif 'mlp.fc2.' in k: + new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') + else: + new_k = k.replace('mlp.', 'ffn.') + elif 'downsample' in k: + new_k = k + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(v) + elif 'norm.' 
in k: + new_v = correct_unfold_norm_order(v) + else: + new_k = k + new_k = new_k.replace('layers', 'stages', 1) + elif k.startswith('patch_embed'): + new_v = v + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + else: + new_v = v + new_k = k + + new_ckpt['backbone.' + new_k] = new_v + + return new_ckpt diff --git a/downstream/mmdetection/mmdet/models/utils/conv_upsample.py b/downstream/mmdetection/mmdet/models/utils/conv_upsample.py new file mode 100644 index 0000000..bb5ba76 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/conv_upsample.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, ModuleList + + +class ConvUpsample(BaseModule): + """ConvUpsample performs 2x upsampling after Conv. + + There are several `ConvModule` layers. In the first few layers, upsampling + will be applied after each layer of convolution. The number of upsampling + must be no more than the number of ConvModule layers. + + Args: + in_channels (int): Number of channels in the input feature map. + inner_channels (int): Number of channels produced by the convolution. + num_layers (int): Number of convolution layers. + num_upsample (int | optional): Number of upsampling layer. Must be no + more than num_layers. Upsampling will be applied after the first + ``num_upsample`` layers of convolution. Default: ``num_layers``. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + init_cfg (dict): Config dict for initialization. Default: None. + kwargs (key word augments): Other augments used in ConvModule. + """ + + def __init__(self, + in_channels, + inner_channels, + num_layers=1, + num_upsample=None, + conv_cfg=None, + norm_cfg=None, + init_cfg=None, + **kwargs): + super(ConvUpsample, self).__init__(init_cfg) + if num_upsample is None: + num_upsample = num_layers + assert num_upsample <= num_layers, \ + f'num_upsample({num_upsample})must be no more than ' \ + f'num_layers({num_layers})' + self.num_layers = num_layers + self.num_upsample = num_upsample + self.conv = ModuleList() + for i in range(num_layers): + self.conv.append( + ConvModule( + in_channels, + inner_channels, + 3, + padding=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + in_channels = inner_channels + + def forward(self, x): + num_upsample = self.num_upsample + for i in range(self.num_layers): + x = self.conv[i](x) + if num_upsample > 0: + num_upsample -= 1 + x = F.interpolate( + x, scale_factor=2, mode='bilinear', align_corners=False) + return x diff --git a/downstream/mmdetection/mmdet/models/utils/csp_layer.py b/downstream/mmdetection/mmdet/models/utils/csp_layer.py new file mode 100644 index 0000000..5760b01 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/csp_layer.py @@ -0,0 +1,150 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.runner import BaseModule + + +class DarknetBottleneck(BaseModule): + """The basic bottleneck block used in Darknet. + + Each ResBlock consists of two ConvModules and the input is added to the + final output. Each ConvModule is composed of Conv, BN, and LeakyReLU. + The first convLayer has filter size of 1x1 and the second one has the + filter size of 3x3. 
+
+ Args:
+ in_channels (int): The input channels of this Module.
+ out_channels (int): The output channels of this Module.
+ expansion (float): The ratio of the hidden channels to the output
+ channels. Default: 0.5
+ add_identity (bool): Whether to add an identity shortcut to the output.
+ Default: True
+ use_depthwise (bool): Whether to use depthwise separable convolution.
+ Default: False
+ conv_cfg (dict): Config dict for convolution layer. Default: None,
+ which means using conv2d.
+ norm_cfg (dict): Config dict for normalization layer.
+ Default: dict(type='BN').
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='Swish').
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ expansion=0.5,
+ add_identity=True,
+ use_depthwise=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
+ act_cfg=dict(type='Swish'),
+ init_cfg=None):
+ super().__init__(init_cfg)
+ hidden_channels = int(out_channels * expansion)
+ conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
+ self.conv1 = ConvModule(
+ in_channels,
+ hidden_channels,
+ 1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg)
+ self.conv2 = conv(
+ hidden_channels,
+ out_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg)
+ self.add_identity = \
+ add_identity and in_channels == out_channels
+
+ def forward(self, x):
+ identity = x
+ out = self.conv1(x)
+ out = self.conv2(out)
+
+ if self.add_identity:
+ return out + identity
+ else:
+ return out
+
+
+class CSPLayer(BaseModule):
+ """Cross Stage Partial Layer.
+
+ Args:
+ in_channels (int): The input channels of the CSP layer.
+ out_channels (int): The output channels of the CSP layer.
+ expand_ratio (float): Ratio to adjust the number of channels of the
+ hidden layer. Default: 0.5
+ num_blocks (int): Number of blocks. Default: 1
+ add_identity (bool): Whether to add identity shortcuts in the blocks.
+ Default: True
+ use_depthwise (bool): Whether to use depthwise separable convolutions
+ in the blocks. Default: False
+ conv_cfg (dict, optional): Config dict for convolution layer.
+ Default: None, which means using conv2d.
+ norm_cfg (dict): Config dict for normalization layer.
+ Default: dict(type='BN')
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='Swish') + """ + + def __init__(self, + in_channels, + out_channels, + expand_ratio=0.5, + num_blocks=1, + add_identity=True, + use_depthwise=False, + conv_cfg=None, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='Swish'), + init_cfg=None): + super().__init__(init_cfg) + mid_channels = int(out_channels * expand_ratio) + self.main_conv = ConvModule( + in_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.short_conv = ConvModule( + in_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.final_conv = ConvModule( + 2 * mid_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.blocks = nn.Sequential(*[ + DarknetBottleneck( + mid_channels, + mid_channels, + 1.0, + add_identity, + use_depthwise, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) for _ in range(num_blocks) + ]) + + def forward(self, x): + x_short = self.short_conv(x) + + x_main = self.main_conv(x) + x_main = self.blocks(x_main) + + x_final = torch.cat((x_main, x_short), dim=1) + return self.final_conv(x_final) diff --git a/downstream/mmdetection/mmdet/models/utils/gaussian_target.py b/downstream/mmdetection/mmdet/models/utils/gaussian_target.py new file mode 100644 index 0000000..5bf4d55 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/gaussian_target.py @@ -0,0 +1,268 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from math import sqrt + +import torch +import torch.nn.functional as F + + +def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): + """Generate 2D gaussian kernel. + + Args: + radius (int): Radius of gaussian kernel. + sigma (int): Sigma of gaussian function. Default: 1. + dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. + device (str): Device of gaussian tensor. Default: 'cpu'. + + Returns: + h (Tensor): Gaussian kernel with a + ``(2 * radius + 1) * (2 * radius + 1)`` shape. + """ + x = torch.arange( + -radius, radius + 1, dtype=dtype, device=device).view(1, -1) + y = torch.arange( + -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) + + h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() + + h[h < torch.finfo(h.dtype).eps * h.max()] = 0 + return h + + +def gen_gaussian_target(heatmap, center, radius, k=1): + """Generate 2D gaussian heatmap. + + Args: + heatmap (Tensor): Input heatmap, the gaussian kernel will cover on + it and maintain the max value. + center (list[int]): Coord of gaussian kernel's center. + radius (int): Radius of gaussian kernel. + k (int): Coefficient of gaussian kernel. Default: 1. + + Returns: + out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. + """ + diameter = 2 * radius + 1 + gaussian_kernel = gaussian2D( + radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) + + x, y = center + + height, width = heatmap.shape[:2] + + left, right = min(x, radius), min(width - x, radius + 1) + top, bottom = min(y, radius), min(height - y, radius + 1) + + masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] + masked_gaussian = gaussian_kernel[radius - top:radius + bottom, + radius - left:radius + right] + out_heatmap = heatmap + torch.max( + masked_heatmap, + masked_gaussian * k, + out=out_heatmap[y - top:y + bottom, x - left:x + right]) + + return out_heatmap + + +def gaussian_radius(det_size, min_overlap): + r"""Generate 2D gaussian radius. 
+ + This function is modified from the `official github repo + `_. + + Given ``min_overlap``, radius could computed by a quadratic equation + according to Vieta's formulas. + + There are 3 cases for computing gaussian radius, details are following: + + - Explanation of figure: ``lt`` and ``br`` indicates the left-top and + bottom-right corner of ground truth box. ``x`` indicates the + generated corner at the limited position when ``radius=r``. + + - Case1: one corner is inside the gt box and the other is outside. + + .. code:: text + + |< width >| + + lt-+----------+ - + | | | ^ + +--x----------+--+ + | | | | + | | | | height + | | overlap | | + | | | | + | | | | v + +--+---------br--+ - + | | | + +----------+--x + + To ensure IoU of generated box and gt box is larger than ``min_overlap``: + + .. math:: + \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad + {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ + {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} + {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} + + - Case2: both two corners are inside the gt box. + + .. code:: text + + |< width >| + + lt-+----------+ - + | | | ^ + +--x-------+ | + | | | | + | |overlap| | height + | | | | + | +-------x--+ + | | | v + +----------+-br - + + To ensure IoU of generated box and gt box is larger than ``min_overlap``: + + .. math:: + \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad + {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ + {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} + {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} + + - Case3: both two corners are outside the gt box. + + .. code:: text + + |< width >| + + x--+----------------+ + | | | + +-lt-------------+ | - + | | | | ^ + | | | | + | | overlap | | height + | | | | + | | | | v + | +------------br--+ - + | | | + +----------------+--x + + To ensure IoU of generated box and gt box is larger than ``min_overlap``: + + .. math:: + \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad + {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ + {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ + {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} + + Args: + det_size (list[int]): Shape of object. + min_overlap (float): Min IoU with ground truth for boxes generated by + keypoints inside the gaussian kernel. + + Returns: + radius (int): Radius of gaussian kernel. + """ + height, width = det_size + + a1 = 1 + b1 = (height + width) + c1 = width * height * (1 - min_overlap) / (1 + min_overlap) + sq1 = sqrt(b1**2 - 4 * a1 * c1) + r1 = (b1 - sq1) / (2 * a1) + + a2 = 4 + b2 = 2 * (height + width) + c2 = (1 - min_overlap) * width * height + sq2 = sqrt(b2**2 - 4 * a2 * c2) + r2 = (b2 - sq2) / (2 * a2) + + a3 = 4 * min_overlap + b3 = -2 * min_overlap * (height + width) + c3 = (min_overlap - 1) * width * height + sq3 = sqrt(b3**2 - 4 * a3 * c3) + r3 = (b3 + sq3) / (2 * a3) + return min(r1, r2, r3) + + +def get_local_maximum(heat, kernel=3): + """Extract local maximum pixel with given kernel. + + Args: + heat (Tensor): Target heatmap. + kernel (int): Kernel size of max pooling. Default: 3. + + Returns: + heat (Tensor): A heatmap where local maximum pixels maintain its + own value and other positions are 0. + """ + pad = (kernel - 1) // 2 + hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad) + keep = (hmax == heat).float() + return heat * keep + + +def get_topk_from_heatmap(scores, k=20): + """Get top k positions from heatmap. + + Args: + scores (Tensor): Target heatmap with shape + [batch, num_classes, height, width]. 
+ k (int): Target number. Default: 20. + + Returns: + tuple[torch.Tensor]: Scores, indexes, categories and coords of + topk keypoint. Containing following Tensors: + + - topk_scores (Tensor): Max scores of each topk keypoint. + - topk_inds (Tensor): Indexes of each topk keypoint. + - topk_clses (Tensor): Categories of each topk keypoint. + - topk_ys (Tensor): Y-coord of each topk keypoint. + - topk_xs (Tensor): X-coord of each topk keypoint. + """ + batch, _, height, width = scores.size() + topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k) + topk_clses = topk_inds // (height * width) + topk_inds = topk_inds % (height * width) + topk_ys = topk_inds // width + topk_xs = (topk_inds % width).int().float() + return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs + + +def gather_feat(feat, ind, mask=None): + """Gather feature according to index. + + Args: + feat (Tensor): Target feature map. + ind (Tensor): Target coord index. + mask (Tensor | None): Mask of feature map. Default: None. + + Returns: + feat (Tensor): Gathered feature. + """ + dim = feat.size(2) + ind = ind.unsqueeze(2).repeat(1, 1, dim) + feat = feat.gather(1, ind) + if mask is not None: + mask = mask.unsqueeze(2).expand_as(feat) + feat = feat[mask] + feat = feat.view(-1, dim) + return feat + + +def transpose_and_gather_feat(feat, ind): + """Transpose and gather feature according to index. + + Args: + feat (Tensor): Target feature map. + ind (Tensor): Target coord index. + + Returns: + feat (Tensor): Transposed and gathered feature. + """ + feat = feat.permute(0, 2, 3, 1).contiguous() + feat = feat.view(feat.size(0), -1, feat.size(3)) + feat = gather_feat(feat, ind) + return feat diff --git a/downstream/mmdetection/mmdet/models/utils/inverted_residual.py b/downstream/mmdetection/mmdet/models/utils/inverted_residual.py new file mode 100644 index 0000000..1f241ae --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/inverted_residual.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks import DropPath +from mmcv.runner import BaseModule + +from .se_layer import SELayer + + +class InvertedResidual(BaseModule): + """Inverted Residual Block. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Default: 3. + stride (int): The stride of the depthwise convolution. Default: 1. + se_cfg (dict): Config dict for se layer. Default: None, which means no + se layer. + with_expand_conv (bool): Use expand conv or not. If set False, + mid_channels must be the same with in_channels. + Default: True. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + drop_path_rate (float): stochastic depth rate. Defaults to 0. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Returns: + Tensor: The output tensor. 
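+
+ Example (an illustrative sketch; the channel numbers below are
+ hypothetical):
+
+ >>> import torch
+ >>> block = InvertedResidual(32, 32, mid_channels=96)
+ >>> out = block(torch.rand(1, 32, 56, 56))
+ >>> out.shape
+ torch.Size([1, 32, 56, 56])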
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + with_expand_conv=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_path_rate=0., + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.with_se = se_cfg is not None + self.with_expand_conv = with_expand_conv + + if self.with_se: + assert isinstance(se_cfg, dict) + if not self.with_expand_conv: + assert mid_channels == in_channels + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=mid_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if self.with_se: + self.se = SELayer(**se_cfg) + + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + self.drop_path(out) + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/downstream/mmdetection/mmdet/models/utils/make_divisible.py b/downstream/mmdetection/mmdet/models/utils/make_divisible.py new file mode 100644 index 0000000..ed42c2e --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/make_divisible.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. + + This function rounds the channel number to the nearest value that can be + divisible by the divisor. It is taken from the original tf repo. It ensures + that all layers have a channel number that is divisible by divisor. It can + be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa + + Args: + value (int): The original channel number. + divisor (int): The divisor to fully divide the channel number. + min_value (int): The minimum value of the output channel. + Default: None, means that the minimum value equal to the divisor. + min_ratio (float): The minimum ratio of the rounded channel number to + the original channel number. Default: 0.9. + + Returns: + int: The modified output channel number. + """ + + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than (1-min_ratio). 
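+ # For example, make_divisible(10, 8) first rounds to 8; since
+ # 8 < 0.9 * 10, the result is bumped by one divisor to 16, whereas
+ # make_divisible(63, 8) rounds to 64 and is returned unchanged.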
+ if new_value < min_ratio * value: + new_value += divisor + return new_value diff --git a/downstream/mmdetection/mmdet/models/utils/misc.py b/downstream/mmdetection/mmdet/models/utils/misc.py new file mode 100644 index 0000000..8f9be9a --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/misc.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from torch.autograd import Function +from torch.nn import functional as F + + +class SigmoidGeometricMean(Function): + """Forward and backward function of geometric mean of two sigmoid + functions. + + This implementation with analytical gradient function substitutes + the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The + original implementation incurs none during gradient backprapagation + if both x and y are very small values. + """ + + @staticmethod + def forward(ctx, x, y): + x_sigmoid = x.sigmoid() + y_sigmoid = y.sigmoid() + z = (x_sigmoid * y_sigmoid).sqrt() + ctx.save_for_backward(x_sigmoid, y_sigmoid, z) + return z + + @staticmethod + def backward(ctx, grad_output): + x_sigmoid, y_sigmoid, z = ctx.saved_tensors + grad_x = grad_output * z * (1 - x_sigmoid) / 2 + grad_y = grad_output * z * (1 - y_sigmoid) / 2 + return grad_x, grad_y + + +sigmoid_geometric_mean = SigmoidGeometricMean.apply + + +def interpolate_as(source, target, mode='bilinear', align_corners=False): + """Interpolate the `source` to the shape of the `target`. + + The `source` must be a Tensor, but the `target` can be a Tensor or a + np.ndarray with the shape (..., target_h, target_w). + + Args: + source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or + (N, C, H, W). + target (Tensor | np.ndarray): The interpolation target with the shape + (..., target_h, target_w). + mode (str): Algorithm used for interpolation. The options are the + same as those in F.interpolate(). Default: ``'bilinear'``. + align_corners (bool): The same as the argument in F.interpolate(). + + Returns: + Tensor: The interpolated source Tensor. + """ + assert len(target.shape) >= 2 + + def _interpolate_as(source, target, mode='bilinear', align_corners=False): + """Interpolate the `source` (4D) to the shape of the `target`.""" + target_h, target_w = target.shape[-2:] + source_h, source_w = source.shape[-2:] + if target_h != source_h or target_w != source_w: + source = F.interpolate( + source, + size=(target_h, target_w), + mode=mode, + align_corners=align_corners) + return source + + if len(source.shape) == 3: + source = source[:, None, :, :] + source = _interpolate_as(source, target, mode, align_corners) + return source[:, 0, :, :] + else: + return _interpolate_as(source, target, mode, align_corners) diff --git a/downstream/mmdetection/mmdet/models/utils/normed_predictor.py b/downstream/mmdetection/mmdet/models/utils/normed_predictor.py new file mode 100644 index 0000000..f0eeef7 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/normed_predictor.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import CONV_LAYERS + +from .builder import LINEAR_LAYERS + + +@LINEAR_LAYERS.register_module(name='NormedLinear') +class NormedLinear(nn.Linear): + """Normalized Linear Layer. + + Args: + tempeature (float, optional): Tempeature term. Default to 20. + power (int, optional): Power term. Default to 1.0. + eps (float, optional): The minimal value of divisor to + keep numerical stability. Default to 1e-6. 
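+
+ Example (an illustrative sketch; the feature sizes below are
+ hypothetical):
+
+ >>> import torch
+ >>> fc = NormedLinear(256, 80)
+ >>> out = fc(torch.rand(4, 256))
+ >>> out.shape
+ torch.Size([4, 80])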
+ """ + + def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs): + super(NormedLinear, self).__init__(*args, **kwargs) + self.tempearture = tempearture + self.power = power + self.eps = eps + self.init_weights() + + def init_weights(self): + nn.init.normal_(self.weight, mean=0, std=0.01) + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, x): + weight_ = self.weight / ( + self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) + x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) + x_ = x_ * self.tempearture + + return F.linear(x_, weight_, self.bias) + + +@CONV_LAYERS.register_module(name='NormedConv2d') +class NormedConv2d(nn.Conv2d): + """Normalized Conv2d Layer. + + Args: + tempeature (float, optional): Tempeature term. Default to 20. + power (int, optional): Power term. Default to 1.0. + eps (float, optional): The minimal value of divisor to + keep numerical stability. Default to 1e-6. + norm_over_kernel (bool, optional): Normalize over kernel. + Default to False. + """ + + def __init__(self, + *args, + tempearture=20, + power=1.0, + eps=1e-6, + norm_over_kernel=False, + **kwargs): + super(NormedConv2d, self).__init__(*args, **kwargs) + self.tempearture = tempearture + self.power = power + self.norm_over_kernel = norm_over_kernel + self.eps = eps + + def forward(self, x): + if not self.norm_over_kernel: + weight_ = self.weight / ( + self.weight.norm(dim=1, keepdim=True).pow(self.power) + + self.eps) + else: + weight_ = self.weight / ( + self.weight.view(self.weight.size(0), -1).norm( + dim=1, keepdim=True).pow(self.power)[..., None, None] + + self.eps) + x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) + x_ = x_ * self.tempearture + + if hasattr(self, 'conv2d_forward'): + x_ = self.conv2d_forward(x_, weight_) + else: + if torch.__version__ >= '1.8': + x_ = self._conv_forward(x_, weight_, self.bias) + else: + x_ = self._conv_forward(x_, weight_) + return x_ diff --git a/downstream/mmdetection/mmdet/models/utils/panoptic_gt_processing.py b/downstream/mmdetection/mmdet/models/utils/panoptic_gt_processing.py new file mode 100644 index 0000000..7685ac9 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/panoptic_gt_processing.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things, + num_stuff, img_metas): + """Preprocess the ground truth for a image. + + Args: + gt_labels (Tensor): Ground truth labels of each bbox, + with shape (num_gts, ). + gt_masks (BitmapMasks): Ground truth masks of each instances + of a image, shape (num_gts, h, w). + gt_semantic_seg (Tensor | None): Ground truth of semantic + segmentation with the shape (1, h, w). + [0, num_thing_class - 1] means things, + [num_thing_class, num_class-1] means stuff, + 255 means VOID. It's None when training instance segmentation. + img_metas (dict): List of image meta information. + + Returns: + tuple: a tuple containing the following targets. + + - labels (Tensor): Ground truth class indices for a + image, with shape (n, ), n is the sum of number + of stuff type and number of instance in a image. + - masks (Tensor): Ground truth mask for a image, with + shape (n, h, w). Contains stuff and things when training + panoptic segmentation, and things only when training + instance segmentation. 
+ """ + num_classes = num_things + num_stuff + + things_masks = gt_masks.pad(img_metas['pad_shape'][:2], pad_val=0)\ + .to_tensor(dtype=torch.bool, device=gt_labels.device) + + if gt_semantic_seg is None: + masks = things_masks.long() + return gt_labels, masks + + things_labels = gt_labels + gt_semantic_seg = gt_semantic_seg.squeeze(0) + + semantic_labels = torch.unique( + gt_semantic_seg, + sorted=False, + return_inverse=False, + return_counts=False) + stuff_masks_list = [] + stuff_labels_list = [] + for label in semantic_labels: + if label < num_things or label >= num_classes: + continue + stuff_mask = gt_semantic_seg == label + stuff_masks_list.append(stuff_mask) + stuff_labels_list.append(label) + + if len(stuff_masks_list) > 0: + stuff_masks = torch.stack(stuff_masks_list, dim=0) + stuff_labels = torch.stack(stuff_labels_list, dim=0) + labels = torch.cat([things_labels, stuff_labels], dim=0) + masks = torch.cat([things_masks, stuff_masks], dim=0) + else: + labels = things_labels + masks = things_masks + + masks = masks.long() + return labels, masks diff --git a/downstream/mmdetection/mmdet/models/utils/point_sample.py b/downstream/mmdetection/mmdet/models/utils/point_sample.py new file mode 100644 index 0000000..c2c3cf9 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/point_sample.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.ops import point_sample + + +def get_uncertainty(mask_pred, labels): + """Estimate uncertainty based on pred logits. + + We estimate uncertainty as L1 distance between 0.0 and the logits + prediction in 'mask_pred' for the foreground class in `classes`. + + Args: + mask_pred (Tensor): mask predication logits, shape (num_rois, + num_classes, mask_height, mask_width). + + labels (list[Tensor]): Either predicted or ground truth label for + each predicted mask, of length num_rois. + + Returns: + scores (Tensor): Uncertainty scores with the most uncertain + locations having the highest uncertainty score, + shape (num_rois, 1, mask_height, mask_width) + """ + if mask_pred.shape[1] == 1: + gt_class_logits = mask_pred.clone() + else: + inds = torch.arange(mask_pred.shape[0], device=mask_pred.device) + gt_class_logits = mask_pred[inds, labels].unsqueeze(1) + return -torch.abs(gt_class_logits) + + +def get_uncertain_point_coords_with_randomness(mask_pred, labels, num_points, + oversample_ratio, + importance_sample_ratio): + """Get ``num_points`` most uncertain points with random points during + train. + + Sample points in [0, 1] x [0, 1] coordinate space based on their + uncertainty. The uncertainties are calculated for each point using + 'get_uncertainty()' function that takes point's logit prediction as + input. + + Args: + mask_pred (Tensor): A tensor of shape (num_rois, num_classes, + mask_height, mask_width) for class-specific or class-agnostic + prediction. + labels (list): The ground truth class for each instance. + num_points (int): The number of points to sample. + oversample_ratio (int): Oversampling parameter. + importance_sample_ratio (float): Ratio of points that are sampled + via importnace sampling. + + Returns: + point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) + that contains the coordinates sampled points. 
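+
+ For example (an illustrative calculation), with ``num_points=12544``,
+ ``oversample_ratio=3`` and ``importance_sample_ratio=0.75``, the
+ function first draws ``12544 * 3 = 37632`` candidate points per RoI,
+ keeps the ``int(0.75 * 12544) = 9408`` most uncertain of them and
+ fills the remaining ``12544 - 9408 = 3136`` slots with uniformly
+ random points.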
+ """ + assert oversample_ratio >= 1 + assert 0 <= importance_sample_ratio <= 1 + batch_size = mask_pred.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand( + batch_size, num_sampled, 2, device=mask_pred.device) + point_logits = point_sample(mask_pred, point_coords) + # It is crucial to calculate uncertainty based on the sampled + # prediction value for the points. Calculating uncertainties of the + # coarse predictions first and sampling them for points leads to + # incorrect results. To illustrate this: assume uncertainty func( + # logits)=-abs(logits), a sampled point between two coarse + # predictions with -1 and 1 logits has 0 logits, and therefore 0 + # uncertainty value. However, if we calculate uncertainties for the + # coarse predictions first, both will have -1 uncertainty, + # and sampled point will get -1 uncertainty. + point_uncertainties = get_uncertainty(point_logits, labels) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk( + point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_sampled * torch.arange( + batch_size, dtype=torch.long, device=mask_pred.device) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + batch_size, num_uncertain_points, 2) + if num_random_points > 0: + rand_roi_coords = torch.rand( + batch_size, num_random_points, 2, device=mask_pred.device) + point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) + return point_coords diff --git a/downstream/mmdetection/mmdet/models/utils/positional_encoding.py b/downstream/mmdetection/mmdet/models/utils/positional_encoding.py new file mode 100644 index 0000000..dd29cd6 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/positional_encoding.py @@ -0,0 +1,163 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING +from mmcv.runner import BaseModule + + +@POSITIONAL_ENCODING.register_module() +class SinePositionalEncoding(BaseModule): + """Position encoding with sine and cosine functions. + + See `End-to-End Object Detection with Transformers + `_ for details. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. Note the final returned dimension + for each position is 2 times of this value. + temperature (int, optional): The temperature used for scaling + the position embedding. Defaults to 10000. + normalize (bool, optional): Whether to normalize the position + embedding. Defaults to False. + scale (float, optional): A scale factor that scales the position + embedding. The scale will be used only when `normalize` is True. + Defaults to 2*pi. + eps (float, optional): A value added to the denominator for + numerical stability. Defaults to 1e-6. + offset (float): offset add to embed when do the normalization. + Defaults to 0. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + num_feats, + temperature=10000, + normalize=False, + scale=2 * math.pi, + eps=1e-6, + offset=0., + init_cfg=None): + super(SinePositionalEncoding, self).__init__(init_cfg) + if normalize: + assert isinstance(scale, (float, int)), 'when normalize is set,' \ + 'scale should be provided and in float or int type, ' \ + f'found {type(scale)}' + self.num_feats = num_feats + self.temperature = temperature + self.normalize = normalize + self.scale = scale + self.eps = eps + self.offset = offset + + def forward(self, mask): + """Forward function for `SinePositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. + """ + # For convenience of exporting to ONNX, it's required to convert + # `masks` from bool to int. + mask = mask.to(torch.int) + not_mask = 1 - mask # logical_not + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + y_embed = (y_embed + self.offset) / \ + (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = (x_embed + self.offset) / \ + (x_embed[:, :, -1:] + self.eps) * self.scale + dim_t = torch.arange( + self.num_feats, dtype=torch.float32, device=mask.device) + dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + # use `view` instead of `flatten` for dynamically exporting to ONNX + B, H, W = mask.size() + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), + dim=4).view(B, H, W, -1) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), + dim=4).view(B, H, W, -1) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'temperature={self.temperature}, ' + repr_str += f'normalize={self.normalize}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'eps={self.eps})' + return repr_str + + +@POSITIONAL_ENCODING.register_module() +class LearnedPositionalEncoding(BaseModule): + """Position embedding with learnable embedding weights. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. The final returned dimension for + each position is 2 times of this value. + row_num_embed (int, optional): The dictionary size of row embeddings. + Default 50. + col_num_embed (int, optional): The dictionary size of col embeddings. + Default 50. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_feats, + row_num_embed=50, + col_num_embed=50, + init_cfg=dict(type='Uniform', layer='Embedding')): + super(LearnedPositionalEncoding, self).__init__(init_cfg) + self.row_embed = nn.Embedding(row_num_embed, num_feats) + self.col_embed = nn.Embedding(col_num_embed, num_feats) + self.num_feats = num_feats + self.row_num_embed = row_num_embed + self.col_num_embed = col_num_embed + + def forward(self, mask): + """Forward function for `LearnedPositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. 
+ + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. + """ + h, w = mask.shape[-2:] + x = torch.arange(w, device=mask.device) + y = torch.arange(h, device=mask.device) + x_embed = self.col_embed(x) + y_embed = self.row_embed(y) + pos = torch.cat( + (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( + 1, w, 1)), + dim=-1).permute(2, 0, + 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'row_num_embed={self.row_num_embed}, ' + repr_str += f'col_num_embed={self.col_num_embed})' + return repr_str diff --git a/downstream/mmdetection/mmdet/models/utils/res_layer.py b/downstream/mmdetection/mmdet/models/utils/res_layer.py new file mode 100644 index 0000000..5c3e89f --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/res_layer.py @@ -0,0 +1,190 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule, Sequential +from torch import nn as nn + + +class ResLayer(Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. 
Default: True + """ + + def __init__(self, + block, + inplanes, + planes, + num_blocks, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + **kwargs): + self.block = block + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + if avg_down: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + inplanes = planes * block.expansion + for _ in range(1, num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + + else: # downsample_first=False is for HourglassModule + for _ in range(num_blocks - 1): + layers.append( + block( + inplanes=inplanes, + planes=inplanes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) + + +class SimplifiedBasicBlock(BaseModule): + """Simplified version of original basic residual block. This is used in + `SCNet `_. + + - Norm layer is now optional + - Last ReLU in forward function is removed + """ + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_fg=None): + super(SimplifiedBasicBlock, self).__init__(init_fg) + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + assert not with_cp, 'Not implemented yet.' 
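+ # The conv layers below use a bias only when no norm layer follows;
+ # a subsequent norm layer would make the bias redundant.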
+ self.with_norm = norm_cfg is not None + with_bias = True if norm_cfg is None else False + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=with_bias) + if self.with_norm: + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, planes, postfix=1) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=with_bias) + if self.with_norm: + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, planes, postfix=2) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) if self.with_norm else None + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) if self.with_norm else None + + def forward(self, x): + """Forward function.""" + + identity = x + + out = self.conv1(x) + if self.with_norm: + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + if self.with_norm: + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out diff --git a/downstream/mmdetection/mmdet/models/utils/se_layer.py b/downstream/mmdetection/mmdet/models/utils/se_layer.py new file mode 100644 index 0000000..a249210 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/se_layer.py @@ -0,0 +1,127 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + + +class SELayer(BaseModule): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will be + ``int(channels/ratio)``. Default: 16. + conv_cfg (None or dict): Config dict for convolution layer. + Default: None, which means using conv2d. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configurated + by this dict. If act_cfg is a sequence of dicts, the first + activation layer will be configurated by the first dict and the + second activation layer will be configurated by the second dict. + Default: (dict(type='ReLU'), dict(type='Sigmoid')) + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + init_cfg=None): + super(SELayer, self).__init__(init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmcv.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=int(channels / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(channels / ratio), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out + + +class DyReLU(BaseModule): + """Dynamic ReLU (DyReLU) module. + + See `Dynamic ReLU `_ for details. + Current implementation is specialized for task-aware attention in DyHead. + HSigmoid arguments in default act_cfg follow DyHead official code. + https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py + + Args: + channels (int): The input (and output) channels of DyReLU module. + ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module, + the intermediate channel will be ``int(channels/ratio)``. + Default: 4. + conv_cfg (None or dict): Config dict for convolution layer. + Default: None, which means using conv2d. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configurated + by this dict. If act_cfg is a sequence of dicts, the first + activation layer will be configurated by the first dict and the + second activation layer will be configurated by the second dict. + Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, + divisor=6.0)) + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + channels, + ratio=4, + conv_cfg=None, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=3.0, divisor=6.0)), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmcv.is_tuple_of(act_cfg, dict) + self.channels = channels + self.expansion = 4 # for a1, b1, a2, b2 + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=int(channels / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(channels / ratio), + out_channels=channels * self.expansion, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + """Forward function.""" + coeffs = self.global_avgpool(x) + coeffs = self.conv1(coeffs) + coeffs = self.conv2(coeffs) - 0.5 # value range: [-0.5, 0.5] + a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1) + a1 = a1 * 2.0 + 1.0 # [-1.0, 1.0] + 1.0 + a2 = a2 * 2.0 # [-1.0, 1.0] + out = torch.max(x * a1 + b1, x * a2 + b2) + return out diff --git a/downstream/mmdetection/mmdet/models/utils/transformer.py b/downstream/mmdetection/mmdet/models/utils/transformer.py new file mode 100644 index 0000000..3c390c8 --- /dev/null +++ b/downstream/mmdetection/mmdet/models/utils/transformer.py @@ -0,0 +1,1167 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import math +import warnings +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (build_activation_layer, build_conv_layer, + build_norm_layer, xavier_init) +from mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE) +from mmcv.cnn.bricks.transformer import (BaseTransformerLayer, + TransformerLayerSequence, + build_transformer_layer_sequence) +from mmcv.runner.base_module import BaseModule +from mmcv.utils import to_2tuple +from torch.nn.init import normal_ + +from mmdet.models.utils.builder import TRANSFORMER + +try: + from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention + +except ImportError: + warnings.warn( + '`MultiScaleDeformableAttention` in MMCV has been moved to ' + '`mmcv.ops.multi_scale_deform_attn`, please update your MMCV') + from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention + + +def nlc_to_nchw(x, hw_shape): + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. + + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len does not match H, W' + return x.transpose(1, 2).reshape(B, C, H, W).contiguous() + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +class AdaptivePadding(nn.Module): + """Applies padding to input (if needed) so that input can get fully covered + by filter you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around + input. The "corner" mode would pad zero to bottom right. + + Args: + kernel_size (int | tuple): Size of the kernel: + stride (int | tuple): Stride of the filter. Default: 1: + dilation (int | tuple): Spacing between kernel elements. + Default: 1 + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. Default: "corner". 
+ Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + + super(AdaptivePadding, self).__init__() + + assert padding in ('same', 'corner') + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + dilation = to_2tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + input_h, input_w = input_shape + kernel_h, kernel_w = self.kernel_size + stride_h, stride_w = self.stride + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) + return pad_h, pad_w + + def forward(self, x): + pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ]) + return x + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The config dict for embedding + conv layer type selection. Default: "Conv2d. + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: None (Would be set as `kernel_size`). + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only work when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. 
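+
+ Example (an illustrative sketch using the default 16x16 patches):
+
+ >>> import torch
+ >>> patch_embed = PatchEmbed(in_channels=3, embed_dims=768)
+ >>> x, out_size = patch_embed(torch.rand(1, 3, 224, 224))
+ >>> x.shape, out_size
+ (torch.Size([1, 196, 768]), (14, 14))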
+ """ + + def __init__( + self, + in_channels=3, + embed_dims=768, + conv_type='Conv2d', + kernel_size=16, + stride=16, + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None, + ): + super(PatchEmbed, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + if stride is None: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adap_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adap_padding: + pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) + input_h, input_w = input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adap_padding: + x = self.adap_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +class PatchMerging(BaseModule): + """Merge patch feature map. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map. Our implementation uses `nn.Unfold` to + merge patch, which is about 25% faster than original implementation. + Instead, we need to modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. + to gets fully covered by filter and stride you specified.. + Default: True. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Default: None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults: False. 
+ norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adap_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + + if self.adap_padding: + x = self.adap_padding(x) + H, W = x.shape[-2:] + + x = self.sampler(x) + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + x = self.norm(x) if self.norm else x + x = self.reduction(x) + return x, output_size + + +def inverse_sigmoid(x, eps=1e-5): + """Inverse function of sigmoid. + + Args: + x (Tensor): The tensor to do the + inverse. + eps (float): EPS avoid numerical + overflow. Defaults 1e-5. + Returns: + Tensor: The x has passed the inverse + function of sigmoid, has same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +@TRANSFORMER_LAYER.register_module() +class DetrTransformerDecoderLayer(BaseTransformerLayer): + """Implements decoder layer in DETR transformer. + + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. 
+ feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default:None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default:2. + """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + **kwargs): + super(DetrTransformerDecoderLayer, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class DetrTransformerEncoder(TransformerLayerSequence): + """TransformerEncoder of DETR. + + Args: + post_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. Only used when `self.pre_norm` is `True` + """ + + def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs): + super(DetrTransformerEncoder, self).__init__(*args, **kwargs) + if post_norm_cfg is not None: + self.post_norm = build_norm_layer( + post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None + else: + assert not self.pre_norm, f'Use prenorm in ' \ + f'{self.__class__.__name__},' \ + f'Please specify post_norm_cfg' + self.post_norm = None + + def forward(self, *args, **kwargs): + """Forward function for `TransformerCoder`. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + x = super(DetrTransformerEncoder, self).forward(*args, **kwargs) + if self.post_norm is not None: + x = self.post_norm(x) + return x + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class DetrTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. + + Args: + return_intermediate (bool): Whether to return intermediate outputs. + post_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, + *args, + post_norm_cfg=dict(type='LN'), + return_intermediate=False, + **kwargs): + + super(DetrTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + if post_norm_cfg is not None: + self.post_norm = build_norm_layer(post_norm_cfg, + self.embed_dims)[1] + else: + self.post_norm = None + + def forward(self, query, *args, **kwargs): + """Forward function for `TransformerDecoder`. + + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. 
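+
+ For instance, with 6 decoder layers, 100 queries, a batch size of 2
+ and ``embed_dims=256``, the stacked intermediate output has shape
+ ``[6, 100, 2, 256]`` when ``return_intermediate`` is ``True``.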
+ """ + if not self.return_intermediate: + x = super().forward(query, *args, **kwargs) + if self.post_norm: + x = self.post_norm(x)[None] + return x + + intermediate = [] + for layer in self.layers: + query = layer(query, *args, **kwargs) + if self.return_intermediate: + if self.post_norm is not None: + intermediate.append(self.post_norm(query)) + else: + intermediate.append(query) + return torch.stack(intermediate) + + +@TRANSFORMER.register_module() +class Transformer(BaseModule): + """Implements the DETR transformer. + + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + + See `paper: End-to-End Object Detection with Transformers + `_ for details. + + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. + """ + + def __init__(self, encoder=None, decoder=None, init_cfg=None): + super(Transformer, self).__init__(init_cfg=init_cfg) + self.encoder = build_transformer_layer_sequence(encoder) + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = self.encoder.embed_dims + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, x, mask, query_embed, pos_embed): + """Forward function for `Transformer`. + + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. + """ + bs, c, h, w = x.shape + # use `view` instead of `flatten` for dynamically exporting to ONNX + x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c] + pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat( + 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w] + memory = self.encoder( + query=x, + key=None, + value=None, + query_pos=pos_embed, + query_key_padding_mask=mask) + target = torch.zeros_like(query_embed) + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask) + out_dec = out_dec.transpose(1, 2) + memory = memory.permute(1, 2, 0).reshape(bs, c, h, w) + return out_dec, memory + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class DeformableDetrTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. 
+ + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, *args, return_intermediate=False, **kwargs): + + super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + + def forward(self, + query, + *args, + reference_points=None, + valid_ratios=None, + reg_branches=None, + **kwargs): + """Forward function for `TransformerDecoder`. + + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + reg_branch: (obj:`nn.ModuleList`): Used for + refining the regression results. Only would + be passed when with_box_refine is True, + otherwise would be passed a `None`. + + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + output = query + intermediate = [] + intermediate_reference_points = [] + for lid, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = reference_points[:, :, None] * \ + torch.cat([valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = reference_points[:, :, None] * \ + valid_ratios[:, None] + output = layer( + output, + *args, + reference_points=reference_points_input, + **kwargs) + output = output.permute(1, 0, 2) + + if reg_branches is not None: + tmp = reg_branches[lid](output) + if reference_points.shape[-1] == 4: + new_reference_points = tmp + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + assert reference_points.shape[-1] == 2 + new_reference_points = tmp + new_reference_points[..., :2] = tmp[ + ..., :2] + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@TRANSFORMER.register_module() +class DeformableDetrTransformer(Transformer): + """Implements the DeformableDETR transformer. + + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. 
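A small numerical sketch of the per-layer reference-point update used in `DeformableDetrTransformerDecoder.forward` above when `reg_branches` is given: the box refinement is applied in inverse-sigmoid space and re-normalized with a sigmoid. The delta values here are made up purely for illustration:

```python
import torch


def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


# normalized (cx, cy, w, h) reference boxes for two queries
reference_points = torch.tensor([[0.25, 0.25, 0.10, 0.10],
                                 [0.60, 0.40, 0.20, 0.30]])
# pretend this came from reg_branches[lid](output)
tmp = torch.tensor([[0.5, -0.5, 0.0, 0.0],
                    [-1.0, 0.0, 0.2, -0.2]])
new_reference_points = (tmp + inverse_sigmoid(reference_points)).sigmoid()
print(new_reference_points)  # still in [0, 1], ready for the next layer
```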
+ """ + + def __init__(self, + as_two_stage=False, + num_feature_levels=4, + two_stage_num_proposals=300, + **kwargs): + super(DeformableDetrTransformer, self).__init__(**kwargs) + self.as_two_stage = as_two_stage + self.num_feature_levels = num_feature_levels + self.two_stage_num_proposals = two_stage_num_proposals + self.embed_dims = self.encoder.embed_dims + self.init_layers() + + def init_layers(self): + """Initialize layers of the DeformableDetrTransformer.""" + self.level_embeds = nn.Parameter( + torch.Tensor(self.num_feature_levels, self.embed_dims)) + + if self.as_two_stage: + self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) + self.enc_output_norm = nn.LayerNorm(self.embed_dims) + self.pos_trans = nn.Linear(self.embed_dims * 2, + self.embed_dims * 2) + self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) + else: + self.reference_points = nn.Linear(self.embed_dims, 2) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MultiScaleDeformableAttention): + m.init_weights() + if not self.as_two_stage: + xavier_init(self.reference_points, distribution='uniform', bias=0.) + normal_(self.level_embeds) + + def gen_encoder_output_proposals(self, memory, memory_padding_mask, + spatial_shapes): + """Generate proposals from encoded memory. + + Args: + memory (Tensor) : The output of encoder, + has shape (bs, num_key, embed_dim). num_key is + equal the number of points on feature map from + all level. + memory_padding_mask (Tensor): Padding mask for memory. + has shape (bs, num_key). + spatial_shapes (Tensor): The shape of all feature maps. + has shape (num_level, 2). + + Returns: + tuple: A tuple of feature map and bbox prediction. + + - output_memory (Tensor): The input of decoder, \ + has shape (bs, num_key, embed_dim). num_key is \ + equal the number of points on feature map from \ + all levels. + - output_proposals (Tensor): The normalized proposal \ + after a inverse sigmoid, has shape \ + (bs, num_keys, 4). 
+ """ + + N, S, C = memory.shape + proposals = [] + _cur = 0 + for lvl, (H, W) in enumerate(spatial_shapes): + mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view( + N, H, W, 1) + valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + grid_y, grid_x = torch.meshgrid( + torch.linspace( + 0, H - 1, H, dtype=torch.float32, device=memory.device), + torch.linspace( + 0, W - 1, W, dtype=torch.float32, device=memory.device)) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_W.unsqueeze(-1), + valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale + wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) + proposal = torch.cat((grid, wh), -1).view(N, -1, 4) + proposals.append(proposal) + _cur += (H * W) + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & + (output_proposals < 0.99)).all( + -1, keepdim=True) + output_proposals = torch.log(output_proposals / (1 - output_proposals)) + output_proposals = output_proposals.masked_fill( + memory_padding_mask.unsqueeze(-1), float('inf')) + output_proposals = output_proposals.masked_fill( + ~output_proposals_valid, float('inf')) + + output_memory = memory + output_memory = output_memory.masked_fill( + memory_padding_mask.unsqueeze(-1), float(0)) + output_memory = output_memory.masked_fill(~output_proposals_valid, + float(0)) + output_memory = self.enc_output_norm(self.enc_output(output_memory)) + return output_memory, output_proposals + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """Get the reference points used in decoder. + + Args: + spatial_shapes (Tensor): The shape of all + feature maps, has shape (num_level, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + device (obj:`device`): The device where + reference_points should be. + + Returns: + Tensor: reference points used in decoder, has \ + shape (bs, num_keys, num_levels, 2). 
+ """ + reference_points_list = [] + for lvl, (H, W) in enumerate(spatial_shapes): + # TODO check this 0.5 + ref_y, ref_x = torch.meshgrid( + torch.linspace( + 0.5, H - 0.5, H, dtype=torch.float32, device=device), + torch.linspace( + 0.5, W - 0.5, W, dtype=torch.float32, device=device)) + ref_y = ref_y.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 1] * H) + ref_x = ref_x.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 0] * W) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def get_valid_ratio(self, mask): + """Get the valid radios of feature maps of all level.""" + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio + + def get_proposal_pos_embed(self, + proposals, + num_pos_feats=128, + temperature=10000): + """Get the position embedding of proposal.""" + scale = 2 * math.pi + dim_t = torch.arange( + num_pos_feats, dtype=torch.float32, device=proposals.device) + dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) + # N, L, 4 + proposals = proposals.sigmoid() * scale + # N, L, 4, 128 + pos = proposals[:, :, :, None] / dim_t + # N, L, 4, 64, 2 + pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), + dim=4).flatten(2) + return pos + + def forward(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + reg_branches=None, + cls_branches=None, + **kwargs): + """Forward function for `Transformer`. + + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, embed_dims, h, w]. + mlvl_masks (list(Tensor)): The key_padding_mask from + different level used for encoder and decoder, + each element has shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + mlvl_pos_embeds (list(Tensor)): The positional encoding + of feats from different level, has the shape + [bs, embed_dims, h, w]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when + `with_box_refine` is True. Default to None. + cls_branches (obj:`nn.ModuleList`): Classification heads + for feature maps from each decoder layer. Only would + be passed when `as_two_stage` + is True. Default to None. + + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. 
+ """ + assert self.as_two_stage or query_embed is not None + + feat_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + reference_points = \ + self.get_reference_points(spatial_shapes, + valid_ratios, + device=feat.device) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( + 1, 0, 2) # (H*W, bs, embed_dims) + memory = self.encoder( + query=feat_flatten, + key=None, + value=None, + query_pos=lvl_pos_embed_flatten, + query_key_padding_mask=mask_flatten, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + **kwargs) + + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + if self.as_two_stage: + output_memory, output_proposals = \ + self.gen_encoder_output_proposals( + memory, mask_flatten, spatial_shapes) + enc_outputs_class = cls_branches[self.decoder.num_layers]( + output_memory) + enc_outputs_coord_unact = \ + reg_branches[ + self.decoder.num_layers](output_memory) + output_proposals + + topk = self.two_stage_num_proposals + # We only use the first channel in enc_outputs_class as foreground, + # the other (num_classes - 1) channels are actually not used. + # Its targets are set to be 0s, which indicates the first + # class (foreground) because we use [0, num_classes - 1] to + # indicate class labels, background class is indicated by + # num_classes (similar convention in RPN). + # See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa + # This follows the official implementation of Deformable DETR. 
+ topk_proposals = torch.topk( + enc_outputs_class[..., 0], topk, dim=1)[1] + topk_coords_unact = torch.gather( + enc_outputs_coord_unact, 1, + topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) + topk_coords_unact = topk_coords_unact.detach() + reference_points = topk_coords_unact.sigmoid() + init_reference_out = reference_points + pos_trans_out = self.pos_trans_norm( + self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) + query_pos, query = torch.split(pos_trans_out, c, dim=2) + else: + query_pos, query = torch.split(query_embed, c, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos).sigmoid() + init_reference_out = reference_points + + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + query_pos=query_pos, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + **kwargs) + + inter_references_out = inter_references + if self.as_two_stage: + return inter_states, init_reference_out,\ + inter_references_out, enc_outputs_class,\ + enc_outputs_coord_unact + return inter_states, init_reference_out, \ + inter_references_out, None, None + + +@TRANSFORMER.register_module() +class DynamicConv(BaseModule): + """Implements Dynamic Convolution. + + This module generate parameters for each sample and + use bmm to implement 1*1 convolution. Code is modified + from the `official github repo `_ . + + Args: + in_channels (int): The input feature channel. + Defaults to 256. + feat_channels (int): The inner feature channel. + Defaults to 64. + out_channels (int, optional): The output feature channel. + When not specified, it will be set to `in_channels` + by default + input_feat_shape (int): The shape of input feature. + Defaults to 7. + with_proj (bool): Project two-dimentional feature to + one-dimentional feature. Default to True. + act_cfg (dict): The activation config for DynamicConv. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
+ """ + + def __init__(self, + in_channels=256, + feat_channels=64, + out_channels=None, + input_feat_shape=7, + with_proj=True, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(DynamicConv, self).__init__(init_cfg) + self.in_channels = in_channels + self.feat_channels = feat_channels + self.out_channels_raw = out_channels + self.input_feat_shape = input_feat_shape + self.with_proj = with_proj + self.act_cfg = act_cfg + self.norm_cfg = norm_cfg + self.out_channels = out_channels if out_channels else in_channels + + self.num_params_in = self.in_channels * self.feat_channels + self.num_params_out = self.out_channels * self.feat_channels + self.dynamic_layer = nn.Linear( + self.in_channels, self.num_params_in + self.num_params_out) + + self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1] + + self.activation = build_activation_layer(act_cfg) + + num_output = self.out_channels * input_feat_shape**2 + if self.with_proj: + self.fc_layer = nn.Linear(num_output, self.out_channels) + self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1] + + def forward(self, param_feature, input_feature): + """Forward function for `DynamicConv`. + + Args: + param_feature (Tensor): The feature can be used + to generate the parameter, has shape + (num_all_proposals, in_channels). + input_feature (Tensor): Feature that + interact with parameters, has shape + (num_all_proposals, in_channels, H, W). + + Returns: + Tensor: The output feature has shape + (num_all_proposals, out_channels). + """ + input_feature = input_feature.flatten(2).permute(2, 0, 1) + + input_feature = input_feature.permute(1, 0, 2) + parameters = self.dynamic_layer(param_feature) + + param_in = parameters[:, :self.num_params_in].view( + -1, self.in_channels, self.feat_channels) + param_out = parameters[:, -self.num_params_out:].view( + -1, self.feat_channels, self.out_channels) + + # input_feature has shape (num_all_proposals, H*W, in_channels) + # param_in has shape (num_all_proposals, in_channels, feat_channels) + # feature has shape (num_all_proposals, H*W, feat_channels) + features = torch.bmm(input_feature, param_in) + features = self.norm_in(features) + features = self.activation(features) + + # param_out has shape (batch_size, feat_channels, out_channels) + features = torch.bmm(features, param_out) + features = self.norm_out(features) + features = self.activation(features) + + if self.with_proj: + features = features.flatten(1) + features = self.fc_layer(features) + features = self.fc_norm(features) + features = self.activation(features) + + return features diff --git a/downstream/mmdetection/mmdet/utils/__init__.py b/downstream/mmdetection/mmdet/utils/__init__.py new file mode 100644 index 0000000..f57acb5 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .collect_env import collect_env +from .compat_config import compat_cfg +from .logger import get_caller_name, get_root_logger, log_img_scale +from .memory import AvoidCUDAOOM, AvoidOOM +from .misc import find_latest_checkpoint, update_data_root +from .replace_cfg_vals import replace_cfg_vals +from .setup_env import setup_multi_processes +from .split_batch import split_batch +from .util_distribution import build_ddp, build_dp, get_device + +__all__ = [ + 'get_root_logger', 'collect_env', 'find_latest_checkpoint', + 'update_data_root', 'setup_multi_processes', 'get_caller_name', + 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp', + 'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM' +] diff --git a/downstream/mmdetection/mmdet/utils/collect_env.py b/downstream/mmdetection/mmdet/utils/collect_env.py new file mode 100644 index 0000000..97e25c0 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/collect_env.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import collect_env as collect_base_env +from mmcv.utils import get_git_hash + +import mmdet + + +def collect_env(): + """Collect the information of the running environments.""" + env_info = collect_base_env() + env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7] + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/downstream/mmdetection/mmdet/utils/compat_config.py b/downstream/mmdetection/mmdet/utils/compat_config.py new file mode 100644 index 0000000..05aa37d --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/compat_config.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +from mmcv import ConfigDict + + +def compat_cfg(cfg): + """This function would modify some filed to keep the compatibility of + config. + + For example, it will move some args which will be deprecated to the correct + fields. + """ + cfg = copy.deepcopy(cfg) + cfg = compat_imgs_per_gpu(cfg) + cfg = compat_loader_args(cfg) + cfg = compat_runner_args(cfg) + return cfg + + +def compat_runner_args(cfg): + if 'runner' not in cfg: + cfg.runner = ConfigDict({ + 'type': 'EpochBasedRunner', + 'max_epochs': cfg.total_epochs + }) + warnings.warn( + 'config is now expected to have a `runner` section, ' + 'please set `runner` in your config.', UserWarning) + else: + if 'total_epochs' in cfg: + assert cfg.total_epochs == cfg.runner.max_epochs + return cfg + + +def compat_imgs_per_gpu(cfg): + cfg = copy.deepcopy(cfg) + if 'imgs_per_gpu' in cfg.data: + warnings.warn('"imgs_per_gpu" is deprecated in MMDet V2.0. 
' + 'Please use "samples_per_gpu" instead') + if 'samples_per_gpu' in cfg.data: + warnings.warn( + f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and ' + f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"' + f'={cfg.data.imgs_per_gpu} is used in this experiments') + else: + warnings.warn('Automatically set "samples_per_gpu"="imgs_per_gpu"=' + f'{cfg.data.imgs_per_gpu} in this experiments') + cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu + return cfg + + +def compat_loader_args(cfg): + """Deprecated sample_per_gpu in cfg.data.""" + + cfg = copy.deepcopy(cfg) + if 'train_dataloader' not in cfg.data: + cfg.data['train_dataloader'] = ConfigDict() + if 'val_dataloader' not in cfg.data: + cfg.data['val_dataloader'] = ConfigDict() + if 'test_dataloader' not in cfg.data: + cfg.data['test_dataloader'] = ConfigDict() + + # special process for train_dataloader + if 'samples_per_gpu' in cfg.data: + + samples_per_gpu = cfg.data.pop('samples_per_gpu') + assert 'samples_per_gpu' not in \ + cfg.data.train_dataloader, ('`samples_per_gpu` are set ' + 'in `data` field and ` ' + 'data.train_dataloader` ' + 'at the same time. ' + 'Please only set it in ' + '`data.train_dataloader`. ') + cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu + + if 'persistent_workers' in cfg.data: + + persistent_workers = cfg.data.pop('persistent_workers') + assert 'persistent_workers' not in \ + cfg.data.train_dataloader, ('`persistent_workers` are set ' + 'in `data` field and ` ' + 'data.train_dataloader` ' + 'at the same time. ' + 'Please only set it in ' + '`data.train_dataloader`. ') + cfg.data.train_dataloader['persistent_workers'] = persistent_workers + + if 'workers_per_gpu' in cfg.data: + + workers_per_gpu = cfg.data.pop('workers_per_gpu') + cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu + cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu + cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu + + # special process for val_dataloader + if 'samples_per_gpu' in cfg.data.val: + # keep default value of `sample_per_gpu` is 1 + assert 'samples_per_gpu' not in \ + cfg.data.val_dataloader, ('`samples_per_gpu` are set ' + 'in `data.val` field and ` ' + 'data.val_dataloader` at ' + 'the same time. ' + 'Please only set it in ' + '`data.val_dataloader`. ') + cfg.data.val_dataloader['samples_per_gpu'] = \ + cfg.data.val.pop('samples_per_gpu') + # special process for val_dataloader + + # in case the test dataset is concatenated + if isinstance(cfg.data.test, dict): + if 'samples_per_gpu' in cfg.data.test: + assert 'samples_per_gpu' not in \ + cfg.data.test_dataloader, ('`samples_per_gpu` are set ' + 'in `data.test` field and ` ' + 'data.test_dataloader` ' + 'at the same time. ' + 'Please only set it in ' + '`data.test_dataloader`. ') + + cfg.data.test_dataloader['samples_per_gpu'] = \ + cfg.data.test.pop('samples_per_gpu') + + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + if 'samples_per_gpu' in ds_cfg: + assert 'samples_per_gpu' not in \ + cfg.data.test_dataloader, ('`samples_per_gpu` are set ' + 'in `data.test` field and ` ' + 'data.test_dataloader` at' + ' the same time. ' + 'Please only set it in ' + '`data.test_dataloader`. 
') + samples_per_gpu = max( + [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) + cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu + + return cfg diff --git a/downstream/mmdetection/mmdet/utils/contextmanagers.py b/downstream/mmdetection/mmdet/utils/contextmanagers.py new file mode 100644 index 0000000..fa12bfc --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/contextmanagers.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import asyncio +import contextlib +import logging +import os +import time +from typing import List + +import torch + +logger = logging.getLogger(__name__) + +DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False)) + + +@contextlib.asynccontextmanager +async def completed(trace_name='', + name='', + sleep_interval=0.05, + streams: List[torch.cuda.Stream] = None): + """Async context manager that waits for work to complete on given CUDA + streams.""" + if not torch.cuda.is_available(): + yield + return + + stream_before_context_switch = torch.cuda.current_stream() + if not streams: + streams = [stream_before_context_switch] + else: + streams = [s if s else stream_before_context_switch for s in streams] + + end_events = [ + torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams + ] + + if DEBUG_COMPLETED_TIME: + start = torch.cuda.Event(enable_timing=True) + stream_before_context_switch.record_event(start) + + cpu_start = time.monotonic() + logger.debug('%s %s starting, streams: %s', trace_name, name, streams) + grad_enabled_before = torch.is_grad_enabled() + try: + yield + finally: + current_stream = torch.cuda.current_stream() + assert current_stream == stream_before_context_switch + + if DEBUG_COMPLETED_TIME: + cpu_end = time.monotonic() + for i, stream in enumerate(streams): + event = end_events[i] + stream.record_event(event) + + grad_enabled_after = torch.is_grad_enabled() + + # observed change of torch.is_grad_enabled() during concurrent run of + # async_test_bboxes code + assert (grad_enabled_before == grad_enabled_after + ), 'Unexpected is_grad_enabled() value change' + + are_done = [e.query() for e in end_events] + logger.debug('%s %s completed: %s streams: %s', trace_name, name, + are_done, streams) + with torch.cuda.stream(stream_before_context_switch): + while not all(are_done): + await asyncio.sleep(sleep_interval) + are_done = [e.query() for e in end_events] + logger.debug( + '%s %s completed: %s streams: %s', + trace_name, + name, + are_done, + streams, + ) + + current_stream = torch.cuda.current_stream() + assert current_stream == stream_before_context_switch + + if DEBUG_COMPLETED_TIME: + cpu_time = (cpu_end - cpu_start) * 1000 + stream_times_ms = '' + for i, stream in enumerate(streams): + elapsed_time = start.elapsed_time(end_events[i]) + stream_times_ms += f' {stream} {elapsed_time:.2f} ms' + logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time, + stream_times_ms) + + +@contextlib.asynccontextmanager +async def concurrent(streamqueue: asyncio.Queue, + trace_name='concurrent', + name='stream'): + """Run code concurrently in different streams. + + :param streamqueue: asyncio.Queue instance. + + Queue tasks define the pool of streams used for concurrent execution. 
+ """ + if not torch.cuda.is_available(): + yield + return + + initial_stream = torch.cuda.current_stream() + + with torch.cuda.stream(initial_stream): + stream = await streamqueue.get() + assert isinstance(stream, torch.cuda.Stream) + + try: + with torch.cuda.stream(stream): + logger.debug('%s %s is starting, stream: %s', trace_name, name, + stream) + yield + current = torch.cuda.current_stream() + assert current == stream + logger.debug('%s %s has finished, stream: %s', trace_name, + name, stream) + finally: + streamqueue.task_done() + streamqueue.put_nowait(stream) diff --git a/downstream/mmdetection/mmdet/utils/logger.py b/downstream/mmdetection/mmdet/utils/logger.py new file mode 100644 index 0000000..485f641 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/logger.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +import logging + +from mmcv.utils import get_logger + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Get root logger. + + Args: + log_file (str, optional): File path of log. Defaults to None. + log_level (int, optional): The level of logger. + Defaults to logging.INFO. + + Returns: + :obj:`logging.Logger`: The obtained logger + """ + logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level) + + return logger + + +def get_caller_name(): + """Get name of caller method.""" + # this_func_frame = inspect.stack()[0][0] # i.e., get_caller_name + # callee_frame = inspect.stack()[1][0] # e.g., log_img_scale + caller_frame = inspect.stack()[2][0] # e.g., caller of log_img_scale + caller_method = caller_frame.f_code.co_name + try: + caller_class = caller_frame.f_locals['self'].__class__.__name__ + return f'{caller_class}.{caller_method}' + except KeyError: # caller is a function + return caller_method + + +def log_img_scale(img_scale, shape_order='hw', skip_square=False): + """Log image size. + + Args: + img_scale (tuple): Image size to be logged. + shape_order (str, optional): The order of image shape. + 'hw' for (height, width) and 'wh' for (width, height). + Defaults to 'hw'. + skip_square (bool, optional): Whether to skip logging for square + img_scale. Defaults to False. + + Returns: + bool: Whether to have done logging. + """ + if shape_order == 'hw': + height, width = img_scale + elif shape_order == 'wh': + width, height = img_scale + else: + raise ValueError(f'Invalid shape_order {shape_order}.') + + if skip_square and (height == width): + return False + + logger = get_root_logger() + caller = get_caller_name() + logger.info(f'image shape: height={height}, width={width} in {caller}') + + return True diff --git a/downstream/mmdetection/mmdet/utils/memory.py b/downstream/mmdetection/mmdet/utils/memory.py new file mode 100644 index 0000000..eb212bc --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/memory.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from collections import abc +from contextlib import contextmanager +from functools import wraps + +import torch + +from mmdet.utils import get_root_logger + + +def cast_tensor_type(inputs, src_type=None, dst_type=None): + """Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``. + + Args: + inputs: Inputs that to be casted. + src_type (torch.dtype | torch.device): Source type. + src_type (torch.dtype | torch.device): Destination type. + + Returns: + The same type with inputs, but all contained Tensors have been cast. 
+ """ + assert dst_type is not None + if isinstance(inputs, torch.Tensor): + if isinstance(dst_type, torch.device): + # convert Tensor to dst_device + if hasattr(inputs, 'to') and \ + hasattr(inputs, 'device') and \ + (inputs.device == src_type or src_type is None): + return inputs.to(dst_type) + else: + return inputs + else: + # convert Tensor to dst_dtype + if hasattr(inputs, 'to') and \ + hasattr(inputs, 'dtype') and \ + (inputs.dtype == src_type or src_type is None): + return inputs.to(dst_type) + else: + return inputs + # we need to ensure that the type of inputs to be casted are the same + # as the argument `src_type`. + elif isinstance(inputs, abc.Mapping): + return type(inputs)({ + k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type) + for k, v in inputs.items() + }) + elif isinstance(inputs, abc.Iterable): + return type(inputs)( + cast_tensor_type(item, src_type=src_type, dst_type=dst_type) + for item in inputs) + # TODO: Currently not supported + # elif isinstance(inputs, InstanceData): + # for key, value in inputs.items(): + # inputs[key] = cast_tensor_type( + # value, src_type=src_type, dst_type=dst_type) + # return inputs + else: + return inputs + + +@contextmanager +def _ignore_torch_cuda_oom(): + """A context which ignores CUDA OOM exception from pytorch. + + Code is modified from + # noqa: E501 + """ + try: + yield + except RuntimeError as e: + # NOTE: the string may change? + if 'CUDA out of memory. ' in str(e): + pass + else: + raise + + +class AvoidOOM: + """Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of + Memory error. It will do the following steps: + + 1. First retry after calling `torch.cuda.empty_cache()`. + 2. If that still fails, it will then retry by converting inputs + to FP16. + 3. If that still fails trying to convert inputs to CPUs. + In this case, it expects the function to dispatch to + CPU implementation. + + Args: + to_cpu (bool): Whether to convert outputs to CPU if get an OOM + error. This will slow down the code significantly. + Defaults to True. + test (bool): Skip `_ignore_torch_cuda_oom` operate that can use + lightweight data in unit test, only used in + test unit. Defaults to False. + + Examples: + >>> from mmdet.utils.memory import AvoidOOM + >>> AvoidCUDAOOM = AvoidOOM() + >>> output = AvoidOOM.retry_if_cuda_oom( + >>> some_torch_function)(input1, input2) + >>> # To use as a decorator + >>> # from mmdet.utils import AvoidCUDAOOM + >>> @AvoidCUDAOOM.retry_if_cuda_oom + >>> def function(*args, **kwargs): + >>> return None + ``` + + Note: + 1. The output may be on CPU even if inputs are on GPU. Processing + on CPU will slow down the code significantly. + 2. When converting inputs to CPU, it will only look at each argument + and check if it has `.device` and `.to` for conversion. Nested + structures of tensors are not supported. + 3. Since the function might be called more than once, it has to be + stateless. + """ + + def __init__(self, to_cpu=True, test=False): + self.to_cpu = to_cpu + self.test = test + + def retry_if_cuda_oom(self, func): + """Makes a function retry itself after encountering pytorch's CUDA OOM + error. + + The implementation logic is referred to + https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py + + Args: + func: a stateless callable that takes tensor-like objects + as arguments. + Returns: + func: a callable which retries `func` if OOM is encountered. 
+ """ # noqa: W605 + + @wraps(func) + def wrapped(*args, **kwargs): + + # raw function + if not self.test: + with _ignore_torch_cuda_oom(): + return func(*args, **kwargs) + + # Clear cache and retry + torch.cuda.empty_cache() + with _ignore_torch_cuda_oom(): + return func(*args, **kwargs) + + # get the type and device of first tensor + dtype, device = None, None + values = args + tuple(kwargs.values()) + for value in values: + if isinstance(value, torch.Tensor): + dtype = value.dtype + device = value.device + break + if dtype is None or device is None: + raise ValueError('There is no tensor in the inputs, ' + 'cannot get dtype and device.') + + # Convert to FP16 + fp16_args = cast_tensor_type(args, dst_type=torch.half) + fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half) + logger = get_root_logger() + logger.warning(f'Attempting to copy inputs of {str(func)} ' + 'to FP16 due to CUDA OOM') + + # get input tensor type, the output type will same as + # the first parameter type. + with _ignore_torch_cuda_oom(): + output = func(*fp16_args, **fp16_kwargs) + output = cast_tensor_type( + output, src_type=torch.half, dst_type=dtype) + if not self.test: + return output + logger.warning('Using FP16 still meet CUDA OOM') + + # Try on CPU. This will slow down the code significantly, + # therefore print a notice. + if self.to_cpu: + logger.warning(f'Attempting to copy inputs of {str(func)} ' + 'to CPU due to CUDA OOM') + cpu_device = torch.empty(0).device + cpu_args = cast_tensor_type(args, dst_type=cpu_device) + cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device) + + # convert outputs to GPU + with _ignore_torch_cuda_oom(): + logger.warning(f'Convert outputs to GPU (device={device})') + output = func(*cpu_args, **cpu_kwargs) + output = cast_tensor_type( + output, src_type=cpu_device, dst_type=device) + return output + + warnings.warn('Cannot convert output to GPU due to CUDA OOM, ' + 'the output is now on CPU, which might cause ' + 'errors if the output need to interact with GPU ' + 'data in subsequent operations') + logger.warning('Cannot convert output to GPU due to ' + 'CUDA OOM, the output is on CPU now.') + + return func(*cpu_args, **cpu_kwargs) + else: + # may still get CUDA OOM error + return func(*args, **kwargs) + + return wrapped + + +# To use AvoidOOM as a decorator +AvoidCUDAOOM = AvoidOOM() diff --git a/downstream/mmdetection/mmdet/utils/misc.py b/downstream/mmdetection/mmdet/utils/misc.py new file mode 100644 index 0000000..4113672 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/misc.py @@ -0,0 +1,76 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import glob +import os +import os.path as osp +import warnings + +import mmcv +from mmcv.utils import print_log + + +def find_latest_checkpoint(path, suffix='pth'): + """Find the latest checkpoint from the working directory. + + Args: + path(str): The path to find checkpoints. + suffix(str): File extension. + Defaults to pth. + + Returns: + latest_path(str | None): File path of the latest checkpoint. + References: + .. 
[1] https://github.com/microsoft/SoftTeacher + /blob/main/ssod/utils/patch.py + """ + if not osp.exists(path): + warnings.warn('The path of checkpoints does not exist.') + return None + if osp.exists(osp.join(path, f'latest.{suffix}')): + return osp.join(path, f'latest.{suffix}') + + checkpoints = glob.glob(osp.join(path, f'*.{suffix}')) + if len(checkpoints) == 0: + warnings.warn('There are no checkpoints in the path.') + return None + latest = -1 + latest_path = None + for checkpoint in checkpoints: + count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0]) + if count > latest: + latest = count + latest_path = checkpoint + return latest_path + + +def update_data_root(cfg, logger=None): + """Update data root according to env MMDET_DATASETS. + + If set env MMDET_DATASETS, update cfg.data_root according to + MMDET_DATASETS. Otherwise, using cfg.data_root as default. + + Args: + cfg (mmcv.Config): The model config need to modify + logger (logging.Logger | str | None): the way to print msg + """ + assert isinstance(cfg, mmcv.Config), \ + f'cfg got wrong type: {type(cfg)}, expected mmcv.Config' + + if 'MMDET_DATASETS' in os.environ: + dst_root = os.environ['MMDET_DATASETS'] + print_log(f'MMDET_DATASETS has been set to be {dst_root}.' + f'Using {dst_root} as data root.') + else: + return + + assert isinstance(cfg, mmcv.Config), \ + f'cfg got wrong type: {type(cfg)}, expected mmcv.Config' + + def update(cfg, src_str, dst_str): + for k, v in cfg.items(): + if isinstance(v, mmcv.ConfigDict): + update(cfg[k], src_str, dst_str) + if isinstance(v, str) and src_str in v: + cfg[k] = v.replace(src_str, dst_str) + + update(cfg.data, cfg.data_root, dst_root) + cfg.data_root = dst_root diff --git a/downstream/mmdetection/mmdet/utils/profiling.py b/downstream/mmdetection/mmdet/utils/profiling.py new file mode 100644 index 0000000..2f53f45 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/profiling.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import contextlib +import sys +import time + +import torch + +if sys.version_info >= (3, 7): + + @contextlib.contextmanager + def profile_time(trace_name, + name, + enabled=True, + stream=None, + end_stream=None): + """Print time spent by CPU and GPU. + + Useful as a temporary context manager to find sweet spots of code + suitable for async implementation. + """ + if (not enabled) or not torch.cuda.is_available(): + yield + return + stream = stream if stream else torch.cuda.current_stream() + end_stream = end_stream if end_stream else stream + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + stream.record_event(start) + try: + cpu_start = time.monotonic() + yield + finally: + cpu_end = time.monotonic() + end_stream.record_event(end) + end.synchronize() + cpu_time = (cpu_end - cpu_start) * 1000 + gpu_time = start.elapsed_time(end) + msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms ' + msg += f'gpu_time {gpu_time:.2f} ms stream {stream}' + print(msg, end_stream) diff --git a/downstream/mmdetection/mmdet/utils/replace_cfg_vals.py b/downstream/mmdetection/mmdet/utils/replace_cfg_vals.py new file mode 100644 index 0000000..6ca301d --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/replace_cfg_vals.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import re + +from mmcv.utils import Config + + +def replace_cfg_vals(ori_cfg): + """Replace the string "${key}" with the corresponding value. + + Replace the "${key}" with the value of ori_cfg.key in the config. 
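A hypothetical usage sketch for `find_latest_checkpoint` defined above (assuming mmdet is importable): with no `latest.pth` present, the checkpoint with the highest numeric suffix is returned.

```python
# Hypothetical usage; requires an installed mmdet.
import os
import tempfile

from mmdet.utils import find_latest_checkpoint

with tempfile.TemporaryDirectory() as work_dir:
    for name in ('epoch_1.pth', 'epoch_12.pth', 'epoch_3.pth'):
        open(os.path.join(work_dir, name), 'w').close()
    # 'epoch_12.pth' wins because 12 is the largest parsed suffix
    print(find_latest_checkpoint(work_dir))
```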
And + support replacing the chained ${key}. Such as, replace "${key0.key1}" + with the value of cfg.key0.key1. Code is modified from `vars.py + < https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501 + + Args: + ori_cfg (mmcv.utils.config.Config): + The origin config with "${key}" generated from a file. + + Returns: + updated_cfg [mmcv.utils.config.Config]: + The config with "${key}" replaced by the corresponding value. + """ + + def get_value(cfg, key): + for k in key.split('.'): + cfg = cfg[k] + return cfg + + def replace_value(cfg): + if isinstance(cfg, dict): + return {key: replace_value(value) for key, value in cfg.items()} + elif isinstance(cfg, list): + return [replace_value(item) for item in cfg] + elif isinstance(cfg, tuple): + return tuple([replace_value(item) for item in cfg]) + elif isinstance(cfg, str): + # the format of string cfg may be: + # 1) "${key}", which will be replaced with cfg.key directly + # 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx", + # which will be replaced with the string of the cfg.key + keys = pattern_key.findall(cfg) + values = [get_value(ori_cfg, key[2:-1]) for key in keys] + if len(keys) == 1 and keys[0] == cfg: + # the format of string cfg is "${key}" + cfg = values[0] + else: + for key, value in zip(keys, values): + # the format of string cfg is + # "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx" + assert not isinstance(value, (dict, list, tuple)), \ + f'for the format of string cfg is ' \ + f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \ + f"the type of the value of '${key}' " \ + f'can not be dict, list, or tuple' \ + f'but you input {type(value)} in {cfg}' + cfg = cfg.replace(key, str(value)) + return cfg + else: + return cfg + + # the pattern of string "${key}" + pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}') + # the type of ori_cfg._cfg_dict is mmcv.utils.config.ConfigDict + updated_cfg = Config( + replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename) + # replace the model with model_wrapper + if updated_cfg.get('model_wrapper', None) is not None: + updated_cfg.model = updated_cfg.model_wrapper + updated_cfg.pop('model_wrapper') + return updated_cfg diff --git a/downstream/mmdetection/mmdet/utils/setup_env.py b/downstream/mmdetection/mmdet/utils/setup_env.py new file mode 100644 index 0000000..6637cf8 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/setup_env.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import platform +import warnings + +import cv2 +import torch.multiprocessing as mp + + +def setup_multi_processes(cfg): + """Setup multi-processing environment variables.""" + # set multi-process start method as `fork` to speed up the training + if platform.system() != 'Windows': + mp_start_method = cfg.get('mp_start_method', 'fork') + current_method = mp.get_start_method(allow_none=True) + if current_method is not None and current_method != mp_start_method: + warnings.warn( + f'Multi-processing start method `{mp_start_method}` is ' + f'different from the previous setting `{current_method}`.' + f'It will be force set to `{mp_start_method}`. 
You can change ' + f'this behavior by changing `mp_start_method` in your config.') + mp.set_start_method(mp_start_method, force=True) + + # disable opencv multithreading to avoid system being overloaded + opencv_num_threads = cfg.get('opencv_num_threads', 0) + cv2.setNumThreads(opencv_num_threads) + + # setup OMP threads + # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa + workers_per_gpu = cfg.data.get('workers_per_gpu', 1) + if 'train_dataloader' in cfg.data: + workers_per_gpu = \ + max(cfg.data.train_dataloader.get('workers_per_gpu', 1), + workers_per_gpu) + + if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1: + omp_num_threads = 1 + warnings.warn( + f'Setting OMP_NUM_THREADS environment variable for each process ' + f'to be {omp_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) + + # setup MKL threads + if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1: + mkl_num_threads = 1 + warnings.warn( + f'Setting MKL_NUM_THREADS environment variable for each process ' + f'to be {mkl_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) diff --git a/downstream/mmdetection/mmdet/utils/split_batch.py b/downstream/mmdetection/mmdet/utils/split_batch.py new file mode 100644 index 0000000..0276fb3 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/split_batch.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def split_batch(img, img_metas, kwargs): + """Split data_batch by tags. + + Code is modified from + # noqa: E501 + + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. + Typically these should be mean centered and std scaled. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys, see + :class:`mmdet.datasets.pipelines.Collect`. + kwargs (dict): Specific to concrete implementation. + + Returns: + data_groups (dict): a dict that data_batch splited by tags, + such as 'sup', 'unsup_teacher', and 'unsup_student'. + """ + + # only stack img in the batch + def fuse_list(obj_list, obj): + return torch.stack(obj_list) if isinstance(obj, + torch.Tensor) else obj_list + + # select data with tag from data_batch + def select_group(data_batch, current_tag): + group_flag = [tag == current_tag for tag in data_batch['tag']] + return { + k: fuse_list([vv for vv, gf in zip(v, group_flag) if gf], v) + for k, v in data_batch.items() + } + + kwargs.update({'img': img, 'img_metas': img_metas}) + kwargs.update({'tag': [meta['tag'] for meta in img_metas]}) + tags = list(set(kwargs['tag'])) + data_groups = {tag: select_group(kwargs, tag) for tag in tags} + for tag, group in data_groups.items(): + group.pop('tag') + return data_groups diff --git a/downstream/mmdetection/mmdet/utils/util_distribution.py b/downstream/mmdetection/mmdet/utils/util_distribution.py new file mode 100644 index 0000000..a186bf6 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/util_distribution.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
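A hypothetical usage sketch for `split_batch` above, which groups a batch by the `tag` carried in each image's metadata (assuming mmdet is importable); tensors are re-stacked per group while plain lists are simply filtered:

```python
# Hypothetical usage; requires an installed mmdet.
import torch

from mmdet.utils import split_batch

img = torch.randn(4, 3, 32, 32)
img_metas = [{'tag': 'sup'}, {'tag': 'sup'},
             {'tag': 'unsup_teacher'}, {'tag': 'unsup_teacher'}]
kwargs = {'gt_labels': [0, 1, 2, 3]}

data_groups = split_batch(img, img_metas, kwargs)
print(sorted(data_groups))                  # ['sup', 'unsup_teacher']
print(data_groups['sup']['img'].shape)      # torch.Size([2, 3, 32, 32])
print(data_groups['sup']['gt_labels'])      # [0, 1]
```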
+import torch +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel + +dp_factory = {'cuda': MMDataParallel, 'cpu': MMDataParallel} + +ddp_factory = {'cuda': MMDistributedDataParallel} + + +def build_dp(model, device='cuda', dim=0, *args, **kwargs): + """build DataParallel module by device type. + + if device is cuda, return a MMDataParallel model; if device is mlu, + return a MLUDataParallel model. + + Args: + model (:class:`nn.Module`): model to be parallelized. + device (str): device type, cuda, cpu or mlu. Defaults to cuda. + dim (int): Dimension used to scatter the data. Defaults to 0. + + Returns: + nn.Module: the model to be parallelized. + """ + if device == 'cuda': + model = model.cuda() + elif device == 'mlu': + from mmcv.device.mlu import MLUDataParallel + dp_factory['mlu'] = MLUDataParallel + model = model.mlu() + + return dp_factory[device](model, dim=dim, *args, **kwargs) + + +def build_ddp(model, device='cuda', *args, **kwargs): + """Build DistributedDataParallel module by device type. + + If device is cuda, return a MMDistributedDataParallel model; + if device is mlu, return a MLUDistributedDataParallel model. + + Args: + model (:class:`nn.Module`): module to be parallelized. + device (str): device type, mlu or cuda. + + Returns: + :class:`nn.Module`: the module to be parallelized + + References: + .. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel. + DistributedDataParallel.html + """ + assert device in ['cuda', 'mlu'], 'Only available for cuda or mlu devices.' + if device == 'cuda': + model = model.cuda() + elif device == 'mlu': + from mmcv.device.mlu import MLUDistributedDataParallel + ddp_factory['mlu'] = MLUDistributedDataParallel + model = model.mlu() + + return ddp_factory[device](model, *args, **kwargs) + + +def is_mlu_available(): + """Returns a bool indicating if MLU is currently available.""" + return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available() + + +def get_device(): + """Returns an available device, cpu, cuda or mlu.""" + is_device_available = { + 'cuda': torch.cuda.is_available(), + 'mlu': is_mlu_available() + } + device_list = [k for k, v in is_device_available.items() if v] + return device_list[0] if len(device_list) == 1 else 'cpu' diff --git a/downstream/mmdetection/mmdet/utils/util_mixins.py b/downstream/mmdetection/mmdet/utils/util_mixins.py new file mode 100644 index 0000000..b83b661 --- /dev/null +++ b/downstream/mmdetection/mmdet/utils/util_mixins.py @@ -0,0 +1,105 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""This module defines the :class:`NiceRepr` mixin class, which defines a +``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__`` +method, which you must define. This means you only have to overload one +function instead of two. Furthermore, if the object defines a ``__len__`` +method, then the ``__nice__`` method defaults to something sensible, otherwise +it is treated as abstract and raises ``NotImplementedError``. + +To use simply have your object inherit from :class:`NiceRepr` +(multi-inheritance should be ok). + +This code was copied from the ubelt library: https://github.com/Erotemic/ubelt + +Example: + >>> # Objects that define __nice__ have a default __str__ and __repr__ + >>> class Student(NiceRepr): + ... def __init__(self, name): + ... self.name = name + ... def __nice__(self): + ... 
return self.name
+    >>> s1 = Student('Alice')
+    >>> s2 = Student('Bob')
+    >>> print(f's1 = {s1}')
+    >>> print(f's2 = {s2}')
+    s1 = <Student(Alice)>
+    s2 = <Student(Bob)>
+
+Example:
+    >>> # Objects that define __len__ have a default __nice__
+    >>> class Group(NiceRepr):
+    ...     def __init__(self, data):
+    ...         self.data = data
+    ...     def __len__(self):
+    ...         return len(self.data)
+    >>> g = Group([1, 2, 3])
+    >>> print(f'g = {g}')
+    g = <Group(3)>
+"""
+import warnings
+
+
+class NiceRepr:
+    """Inherit from this class and define ``__nice__`` to "nicely" print your
+    objects.
+
+    Defines ``__str__`` and ``__repr__`` in terms of the ``__nice__`` function.
+    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
+    If the inheriting class has a ``__len__`` method, then the default
+    ``__nice__`` method will return its length.
+
+    Example:
+        >>> class Foo(NiceRepr):
+        ...    def __nice__(self):
+        ...        return 'info'
+        >>> foo = Foo()
+        >>> assert str(foo) == '<Foo(info)>'
+        >>> assert repr(foo).startswith('<Foo(info) at ')
+
+    Example:
+        >>> class Bar(NiceRepr):
+        ...    pass
+        >>> bar = Bar()
+        >>> import pytest
+        >>> with pytest.warns(None) as record:
+        >>>     assert 'object at' in str(bar)
+        >>>     assert 'object at' in repr(bar)
+
+    Example:
+        >>> class Baz(NiceRepr):
+        ...    def __len__(self):
+        ...        return 5
+        >>> baz = Baz()
+        >>> assert str(baz) == '<Baz(5)>'
+    """
+
+    def __nice__(self):
+        """str: a "nice" summary string describing this module"""
+        if hasattr(self, '__len__'):
+            # It is a common pattern for objects to use __len__ in __nice__
+            # As a convenience we define a default __nice__ for these objects
+            return str(len(self))
+        else:
+            # In all other cases force the subclass to overload __nice__
+            raise NotImplementedError(
+                f'Define the __nice__ method for {self.__class__!r}')
+
+    def __repr__(self):
+        """str: the string of the module"""
+        try:
+            nice = self.__nice__()
+            classname = self.__class__.__name__
+            return f'<{classname}({nice}) at {hex(id(self))}>'
+        except NotImplementedError as ex:
+            warnings.warn(str(ex), category=RuntimeWarning)
+            return object.__repr__(self)
+
+    def __str__(self):
+        """str: the string of the module"""
+        try:
+            classname = self.__class__.__name__
+            nice = self.__nice__()
+            return f'<{classname}({nice})>'
+        except NotImplementedError as ex:
+            warnings.warn(str(ex), category=RuntimeWarning)
+            return object.__repr__(self)
diff --git a/downstream/mmdetection/mmdet/utils/util_random.py b/downstream/mmdetection/mmdet/utils/util_random.py
new file mode 100644
index 0000000..dc1ecb6
--- /dev/null
+++ b/downstream/mmdetection/mmdet/utils/util_random.py
@@ -0,0 +1,34 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+"""Helpers for random number generators."""
+import numpy as np
+
+
+def ensure_rng(rng=None):
+    """Coerces input into a random number generator.
+
+    If the input is None, then a global random state is returned.
+
+    If the input is a numeric value, then that is used as a seed to construct a
+    random state. Otherwise the input is returned as-is.
+
+    Adapted from [1]_.
+
+    Args:
+        rng (int | numpy.random.RandomState | None):
+            if None, then defaults to the global rng. Otherwise this can be an
+            integer or a RandomState class
+    Returns:
+        (numpy.random.RandomState) : rng -
+            a numpy random number generator
+
+    References:
+        ..
[1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 + """ + + if rng is None: + rng = np.random.mtrand._rand + elif isinstance(rng, int): + rng = np.random.RandomState(rng) + else: + rng = rng + return rng diff --git a/downstream/mmdetection/mmdet/version.py b/downstream/mmdetection/mmdet/version.py new file mode 100644 index 0000000..56e9b07 --- /dev/null +++ b/downstream/mmdetection/mmdet/version.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +__version__ = '2.25.0' +short_version = __version__ + + +def parse_version_info(version_str): + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) diff --git a/downstream/mmdetection/model-index.yml b/downstream/mmdetection/model-index.yml new file mode 100644 index 0000000..5dcf8ed --- /dev/null +++ b/downstream/mmdetection/model-index.yml @@ -0,0 +1,72 @@ +Import: + - configs/atss/metafile.yml + - configs/autoassign/metafile.yml + - configs/carafe/metafile.yml + - configs/cascade_rcnn/metafile.yml + - configs/cascade_rpn/metafile.yml + - configs/centernet/metafile.yml + - configs/centripetalnet/metafile.yml + - configs/cornernet/metafile.yml + - configs/convnext/metafile.yml + - configs/dcn/metafile.yml + - configs/dcnv2/metafile.yml + - configs/deformable_detr/metafile.yml + - configs/detectors/metafile.yml + - configs/detr/metafile.yml + - configs/double_heads/metafile.yml + - configs/dyhead/metafile.yml + - configs/dynamic_rcnn/metafile.yml + - configs/efficientnet/metafile.yml + - configs/empirical_attention/metafile.yml + - configs/faster_rcnn/metafile.yml + - configs/fcos/metafile.yml + - configs/foveabox/metafile.yml + - configs/fpg/metafile.yml + - configs/free_anchor/metafile.yml + - configs/fsaf/metafile.yml + - configs/gcnet/metafile.yml + - configs/gfl/metafile.yml + - configs/ghm/metafile.yml + - configs/gn/metafile.yml + - configs/gn+ws/metafile.yml + - configs/grid_rcnn/metafile.yml + - configs/groie/metafile.yml + - configs/guided_anchoring/metafile.yml + - configs/hrnet/metafile.yml + - configs/htc/metafile.yml + - configs/instaboost/metafile.yml + - configs/lad/metafile.yml + - configs/ld/metafile.yml + - configs/libra_rcnn/metafile.yml + - configs/mask_rcnn/metafile.yml + - configs/ms_rcnn/metafile.yml + - configs/nas_fcos/metafile.yml + - configs/nas_fpn/metafile.yml + - configs/openimages/metafile.yml + - configs/paa/metafile.yml + - configs/pafpn/metafile.yml + - configs/panoptic_fpn/metafile.yml + - configs/pvt/metafile.yml + - configs/pisa/metafile.yml + - configs/point_rend/metafile.yml + - configs/queryinst/metafile.yml + - configs/regnet/metafile.yml + - configs/reppoints/metafile.yml + - configs/res2net/metafile.yml + - configs/resnest/metafile.yml + - configs/retinanet/metafile.yml + - configs/sabl/metafile.yml + - configs/scnet/metafile.yml + - configs/scratch/metafile.yml + - configs/seesaw_loss/metafile.yml + - configs/sparse_rcnn/metafile.yml + - configs/solo/metafile.yml + - configs/ssd/metafile.yml + - configs/swin/metafile.yml + - configs/tridentnet/metafile.yml + - configs/tood/metafile.yml + - configs/vfnet/metafile.yml + - configs/yolact/metafile.yml + - configs/yolo/metafile.yml + - configs/yolof/metafile.yml + - configs/yolox/metafile.yml diff --git 
a/downstream/mmdetection/pytest.ini b/downstream/mmdetection/pytest.ini new file mode 100644 index 0000000..9796e87 --- /dev/null +++ b/downstream/mmdetection/pytest.ini @@ -0,0 +1,7 @@ +[pytest] +addopts = --xdoctest --xdoctest-style=auto +norecursedirs = .git ignore build __pycache__ data docker docs .eggs + +filterwarnings= default + ignore:.*No cfgstr given in Cacher constructor or call.*:Warning + ignore:.*Define the __nice__ method for.*:Warning diff --git a/downstream/mmdetection/requirements.txt b/downstream/mmdetection/requirements.txt new file mode 100644 index 0000000..6981bd7 --- /dev/null +++ b/downstream/mmdetection/requirements.txt @@ -0,0 +1,4 @@ +-r requirements/build.txt +-r requirements/optional.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/downstream/mmdetection/requirements/albu.txt b/downstream/mmdetection/requirements/albu.txt new file mode 100644 index 0000000..f421fbb --- /dev/null +++ b/downstream/mmdetection/requirements/albu.txt @@ -0,0 +1 @@ +albumentations>=0.3.2 --no-binary qudida,albumentations diff --git a/downstream/mmdetection/requirements/build.txt b/downstream/mmdetection/requirements/build.txt new file mode 100644 index 0000000..8155829 --- /dev/null +++ b/downstream/mmdetection/requirements/build.txt @@ -0,0 +1,3 @@ +# These must be installed before building mmdetection +cython +numpy diff --git a/downstream/mmdetection/requirements/docs.txt b/downstream/mmdetection/requirements/docs.txt new file mode 100644 index 0000000..d251554 --- /dev/null +++ b/downstream/mmdetection/requirements/docs.txt @@ -0,0 +1,7 @@ +docutils==0.16.0 +myst-parser +-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==4.0.2 +sphinx-copybutton +sphinx_markdown_tables +sphinx_rtd_theme==0.5.2 diff --git a/downstream/mmdetection/requirements/mminstall.txt b/downstream/mmdetection/requirements/mminstall.txt new file mode 100644 index 0000000..b53dbf4 --- /dev/null +++ b/downstream/mmdetection/requirements/mminstall.txt @@ -0,0 +1 @@ +mmcv-full>=1.3.17 diff --git a/downstream/mmdetection/requirements/optional.txt b/downstream/mmdetection/requirements/optional.txt new file mode 100644 index 0000000..6782747 --- /dev/null +++ b/downstream/mmdetection/requirements/optional.txt @@ -0,0 +1,5 @@ +cityscapesscripts +imagecorruptions +scipy +sklearn +timm diff --git a/downstream/mmdetection/requirements/readthedocs.txt b/downstream/mmdetection/requirements/readthedocs.txt new file mode 100644 index 0000000..0542bfc --- /dev/null +++ b/downstream/mmdetection/requirements/readthedocs.txt @@ -0,0 +1,3 @@ +mmcv +torch +torchvision diff --git a/downstream/mmdetection/requirements/runtime.txt b/downstream/mmdetection/requirements/runtime.txt new file mode 100644 index 0000000..f7a2cc7 --- /dev/null +++ b/downstream/mmdetection/requirements/runtime.txt @@ -0,0 +1,5 @@ +matplotlib +numpy +pycocotools +six +terminaltables diff --git a/downstream/mmdetection/requirements/tests.txt b/downstream/mmdetection/requirements/tests.txt new file mode 100644 index 0000000..2ff795a --- /dev/null +++ b/downstream/mmdetection/requirements/tests.txt @@ -0,0 +1,15 @@ +asynctest +codecov +flake8 +interrogate +isort==4.3.21 +# Note: used for kwarray.group_items, this may be ported to mmcv in the future. 
+kwarray +-e git+https://github.com/open-mmlab/mmtracking#egg=mmtrack +onnx==1.7.0 +onnxruntime>=1.8.0 +protobuf<=3.20.1 +pytest +ubelt +xdoctest>=0.10.0 +yapf diff --git a/downstream/mmdetection/setup.cfg b/downstream/mmdetection/setup.cfg new file mode 100644 index 0000000..56407a1 --- /dev/null +++ b/downstream/mmdetection/setup.cfg @@ -0,0 +1,21 @@ +[isort] +line_length = 79 +multi_line_output = 0 +extra_standard_library = setuptools +known_first_party = mmdet +known_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[yapf] +BASED_ON_STYLE = pep8 +BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true +SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true + +# ignore-words-list needs to be lowercase format. For example, if we want to +# ignore word "BA", then we need to append "ba" to ignore-words-list rather +# than "BA" +[codespell] +skip = *.ipynb +quiet-level = 3 +ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood,ba diff --git a/downstream/mmdetection/setup.py b/downstream/mmdetection/setup.py new file mode 100755 index 0000000..6bc7885 --- /dev/null +++ b/downstream/mmdetection/setup.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import platform +import shutil +import sys +import warnings +from setuptools import find_packages, setup + +import torch +from torch.utils.cpp_extension import (BuildExtension, CppExtension, + CUDAExtension) + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +version_file = 'mmdet/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def make_cuda_ext(name, module, sources, sources_cuda=[]): + + define_macros = [] + extra_compile_args = {'cxx': []} + + if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1': + define_macros += [('WITH_CUDA', None)] + extension = CUDAExtension + extra_compile_args['nvcc'] = [ + '-D__CUDA_NO_HALF_OPERATORS__', + '-D__CUDA_NO_HALF_CONVERSIONS__', + '-D__CUDA_NO_HALF2_OPERATORS__', + ] + sources += sources_cuda + else: + print(f'Compiling {name} without CUDA') + extension = CppExtension + + return extension( + name=f'{module}.{name}', + sources=[os.path.join(*module.split('.'), p) for p in sources], + define_macros=define_macros, + extra_compile_args=extra_compile_args) + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. 
+ + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + elif '@git+' in line: + info['package'] = line + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. + """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + if platform.system() == 'Windows': + # set `copy` mode here since symlink fails on Windows. 
+ mode = 'copy' + else: + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + mode = 'copy' + else: + return + + filenames = ['tools', 'configs', 'demo', 'model-index.yml'] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmdet', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + os.symlink(src_relpath, tar_path) + elif mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extension() + setup( + name='mmdet', + version=get_version(), + description='OpenMMLab Detection Toolbox and Benchmark', + long_description=readme(), + long_description_content_type='text/markdown', + author='MMDetection Contributors', + author_email='openmmlab@gmail.com', + keywords='computer vision, object detection', + url='https://github.com/open-mmlab/mmdetection', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + include_package_data=True, + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + ], + license='Apache License 2.0', + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'build': parse_requirements('requirements/build.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + }, + ext_modules=[], + cmdclass={'build_ext': BuildExtension}, + zip_safe=False) diff --git a/downstream/mmdetection/tools/analysis_tools/analyze_logs.py b/downstream/mmdetection/tools/analysis_tools/analyze_logs.py new file mode 100755 index 0000000..ca13ea8 --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/analyze_logs.py @@ -0,0 +1,204 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
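Editor's aside: the `parse_requirements` helper in the `setup.py` above strips version specifiers by splitting each requirement line at the first comparison operator and keeping the package name (optionally re-attaching the version and platform markers). The snippet below is a minimal, self-contained sketch of just that splitting pattern; it is not the function from the diff, and the `strip_version` name is made up for illustration.

```python
import re


def strip_version(line):
    # Illustrative only: cut a requirement such as "mmcv-full>=1.3.17"
    # at the first comparison operator and keep the package name.
    parts = re.split(r'(>=|==|>)', line, maxsplit=1)
    return parts[0].strip()


assert strip_version('mmcv-full>=1.3.17') == 'mmcv-full'
assert strip_version('numpy') == 'numpy'
```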
+import argparse +import json +from collections import defaultdict + +import matplotlib.pyplot as plt +import numpy as np +import seaborn as sns + + +def cal_train_time(log_dicts, args): + for i, log_dict in enumerate(log_dicts): + print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') + all_times = [] + for epoch in log_dict.keys(): + if args.include_outliers: + all_times.append(log_dict[epoch]['time']) + else: + all_times.append(log_dict[epoch]['time'][1:]) + if not all_times: + raise KeyError( + 'Please reduce the log interval in the config so that' + 'interval is less than iterations of one epoch.') + all_times = np.array(all_times) + epoch_ave_time = all_times.mean(-1) + slowest_epoch = epoch_ave_time.argmax() + fastest_epoch = epoch_ave_time.argmin() + std_over_epoch = epoch_ave_time.std() + print(f'slowest epoch {slowest_epoch + 1}, ' + f'average time is {epoch_ave_time[slowest_epoch]:.4f}') + print(f'fastest epoch {fastest_epoch + 1}, ' + f'average time is {epoch_ave_time[fastest_epoch]:.4f}') + print(f'time std over epochs is {std_over_epoch:.4f}') + print(f'average iter time: {np.mean(all_times):.4f} s/iter') + print() + + +def plot_curve(log_dicts, args): + if args.backend is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append(f'{json_log}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + if metric not in log_dict[epochs[int(args.eval_interval) - 1]]: + if 'mAP' in metric: + raise KeyError( + f'{args.json_logs[i]} does not contain metric ' + f'{metric}. Please check if "--no-validate" is ' + 'specified when you trained the model.') + raise KeyError( + f'{args.json_logs[i]} does not contain metric {metric}. 
' + 'Please reduce the log interval in the config so that ' + 'interval is less than iterations of one epoch.') + + if 'mAP' in metric: + xs = [] + ys = [] + for epoch in epochs: + ys += log_dict[epoch][metric] + if 'val' in log_dict[epoch]['mode']: + xs.append(epoch) + plt.xlabel('epoch') + plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') + else: + xs = [] + ys = [] + num_iters_per_epoch = log_dict[epochs[0]]['iter'][-2] + for epoch in epochs: + iters = log_dict[epoch]['iter'] + if log_dict[epoch]['mode'][-1] == 'val': + iters = iters[:-1] + xs.append( + np.array(iters) + (epoch - 1) * num_iters_per_epoch) + ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('iter') + plt.plot( + xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['bbox_mAP'], + help='the metric that you want to plot') + parser_plt.add_argument( + '--start-epoch', + type=str, + default='1', + help='the epoch that you want to start') + parser_plt.add_argument( + '--eval-interval', + type=str, + default='1', + help='the eval interval when training') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. 
memory, bbox_mAP + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for i, line in enumerate(log_file): + log = json.loads(line.strip()) + # skip the first training info line + if i == 0: + continue + # skip lines without `epoch` field + if 'epoch' not in log: + continue + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/analyze_results.py b/downstream/mmdetection/tools/analysis_tools/analyze_results.py new file mode 100644 index 0000000..9988144 --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/analyze_results.py @@ -0,0 +1,365 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from multiprocessing import Pool + +import mmcv +import numpy as np +from mmcv import Config, DictAction + +from mmdet.core.evaluation import eval_map +from mmdet.core.visualization import imshow_gt_det_bboxes +from mmdet.datasets import build_dataset, get_loading_pipeline +from mmdet.datasets.api_wrappers import pq_compute_single_core +from mmdet.utils import replace_cfg_vals, update_data_root + + +def bbox_map_eval(det_result, annotation, nproc=4): + """Evaluate mAP of single image det result. + + Args: + det_result (list[list]): [[cls1_det, cls2_det, ...], ...]. + The outer list indicates images, and the inner list indicates + per-class detected bboxes. + annotation (dict): Ground truth annotations where keys of + annotations are: + + - bboxes: numpy array of shape (n, 4) + - labels: numpy array of shape (n, ) + - bboxes_ignore (optional): numpy array of shape (k, 4) + - labels_ignore (optional): numpy array of shape (k, ) + + nproc (int): Processes used for computing mAP. + Default: 4. + + Returns: + float: mAP + """ + + # use only bbox det result + if isinstance(det_result, tuple): + bbox_det_result = [det_result[0]] + else: + bbox_det_result = [det_result] + # mAP + iou_thrs = np.linspace( + .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) + + processes = [] + workers = Pool(processes=nproc) + for thr in iou_thrs: + p = workers.apply_async(eval_map, (bbox_det_result, [annotation]), { + 'iou_thr': thr, + 'logger': 'silent', + 'nproc': 1 + }) + processes.append(p) + + workers.close() + workers.join() + + mean_aps = [] + for p in processes: + mean_aps.append(p.get()[0]) + + return sum(mean_aps) / len(mean_aps) + + +class ResultVisualizer: + """Display and save evaluation results. + + Args: + show (bool): Whether to show the image. Default: True. + wait_time (float): Value of waitKey param. Default: 0. + score_thr (float): Minimum score of bboxes to be shown. + Default: 0. + overlay_gt_pred (bool): Whether to plot gts and predictions on the + same image. If False, predictions and gts will be plotted on two + same image which will be concatenated in vertical direction. + The image above is drawn with gt, and the image below is drawn + with the prediction result. Default: False. 
+ """ + + def __init__(self, + show=False, + wait_time=0, + score_thr=0, + overlay_gt_pred=False): + self.show = show + self.wait_time = wait_time + self.score_thr = score_thr + self.overlay_gt_pred = overlay_gt_pred + + def _save_image_gts_results(self, + dataset, + results, + performances, + out_dir=None): + """Display or save image with groung truths and predictions from a + model. + + Args: + dataset (Dataset): A PyTorch dataset. + results (list): Object detection or panoptic segmentation + results from test results pkl file. + performances (dict): A dict contains samples's indices + in dataset and model's performance on them. + out_dir (str, optional): The filename to write the image. + Defaults: None. + """ + mmcv.mkdir_or_exist(out_dir) + + for performance_info in performances: + index, performance = performance_info + data_info = dataset.prepare_train_img(index) + + # calc save file path + filename = data_info['filename'] + if data_info['img_prefix'] is not None: + filename = osp.join(data_info['img_prefix'], filename) + else: + filename = data_info['filename'] + fname, name = osp.splitext(osp.basename(filename)) + save_filename = fname + '_' + str(round(performance, 3)) + name + out_file = osp.join(out_dir, save_filename) + imshow_gt_det_bboxes( + data_info['img'], + data_info, + results[index], + dataset.CLASSES, + gt_bbox_color=dataset.PALETTE, + gt_text_color=(200, 200, 200), + gt_mask_color=dataset.PALETTE, + det_bbox_color=dataset.PALETTE, + det_text_color=(200, 200, 200), + det_mask_color=dataset.PALETTE, + show=self.show, + score_thr=self.score_thr, + wait_time=self.wait_time, + out_file=out_file, + overlay_gt_pred=self.overlay_gt_pred) + + def evaluate_and_show(self, + dataset, + results, + topk=20, + show_dir='work_dir'): + """Evaluate and show results. + + Args: + dataset (Dataset): A PyTorch dataset. + results (list): Object detection or panoptic segmentation + results from test results pkl file. + topk (int): Number of the highest topk and + lowest topk after evaluation index sorting. Default: 20. + show_dir (str, optional): The filename to write the image. + Default: 'work_dir' + eval_fn (callable, optional): Eval function, Default: None. + """ + + assert topk > 0 + if (topk * 2) > len(dataset): + topk = len(dataset) // 2 + + if isinstance(results[0], dict): + good_samples, bad_samples = self.panoptic_evaluate( + dataset, results, topk=topk) + elif isinstance(results[0], list): + good_samples, bad_samples = self.detection_evaluate( + dataset, results, topk=topk) + else: + raise 'The format of result is not supported yet. ' \ + 'Current dict for panoptic segmentation and list ' \ + 'for object detection are supported.' + + good_dir = osp.abspath(osp.join(show_dir, 'good')) + bad_dir = osp.abspath(osp.join(show_dir, 'bad')) + self._save_image_gts_results(dataset, results, good_samples, good_dir) + self._save_image_gts_results(dataset, results, bad_samples, bad_dir) + + def detection_evaluate(self, dataset, results, topk=20, eval_fn=None): + """Evaluation for object detection. + + Args: + dataset (Dataset): A PyTorch dataset. + results (list): Object detection results from test + results pkl file. + topk (int): Number of the highest topk and + lowest topk after evaluation index sorting. Default: 20. + eval_fn (callable, optional): Eval function, Default: None. + + Returns: + tuple: A tuple contains good samples and bad samples. + good_mAPs (dict[int, float]): A dict contains good + samples's indices in dataset and model's + performance on them. 
+ bad_mAPs (dict[int, float]): A dict contains bad + samples's indices in dataset and model's + performance on them. + """ + if eval_fn is None: + eval_fn = bbox_map_eval + else: + assert callable(eval_fn) + + prog_bar = mmcv.ProgressBar(len(results)) + _mAPs = {} + for i, (result, ) in enumerate(zip(results)): + # self.dataset[i] should not call directly + # because there is a risk of mismatch + data_info = dataset.prepare_train_img(i) + mAP = eval_fn(result, data_info['ann_info']) + _mAPs[i] = mAP + prog_bar.update() + # descending select topk image + _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1])) + good_mAPs = _mAPs[-topk:] + bad_mAPs = _mAPs[:topk] + + return good_mAPs, bad_mAPs + + def panoptic_evaluate(self, dataset, results, topk=20): + """Evaluation for panoptic segmentation. + + Args: + dataset (Dataset): A PyTorch dataset. + results (list): Panoptic segmentation results from test + results pkl file. + topk (int): Number of the highest topk and + lowest topk after evaluation index sorting. Default: 20. + + Returns: + tuple: A tuple contains good samples and bad samples. + good_pqs (dict[int, float]): A dict contains good + samples's indices in dataset and model's + performance on them. + bad_pqs (dict[int, float]): A dict contains bad + samples's indices in dataset and model's + performance on them. + """ + # image to annotations + gt_json = dataset.coco.img_ann_map + + result_files, tmp_dir = dataset.format_results(results) + pred_json = mmcv.load(result_files['panoptic'])['annotations'] + pred_folder = osp.join(tmp_dir.name, 'panoptic') + gt_folder = dataset.seg_prefix + + pqs = {} + prog_bar = mmcv.ProgressBar(len(results)) + for i in range(len(results)): + data_info = dataset.prepare_train_img(i) + image_id = data_info['img_info']['id'] + gt_ann = { + 'image_id': image_id, + 'segments_info': gt_json[image_id], + 'file_name': data_info['img_info']['segm_file'] + } + pred_ann = pred_json[i] + pq_stat = pq_compute_single_core( + i, [(gt_ann, pred_ann)], + gt_folder, + pred_folder, + dataset.categories, + dataset.file_client, + print_log=False) + pq_results, classwise_results = pq_stat.pq_average( + dataset.categories, isthing=None) + pqs[i] = pq_results['pq'] + prog_bar.update() + + if tmp_dir is not None: + tmp_dir.cleanup() + + # descending select topk image + pqs = list(sorted(pqs.items(), key=lambda kv: kv[1])) + good_pqs = pqs[-topk:] + bad_pqs = pqs[:topk] + + return good_pqs, bad_pqs + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet eval image prediction result for each') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'prediction_path', help='prediction path where test pkl result') + parser.add_argument( + 'show_dir', help='directory where painted images will be saved') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--wait-time', + type=float, + default=0, + help='the interval of show (s), 0 is block') + parser.add_argument( + '--topk', + default=20, + type=int, + help='saved Number of the highest topk ' + 'and lowest topk after index sorting') + parser.add_argument( + '--show-score-thr', + type=float, + default=0, + help='score threshold (default: 0.)') + parser.add_argument( + '--overlay-gt-pred', + action='store_true', + help='whether to plot gts and predictions on the same image.' + 'If False, predictions and gts will be plotted on two same' + 'image which will be concatenated in vertical direction.' 
+ 'The image above is drawn with gt, and the image below is' + 'drawn with the prediction result.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + mmcv.check_file_exist(args.prediction_path) + + cfg = Config.fromfile(args.config) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + cfg.data.test.test_mode = True + + cfg.data.test.pop('samples_per_gpu', 0) + if cfg.data.train.type in ('MultiImageMixDataset', 'ClassBalancedDataset', + 'RepeatDataset', 'ConcatDataset'): + cfg.data.test.pipeline = get_loading_pipeline( + cfg.data.train.dataset.pipeline) + else: + cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline) + + dataset = build_dataset(cfg.data.test) + outputs = mmcv.load(args.prediction_path) + + result_visualizer = ResultVisualizer(args.show, args.wait_time, + args.show_score_thr, + args.overlay_gt_pred) + result_visualizer.evaluate_and_show( + dataset, outputs, topk=args.topk, show_dir=args.show_dir) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/benchmark.py b/downstream/mmdetection/tools/analysis_tools/benchmark.py new file mode 100644 index 0000000..c956968 --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/benchmark.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import copy +import os +import time + +import torch +from mmcv import Config, DictAction +from mmcv.cnn import fuse_conv_bn +from mmcv.parallel import MMDistributedDataParallel +from mmcv.runner import init_dist, load_checkpoint, wrap_fp16_model + +from mmdet.datasets import (build_dataloader, build_dataset, + replace_ImageToTensor) +from mmdet.models import build_detector +from mmdet.utils import replace_cfg_vals, update_data_root + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMDet benchmark a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--repeat-num', + type=int, + default=1, + help='number of repeat times of measurement for averaging the results') + parser.add_argument( + '--max-iter', type=int, default=2000, help='num of max iter') + parser.add_argument( + '--log-interval', type=int, default=50, help='interval of logging') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def measure_inference_speed(cfg, checkpoint, max_iter, log_interval, + is_fuse_conv_bn): + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + # build the dataloader + samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) + if samples_per_gpu > 1: + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + samples_per_gpu=1, + # Because multiple processes will occupy additional CPU resources, + # FPS statistics will be more unstable when workers_per_gpu is not 0. + # It is reasonable to set workers_per_gpu to 0. + workers_per_gpu=0, + dist=True, + shuffle=False) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + load_checkpoint(model, checkpoint, map_location='cpu') + if is_fuse_conv_bn: + model = fuse_conv_bn(model) + + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False) + model.eval() + + # the first several iterations may be very slow so skip them + num_warmup = 5 + pure_inf_time = 0 + fps = 0 + + # benchmark with 2000 image and take the average + for i, data in enumerate(data_loader): + + torch.cuda.synchronize() + start_time = time.perf_counter() + + with torch.no_grad(): + model(return_loss=False, rescale=True, **data) + + torch.cuda.synchronize() + elapsed = time.perf_counter() - start_time + + if i >= num_warmup: + pure_inf_time += elapsed + if (i + 1) % log_interval == 0: + fps = (i + 1 - num_warmup) / pure_inf_time + print( + f'Done image [{i + 1:<3}/ {max_iter}], ' + f'fps: {fps:.1f} img / s, ' + f'times per image: {1000 / fps:.1f} ms / img', + flush=True) + + if (i + 1) == max_iter: + fps = (i + 1 - num_warmup) / pure_inf_time + print( + f'Overall fps: {fps:.1f} img / s, ' + f'times per image: {1000 / fps:.1f} ms / img', + flush=True) + break + return fps + + +def repeat_measure_inference_speed(cfg, + checkpoint, + max_iter, + log_interval, + is_fuse_conv_bn, + repeat_num=1): + assert repeat_num >= 1 + + fps_list = [] + + for _ in range(repeat_num): + # + cp_cfg = copy.deepcopy(cfg) + + fps_list.append( + measure_inference_speed(cp_cfg, checkpoint, max_iter, log_interval, + is_fuse_conv_bn)) + + if repeat_num > 1: + fps_list_ = [round(fps, 1) for fps in fps_list] + times_pre_image_list_ = [round(1000 / fps, 1) for fps in fps_list] + mean_fps_ = sum(fps_list_) / len(fps_list_) + mean_times_pre_image_ = sum(times_pre_image_list_) / len( + times_pre_image_list_) + print( + f'Overall fps: {fps_list_}[{mean_fps_:.1f}] img / s, ' + f'times per image: ' + f'{times_pre_image_list_}[{mean_times_pre_image_:.1f}] ms / img', + flush=True) + return fps_list + + return fps_list[0] + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) 
+ + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + if args.launcher == 'none': + raise NotImplementedError('Only supports distributed mode') + else: + init_dist(args.launcher, **cfg.dist_params) + + repeat_measure_inference_speed(cfg, args.checkpoint, args.max_iter, + args.log_interval, args.fuse_conv_bn, + args.repeat_num) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/coco_error_analysis.py b/downstream/mmdetection/tools/analysis_tools/coco_error_analysis.py new file mode 100644 index 0000000..102ea4e --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/coco_error_analysis.py @@ -0,0 +1,339 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os +from argparse import ArgumentParser +from multiprocessing import Pool + +import matplotlib.pyplot as plt +import numpy as np +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + + +def makeplot(rs, ps, outDir, class_name, iou_type): + cs = np.vstack([ + np.ones((2, 3)), + np.array([0.31, 0.51, 0.74]), + np.array([0.75, 0.31, 0.30]), + np.array([0.36, 0.90, 0.38]), + np.array([0.50, 0.39, 0.64]), + np.array([1, 0.6, 0]), + ]) + areaNames = ['allarea', 'small', 'medium', 'large'] + types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN'] + for i in range(len(areaNames)): + area_ps = ps[..., i, 0] + figure_title = iou_type + '-' + class_name + '-' + areaNames[i] + aps = [ps_.mean() for ps_ in area_ps] + ps_curve = [ + ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps + ] + ps_curve.insert(0, np.zeros(ps_curve[0].shape)) + fig = plt.figure() + ax = plt.subplot(111) + for k in range(len(types)): + ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5) + ax.fill_between( + rs, + ps_curve[k], + ps_curve[k + 1], + color=cs[k], + label=str(f'[{aps[k]:.3f}]' + types[k]), + ) + plt.xlabel('recall') + plt.ylabel('precision') + plt.xlim(0, 1.0) + plt.ylim(0, 1.0) + plt.title(figure_title) + plt.legend() + # plt.show() + fig.savefig(outDir + f'/{figure_title}.png') + plt.close(fig) + + +def autolabel(ax, rects): + """Attach a text label above each bar in *rects*, displaying its height.""" + for rect in rects: + height = rect.get_height() + if height > 0 and height <= 1: # for percent values + text_label = '{:2.0f}'.format(height * 100) + else: + text_label = '{:2.0f}'.format(height) + ax.annotate( + text_label, + xy=(rect.get_x() + rect.get_width() / 2, height), + xytext=(0, 3), # 3 points vertical offset + textcoords='offset points', + ha='center', + va='bottom', + fontsize='x-small', + ) + + +def makebarplot(rs, ps, outDir, class_name, iou_type): + areaNames = ['allarea', 'small', 'medium', 'large'] + types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN'] + fig, ax = plt.subplots() + x = np.arange(len(areaNames)) # the areaNames locations + width = 0.60 # the width of the bars + rects_list = [] + figure_title = iou_type + '-' + class_name + '-' + 'ap bar plot' + for i in range(len(types) - 1): + type_ps = ps[i, ..., 0] + aps = [ps_.mean() for ps_ in type_ps.T] + rects_list.append( + ax.bar( + x - width / 2 + (i + 1) * width / len(types), + aps, + width / len(types), + label=types[i], + )) + + # Add some text for labels, title and custom x-axis tick labels, etc. 
+ ax.set_ylabel('Mean Average Precision (mAP)') + ax.set_title(figure_title) + ax.set_xticks(x) + ax.set_xticklabels(areaNames) + ax.legend() + + # Add score texts over bars + for rects in rects_list: + autolabel(ax, rects) + + # Save plot + fig.savefig(outDir + f'/{figure_title}.png') + plt.close(fig) + + +def get_gt_area_group_numbers(cocoEval): + areaRng = cocoEval.params.areaRng + areaRngStr = [str(aRng) for aRng in areaRng] + areaRngLbl = cocoEval.params.areaRngLbl + areaRngStr2areaRngLbl = dict(zip(areaRngStr, areaRngLbl)) + areaRngLbl2Number = dict.fromkeys(areaRngLbl, 0) + for evalImg in cocoEval.evalImgs: + if evalImg: + for gtIgnore in evalImg['gtIgnore']: + if not gtIgnore: + aRngLbl = areaRngStr2areaRngLbl[str(evalImg['aRng'])] + areaRngLbl2Number[aRngLbl] += 1 + return areaRngLbl2Number + + +def make_gt_area_group_numbers_plot(cocoEval, outDir, verbose=True): + areaRngLbl2Number = get_gt_area_group_numbers(cocoEval) + areaRngLbl = areaRngLbl2Number.keys() + if verbose: + print('number of annotations per area group:', areaRngLbl2Number) + + # Init figure + fig, ax = plt.subplots() + x = np.arange(len(areaRngLbl)) # the areaNames locations + width = 0.60 # the width of the bars + figure_title = 'number of annotations per area group' + + rects = ax.bar(x, areaRngLbl2Number.values(), width) + + # Add some text for labels, title and custom x-axis tick labels, etc. + ax.set_ylabel('Number of annotations') + ax.set_title(figure_title) + ax.set_xticks(x) + ax.set_xticklabels(areaRngLbl) + + # Add score texts over bars + autolabel(ax, rects) + + # Save plot + fig.tight_layout() + fig.savefig(outDir + f'/{figure_title}.png') + plt.close(fig) + + +def make_gt_area_histogram_plot(cocoEval, outDir): + n_bins = 100 + areas = [ann['area'] for ann in cocoEval.cocoGt.anns.values()] + + # init figure + figure_title = 'gt annotation areas histogram plot' + fig, ax = plt.subplots() + + # Set the number of bins + ax.hist(np.sqrt(areas), bins=n_bins) + + # Add some text for labels, title and custom x-axis tick labels, etc. 
+ ax.set_xlabel('Squareroot Area') + ax.set_ylabel('Number of annotations') + ax.set_title(figure_title) + + # Save plot + fig.tight_layout() + fig.savefig(outDir + f'/{figure_title}.png') + plt.close(fig) + + +def analyze_individual_category(k, + cocoDt, + cocoGt, + catId, + iou_type, + areas=None): + nm = cocoGt.loadCats(catId)[0] + print(f'--------------analyzing {k + 1}-{nm["name"]}---------------') + ps_ = {} + dt = copy.deepcopy(cocoDt) + nm = cocoGt.loadCats(catId)[0] + imgIds = cocoGt.getImgIds() + dt_anns = dt.dataset['annotations'] + select_dt_anns = [] + for ann in dt_anns: + if ann['category_id'] == catId: + select_dt_anns.append(ann) + dt.dataset['annotations'] = select_dt_anns + dt.createIndex() + # compute precision but ignore superclass confusion + gt = copy.deepcopy(cocoGt) + child_catIds = gt.getCatIds(supNms=[nm['supercategory']]) + for idx, ann in enumerate(gt.dataset['annotations']): + if ann['category_id'] in child_catIds and ann['category_id'] != catId: + gt.dataset['annotations'][idx]['ignore'] = 1 + gt.dataset['annotations'][idx]['iscrowd'] = 1 + gt.dataset['annotations'][idx]['category_id'] = catId + cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type) + cocoEval.params.imgIds = imgIds + cocoEval.params.maxDets = [100] + cocoEval.params.iouThrs = [0.1] + cocoEval.params.useCats = 1 + if areas: + cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], + [areas[0], areas[1]], [areas[1], areas[2]]] + cocoEval.evaluate() + cocoEval.accumulate() + ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :] + ps_['ps_supercategory'] = ps_supercategory + # compute precision but ignore any class confusion + gt = copy.deepcopy(cocoGt) + for idx, ann in enumerate(gt.dataset['annotations']): + if ann['category_id'] != catId: + gt.dataset['annotations'][idx]['ignore'] = 1 + gt.dataset['annotations'][idx]['iscrowd'] = 1 + gt.dataset['annotations'][idx]['category_id'] = catId + cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type) + cocoEval.params.imgIds = imgIds + cocoEval.params.maxDets = [100] + cocoEval.params.iouThrs = [0.1] + cocoEval.params.useCats = 1 + if areas: + cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], + [areas[0], areas[1]], [areas[1], areas[2]]] + cocoEval.evaluate() + cocoEval.accumulate() + ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :] + ps_['ps_allcategory'] = ps_allcategory + return k, ps_ + + +def analyze_results(res_file, + ann_file, + res_types, + out_dir, + extraplots=None, + areas=None): + for res_type in res_types: + assert res_type in ['bbox', 'segm'] + if areas: + assert len(areas) == 3, '3 integers should be specified as areas, \ + representing 3 area regions' + + directory = os.path.dirname(out_dir + '/') + if not os.path.exists(directory): + print(f'-------------create {out_dir}-----------------') + os.makedirs(directory) + + cocoGt = COCO(ann_file) + cocoDt = cocoGt.loadRes(res_file) + imgIds = cocoGt.getImgIds() + for res_type in res_types: + res_out_dir = out_dir + '/' + res_type + '/' + res_directory = os.path.dirname(res_out_dir) + if not os.path.exists(res_directory): + print(f'-------------create {res_out_dir}-----------------') + os.makedirs(res_directory) + iou_type = res_type + cocoEval = COCOeval( + copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type) + cocoEval.params.imgIds = imgIds + cocoEval.params.iouThrs = [0.75, 0.5, 0.1] + cocoEval.params.maxDets = [100] + if areas: + cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], + [areas[0], areas[1]], + [areas[1], areas[2]]] + 
cocoEval.evaluate() + cocoEval.accumulate() + ps = cocoEval.eval['precision'] + ps = np.vstack([ps, np.zeros((4, *ps.shape[1:]))]) + catIds = cocoGt.getCatIds() + recThrs = cocoEval.params.recThrs + with Pool(processes=48) as pool: + args = [(k, cocoDt, cocoGt, catId, iou_type, areas) + for k, catId in enumerate(catIds)] + analyze_results = pool.starmap(analyze_individual_category, args) + for k, catId in enumerate(catIds): + nm = cocoGt.loadCats(catId)[0] + print(f'--------------saving {k + 1}-{nm["name"]}---------------') + analyze_result = analyze_results[k] + assert k == analyze_result[0] + ps_supercategory = analyze_result[1]['ps_supercategory'] + ps_allcategory = analyze_result[1]['ps_allcategory'] + # compute precision but ignore superclass confusion + ps[3, :, k, :, :] = ps_supercategory + # compute precision but ignore any class confusion + ps[4, :, k, :, :] = ps_allcategory + # fill in background and false negative errors and plot + ps[ps == -1] = 0 + ps[5, :, k, :, :] = ps[4, :, k, :, :] > 0 + ps[6, :, k, :, :] = 1.0 + makeplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type) + if extraplots: + makebarplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], + iou_type) + makeplot(recThrs, ps, res_out_dir, 'allclass', iou_type) + if extraplots: + makebarplot(recThrs, ps, res_out_dir, 'allclass', iou_type) + make_gt_area_group_numbers_plot( + cocoEval=cocoEval, outDir=res_out_dir, verbose=True) + make_gt_area_histogram_plot(cocoEval=cocoEval, outDir=res_out_dir) + + +def main(): + parser = ArgumentParser(description='COCO Error Analysis Tool') + parser.add_argument('result', help='result file (json format) path') + parser.add_argument('out_dir', help='dir to save analyze result images') + parser.add_argument( + '--ann', + default='data/coco/annotations/instances_val2017.json', + help='annotation file path') + parser.add_argument( + '--types', type=str, nargs='+', default=['bbox'], help='result types') + parser.add_argument( + '--extraplots', + action='store_true', + help='export extra bar/stat plots') + parser.add_argument( + '--areas', + type=int, + nargs='+', + default=[1024, 9216, 10000000000], + help='area regions') + args = parser.parse_args() + analyze_results( + args.result, + args.ann, + args.types, + out_dir=args.out_dir, + extraplots=args.extraplots, + areas=args.areas) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/confusion_matrix.py b/downstream/mmdetection/tools/analysis_tools/confusion_matrix.py new file mode 100644 index 0000000..5b52ea4 --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/confusion_matrix.py @@ -0,0 +1,273 @@ +import argparse +import os + +import matplotlib.pyplot as plt +import mmcv +import numpy as np +from matplotlib.ticker import MultipleLocator +from mmcv import Config, DictAction +from mmcv.ops import nms + +from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps +from mmdet.datasets import build_dataset +from mmdet.utils import replace_cfg_vals, update_data_root + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate confusion matrix from detection results') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'prediction_path', help='prediction path where test .pkl result') + parser.add_argument( + 'save_dir', help='directory where confusion matrix will be saved') + parser.add_argument( + '--show', action='store_true', help='show confusion matrix') + parser.add_argument( + '--color-theme', + default='plasma', 
+ help='theme of the matrix color map') + parser.add_argument( + '--score-thr', + type=float, + default=0.3, + help='score threshold to filter detection bboxes') + parser.add_argument( + '--tp-iou-thr', + type=float, + default=0.5, + help='IoU threshold to be considered as matched') + parser.add_argument( + '--nms-iou-thr', + type=float, + default=None, + help='nms IoU threshold, only applied when users want to change the' + 'nms IoU threshold.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def calculate_confusion_matrix(dataset, + results, + score_thr=0, + nms_iou_thr=None, + tp_iou_thr=0.5): + """Calculate the confusion matrix. + + Args: + dataset (Dataset): Test or val dataset. + results (list[ndarray]): A list of detection results in each image. + score_thr (float|optional): Score threshold to filter bboxes. + Default: 0. + nms_iou_thr (float|optional): nms IoU threshold, the detection results + have done nms in the detector, only applied when users want to + change the nms IoU threshold. Default: None. + tp_iou_thr (float|optional): IoU threshold to be considered as matched. + Default: 0.5. + """ + num_classes = len(dataset.CLASSES) + confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1]) + assert len(dataset) == len(results) + prog_bar = mmcv.ProgressBar(len(results)) + for idx, per_img_res in enumerate(results): + if isinstance(per_img_res, tuple): + res_bboxes, _ = per_img_res + else: + res_bboxes = per_img_res + ann = dataset.get_ann_info(idx) + gt_bboxes = ann['bboxes'] + labels = ann['labels'] + analyze_per_img_dets(confusion_matrix, gt_bboxes, labels, res_bboxes, + score_thr, tp_iou_thr, nms_iou_thr) + prog_bar.update() + return confusion_matrix + + +def analyze_per_img_dets(confusion_matrix, + gt_bboxes, + gt_labels, + result, + score_thr=0, + tp_iou_thr=0.5, + nms_iou_thr=None): + """Analyze detection results on each image. + + Args: + confusion_matrix (ndarray): The confusion matrix, + has shape (num_classes + 1, num_classes + 1). + gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4). + gt_labels (ndarray): Ground truth labels, has shape (num_gt). + result (ndarray): Detection results, has shape + (num_classes, num_bboxes, 5). + score_thr (float): Score threshold to filter bboxes. + Default: 0. + tp_iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + nms_iou_thr (float|optional): nms IoU threshold, the detection results + have done nms in the detector, only applied when users want to + change the nms IoU threshold. Default: None. 
+ """ + true_positives = np.zeros_like(gt_labels) + for det_label, det_bboxes in enumerate(result): + if nms_iou_thr: + det_bboxes, _ = nms( + det_bboxes[:, :4], + det_bboxes[:, -1], + nms_iou_thr, + score_threshold=score_thr) + ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes) + for i, det_bbox in enumerate(det_bboxes): + score = det_bbox[4] + det_match = 0 + if score >= score_thr: + for j, gt_label in enumerate(gt_labels): + if ious[i, j] >= tp_iou_thr: + det_match += 1 + if gt_label == det_label: + true_positives[j] += 1 # TP + confusion_matrix[gt_label, det_label] += 1 + if det_match == 0: # BG FP + confusion_matrix[-1, det_label] += 1 + for num_tp, gt_label in zip(true_positives, gt_labels): + if num_tp == 0: # FN + confusion_matrix[gt_label, -1] += 1 + + +def plot_confusion_matrix(confusion_matrix, + labels, + save_dir=None, + show=True, + title='Normalized Confusion Matrix', + color_theme='plasma'): + """Draw confusion matrix with matplotlib. + + Args: + confusion_matrix (ndarray): The confusion matrix. + labels (list[str]): List of class names. + save_dir (str|optional): If set, save the confusion matrix plot to the + given path. Default: None. + show (bool): Whether to show the plot. Default: True. + title (str): Title of the plot. Default: `Normalized Confusion Matrix`. + color_theme (str): Theme of the matrix color map. Default: `plasma`. + """ + # normalize the confusion matrix + per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis] + confusion_matrix = \ + confusion_matrix.astype(np.float32) / per_label_sums * 100 + + num_classes = len(labels) + fig, ax = plt.subplots( + figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180) + cmap = plt.get_cmap(color_theme) + im = ax.imshow(confusion_matrix, cmap=cmap) + plt.colorbar(mappable=im, ax=ax) + + title_font = {'weight': 'bold', 'size': 12} + ax.set_title(title, fontdict=title_font) + label_font = {'size': 10} + plt.ylabel('Ground Truth Label', fontdict=label_font) + plt.xlabel('Prediction Label', fontdict=label_font) + + # draw locator + xmajor_locator = MultipleLocator(1) + xminor_locator = MultipleLocator(0.5) + ax.xaxis.set_major_locator(xmajor_locator) + ax.xaxis.set_minor_locator(xminor_locator) + ymajor_locator = MultipleLocator(1) + yminor_locator = MultipleLocator(0.5) + ax.yaxis.set_major_locator(ymajor_locator) + ax.yaxis.set_minor_locator(yminor_locator) + + # draw grid + ax.grid(True, which='minor', linestyle='-') + + # draw label + ax.set_xticks(np.arange(num_classes)) + ax.set_yticks(np.arange(num_classes)) + ax.set_xticklabels(labels) + ax.set_yticklabels(labels) + + ax.tick_params( + axis='x', bottom=False, top=True, labelbottom=False, labeltop=True) + plt.setp( + ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor') + + # draw confution matrix value + for i in range(num_classes): + for j in range(num_classes): + ax.text( + j, + i, + '{}%'.format( + int(confusion_matrix[ + i, + j]) if not np.isnan(confusion_matrix[i, j]) else -1), + ha='center', + va='center', + color='w', + size=7) + + ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1 + + fig.tight_layout() + if save_dir is not None: + plt.savefig( + os.path.join(save_dir, 'confusion_matrix.png'), format='png') + if show: + plt.show() + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + if args.cfg_options is not None: + 
cfg.merge_from_dict(args.cfg_options) + + results = mmcv.load(args.prediction_path) + assert isinstance(results, list) + if isinstance(results[0], list): + pass + elif isinstance(results[0], tuple): + results = [result[0] for result in results] + else: + raise TypeError('invalid type of prediction results') + + if isinstance(cfg.data.test, dict): + cfg.data.test.test_mode = True + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + ds_cfg.test_mode = True + dataset = build_dataset(cfg.data.test) + + confusion_matrix = calculate_confusion_matrix(dataset, results, + args.score_thr, + args.nms_iou_thr, + args.tp_iou_thr) + plot_confusion_matrix( + confusion_matrix, + dataset.CLASSES + ('background', ), + save_dir=args.save_dir, + show=args.show, + color_theme=args.color_theme) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/eval_metric.py b/downstream/mmdetection/tools/analysis_tools/eval_metric.py new file mode 100644 index 0000000..7caafe9 --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/eval_metric.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import mmcv +from mmcv import Config, DictAction + +from mmdet.datasets import build_dataset +from mmdet.utils import replace_cfg_vals, update_data_root + + +def parse_args(): + parser = argparse.ArgumentParser(description='Evaluate metric of the ' + 'results saved in pkl format') + parser.add_argument('config', help='Config of the model') + parser.add_argument('pkl_results', help='Results in pickle format') + parser.add_argument( + '--format-only', + action='store_true', + help='Format the output results without perform evaluation. It is' + 'useful when you want to format the result to a specific format and ' + 'submit it to the test server') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='Evaluation metrics, which depends on the dataset, e.g., "bbox",' + ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + assert args.eval or args.format_only, ( + 'Please specify at least one operation (eval/format the results) with ' + 'the argument "--eval", "--format-only"') + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + cfg.data.test.test_mode = True + + dataset = build_dataset(cfg.data.test) + outputs = mmcv.load(args.pkl_results) + + kwargs = {} if args.eval_options is None else args.eval_options + if args.format_only: + dataset.format_results(outputs, **kwargs) + if args.eval: + eval_kwargs = cfg.get('evaluation', {}).copy() + # hard-code way to remove EvalHook args + for key in [ + 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', + 'rule' + ]: + eval_kwargs.pop(key, None) + eval_kwargs.update(dict(metric=args.eval, **kwargs)) + print(dataset.evaluate(outputs, **eval_kwargs)) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/get_flops.py b/downstream/mmdetection/tools/analysis_tools/get_flops.py new file mode 100644 index 0000000..4df8732 --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/get_flops.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import numpy as np +import torch +from mmcv import Config, DictAction + +from mmdet.models import build_detector + +try: + from mmcv.cnn import get_model_complexity_info +except ImportError: + raise ImportError('Please upgrade mmcv to >0.6.2') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[1280, 800], + help='input image size') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--size-divisor', + type=int, + default=32, + help='Pad the input image, the minimum size that is divisible ' + 'by size_divisor, -1 means do not pad the image.') + args = parser.parse_args() + return args + + +def main(): + + args = parse_args() + + if len(args.shape) == 1: + h = w = args.shape[0] + elif len(args.shape) == 2: + h, w = args.shape + else: + raise ValueError('invalid input shape') + ori_shape = (3, h, w) + divisor = args.size_divisor + if divisor > 0: + h = int(np.ceil(h / divisor)) * divisor + w = int(np.ceil(w / divisor)) * divisor + + input_shape = (3, h, w) + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + model = build_detector( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + if torch.cuda.is_available(): + model.cuda() + model.eval() + + if hasattr(model, 'forward_dummy'): + model.forward = model.forward_dummy + else: + raise NotImplementedError( + 'FLOPs counter is currently not currently supported with {}'. + format(model.__class__.__name__)) + + flops, params = get_model_complexity_info(model, input_shape) + split_line = '=' * 30 + + if divisor > 0 and \ + input_shape != ori_shape: + print(f'{split_line}\nUse size divisor set input shape ' + f'from {ori_shape} to {input_shape}\n') + print(f'{split_line}\nInput shape: {input_shape}\n' + f'Flops: {flops}\nParams: {params}\n{split_line}') + print('!!!Please be cautious if you use the results in papers. ' + 'You may need to check if all ops are supported and verify that the ' + 'flops computation is correct.') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/optimize_anchors.py b/downstream/mmdetection/tools/analysis_tools/optimize_anchors.py new file mode 100644 index 0000000..421998f --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/optimize_anchors.py @@ -0,0 +1,376 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Optimize anchor settings on a specific dataset. + +This script provides two method to optimize YOLO anchors including k-means +anchor cluster and differential evolution. You can use ``--algorithm k-means`` +and ``--algorithm differential_evolution`` to switch two method. 
+ +Example: + Use k-means anchor cluster:: + + python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ + --algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ + --output-dir ${OUTPUT_DIR} + Use differential evolution to optimize anchors:: + + python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ + --algorithm differential_evolution \ + --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ + --output-dir ${OUTPUT_DIR} +""" +import argparse +import os.path as osp + +import mmcv +import numpy as np +import torch +from mmcv import Config +from scipy.optimize import differential_evolution + +from mmdet.core import bbox_cxcywh_to_xyxy, bbox_overlaps, bbox_xyxy_to_cxcywh +from mmdet.datasets import build_dataset +from mmdet.utils import get_root_logger, replace_cfg_vals, update_data_root + + +def parse_args(): + parser = argparse.ArgumentParser(description='Optimize anchor parameters.') + parser.add_argument('config', help='Train config file path.') + parser.add_argument( + '--device', default='cuda:0', help='Device used for calculating.') + parser.add_argument( + '--input-shape', + type=int, + nargs='+', + default=[608, 608], + help='input image size') + parser.add_argument( + '--algorithm', + default='differential_evolution', + help='Algorithm used for anchor optimizing.' + 'Support k-means and differential_evolution for YOLO.') + parser.add_argument( + '--iters', + default=1000, + type=int, + help='Maximum iterations for optimizer.') + parser.add_argument( + '--output-dir', + default=None, + type=str, + help='Path to save anchor optimize result.') + + args = parser.parse_args() + return args + + +class BaseAnchorOptimizer: + """Base class for anchor optimizer. + + Args: + dataset (obj:`Dataset`): Dataset object. + input_shape (list[int]): Input image shape of the model. + Format in [width, height]. + logger (obj:`logging.Logger`): The logger for logging. + device (str, optional): Device used for calculating. + Default: 'cuda:0' + out_dir (str, optional): Path to save anchor optimize result. + Default: None + """ + + def __init__(self, + dataset, + input_shape, + logger, + device='cuda:0', + out_dir=None): + self.dataset = dataset + self.input_shape = input_shape + self.logger = logger + self.device = device + self.out_dir = out_dir + bbox_whs, img_shapes = self.get_whs_and_shapes() + ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape]) + + # resize to input shape + self.bbox_whs = bbox_whs / ratios + + def get_whs_and_shapes(self): + """Get widths and heights of bboxes and shapes of images. + + Returns: + tuple[np.ndarray]: Array of bbox shapes and array of image + shapes with shape (num_bboxes, 2) in [width, height] format. + """ + self.logger.info('Collecting bboxes from annotation...') + bbox_whs = [] + img_shapes = [] + prog_bar = mmcv.ProgressBar(len(self.dataset)) + for idx in range(len(self.dataset)): + ann = self.dataset.get_ann_info(idx) + data_info = self.dataset.data_infos[idx] + img_shape = np.array([data_info['width'], data_info['height']]) + gt_bboxes = ann['bboxes'] + for bbox in gt_bboxes: + wh = bbox[2:4] - bbox[0:2] + img_shapes.append(img_shape) + bbox_whs.append(wh) + prog_bar.update() + print('\n') + bbox_whs = np.array(bbox_whs) + img_shapes = np.array(img_shapes) + self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.') + return bbox_whs, img_shapes + + def get_zero_center_bbox_tensor(self): + """Get a tensor of bboxes centered at (0, 0). 
+ + Returns: + Tensor: Tensor of bboxes with shape (num_bboxes, 4) + in [xmin, ymin, xmax, ymax] format. + """ + whs = torch.from_numpy(self.bbox_whs).to( + self.device, dtype=torch.float32) + bboxes = bbox_cxcywh_to_xyxy( + torch.cat([torch.zeros_like(whs), whs], dim=1)) + return bboxes + + def optimize(self): + raise NotImplementedError + + def save_result(self, anchors, path=None): + anchor_results = [] + for w, h in anchors: + anchor_results.append([round(w), round(h)]) + self.logger.info(f'Anchor optimize result:{anchor_results}') + if path: + json_path = osp.join(path, 'anchor_optimize_result.json') + mmcv.dump(anchor_results, json_path) + self.logger.info(f'Result saved in {json_path}') + + +class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer): + r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet. + `_. + + Args: + num_anchors (int) : Number of anchors. + iters (int): Maximum iterations for k-means. + """ + + def __init__(self, num_anchors, iters, **kwargs): + + super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs) + self.num_anchors = num_anchors + self.iters = iters + + def optimize(self): + anchors = self.kmeans_anchors() + self.save_result(anchors, self.out_dir) + + def kmeans_anchors(self): + self.logger.info( + f'Start cluster {self.num_anchors} YOLO anchors with K-means...') + bboxes = self.get_zero_center_bbox_tensor() + cluster_center_idx = torch.randint( + 0, bboxes.shape[0], (self.num_anchors, )).to(self.device) + + assignments = torch.zeros((bboxes.shape[0], )).to(self.device) + cluster_centers = bboxes[cluster_center_idx] + if self.num_anchors == 1: + cluster_centers = self.kmeans_maximization(bboxes, assignments, + cluster_centers) + anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + return anchors + + prog_bar = mmcv.ProgressBar(self.iters) + for i in range(self.iters): + converged, assignments = self.kmeans_expectation( + bboxes, assignments, cluster_centers) + if converged: + self.logger.info(f'K-means process has converged at iter {i}.') + break + cluster_centers = self.kmeans_maximization(bboxes, assignments, + cluster_centers) + prog_bar.update() + print('\n') + avg_iou = bbox_overlaps(bboxes, + cluster_centers).max(1)[0].mean().item() + + anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}') + + return anchors + + def kmeans_maximization(self, bboxes, assignments, centers): + """Maximization part of EM algorithm(Expectation-Maximization)""" + new_centers = torch.zeros_like(centers) + for i in range(centers.shape[0]): + mask = (assignments == i) + if mask.sum(): + new_centers[i, :] = bboxes[mask].mean(0) + return new_centers + + def kmeans_expectation(self, bboxes, assignments, centers): + """Expectation part of EM algorithm(Expectation-Maximization)""" + ious = bbox_overlaps(bboxes, centers) + closest = ious.argmax(1) + converged = (closest == assignments).all() + return converged, closest + + +class YOLODEAnchorOptimizer(BaseAnchorOptimizer): + """YOLO anchor optimizer using differential evolution algorithm. + + Args: + num_anchors (int) : Number of anchors. + iters (int): Maximum iterations for k-means. + strategy (str): The differential evolution strategy to use. 
+ Should be one of: + + - 'best1bin' + - 'best1exp' + - 'rand1exp' + - 'randtobest1exp' + - 'currenttobest1exp' + - 'best2exp' + - 'rand2exp' + - 'randtobest1bin' + - 'currenttobest1bin' + - 'best2bin' + - 'rand2bin' + - 'rand1bin' + + Default: 'best1bin'. + population_size (int): Total population size of evolution algorithm. + Default: 15. + convergence_thr (float): Tolerance for convergence, the + optimizing stops when ``np.std(pop) <= abs(convergence_thr) + + convergence_thr * np.abs(np.mean(population_energies))``, + respectively. Default: 0.0001. + mutation (tuple[float]): Range of dithering randomly changes the + mutation constant. Default: (0.5, 1). + recombination (float): Recombination constant of crossover probability. + Default: 0.7. + """ + + def __init__(self, + num_anchors, + iters, + strategy='best1bin', + population_size=15, + convergence_thr=0.0001, + mutation=(0.5, 1), + recombination=0.7, + **kwargs): + + super(YOLODEAnchorOptimizer, self).__init__(**kwargs) + + self.num_anchors = num_anchors + self.iters = iters + self.strategy = strategy + self.population_size = population_size + self.convergence_thr = convergence_thr + self.mutation = mutation + self.recombination = recombination + + def optimize(self): + anchors = self.differential_evolution() + self.save_result(anchors, self.out_dir) + + def differential_evolution(self): + bboxes = self.get_zero_center_bbox_tensor() + + bounds = [] + for i in range(self.num_anchors): + bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])]) + + result = differential_evolution( + func=self.avg_iou_cost, + bounds=bounds, + args=(bboxes, ), + strategy=self.strategy, + maxiter=self.iters, + popsize=self.population_size, + tol=self.convergence_thr, + mutation=self.mutation, + recombination=self.recombination, + updating='immediate', + disp=True) + self.logger.info( + f'Anchor evolution finish. Average IOU: {1 - result.fun}') + anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])] + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + return anchors + + @staticmethod + def avg_iou_cost(anchor_params, bboxes): + assert len(anchor_params) % 2 == 0 + anchor_whs = torch.tensor( + [[w, h] + for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to( + bboxes.device, dtype=bboxes.dtype) + anchor_boxes = bbox_cxcywh_to_xyxy( + torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1)) + ious = bbox_overlaps(bboxes, anchor_boxes) + max_ious, _ = ious.max(1) + cost = 1 - max_ious.mean().item() + return cost + + +def main(): + logger = get_root_logger() + args = parse_args() + cfg = args.config + cfg = Config.fromfile(cfg) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + input_shape = args.input_shape + assert len(input_shape) == 2 + + anchor_type = cfg.model.bbox_head.anchor_generator.type + assert anchor_type == 'YOLOAnchorGenerator', \ + f'Only support optimize YOLOAnchor, but get {anchor_type}.' 
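# ---- Illustrative aside (not part of the patch above) ----------------------
# Standalone sketch of the objective behind avg_iou_cost() above.  Because the
# ground-truth boxes and candidate anchors are all centred at the origin, IoU
# reduces to a width/height overlap; both k-means and differential evolution
# try to maximise the mean best-anchor IoU (i.e. minimise 1 - IoU).  The box
# and anchor sizes below are made up.
import numpy as np

def wh_iou(a, b):
    """IoU of two origin-centred boxes given as (width, height)."""
    inter = min(a[0], b[0]) * min(a[1], b[1])
    return inter / (a[0] * a[1] + b[0] * b[1] - inter)

gt_whs = [(30, 60), (120, 90), (45, 45)]
anchors = [(32, 64), (128, 96)]
cost = 1 - np.mean([max(wh_iou(g, a) for a in anchors) for g in gt_whs])
print(f'avg IoU cost = {cost:.3f}')
# -----------------------------------------------------------------------------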
+ + base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes + num_anchors = sum([len(sizes) for sizes in base_sizes]) + + train_data_cfg = cfg.data.train + while 'dataset' in train_data_cfg: + train_data_cfg = train_data_cfg['dataset'] + dataset = build_dataset(train_data_cfg) + + if args.algorithm == 'k-means': + optimizer = YOLOKMeansAnchorOptimizer( + dataset=dataset, + input_shape=input_shape, + device=args.device, + num_anchors=num_anchors, + iters=args.iters, + logger=logger, + out_dir=args.output_dir) + elif args.algorithm == 'differential_evolution': + optimizer = YOLODEAnchorOptimizer( + dataset=dataset, + input_shape=input_shape, + device=args.device, + num_anchors=num_anchors, + iters=args.iters, + logger=logger, + out_dir=args.output_dir) + else: + raise NotImplementedError( + f'Only support k-means and differential_evolution, ' + f'but get {args.algorithm}') + + optimizer.optimize() + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/robustness_eval.py b/downstream/mmdetection/tools/analysis_tools/robustness_eval.py new file mode 100644 index 0000000..da5ec28 --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/robustness_eval.py @@ -0,0 +1,251 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from argparse import ArgumentParser + +import mmcv +import numpy as np + + +def print_coco_results(results): + + def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100): + titleStr = 'Average Precision' if ap == 1 else 'Average Recall' + typeStr = '(AP)' if ap == 1 else '(AR)' + iouStr = '0.50:0.95' \ + if iouThr is None else f'{iouThr:0.2f}' + iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | ' + iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}' + print(iStr) + + stats = np.zeros((12, )) + stats[0] = _print(results[0], 1) + stats[1] = _print(results[1], 1, iouThr=.5) + stats[2] = _print(results[2], 1, iouThr=.75) + stats[3] = _print(results[3], 1, areaRng='small') + stats[4] = _print(results[4], 1, areaRng='medium') + stats[5] = _print(results[5], 1, areaRng='large') + stats[6] = _print(results[6], 0, maxDets=1) + stats[7] = _print(results[7], 0, maxDets=10) + stats[8] = _print(results[8], 0) + stats[9] = _print(results[9], 0, areaRng='small') + stats[10] = _print(results[10], 0, areaRng='medium') + stats[11] = _print(results[11], 0, areaRng='large') + + +def get_coco_style_results(filename, + task='bbox', + metric=None, + prints='mPC', + aggregate='benchmark'): + + assert aggregate in ['benchmark', 'all'] + + if prints == 'all': + prints = ['P', 'mPC', 'rPC'] + elif isinstance(prints, str): + prints = [prints] + for p in prints: + assert p in ['P', 'mPC', 'rPC'] + + if metric is None: + metrics = [ + 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', + 'ARs', 'ARm', 'ARl' + ] + elif isinstance(metric, list): + metrics = metric + else: + metrics = [metric] + + for metric_name in metrics: + assert metric_name in [ + 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', + 'ARs', 'ARm', 'ARl' + ] + + eval_output = mmcv.load(filename) + + num_distortions = len(list(eval_output.keys())) + results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32') + + for corr_i, distortion in enumerate(eval_output): + for severity in eval_output[distortion]: + for metric_j, metric_name in enumerate(metrics): + mAP = eval_output[distortion][severity][task][metric_name] + results[corr_i, severity, metric_j] = mAP + + P = results[0, 0, :] + if 
aggregate == 'benchmark': + mPC = np.mean(results[:15, 1:, :], axis=(0, 1)) + else: + mPC = np.mean(results[:, 1:, :], axis=(0, 1)) + rPC = mPC / P + + print(f'\nmodel: {osp.basename(filename)}') + if metric is None: + if 'P' in prints: + print(f'Performance on Clean Data [P] ({task})') + print_coco_results(P) + if 'mPC' in prints: + print(f'Mean Performance under Corruption [mPC] ({task})') + print_coco_results(mPC) + if 'rPC' in prints: + print(f'Relative Performance under Corruption [rPC] ({task})') + print_coco_results(rPC) + else: + if 'P' in prints: + print(f'Performance on Clean Data [P] ({task})') + for metric_i, metric_name in enumerate(metrics): + print(f'{metric_name:5} = {P[metric_i]:0.3f}') + if 'mPC' in prints: + print(f'Mean Performance under Corruption [mPC] ({task})') + for metric_i, metric_name in enumerate(metrics): + print(f'{metric_name:5} = {mPC[metric_i]:0.3f}') + if 'rPC' in prints: + print(f'Relative Performance under Corruption [rPC] ({task})') + for metric_i, metric_name in enumerate(metrics): + print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %') + + return results + + +def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'): + + assert aggregate in ['benchmark', 'all'] + + if prints == 'all': + prints = ['P', 'mPC', 'rPC'] + elif isinstance(prints, str): + prints = [prints] + for p in prints: + assert p in ['P', 'mPC', 'rPC'] + + eval_output = mmcv.load(filename) + + num_distortions = len(list(eval_output.keys())) + results = np.zeros((num_distortions, 6, 20), dtype='float32') + + for i, distortion in enumerate(eval_output): + for severity in eval_output[distortion]: + mAP = [ + eval_output[distortion][severity][j]['ap'] + for j in range(len(eval_output[distortion][severity])) + ] + results[i, severity, :] = mAP + + P = results[0, 0, :] + if aggregate == 'benchmark': + mPC = np.mean(results[:15, 1:, :], axis=(0, 1)) + else: + mPC = np.mean(results[:, 1:, :], axis=(0, 1)) + rPC = mPC / P + + print(f'\nmodel: {osp.basename(filename)}') + if 'P' in prints: + print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}') + if 'mPC' in prints: + print('Mean Performance under Corruption [mPC] in AP50 = ' + f'{np.mean(mPC):0.3f}') + if 'rPC' in prints: + print('Relative Performance under Corruption [rPC] in % = ' + f'{np.mean(rPC) * 100:0.1f}') + + return np.mean(results, axis=2, keepdims=True) + + +def get_results(filename, + dataset='coco', + task='bbox', + metric=None, + prints='mPC', + aggregate='benchmark'): + assert dataset in ['coco', 'voc', 'cityscapes'] + + if dataset in ['coco', 'cityscapes']: + results = get_coco_style_results( + filename, + task=task, + metric=metric, + prints=prints, + aggregate=aggregate) + elif dataset == 'voc': + if task != 'bbox': + print('Only bbox analysis is supported for Pascal VOC') + print('Will report bbox results\n') + if metric not in [None, ['AP'], ['AP50']]: + print('Only the AP50 metric is supported for Pascal VOC') + print('Will report AP50 metric\n') + results = get_voc_style_results( + filename, prints=prints, aggregate=aggregate) + + return results + + +def get_distortions_from_file(filename): + + eval_output = mmcv.load(filename) + + return get_distortions_from_results(eval_output) + + +def get_distortions_from_results(eval_output): + distortions = [] + for i, distortion in enumerate(eval_output): + distortions.append(distortion.replace('_', ' ')) + return distortions + + +def main(): + parser = ArgumentParser(description='Corruption Result Analysis') + parser.add_argument('filename', 
help='result file path') + parser.add_argument( + '--dataset', + type=str, + choices=['coco', 'voc', 'cityscapes'], + default='coco', + help='dataset type') + parser.add_argument( + '--task', + type=str, + nargs='+', + choices=['bbox', 'segm'], + default=['bbox'], + help='task to report') + parser.add_argument( + '--metric', + nargs='+', + choices=[ + None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', + 'AR100', 'ARs', 'ARm', 'ARl' + ], + default=None, + help='metric to report') + parser.add_argument( + '--prints', + type=str, + nargs='+', + choices=['P', 'mPC', 'rPC'], + default='mPC', + help='corruption benchmark metric to print') + parser.add_argument( + '--aggregate', + type=str, + choices=['all', 'benchmark'], + default='benchmark', + help='aggregate all results or only those \ + for benchmark corruptions') + + args = parser.parse_args() + + for task in args.task: + get_results( + args.filename, + dataset=args.dataset, + task=task, + metric=args.metric, + prints=args.prints, + aggregate=args.aggregate) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/analysis_tools/test_robustness.py b/downstream/mmdetection/tools/analysis_tools/test_robustness.py new file mode 100644 index 0000000..0c1ddbe --- /dev/null +++ b/downstream/mmdetection/tools/analysis_tools/test_robustness.py @@ -0,0 +1,387 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import copy +import os +import os.path as osp + +import mmcv +import torch +from mmcv import DictAction +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, + wrap_fp16_model) +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + +from mmdet import datasets +from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test +from mmdet.core import eval_map +from mmdet.datasets import build_dataloader, build_dataset +from mmdet.models import build_detector +from tools.analysis_tools.robustness_eval import get_results + + +def coco_eval_with_return(result_files, + result_types, + coco, + max_dets=(100, 300, 1000)): + for res_type in result_types: + assert res_type in ['proposal', 'bbox', 'segm', 'keypoints'] + + if mmcv.is_str(coco): + coco = COCO(coco) + assert isinstance(coco, COCO) + + eval_results = {} + for res_type in result_types: + result_file = result_files[res_type] + assert result_file.endswith('.json') + + coco_dets = coco.loadRes(result_file) + img_ids = coco.getImgIds() + iou_type = 'bbox' if res_type == 'proposal' else res_type + cocoEval = COCOeval(coco, coco_dets, iou_type) + cocoEval.params.imgIds = img_ids + if res_type == 'proposal': + cocoEval.params.useCats = 0 + cocoEval.params.maxDets = list(max_dets) + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + if res_type == 'segm' or res_type == 'bbox': + metric_names = [ + 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', + 'AR100', 'ARs', 'ARm', 'ARl' + ] + eval_results[res_type] = { + metric_names[i]: cocoEval.stats[i] + for i in range(len(metric_names)) + } + else: + eval_results[res_type] = cocoEval.stats + + return eval_results + + +def voc_eval_with_return(result_file, + dataset, + iou_thr=0.5, + logger='print', + only_ap=True): + det_results = mmcv.load(result_file) + annotations = [dataset.get_ann_info(i) for i in range(len(dataset))] + if hasattr(dataset, 'year') and dataset.year == 2007: + dataset_name = 'voc07' + else: + dataset_name = dataset.CLASSES + mean_ap, 
eval_results = eval_map( + det_results, + annotations, + scale_ranges=None, + iou_thr=iou_thr, + dataset=dataset_name, + logger=logger) + + if only_ap: + eval_results = [{ + 'ap': eval_results[i]['ap'] + } for i in range(len(eval_results))] + + return mean_ap, eval_results + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMDet test detector') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='output result file') + parser.add_argument( + '--corruptions', + type=str, + nargs='+', + default='benchmark', + choices=[ + 'all', 'benchmark', 'noise', 'blur', 'weather', 'digital', + 'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise', + 'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', + 'frost', 'fog', 'brightness', 'contrast', 'elastic_transform', + 'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur', + 'spatter', 'saturate' + ], + help='corruptions') + parser.add_argument( + '--severities', + type=int, + nargs='+', + default=[0, 1, 2, 3, 4, 5], + help='corruption severity levels') + parser.add_argument( + '--eval', + type=str, + nargs='+', + choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'], + help='eval types') + parser.add_argument( + '--iou-thr', + type=float, + default=0.5, + help='IoU threshold for pascal voc evaluation') + parser.add_argument( + '--summaries', + type=bool, + default=False, + help='Print summaries for every corruption and severity') + parser.add_argument( + '--workers', type=int, default=32, help='workers per gpu') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where painted images will be saved') + parser.add_argument( + '--show-score-thr', + type=float, + default=0.3, + help='score threshold (default: 0.3)') + parser.add_argument('--tmpdir', help='tmp dir for writing some results') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument( + '--final-prints', + type=str, + nargs='+', + choices=['P', 'mPC', 'rPC'], + default='mPC', + help='corruption benchmark metric to print at the end') + parser.add_argument( + '--final-prints-aggregate', + type=str, + choices=['all', 'benchmark'], + default='benchmark', + help='aggregate all results or only those for benchmark corruptions') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def main(): + args = parse_args() + + assert args.out or args.show or args.show_dir, \ + ('Please specify at least one operation (save or show the results) ' + 'with the argument "--out", "--show" or "show-dir"') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + if args.workers == 0: + args.workers = cfg.data.workers_per_gpu + + # init distributed env first, since logger depends on the dist info. + if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # set random seeds + if args.seed is not None: + set_random_seed(args.seed) + + if 'all' in args.corruptions: + corruptions = [ + 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', + 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', + 'brightness', 'contrast', 'elastic_transform', 'pixelate', + 'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter', + 'saturate' + ] + elif 'benchmark' in args.corruptions: + corruptions = [ + 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', + 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', + 'brightness', 'contrast', 'elastic_transform', 'pixelate', + 'jpeg_compression' + ] + elif 'noise' in args.corruptions: + corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise'] + elif 'blur' in args.corruptions: + corruptions = [ + 'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur' + ] + elif 'weather' in args.corruptions: + corruptions = ['snow', 'frost', 'fog', 'brightness'] + elif 'digital' in args.corruptions: + corruptions = [ + 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression' + ] + elif 'holdout' in args.corruptions: + corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate'] + elif 'None' in args.corruptions: + corruptions = ['None'] + args.severities = [0] + else: + corruptions = args.corruptions + + rank, _ = get_dist_info() + aggregated_results = {} + for corr_i, corruption in enumerate(corruptions): + aggregated_results[corruption] = {} + for sev_i, corruption_severity in enumerate(args.severities): + # evaluate severity 0 (= no corruption) only once + if corr_i > 0 and corruption_severity == 0: + aggregated_results[corruption][0] = \ + aggregated_results[corruptions[0]][0] + continue + + test_data_cfg = copy.deepcopy(cfg.data.test) + # assign corruption and severity + if corruption_severity > 0: + corruption_trans = dict( + type='Corrupt', + corruption=corruption, + severity=corruption_severity) + # TODO: hard coded "1", we assume that the first step is + # loading images, which needs to be fixed in the future + test_data_cfg['pipeline'].insert(1, corruption_trans) + + # print info + print(f'\nTesting {corruption} at severity {corruption_severity}') + + # build the dataloader + # TODO: support multiple images per gpu + # (only minor changes are needed) + dataset = build_dataset(test_data_cfg) + data_loader = 
build_dataloader( + dataset, + samples_per_gpu=1, + workers_per_gpu=args.workers, + dist=distributed, + shuffle=False) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + checkpoint = load_checkpoint( + model, args.checkpoint, map_location='cpu') + # old versions did not save class info in checkpoints, + # this walkaround is for backward compatibility + if 'CLASSES' in checkpoint.get('meta', {}): + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + model.CLASSES = dataset.CLASSES + + if not distributed: + model = MMDataParallel(model, device_ids=[0]) + show_dir = args.show_dir + if show_dir is not None: + show_dir = osp.join(show_dir, corruption) + show_dir = osp.join(show_dir, str(corruption_severity)) + if not osp.exists(show_dir): + osp.makedirs(show_dir) + outputs = single_gpu_test(model, data_loader, args.show, + show_dir, args.show_score_thr) + else: + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False) + outputs = multi_gpu_test(model, data_loader, args.tmpdir) + + if args.out and rank == 0: + eval_results_filename = ( + osp.splitext(args.out)[0] + '_results' + + osp.splitext(args.out)[1]) + mmcv.dump(outputs, args.out) + eval_types = args.eval + if cfg.dataset_type == 'VOCDataset': + if eval_types: + for eval_type in eval_types: + if eval_type == 'bbox': + test_dataset = mmcv.runner.obj_from_dict( + cfg.data.test, datasets) + logger = 'print' if args.summaries else None + mean_ap, eval_results = \ + voc_eval_with_return( + args.out, test_dataset, + args.iou_thr, logger) + aggregated_results[corruption][ + corruption_severity] = eval_results + else: + print('\nOnly "bbox" evaluation \ + is supported for pascal voc') + else: + if eval_types: + print(f'Starting evaluate {" and ".join(eval_types)}') + if eval_types == ['proposal_fast']: + result_file = args.out + else: + if not isinstance(outputs[0], dict): + result_files = dataset.results2json( + outputs, args.out) + else: + for name in outputs[0]: + print(f'\nEvaluating {name}') + outputs_ = [out[name] for out in outputs] + result_file = args.out + + f'.{name}' + result_files = dataset.results2json( + outputs_, result_file) + eval_results = coco_eval_with_return( + result_files, eval_types, dataset.coco) + aggregated_results[corruption][ + corruption_severity] = eval_results + else: + print('\nNo task was selected for evaluation;' + '\nUse --eval to select a task') + + # save results after each evaluation + mmcv.dump(aggregated_results, eval_results_filename) + + if rank == 0: + # print final results + print('\nAggregated results:') + prints = args.final_prints + aggregate = args.final_prints_aggregate + + if cfg.dataset_type == 'VOCDataset': + get_results( + eval_results_filename, + dataset='voc', + prints=prints, + aggregate=aggregate) + else: + get_results( + eval_results_filename, + dataset='coco', + prints=prints, + aggregate=aggregate) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/dataset_converters/cityscapes.py b/downstream/mmdetection/tools/dataset_converters/cityscapes.py new file mode 100644 index 0000000..c8e44b9 --- /dev/null +++ b/downstream/mmdetection/tools/dataset_converters/cityscapes.py @@ -0,0 +1,152 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
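# ---- Illustrative aside (not part of the patch above) ----------------------
# How robustness_eval.py summarises the per-corruption results written by
# test_robustness.py above: P is clean performance (severity 0), mPC averages
# severities 1-5 over the benchmarked corruptions, and rPC = mPC / P.
# Standalone sketch with made-up AP values, one row per corruption and one
# column per severity (0..5):
import numpy as np

results = np.array([[0.40, 0.35, 0.30, 0.25, 0.20, 0.15],
                    [0.40, 0.33, 0.28, 0.22, 0.18, 0.12]])
P = results[0, 0]
mPC = results[:, 1:].mean()
rPC = mPC / P
print(f'P={P:.3f}  mPC={mPC:.3f}  rPC={rPC:.1%}')
# -----------------------------------------------------------------------------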
+import argparse +import glob +import os.path as osp + +import cityscapesscripts.helpers.labels as CSLabels +import mmcv +import numpy as np +import pycocotools.mask as maskUtils + + +def collect_files(img_dir, gt_dir): + suffix = 'leftImg8bit.png' + files = [] + for img_file in glob.glob(osp.join(img_dir, '**/*.png')): + assert img_file.endswith(suffix), img_file + inst_file = gt_dir + img_file[ + len(img_dir):-len(suffix)] + 'gtFine_instanceIds.png' + # Note that labelIds are not converted to trainId for seg map + segm_file = gt_dir + img_file[ + len(img_dir):-len(suffix)] + 'gtFine_labelIds.png' + files.append((img_file, inst_file, segm_file)) + assert len(files), f'No images found in {img_dir}' + print(f'Loaded {len(files)} images from {img_dir}') + + return files + + +def collect_annotations(files, nproc=1): + print('Loading annotation images') + if nproc > 1: + images = mmcv.track_parallel_progress( + load_img_info, files, nproc=nproc) + else: + images = mmcv.track_progress(load_img_info, files) + + return images + + +def load_img_info(files): + img_file, inst_file, segm_file = files + inst_img = mmcv.imread(inst_file, 'unchanged') + # ids < 24 are stuff labels (filtering them first is about 5% faster) + unique_inst_ids = np.unique(inst_img[inst_img >= 24]) + anno_info = [] + for inst_id in unique_inst_ids: + # For non-crowd annotations, inst_id // 1000 is the label_id + # Crowd annotations have <1000 instance ids + label_id = inst_id // 1000 if inst_id >= 1000 else inst_id + label = CSLabels.id2label[label_id] + if not label.hasInstances or label.ignoreInEval: + continue + + category_id = label.id + iscrowd = int(inst_id < 1000) + mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F') + mask_rle = maskUtils.encode(mask[:, :, None])[0] + + area = maskUtils.area(mask_rle) + # convert to COCO style XYWH format + bbox = maskUtils.toBbox(mask_rle) + + # for json encoding + mask_rle['counts'] = mask_rle['counts'].decode() + + anno = dict( + iscrowd=iscrowd, + category_id=category_id, + bbox=bbox.tolist(), + area=area.tolist(), + segmentation=mask_rle) + anno_info.append(anno) + video_name = osp.basename(osp.dirname(img_file)) + img_info = dict( + # remove img_prefix for filename + file_name=osp.join(video_name, osp.basename(img_file)), + height=inst_img.shape[0], + width=inst_img.shape[1], + anno_info=anno_info, + segm_file=osp.join(video_name, osp.basename(segm_file))) + + return img_info + + +def cvt_annotations(image_infos, out_json_name): + out_json = dict() + img_id = 0 + ann_id = 0 + out_json['images'] = [] + out_json['categories'] = [] + out_json['annotations'] = [] + for image_info in image_infos: + image_info['id'] = img_id + anno_infos = image_info.pop('anno_info') + out_json['images'].append(image_info) + for anno_info in anno_infos: + anno_info['image_id'] = img_id + anno_info['id'] = ann_id + out_json['annotations'].append(anno_info) + ann_id += 1 + img_id += 1 + for label in CSLabels.labels: + if label.hasInstances and not label.ignoreInEval: + cat = dict(id=label.id, name=label.name) + out_json['categories'].append(cat) + + if len(out_json['annotations']) == 0: + out_json.pop('annotations') + + mmcv.dump(out_json, out_json_name) + return out_json + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert Cityscapes annotations to COCO format') + parser.add_argument('cityscapes_path', help='cityscapes data path') + parser.add_argument('--img-dir', default='leftImg8bit', type=str) + parser.add_argument('--gt-dir', default='gtFine', type=str) 
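# ---- Illustrative aside (not part of the patch above) ----------------------
# The id arithmetic decoded in load_img_info() above follows the Cityscapes
# convention: pixels of countable objects are stored as label_id * 1000 +
# instance index, while crowd regions keep the bare label_id (< 1000).
# The ids below are made up (26 is the Cityscapes label id for 'car'):
for inst_id in (26001, 26002, 24005, 26):
    label_id = inst_id // 1000 if inst_id >= 1000 else inst_id
    iscrowd = int(inst_id < 1000)
    print(f'inst_id={inst_id:5d} -> label_id={label_id}, iscrowd={iscrowd}')
# -----------------------------------------------------------------------------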
+ parser.add_argument('-o', '--out-dir', help='output path') + parser.add_argument( + '--nproc', default=1, type=int, help='number of process') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + cityscapes_path = args.cityscapes_path + out_dir = args.out_dir if args.out_dir else cityscapes_path + mmcv.mkdir_or_exist(out_dir) + + img_dir = osp.join(cityscapes_path, args.img_dir) + gt_dir = osp.join(cityscapes_path, args.gt_dir) + + set_name = dict( + train='instancesonly_filtered_gtFine_train.json', + val='instancesonly_filtered_gtFine_val.json', + test='instancesonly_filtered_gtFine_test.json') + + for split, json_name in set_name.items(): + print(f'Converting {split} into {json_name}') + with mmcv.Timer( + print_tmpl='It took {}s to convert Cityscapes annotation'): + files = collect_files( + osp.join(img_dir, split), osp.join(gt_dir, split)) + image_infos = collect_annotations(files, nproc=args.nproc) + cvt_annotations(image_infos, osp.join(out_dir, json_name)) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/dataset_converters/images2coco.py b/downstream/mmdetection/tools/dataset_converters/images2coco.py new file mode 100644 index 0000000..1c4e2f1 --- /dev/null +++ b/downstream/mmdetection/tools/dataset_converters/images2coco.py @@ -0,0 +1,101 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os + +import mmcv +from PIL import Image + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert images to coco format without annotations') + parser.add_argument('img_path', help='The root path of images') + parser.add_argument( + 'classes', type=str, help='The text file name of storage class list') + parser.add_argument( + 'out', + type=str, + help='The output annotation json file name, The save dir is in the ' + 'same directory as img_path') + parser.add_argument( + '-e', + '--exclude-extensions', + type=str, + nargs='+', + help='The suffix of images to be excluded, such as "png" and "bmp"') + args = parser.parse_args() + return args + + +def collect_image_infos(path, exclude_extensions=None): + img_infos = [] + + images_generator = mmcv.scandir(path, recursive=True) + for image_path in mmcv.track_iter_progress(list(images_generator)): + if exclude_extensions is None or ( + exclude_extensions is not None + and not image_path.lower().endswith(exclude_extensions)): + image_path = os.path.join(path, image_path) + img_pillow = Image.open(image_path) + img_info = { + 'filename': image_path, + 'width': img_pillow.width, + 'height': img_pillow.height, + } + img_infos.append(img_info) + return img_infos + + +def cvt_to_coco_json(img_infos, classes): + image_id = 0 + coco = dict() + coco['images'] = [] + coco['type'] = 'instance' + coco['categories'] = [] + coco['annotations'] = [] + image_set = set() + + for category_id, name in enumerate(classes): + category_item = dict() + category_item['supercategory'] = str('none') + category_item['id'] = int(category_id) + category_item['name'] = str(name) + coco['categories'].append(category_item) + + for img_dict in img_infos: + file_name = img_dict['filename'] + assert file_name not in image_set + image_item = dict() + image_item['id'] = int(image_id) + image_item['file_name'] = str(file_name) + image_item['height'] = int(img_dict['height']) + image_item['width'] = int(img_dict['width']) + coco['images'].append(image_item) + image_set.add(file_name) + + image_id += 1 + return coco + + +def main(): + args = parse_args() + assert 
args.out.endswith( + 'json'), 'The output file name must be json suffix' + + # 1 load image list info + img_infos = collect_image_infos(args.img_path, args.exclude_extensions) + + # 2 convert to coco format data + classes = mmcv.list_from_file(args.classes) + coco_info = cvt_to_coco_json(img_infos, classes) + + # 3 dump + save_dir = os.path.join(args.img_path, '..', 'annotations') + mmcv.mkdir_or_exist(save_dir) + save_path = os.path.join(save_dir, args.out) + mmcv.dump(coco_info, save_path) + print(f'save json file: {save_path}') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/dataset_converters/pascal_voc.py b/downstream/mmdetection/tools/dataset_converters/pascal_voc.py new file mode 100644 index 0000000..20f8801 --- /dev/null +++ b/downstream/mmdetection/tools/dataset_converters/pascal_voc.py @@ -0,0 +1,237 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv +import numpy as np + +from mmdet.core import voc_classes + +label_ids = {name: i for i, name in enumerate(voc_classes())} + + +def parse_xml(args): + xml_path, img_path = args + tree = ET.parse(xml_path) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + bboxes = [] + labels = [] + bboxes_ignore = [] + labels_ignore = [] + for obj in root.findall('object'): + name = obj.find('name').text + label = label_ids[name] + difficult = int(obj.find('difficult').text) + bnd_box = obj.find('bndbox') + bbox = [ + int(bnd_box.find('xmin').text), + int(bnd_box.find('ymin').text), + int(bnd_box.find('xmax').text), + int(bnd_box.find('ymax').text) + ] + if difficult: + bboxes_ignore.append(bbox) + labels_ignore.append(label) + else: + bboxes.append(bbox) + labels.append(label) + if not bboxes: + bboxes = np.zeros((0, 4)) + labels = np.zeros((0, )) + else: + bboxes = np.array(bboxes, ndmin=2) - 1 + labels = np.array(labels) + if not bboxes_ignore: + bboxes_ignore = np.zeros((0, 4)) + labels_ignore = np.zeros((0, )) + else: + bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 + labels_ignore = np.array(labels_ignore) + annotation = { + 'filename': img_path, + 'width': w, + 'height': h, + 'ann': { + 'bboxes': bboxes.astype(np.float32), + 'labels': labels.astype(np.int64), + 'bboxes_ignore': bboxes_ignore.astype(np.float32), + 'labels_ignore': labels_ignore.astype(np.int64) + } + } + return annotation + + +def cvt_annotations(devkit_path, years, split, out_file): + if not isinstance(years, list): + years = [years] + annotations = [] + for year in years: + filelist = osp.join(devkit_path, + f'VOC{year}/ImageSets/Main/{split}.txt') + if not osp.isfile(filelist): + print(f'filelist does not exist: {filelist}, ' + f'skip voc{year} {split}') + return + img_names = mmcv.list_from_file(filelist) + xml_paths = [ + osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml') + for img_name in img_names + ] + img_paths = [ + f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names + ] + part_annotations = mmcv.track_progress(parse_xml, + list(zip(xml_paths, img_paths))) + annotations.extend(part_annotations) + if out_file.endswith('json'): + annotations = cvt_to_coco_json(annotations) + mmcv.dump(annotations, out_file) + return annotations + + +def cvt_to_coco_json(annotations): + image_id = 0 + annotation_id = 0 + coco = dict() + coco['images'] = [] + coco['type'] = 'instance' + coco['categories'] = [] + coco['annotations'] = [] + image_set = set() + + 
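# ---- Illustrative aside (not part of the patch above) ----------------------
# parse_xml() above subtracts 1 from every coordinate because PASCAL VOC
# bounding boxes are 1-based [xmin, ymin, xmax, ymax].  Standalone sketch with
# made-up boxes:
import numpy as np

voc_boxes = [[49, 17, 200, 120], [3, 5, 64, 64]]          # as read from XML
bboxes = np.array(voc_boxes, ndmin=2) - 1                 # 0-based pixel coords
print(bboxes.astype(np.float32))
# -----------------------------------------------------------------------------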
def addAnnItem(annotation_id, image_id, category_id, bbox, difficult_flag): + annotation_item = dict() + annotation_item['segmentation'] = [] + + seg = [] + # bbox[] is x1,y1,x2,y2 + # left_top + seg.append(int(bbox[0])) + seg.append(int(bbox[1])) + # left_bottom + seg.append(int(bbox[0])) + seg.append(int(bbox[3])) + # right_bottom + seg.append(int(bbox[2])) + seg.append(int(bbox[3])) + # right_top + seg.append(int(bbox[2])) + seg.append(int(bbox[1])) + + annotation_item['segmentation'].append(seg) + + xywh = np.array( + [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]) + annotation_item['area'] = int(xywh[2] * xywh[3]) + if difficult_flag == 1: + annotation_item['ignore'] = 0 + annotation_item['iscrowd'] = 1 + else: + annotation_item['ignore'] = 0 + annotation_item['iscrowd'] = 0 + annotation_item['image_id'] = int(image_id) + annotation_item['bbox'] = xywh.astype(int).tolist() + annotation_item['category_id'] = int(category_id) + annotation_item['id'] = int(annotation_id) + coco['annotations'].append(annotation_item) + return annotation_id + 1 + + for category_id, name in enumerate(voc_classes()): + category_item = dict() + category_item['supercategory'] = str('none') + category_item['id'] = int(category_id) + category_item['name'] = str(name) + coco['categories'].append(category_item) + + for ann_dict in annotations: + file_name = ann_dict['filename'] + ann = ann_dict['ann'] + assert file_name not in image_set + image_item = dict() + image_item['id'] = int(image_id) + image_item['file_name'] = str(file_name) + image_item['height'] = int(ann_dict['height']) + image_item['width'] = int(ann_dict['width']) + coco['images'].append(image_item) + image_set.add(file_name) + + bboxes = ann['bboxes'][:, :4] + labels = ann['labels'] + for bbox_id in range(len(bboxes)): + bbox = bboxes[bbox_id] + label = labels[bbox_id] + annotation_id = addAnnItem( + annotation_id, image_id, label, bbox, difficult_flag=0) + + bboxes_ignore = ann['bboxes_ignore'][:, :4] + labels_ignore = ann['labels_ignore'] + for bbox_id in range(len(bboxes_ignore)): + bbox = bboxes_ignore[bbox_id] + label = labels_ignore[bbox_id] + annotation_id = addAnnItem( + annotation_id, image_id, label, bbox, difficult_flag=1) + + image_id += 1 + + return coco + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert PASCAL VOC annotations to mmdetection format') + parser.add_argument('devkit_path', help='pascal voc devkit path') + parser.add_argument('-o', '--out-dir', help='output path') + parser.add_argument( + '--out-format', + default='pkl', + choices=('pkl', 'coco'), + help='output format, "coco" indicates coco annotation format') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + devkit_path = args.devkit_path + out_dir = args.out_dir if args.out_dir else devkit_path + mmcv.mkdir_or_exist(out_dir) + + years = [] + if osp.isdir(osp.join(devkit_path, 'VOC2007')): + years.append('2007') + if osp.isdir(osp.join(devkit_path, 'VOC2012')): + years.append('2012') + if '2007' in years and '2012' in years: + years.append(['2007', '2012']) + if not years: + raise IOError(f'The devkit path {devkit_path} contains neither ' + '"VOC2007" nor "VOC2012" subfolder') + out_fmt = f'.{args.out_format}' + if args.out_format == 'coco': + out_fmt = '.json' + for year in years: + if year == '2007': + prefix = 'voc07' + elif year == '2012': + prefix = 'voc12' + elif year == ['2007', '2012']: + prefix = 'voc0712' + for split in ['train', 'val', 'trainval']: + dataset_name = prefix + '_' + split 
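# ---- Illustrative aside (not part of the patch above) ----------------------
# What addAnnItem() above stores per box: the [x1, y1, x2, y2] box becomes a
# COCO-style [x, y, w, h] bbox plus a 4-corner polygon "segmentation".
# Made-up coordinates for illustration:
x1, y1, x2, y2 = 48, 16, 199, 119
xywh = [x1, y1, x2 - x1, y2 - y1]
seg = [x1, y1, x1, y2, x2, y2, x2, y1]    # left-top, left-bottom, right-bottom, right-top
area = xywh[2] * xywh[3]
print(xywh, seg, area)
# -----------------------------------------------------------------------------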
+ print(f'processing {dataset_name} ...') + cvt_annotations(devkit_path, year, split, + osp.join(out_dir, dataset_name + out_fmt)) + if not isinstance(year, list): + dataset_name = prefix + '_test' + print(f'processing {dataset_name} ...') + cvt_annotations(devkit_path, year, 'test', + osp.join(out_dir, dataset_name + out_fmt)) + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/deployment/mmdet2torchserve.py b/downstream/mmdetection/tools/deployment/mmdet2torchserve.py new file mode 100644 index 0000000..70a081a --- /dev/null +++ b/downstream/mmdetection/tools/deployment/mmdet2torchserve.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser, Namespace +from pathlib import Path +from tempfile import TemporaryDirectory + +import mmcv + +try: + from model_archiver.model_packaging import package_model + from model_archiver.model_packaging_utils import ModelExportUtils +except ImportError: + package_model = None + + +def mmdet2torchserve( + config_file: str, + checkpoint_file: str, + output_folder: str, + model_name: str, + model_version: str = '1.0', + force: bool = False, +): + """Converts MMDetection model (config + checkpoint) to TorchServe `.mar`. + + Args: + config_file: + In MMDetection config format. + The contents vary for each task repository. + checkpoint_file: + In MMDetection checkpoint format. + The contents vary for each task repository. + output_folder: + Folder where `{model_name}.mar` will be created. + The file created will be in TorchServe archive format. + model_name: + If not None, used for naming the `{model_name}.mar` file + that will be created under `output_folder`. + If None, `{Path(checkpoint_file).stem}` will be used. + model_version: + Model's version. + force: + If True, if there is an existing `{model_name}.mar` + file under `output_folder` it will be overwritten. + """ + mmcv.mkdir_or_exist(output_folder) + + config = mmcv.Config.fromfile(config_file) + + with TemporaryDirectory() as tmpdir: + config.dump(f'{tmpdir}/config.py') + + args = Namespace( + **{ + 'model_file': f'{tmpdir}/config.py', + 'serialized_file': checkpoint_file, + 'handler': f'{Path(__file__).parent}/mmdet_handler.py', + 'model_name': model_name or Path(checkpoint_file).stem, + 'version': model_version, + 'export_path': output_folder, + 'force': force, + 'requirements_file': None, + 'extra_files': None, + 'runtime': 'python', + 'archive_format': 'default' + }) + manifest = ModelExportUtils.generate_manifest_json(args) + package_model(args, manifest) + + +def parse_args(): + parser = ArgumentParser( + description='Convert MMDetection models to TorchServe `.mar` format.') + parser.add_argument('config', type=str, help='config file path') + parser.add_argument('checkpoint', type=str, help='checkpoint file path') + parser.add_argument( + '--output-folder', + type=str, + required=True, + help='Folder where `{model_name}.mar` will be created.') + parser.add_argument( + '--model-name', + type=str, + default=None, + help='If not None, used for naming the `{model_name}.mar`' + 'file that will be created under `output_folder`.' 
+ 'If None, `{Path(checkpoint_file).stem}` will be used.') + parser.add_argument( + '--model-version', + type=str, + default='1.0', + help='Number used for versioning.') + parser.add_argument( + '-f', + '--force', + action='store_true', + help='overwrite the existing `{model_name}.mar`') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if package_model is None: + raise ImportError('`torch-model-archiver` is required.' + 'Try: pip install torch-model-archiver') + + mmdet2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/downstream/mmdetection/tools/deployment/mmdet_handler.py b/downstream/mmdetection/tools/deployment/mmdet_handler.py new file mode 100644 index 0000000..18fc230 --- /dev/null +++ b/downstream/mmdetection/tools/deployment/mmdet_handler.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import base64 +import os + +import mmcv +import torch +from ts.torch_handler.base_handler import BaseHandler + +from mmdet.apis import inference_detector, init_detector + + +class MMdetHandler(BaseHandler): + threshold = 0.5 + + def initialize(self, context): + properties = context.system_properties + self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = torch.device(self.map_location + ':' + + str(properties.get('gpu_id')) if torch.cuda. + is_available() else self.map_location) + self.manifest = context.manifest + + model_dir = properties.get('model_dir') + serialized_file = self.manifest['model']['serializedFile'] + checkpoint = os.path.join(model_dir, serialized_file) + self.config_file = os.path.join(model_dir, 'config.py') + + self.model = init_detector(self.config_file, checkpoint, self.device) + self.initialized = True + + def preprocess(self, data): + images = [] + + for row in data: + image = row.get('data') or row.get('body') + if isinstance(image, str): + image = base64.b64decode(image) + image = mmcv.imfrombytes(image) + images.append(image) + + return images + + def inference(self, data, *args, **kwargs): + results = inference_detector(self.model, data) + return results + + def postprocess(self, data): + # Format output following the example ObjectDetectionHandler format + output = [] + for image_index, image_result in enumerate(data): + output.append([]) + if isinstance(image_result, tuple): + bbox_result, segm_result = image_result + if isinstance(segm_result, tuple): + segm_result = segm_result[0] # ms rcnn + else: + bbox_result, segm_result = image_result, None + + for class_index, class_result in enumerate(bbox_result): + class_name = self.model.CLASSES[class_index] + for bbox in class_result: + bbox_coords = bbox[:-1].tolist() + score = float(bbox[-1]) + if score >= self.threshold: + output[image_index].append({ + 'class_name': class_name, + 'bbox': bbox_coords, + 'score': score + }) + + return output diff --git a/downstream/mmdetection/tools/deployment/onnx2tensorrt.py b/downstream/mmdetection/tools/deployment/onnx2tensorrt.py new file mode 100644 index 0000000..b59e52a --- /dev/null +++ b/downstream/mmdetection/tools/deployment/onnx2tensorrt.py @@ -0,0 +1,266 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
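# ---- Illustrative aside (not part of the patch above) ----------------------
# MMdetHandler.postprocess() above flattens the per-class arrays returned by
# inference_detector into JSON-friendly records, keeping detections whose
# score (last column) clears the threshold.  Standalone sketch with made-up
# class names and detections:
import numpy as np

CLASSES = ('person', 'car')
bbox_result = [np.array([[10., 20., 50., 80., 0.9]]),     # one 'person' box
               np.array([[5., 5., 40., 40., 0.2]])]       # one 'car' box (dropped)
threshold, output = 0.5, []
for class_index, class_result in enumerate(bbox_result):
    for bbox in class_result:
        if float(bbox[-1]) >= threshold:
            output.append({'class_name': CLASSES[class_index],
                           'bbox': bbox[:-1].tolist(),
                           'score': float(bbox[-1])})
print(output)
# -----------------------------------------------------------------------------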
+import argparse +import os +import os.path as osp +import warnings + +import numpy as np +import onnx +import torch +from mmcv import Config +from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine + +from mmdet.core.export import preprocess_example_input +from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector, + TensorRTDetector) +from mmdet.datasets import DATASETS + + +def get_GiB(x: int): + """return x GiB.""" + return x * (1 << 30) + + +def onnx2tensorrt(onnx_file, + trt_file, + input_config, + verify=False, + show=False, + workspace_size=1, + verbose=False): + import tensorrt as trt + onnx_model = onnx.load(onnx_file) + max_shape = input_config['max_shape'] + min_shape = input_config['min_shape'] + opt_shape = input_config['opt_shape'] + fp16_mode = False + # create trt engine and wrapper + opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]} + max_workspace_size = get_GiB(workspace_size) + trt_engine = onnx2trt( + onnx_model, + opt_shape_dict, + log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR, + fp16_mode=fp16_mode, + max_workspace_size=max_workspace_size) + save_dir, _ = osp.split(trt_file) + if save_dir: + os.makedirs(save_dir, exist_ok=True) + save_trt_engine(trt_engine, trt_file) + print(f'Successfully created TensorRT engine: {trt_file}') + + if verify: + # prepare input + one_img, one_meta = preprocess_example_input(input_config) + img_list, img_meta_list = [one_img], [[one_meta]] + img_list = [_.cuda().contiguous() for _ in img_list] + + # wrap ONNX and TensorRT model + onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0) + trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0) + + # inference with wrapped model + with torch.no_grad(): + onnx_results = onnx_model( + img_list, img_metas=img_meta_list, return_loss=False)[0] + trt_results = trt_model( + img_list, img_metas=img_meta_list, return_loss=False)[0] + + if show: + out_file_ort, out_file_trt = None, None + else: + out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png' + show_img = one_meta['show_img'] + score_thr = 0.3 + onnx_model.show_result( + show_img, + onnx_results, + score_thr=score_thr, + show=True, + win_name='ONNXRuntime', + out_file=out_file_ort) + trt_model.show_result( + show_img, + trt_results, + score_thr=score_thr, + show=True, + win_name='TensorRT', + out_file=out_file_trt) + with_mask = trt_model.with_masks + # compare a part of result + if with_mask: + compare_pairs = list(zip(onnx_results, trt_results)) + else: + compare_pairs = [(onnx_results, trt_results)] + err_msg = 'The numerical values are different between Pytorch' + \ + ' and ONNX, but it does not necessarily mean the' + \ + ' exported ONNX model is problematic.' 
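# ---- Illustrative aside (not part of the patch above) ----------------------
# onnx2tensorrt() above builds its TensorRT optimisation profile from three
# NCHW shapes; --min-shape / --shape / --max-shape end up as the min / opt /
# max entries for the 'input' binding.  The sizes below are made up:
min_shape = (1, 3, 320, 320)
opt_shape = (1, 3, 400, 600)
max_shape = (1, 3, 608, 608)
opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}
print(opt_shape_dict)
# -----------------------------------------------------------------------------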
+ # check the numerical value + for onnx_res, pytorch_res in compare_pairs: + for o_res, p_res in zip(onnx_res, pytorch_res): + np.testing.assert_allclose( + o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg) + print('The numerical values are the same between Pytorch and ONNX') + + +def parse_normalize_cfg(test_pipeline): + transforms = None + for pipeline in test_pipeline: + if 'transforms' in pipeline: + transforms = pipeline['transforms'] + break + assert transforms is not None, 'Failed to find `transforms`' + norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize'] + assert len(norm_config_li) == 1, '`norm_config` should only have one' + norm_config = norm_config_li[0] + return norm_config + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMDetection models from ONNX to TensorRT') + parser.add_argument('config', help='test config file path') + parser.add_argument('model', help='Filename of input ONNX model') + parser.add_argument( + '--trt-file', + type=str, + default='tmp.trt', + help='Filename of output TensorRT engine') + parser.add_argument( + '--input-img', type=str, default='', help='Image for test') + parser.add_argument( + '--show', action='store_true', help='Whether to show output results') + parser.add_argument( + '--dataset', + type=str, + default='coco', + help='Dataset name. This argument is deprecated and will be \ + removed in future releases.') + parser.add_argument( + '--verify', + action='store_true', + help='Verify the outputs of ONNXRuntime and TensorRT') + parser.add_argument( + '--verbose', + action='store_true', + help='Whether to verbose logging messages while creating \ + TensorRT engine. Defaults to False.') + parser.add_argument( + '--to-rgb', + action='store_false', + help='Feed model with RGB or BGR image. Default is RGB. This \ + argument is deprecated and will be removed in future releases.') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[400, 600], + help='Input size of the model') + parser.add_argument( + '--mean', + type=float, + nargs='+', + default=[123.675, 116.28, 103.53], + help='Mean value used for preprocess input data. This argument \ + is deprecated and will be removed in future releases.') + parser.add_argument( + '--std', + type=float, + nargs='+', + default=[58.395, 57.12, 57.375], + help='Variance value used for preprocess input data. \ + This argument is deprecated and will be removed in future releases.') + parser.add_argument( + '--min-shape', + type=int, + nargs='+', + default=None, + help='Minimum input size of the model in TensorRT') + parser.add_argument( + '--max-shape', + type=int, + nargs='+', + default=None, + help='Maximum input size of the model in TensorRT') + parser.add_argument( + '--workspace-size', + type=int, + default=1, + help='Max workspace size in GiB') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + + assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.' 
+ args = parse_args() + warnings.warn( + 'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be \ + parsed directly from config file and are deprecated and will be \ + removed in future releases.') + if not args.input_img: + args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg') + + cfg = Config.fromfile(args.config) + + def parse_shape(shape): + if len(shape) == 1: + shape = (1, 3, shape[0], shape[0]) + elif len(args.shape) == 2: + shape = (1, 3) + tuple(shape) + else: + raise ValueError('invalid input shape') + return shape + + if args.shape: + input_shape = parse_shape(args.shape) + else: + img_scale = cfg.test_pipeline[1]['img_scale'] + input_shape = (1, 3, img_scale[1], img_scale[0]) + + if not args.max_shape: + max_shape = input_shape + else: + max_shape = parse_shape(args.max_shape) + + if not args.min_shape: + min_shape = input_shape + else: + min_shape = parse_shape(args.min_shape) + + dataset = DATASETS.get(cfg.data.test['type']) + assert (dataset is not None) + CLASSES = dataset.CLASSES + normalize_cfg = parse_normalize_cfg(cfg.test_pipeline) + + input_config = { + 'min_shape': min_shape, + 'opt_shape': input_shape, + 'max_shape': max_shape, + 'input_shape': input_shape, + 'input_path': args.input_img, + 'normalize_cfg': normalize_cfg + } + # Create TensorRT engine + onnx2tensorrt( + args.model, + args.trt_file, + input_config, + verify=args.verify, + show=args.show, + workspace_size=args.workspace_size, + verbose=args.verbose) + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. ' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/downstream/mmdetection/tools/deployment/pytorch2onnx.py b/downstream/mmdetection/tools/deployment/pytorch2onnx.py new file mode 100644 index 0000000..5c786f8 --- /dev/null +++ b/downstream/mmdetection/tools/deployment/pytorch2onnx.py @@ -0,0 +1,357 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
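+"""Convert MMDetection models to ONNX.
+
+A possible invocation is sketched below; the ${...} values are
+placeholders and the flags correspond to the options defined in
+``parse_args`` in this file. Only opset version 11 is supported
+(asserted in the main block).
+
+Example:
+    python tools/deployment/pytorch2onnx.py \
+        ${CONFIG_FILE} \
+        ${CHECKPOINT_FILE} \
+        --output-file ${ONNX_FILE} \
+        --shape 800 1216 \
+        --verify \
+        --dynamic-export
+"""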
+import argparse +import os.path as osp +import warnings +from functools import partial + +import numpy as np +import onnx +import torch +from mmcv import Config, DictAction + +from mmdet.core.export import build_model_from_cfg, preprocess_example_input +from mmdet.core.export.model_wrappers import ONNXRuntimeDetector + + +def pytorch2onnx(model, + input_img, + input_shape, + normalize_cfg, + opset_version=11, + show=False, + output_file='tmp.onnx', + verify=False, + test_img=None, + do_simplify=False, + dynamic_export=None, + skip_postprocess=False): + + input_config = { + 'input_shape': input_shape, + 'input_path': input_img, + 'normalize_cfg': normalize_cfg + } + # prepare input + one_img, one_meta = preprocess_example_input(input_config) + img_list, img_meta_list = [one_img], [[one_meta]] + + if skip_postprocess: + warnings.warn('Not all models support export onnx without post ' + 'process, especially two stage detectors!') + model.forward = model.forward_dummy + torch.onnx.export( + model, + one_img, + output_file, + input_names=['input'], + export_params=True, + keep_initializers_as_inputs=True, + do_constant_folding=True, + verbose=show, + opset_version=opset_version) + + print(f'Successfully exported ONNX model without ' + f'post process: {output_file}') + return + + # replace original forward function + origin_forward = model.forward + model.forward = partial( + model.forward, + img_metas=img_meta_list, + return_loss=False, + rescale=False) + + output_names = ['dets', 'labels'] + if model.with_mask: + output_names.append('masks') + input_name = 'input' + dynamic_axes = None + if dynamic_export: + dynamic_axes = { + input_name: { + 0: 'batch', + 2: 'height', + 3: 'width' + }, + 'dets': { + 0: 'batch', + 1: 'num_dets', + }, + 'labels': { + 0: 'batch', + 1: 'num_dets', + }, + } + if model.with_mask: + dynamic_axes['masks'] = {0: 'batch', 1: 'num_dets'} + + torch.onnx.export( + model, + img_list, + output_file, + input_names=[input_name], + output_names=output_names, + export_params=True, + keep_initializers_as_inputs=True, + do_constant_folding=True, + verbose=show, + opset_version=opset_version, + dynamic_axes=dynamic_axes) + + model.forward = origin_forward + + # get the custom op path + ort_custom_op_path = '' + try: + from mmcv.ops import get_onnxruntime_op_path + ort_custom_op_path = get_onnxruntime_op_path() + except (ImportError, ModuleNotFoundError): + warnings.warn('If input model has custom op from mmcv, \ + you may have to build mmcv with ONNXRuntime from source.') + + if do_simplify: + import onnxsim + + from mmdet import digit_version + + min_required_version = '0.3.0' + assert digit_version(onnxsim.__version__) >= digit_version( + min_required_version + ), f'Requires to install onnx-simplify>={min_required_version}' + + input_dic = {'input': img_list[0].detach().cpu().numpy()} + model_opt, check_ok = onnxsim.simplify( + output_file, + input_data=input_dic, + custom_lib=ort_custom_op_path, + dynamic_input_shape=dynamic_export) + if check_ok: + onnx.save(model_opt, output_file) + print(f'Successfully simplified ONNX model: {output_file}') + else: + warnings.warn('Failed to simplify ONNX model.') + print(f'Successfully exported ONNX model: {output_file}') + + if verify: + # check by onnx + onnx_model = onnx.load(output_file) + onnx.checker.check_model(onnx_model) + + # wrap onnx model + onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0) + if dynamic_export: + # scale up to test dynamic shape + h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]] + h, w 
= min(1344, h), min(1344, w) + input_config['input_shape'] = (1, 3, h, w) + + if test_img is None: + input_config['input_path'] = input_img + + # prepare input once again + one_img, one_meta = preprocess_example_input(input_config) + img_list, img_meta_list = [one_img], [[one_meta]] + + # get pytorch output + with torch.no_grad(): + pytorch_results = model( + img_list, + img_metas=img_meta_list, + return_loss=False, + rescale=True)[0] + + img_list = [_.cuda().contiguous() for _ in img_list] + if dynamic_export: + img_list = img_list + [_.flip(-1).contiguous() for _ in img_list] + img_meta_list = img_meta_list * 2 + # get onnx output + onnx_results = onnx_model( + img_list, img_metas=img_meta_list, return_loss=False)[0] + # visualize predictions + score_thr = 0.3 + if show: + out_file_ort, out_file_pt = None, None + else: + out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png' + + show_img = one_meta['show_img'] + model.show_result( + show_img, + pytorch_results, + score_thr=score_thr, + show=True, + win_name='PyTorch', + out_file=out_file_pt) + onnx_model.show_result( + show_img, + onnx_results, + score_thr=score_thr, + show=True, + win_name='ONNXRuntime', + out_file=out_file_ort) + + # compare a part of result + if model.with_mask: + compare_pairs = list(zip(onnx_results, pytorch_results)) + else: + compare_pairs = [(onnx_results, pytorch_results)] + err_msg = 'The numerical values are different between Pytorch' + \ + ' and ONNX, but it does not necessarily mean the' + \ + ' exported ONNX model is problematic.' + # check the numerical value + for onnx_res, pytorch_res in compare_pairs: + for o_res, p_res in zip(onnx_res, pytorch_res): + np.testing.assert_allclose( + o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg) + print('The numerical values are the same between Pytorch and ONNX') + + +def parse_normalize_cfg(test_pipeline): + transforms = None + for pipeline in test_pipeline: + if 'transforms' in pipeline: + transforms = pipeline['transforms'] + break + assert transforms is not None, 'Failed to find `transforms`' + norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize'] + assert len(norm_config_li) == 1, '`norm_config` should only have one' + norm_config = norm_config_li[0] + return norm_config + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMDetection models to ONNX') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--input-img', type=str, help='Images for input') + parser.add_argument( + '--show', + action='store_true', + help='Show onnx graph and detection outputs') + parser.add_argument('--output-file', type=str, default='tmp.onnx') + parser.add_argument('--opset-version', type=int, default=11) + parser.add_argument( + '--test-img', type=str, default=None, help='Images for test') + parser.add_argument( + '--dataset', + type=str, + default='coco', + help='Dataset name. 
This argument is deprecated and will be removed \ + in future releases.') + parser.add_argument( + '--verify', + action='store_true', + help='verify the onnx model output against pytorch output') + parser.add_argument( + '--simplify', + action='store_true', + help='Whether to simplify onnx model.') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[800, 1216], + help='input image size') + parser.add_argument( + '--mean', + type=float, + nargs='+', + default=[123.675, 116.28, 103.53], + help='mean value used for preprocess input data.This argument \ + is deprecated and will be removed in future releases.') + parser.add_argument( + '--std', + type=float, + nargs='+', + default=[58.395, 57.12, 57.375], + help='variance value used for preprocess input data. ' + 'This argument is deprecated and will be removed in future releases.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='Override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--dynamic-export', + action='store_true', + help='Whether to export onnx with dynamic axis.') + parser.add_argument( + '--skip-postprocess', + action='store_true', + help='Whether to export model without post process. Experimental ' + 'option. We do not guarantee the correctness of the exported ' + 'model.') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + warnings.warn('Arguments like `--mean`, `--std`, `--dataset` would be \ + parsed directly from config file and are deprecated and \ + will be removed in future releases.') + + assert args.opset_version == 11, 'MMDet only support opset 11 now' + + try: + from mmcv.onnx.symbolic import register_extra_symbolics + except ModuleNotFoundError: + raise NotImplementedError('please update mmcv to version>=v1.0.4') + register_extra_symbolics(args.opset_version) + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + if args.shape is None: + img_scale = cfg.test_pipeline[1]['img_scale'] + input_shape = (1, 3, img_scale[1], img_scale[0]) + elif len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = (1, 3) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + # build the model and load checkpoint + model = build_model_from_cfg(args.config, args.checkpoint, + args.cfg_options) + + if not args.input_img: + args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg') + + normalize_cfg = parse_normalize_cfg(cfg.test_pipeline) + + # convert model to onnx file + pytorch2onnx( + model, + args.input_img, + input_shape, + normalize_cfg, + opset_version=args.opset_version, + show=args.show, + output_file=args.output_file, + verify=args.verify, + test_img=args.test_img, + do_simplify=args.simplify, + dynamic_export=args.dynamic_export, + skip_postprocess=args.skip_postprocess) + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 
'DeprecationWarning: This tool will be deprecated in future. ' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/downstream/mmdetection/tools/deployment/test.py b/downstream/mmdetection/tools/deployment/test.py new file mode 100644 index 0000000..db8d696 --- /dev/null +++ b/downstream/mmdetection/tools/deployment/test.py @@ -0,0 +1,157 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import warnings + +import mmcv +from mmcv import Config, DictAction +from mmcv.parallel import MMDataParallel + +from mmdet.apis import single_gpu_test +from mmdet.datasets import (build_dataloader, build_dataset, + replace_ImageToTensor) +from mmdet.utils import compat_cfg + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet test (and eval) an ONNX model using ONNXRuntime') + parser.add_argument('config', help='test config file path') + parser.add_argument('model', help='Input model file') + parser.add_argument('--out', help='output result file in pickle format') + parser.add_argument( + '--format-only', + action='store_true', + help='Format the output results without perform evaluation. It is' + 'useful when you want to format the result to a specific format and ' + 'submit it to the test server') + parser.add_argument( + '--backend', + required=True, + choices=['onnxruntime', 'tensorrt'], + help='Backend for input model to run. ') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., "bbox",' + ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where painted images will be saved') + parser.add_argument( + '--show-score-thr', + type=float, + default=0.3, + help='score threshold (default: 0.3)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + assert args.out or args.eval or args.format_only or args.show \ + or args.show_dir, \ + ('Please specify at least one operation (save/eval/format/show the ' + 'results / save the results) with the argument "--out", "--eval"' + ', "--format-only", "--show" or "--show-dir"') + + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + cfg = compat_cfg(cfg) + # in case the test dataset is concatenated + samples_per_gpu = 1 + if isinstance(cfg.data.test, dict): + cfg.data.test.test_mode = True + samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) + if samples_per_gpu > 1: + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.test.pipeline = replace_ImageToTensor( + cfg.data.test.pipeline) + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + ds_cfg.test_mode = True + samples_per_gpu = max( + [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) + if samples_per_gpu > 1: + for ds_cfg in cfg.data.test: + ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) + + # build the dataloader + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + samples_per_gpu=samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=False, + shuffle=False) + + if args.backend == 'onnxruntime': + from mmdet.core.export.model_wrappers import ONNXRuntimeDetector + model = ONNXRuntimeDetector( + args.model, class_names=dataset.CLASSES, device_id=0) + elif args.backend == 'tensorrt': + from mmdet.core.export.model_wrappers import TensorRTDetector + model = TensorRTDetector( + args.model, class_names=dataset.CLASSES, device_id=0) + + model = MMDataParallel(model, device_ids=[0]) + outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, + args.show_score_thr) + + if args.out: + print(f'\nwriting results to {args.out}') + mmcv.dump(outputs, args.out) + kwargs = {} if args.eval_options is None else args.eval_options + if args.format_only: + dataset.format_results(outputs, **kwargs) + if args.eval: + eval_kwargs = cfg.get('evaluation', {}).copy() + # hard-code way to remove EvalHook args + for key in [ + 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', + 'rule' + ]: + eval_kwargs.pop(key, None) + eval_kwargs.update(dict(metric=args.eval, **kwargs)) + print(dataset.evaluate(outputs, **eval_kwargs)) + + +if __name__ == '__main__': + main() + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. 
' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/downstream/mmdetection/tools/deployment/test_torchserver.py b/downstream/mmdetection/tools/deployment/test_torchserver.py new file mode 100644 index 0000000..dd45234 --- /dev/null +++ b/downstream/mmdetection/tools/deployment/test_torchserver.py @@ -0,0 +1,74 @@ +from argparse import ArgumentParser + +import numpy as np +import requests + +from mmdet.apis import inference_detector, init_detector, show_result_pyplot +from mmdet.core import bbox2result + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('model_name', help='The model name in the server') + parser.add_argument( + '--inference-addr', + default='127.0.0.1:8080', + help='Address and port of the inference server') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--score-thr', type=float, default=0.5, help='bbox score threshold') + args = parser.parse_args() + return args + + +def parse_result(input, model_class): + bbox = [] + label = [] + score = [] + for anchor in input: + bbox.append(anchor['bbox']) + label.append(model_class.index(anchor['class_name'])) + score.append([anchor['score']]) + bboxes = np.append(bbox, score, axis=1) + labels = np.array(label) + result = bbox2result(bboxes, labels, len(model_class)) + return result + + +def main(args): + # build the model from a config file and a checkpoint file + model = init_detector(args.config, args.checkpoint, device=args.device) + # test a single image + model_result = inference_detector(model, args.img) + for i, anchor_set in enumerate(model_result): + anchor_set = anchor_set[anchor_set[:, 4] >= 0.5] + model_result[i] = anchor_set + # show the results + show_result_pyplot( + model, + args.img, + model_result, + score_thr=args.score_thr, + title='pytorch_result') + url = 'http://' + args.inference_addr + '/predictions/' + args.model_name + with open(args.img, 'rb') as image: + response = requests.post(url, image) + server_result = parse_result(response.json(), model.CLASSES) + show_result_pyplot( + model, + args.img, + server_result, + score_thr=args.score_thr, + title='server_result') + + for i in range(len(model.CLASSES)): + assert np.allclose(model_result[i], server_result[i]) + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/downstream/mmdetection/tools/dist_test.sh b/downstream/mmdetection/tools/dist_test.sh new file mode 100755 index 0000000..dea131b --- /dev/null +++ b/downstream/mmdetection/tools/dist_test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/downstream/mmdetection/tools/dist_train.sh b/downstream/mmdetection/tools/dist_train.sh new file mode 100755 index 0000000..b76fbbb --- /dev/null +++ b/downstream/mmdetection/tools/dist_train.sh @@ -0,0 +1,18 @@ 
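+# Illustrative usage (config path and GPU count are placeholders):
+#   bash tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM}
+# Multi-node runs can be configured through the NNODES, NODE_RANK,
+# PORT and MASTER_ADDR environment variables read below.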
+CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --seed 0 \ + --launcher pytorch ${@:3} diff --git a/downstream/mmdetection/tools/misc/browse_dataset.py b/downstream/mmdetection/tools/misc/browse_dataset.py new file mode 100644 index 0000000..d9fb285 --- /dev/null +++ b/downstream/mmdetection/tools/misc/browse_dataset.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +from collections import Sequence +from pathlib import Path + +import mmcv +import numpy as np +from mmcv import Config, DictAction + +from mmdet.core.utils import mask2ndarray +from mmdet.core.visualization import imshow_det_bboxes +from mmdet.datasets.builder import build_dataset +from mmdet.utils import replace_cfg_vals, update_data_root + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--skip-type', + type=str, + nargs='+', + default=['DefaultFormatBundle', 'Normalize', 'Collect'], + help='skip some useless pipeline') + parser.add_argument( + '--output-dir', + default=None, + type=str, + help='If there is no display interface, you can save it') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--show-interval', + type=float, + default=2, + help='the interval of show (s)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def retrieve_data_cfg(config_path, skip_type, cfg_options): + + def skip_pipeline_steps(config): + config['pipeline'] = [ + x for x in config.pipeline if x['type'] not in skip_type + ] + + cfg = Config.fromfile(config_path) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + if cfg_options is not None: + cfg.merge_from_dict(cfg_options) + train_data_cfg = cfg.data.train + while 'dataset' in train_data_cfg and train_data_cfg[ + 'type'] != 'MultiImageMixDataset': + train_data_cfg = train_data_cfg['dataset'] + + if isinstance(train_data_cfg, Sequence): + [skip_pipeline_steps(c) for c in train_data_cfg] + else: + skip_pipeline_steps(train_data_cfg) + + return cfg + + +def main(): + args = parse_args() + cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options) + + if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']: + cfg.data.train.pipeline = [ + p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale' + ] + dataset = build_dataset(cfg.data.train) + + progress_bar = mmcv.ProgressBar(len(dataset)) + + for item in dataset: + filename = os.path.join(args.output_dir, + Path(item['filename']).name + ) if args.output_dir is not None else None + + gt_bboxes = item['gt_bboxes'] + gt_labels = item['gt_labels'] + gt_masks = item.get('gt_masks', None) + if gt_masks is not None: + gt_masks = mask2ndarray(gt_masks) + + gt_seg = item.get('gt_semantic_seg', None) + if gt_seg is not None: + pad_value = 255 # the padding value of gt_seg + sem_labels = np.unique(gt_seg) + all_labels = np.concatenate((gt_labels, sem_labels), axis=0) + all_labels, counts = np.unique(all_labels, return_counts=True) + stuff_labels = all_labels[np.logical_and(counts < 2, + all_labels != pad_value)] + stuff_masks = gt_seg[None] == stuff_labels[:, None, None] + gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0) + gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)), + axis=0) + # If you need to show the bounding boxes, + # please comment the following line + gt_bboxes = None + + imshow_det_bboxes( + item['img'], + gt_bboxes, + gt_labels, + gt_masks, + class_names=dataset.CLASSES, + show=not args.not_show, + wait_time=args.show_interval, + out_file=filename, + bbox_color=dataset.PALETTE, + text_color=(200, 200, 200), + mask_color=dataset.PALETTE) + + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/misc/download_dataset.py b/downstream/mmdetection/tools/misc/download_dataset.py new file mode 100644 index 0000000..09c777d --- /dev/null +++ b/downstream/mmdetection/tools/misc/download_dataset.py @@ -0,0 +1,102 @@ +import argparse +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from tarfile import TarFile +from zipfile import ZipFile + +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Download datasets for training') + parser.add_argument( + '--dataset-name', type=str, help='dataset name', default='coco2017') + parser.add_argument( + '--save-dir', + type=str, + help='the dir to save dataset', + default='data/coco') + parser.add_argument( + '--unzip', + action='store_true', + help='whether unzip dataset or not, zipped files will be saved') + parser.add_argument( + '--delete', + 
action='store_true', + help='delete the download zipped files') + parser.add_argument( + '--threads', type=int, help='number of threading', default=4) + args = parser.parse_args() + return args + + +def download(url, dir, unzip=True, delete=False, threads=1): + + def download_one(url, dir): + f = dir / Path(url).name + if Path(url).is_file(): + Path(url).rename(f) + elif not f.exists(): + print('Downloading {} to {}'.format(url, f)) + torch.hub.download_url_to_file(url, f, progress=True) + if unzip and f.suffix in ('.zip', '.tar'): + print('Unzipping {}'.format(f.name)) + if f.suffix == '.zip': + ZipFile(f).extractall(path=dir) + elif f.suffix == '.tar': + TarFile(f).extractall(path=dir) + if delete: + f.unlink() + print('Delete {}'.format(f)) + + dir = Path(dir) + if threads > 1: + pool = ThreadPool(threads) + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + download_one(u, dir) + + +def main(): + args = parse_args() + path = Path(args.save_dir) + if not path.exists(): + path.mkdir(parents=True, exist_ok=True) + data2url = dict( + # TODO: Support for downloading Panoptic Segmentation of COCO + coco2017=[ + 'http://images.cocodataset.org/zips/train2017.zip', + 'http://images.cocodataset.org/zips/val2017.zip', + 'http://images.cocodataset.org/zips/test2017.zip', + 'http://images.cocodataset.org/annotations/' + + 'annotations_trainval2017.zip' + ], + lvis=[ + 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa + 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa + ], + voc2007=[ + 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa + 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa + 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa + ], + ) + url = data2url.get(args.dataset_name, None) + if url is None: + print('Only support COCO, VOC, and LVIS now!') + return + download( + url, + dir=path, + unzip=args.unzip, + delete=args.delete, + threads=args.threads) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/misc/gen_coco_panoptic_test_info.py b/downstream/mmdetection/tools/misc/gen_coco_panoptic_test_info.py new file mode 100644 index 0000000..5ad315d --- /dev/null +++ b/downstream/mmdetection/tools/misc/gen_coco_panoptic_test_info.py @@ -0,0 +1,34 @@ +import argparse +import os.path as osp + +import mmcv + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate COCO test image information ' + 'for COCO panoptic segmentation.') + parser.add_argument('data_root', help='Path to COCO annotation directory.') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + data_root = args.data_root + val_info = mmcv.load(osp.join(data_root, 'panoptic_val2017.json')) + test_old_info = mmcv.load( + osp.join(data_root, 'image_info_test-dev2017.json')) + + # replace categories from image_info_test-dev2017.json + # with categories from panoptic_val2017.json which + # has attribute `isthing`. 
+ test_info = test_old_info + test_info.update({'categories': val_info['categories']}) + mmcv.dump(test_info, + osp.join(data_root, 'panoptic_image_info_test-dev2017.json')) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/misc/get_image_metas.py b/downstream/mmdetection/tools/misc/get_image_metas.py new file mode 100644 index 0000000..a9957d9 --- /dev/null +++ b/downstream/mmdetection/tools/misc/get_image_metas.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Get test image metas on a specific dataset. + +Here is an example to run this script. + +Example: + python tools/misc/get_image_metas.py ${CONFIG} \ + --out ${OUTPUT FILE NAME} +""" +import argparse +import csv +import os.path as osp +from multiprocessing import Pool + +import mmcv +from mmcv import Config + + +def parse_args(): + parser = argparse.ArgumentParser(description='Collect image metas') + parser.add_argument('config', help='Config file path') + parser.add_argument( + '--out', + default='validation-image-metas.pkl', + help='The output image metas file name. The save dir is in the ' + 'same directory as `dataset.ann_file` path') + parser.add_argument( + '--nproc', + default=4, + type=int, + help='Processes used for get image metas') + args = parser.parse_args() + return args + + +def get_metas_from_csv_style_ann_file(ann_file): + data_infos = [] + cp_filename = None + with open(ann_file, 'r') as f: + reader = csv.reader(f) + for i, line in enumerate(reader): + if i == 0: + continue + img_id = line[0] + filename = f'{img_id}.jpg' + if filename != cp_filename: + data_infos.append(dict(filename=filename)) + cp_filename = filename + return data_infos + + +def get_metas_from_txt_style_ann_file(ann_file): + with open(ann_file) as f: + lines = f.readlines() + i = 0 + data_infos = [] + while i < len(lines): + filename = lines[i].rstrip() + data_infos.append(dict(filename=filename)) + skip_lines = int(lines[i + 2]) + 3 + i += skip_lines + return data_infos + + +def get_image_metas(data_info, img_prefix): + file_client = mmcv.FileClient(backend='disk') + filename = data_info.get('filename', None) + if filename is not None: + if img_prefix is not None: + filename = osp.join(img_prefix, filename) + img_bytes = file_client.get(filename) + img = mmcv.imfrombytes(img_bytes, flag='color') + meta = dict(filename=filename, ori_shape=img.shape) + else: + raise NotImplementedError('Missing `filename` in data_info') + return meta + + +def main(): + args = parse_args() + assert args.out.endswith('pkl'), 'The output file name must be pkl suffix' + + # load config files + cfg = Config.fromfile(args.config) + ann_file = cfg.data.test.ann_file + img_prefix = cfg.data.test.img_prefix + + print(f'{"-" * 5} Start Processing {"-" * 5}') + if ann_file.endswith('csv'): + data_infos = get_metas_from_csv_style_ann_file(ann_file) + elif ann_file.endswith('txt'): + data_infos = get_metas_from_txt_style_ann_file(ann_file) + else: + shuffix = ann_file.split('.')[-1] + raise NotImplementedError('File name must be csv or txt suffix but ' + f'get {shuffix}') + + print(f'Successfully load annotation file from {ann_file}') + print(f'Processing {len(data_infos)} images...') + pool = Pool(args.nproc) + # get image metas with multiple processes + image_metas = pool.starmap( + get_image_metas, + zip(data_infos, [img_prefix for _ in range(len(data_infos))]), + ) + pool.close() + + # save image metas + root_path = cfg.data.test.ann_file.rsplit('/', 1)[0] + save_path = osp.join(root_path, args.out) + 
mmcv.dump(image_metas, save_path) + print(f'Image meta file save to: {save_path}') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/misc/print_config.py b/downstream/mmdetection/tools/misc/print_config.py new file mode 100644 index 0000000..f10f538 --- /dev/null +++ b/downstream/mmdetection/tools/misc/print_config.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import warnings + +from mmcv import Config, DictAction + +from mmdet.utils import replace_cfg_vals, update_data_root + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file (deprecate), ' + 'change to --cfg-options instead.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + print(f'Config:\n{cfg.pretty_text}') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/misc/split_coco.py b/downstream/mmdetection/tools/misc/split_coco.py new file mode 100644 index 0000000..78cc655 --- /dev/null +++ b/downstream/mmdetection/tools/misc/split_coco.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp + +import mmcv +import numpy as np + +prog_description = '''K-Fold coco split. + +To split coco data for semi-supervised object detection: + python tools/misc/split_coco.py +''' + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--data-root', + type=str, + help='The data root of coco dataset.', + default='./data/coco/') + parser.add_argument( + '--out-dir', + type=str, + help='The output directory of coco semi-supervised annotations.', + default='./data/coco_semi_annos/') + parser.add_argument( + '--labeled-percent', + type=float, + nargs='+', + help='The percentage of labeled data in the training set.', + default=[1, 2, 5, 10]) + parser.add_argument( + '--fold', + type=int, + help='K-fold cross validation for semi-supervised object detection.', + default=5) + args = parser.parse_args() + return args + + +def split_coco(data_root, out_dir, percent, fold): + """Split COCO data for Semi-supervised object detection. + + Args: + data_root (str): The data root of coco dataset. 
+ out_dir (str): The output directory of coco semi-supervised + annotations. + percent (float): The percentage of labeled data in the training set. + fold (int): The fold of dataset and set as random seed for data split. + """ + + def save_anns(name, images, annotations): + sub_anns = dict() + sub_anns['images'] = images + sub_anns['annotations'] = annotations + sub_anns['licenses'] = anns['licenses'] + sub_anns['categories'] = anns['categories'] + sub_anns['info'] = anns['info'] + + mmcv.mkdir_or_exist(out_dir) + mmcv.dump(sub_anns, f'{out_dir}/{name}.json') + + # set random seed with the fold + np.random.seed(fold) + ann_file = osp.join(data_root, 'annotations/instances_train2017.json') + anns = mmcv.load(ann_file) + + image_list = anns['images'] + labeled_total = int(percent / 100. * len(image_list)) + labeled_inds = set( + np.random.choice(range(len(image_list)), size=labeled_total)) + labeled_ids, labeled_images, unlabeled_images = [], [], [] + + for i in range(len(image_list)): + if i in labeled_inds: + labeled_images.append(image_list[i]) + labeled_ids.append(image_list[i]['id']) + else: + unlabeled_images.append(image_list[i]) + + # get all annotations of labeled images + labeled_ids = set(labeled_ids) + labeled_annotations, unlabeled_annotations = [], [] + + for ann in anns['annotations']: + if ann['image_id'] in labeled_ids: + labeled_annotations.append(ann) + else: + unlabeled_annotations.append(ann) + + # save labeled and unlabeled + labeled_name = f'instances_train2017.{fold}@{percent}' + unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled' + + save_anns(labeled_name, labeled_images, labeled_annotations) + save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations) + + +def multi_wrapper(args): + return split_coco(*args) + + +if __name__ == '__main__': + args = parse_args() + arguments_list = [(args.data_root, args.out_dir, p, f) + for f in range(1, args.fold + 1) + for p in args.labeled_percent] + mmcv.track_parallel_progress(multi_wrapper, arguments_list, args.fold) diff --git a/downstream/mmdetection/tools/model_converters/detectron2pytorch.py b/downstream/mmdetection/tools/model_converters/detectron2pytorch.py new file mode 100644 index 0000000..b7264d5 --- /dev/null +++ b/downstream/mmdetection/tools/model_converters/detectron2pytorch.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
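+"""Convert keys in Detectron pretrained ResNet models to PyTorch style.
+
+A possible invocation is sketched below; the ${...} values are
+placeholders. Only ResNet-50 and ResNet-101 are supported by
+``convert`` in this file.
+
+Example:
+    python tools/model_converters/detectron2pytorch.py ${SRC} ${DST} 50
+"""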
+import argparse +from collections import OrderedDict + +import mmcv +import torch + +arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)} + + +def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names): + # detectron replace bn with affine channel layer + state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name + + '_b']) + state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name + + '_s']) + bn_size = state_dict[torch_name + '.weight'].size() + state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size) + state_dict[torch_name + '.running_var'] = torch.ones(bn_size) + converted_names.add(caffe_name + '_b') + converted_names.add(caffe_name + '_s') + + +def convert_conv_fc(blobs, state_dict, caffe_name, torch_name, + converted_names): + state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name + + '_w']) + converted_names.add(caffe_name + '_w') + if caffe_name + '_b' in blobs: + state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name + + '_b']) + converted_names.add(caffe_name + '_b') + + +def convert(src, dst, depth): + """Convert keys in detectron pretrained ResNet models to pytorch style.""" + # load arch_settings + if depth not in arch_settings: + raise ValueError('Only support ResNet-50 and ResNet-101 currently') + block_nums = arch_settings[depth] + # load caffe model + caffe_model = mmcv.load(src, encoding='latin1') + blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names) + convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names) + for i in range(1, len(block_nums) + 1): + for j in range(block_nums[i - 1]): + if j == 0: + convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1', + f'layer{i}.{j}.downsample.0', converted_names) + convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn', + f'layer{i}.{j}.downsample.1', converted_names) + for k, letter in enumerate(['a', 'b', 'c']): + convert_conv_fc(blobs, state_dict, + f'res{i + 1}_{j}_branch2{letter}', + f'layer{i}.{j}.conv{k+1}', converted_names) + convert_bn(blobs, state_dict, + f'res{i + 1}_{j}_branch2{letter}_bn', + f'layer{i}.{j}.bn{k + 1}', converted_names) + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'Not Convert: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + parser.add_argument('depth', type=int, help='ResNet model depth') + args = parser.parse_args() + convert(args.src, args.dst, args.depth) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/model_converters/publish_model.py b/downstream/mmdetection/tools/model_converters/publish_model.py new file mode 100644 index 0000000..219fcdf --- /dev/null +++ b/downstream/mmdetection/tools/model_converters/publish_model.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
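+"""Process a checkpoint to be published.
+
+``process_checkpoint`` below removes the optimizer states and appends
+the first eight characters of the sha256 sum to the output file name.
+The ${...} values in the example are placeholders.
+
+Example:
+    python tools/model_converters/publish_model.py ${IN_FILE} ${OUT_FILE}
+"""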
+import argparse +import subprocess + +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + if torch.__version__ >= '1.6': + torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) + else: + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + if out_file.endswith('.pth'): + out_file_name = out_file[:-4] + else: + out_file_name = out_file + final_file = out_file_name + f'-{sha[:8]}.pth' + subprocess.Popen(['mv', out_file, final_file]) + + +def main(): + args = parse_args() + process_checkpoint(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/model_converters/regnet2mmdet.py b/downstream/mmdetection/tools/model_converters/regnet2mmdet.py new file mode 100644 index 0000000..fbf8c8f --- /dev/null +++ b/downstream/mmdetection/tools/model_converters/regnet2mmdet.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + + +def convert_stem(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('stem.conv', 'conv1') + new_key = new_key.replace('stem.bn', 'bn1') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('head.fc', 'fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_reslayer(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer, block, module = split_keys[:3] + block_id = int(block[1:]) + layer_name = f'layer{int(layer[1:])}' + block_name = f'{block_id - 1}' + + if block_id == 1 and module == 'bn': + new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}' + elif block_id == 1 and module == 'proj': + new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}' + elif module == 'f': + if split_keys[3] == 'a_bn': + module_name = 'bn1' + elif split_keys[3] == 'b_bn': + module_name = 'bn2' + elif split_keys[3] == 'c_bn': + module_name = 'bn3' + elif split_keys[3] == 'a': + module_name = 'conv1' + elif split_keys[3] == 'b': + module_name = 'conv2' + elif split_keys[3] == 'c': + module_name = 'conv3' + new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}' + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in pycls pretrained RegNet models to mmdet style.""" + # load caffe model + regnet_model = torch.load(src) + blobs = regnet_model['model_state'] + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + for key, weight in blobs.items(): + if 'stem' in key: + 
convert_stem(key, weight, state_dict, converted_names) + elif 'head' in key: + convert_head(key, weight, state_dict, converted_names) + elif key.startswith('s'): + convert_reslayer(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/model_converters/selfsup2mmdet.py b/downstream/mmdetection/tools/model_converters/selfsup2mmdet.py new file mode 100644 index 0000000..bc8cce1 --- /dev/null +++ b/downstream/mmdetection/tools/model_converters/selfsup2mmdet.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + + +def moco_convert(src, dst): + """Convert keys in pycls pretrained moco models to mmdet style.""" + # load caffe model + moco_model = torch.load(src) + blobs = moco_model['state_dict'] + # convert to pytorch style + state_dict = OrderedDict() + for k, v in blobs.items(): + if not k.startswith('module.encoder_q.'): + continue + old_k = k + k = k.replace('module.encoder_q.', '') + state_dict[k] = v + print(old_k, '->', k) + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + parser.add_argument( + '--selfsup', type=str, choices=['moco', 'swav'], help='save path') + args = parser.parse_args() + if args.selfsup == 'moco': + moco_convert(args.src, args.dst) + elif args.selfsup == 'swav': + print('SWAV does not need to convert the keys') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/model_converters/upgrade_model_version.py b/downstream/mmdetection/tools/model_converters/upgrade_model_version.py new file mode 100644 index 0000000..36ee607 --- /dev/null +++ b/downstream/mmdetection/tools/model_converters/upgrade_model_version.py @@ -0,0 +1,210 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
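+"""Upgrade checkpoints trained with older MMDetection versions.
+
+A possible invocation is sketched below; the ${...} values are
+placeholders and ``--num-classes`` defaults to 81, as defined in
+``parse_args`` in this file.
+
+Example:
+    python tools/model_converters/upgrade_model_version.py \
+        ${IN_FILE} ${OUT_FILE} --num-classes 81
+"""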
+import argparse +import re +import tempfile +from collections import OrderedDict + +import torch +from mmcv import Config + + +def is_head(key): + valid_head_list = [ + 'bbox_head', 'mask_head', 'semantic_head', 'grid_head', 'mask_iou_head' + ] + + return any(key.startswith(h) for h in valid_head_list) + + +def parse_config(config_strings): + temp_file = tempfile.NamedTemporaryFile() + config_path = f'{temp_file.name}.py' + with open(config_path, 'w') as f: + f.write(config_strings) + + config = Config.fromfile(config_path) + is_two_stage = True + is_ssd = False + is_retina = False + reg_cls_agnostic = False + if 'rpn_head' not in config.model: + is_two_stage = False + # check whether it is SSD + if config.model.bbox_head.type == 'SSDHead': + is_ssd = True + elif config.model.bbox_head.type == 'RetinaHead': + is_retina = True + elif isinstance(config.model['bbox_head'], list): + reg_cls_agnostic = True + elif 'reg_class_agnostic' in config.model.bbox_head: + reg_cls_agnostic = config.model.bbox_head \ + .reg_class_agnostic + temp_file.close() + return is_two_stage, is_ssd, is_retina, reg_cls_agnostic + + +def reorder_cls_channel(val, num_classes=81): + # bias + if val.dim() == 1: + new_val = torch.cat((val[1:], val[:1]), dim=0) + # weight + else: + out_channels, in_channels = val.shape[:2] + # conv_cls for softmax output + if out_channels != num_classes and out_channels % num_classes == 0: + new_val = val.reshape(-1, num_classes, in_channels, *val.shape[2:]) + new_val = torch.cat((new_val[:, 1:], new_val[:, :1]), dim=1) + new_val = new_val.reshape(val.size()) + # fc_cls + elif out_channels == num_classes: + new_val = torch.cat((val[1:], val[:1]), dim=0) + # agnostic | retina_cls | rpn_cls + else: + new_val = val + + return new_val + + +def truncate_cls_channel(val, num_classes=81): + + # bias + if val.dim() == 1: + if val.size(0) % num_classes == 0: + new_val = val[:num_classes - 1] + else: + new_val = val + # weight + else: + out_channels, in_channels = val.shape[:2] + # conv_logits + if out_channels % num_classes == 0: + new_val = val.reshape(num_classes, in_channels, *val.shape[2:])[1:] + new_val = new_val.reshape(-1, *val.shape[1:]) + # agnostic + else: + new_val = val + + return new_val + + +def truncate_reg_channel(val, num_classes=81): + # bias + if val.dim() == 1: + # fc_reg | rpn_reg + if val.size(0) % num_classes == 0: + new_val = val.reshape(num_classes, -1)[:num_classes - 1] + new_val = new_val.reshape(-1) + # agnostic + else: + new_val = val + # weight + else: + out_channels, in_channels = val.shape[:2] + # fc_reg | rpn_reg + if out_channels % num_classes == 0: + new_val = val.reshape(num_classes, -1, in_channels, + *val.shape[2:])[1:] + new_val = new_val.reshape(-1, *val.shape[1:]) + # agnostic + else: + new_val = val + + return new_val + + +def convert(in_file, out_file, num_classes): + """Convert keys in checkpoints. + + There can be some breaking changes during the development of mmdetection, + and this tool is used for upgrading checkpoints trained with old versions + to the latest one. 
+ """ + checkpoint = torch.load(in_file) + in_state_dict = checkpoint.pop('state_dict') + out_state_dict = OrderedDict() + meta_info = checkpoint['meta'] + is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config( + '#' + meta_info['config']) + if meta_info['mmdet_version'] <= '0.5.3' and is_retina: + upgrade_retina = True + else: + upgrade_retina = False + + # MMDetection v2.5.0 unifies the class order in RPN + # if the model is trained in version=2.5.0 + if meta_info['mmdet_version'] < '2.5.0': + upgrade_rpn = True + else: + upgrade_rpn = False + + for key, val in in_state_dict.items(): + new_key = key + new_val = val + if is_two_stage and is_head(key): + new_key = 'roi_head.{}'.format(key) + + # classification + if upgrade_rpn: + m = re.search( + r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|' + r'fovea_cls).(weight|bias)', new_key) + else: + m = re.search( + r'(conv_cls|retina_cls|fc_cls|fcos_cls|' + r'fovea_cls).(weight|bias)', new_key) + if m is not None: + print(f'reorder cls channels of {new_key}') + new_val = reorder_cls_channel(val, num_classes) + + # regression + if upgrade_rpn: + m = re.search(r'(fc_reg).(weight|bias)', new_key) + else: + m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key) + if m is not None and not reg_cls_agnostic: + print(f'truncate regression channels of {new_key}') + new_val = truncate_reg_channel(val, num_classes) + + # mask head + m = re.search(r'(conv_logits).(weight|bias)', new_key) + if m is not None: + print(f'truncate mask prediction channels of {new_key}') + new_val = truncate_cls_channel(val, num_classes) + + m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key) + # Legacy issues in RetinaNet since V1.x + # Use ConvModule instead of nn.Conv2d in RetinaNet + # cls_convs.0.weight -> cls_convs.0.conv.weight + if m is not None and upgrade_retina: + param = m.groups()[1] + new_key = key.replace(param, f'conv.{param}') + out_state_dict[new_key] = val + print(f'rename the name of {key} to {new_key}') + continue + + m = re.search(r'(cls_convs).\d.(weight|bias)', key) + if m is not None and is_ssd: + print(f'reorder cls channels of {new_key}') + new_val = reorder_cls_channel(val, num_classes) + + out_state_dict[new_key] = new_val + checkpoint['state_dict'] = out_state_dict + torch.save(checkpoint, out_file) + + +def main(): + parser = argparse.ArgumentParser(description='Upgrade model version') + parser.add_argument('in_file', help='input checkpoint file') + parser.add_argument('out_file', help='output checkpoint file') + parser.add_argument( + '--num-classes', + type=int, + default=81, + help='number of classes of the original model') + args = parser.parse_args() + convert(args.in_file, args.out_file, args.num_classes) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/model_converters/upgrade_ssd_version.py b/downstream/mmdetection/tools/model_converters/upgrade_ssd_version.py new file mode 100644 index 0000000..befff45 --- /dev/null +++ b/downstream/mmdetection/tools/model_converters/upgrade_ssd_version.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
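+"""Upgrade SSD checkpoints from older MMDetection versions.
+
+A possible invocation is sketched below; the ${...} values are
+placeholders.
+
+Example:
+    python tools/model_converters/upgrade_ssd_version.py ${IN_FILE} ${OUT_FILE}
+"""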
+import argparse +import tempfile +from collections import OrderedDict + +import torch +from mmcv import Config + + +def parse_config(config_strings): + temp_file = tempfile.NamedTemporaryFile() + config_path = f'{temp_file.name}.py' + with open(config_path, 'w') as f: + f.write(config_strings) + + config = Config.fromfile(config_path) + # check whether it is SSD + if config.model.bbox_head.type != 'SSDHead': + raise AssertionError('This is not a SSD model.') + + +def convert(in_file, out_file): + checkpoint = torch.load(in_file) + in_state_dict = checkpoint.pop('state_dict') + out_state_dict = OrderedDict() + meta_info = checkpoint['meta'] + parse_config('#' + meta_info['config']) + for key, value in in_state_dict.items(): + if 'extra' in key: + layer_idx = int(key.split('.')[2]) + new_key = 'neck.extra_layers.{}.{}.conv.'.format( + layer_idx // 2, layer_idx % 2) + key.split('.')[-1] + elif 'l2_norm' in key: + new_key = 'neck.l2_norm.weight' + elif 'bbox_head' in key: + new_key = key[:21] + '.0' + key[21:] + else: + new_key = key + out_state_dict[new_key] = value + checkpoint['state_dict'] = out_state_dict + + if torch.__version__ >= '1.6': + torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) + else: + torch.save(checkpoint, out_file) + + +def main(): + parser = argparse.ArgumentParser(description='Upgrade SSD version') + parser.add_argument('in_file', help='input checkpoint file') + parser.add_argument('out_file', help='output checkpoint file') + + args = parser.parse_args() + convert(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/slurm_test.sh b/downstream/mmdetection/tools/slurm_test.sh new file mode 100755 index 0000000..6dd67e5 --- /dev/null +++ b/downstream/mmdetection/tools/slurm_test.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/downstream/mmdetection/tools/slurm_train.sh b/downstream/mmdetection/tools/slurm_train.sh new file mode 100755 index 0000000..b3feb3d --- /dev/null +++ b/downstream/mmdetection/tools/slurm_train.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/downstream/mmdetection/tools/test.py b/downstream/mmdetection/tools/test.py new file mode 100644 index 0000000..892ff41 --- /dev/null +++ b/downstream/mmdetection/tools/test.py @@ -0,0 +1,247 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
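# --- Illustrative sketch, not part of the patch ---
# The key remapping performed by convert() in upgrade_ssd_version.py above:
# the old flat list of SSD extra conv layers is regrouped into pairs, so flat
# index i becomes block i // 2, position i % 2 under 'neck.extra_layers'.
# The example key is hypothetical and only demonstrates the index arithmetic.
def remap_extra_layer(old_key):
    parts = old_key.split('.')
    layer_idx = int(parts[2])                  # same index position used above
    return 'neck.extra_layers.{}.{}.conv.'.format(layer_idx // 2,
                                                  layer_idx % 2) + parts[-1]

print(remap_extra_layer('backbone.extra.3.weight'))
# -> neck.extra_layers.1.1.conv.weight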
+import argparse +import os +import os.path as osp +import time +import warnings + +import mmcv +import mmcv_custom # noqa: F401,F403 +import mmdet_custom # noqa: F401,F403 +import torch +from mmcv import Config, DictAction +from mmcv.cnn import fuse_conv_bn +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, + wrap_fp16_model) +from mmdet.apis import multi_gpu_test, single_gpu_test +from mmdet.datasets import (build_dataloader, build_dataset, + replace_ImageToTensor) +from mmdet.models import build_detector + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--work-dir', + help='the directory to save the file containing evaluation metrics') + parser.add_argument('--out', help='output result file in pickle format') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + parser.add_argument('--gpu-ids', + type=int, + nargs='+', + help='ids of gpus to use ' + '(only applicable to non-distributed testing)') + parser.add_argument( + '--format-only', + action='store_true', + help='Format the output results without perform evaluation. It is' + 'useful when you want to format the result to a specific format and ' + 'submit it to the test server') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., "bbox",' + ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument('--show-dir', + help='directory where painted images will be saved') + parser.add_argument('--show-score-thr', + type=float, + default=0.3, + help='score threshold (default: 0.3)') + parser.add_argument('--gpu-collect', + action='store_true', + help='whether to use gpu to collect results.') + parser.add_argument( + '--tmpdir', + help='tmp directory used for collecting results from multiple ' + 'workers, available when gpu-collect is not specified') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function (deprecate), ' + 'change to --eval-options instead.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function') + parser.add_argument('--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.eval_options: + raise ValueError( + '--options and --eval-options cannot be both ' + 'specified, --options is deprecated in favor of --eval-options') + if args.options: + warnings.warn('--options is deprecated in favor of --eval-options') + args.eval_options = args.options + return args + + +def main(): + args = parse_args() + + assert args.out or args.eval or args.format_only or args.show \ + or args.show_dir, \ + ('Please specify at least one operation (save/eval/format/show the ' + 'results / save the results) with the argument "--out", "--eval"' + ', "--format-only", "--show" or "--show-dir"') + + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + + cfg.model.pretrained = None + if cfg.model.get('neck'): + if isinstance(cfg.model.neck, list): + for neck_cfg in cfg.model.neck: + if neck_cfg.get('rfp_backbone'): + if neck_cfg.rfp_backbone.get('pretrained'): + neck_cfg.rfp_backbone.pretrained = None + elif cfg.model.neck.get('rfp_backbone'): + if cfg.model.neck.rfp_backbone.get('pretrained'): + cfg.model.neck.rfp_backbone.pretrained = None + + # in case the test dataset is concatenated + samples_per_gpu = 1 + if isinstance(cfg.data.test, dict): + cfg.data.test.test_mode = True + samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) + if samples_per_gpu > 1: + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.test.pipeline = replace_ImageToTensor( + cfg.data.test.pipeline) + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + ds_cfg.test_mode = True + samples_per_gpu = max( + [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) + if samples_per_gpu > 1: + for ds_cfg in cfg.data.test: + ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) + + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids + else: + cfg.gpu_ids = range(1) + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + distributed = False + if len(cfg.gpu_ids) > 1: + warnings.warn( + f'We treat {cfg.gpu_ids} as gpu-ids, and reset to ' + f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in ' + 'non-distribute testing time.') + cfg.gpu_ids = cfg.gpu_ids[0:1] + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + rank, _ = get_dist_info() + # allows not to create + if args.work_dir is not None and rank == 0: + mmcv.mkdir_or_exist(osp.abspath(args.work_dir)) + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + json_file = osp.join(args.work_dir, f'eval_{timestamp}.json') + + # build the dataloader + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader(dataset, + samples_per_gpu=samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') + if args.fuse_conv_bn: + model = fuse_conv_bn(model) + # old versions did not save class info in checkpoints, this walkaround is + # for backward compatibility + if 'CLASSES' in checkpoint.get('meta', {}): + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + model.CLASSES = dataset.CLASSES + + if not distributed: + model = MMDataParallel(model, device_ids=cfg.gpu_ids) + outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, + args.show_score_thr) + else: + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False) + outputs = multi_gpu_test(model, data_loader, args.tmpdir, + args.gpu_collect) + + rank, _ = get_dist_info() + if rank == 0: + if args.out: + print(f'\nwriting results to {args.out}') + mmcv.dump(outputs, args.out) + kwargs = {} if args.eval_options is None else args.eval_options + if args.format_only: + dataset.format_results(outputs, **kwargs) + if args.eval: + eval_kwargs = cfg.get('evaluation', {}).copy() + # hard-code way to remove EvalHook args + for key in [ + 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', + 'rule', 'dynamic_intervals' + ]: + eval_kwargs.pop(key, None) + eval_kwargs.update(dict(metric=args.eval, **kwargs)) + metric = dataset.evaluate(outputs, **eval_kwargs) + print(metric) + metric_dict = dict(config=args.config, metric=metric) + if args.work_dir is not None and rank == 0: + mmcv.dump(metric_dict, json_file) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmdetection/tools/train.py b/downstream/mmdetection/tools/train.py new file mode 100644 index 0000000..2fbc757 --- /dev/null +++ b/downstream/mmdetection/tools/train.py @@ -0,0 +1,278 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
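# --- Illustrative sketch, not part of the patch ---
# Consuming the artifacts written by tools/test.py above: the raw detections
# saved via --out and the metrics json written into --work-dir can both be
# re-loaded with mmcv. The file names here are placeholders.
import mmcv

results = mmcv.load('results.pkl')                         # one entry per test image
eval_info = mmcv.load('work_dir/eval_20230101_000000.json')
print(len(results), eval_info['metric'])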
+import argparse +import copy +import os +import os.path as osp +import time +import warnings + +import mmcv +# import mmcv_custom # noqa: F401,F403 +# import mmdet_custom # noqa: F401,F403 +import torch +from mmcv import Config, DictAction +from mmcv.runner import get_dist_info, init_dist +from mmcv.utils import get_git_hash +from mmdet import __version__ +from mmdet.apis import init_random_seed, set_random_seed, train_detector +from mmdet.datasets import build_dataset +from mmdet.models import build_detector +# from mmdet.utils import collect_env, get_root_logger + +from mmdet.utils import (collect_env, get_device, get_root_logger, + replace_cfg_vals, setup_multi_processes, + update_data_root) +from mmcls.gpvit_dev.amp.runner import AmpEpochBasedRunner + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume-from', help='the checkpoint file to resume from') + parser.add_argument( + '--auto-resume', + action='store_true', + help='resume from the latest checkpoint automatically') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument( + '--gpus', + type=int, + help='(Deprecated, please use --gpu-id) number of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='(Deprecated, please use --gpu-id) ids of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-id', + type=int, + default=0, + help='id of gpu to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--diff-seed', + action='store_true', + help='Whether or not set different seeds for different ranks') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file (deprecate), ' + 'change to --cfg-options instead.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument( + '--auto-scale-lr', + action='store_true', + help='enable automatically scaling LR.') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + if args.auto_scale_lr: + if 'auto_scale_lr' in cfg and \ + 'enable' in cfg.auto_scale_lr and \ + 'base_batch_size' in cfg.auto_scale_lr: + cfg.auto_scale_lr.enable = True + else: + warnings.warn('Can not find "auto_scale_lr" or ' + '"auto_scale_lr.enable" or ' + '"auto_scale_lr.base_batch_size" in your' + ' configuration file. Please update all the ' + 'configuration files to mmdet >= 2.24.1.') + + # set multi-process settings + setup_multi_processes(cfg) + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + if args.resume_from is not None: + cfg.resume_from = args.resume_from + cfg.auto_resume = args.auto_resume + if args.gpus is not None: + cfg.gpu_ids = range(1) + warnings.warn('`--gpus` is deprecated because we only support ' + 'single GPU mode in non-distributed training. ' + 'Use `gpus=1` now.') + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids[0:1] + warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' + 'Because we only support single GPU mode in ' + 'non-distributed training. Use the first GPU ' + 'in `gpu_ids` now.') + if args.gpus is None and args.gpu_ids is None: + cfg.gpu_ids = [args.gpu_id] + + # init distributed env first, since logger depends on the dist info. + if args.launcher == 'none': + distributed = False + else: + distributed = True + # NOTE: launching with "dist_train.sh" from env only works for torch<1.10 somehow + # for torch>=1.10, the dist launch will hang forever. + # So we explicitly set init args here. 
+ if "NGC_MASTER_ADDR" in os.environ: + init_dict = { + "init_method": f'tcp://{os.environ["NGC_MASTER_ADDR"]}:{os.environ["MASTER_PORT"]}', + "world_size": int(os.environ["WORLD_SIZE"]), + "rank": int(os.environ["RANK"]), + } + # we also enable wandb on the fly here + wandb_cfg = dict( + type='WandbLoggerHook', + with_step=False, + init_kwargs=dict( + project='HRGViT', + name=osp.splitext(osp.basename(args.config))[0], + resume=True, + tags=['det'], + dir=cfg.work_dir, + # config=cfg.self._cfg_dict.to_dict(), + )) + cfg.log_config.hooks.append(mmcv.ConfigDict(wandb_cfg)) + else: + init_dict = {} + warnings.warn(f"override init_dict: {init_dict}") + init_dist(args.launcher, **cfg.dist_params, **init_dict) + # re-set gpu_ids with distributed training mode + _, world_size = get_dist_info() + cfg.gpu_ids = range(world_size) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init the logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + meta['config'] = cfg.pretty_text + # log some basic info + logger.info(f'Distributed training: {distributed}') + logger.info(f'Config:\n{cfg.pretty_text}') + + cfg.device = get_device() + # set random seeds + seed = init_random_seed(args.seed, device=cfg.device) + rank, _ = get_dist_info() + seed = seed + rank if args.diff_seed else seed + logger.info(f'Set random seed to {seed}, ' + f'deterministic: {args.deterministic}') + set_random_seed(seed, deterministic=args.deterministic) + cfg.seed = seed + meta['seed'] = seed + meta['exp_name'] = osp.basename(args.config) + + model = build_detector( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + model.init_weights() + + if distributed and hasattr(model.backbone, 'convert_syncbn'): + if model.backbone.convert_syncbn: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + # the converted SyncBNs may be in training mode + if hasattr(model.backbone, 'set_freeze_patch_embed'): + if model.backbone.set_freeze_patch_embed: + model.backbone.set_freeze_patch_embed() + + datasets = [build_dataset(cfg.data.train)] + if len(cfg.workflow) == 2: + val_dataset = copy.deepcopy(cfg.data.val) + val_dataset.pipeline = cfg.data.train.pipeline + datasets.append(build_dataset(val_dataset)) + if cfg.checkpoint_config is not None: + # save mmdet version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmdet_version=__version__ + get_git_hash()[:7], + CLASSES=datasets[0].CLASSES) + # add an attribute for visualization convenience + model.CLASSES = datasets[0].CLASSES + train_detector( + model, + datasets, + cfg, + distributed=distributed, + validate=(not args.no_validate), + timestamp=timestamp, + meta=meta) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/.circleci/config.yml b/downstream/mmsegmentation/.circleci/config.yml new file mode 100644 index 
0000000..9456918 --- /dev/null +++ b/downstream/mmsegmentation/.circleci/config.yml @@ -0,0 +1,161 @@ +version: 2.1 + +jobs: + lint: + docker: + - image: cimg/python:3.7.4 + steps: + - checkout + - run: + name: Install dependencies + command: | + sudo apt-add-repository ppa:brightbox/ruby-ng -y + sudo apt-get update + sudo apt-get install -y ruby2.7 + - run: + name: Install pre-commit hook + command: | + pip install pre-commit + pre-commit install + - run: + name: Linting + command: pre-commit run --all-files + - run: + name: Check docstring coverage + command: | + pip install interrogate + interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 50 mmseg + + build_cpu: + parameters: + # The python version must match available image tags in + # https://circleci.com/developer/images/image/cimg/python + python: + type: string + default: "3.7.4" + torch: + type: string + torchvision: + type: string + docker: + - image: cimg/python:<< parameters.python >> + resource_class: large + steps: + - checkout + - run: + name: Install Libraries + command: | + sudo apt-get update + sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5 + - run: + name: Configure Python & pip + command: | + python -m pip install --upgrade pip + python -m pip install wheel + - run: + name: Install PyTorch + command: | + python -V + python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html + - run: + name: Install mmseg dependencies + command: | + python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch<< parameters.torch >>/index.html + python -m pip install mmdet + python -m pip install -r requirements.txt + - run: + name: Build and install + command: | + python -m pip install -e . + - run: + name: Run unittests + command: | + python -m pip install timm + python -m coverage run --branch --source mmseg -m pytest tests/ + python -m coverage xml + python -m coverage report -m + + build_cu101: + machine: + image: ubuntu-1604-cuda-10.1:201909-23 + resource_class: gpu.nvidia.small + steps: + - checkout + - run: + name: Install Libraries + command: | + sudo apt-get update + sudo apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx + - run: + name: Configure Python & pip + command: | + pyenv global 3.7.0 + python -m pip install --upgrade pip + python -m pip install wheel + - run: + name: Install PyTorch + command: | + python -V + python -m pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html + - run: + name: Install mmseg dependencies + # python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch${{matrix.torch_version}}/index.html + command: | + python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch1.6.0/index.html + python -m pip install mmdet + python -m pip install -r requirements.txt + - run: + name: Build and install + command: | + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 python -m pip install -e . 
+ - run: + name: Run unittests + command: | + python -m pip install timm + python -m pytest tests/ + +workflows: + unit_tests: + jobs: + - lint + - build_cpu: + name: build_cpu_th1.6 + torch: 1.6.0 + torchvision: 0.7.0 + requires: + - lint + - build_cpu: + name: build_cpu_th1.7 + torch: 1.7.0 + torchvision: 0.8.1 + requires: + - lint + - build_cpu: + name: build_cpu_th1.8_py3.9 + torch: 1.8.0 + torchvision: 0.9.0 + python: "3.9.0" + requires: + - lint + - build_cpu: + name: build_cpu_th1.9_py3.8 + torch: 1.9.0 + torchvision: 0.10.0 + python: "3.8.0" + requires: + - lint + - build_cpu: + name: build_cpu_th1.9_py3.9 + torch: 1.9.0 + torchvision: 0.10.0 + python: "3.9.0" + requires: + - lint + - build_cu101: + requires: + - build_cpu_th1.6 + - build_cpu_th1.7 + - build_cpu_th1.8_py3.9 + - build_cpu_th1.9_py3.8 + - build_cpu_th1.9_py3.9 diff --git a/downstream/mmsegmentation/.dev/batch_test_list.py b/downstream/mmsegmentation/.dev/batch_test_list.py new file mode 100644 index 0000000..c4fd8f9 --- /dev/null +++ b/downstream/mmsegmentation/.dev/batch_test_list.py @@ -0,0 +1,133 @@ +# yapf: disable +# Inference Speed is tested on NVIDIA V100 +hrnet = [ + dict( + config='configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py', + checkpoint='fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth', # noqa + eval='mIoU', + metric=dict(mIoU=33.0), + ), + dict( + config='configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py', + checkpoint='fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth', # noqa + eval='mIoU', + metric=dict(mIoU=76.31), + ), + dict( + config='configs/hrnet/fcn_hr48_512x512_160k_ade20k.py', + checkpoint='fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth', + eval='mIoU', + metric=dict(mIoU=42.02), + ), + dict( + config='configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py', + checkpoint='fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.65), + ), +] +pspnet = [ + dict( + config='configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py', + checkpoint='pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth', # noqa + eval='mIoU', + metric=dict(mIoU=78.55), + ), + dict( + config='configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py', + checkpoint='pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth', # noqa + eval='mIoU', + metric=dict(mIoU=79.76), + ), + dict( + config='configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py', + checkpoint='pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth', # noqa + eval='mIoU', + metric=dict(mIoU=44.39), + ), + dict( + config='configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py', + checkpoint='pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth', # noqa + eval='mIoU', + metric=dict(mIoU=42.48), + ), +] +resnest = [ + dict( + config='configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py', + checkpoint='pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth', # noqa + eval='mIoU', + metric=dict(mIoU=45.44), + ), + dict( + config='configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py', + checkpoint='pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth', # noqa + eval='mIoU', + metric=dict(mIoU=78.57), + ), +] +fastscnn = [ + dict( + config='configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py', + checkpoint='fast_scnn_8x4_160k_lr0.12_cityscapes-0cec9937.pth', + eval='mIoU', + metric=dict(mIoU=70.96), + ) +] +deeplabv3plus = [ + dict( + 
config='configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.98), + ), + dict( + config='configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.97), + ), + dict( + config='configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.09), + ), + dict( + config='configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth', # noqa + eval='mIoU', + metric=dict(mIoU=79.83), + ), +] +vit = [ + dict( + config='configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py', + checkpoint='upernet_vit-b16_ln_mln_512x512_160k_ade20k-f444c077.pth', + eval='mIoU', + metric=dict(mIoU=47.73), + ), + dict( + config='configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py', + checkpoint='upernet_deit-s16_ln_mln_512x512_160k_ade20k-c0cd652f.pth', + eval='mIoU', + metric=dict(mIoU=43.52), + ), +] +fp16 = [ + dict( + config='configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py', # noqa + checkpoint='deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-f1104f4b.pth', # noqa + eval='mIoU', + metric=dict(mIoU=80.46), + ) +] +swin = [ + dict( + config='configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py', # noqa + checkpoint='upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', # noqa + eval='mIoU', + metric=dict(mIoU=44.41), + ) +] +# yapf: enable diff --git a/downstream/mmsegmentation/.dev/batch_train_list.txt b/downstream/mmsegmentation/.dev/batch_train_list.txt new file mode 100644 index 0000000..17d1993 --- /dev/null +++ b/downstream/mmsegmentation/.dev/batch_train_list.txt @@ -0,0 +1,19 @@ +configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py +configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py +configs/hrnet/fcn_hr48_512x512_160k_ade20k.py +configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py +configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py +configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py +configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py +configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py +configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py +configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py +configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py +configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py +configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py +configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py +configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py +configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py +configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py +configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py +configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py diff --git a/downstream/mmsegmentation/.dev/benchmark_evaluation.sh b/downstream/mmsegmentation/.dev/benchmark_evaluation.sh new file mode 100755 index 0000000..68dc272 --- /dev/null +++ 
b/downstream/mmsegmentation/.dev/benchmark_evaluation.sh @@ -0,0 +1,41 @@ +PARTITION=$1 +CHECKPOINT_DIR=$2 + +echo 'configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr18s_512x512_160k_ade20k configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py $CHECKPOINT_DIR/fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr18s_512x512_160k_ade20k --cfg-options dist_params.port=28171 & +echo 'configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr18s_512x1024_160k_cityscapes configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py $CHECKPOINT_DIR/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr18s_512x1024_160k_cityscapes --cfg-options dist_params.port=28172 & +echo 'configs/hrnet/fcn_hr48_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr48_512x512_160k_ade20k configs/hrnet/fcn_hr48_512x512_160k_ade20k.py $CHECKPOINT_DIR/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr48_512x512_160k_ade20k --cfg-options dist_params.port=28173 & +echo 'configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr48_512x1024_160k_cityscapes configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py $CHECKPOINT_DIR/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr48_512x1024_160k_cityscapes --cfg-options dist_params.port=28174 & +echo 'configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r50-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r50-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28175 & +echo 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r101-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28176 & +echo 'configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r101-d8_512x512_160k_ade20k configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r101-d8_512x512_160k_ade20k --cfg-options dist_params.port=28177 & +echo 'configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r50-d8_512x512_160k_ade20k configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r50-d8_512x512_160k_ade20k --cfg-options dist_params.port=28178 & +echo 
'configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_s101-d8_512x512_160k_ade20k configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_s101-d8_512x512_160k_ade20k --cfg-options dist_params.port=28179 & +echo 'configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_s101-d8_512x1024_80k_cityscapes configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_s101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28180 & +echo 'configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fast_scnn_lr0.12_8x4_160k_cityscapes configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py $CHECKPOINT_DIR/fast_scnn_8x4_160k_lr0.12_cityscapes-0cec9937.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fast_scnn_lr0.12_8x4_160k_cityscapes --cfg-options dist_params.port=28181 & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_769x769_80k_cityscapes --cfg-options dist_params.port=28182 & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28183 & +echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r50-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r50-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28184 & +echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r50-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r50-d8_769x769_80k_cityscapes --cfg-options dist_params.port=28185 & +echo 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_vit-b16_ln_mln_512x512_160k_ade20k configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py 
$CHECKPOINT_DIR/upernet_vit-b16_ln_mln_512x512_160k_ade20k-f444c077.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_vit-b16_ln_mln_512x512_160k_ade20k --cfg-options dist_params.port=28186 & +echo 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_deit-s16_ln_mln_512x512_160k_ade20k configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py $CHECKPOINT_DIR/upernet_deit-s16_ln_mln_512x512_160k_ade20k-c0cd652f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_deit-s16_ln_mln_512x512_160k_ade20k --cfg-options dist_params.port=28187 & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes-cc58bc8d.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes --cfg-options dist_params.port=28188 & +echo 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py $CHECKPOINT_DIR/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K --cfg-options dist_params.port=28189 & diff --git a/downstream/mmsegmentation/.dev/benchmark_inference.py b/downstream/mmsegmentation/.dev/benchmark_inference.py new file mode 100644 index 0000000..5124811 --- /dev/null +++ b/downstream/mmsegmentation/.dev/benchmark_inference.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import hashlib +import logging +import os +import os.path as osp +import warnings +from argparse import ArgumentParser + +import requests +from mmcv import Config + +from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot +from mmseg.utils import get_root_logger + +# ignore warnings when segmentors inference +warnings.filterwarnings('ignore') + + +def download_checkpoint(checkpoint_name, model_name, config_name, collect_dir): + """Download checkpoint and check if hash code is true.""" + url = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{checkpoint_name}' # noqa + + r = requests.get(url) + assert r.status_code != 403, f'{url} Access denied.' + + with open(osp.join(collect_dir, checkpoint_name), 'wb') as code: + code.write(r.content) + + true_hash_code = osp.splitext(checkpoint_name)[0].split('-')[1] + + # check hash code + with open(osp.join(collect_dir, checkpoint_name), 'rb') as fp: + sha256_cal = hashlib.sha256() + sha256_cal.update(fp.read()) + cur_hash_code = sha256_cal.hexdigest()[:8] + + assert true_hash_code == cur_hash_code, f'{url} download failed, ' + 'incomplete downloaded file or url invalid.' 
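# Added note (not part of the patch): OpenMMLab checkpoint file names embed the
# first 8 hex digits of the file's sha256 after the final '-', e.g.
# 'fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth', and that suffix
# is what download_checkpoint() compares against the freshly computed digest.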
+ + if cur_hash_code != true_hash_code: + os.remove(osp.join(collect_dir, checkpoint_name)) + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint_root', help='Checkpoint file root path') + parser.add_argument( + '-i', '--img', default='demo/demo.png', help='Image file') + parser.add_argument('-a', '--aug', action='store_true', help='aug test') + parser.add_argument('-m', '--model-name', help='model name to inference') + parser.add_argument( + '-s', '--show', action='store_true', help='show results') + parser.add_argument( + '-d', '--device', default='cuda:0', help='Device used for inference') + args = parser.parse_args() + return args + + +def inference_model(config_name, checkpoint, args, logger=None): + cfg = Config.fromfile(config_name) + if args.aug: + if 'flip' in cfg.data.test.pipeline[ + 1] and 'img_scale' in cfg.data.test.pipeline[1]: + cfg.data.test.pipeline[1].img_ratios = [ + 0.5, 0.75, 1.0, 1.25, 1.5, 1.75 + ] + cfg.data.test.pipeline[1].flip = True + else: + if logger is not None: + logger.error(f'{config_name}: unable to start aug test') + else: + print(f'{config_name}: unable to start aug test', flush=True) + + model = init_segmentor(cfg, checkpoint, device=args.device) + # test a single image + result = inference_segmentor(model, args.img) + + # show the results + if args.show: + show_result_pyplot(model, args.img, result) + return result + + +# Sample test whether the inference code is correct +def main(args): + config = Config.fromfile(args.config) + + if not os.path.exists(args.checkpoint_root): + os.makedirs(args.checkpoint_root, 0o775) + + # test single model + if args.model_name: + if args.model_name in config: + model_infos = config[args.model_name] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + config_name = model_info['config'].strip() + print(f'processing: {config_name}', flush=True) + checkpoint = osp.join(args.checkpoint_root, + model_info['checkpoint'].strip()) + try: + # build the model from a config file and a checkpoint file + inference_model(config_name, checkpoint, args) + except Exception: + print(f'{config_name} test failed!') + continue + return + else: + raise RuntimeError('model name input error.') + + # test all model + logger = get_root_logger( + log_file='benchmark_inference_image.log', log_level=logging.ERROR) + + for model_name in config: + model_infos = config[model_name] + + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + print('processing: ', model_info['config'], flush=True) + config_path = model_info['config'].strip() + config_name = osp.splitext(osp.basename(config_path))[0] + checkpoint_name = model_info['checkpoint'].strip() + checkpoint = osp.join(args.checkpoint_root, checkpoint_name) + + # ensure checkpoint exists + try: + if not osp.exists(checkpoint): + download_checkpoint(checkpoint_name, model_name, + config_name.rstrip('.py'), + args.checkpoint_root) + except Exception: + logger.error(f'{checkpoint_name} download error') + continue + + # test model inference with checkpoint + try: + # build the model from a config file and a checkpoint file + inference_model(config_path, checkpoint, args, logger) + except Exception as e: + logger.error(f'{config_path} " : {repr(e)}') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/downstream/mmsegmentation/.dev/benchmark_train.sh 
b/downstream/mmsegmentation/.dev/benchmark_train.sh new file mode 100755 index 0000000..cde47a0 --- /dev/null +++ b/downstream/mmsegmentation/.dev/benchmark_train.sh @@ -0,0 +1,40 @@ +PARTITION=$1 + +echo 'configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr18s_512x512_160k_ade20k configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24727 --work-dir work_dirs/hrnet/fcn_hr18s_512x512_160k_ade20k >/dev/null & +echo 'configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr18s_512x1024_160k_cityscapes configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24728 --work-dir work_dirs/hrnet/fcn_hr18s_512x1024_160k_cityscapes >/dev/null & +echo 'configs/hrnet/fcn_hr48_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr48_512x512_160k_ade20k configs/hrnet/fcn_hr48_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24729 --work-dir work_dirs/hrnet/fcn_hr48_512x512_160k_ade20k >/dev/null & +echo 'configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr48_512x1024_160k_cityscapes configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24730 --work-dir work_dirs/hrnet/fcn_hr48_512x1024_160k_cityscapes >/dev/null & +echo 'configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r50-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24731 --work-dir work_dirs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r101-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24732 --work-dir work_dirs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r101-d8_512x512_160k_ade20k configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24733 --work-dir work_dirs/pspnet/pspnet_r101-d8_512x512_160k_ade20k >/dev/null & +echo 'configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r50-d8_512x512_160k_ade20k configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24734 --work-dir work_dirs/pspnet/pspnet_r50-d8_512x512_160k_ade20k >/dev/null & +echo 'configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_s101-d8_512x512_160k_ade20k configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24735 --work-dir work_dirs/resnest/pspnet_s101-d8_512x512_160k_ade20k >/dev/null & +echo 
'configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_s101-d8_512x1024_80k_cityscapes configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24736 --work-dir work_dirs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fast_scnn_lr0.12_8x4_160k_cityscapes configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24737 --work-dir work_dirs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24738 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24739 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r50-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24740 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r50-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24741 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes >/dev/null & +echo 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_vit-b16_ln_mln_512x512_160k_ade20k configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24742 --work-dir work_dirs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k >/dev/null & +echo 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_deit-s16_ln_mln_512x512_160k_ade20k configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24743 --work-dir work_dirs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k >/dev/null & +echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py' & +GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes 
configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24744 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes >/dev/null & +echo 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py' & +GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24745 --work-dir work_dirs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K >/dev/null & diff --git a/downstream/mmsegmentation/.dev/check_urls.py b/downstream/mmsegmentation/.dev/check_urls.py new file mode 100644 index 0000000..42b6474 --- /dev/null +++ b/downstream/mmsegmentation/.dev/check_urls.py @@ -0,0 +1,101 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import os +from argparse import ArgumentParser + +import requests +import yaml as yml + +from mmseg.utils import get_root_logger + + +def check_url(url): + """Check url response status. + + Args: + url (str): url needed to check. + + Returns: + int, bool: status code and check flag. + """ + flag = True + r = requests.head(url) + status_code = r.status_code + if status_code == 403 or status_code == 404: + flag = False + + return status_code, flag + + +def parse_args(): + parser = ArgumentParser('url valid check.') + parser.add_argument( + '-m', + '--model-name', + type=str, + help='Select the model needed to check') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + model_name = args.model_name + + # yml path generate. + # If model_name is not set, script will check all of the models. + if model_name is not None: + yml_list = [(model_name, f'configs/{model_name}/{model_name}.yml')] + else: + # check all + yml_list = [(x, f'configs/{x}/{x}.yml') for x in os.listdir('configs/') + if x != '_base_'] + + logger = get_root_logger(log_file='url_check.log', log_level=logging.ERROR) + + for model_name, yml_path in yml_list: + # Default yaml loader unsafe. 
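# Added note (not part of the patch): yaml.CLoader is the C-accelerated full
# loader; for untrusted metafiles yaml.safe_load (or yaml.CSafeLoader) would be
# the stricter choice. Each entry under 'Models' in the metafile is expected to
# provide at least 'Name' and 'Weights', which are read below.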
+ model_infos = yml.load( + open(yml_path, 'r'), Loader=yml.CLoader)['Models'] + for model_info in model_infos: + config_name = model_info['Name'] + checkpoint_url = model_info['Weights'] + # checkpoint url check + status_code, flag = check_url(checkpoint_url) + if flag: + logger.info(f'checkpoint | {config_name} | {checkpoint_url} | ' + f'{status_code} valid') + else: + logger.error( + f'checkpoint | {config_name} | {checkpoint_url} | ' + f'{status_code} | error') + # log_json check + checkpoint_name = checkpoint_url.split('/')[-1] + model_time = '-'.join(checkpoint_name.split('-')[:-1]).replace( + f'{config_name}_', '') + # two style of log_json name + # use '_' to link model_time (will be deprecated) + log_json_url_1 = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{config_name}_{model_time}.log.json' # noqa + status_code_1, flag_1 = check_url(log_json_url_1) + # use '-' to link model_time + log_json_url_2 = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{config_name}-{model_time}.log.json' # noqa + status_code_2, flag_2 = check_url(log_json_url_2) + if flag_1 or flag_2: + if flag_1: + logger.info( + f'log.json | {config_name} | {log_json_url_1} | ' + f'{status_code_1} | valid') + else: + logger.info( + f'log.json | {config_name} | {log_json_url_2} | ' + f'{status_code_2} | valid') + else: + logger.error( + f'log.json | {config_name} | {log_json_url_1} & ' + f'{log_json_url_2} | {status_code_1} & {status_code_2} | ' + 'error') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/.dev/gather_benchmark_evaluation_results.py b/downstream/mmsegmentation/.dev/gather_benchmark_evaluation_results.py new file mode 100644 index 0000000..47b557a --- /dev/null +++ b/downstream/mmsegmentation/.dev/gather_benchmark_evaluation_results.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
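# Example invocation (not part of the patch) of check_urls.py above, run from
# the mmsegmentation root; the model name must match a directory under configs/:
#
#   python .dev/check_urls.py --model-name pspnet
#
# Failures are written to url_check.log via get_root_logger().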
+import argparse +import glob +import os.path as osp + +import mmcv +from mmcv import Config + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Gather benchmarked model evaluation results') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + '--out', + type=str, + default='benchmark_evaluation_info.json', + help='output path of gathered metrics and compared ' + 'results to be stored') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + root_path = args.root + metrics_out = args.out + result_dict = {} + + cfg = Config.fromfile(args.config) + + for model_key in cfg: + model_infos = cfg[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + previous_metrics = model_info['metric'] + config = model_info['config'].strip() + fname, _ = osp.splitext(osp.basename(config)) + + # Load benchmark evaluation json + metric_json_dir = osp.join(root_path, fname) + if not osp.exists(metric_json_dir): + print(f'{metric_json_dir} not existed.') + continue + + json_list = glob.glob(osp.join(metric_json_dir, '*.json')) + if len(json_list) == 0: + print(f'There is no eval json in {metric_json_dir}.') + continue + + log_json_path = list(sorted(json_list))[-1] + metric = mmcv.load(log_json_path) + if config not in metric.get('config', {}): + print(f'{config} not included in {log_json_path}') + continue + + # Compare between new benchmark results and previous metrics + differential_results = dict() + new_metrics = dict() + for record_metric_key in previous_metrics: + if record_metric_key not in metric['metric']: + raise KeyError('record_metric_key not exist, please ' + 'check your config') + old_metric = previous_metrics[record_metric_key] + new_metric = round(metric['metric'][record_metric_key] * 100, + 2) + + differential = new_metric - old_metric + flag = '+' if differential > 0 else '-' + differential_results[ + record_metric_key] = f'{flag}{abs(differential):.2f}' + new_metrics[record_metric_key] = new_metric + + result_dict[config] = dict( + differential=differential_results, + previous=previous_metrics, + new=new_metrics) + + if metrics_out: + mmcv.dump(result_dict, metrics_out, indent=4) + print('===================================') + for config_name, metrics in result_dict.items(): + print(config_name, metrics) + print('===================================') diff --git a/downstream/mmsegmentation/.dev/gather_benchmark_train_results.py b/downstream/mmsegmentation/.dev/gather_benchmark_train_results.py new file mode 100644 index 0000000..8aff2c4 --- /dev/null +++ b/downstream/mmsegmentation/.dev/gather_benchmark_train_results.py @@ -0,0 +1,100 @@ +import argparse +import glob +import os.path as osp + +import mmcv +from gather_models import get_final_results +from mmcv import Config + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Gather benchmarked models train results') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + '--out', + type=str, + default='benchmark_train_info.json', + help='output path of gathered metrics to be stored') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + root_path = args.root + metrics_out = args.out 
+ + evaluation_cfg = Config.fromfile(args.config) + + result_dict = {} + for model_key in evaluation_cfg: + model_infos = evaluation_cfg[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + config = model_info['config'] + + # benchmark train dir + model_name = osp.split(osp.dirname(config))[1] + config_name = osp.splitext(osp.basename(config))[0] + exp_dir = osp.join(root_path, model_name, config_name) + if not osp.exists(exp_dir): + print(f'{config} hasn\'t {exp_dir}') + continue + + # parse config + cfg = mmcv.Config.fromfile(config) + total_iters = cfg.runner.max_iters + exp_metric = cfg.evaluation.metric + if not isinstance(exp_metric, list): + exp_metrics = [exp_metric] + + # determine whether total_iters ckpt exists + ckpt_path = f'iter_{total_iters}.pth' + if not osp.exists(osp.join(exp_dir, ckpt_path)): + print(f'{config} hasn\'t {ckpt_path}') + continue + + # only the last log json counts + log_json_path = list( + sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1] + + # extract metric value + model_performance = get_final_results(log_json_path, total_iters) + if model_performance is None: + print(f'log file error: {log_json_path}') + continue + + differential_results = dict() + old_results = dict() + new_results = dict() + for metric_key in model_performance: + if metric_key in ['mIoU']: + metric = round(model_performance[metric_key] * 100, 2) + old_metric = model_info['metric'][metric_key] + old_results[metric_key] = old_metric + new_results[metric_key] = metric + differential = metric - old_metric + flag = '+' if differential > 0 else '-' + differential_results[ + metric_key] = f'{flag}{abs(differential):.2f}' + result_dict[config] = dict( + differential_results=differential_results, + old_results=old_results, + new_results=new_results, + ) + + # 4 save or print results + if metrics_out: + mmcv.dump(result_dict, metrics_out, indent=4) + print('===================================') + for config_name, metrics in result_dict.items(): + print(config_name, metrics) + print('===================================') diff --git a/downstream/mmsegmentation/.dev/gather_models.py b/downstream/mmsegmentation/.dev/gather_models.py new file mode 100644 index 0000000..3eedf61 --- /dev/null +++ b/downstream/mmsegmentation/.dev/gather_models.py @@ -0,0 +1,211 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import glob +import hashlib +import json +import os +import os.path as osp +import shutil + +import mmcv +import torch + +# build schedule look-up table to automatically find the final model +RESULTS_LUT = ['mIoU', 'mAcc', 'aAcc'] + + +def calculate_file_sha256(file_path): + """calculate file sha256 hash code.""" + with open(file_path, 'rb') as fp: + sha256_cal = hashlib.sha256() + sha256_cal.update(fp.read()) + return sha256_cal.hexdigest() + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + torch.save(checkpoint, out_file) + # The hash code calculation and rename command differ on different system + # platform. 
+ sha = calculate_file_sha256(out_file) + final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) + os.rename(out_file, final_file) + + # Remove prefix and suffix + final_file_name = osp.split(final_file)[1] + final_file_name = osp.splitext(final_file_name)[0] + + return final_file_name + + +def get_final_iter(config): + iter_num = config.split('_')[-2] + assert iter_num.endswith('k') + return int(iter_num[:-1]) * 1000 + + +def get_final_results(log_json_path, iter_num): + result_dict = dict() + last_iter = 0 + with open(log_json_path, 'r') as f: + for line in f.readlines(): + log_line = json.loads(line) + if 'mode' not in log_line.keys(): + continue + + # When evaluation, the 'iter' of new log json is the evaluation + # steps on single gpu. + flag1 = ('aAcc' in log_line) or (log_line['mode'] == 'val') + flag2 = (last_iter == iter_num - 50) or (last_iter == iter_num) + if flag1 and flag2: + result_dict.update({ + key: log_line[key] + for key in RESULTS_LUT if key in log_line + }) + return result_dict + + last_iter = log_line['iter'] + + +def parse_args(): + parser = argparse.ArgumentParser(description='Gather benchmarked models') + parser.add_argument( + '-f', '--config-name', type=str, help='Process the selected config.') + parser.add_argument( + '-w', + '--work-dir', + default='work_dirs/', + type=str, + help='Ckpt storage root folder of benchmarked models to be gathered.') + parser.add_argument( + '-c', + '--collect-dir', + default='work_dirs/gather', + type=str, + help='Ckpt collect root folder of gathered models.') + parser.add_argument( + '--all', action='store_true', help='whether include .py and .log') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + work_dir = args.work_dir + collect_dir = args.collect_dir + selected_config_name = args.config_name + mmcv.mkdir_or_exist(collect_dir) + + # find all models in the root directory to be gathered + raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True)) + + # filter configs that is not trained in the experiments dir + used_configs = [] + for raw_config in raw_configs: + config_name = osp.splitext(osp.basename(raw_config))[0] + if osp.exists(osp.join(work_dir, config_name)): + if (selected_config_name is None + or selected_config_name == config_name): + used_configs.append(raw_config) + print(f'Find {len(used_configs)} models to be gathered') + + # find final_ckpt and log file for trained each config + # and parse the best performance + model_infos = [] + for used_config in used_configs: + config_name = osp.splitext(osp.basename(used_config))[0] + exp_dir = osp.join(work_dir, config_name) + # check whether the exps is finished + final_iter = get_final_iter(used_config) + final_model = 'iter_{}.pth'.format(final_iter) + model_path = osp.join(exp_dir, final_model) + + # skip if the model is still training + if not osp.exists(model_path): + print(f'{used_config} train not finished yet') + continue + + # get logs + log_json_paths = glob.glob(osp.join(exp_dir, '*.log.json')) + log_json_path = log_json_paths[0] + model_performance = None + for idx, _log_json_path in enumerate(log_json_paths): + model_performance = get_final_results(_log_json_path, final_iter) + if model_performance is not None: + log_json_path = _log_json_path + break + + if model_performance is None: + print(f'{used_config} model_performance is None') + continue + + model_time = osp.split(log_json_path)[-1].split('.')[0] + model_infos.append( + dict( + config_name=config_name, + results=model_performance, + 
iters=final_iter, + model_time=model_time, + log_json_path=osp.split(log_json_path)[-1])) + + # publish model for each checkpoint + publish_model_infos = [] + for model in model_infos: + config_name = model['config_name'] + model_publish_dir = osp.join(collect_dir, config_name) + + publish_model_path = osp.join(model_publish_dir, + config_name + '_' + model['model_time']) + trained_model_path = osp.join(work_dir, config_name, + 'iter_{}.pth'.format(model['iters'])) + if osp.exists(model_publish_dir): + for file in os.listdir(model_publish_dir): + if file.endswith('.pth'): + print(f'model {file} found') + model['model_path'] = osp.abspath( + osp.join(model_publish_dir, file)) + break + if 'model_path' not in model: + print(f'dir {model_publish_dir} exists, no model found') + + else: + mmcv.mkdir_or_exist(model_publish_dir) + + # convert model + final_model_path = process_checkpoint(trained_model_path, + publish_model_path) + model['model_path'] = final_model_path + + new_json_path = f'{config_name}_{model["log_json_path"]}' + # copy log + shutil.copy( + osp.join(work_dir, config_name, model['log_json_path']), + osp.join(model_publish_dir, new_json_path)) + + if args.all: + new_txt_path = new_json_path.rstrip('.json') + shutil.copy( + osp.join(work_dir, config_name, + model['log_json_path'].rstrip('.json')), + osp.join(model_publish_dir, new_txt_path)) + + if args.all: + # copy config to guarantee reproducibility + raw_config = osp.join('./configs', f'{config_name}.py') + mmcv.Config.fromfile(raw_config).dump( + osp.join(model_publish_dir, osp.basename(raw_config))) + + publish_model_infos.append(model) + + models = dict(models=publish_model_infos) + mmcv.dump(models, osp.join(collect_dir, 'model_infos.json'), indent=4) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/.dev/generate_benchmark_evaluation_script.py b/downstream/mmsegmentation/.dev/generate_benchmark_evaluation_script.py new file mode 100644 index 0000000..d86e94b --- /dev/null +++ b/downstream/mmsegmentation/.dev/generate_benchmark_evaluation_script.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
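The publishing step in `gather_models.py` above removes the optimizer state and appends the first eight sha256 hex digits to the checkpoint name. A condensed sketch of that convention follows (all paths are hypothetical); it builds the final name from a stem rather than calling `out_file.rstrip('.pth')`, because `str.rstrip` strips any trailing '.', 'p', 't', 'h' characters rather than the literal suffix:

```python
import hashlib
import os

import torch


def publish_checkpoint(in_file: str, out_stem: str) -> str:
    # Drop training-only state so the published file is smaller.
    checkpoint = torch.load(in_file, map_location='cpu')
    checkpoint.pop('optimizer', None)
    tmp_file = out_stem + '.pth'
    torch.save(checkpoint, tmp_file)
    # Append the first 8 hex digits of the sha256 hash, following the
    # naming convention used by process_checkpoint() above.
    with open(tmp_file, 'rb') as fp:
        sha = hashlib.sha256(fp.read()).hexdigest()
    final_file = f'{out_stem}-{sha[:8]}.pth'
    os.rename(tmp_file, final_file)
    return final_file


# Hypothetical usage:
# publish_checkpoint('work_dirs/some_config/iter_80000.pth',
#                    'work_dirs/gather/some_config/some_config_20220101_000000')
```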
+import argparse +import os.path as osp + +from mmcv import Config + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert benchmark test model list to script') + parser.add_argument('config', help='test config file path') + parser.add_argument('--port', type=int, default=28171, help='dist port') + parser.add_argument( + '--work-dir', + default='work_dirs/benchmark_evaluation', + help='the dir to save metric') + parser.add_argument( + '--out', + type=str, + default='.dev/benchmark_evaluation.sh', + help='path to save model benchmark script') + + args = parser.parse_args() + return args + + +def process_model_info(model_info, work_dir): + config = model_info['config'].strip() + fname, _ = osp.splitext(osp.basename(config)) + job_name = fname + checkpoint = model_info['checkpoint'].strip() + work_dir = osp.join(work_dir, fname) + if not isinstance(model_info['eval'], list): + evals = [model_info['eval']] + else: + evals = model_info['eval'] + eval = ' '.join(evals) + return dict( + config=config, + job_name=job_name, + checkpoint=checkpoint, + work_dir=work_dir, + eval=eval) + + +def create_test_bash_info(commands, model_test_dict, port, script_name, + partition): + config = model_test_dict['config'] + job_name = model_test_dict['job_name'] + checkpoint = model_test_dict['checkpoint'] + work_dir = model_test_dict['work_dir'] + eval = model_test_dict['eval'] + + echo_info = f'\necho \'{config}\' &' + commands.append(echo_info) + commands.append('\n') + + command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \ + f'CPUS_PER_TASK=2 {script_name} ' + + command_info += f'{partition} ' + command_info += f'{job_name} ' + command_info += f'{config} ' + command_info += f'$CHECKPOINT_DIR/{checkpoint} ' + + command_info += f'--eval {eval} ' + command_info += f'--work-dir {work_dir} ' + command_info += f'--cfg-options dist_params.port={port} ' + command_info += '&' + + commands.append(command_info) + + +def main(): + args = parse_args() + if args.out: + out_suffix = args.out.split('.')[-1] + assert args.out.endswith('.sh'), \ + f'Expected out file path suffix is .sh, but get .{out_suffix}' + + commands = [] + partition_name = 'PARTITION=$1' + commands.append(partition_name) + commands.append('\n') + + checkpoint_root = 'CHECKPOINT_DIR=$2' + commands.append(checkpoint_root) + commands.append('\n') + + script_name = osp.join('tools', 'slurm_test.sh') + port = args.port + work_dir = args.work_dir + + cfg = Config.fromfile(args.config) + + for model_key in cfg: + model_infos = cfg[model_key] + if not isinstance(model_infos, list): + model_infos = [model_infos] + for model_info in model_infos: + print('processing: ', model_info['config']) + model_test_dict = process_model_info(model_info, work_dir) + create_test_bash_info(commands, model_test_dict, port, script_name, + '$PARTITION') + port += 1 + + command_str = ''.join(commands) + if args.out: + with open(args.out, 'w') as f: + f.write(command_str + '\n') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/.dev/generate_benchmark_train_script.py b/downstream/mmsegmentation/.dev/generate_benchmark_train_script.py new file mode 100644 index 0000000..6e8a0ae --- /dev/null +++ b/downstream/mmsegmentation/.dev/generate_benchmark_train_script.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
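`generate_benchmark_evaluation_script.py` above only assembles shell command strings; a stripped-down sketch of the same formatting, using hypothetical placeholder values:

```python
def build_test_command(partition, job_name, config, checkpoint, work_dir,
                       eval_metrics, port):
    # Mirrors the fields concatenated in create_test_bash_info() above.
    return (f'GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh '
            f'{partition} {job_name} {config} $CHECKPOINT_DIR/{checkpoint} '
            f'--eval {" ".join(eval_metrics)} --work-dir {work_dir} '
            f'--cfg-options dist_params.port={port} &')


if __name__ == '__main__':
    # All values below are placeholders for illustration.
    print(build_test_command('$PARTITION', 'some_config',
                             'configs/some_model/some_config.py',
                             'some_model/some_checkpoint.pth',
                             'work_dirs/benchmark_evaluation/some_config',
                             ['mIoU'], 28171))
```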
+import argparse +import os.path as osp + +# Default using 4 gpu when training +config_8gpu_list = [ + 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py', # noqa + 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py', + 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py', +] + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert benchmark model json to script') + parser.add_argument( + 'txt_path', type=str, help='txt path output by benchmark_filter') + parser.add_argument('--port', type=int, default=24727, help='dist port') + parser.add_argument( + '--out', + type=str, + default='.dev/benchmark_train.sh', + help='path to save model benchmark script') + + args = parser.parse_args() + return args + + +def create_train_bash_info(commands, config, script_name, partition, port): + cfg = config.strip() + + # print cfg name + echo_info = f'echo \'{cfg}\' &' + commands.append(echo_info) + commands.append('\n') + + _, model_name = osp.split(osp.dirname(cfg)) + config_name, _ = osp.splitext(osp.basename(cfg)) + # default setting + if cfg in config_8gpu_list: + command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \ + f'CPUS_PER_TASK=2 {script_name} ' + else: + command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \ + f'CPUS_PER_TASK=2 {script_name} ' + command_info += f'{partition} ' + command_info += f'{config_name} ' + command_info += f'{cfg} ' + command_info += f'--cfg-options ' \ + f'checkpoint_config.max_keep_ckpts=1 ' \ + f'dist_params.port={port} ' + command_info += f'--work-dir work_dirs/{model_name}/{config_name} ' + # Let the script shut up + command_info += '>/dev/null &' + + commands.append(command_info) + commands.append('\n') + + +def main(): + args = parse_args() + if args.out: + out_suffix = args.out.split('.')[-1] + assert args.out.endswith('.sh'), \ + f'Expected out file path suffix is .sh, but get .{out_suffix}' + + root_name = './tools' + script_name = osp.join(root_name, 'slurm_train.sh') + port = args.port + partition_name = 'PARTITION=$1' + + commands = [] + commands.append(partition_name) + commands.append('\n') + commands.append('\n') + + with open(args.txt_path, 'r') as f: + model_cfgs = f.readlines() + for i, cfg in enumerate(model_cfgs): + create_train_bash_info(commands, cfg, script_name, '$PARTITION', + port) + port += 1 + + command_str = ''.join(commands) + if args.out: + with open(args.out, 'w') as f: + f.write(command_str) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/.dev/log_collector/example_config.py b/downstream/mmsegmentation/.dev/log_collector/example_config.py new file mode 100644 index 0000000..bc2b4d6 --- /dev/null +++ b/downstream/mmsegmentation/.dev/log_collector/example_config.py @@ -0,0 +1,18 @@ +work_dir = '../../work_dirs' +metric = 'mIoU' + +# specify the log files we would like to collect in `log_items` +log_items = [ + 'segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup', + 'segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr', + 'segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr', + 'segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr' +] +# or specify ignore_keywords, then the folders whose name contain +# `'segformer'` won't be collected +# ignore_keywords = ['segformer'] + +# should not include metric +other_info_keys = ['mAcc'] +markdown_file = 'markdowns/lr_in_trans.json.md' +json_file = 'jsons/trans_in_cnn.json' diff --git a/downstream/mmsegmentation/.dev/log_collector/log_collector.py 
b/downstream/mmsegmentation/.dev/log_collector/log_collector.py new file mode 100644 index 0000000..d0f4080 --- /dev/null +++ b/downstream/mmsegmentation/.dev/log_collector/log_collector.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import datetime +import json +import os +import os.path as osp +from collections import OrderedDict + +from utils import load_config + +# automatically collect all the results + +# The structure of the directory: +# ├── work-dir +# │ ├── config_1 +# │ │ ├── time1.log.json +# │ │ ├── time2.log.json +# │ │ ├── time3.log.json +# │ │ ├── time4.log.json +# │ ├── config_2 +# │ │ ├── time5.log.json +# │ │ ├── time6.log.json +# │ │ ├── time7.log.json +# │ │ ├── time8.log.json + + +def parse_args(): + parser = argparse.ArgumentParser(description='extract info from log.json') + parser.add_argument('config_dir') + args = parser.parse_args() + return args + + +def has_keyword(name: str, keywords: list): + for a_keyword in keywords: + if a_keyword in name: + return True + return False + + +def main(): + args = parse_args() + cfg = load_config(args.config_dir) + work_dir = cfg['work_dir'] + metric = cfg['metric'] + log_items = cfg.get('log_items', []) + ignore_keywords = cfg.get('ignore_keywords', []) + other_info_keys = cfg.get('other_info_keys', []) + markdown_file = cfg.get('markdown_file', None) + json_file = cfg.get('json_file', None) + + if json_file and osp.split(json_file)[0] != '': + os.makedirs(osp.split(json_file)[0], exist_ok=True) + if markdown_file and osp.split(markdown_file)[0] != '': + os.makedirs(osp.split(markdown_file)[0], exist_ok=True) + + assert not (log_items and ignore_keywords), \ + 'log_items and ignore_keywords cannot be specified at the same time' + assert metric not in other_info_keys, \ + 'other_info_keys should not contain metric' + + if ignore_keywords and isinstance(ignore_keywords, str): + ignore_keywords = [ignore_keywords] + if other_info_keys and isinstance(other_info_keys, str): + other_info_keys = [other_info_keys] + if log_items and isinstance(log_items, str): + log_items = [log_items] + + if not log_items: + log_items = [ + item for item in sorted(os.listdir(work_dir)) + if not has_keyword(item, ignore_keywords) + ] + + experiment_info_list = [] + for config_dir in log_items: + preceding_path = os.path.join(work_dir, config_dir) + log_list = [ + item for item in os.listdir(preceding_path) + if item.endswith('.log.json') + ] + log_list = sorted( + log_list, + key=lambda time_str: datetime.datetime.strptime( + time_str, '%Y%m%d_%H%M%S.log.json')) + val_list = [] + last_iter = 0 + for log_name in log_list: + with open(os.path.join(preceding_path, log_name), 'r') as f: + # ignore the info line + f.readline() + all_lines = f.readlines() + val_list.extend([ + json.loads(line) for line in all_lines + if json.loads(line)['mode'] == 'val' + ]) + for index in range(len(all_lines) - 1, -1, -1): + line_dict = json.loads(all_lines[index]) + if line_dict['mode'] == 'train': + last_iter = max(last_iter, line_dict['iter']) + break + + new_log_dict = dict( + method=config_dir, metric_used=metric, last_iter=last_iter) + for index, log in enumerate(val_list, 1): + new_ordered_dict = OrderedDict() + new_ordered_dict['eval_index'] = index + new_ordered_dict[metric] = log[metric] + for key in other_info_keys: + if key in log: + new_ordered_dict[key] = log[key] + val_list[index - 1] = new_ordered_dict + + assert len(val_list) >= 1, \ + f"work dir {config_dir} doesn't contain any evaluation." 
+        new_log_dict['last eval'] = val_list[-1]
+        new_log_dict['best eval'] = max(val_list, key=lambda x: x[metric])
+        experiment_info_list.append(new_log_dict)
+        print(f'{config_dir} is processed')
+
+    if json_file:
+        with open(json_file, 'w') as f:
+            json.dump(experiment_info_list, f, indent=4)
+
+    if markdown_file:
+        lines_to_write = []
+        for index, log in enumerate(experiment_info_list, 1):
+            lines_to_write.append(
+                f"|{index}|{log['method']}|{log['best eval'][metric]}"
+                f"|{log['best eval']['eval_index']}|"
+                f"{log['last eval'][metric]}|"
+                f"{log['last eval']['eval_index']}|{log['last_iter']}|\n")
+        with open(markdown_file, 'w') as f:
+            f.write(f'|exp_num|method|{metric} best|best index|'
+                    f'{metric} last|last index|last iter num|\n')
+            f.write('|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\n')
+            f.writelines(lines_to_write)
+
+    print('processed successfully')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/downstream/mmsegmentation/.dev/log_collector/readme.md b/downstream/mmsegmentation/.dev/log_collector/readme.md
new file mode 100644
index 0000000..41ea235
--- /dev/null
+++ b/downstream/mmsegmentation/.dev/log_collector/readme.md
@@ -0,0 +1,143 @@
+# Log Collector
+
+## Function
+
+Automatically collect logs and write the results to a json file or a markdown file.
+
+If there are several `.log.json` files in one folder, Log Collector assumes that the `.log.json` files other than the first one are resumed from the preceding `.log.json` file. Log Collector returns the result considering all `.log.json` files.
+
+## Usage
+
+To use the log collector, you first need to write a config file for it.
+
+For example:
+
+example_config.py:
+
+```python
+# The work directory that contains the folders with .log.json files.
+work_dir = '../../work_dirs'
+# The metric used to find the best evaluation.
+metric = 'mIoU'
+
+# **Don't specify log_items and ignore_keywords at the same time.**
+# Specify the log files we would like to collect in `log_items`.
+# The folders specified should be subdirectories of `work_dir`.
+log_items = [
+    'segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup',
+    'segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr',
+    'segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr',
+    'segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr'
+]
+# Or specify `ignore_keywords`. The folders whose names contain one
+# of the keywords in the `ignore_keywords` list (e.g., `'segformer'`)
+# won't be collected.
+# ignore_keywords = ['segformer']
+
+# Other log items in .log.json that you want to collect.
+# Should not include the metric above.
+other_info_keys = ["mAcc"]
+# The output markdown file's name.
+markdown_file = 'markdowns/lr_in_trans.json.md'
+# The output json file's name. (optional)
+json_file = 'jsons/trans_in_cnn.json'
+```
+
+The structure of the work-dir directory should look like:
+
+```text
+├── work-dir
+│   ├── folder1
+│   │   ├── time1.log.json
+│   │   ├── time2.log.json
+│   │   ├── time3.log.json
+│   │   ├── time4.log.json
+│   ├── folder2
+│   │   ├── time5.log.json
+│   │   ├── time6.log.json
+│   │   ├── time7.log.json
+│   │   ├── time8.log.json
+```
+
+Then, cd into the log collector folder.
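(Optional) Before launching the collector, you can sanity-check that the config parses by importing the collector's own `load_config` helper from `utils.py`; a small sketch, assuming you are already inside the log collector folder:

```python
from utils import load_config

# Parse example_config.py into a plain dict without running the collector.
cfg = load_config('./example_config.py')
print(cfg['work_dir'], cfg['metric'])
```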
+ +Now you can run log_collector.py by using command: + +```bash +python log_collector.py ./example_config.py +``` + +The output markdown file is like: + +|exp_num|method|mIoU best|best index|mIoU last|last index|last iter num| +|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +|1|segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup|0.2776|10|0.2776|10|160000| +|2|segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr|0.2802|10|0.2802|10|160000| +|3|segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr|0.4943|11|0.4943|11|160000| +|4|segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr|0.4883|11|0.4883|11|160000| + +The output json file is like: +```json +[ + { + "method": "segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup", + "metric_used": "mIoU", + "last_iter": 160000, + "last eval": { + "eval_index": 10, + "mIoU": 0.2776, + "mAcc": 0.3779 + }, + "best eval": { + "eval_index": 10, + "mIoU": 0.2776, + "mAcc": 0.3779 + } + }, + { + "method": "segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr", + "metric_used": "mIoU", + "last_iter": 160000, + "last eval": { + "eval_index": 10, + "mIoU": 0.2802, + "mAcc": 0.3764 + }, + "best eval": { + "eval_index": 10, + "mIoU": 0.2802, + "mAcc": 0.3764 + } + }, + { + "method": "segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr", + "metric_used": "mIoU", + "last_iter": 160000, + "last eval": { + "eval_index": 11, + "mIoU": 0.4943, + "mAcc": 0.6097 + }, + "best eval": { + "eval_index": 11, + "mIoU": 0.4943, + "mAcc": 0.6097 + } + }, + { + "method": "segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr", + "metric_used": "mIoU", + "last_iter": 160000, + "last eval": { + "eval_index": 11, + "mIoU": 0.4883, + "mAcc": 0.6061 + }, + "best eval": { + "eval_index": 11, + "mIoU": 0.4883, + "mAcc": 0.6061 + } + } +] +``` diff --git a/downstream/mmsegmentation/.dev/log_collector/utils.py b/downstream/mmsegmentation/.dev/log_collector/utils.py new file mode 100644 index 0000000..848516a --- /dev/null +++ b/downstream/mmsegmentation/.dev/log_collector/utils.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# modified from https://github.dev/open-mmlab/mmcv +import os.path as osp +import sys +from importlib import import_module + + +def load_config(cfg_dir: str) -> dict: + assert cfg_dir.endswith('.py') + root_path, file_name = osp.split(cfg_dir) + temp_module = osp.splitext(file_name)[0] + sys.path.insert(0, root_path) + mod = import_module(temp_module) + sys.path.pop(0) + cfg_dict = { + k: v + for k, v in mod.__dict__.items() if not k.startswith('__') + } + del sys.modules[temp_module] + return cfg_dict diff --git a/downstream/mmsegmentation/.dev/md2yml.py b/downstream/mmsegmentation/.dev/md2yml.py new file mode 100755 index 0000000..0fa41dc --- /dev/null +++ b/downstream/mmsegmentation/.dev/md2yml.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python + +# Copyright (c) OpenMMLab. All rights reserved. +# This tool is used to update model-index.yml which is required by MIM, and +# will be automatically called as a pre-commit hook. The updating will be +# triggered if any change of model information (.md files in configs/) has been +# detected before a commit. + +import glob +import os +import os.path as osp +import re +import sys + +import mmcv +from lxml import etree + +MMSEG_ROOT = osp.dirname(osp.dirname((osp.dirname(__file__)))) + + +def dump_yaml_and_check_difference(obj, filename, sort_keys=False): + """Dump object to a yaml file, and check if the file content is different + from the original. + + Args: + obj (any): The python object to be dumped. 
+ filename (str): YAML filename to dump the object to. + sort_keys (str); Sort key by dictionary order. + Returns: + Bool: If the target YAML file is different from the original. + """ + + str_dump = mmcv.dump(obj, None, file_format='yaml', sort_keys=sort_keys) + if osp.isfile(filename): + file_exists = True + with open(filename, 'r', encoding='utf-8') as f: + str_orig = f.read() + else: + file_exists = False + str_orig = None + + if file_exists and str_orig == str_dump: + is_different = False + else: + is_different = True + with open(filename, 'w', encoding='utf-8') as f: + f.write(str_dump) + + return is_different + + +def parse_md(md_file): + """Parse .md file and convert it to a .yml file which can be used for MIM. + + Args: + md_file (str): Path to .md file. + Returns: + Bool: If the target YAML file is different from the original. + """ + collection_name = osp.split(osp.dirname(md_file))[1] + configs = os.listdir(osp.dirname(md_file)) + + collection = dict( + Name=collection_name, + Metadata={'Training Data': []}, + Paper={ + 'URL': '', + 'Title': '' + }, + README=md_file, + Code={ + 'URL': '', + 'Version': '' + }) + collection.update({'Converted From': {'Weights': '', 'Code': ''}}) + models = [] + datasets = [] + paper_url = None + paper_title = None + code_url = None + code_version = None + repo_url = None + + # To avoid re-counting number of backbone model in OpenMMLab, + # if certain model in configs folder is backbone whose name is already + # recorded in MMClassification, then the `COLLECTION` dict of this model + # in MMSegmentation should be deleted, and `In Collection` in `Models` + # should be set with head or neck of this config file. + is_backbone = None + + with open(md_file, 'r') as md: + lines = md.readlines() + i = 0 + current_dataset = '' + while i < len(lines): + line = lines[i].strip() + # In latest README.md the title and url are in the third line. + if i == 2: + paper_url = lines[i].split('](')[1].split(')')[0] + paper_title = lines[i].split('](')[0].split('[')[1] + if len(line) == 0: + i += 1 + continue + elif line[:3] == 'Before you create a PR, make sure that your code lints and is formatted by yapf. + +### C++ and CUDA + +We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). diff --git a/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/config.yml b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..aa982e5 --- /dev/null +++ b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,6 @@ +blank_issues_enabled: false + +contact_links: + - name: MMSegmentation Documentation + url: https://mmsegmentation.readthedocs.io + about: Check the docs and FAQ to see if you question is already answered. diff --git a/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/error-report.md b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/error-report.md new file mode 100644 index 0000000..f977b7d --- /dev/null +++ b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/error-report.md @@ -0,0 +1,48 @@ +--- +name: Error report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +Thanks for your error report and we appreciate it a lot. + +**Checklist** + +1. I have searched related issues but cannot get the expected help. +2. The bug has not been fixed in the latest version. + +**Describe the bug** +A clear and concise description of what the bug is. + +**Reproduction** + +1. What command or script did you run? + + ```none + A placeholder for the command. 
+   ```
+
+2. Did you make any modifications to the code or config? Did you understand what you have modified?
+3. What dataset did you use?
+
+**Environment**
+
+1. Please run `python mmseg/utils/collect_env.py` to collect the necessary environment information and paste it here.
+2. You may add additional information that may be helpful for locating the problem, such as
+   - How you installed PyTorch [e.g., pip, conda, source]
+   - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
+
+**Error traceback**
+
+If applicable, paste the error traceback here.
+
+```none
+A placeholder for the traceback.
+```
+
+**Bug fix**
+
+If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
diff --git a/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/feature_request.md b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..ec59b78
--- /dev/null
+++ b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,22 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+# Describe the feature
+
+**Motivation**
+A clear and concise description of the motivation for the feature.
+Ex1. It is inconvenient when [....].
+Ex2. There is a recent paper [....], which is very helpful for [....].
+
+**Related resources**
+If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
+If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated.
diff --git a/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/general_questions.md b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/general_questions.md
new file mode 100644
index 0000000..b5a6451
--- /dev/null
+++ b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/general_questions.md
@@ -0,0 +1,8 @@
+---
+name: General questions
+about: Ask general questions to get help
+title: ''
+labels: ''
+assignees: ''
+
+---
diff --git a/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/reimplementation_questions.md b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/reimplementation_questions.md
new file mode 100644
index 0000000..c82397b
--- /dev/null
+++ b/downstream/mmsegmentation/.github/ISSUE_TEMPLATE/reimplementation_questions.md
@@ -0,0 +1,70 @@
+---
+name: Reimplementation Questions
+about: Ask questions about model reimplementation
+title: ''
+labels: 'reimplementation'
+assignees: ''
+
+---
+
+If you feel we have helped you, give us a STAR! :satisfied:
+
+**Notice**
+
+There are several common situations in reimplementation issues, as listed below:
+
+1. Reimplement a model in the model zoo using the provided configs
+2. Reimplement a model in the model zoo on other datasets (e.g., custom datasets)
+3. Reimplement a custom model but all the components are implemented in MMSegmentation
+4. Reimplement a custom model with new modules implemented by yourself
+
+There are several things to do for the different cases, as below.
+
+- For cases 1 & 3, please follow the steps in the following sections so that we can quickly identify the issue.
+- For cases 2 & 4, please understand that we are not able to help much here because we usually do not know the full code, and users should be responsible for the code they write.
+- One suggestion for cases 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtained in the issue, follow the steps in the following sections, and be as clear as possible so that we can better help you.
+
+**Checklist**
+
+1. I have searched related issues but cannot get the expected help.
+2. The issue has not been fixed in the latest version.
+
+**Describe the issue**
+
+A clear and concise description of the problem you met and what you have done.
+
+**Reproduction**
+
+1. What command or script did you run?
+
+```
+A placeholder for the command.
+```
+
+2. What config did you run?
+
+```
+A placeholder for the config.
+```
+
+3. Did you make any modifications to the code or config? Did you understand what you have modified?
+4. What dataset did you use?
+
+**Environment**
+
+1. Please run `PYTHONPATH=${PWD}:$PYTHONPATH python mmseg/utils/collect_env.py` to collect the necessary environment information and paste it here.
+2. You may add additional information that may be helpful for locating the problem, such as
+   1. How you installed PyTorch [e.g., pip, conda, source]
+   2. Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
+
+**Results**
+
+If applicable, paste the related results here, e.g., what you expect and what you get.
+
+```
+A placeholder for results comparison
+```
+
+**Issue fix**
+
+If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
diff --git a/downstream/mmsegmentation/.github/pull_request_template.md b/downstream/mmsegmentation/.github/pull_request_template.md
new file mode 100644
index 0000000..09d5305
--- /dev/null
+++ b/downstream/mmsegmentation/.github/pull_request_template.md
@@ -0,0 +1,25 @@
+Thanks for your contribution and we appreciate it a lot. The following instructions will help make your pull request healthier and easier to get feedback on. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers.
+
+## Motivation
+
+Please describe the motivation of this PR and the goal you want to achieve through this PR.
+
+## Modification
+
+Please briefly describe what modification is made in this PR.
+
+## BC-breaking (Optional)
+
+Does the modification introduce changes that break the backward compatibility of the downstream repos?
+If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.
+
+## Use cases (Optional)
+
+If this PR introduces a new feature, it is better to list some use cases here and update the documentation.
+
+## Checklist
+
+1. Pre-commit or other linting tools are used to fix the potential lint issues.
+2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure correctness.
+3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMDet3D.
+4.
The documentation has been modified accordingly, like docstring or example tutorials. diff --git a/downstream/mmsegmentation/.github/workflows/build.yml b/downstream/mmsegmentation/.github/workflows/build.yml new file mode 100644 index 0000000..9eccc5c --- /dev/null +++ b/downstream/mmsegmentation/.github/workflows/build.yml @@ -0,0 +1,252 @@ +name: build + +on: + push: + paths-ignore: + - 'demo/**' + - '.dev/**' + - 'docker/**' + - 'tools/**' + - '**.md' + + pull_request: + paths-ignore: + - 'demo/**' + - '.dev/**' + - 'docker/**' + - 'tools/**' + - 'docs/**' + - '**.md' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_cpu: + runs-on: ubuntu-18.04 + strategy: + matrix: + python-version: [3.7] + torch: [1.5.1, 1.6.0, 1.7.0, 1.8.0, 1.9.0] + include: + - torch: 1.5.1 + torch_version: torch1.5 + torchvision: 0.6.1 + - torch: 1.6.0 + torch_version: torch1.6 + torchvision: 0.7.0 + - torch: 1.7.0 + torch_version: torch1.7 + torchvision: 0.8.1 + - torch: 1.8.0 + torch_version: torch1.8 + torchvision: 0.9.0 + - torch: 1.9.0 + torch_version: torch1.9 + torchvision: 0.10.0 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: pip install pip --upgrade + - name: Install Pillow + run: pip install Pillow==6.2.2 + if: ${{matrix.torchvision == '0.4.2'}} + - name: Install PyTorch + run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html + - name: Install MMCV + run: | + pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/${{matrix.torch_version}}/index.html + python -c 'import mmcv; print(mmcv.__version__)' + - name: Install unittest dependencies + run: | + pip install -r requirements.txt + - name: Build and install + run: rm -rf .eggs && pip install -e . 
+ - name: Run unittests and generate coverage report + run: | + pip install timm + coverage run --branch --source mmseg -m pytest tests/ + coverage xml + coverage report -m + if: ${{matrix.torch >= '1.5.0'}} + - name: Skip timm unittests and generate coverage report + run: | + coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py + coverage xml + coverage report -m + if: ${{matrix.torch < '1.5.0'}} + + build_cuda101: + runs-on: ubuntu-18.04 + container: + image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel + + strategy: + matrix: + python-version: [3.7] + torch: + [ + 1.5.1+cu101, + 1.6.0+cu101, + 1.7.0+cu101, + 1.8.0+cu101 + ] + include: + - torch: 1.5.1+cu101 + torch_version: torch1.5 + torchvision: 0.6.1+cu101 + - torch: 1.6.0+cu101 + torch_version: torch1.6 + torchvision: 0.7.0+cu101 + - torch: 1.7.0+cu101 + torch_version: torch1.7 + torchvision: 0.8.1+cu101 + - torch: 1.8.0+cu101 + torch_version: torch1.8 + torchvision: 0.9.0+cu101 + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install system dependencies + run: | + apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev + apt-get clean + rm -rf /var/lib/apt/lists/* + - name: Install Pillow + run: python -m pip install Pillow==6.2.2 + if: ${{matrix.torchvision < 0.5}} + - name: Install PyTorch + run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html + - name: Install mmseg dependencies + run: | + python -V + python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/${{matrix.torch_version}}/index.html + python -m pip install -r requirements.txt + python -c 'import mmcv; print(mmcv.__version__)' + - name: Build and install + run: | + rm -rf .eggs + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 pip install . 
+ - name: Run unittests and generate coverage report + run: | + python -m pip install timm + coverage run --branch --source mmseg -m pytest tests/ + coverage xml + coverage report -m + if: ${{matrix.torch >= '1.5.0'}} + - name: Skip timm unittests and generate coverage report + run: | + coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py + coverage xml + coverage report -m + if: ${{matrix.torch < '1.5.0'}} + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1.0.10 + with: + file: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + build_cuda102: + runs-on: ubuntu-18.04 + container: + image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel + + strategy: + matrix: + python-version: [3.6, 3.7, 3.8, 3.9] + torch: [1.9.0+cu102] + include: + - torch: 1.9.0+cu102 + torch_version: torch1.9 + torchvision: 0.10.0+cu102 + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install system dependencies + run: | + apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 + apt-get clean + rm -rf /var/lib/apt/lists/* + - name: Install Pillow + run: python -m pip install Pillow==6.2.2 + if: ${{matrix.torchvision < 0.5}} + - name: Install PyTorch + run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html + - name: Install mmseg dependencies + run: | + python -V + python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/${{matrix.torch_version}}/index.html + python -m pip install -r requirements.txt + python -c 'import mmcv; print(mmcv.__version__)' + - name: Build and install + run: | + rm -rf .eggs + python setup.py check -m -s + TORCH_CUDA_ARCH_LIST=7.0 pip install . + - name: Run unittests and generate coverage report + run: | + python -m pip install timm + coverage run --branch --source mmseg -m pytest tests/ + coverage xml + coverage report -m + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + files: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + test_windows: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [windows-2022] + python: [3.8] + platform: [cpu, cu111] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Upgrade pip + run: pip install pip --upgrade --user + - name: Install OpenCV + run: pip install opencv-python>=3 + - name: Install PyTorch + # As a complement to Linux CI, we test on PyTorch LTS version + run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html + - name: Install MMCV + run: | + pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full + - name: Install unittest dependencies + run: pip install -r requirements/tests.txt -r requirements/optional.txt + - name: Build and install + run: pip install -e . 
+ - name: Run unittests + run: | + python -m pip install timm + coverage run --branch --source mmseg -m pytest tests/ + - name: Generate coverage report + run: | + coverage xml + coverage report -m diff --git a/downstream/mmsegmentation/.github/workflows/deploy.yml b/downstream/mmsegmentation/.github/workflows/deploy.yml new file mode 100644 index 0000000..ab64085 --- /dev/null +++ b/downstream/mmsegmentation/.github/workflows/deploy.yml @@ -0,0 +1,26 @@ +name: deploy + +on: push + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-n-publish: + runs-on: ubuntu-latest + if: startsWith(github.event.ref, 'refs/tags') + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Build MMSegmentation + run: | + pip install wheel + python setup.py sdist bdist_wheel + - name: Publish distribution to PyPI + run: | + pip install twine + twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }} diff --git a/downstream/mmsegmentation/.github/workflows/lint.yml b/downstream/mmsegmentation/.github/workflows/lint.yml new file mode 100644 index 0000000..7f7a309 --- /dev/null +++ b/downstream/mmsegmentation/.github/workflows/lint.yml @@ -0,0 +1,31 @@ +name: lint + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install pre-commit hook + run: | + pip install pre-commit + pre-commit install + - name: Linting + run: | + sudo apt-add-repository ppa:brightbox/ruby-ng -y + sudo apt-get update + sudo apt-get install -y ruby2.7 + pre-commit run --all-files + - name: Check docstring coverage + run: | + pip install interrogate + interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --exclude mmseg/ops --ignore-regex "__repr__" --fail-under 80 mmseg diff --git a/downstream/mmsegmentation/.gitignore b/downstream/mmsegmentation/.gitignore new file mode 100644 index 0000000..2c1ffb5 --- /dev/null +++ b/downstream/mmsegmentation/.gitignore @@ -0,0 +1,119 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/en/_build/ +docs/zh_cn/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +data +.vscode +.idea + +# custom +*.pkl +*.pkl.json +*.log.json +work_dirs/ +mmseg/.mim + +# Pytorch +*.pth diff --git a/downstream/mmsegmentation/.owners.yml b/downstream/mmsegmentation/.owners.yml new file mode 100644 index 0000000..2b1508a --- /dev/null +++ b/downstream/mmsegmentation/.owners.yml @@ -0,0 +1,9 @@ +assign: + strategy: + # random + daily-shift-based + assignees: + - MeowZheng + - MengzhangLI + - linfangjian01 + - xiaoachen98 diff --git a/downstream/mmsegmentation/.pre-commit-config.yaml b/downstream/mmsegmentation/.pre-commit-config.yaml new file mode 100644 index 0000000..9448c7a --- /dev/null +++ b/downstream/mmsegmentation/.pre-commit-config.yaml @@ -0,0 +1,57 @@ +repos: + - repo: https://gitlab.com/pycqa/flake8.git + rev: 3.8.3 + hooks: + - id: flake8 + - repo: https://github.com/PyCQA/isort + rev: 5.10.1 + hooks: + - id: isort + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.30.0 + hooks: + - id: yapf + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.1.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: double-quote-string-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: ["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] + - repo: https://github.com/markdownlint/markdownlint + rev: v0.11.0 + hooks: + - id: markdownlint + args: ["-r", "~MD002,~MD013,~MD029,~MD033,~MD034", + "-t", "allow_different_nesting"] + - repo: https://github.com/codespell-project/codespell + rev: v2.1.0 + hooks: + - id: codespell + - repo: https://github.com/myint/docformatter + rev: v1.3.1 + hooks: + - id: docformatter + args: ["--in-place", "--wrap-descriptions", "79"] + - repo: local + hooks: + - id: update-model-index + name: update-model-index + description: Collect model information and update model-index.yml + entry: .dev/md2yml.py + additional_dependencies: [mmcv, lxml] + language: python + files: ^configs/.*\.md$ + require_serial: true + - repo: https://github.com/open-mmlab/pre-commit-hooks + rev: v0.2.0 # Use the rev to fix revision + hooks: + - id: check-algo-readme + - id: check-copyright + args: ["mmseg", "tools", "tests", "demo"] # the dir_to_check with expected directory to check diff --git a/downstream/mmsegmentation/.readthedocs.yml b/downstream/mmsegmentation/.readthedocs.yml new file mode 100644 index 0000000..6cfbf5d --- /dev/null +++ b/downstream/mmsegmentation/.readthedocs.yml @@ -0,0 +1,9 @@ +version: 2 + +formats: all + +python: + version: 3.7 + install: + - requirements: requirements/docs.txt + - requirements: requirements/readthedocs.txt diff --git 
a/downstream/mmsegmentation/CITATION.cff b/downstream/mmsegmentation/CITATION.cff new file mode 100644 index 0000000..cfd7cab --- /dev/null +++ b/downstream/mmsegmentation/CITATION.cff @@ -0,0 +1,8 @@ +cff-version: 1.2.0 +message: "If you use this software, please cite it as below." +authors: + - name: "MMSegmentation Contributors" +title: "OpenMMLab Semantic Segmentation Toolbox and Benchmark" +date-released: 2020-07-10 +url: "https://github.com/open-mmlab/mmsegmentation" +license: Apache-2.0 diff --git a/downstream/mmsegmentation/LICENSE b/downstream/mmsegmentation/LICENSE new file mode 100644 index 0000000..38e625b --- /dev/null +++ b/downstream/mmsegmentation/LICENSE @@ -0,0 +1,203 @@ +Copyright 2020 The MMSegmentation Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 The MMSegmentation Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/downstream/mmsegmentation/MANIFEST.in b/downstream/mmsegmentation/MANIFEST.in new file mode 100644 index 0000000..e307d81 --- /dev/null +++ b/downstream/mmsegmentation/MANIFEST.in @@ -0,0 +1,4 @@ +include requirements/*.txt +include mmseg/.mim/model-index.yml +recursive-include mmseg/.mim/configs *.py *.yml +recursive-include mmseg/.mim/tools *.py *.sh diff --git a/downstream/mmsegmentation/README.md b/downstream/mmsegmentation/README.md new file mode 100644 index 0000000..cfa2720 --- /dev/null +++ b/downstream/mmsegmentation/README.md @@ -0,0 +1,204 @@ +
+<div align="center">
+  <b>OpenMMLab website</b> <sup>HOT</sup>
+  &nbsp;&nbsp;&nbsp;&nbsp;
+  <b>OpenMMLab platform</b> <sup>TRY IT OUT</sup>
+</div>
+
+[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/mmsegmentation)](https://pypi.org/project/mmsegmentation/)
+[![PyPI](https://img.shields.io/pypi/v/mmsegmentation)](https://pypi.org/project/mmsegmentation)
+[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmsegmentation.readthedocs.io/en/latest/)
+[![badge](https://github.com/open-mmlab/mmsegmentation/workflows/build/badge.svg)](https://github.com/open-mmlab/mmsegmentation/actions)
+[![codecov](https://codecov.io/gh/open-mmlab/mmsegmentation/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmsegmentation)
+[![license](https://img.shields.io/github/license/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/blob/master/LICENSE)
+[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues)
+[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmsegmentation.svg)](https://github.com/open-mmlab/mmsegmentation/issues)
+
+Documentation: https://mmsegmentation.readthedocs.io/
+
+English | [简体中文](README_zh-CN.md)
+
+## Introduction
+
+MMSegmentation is an open source semantic segmentation toolbox based on PyTorch.
+It is a part of the OpenMMLab project.
+
+The master branch works with **PyTorch 1.5+**.
+
+![demo image](resources/seg_demo.gif)
+
+### Major features
+
+- **Unified Benchmark**
+
+  We provide a unified benchmark toolbox for various semantic segmentation methods.
+
+- **Modular Design**
+
+  We decompose the semantic segmentation framework into different components, so that customized semantic segmentation frameworks can easily be constructed by combining different modules.
+
+- **Support of multiple methods out of the box**
+
+  The toolbox directly supports popular and contemporary semantic segmentation frameworks, *e.g.* PSPNet, DeepLabV3, PSANet, DeepLabV3+, etc.
+
+- **High efficiency**
+
+  The training speed is faster than or comparable to other codebases.
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Changelog
+
+v0.23.0 was released on 4/1/2022.
+Please refer to [changelog.md](docs/en/changelog.md) for details and release history.
+
+## Benchmark and model zoo
+
+Results and models are available in the [model zoo](docs/en/model_zoo.md).
+
+Supported backbones:
+
+- [x] ResNet (CVPR'2016)
+- [x] ResNeXt (CVPR'2017)
+- [x] [HRNet (CVPR'2019)](configs/hrnet)
+- [x] [ResNeSt (ArXiv'2020)](configs/resnest)
+- [x] [MobileNetV2 (CVPR'2018)](configs/mobilenet_v2)
+- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3)
+- [x] [Vision Transformer (ICLR'2021)](configs/vit)
+- [x] [Swin Transformer (ICCV'2021)](configs/swin)
+- [x] [Twins (NeurIPS'2021)](configs/twins)
+- [x] [ConvNeXt (CVPR'2022)](configs/convnext)
+- [x] [BEiT (ICLR'2022)](configs/beit)
+
+Supported methods:
+
+- [x] [FCN (CVPR'2015/TPAMI'2017)](configs/fcn)
+- [x] [ERFNet (T-ITS'2017)](configs/erfnet)
+- [x] [UNet (MICCAI'2016/Nat.
Methods'2019)](configs/unet) +- [x] [PSPNet (CVPR'2017)](configs/pspnet) +- [x] [DeepLabV3 (ArXiv'2017)](configs/deeplabv3) +- [x] [BiSeNetV1 (ECCV'2018)](configs/bisenetv1) +- [x] [PSANet (ECCV'2018)](configs/psanet) +- [x] [DeepLabV3+ (CVPR'2018)](configs/deeplabv3plus) +- [x] [UPerNet (ECCV'2018)](configs/upernet) +- [x] [ICNet (ECCV'2018)](configs/icnet) +- [x] [NonLocal Net (CVPR'2018)](configs/nonlocal_net) +- [x] [EncNet (CVPR'2018)](configs/encnet) +- [x] [Semantic FPN (CVPR'2019)](configs/sem_fpn) +- [x] [DANet (CVPR'2019)](configs/danet) +- [x] [APCNet (CVPR'2019)](configs/apcnet) +- [x] [EMANet (ICCV'2019)](configs/emanet) +- [x] [CCNet (ICCV'2019)](configs/ccnet) +- [x] [DMNet (ICCV'2019)](configs/dmnet) +- [x] [ANN (ICCV'2019)](configs/ann) +- [x] [GCNet (ICCVW'2019/TPAMI'2020)](configs/gcnet) +- [x] [FastFCN (ArXiv'2019)](configs/fastfcn) +- [x] [Fast-SCNN (ArXiv'2019)](configs/fastscnn) +- [x] [ISANet (ArXiv'2019/IJCV'2021)](configs/isanet) +- [x] [OCRNet (ECCV'2020)](configs/ocrnet) +- [x] [DNLNet (ECCV'2020)](configs/dnlnet) +- [x] [PointRend (CVPR'2020)](configs/point_rend) +- [x] [CGNet (TIP'2020)](configs/cgnet) +- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2) +- [x] [STDC (CVPR'2021)](configs/stdc) +- [x] [SETR (CVPR'2021)](configs/setr) +- [x] [DPT (ArXiv'2021)](configs/dpt) +- [x] [Segmenter (ICCV'2021)](configs/segmenter) +- [x] [SegFormer (NeurIPS'2021)](configs/segformer) +- [x] [K-Net (NeurIPS'2021)](configs/knet) + +Supported datasets: + +- [x] [Cityscapes](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#cityscapes) +- [x] [PASCAL VOC](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#pascal-voc) +- [x] [ADE20K](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#ade20k) +- [x] [Pascal Context](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#pascal-context) +- [x] [COCO-Stuff 10k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#coco-stuff-10k) +- [x] [COCO-Stuff 164k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#coco-stuff-164k) +- [x] [CHASE_DB1](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#chase-db1) +- [x] [DRIVE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#drive) +- [x] [HRF](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#hrf) +- [x] [STARE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#stare) +- [x] [Dark Zurich](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#dark-zurich) +- [x] [Nighttime Driving](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#nighttime-driving) +- [x] [LoveDA](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#loveda) +- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isprs-potsdam) +- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isprs-vaihingen) +- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isaid) + +## Installation + +Please refer to [get_started.md](docs/en/get_started.md#installation) for installation and [dataset_prepare.md](docs/en/dataset_prepare.md#prepare-datasets) for dataset preparation. 
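+
+After installation, the high-level inference API can be used as a quick sanity check. The snippet below is a minimal sketch: the config, checkpoint, and image paths are placeholders, to be replaced with a config from `configs/`, a checkpoint downloaded from the model zoo, and a local test image.
+
+```python
+# Minimal inference sketch; the paths below are placeholders, not shipped files.
+from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
+
+config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
+checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes.pth'
+
+# Build the segmentor from a config file and a checkpoint file.
+model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
+
+# Run inference on a single image and visualize the predicted mask.
+result = inference_segmentor(model, 'demo/demo.png')
+show_result_pyplot(model, 'demo/demo.png', result)
+```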
+
+## Get Started
+
+Please see [train.md](docs/en/train.md) and [inference.md](docs/en/inference.md) for the basic usage of MMSegmentation.
+There are also tutorials for [customizing datasets](docs/en/tutorials/customize_datasets.md), [designing data pipelines](docs/en/tutorials/data_pipeline.md), [customizing modules](docs/en/tutorials/customize_models.md), and [customizing runtime settings](docs/en/tutorials/customize_runtime.md).
+We also provide many [training tricks](docs/en/tutorials/training_tricks.md) for better training and [useful tools](docs/en/useful_tools.md) for deployment.
+
+A Colab tutorial is also provided. You may preview the notebook [here](demo/MMSegmentation_Tutorial.ipynb) or run it directly [on Colab](https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/master/demo/MMSegmentation_Tutorial.ipynb).
+
+Please refer to [FAQ](docs/en/faq.md) for frequently asked questions.
+
+## Citation
+
+If you find this project useful in your research, please consider citing:
+
+```bibtex
+@misc{mmseg2020,
+    title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark},
+    author={MMSegmentation Contributors},
+    howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}},
+    year={2020}
+}
+```
+
+## Contributing
+
+We appreciate all contributions to improve MMSegmentation. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guidelines.
+
+## Acknowledgement
+
+MMSegmentation is an open source project that welcomes any contribution and feedback.
+We hope that the toolbox and benchmark can serve the growing research community by providing a flexible and standardized toolkit to reimplement existing methods and to develop new semantic segmentation methods.
+
+## Projects in OpenMMLab
+
+- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
+- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
+- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
+- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
+- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
+- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
+- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
+- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
+- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
+- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
+- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
+- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab few-shot learning toolbox and benchmark.
+- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
+- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
+- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab Model Deployment Framework. diff --git a/downstream/mmsegmentation/configs/_base_/datasets/ade20k.py b/downstream/mmsegmentation/configs/_base_/datasets/ade20k.py new file mode 100644 index 0000000..efc8b4b --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/ade20k.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'ADE20KDataset' +data_root = 'data/ade/ADEChallengeData2016' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/ade20k_640x640.py b/downstream/mmsegmentation/configs/_base_/datasets/ade20k_640x640.py new file mode 100644 index 0000000..14a4bb0 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/ade20k_640x640.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'ADE20KDataset' +data_root = 'data/ade/ADEChallengeData2016' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (640, 640) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2560, 640), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + 
transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/chase_db1.py b/downstream/mmsegmentation/configs/_base_/datasets/chase_db1.py new file mode 100644 index 0000000..298594e --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/chase_db1.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'ChaseDB1Dataset' +data_root = 'data/CHASE_DB1' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (960, 999) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/cityscapes.py b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes.py new file mode 100644 index 0000000..f21867c --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', 
size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/train', + ann_dir='gtFine/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_1024x1024.py b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_1024x1024.py new file mode 100644 index 0000000..f98d929 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_1024x1024.py @@ -0,0 +1,35 @@ +_base_ = './cityscapes.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_768x768.py b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_768x768.py new file mode 100644 index 0000000..fde9d7c --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_768x768.py @@ -0,0 +1,35 @@ +_base_ = './cityscapes.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (768, 768) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + 
img_scale=(2049, 1025), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_769x769.py b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_769x769.py new file mode 100644 index 0000000..336c7b2 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_769x769.py @@ -0,0 +1,35 @@ +_base_ = './cityscapes.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (769, 769) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2049, 1025), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_832x832.py b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_832x832.py new file mode 100644 index 0000000..b9325cc --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/cityscapes_832x832.py @@ -0,0 +1,35 @@ +_base_ = './cityscapes.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (832, 832) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/coco-stuff10k.py b/downstream/mmsegmentation/configs/_base_/datasets/coco-stuff10k.py new file 
mode 100644 index 0000000..ec04969 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/coco-stuff10k.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'COCOStuffDataset' +data_root = 'data/coco_stuff10k' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + reduce_zero_label=True, + img_dir='images/train2014', + ann_dir='annotations/train2014', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + reduce_zero_label=True, + img_dir='images/test2014', + ann_dir='annotations/test2014', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + reduce_zero_label=True, + img_dir='images/test2014', + ann_dir='annotations/test2014', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/coco-stuff164k.py b/downstream/mmsegmentation/configs/_base_/datasets/coco-stuff164k.py new file mode 100644 index 0000000..a6a38f2 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/coco-stuff164k.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'COCOStuffDataset' +data_root = 'data/coco_stuff164k' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/train2017', + ann_dir='annotations/train2017', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/val2017', + 
ann_dir='annotations/val2017', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/val2017', + ann_dir='annotations/val2017', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/drive.py b/downstream/mmsegmentation/configs/_base_/datasets/drive.py new file mode 100644 index 0000000..06e8ff6 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/drive.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'DRIVEDataset' +data_root = 'data/DRIVE' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (584, 565) +crop_size = (64, 64) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/hrf.py b/downstream/mmsegmentation/configs/_base_/datasets/hrf.py new file mode 100644 index 0000000..242d790 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/hrf.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'HRFDataset' +data_root = 'data/HRF' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (2336, 3504) +crop_size = (256, 256) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + 
samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/isaid.py b/downstream/mmsegmentation/configs/_base_/datasets/isaid.py new file mode 100644 index 0000000..8e4c26a --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/isaid.py @@ -0,0 +1,62 @@ +# dataset settings +dataset_type = 'iSAIDDataset' +data_root = 'data/iSAID' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +""" +This crop_size setting is followed by the implementation of +`PointFlow: Flowing Semantics Through Points for Aerial Image +Segmentation `_. +""" + +crop_size = (896, 896) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(896, 896), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(896, 896), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/train', + ann_dir='ann_dir/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/loveda.py b/downstream/mmsegmentation/configs/_base_/datasets/loveda.py new file mode 100644 index 0000000..e553356 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/loveda.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'LoveDADataset' +data_root = 'data/loveDA' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1024, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/train', + ann_dir='ann_dir/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/pascal_context.py b/downstream/mmsegmentation/configs/_base_/datasets/pascal_context.py new file mode 100644 index 0000000..ff65bad --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/pascal_context.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'PascalContextDataset' +data_root = 'data/VOCdevkit/VOC2010/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (520, 520) +crop_size = (480, 480) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/pascal_context_59.py b/downstream/mmsegmentation/configs/_base_/datasets/pascal_context_59.py new file mode 100644 index 0000000..37585ab --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/pascal_context_59.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'PascalContextDataset59' +data_root = 'data/VOCdevkit/VOC2010/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (520, 520) +crop_size = (480, 480) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/pascal_voc12.py b/downstream/mmsegmentation/configs/_base_/datasets/pascal_voc12.py new file mode 100644 index 0000000..ba1d42d --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/pascal_voc12.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'PascalVOCDataset' +data_root = 'data/VOCdevkit/VOC2012' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/val.txt', + pipeline=test_pipeline)) diff --git 
a/downstream/mmsegmentation/configs/_base_/datasets/pascal_voc12_aug.py b/downstream/mmsegmentation/configs/_base_/datasets/pascal_voc12_aug.py new file mode 100644 index 0000000..3f23b67 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/pascal_voc12_aug.py @@ -0,0 +1,9 @@ +_base_ = './pascal_voc12.py' +# dataset settings +data = dict( + train=dict( + ann_dir=['SegmentationClass', 'SegmentationClassAug'], + split=[ + 'ImageSets/Segmentation/train.txt', + 'ImageSets/Segmentation/aug.txt' + ])) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/potsdam.py b/downstream/mmsegmentation/configs/_base_/datasets/potsdam.py new file mode 100644 index 0000000..f74c4a5 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/potsdam.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'PotsdamDataset' +data_root = 'data/potsdam' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(512, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/train', + ann_dir='ann_dir/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/stare.py b/downstream/mmsegmentation/configs/_base_/datasets/stare.py new file mode 100644 index 0000000..3f71b25 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/stare.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'STAREDataset' +data_root = 'data/STARE' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (605, 700) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + 
transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/datasets/vaihingen.py b/downstream/mmsegmentation/configs/_base_/datasets/vaihingen.py new file mode 100644 index 0000000..c0df282 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/datasets/vaihingen.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'ISPRSDataset' +data_root = 'data/vaihingen' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(512, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/train', + ann_dir='ann_dir/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/_base_/default_runtime.py b/downstream/mmsegmentation/configs/_base_/default_runtime.py new file mode 100644 index 0000000..b564cc4 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/default_runtime.py @@ -0,0 +1,14 @@ +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True diff --git a/downstream/mmsegmentation/configs/_base_/models/ann_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/ann_r50-d8.py new file mode 100644 index 0000000..a2cb653 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/ann_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = 
dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='ANNHead', + in_channels=[1024, 2048], + in_index=[2, 3], + channels=512, + project_channels=256, + query_scales=(1, ), + key_pool_scales=(1, 3, 6, 8), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/apcnet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/apcnet_r50-d8.py new file mode 100644 index 0000000..c8f5316 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/apcnet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='APCHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/bisenetv1_r18-d32.py b/downstream/mmsegmentation/configs/_base_/models/bisenetv1_r18-d32.py new file mode 100644 index 0000000..4069864 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/bisenetv1_r18-d32.py @@ -0,0 +1,68 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='BiSeNetV1', + in_channels=3, + context_channels=(128, 256, 512), + spatial_channels=(64, 64, 64, 128), + out_indices=(0, 1, 2), + out_channels=256, + backbone_cfg=dict( + type='ResNet', + in_channels=3, + depth=18, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + norm_cfg=norm_cfg, + align_corners=False, + init_cfg=None), + decode_head=dict( + type='FCNHead', + in_channels=256, + in_index=0, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + 
align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=19, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/bisenetv2.py b/downstream/mmsegmentation/configs/_base_/models/bisenetv2.py new file mode 100644 index 0000000..f8fffee --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/bisenetv2.py @@ -0,0 +1,80 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='BiSeNetV2', + detail_channels=(64, 64, 128), + semantic_channels=(16, 32, 64, 128), + semantic_expansion_ratio=6, + bga_channels=128, + out_indices=(0, 1, 2, 3, 4), + init_cfg=None, + align_corners=False), + decode_head=dict( + type='FCNHead', + in_channels=128, + in_index=0, + channels=1024, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=16, + channels=16, + num_convs=2, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=32, + channels=64, + num_convs=2, + num_classes=19, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=64, + channels=256, + num_convs=2, + num_classes=19, + in_index=3, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=128, + channels=1024, + num_convs=2, + num_classes=19, + in_index=4, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/ccnet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/ccnet_r50-d8.py new file mode 100644 index 0000000..794148f --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/ccnet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='CCHead', + in_channels=2048, + 
in_index=3, + channels=512, + recurrence=2, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/cgnet.py b/downstream/mmsegmentation/configs/_base_/models/cgnet.py new file mode 100644 index 0000000..eff8d94 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/cgnet.py @@ -0,0 +1,35 @@ +# model settings +norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='CGNet', + norm_cfg=norm_cfg, + in_channels=3, + num_channels=(32, 64, 128), + num_blocks=(3, 21), + dilations=(2, 4), + reductions=(8, 16)), + decode_head=dict( + type='FCNHead', + in_channels=256, + in_index=2, + channels=256, + num_convs=0, + concat_input=False, + dropout_ratio=0, + num_classes=19, + norm_cfg=norm_cfg, + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0, + class_weight=[ + 2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352, + 10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905, + 10.347791, 6.3927646, 10.226669, 10.241062, 10.280587, + 10.396974, 10.055647 + ])), + # model training and testing settings + train_cfg=dict(sampler=None), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/danet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/danet_r50-d8.py new file mode 100644 index 0000000..2c93493 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/danet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DAHead', + in_channels=2048, + in_index=3, + channels=512, + pam_channels=64, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/deeplabv3_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/deeplabv3_r50-d8.py new file mode 100644 index 0000000..d7a43be --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/deeplabv3_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + 
backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='ASPPHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/deeplabv3_unet_s5-d16.py b/downstream/mmsegmentation/configs/_base_/models/deeplabv3_unet_s5-d16.py new file mode 100644 index 0000000..0cd2629 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/deeplabv3_unet_s5-d16.py @@ -0,0 +1,50 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( + type='ASPPHead', + in_channels=64, + in_index=4, + channels=16, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/downstream/mmsegmentation/configs/_base_/models/deeplabv3plus_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/deeplabv3plus_r50-d8.py new file mode 100644 index 0000000..050e39e --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/deeplabv3plus_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DepthwiseSeparableASPPHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + c1_in_channels=256, + c1_channels=48, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + 
in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/dmnet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/dmnet_r50-d8.py new file mode 100644 index 0000000..d22ba52 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/dmnet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DMHead', + in_channels=2048, + in_index=3, + channels=512, + filter_sizes=(1, 3, 5, 7), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/dnl_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/dnl_r50-d8.py new file mode 100644 index 0000000..edb4c17 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/dnl_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DNLHead', + in_channels=2048, + in_index=3, + channels=512, + dropout_ratio=0.1, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/dpt_vit-b16.py b/downstream/mmsegmentation/configs/_base_/models/dpt_vit-b16.py new file mode 100644 index 0000000..dfd48a9 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/dpt_vit-b16.py @@ -0,0 +1,31 @@ +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='pretrain/vit-b16_p16_224-80ecf9dd.pth', # noqa + backbone=dict( + 
type='VisionTransformer', + img_size=224, + embed_dims=768, + num_layers=12, + num_heads=12, + out_indices=(2, 5, 8, 11), + final_norm=False, + with_cls_token=True, + output_cls_token=True), + decode_head=dict( + type='DPTHead', + in_channels=(768, 768, 768, 768), + channels=256, + embed_dims=768, + post_process_channels=[96, 192, 384, 768], + num_classes=150, + readout_type='project', + input_transform='multiple_select', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=None, + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) # yapf: disable diff --git a/downstream/mmsegmentation/configs/_base_/models/emanet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/emanet_r50-d8.py new file mode 100644 index 0000000..26adcd4 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/emanet_r50-d8.py @@ -0,0 +1,47 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='EMAHead', + in_channels=2048, + in_index=3, + channels=256, + ema_channels=512, + num_bases=64, + num_stages=3, + momentum=0.1, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/encnet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/encnet_r50-d8.py new file mode 100644 index 0000000..be77712 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/encnet_r50-d8.py @@ -0,0 +1,48 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='EncHead', + in_channels=[512, 1024, 2048], + in_index=(1, 2, 3), + channels=512, + num_codes=32, + use_se_loss=True, + add_lateral=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_se_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff 
--git a/downstream/mmsegmentation/configs/_base_/models/erfnet_fcn.py b/downstream/mmsegmentation/configs/_base_/models/erfnet_fcn.py new file mode 100644 index 0000000..7f2e9bf --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/erfnet_fcn.py @@ -0,0 +1,32 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='ERFNet', + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + init_cfg=None), + decode_head=dict( + type='FCNHead', + in_channels=16, + channels=128, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/fast_scnn.py b/downstream/mmsegmentation/configs/_base_/models/fast_scnn.py new file mode 100644 index 0000000..8e89d91 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/fast_scnn.py @@ -0,0 +1,57 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='FastSCNN', + downsample_dw_channels=(32, 48), + global_in_channels=64, + global_block_channels=(64, 96, 128), + global_block_strides=(2, 2, 1), + global_out_channels=128, + higher_in_channels=64, + lower_in_channels=128, + fusion_out_channels=128, + out_indices=(0, 1, 2), + norm_cfg=norm_cfg, + align_corners=False), + decode_head=dict( + type='DepthwiseSeparableFCNHead', + in_channels=128, + channels=128, + concat_input=False, + num_classes=19, + in_index=-1, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=32, + num_convs=1, + num_classes=19, + in_index=-2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=64, + channels=32, + num_convs=1, + num_classes=19, + in_index=-3, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/fastfcn_r50-d32_jpu_psp.py b/downstream/mmsegmentation/configs/_base_/models/fastfcn_r50-d32_jpu_psp.py new file mode 100644 index 0000000..9dc8609 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/fastfcn_r50-d32_jpu_psp.py @@ -0,0 +1,53 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + dilations=(1, 1, 2, 4), + strides=(1, 2, 2, 2), + out_indices=(1, 2, 3), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + neck=dict( + type='JPU', + in_channels=(512, 1024, 2048), + 
mid_channels=512, + start_level=0, + end_level=-1, + dilations=(1, 2, 4, 8), + align_corners=False, + norm_cfg=norm_cfg), + decode_head=dict( + type='PSPHead', + in_channels=2048, + in_index=2, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=1, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/fcn_hr18.py b/downstream/mmsegmentation/configs/_base_/models/fcn_hr18.py new file mode 100644 index 0000000..c3e299b --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/fcn_hr18.py @@ -0,0 +1,52 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://msra/hrnetv2_w18', + backbone=dict( + type='HRNet', + norm_cfg=norm_cfg, + norm_eval=False, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144)))), + decode_head=dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + channels=sum([18, 36, 72, 144]), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/fcn_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/fcn_r50-d8.py new file mode 100644 index 0000000..5e98f6c --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/fcn_r50-d8.py @@ -0,0 +1,45 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='FCNHead', + in_channels=2048, + in_index=3, + channels=512, + num_convs=2, + concat_input=True, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing 
settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/fcn_unet_s5-d16.py b/downstream/mmsegmentation/configs/_base_/models/fcn_unet_s5-d16.py new file mode 100644 index 0000000..a33e797 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/fcn_unet_s5-d16.py @@ -0,0 +1,51 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( + type='FCNHead', + in_channels=64, + in_index=4, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/downstream/mmsegmentation/configs/_base_/models/fpn_r50.py b/downstream/mmsegmentation/configs/_base_/models/fpn_r50.py new file mode 100644 index 0000000..86ab327 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/fpn_r50.py @@ -0,0 +1,36 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=4), + decode_head=dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/gcnet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/gcnet_r50-d8.py new file mode 100644 index 0000000..3d2ad69 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/gcnet_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='GCHead', + in_channels=2048, + in_index=3, + channels=512, + ratio=1 / 4., + 
pooling_type='att', + fusion_types=('channel_add', ), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/icnet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/icnet_r50-d8.py new file mode 100644 index 0000000..d7273cd --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/icnet_r50-d8.py @@ -0,0 +1,74 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='ICNet', + backbone_cfg=dict( + type='ResNetV1c', + in_channels=3, + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + in_channels=3, + layer_channels=(512, 2048), + light_branch_middle_channels=32, + psp_out_channels=512, + out_channels=(64, 256, 256), + norm_cfg=norm_cfg, + align_corners=False, + ), + neck=dict( + type='ICNeck', + in_channels=(64, 256, 256), + out_channels=128, + norm_cfg=norm_cfg, + align_corners=False), + decode_head=dict( + type='FCNHead', + in_channels=128, + channels=128, + num_convs=1, + in_index=2, + dropout_ratio=0, + num_classes=19, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=128, + num_convs=1, + num_classes=19, + in_index=0, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=128, + channels=128, + num_convs=1, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/isanet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/isanet_r50-d8.py new file mode 100644 index 0000000..c0221a3 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/isanet_r50-d8.py @@ -0,0 +1,45 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='ISAHead', + in_channels=2048, + in_index=3, + channels=512, + isa_channels=256, + down_factor=(8, 8), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, 
+ in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/lraspp_m-v3-d8.py b/downstream/mmsegmentation/configs/_base_/models/lraspp_m-v3-d8.py new file mode 100644 index 0000000..9325824 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/lraspp_m-v3-d8.py @@ -0,0 +1,25 @@ +# model settings +norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='MobileNetV3', + arch='large', + out_indices=(1, 3, 16), + norm_cfg=norm_cfg), + decode_head=dict( + type='LRASPPHead', + in_channels=(16, 24, 960), + in_index=(0, 1, 2), + channels=128, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/nonlocal_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/nonlocal_r50-d8.py new file mode 100644 index 0000000..5674a39 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/nonlocal_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='NLHead', + in_channels=2048, + in_index=3, + channels=512, + dropout_ratio=0.1, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/ocrnet_hr18.py b/downstream/mmsegmentation/configs/_base_/models/ocrnet_hr18.py new file mode 100644 index 0000000..c60f62a --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/ocrnet_hr18.py @@ -0,0 +1,68 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='CascadeEncoderDecoder', + num_stages=2, + pretrained='open-mmlab://msra/hrnetv2_w18', + backbone=dict( + type='HRNet', + norm_cfg=norm_cfg, + norm_eval=False, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 
72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/ocrnet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/ocrnet_r50-d8.py new file mode 100644 index 0000000..615aa3f --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/ocrnet_r50-d8.py @@ -0,0 +1,47 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='CascadeEncoderDecoder', + num_stages=2, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=[ + dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=2048, + in_index=3, + channels=512, + ocr_channels=256, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/pointrend_r50.py b/downstream/mmsegmentation/configs/_base_/models/pointrend_r50.py new file mode 100644 index 0000000..9d323db --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/pointrend_r50.py @@ -0,0 +1,56 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='CascadeEncoderDecoder', + num_stages=2, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=4), + decode_head=[ + dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='PointHead', + in_channels=[256], + in_index=[0], + channels=256, + num_fcs=3, + coarse_pred_each_layer=True, + 
dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ], + # model training and testing settings + train_cfg=dict( + num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75), + test_cfg=dict( + mode='whole', + subdivision_steps=2, + subdivision_num_points=8196, + scale_factor=2)) diff --git a/downstream/mmsegmentation/configs/_base_/models/psanet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/psanet_r50-d8.py new file mode 100644 index 0000000..689513f --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/psanet_r50-d8.py @@ -0,0 +1,49 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='PSAHead', + in_channels=2048, + in_index=3, + channels=512, + mask_size=(97, 97), + psa_type='bi-direction', + compact=False, + shrink_factor=2, + normalization_factor=1.0, + psa_softmax=True, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/pspnet_r50-d8.py b/downstream/mmsegmentation/configs/_base_/models/pspnet_r50-d8.py new file mode 100644 index 0000000..f451e08 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/pspnet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='PSPHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/pspnet_unet_s5-d16.py b/downstream/mmsegmentation/configs/_base_/models/pspnet_unet_s5-d16.py new file mode 100644 index 0000000..fcff9ec --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/pspnet_unet_s5-d16.py @@ -0,0 +1,50 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) 
+model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( + type='PSPHead', + in_channels=64, + in_index=4, + channels=16, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/downstream/mmsegmentation/configs/_base_/models/segformer_mit-b0.py b/downstream/mmsegmentation/configs/_base_/models/segformer_mit-b0.py new file mode 100644 index 0000000..5b3e073 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/segformer_mit-b0.py @@ -0,0 +1,34 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='MixVisionTransformer', + in_channels=3, + embed_dims=32, + num_stages=4, + num_layers=[2, 2, 2, 2], + num_heads=[1, 2, 5, 8], + patch_sizes=[7, 3, 3, 3], + sr_ratios=[8, 4, 2, 1], + out_indices=(0, 1, 2, 3), + mlp_ratio=4, + qkv_bias=True, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.1), + decode_head=dict( + type='SegformerHead', + in_channels=[32, 64, 160, 256], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/segmenter_vit-b16_mask.py b/downstream/mmsegmentation/configs/_base_/models/segmenter_vit-b16_mask.py new file mode 100644 index 0000000..622f122 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/segmenter_vit-b16_mask.py @@ -0,0 +1,36 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_base_p16_384_20220308-96dfe169.pth' # noqa +# model settings +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=checkpoint, + backbone=dict( + type='VisionTransformer', + img_size=(512, 512), + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + drop_path_rate=0.1, + attn_drop_rate=0.0, + drop_rate=0.0, + final_norm=True, + norm_cfg=backbone_norm_cfg, + with_cls_token=True, + interpolate_mode='bicubic', + ), + decode_head=dict( + type='SegmenterMaskTransformerHead', + in_channels=768, + channels=768, + num_classes=150, + num_layers=2, + num_heads=12, + embed_dims=768, + dropout_ratio=0.0, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + ), + test_cfg=dict(mode='slide', crop_size=(512, 512), 
stride=(480, 480)), +) diff --git a/downstream/mmsegmentation/configs/_base_/models/setr_mla.py b/downstream/mmsegmentation/configs/_base_/models/setr_mla.py new file mode 100644 index 0000000..af4ba24 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/setr_mla.py @@ -0,0 +1,95 @@ +# model settings +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', + backbone=dict( + type='VisionTransformer', + img_size=(768, 768), + patch_size=16, + in_channels=3, + embed_dims=1024, + num_layers=24, + num_heads=16, + out_indices=(5, 11, 17, 23), + drop_rate=0.1, + norm_cfg=backbone_norm_cfg, + with_cls_token=False, + interpolate_mode='bilinear', + ), + neck=dict( + type='MLANeck', + in_channels=[1024, 1024, 1024, 1024], + out_channels=256, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + ), + decode_head=dict( + type='SETRMLAHead', + in_channels=(256, 256, 256, 256), + channels=512, + in_index=(0, 1, 2, 3), + dropout_ratio=0, + mla_channels=128, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=0, + dropout_ratio=0, + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=1, + dropout_ratio=0, + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=2, + dropout_ratio=0, + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=3, + dropout_ratio=0, + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/setr_naive.py b/downstream/mmsegmentation/configs/_base_/models/setr_naive.py new file mode 100644 index 0000000..0c330ea --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/setr_naive.py @@ -0,0 +1,80 @@ +# model settings +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', + backbone=dict( + type='VisionTransformer', + img_size=(768, 768), + patch_size=16, + in_channels=3, + embed_dims=1024, + num_layers=24, + num_heads=16, + out_indices=(9, 14, 19, 23), + drop_rate=0.1, + norm_cfg=backbone_norm_cfg, + with_cls_token=True, + interpolate_mode='bilinear', + ), + decode_head=dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=3, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, 
loss_weight=1.0)), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)) + ], + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/setr_pup.py b/downstream/mmsegmentation/configs/_base_/models/setr_pup.py new file mode 100644 index 0000000..8e5f23b --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/setr_pup.py @@ -0,0 +1,80 @@ +# model settings +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth', + backbone=dict( + type='VisionTransformer', + img_size=(768, 768), + patch_size=16, + in_channels=3, + embed_dims=1024, + num_layers=24, + num_heads=16, + out_indices=(9, 14, 19, 23), + drop_rate=0.1, + norm_cfg=backbone_norm_cfg, + with_cls_token=True, + interpolate_mode='bilinear', + ), + decode_head=dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=3, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=4, + up_scale=2, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=1, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/stdc.py b/downstream/mmsegmentation/configs/_base_/models/stdc.py new file mode 100644 index 0000000..341a4ec --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/stdc.py @@ -0,0 +1,83 @@ +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='STDCContextPathNet', + backbone_cfg=dict( + type='STDCNet', + stdc_type='STDCNet1', + in_channels=3, + channels=(32, 64, 256, 512, 1024), 
+ bottleneck_type='cat', + num_convs=4, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + with_final_conv=False), + last_in_channels=(1024, 512), + out_channels=128, + ffm_cfg=dict(in_channels=384, out_channels=256, scale_factor=4)), + decode_head=dict( + type='FCNHead', + in_channels=256, + channels=256, + num_convs=1, + num_classes=19, + in_index=3, + concat_input=False, + dropout_ratio=0.1, + norm_cfg=norm_cfg, + align_corners=True, + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000), + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=19, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000), + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='FCNHead', + in_channels=128, + channels=64, + num_convs=1, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000), + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='STDCHead', + in_channels=256, + channels=64, + num_convs=1, + num_classes=2, + boundary_threshold=0.1, + in_index=0, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=True, + loss_decode=[ + dict( + type='CrossEntropyLoss', + loss_name='loss_ce', + use_sigmoid=True, + loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=1.0) + ]), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/twins_pcpvt-s_fpn.py b/downstream/mmsegmentation/configs/_base_/models/twins_pcpvt-s_fpn.py new file mode 100644 index 0000000..0f4488a --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/twins_pcpvt-s_fpn.py @@ -0,0 +1,45 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_small_20220308-e638c41c.pth' # noqa + +# model settings +backbone_norm_cfg = dict(type='LN') +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='PCPVT', + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + in_channels=3, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + mlp_ratios=[8, 8, 4, 4], + out_indices=(0, 1, 2, 3), + qkv_bias=True, + norm_cfg=backbone_norm_cfg, + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + norm_after_stage=False, + drop_rate=0.0, + attn_drop_rate=0., + drop_path_rate=0.2), + neck=dict( + type='FPN', + in_channels=[64, 128, 320, 512], + out_channels=256, + num_outs=4), + decode_head=dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/twins_pcpvt-s_upernet.py b/downstream/mmsegmentation/configs/_base_/models/twins_pcpvt-s_upernet.py new file mode 100644 index 0000000..14a74b9 --- /dev/null +++ 
b/downstream/mmsegmentation/configs/_base_/models/twins_pcpvt-s_upernet.py @@ -0,0 +1,53 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_small_20220308-e638c41c.pth' # noqa + +# model settings +backbone_norm_cfg = dict(type='LN') +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='PCPVT', + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + in_channels=3, + embed_dims=[64, 128, 320, 512], + num_heads=[1, 2, 5, 8], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + mlp_ratios=[8, 8, 4, 4], + out_indices=(0, 1, 2, 3), + qkv_bias=True, + norm_cfg=backbone_norm_cfg, + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + norm_after_stage=False, + drop_rate=0.0, + attn_drop_rate=0., + drop_path_rate=0.2), + decode_head=dict( + type='UPerHead', + in_channels=[64, 128, 320, 512], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=320, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/upernet_beit.py b/downstream/mmsegmentation/configs/_base_/models/upernet_beit.py new file mode 100644 index 0000000..9c5bfa3 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/upernet_beit.py @@ -0,0 +1,50 @@ +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='BEiT', + img_size=(640, 640), + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=(3, 5, 7, 11), + qv_bias=True, + attn_drop_rate=0.0, + drop_path_rate=0.1, + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + norm_eval=False, + init_values=0.1), + neck=dict(type='Feature2Pyramid', embed_dim=768, rescales=[4, 2, 1, 0.5]), + decode_head=dict( + type='UPerHead', + in_channels=[768, 768, 768, 768], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=768, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=768, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/upernet_convnext.py b/downstream/mmsegmentation/configs/_base_/models/upernet_convnext.py new file mode 100644 index 0000000..36b882f --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/upernet_convnext.py @@ -0,0 +1,44 @@ +norm_cfg = dict(type='SyncBN', requires_grad=True) +custom_imports = dict(imports='mmcls.models', allow_failed_imports=False) +checkpoint_file = 
'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_32xb128-noema_in1k_20220301-2a0ee547.pth' # noqa +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='mmcls.ConvNeXt', + arch='base', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + type='UPerHead', + in_channels=[128, 256, 512, 1024], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=384, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/upernet_r50.py b/downstream/mmsegmentation/configs/_base_/models/upernet_r50.py new file mode 100644 index 0000000..1097496 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/upernet_r50.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='UPerHead', + in_channels=[256, 512, 1024, 2048], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/upernet_swin.py b/downstream/mmsegmentation/configs/_base_/models/upernet_swin.py new file mode 100644 index 0000000..71b5162 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/upernet_swin.py @@ -0,0 +1,54 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +backbone_norm_cfg = dict(type='LN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='SwinTransformer', + pretrain_img_size=224, + embed_dims=96, + patch_size=4, + window_size=7, + mlp_ratio=4, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + strides=(4, 2, 2, 2), + out_indices=(0, 1, 2, 3), + qkv_bias=True, + qk_scale=None, + patch_norm=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + use_abs_pos_embed=False, + act_cfg=dict(type='GELU'), + norm_cfg=backbone_norm_cfg), + decode_head=dict( + type='UPerHead', + in_channels=[96, 192, 384, 768], + in_index=[0, 1, 2, 3], + pool_scales=(1, 
2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=384, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/_base_/models/upernet_vit-b16_ln_mln.py b/downstream/mmsegmentation/configs/_base_/models/upernet_vit-b16_ln_mln.py new file mode 100644 index 0000000..cd6587d --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/models/upernet_vit-b16_ln_mln.py @@ -0,0 +1,57 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='pretrain/jx_vit_base_p16_224-80ecf9dd.pth', + backbone=dict( + type='VisionTransformer', + img_size=(512, 512), + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=(2, 5, 8, 11), + qkv_bias=True, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + with_cls_token=True, + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + norm_eval=False, + interpolate_mode='bicubic'), + neck=dict( + type='MultiLevelNeck', + in_channels=[768, 768, 768, 768], + out_channels=768, + scales=[4, 2, 1, 0.5]), + decode_head=dict( + type='UPerHead', + in_channels=[768, 768, 768, 768], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=768, + in_index=3, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) # yapf: disable diff --git a/downstream/mmsegmentation/configs/_base_/schedules/schedule_160k.py b/downstream/mmsegmentation/configs/_base_/schedules/schedule_160k.py new file mode 100644 index 0000000..39630f2 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/schedules/schedule_160k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=16000) +evaluation = dict(interval=16000, metric='mIoU', pre_eval=True) diff --git a/downstream/mmsegmentation/configs/_base_/schedules/schedule_20k.py b/downstream/mmsegmentation/configs/_base_/schedules/schedule_20k.py new file mode 100644 index 0000000..73c7021 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/schedules/schedule_20k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = 
dict(type='IterBasedRunner', max_iters=20000) +checkpoint_config = dict(by_epoch=False, interval=2000) +evaluation = dict(interval=2000, metric='mIoU', pre_eval=True) diff --git a/downstream/mmsegmentation/configs/_base_/schedules/schedule_320k.py b/downstream/mmsegmentation/configs/_base_/schedules/schedule_320k.py new file mode 100644 index 0000000..a0b2306 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/schedules/schedule_320k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=320000) +checkpoint_config = dict(by_epoch=False, interval=32000) +evaluation = dict(interval=32000, metric='mIoU') diff --git a/downstream/mmsegmentation/configs/_base_/schedules/schedule_40k.py b/downstream/mmsegmentation/configs/_base_/schedules/schedule_40k.py new file mode 100644 index 0000000..d2c5023 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/schedules/schedule_40k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=40000) +checkpoint_config = dict(by_epoch=False, interval=4000) +evaluation = dict(interval=4000, metric='mIoU', pre_eval=True) diff --git a/downstream/mmsegmentation/configs/_base_/schedules/schedule_80k.py b/downstream/mmsegmentation/configs/_base_/schedules/schedule_80k.py new file mode 100644 index 0000000..8365a87 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/schedules/schedule_80k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=8000) +evaluation = dict(interval=8000, metric='mIoU', pre_eval=True) diff --git a/downstream/mmsegmentation/configs/_base_/segformer/ade20k_repeat.py b/downstream/mmsegmentation/configs/_base_/segformer/ade20k_repeat.py new file mode 100644 index 0000000..ff767eb --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/segformer/ade20k_repeat.py @@ -0,0 +1,56 @@ +# dataset settings +dataset_type = 'ADE20KDataset' +data_root = 'data/ade/ADEChallengeData2016' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + flip=False, + transforms=[ + dict(type='AlignedResize', keep_ratio=True, size_divisor=32), # Ensure the long and short sides are divisible 
by 32 + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=50, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/_base_/segformer/default_runtime.py b/downstream/mmsegmentation/configs/_base_/segformer/default_runtime.py new file mode 100644 index 0000000..5f8d0f5 --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/segformer/default_runtime.py @@ -0,0 +1,14 @@ +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/_base_/segformer/schedule_160k_adamw.py b/downstream/mmsegmentation/configs/_base_/segformer/schedule_160k_adamw.py new file mode 100644 index 0000000..729269a --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/segformer/schedule_160k_adamw.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='AdamW', lr=0.0002, weight_decay=0.0001) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=0.0, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=4000) +evaluation = dict(interval=4000, metric='mIoU') \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/_base_/segformer/segformer.py b/downstream/mmsegmentation/configs/_base_/segformer/segformer.py new file mode 100644 index 0000000..cb7978c --- /dev/null +++ b/downstream/mmsegmentation/configs/_base_/segformer/segformer.py @@ -0,0 +1,23 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='IMTRv21_5', + style='pytorch'), + decode_head=dict( + type='SegFormerHead', + in_channels=[64, 128, 320, 512], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + decoder_params=dict(), + loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/ann/README.md b/downstream/mmsegmentation/configs/ann/README.md new file mode 100644 index 0000000..30a59c3 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/README.md @@ -0,0 +1,69 @@ +# ANN + +[Asymmetric Non-local Neural Networks for Semantic Segmentation](https://arxiv.org/abs/1908.07678) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The non-local module works as a particularly useful technique for 
semantic segmentation while criticized for its prohibitive computation and GPU memory occupation. In this paper, we present Asymmetric Non-local Neural Network to semantic segmentation, which has two prominent components: Asymmetric Pyramid Non-local Block (APNB) and Asymmetric Fusion Non-local Block (AFNB). APNB leverages a pyramid sampling module into the non-local block to largely reduce the computation and memory consumption without sacrificing the performance. AFNB is adapted from APNB to fuse the features of different levels under a sufficient consideration of long range dependencies and thus considerably improves the performance. Extensive experiments on semantic segmentation benchmarks demonstrate the effectiveness and efficiency of our work. In particular, we report the state-of-the-art performance of 81.3 mIoU on the Cityscapes test set. For a 256x128 input, APNB is around 6 times faster than a non-local block on GPU while 28 times smaller in GPU running memory occupation. Code is available at: [this https URL](https://github.com/MendelXu/ANN). + + +
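The APNB idea summarized in the abstract above — sample the key/value branch of a non-local block through a small spatial pyramid so the affinity matrix shrinks from N x N to N x S, with S the total number of pooled bins — can be sketched in a few lines of PyTorch. This is only an illustrative sketch, not the `ANNHead` that ships with mmsegmentation; the class name, the `key_channels=64` bottleneck, the `(1, 3, 6, 8)` bin sizes, and the plain residual output are assumptions made here for brevity.

```python
# Minimal sketch of a pyramid-sampled (asymmetric) non-local block.
# Illustrative only: it shows why sampling key/value reduces the affinity
# matrix from N x N to N x S (here S = 1 + 9 + 36 + 64 = 110).
import torch
import torch.nn as nn
import torch.nn.functional as F


class PyramidSampledNonLocal(nn.Module):
    """Query keeps full resolution; key/value are pooled to a few bin sizes."""

    def __init__(self, in_channels, key_channels=64, bin_sizes=(1, 3, 6, 8)):
        super().__init__()
        self.query = nn.Conv2d(in_channels, key_channels, 1)
        self.key = nn.Conv2d(in_channels, key_channels, 1)
        self.value = nn.Conv2d(in_channels, in_channels, 1)
        self.bin_sizes = bin_sizes

    def _pyramid_pool(self, x):
        # Pool to each bin size and flatten: (B, C, S) with S = sum(b * b).
        pooled = [
            F.adaptive_avg_pool2d(x, bin_size).flatten(2)
            for bin_size in self.bin_sizes
        ]
        return torch.cat(pooled, dim=2)

    def forward(self, x):
        b, c, h, w = x.shape
        q = self.query(x).flatten(2).transpose(1, 2)            # (B, N, Ck)
        k = self._pyramid_pool(self.key(x))                     # (B, Ck, S)
        v = self._pyramid_pool(self.value(x)).transpose(1, 2)   # (B, S, C)
        affinity = torch.softmax(q @ k, dim=-1)                 # (B, N, S), not (B, N, N)
        out = (affinity @ v).transpose(1, 2).reshape(b, c, h, w)
        return out + x                                          # simple residual


if __name__ == '__main__':
    feat = torch.randn(2, 512, 64, 128)
    print(PyramidSampledNonLocal(512)(feat).shape)  # torch.Size([2, 512, 64, 128])
```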
    + + +## Citation + +```bibtex +@inproceedings{zhu2019asymmetric, + title={Asymmetric non-local neural networks for semantic segmentation}, + author={Zhu, Zhen and Xu, Mengde and Bai, Song and Huang, Tengteng and Bai, Xiang}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={593--602}, + year={2019} +} +``` + + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| ANN | R-50-D8 | 512x1024 | 40000 | 6 | 3.71 | 77.40 | 78.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211.log.json) | +| ANN | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.55 | 76.55 | 78.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243.log.json) | +| ANN | R-50-D8 | 769x769 | 40000 | 6.8 | 1.70 | 78.89 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712.log.json) | +| ANN | R-101-D8 | 769x769 | 40000 | 10.7 | 1.15 | 79.32 | 80.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720.log.json) | +| ANN | R-50-D8 | 512x1024 | 80000 | - | - | 77.34 | 78.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911.log.json) | +| ANN | R-101-D8 | 512x1024 | 80000 | - | - | 77.14 | 78.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728.log.json) | +| ANN | R-50-D8 | 769x769 | 80000 | - | - | 78.88 | 80.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426.log.json) | +| ANN | R-101-D8 | 769x769 | 80000 | - | - | 78.80 | 80.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ANN | R-50-D8 | 512x512 | 80000 | 9.1 | 21.01 | 41.01 | 42.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818.log.json) | +| ANN | R-101-D8 | 512x512 | 80000 | 12.5 | 14.12 | 42.94 | 44.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818.log.json) | +| ANN | R-50-D8 | 512x512 | 160000 | - | - | 41.74 | 42.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_160k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733.log.json) | +| ANN | R-101-D8 | 512x512 | 160000 | - | - | 42.94 | 44.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| ANN | R-50-D8 | 512x512 | 20000 | 6 | 20.92 | 74.86 | 76.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246.log.json) | +| ANN | R-101-D8 | 512x512 | 20000 | 9.5 | 13.94 | 77.47 | 78.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246.log.json) | +| ANN | R-50-D8 | 512x512 | 40000 | - | - | 76.56 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314.log.json) | +| ANN | R-101-D8 | 512x512 | 40000 | - | - | 76.70 | 78.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314.log.json) | diff --git a/downstream/mmsegmentation/configs/ann/ann.yml 
b/downstream/mmsegmentation/configs/ann/ann.yml new file mode 100644 index 0000000..ff6bea6 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann.yml @@ -0,0 +1,305 @@ +Collections: +- Name: ANN + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1908.07678 + Title: Asymmetric Non-local Neural Networks for Semantic Segmentation + README: configs/ann/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ann_head.py#L185 + Version: v0.17.0 + Converted From: + Code: https://github.com/MendelXu/ANN +Models: +- Name: ann_r50-d8_512x1024_40k_cityscapes + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 269.54 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.4 + mIoU(ms+flip): 78.57 + Config: configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth +- Name: ann_r101-d8_512x1024_40k_cityscapes + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 392.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.55 + mIoU(ms+flip): 78.85 + Config: configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth +- Name: ann_r50-d8_769x769_40k_cityscapes + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 588.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.89 + mIoU(ms+flip): 80.46 + Config: configs/ann/ann_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth +- Name: ann_r101-d8_769x769_40k_cityscapes + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 869.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.32 + mIoU(ms+flip): 80.94 + Config: configs/ann/ann_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth +- Name: ann_r50-d8_512x1024_80k_cityscapes + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.34 + mIoU(ms+flip): 78.65 + Config: configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth +- Name: ann_r101-d8_512x1024_80k_cityscapes + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.14 + mIoU(ms+flip): 78.81 + Config: configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth +- Name: ann_r50-d8_769x769_80k_cityscapes + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.88 + mIoU(ms+flip): 80.57 + Config: configs/ann/ann_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth +- Name: ann_r101-d8_769x769_80k_cityscapes + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.8 + mIoU(ms+flip): 80.34 + Config: configs/ann/ann_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth +- Name: ann_r50-d8_512x512_80k_ade20k + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.6 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.01 + mIoU(ms+flip): 42.3 + Config: configs/ann/ann_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth +- Name: ann_r101-d8_512x512_80k_ade20k + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 70.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.94 + mIoU(ms+flip): 44.18 + Config: configs/ann/ann_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth +- Name: ann_r50-d8_512x512_160k_ade20k + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.74 + mIoU(ms+flip): 42.62 + Config: configs/ann/ann_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth +- Name: ann_r101-d8_512x512_160k_ade20k + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.94 + mIoU(ms+flip): 44.06 + Config: 
configs/ann/ann_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth +- Name: ann_r50-d8_512x512_20k_voc12aug + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 47.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.86 + mIoU(ms+flip): 76.13 + Config: configs/ann/ann_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth +- Name: ann_r101-d8_512x512_20k_voc12aug + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 71.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.47 + mIoU(ms+flip): 78.7 + Config: configs/ann/ann_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth +- Name: ann_r50-d8_512x512_40k_voc12aug + In Collection: ANN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.56 + mIoU(ms+flip): 77.51 + Config: configs/ann/ann_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth +- Name: ann_r101-d8_512x512_40k_voc12aug + In Collection: ANN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.7 + mIoU(ms+flip): 78.06 + Config: configs/ann/ann_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth diff --git a/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..d494e07 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..1eeff0b --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..9e43af5 
--- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..d854f2e --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..893c53b --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..a64dac6 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..5950824 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..a9c712d --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './ann_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..00b2594 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..ef7b369 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_160k_ade20k.py 
b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..ca6bb24 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..071f190 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..82a1c93 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..5e04aa7 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..4912bdb --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..d1cc072 --- /dev/null +++ b/downstream/mmsegmentation/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/ann_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/apcnet/README.md 
b/downstream/mmsegmentation/configs/apcnet/README.md new file mode 100644 index 0000000..5e1fd6b --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/README.md @@ -0,0 +1,58 @@ +# APCNet + +[Adaptive Pyramid Context Network for Semantic Segmentation](https://openaccess.thecvf.com/content_CVPR_2019/html/He_Adaptive_Pyramid_Context_Network_for_Semantic_Segmentation_CVPR_2019_paper.html) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Recent studies witnessed that context features can significantly improve the performance of deep semantic segmentation networks. Current context based segmentation methods differ with each other in how to construct context features and perform differently in practice. This paper firstly introduces three desirable properties of context features in segmentation task. Specially, we find that Global-guided Local Affinity (GLA) can play a vital role in constructing effective context features, while this property has been largely ignored in previous works. Based on this analysis, this paper proposes Adaptive Pyramid Context Network (APCNet) for semantic segmentation. APCNet adaptively constructs multi-scale contextual representations with multiple well-designed Adaptive Context Modules (ACMs). Specifically, each ACM leverages a global image representation as a guidance to estimate the local affinity coefficients for each sub-region, and then calculates a context vector with these affinities. We empirically evaluate our APCNet on three semantic segmentation and scene parsing datasets, including PASCAL VOC 2012, Pascal-Context, and ADE20K dataset. Experimental results show that APCNet achieves state-of-the-art performance on all three benchmarks, and obtains a new record 84.2% on PASCAL VOC 2012 test set without MS COCO pre-trained and any post-processing. + +
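The ACM described in the abstract above — a globally pooled vector guides per-pixel affinity coefficients over the s x s pooled sub-regions, and those affinities mix the sub-region features into a per-pixel context vector — can be sketched roughly as follows for a single pyramid scale. This is not mmsegmentation's `APCHead`; the class name, the sigmoid normalization of the affinities, and the single-scale forward pass are simplifications assumed here for illustration.

```python
# Rough single-scale sketch of the Adaptive Context Module (ACM) idea:
# "global-guided local affinity" over s x s pooled sub-regions.
import torch
import torch.nn as nn
import torch.nn.functional as F


class AdaptiveContextModule(nn.Module):
    def __init__(self, in_channels, channels, pool_scale):
        super().__init__()
        self.pool_scale = pool_scale
        self.reduce = nn.Conv2d(in_channels, channels, 1)
        # Predicts s*s affinity coefficients per pixel; the globally pooled
        # representation is added onto the local features as guidance.
        self.affinity = nn.Conv2d(channels, pool_scale * pool_scale, 1)

    def forward(self, x):
        b, _, h, w = x.shape
        s = self.pool_scale
        feat = self.reduce(x)                                    # (B, C, H, W)
        global_guide = F.adaptive_avg_pool2d(feat, 1)            # (B, C, 1, 1)
        gla = self.affinity(feat + global_guide)                 # (B, s*s, H, W)
        gla = torch.sigmoid(gla).flatten(2).transpose(1, 2)      # (B, N, s*s)
        pooled = F.adaptive_avg_pool2d(feat, s).flatten(2)       # (B, C, s*s)
        context = gla @ pooled.transpose(1, 2)                   # (B, N, C)
        return context.transpose(1, 2).reshape(b, -1, h, w)      # (B, C, H, W)


if __name__ == '__main__':
    x = torch.randn(2, 2048, 32, 64)
    print(AdaptiveContextModule(2048, 512, pool_scale=6)(x).shape)  # (2, 512, 32, 64)
```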
    + +## Citation + +```bibtex +@InProceedings{He_2019_CVPR, +author = {He, Junjun and Deng, Zhongying and Zhou, Lei and Wang, Yali and Qiao, Yu}, +title = {Adaptive Pyramid Context Network for Semantic Segmentation}, +booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, +month = {June}, +year = {2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| APCNet | R-50-D8 | 512x1024 | 40000 | 7.7 | 3.57 | 78.02 | 79.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes_20201214_115717-5e88fa33.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes-20201214_115717.log.json) | +| APCNet | R-101-D8 | 512x1024 | 40000 | 11.2 | 2.15 | 79.08 | 80.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes_20201214_115716-abc9d111.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes-20201214_115716.log.json) | +| APCNet | R-50-D8 | 769x769 | 40000 | 8.7 | 1.52 | 77.89 | 79.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes_20201214_115717-2a2628d7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes-20201214_115717.log.json) | +| APCNet | R-101-D8 | 769x769 | 40000 | 12.7 | 1.03 | 77.96 | 79.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes_20201214_115718-b650de90.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes-20201214_115718.log.json) | +| APCNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.96 | 79.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes_20201214_115716-987f51e3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes-20201214_115716.log.json) | +| APCNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.64 | 80.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes_20201214_115705-b1ff208a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes-20201214_115705.log.json) | +| APCNet | R-50-D8 | 769x769 | 80000 | - | - | 78.79 | 80.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes_20201214_115718-7ea9fa12.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes-20201214_115718.log.json) | +| APCNet | R-101-D8 | 769x769 | 80000 | - | - | 78.45 | 79.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes_20201214_115716-a7fbc2ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes-20201214_115716.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| APCNet | R-50-D8 | 512x512 | 80000 | 10.1 | 19.61 | 42.20 | 43.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k_20201214_115705-a8626293.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k-20201214_115705.log.json) | +| APCNet | R-101-D8 | 512x512 | 80000 | 13.6 | 13.10 | 45.54 | 46.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k_20201214_115704-c656c3fb.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k-20201214_115704.log.json) | +| APCNet | R-50-D8 | 512x512 | 160000 | - | - | 43.40 | 43.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k_20201214_115706-25fb92c2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k-20201214_115706.log.json) | +| APCNet | R-101-D8 | 512x512 | 160000 | - | - | 45.41 | 46.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k_20201214_115705-73f9a8d7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k-20201214_115705.log.json) | diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet.yml b/downstream/mmsegmentation/configs/apcnet/apcnet.yml new file mode 100644 index 0000000..7a453a3 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet.yml @@ -0,0 +1,232 @@ +Collections: +- Name: APCNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://openaccess.thecvf.com/content_CVPR_2019/html/He_Adaptive_Pyramid_Context_Network_for_Semantic_Segmentation_CVPR_2019_paper.html + Title: Adaptive Pyramid Context Network for Semantic Segmentation + README: configs/apcnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/apc_head.py#L111 + Version: v0.17.0 + Converted From: + Code: https://github.com/Junjun2016/APCNet +Models: +- Name: apcnet_r50-d8_512x1024_40k_cityscapes + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 280.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.02 + mIoU(ms+flip): 79.26 + Config: configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes_20201214_115717-5e88fa33.pth +- Name: apcnet_r101-d8_512x1024_40k_cityscapes + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 465.12 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.08 + mIoU(ms+flip): 80.34 + Config: configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes_20201214_115716-abc9d111.pth +- Name: apcnet_r50-d8_769x769_40k_cityscapes + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 657.89 + hardware: V100 + backend: PyTorch + batch size: 1 + 
mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.89 + mIoU(ms+flip): 79.75 + Config: configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes_20201214_115717-2a2628d7.pth +- Name: apcnet_r101-d8_769x769_40k_cityscapes + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 970.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.96 + mIoU(ms+flip): 79.24 + Config: configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes_20201214_115718-b650de90.pth +- Name: apcnet_r50-d8_512x1024_80k_cityscapes + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.96 + mIoU(ms+flip): 79.94 + Config: configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes_20201214_115716-987f51e3.pth +- Name: apcnet_r101-d8_512x1024_80k_cityscapes + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.64 + mIoU(ms+flip): 80.61 + Config: configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes_20201214_115705-b1ff208a.pth +- Name: apcnet_r50-d8_769x769_80k_cityscapes + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.79 + mIoU(ms+flip): 80.35 + Config: configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes_20201214_115718-7ea9fa12.pth +- Name: apcnet_r101-d8_769x769_80k_cityscapes + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.45 + mIoU(ms+flip): 79.91 + Config: configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes_20201214_115716-a7fbc2ab.pth +- Name: apcnet_r50-d8_512x512_80k_ade20k + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 50.99 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.2 + mIoU(ms+flip): 43.3 + Config: configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k_20201214_115705-a8626293.pth +- Name: apcnet_r101-d8_512x512_80k_ade20k + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 76.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.54 + mIoU(ms+flip): 46.65 + Config: configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k_20201214_115704-c656c3fb.pth +- Name: apcnet_r50-d8_512x512_160k_ade20k + In Collection: APCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.4 + mIoU(ms+flip): 43.94 + Config: configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k_20201214_115706-25fb92c2.pth +- Name: apcnet_r101-d8_512x512_160k_ade20k + In Collection: APCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.41 + mIoU(ms+flip): 46.63 + Config: configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k_20201214_115705-73f9a8d7.pth diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..1e1cec6 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..04cb006 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..1ce2279 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..8f10b98 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_512x512_80k_ade20k.py' 
+model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..5c44ebc --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..6169845 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './apcnet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..99c61a9 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..62a0627 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..f7821c5 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..daafa5f --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..3db6140 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + 
'../_base_/models/apcnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..9cac425 --- /dev/null +++ b/downstream/mmsegmentation/configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/apcnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/beit/README.md b/downstream/mmsegmentation/configs/beit/README.md new file mode 100644 index 0000000..31bf285 --- /dev/null +++ b/downstream/mmsegmentation/configs/beit/README.md @@ -0,0 +1,84 @@ +# BEiT + +[BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%). The code and pretrained models are available at [this https URL](https://github.com/microsoft/unilm/tree/master/beit). + + +
+ +## Citation + +```bibtex +@inproceedings{beit, + title={{BEiT}: {BERT} Pre-Training of Image Transformers}, + author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei}, + booktitle={International Conference on Learning Representations}, + year={2022}, + url={https://openreview.net/forum?id=p-BhZSz59o4} +} +``` + +## Usage + +To use pre-trained models from other repositories, their checkpoint keys need to be converted first. + +We provide a script [`beit2mmseg.py`](../../tools/model_converters/beit2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/microsoft/unilm/tree/master/beit/semantic_segmentation) to MMSegmentation style. + +```shell +python tools/model_converters/beit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/beit2mmseg.py https://unilm.blob.core.windows.net/beit/beit_base_patch16_224_pt22k_ft22k.pth pretrain/beit_base_patch16_224_pt22k_ft22k.pth +``` + +This script converts the model at `PRETRAIN_PATH` and stores the converted checkpoint in `STORE_PATH`. + +In our default setting, the pretrained checkpoints correspond to the original models as follows: + + | pretrained models | original models | + | ------ | -------- | + |BEiT_base.pth | ['BEiT_base'](https://unilm.blob.core.windows.net/beit/beit_base_patch16_224_pt22k_ft22k.pth) | + |BEiT_large.pth | ['BEiT_large'](https://unilm.blob.core.windows.net/beit/beit_large_patch16_224_pt22k_ft22k.pth) | + +To verify the single-scale results of the model: + +```shell +sh tools/dist_test.sh \ +configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py \ +upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth $GPUS --eval mIoU +``` + +Since the relative position embedding requires the input height and width to be equal, sliding-window inference is adopted for multi-scale testing. We set min_size=640 so that the shortest edge is resized to 640. Multi-scale inference therefore uses a separate config instead of the '--aug-test' flag.
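+
+The relevant settings are condensed below for reference. This is an illustrative excerpt drawn from `configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py` and `configs/beit/upernet_beit-base_640x640_160k_ade20k_ms.py`, not a standalone config:
+
+```python
+# Sliding-window evaluation with a fixed 640x640 crop (single- and multi-scale).
+model = dict(
+    test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(426, 426)))
+
+# The multi-scale ("_ms") config additionally resizes so the shortest edge is 640.
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(2560, 640),
+        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+        flip=True,
+        transforms=[
+            dict(type='Resize', keep_ratio=True, min_size=640),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+```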
For multi-scale inference: + +```shell +sh tools/dist_test.sh \ +configs/beit/upernet_beit-large_fp16_640x640_160k_ade20k_ms.py \ +upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth $GPUS --eval mIoU +``` + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | pretrain | pretrain img size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ---------- | ------- | -------- | --- | --- | -------------- | ----- | ------------: | -------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| UperNet | BEiT-B | 640x640 | ImageNet-22K | 224x224 | 16 | 160000 | 15.88 | 2.00 | 53.08 | 53.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k-eead221d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k.log.json) | +| UperNet | BEiT-L | 640x640 | ImageNet-22K | 224x224 | 8 | 320000 | 22.64 | 0.96 | 56.33 | 56.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.log.json) | diff --git a/downstream/mmsegmentation/configs/beit/beit.yml b/downstream/mmsegmentation/configs/beit/beit.yml new file mode 100644 index 0000000..6f3cee3 --- /dev/null +++ b/downstream/mmsegmentation/configs/beit/beit.yml @@ -0,0 +1,45 @@ +Models: +- Name: upernet_beit-base_8x2_640x640_160k_ade20k + In Collection: UperNet + Metadata: + backbone: BEiT-B + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 500.0 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 15.88 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 53.08 + mIoU(ms+flip): 53.84 + Config: configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k-eead221d.pth +- Name: upernet_beit-large_fp16_8x1_640x640_160k_ade20k + In Collection: UperNet + Metadata: + backbone: BEiT-L + crop size: (640,640) + lr schd: 320000 + inference time (ms/im): + - value: 1041.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (640,640) + Training Memory (GB): 22.64 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 56.33 + mIoU(ms+flip): 56.84 + Config: configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth diff --git a/downstream/mmsegmentation/configs/beit/upernet_beit-base_640x640_160k_ade20k_ms.py b/downstream/mmsegmentation/configs/beit/upernet_beit-base_640x640_160k_ade20k_ms.py new file mode 100644 index 0000000..f764c92 --- /dev/null +++ b/downstream/mmsegmentation/configs/beit/upernet_beit-base_640x640_160k_ade20k_ms.py @@ -0,0 +1,24 @@ +_base_ = './upernet_beit-base_8x2_640x640_160k_ade20k.py' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2560, 640), + img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=True, + transforms=[ + dict(type='Resize', keep_ratio=True, min_size=640), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), + samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py b/downstream/mmsegmentation/configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py new file mode 100644 index 0000000..b36adc3 --- /dev/null +++ b/downstream/mmsegmentation/configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/upernet_beit.py', '../_base_/datasets/ade20k_640x640.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] + +model = dict( + pretrained='pretrain/beit_base_patch16_224_pt22k_ft22k.pth', + test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(426, 426))) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=3e-5, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='LayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9)) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/beit/upernet_beit-large_fp16_640x640_160k_ade20k_ms.py b/downstream/mmsegmentation/configs/beit/upernet_beit-large_fp16_640x640_160k_ade20k_ms.py new file mode 100644 index 0000000..fd4d947 --- /dev/null +++ b/downstream/mmsegmentation/configs/beit/upernet_beit-large_fp16_640x640_160k_ade20k_ms.py @@ -0,0 +1,22 @@ +_base_ = './upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2560, 640), + img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=True, + transforms=[ + dict(type='Resize', keep_ratio=True, min_size=640), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py b/downstream/mmsegmentation/configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py new file mode 
100644 index 0000000..e6247b7 --- /dev/null +++ b/downstream/mmsegmentation/configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py @@ -0,0 +1,47 @@ +_base_ = [ + '../_base_/models/upernet_beit.py', '../_base_/datasets/ade20k_640x640.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_320k.py' +] + +model = dict( + pretrained='pretrain/beit_large_patch16_224_pt22k_ft22k.pth', + backbone=dict( + type='BEiT', + embed_dims=1024, + num_layers=24, + num_heads=16, + mlp_ratio=4, + qv_bias=True, + init_values=1e-6, + drop_path_rate=0.2, + out_indices=[7, 11, 15, 23]), + neck=dict(embed_dim=1024, rescales=[4, 2, 1, 0.5]), + decode_head=dict( + in_channels=[1024, 1024, 1024, 1024], num_classes=150, channels=1024), + auxiliary_head=dict(in_channels=1024, num_classes=150), + test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(426, 426))) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=2e-5, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='LayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95)) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=3000, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +data = dict(samples_per_gpu=1) +optimizer_config = dict( + type='GradientCumulativeFp16OptimizerHook', cumulative_iters=2) + +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/bisenetv1/README.md b/downstream/mmsegmentation/configs/bisenetv1/README.md new file mode 100644 index 0000000..75ac37c --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/README.md @@ -0,0 +1,63 @@ +# BiSeNetV1 + +[BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation](https://arxiv.org/abs/1808.00897) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Semantic segmentation requires both rich spatial information and sizeable receptive field. However, modern approaches usually compromise spatial resolution to achieve real-time inference speed, which leads to poor performance. In this paper, we address this dilemma with a novel Bilateral Segmentation Network (BiSeNet). We first design a Spatial Path with a small stride to preserve the spatial information and generate high-resolution features. Meanwhile, a Context Path with a fast downsampling strategy is employed to obtain sufficient receptive field. On top of the two paths, we introduce a new Feature Fusion Module to combine features efficiently. The proposed architecture makes a right balance between the speed and segmentation performance on Cityscapes, CamVid, and COCO-Stuff datasets. Specifically, for a 2048x1024 input, we achieve 68.4% Mean IOU on the Cityscapes test dataset with speed of 105 FPS on one NVIDIA Titan XP card, which is significantly faster than the existing methods with comparable performance. + + +
    + +## Citation + +```bibtex +@inproceedings{yu2018bisenet, + title={Bisenet: Bilateral segmentation network for real-time semantic segmentation}, + author={Yu, Changqian and Wang, Jingbo and Peng, Chao and Gao, Changxin and Yu, Gang and Sang, Nong}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={325--341}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| BiSeNetV1 (No Pretrain) | R-18-D32 | 1024x1024 | 160000 | 5.69 | 31.77 | 74.44 | 77.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239-c55e78e2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239.log.json) | +| BiSeNetV1| R-18-D32 | 1024x1024 | 160000 | 5.69 | 31.77 | 74.37 | 76.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251-8ba80eff.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251.log.json) | +| BiSeNetV1 (4x8) | R-18-D32 | 1024x1024 | 160000 | 11.17 | 31.77 | 75.16 | 77.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322-bb8db75f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322.log.json) | +| BiSeNetV1 (No Pretrain) | R-50-D32 | 1024x1024 | 160000 | 15.39 | 7.71 | 76.92 | 78.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639-7b28a2a6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639.log.json) | +| BiSeNetV1 | R-50-D32 | 
1024x1024 | 160000 | 15.39 | 7.71 | 77.68 | 79.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628-8b304447.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628.log.json) | + +### COCO-Stuff 164k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| BiSeNetV1 (No Pretrain) | R-18-D32 | 512x512 | 160000 | - | - | 25.45 | 26.15 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328-046aa2f2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328.log.json) | +| BiSeNetV1| R-18-D32 | 512x512 | 160000 | 6.33 | 74.24 | 28.55 | 29.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100-f700dbf7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100.log.json) | +| BiSeNetV1 (No Pretrain) | R-50-D32 | 512x512 | 160000 | - | - | 29.82 | 30.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616-d2bb0df4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616.log.json) | +| BiSeNetV1 | R-50-D32 | 512x512 | 160000 | 9.28 | 32.60 | 34.88 | 35.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932-66747911.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932.log.json) | +| BiSeNetV1 (No Pretrain) | R-101-D32 | 512x512 | 160000 | - | - | 31.14 | 31.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147-c6b32c3b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147.log.json) | +| BiSeNetV1 | R-101-D32 | 512x512 | 160000 | 10.36 | 25.25 | 37.38 | 37.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220-28c8f092.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220.log.json) | + +Note: + +- `4x8`: Using 4 GPUs with 8 samples per GPU in training. +- For BiSeNetV1 on Cityscapes dataset, default setting is 4 GPUs with 4 samples per GPU in training. +- `No Pretrain` means the model is trained from scratch. 
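+
+As an illustration of the `4x8` note above, the only change relative to the default `4x4` setting is the per-GPU batch size in the data config; the sketch below mirrors `configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py`, and the job is still launched on 4 GPUs:
+
+```python
+_base_ = './bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py'
+data = dict(
+    samples_per_gpu=8,  # 8 images per GPU on 4 GPUs -> effective batch size 32
+    workers_per_gpu=8,
+)
+```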
diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1.yml b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1.yml new file mode 100644 index 0000000..61f264b --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1.yml @@ -0,0 +1,234 @@ +Collections: +- Name: BiSeNetV1 + Metadata: + Training Data: + - Cityscapes + - COCO-Stuff 164k + Paper: + URL: https://arxiv.org/abs/1808.00897 + Title: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation' + README: configs/bisenetv1/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/backbones/bisenetv1.py#L266 + Version: v0.18.0 + Converted From: + Code: https://github.com/ycszen/TorchSeg/tree/master/model/bisenet +Models: +- Name: bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 31.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 5.69 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.44 + mIoU(ms+flip): 77.05 + Config: configs/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239-c55e78e2.pth +- Name: bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 31.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 5.69 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.37 + mIoU(ms+flip): 76.91 + Config: configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251-8ba80eff.pth +- Name: bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 31.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 11.17 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.16 + mIoU(ms+flip): 77.24 + Config: configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322-bb8db75f.pth +- Name: bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes + In Collection: BiSeNetV1 + Metadata: + backbone: R-50-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 129.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 15.39 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.92 + mIoU(ms+flip): 78.87 + Config: configs/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639-7b28a2a6.pth +- Name: bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes + In Collection: BiSeNetV1 + Metadata: + backbone: R-50-D32 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 129.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 15.39 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.68 + mIoU(ms+flip): 79.57 + Config: configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628-8b304447.pth +- Name: bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 25.45 + mIoU(ms+flip): 26.15 + Config: configs/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328-046aa2f2.pth +- Name: bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k + In Collection: BiSeNetV1 + Metadata: + backbone: R-18-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 13.47 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.33 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 28.55 + mIoU(ms+flip): 29.26 + Config: configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100-f700dbf7.pth +- Name: bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k + In Collection: BiSeNetV1 + Metadata: + backbone: R-50-D32 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 29.82 + mIoU(ms+flip): 30.33 + Config: configs/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616-d2bb0df4.pth +- Name: bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k + In Collection: BiSeNetV1 + Metadata: + backbone: R-50-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 30.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.28 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 34.88 + mIoU(ms+flip): 35.37 + Config: configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932-66747911.pth +- Name: bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k + In Collection: BiSeNetV1 + Metadata: + backbone: R-101-D32 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 31.14 + mIoU(ms+flip): 31.76 + Config: configs/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147-c6b32c3b.pth +- Name: bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k + In Collection: BiSeNetV1 + Metadata: + backbone: R-101-D32 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 39.6 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.36 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 37.38 + mIoU(ms+flip): 37.99 + Config: configs/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220-28c8f092.pth diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py new file mode 100644 index 0000000..c3fe215 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py @@ -0,0 +1,6 @@ +_base_ = './bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet101_v1c')))) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py new file mode 100644 index 0000000..b1e1c3e --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +model = dict( + backbone=dict( + context_channels=(512, 1024, 2048), + spatial_channels=(256, 256, 256, 512), + out_channels=1024, + backbone_cfg=dict(type='ResNet', depth=101)), + decode_head=dict(in_channels=1024, channels=1024, num_classes=171), + auxiliary_head=[ + dict(in_channels=512, channels=256, num_classes=171), + dict(in_channels=512, channels=256, num_classes=171), + ]) +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.005) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..f4019e9 
--- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.025) +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..ef061a1 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py @@ -0,0 +1,16 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')))) +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.025) +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..ea27ef0 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = './bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py' +data = dict( + samples_per_gpu=8, + workers_per_gpu=8, +) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py new file mode 100644 index 0000000..c6d9304 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py @@ -0,0 +1,6 @@ +_base_ = './bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c'))), ) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py new file mode 100644 index 0000000..78d7fea --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=171), + auxiliary_head=[ + dict(num_classes=171), + dict(num_classes=171), + ]) +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.005) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..7cadd50 --- /dev/null +++ 
b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py @@ -0,0 +1,42 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='BiSeNetV1', + context_channels=(512, 1024, 2048), + spatial_channels=(256, 256, 256, 512), + out_channels=1024, + backbone_cfg=dict(type='ResNet', depth=50)), + decode_head=dict( + type='FCNHead', in_channels=1024, in_index=0, channels=1024), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=512, + channels=256, + num_convs=1, + num_classes=19, + in_index=1, + norm_cfg=norm_cfg, + concat_input=False), + dict( + type='FCNHead', + in_channels=512, + channels=256, + num_convs=1, + num_classes=19, + in_index=2, + norm_cfg=norm_cfg, + concat_input=False), + ]) +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.05) +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..5625a76 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py @@ -0,0 +1,7 @@ +_base_ = './bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py' +model = dict( + type='EncoderDecoder', + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py new file mode 100644 index 0000000..f0fea69 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py @@ -0,0 +1,7 @@ +_base_ = './bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py' + +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) diff --git a/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py new file mode 100644 index 0000000..dbbccc6 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/bisenetv1_r18-d32.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +model = dict( + backbone=dict( + context_channels=(512, 1024, 2048), + spatial_channels=(256, 256, 256, 512), + out_channels=1024, + backbone_cfg=dict(type='ResNet', depth=50)), + decode_head=dict(in_channels=1024, channels=1024, num_classes=171), + auxiliary_head=[ + dict(in_channels=512, channels=256, num_classes=171), + dict(in_channels=512, channels=256, num_classes=171), + ]) +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.005) diff --git a/downstream/mmsegmentation/configs/bisenetv2/README.md 
b/downstream/mmsegmentation/configs/bisenetv2/README.md new file mode 100644 index 0000000..1bc7424 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv2/README.md @@ -0,0 +1,53 @@ +# BiSeNetV2 + +[Bisenet v2: Bilateral Network with Guided Aggregation for Real-time Semantic Segmentation](https://arxiv.org/abs/2004.02147) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The low-level details and high-level semantics are both essential to the semantic segmentation task. However, to speed up the model inference, current approaches almost always sacrifice the low-level details, which leads to a considerable accuracy decrease. We propose to treat these spatial details and categorical semantics separately to achieve high accuracy and high efficiency for realtime semantic segmentation. To this end, we propose an efficient and effective architecture with a good trade-off between speed and accuracy, termed Bilateral Segmentation Network (BiSeNet V2). This architecture involves: (i) a Detail Branch, with wide channels and shallow layers to capture low-level details and generate high-resolution feature representation; (ii) a Semantic Branch, with narrow channels and deep layers to obtain high-level semantic context. The Semantic Branch is lightweight due to reducing the channel capacity and a fast-downsampling strategy. Furthermore, we design a Guided Aggregation Layer to enhance mutual connections and fuse both types of feature representation. Besides, a booster training strategy is designed to improve the segmentation performance without any extra inference cost. Extensive quantitative and qualitative evaluations demonstrate that the proposed architecture performs favourably against a few state-of-the-art real-time semantic segmentation approaches. Specifically, for a 2,048x1,024 input, we achieve 72.6% Mean IoU on the Cityscapes test set with a speed of 156 FPS on one NVIDIA GeForce GTX 1080 Ti card, which is significantly faster than existing methods, yet we achieve better segmentation accuracy. + + +
    + +## Citation + +```bibtex +@article{yu2021bisenet, + title={Bisenet v2: Bilateral network with guided aggregation for real-time semantic segmentation}, + author={Yu, Changqian and Gao, Changxin and Wang, Jingbo and Yu, Gang and Shen, Chunhua and Sang, Nong}, + journal={International Journal of Computer Vision}, + pages={1--18}, + year={2021}, + publisher={Springer} +} +``` + + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| BiSeNetV2 | BiSeNetV2 | 1024x1024 | 160000 | 7.64 | 31.77 | 73.21 | 75.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551-bcf10f09.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551.log.json) | +| BiSeNetV2 (OHEM) | BiSeNetV2 | 1024x1024 | 160000 | 7.64 | - | 73.57 | 75.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20210902_112947-5f8103b4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20210902_112947.log.json) | +| BiSeNetV2 (4x8) | BiSeNetV2 | 1024x1024 | 160000 | 15.05 | - | 75.76 | 77.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032-e1a2eed6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032.log.json) | +| BiSeNetV2 (FP16) | BiSeNetV2 | 1024x1024 | 160000 | 5.77 | 36.65 | 73.07 | 75.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942-b979777b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942.log.json) | + +Note: + +- `OHEM` means Online Hard Example Mining (OHEM) 
is adopted in training. +- `FP16` means Mixed Precision (FP16) is adopted in training. +- `4x8` means 4 GPUs with 8 samples per GPU in training. diff --git a/downstream/mmsegmentation/configs/bisenetv2/bisenetv2.yml b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2.yml new file mode 100644 index 0000000..455fa6c --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2.yml @@ -0,0 +1,88 @@ +Collections: +- Name: BiSeNetV2 + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/2004.02147 + Title: 'Bisenet v2: Bilateral Network with Guided Aggregation for Real-time Semantic + Segmentation' + README: configs/bisenetv2/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/backbones/bisenetv2.py#L545 + Version: v0.18.0 +Models: +- Name: bisenetv2_fcn_4x4_1024x1024_160k_cityscapes + In Collection: BiSeNetV2 + Metadata: + backbone: BiSeNetV2 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 31.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 7.64 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.21 + mIoU(ms+flip): 75.74 + Config: configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551-bcf10f09.pth +- Name: bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes + In Collection: BiSeNetV2 + Metadata: + backbone: BiSeNetV2 + crop size: (1024,1024) + lr schd: 160000 + Training Memory (GB): 7.64 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.57 + mIoU(ms+flip): 75.8 + Config: configs/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20210902_112947-5f8103b4.pth +- Name: bisenetv2_fcn_4x8_1024x1024_160k_cityscapes + In Collection: BiSeNetV2 + Metadata: + backbone: BiSeNetV2 + crop size: (1024,1024) + lr schd: 160000 + Training Memory (GB): 15.05 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.76 + mIoU(ms+flip): 77.79 + Config: configs/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032-e1a2eed6.pth +- Name: bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes + In Collection: BiSeNetV2 + Metadata: + backbone: BiSeNetV2 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 27.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (1024,1024) + Training Memory (GB): 5.77 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.07 + mIoU(ms+flip): 75.13 + Config: configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942-b979777b.pth diff --git a/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py 
b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..1248bd8 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/bisenetv2.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.05) +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..babc2cd --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/bisenetv2.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.05) +data = dict( + samples_per_gpu=8, + workers_per_gpu=8, +) diff --git a/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..0196214 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = './bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py' +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) +# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..f14e528 --- /dev/null +++ b/downstream/mmsegmentation/configs/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/bisenetv2.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +sampler = dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000) +lr_config = dict(warmup='linear', warmup_iters=1000) +optimizer = dict(lr=0.05) +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/ccnet/README.md b/downstream/mmsegmentation/configs/ccnet/README.md new file mode 100644 index 0000000..9cefcf0 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/README.md @@ -0,0 +1,67 @@ +# CCNet + +[CCNet: Criss-Cross Attention for Semantic Segmentation](https://arxiv.org/abs/1811.11721) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Contextual information is vital in visual understanding problems, such as semantic segmentation and object detection. We propose a Criss-Cross Network (CCNet) for obtaining full-image contextual information in a very effective and efficient way. Concretely, for each pixel, a novel criss-cross attention module harvests the contextual information of all the pixels on its criss-cross path. By taking a further recurrent operation, each pixel can finally capture the full-image dependencies. 
Besides, a category consistent loss is proposed to enforce the criss-cross attention module to produce more discriminative features. Overall, CCNet is with the following merits: 1) GPU memory friendly. Compared with the non-local block, the proposed recurrent criss-cross attention module requires 11x less GPU memory usage. 2) High computational efficiency. The recurrent criss-cross attention significantly reduces FLOPs by about 85% of the non-local block. 3) The state-of-the-art performance. We conduct extensive experiments on semantic segmentation benchmarks including Cityscapes, ADE20K, human parsing benchmark LIP, instance segmentation benchmark COCO, video segmentation benchmark CamVid. In particular, our CCNet achieves the mIoU scores of 81.9%, 45.76% and 55.47% on the Cityscapes test set, the ADE20K validation set and the LIP validation set respectively, which are the new state-of-the-art results. The source codes are available at [this https URL](https://github.com/speedinghzl/CCNet). + + +
    + +
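To make the mechanism above concrete, here is a minimal PyTorch sketch of criss-cross attention: each pixel attends only to the pixels in its own row and column, and applying the module twice (the recurrent operation, R=2) lets information propagate across the full image. This is an illustration of the idea only, not the implementation used by the configs below; the projection widths and the separate row/column softmax are simplifying assumptions.

```python
# Illustrative sketch of criss-cross attention (NOT the mmseg CCNet code).
import torch
import torch.nn as nn
import torch.nn.functional as F


class CrissCrossAttention(nn.Module):
    """Each pixel attends only to pixels in its own row and column."""

    def __init__(self, channels, reduction=8):
        super().__init__()
        self.query = nn.Conv2d(channels, channels // reduction, 1)
        self.key = nn.Conv2d(channels, channels // reduction, 1)
        self.value = nn.Conv2d(channels, channels, 1)
        self.gamma = nn.Parameter(torch.zeros(1))  # learnable residual scale

    def forward(self, x):
        b, c, h, w = x.shape
        q, k, v = self.query(x), self.key(x), self.value(x)

        # Row attention: each row is treated independently -> (b*h, w, w) affinities.
        q_row = q.permute(0, 2, 3, 1).reshape(b * h, w, -1)
        k_row = k.permute(0, 2, 3, 1).reshape(b * h, w, -1)
        v_row = v.permute(0, 2, 3, 1).reshape(b * h, w, -1)
        energy_row = torch.bmm(q_row, k_row.transpose(1, 2))

        # Column attention: each column is treated independently -> (b*w, h, h).
        q_col = q.permute(0, 3, 2, 1).reshape(b * w, h, -1)
        k_col = k.permute(0, 3, 2, 1).reshape(b * w, h, -1)
        v_col = v.permute(0, 3, 2, 1).reshape(b * w, h, -1)
        energy_col = torch.bmm(q_col, k_col.transpose(1, 2))

        # The paper normalises over the joint criss-cross neighbourhood; rows and
        # columns are normalised separately here for brevity.
        out_row = torch.bmm(F.softmax(energy_row, dim=-1), v_row)
        out_col = torch.bmm(F.softmax(energy_col, dim=-1), v_col)
        out_row = out_row.reshape(b, h, w, c).permute(0, 3, 1, 2)
        out_col = out_col.reshape(b, w, h, c).permute(0, 3, 2, 1)
        return self.gamma * (out_row + out_col) + x


# Recurrence R=2: two passes let every pixel reach the full image.
cca = CrissCrossAttention(64)
x = torch.randn(2, 64, 32, 32)
print(cca(cca(x)).shape)  # torch.Size([2, 64, 32, 32])
```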
    + +## Citation + +```bibtex +@article{huang2018ccnet, + title={CCNet: Criss-Cross Attention for Semantic Segmentation}, + author={Huang, Zilong and Wang, Xinggang and Huang, Lichao and Huang, Chang and Wei, Yunchao and Liu, Wenyu}, + booktitle={ICCV}, + year={2019} +} +``` + + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| CCNet | R-50-D8 | 512x1024 | 40000 | 6 | 3.32 | 77.76 | 78.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517-4123f401.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517.log.json) | +| CCNet | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.31 | 76.35 | 78.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540-a3b84ba6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540.log.json) | +| CCNet | R-50-D8 | 769x769 | 40000 | 6.8 | 1.43 | 78.46 | 79.93 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125-76d11884.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125.log.json) | +| CCNet | R-101-D8 | 769x769 | 40000 | 10.7 | 1.01 | 76.94 | 78.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428-4f57c8d0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428.log.json) | +| CCNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.03 | 80.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421-869a3423.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421.log.json) | +| CCNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.87 | 79.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935-ffae8917.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935.log.json) | +| CCNet | R-50-D8 | 769x769 | 80000 | - | - | 79.29 | 81.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421-73eed8ca.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421.log.json) | +| CCNet | R-101-D8 | 769x769 | 80000 | - | - | 79.45 | 80.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502-ad3cd481.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CCNet | R-50-D8 | 512x512 | 80000 | 8.8 | 20.89 | 41.78 | 42.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848-aa37f61e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848.log.json) | +| CCNet | R-101-D8 | 512x512 | 80000 | 12.2 | 14.11 | 43.97 | 45.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848-1f4929a3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848.log.json) | +| CCNet | R-50-D8 | 512x512 | 160000 | - | - | 42.08 | 43.13 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435-7c97193b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435.log.json) | +| CCNet | R-101-D8 | 512x512 | 160000 | - | - | 43.71 | 45.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644-e849e007.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| CCNet | R-50-D8 | 512x512 | 20000 | 6 | 20.45 | 76.17 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212-fad81784.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212.log.json) | +| CCNet | R-101-D8 | 512x512 | 20000 | 9.5 | 13.64 | 77.27 | 79.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212-0007b61d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212.log.json) | +| CCNet | R-50-D8 | 512x512 | 40000 | - | - | 75.96 | 77.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127-c2a15f02.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127.log.json) | +| CCNet | R-101-D8 | 512x512 | 40000 | - | - | 77.87 | 78.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127-c30da577.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127.log.json) | diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet.yml b/downstream/mmsegmentation/configs/ccnet/ccnet.yml new file mode 100644 index 0000000..b264f2e --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet.yml @@ -0,0 +1,305 @@ +Collections: +- Name: CCNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1811.11721 + Title: 'CCNet: Criss-Cross Attention for Semantic Segmentation' + README: configs/ccnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/apc_head.py#L111 + Version: v0.17.0 + Converted From: + Code: https://github.com/speedinghzl/CCNet +Models: +- Name: ccnet_r50-d8_512x1024_40k_cityscapes + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 301.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.76 + mIoU(ms+flip): 78.87 + Config: configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517-4123f401.pth +- Name: ccnet_r101-d8_512x1024_40k_cityscapes + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 432.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.35 + mIoU(ms+flip): 78.19 + Config: configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540-a3b84ba6.pth +- Name: ccnet_r50-d8_769x769_40k_cityscapes + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 699.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.46 + mIoU(ms+flip): 79.93 + Config: configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125-76d11884.pth +- Name: ccnet_r101-d8_769x769_40k_cityscapes + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 990.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.94 + mIoU(ms+flip): 78.62 + Config: configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428-4f57c8d0.pth +- Name: ccnet_r50-d8_512x1024_80k_cityscapes + In Collection: CCNet + 
Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.03 + mIoU(ms+flip): 80.16 + Config: configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421-869a3423.pth +- Name: ccnet_r101-d8_512x1024_80k_cityscapes + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.87 + mIoU(ms+flip): 79.9 + Config: configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935-ffae8917.pth +- Name: ccnet_r50-d8_769x769_80k_cityscapes + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.29 + mIoU(ms+flip): 81.08 + Config: configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421-73eed8ca.pth +- Name: ccnet_r101-d8_769x769_80k_cityscapes + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.45 + mIoU(ms+flip): 80.66 + Config: configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502-ad3cd481.pth +- Name: ccnet_r50-d8_512x512_80k_ade20k + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.78 + mIoU(ms+flip): 42.98 + Config: configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848-aa37f61e.pth +- Name: ccnet_r101-d8_512x512_80k_ade20k + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 70.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.97 + mIoU(ms+flip): 45.13 + Config: configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848-1f4929a3.pth +- Name: ccnet_r50-d8_512x512_160k_ade20k + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.08 + mIoU(ms+flip): 43.13 + Config: configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435-7c97193b.pth +- Name: ccnet_r101-d8_512x512_160k_ade20k + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.71 + mIoU(ms+flip): 45.04 + Config: configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644-e849e007.pth +- Name: ccnet_r50-d8_512x512_20k_voc12aug + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 48.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.17 + mIoU(ms+flip): 77.51 + Config: configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212-fad81784.pth +- Name: ccnet_r101-d8_512x512_20k_voc12aug + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 73.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.27 + mIoU(ms+flip): 79.02 + Config: configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212-0007b61d.pth +- Name: ccnet_r50-d8_512x512_40k_voc12aug + In Collection: CCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 75.96 + mIoU(ms+flip): 77.04 + Config: configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127-c2a15f02.pth +- Name: ccnet_r101-d8_512x512_40k_voc12aug + In Collection: CCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.87 + mIoU(ms+flip): 78.9 + Config: configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127-c30da577.pth diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..d2bac38 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py 
b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..989928a --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..c32bf48 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..53eb77c --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..d7eb668 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..029c1d5 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..43f05fa --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..654f377 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './ccnet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..6a4316d --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', 
'../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..16e3435 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..1ad94d8 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..bbcd29c --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..947b8ac --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..1a1f49c --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..580d59c --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', 
crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..c6dac64 --- /dev/null +++ b/downstream/mmsegmentation/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/ccnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/cgnet/README.md b/downstream/mmsegmentation/configs/cgnet/README.md new file mode 100644 index 0000000..fefb291 --- /dev/null +++ b/downstream/mmsegmentation/configs/cgnet/README.md @@ -0,0 +1,45 @@ +# CGNet + +[CGNet: A Light-weight Context Guided Network for Semantic Segmentation](https://arxiv.org/abs/1811.08201) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The demand of applying semantic segmentation model on mobile devices has been increasing rapidly. Current state-of-the-art networks have enormous amount of parameters hence unsuitable for mobile devices, while other small memory footprint models follow the spirit of classification network and ignore the inherent characteristic of semantic segmentation. To tackle this problem, we propose a novel Context Guided Network (CGNet), which is a light-weight and efficient network for semantic segmentation. We first propose the Context Guided (CG) block, which learns the joint feature of both local feature and surrounding context, and further improves the joint feature with the global context. Based on the CG block, we develop CGNet which captures contextual information in all stages of the network and is specially tailored for increasing segmentation accuracy. CGNet is also elaborately designed to reduce the number of parameters and save memory footprint. Under an equivalent number of parameters, the proposed CGNet significantly outperforms existing segmentation networks. Extensive experiments on Cityscapes and CamVid datasets verify the effectiveness of the proposed approach. Specifically, without any post-processing and multi-scale testing, the proposed CGNet achieves 64.8% mean IoU on Cityscapes with less than 0.5 M parameters. The source code for the complete system can be found at [this https URL](https://github.com/wutianyiRosun/CGNet). + + +
    + +
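As a rough illustration of the Context Guided (CG) block described above, the sketch below combines a local feature, a surrounding-context feature from a dilated convolution, their joint feature, and a global-context gate. The channel split, depthwise convolutions and residual connection are assumptions made for brevity; see `mmseg/models/backbones/cgnet.py` (linked in `cgnet.yml` below) for the actual implementation.

```python
# Illustrative sketch of a Context Guided (CG) block (NOT the mmseg CGNet code).
import torch
import torch.nn as nn


class ContextGuidedBlock(nn.Module):
    def __init__(self, channels, dilation=2, reduction=16):
        super().__init__()
        half = channels // 2
        self.reduce = nn.Conv2d(channels, half, 1)
        # f_loc: local feature from a standard 3x3 (channel-wise) convolution.
        self.f_loc = nn.Conv2d(half, half, 3, padding=1, groups=half, bias=False)
        # f_sur: surrounding context from a dilated 3x3 (channel-wise) convolution.
        self.f_sur = nn.Conv2d(half, half, 3, padding=dilation,
                               dilation=dilation, groups=half, bias=False)
        # f_joi: joint feature of local + surrounding context.
        self.bn_act = nn.Sequential(nn.BatchNorm2d(channels), nn.PReLU(channels))
        # f_glo: global context used as a channel-wise gate.
        self.f_glo = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, 1), nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, 1), nn.Sigmoid())

    def forward(self, x):
        y = self.reduce(x)
        joi = torch.cat([self.f_loc(y), self.f_sur(y)], dim=1)  # joint feature
        joi = self.bn_act(joi)
        out = joi * self.f_glo(joi)  # refine the joint feature with global context
        return out + x               # residual keeps the block light-weight


x = torch.randn(1, 64, 128, 256)
print(ContextGuidedBlock(64)(x).shape)  # torch.Size([1, 64, 128, 256])
```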
    + +## Citation + +```bibtext +@article{wu2020cgnet, + title={Cgnet: A light-weight context guided network for semantic segmentation}, + author={Wu, Tianyi and Tang, Sheng and Zhang, Rui and Cao, Juan and Zhang, Yongdong}, + journal={IEEE Transactions on Image Processing}, + volume={30}, + pages={1169--1179}, + year={2020}, + publisher={IEEE} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CGNet | M3N21 | 680x680 | 60000 | 7.5 | 30.51 | 65.63 | 68.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/cgnet/cgnet_680x680_60k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes-20201101_110253.log.json) | +| CGNet | M3N21 | 512x1024 | 60000 | 8.3 | 31.14 | 68.27 | 70.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/cgnet/cgnet_512x1024_60k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes_20201101_110254-124ea03b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes-20201101_110254.log.json) | diff --git a/downstream/mmsegmentation/configs/cgnet/cgnet.yml b/downstream/mmsegmentation/configs/cgnet/cgnet.yml new file mode 100644 index 0000000..bcd6d89 --- /dev/null +++ b/downstream/mmsegmentation/configs/cgnet/cgnet.yml @@ -0,0 +1,59 @@ +Collections: +- Name: CGNet + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/1811.08201 + Title: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation' + README: configs/cgnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/cgnet.py#L187 + Version: v0.17.0 + Converted From: + Code: https://github.com/wutianyiRosun/CGNet +Models: +- Name: cgnet_680x680_60k_cityscapes + In Collection: CGNet + Metadata: + backbone: M3N21 + crop size: (680,680) + lr schd: 60000 + inference time (ms/im): + - value: 32.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (680,680) + Training Memory (GB): 7.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 65.63 + mIoU(ms+flip): 68.04 + Config: configs/cgnet/cgnet_680x680_60k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth +- Name: cgnet_512x1024_60k_cityscapes + In Collection: CGNet + Metadata: + backbone: M3N21 + crop size: (512,1024) + lr schd: 60000 + inference time (ms/im): + - value: 32.11 + hardware: V100 + backend: PyTorch + batch size: 1 + 
mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 68.27 + mIoU(ms+flip): 70.33 + Config: configs/cgnet/cgnet_512x1024_60k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes_20201101_110254-124ea03b.pth diff --git a/downstream/mmsegmentation/configs/cgnet/cgnet_512x1024_60k_cityscapes.py b/downstream/mmsegmentation/configs/cgnet/cgnet_512x1024_60k_cityscapes.py new file mode 100644 index 0000000..11421ef --- /dev/null +++ b/downstream/mmsegmentation/configs/cgnet/cgnet_512x1024_60k_cityscapes.py @@ -0,0 +1,66 @@ +_base_ = ['../_base_/models/cgnet.py', '../_base_/default_runtime.py'] + +# optimizer +optimizer = dict(type='Adam', lr=0.001, eps=1e-08, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +total_iters = 60000 +checkpoint_config = dict(by_epoch=False, interval=4000) +evaluation = dict(interval=4000, metric='mIoU') + +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[72.39239876, 82.90891754, 73.15835921], std=[1, 1, 1], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/train', + ann_dir='gtFine/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/cgnet/cgnet_680x680_60k_cityscapes.py b/downstream/mmsegmentation/configs/cgnet/cgnet_680x680_60k_cityscapes.py new file mode 100644 index 0000000..2b2f8ee --- /dev/null +++ b/downstream/mmsegmentation/configs/cgnet/cgnet_680x680_60k_cityscapes.py @@ -0,0 +1,50 @@ +_base_ = [ + '../_base_/models/cgnet.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py' +] + +# optimizer +optimizer = dict(type='Adam', lr=0.001, eps=1e-08, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +total_iters = 60000 +checkpoint_config = dict(by_epoch=False, interval=4000) +evaluation = dict(interval=4000, metric='mIoU') + +img_norm_cfg = dict( + mean=[72.39239876, 
82.90891754, 73.15835921], std=[1, 1, 1], to_rgb=True) +crop_size = (680, 680) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=8, + workers_per_gpu=8, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/convnext/README.md b/downstream/mmsegmentation/configs/convnext/README.md new file mode 100644 index 0000000..48c37af --- /dev/null +++ b/downstream/mmsegmentation/configs/convnext/README.md @@ -0,0 +1,71 @@ +# ConvNeXt + +[A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets. + + +
    + +
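The "modernized" block the abstract refers to is small enough to sketch. The snippet below shows one ConvNeXt block roughly as laid out in the paper (7x7 depthwise convolution, LayerNorm in channels-last layout, an inverted-bottleneck MLP with GELU, layer scale, residual; stochastic depth omitted). The segmentation configs in this diff do not use this code; they import the backbone from MMClassification as `mmcls.ConvNeXt`.

```python
# Minimal sketch of a single ConvNeXt block, for illustration only.
import torch
import torch.nn as nn


class ConvNeXtBlock(nn.Module):
    def __init__(self, dim, layer_scale_init_value=1e-6):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.norm = nn.LayerNorm(dim)           # applied in channels-last layout
        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise expansion
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)  # pointwise projection
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim))

    def forward(self, x):                       # x: (N, C, H, W)
        shortcut = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)               # (N, H, W, C)
        x = self.pwconv2(self.act(self.pwconv1(self.norm(x))))
        x = self.gamma * x
        x = x.permute(0, 3, 1, 2)               # back to (N, C, H, W)
        return shortcut + x


print(ConvNeXtBlock(96)(torch.randn(1, 96, 56, 56)).shape)  # (1, 96, 56, 56)
```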
    + +```bibtex +@article{liu2022convnet, + title={A ConvNet for the 2020s}, + author={Liu, Zhuang and Mao, Hanzi and Wu, Chao-Yuan and Feichtenhofer, Christoph and Darrell, Trevor and Xie, Saining}, + journal={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2022} +} +``` + +### Usage + +- ConvNeXt backbone needs to install [MMClassification](https://github.com/open-mmlab/mmclassification) first, which has abundant backbones for downstream tasks. + +```shell +pip install mmcls>=0.20.1 +``` + +### Pre-trained Models + +The pre-trained models on ImageNet-1k or ImageNet-21k are used to fine-tune on the downstream tasks. + +| Model | Training Data | Params(M) | Flops(G) | Download | +|:--------------:|:-------------:|:---------:|:--------:|:--------:| +| ConvNeXt-T\* | ImageNet-1k | 28.59 | 4.46 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth) | +| ConvNeXt-S\* | ImageNet-1k | 50.22 | 8.69 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth) | +| ConvNeXt-B\* | ImageNet-1k | 88.59 | 15.36 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_32xb128-noema_in1k_20220301-2a0ee547.pth) | +| ConvNeXt-B\* | ImageNet-21k | 88.59 | 15.36 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_in21k_20220301-262fd037.pth) | +| ConvNeXt-L\* | ImageNet-21k | 197.77 | 34.37 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-large_3rdparty_in21k_20220301-e6e0ea0a.pth) | +| ConvNeXt-XL\* | ImageNet-21k | 350.20 | 60.93 | [model](https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-xlarge_3rdparty_in21k_20220301-08aa5ddc.pth) | + +*Models with \* are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt/tree/main/semantic_segmentation#results-and-fine-tuned-models).* + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ---------- | ------- | -------- | --- | --- | -------------- | ----- | +| UperNet | ConvNeXt-T | 512x512 | 160000 | 4.23 | 19.90 | 46.11 | 46.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553.log.json) | +| UperNet | ConvNeXt-S | 512x512 | 160000 | 5.16 | 15.18 | 48.56 | 49.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208.log.json) | +| UperNet | ConvNeXt-B | 512x512 | 160000 | 6.33 | 14.41 | 48.71 | 49.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227.log.json) | +| UperNet | ConvNeXt-B |640x640 | 160000 | 8.53 | 10.88 | 52.13 | 52.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k/upernet_convnext_base_fp16_640x640_160k_ade20k_20220227_182859-9280e39b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k/upernet_convnext_base_fp16_640x640_160k_ade20k_20220227_182859.log.json) | +| UperNet | ConvNeXt-L |640x640 | 160000 | 12.08 | 7.69 | 53.16 | 53.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532.log.json) | +| UperNet | ConvNeXt-XL |640x640 | 160000 | 26.16\* | 6.33 | 53.58 | 54.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344.log.json) | + +Note: + +- `Mem (GB)` with \* is collected when `cudnn_benchmark=True`, and hardware is V100. 
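
As a quick sanity check, any checkpoint from the table above can be loaded through the mmsegmentation v0.x Python API; training uses the usual `tools/train.py` / `tools/dist_train.sh` workflow with these configs and needs nothing ConvNeXt-specific beyond the MMClassification dependency noted in the Usage section. The sketch below assumes it is run from the `downstream/mmsegmentation` directory and that a local test image is available.

```python
# Load the ConvNeXt-T UperNet checkpoint from the table and run single-image
# inference with the mmsegmentation v0.x API. `demo.png` is a placeholder for
# any local test image.
from mmseg.apis import inference_segmentor, init_segmentor

config = 'configs/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k.py'
checkpoint = ('https://download.openmmlab.com/mmsegmentation/v0.5/convnext/'
              'upernet_convnext_tiny_fp16_512x512_160k_ade20k/'
              'upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth')

model = init_segmentor(config, checkpoint, device='cuda:0')
result = inference_segmentor(model, 'demo.png')  # list with one HxW label map
print(result[0].shape)
```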
diff --git a/downstream/mmsegmentation/configs/convnext/convnext.yml b/downstream/mmsegmentation/configs/convnext/convnext.yml new file mode 100644 index 0000000..3e521ef --- /dev/null +++ b/downstream/mmsegmentation/configs/convnext/convnext.yml @@ -0,0 +1,133 @@ +Models: +- Name: upernet_convnext_tiny_fp16_512x512_160k_ade20k + In Collection: UperNet + Metadata: + backbone: ConvNeXt-T + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 50.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (512,512) + Training Memory (GB): 4.23 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.11 + mIoU(ms+flip): 46.62 + Config: configs/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth +- Name: upernet_convnext_small_fp16_512x512_160k_ade20k + In Collection: UperNet + Metadata: + backbone: ConvNeXt-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 65.88 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (512,512) + Training Memory (GB): 5.16 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.56 + mIoU(ms+flip): 49.02 + Config: configs/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth +- Name: upernet_convnext_base_fp16_512x512_160k_ade20k + In Collection: UperNet + Metadata: + backbone: ConvNeXt-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 69.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (512,512) + Training Memory (GB): 6.33 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.71 + mIoU(ms+flip): 49.54 + Config: configs/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth +- Name: upernet_convnext_base_fp16_640x640_160k_ade20k + In Collection: UperNet + Metadata: + backbone: ConvNeXt-B + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 91.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (640,640) + Training Memory (GB): 8.53 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.13 + mIoU(ms+flip): 52.66 + Config: configs/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k/upernet_convnext_base_fp16_640x640_160k_ade20k_20220227_182859-9280e39b.pth +- Name: upernet_convnext_large_fp16_640x640_160k_ade20k + In Collection: UperNet + Metadata: + backbone: ConvNeXt-L + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 130.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (640,640) + Training Memory (GB): 12.08 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 53.16 + mIoU(ms+flip): 53.38 + Config: 
configs/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth +- Name: upernet_convnext_xlarge_fp16_640x640_160k_ade20k + In Collection: UperNet + Metadata: + backbone: ConvNeXt-XL + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 157.98 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (640,640) + Training Memory (GB): 26.16 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 53.58 + mIoU(ms+flip): 54.11 + Config: configs/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth diff --git a/downstream/mmsegmentation/configs/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k.py new file mode 100644 index 0000000..7bf35b2 --- /dev/null +++ b/downstream/mmsegmentation/configs/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +model = dict( + decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=150), + auxiliary_head=dict(in_channels=512, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(341, 341)), +) + +optimizer = dict( + constructor='LearningRateDecayOptimizerConstructor', + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic') +# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k.py b/downstream/mmsegmentation/configs/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k.py new file mode 100644 index 0000000..8d2c0c2 --- /dev/null +++ b/downstream/mmsegmentation/configs/convnext/upernet_convnext_base_fp16_640x640_160k_ade20k.py @@ -0,0 +1,55 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', + '../_base_/datasets/ade20k_640x640.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (640, 640) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_in21k_20220301-262fd037.pth' # noqa +model = dict( + backbone=dict( + type='mmcls.ConvNeXt', + arch='base', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[128, 256, 512, 1024], + num_classes=150, + ), + auxiliary_head=dict(in_channels=512, num_classes=150), + 
test_cfg=dict(mode='slide', crop_size=crop_size, stride=(426, 426)), +) + +optimizer = dict( + constructor='LearningRateDecayOptimizerConstructor', + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic') +# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k.py b/downstream/mmsegmentation/configs/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k.py new file mode 100644 index 0000000..7527ed5 --- /dev/null +++ b/downstream/mmsegmentation/configs/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k.py @@ -0,0 +1,55 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', + '../_base_/datasets/ade20k_640x640.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (640, 640) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-large_3rdparty_in21k_20220301-e6e0ea0a.pth' # noqa +model = dict( + backbone=dict( + type='mmcls.ConvNeXt', + arch='large', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[192, 384, 768, 1536], + num_classes=150, + ), + auxiliary_head=dict(in_channels=768, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(426, 426)), +) + +optimizer = dict( + constructor='LearningRateDecayOptimizerConstructor', + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic') +# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k.py new file mode 100644 index 0000000..2e95f3a --- /dev/null +++ b/downstream/mmsegmentation/configs/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa +model = dict( + backbone=dict( + type='mmcls.ConvNeXt', + arch='small', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.3, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + 
prefix='backbone.')), + decode_head=dict( + in_channels=[96, 192, 384, 768], + num_classes=150, + ), + auxiliary_head=dict(in_channels=384, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(341, 341)), +) + +optimizer = dict( + constructor='LearningRateDecayOptimizerConstructor', + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic') +# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k.py new file mode 100644 index 0000000..35c72a8 --- /dev/null +++ b/downstream/mmsegmentation/configs/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +crop_size = (512, 512) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa +model = dict( + backbone=dict( + type='mmcls.ConvNeXt', + arch='tiny', + out_indices=[0, 1, 2, 3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[96, 192, 384, 768], + num_classes=150, + ), + auxiliary_head=dict(in_channels=384, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(341, 341)), +) + +optimizer = dict( + constructor='LearningRateDecayOptimizerConstructor', + _delete_=True, + type='AdamW', + lr=0.0001, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 6 + }) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic') +# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k.py b/downstream/mmsegmentation/configs/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k.py new file mode 100644 index 0000000..0e2f38e --- /dev/null +++ b/downstream/mmsegmentation/configs/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k.py @@ -0,0 +1,55 @@ +_base_ = [ + '../_base_/models/upernet_convnext.py', + '../_base_/datasets/ade20k_640x640.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +crop_size = (640, 640) +checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-xlarge_3rdparty_in21k_20220301-08aa5ddc.pth' # noqa +model = dict( + backbone=dict( + type='mmcls.ConvNeXt', + arch='xlarge', + out_indices=[0, 1, 2, 
3], + drop_path_rate=0.4, + layer_scale_init_value=1.0, + gap_before_final_norm=False, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint_file, + prefix='backbone.')), + decode_head=dict( + in_channels=[256, 512, 1024, 2048], + num_classes=150, + ), + auxiliary_head=dict(in_channels=1024, num_classes=150), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(426, 426)), +) + +optimizer = dict( + constructor='LearningRateDecayOptimizerConstructor', + _delete_=True, + type='AdamW', + lr=0.00008, + betas=(0.9, 0.999), + weight_decay=0.05, + paramwise_cfg={ + 'decay_rate': 0.9, + 'decay_type': 'stage_wise', + 'num_layers': 12 + }) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic') +# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/danet/README.md b/downstream/mmsegmentation/configs/danet/README.md new file mode 100644 index 0000000..411c595 --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/README.md @@ -0,0 +1,66 @@ +# DANet + +[Dual Attention Network for Scene Segmentation](https://arxiv.org/abs/1809.02983) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this paper, we address the scene segmentation task by capturing rich contextual dependencies based on the selfattention mechanism. Unlike previous works that capture contexts by multi-scale features fusion, we propose a Dual Attention Networks (DANet) to adaptively integrate local features with their global dependencies. Specifically, we append two types of attention modules on top of traditional dilated FCN, which model the semantic interdependencies in spatial and channel dimensions respectively. The position attention module selectively aggregates the features at each position by a weighted sum of the features at all positions. Similar features would be related to each other regardless of their distances. Meanwhile, the channel attention module selectively emphasizes interdependent channel maps by integrating associated features among all channel maps. We sum the outputs of the two attention modules to further improve feature representation which contributes to more precise segmentation results. We achieve new state-of-the-art segmentation performance on three challenging scene segmentation datasets, i.e., Cityscapes, PASCAL Context and COCO Stuff dataset. In particular, a Mean IoU score of 81.5% on Cityscapes test set is achieved without using coarse data. We make the code and trained model publicly available at [this https URL](https://github.com/junfu1115/DANet). + + +
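The dual-attention mechanism summarized in the abstract is easiest to see in code. Below is an illustrative PyTorch sketch of the position-attention idea (each location re-weighted by its affinity to every other location); it is a simplified stand-in with arbitrary channel and reduction sizes, not the `da_head.py` implementation that the configs in this patch rely on.

```python
# Illustrative sketch of position attention as described in the abstract;
# NOT the mmseg DAHead implementation. Channel sizes are assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F


class PositionAttention(nn.Module):
    """Aggregate each position as a weighted sum of features at all positions."""

    def __init__(self, channels: int, reduction: int = 8):
        super().__init__()
        self.query = nn.Conv2d(channels, channels // reduction, kernel_size=1)
        self.key = nn.Conv2d(channels, channels // reduction, kernel_size=1)
        self.value = nn.Conv2d(channels, channels, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))  # learnable residual weight

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        q = self.query(x).flatten(2).transpose(1, 2)   # (N, HW, C/r)
        k = self.key(x).flatten(2)                     # (N, C/r, HW)
        attn = F.softmax(torch.bmm(q, k), dim=-1)      # (N, HW, HW) affinities
        v = self.value(x).flatten(2)                   # (N, C, HW)
        out = torch.bmm(v, attn.transpose(1, 2)).view(n, c, h, w)
        return self.gamma * out + x                    # residual connection


x = torch.randn(1, 64, 32, 32)
print(PositionAttention(64)(x).shape)  # torch.Size([1, 64, 32, 32])
```

The channel attention module described in the abstract works analogously, but computes a C x C affinity over channel maps instead of the HW x HW affinity over positions, and the outputs of the two modules are summed before prediction.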
    + +## Citation + +```bibtex +@article{fu2018dual, + title={Dual Attention Network for Scene Segmentation}, + author={Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu}, + booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DANet | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.66 | 78.74 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324.log.json) | +| DANet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.99 | 80.52 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831-c57a7157.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831.log.json) | +| DANet | R-50-D8 | 769x769 | 40000 | 8.8 | 1.56 | 78.88 | 80.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703-76681c60.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703.log.json) | +| DANet | R-101-D8 | 769x769 | 40000 | 12.8 | 1.07 | 79.88 | 81.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717-dcb7fd4e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717.log.json) | +| DANet | R-50-D8 | 512x1024 | 80000 | - | - | 79.34 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029-2bfa2293.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029.log.json) | +| DANet | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918-955e6350.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918.log.json) | +| DANet | R-50-D8 | 769x769 | 80000 | - | - | 79.27 | 80.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954-495689b4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954.log.json) | +| DANet | R-101-D8 | 769x769 | 80000 | - | - | 80.47 | 82.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918-f3a929e7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DANet | R-50-D8 | 512x512 | 80000 | 11.5 | 21.20 | 41.66 | 42.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125-edb18e08.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125.log.json) | +| DANet | R-101-D8 | 512x512 | 80000 | 15 | 14.18 | 43.64 | 45.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126-d0357c73.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126.log.json) | +| DANet | R-50-D8 | 512x512 | 160000 | - | - | 42.45 | 43.25 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340-9cb35dcd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340.log.json) | +| DANet | R-101-D8 | 512x512 | 160000 | - | - | 44.17 | 45.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348-23bf12f9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DANet | R-50-D8 | 512x512 | 20000 | 6.5 | 20.94 | 74.45 | 75.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026-9e9e3ab3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026.log.json) | +| DANet | R-101-D8 | 512x512 | 20000 | 9.9 | 13.76 | 76.02 | 77.23 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026-d48d23b2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026.log.json) | +| DANet | R-50-D8 | 512x512 | 40000 | - | - | 76.37 | 77.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526-426e3a64.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526.log.json) | +| DANet | R-101-D8 | 512x512 | 40000 | - | - | 76.51 | 77.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031-788e232a.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031.log.json) | diff --git a/downstream/mmsegmentation/configs/danet/danet.yml b/downstream/mmsegmentation/configs/danet/danet.yml new file mode 100644 index 0000000..ca2d6ff --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet.yml @@ -0,0 +1,301 @@ +Collections: +- Name: DANet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1809.02983 + Title: Dual Attention Network for Scene Segmentation + README: configs/danet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/da_head.py#L76 + Version: v0.17.0 + Converted From: + Code: https://github.com/junfu1115/DANet/ +Models: +- Name: danet_r50-d8_512x1024_40k_cityscapes + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 375.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.74 + Config: configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_40k_cityscapes/danet_r50-d8_512x1024_40k_cityscapes_20200605_191324-c0dbfa5f.pth +- Name: danet_r101-d8_512x1024_40k_cityscapes + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 502.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.52 + Config: configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_40k_cityscapes/danet_r101-d8_512x1024_40k_cityscapes_20200605_200831-c57a7157.pth +- Name: danet_r50-d8_769x769_40k_cityscapes + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 641.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.88 + mIoU(ms+flip): 80.62 + Config: configs/danet/danet_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_40k_cityscapes/danet_r50-d8_769x769_40k_cityscapes_20200530_025703-76681c60.pth +- Name: danet_r101-d8_769x769_40k_cityscapes + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 934.58 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.88 + mIoU(ms+flip): 81.47 + Config: configs/danet/danet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_40k_cityscapes/danet_r101-d8_769x769_40k_cityscapes_20200530_025717-dcb7fd4e.pth +- Name: danet_r50-d8_512x1024_80k_cityscapes + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr 
schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.34 + Config: configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x1024_80k_cityscapes/danet_r50-d8_512x1024_80k_cityscapes_20200607_133029-2bfa2293.pth +- Name: danet_r101-d8_512x1024_80k_cityscapes + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.41 + Config: configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x1024_80k_cityscapes/danet_r101-d8_512x1024_80k_cityscapes_20200607_132918-955e6350.pth +- Name: danet_r50-d8_769x769_80k_cityscapes + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.27 + mIoU(ms+flip): 80.96 + Config: configs/danet/danet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_769x769_80k_cityscapes/danet_r50-d8_769x769_80k_cityscapes_20200607_132954-495689b4.pth +- Name: danet_r101-d8_769x769_80k_cityscapes + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.47 + mIoU(ms+flip): 82.02 + Config: configs/danet/danet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_769x769_80k_cityscapes/danet_r101-d8_769x769_80k_cityscapes_20200607_132918-f3a929e7.pth +- Name: danet_r50-d8_512x512_80k_ade20k + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.17 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 11.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.66 + mIoU(ms+flip): 42.9 + Config: configs/danet/danet_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_80k_ade20k/danet_r50-d8_512x512_80k_ade20k_20200615_015125-edb18e08.pth +- Name: danet_r101-d8_512x512_80k_ade20k + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 70.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 15.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.64 + mIoU(ms+flip): 45.19 + Config: configs/danet/danet_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_80k_ade20k/danet_r101-d8_512x512_80k_ade20k_20200615_015126-d0357c73.pth +- Name: danet_r50-d8_512x512_160k_ade20k + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.45 + mIoU(ms+flip): 43.25 + Config: configs/danet/danet_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_160k_ade20k/danet_r50-d8_512x512_160k_ade20k_20200616_082340-9cb35dcd.pth +- Name: danet_r101-d8_512x512_160k_ade20k + 
In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.17 + mIoU(ms+flip): 45.02 + Config: configs/danet/danet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_160k_ade20k/danet_r101-d8_512x512_160k_ade20k_20200616_082348-23bf12f9.pth +- Name: danet_r50-d8_512x512_20k_voc12aug + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 47.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.45 + mIoU(ms+flip): 75.69 + Config: configs/danet/danet_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_20k_voc12aug/danet_r50-d8_512x512_20k_voc12aug_20200618_070026-9e9e3ab3.pth +- Name: danet_r101-d8_512x512_20k_voc12aug + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 72.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.02 + mIoU(ms+flip): 77.23 + Config: configs/danet/danet_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_20k_voc12aug/danet_r101-d8_512x512_20k_voc12aug_20200618_070026-d48d23b2.pth +- Name: danet_r50-d8_512x512_40k_voc12aug + In Collection: DANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.37 + mIoU(ms+flip): 77.29 + Config: configs/danet/danet_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r50-d8_512x512_40k_voc12aug/danet_r50-d8_512x512_40k_voc12aug_20200613_235526-426e3a64.pth +- Name: danet_r101-d8_512x512_40k_voc12aug + In Collection: DANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.51 + mIoU(ms+flip): 77.32 + Config: configs/danet/danet_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/danet/danet_r101-d8_512x512_40k_voc12aug/danet_r101-d8_512x512_40k_voc12aug_20200613_223031-788e232a.pth diff --git a/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..3bfb9bd --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..d80b2ec --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = 
'./danet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..0f22d0f --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..709f93c --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..5c623eb --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..bd31bc8 --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..597d76d --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..70f9b31 --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './danet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..1b70c5b --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py 
b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..0373431 --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..22aaf85 --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..010f86f --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..0cef0f0 --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..154e848 --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..5c5b94e --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py 
new file mode 100644 index 0000000..c7237ae --- /dev/null +++ b/downstream/mmsegmentation/configs/danet/danet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/danet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/deeplabv3/README.md b/downstream/mmsegmentation/configs/deeplabv3/README.md new file mode 100644 index 0000000..a5d85a5 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/README.md @@ -0,0 +1,116 @@ +# DeepLabV3 + +[Rethinking atrous convolution for semantic image segmentation](https://arxiv.org/abs/1706.05587) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this work, we revisit atrous convolution, a powerful tool to explicitly adjust filter's field-of-view as well as control the resolution of feature responses computed by Deep Convolutional Neural Networks, in the application of semantic image segmentation. To handle the problem of segmenting objects at multiple scales, we design modules which employ atrous convolution in cascade or in parallel to capture multi-scale context by adopting multiple atrous rates. Furthermore, we propose to augment our previously proposed Atrous Spatial Pyramid Pooling module, which probes convolutional features at multiple scales, with image-level features encoding global context and further boost performance. We also elaborate on implementation details and share our experience on training our system. The proposed `DeepLabv3' system significantly improves over our previous DeepLab versions without DenseCRF post-processing and attains comparable performance with other state-of-art models on the PASCAL VOC 2012 semantic image segmentation benchmark. + + +
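To make the abstract's terminology concrete, the following is an illustrative PyTorch sketch of an ASPP-style block: parallel atrous (dilated) 3x3 convolutions at several rates plus an image-level pooling branch, concatenated and projected. It is a simplified stand-in under assumed channel sizes and dilation rates, not the ASPP head used by the configs below.

```python
# Simplified ASPP sketch illustrating atrous convolution plus image-level
# pooling; NOT the mmseg ASPPHead. Channel sizes and rates are assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F


class SimpleASPP(nn.Module):
    def __init__(self, in_ch: int, out_ch: int, rates=(1, 12, 24, 36)):
        super().__init__()
        # Parallel branches: a 1x1 conv plus 3x3 atrous convs at growing rates.
        self.branches = nn.ModuleList([
            nn.Conv2d(in_ch, out_ch, kernel_size=1 if r == 1 else 3,
                      padding=0 if r == 1 else r, dilation=r, bias=False)
            for r in rates
        ])
        # Image-level branch: global average pooling encodes global context.
        self.image_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False),
        )
        self.project = nn.Conv2d(out_ch * (len(rates) + 1), out_ch, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h, w = x.shape[2:]
        feats = [branch(x) for branch in self.branches]
        pooled = F.interpolate(self.image_pool(x), size=(h, w),
                               mode='bilinear', align_corners=False)
        return self.project(torch.cat(feats + [pooled], dim=1))


x = torch.randn(1, 2048, 33, 33)
print(SimpleASPP(2048, 256)(x).shape)  # torch.Size([1, 256, 33, 33])
```

Because dilation enlarges the receptive field without striding or pooling, such a block captures multi-scale context while keeping the feature map at the backbone's output resolution.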
    + +## Citation + +```bibtext +@article{chen2017rethinking, + title={Rethinking atrous convolution for semantic image segmentation}, + author={Chen, Liang-Chieh and Papandreou, George and Schroff, Florian and Adam, Hartwig}, + journal={arXiv preprint arXiv:1706.05587}, + year={2017} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | --------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-50-D8 | 512x1024 | 40000 | 6.1 | 2.57 | 79.09 | 80.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449-acadc2f8.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449.log.json) | +| DeepLabV3 | R-101-D8 | 512x1024 | 40000 | 9.6 | 1.92 | 77.12 | 79.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241-7fd3f799.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241.log.json) | +| DeepLabV3 | R-50-D8 | 769x769 | 40000 | 6.9 | 1.11 | 78.58 | 79.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723-7eda553c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723.log.json) | +| DeepLabV3 | R-101-D8 | 769x769 | 40000 | 10.9 | 0.83 | 79.27 | 80.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809-c64f889f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809.log.json) | +| DeepLabV3 | R-18-D8 | 512x1024 | 80000 | 1.7 | 13.78 | 76.70 | 78.27 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes_20201225_021506-23dffbe2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes-20201225_021506.log.json) | +| DeepLabV3 | R-50-D8 | 512x1024 | 80000 | - | - | 79.32 | 80.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404-b92cfdd4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404.log.json) | +| DeepLabV3 | R-101-D8 | 512x1024 | 80000 | - | - | 80.20 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503.log.json) | +| DeepLabV3 (FP16) | R-101-D8 | 512x1024 | 80000 | 5.75 | 3.86 | 80.48 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-774d9cec.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920.log.json) | +| DeepLabV3 | R-18-D8 | 769x769 | 80000 | 1.9 | 5.55 | 76.60 | 78.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes_20201225_021506-6452126a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes-20201225_021506.log.json) | +| DeepLabV3 | R-50-D8 | 769x769 | 80000 | - | - | 79.89 | 81.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338-788d6228.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338.log.json) | +| DeepLabV3 | R-101-D8 | 769x769 | 80000 | - | - | 79.67 | 80.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353-60e95418.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353.log.json) | +| DeepLabV3 | R-101-D16-MG124 | 512x1024 | 40000 | 4.7 | - 6.96 | 76.71 | 78.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-67b0c992.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes-20200908_005644.log.json) | +| DeepLabV3 | R-101-D16-MG124 | 512x1024 | 80000 | - | - | 78.36 | 79.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-57bb8425.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes-20200908_005644.log.json) | +| DeepLabV3 | R-18b-D8 | 512x1024 | 80000 | 1.6 | 13.93 | 76.26 | 77.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes_20201225_094144-46040cef.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes-20201225_094144.log.json) | +| DeepLabV3 | R-50b-D8 | 512x1024 | 80000 | 6.0 | 2.74 | 79.63 | 80.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes_20201225_155148-ec368954.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes-20201225_155148.log.json) | +| DeepLabV3 | R-101b-D8 | 512x1024 | 80000 | 9.5 | 1.81 | 80.01 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes_20201226_171821-8fd49503.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes-20201226_171821.log.json) | +| DeepLabV3 | R-18b-D8 | 769x769 | 80000 | 1.8 | 5.79 | 76.63 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes_20201225_094144-fdc985d9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes-20201225_094144.log.json) | +| DeepLabV3 | R-50b-D8 | 769x769 | 80000 | 6.8 | 1.16 | 78.80 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes_20201225_155404-87fb0cf4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes-20201225_155404.log.json) | +| DeepLabV3 | R-101b-D8 | 769x769 | 80000 | 10.7 | 0.82 | 79.41 | 80.73 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes_20201226_190843-9142ee57.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes-20201226_190843.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-50-D8 | 512x512 | 80000 | 8.9 | 14.76 | 42.42 | 43.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028-0bb3f844.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 80000 | 12.4 | 10.14 | 44.08 | 45.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256-d89c7fa4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 160000 | - | - | 42.66 | 44.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227-5d0ee427.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 160000 | - | - | 45.00 | 46.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816-b1f72b3b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3 | R-50-D8 | 512x512 | 20000 | 6.1 | 13.88 | 76.17 | 77.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906-596905ef.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 20000 | 9.6 | 9.81 | 78.70 | 79.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932-8d13832f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 40000 | - | - | 77.68 | 78.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546-2ae96e7e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 40000 | - | - | 77.92 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432-0017d784.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3 | R-101-D8 | 480x480 | 40000 | 9.2 | 7.09 | 46.55 | 47.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context_20200911_204118-1aa27336.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context-20200911_204118.log.json) | +| DeepLabV3 | R-101-D8 | 480x480 | 80000 | - | - | 46.42 | 47.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context_20200911_170155-2a21fff3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context-20200911_170155.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3 | R-101-D8 | 480x480 | 40000 | - | - | 52.61 | 54.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59_20210416_110332-cb08ea46.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59-20210416_110332.log.json) | +| DeepLabV3 | R-101-D8 | 480x480 | 80000 | - | - | 52.46 | 54.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59_20210416_113002-26303993.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59-20210416_113002.log.json) | + +### COCO-Stuff 10k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-50-D8 | 512x512 | 20000 | 9.6 | 10.8 | 34.66 | 36.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-b35f789d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 20000 | 13.2 | 8.7 | 37.30 | 38.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-c49752cb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 40000 | - | - | 35.73 | 37.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-dc76f3ff.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 40000 | - | - | 37.81 | 38.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-636cb433.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305.log.json) | + +### COCO-Stuff 164k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3 | R-50-D8 | 512x512 | 80000 | 9.6 | 10.8 | 39.38 | 40.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k_20210709_163016-88675c24.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k_20210709_163016.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 80000 | 13.2 | 8.7 | 40.87 | 41.50 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k_20210709_201252-13600dc2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k_20210709_201252.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 160000 | - | - | 41.09 | 41.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k_20210709_163016-49f2812b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k_20210709_163016.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 160000 | - | - | 41.82 | 42.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k_20210709_155402-f035acfd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k_20210709_155402.log.json) | +| DeepLabV3 | R-50-D8 | 512x512 | 320000 | - | - | 41.37 | 42.22 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k_20210709_155403-51b21115.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k_20210709_155403.log.json) | +| DeepLabV3 | R-101-D8 | 512x512 | 320000 | - | - | 42.61 | 43.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402-3cbca14d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402.log.json) | + +Note: + +- `D-8` here corresponds to the output stride 8 setting for the DeepLab series. +- `FP16` means Mixed Precision (FP16) is adopted in training. diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3.yml b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3.yml new file mode 100644 index 0000000..559af4f --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3.yml @@ -0,0 +1,756 @@ +Collections: +- Name: DeepLabV3 + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + - Pascal Context + - Pascal Context 59 + - COCO-Stuff 10k + - COCO-Stuff 164k + Paper: + URL: https://arxiv.org/abs/1706.05587 + Title: Rethinking atrous convolution for semantic image segmentation + README: configs/deeplabv3/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/aspp_head.py#L54 + Version: v0.17.0 + Converted From: + Code: https://github.com/tensorflow/models/tree/master/research/deeplab +Models: +- Name: deeplabv3_r50-d8_512x1024_40k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 389.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.09 + mIoU(ms+flip): 80.45 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes/deeplabv3_r50-d8_512x1024_40k_cityscapes_20200605_022449-acadc2f8.pth +- Name: deeplabv3_r101-d8_512x1024_40k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 520.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.12 + mIoU(ms+flip): 79.61 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes/deeplabv3_r101-d8_512x1024_40k_cityscapes_20200605_012241-7fd3f799.pth +- Name: deeplabv3_r50-d8_769x769_40k_cityscapes + In
Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 900.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.58 + mIoU(ms+flip): 79.89 + Config: configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes/deeplabv3_r50-d8_769x769_40k_cityscapes_20200606_113723-7eda553c.pth +- Name: deeplabv3_r101-d8_769x769_40k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 1204.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.27 + mIoU(ms+flip): 80.11 + Config: configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes/deeplabv3_r101-d8_769x769_40k_cityscapes_20200606_113809-c64f889f.pth +- Name: deeplabv3_r18-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-18-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 72.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.7 + mIoU(ms+flip): 78.27 + Config: configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes/deeplabv3_r18-d8_512x1024_80k_cityscapes_20201225_021506-23dffbe2.pth +- Name: deeplabv3_r50-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.32 + mIoU(ms+flip): 80.57 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes/deeplabv3_r50-d8_512x1024_80k_cityscapes_20200606_113404-b92cfdd4.pth +- Name: deeplabv3_r101-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.2 + mIoU(ms+flip): 81.21 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes/deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth +- Name: deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 259.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (512,1024) + Training Memory (GB): 5.75 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.48 + Config: configs/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-774d9cec.pth +- Name: deeplabv3_r18-d8_769x769_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-18-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 180.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.6 + mIoU(ms+flip): 78.26 + Config: configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes/deeplabv3_r18-d8_769x769_80k_cityscapes_20201225_021506-6452126a.pth +- Name: deeplabv3_r50-d8_769x769_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.89 + mIoU(ms+flip): 81.06 + Config: configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes/deeplabv3_r50-d8_769x769_80k_cityscapes_20200606_221338-788d6228.pth +- Name: deeplabv3_r101-d8_769x769_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.67 + mIoU(ms+flip): 80.81 + Config: configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes/deeplabv3_r101-d8_769x769_80k_cityscapes_20200607_013353-60e95418.pth +- Name: deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D16-MG124 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.36 + mIoU(ms+flip): 79.84 + Config: configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-57bb8425.pth +- Name: deeplabv3_r18b-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-18b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 71.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.26 + mIoU(ms+flip): 77.88 + Config: configs/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes/deeplabv3_r18b-d8_512x1024_80k_cityscapes_20201225_094144-46040cef.pth +- Name: deeplabv3_r50b-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-50b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 364.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.63 + mIoU(ms+flip): 
80.98 + Config: configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes/deeplabv3_r50b-d8_512x1024_80k_cityscapes_20201225_155148-ec368954.pth +- Name: deeplabv3_r101b-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-101b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 552.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.01 + mIoU(ms+flip): 81.21 + Config: configs/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes/deeplabv3_r101b-d8_512x1024_80k_cityscapes_20201226_171821-8fd49503.pth +- Name: deeplabv3_r18b-d8_769x769_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-18b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 172.71 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.63 + mIoU(ms+flip): 77.51 + Config: configs/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes/deeplabv3_r18b-d8_769x769_80k_cityscapes_20201225_094144-fdc985d9.pth +- Name: deeplabv3_r50b-d8_769x769_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-50b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 862.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.8 + mIoU(ms+flip): 80.27 + Config: configs/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes/deeplabv3_r50b-d8_769x769_80k_cityscapes_20201225_155404-87fb0cf4.pth +- Name: deeplabv3_r101b-d8_769x769_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: R-101b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 1219.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.41 + mIoU(ms+flip): 80.73 + Config: configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes/deeplabv3_r101b-d8_769x769_80k_cityscapes_20201226_190843-9142ee57.pth +- Name: deeplabv3_r50-d8_512x512_80k_ade20k + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 67.75 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.42 + mIoU(ms+flip): 43.28 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k/deeplabv3_r50-d8_512x512_80k_ade20k_20200614_185028-0bb3f844.pth +- Name: deeplabv3_r101-d8_512x512_80k_ade20k + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 98.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.4 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.08 + mIoU(ms+flip): 45.19 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k/deeplabv3_r101-d8_512x512_80k_ade20k_20200615_021256-d89c7fa4.pth +- Name: deeplabv3_r50-d8_512x512_160k_ade20k + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.66 + mIoU(ms+flip): 44.09 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k/deeplabv3_r50-d8_512x512_160k_ade20k_20200615_123227-5d0ee427.pth +- Name: deeplabv3_r101-d8_512x512_160k_ade20k + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.0 + mIoU(ms+flip): 46.66 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k/deeplabv3_r101-d8_512x512_160k_ade20k_20200615_105816-b1f72b3b.pth +- Name: deeplabv3_r50-d8_512x512_20k_voc12aug + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 72.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.17 + mIoU(ms+flip): 77.42 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug/deeplabv3_r50-d8_512x512_20k_voc12aug_20200617_010906-596905ef.pth +- Name: deeplabv3_r101-d8_512x512_20k_voc12aug + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 101.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.7 + mIoU(ms+flip): 79.95 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug/deeplabv3_r101-d8_512x512_20k_voc12aug_20200617_010932-8d13832f.pth +- Name: deeplabv3_r50-d8_512x512_40k_voc12aug + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.68 + mIoU(ms+flip): 78.78 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug/deeplabv3_r50-d8_512x512_40k_voc12aug_20200613_161546-2ae96e7e.pth +- Name: deeplabv3_r101-d8_512x512_40k_voc12aug + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.92 + mIoU(ms+flip): 79.18 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug/deeplabv3_r101-d8_512x512_40k_voc12aug_20200613_161432-0017d784.pth +- Name: deeplabv3_r101-d8_480x480_40k_pascal_context + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 141.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 46.55 + mIoU(ms+flip): 47.81 + Config: configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context/deeplabv3_r101-d8_480x480_40k_pascal_context_20200911_204118-1aa27336.pth +- Name: deeplabv3_r101-d8_480x480_80k_pascal_context + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 46.42 + mIoU(ms+flip): 47.53 + Config: configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context/deeplabv3_r101-d8_480x480_80k_pascal_context_20200911_170155-2a21fff3.pth +- Name: deeplabv3_r101-d8_480x480_40k_pascal_context_59 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.61 + mIoU(ms+flip): 54.28 + Config: configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59/deeplabv3_r101-d8_480x480_40k_pascal_context_59_20210416_110332-cb08ea46.pth +- Name: deeplabv3_r101-d8_480x480_80k_pascal_context_59 + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.46 + mIoU(ms+flip): 54.09 + Config: configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59/deeplabv3_r101-d8_480x480_80k_pascal_context_59_20210416_113002-26303993.pth +- Name: deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 34.66 + mIoU(ms+flip): 36.08 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-b35f789d.pth +- Name: deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 114.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 37.3 + mIoU(ms+flip): 38.42 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k_20210821_043025-c49752cb.pth +- Name: deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 35.73 + mIoU(ms+flip): 37.09 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-dc76f3ff.pth +- Name: deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 37.81 + mIoU(ms+flip): 38.8 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_043305-636cb433.pth +- Name: deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 92.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 39.38 + mIoU(ms+flip): 40.03 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k_20210709_163016-88675c24.pth +- Name: deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 114.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 40.87 + mIoU(ms+flip): 41.5 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k_20210709_201252-13600dc2.pth +- Name: deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) 
+ lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.09 + mIoU(ms+flip): 41.69 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k_20210709_163016-49f2812b.pth +- Name: deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.82 + mIoU(ms+flip): 42.49 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k_20210709_155402-f035acfd.pth +- Name: deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k + In Collection: DeepLabV3 + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 320000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.37 + mIoU(ms+flip): 42.22 + Config: configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k_20210709_155403-51b21115.pth +- Name: deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k + In Collection: DeepLabV3 + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 320000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 42.61 + mIoU(ms+flip): 43.42 + Config: configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402-3cbca14d.pth diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..f20f260 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3_r50-d8_512x1024_40k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + depth=101, + dilations=(1, 1, 1, 2), + strides=(1, 2, 2, 1), + multi_grid=(1, 2, 4)), + decode_head=dict( + dilations=(1, 6, 12, 18), + sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..de4a8a5 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + depth=101, + dilations=(1, 1, 1, 2), + strides=(1, 2, 2, 1), + multi_grid=(1, 2, 4)), + decode_head=dict( + dilations=(1, 6, 12, 18), + sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git 
a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context.py new file mode 100644 index 0000000..0b5256f --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_480x480_40k_pascal_context.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..4874121 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_480x480_40k_pascal_context_59.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context.py new file mode 100644 index 0000000..001b7a6 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_480x480_80k_pascal_context.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..032dc8b --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_480x480_80k_pascal_context_59.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..8c707c7 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..6804a57 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..df6f36e --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 
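The R-101 configs above are deliberately minimal: each one points `_base_` at the corresponding R-50 config and overrides only the pretrained weights and the backbone depth, relying on MMCV's config inheritance to fill in everything else. As a minimal sketch of how such a config is consumed, assuming an mmsegmentation v0.x environment (`mmcv.Config`, `mmseg.apis`) run from the mmsegmentation root (here, `downstream/mmsegmentation/`), with placeholder paths for the downloaded checkpoint and the test image:

```python
# Sketch only: load an inherited DeepLabV3 config and run single-image inference.
# The checkpoint filename comes from the model table above; the image path is a placeholder.
from mmcv import Config
from mmseg.apis import inference_segmentor, init_segmentor

cfg = Config.fromfile(
    'configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py')
print(cfg.model.backbone.depth)  # 101 -- merged on top of the R-50 base config

model = init_segmentor(
    cfg,
    checkpoint='deeplabv3_r101-d8_512x1024_80k_cityscapes_20200606_113503-9e428899.pth',
    device='cuda:0')
result = inference_segmentor(model, 'demo.png')  # list with one H x W label map
```

The same pattern applies to every file in this directory; only the `_base_` target and the overridden keys change.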
diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..40f5f62 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..fb2be22 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k.py new file mode 100644 index 0000000..76b1242 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k.py new file mode 100644 index 0000000..d476c66 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py new file mode 100644 index 0000000..50669c8 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k.py new file mode 100644 index 0000000..37d09cf --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py new file mode 100644 index 0000000..a0eb3dd --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py' +model = 
dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..796ba3f --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..e6d58a6 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..13094a9 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..e326109 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = './deeplabv3_r101-d8_512x1024_80k_cityscapes.py' +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) 
+# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..5186bf6 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..d185db9 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..e084e95 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..a990c07 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..b25e725 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..fd920f0 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r18b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) 
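The R-18/R-18b variants above shrink the decode and auxiliary heads (`in_channels=512`/`256`) to match ResNet-18's thinner feature maps, while the earlier `deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes.py` shows that mixed precision is switched on purely through the optimizer hook. As an illustration only, a hypothetical config (not part of this diff) applying the same FP16 override on top of the ResNet-18 Cityscapes model might look like this:

```python
# Hypothetical example, not included in this diff: FP16 training for the
# ResNet-18 Cityscapes model, mirroring the R-101 FP16 config above.
_base_ = './deeplabv3_r18-d8_512x1024_80k_cityscapes.py'
# Fp16OptimizerHook (from mmcv.runner) runs the backward pass and optimizer
# step with static loss scaling; loss_scale=512. matches the R-101 FP16 config.
optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.)
# fp16 placeholder, checked by the training entry point (as in the config above).
fp16 = dict()
```

This only shows where the FP16 switch lives; the tables above report FP16 results for the R-101 model only, so the behaviour of this combination is not established here.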
diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context.py new file mode 100644 index 0000000..9d493ef --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..038993c --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py new file mode 100644 index 0000000..71a0fda --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..bcdc0b4 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..8e7420d --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', 
'../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..132787d --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..b4a9d4e --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..f62da1a --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..492bd3d --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py new file mode 100644 index 0000000..22d647e --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py new file mode 100644 index 0000000..45e0b56 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + 
'../_base_/datasets/coco-stuff10k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py new file mode 100644 index 0000000..3e43234 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_320k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py new file mode 100644 index 0000000..f02772a --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff10k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py new file mode 100644 index 0000000..8697e92 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..78f4d0d --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..e35d198 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py 
b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..dd7c165 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/deeplabv3_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..e742d9a --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..332d9cf --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/README.md b/downstream/mmsegmentation/configs/deeplabv3plus/README.md new file mode 100644 index 0000000..4fb7d13 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/README.md @@ -0,0 +1,129 @@ +# DeepLabV3+ + +[Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1802.02611) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Spatial pyramid pooling module or encode-decoder structure are used in deep neural networks for semantic segmentation task. The former networks are able to encode multi-scale contextual information by probing the incoming features with filters or pooling operations at multiple rates and multiple effective fields-of-view, while the latter networks can capture sharper object boundaries by gradually recovering the spatial information. In this work, we propose to combine the advantages from both methods. Specifically, our proposed model, DeepLabv3+, extends DeepLabv3 by adding a simple yet effective decoder module to refine the segmentation results especially along object boundaries. We further explore the Xception model and apply the depthwise separable convolution to both Atrous Spatial Pyramid Pooling and decoder modules, resulting in a faster and stronger encoder-decoder network. We demonstrate the effectiveness of the proposed model on PASCAL VOC 2012 and Cityscapes datasets, achieving the test set performance of 89.0\% and 82.1\% without any post-processing. Our paper is accompanied with a publicly available reference implementation of the proposed models in Tensorflow at [this https URL](https://github.com/tensorflow/models/tree/master/research/deeplab). + + +
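The decoder and ASPP modules described in the abstract are built from depthwise separable convolutions. The snippet below is a minimal PyTorch sketch of that factorization (an illustration only, not the implementation used in mmsegmentation): a per-channel 3x3 depthwise convolution followed by a 1x1 pointwise convolution that mixes channels, keeping the receptive-field benefits of dilation at a fraction of the parameter cost of a standard convolution.

```python
# Minimal sketch of a depthwise separable convolution, for illustration only.
import torch
import torch.nn as nn


class DepthwiseSeparableConv(nn.Module):
    """3x3 depthwise conv (one filter per channel) followed by a 1x1 pointwise conv."""

    def __init__(self, in_channels, out_channels, dilation=1):
        super().__init__()
        self.depthwise = nn.Conv2d(
            in_channels, in_channels, kernel_size=3, padding=dilation,
            dilation=dilation, groups=in_channels, bias=False)
        self.pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))


x = torch.randn(1, 256, 64, 64)
y = DepthwiseSeparableConv(256, 256, dilation=12)(x)
print(y.shape)  # torch.Size([1, 256, 64, 64]) -- padding=dilation preserves spatial size
```

Because `padding=dilation` preserves the spatial resolution, the same block can be used at the enlarged dilation rates that the D-8/D-16 (output stride 8/16) configs rely on.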
    + +## Citation + +```bibtex +@inproceedings{deeplabv3plus2018, + title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation}, + author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam}, + booktitle={ECCV}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | --------------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-50-D8 | 512x1024 | 40000 | 7.5 | 3.94 | 79.61 | 81.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610.log.json) | +| DeepLabV3+ | R-101-D8 | 512x1024 | 40000 | 11 | 2.60 | 80.21 | 81.82 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614-3769eecf.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614.log.json) | +| DeepLabV3+ | R-50-D8 | 769x769 | 40000 | 8.5 | 1.72 | 78.97 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143-1dcb0e3c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143.log.json) | +| DeepLabV3+ | R-101-D8 | 769x769 | 40000 | 12.5 | 1.15 | 79.46 | 80.50 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304-ff414b9e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304.log.json) | +| DeepLabV3+ | R-18-D8 | 
512x1024 | 80000 | 2.2 | 14.27 | 76.89 | 78.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes_20201226_080942-cff257fe.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes-20201226_080942.log.json) | +| DeepLabV3+ | R-50-D8 | 512x1024 | 80000 | - | - | 80.09 | 81.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049.log.json) | +| DeepLabV3+ | R-101-D8 | 512x1024 | 80000 | - | - | 80.97 | 82.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143.log.json) | +| DeepLabV3+ (FP16)| R-101-D8 | 512x1024 | 80000 | 6.35 | 7.87 | 80.46 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-f1104f4b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920.log.json) | +| DeepLabV3+ | R-18-D8 | 769x769 | 80000 | 2.5 | 5.74 | 76.26 | 77.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes_20201226_083346-f326e06a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes-20201226_083346.log.json) | +| DeepLabV3+ | R-50-D8 | 769x769 | 80000 | - | - | 79.83 | 81.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233.log.json) | +| DeepLabV3+ | R-101-D8 | 769x769 | 80000 | - | - | 80.98 | 82.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405.log.json) | +| DeepLabV3+ | R-101-D16-MG124 | 512x1024 | 40000 | 5.8 | 7.48 | 79.09 | 80.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-cf9ce186.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes-20200908_005644.log.json) | +| DeepLabV3+ | R-101-D16-MG124 | 512x1024 | 80000 | 9.9 | - | 79.90 | 81.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-ee6158e0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes-20200908_005644.log.json) | +| DeepLabV3+ | R-18b-D8 | 512x1024 | 80000 | 2.1 | 14.95 | 75.87 | 77.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes_20201226_090828-e451abd9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes-20201226_090828.log.json) | +| DeepLabV3+ | R-50b-D8 | 512x1024 | 80000 | 7.4 | 3.94 | 80.28 | 81.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes_20201225_213645-a97e4e43.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes-20201225_213645.log.json) | +| DeepLabV3+ | R-101b-D8 | 512x1024 | 80000 | 10.9 | 2.60 | 80.16 | 81.41 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes_20201226_190843-9c3c93a4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes-20201226_190843.log.json) | +| DeepLabV3+ | R-18b-D8 | 769x769 | 80000 | 2.4 | 5.96 | 76.36 | 78.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes_20201226_151312-2c868aff.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes-20201226_151312.log.json) | +| DeepLabV3+ | R-50b-D8 | 769x769 | 80000 | 8.4 | 1.72 | 79.41 | 80.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes/deeplabv3plus_r50b-d8_769x769_80k_cityscapes_20201225_224655-8b596d1c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes/deeplabv3plus_r50b-d8_769x769_80k_cityscapes-20201225_224655.log.json) | +| DeepLabV3+ | R-101b-D8 | 769x769 | 80000 | 12.3 | 1.10 | 79.88 | 81.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes/deeplabv3plus_r101b-d8_769x769_80k_cityscapes_20201226_205041-227cdf7c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes/deeplabv3plus_r101b-d8_769x769_80k_cityscapes-20201226_205041.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 10.6 | 21.01 | 42.72 | 43.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028-bf1400d8.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 
14.1 | 14.16 | 44.60 | 46.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139-d5730af7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 160000 | - | - | 43.95 | 44.93 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504-6135c7e0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 160000 | - | - | 45.47 | 46.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232-38ed86bb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-50-D8 | 512x512 | 20000 | 7.6 | 21 | 75.93 | 77.50 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323-aad58ef1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 20000 | 11 | 13.88 | 77.22 | 78.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345-c7ff3d56.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 40000 | - | - | 76.81 | 77.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759-e1b43aa9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 40000 | - | - | 78.62 | 79.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333-faf03387.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-101-D8 | 480x480 | 40000 | - | 9.09 | 47.30 | 48.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context/deeplabv3plus_r101-d8_480x480_40k_pascal_context_20200911_165459-d3c8a29e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context/deeplabv3plus_r101-d8_480x480_40k_pascal_context-20200911_165459.log.json) | +| DeepLabV3+ | R-101-D8 | 480x480 | 80000 | - | - | 47.23 | 48.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context/deeplabv3plus_r101-d8_480x480_80k_pascal_context_20200911_155322-145d3ee8.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context/deeplabv3plus_r101-d8_480x480_80k_pascal_context-20200911_155322.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | 
-------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-101-D8 | 480x480 | 40000 | - | - | 52.86 | 54.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59_20210416_111233-ed937f15.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59-20210416_111233.log.json) | +| DeepLabV3+ | R-101-D8 | 480x480 | 80000 | - | - | 53.2 | 54.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59_20210416_111127-7ca0331d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59-20210416_111127.log.json) | + +### LoveDA + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-18-D8 | 512x512 | 80000 | 1.93 | 25.57 | 50.28 | 50.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda/deeplabv3plus_r18-d8_512x512_80k_loveda_20211104_132800-ce0fa0ca.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda/deeplabv3plus_r18-d8_512x512_80k_loveda_20211104_132800.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 7.37 | 6.00 | 50.99 | 50.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda/deeplabv3plus_r50-d8_512x512_80k_loveda_20211105_080442-f0720392.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda/deeplabv3plus_r50-d8_512x512_80k_loveda_20211105_080442.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 10.84 | 4.33 | 51.47 | 51.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda/deeplabv3plus_r101-d8_512x512_80k_loveda_20211105_110759-4c1f297e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda/deeplabv3plus_r101-d8_512x512_80k_loveda_20211105_110759.log.json) | + +### Potsdam + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-18-D8 | 512x512 | 80000 | 1.91 | 81.68 | 77.09 | 78.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam/deeplabv3plus_r18-d8_512x512_80k_potsdam_20211219_020601-75fd5bc3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam/deeplabv3plus_r18-d8_512x512_80k_potsdam_20211219_020601.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 7.36 | 26.44 | 78.33 | 79.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam/deeplabv3plus_r50-d8_512x512_80k_potsdam_20211219_031508-7e7a2b24.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam/deeplabv3plus_r50-d8_512x512_80k_potsdam_20211219_031508.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 10.83 | 17.56 | 78.7 | 79.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam/deeplabv3plus_r101-d8_512x512_80k_potsdam_20211219_031508-8b112708.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam/deeplabv3plus_r101-d8_512x512_80k_potsdam_20211219_031508.log.json) | + +### Vaihingen + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | 
config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-18-D8 | 512x512 | 80000 | 1.91 | 72.79 | 72.50 | 74.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen_20211231_230805-7626a263.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen_20211231_230805.log.json) | +| DeepLabV3+ | R-50-D8 | 512x512 | 80000 | 7.36 | 26.91 | 73.97 | 75.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen_20211231_230816-5040938d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen_20211231_230816.log.json) | +| DeepLabV3+ | R-101-D8 | 512x512 | 80000 | 10.83 | 18.59 | 73.06 | 74.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen_20211231_230816-8a095afa.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen_20211231_230816.log.json) | + +### iSAID + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DeepLabV3+ | R-18-D8 | 896x896 | 80000 | 6.19 | 24.81 | 61.35 | 62.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid_20220110_180526-7059991d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid_20220110_180526.log.json) | +| DeepLabV3+ | R-50-D8 | 896x896 | 80000 | 21.45 | 8.42 | 67.06 | 68.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526-598be439.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526.log.json) | + +Note: + +- `D-8`/`D-16` here corresponding to the output stride 8/16 setting for DeepLab series. +- `MG-124` stands for multi-grid dilation in the last stage of ResNet. +- `FP16` means Mixed Precision (FP16) is adopted in training. +- `896x896` is the Crop Size of iSAID dataset, which is followed by the implementation of [PointFlow: Flowing Semantics Through Points for Aerial Image Segmentation](https://arxiv.org/pdf/2103.06564.pdf) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus.yml b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus.yml new file mode 100644 index 0000000..c4521ee --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus.yml @@ -0,0 +1,850 @@ +Collections: +- Name: DeepLabV3+ + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + - Pascal Context + - Pascal Context 59 + - LoveDA + - Potsdam + - Vaihingen + - iSAID + Paper: + URL: https://arxiv.org/abs/1802.02611 + Title: Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation + README: configs/deeplabv3plus/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/sep_aspp_head.py#L30 + Version: v0.17.0 + Converted From: + Code: https://github.com/tensorflow/models/tree/master/research/deeplab +Models: +- Name: deeplabv3plus_r50-d8_512x1024_40k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 253.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.61 + mIoU(ms+flip): 81.01 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes/deeplabv3plus_r50-d8_512x1024_40k_cityscapes_20200605_094610-d222ffcd.pth +- Name: deeplabv3plus_r101-d8_512x1024_40k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 384.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.21 + mIoU(ms+flip): 81.82 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614-3769eecf.pth +- Name: deeplabv3plus_r50-d8_769x769_40k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 581.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.97 + mIoU(ms+flip): 80.46 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes/deeplabv3plus_r50-d8_769x769_40k_cityscapes_20200606_114143-1dcb0e3c.pth +- Name: deeplabv3plus_r101-d8_769x769_40k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 869.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.46 + mIoU(ms+flip): 80.5 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes/deeplabv3plus_r101-d8_769x769_40k_cityscapes_20200606_114304-ff414b9e.pth +- Name: deeplabv3plus_r18-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 70.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.89 + mIoU(ms+flip): 78.76 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes/deeplabv3plus_r18-d8_512x1024_80k_cityscapes_20201226_080942-cff257fe.pth +- Name: deeplabv3plus_r50-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.09 + mIoU(ms+flip): 81.13 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth +- Name: deeplabv3plus_r101-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.97 + mIoU(ms+flip): 82.03 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth +- Name: deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + 
inference time (ms/im): + - value: 127.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (512,1024) + Training Memory (GB): 6.35 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.46 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-f1104f4b.pth +- Name: deeplabv3plus_r18-d8_769x769_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 174.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 2.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.26 + mIoU(ms+flip): 77.91 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes/deeplabv3plus_r18-d8_769x769_80k_cityscapes_20201226_083346-f326e06a.pth +- Name: deeplabv3plus_r50-d8_769x769_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.83 + mIoU(ms+flip): 81.48 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth +- Name: deeplabv3plus_r101-d8_769x769_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.98 + mIoU(ms+flip): 82.18 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth +- Name: deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D16-MG124 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 133.69 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.09 + mIoU(ms+flip): 80.36 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes_20200908_005644-cf9ce186.pth +- Name: deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D16-MG124 + crop size: (512,1024) + lr schd: 80000 + Training Memory (GB): 9.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.9 + mIoU(ms+flip): 81.33 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes_20200908_005644-ee6158e0.pth +- Name: deeplabv3plus_r18b-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-18b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 66.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.87 + mIoU(ms+flip): 77.52 + Config: configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes_20201226_090828-e451abd9.pth +- Name: deeplabv3plus_r50b-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-50b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 253.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.28 + mIoU(ms+flip): 81.44 + Config: configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes_20201225_213645-a97e4e43.pth +- Name: deeplabv3plus_r101b-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 384.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.16 + mIoU(ms+flip): 81.41 + Config: configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes_20201226_190843-9c3c93a4.pth +- Name: deeplabv3plus_r18b-d8_769x769_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-18b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 167.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 2.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.36 + mIoU(ms+flip): 78.24 + Config: configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes/deeplabv3plus_r18b-d8_769x769_80k_cityscapes_20201226_151312-2c868aff.pth +- Name: deeplabv3plus_r50b-d8_769x769_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-50b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 581.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.41 + mIoU(ms+flip): 80.56 + Config: configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py + 
Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes/deeplabv3plus_r50b-d8_769x769_80k_cityscapes_20201225_224655-8b596d1c.pth +- Name: deeplabv3plus_r101b-d8_769x769_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: R-101b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 909.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.88 + mIoU(ms+flip): 81.46 + Config: configs/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes/deeplabv3plus_r101b-d8_769x769_80k_cityscapes_20201226_205041-227cdf7c.pth +- Name: deeplabv3plus_r50-d8_512x512_80k_ade20k + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.6 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.72 + mIoU(ms+flip): 43.75 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k/deeplabv3plus_r50-d8_512x512_80k_ade20k_20200614_185028-bf1400d8.pth +- Name: deeplabv3plus_r101-d8_512x512_80k_ade20k + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 70.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 14.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.6 + mIoU(ms+flip): 46.06 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k/deeplabv3plus_r101-d8_512x512_80k_ade20k_20200615_014139-d5730af7.pth +- Name: deeplabv3plus_r50-d8_512x512_160k_ade20k + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.95 + mIoU(ms+flip): 44.93 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k/deeplabv3plus_r50-d8_512x512_160k_ade20k_20200615_124504-6135c7e0.pth +- Name: deeplabv3plus_r101-d8_512x512_160k_ade20k + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.47 + mIoU(ms+flip): 46.35 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k/deeplabv3plus_r101-d8_512x512_160k_ade20k_20200615_123232-38ed86bb.pth +- Name: deeplabv3plus_r50-d8_512x512_20k_voc12aug + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 47.62 + hardware: V100 + backend: PyTorch + batch 
size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.6 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 75.93 + mIoU(ms+flip): 77.5 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug/deeplabv3plus_r50-d8_512x512_20k_voc12aug_20200617_102323-aad58ef1.pth +- Name: deeplabv3plus_r101-d8_512x512_20k_voc12aug + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 72.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 11.0 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.22 + mIoU(ms+flip): 78.59 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug/deeplabv3plus_r101-d8_512x512_20k_voc12aug_20200617_102345-c7ff3d56.pth +- Name: deeplabv3plus_r50-d8_512x512_40k_voc12aug + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.81 + mIoU(ms+flip): 77.57 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug/deeplabv3plus_r50-d8_512x512_40k_voc12aug_20200613_161759-e1b43aa9.pth +- Name: deeplabv3plus_r101-d8_512x512_40k_voc12aug + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.62 + mIoU(ms+flip): 79.53 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug/deeplabv3plus_r101-d8_512x512_40k_voc12aug_20200613_205333-faf03387.pth +- Name: deeplabv3plus_r101-d8_480x480_40k_pascal_context + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 110.01 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 47.3 + mIoU(ms+flip): 48.47 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context/deeplabv3plus_r101-d8_480x480_40k_pascal_context_20200911_165459-d3c8a29e.pth +- Name: deeplabv3plus_r101-d8_480x480_80k_pascal_context + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 47.23 + mIoU(ms+flip): 48.26 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context/deeplabv3plus_r101-d8_480x480_80k_pascal_context_20200911_155322-145d3ee8.pth +- Name: 
deeplabv3plus_r101-d8_480x480_40k_pascal_context_59 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.86 + mIoU(ms+flip): 54.54 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59_20210416_111233-ed937f15.pth +- Name: deeplabv3plus_r101-d8_480x480_80k_pascal_context_59 + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 53.2 + mIoU(ms+flip): 54.67 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59_20210416_111127-7ca0331d.pth +- Name: deeplabv3plus_r18-d8_512x512_80k_loveda + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 39.11 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.93 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 50.28 + mIoU(ms+flip): 50.47 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda/deeplabv3plus_r18-d8_512x512_80k_loveda_20211104_132800-ce0fa0ca.pth +- Name: deeplabv3plus_r50-d8_512x512_80k_loveda + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 166.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.37 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 50.99 + mIoU(ms+flip): 50.65 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda/deeplabv3plus_r50-d8_512x512_80k_loveda_20211105_080442-f0720392.pth +- Name: deeplabv3plus_r101-d8_512x512_80k_loveda + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 230.95 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.84 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 51.47 + mIoU(ms+flip): 51.32 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda/deeplabv3plus_r101-d8_512x512_80k_loveda_20211105_110759-4c1f297e.pth +- Name: deeplabv3plus_r18-d8_512x512_80k_potsdam + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 12.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.91 + Results: + - Task: Semantic Segmentation + 
Dataset: Potsdam + Metrics: + mIoU: 77.09 + mIoU(ms+flip): 78.44 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam/deeplabv3plus_r18-d8_512x512_80k_potsdam_20211219_020601-75fd5bc3.pth +- Name: deeplabv3plus_r50-d8_512x512_80k_potsdam + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 37.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.36 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.33 + mIoU(ms+flip): 79.27 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam/deeplabv3plus_r50-d8_512x512_80k_potsdam_20211219_031508-7e7a2b24.pth +- Name: deeplabv3plus_r101-d8_512x512_80k_potsdam + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 56.95 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.83 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.7 + mIoU(ms+flip): 79.47 + Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam/deeplabv3plus_r101-d8_512x512_80k_potsdam_20211219_031508-8b112708.pth +- Name: deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 13.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.91 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.5 + mIoU(ms+flip): 74.13 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen_20211231_230805-7626a263.pth +- Name: deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 37.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.36 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 73.97 + mIoU(ms+flip): 75.05 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen_20211231_230816-5040938d.pth +- Name: deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen + In Collection: DeepLabV3+ + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 53.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.83 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 73.06 + mIoU(ms+flip): 74.14 + 
Config: configs/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen_20211231_230816-8a095afa.pth +- Name: deeplabv3plus_r18-d8_4x4_896x896_80k_isaid + In Collection: DeepLabV3+ + Metadata: + backbone: R-18-D8 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 40.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 6.19 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 61.35 + mIoU(ms+flip): 62.61 + Config: configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid_20220110_180526-7059991d.pth +- Name: deeplabv3plus_r50-d8_4x4_896x896_80k_isaid + In Collection: DeepLabV3+ + Metadata: + backbone: R-50-D8 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 118.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 21.45 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 67.06 + mIoU(ms+flip): 68.02 + Config: configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid_20220110_180526-598be439.pth diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..bf39d2f --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + depth=101, + dilations=(1, 1, 1, 2), + strides=(1, 2, 2, 1), + multi_grid=(1, 2, 4)), + decode_head=dict( + dilations=(1, 6, 12, 18), + sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..c53ec41 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + depth=101, + dilations=(1, 1, 1, 2), + strides=(1, 2, 2, 1), + multi_grid=(1, 2, 4)), + decode_head=dict( + dilations=(1, 6, 12, 18), + sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py new file mode 100644 index 0000000..68e2b07 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py @@ -0,0 +1,2 @@ +_base_ = 
'./deeplabv3plus_r50-d8_480x480_40k_pascal_context.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..36a510f --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py new file mode 100644 index 0000000..3a46c28 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_480x480_80k_pascal_context.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..a6a7688 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_480x480_80k_pascal_context_59.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..4bddf4f --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..d6ce85a --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..0ebbd3c --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py 
b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..a75c9d3 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..ebb1a8e --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..3caa6cf --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..53fd3a9 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda.py new file mode 100644 index 0000000..b3ad3ca --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda.py @@ -0,0 +1,6 @@ +_base_ = './deeplabv3plus_r50-d8_512x512_80k_loveda.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet101_v1c'))) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam.py new file mode 100644 index 0000000..d894914 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_512x512_80k_potsdam.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..c3c92eb --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git 
a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..5ea9cdb --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..fc36940 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = './deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) +# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..398d975 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..1364490 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..879e941 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid.py new file mode 100644 index 0000000..892a8a3 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_4x4_896x896_80k_isaid.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, 
channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..aff70c9 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda.py new file mode 100644 index 0000000..11fe640 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda.py @@ -0,0 +1,13 @@ +_base_ = './deeplabv3plus_r50-d8_512x512_80k_loveda.py' +model = dict( + backbone=dict( + depth=18, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam.py new file mode 100644 index 0000000..ffb20df --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_512x512_80k_potsdam.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..0172d9a --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..b90b292 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py 
b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..b49da35 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + c1_in_channels=64, + c1_channels=12, + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context.py new file mode 100644 index 0000000..318845d --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..f9e831b --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context.py new file mode 100644 index 0000000..1736c23 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..d2af575 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + 
'../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..fed9314 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/vaihingen.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=6), auxiliary_head=dict(num_classes=6)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid.py new file mode 100644 index 0000000..a1a8beb --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/isaid.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=16), auxiliary_head=dict(num_classes=16)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..7243d03 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..3304d36 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..1491e3b --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..1056ad4 --- /dev/null +++ 
b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..e36c83b --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..352d870 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda.py new file mode 100644 index 0000000..62756f6 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/loveda.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=7), auxiliary_head=dict(num_classes=7)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam.py new file mode 100644 index 0000000..d5ae03f --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/potsdam.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=6), auxiliary_head=dict(num_classes=6)) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..e4bda3e --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + 
test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..1420b97 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/deeplabv3plus_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..dd8e1da --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..c0ba019 --- /dev/null +++ b/downstream/mmsegmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/dmnet/README.md b/downstream/mmsegmentation/configs/dmnet/README.md new file mode 100644 index 0000000..0729268 --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/README.md @@ -0,0 +1,58 @@ +# DMNet + +[Dynamic Multi-scale Filters for Semantic Segmentation](https://openaccess.thecvf.com/content_ICCV_2019/papers/He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_ICCV_2019_paper.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Multi-scale representation provides an effective way to address scale variation of objects and stuff in semantic segmentation. Previous works construct multi-scale representation by utilizing different filter sizes, expanding filter sizes with dilated filters or pooling grids, and the parameters of these filters are fixed after training. These methods often suffer from heavy computational cost or have more parameters, and are not adaptive to the input image during inference. To address these problems, this paper proposes a Dynamic Multi-scale Network (DMNet) to adaptively capture multi-scale contents for predicting pixel-level semantic labels. DMNet is composed of multiple Dynamic Convolutional Modules (DCMs) arranged in parallel, each of which exploits context-aware filters to estimate semantic representation for a specific scale. The outputs of multiple DCMs are further integrated for final segmentation. We conduct extensive experiments to evaluate our DMNet on three challenging semantic segmentation and scene parsing datasets, PASCAL VOC 2012, Pascal-Context, and ADE20K.
DMNet achieves a new record 84.4% mIoU on the PASCAL VOC 2012 test set without MS COCO pre-training and post-processing, and also obtains state-of-the-art performance on Pascal-Context and ADE20K. + + +
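+For intuition, the snippet below is a minimal PyTorch-style sketch of the dynamic convolutional module (DCM) idea described in the abstract: a k x k context-aware filter is pooled from the input itself and applied as a depth-wise convolution, so the filter weights adapt to each image. Class and argument names are illustrative assumptions, not the actual `dm_head.py` implementation in this repo.
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class DynamicConvModule(nn.Module):
+    """One DCM branch: the k x k filter is estimated from the input feature map."""
+
+    def __init__(self, channels, filter_size=3):
+        super().__init__()
+        self.filter_size = filter_size
+        self.filter_gen = nn.AdaptiveAvgPool2d(filter_size)  # context-aware filter
+        self.input_proj = nn.Conv2d(channels, channels, 1)
+
+    def forward(self, x):
+        b, c, h, w = x.shape
+        # One k x k kernel per sample and per channel, computed from the input.
+        kernel = self.filter_gen(x).reshape(b * c, 1, self.filter_size, self.filter_size)
+        feat = self.input_proj(x).reshape(1, b * c, h, w)
+        # Depth-wise convolution with the dynamic kernel; padding keeps H x W for odd k.
+        out = F.conv2d(feat, kernel, groups=b * c, padding=self.filter_size // 2)
+        return F.relu(out.reshape(b, c, h, w))
+
+
+x = torch.randn(2, 64, 65, 65)
+print(DynamicConvModule(64, filter_size=3)(x).shape)  # torch.Size([2, 64, 65, 65])
+```
+
+In DMNet several such branches with different filter sizes run in parallel and their outputs are fused before the final prediction, which is what gives the head its multi-scale, input-adaptive behavior.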
    + +## Citation + +```bibtex +@InProceedings{He_2019_ICCV, +author = {He, Junjun and Deng, Zhongying and Qiao, Yu}, +title = {Dynamic Multi-Scale Filters for Semantic Segmentation}, +booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, +month = {October}, +year = {2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| DMNet | R-50-D8 | 512x1024 | 40000 | 7.0 | 3.66 | 77.78 | 79.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes_20201215_042326-615373cf.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes-20201215_042326.log.json) | +| DMNet | R-101-D8 | 512x1024 | 40000 | 10.6 | 2.54 | 78.37 | 79.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes_20201215_043100-8291e976.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes-20201215_043100.log.json) | +| DMNet | R-50-D8 | 769x769 | 40000 | 7.9 | 1.57 | 78.49 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes_20201215_093706-e7f0e23e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes-20201215_093706.log.json) | +| DMNet | R-101-D8 | 769x769 | 40000 | 12.0 | 1.01 | 77.62 | 78.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes_20201215_081348-a74261f6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes-20201215_081348.log.json) | +| DMNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.07 | 80.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes_20201215_053728-3c8893b9.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes-20201215_053728.log.json) | +| DMNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.64 | 80.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes_20201215_031718-fa081cb8.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes-20201215_031718.log.json) | +| DMNet | R-50-D8 | 769x769 | 80000 | - | - | 79.22 | 80.55 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes_20201215_034006-6060840e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes-20201215_034006.log.json) | +| DMNet | R-101-D8 | 769x769 | 80000 | - | - | 79.19 | 80.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes_20201215_082810-7f0de59a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes-20201215_082810.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DMNet | R-50-D8 | 512x512 | 80000 | 9.4 | 20.95 | 42.37 | 43.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k_20201215_144744-f89092a6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k-20201215_144744.log.json) | +| DMNet | R-101-D8 | 512x512 | 80000 | 13.0 | 13.88 | 45.34 | 46.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k_20201215_104812-bfa45311.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k-20201215_104812.log.json) | +| DMNet | R-50-D8 | 512x512 | 160000 | - | - | 43.15 | 44.17 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k_20201215_115313-025ab3f9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k-20201215_115313.log.json) | +| DMNet | R-101-D8 | 512x512 | 160000 | - | - | 45.42 | 46.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k_20201215_111145-a0bc02ef.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k-20201215_111145.log.json) | diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet.yml b/downstream/mmsegmentation/configs/dmnet/dmnet.yml new file mode 100644 index 0000000..1fab2dc --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet.yml @@ -0,0 +1,232 @@ +Collections: +- Name: DMNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://openaccess.thecvf.com/content_ICCV_2019/papers/He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_ICCV_2019_paper.pdf + Title: Dynamic Multi-scale Filters for Semantic Segmentation + README: configs/dmnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dm_head.py#L93 + Version: v0.17.0 + Converted From: + Code: https://github.com/Junjun2016/DMNet +Models: +- Name: dmnet_r50-d8_512x1024_40k_cityscapes + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 273.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.78 + mIoU(ms+flip): 79.14 + Config: configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes_20201215_042326-615373cf.pth +- Name: dmnet_r101-d8_512x1024_40k_cityscapes + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 393.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.37 + mIoU(ms+flip): 79.72 + Config: configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes_20201215_043100-8291e976.pth +- Name: dmnet_r50-d8_769x769_40k_cityscapes + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 636.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 7.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.49 + mIoU(ms+flip): 80.27 + Config: configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes_20201215_093706-e7f0e23e.pth +- Name: dmnet_r101-d8_769x769_40k_cityscapes + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 990.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.62 + mIoU(ms+flip): 78.94 + Config: configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes_20201215_081348-a74261f6.pth +- Name: dmnet_r50-d8_512x1024_80k_cityscapes + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.07 + mIoU(ms+flip): 80.22 + Config: configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes_20201215_053728-3c8893b9.pth +- Name: dmnet_r101-d8_512x1024_80k_cityscapes + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.64 + mIoU(ms+flip): 80.67 + Config: configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes_20201215_031718-fa081cb8.pth +- Name: dmnet_r50-d8_769x769_80k_cityscapes + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.22 + mIoU(ms+flip): 80.55 + Config: configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes_20201215_034006-6060840e.pth +- Name: dmnet_r101-d8_769x769_80k_cityscapes + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.19 + mIoU(ms+flip): 80.65 + Config: configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes_20201215_082810-7f0de59a.pth +- Name: dmnet_r50-d8_512x512_80k_ade20k + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.4 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.37 + mIoU(ms+flip): 43.62 + Config: configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k_20201215_144744-f89092a6.pth +- Name: dmnet_r101-d8_512x512_80k_ade20k + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time 
(ms/im): + - value: 72.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.34 + mIoU(ms+flip): 46.13 + Config: configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k_20201215_104812-bfa45311.pth +- Name: dmnet_r50-d8_512x512_160k_ade20k + In Collection: DMNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.15 + mIoU(ms+flip): 44.17 + Config: configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k_20201215_115313-025ab3f9.pth +- Name: dmnet_r101-d8_512x512_160k_ade20k + In Collection: DMNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.42 + mIoU(ms+flip): 46.76 + Config: configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k_20201215_111145-a0bc02ef.pth diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..fd68976 --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..116cbdc --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..d78d46c --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..9713b73 --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..6b222e7 --- /dev/null +++ 
b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..f36d490 --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './dmnet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..1f9a917 --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..1b38f90 --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..a8fbd9b --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..74f6d6a --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..1984154 --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git 
a/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..31d95f9 --- /dev/null +++ b/downstream/mmsegmentation/configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/dmnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/dnlnet/README.md b/downstream/mmsegmentation/configs/dnlnet/README.md new file mode 100644 index 0000000..d36f099 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/README.md @@ -0,0 +1,61 @@ +# DNLNet + +[Disentangled Non-Local Neural Networks](https://arxiv.org/abs/2006.06668) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The non-local block is a popular module for strengthening the context modeling ability of a regular convolutional neural network. This paper first studies the non-local block in depth, where we find that its attention computation can be split into two terms, a whitened pairwise term accounting for the relationship between two pixels and a unary term representing the saliency of every pixel. We also observe that the two terms trained alone tend to model different visual clues, e.g. the whitened pairwise term learns within-region relationships while the unary term learns salient boundaries. However, the two terms are tightly coupled in the non-local block, which hinders the learning of each. Based on these findings, we present the disentangled non-local block, where the two terms are decoupled to facilitate learning for both terms. We demonstrate the effectiveness of the decoupled design on various tasks, such as semantic segmentation on Cityscapes, ADE20K and PASCAL Context, object detection on COCO, and action recognition on Kinetics. + + +
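The split described in the abstract follows from a simple identity; the sketch below uses our own notation (query/key embeddings q_i, k_j of pixels i, j and their spatial means mu_q, mu_k), not anything taken from the configs. Inside the softmax over j, terms that do not depend on j cancel, so standard non-local attention factors into a whitened pairwise term plus a unary term:

```latex
% Whitened-pairwise + unary decomposition of non-local attention (our notation).
\operatorname{softmax}_j\!\left(q_i^{\top}k_j\right)
  = \operatorname{softmax}_j\!\Big(
        \underbrace{(q_i-\mu_q)^{\top}(k_j-\mu_k)}_{\text{whitened pairwise}}
      + \underbrace{\mu_q^{\top}k_j}_{\text{unary}}
    \Big)
% q_i^{\top}\mu_k and \mu_q^{\top}\mu_k are constant in j and cancel under the softmax.
```

The disentangled block then evaluates the two terms with separate normalizations so that, as argued above, each can learn its own visual cue (within-region affinity vs. salient boundaries).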
    + +
    + +This example reproduces ["Disentangled Non-Local Neural Networks"](https://arxiv.org/abs/2006.06668) for semantic segmentation. It is still in progress. + +## Citation + +```bibtex +@inproceedings{yin2020disentangled, + title={Disentangled Non-Local Neural Networks}, + author={Minghao Yin and Zhuliang Yao and Yue Cao and Xiu Li and Zheng Zhang and Stephen Lin and Han Hu}, + year={2020}, + booktitle={ECCV} +} +``` + +## Results and models (in progress) + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| DNLNet | R-50-D8 | 512x1024 | 40000 | 7.3 | 2.56 | 78.61 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes_20200904_233629-53d4ea93.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes-20200904_233629.log.json) | +| DNLNet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.96 | 78.31 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes_20200904_233629-9928ffef.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes-20200904_233629.log.json) | +| DNLNet | R-50-D8 | 769x769 | 40000 | 9.2 | 1.50 | 78.44 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes_20200820_232206-0f283785.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes-20200820_232206.log.json) | +| DNLNet | R-101-D8 | 769x769 | 40000 | 12.6 | 1.02 | 76.39 | 77.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes_20200820_171256-76c596df.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes-20200820_171256.log.json) | +| DNLNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.33 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes_20200904_233629-58b2f778.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes-20200904_233629.log.json) | +| DNLNet | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes_20200904_233629-758e2dd4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes-20200904_233629.log.json) | +| DNLNet | R-50-D8 | 769x769 | 80000 | - | - | 79.36 | 80.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes_20200820_011925-366bc4c7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes-20200820_011925.log.json) | +| DNLNet | R-101-D8 | 769x769 | 80000 | - | - | 79.41 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes_20200821_051111-95ff84ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes-20200821_051111.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| DNLNet | R-50-D8 | 512x512 | 80000 | 8.8 | 20.66 | 41.76 | 42.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k-20200826_183354.log.json) | +| DNLNet | R-101-D8 | 512x512 | 80000 | 12.8 | 12.54 | 43.76 | 44.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k_20200826_183354-d820d6ea.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k-20200826_183354.log.json) | +| 
DNLNet | R-50-D8 | 512x512 | 160000 | - | - | 41.87 | 43.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k_20200826_183350-37837798.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k-20200826_183350.log.json) | +| DNLNet | R-101-D8 | 512x512 | 160000 | - | - | 44.25 | 45.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k_20200826_183350-ed522c61.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k-20200826_183350.log.json) | diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..1a36e3c --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..0f2e1b6 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..aca44e4 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..ebd27a1 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..575e9d0 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..4f1b9e1 --- /dev/null +++ 
b/downstream/mmsegmentation/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './dnl_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..f7aa744 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..fdff93f --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..5305689 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..09604c3 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..0666199 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..f7b07c4 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/dnl_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + 
decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) +optimizer = dict( + paramwise_cfg=dict( + custom_keys=dict(theta=dict(wd_mult=0.), phi=dict(wd_mult=0.)))) diff --git a/downstream/mmsegmentation/configs/dnlnet/dnlnet.yml b/downstream/mmsegmentation/configs/dnlnet/dnlnet.yml new file mode 100644 index 0000000..8ee7b54 --- /dev/null +++ b/downstream/mmsegmentation/configs/dnlnet/dnlnet.yml @@ -0,0 +1,228 @@ +Collections: +- Name: DNLNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/2006.06668 + Title: Disentangled Non-Local Neural Networks + README: configs/dnlnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dnl_head.py#L88 + Version: v0.17.0 + Converted From: + Code: https://github.com/yinmh17/DNL-Semantic-Segmentation +Models: +- Name: dnl_r50-d8_512x1024_40k_cityscapes + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 390.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.61 + Config: configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes_20200904_233629-53d4ea93.pth +- Name: dnl_r101-d8_512x1024_40k_cityscapes + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 510.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.31 + Config: configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes_20200904_233629-9928ffef.pth +- Name: dnl_r50-d8_769x769_40k_cityscapes + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 666.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.44 + mIoU(ms+flip): 80.27 + Config: configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes_20200820_232206-0f283785.pth +- Name: dnl_r101-d8_769x769_40k_cityscapes + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 980.39 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.39 + mIoU(ms+flip): 77.77 + Config: configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes_20200820_171256-76c596df.pth +- Name: dnl_r50-d8_512x1024_80k_cityscapes + In Collection: 
DNLNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.33 + Config: configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes_20200904_233629-58b2f778.pth +- Name: dnl_r101-d8_512x1024_80k_cityscapes + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.41 + Config: configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes_20200904_233629-758e2dd4.pth +- Name: dnl_r50-d8_769x769_80k_cityscapes + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.36 + mIoU(ms+flip): 80.7 + Config: configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes_20200820_011925-366bc4c7.pth +- Name: dnl_r101-d8_769x769_80k_cityscapes + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.41 + mIoU(ms+flip): 80.68 + Config: configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes_20200821_051111-95ff84ab.pth +- Name: dnl_r50-d8_512x512_80k_ade20k + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 48.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.76 + mIoU(ms+flip): 42.99 + Config: configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth +- Name: dnl_r101-d8_512x512_80k_ade20k + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 79.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.76 + mIoU(ms+flip): 44.91 + Config: configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k_20200826_183354-d820d6ea.pth +- Name: dnl_r50-d8_512x512_160k_ade20k + In Collection: DNLNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.87 + mIoU(ms+flip): 43.01 + Config: configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k_20200826_183350-37837798.pth +- Name: 
dnl_r101-d8_512x512_160k_ade20k + In Collection: DNLNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.25 + mIoU(ms+flip): 45.78 + Config: configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k_20200826_183350-ed522c61.pth diff --git a/downstream/mmsegmentation/configs/dpt/README.md b/downstream/mmsegmentation/configs/dpt/README.md new file mode 100644 index 0000000..2fd8d32 --- /dev/null +++ b/downstream/mmsegmentation/configs/dpt/README.md @@ -0,0 +1,66 @@ +# DPT + +[Vision Transformer for Dense Prediction](https://arxiv.org/abs/2103.13413) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We introduce dense vision transformers, an architecture that leverages vision transformers in place of convolutional networks as a backbone for dense prediction tasks. We assemble tokens from various stages of the vision transformer into image-like representations at various resolutions and progressively combine them into full-resolution predictions using a convolutional decoder. The transformer backbone processes representations at a constant and relatively high resolution and has a global receptive field at every stage. These properties allow the dense vision transformer to provide finer-grained and more globally coherent predictions when compared to fully-convolutional networks. Our experiments show that this architecture yields substantial improvements on dense prediction tasks, especially when a large amount of training data is available. For monocular depth estimation, we observe an improvement of up to 28% in relative performance when compared to a state-of-the-art fully-convolutional network. When applied to semantic segmentation, dense vision transformers set a new state of the art on ADE20K with 49.02% mIoU. We further show that the architecture can be fine-tuned on smaller datasets such as NYUv2, KITTI, and Pascal Context where it also sets the new state of the art. Our models are available at [this https URL](https://github.com/isl-org/DPT). + + +
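The "assemble tokens into image-like representations" step mentioned in the abstract boils down to mapping the ViT patch tokens back onto the patch grid. Below is a minimal sketch (the function name and shapes are ours; the actual reassemble stage of the DPT head also resamples and projects the features):

```python
import torch

def tokens_to_feature_map(tokens: torch.Tensor, grid_hw: tuple) -> torch.Tensor:
    """Reshape ViT tokens (B, 1 + H*W, C) into an image-like (B, C, H, W) tensor.

    Assumes a single class token at index 0, which is simply dropped here.
    """
    h, w = grid_hw
    patch_tokens = tokens[:, 1:, :]                 # drop the [CLS] token
    b, n, c = patch_tokens.shape
    assert n == h * w, 'token count must match the patch grid'
    return patch_tokens.transpose(1, 2).reshape(b, c, h, w)

# A ViT-B/16 backbone on a 512x512 crop yields a 32x32 patch grid:
feat = tokens_to_feature_map(torch.randn(2, 1 + 32 * 32, 768), (32, 32))
print(feat.shape)  # torch.Size([2, 768, 32, 32])
```

In the full model this reshaping happens at several transformer depths, and the resulting maps are fused into a full-resolution prediction by the convolutional decoder.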
    + +
    + +## Citation + +```bibtex +@article{dosovitskiy2020, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil}, + journal={arXiv preprint arXiv:2010.11929}, + year={2020} +} + +@article{Ranftl2021, + author = {Ren\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun}, + title = {Vision Transformers for Dense Prediction}, + journal = {arXiv preprint arXiv:2103.13413}, + year = {2021}, +} +``` + +## Usage + +To use pre-trained models from other repositories, their checkpoint keys need to be converted first. + +We provide a script [`vit2mmseg.py`](../../tools/model_converters/vit2mmseg.py) in the tools directory to convert the keys of models from [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py) to MMSegmentation style. + +```shell +python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +For example: + +```shell +python tools/model_converters/vit2mmseg.py https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth pretrain/jx_vit_base_p16_224-80ecf9dd.pth +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| DPT | ViT-B | 512x512 | 160000 | 8.09 | 10.41 | 46.97 | 48.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dpt/dpt_vit-b16_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-db31cf52.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-20210809_172025.log.json) | diff --git a/downstream/mmsegmentation/configs/dpt/dpt.yml b/downstream/mmsegmentation/configs/dpt/dpt.yml new file mode 100644 index 0000000..a4f9c65 --- /dev/null +++ b/downstream/mmsegmentation/configs/dpt/dpt.yml @@ -0,0 +1,37 @@ +Collections: +- Name: DPT + Metadata: + Training Data: + - ADE20K + Paper: + URL: https://arxiv.org/abs/2103.13413 + Title: Vision Transformers for Dense Prediction + README: configs/dpt/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dpt_head.py#L215 + Version: v0.17.0 + Converted From: + Code: https://github.com/isl-org/DPT +Models: +- Name: dpt_vit-b16_512x512_160k_ade20k + In Collection: DPT + Metadata: + backbone: ViT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 96.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 
8.09 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.97 + mIoU(ms+flip): 48.34 + Config: configs/dpt/dpt_vit-b16_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-db31cf52.pth diff --git a/downstream/mmsegmentation/configs/dpt/dpt_vit-b16_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/dpt/dpt_vit-b16_512x512_160k_ade20k.py new file mode 100644 index 0000000..c751a68 --- /dev/null +++ b/downstream/mmsegmentation/configs/dpt/dpt_vit-b16_512x512_160k_ade20k.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/models/dpt_vit-b16.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/emanet/README.md b/downstream/mmsegmentation/configs/emanet/README.md new file mode 100644 index 0000000..34dba42 --- /dev/null +++ b/downstream/mmsegmentation/configs/emanet/README.md @@ -0,0 +1,45 @@ +# EMANet + +[Expectation-Maximization Attention Networks for Semantic Segmentation](https://arxiv.org/abs/1907.13426) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Self-attention mechanism has been widely used for various tasks. It is designed to compute the representation of each position by a weighted sum of the features at all positions. Thus, it can capture long-range relations for computer vision tasks. However, it is computationally consuming. Since the attention maps are computed w.r.t all other positions. In this paper, we formulate the attention mechanism into an expectation-maximization manner and iteratively estimate a much more compact set of bases upon which the attention maps are computed. By a weighted summation upon these bases, the resulting representation is low-rank and deprecates noisy information from the input. The proposed Expectation-Maximization Attention (EMA) module is robust to the variance of input and is also friendly in memory and computation. Moreover, we set up the bases maintenance and normalization methods to stabilize its training procedure. We conduct extensive experiments on popular semantic segmentation benchmarks including PASCAL VOC, PASCAL Context and COCO Stuff, on which we set new records. + + +
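The EM iteration sketched in the abstract alternates between soft-assigning pixels to a small set of bases and re-estimating those bases. Below is a simplified single pass (shapes, iteration count, and the final reconstruction are illustrative assumptions; the EMA head referenced in `emanet.yml` adds convolutions, momentum updates of the bases across batches, and further normalization):

```python
import torch
import torch.nn.functional as F

def em_attention(x: torch.Tensor, mu: torch.Tensor, n_iters: int = 3):
    """Sketch of Expectation-Maximization Attention.

    x:  (B, C, N) flattened feature map,  mu: (B, C, K) current bases.
    Returns a low-rank reconstruction of x and the updated bases.
    """
    for _ in range(n_iters):
        # E-step: responsibility of every pixel for each of the K bases.
        z = F.softmax(torch.bmm(x.transpose(1, 2), mu), dim=2)      # (B, N, K)
        # M-step: bases become responsibility-weighted averages of the features.
        z_norm = z / (1e-6 + z.sum(dim=1, keepdim=True))
        mu = F.normalize(torch.bmm(x, z_norm), dim=1)               # (B, C, K)
    # Re-estimate the feature map from the compact set of bases.
    x_rec = torch.bmm(mu, z.transpose(1, 2))                        # (B, C, N)
    return x_rec, mu

x = torch.randn(2, 256, 64 * 64)
mu = F.normalize(torch.randn(2, 256, 64), dim=1)
x_rec, mu = em_attention(x, mu)
```

Because K is much smaller than the number of pixels, the attention maps are only N x K instead of N x N, which is where the memory and compute savings come from.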
    + +
    + +## Citation + +```bibtex +@inproceedings{li2019expectation, + title={Expectation-maximization attention networks for semantic segmentation}, + author={Li, Xia and Zhong, Zhisheng and Wu, Jianlong and Yang, Yibo and Lin, Zhouchen and Liu, Hong}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={9167--9176}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| EMANet | R-50-D8 | 512x1024 | 80000 | 5.4 | 4.58 | 77.59 | 79.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes_20200901_100301-c43fcef1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes-20200901_100301.log.json) | +| EMANet | R-101-D8 | 512x1024 | 80000 | 6.2 | 2.87 | 79.10 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes_20200901_100301-2d970745.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes-20200901_100301.log.json) | +| EMANet | R-50-D8 | 769x769 | 80000 | 8.9 | 1.97 | 79.33 | 80.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes_20200901_100301-16f8de52.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes-20200901_100301.log.json) | +| EMANet | R-101-D8 | 769x769 | 80000 | 10.1 | 1.22 | 79.62 | 81.00 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes_20200901_100301-47a324ce.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes-20200901_100301.log.json) | diff --git a/downstream/mmsegmentation/configs/emanet/emanet.yml b/downstream/mmsegmentation/configs/emanet/emanet.yml new file mode 100644 index 0000000..22ebcdb --- /dev/null +++ b/downstream/mmsegmentation/configs/emanet/emanet.yml @@ -0,0 +1,103 @@ +Collections: +- Name: EMANet + 
Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/1907.13426 + Title: Expectation-Maximization Attention Networks for Semantic Segmentation + README: configs/emanet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ema_head.py#L80 + Version: v0.17.0 + Converted From: + Code: https://xialipku.github.io/EMANet +Models: +- Name: emanet_r50-d8_512x1024_80k_cityscapes + In Collection: EMANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 218.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.59 + mIoU(ms+flip): 79.44 + Config: configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes_20200901_100301-c43fcef1.pth +- Name: emanet_r101-d8_512x1024_80k_cityscapes + In Collection: EMANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 348.43 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.1 + mIoU(ms+flip): 81.21 + Config: configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes_20200901_100301-2d970745.pth +- Name: emanet_r50-d8_769x769_80k_cityscapes + In Collection: EMANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 507.61 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.33 + mIoU(ms+flip): 80.49 + Config: configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes_20200901_100301-16f8de52.pth +- Name: emanet_r101-d8_769x769_80k_cityscapes + In Collection: EMANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 819.67 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.62 + mIoU(ms+flip): 81.0 + Config: configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes_20200901_100301-47a324ce.pth diff --git a/downstream/mmsegmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..58f28b4 --- /dev/null +++ b/downstream/mmsegmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './emanet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git 
a/downstream/mmsegmentation/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..c5dbf20 --- /dev/null +++ b/downstream/mmsegmentation/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './emanet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..73b7788 --- /dev/null +++ b/downstream/mmsegmentation/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/emanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..699aa21 --- /dev/null +++ b/downstream/mmsegmentation/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/emanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/encnet/README.md b/downstream/mmsegmentation/configs/encnet/README.md new file mode 100644 index 0000000..64cfe1a --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/README.md @@ -0,0 +1,58 @@ +# EncNet + +[Context Encoding for Semantic Segmentation](https://arxiv.org/abs/1803.08904) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Recent work has made significant progress in improving spatial resolution for pixelwise labeling with Fully Convolutional Network (FCN) framework by employing Dilated/Atrous convolution, utilizing multi-scale features and refining boundaries. In this paper, we explore the impact of global contextual information in semantic segmentation by introducing the Context Encoding Module, which captures the semantic context of scenes and selectively highlights class-dependent featuremaps. The proposed Context Encoding Module significantly improves semantic segmentation results with only marginal extra computation cost over FCN. Our approach has achieved new state-of-the-art results 51.7% mIoU on PASCAL-Context, 85.9% mIoU on PASCAL VOC 2012. Our single model achieves a final score of 0.5567 on ADE20K test set, which surpass the winning entry of COCO-Place Challenge in 2017. In addition, we also explore how the Context Encoding Module can improve the feature representation of relatively shallow networks for the image classification on CIFAR-10 dataset. Our 14 layer network has achieved an error rate of 3.45%, which is comparable with state-of-the-art approaches with over 10 times more layers. The source code for the complete system are publicly available. + + +
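The Context Encoding Module combines a learned codebook with channel-wise re-weighting of the feature map. The sketch below captures that idea in a deliberately simplified form (the class name, codebook size, and sigmoid gating layer are our assumptions; the module in `enc_head.py`, referenced later in `encnet.yml`, also attaches the semantic-encoding loss):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ContextEncoding(nn.Module):
    """Simplified context-encoding style channel attention for (B, C, H, W) features."""

    def __init__(self, channels: int, num_codes: int = 32):
        super().__init__()
        self.codewords = nn.Parameter(torch.randn(num_codes, channels))
        self.scale = nn.Parameter(torch.ones(num_codes))
        self.fc = nn.Linear(channels, channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, c, h, w = x.shape
        feats = x.view(b, c, -1).permute(0, 2, 1)                       # (B, N, C)
        # Soft-assign every pixel to the codewords by residual distance.
        resid = feats.unsqueeze(2) - self.codewords                     # (B, N, K, C)
        assign = F.softmax(-self.scale * resid.pow(2).sum(-1), dim=2)   # (B, N, K)
        # Aggregate the residuals into one global context vector per image.
        context = (assign.unsqueeze(-1) * resid).sum(1).mean(1)         # (B, C)
        # Predict per-channel scaling factors from the encoded context.
        gamma = torch.sigmoid(self.fc(F.relu(context)))
        return x * gamma.view(b, c, 1, 1)

enc = ContextEncoding(channels=256)
out = enc(torch.randn(2, 256, 32, 32))   # same shape as the input, channels re-weighted
```

The per-channel scaling is what "selectively highlights class-dependent featuremaps" in the abstract: channels tied to classes that the encoded global context deems likely are amplified, the rest are suppressed.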
    + +
    + +## Citation + +```bibtex +@InProceedings{Zhang_2018_CVPR, +author = {Zhang, Hang and Dana, Kristin and Shi, Jianping and Zhang, Zhongyue and Wang, Xiaogang and Tyagi, Ambrish and Agrawal, Amit}, +title = {Context Encoding for Semantic Segmentation}, +booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, +month = {June}, +year = {2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| EncNet | R-50-D8 | 512x1024 | 40000 | 8.6 | 4.58 | 75.67 | 77.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes_20200621_220958-68638a47.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes-20200621_220958.log.json) | +| EncNet | R-101-D8 | 512x1024 | 40000 | 12.1 | 2.66 | 75.81 | 77.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes_20200621_220933-35e0a3e8.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes-20200621_220933.log.json) | +| EncNet | R-50-D8 | 769x769 | 40000 | 9.8 | 1.82 | 76.24 | 77.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes_20200621_220958-3bcd2884.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes-20200621_220958.log.json) | +| EncNet | R-101-D8 | 769x769 | 40000 | 13.7 | 1.26 | 74.25 | 76.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes_20200621_220933-2fafed55.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes-20200621_220933.log.json) | +| EncNet | R-50-D8 | 512x1024 | 80000 | - | - | 77.94 | 79.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes_20200622_003554-fc5c5624.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes-20200622_003554.log.json) | +| EncNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes_20200622_003555-1de64bec.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes-20200622_003555.log.json) | +| EncNet | R-50-D8 | 769x769 | 80000 | - | - | 77.44 | 78.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes_20200622_003554-55096dcb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes-20200622_003554.log.json) | +| EncNet | R-101-D8 | 769x769 | 80000 | - | - | 76.10 | 76.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes_20200622_003555-470ef79d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes-20200622_003555.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| EncNet | R-50-D8 | 512x512 | 80000 | 10.1 | 22.81 | 39.53 | 41.17 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k_20200622_042412-44b46b04.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k-20200622_042412.log.json) | +| EncNet | R-101-D8 | 512x512 | 80000 | 13.6 | 14.87 | 42.11 | 43.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k_20200622_101128-dd35e237.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k-20200622_101128.log.json) | +| EncNet | R-50-D8 | 512x512 | 160000 | - | - | 40.10 | 41.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k_20200622_101059-b2db95e0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k-20200622_101059.log.json) | +| EncNet | R-101-D8 | 512x512 | 160000 | - | - | 42.61 | 44.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k_20200622_073348-7989641f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k-20200622_073348.log.json) | diff --git a/downstream/mmsegmentation/configs/encnet/encnet.yml b/downstream/mmsegmentation/configs/encnet/encnet.yml new file mode 100644 index 0000000..18fb32a --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet.yml @@ -0,0 +1,232 @@ +Collections: +- Name: EncNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/1803.08904 + Title: Context Encoding for Semantic Segmentation + README: configs/encnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/enc_head.py#L63 + Version: v0.17.0 + Converted From: + Code: https://github.com/zhanghang1989/PyTorch-Encoding +Models: +- Name: encnet_r50-d8_512x1024_40k_cityscapes + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 218.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.67 + mIoU(ms+flip): 77.08 + Config: configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes_20200621_220958-68638a47.pth +- Name: encnet_r101-d8_512x1024_40k_cityscapes + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 375.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 12.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.81 + mIoU(ms+flip): 77.21 + Config: configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes_20200621_220933-35e0a3e8.pth +- Name: encnet_r50-d8_769x769_40k_cityscapes + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 549.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 9.8 + Results: + - Task: Semantic Segmentation + 
Dataset: Cityscapes + Metrics: + mIoU: 76.24 + mIoU(ms+flip): 77.85 + Config: configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes_20200621_220958-3bcd2884.pth +- Name: encnet_r101-d8_769x769_40k_cityscapes + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 793.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 13.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.25 + mIoU(ms+flip): 76.25 + Config: configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes_20200621_220933-2fafed55.pth +- Name: encnet_r50-d8_512x1024_80k_cityscapes + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.94 + mIoU(ms+flip): 79.13 + Config: configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes_20200622_003554-fc5c5624.pth +- Name: encnet_r101-d8_512x1024_80k_cityscapes + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.55 + mIoU(ms+flip): 79.47 + Config: configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes_20200622_003555-1de64bec.pth +- Name: encnet_r50-d8_769x769_80k_cityscapes + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.44 + mIoU(ms+flip): 78.72 + Config: configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes_20200622_003554-55096dcb.pth +- Name: encnet_r101-d8_769x769_80k_cityscapes + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.1 + mIoU(ms+flip): 76.97 + Config: configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes_20200622_003555-470ef79d.pth +- Name: encnet_r50-d8_512x512_80k_ade20k + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 43.84 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.53 + mIoU(ms+flip): 41.17 + Config: configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k_20200622_042412-44b46b04.pth +- Name: encnet_r101-d8_512x512_80k_ade20k + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 67.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.11 + mIoU(ms+flip): 43.61 + Config: configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k_20200622_101128-dd35e237.pth +- Name: encnet_r50-d8_512x512_160k_ade20k + In Collection: EncNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.1 + mIoU(ms+flip): 41.71 + Config: configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k_20200622_101059-b2db95e0.pth +- Name: encnet_r101-d8_512x512_160k_ade20k + In Collection: EncNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.61 + mIoU(ms+flip): 44.01 + Config: configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k_20200622_073348-7989641f.pth diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..f34373d --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..0b0207b --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..8fec6ba --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..c264af9 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = 
'./encnet_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..8a6968e --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..9415100 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..d6ade67 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..55648c0 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './encnet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..4ea6ed0 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..d2feeef --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..2a5dc20 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), 
auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..9cb7952 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..81f3cbf --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..835375c --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..d311e33 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..7b535f3 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..600b701 --- /dev/null +++ b/downstream/mmsegmentation/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/encnet_r50-d8.py', 
'../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + backbone=dict(stem_channels=128), + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/erfnet/README.md b/downstream/mmsegmentation/configs/erfnet/README.md new file mode 100644 index 0000000..83c74c5 --- /dev/null +++ b/downstream/mmsegmentation/configs/erfnet/README.md @@ -0,0 +1,51 @@ +# ERFNet + +[ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation](http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Semantic segmentation is a challenging task that addresses most of the perception needs of intelligent vehicles (IVs) in an unified way. Deep neural networks excel at this task, as they can be trained end-to-end to accurately classify multiple object categories in an image at pixel level. However, a good tradeoff between high quality and computational resources is yet not present in the state-of-the-art semantic segmentation approaches, limiting their application in real vehicles. In this paper, we propose a deep architecture that is able to run in real time while providing accurate semantic segmentation. The core of our architecture is a novel layer that uses residual connections and factorized convolutions in order to remain efficient while retaining remarkable accuracy. Our approach is able to run at over 83 FPS in a single Titan X, and 7 FPS in a Jetson TX1 (embedded device). A comprehensive set of experiments on the publicly available Cityscapes data set demonstrates that our system achieves an accuracy that is similar to the state of the art, while being orders of magnitude faster to compute than other architectures that achieve top precision. The resulting tradeoff makes our model an ideal approach for scene understanding in IV applications. The code is publicly available at: https://github.com/Eromera/erfnet. + + +
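The building block the abstract refers to, a residual layer made of factorized convolutions, can be sketched roughly as follows. This is an illustrative PyTorch snippet written from the abstract alone, not the backbone in `mmseg/models/backbones/erfnet.py`; the class name and channel count are made up.

```python
import torch
import torch.nn as nn


class FactorizedResidualBlock(nn.Module):
    """Sketch of a residual block built from factorized (3x1 then 1x3) convolutions."""

    def __init__(self, channels):
        super().__init__()
        self.branch = nn.Sequential(
            nn.Conv2d(channels, channels, (3, 1), padding=(1, 0)),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, (1, 3), padding=(0, 1)),
            nn.BatchNorm2d(channels),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Factorizing a 3x3 convolution into 3x1 and 1x3 keeps the block cheap;
        # the residual connection preserves accuracy.
        return self.relu(x + self.branch(x))


out = FactorizedResidualBlock(64)(torch.randn(1, 64, 64, 128))  # same shape as input
```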
    + +
    + +## Citation + +```bibtex +@article{romera2017erfnet, + title={Erfnet: Efficient residual factorized convnet for real-time semantic segmentation}, + author={Romera, Eduardo and Alvarez, Jos{\'e} M and Bergasa, Luis M and Arroyo, Roberto}, + journal={IEEE Transactions on Intelligent Transportation Systems}, + volume={19}, + number={1}, + pages={263--272}, + year={2017}, + publisher={IEEE} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ERFNet | ERFNet | 512x1024 | 160000 | 6.04 | 15.26 | 71.08 | 72.6 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20211126_082056-03d333ed.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20211126_082056.log.json) | + +Note: + +- The model is trained from scratch. + +- The last deconvolution layer in the [original paper](https://github.com/Eromera/erfnet_pytorch/blob/master/train/erfnet.py#L123) is replaced by a naive `FCNHead` decoder head and a bilinear upsampling layer, which was found to be more effective and efficient (see the sketch below).
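In config terms, the decoder replacement described in the note amounts to overriding `decode_head` with an `FCNHead` and letting the segmentor upsample the logits bilinearly. The snippet below only sketches that pattern; the channel numbers and other values are placeholders, not the ones in this repository's `_base_/models/erfnet_fcn.py`.

```python
# Sketch only: the values below are placeholders, not the actual erfnet_fcn base config.
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(
        type='FCNHead',       # naive decoder in place of the original deconvolution
        in_channels=16,       # placeholder: channels produced by the ERFNet backbone
        channels=128,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,  # logits are bilinearly upsampled to the input size
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
```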
diff --git a/downstream/mmsegmentation/configs/erfnet/erfnet.yml b/downstream/mmsegmentation/configs/erfnet/erfnet.yml new file mode 100644 index 0000000..e4c34f9 --- /dev/null +++ b/downstream/mmsegmentation/configs/erfnet/erfnet.yml @@ -0,0 +1,37 @@ +Collections: +- Name: ERFNet + Metadata: + Training Data: + - Cityscapes + Paper: + URL: http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf + Title: 'ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation' + README: configs/erfnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/erfnet.py#L321 + Version: v0.20.0 + Converted From: + Code: https://github.com/Eromera/erfnet_pytorch +Models: +- Name: erfnet_fcn_4x4_512x1024_160k_cityscapes + In Collection: ERFNet + Metadata: + backbone: ERFNet + crop size: (512,1024) + lr schd: 160000 + inference time (ms/im): + - value: 65.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.04 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.08 + mIoU(ms+flip): 72.6 + Config: configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20211126_082056-03d333ed.pth diff --git a/downstream/mmsegmentation/configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py new file mode 100644 index 0000000..8cb8e51 --- /dev/null +++ b/downstream/mmsegmentation/configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/erfnet_fcn.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/fastfcn/README.md b/downstream/mmsegmentation/configs/fastfcn/README.md new file mode 100644 index 0000000..a969a4d --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/README.md @@ -0,0 +1,62 @@ +# FastFCN + +[FastFCN: Rethinking Dilated Convolution in the Backbone for Semantic Segmentation](https://arxiv.org/abs/1903.11816) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Modern approaches for semantic segmentation usually employ dilated convolutions in the backbone to extract high-resolution feature maps, which brings heavy computation complexity and memory footprint. To replace the time and memory consuming dilated convolutions, we propose a novel joint upsampling module named Joint Pyramid Upsampling (JPU) by formulating the task of extracting high-resolution feature maps into a joint upsampling problem. With the proposed JPU, our method reduces the computation complexity by more than three times without performance loss. Experiments show that JPU is superior to other upsampling modules, which can be plugged into many existing approaches to reduce computation complexity and improve performance. By replacing dilated convolutions with the proposed JPU module, our method achieves the state-of-the-art performance in Pascal Context dataset (mIoU of 53.13%) and ADE20K dataset (final score of 0.5584) while running 3 times faster. + + +
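To make the joint-upsampling idea above concrete, the rough PyTorch sketch below upsamples the deeper backbone features to the resolution of the shallowest stage, concatenates them, and applies parallel dilated convolutions, so a decode head sees high-resolution features without a dilated backbone. It is written from the abstract only; it is not the `JPU` neck in `mmseg/models/necks/jpu.py`, and all names and channel sizes are illustrative.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class JointUpsamplingSketch(nn.Module):
    """Rough, illustrative sketch of JPU-style joint upsampling."""

    def __init__(self, in_channels=(512, 1024, 2048), mid_channels=512,
                 dilations=(1, 2, 4, 8)):
        super().__init__()
        # Project every backbone stage to a common width.
        self.reduce = nn.ModuleList(
            [nn.Conv2d(c, mid_channels, 1) for c in in_channels])
        # Parallel dilated convolutions over the merged, upsampled features.
        self.dilated = nn.ModuleList([
            nn.Conv2d(len(in_channels) * mid_channels, mid_channels, 3,
                      padding=d, dilation=d) for d in dilations
        ])

    def forward(self, feats):
        size = feats[0].shape[-2:]  # resolution of the shallowest stage
        merged = torch.cat([
            F.interpolate(reduce(f), size=size, mode='bilinear',
                          align_corners=False)
            for reduce, f in zip(self.reduce, feats)
        ], dim=1)
        return torch.cat([conv(merged) for conv in self.dilated], dim=1)


feats = [torch.randn(1, 512, 64, 64), torch.randn(1, 1024, 32, 32),
         torch.randn(1, 2048, 16, 16)]
high_res = JointUpsamplingSketch()(feats)  # 4 * 512 channels at 64x64
```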
    + +
    + +## Citation + +```bibtex +@article{wu2019fastfcn, +title={Fastfcn: Rethinking dilated convolution in the backbone for semantic segmentation}, +author={Wu, Huikai and Zhang, Junge and Huang, Kaiqi and Liang, Kongming and Yu, Yizhou}, +journal={arXiv preprint arXiv:1903.11816}, +year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FastFCN + DeepLabV3 | R-50-D32 | 512x1024 | 80000 | 5.67 | 2.64 | 79.12 | 80.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722-5d1a2648.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722.log.json) | +| FastFCN + DeepLabV3 (4x4) | R-50-D32 | 512x1024 | 80000 | 9.79 | - | 79.52 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357-72220849.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357.log.json) | +| FastFCN + PSPNet | R-50-D32 | 512x1024 | 80000 | 5.67 | 4.40 | 79.26 | 80.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722-57749bed.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722.log.json) | +| FastFCN + PSPNet (4x4) | R-50-D32 | 512x1024 | 80000 | 9.94 | - | 78.76 | 80.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841-77e87b0a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841.log.json) | +| FastFCN + EncNet | R-50-D32 | 512x1024 | 80000 | 8.15 | 4.77 | 77.97 |79.92 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036-78da5046.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036.log.json) | +| FastFCN + EncNet (4x4)| R-50-D32 | 512x1024 | 80000 | 15.45 | - | 78.6 | 80.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217-e1eb6dbb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FastFCN + DeepLabV3 | R-50-D32 | 512x1024 | 80000 | 8.46 | 12.06 | 41.88 | 42.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619-3aa40f2d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619.log.json) | +| FastFCN + DeepLabV3 | R-50-D32 | 512x1024 | 160000 | - | - | 43.58 | 44.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246-27036aee.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246.log.json) | +| FastFCN + PSPNet | R-50-D32 | 512x1024 | 80000 | 8.02 | 19.21 | 41.40 | 42.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137-993d07c8.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137.log.json) | +| FastFCN + PSPNet | R-50-D32 | 512x1024 | 160000 | - | - | 42.63 | 43.71 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455-e8f5a2fd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455.log.json) | +| FastFCN + EncNet | R-50-D32 | 512x1024 | 80000 | 9.67 | 17.23 | 40.88 | 42.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214-65aef6dd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214.log.json) | +| FastFCN + EncNet | R-50-D32 | 512x1024 | 160000 | - | - | 42.50 | 44.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456-d875ce3c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456.log.json) | + +Note: + +- `4x4` means 4 GPUs with 4 samples per GPU in training; the default setting is 4 GPUs with 2 samples per GPU in training (see the sketch below). +- Results of [DeepLabV3 (mIoU: 79.32)](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3), [PSPNet (mIoU: 78.55)](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet) and [ENCNet (mIoU: 77.94)](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet) can be found in each original repository.
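For reference, the `4x4` variants added by this patch change nothing but the data sampler relative to their base configs. For example, `fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py`, included later in this diff, is simply:

```python
_base_ = './fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py'
data = dict(
    samples_per_gpu=4,  # 4 samples per GPU instead of the default 2
    workers_per_gpu=4,
)
```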
diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn.yml b/downstream/mmsegmentation/configs/fastfcn/fastfcn.yml new file mode 100644 index 0000000..6fdc556 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn.yml @@ -0,0 +1,235 @@ +Collections: +- Name: FastFCN + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/1903.11816 + Title: 'FastFCN: Rethinking Dilated Convolution in the Backbone for Semantic Segmentation' + README: configs/fastfcn/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/necks/jpu.py#L12 + Version: v0.18.0 + Converted From: + Code: https://github.com/wuhuikai/FastFCN +Models: +- Name: fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 378.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.67 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.12 + mIoU(ms+flip): 80.58 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722-5d1a2648.pth +- Name: fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + Training Memory (GB): 9.79 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.52 + mIoU(ms+flip): 80.91 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357-72220849.pth +- Name: fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 227.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.67 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.26 + mIoU(ms+flip): 80.86 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722-57749bed.pth +- Name: fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + Training Memory (GB): 9.94 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.76 + mIoU(ms+flip): 80.03 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841-77e87b0a.pth +- Name: fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 209.64 + hardware: V100 + backend: PyTorch + batch size: 
1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.15 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.97 + mIoU(ms+flip): 79.92 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036-78da5046.pth +- Name: fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + Training Memory (GB): 15.45 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.6 + mIoU(ms+flip): 80.25 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217-e1eb6dbb.pth +- Name: fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 82.92 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.46 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.88 + mIoU(ms+flip): 42.91 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619-3aa40f2d.pth +- Name: fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.58 + mIoU(ms+flip): 44.92 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246-27036aee.pth +- Name: fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 52.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.02 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.4 + mIoU(ms+flip): 42.12 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137-993d07c8.pth +- Name: fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.63 + mIoU(ms+flip): 43.71 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455-e8f5a2fd.pth +- Name: fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k + In Collection: FastFCN + Metadata: + backbone: 
R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 58.04 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.67 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.88 + mIoU(ms+flip): 42.36 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214-65aef6dd.pth +- Name: fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k + In Collection: FastFCN + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.5 + mIoU(ms+flip): 44.21 + Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456-d875ce3c.pth diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..87fc274 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py @@ -0,0 +1,6 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py' +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..dc86da3 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py @@ -0,0 +1,20 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='ASPPHead', + in_channels=2048, + in_index=2, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py new file mode 100644 index 0000000..dbf9f80 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py @@ -0,0 +1,20 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='ASPPHead', + in_channels=2048, + in_index=2, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git 
a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py new file mode 100644 index 0000000..b14b1f6 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py @@ -0,0 +1,20 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='ASPPHead', + in_channels=2048, + in_index=2, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..59d294b --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py @@ -0,0 +1,6 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py' +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..cc68edf --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py @@ -0,0 +1,24 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='EncHead', + in_channels=[512, 1024, 2048], + in_index=(0, 1, 2), + channels=512, + num_codes=32, + use_se_loss=True, + add_lateral=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_se_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py new file mode 100644 index 0000000..12f0add --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py @@ -0,0 +1,24 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='EncHead', + in_channels=[512, 1024, 2048], + in_index=(0, 1, 2), + channels=512, + num_codes=32, + use_se_loss=True, + add_lateral=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_se_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git 
a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py new file mode 100644 index 0000000..d3e2e9c --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py @@ -0,0 +1,24 @@ +# model settings +_base_ = './fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + decode_head=dict( + _delete_=True, + type='EncHead', + in_channels=[512, 1024, 2048], + in_index=(0, 1, 2), + channels=512, + num_codes=32, + use_se_loss=True, + add_lateral=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_se_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..5fe5ca1 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fastfcn_r50-d32_jpu_psp.py', + '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..e7637fa --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fastfcn_r50-d32_jpu_psp.py', + '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py new file mode 100644 index 0000000..e267ac6 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/fastfcn_r50-d32_jpu_psp.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py new file mode 100644 index 0000000..22e0447 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/fastfcn_r50-d32_jpu_psp.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/fastscnn/README.md b/downstream/mmsegmentation/configs/fastscnn/README.md 
new file mode 100644 index 0000000..b0023b2 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastscnn/README.md @@ -0,0 +1,41 @@ +# Fast-SCNN + +[Fast-SCNN for Semantic Segmentation](https://arxiv.org/abs/1902.04502) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The encoder-decoder framework is state-of-the-art for offline semantic image segmentation. Since the rise in autonomous systems, real-time computation is increasingly desirable. In this paper, we introduce fast segmentation convolutional neural network (Fast-SCNN), an above real-time semantic segmentation model on high resolution image data (1024x2048px) suited to efficient computation on embedded devices with low memory. Building on existing two-branch methods for fast segmentation, we introduce our `learning to downsample' module which computes low-level features for multiple resolution branches simultaneously. Our network combines spatial detail at high resolution with deep features extracted at lower resolution, yielding an accuracy of 68.0% mean intersection over union at 123.5 frames per second on Cityscapes. We also show that large scale pre-training is unnecessary. We thoroughly validate our metric in experiments with ImageNet pre-training and the coarse labeled data of Cityscapes. Finally, we show even faster computation with competitive results on subsampled inputs, without any network modifications. + + +
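As a rough illustration of the "learning to downsample" idea described above: a small stack of strided convolutions computes low-level features once, and both branches of the two-branch network reuse them. The sketch is derived from the abstract only, not from `mmseg/models/backbones/fast_scnn.py`; layer widths and strides are made up.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class LearningToDownsampleSketch(nn.Module):
    """Shared low-level feature extractor feeding two resolution branches."""

    def __init__(self):
        super().__init__()
        self.shared = nn.Sequential(  # three strided convs: 1/2 -> 1/4 -> 1/8
            nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(32, 48, 3, stride=2, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(48, 64, 3, stride=2, padding=1), nn.ReLU(inplace=True),
        )

    def forward(self, image):
        low_level = self.shared(image)           # computed once, at 1/8 resolution
        detail_in = low_level                    # spatial-detail branch input
        context_in = F.avg_pool2d(low_level, 4)  # context branch works at 1/32
        return detail_in, context_in


detail, context = LearningToDownsampleSketch()(torch.randn(1, 3, 1024, 2048))
```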
    + +
    + +## Citation + +```bibtex +@article{poudel2019fast, + title={Fast-scnn: Fast semantic segmentation network}, + author={Poudel, Rudra PK and Liwicki, Stephan and Cipolla, Roberto}, + journal={arXiv preprint arXiv:1902.04502}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FastSCNN | FastSCNN | 512x1024 | 160000 | 3.3 | 56.45 | 70.96 | 72.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853-0cec9937.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853.log.json) | diff --git a/downstream/mmsegmentation/configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py b/downstream/mmsegmentation/configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py new file mode 100644 index 0000000..4698125 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] + +# Re-config the data sampler. +data = dict(samples_per_gpu=4, workers_per_gpu=4) + +# Re-config the optimizer. 
+optimizer = dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=4e-5) diff --git a/downstream/mmsegmentation/configs/fastscnn/fastscnn.yml b/downstream/mmsegmentation/configs/fastscnn/fastscnn.yml new file mode 100644 index 0000000..cad0360 --- /dev/null +++ b/downstream/mmsegmentation/configs/fastscnn/fastscnn.yml @@ -0,0 +1,35 @@ +Collections: +- Name: FastSCNN + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/1902.04502 + Title: Fast-SCNN for Semantic Segmentation + README: configs/fastscnn/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/fast_scnn.py#L272 + Version: v0.17.0 +Models: +- Name: fast_scnn_lr0.12_8x4_160k_cityscapes + In Collection: FastSCNN + Metadata: + backbone: FastSCNN + crop size: (512,1024) + lr schd: 160000 + inference time (ms/im): + - value: 17.71 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.96 + mIoU(ms+flip): 72.65 + Config: configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853-0cec9937.pth diff --git a/downstream/mmsegmentation/configs/fcn/README.md b/downstream/mmsegmentation/configs/fcn/README.md new file mode 100644 index 0000000..f08851e --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/README.md @@ -0,0 +1,110 @@ +# FCN + +[Fully Convolutional Networks for Semantic Segmentation](https://arxiv.org/abs/1411.4038) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Convolutional networks are powerful visual models that yield hierarchies of features. We show that convolutional networks by themselves, trained end-to-end, pixels-to-pixels, exceed the state-of-the-art in semantic segmentation. Our key insight is to build "fully convolutional" networks that take input of arbitrary size and produce correspondingly-sized output with efficient inference and learning. We define and detail the space of fully convolutional networks, explain their application to spatially dense prediction tasks, and draw connections to prior models. We adapt contemporary classification networks (AlexNet, the VGG net, and GoogLeNet) into fully convolutional networks and transfer their learned representations by fine-tuning to the segmentation task. We then define a novel architecture that combines semantic information from a deep, coarse layer with appearance information from a shallow, fine layer to produce accurate and detailed segmentations. Our fully convolutional network achieves state-of-the-art segmentation of PASCAL VOC (20% relative improvement to 62.2% mean IU on 2012), NYUDv2, and SIFT Flow, while inference takes one third of a second for a typical image. + + +
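The combination of "semantic information from a deep, coarse layer with appearance information from a shallow, fine layer" mentioned in the abstract is the FCN skip connection. A minimal sketch, with made-up tensor shapes and 21 classes purely for illustration:

```python
import torch
import torch.nn.functional as F

# Class scores predicted from a deep, coarse feature map and from a shallower, finer one.
coarse_scores = torch.randn(1, 21, 16, 16)  # e.g. 1/32 of the input resolution
fine_scores = torch.randn(1, 21, 32, 32)    # e.g. 1/16 of the input resolution

# Upsample the coarse prediction, add it to the fine one, then upsample to pixel level.
fused = fine_scores + F.interpolate(coarse_scores, size=fine_scores.shape[-2:],
                                    mode='bilinear', align_corners=False)
pixel_scores = F.interpolate(fused, scale_factor=16, mode='bilinear',
                             align_corners=False)
```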
    + +
    + +## Citation + +```bibtex +@article{shelhamer2017fully, + title={Fully convolutional networks for semantic segmentation}, + author={Shelhamer, Evan and Long, Jonathan and Darrell, Trevor}, + journal={IEEE transactions on pattern analysis and machine intelligence}, + volume={39}, + number={4}, + pages={640--651}, + year={2017}, + publisher={IEEE Trans Pattern Anal Mach Intell} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ---------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | R-50-D8 | 512x1024 | 40000 | 5.7 | 4.17 | 72.25 | 73.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608.log.json) | +| FCN | R-101-D8 | 512x1024 | 40000 | 9.2 | 2.66 | 75.45 | 76.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852-a883d3a1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852.log.json) | +| FCN | R-50-D8 | 769x769 | 40000 | 6.5 | 1.80 | 71.47 | 72.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104-977b5d02.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104.log.json) | +| FCN | R-101-D8 | 769x769 | 40000 | 10.4 | 1.19 | 73.93 | 75.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208-7d4ab69c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208.log.json) | +| FCN | R-18-D8 | 512x1024 | 80000 | 1.7 | 14.65 | 71.11 | 72.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_512x1024_80k_cityscapes/fcn_r18-d8_512x1024_80k_cityscapes_20201225_021327-6c50f8b4.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_512x1024_80k_cityscapes/fcn_r18-d8_512x1024_80k_cityscapes-20201225_021327.log.json) | +| FCN | R-50-D8 | 512x1024 | 80000 | - | | 73.61 | 74.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019-03aa804d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019.log.json) | +| FCN | R-101-D8 | 512x1024 | 80000 | - | - | 75.13 | 75.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038-3fb937eb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038.log.json) | +| FCN (FP16)| R-101-D8 | 512x1024 | 80000 | 5.37 | 8.64 | 76.80 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes/fcn_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230921-fb13e883.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes/fcn_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230921.log.json) | +| FCN | R-18-D8 | 769x769 | 80000 | 1.9 | 6.40 | 70.80 | 73.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_769x769_80k_cityscapes/fcn_r18-d8_769x769_80k_cityscapes_20201225_021451-9739d1b8.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_769x769_80k_cityscapes/fcn_r18-d8_769x769_80k_cityscapes-20201225_021451.log.json) | +| FCN | R-50-D8 | 769x769 | 80000 | - | - | 72.64 | 73.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749-f5caeabc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749.log.json) | +| FCN | R-101-D8 | 769x769 | 80000 | - | - | 75.52 | 76.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354-45cbac68.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354.log.json) | +| FCN | R-18b-D8 | 512x1024 | 80000 | 1.6 | 16.74 | 70.24 | 72.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18b-d8_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_512x1024_80k_cityscapes/fcn_r18b-d8_512x1024_80k_cityscapes_20201225_230143-92c0f445.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_512x1024_80k_cityscapes/fcn_r18b-d8_512x1024_80k_cityscapes-20201225_230143.log.json) | +| FCN | R-50b-D8 | 512x1024 | 80000 | 5.6 | 4.20 | 75.65 | 77.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_512x1024_80k_cityscapes/fcn_r50b-d8_512x1024_80k_cityscapes_20201225_094221-82957416.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_512x1024_80k_cityscapes/fcn_r50b-d8_512x1024_80k_cityscapes-20201225_094221.log.json) | +| FCN | R-101b-D8 | 512x1024 | 80000 | 9.1 | 2.73 | 77.37 | 78.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_512x1024_80k_cityscapes/fcn_r101b-d8_512x1024_80k_cityscapes_20201226_160213-4543858f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_512x1024_80k_cityscapes/fcn_r101b-d8_512x1024_80k_cityscapes-20201226_160213.log.json) | +| FCN | R-18b-D8 | 769x769 | 80000 | 1.7 | 6.70 | 69.66 | 72.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_769x769_80k_cityscapes/fcn_r18b-d8_769x769_80k_cityscapes_20201226_004430-32d504e5.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_769x769_80k_cityscapes/fcn_r18b-d8_769x769_80k_cityscapes-20201226_004430.log.json) | +| FCN | R-50b-D8 | 769x769 | 80000 | 6.3 | 1.82 | 73.83 | 76.60 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_769x769_80k_cityscapes/fcn_r50b-d8_769x769_80k_cityscapes_20201225_094223-94552d38.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_769x769_80k_cityscapes/fcn_r50b-d8_769x769_80k_cityscapes-20201225_094223.log.json) | +| FCN | R-101b-D8 | 769x769 | 80000 | 10.3 | 1.15 | 77.02 | 78.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_769x769_80k_cityscapes/fcn_r101b-d8_769x769_80k_cityscapes_20201226_170012-82be37e2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_769x769_80k_cityscapes/fcn_r101b-d8_769x769_80k_cityscapes-20201226_170012.log.json) | +| FCN (D6) | R-50-D16 | 512x1024 | 40000 | 3.4 | 10.22 | 77.06 | 78.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes_20210305_130133-98d5d1bc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes-20210305_130133.log.json) | +| FCN (D6) | R-50-D16 | 512x1024 | 80000 | - | 10.35 | 77.27 | 78.88 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes/fcn_d6_r50-d16_512x1024_80k_cityscapes_20210306_115604-133c292f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes/fcn_d6_r50-d16_512x1024_80k_cityscapes-20210306_115604.log.json) | +| FCN (D6) | R-50-D16 | 769x769 | 40000 | 3.7 | 4.17 | 76.82 | 78.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes/fcn_d6_r50-d16_769x769_40k_cityscapes_20210305_185744-1aab18ed.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes/fcn_d6_r50-d16_769x769_40k_cityscapes-20210305_185744.log.json) | +| FCN (D6) | R-50-D16 | 769x769 | 80000 | - | 4.15 | 77.04 | 78.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes/fcn_d6_r50-d16_769x769_80k_cityscapes_20210305_200413-109d88eb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes/fcn_d6_r50-d16_769x769_80k_cityscapes-20210305_200413.log.json) | +| FCN (D6) | R-101-D16 | 512x1024 | 40000 | 4.5 | 8.04 | 77.36 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes/fcn_d6_r101-d16_512x1024_40k_cityscapes_20210305_130337-9cf2b450.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes/fcn_d6_r101-d16_512x1024_40k_cityscapes-20210305_130337.log.json) | +| FCN (D6) | R-101-D16 | 512x1024 | 80000 | - | 8.26 | 78.46 | 80.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes/fcn_d6_r101-d16_512x1024_80k_cityscapes_20210308_102747-cb336445.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes/fcn_d6_r101-d16_512x1024_80k_cityscapes-20210308_102747.log.json) | +| FCN (D6) | R-101-D16 | 769x769 | 40000 | 5.0 | 3.12 | 77.28 | 78.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes/fcn_d6_r101-d16_769x769_40k_cityscapes_20210308_102453-60b114e9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes/fcn_d6_r101-d16_769x769_40k_cityscapes-20210308_102453.log.json) | +| FCN (D6) | R-101-D16 | 769x769 | 80000 | - | 3.21 | 78.06 | 79.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes/fcn_d6_r101-d16_769x769_80k_cityscapes_20210306_120016-e33adc4f.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes/fcn_d6_r101-d16_769x769_80k_cityscapes-20210306_120016.log.json) | +| FCN (D6) | R-50b-D16 | 512x1024 | 80000 | 3.2 | 10.16 | 76.99 | 79.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes/fcn_d6_r50b-d16_512x1024_80k_cityscapes_20210311_125550-6a0b62e9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b_d16_512x1024_80k_cityscapes/fcn_d6_r50b_d16_512x1024_80k_cityscapes-20210311_125550.log.json) | +| FCN (D6) | R-50b-D16 | 769x769 | 80000 | 3.6 | 4.17 | 76.86 | 78.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes/fcn_d6_r50b-d16_769x769_80k_cityscapes_20210311_131012-d665f231.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b_d16_769x769_80k_cityscapes/fcn_d6_r50b_d16_769x769_80k_cityscapes-20210311_131012.log.json) | +| FCN (D6) | R-101b-D16 | 512x1024 | 80000 | 4.3 | 8.46 | 77.72 | 79.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes/fcn_d6_r101b-d16_512x1024_80k_cityscapes_20210311_144305-3f2eb5b4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b_d16_512x1024_80k_cityscapes/fcn_d6_r101b_d16_512x1024_80k_cityscapes-20210311_144305.log.json) | +| FCN (D6) | R-101b-D16 | 769x769 | 80000 | 4.8 | 3.32 | 77.34 | 78.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes/fcn_d6_r101b-d16_769x769_80k_cityscapes_20210311_154527-c4d8bfbc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b_d16_769x769_80k_cityscapes/fcn_d6_r101b_d16_769x769_80k_cityscapes-20210311_154527.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | R-50-D8 | 512x512 | 80000 | 8.5 | 23.49 | 35.94 | 37.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016-f8ac5082.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016.log.json) | +| FCN | R-101-D8 | 512x512 | 80000 | 12 | 14.78 | 39.61 | 40.83 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143-bc1809f7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143.log.json) | +| FCN | R-50-D8 | 512x512 | 160000 | - | - | 36.10 | 38.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713-4edbc3b4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713.log.json) | +| FCN | R-101-D8 | 512x512 | 160000 | - | - | 39.91 | 41.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816-fd192bd5.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | R-50-D8 | 512x512 | 20000 | 5.7 | 23.28 | 67.08 | 69.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715-52dc5306.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715.log.json) | +| FCN | R-101-D8 | 512x512 | 20000 | 9.2 | 14.81 | 71.16 | 73.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842-0bb4e798.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842.log.json) | +| FCN | R-50-D8 | 512x512 | 40000 | - | - | 66.97 | 69.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222-5e2dbf40.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) | +| FCN | R-101-D8 | 512x512 | 40000 | - | - | 69.91 | 72.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240-4c8bcefd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | R-101-D8 | 480x480 | 40000 | - | 9.93 | 44.43 | 45.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context/fcn_r101-d8_480x480_40k_pascal_context_20210421_154757-b5e97937.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context/fcn_r101-d8_480x480_40k_pascal_context-20210421_154757.log.json) | +| FCN | R-101-D8 | 480x480 | 80000 | - | - | 44.13 | 45.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context/fcn_r101-d8_480x480_80k_pascal_context_20210421_163310-4711813f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context/fcn_r101-d8_480x480_80k_pascal_context-20210421_163310.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | R-101-D8 | 480x480 | 40000 | - | - | 48.42 | 50.4 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context_59/fcn_r101-d8_480x480_40k_pascal_context_59_20210415_230724-8cf83682.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context_59/fcn_r101-d8_480x480_40k_pascal_context_59-20210415_230724.log.json) | +| FCN | R-101-D8 | 480x480 | 80000 | - | - | 49.35 | 51.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context_59/fcn_r101-d8_480x480_80k_pascal_context_59_20210416_110804-9a6f2c94.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context_59/fcn_r101-d8_480x480_80k_pascal_context_59-20210416_110804.log.json) | + +Note: + +- `FP16` means Mixed Precision (FP16) is adopted in training. +- `FCN D6` means dilation rate of convolution operator in FCN is 6. diff --git a/downstream/mmsegmentation/configs/fcn/fcn.yml b/downstream/mmsegmentation/configs/fcn/fcn.yml new file mode 100644 index 0000000..563391c --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn.yml @@ -0,0 +1,827 @@ +Collections: +- Name: FCN + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + - Pascal Context + - Pascal Context 59 + Paper: + URL: https://arxiv.org/abs/1411.4038 + Title: Fully Convolutional Networks for Semantic Segmentation + README: configs/fcn/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/fcn_head.py#L11 + Version: v0.17.0 + Converted From: + Code: https://github.com/BVLC/caffe/wiki/Model-Zoo#fcn +Models: +- Name: fcn_r50-d8_512x1024_40k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 239.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 72.25 + mIoU(ms+flip): 73.36 + Config: configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_40k_cityscapes/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth +- Name: fcn_r101-d8_512x1024_40k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 375.94 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.45 + mIoU(ms+flip): 76.58 + Config: configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_40k_cityscapes/fcn_r101-d8_512x1024_40k_cityscapes_20200604_181852-a883d3a1.pth +- Name: fcn_r50-d8_769x769_40k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 555.56 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.47 + mIoU(ms+flip): 72.54 + Config: configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_40k_cityscapes/fcn_r50-d8_769x769_40k_cityscapes_20200606_113104-977b5d02.pth +- Name: 
fcn_r101-d8_769x769_40k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 840.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.93 + mIoU(ms+flip): 75.14 + Config: configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_40k_cityscapes/fcn_r101-d8_769x769_40k_cityscapes_20200606_113208-7d4ab69c.pth +- Name: fcn_r18-d8_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-18-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 68.26 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.11 + mIoU(ms+flip): 72.91 + Config: configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_512x1024_80k_cityscapes/fcn_r18-d8_512x1024_80k_cityscapes_20201225_021327-6c50f8b4.pth +- Name: fcn_r50-d8_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.61 + mIoU(ms+flip): 74.24 + Config: configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x1024_80k_cityscapes/fcn_r50-d8_512x1024_80k_cityscapes_20200606_113019-03aa804d.pth +- Name: fcn_r101-d8_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.13 + mIoU(ms+flip): 75.94 + Config: configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x1024_80k_cityscapes/fcn_r101-d8_512x1024_80k_cityscapes_20200606_113038-3fb937eb.pth +- Name: fcn_r101-d8_fp16_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 115.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (512,1024) + Training Memory (GB): 5.37 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.8 + Config: configs/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes/fcn_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230921-fb13e883.pth +- Name: fcn_r18-d8_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-18-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 156.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.8 + mIoU(ms+flip): 73.16 + Config: configs/fcn/fcn_r18-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18-d8_769x769_80k_cityscapes/fcn_r18-d8_769x769_80k_cityscapes_20201225_021451-9739d1b8.pth +- Name: fcn_r50-d8_769x769_80k_cityscapes + In Collection: 
FCN + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 72.64 + mIoU(ms+flip): 73.32 + Config: configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_769x769_80k_cityscapes/fcn_r50-d8_769x769_80k_cityscapes_20200606_195749-f5caeabc.pth +- Name: fcn_r101-d8_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.52 + mIoU(ms+flip): 76.61 + Config: configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_769x769_80k_cityscapes/fcn_r101-d8_769x769_80k_cityscapes_20200606_214354-45cbac68.pth +- Name: fcn_r18b-d8_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-18b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 59.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.24 + mIoU(ms+flip): 72.77 + Config: configs/fcn/fcn_r18b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_512x1024_80k_cityscapes/fcn_r18b-d8_512x1024_80k_cityscapes_20201225_230143-92c0f445.pth +- Name: fcn_r50b-d8_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 238.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.65 + mIoU(ms+flip): 77.59 + Config: configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_512x1024_80k_cityscapes/fcn_r50b-d8_512x1024_80k_cityscapes_20201225_094221-82957416.pth +- Name: fcn_r101b-d8_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 366.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.37 + mIoU(ms+flip): 78.77 + Config: configs/fcn/fcn_r101b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_512x1024_80k_cityscapes/fcn_r101b-d8_512x1024_80k_cityscapes_20201226_160213-4543858f.pth +- Name: fcn_r18b-d8_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-18b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 149.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 69.66 + mIoU(ms+flip): 72.07 + Config: configs/fcn/fcn_r18b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r18b-d8_769x769_80k_cityscapes/fcn_r18b-d8_769x769_80k_cityscapes_20201226_004430-32d504e5.pth +- Name: fcn_r50b-d8_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50b-D8 + 
crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 549.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.83 + mIoU(ms+flip): 76.6 + Config: configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50b-d8_769x769_80k_cityscapes/fcn_r50b-d8_769x769_80k_cityscapes_20201225_094223-94552d38.pth +- Name: fcn_r101b-d8_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 869.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.02 + mIoU(ms+flip): 78.67 + Config: configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101b-d8_769x769_80k_cityscapes/fcn_r101b-d8_769x769_80k_cityscapes_20201226_170012-82be37e2.pth +- Name: fcn_d6_r50-d16_512x1024_40k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50-D16 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 97.85 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.06 + mIoU(ms+flip): 78.85 + Config: configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes/fcn_d6_r50-d16_512x1024_40k_cityscapes_20210305_130133-98d5d1bc.pth +- Name: fcn_d6_r50-d16_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50-D16 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 96.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.27 + mIoU(ms+flip): 78.88 + Config: configs/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes/fcn_d6_r50-d16_512x1024_80k_cityscapes_20210306_115604-133c292f.pth +- Name: fcn_d6_r50-d16_769x769_40k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50-D16 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 239.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 3.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.82 + mIoU(ms+flip): 78.22 + Config: configs/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes/fcn_d6_r50-d16_769x769_40k_cityscapes_20210305_185744-1aab18ed.pth +- Name: fcn_d6_r50-d16_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50-D16 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 240.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.04 + mIoU(ms+flip): 78.4 + Config: 
configs/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes/fcn_d6_r50-d16_769x769_80k_cityscapes_20210305_200413-109d88eb.pth +- Name: fcn_d6_r101-d16_512x1024_40k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D16 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 124.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 4.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.36 + mIoU(ms+flip): 79.18 + Config: configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes/fcn_d6_r101-d16_512x1024_40k_cityscapes_20210305_130337-9cf2b450.pth +- Name: fcn_d6_r101-d16_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D16 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 121.07 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.46 + mIoU(ms+flip): 80.42 + Config: configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes/fcn_d6_r101-d16_512x1024_80k_cityscapes_20210308_102747-cb336445.pth +- Name: fcn_d6_r101-d16_769x769_40k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D16 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 320.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 5.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.28 + mIoU(ms+flip): 78.95 + Config: configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes/fcn_d6_r101-d16_769x769_40k_cityscapes_20210308_102453-60b114e9.pth +- Name: fcn_d6_r101-d16_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101-D16 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 311.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.06 + mIoU(ms+flip): 79.58 + Config: configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes/fcn_d6_r101-d16_769x769_80k_cityscapes_20210306_120016-e33adc4f.pth +- Name: fcn_d6_r50b-d16_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-50b-D16 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 98.43 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.99 + mIoU(ms+flip): 79.03 + Config: configs/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes/fcn_d6_r50b-d16_512x1024_80k_cityscapes_20210311_125550-6a0b62e9.pth +- Name: fcn_d6_r50b-d16_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: 
R-50b-D16 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 239.81 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 3.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.86 + mIoU(ms+flip): 78.52 + Config: configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes/fcn_d6_r50b-d16_769x769_80k_cityscapes_20210311_131012-d665f231.pth +- Name: fcn_d6_r101b-d16_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101b-D16 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 118.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 4.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.72 + mIoU(ms+flip): 79.53 + Config: configs/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes/fcn_d6_r101b-d16_512x1024_80k_cityscapes_20210311_144305-3f2eb5b4.pth +- Name: fcn_d6_r101b-d16_769x769_80k_cityscapes + In Collection: FCN + Metadata: + backbone: R-101b-D16 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 301.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 4.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.34 + mIoU(ms+flip): 78.91 + Config: configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes/fcn_d6_r101b-d16_769x769_80k_cityscapes_20210311_154527-c4d8bfbc.pth +- Name: fcn_r50-d8_512x512_80k_ade20k + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 42.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 35.94 + mIoU(ms+flip): 37.94 + Config: configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_80k_ade20k/fcn_r50-d8_512x512_80k_ade20k_20200614_144016-f8ac5082.pth +- Name: fcn_r101-d8_512x512_80k_ade20k + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 67.66 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.61 + mIoU(ms+flip): 40.83 + Config: configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_80k_ade20k/fcn_r101-d8_512x512_80k_ade20k_20200615_014143-bc1809f7.pth +- Name: fcn_r50-d8_512x512_160k_ade20k + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 36.1 + mIoU(ms+flip): 38.08 + Config: configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_160k_ade20k/fcn_r50-d8_512x512_160k_ade20k_20200615_100713-4edbc3b4.pth +- Name: fcn_r101-d8_512x512_160k_ade20k + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.91 + mIoU(ms+flip): 41.4 + Config: configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_160k_ade20k/fcn_r101-d8_512x512_160k_ade20k_20200615_105816-fd192bd5.pth +- Name: fcn_r50-d8_512x512_20k_voc12aug + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 42.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.7 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 67.08 + mIoU(ms+flip): 69.94 + Config: configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_20k_voc12aug/fcn_r50-d8_512x512_20k_voc12aug_20200617_010715-52dc5306.pth +- Name: fcn_r101-d8_512x512_20k_voc12aug + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 67.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 71.16 + mIoU(ms+flip): 73.57 + Config: configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_20k_voc12aug/fcn_r101-d8_512x512_20k_voc12aug_20200617_010842-0bb4e798.pth +- Name: fcn_r50-d8_512x512_40k_voc12aug + In Collection: FCN + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 66.97 + mIoU(ms+flip): 69.04 + Config: configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r50-d8_512x512_40k_voc12aug/fcn_r50-d8_512x512_40k_voc12aug_20200613_161222-5e2dbf40.pth +- Name: fcn_r101-d8_512x512_40k_voc12aug + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 69.91 + mIoU(ms+flip): 72.38 + Config: configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_512x512_40k_voc12aug/fcn_r101-d8_512x512_40k_voc12aug_20200613_161240-4c8bcefd.pth +- Name: fcn_r101-d8_480x480_40k_pascal_context + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 100.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 44.43 + mIoU(ms+flip): 45.63 + Config: configs/fcn/fcn_r101-d8_480x480_40k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context/fcn_r101-d8_480x480_40k_pascal_context_20210421_154757-b5e97937.pth +- Name: fcn_r101-d8_480x480_80k_pascal_context + In Collection: FCN + Metadata: + backbone: 
R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 44.13 + mIoU(ms+flip): 45.26 + Config: configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context/fcn_r101-d8_480x480_80k_pascal_context_20210421_163310-4711813f.pth +- Name: fcn_r101-d8_480x480_40k_pascal_context_59 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 48.42 + mIoU(ms+flip): 50.4 + Config: configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_40k_pascal_context_59/fcn_r101-d8_480x480_40k_pascal_context_59_20210415_230724-8cf83682.pth +- Name: fcn_r101-d8_480x480_80k_pascal_context_59 + In Collection: FCN + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 49.35 + mIoU(ms+flip): 51.38 + Config: configs/fcn/fcn_r101-d8_480x480_80k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fcn/fcn_r101-d8_480x480_80k_pascal_context_59/fcn_r101-d8_480x480_80k_pascal_context_59_20210416_110804-9a6f2c94.pth diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..aec4254 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_d6_r50-d16_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..d0bafc5 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_d6_r50-d16_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py new file mode 100644 index 0000000..29a9f98 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_d6_r50-d16_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py new file mode 100644 index 0000000..1f21c65 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_d6_r50-d16_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..af3f765 --- /dev/null 
+++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './fcn_d6_r50b-d16_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py new file mode 100644 index 0000000..e3d4d88 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './fcn_d6_r50b-d16_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..f30646e --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict( + backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)), + decode_head=dict(dilation=6), + auxiliary_head=dict(dilation=6)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..e4b623a --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_512x1024_80k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)), + decode_head=dict(dilation=6), + auxiliary_head=dict(dilation=6)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes.py new file mode 100644 index 0000000..01d8f27 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_769x769_40k_cityscapes.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)), + decode_head=dict(align_corners=True, dilation=6), + auxiliary_head=dict(align_corners=True, dilation=6), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes.py new file mode 100644 index 0000000..c5ef3b8 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50-d16_769x769_80k_cityscapes.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)), + decode_head=dict(align_corners=True, dilation=6), + auxiliary_head=dict(align_corners=True, dilation=6), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff 
--git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..0749ff1 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_d6_r50-d16_512x1024_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py new file mode 100644 index 0000000..fba8948 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_d6_r50-d16_769x769_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context.py new file mode 100644 index 0000000..f3a15b4 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_480x480_40k_pascal_context.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..908f4bf --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_480x480_40k_pascal_context_59.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py new file mode 100644 index 0000000..bdccfd9 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_480x480_80k_pascal_context.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..09cb612 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context_59.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_480x480_80k_pascal_context_59.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..7918dd1 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..528110d --- /dev/null +++ 
b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..1bf6780 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..09a5fe5 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..eafefaa --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..6d02945 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..6b4cc57 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..3503c76 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..c6739d9 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = './fcn_r101-d8_512x1024_80k_cityscapes.py' +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) 
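+# Editor's annotation (not part of the upstream config): 'Fp16OptimizerHook'
+# is MMCV's mixed-precision optimizer hook; passing a float loss_scale (512.
+# here) is understood to select a fixed loss-scaling factor, whereas
+# loss_scale='dynamic' would let MMCV adjust the scale during training.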
+# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r101b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..1b9bf60 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..f36eb02 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..5a1d29e --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r18-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r18-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..6644a58 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r18-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r18b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r18b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..92accfc --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r18b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r18b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r18b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..5dd34dd --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r18b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_40k_pascal_context.py new file mode 100644 index 0000000..7c57a6f --- /dev/null +++ 
b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_40k_pascal_context.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_context.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..4a81800 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_40k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context.py new file mode 100644 index 0000000..df6d25b --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_context.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..02507cc --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..401c6ea --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..990a085 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + 
'../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..9ca7fd2 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..17206a5 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..8cec429 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..ef194cb --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..fca98c1 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..7d75cd9 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/fcn_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + 
decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..28ef13f --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..106f7b6 --- /dev/null +++ b/downstream/mmsegmentation/configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/gcnet/README.md b/downstream/mmsegmentation/configs/gcnet/README.md new file mode 100644 index 0000000..47f2f43 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/README.md @@ -0,0 +1,67 @@ +# GCNet + +[GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond](https://arxiv.org/abs/1904.11492) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The Non-Local Network (NLNet) presents a pioneering approach for capturing long-range dependencies, via aggregating query-specific global context to each query position. However, through a rigorous empirical analysis, we have found that the global contexts modeled by non-local network are almost the same for different query positions within an image. In this paper, we take advantage of this finding to create a simplified network based on a query-independent formulation, which maintains the accuracy of NLNet but with significantly less computation. We further observe that this simplified design shares similar structure with Squeeze-Excitation Network (SENet). Hence we unify them into a three-step general framework for global context modeling. Within the general framework, we design a better instantiation, called the global context (GC) block, which is lightweight and can effectively model the global context. The lightweight property allows us to apply it for multiple layers in a backbone network to construct a global context network (GCNet), which generally outperforms both simplified NLNet and SENet on major benchmarks for various recognition tasks. The code and configurations are released at [this https URL](https://github.com/xvjiarui/GCNet). + + +
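To make the three-step framework from the abstract concrete, the sketch below is a minimal, self-contained PyTorch version of a global context (GC) block: query-independent context modeling via a softmax-pooled 1x1 attention map, a bottleneck transform with LayerNorm, and fusion by broadcast addition. It is an illustration only, not the `ContextBlock` that mmcv/mmsegmentation actually wire into these `gcnet_r50-d8` configs; the class name `GCBlockSketch` and the reduction `ratio` are assumptions chosen here for readability.

```python
import torch
import torch.nn as nn


class GCBlockSketch(nn.Module):
    """Illustrative GC block: context modeling -> transform -> fusion."""

    def __init__(self, channels: int, ratio: float = 1 / 4):
        super().__init__()
        hidden = max(1, int(channels * ratio))
        # (a) context modeling: a single 1x1 conv produces one attention map
        self.conv_mask = nn.Conv2d(channels, 1, kernel_size=1)
        self.softmax = nn.Softmax(dim=2)
        # (b) transform: bottleneck with LayerNorm (channel-add branch)
        self.channel_add = nn.Sequential(
            nn.Conv2d(channels, hidden, kernel_size=1),
            nn.LayerNorm([hidden, 1, 1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, channels, kernel_size=1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        # attention weights are shared by all query positions (query-independent)
        attn = self.conv_mask(x).view(n, 1, h * w)           # [N, 1, HW]
        attn = self.softmax(attn).unsqueeze(-1)              # [N, 1, HW, 1]
        feat = x.view(n, c, h * w).unsqueeze(1)              # [N, 1, C, HW]
        context = torch.matmul(feat, attn).view(n, c, 1, 1)  # [N, C, 1, 1]
        # (c) fusion: broadcast-add the transformed context to every position
        return x + self.channel_add(context)


if __name__ == '__main__':
    out = GCBlockSketch(64)(torch.randn(2, 64, 32, 32))
    print(out.shape)  # torch.Size([2, 64, 32, 32])
```

Because the pooled context is a single [N, C, 1, 1] tensor, the block adds only a small, resolution-independent cost per stage, which is why it can be inserted at multiple backbone layers as described above.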
    + +## Citation + +```bibtex +@inproceedings{cao2019gcnet, + title={Gcnet: Non-local networks meet squeeze-excitation networks and beyond}, + author={Cao, Yue and Xu, Jiarui and Lin, Stephen and Wei, Fangyun and Hu, Han}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision Workshops}, + pages={0--0}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| GCNet | R-50-D8 | 512x1024 | 40000 | 5.8 | 3.93 | 77.69 | 78.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436-4b0fd17b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436.log.json) | +| GCNet | R-101-D8 | 512x1024 | 40000 | 9.2 | 2.61 | 78.28 | 79.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436-5e62567f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436.log.json) | +| GCNet | R-50-D8 | 769x769 | 40000 | 6.5 | 1.67 | 78.12 | 80.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814-a26f4471.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814.log.json) | +| GCNet | R-101-D8 | 769x769 | 40000 | 10.5 | 1.13 | 78.95 | 80.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550-ca4f0a84.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550.log.json) | +| GCNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.48 | 80.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450-ef8f069b.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450.log.json) | +| GCNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.03 | 79.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450-778ebf69.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450.log.json) | +| GCNet | R-50-D8 | 769x769 | 80000 | - | - | 78.68 | 80.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516-4839565b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516.log.json) | +| GCNet | R-101-D8 | 769x769 | 80000 | - | - | 79.18 | 80.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628-8e043423.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GCNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.38 | 41.47 | 42.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146-91a6da41.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146.log.json) | +| GCNet | R-101-D8 | 512x512 | 80000 | 12 | 15.20 | 42.82 | 44.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811-c3fcb6dd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811.log.json) | +| GCNet | R-50-D8 | 512x512 | 160000 | - | - | 42.37 | 43.52 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122-d95f3e1f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122.log.json) | +| GCNet | R-101-D8 | 512x512 | 160000 | - | - | 43.69 | 45.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406-615528d7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| GCNet | R-50-D8 | 512x512 | 20000 | 5.8 | 23.35 | 76.42 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701-3cbfdab1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701.log.json) | +| GCNet | R-101-D8 | 512x512 | 20000 | 9.2 | 14.80 | 77.41 | 78.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713-6c720aa9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713.log.json) | +| GCNet | R-50-D8 | 512x512 | 40000 | - | - | 76.24 | 77.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105-9797336d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105.log.json) | +| GCNet | R-101-D8 | 512x512 | 40000 | - | - | 77.84 | 78.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806-1e38208d.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806.log.json) | diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet.yml b/downstream/mmsegmentation/configs/gcnet/gcnet.yml new file mode 100644 index 0000000..1d5eecf --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet.yml @@ -0,0 +1,305 @@ +Collections: +- Name: GCNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1904.11492 + Title: 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond' + README: configs/gcnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/gc_head.py#L10 + Version: v0.17.0 + Converted From: + Code: https://github.com/xvjiarui/GCNet +Models: +- Name: gcnet_r50-d8_512x1024_40k_cityscapes + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 254.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.69 + mIoU(ms+flip): 78.56 + Config: configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes/gcnet_r50-d8_512x1024_40k_cityscapes_20200618_074436-4b0fd17b.pth +- Name: gcnet_r101-d8_512x1024_40k_cityscapes + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 383.14 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.28 + mIoU(ms+flip): 79.34 + Config: configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes/gcnet_r101-d8_512x1024_40k_cityscapes_20200618_074436-5e62567f.pth +- Name: gcnet_r50-d8_769x769_40k_cityscapes + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 598.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.12 + mIoU(ms+flip): 80.09 + Config: configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_40k_cityscapes/gcnet_r50-d8_769x769_40k_cityscapes_20200618_182814-a26f4471.pth +- Name: gcnet_r101-d8_769x769_40k_cityscapes + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 884.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.95 + mIoU(ms+flip): 80.71 + Config: configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_40k_cityscapes/gcnet_r101-d8_769x769_40k_cityscapes_20200619_092550-ca4f0a84.pth +- Name: gcnet_r50-d8_512x1024_80k_cityscapes + In Collection: 
GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.48 + mIoU(ms+flip): 80.01 + Config: configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes/gcnet_r50-d8_512x1024_80k_cityscapes_20200618_074450-ef8f069b.pth +- Name: gcnet_r101-d8_512x1024_80k_cityscapes + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.03 + mIoU(ms+flip): 79.84 + Config: configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes/gcnet_r101-d8_512x1024_80k_cityscapes_20200618_074450-778ebf69.pth +- Name: gcnet_r50-d8_769x769_80k_cityscapes + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.68 + mIoU(ms+flip): 80.66 + Config: configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_769x769_80k_cityscapes/gcnet_r50-d8_769x769_80k_cityscapes_20200619_092516-4839565b.pth +- Name: gcnet_r101-d8_769x769_80k_cityscapes + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.18 + mIoU(ms+flip): 80.71 + Config: configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_769x769_80k_cityscapes/gcnet_r101-d8_769x769_80k_cityscapes_20200619_092628-8e043423.pth +- Name: gcnet_r50-d8_512x512_80k_ade20k + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 42.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.47 + mIoU(ms+flip): 42.85 + Config: configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_80k_ade20k/gcnet_r50-d8_512x512_80k_ade20k_20200614_185146-91a6da41.pth +- Name: gcnet_r101-d8_512x512_80k_ade20k + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 65.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.82 + mIoU(ms+flip): 44.54 + Config: configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_80k_ade20k/gcnet_r101-d8_512x512_80k_ade20k_20200615_020811-c3fcb6dd.pth +- Name: gcnet_r50-d8_512x512_160k_ade20k + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.37 + mIoU(ms+flip): 43.52 + Config: configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_160k_ade20k/gcnet_r50-d8_512x512_160k_ade20k_20200615_224122-d95f3e1f.pth +- Name: gcnet_r101-d8_512x512_160k_ade20k + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.69 + mIoU(ms+flip): 45.21 + Config: configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_160k_ade20k/gcnet_r101-d8_512x512_160k_ade20k_20200615_225406-615528d7.pth +- Name: gcnet_r50-d8_512x512_20k_voc12aug + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 42.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.8 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.42 + mIoU(ms+flip): 77.51 + Config: configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_20k_voc12aug/gcnet_r50-d8_512x512_20k_voc12aug_20200617_165701-3cbfdab1.pth +- Name: gcnet_r101-d8_512x512_20k_voc12aug + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 67.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.41 + mIoU(ms+flip): 78.56 + Config: configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_20k_voc12aug/gcnet_r101-d8_512x512_20k_voc12aug_20200617_165713-6c720aa9.pth +- Name: gcnet_r50-d8_512x512_40k_voc12aug + In Collection: GCNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.24 + mIoU(ms+flip): 77.63 + Config: configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r50-d8_512x512_40k_voc12aug/gcnet_r50-d8_512x512_40k_voc12aug_20200613_195105-9797336d.pth +- Name: gcnet_r101-d8_512x512_40k_voc12aug + In Collection: GCNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.84 + mIoU(ms+flip): 78.59 + Config: configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/gcnet/gcnet_r101-d8_512x512_40k_voc12aug/gcnet_r101-d8_512x512_40k_voc12aug_20200613_185806-1e38208d.pth diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..27bd942 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py 
b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..7f0f83f --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..9888120 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..1b70ca8 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..b17c7a1 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..a2183fc --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..08a6031 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..5efb613 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './gcnet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..610467c --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', 
'../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..155e28f --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..1549a4d --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..a496204 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..d85cf65 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..89d5e1a --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..332495d --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', 
crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..d6d9cb1 --- /dev/null +++ b/downstream/mmsegmentation/configs/gcnet/gcnet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/gcnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/gpvit/gpvit_l1_segformer.py b/downstream/mmsegmentation/configs/gpvit/gpvit_l1_segformer.py new file mode 100644 index 0000000..e722a33 --- /dev/null +++ b/downstream/mmsegmentation/configs/gpvit/gpvit_l1_segformer.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/segformer/segformer.py', + '../_base_/segformer/ade20k_repeat.py', + '../_base_/segformer/default_runtime.py', + '../_base_/segformer/schedule_160k_adamw.py' +] + +backbone_channels = 216 +checkpoint_url = '' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='GPViTSeg', + arch='L1', + drop_path_rate=0.1, + out_indices=(2, 5, 8, 11), + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix='backbone.'), + convert_syncbn=True, + att_with_cp=False, + group_with_cp=False), + decode_head=dict( + type='SegformerHead', + in_channels=[backbone_channels, backbone_channels, backbone_channels, backbone_channels], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# optimizer +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006 * 2, + betas=(0.9, 0.999), + weight_decay=0.01, + constructor='CustomOptimizerConstructor', + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'head': dict(lr_mult=10.), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0)}) +) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +data = dict(samples_per_gpu=2, workers_per_gpu=2) + +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=16000) +evaluation = dict(interval=16000, metric='mIoU', pre_eval=True) + + +work_dir = 'work_dirs/gpvit_l1_segformer' \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/gpvit/gpvit_l1_upernet.py b/downstream/mmsegmentation/configs/gpvit/gpvit_l1_upernet.py new file mode 100644 index 0000000..5fa111f --- /dev/null +++ b/downstream/mmsegmentation/configs/gpvit/gpvit_l1_upernet.py @@ -0,0 +1,85 @@ +_base_ = [ + '../../configs/_base_/segformer/ade20k_repeat.py', + '../../configs/_base_/default_runtime.py', + '../../configs/_base_/schedules/schedule_160k.py' +] + +backbone_channels = 216 +checkpoint_url = '' + +norm_cfg = dict(type='SyncBN', 
requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='GPViTSeg', + arch='L1', + out_indices=(2, 5, 8, 11), + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix='backbone.'), + drop_path_rate=0.1, + convert_syncbn=True, + att_with_cp=False, + group_with_cp=False), + neck=None, + decode_head=dict( + type='UPerHead', + in_channels=[backbone_channels, backbone_channels, backbone_channels, backbone_channels], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=backbone_channels, + in_index=3, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) # yapf: disable +# ---- +# AdamW optimizer, no weight decay for position embedding & layer norm in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006 * 2, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +evaluation = dict(interval=160000, metric='mIoU', pre_eval=True) + +work_dir = 'work_dirs/gpvit_l1_upernet' \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/gpvit/gpvit_l2_segformer.py b/downstream/mmsegmentation/configs/gpvit/gpvit_l2_segformer.py new file mode 100644 index 0000000..f179230 --- /dev/null +++ b/downstream/mmsegmentation/configs/gpvit/gpvit_l2_segformer.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/segformer/segformer.py', + '../_base_/segformer/ade20k_repeat.py', + '../_base_/segformer/default_runtime.py', + '../_base_/segformer/schedule_160k_adamw.py' +] + +backbone_channels = 348 +checkpoint_url = '' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='GPViTSeg', + arch='L2', + drop_path_rate=0.1, + out_indices=(2, 5, 8, 11), + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix='backbone.'), + convert_syncbn=True, + att_with_cp=False, + group_with_cp=False), + decode_head=dict( + type='SegformerHead', + in_channels=[backbone_channels, backbone_channels, backbone_channels, backbone_channels], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# optimizer +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006 * 2, + betas=(0.9, 0.999), + weight_decay=0.01, + constructor='CustomOptimizerConstructor', + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ 
+ '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'head': dict(lr_mult=10.), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0)}) +) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +data = dict(samples_per_gpu=2, workers_per_gpu=2) + +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=16000) +evaluation = dict(interval=16000, metric='mIoU', pre_eval=True) + + +work_dir = 'work_dirs/gpvit_l2_segformer' \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/gpvit/gpvit_l2_upernet.py b/downstream/mmsegmentation/configs/gpvit/gpvit_l2_upernet.py new file mode 100644 index 0000000..219b862 --- /dev/null +++ b/downstream/mmsegmentation/configs/gpvit/gpvit_l2_upernet.py @@ -0,0 +1,85 @@ +_base_ = [ + '../../configs/_base_/segformer/ade20k_repeat.py', + '../../configs/_base_/default_runtime.py', + '../../configs/_base_/schedules/schedule_160k.py' +] + +backbone_channels = 348 +checkpoint_url = '' + +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='GPViTSeg', + arch='L2', + out_indices=(2, 5, 8, 11), + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix='backbone.'), + drop_path_rate=0.1, + convert_syncbn=True, + att_with_cp=False, + group_with_cp=False), + neck=None, + decode_head=dict( + type='UPerHead', + in_channels=[backbone_channels, backbone_channels, backbone_channels, backbone_channels], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=backbone_channels, + in_index=3, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) # yapf: disable +# ---- +# AdamW optimizer, no weight decay for position embedding & layer norm in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006 * 2, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +evaluation = dict(interval=160000, metric='mIoU', pre_eval=True) + +work_dir = 'work_dirs/gpvit_l2_upernet' \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/gpvit/gpvit_l3_segformer.py b/downstream/mmsegmentation/configs/gpvit/gpvit_l3_segformer.py new file mode 100644 index 0000000..b65ecc9 --- /dev/null 
+++ b/downstream/mmsegmentation/configs/gpvit/gpvit_l3_segformer.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/segformer/segformer.py', + '../_base_/segformer/ade20k_repeat.py', + '../_base_/segformer/default_runtime.py', + '../_base_/segformer/schedule_160k_adamw.py' +] + +backbone_channels = 432 +checkpoint_url = '' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='GPViTSeg', + arch='L3', + drop_path_rate=0.2, + out_indices=(2, 5, 8, 11), + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix='backbone.'), + convert_syncbn=True, + att_with_cp=False, + group_with_cp=False), + decode_head=dict( + type='SegformerHead', + in_channels=[backbone_channels, backbone_channels, backbone_channels, backbone_channels], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# optimizer +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006 * 2, + betas=(0.9, 0.999), + weight_decay=0.01, + constructor='CustomOptimizerConstructor', + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'head': dict(lr_mult=10.), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0)}) +) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +data = dict(samples_per_gpu=2, workers_per_gpu=2) + +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=16000) +evaluation = dict(interval=16000, metric='mIoU', pre_eval=True) + + +work_dir = 'work_dirs/gpvit_l3_segformer' \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/gpvit/gpvit_l3_upernet.py b/downstream/mmsegmentation/configs/gpvit/gpvit_l3_upernet.py new file mode 100644 index 0000000..688d2dd --- /dev/null +++ b/downstream/mmsegmentation/configs/gpvit/gpvit_l3_upernet.py @@ -0,0 +1,85 @@ +_base_ = [ + '../../configs/_base_/segformer/ade20k_repeat.py', + '../../configs/_base_/default_runtime.py', + '../../configs/_base_/schedules/schedule_160k.py' +] + +backbone_channels = 432 +checkpoint_url = '' + +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='GPViTSeg', + arch='L3', + out_indices=(2, 5, 8, 11), + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix='backbone.'), + drop_path_rate=0.2, + convert_syncbn=True, + att_with_cp=False, + group_with_cp=False), + neck=None, + decode_head=dict( + type='UPerHead', + in_channels=[backbone_channels, backbone_channels, backbone_channels, backbone_channels], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=backbone_channels, + in_index=3, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + 
loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) # yapf: disable +# ---- +# AdamW optimizer, no weight decay for position embedding & layer norm in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006 * 2, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +evaluation = dict(interval=160000, metric='mIoU', pre_eval=True) + +work_dir = 'work_dirs/gpvit_l3_upernet' \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/gpvit/gpvit_l4_segformer.py b/downstream/mmsegmentation/configs/gpvit/gpvit_l4_segformer.py new file mode 100644 index 0000000..e218cd8 --- /dev/null +++ b/downstream/mmsegmentation/configs/gpvit/gpvit_l4_segformer.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/segformer/segformer.py', + '../_base_/segformer/ade20k_repeat.py', + '../_base_/segformer/default_runtime.py', + '../_base_/segformer/schedule_160k_adamw.py' +] + +backbone_channels = 624 +checkpoint_url = '' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='GPViTSeg', + arch='L4', + drop_path_rate=0.2, + out_indices=(2, 5, 8, 11), + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix='backbone.'), + convert_syncbn=True, + att_with_cp=False, + group_with_cp=False), + decode_head=dict( + type='SegformerHead', + in_channels=[backbone_channels, backbone_channels, backbone_channels, backbone_channels], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# optimizer +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006 * 2, + betas=(0.9, 0.999), + weight_decay=0.01, + constructor='CustomOptimizerConstructor', + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'head': dict(lr_mult=10.), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0)}) +) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +data = dict(samples_per_gpu=2, workers_per_gpu=2) + +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=16000) +evaluation = dict(interval=16000, metric='mIoU', pre_eval=True) + + +work_dir = 'work_dirs/gpvit_l4_segformer' \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/gpvit/gpvit_l4_upernet.py 
b/downstream/mmsegmentation/configs/gpvit/gpvit_l4_upernet.py new file mode 100644 index 0000000..f0bbd19 --- /dev/null +++ b/downstream/mmsegmentation/configs/gpvit/gpvit_l4_upernet.py @@ -0,0 +1,85 @@ +_base_ = [ + '../_base_/segformer/ade20k_repeat.py', + '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] + +backbone_channels = 624 +checkpoint_url = '' + +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='GPViTSeg', + arch='L4', + out_indices=(2, 5, 8, 11), + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_url, prefix='backbone.'), + drop_path_rate=0.2, + convert_syncbn=True, + att_with_cp=False, + group_with_cp=False), + neck=None, + decode_head=dict( + type='UPerHead', + in_channels=[backbone_channels, backbone_channels, backbone_channels, backbone_channels], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=backbone_channels, + in_index=3, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) # yapf: disable +# ---- +# AdamW optimizer, no weight decay for position embedding & layer norm in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006 * 2, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0), + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + '.pos_embed': dict(decay_mult=0.0), + '.group_token': dict(decay_mult=0.0), + '.dw_norm': dict(decay_mult=0.0) + })) + + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +evaluation = dict(interval=160000, metric='mIoU', pre_eval=True) + +work_dir = 'work_dirs/gpvit_l4_upernet' \ No newline at end of file diff --git a/downstream/mmsegmentation/configs/hrnet/README.md b/downstream/mmsegmentation/configs/hrnet/README.md new file mode 100644 index 0000000..225a06f --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/README.md @@ -0,0 +1,121 @@ +# HRNet + +[Deep High-Resolution Representation Learning for Human Pose Estimation](https://arxiv.org/abs/1908.07919) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions \emph{in series} (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. 
There are two key characteristics: (i) Connect the high-to-low resolution convolution streams \emph{in parallel}; (ii) Repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. All the codes are available at [this https URL](https://github.com/HRNet).
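The two characteristics above (parallel multi-resolution streams with repeated cross-resolution exchange) are easier to see in code. Below is a minimal PyTorch sketch added for illustration; it is not the official HRNet implementation, and the module name, the two-stream simplification, and the 18/36 channel widths (loosely echoing the HRNetV2p-W18 naming) are assumptions made only for this example.

```python
# Illustrative sketch of HRNet's core idea (NOT the official implementation):
# keep a high- and a low-resolution stream in parallel and repeatedly fuse them.
import torch
import torch.nn as nn
import torch.nn.functional as F


class TwoStreamExchangeBlock(nn.Module):
    """One exchange step between a high-res and a low-res feature stream."""

    def __init__(self, c_high=18, c_low=36):
        super().__init__()
        self.high_branch = nn.Sequential(
            nn.Conv2d(c_high, c_high, 3, padding=1),
            nn.BatchNorm2d(c_high), nn.ReLU(inplace=True))
        self.low_branch = nn.Sequential(
            nn.Conv2d(c_low, c_low, 3, padding=1),
            nn.BatchNorm2d(c_low), nn.ReLU(inplace=True))
        # 1x1 convs to match channel widths before the resolutions are exchanged
        self.high_to_low = nn.Conv2d(c_high, c_low, 1)
        self.low_to_high = nn.Conv2d(c_low, c_high, 1)

    def forward(self, x_high, x_low):
        # assumes x_low has half the spatial size of x_high
        high = self.high_branch(x_high)
        low = self.low_branch(x_low)
        # exchange: downsample high -> low, upsample low -> high, then add
        high_down = F.avg_pool2d(self.high_to_low(high), kernel_size=2)
        low_up = F.interpolate(self.low_to_high(low), size=high.shape[-2:],
                               mode='bilinear', align_corners=False)
        return high + low_up, low + high_down


if __name__ == '__main__':
    block = TwoStreamExchangeBlock()
    out_h, out_l = block(torch.randn(1, 18, 64, 64), torch.randn(1, 36, 32, 32))
    print(out_h.shape, out_l.shape)  # (1, 18, 64, 64) and (1, 36, 32, 32)
```

The design point to notice is that the high-resolution stream is never collapsed and re-expanded: it is kept alive and repeatedly enriched with context from the coarser stream, which is why the FCN configs below can feed spatially precise multi-resolution features straight into the decode head.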
    + +## Citation + +```bibtext +@inproceedings{SunXLW19, + title={Deep High-Resolution Representation Learning for Human Pose Estimation}, + author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang}, + booktitle={CVPR}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W18-Small | 512x1024 | 40000 | 1.7 | 23.74 | 73.86 | 75.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216.log.json) | +| FCN | HRNetV2p-W18 | 512x1024 | 40000 | 2.9 | 12.97 | 77.19 | 78.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216.log.json) | +| FCN | HRNetV2p-W48 | 512x1024 | 40000 | 6.2 | 6.42 | 78.48 | 79.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240-a989b146.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240.log.json) | +| FCN | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 75.31 | 77.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700.log.json) | +| FCN | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.65 | 80.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255.log.json) | +| FCN | 
HRNetV2p-W48 | 512x1024 | 80000 | - | - | 79.93 | 80.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606-58ea95d6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606.log.json) | +| FCN | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 76.31 | 78.31 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901.log.json) | +| FCN | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 78.80 | 80.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822.log.json) | +| FCN | HRNetV2p-W48 | 512x1024 | 160000 | - | - | 80.65 | 81.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 3.8 | 38.66 | 31.38 | 32.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345-77fc814a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 80000 | 4.9 | 22.57 | 36.27 | 37.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20210827_114910-6c9382c0.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20210827_114910.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 80000 | 8.2 | 21.23 | 41.90 | 43.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946-7ba5258d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946.log.json) | +| FCN | HRNetV2p-W18-Small | 512x512 | 160000 | - | - | 33.07 | 34.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20210829_174739-f1e7c2e7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20210829_174739.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 160000 | - | - | 36.79 | 38.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426-ca961836.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 160000 | - | - | 42.02 | 43.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | HRNetV2p-W18-Small | 512x512 | 20000 | 1.8 | 43.36 | 65.5 | 68.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20210829_174910-0aceadb4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20210829_174910.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 20000 | 2.9 | 23.48 | 72.30 | 74.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503-488d45f7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 20000 | 6.2 | 22.05 | 75.87 | 78.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419-89de05cd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419.log.json) | +| FCN | HRNetV2p-W18-Small | 512x512 | 40000 | - | - | 66.61 | 70.00 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648-4f8d6e7f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 40000 | - | - | 72.90 | 75.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401-1b4b76cd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 40000 | - | - | 76.24 | 78.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111-1b0f18bc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | HRNetV2p-W48 | 480x480 | 40000 | 6.1 | 8.86 | 45.14 | 47.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context_20200911_164852-667d00b0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context-20200911_164852.log.json) | +| FCN | HRNetV2p-W48 | 480x480 | 80000 | - 
| - | 45.84 | 47.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context_20200911_155322-847a6711.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context-20200911_155322.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | HRNetV2p-W48 | 480x480 | 40000 | - | - | 50.33 | 52.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59_20210410_122738-b808b8b2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59-20210410_122738.log.json) | +| FCN | HRNetV2p-W48 | 480x480 | 80000 | - | - | 51.12 | 53.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59_20210411_003240-3ae7081e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59-20210411_003240.log.json) | + +### LoveDA + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 1.59 | 24.87 | 49.28 | 49.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_80k_loveda.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_loveda/fcn_hr18s_512x512_80k_loveda_20211210_203228-60a86a7a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_loveda/fcn_hr18s_512x512_80k_loveda_20211210_203228.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 80000 | 2.76 | 12.92 | 
50.81 | 50.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_80k_loveda.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_loveda/fcn_hr18_512x512_80k_loveda_20211210_203952-93d9c3b3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_loveda/fcn_hr18_512x512_80k_loveda_20211210_203952.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 80000 | 6.20 | 9.61 | 51.42 | 51.64 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_80k_loveda.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_loveda/fcn_hr48_512x512_80k_loveda_20211211_044756-67072f55.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_loveda/fcn_hr48_512x512_80k_loveda_20211211_044756.log.json) | + +### Potsdam + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 1.58 | 36.00 | 77.64 | 78.8 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_80k_potsdam.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_potsdam/fcn_hr18s_512x512_80k_potsdam_20211218_205517-ba32af63.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_potsdam/fcn_hr18s_512x512_80k_potsdam_20211218_205517.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 80000 | 2.76 | 19.25 | 78.26 | 79.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_80k_potsdam.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_potsdam/fcn_hr18_512x512_80k_potsdam_20211218_205517-5d0387ad.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_potsdam/fcn_hr18_512x512_80k_potsdam_20211218_205517.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 80000 | 6.20 | 16.42 | 78.39 | 79.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_80k_potsdam.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_potsdam/fcn_hr48_512x512_80k_potsdam_20211219_020601-97434c78.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_potsdam/fcn_hr48_512x512_80k_potsdam_20211219_020601.log.json) | + +### Vaihingen + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 1.58 | 38.11 | 71.81 | 73.1 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen/fcn_hr18s_4x4_512x512_80k_vaihingen_20211231_230909-b23aae02.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen/fcn_hr18s_4x4_512x512_80k_vaihingen_20211231_230909.log.json) | +| FCN | HRNetV2p-W18 | 512x512 | 80000 | 2.76 | 19.55 | 72.57 | 74.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen/fcn_hr18_4x4_512x512_80k_vaihingen_20211231_231216-2ec3ae8a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen/fcn_hr18_4x4_512x512_80k_vaihingen_20211231_231216.log.json) | +| FCN | HRNetV2p-W48 | 512x512 | 80000 | 6.20 | 17.25 | 72.50 | 73.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen/fcn_hr48_4x4_512x512_80k_vaihingen_20211231_231244-7133cb22.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen/fcn_hr48_4x4_512x512_80k_vaihingen_20211231_231244.log.json) | + +### iSAID + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | HRNetV2p-W18-Small | 896x896 | 80000 | 4.95 | 13.84 | 62.30 | 62.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_4x4_896x896_80k_isaid.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_896x896_80k_isaid/fcn_hr18s_4x4_896x896_80k_isaid_20220118_001603-3cc0769b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_896x896_80k_isaid/fcn_hr18s_4x4_896x896_80k_isaid_20220118_001603.log.json) | +| FCN | HRNetV2p-W18 | 896x896 | 80000 | 8.30 | 7.71 | 65.06 | 65.60 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_4x4_896x896_80k_isaid.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_896x896_80k_isaid/fcn_hr18_4x4_896x896_80k_isaid_20220110_182230-49bf752e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_896x896_80k_isaid/fcn_hr18_4x4_896x896_80k_isaid_20220110_182230.log.json) | +| FCN | HRNetV2p-W48 | 896x896 | 80000 | 16.89 | 7.34 | 67.80 | 68.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_4x4_896x896_80k_isaid.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_896x896_80k_isaid/fcn_hr48_4x4_896x896_80k_isaid_20220114_174643-547fc420.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_896x896_80k_isaid/fcn_hr48_4x4_896x896_80k_isaid_20220114_174643.log.json) | + +Note: + +- `896x896` is the Crop Size of iSAID dataset, which is followed by the implementation of [PointFlow: Flowing Semantics Through Points for Aerial Image Segmentation](https://arxiv.org/pdf/2103.06564.pdf) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context.py new file mode 100644 index 0000000..5ff05aa --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..d2eecf0 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context_59.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context_59.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context.py new file mode 100644 index 0000000..cf315a4 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..9cbf410 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context_59.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context_59.py', + 
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..3585a7c --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/vaihingen.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict(decode_head=dict(num_classes=6)) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_4x4_896x896_80k_isaid.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_4x4_896x896_80k_isaid.py new file mode 100644 index 0000000..62e6d6b --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_4x4_896x896_80k_isaid.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/isaid.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict(decode_head=dict(num_classes=16)) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py new file mode 100644 index 0000000..9f04e93 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..99760c3 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..a653dda --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py new file mode 100644 index 0000000..45ed99b --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict(decode_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py new file mode 100644 index 0000000..f06448b --- /dev/null +++ 
b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' +] +model = dict(decode_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py new file mode 100644 index 0000000..d74e959 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(decode_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py new file mode 100644 index 0000000..52bc9f5 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict(decode_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_loveda.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_loveda.py new file mode 100644 index 0000000..3bc4d0a --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_loveda.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/loveda.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict(decode_head=dict(num_classes=7)) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_potsdam.py new file mode 100644 index 0000000..043017f --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18_512x512_80k_potsdam.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fcn_hr18.py', '../_base_/datasets/potsdam.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict(decode_head=dict(num_classes=6)) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_40k_pascal_context.py new file mode 100644 index 0000000..d099310 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_40k_pascal_context.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_480x480_40k_pascal_context.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..0412c64 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_40k_pascal_context_59.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_480x480_40k_pascal_context_59.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + 
stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_80k_pascal_context.py new file mode 100644 index 0000000..584b713 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_80k_pascal_context.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_480x480_80k_pascal_context.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..babd88d --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_480x480_80k_pascal_context_59.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_480x480_80k_pascal_context_59.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..5828fe1 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4x4_512x512_80k_vaihingen.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_4x4_896x896_80k_isaid.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_4x4_896x896_80k_isaid.py new file mode 100644 index 0000000..d6f6c65 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_4x4_896x896_80k_isaid.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_4x4_896x896_80k_isaid.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py new file mode 100644 index 0000000..ddbe380 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_512x1024_160k_cityscapes.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py 
b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..4e31d26 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_512x1024_40k_cityscapes.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..ee2831d --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py new file mode 100644 index 0000000..22a3ce0 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_512x512_160k_ade20k.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py new file mode 100644 index 0000000..d0de5df --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_512x512_20k_voc12aug.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py new file mode 100644 index 0000000..409db3c --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_512x512_40k_voc12aug.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py new file mode 100644 index 0000000..a840097 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_512x512_80k_ade20k.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + 
backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_loveda.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_loveda.py new file mode 100644 index 0000000..b39769f --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_loveda.py @@ -0,0 +1,11 @@ +_base_ = './fcn_hr18_512x512_80k_loveda.py' +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://msra/hrnetv2_w18_small'), + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_potsdam.py new file mode 100644 index 0000000..0555127 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr18s_512x512_80k_potsdam.py @@ -0,0 +1,9 @@ +_base_ = './fcn_hr18_512x512_80k_potsdam.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py new file mode 100644 index 0000000..0e2d96c --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_480x480_40k_pascal_context.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..655b460 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_40k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_480x480_40k_pascal_context_59.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_80k_pascal_context.py new file mode 100644 index 0000000..e28164e --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_80k_pascal_context.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_480x480_80k_pascal_context.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + 
in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..012ad0a --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_480x480_80k_pascal_context_59.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..7cb22d8 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4x4_512x512_80k_vaihingen.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_4x4_896x896_80k_isaid.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_4x4_896x896_80k_isaid.py new file mode 100644 index 0000000..55cf1b5 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_4x4_896x896_80k_isaid.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_4x4_896x896_80k_isaid.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py new file mode 100644 index 0000000..394a61c --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_512x1024_160k_cityscapes.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..d37ab1d --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_512x1024_40k_cityscapes.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git 
a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..a9bab32 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py new file mode 100644 index 0000000..dff4fea --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_512x512_160k_ade20k.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py new file mode 100644 index 0000000..a8d1deb --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_512x512_20k_voc12aug.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py new file mode 100644 index 0000000..1084a57 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_512x512_40k_voc12aug.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py new file mode 100644 index 0000000..7eca7fa --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_512x512_80k_ade20k.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_loveda.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_loveda.py new file mode 100644 index 
0000000..269dbf6 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_loveda.py @@ -0,0 +1,11 @@ +_base_ = './fcn_hr18_512x512_80k_loveda.py' +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w48'), + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_potsdam.py new file mode 100644 index 0000000..608fee3 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/fcn_hr48_512x512_80k_potsdam.py @@ -0,0 +1,10 @@ +_base_ = './fcn_hr18_512x512_80k_potsdam.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=dict( + in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384]))) diff --git a/downstream/mmsegmentation/configs/hrnet/hrnet.yml b/downstream/mmsegmentation/configs/hrnet/hrnet.yml new file mode 100644 index 0000000..960a937 --- /dev/null +++ b/downstream/mmsegmentation/configs/hrnet/hrnet.yml @@ -0,0 +1,695 @@ +Models: +- Name: fcn_hr18s_512x1024_40k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 42.12 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.86 + mIoU(ms+flip): 75.91 + Config: configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth +- Name: fcn_hr18_512x1024_40k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 77.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.19 + mIoU(ms+flip): 78.92 + Config: configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth +- Name: fcn_hr48_512x1024_40k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 155.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.48 + mIoU(ms+flip): 79.69 + Config: configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240-a989b146.pth +- Name: fcn_hr18s_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic 
Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.31 + mIoU(ms+flip): 77.48 + Config: configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth +- Name: fcn_hr18_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.65 + mIoU(ms+flip): 80.35 + Config: configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth +- Name: fcn_hr48_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.93 + mIoU(ms+flip): 80.72 + Config: configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606-58ea95d6.pth +- Name: fcn_hr18s_512x1024_160k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.31 + mIoU(ms+flip): 78.31 + Config: configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth +- Name: fcn_hr18_512x1024_160k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.8 + mIoU(ms+flip): 80.74 + Config: configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth +- Name: fcn_hr48_512x1024_160k_cityscapes + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.65 + mIoU(ms+flip): 81.92 + Config: configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth +- Name: fcn_hr18s_512x512_80k_ade20k + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 25.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 31.38 + mIoU(ms+flip): 32.45 + Config: configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345-77fc814a.pth +- Name: fcn_hr18_512x512_80k_ade20k + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 44.31 + hardware: V100 + backend: PyTorch 
+ batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 36.27 + mIoU(ms+flip): 37.28 + Config: configs/hrnet/fcn_hr18_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20210827_114910-6c9382c0.pth +- Name: fcn_hr48_512x512_80k_ade20k + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.9 + mIoU(ms+flip): 43.27 + Config: configs/hrnet/fcn_hr48_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946-7ba5258d.pth +- Name: fcn_hr18s_512x512_160k_ade20k + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 33.07 + mIoU(ms+flip): 34.56 + Config: configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20210829_174739-f1e7c2e7.pth +- Name: fcn_hr18_512x512_160k_ade20k + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 36.79 + mIoU(ms+flip): 38.58 + Config: configs/hrnet/fcn_hr18_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426-ca961836.pth +- Name: fcn_hr48_512x512_160k_ade20k + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.02 + mIoU(ms+flip): 43.86 + Config: configs/hrnet/fcn_hr48_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth +- Name: fcn_hr18s_512x512_20k_voc12aug + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 23.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.8 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 65.5 + mIoU(ms+flip): 68.89 + Config: configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20210829_174910-0aceadb4.pth +- Name: fcn_hr18_512x512_20k_voc12aug + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 42.59 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 72.3 + mIoU(ms+flip): 74.71 + Config: configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503-488d45f7.pth +- Name: fcn_hr48_512x512_20k_voc12aug + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 45.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 75.87 + mIoU(ms+flip): 78.58 + Config: configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419-89de05cd.pth +- Name: fcn_hr18s_512x512_40k_voc12aug + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 66.61 + mIoU(ms+flip): 70.0 + Config: configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648-4f8d6e7f.pth +- Name: fcn_hr18_512x512_40k_voc12aug + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 72.9 + mIoU(ms+flip): 75.59 + Config: configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401-1b4b76cd.pth +- Name: fcn_hr48_512x512_40k_voc12aug + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.24 + mIoU(ms+flip): 78.49 + Config: configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111-1b0f18bc.pth +- Name: fcn_hr48_480x480_40k_pascal_context + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 112.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 45.14 + mIoU(ms+flip): 47.42 + Config: configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context_20200911_164852-667d00b0.pth +- Name: fcn_hr48_480x480_80k_pascal_context + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 45.84 + mIoU(ms+flip): 47.84 + Config: configs/hrnet/fcn_hr48_480x480_80k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context_20200911_155322-847a6711.pth +- Name: fcn_hr48_480x480_40k_pascal_context_59 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + 
Dataset: Pascal Context 59 + Metrics: + mIoU: 50.33 + mIoU(ms+flip): 52.83 + Config: configs/hrnet/fcn_hr48_480x480_40k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59_20210410_122738-b808b8b2.pth +- Name: fcn_hr48_480x480_80k_pascal_context_59 + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 51.12 + mIoU(ms+flip): 53.56 + Config: configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59_20210411_003240-3ae7081e.pth +- Name: fcn_hr18s_512x512_80k_loveda + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 40.21 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.59 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 49.28 + mIoU(ms+flip): 49.42 + Config: configs/hrnet/fcn_hr18s_512x512_80k_loveda.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_loveda/fcn_hr18s_512x512_80k_loveda_20211210_203228-60a86a7a.pth +- Name: fcn_hr18_512x512_80k_loveda + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 77.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.76 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 50.81 + mIoU(ms+flip): 50.95 + Config: configs/hrnet/fcn_hr18_512x512_80k_loveda.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_loveda/fcn_hr18_512x512_80k_loveda_20211210_203952-93d9c3b3.pth +- Name: fcn_hr48_512x512_80k_loveda + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 104.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 51.42 + mIoU(ms+flip): 51.64 + Config: configs/hrnet/fcn_hr48_512x512_80k_loveda.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_loveda/fcn_hr48_512x512_80k_loveda_20211211_044756-67072f55.pth +- Name: fcn_hr18s_512x512_80k_potsdam + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 27.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.58 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 77.64 + mIoU(ms+flip): 78.8 + Config: configs/hrnet/fcn_hr18s_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_potsdam/fcn_hr18s_512x512_80k_potsdam_20211218_205517-ba32af63.pth +- Name: fcn_hr18_512x512_80k_potsdam + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 51.95 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + 
resolution: (512,512) + Training Memory (GB): 2.76 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.26 + mIoU(ms+flip): 79.24 + Config: configs/hrnet/fcn_hr18_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_potsdam/fcn_hr18_512x512_80k_potsdam_20211218_205517-5d0387ad.pth +- Name: fcn_hr48_512x512_80k_potsdam + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 60.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.39 + mIoU(ms+flip): 79.34 + Config: configs/hrnet/fcn_hr48_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_potsdam/fcn_hr48_512x512_80k_potsdam_20211219_020601-97434c78.pth +- Name: fcn_hr18s_4x4_512x512_80k_vaihingen + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 26.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.58 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 71.81 + mIoU(ms+flip): 73.1 + Config: configs/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen/fcn_hr18s_4x4_512x512_80k_vaihingen_20211231_230909-b23aae02.pth +- Name: fcn_hr18_4x4_512x512_80k_vaihingen + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 51.15 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.76 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.57 + mIoU(ms+flip): 74.09 + Config: configs/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen/fcn_hr18_4x4_512x512_80k_vaihingen_20211231_231216-2ec3ae8a.pth +- Name: fcn_hr48_4x4_512x512_80k_vaihingen + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 57.97 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.5 + mIoU(ms+flip): 73.52 + Config: configs/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen/fcn_hr48_4x4_512x512_80k_vaihingen_20211231_231244-7133cb22.pth +- Name: fcn_hr18s_4x4_896x896_80k_isaid + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 72.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 4.95 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 62.3 + mIoU(ms+flip): 62.97 + Config: configs/hrnet/fcn_hr18s_4x4_896x896_80k_isaid.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_896x896_80k_isaid/fcn_hr18s_4x4_896x896_80k_isaid_20220118_001603-3cc0769b.pth +- Name: fcn_hr18_4x4_896x896_80k_isaid + In Collection: FCN + Metadata: + backbone: HRNetV2p-W18 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 129.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 8.3 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 65.06 + mIoU(ms+flip): 65.6 + Config: configs/hrnet/fcn_hr18_4x4_896x896_80k_isaid.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_896x896_80k_isaid/fcn_hr18_4x4_896x896_80k_isaid_20220110_182230-49bf752e.pth +- Name: fcn_hr48_4x4_896x896_80k_isaid + In Collection: FCN + Metadata: + backbone: HRNetV2p-W48 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 136.24 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 16.89 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 67.8 + mIoU(ms+flip): 68.53 + Config: configs/hrnet/fcn_hr48_4x4_896x896_80k_isaid.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_896x896_80k_isaid/fcn_hr48_4x4_896x896_80k_isaid_20220114_174643-547fc420.pth diff --git a/downstream/mmsegmentation/configs/icnet/README.md b/downstream/mmsegmentation/configs/icnet/README.md new file mode 100644 index 0000000..48e8b46 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/README.md @@ -0,0 +1,55 @@ +# ICNet + +[ICNet for Real-time Semantic Segmentation on High-resolution Images](https://arxiv.org/abs/1704.08545) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We focus on the challenging task of real-time semantic segmentation in this paper. It finds many practical applications and yet is with fundamental difficulty of reducing a large portion of computation for pixel-wise label inference. We propose an image cascade network (ICNet) that incorporates multi-resolution branches under proper label guidance to address this challenge. We provide in-depth analysis of our framework and introduce the cascade feature fusion unit to quickly achieve high-quality segmentation. Our system yields real-time inference on a single GPU card with decent quality results evaluated on challenging datasets like Cityscapes, CamVid and COCO-Stuff. + + +
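The cascade feature fusion (CFF) unit mentioned in the abstract above is what lets ICNet fold its cheap low-resolution branch back into the finer branches under label guidance. The sketch below is a rough illustration only, assuming PyTorch; it is not the `ICNeck` module these configs actually instantiate, and `CascadeFeatureFusion` with its arguments is a name invented here. The idea: upsample the coarse feature, widen its receptive field with a dilated 3x3 convolution, project the finer feature to the same width, sum the two, and attach an auxiliary classifier to the upsampled coarse branch.

```python
# Rough illustration only, assuming PyTorch; not the ICNeck module used by
# the configs in this diff.
import torch
import torch.nn as nn
import torch.nn.functional as F


class CascadeFeatureFusion(nn.Module):
    """Toy cascade feature fusion: merge a coarse branch into a finer one."""

    def __init__(self, low_channels, high_channels, out_channels, num_classes):
        super().__init__()
        # Dilated 3x3 conv enlarges the receptive field of the upsampled coarse branch.
        self.conv_low = nn.Sequential(
            nn.Conv2d(low_channels, out_channels, 3, padding=2, dilation=2,
                      bias=False),
            nn.BatchNorm2d(out_channels))
        # 1x1 projection aligns the finer branch to the same channel width.
        self.conv_high = nn.Sequential(
            nn.Conv2d(high_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels))
        # Auxiliary classifier on the coarse branch supplies the label guidance.
        self.aux_cls = nn.Conv2d(low_channels, num_classes, 1)

    def forward(self, x_low, x_high):
        x_low = F.interpolate(x_low, size=x_high.shape[2:], mode='bilinear',
                              align_corners=False)
        fused = F.relu(self.conv_low(x_low) + self.conv_high(x_high))
        return fused, self.aux_cls(x_low)


# Example: fuse a 1/32-resolution feature into a 1/16-resolution one.
coarse = torch.randn(1, 256, 32, 32)
fine = torch.randn(1, 64, 64, 64)
fused, aux_logits = CascadeFeatureFusion(256, 64, 128, num_classes=19)(coarse, fine)
assert fused.shape == (1, 128, 64, 64) and aux_logits.shape == (1, 19, 64, 64)
```

Roughly speaking, chaining two such fusions (1/32 into 1/16, then into 1/8) and supervising each auxiliary output is what gives the cascade its name.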
+ +## Citation + +```bibtex +@inproceedings{zhao2018icnet, + title={Icnet for real-time semantic segmentation on high-resolution images}, + author={Zhao, Hengshuang and Qi, Xiaojuan and Shen, Xiaoyong and Shi, Jianping and Jia, Jiaya}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={405--420}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ---------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------- | ---------------------------------------------------------------- | +| ICNet | R-18-D8 | 832x832 | 80000 | 1.70 | 27.12 | 68.14 | 70.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521-2e36638d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521.log.json) | +| ICNet | R-18-D8 | 832x832 | 160000 | - | - | 71.64 | 74.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153-2c6eb6e0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153.log.json) | +| ICNet (in1k-pre) | R-18-D8 | 832x832 | 80000 | - | - | 72.51 | 74.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354-1cbe3022.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354.log.json) | +| ICNet (in1k-pre) | R-18-D8 | 832x832 | 160000 | - | - | 74.43 | 76.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702-619c8ae1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702.log.json) | +| ICNet | R-50-D8 | 832x832 | 80000 | 2.53 | 20.08 | 68.91 | 69.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625-c6407341.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625.log.json) | +| ICNet | R-50-D8 | 832x832 | 160000 | - | - | 73.82 | 75.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612-a95f0d4e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612.log.json) | +| ICNet (in1k-pre) | R-50-D8 | 832x832 | 80000 | - | - | 74.58 | 76.41 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943-1743dc7b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943.log.json) | +| ICNet (in1k-pre) | R-50-D8 | 832x832 | 160000 | - | - | 76.29 | 78.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715-ce310aea.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715.log.json) | +| ICNet | R-101-D8 | 832x832 | 80000 | 3.08 | 16.95 | 70.28 | 71.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447-b52f936e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447.log.json) | +| ICNet | R-101-D8 | 832x832 | 160000 | - | - | 73.80 | 76.10 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350-3a1ebf1a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350.log.json) | +| ICNet (in1k-pre) | R-101-D8 | 832x832 | 80000 | - | - | 75.57 | 77.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414-7ceb12c5.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414.log.json) | +| ICNet (in1k-pre) | R-101-D8 | 832x832 | 160000 | - | - | 76.15 | 77.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612-9484ae8a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612.log.json) | + +Note: `in1k-pre` means the backbone is initialized from an ImageNet-1K pretrained checkpoint. diff --git a/downstream/mmsegmentation/configs/icnet/icnet.yml b/downstream/mmsegmentation/configs/icnet/icnet.yml new file mode 100644 index 0000000..ebaf934 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet.yml @@ -0,0 +1,207 @@ +Collections: +- Name: ICNet + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/1704.08545 + Title: ICNet for Real-time Semantic Segmentation on High-resolution Images + README: configs/icnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/necks/ic_neck.py#L77 + Version: v0.18.0 + Converted From: + Code: https://github.com/hszhao/ICNet +Models: +- Name: icnet_r18-d8_832x832_80k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-18-D8 + crop size: (832,832) + lr schd: 80000 + inference time (ms/im): + - value: 36.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (832,832) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 68.14 + mIoU(ms+flip): 70.16 + Config: configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521-2e36638d.pth +- Name: icnet_r18-d8_832x832_160k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-18-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.64 + mIoU(ms+flip): 74.18 + Config: configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153-2c6eb6e0.pth +- Name: icnet_r18-d8_in1k-pre_832x832_80k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-18-D8 + crop size: (832,832) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 72.51 + mIoU(ms+flip): 74.78 + Config: configs/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354-1cbe3022.pth +- Name: icnet_r18-d8_in1k-pre_832x832_160k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-18-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.43 + mIoU(ms+flip): 76.72 + Config: configs/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702-619c8ae1.pth +- Name: icnet_r50-d8_832x832_80k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-50-D8 + crop size: (832,832) + lr schd: 80000 + inference time (ms/im): + - value: 49.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (832,832) + Training Memory (GB): 2.53 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 68.91 + mIoU(ms+flip): 69.72 + Config: configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625-c6407341.pth +- Name: icnet_r50-d8_832x832_160k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-50-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.82 + mIoU(ms+flip): 75.67 + Config: configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612-a95f0d4e.pth +- Name: icnet_r50-d8_in1k-pre_832x832_80k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-50-D8 + crop size: (832,832) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.58 + mIoU(ms+flip): 76.41 + Config: configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943-1743dc7b.pth +- Name: icnet_r50-d8_in1k-pre_832x832_160k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-50-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.29 + mIoU(ms+flip): 78.09 + Config: configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715-ce310aea.pth +- Name: icnet_r101-d8_832x832_80k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-101-D8 + crop size: (832,832) + lr schd: 80000 + inference time (ms/im): + - value: 59.0 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (832,832) + Training Memory (GB): 3.08 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.28 + mIoU(ms+flip): 71.95 + Config: configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447-b52f936e.pth +- Name: icnet_r101-d8_832x832_160k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-101-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.8 + mIoU(ms+flip): 76.1 + Config: configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350-3a1ebf1a.pth +- Name: 
icnet_r101-d8_in1k-pre_832x832_80k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-101-D8 + crop size: (832,832) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.57 + mIoU(ms+flip): 77.86 + Config: configs/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414-7ceb12c5.pth +- Name: icnet_r101-d8_in1k-pre_832x832_160k_cityscapes + In Collection: ICNet + Metadata: + backbone: R-101-D8 + crop size: (832,832) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.15 + mIoU(ms+flip): 77.98 + Config: configs/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612-9484ae8a.pth diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py new file mode 100644 index 0000000..24cbf53 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py' +model = dict(backbone=dict(backbone_cfg=dict(depth=101))) diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py new file mode 100644 index 0000000..f3338b5 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py' +model = dict(backbone=dict(backbone_cfg=dict(depth=101))) diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py new file mode 100644 index 0000000..74ac355 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py @@ -0,0 +1,7 @@ +_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet101_v1c')))) diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py new file mode 100644 index 0000000..b4ba6d6 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py @@ -0,0 +1,7 @@ +_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet101_v1c')))) diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py new file mode 100644 index 0000000..877b775 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py @@ -0,0 +1,3 @@ +_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py' +model = dict( + backbone=dict(layer_channels=(128, 512), backbone_cfg=dict(depth=18))) 
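The ICNet configs above follow the same `_base_` inheritance pattern as the HRNet ones earlier in this diff: a child file lists its parents and re-declares only the keys it overrides (`layer_channels`, `backbone_cfg.depth`, an `init_cfg` checkpoint), and everything else is merged in from the base files. Below is a minimal sketch of how the merged result can be inspected, assuming an mmcv 1.x / mmsegmentation 0.x environment; the path points at the config added just above and the commented values are what the merge is expected to yield for it.

```python
# Minimal sketch, assuming mmcv 1.x is installed; mmcv.Config performs the
# recursive dict merge that these `_base_`-style configs rely on.
from mmcv import Config

cfg = Config.fromfile(
    'downstream/mmsegmentation/configs/icnet/'
    'icnet_r18-d8_832x832_80k_cityscapes.py')

# Only the overridden keys live in the child file; dataset, runtime and
# schedule settings come from the files listed in `_base_`.
print(cfg.model.backbone.layer_channels)      # (128, 512), from the override above
print(cfg.model.backbone.backbone_cfg.depth)  # 18, replacing the R-50 default
print(cfg.runner.max_iters)                   # 80000, from schedule_80k.py
```

Training and evaluation then typically go through mmsegmentation's `tools/train.py` and `tools/test.py` entry points, which load a config in exactly this way.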
diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py new file mode 100644 index 0000000..786c7cc --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py @@ -0,0 +1,3 @@ +_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py' +model = dict( + backbone=dict(layer_channels=(128, 512), backbone_cfg=dict(depth=18))) diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py new file mode 100644 index 0000000..cc47951 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py' +model = dict( + backbone=dict( + layer_channels=(128, 512), + backbone_cfg=dict( + depth=18, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')))) diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py new file mode 100644 index 0000000..00b0fe0 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py' +model = dict( + backbone=dict( + layer_channels=(128, 512), + backbone_cfg=dict( + depth=18, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')))) diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py new file mode 100644 index 0000000..5b9fd9b --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/icnet_r50-d8.py', + '../_base_/datasets/cityscapes_832x832.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py new file mode 100644 index 0000000..e0336c9 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/icnet_r50-d8.py', + '../_base_/datasets/cityscapes_832x832.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py new file mode 100644 index 0000000..6f7a0a1 --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py @@ -0,0 +1,6 @@ +_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) diff --git a/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py b/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py new file mode 100644 index 0000000..57546cd --- /dev/null +++ b/downstream/mmsegmentation/configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py 
@@ -0,0 +1,6 @@ +_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) diff --git a/downstream/mmsegmentation/configs/isanet/README.md b/downstream/mmsegmentation/configs/isanet/README.md new file mode 100644 index 0000000..ef91226 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/README.md @@ -0,0 +1,79 @@ +# ISANet + +[Interlaced Sparse Self-Attention for Semantic Segmentation](https://arxiv.org/abs/1907.12273) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this paper, we present a so-called interlaced sparse self-attention approach to improve the efficiency of the \emph{self-attention} mechanism for semantic segmentation. The main idea is that we factorize the dense affinity matrix as the product of two sparse affinity matrices. There are two successive attention modules each estimating a sparse affinity matrix. The first attention module is used to estimate the affinities within a subset of positions that have long spatial interval distances and the second attention module is used to estimate the affinities within a subset of positions that have short spatial interval distances. These two attention modules are designed so that each position is able to receive the information from all the other positions. In contrast to the original self-attention module, our approach decreases the computation and memory complexity substantially especially when processing high-resolution feature maps. We empirically verify the effectiveness of our approach on six challenging semantic segmentation benchmarks. + + +
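To make the factorization described in this abstract concrete, here is an illustrative PyTorch sketch of the interlaced grouping. It is a toy reimplementation, not the `ISAHead` used by the configs below: `SelfAttention2d`, `InterlacedSelfAttention` and the `partitions` argument are names introduced here, and `H`/`W` are assumed divisible by the partition sizes. The first permutation gathers positions that lie `P_h`/`P_w` pixels apart (the long-distance groups); the second gathers each contiguous `P_h x P_w` block (the short-distance groups).

```python
# Illustrative toy, assuming PyTorch; this is not the ISAHead module these
# configs use.
import torch
import torch.nn as nn


class SelfAttention2d(nn.Module):
    """Plain single-head self-attention applied within one (N, C, H, W) group."""

    def __init__(self, channels):
        super().__init__()
        self.query = nn.Conv2d(channels, channels // 8, 1)
        self.key = nn.Conv2d(channels, channels // 8, 1)
        self.value = nn.Conv2d(channels, channels, 1)

    def forward(self, x):
        n, c, h, w = x.shape
        q = self.query(x).flatten(2).transpose(1, 2)   # (n, hw, c//8)
        k = self.key(x).flatten(2)                     # (n, c//8, hw)
        v = self.value(x).flatten(2).transpose(1, 2)   # (n, hw, c)
        attn = torch.softmax(q @ k / q.shape[-1] ** 0.5, dim=-1)
        return x + (attn @ v).transpose(1, 2).reshape(n, c, h, w)


class InterlacedSelfAttention(nn.Module):
    """Factorize dense attention into a long-range and a short-range sparse step."""

    def __init__(self, channels, partitions=(8, 8)):
        super().__init__()
        self.p_h, self.p_w = partitions
        self.long_range = SelfAttention2d(channels)
        self.short_range = SelfAttention2d(channels)

    def forward(self, x):
        n, c, h, w = x.shape
        p_h, p_w = self.p_h, self.p_w
        q_h, q_w = h // p_h, w // p_w

        # Long-distance step: each group holds positions spaced p_h / p_w apart.
        x = x.reshape(n, c, q_h, p_h, q_w, p_w)
        x = x.permute(0, 3, 5, 1, 2, 4).reshape(n * p_h * p_w, c, q_h, q_w)
        x = self.long_range(x)

        # Short-distance step: each group holds one contiguous p_h x p_w block.
        x = x.reshape(n, p_h, p_w, c, q_h, q_w)
        x = x.permute(0, 4, 5, 3, 1, 2).reshape(n * q_h * q_w, c, p_h, p_w)
        x = self.short_range(x)

        # Undo the grouping so the output matches the input layout.
        x = x.reshape(n, q_h, q_w, c, p_h, p_w)
        return x.permute(0, 3, 1, 4, 2, 5).reshape(n, c, h, w)


feats = torch.randn(2, 64, 64, 64)
out = InterlacedSelfAttention(64, partitions=(8, 8))(feats)
assert out.shape == feats.shape
```

Because each attention now runs over groups of only `Q_h*Q_w` or `P_h*P_w` positions instead of the full `H*W`, the quadratic cost drops accordingly, which is the efficiency argument made in the abstract.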
+ +## Citation + +```bibtex +@article{huang2019isa, + title={Interlaced Sparse Self-Attention for Semantic Segmentation}, + author={Huang, Lang and Yuan, Yuhui and Guo, Jianyuan and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, + journal={arXiv preprint arXiv:1907.12273}, + year={2019} +} +``` + +The technical report above is also presented at: + +```bibtex +@article{yuan2021ocnet, + title={OCNet: Object Context for Semantic Segmentation}, + author={Yuan, Yuhui and Huang, Lang and Guo, Jianyuan and Zhang, Chao and Chen, Xilin and Wang, Jingdong}, + journal={International Journal of Computer Vision}, + pages={1--24}, + year={2021}, + publisher={Springer} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config |download | +| --------|----------|-----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------| +| ISANet | R-50-D8 | 512x1024 | 40000 | 5.869 | 2.91 | 78.49 | 79.44 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739-981bd763.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739.log.json) | +| ISANet | R-50-D8 | 512x1024 | 80000 | 5.869 | 2.91 | 78.68 | 80.25 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202-89384497.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202.log.json) | +| ISANet | R-50-D8 | 769x769 | 40000 | 6.759 | 1.54 | 78.70 | 80.28 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_769x769_40k_cityscapes.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200-4ae7e65b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200.log.json) | +| ISANet | R-50-D8 | 769x769 | 80000 | 6.759 | 1.54 | 79.29 | 80.53 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_769x769_80k_cityscapes.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126-99b54519.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126.log.json) | +| ISANet | R-101-D8 | 512x1024 | 40000 | 9.425 | 2.35 | 79.58 | 81.05 
|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553-293e6bd6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553.log.json) | +| ISANet | R-101-D8 | 512x1024 | 80000 | 9.425 | 2.35 | 80.32 | 81.58 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243-5b99c9b2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243.log.json) | +| ISANet | R-101-D8 | 769x769 | 40000 | 10.815 | 0.92 | 79.68 | 80.95 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320-509e7224.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320.log.json) | +| ISANet | R-101-D8 | 769x769 | 80000 | 10.815 | 0.92 | 80.61 | 81.59 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319-24f71dfa.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config |download | +| --------|----------|-----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ISANet | R-50-D8 | 512x512 | 80000 | 9.0 | 22.55 | 41.12 | 42.35 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557-6ed83a0c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557.log.json)| +| ISANet | R-50-D8 | 512x512 | 160000 | 9.0 | 22.55 | 42.59 | 43.07 
|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x512_160k_ade20k.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850-f752d0a3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850.log.json)| +| ISANet | R-101-D8 | 512x512 | 80000 | 12.562 | 10.56 | 43.51 | 44.38 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056-68b235c2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056.log.json)| +| ISANet | R-101-D8 | 512x512 | 160000 | 12.562 | 10.56 | 43.80 | 45.4 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431-a7879dcd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431.log.json)| + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config |download | +| --------|----------|-----------|-----------|--------:|----------|----------------|------:|--------------:|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ISANet | R-50-D8 | 512x512 | 20000 | 5.9 | 23.08 | 76.78 | 77.79 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x512_20k_voc12aug.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838-79d59b80.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838.log.json)| +| ISANet | R-50-D8 | 512x512 | 40000 | 5.9 | 23.08 | 76.20 | 77.22 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x512_40k_voc12aug.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349-7d08a54e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349.log.json)| +| ISANet | R-101-D8 | 512x512 | 20000 | 9.465 | 7.42 | 78.46 | 79.16 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805-3ccbf355.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805.log.json)| +| ISANet | R-101-D8 | 512x512 | 40000 | 9.465 | 7.42 | 78.12 | 79.04 |[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814-bc71233b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814.log.json)| diff --git a/downstream/mmsegmentation/configs/isanet/isanet.yml b/downstream/mmsegmentation/configs/isanet/isanet.yml new file mode 100644 index 0000000..8c65bcf --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet.yml @@ -0,0 +1,369 @@ +Collections: +- Name: ISANet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1907.12273 + Title: Interlaced Sparse Self-Attention for Semantic Segmentation + README: configs/isanet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/decode_heads/isa_head.py#L58 + Version: v0.18.0 + Converted From: + Code: https://github.com/openseg-group/openseg.pytorch +Models: +- Name: isanet_r50-d8_512x1024_40k_cityscapes + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 343.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.869 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.49 + mIoU(ms+flip): 79.44 + Config: configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739-981bd763.pth +- Name: isanet_r50-d8_512x1024_80k_cityscapes + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 343.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.869 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.68 + mIoU(ms+flip): 80.25 + Config: configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202-89384497.pth +- Name: isanet_r50-d8_769x769_40k_cityscapes + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 649.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.759 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.7 + mIoU(ms+flip): 80.28 + Config: configs/isanet/isanet_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200-4ae7e65b.pth +- Name: isanet_r50-d8_769x769_80k_cityscapes + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + inference 
time (ms/im): + - value: 649.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.759 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.29 + mIoU(ms+flip): 80.53 + Config: configs/isanet/isanet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126-99b54519.pth +- Name: isanet_r101-d8_512x1024_40k_cityscapes + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 425.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.425 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.58 + mIoU(ms+flip): 81.05 + Config: configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553-293e6bd6.pth +- Name: isanet_r101-d8_512x1024_80k_cityscapes + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 425.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.425 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.32 + mIoU(ms+flip): 81.58 + Config: configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243-5b99c9b2.pth +- Name: isanet_r101-d8_769x769_40k_cityscapes + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 1086.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.815 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.68 + mIoU(ms+flip): 80.95 + Config: configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320-509e7224.pth +- Name: isanet_r101-d8_769x769_80k_cityscapes + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 1086.96 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.815 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.61 + mIoU(ms+flip): 81.59 + Config: configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319-24f71dfa.pth +- Name: isanet_r50-d8_512x512_80k_ade20k + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 44.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.12 + 
mIoU(ms+flip): 42.35 + Config: configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557-6ed83a0c.pth +- Name: isanet_r50-d8_512x512_160k_ade20k + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 44.35 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.59 + mIoU(ms+flip): 43.07 + Config: configs/isanet/isanet_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850-f752d0a3.pth +- Name: isanet_r101-d8_512x512_80k_ade20k + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 94.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.562 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.51 + mIoU(ms+flip): 44.38 + Config: configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056-68b235c2.pth +- Name: isanet_r101-d8_512x512_160k_ade20k + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 94.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.562 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.8 + mIoU(ms+flip): 45.4 + Config: configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431-a7879dcd.pth +- Name: isanet_r50-d8_512x512_20k_voc12aug + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 43.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.78 + mIoU(ms+flip): 77.79 + Config: configs/isanet/isanet_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838-79d59b80.pth +- Name: isanet_r50-d8_512x512_40k_voc12aug + In Collection: ISANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 43.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.2 + mIoU(ms+flip): 77.22 + Config: configs/isanet/isanet_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349-7d08a54e.pth +- Name: isanet_r101-d8_512x512_20k_voc12aug + In Collection: ISANet + Metadata: 
+ backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 134.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.465 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.46 + mIoU(ms+flip): 79.16 + Config: configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805-3ccbf355.pth +- Name: isanet_r101-d8_512x512_40k_voc12aug + In Collection: ISANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + inference time (ms/im): + - value: 134.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.465 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.12 + mIoU(ms+flip): 79.04 + Config: configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814-bc71233b.pth diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..f5cd8cb --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..ebc15cb --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..3329010 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..46fee91 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..64bd8c1 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_512x512_40k_voc12aug.py' +model = 
dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..6e13e20 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..cf362aa --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..3c2283b --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './isanet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..f8675e9 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..46119fb --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..7d5c235 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..d8b60ba --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + 
'../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..4729899 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..e35480d --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..201a358 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..5604350 --- /dev/null +++ b/downstream/mmsegmentation/configs/isanet/isanet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/isanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/knet/README.md b/downstream/mmsegmentation/configs/knet/README.md new file mode 100644 index 0000000..ef22336 --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/README.md @@ -0,0 +1,49 @@ +# K-Net + +[K-Net: Towards Unified Image Segmentation](https://arxiv.org/abs/2106.14855) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Semantic, instance, and panoptic segmentations have been addressed using different and specialized frameworks despite their underlying connections. This paper presents a unified, simple, and effective framework for these essentially similar tasks. The framework, named K-Net, segments both instances and semantic categories consistently by a group of learnable kernels, where each kernel is responsible for generating a mask for either a potential instance or a stuff class. 
To remedy the difficulties of distinguishing various instances, we propose a kernel update strategy that makes each kernel dynamic and conditional on its meaningful group in the input image. K-Net can be trained in an end-to-end manner with bipartite matching, and its training and inference are naturally NMS-free and box-free. Without bells and whistles, K-Net surpasses all previously published state-of-the-art single-model results of panoptic segmentation on the MS COCO test-dev split and semantic segmentation on the ADE20K val split with 55.2% PQ and 54.3% mIoU, respectively. Its instance segmentation performance is also on par with Cascade Mask R-CNN on MS COCO with 60%-90% faster inference speeds. Code and models will be released at [this https URL](https://github.com/ZwwWayne/K-Net/). + + +
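For intuition only, the sketch below illustrates the idea summarized in the abstract above: a set of learnable kernels, each producing a mask by a dot product with the feature map, followed by one kernel update conditioned on the mask-weighted group feature. It is a simplified toy, not the `KernelUpdateHead`/`KernelUpdator` modules configured later in this patch; the tensor shapes, the plain linear update rule, and the single refinement round are assumptions made for brevity.

```python
# Conceptual sketch of K-Net-style dynamic kernels (not the mmseg implementation).
import torch
import torch.nn as nn


class ToyKernelUpdate(nn.Module):
    """One round of mask prediction + kernel update for K learnable kernels."""

    def __init__(self, num_kernels=150, channels=256):
        super().__init__()
        # Each kernel acts like a 1x1 conv filter that produces one mask logit map.
        self.kernels = nn.Parameter(torch.randn(num_kernels, channels))
        # Assumed update rule: a linear layer over [old kernel, group feature].
        self.update = nn.Linear(2 * channels, channels)

    def forward(self, feats):                 # feats: (B, C, H, W)
        b, c, h, w = feats.shape
        k = self.kernels                      # (K, C)
        # Mask logits: dot product between every kernel and every pixel feature.
        logits = torch.einsum('kc,bchw->bkhw', k, feats)
        masks = logits.sigmoid()
        # Group feature: mask-weighted average of pixel features per kernel.
        group = torch.einsum('bkhw,bchw->bkc', masks, feats) / (h * w)
        # Update each kernel conditioned on its own group feature.
        new_k = self.update(torch.cat([k.expand(b, -1, -1), group], dim=-1))
        # Re-predict masks with the updated kernels.
        return torch.einsum('bkc,bchw->bkhw', new_k, feats)


logits = ToyKernelUpdate()(torch.randn(2, 256, 64, 64))
print(logits.shape)  # torch.Size([2, 150, 64, 64])
```

In the actual configs added by this patch the update is iterated over several stages (`num_stages = 3`), with one `KernelUpdateHead` per stage.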
    + +```bibtex +@inproceedings{zhang2021knet, + title={{K-Net: Towards} Unified Image Segmentation}, + author={Wenwei Zhang and Jiangmiao Pang and Kai Chen and Chen Change Loy}, + year={2021}, + booktitle={NeurIPS}, +} +``` + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------------- | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ----- | +| KNet + FCN | R-50-D8 | 512x512 | 80000 | 7.01 | 19.24 | 43.60 | 45.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751-abcab920.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751.log.json) | +| KNet + PSPNet | R-50-D8 | 512x512 | 80000 | 6.98 | 20.04 | 44.18 | 45.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634-d2c72240.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634.log.json) | +| KNet + DeepLabV3| R-50-D8 | 512x512 | 80000 | 7.42 | 12.10 | 45.06 | 46.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642-00c8fbeb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642.log.json) | +| KNet + UperNet | R-50-D8 | 512x512 | 80000 | 7.34 | 17.11 | 43.45 | 44.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657-215753b0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657.log.json) | +| KNet + UperNet | Swin-T | 512x512 | 80000 | 7.57 | 15.56 | 45.84 | 46.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059-7545e1dc.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059.log.json) | +| KNet + UperNet | Swin-L | 512x512 | 80000 | 13.5 | 8.29 | 52.05 | 53.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559-d8da9a90.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559.log.json) | +| KNet + UperNet | Swin-L | 640x640 | 80000 | 13.54 | 8.29 | 52.21 | 53.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220301_220747-8787fc71.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220301_220747.log.json) | + +Note: + +- All experiments of K-Net are implemented with 8 V100 (32G) GPUs with 2 samplers per GPU. diff --git a/downstream/mmsegmentation/configs/knet/knet.yml b/downstream/mmsegmentation/configs/knet/knet.yml new file mode 100644 index 0000000..5e2e529 --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/knet.yml @@ -0,0 +1,169 @@ +Collections: +- Name: KNet + Metadata: + Training Data: + - ADE20K + Paper: + URL: https://arxiv.org/abs/2106.14855 + Title: 'K-Net: Towards Unified Image Segmentation' + README: configs/knet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.23.0/mmseg/models/decode_heads/knet_head.py#L392 + Version: v0.23.0 + Converted From: + Code: https://github.com/ZwwWayne/K-Net/ +Models: +- Name: knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k + In Collection: KNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 51.98 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.01 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.6 + mIoU(ms+flip): 45.12 + Config: configs/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751-abcab920.pth +- Name: knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k + In Collection: KNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 49.9 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.98 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.18 + mIoU(ms+flip): 45.58 + Config: configs/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634-d2c72240.pth +- 
Name: knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k + In Collection: KNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.42 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.06 + mIoU(ms+flip): 46.11 + Config: configs/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642-00c8fbeb.pth +- Name: knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k + In Collection: KNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 58.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.34 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.45 + mIoU(ms+flip): 44.07 + Config: configs/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657-215753b0.pth +- Name: knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k + In Collection: KNet + Metadata: + backbone: Swin-T + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 64.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.57 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.84 + mIoU(ms+flip): 46.27 + Config: configs/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059-7545e1dc.pth +- Name: knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k + In Collection: KNet + Metadata: + backbone: Swin-L + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 120.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.05 + mIoU(ms+flip): 53.24 + Config: configs/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559-d8da9a90.pth +- Name: knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k + In Collection: KNet + Metadata: + backbone: Swin-L + crop size: (640,640) + lr schd: 80000 + inference time (ms/im): + - value: 120.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 13.54 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.21 + mIoU(ms+flip): 53.34 + Config: configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220301_220747-8787fc71.pth diff --git 
a/downstream/mmsegmentation/configs/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k.py b/downstream/mmsegmentation/configs/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k.py new file mode 100644 index 0000000..3edb05c --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k.py @@ -0,0 +1,93 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='IterativeDecodeHead', + num_stages=num_stages, + kernel_update_head=[ + dict( + type='KernelUpdateHead', + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=1, + feedforward_channels=2048, + in_channels=512, + out_channels=512, + dropout=0.0, + conv_kernel_size=conv_kernel_size, + ffn_act_cfg=dict(type='ReLU', inplace=True), + with_ffn=True, + feat_transform_cfg=dict( + conv_cfg=dict(type='Conv2d'), act_cfg=None), + kernel_updator_cfg=dict( + type='KernelUpdator', + in_channels=256, + feat_channels=256, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))) for _ in range(num_stages) + ], + kernel_generate_head=dict( + type='ASPPHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2)) +# learning policy +lr_config = dict( + _delete_=True, + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[60000, 72000], + by_epoch=False) +# In K-Net implementation we use batch size 2 per GPU as default +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k.py b/downstream/mmsegmentation/configs/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k.py new file mode 100644 index 0000000..29a088f --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k.py @@ -0,0 +1,93 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + 
norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='IterativeDecodeHead', + num_stages=num_stages, + kernel_update_head=[ + dict( + type='KernelUpdateHead', + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=1, + feedforward_channels=2048, + in_channels=512, + out_channels=512, + dropout=0.0, + conv_kernel_size=conv_kernel_size, + ffn_act_cfg=dict(type='ReLU', inplace=True), + with_ffn=True, + feat_transform_cfg=dict( + conv_cfg=dict(type='Conv2d'), act_cfg=None), + kernel_updator_cfg=dict( + type='KernelUpdator', + in_channels=256, + feat_channels=256, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))) for _ in range(num_stages) + ], + kernel_generate_head=dict( + type='FCNHead', + in_channels=2048, + in_index=3, + channels=512, + num_convs=2, + concat_input=True, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2)) +# learning policy +lr_config = dict( + _delete_=True, + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[60000, 72000], + by_epoch=False) +# In K-Net implementation we use batch size 2 per GPU as default +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k.py b/downstream/mmsegmentation/configs/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k.py new file mode 100644 index 0000000..d77a3b4 --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k.py @@ -0,0 +1,92 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='IterativeDecodeHead', + num_stages=num_stages, + kernel_update_head=[ + dict( + type='KernelUpdateHead', + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=1, + feedforward_channels=2048, + in_channels=512, + out_channels=512, + dropout=0.0, + conv_kernel_size=conv_kernel_size, + ffn_act_cfg=dict(type='ReLU', inplace=True), + with_ffn=True, + feat_transform_cfg=dict( + conv_cfg=dict(type='Conv2d'), act_cfg=None), + kernel_updator_cfg=dict( + type='KernelUpdator', + in_channels=256, + feat_channels=256, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))) for _ in range(num_stages) + ], + kernel_generate_head=dict( + type='PSPHead', + 
in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2)) +# learning policy +lr_config = dict( + _delete_=True, + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[60000, 72000], + by_epoch=False) +# In K-Net implementation we use batch size 2 per GPU as default +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py b/downstream/mmsegmentation/configs/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py new file mode 100644 index 0000000..0071cea --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py @@ -0,0 +1,93 @@ +_base_ = [ + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 + +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='IterativeDecodeHead', + num_stages=num_stages, + kernel_update_head=[ + dict( + type='KernelUpdateHead', + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=1, + feedforward_channels=2048, + in_channels=512, + out_channels=512, + dropout=0.0, + conv_kernel_size=conv_kernel_size, + ffn_act_cfg=dict(type='ReLU', inplace=True), + with_ffn=True, + feat_transform_cfg=dict( + conv_cfg=dict(type='Conv2d'), act_cfg=None), + kernel_updator_cfg=dict( + type='KernelUpdator', + in_channels=256, + feat_channels=256, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))) for _ in range(num_stages) + ], + kernel_generate_head=dict( + type='UPerHead', + in_channels=[256, 512, 1024, 2048], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2)) +# 
learning policy +lr_config = dict( + _delete_=True, + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[60000, 72000], + by_epoch=False) +# In K-Net implementation we use batch size 2 per GPU as default +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k.py b/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k.py new file mode 100644 index 0000000..b9d1a09 --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k.py @@ -0,0 +1,19 @@ +_base_ = 'knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py' + +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220308-d5bdebaf.pth' # noqa +# model settings +model = dict( + pretrained=checkpoint_file, + backbone=dict( + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + use_abs_pos_embed=False, + drop_path_rate=0.3, + patch_norm=True), + decode_head=dict( + kernel_generate_head=dict(in_channels=[192, 384, 768, 1536])), + auxiliary_head=dict(in_channels=768)) +# In K-Net implementation we use batch size 2 per GPU as default +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py b/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py new file mode 100644 index 0000000..fc6e9fe --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py @@ -0,0 +1,54 @@ +_base_ = 'knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py' + +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220308-d5bdebaf.pth' # noqa +# model settings +model = dict( + pretrained=checkpoint_file, + backbone=dict( + embed_dims=192, + depths=[2, 2, 18, 2], + num_heads=[6, 12, 24, 48], + window_size=7, + use_abs_pos_embed=False, + drop_path_rate=0.4, + patch_norm=True), + decode_head=dict( + kernel_generate_head=dict(in_channels=[192, 384, 768, 1536])), + auxiliary_head=dict(in_channels=768)) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (640, 640) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 640), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 640), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +# In K-Net implementation we use batch size 2 per GPU as default 
+data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py b/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py new file mode 100644 index 0000000..0b29b2b --- /dev/null +++ b/downstream/mmsegmentation/configs/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py @@ -0,0 +1,57 @@ +_base_ = 'knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py' + +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220308-f41b89d3.pth' # noqa + +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +num_stages = 3 +conv_kernel_size = 1 + +model = dict( + type='EncoderDecoder', + pretrained=checkpoint_file, + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.3, + use_abs_pos_embed=False, + patch_norm=True, + out_indices=(0, 1, 2, 3)), + decode_head=dict( + kernel_generate_head=dict(in_channels=[96, 192, 384, 768])), + auxiliary_head=dict(in_channels=384)) + +# modify learning rate following the official implementation of Swin Transformer # noqa +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.0005, + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) +optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2)) +# learning policy +lr_config = dict( + _delete_=True, + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[60000, 72000], + by_epoch=False) +# In K-Net implementation we use batch size 2 per GPU as default +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/README.md b/downstream/mmsegmentation/configs/mobilenet_v2/README.md new file mode 100644 index 0000000..bef8898 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/README.md @@ -0,0 +1,55 @@ +# MobileNetV2 + +[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3 which we call Mobile DeepLabv3. +The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers opposite to traditional residual models which use expanded representations in the input an MobileNetV2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. 
Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on ImageNet classification, COCO object detection, and VOC image segmentation. We evaluate the trade-offs between accuracy and the number of operations measured by multiply-adds (MAdd), as well as the number of parameters. + + +
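As a concrete reference for the inverted-residual-with-linear-bottleneck design described above, here is a minimal PyTorch sketch of one such block: a 1x1 expansion, a 3x3 depthwise convolution, and a linear (activation-free) 1x1 projection back to a thin bottleneck, with a skip connection when stride and channel counts allow it. It is an illustrative approximation under assumed channel sizes, not the `MobileNetV2` backbone class referenced by the configs in this patch.

```python
# Minimal sketch of a MobileNetV2-style inverted residual block (illustrative only).
import torch
import torch.nn as nn


class InvertedResidual(nn.Module):
    def __init__(self, in_ch, out_ch, stride=1, expand_ratio=6):
        super().__init__()
        hidden = in_ch * expand_ratio
        self.use_res = stride == 1 and in_ch == out_ch
        self.block = nn.Sequential(
            # 1x1 expansion to a wider intermediate representation.
            nn.Conv2d(in_ch, hidden, 1, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # 3x3 depthwise convolution filters each channel independently.
            nn.Conv2d(hidden, hidden, 3, stride, padding=1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # Linear 1x1 projection back to a thin bottleneck (no non-linearity).
            nn.Conv2d(hidden, out_ch, 1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_res else out


y = InvertedResidual(24, 24)(torch.randn(1, 24, 56, 56))
print(y.shape)  # torch.Size([1, 24, 56, 56])
```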
    + +## Citation + +```bibtex +@inproceedings{sandler2018mobilenetv2, + title={Mobilenetv2: Inverted residuals and linear bottlenecks}, + author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={4510--4520}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | M-V2-D8 | 512x1024 | 80000 | 3.4 | 14.2 | 61.54 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-d24c28c1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes-20200825_124817.log.json) | +| PSPNet | M-V2-D8 | 512x1024 | 80000 | 3.6 | 11.2 | 70.23 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-19e81d51.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes-20200825_124817.log.json) | +| DeepLabV3 | M-V2-D8 | 512x1024 | 80000 | 3.9 | 8.4 | 73.84 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-bef03590.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes-20200825_124836.log.json) | +| DeepLabV3+ | M-V2-D8 | 512x1024 | 80000 | 5.1 | 8.4 | 75.20 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-d256dd4b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes-20200825_124836.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config 
| download | +| ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | M-V2-D8 | 512x512 | 160000 | 6.5 | 64.4 | 19.71 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k_20200825_214953-c40e1095.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k-20200825_214953.log.json) | +| PSPNet | M-V2-D8 | 512x512 | 160000 | 6.5 | 57.7 | 29.68 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k_20200825_214953-f5942f7a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k-20200825_214953.log.json) | +| DeepLabV3 | M-V2-D8 | 512x512 | 160000 | 6.8 | 39.9 | 34.08 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k_20200825_223255-63986343.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k-20200825_223255.log.json) | +| DeepLabV3+ | M-V2-D8 | 512x512 | 160000 | 8.2 | 43.1 | 34.02 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k_20200825_223255-465a01d4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k-20200825_223255.log.json) | diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..267483d --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,12 @@ +_base_ = '../deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff 
--git a/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..e15b8cc --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py @@ -0,0 +1,12 @@ +_base_ = '../deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..d4533d7 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,12 @@ +_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6)), + decode_head=dict(in_channels=320, c1_in_channels=24), + auxiliary_head=dict(in_channels=96)) diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..7615a7c --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py @@ -0,0 +1,12 @@ +_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6)), + decode_head=dict(in_channels=320, c1_in_channels=24), + auxiliary_head=dict(in_channels=96)) diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..a535bd0 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,12 @@ +_base_ = '../fcn/fcn_r101-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..c5f6ab0 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py @@ -0,0 +1,12 @@ +_base_ = '../fcn/fcn_r101-d8_512x512_160k_ade20k.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 
1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/mobilenet_v2.yml b/downstream/mmsegmentation/configs/mobilenet_v2/mobilenet_v2.yml new file mode 100644 index 0000000..5527ba8 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/mobilenet_v2.yml @@ -0,0 +1,169 @@ +Models: +- Name: fcn_m-v2-d8_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: M-V2-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 70.42 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 61.54 + Config: configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-d24c28c1.pth +- Name: pspnet_m-v2-d8_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: M-V2-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 89.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 70.23 + Config: configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-19e81d51.pth +- Name: deeplabv3_m-v2-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: M-V2-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 119.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.84 + Config: configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-bef03590.pth +- Name: deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: M-V2-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 119.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.2 + Config: configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-d256dd4b.pth +- Name: fcn_m-v2-d8_512x512_160k_ade20k + In Collection: FCN + Metadata: + backbone: M-V2-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 15.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 19.71 + Config: configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k_20200825_214953-c40e1095.pth +- Name: pspnet_m-v2-d8_512x512_160k_ade20k + In Collection: PSPNet + Metadata: + backbone: M-V2-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 17.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 29.68 + Config: configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k_20200825_214953-f5942f7a.pth +- Name: deeplabv3_m-v2-d8_512x512_160k_ade20k + In Collection: DeepLabV3 + Metadata: + backbone: M-V2-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 25.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 34.08 + Config: configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k_20200825_223255-63986343.pth +- Name: deeplabv3plus_m-v2-d8_512x512_160k_ade20k + In Collection: DeepLabV3+ + Metadata: + backbone: M-V2-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 23.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 34.02 + Config: configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k_20200825_223255-465a01d4.pth diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..7403bee --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,12 @@ +_base_ = '../pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git a/downstream/mmsegmentation/configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..5b72ac8 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py @@ -0,0 +1,12 @@ +_base_ = '../pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' +model = dict( + pretrained='mmcls://mobilenet_v2', + backbone=dict( + _delete_=True, + type='MobileNetV2', + widen_factor=1., + strides=(1, 2, 2, 1, 1, 1, 1), + dilations=(1, 1, 1, 2, 2, 4, 4), + out_indices=(1, 2, 4, 6)), + decode_head=dict(in_channels=320), + auxiliary_head=dict(in_channels=96)) diff --git 
a/downstream/mmsegmentation/configs/mobilenet_v3/README.md b/downstream/mmsegmentation/configs/mobilenet_v3/README.md new file mode 100644 index 0000000..b08ac27 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v3/README.md @@ -0,0 +1,47 @@ +# MobileNetV3 + +[Searching for MobileNetV3](https://arxiv.org/abs/1905.02244) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We present the next generation of MobileNets based on a combination of complementary search techniques as well as a novel architecture design. MobileNetV3 is tuned to mobile phone CPUs through a combination of hardware-aware network architecture search (NAS) complemented by the NetAdapt algorithm and then subsequently improved through novel architecture advances. This paper starts the exploration of how automated search algorithms and network design can work together to harness complementary approaches improving the overall state of the art. Through this process we create two new MobileNet models for release: MobileNetV3-Large and MobileNetV3-Small which are targeted for high and low resource use cases. These models are then adapted and applied to the tasks of object detection and semantic segmentation. For the task of semantic segmentation (or any dense pixel prediction), we propose a new efficient segmentation decoder Lite Reduced Atrous Spatial Pyramid Pooling (LR-ASPP). We achieve new state of the art results for mobile classification, detection and segmentation. MobileNetV3-Large is 3.2\% more accurate on ImageNet classification while reducing latency by 15\% compared to MobileNetV2. MobileNetV3-Small is 4.6\% more accurate while reducing latency by 5\% compared to MobileNetV2. MobileNetV3-Large detection is 25\% faster at roughly the same accuracy as MobileNetV2 on COCO detection. MobileNetV3-Large LR-ASPP is 30\% faster than MobileNetV2 R-ASPP at similar accuracy for Cityscapes segmentation. + + +
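The MobileNetV2 segmentation configs added above all follow one pattern: they inherit a ResNet-101 config via `_base_`, then set `_delete_=True` so the inherited backbone keys are dropped and replaced by a `MobileNetV2` backbone initialized from `mmcls://mobilenet_v2`, with only the head `in_channels` adjusted to the new feature widths (320 and 96). As a minimal sketch (an editorial illustration, not part of this patch; it assumes mmcv is installed and that the path points into a full mmsegmentation configs tree so the `_base_` files resolve), the merged result can be inspected with `mmcv.Config`:

```python
from mmcv import Config

# Path is an assumption about the local checkout layout; the `_base_` configs
# referenced by this file must also be present for the merge to succeed.
cfg = Config.fromfile(
    'downstream/mmsegmentation/configs/mobilenet_v2/'
    'fcn_m-v2-d8_512x1024_80k_cityscapes.py')

# Because of `_delete_=True`, the merged backbone contains only the
# MobileNetV2 settings, not the inherited ResNet-101 ones.
print(cfg.model.backbone.type)            # MobileNetV2
print(cfg.model.backbone.out_indices)     # (1, 2, 4, 6)
print(cfg.model.decode_head.in_channels)  # 320
```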
    + +## Citation + +```bibtex +@inproceedings{Howard_2019_ICCV, + title={Searching for MobileNetV3}, + author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. and Adam, Hartwig}, + booktitle={The IEEE International Conference on Computer Vision (ICCV)}, + pages={1314-1324}, + month={October}, + year={2019}, + doi={10.1109/ICCV.2019.00140}} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| LRASPP | M-V3-D8 | 512x1024 | 320000 | 8.9 | 15.22 | 69.54 | 70.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes_20201224_220337-cfe8fb07.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes-20201224_220337.log.json) | +| LRASPP | M-V3-D8 (scratch) | 512x1024 | 320000 | 8.9 | 14.77 | 67.87 | 69.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes_20201224_220337-9f29cd72.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes-20201224_220337.log.json) | +| LRASPP | M-V3s-D8 | 512x1024 | 320000 | 5.3 | 23.64 | 64.11 | 66.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes_20201224_223935-61565b34.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes-20201224_223935.log.json) | +| LRASPP | M-V3s-D8 (scratch) | 512x1024 | 320000 | 5.3 | 24.50 | 62.74 | 65.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes_20201224_223935-03daeabb.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes-20201224_223935.log.json) | diff --git a/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes.py b/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes.py new file mode 100644 index 0000000..e59a78b --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/lraspp_m-v3-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] + +model = dict(pretrained='open-mmlab://contrib/mobilenet_v3_large') + +# Re-config the data sampler. +data = dict(samples_per_gpu=4, workers_per_gpu=4) + +runner = dict(type='IterBasedRunner', max_iters=320000) diff --git a/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py b/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py new file mode 100644 index 0000000..a3c5435 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/lraspp_m-v3-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] + +# Re-config the data sampler. +data = dict(samples_per_gpu=4, workers_per_gpu=4) + +runner = dict(type='IterBasedRunner', max_iters=320000) diff --git a/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes.py b/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes.py new file mode 100644 index 0000000..d4e368b --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes.py @@ -0,0 +1,23 @@ +_base_ = './lraspp_m-v3-d8_512x1024_320k_cityscapes.py' +norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://contrib/mobilenet_v3_small', + backbone=dict( + type='MobileNetV3', + arch='small', + out_indices=(0, 1, 12), + norm_cfg=norm_cfg), + decode_head=dict( + type='LRASPPHead', + in_channels=(16, 16, 576), + in_index=(0, 1, 2), + channels=128, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) diff --git a/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py b/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py new file mode 100644 index 0000000..0c5f707 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py @@ -0,0 +1,22 @@ +_base_ = './lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py' +norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='MobileNetV3', + arch='small', + out_indices=(0, 1, 12), + norm_cfg=norm_cfg), + decode_head=dict( + type='LRASPPHead', + in_channels=(16, 16, 576), + in_index=(0, 1, 2), + channels=128, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + 
act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) diff --git a/downstream/mmsegmentation/configs/mobilenet_v3/mobilenet_v3.yml b/downstream/mmsegmentation/configs/mobilenet_v3/mobilenet_v3.yml new file mode 100644 index 0000000..81a1796 --- /dev/null +++ b/downstream/mmsegmentation/configs/mobilenet_v3/mobilenet_v3.yml @@ -0,0 +1,89 @@ +Models: +- Name: lraspp_m-v3-d8_512x1024_320k_cityscapes + In Collection: LRASPP + Metadata: + backbone: M-V3-D8 + crop size: (512,1024) + lr schd: 320000 + inference time (ms/im): + - value: 65.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 69.54 + mIoU(ms+flip): 70.89 + Config: configs/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes_20201224_220337-cfe8fb07.pth +- Name: lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes + In Collection: LRASPP + Metadata: + backbone: M-V3-D8 (scratch) + crop size: (512,1024) + lr schd: 320000 + inference time (ms/im): + - value: 67.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 67.87 + mIoU(ms+flip): 69.78 + Config: configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes_20201224_220337-9f29cd72.pth +- Name: lraspp_m-v3s-d8_512x1024_320k_cityscapes + In Collection: LRASPP + Metadata: + backbone: M-V3s-D8 + crop size: (512,1024) + lr schd: 320000 + inference time (ms/im): + - value: 42.3 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 64.11 + mIoU(ms+flip): 66.42 + Config: configs/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes_20201224_223935-61565b34.pth +- Name: lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes + In Collection: LRASPP + Metadata: + backbone: M-V3s-D8 (scratch) + crop size: (512,1024) + lr schd: 320000 + inference time (ms/im): + - value: 40.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 5.3 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 62.74 + mIoU(ms+flip): 65.01 + Config: configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes_20201224_223935-03daeabb.pth diff --git a/downstream/mmsegmentation/configs/nonlocal_net/README.md b/downstream/mmsegmentation/configs/nonlocal_net/README.md new file mode 100644 index 0000000..e1bd9d4 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/README.md @@ -0,0 +1,67 @@ +# NonLocal Net + +[Non-local Neural 
Networks](https://arxiv.org/abs/1711.07971) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Both convolutional and recurrent operations are building blocks that process one local neighborhood at a time. In this paper, we present non-local operations as a generic family of building blocks for capturing long-range dependencies. Inspired by the classical non-local means method in computer vision, our non-local operation computes the response at a position as a weighted sum of the features at all positions. This building block can be plugged into many computer vision architectures. On the task of video classification, even without any bells and whistles, our non-local models can compete or outperform current competition winners on both Kinetics and Charades datasets. In static image recognition, our non-local models improve object detection/segmentation and pose estimation on the COCO suite of tasks. Code is available at [this https URL](https://github.com/facebookresearch/video-nonlocal-net). + + +
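The abstract above describes the non-local operation as computing each position's response as a similarity-weighted sum of the features at all positions. The following is a schematic PyTorch sketch of an embedded-Gaussian non-local block that illustrates the idea; it is an editorial illustration, not mmsegmentation's actual `NLHead`/`NonLocal2d` code, and the layer names and channel-halving choice are assumptions:

```python
import torch
import torch.nn as nn


class NonLocalBlock(nn.Module):
    """Embedded-Gaussian non-local block (schematic, inner channels halved)."""

    def __init__(self, channels):
        super().__init__()
        inter = channels // 2
        self.theta = nn.Conv2d(channels, inter, 1)  # query projection
        self.phi = nn.Conv2d(channels, inter, 1)    # key projection
        self.g = nn.Conv2d(channels, inter, 1)      # value projection
        self.out = nn.Conv2d(inter, channels, 1)    # restore channel width

    def forward(self, x):
        n, c, h, w = x.shape
        theta = self.theta(x).flatten(2).transpose(1, 2)  # (N, HW, C')
        phi = self.phi(x).flatten(2)                      # (N, C', HW)
        g = self.g(x).flatten(2).transpose(1, 2)          # (N, HW, C')
        attn = torch.softmax(theta @ phi, dim=-1)         # pairwise position weights
        y = (attn @ g).transpose(1, 2).reshape(n, -1, h, w)
        return x + self.out(y)                            # residual connection
```

In the configs below, a block of this kind sits on top of the dilated ResNet features in the decode head, so every position of the final feature map can attend to every other position.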
    + +## Citation + +```bibtex +@inproceedings{wang2018non, + title={Non-local neural networks}, + author={Wang, Xiaolong and Girshick, Ross and Gupta, Abhinav and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7794--7803}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| -------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| NonLocalNet | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.72 | 78.24 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748.log.json) | +| NonLocalNet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.95 | 78.66 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748.log.json) | +| NonLocalNet | R-50-D8 | 769x769 | 40000 | 8.9 | 1.52 | 78.33 | 79.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243.log.json) | +| NonLocalNet | R-101-D8 | 769x769 | 40000 | 12.8 | 1.05 | 78.57 | 80.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348.log.json) | +| NonLocalNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.01 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518.log.json) | +| NonLocalNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.93 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411.log.json) | +| NonLocalNet | R-50-D8 | 769x769 | 80000 | - | - | 79.05 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506.log.json) | +| NonLocalNet | R-101-D8 | 769x769 | 80000 | - | - | 79.40 | 80.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| -------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| NonLocalNet | R-50-D8 | 512x512 | 80000 | 9.1 | 21.37 | 40.75 | 42.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801.log.json) | +| NonLocalNet | R-101-D8 | 512x512 | 80000 | 12.6 | 13.97 | 42.90 | 44.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758.log.json) | +| NonLocalNet | R-50-D8 | 512x512 | 160000 | - | - | 42.03 | 43.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410.log.json) | +| NonLocalNet | R-101-D8 | 512x512 | 160000 | - | - | 44.63 | 45.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502-7881aa1a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| -------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| NonLocalNet | R-50-D8 | 512x512 | 20000 | 6.4 | 21.21 | 76.20 | 77.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613.log.json) | +| NonLocalNet | R-101-D8 | 512x512 | 20000 | 9.8 | 14.01 | 78.15 | 78.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615.log.json) | +| NonLocalNet | R-50-D8 | 512x512 | 40000 | - | - | 76.65 | 77.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028.log.json) | +| NonLocalNet | R-101-D8 | 512x512 | 40000 | - | - | 78.27 | 79.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028.log.json) | diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_net.yml b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_net.yml new file mode 100644 index 0000000..bab38ce --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_net.yml @@ -0,0 +1,301 @@ +Collections: +- Name: NonLocalNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1711.07971 + Title: Non-local Neural Networks + README: configs/nonlocal_net/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/nl_head.py#L10 + Version: v0.17.0 + Converted From: + Code: https://github.com/facebookresearch/video-nonlocal-net +Models: +- Name: nonlocal_r50-d8_512x1024_40k_cityscapes + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 367.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.24 + Config: configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth +- Name: nonlocal_r101-d8_512x1024_40k_cityscapes + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 512.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.66 + Config: configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth +- Name: nonlocal_r50-d8_769x769_40k_cityscapes + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 657.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.33 + mIoU(ms+flip): 79.92 + Config: configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth +- Name: nonlocal_r101-d8_769x769_40k_cityscapes + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 952.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 12.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.57 + mIoU(ms+flip): 80.29 + Config: configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth +- Name: nonlocal_r50-d8_512x1024_80k_cityscapes + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.01 + Config: configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth +- Name: nonlocal_r101-d8_512x1024_80k_cityscapes + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.93 + Config: configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth +- Name: nonlocal_r50-d8_769x769_80k_cityscapes + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.05 + mIoU(ms+flip): 80.68 + Config: configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth +- Name: nonlocal_r101-d8_769x769_80k_cityscapes + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.4 + mIoU(ms+flip): 80.85 + Config: configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth +- Name: nonlocal_r50-d8_512x512_80k_ade20k + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 46.79 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.75 + mIoU(ms+flip): 42.05 + Config: configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth 
+- Name: nonlocal_r101-d8_512x512_80k_ade20k + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 71.58 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.9 + mIoU(ms+flip): 44.27 + Config: configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth +- Name: nonlocal_r50-d8_512x512_160k_ade20k + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.03 + mIoU(ms+flip): 43.04 + Config: configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth +- Name: nonlocal_r101-d8_512x512_160k_ade20k + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.63 + mIoU(ms+flip): 45.79 + Config: configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502-7881aa1a.pth +- Name: nonlocal_r50-d8_512x512_20k_voc12aug + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 47.15 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.4 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.2 + mIoU(ms+flip): 77.12 + Config: configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth +- Name: nonlocal_r101-d8_512x512_20k_voc12aug + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 71.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.8 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.15 + mIoU(ms+flip): 78.86 + Config: configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth +- Name: nonlocal_r50-d8_512x512_40k_voc12aug + In Collection: NonLocalNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.65 + mIoU(ms+flip): 77.47 + Config: configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth 
+- Name: nonlocal_r101-d8_512x512_40k_voc12aug + In Collection: NonLocalNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.27 + mIoU(ms+flip): 79.12 + Config: configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..ef7b06d --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..7a1e66c --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..df9c2ac --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..490f987 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..40d9190 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..0c6f60d --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git 
a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..23e6da7 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..0627e2b --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './nonlocal_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..9d4dc73 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..b0672b6 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..b1adfba --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..2e808d8 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..66b443a --- /dev/null +++ 
b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..8a7a2f5 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..75adef3 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..a0726c2 --- /dev/null +++ b/downstream/mmsegmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/nonlocal_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/ocrnet/README.md b/downstream/mmsegmentation/configs/ocrnet/README.md new file mode 100644 index 0000000..ef7312a --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/README.md @@ -0,0 +1,88 @@ +# OCRNet + +[Object-Contextual Representations for Semantic Segmentation](https://arxiv.org/abs/1909.11065) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +In this paper, we address the problem of semantic segmentation and focus on the context aggregation strategy for robust segmentation. Our motivation is that the label of a pixel is the category of the object that the pixel belongs to. We present a simple yet effective approach, object-contextual representations, characterizing a pixel by exploiting the representation of the corresponding object class. First, we construct object regions based on a feature map supervised by the ground-truth segmentation, and then compute the object region representations. 
Second, we compute the representation similarity between each pixel and each object region, and augment the representation of each pixel with an object contextual representation, which is a weighted aggregation of all the object region representations according to their similarities with the pixel. We empirically demonstrate that the proposed approach achieves competitive performance on six challenging semantic segmentation benchmarks: Cityscapes, ADE20K, LIP, PASCAL VOC 2012, PASCAL-Context and COCO-Stuff. Notably, we achieved the 2nd place on the Cityscapes leader-board with a single model. + + +
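The aggregation described in the abstract above reduces to two batched matrix products per image: soft object regions pool pixel features into region representations, and each pixel is then augmented with a similarity-weighted sum of those representations. The sketch below is an editorial, heavily simplified illustration of that step, not mmsegmentation's `OCRHead` (which adds learned key/query transforms, scaling, and a fusion conv); the function name and shapes are assumptions:

```python
import torch


def object_contextual_representation(feats, region_logits):
    """feats: (N, C, H, W) pixel features; region_logits: (N, K, H, W) soft regions."""
    n, c, h, w = feats.shape
    pixels = feats.flatten(2)                                  # (N, C, HW)
    regions = torch.softmax(region_logits.flatten(2), dim=-1)  # (N, K, HW): spatial weights per region
    region_repr = regions @ pixels.transpose(1, 2)             # (N, K, C): object region representations
    sim = torch.softmax(                                       # (N, HW, K): pixel-to-region similarity
        pixels.transpose(1, 2) @ region_repr.transpose(1, 2), dim=-1)
    context = (sim @ region_repr).transpose(1, 2).reshape(n, c, h, w)
    return torch.cat([feats, context], dim=1)                  # each pixel augmented with its context
```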
    + +## Citation + +```bibtex +@article{YuanW18, + title={Ocnet: Object context network for scene parsing}, + author={Yuhui Yuan and Jingdong Wang}, + booktitle={arXiv preprint arXiv:1809.00916}, + year={2018} +} + +@article{YuanCW20, + title={Object-Contextual Representations for Semantic Segmentation}, + author={Yuhui Yuan and Xilin Chen and Jingdong Wang}, + booktitle={ECCV}, + year={2020} +} +``` + +## Results and models + +### Cityscapes + +#### HRNet backbone + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| OCRNet | HRNetV2p-W18-Small | 512x1024 | 40000 | 3.5 | 10.45 | 74.30 | 75.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304.log.json) | +| OCRNet | HRNetV2p-W18 | 512x1024 | 40000 | 4.7 | 7.50 | 77.72 | 79.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320.log.json) | +| OCRNet | HRNetV2p-W48 | 512x1024 | 40000 | 8 | 4.22 | 80.58 | 81.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336-55b32491.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 77.16 | 78.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735.log.json) | +| OCRNet | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.57 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521.log.json) | +| OCRNet | HRNetV2p-W48 | 512x1024 | 80000 | - | - | 80.70 | 81.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752-9076bcdf.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 78.45 | 79.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005.log.json) | +| OCRNet | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 79.47 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001.log.json) | +| OCRNet | HRNetV2p-W48 | 512x1024 | 160000 | - | - | 81.35 | 82.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037.log.json) | + +#### ResNet backbone + +| Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| OCRNet | R-101-D8 | 512x1024 | 8 | 40000 | - | - | 80.09 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721-02ac0f13.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721.log.json) | +| OCRNet | R-101-D8 | 512x1024 | 16 | 40000 | 8.8 | 3.02 | 80.30 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726-db500f80.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726.log.json) | +| OCRNet | R-101-D8 | 512x1024 | 16 | 80000 | 8.8 | 3.02 | 80.81 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421-78688424.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| OCRNet | HRNetV2p-W18-Small | 512x512 | 80000 | 6.7 | 28.98 | 35.06 | 35.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600-e80b62af.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600.log.json) | +| OCRNet | HRNetV2p-W18 | 512x512 | 80000 | 7.9 | 18.93 | 37.79 | 39.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157-d173d83b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157.log.json) | +| OCRNet | HRNetV2p-W48 | 512x512 | 80000 | 11.2 | 16.99 | 43.00 | 44.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518-d168c2d1.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x512 | 160000 | - | - | 37.19 | 38.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505-8e913058.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505.log.json) | +| OCRNet | HRNetV2p-W18 | 512x512 | 160000 | - | - | 39.32 | 40.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940-d8fcd9d1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940.log.json) | +| OCRNet | HRNetV2p-W48 | 512x512 | 160000 | - | - | 43.25 | 44.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705-a073726d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| OCRNet | HRNetV2p-W18-Small | 512x512 | 20000 | 3.5 | 31.55 | 71.70 | 73.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913-02b04fcb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913.log.json) | +| OCRNet | HRNetV2p-W18 | 512x512 | 20000 | 4.7 | 19.91 | 74.75 | 77.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932-8954cbb7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932.log.json) | +| OCRNet | HRNetV2p-W48 | 512x512 | 20000 | 8.1 | 17.83 | 77.72 | 79.87 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932-9e82080a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932.log.json) | +| OCRNet | HRNetV2p-W18-Small | 512x512 | 40000 | - | - | 72.76 | 74.60 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025-42b587ac.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025.log.json) | +| OCRNet | HRNetV2p-W18 | 512x512 | 40000 | - | - | 74.98 | 77.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958-714302be.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958.log.json) | +| OCRNet | HRNetV2p-W48 | 512x512 | 40000 | - | - | 77.14 | 79.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958-255bc5ce.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958.log.json) | diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet.yml b/downstream/mmsegmentation/configs/ocrnet/ocrnet.yml new file mode 100644 index 0000000..d599f0a --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet.yml @@ -0,0 +1,438 @@ +Collections: +- Name: OCRNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/abs/1909.11065 + Title: Object-Contextual Representations for Semantic Segmentation + README: configs/ocrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ocr_head.py#L86 + Version: v0.17.0 + Converted From: + Code: https://github.com/openseg-group/OCNet.pytorch +Models: +- Name: ocrnet_hr18s_512x1024_40k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 95.69 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.3 + mIoU(ms+flip): 75.95 + Config: configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth +- Name: ocrnet_hr18_512x1024_40k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 40000 + inference time 
(ms/im): + - value: 133.33 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 4.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.72 + mIoU(ms+flip): 79.49 + Config: configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth +- Name: ocrnet_hr48_512x1024_40k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 236.97 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.58 + mIoU(ms+flip): 81.79 + Config: configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336-55b32491.pth +- Name: ocrnet_hr18s_512x1024_80k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.16 + mIoU(ms+flip): 78.66 + Config: configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth +- Name: ocrnet_hr18_512x1024_80k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.57 + mIoU(ms+flip): 80.46 + Config: configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth +- Name: ocrnet_hr48_512x1024_80k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.7 + mIoU(ms+flip): 81.87 + Config: configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752-9076bcdf.pth +- Name: ocrnet_hr18s_512x1024_160k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.45 + mIoU(ms+flip): 79.97 + Config: configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth +- Name: ocrnet_hr18_512x1024_160k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.47 + mIoU(ms+flip): 80.91 + Config: configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth +- Name: ocrnet_hr48_512x1024_160k_cityscapes + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,1024) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.35 + mIoU(ms+flip): 82.7 + Config: configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth +- Name: ocrnet_r101-d8_512x1024_40k_b8_cityscapes + In Collection: OCRNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.09 + Config: configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721-02ac0f13.pth +- Name: ocrnet_r101-d8_512x1024_40k_b16_cityscapes + In Collection: OCRNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 331.13 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.3 + Config: configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726-db500f80.pth +- Name: ocrnet_r101-d8_512x1024_80k_b16_cityscapes + In Collection: OCRNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 331.13 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.81 + Config: configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421-78688424.pth +- Name: ocrnet_hr18s_512x512_80k_ade20k + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 34.51 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.7 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 35.06 + mIoU(ms+flip): 35.8 + Config: configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600-e80b62af.pth +- Name: ocrnet_hr18_512x512_80k_ade20k + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 52.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.79 + mIoU(ms+flip): 39.16 + Config: 
configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157-d173d83b.pth +- Name: ocrnet_hr48_512x512_80k_ade20k + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 58.86 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 11.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.0 + mIoU(ms+flip): 44.3 + Config: configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518-d168c2d1.pth +- Name: ocrnet_hr18s_512x512_160k_ade20k + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.19 + mIoU(ms+flip): 38.4 + Config: configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505-8e913058.pth +- Name: ocrnet_hr18_512x512_160k_ade20k + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.32 + mIoU(ms+flip): 40.8 + Config: configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940-d8fcd9d1.pth +- Name: ocrnet_hr48_512x512_160k_ade20k + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.25 + mIoU(ms+flip): 44.88 + Config: configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705-a073726d.pth +- Name: ocrnet_hr18s_512x512_20k_voc12aug + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 31.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 71.7 + mIoU(ms+flip): 73.84 + Config: configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913-02b04fcb.pth +- Name: ocrnet_hr18_512x512_20k_voc12aug + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 50.23 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.7 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.75 + mIoU(ms+flip): 77.11 + Config: configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932-8954cbb7.pth +- Name: ocrnet_hr48_512x512_20k_voc12aug + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 56.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.1 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.72 + mIoU(ms+flip): 79.87 + Config: configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932-9e82080a.pth +- Name: ocrnet_hr18s_512x512_40k_voc12aug + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18-Small + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 72.76 + mIoU(ms+flip): 74.6 + Config: configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025-42b587ac.pth +- Name: ocrnet_hr18_512x512_40k_voc12aug + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W18 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.98 + mIoU(ms+flip): 77.4 + Config: configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958-714302be.pth +- Name: ocrnet_hr48_512x512_40k_voc12aug + In Collection: OCRNet + Metadata: + backbone: HRNetV2p-W48 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.14 + mIoU(ms+flip): 79.71 + Config: configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958-255bc5ce.pth diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py new file mode 100644 index 0000000..1c86eba --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..2c73b38 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..506ad93 --- /dev/null +++ 
b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py new file mode 100644 index 0000000..a3c86e1 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict(decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), +]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py new file mode 100644 index 0000000..ab9d644 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict(decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=21, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=21, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), +]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py new file mode 100644 index 0000000..df79a9c --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict(decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + 
num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=21, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=21, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), +]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py new file mode 100644 index 0000000..6ad6772 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict(decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), +]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py new file mode 100644 index 0000000..fc79097 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_512x1024_160k_cityscapes.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..923731f --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_512x1024_40k_cityscapes.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..be6bf16 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_512x1024_80k_cityscapes.py' +model = dict( + 
pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py new file mode 100644 index 0000000..81f3d5c --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_512x512_160k_ade20k.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py new file mode 100644 index 0000000..ceb9448 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_512x512_20k_voc12aug.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py new file mode 100644 index 0000000..70babc9 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_512x512_40k_voc12aug.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py new file mode 100644 index 0000000..36e7721 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = './ocrnet_hr18_512x512_80k_ade20k.py' +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w18_small', + backbone=dict( + extra=dict( + stage1=dict(num_blocks=(2, )), + stage2=dict(num_blocks=(2, 2)), + stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), + stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py new file mode 100644 index 0000000..c094391 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_512x1024_160k_cityscapes.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + 
type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..0aada9d --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_512x1024_40k_cityscapes.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..1b2e009 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_512x1024_80k_cityscapes.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py 
b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py new file mode 100644 index 0000000..3b3e8af --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_512x512_160k_ade20k.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py new file mode 100644 index 0000000..c2dd6d1 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_512x512_20k_voc12aug.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=21, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=21, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py new file mode 100644 index 0000000..89e6309 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_512x512_40k_voc12aug.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + 
dropout_ratio=-1, + num_classes=21, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=21, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py new file mode 100644 index 0000000..0497122 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py @@ -0,0 +1,39 @@ +_base_ = './ocrnet_hr18_512x512_80k_ade20k.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained='open-mmlab://msra/hrnetv2_w48', + backbone=dict( + extra=dict( + stage2=dict(num_channels=(48, 96)), + stage3=dict(num_channels=(48, 96, 192)), + stage4=dict(num_channels=(48, 96, 192, 384)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[48, 96, 192, 384], + channels=sum([48, 96, 192, 384]), + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + kernel_size=1, + num_convs=1, + norm_cfg=norm_cfg, + concat_input=False, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[48, 96, 192, 384], + channels=512, + ocr_channels=256, + input_transform='resize_concat', + in_index=(0, 1, 2, 3), + norm_cfg=norm_cfg, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ]) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py new file mode 100644 index 0000000..3dd70b7 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) +optimizer = dict(lr=0.02) +lr_config = dict(min_lr=2e-4) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py new file mode 100644 index 0000000..e34f343 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py b/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py new file mode 100644 index 0000000..33d96c7 --- /dev/null +++ b/downstream/mmsegmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + 
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) +optimizer = dict(lr=0.02) +lr_config = dict(min_lr=2e-4) diff --git a/downstream/mmsegmentation/configs/point_rend/README.md b/downstream/mmsegmentation/configs/point_rend/README.md new file mode 100644 index 0000000..34448e3 --- /dev/null +++ b/downstream/mmsegmentation/configs/point_rend/README.md @@ -0,0 +1,50 @@ +# PointRend + +[PointRend: Image Segmentation as Rendering](https://arxiv.org/abs/1912.08193) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We present a new method for efficient high-quality image segmentation of objects and scenes. By analogizing classical computer graphics methods for efficient rendering with over- and undersampling challenges faced in pixel labeling tasks, we develop a unique perspective of image segmentation as a rendering problem. From this vantage, we present the PointRend (Point-based Rendering) neural network module: a module that performs point-based segmentation predictions at adaptively selected locations based on an iterative subdivision algorithm. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. While many concrete implementations of the general idea are possible, we show that a simple design already achieves excellent results. Qualitatively, PointRend outputs crisp object boundaries in regions that are over-smoothed by previous methods. Quantitatively, PointRend yields significant gains on COCO and Cityscapes, for both instance and semantic segmentation. PointRend's efficiency enables output resolutions that are otherwise impractical in terms of memory or computation compared to existing approaches. Code has been made available at [this https URL](https://github.com/facebookresearch/detectron2/tree/main/projects/PointRend). + + +
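The key step the abstract alludes to is choosing *where* to refine. The snippet below is a minimal, self-contained PyTorch sketch of that idea (it is not the `PointHead` implementation referenced by the configs in this diff; the function names and the point budget are illustrative): it ranks positions of a coarse prediction by uncertainty, taken here as the margin between the top-2 class scores, and returns the most uncertain locations, which a point head would then re-predict from finer features.

```python
# Minimal sketch (assumed names, not the mmseg implementation) of uncertainty-based
# point selection: score each location of a coarse prediction by how uncertain it is,
# then pick the most uncertain points for refinement by a point head.
import torch


def uncertainty(coarse_logits: torch.Tensor) -> torch.Tensor:
    """Negative margin between the top-2 class scores; larger means more uncertain."""
    top2 = coarse_logits.topk(2, dim=1).values           # (N, 2, H, W)
    return -(top2[:, 0] - top2[:, 1])                     # (N, H, W)


def select_uncertain_points(coarse_logits: torch.Tensor, num_points: int) -> torch.Tensor:
    """Return flat indices (N, num_points) of the most uncertain spatial locations."""
    n, _, h, w = coarse_logits.shape
    scores = uncertainty(coarse_logits).view(n, h * w)    # (N, H*W)
    return scores.topk(num_points, dim=1).indices         # (N, num_points)


if __name__ == '__main__':
    logits = torch.randn(1, 19, 128, 256)                 # e.g. 19 Cityscapes classes
    idx = select_uncertain_points(logits, num_points=2048)
    print(idx.shape)                                       # torch.Size([1, 2048])
```

In the PointRend configs added in this diff, that refinement role falls to the second `decode_head` entry (`type='PointHead'`), which re-predicts the selected points on top of the coarse `FPNHead` output.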
    + +## Citation + +```bibtex +@inproceedings{kirillov2020pointrend, + title={Pointrend: Image segmentation as rendering}, + author={Kirillov, Alexander and Wu, Yuxin and He, Kaiming and Girshick, Ross}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition}, + pages={9799--9808}, + year={2020} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PointRend | R-50 | 512x1024 | 80000 | 3.1 | 8.48 | 76.47 | 78.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/point_rend/pointrend_r50_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x1024_80k_cityscapes/pointrend_r50_512x1024_80k_cityscapes_20200711_015821-bb1ff523.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x1024_80k_cityscapes/pointrend_r50_512x1024_80k_cityscapes-20200715_214714.log.json) | +| PointRend | R-101 | 512x1024 | 80000 | 4.2 | 7.00 | 78.30 | 79.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/point_rend/pointrend_r101_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x1024_80k_cityscapes/pointrend_r101_512x1024_80k_cityscapes_20200711_170850-d0ca84be.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x1024_80k_cityscapes/pointrend_r101_512x1024_80k_cityscapes-20200715_214824.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PointRend | R-50 | 512x512 | 160000 | 5.1 | 17.31 | 37.64 | 39.17 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/point_rend/pointrend_r50_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x512_160k_ade20k/pointrend_r50_512x512_160k_ade20k_20200807_232644-ac3febf2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x512_160k_ade20k/pointrend_r50_512x512_160k_ade20k-20200807_232644.log.json) | +| PointRend | R-101 | 512x512 | 160000 | 6.1 | 15.50 | 40.02 | 41.60 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/point_rend/pointrend_r101_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x512_160k_ade20k/pointrend_r101_512x512_160k_ade20k_20200808_030852-8834902a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x512_160k_ade20k/pointrend_r101_512x512_160k_ade20k-20200808_030852.log.json) | diff --git a/downstream/mmsegmentation/configs/point_rend/point_rend.yml b/downstream/mmsegmentation/configs/point_rend/point_rend.yml new file mode 100644 index 0000000..3abe81d --- /dev/null +++ b/downstream/mmsegmentation/configs/point_rend/point_rend.yml @@ -0,0 +1,104 @@ +Collections: +- Name: PointRend + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/1912.08193 + Title: 'PointRend: Image Segmentation as Rendering' + README: configs/point_rend/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/point_head.py#L36 + Version: v0.17.0 + Converted From: + Code: https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend +Models: +- Name: pointrend_r50_512x1024_80k_cityscapes + In Collection: PointRend + Metadata: + backbone: R-50 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 117.92 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.47 + mIoU(ms+flip): 78.13 + Config: configs/point_rend/pointrend_r50_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x1024_80k_cityscapes/pointrend_r50_512x1024_80k_cityscapes_20200711_015821-bb1ff523.pth +- Name: pointrend_r101_512x1024_80k_cityscapes + In Collection: PointRend + Metadata: + backbone: R-101 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 142.86 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 4.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.3 + mIoU(ms+flip): 79.97 + Config: configs/point_rend/pointrend_r101_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x1024_80k_cityscapes/pointrend_r101_512x1024_80k_cityscapes_20200711_170850-d0ca84be.pth +- Name: pointrend_r50_512x512_160k_ade20k + In Collection: PointRend + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 57.77 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.64 + mIoU(ms+flip): 39.17 + Config: configs/point_rend/pointrend_r50_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r50_512x512_160k_ade20k/pointrend_r50_512x512_160k_ade20k_20200807_232644-ac3febf2.pth +- Name: pointrend_r101_512x512_160k_ade20k + In Collection: PointRend + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 64.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + 
Dataset: ADE20K + Metrics: + mIoU: 40.02 + mIoU(ms+flip): 41.6 + Config: configs/point_rend/pointrend_r101_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/point_rend/pointrend_r101_512x512_160k_ade20k/pointrend_r101_512x512_160k_ade20k_20200808_030852-8834902a.pth diff --git a/downstream/mmsegmentation/configs/point_rend/pointrend_r101_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/point_rend/pointrend_r101_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..a8c14c8 --- /dev/null +++ b/downstream/mmsegmentation/configs/point_rend/pointrend_r101_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './pointrend_r50_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/point_rend/pointrend_r101_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/point_rend/pointrend_r101_512x512_160k_ade20k.py new file mode 100644 index 0000000..4d1f8c8 --- /dev/null +++ b/downstream/mmsegmentation/configs/point_rend/pointrend_r101_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './pointrend_r50_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/point_rend/pointrend_r50_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/point_rend/pointrend_r50_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..96cbaa4 --- /dev/null +++ b/downstream/mmsegmentation/configs/point_rend/pointrend_r50_512x1024_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/pointrend_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +lr_config = dict(warmup='linear', warmup_iters=200) diff --git a/downstream/mmsegmentation/configs/point_rend/pointrend_r50_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/point_rend/pointrend_r50_512x512_160k_ade20k.py new file mode 100644 index 0000000..db8c634 --- /dev/null +++ b/downstream/mmsegmentation/configs/point_rend/pointrend_r50_512x512_160k_ade20k.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/models/pointrend_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict(decode_head=[ + dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=-1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='PointHead', + in_channels=[256], + in_index=[0], + channels=256, + num_fcs=3, + coarse_pred_each_layer=True, + dropout_ratio=-1, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) +]) +lr_config = dict(warmup='linear', warmup_iters=200) diff --git a/downstream/mmsegmentation/configs/psanet/README.md b/downstream/mmsegmentation/configs/psanet/README.md new file mode 100644 index 0000000..fede7d4 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/README.md @@ -0,0 +1,67 @@ +# PSANet + +[PSANet: Point-wise Spatial Attention Network for Scene Parsing](https://openaccess.thecvf.com/content_ECCV_2018/papers/Hengshuang_Zhao_PSANet_Point-wise_Spatial_ECCV_2018_paper.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + 
+## Abstract + + + +We notice information flow in convolutional neural networks is restricted inside local neighborhood regions due to the physical design of convolutional filters, which limits the overall understanding of complex scenes. In this paper, we propose the point-wise spatial attention network (PSANet) to relax the local neighborhood constraint. Each position on the feature map is connected to all the other ones through a self-adaptively learned attention mask. Moreover, information propagation in bi-direction for scene parsing is enabled. Information at other positions can be collected to help the prediction of the current position and vice versa, information at the current position can be distributed to assist the prediction of other ones. Our proposed approach achieves top performance on various competitive scene parsing datasets, including ADE20K, PASCAL VOC 2012 and Cityscapes, demonstrating its effectiveness and generality. + + +
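As a rough illustration of the attention described above, here is a minimal PyTorch sketch of the "collect" direction only, under simplifying assumptions (a fixed input resolution and a dense HW x HW attention map predicted by a 1x1 convolution). It is not the `PSAHead` code used by mmsegmentation; the class name and layer sizes are made up for the example.

```python
# Minimal sketch (assumed names, simplified) of point-wise spatial attention, "collect"
# direction: every position aggregates features from all other positions through a
# predicted attention map. Assumes the input resolution is fixed at construction time.
import torch
import torch.nn as nn


class PointwiseSpatialAttention(nn.Module):
    def __init__(self, in_channels: int, height: int, width: int):
        super().__init__()
        self.hw = height * width
        # For each position, predict an attention vector over all H*W positions.
        self.attention = nn.Conv2d(in_channels, self.hw, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        assert h * w == self.hw, 'input resolution must match the constructed mask size'
        attn = self.attention(x).view(n, self.hw, h * w)   # (N, HW_src, HW_dst)
        attn = attn.softmax(dim=1)                          # normalize over source positions
        feats = x.view(n, c, h * w)                         # (N, C, HW)
        collected = torch.bmm(feats, attn)                  # weighted sum over sources, (N, C, HW)
        return collected.view(n, c, h, w)


if __name__ == '__main__':
    psa = PointwiseSpatialAttention(in_channels=64, height=32, width=32)
    y = psa(torch.randn(2, 64, 32, 32))
    print(y.shape)                                           # torch.Size([2, 64, 32, 32])
```

The module described in the paper additionally has a symmetric "distribute" branch and predicts the mask in an over-completed relative-position form rather than a dense HW x HW map; the sketch above only conveys the all-pairs connectivity idea.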
    + +## Citation + +```bibtex +@inproceedings{zhao2018psanet, + title={Psanet: Point-wise spatial attention network for scene parsing}, + author={Zhao, Hengshuang and Zhang, Yi and Liu, Shu and Shi, Jianping and Change Loy, Chen and Lin, Dahua and Jia, Jiaya}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + pages={267--283}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSANet | R-50-D8 | 512x1024 | 40000 | 7 | 3.17 | 77.63 | 79.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117-99fac37c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117.log.json) | +| PSANet | R-101-D8 | 512x1024 | 40000 | 10.5 | 2.20 | 79.14 | 80.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418-27b9cfa7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418.log.json) | +| PSANet | R-50-D8 | 769x769 | 40000 | 7.9 | 1.40 | 77.99 | 79.64 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717-d5365506.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717.log.json) | +| PSANet | R-101-D8 | 769x769 | 40000 | 11.9 | 0.98 | 78.43 | 80.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107-997da1e6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107.log.json) | +| PSANet | R-50-D8 | 512x1024 | 80000 | - | - | 77.24 | 78.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842-ab60a24f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842.log.json) | +| PSANet | R-101-D8 | 512x1024 | 80000 | - | - | 79.31 | 80.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823-0f73a169.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823.log.json) | +| PSANet | R-50-D8 | 769x769 | 80000 | - | - | 79.31 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134-fe42f49e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134.log.json) | +| PSANet | R-101-D8 | 769x769 | 80000 | - | - | 79.69 | 80.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550-7665827b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSANet | R-50-D8 | 512x512 | 80000 | 9 | 18.91 | 41.14 | 41.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141-835e4b97.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141.log.json) | +| PSANet | R-101-D8 | 512x512 | 80000 | 12.5 | 13.13 | 43.80 | 44.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117-1fab60d4.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117.log.json) | +| PSANet | R-50-D8 | 512x512 | 160000 | - | - | 41.67 | 42.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258-148077dd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258.log.json) | +| PSANet | R-101-D8 | 512x512 | 160000 | - | - | 43.74 | 45.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537-dbfa564c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSANet | R-50-D8 | 512x512 | 20000 | 6.9 | 18.24 | 76.39 | 77.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413-2f1bbaa1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413.log.json) | +| PSANet | R-101-D8 | 512x512 | 20000 | 10.4 | 12.63 | 77.91 | 79.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624-946fef11.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624.log.json) | +| PSANet | R-50-D8 | 512x512 | 40000 | - | - | 76.30 | 77.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946-f596afb5.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946.log.json) | +| PSANet | R-101-D8 | 512x512 | 40000 | - | - | 77.73 | 79.05 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946-1f560f9e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946.log.json) | diff --git a/downstream/mmsegmentation/configs/psanet/psanet.yml b/downstream/mmsegmentation/configs/psanet/psanet.yml new file mode 100644 index 0000000..353c890 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet.yml @@ -0,0 +1,305 @@ +Collections: +- Name: PSANet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://openaccess.thecvf.com/content_ECCV_2018/papers/Hengshuang_Zhao_PSANet_Point-wise_Spatial_ECCV_2018_paper.pdf + Title: 'PSANet: Point-wise Spatial Attention Network for Scene Parsing' + README: configs/psanet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/psa_head.py#L18 + Version: v0.17.0 + Converted From: + Code: https://github.com/hszhao/PSANet +Models: +- Name: psanet_r50-d8_512x1024_40k_cityscapes + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 315.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.63 + mIoU(ms+flip): 79.04 + Config: configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117-99fac37c.pth +- Name: psanet_r101-d8_512x1024_40k_cityscapes + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 454.55 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 10.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.14 + mIoU(ms+flip): 80.19 + Config: configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418-27b9cfa7.pth +- Name: psanet_r50-d8_769x769_40k_cityscapes + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 714.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 7.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.99 + mIoU(ms+flip): 79.64 + Config: configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717-d5365506.pth +- Name: psanet_r101-d8_769x769_40k_cityscapes + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 1020.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 11.9 + 
Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.43 + mIoU(ms+flip): 80.26 + Config: configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107-997da1e6.pth +- Name: psanet_r50-d8_512x1024_80k_cityscapes + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.24 + mIoU(ms+flip): 78.69 + Config: configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842-ab60a24f.pth +- Name: psanet_r101-d8_512x1024_80k_cityscapes + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.31 + mIoU(ms+flip): 80.53 + Config: configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823-0f73a169.pth +- Name: psanet_r50-d8_769x769_80k_cityscapes + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.31 + mIoU(ms+flip): 80.91 + Config: configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134-fe42f49e.pth +- Name: psanet_r101-d8_769x769_80k_cityscapes + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.69 + mIoU(ms+flip): 80.89 + Config: configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550-7665827b.pth +- Name: psanet_r50-d8_512x512_80k_ade20k + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 52.88 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.14 + mIoU(ms+flip): 41.91 + Config: configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141-835e4b97.pth +- Name: psanet_r101-d8_512x512_80k_ade20k + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 76.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.8 + mIoU(ms+flip): 44.75 + Config: configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117-1fab60d4.pth +- Name: psanet_r50-d8_512x512_160k_ade20k + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.67 + mIoU(ms+flip): 42.95 + Config: configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258-148077dd.pth +- Name: psanet_r101-d8_512x512_160k_ade20k + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.74 + mIoU(ms+flip): 45.38 + Config: configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537-dbfa564c.pth +- Name: psanet_r50-d8_512x512_20k_voc12aug + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 54.82 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.9 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.39 + mIoU(ms+flip): 77.34 + Config: configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413-2f1bbaa1.pth +- Name: psanet_r101-d8_512x512_20k_voc12aug + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 79.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.4 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.91 + mIoU(ms+flip): 79.3 + Config: configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624-946fef11.pth +- Name: psanet_r50-d8_512x512_40k_voc12aug + In Collection: PSANet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.3 + mIoU(ms+flip): 77.35 + Config: configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946-f596afb5.pth +- Name: psanet_r101-d8_512x512_40k_voc12aug + In Collection: PSANet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.73 + mIoU(ms+flip): 79.05 + Config: configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946-1f560f9e.pth diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py 
b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..69d212f --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..bc25d6a --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..7f6795e --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..1a3c434 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..f62eef9 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..f8865a7 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..ffc99f0 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './psanet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..6a9efc5 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = 
'./psanet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..6671fcb --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..a441013 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..9c6364e --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(mask_size=(66, 66), num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..af06cb6 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..803c42d --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..0141a6d --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(mask_size=(66, 66), num_classes=150), + auxiliary_head=dict(num_classes=150)) diff --git 
a/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..690f8b5 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..0966b47 --- /dev/null +++ b/downstream/mmsegmentation/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/psanet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/pspnet/README.md b/downstream/mmsegmentation/configs/pspnet/README.md new file mode 100644 index 0000000..9770df0 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/README.md @@ -0,0 +1,177 @@ +# PSPNet + +[Pyramid Scene Parsing Network](https://arxiv.org/abs/1612.01105) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Scene parsing is challenging for unrestricted open vocabulary and diverse scenes. In this paper, we exploit the capability of global context information by different-region-based context aggregation through our pyramid pooling module together with the proposed pyramid scene parsing network (PSPNet). Our global prior representation is effective to produce good quality results on the scene parsing task, while PSPNet provides a superior framework for pixel-level prediction tasks. The proposed approach achieves state-of-the-art performance on various datasets. It came first in ImageNet scene parsing challenge 2016, PASCAL VOC 2012 benchmark and Cityscapes benchmark. A single PSPNet yields new record of mIoU accuracy 85.4% on PASCAL VOC 2012 and accuracy 80.2% on Cityscapes. + + +
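For intuition, the pyramid pooling module (PPM) at the heart of PSPNet can be sketched in PyTorch as below. This is a simplified, self-contained illustration, not the actual `PSPHead` in `mmseg/models/decode_heads/psp_head.py`; the 2048/512 channel sizes and the classic 1/2/3/6 bin sizes follow the paper, everything else is illustrative.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class PyramidPoolingModule(nn.Module):
    """Simplified PPM: multi-scale pooled context concatenated with the input."""

    def __init__(self, in_channels=2048, branch_channels=512, bins=(1, 2, 3, 6)):
        super().__init__()
        self.branches = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(bin_size),  # pool to bin_size x bin_size
                nn.Conv2d(in_channels, branch_channels, 1, bias=False),
                nn.BatchNorm2d(branch_channels),
                nn.ReLU(inplace=True),
            )
            for bin_size in bins
        ])

    def forward(self, x):
        h, w = x.shape[2:]
        outs = [x]
        for branch in self.branches:
            pooled = branch(x)                   # (N, C_b, bin, bin)
            outs.append(F.interpolate(pooled, size=(h, w),
                                      mode='bilinear', align_corners=False))
        # global prior at every scale fused with the original feature map
        return torch.cat(outs, dim=1)            # (N, C + 4*C_b, H, W)


feats = torch.randn(2, 2048, 64, 128)   # stage-4 features at 1/8 of a 512x1024 crop
print(PyramidPoolingModule()(feats).shape)  # torch.Size([2, 4096, 64, 128])
```

In the full model this concatenated tensor is passed through a final convolution block to produce per-pixel class scores; the "D8" in the backbone names below denotes a dilated ResNet with output stride 8, i.e. features at 1/8 of the crop size as in the example above.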
    + +
    + +## Citation + +```bibtex +@inproceedings{zhao2017pspnet, + title={Pyramid Scene Parsing Network}, + author={Zhao, Hengshuang and Shi, Jianping and Qi, Xiaojuan and Wang, Xiaogang and Jia, Jiaya}, + booktitle={CVPR}, + year={2017} +} +``` + +```bibtex +@article{wightman2021resnet, + title={Resnet strikes back: An improved training procedure in timm}, + author={Wightman, Ross and Touvron, Hugo and J{\'e}gou, Herv{\'e}}, + journal={arXiv preprint arXiv:2110.00476}, + year={2021} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ |---------------| --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-50-D8 | 512x1024 | 40000 | 6.1 | 4.07 | 77.85 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | +| PSPNet | R-101-D8 | 512x1024 | 40000 | 9.6 | 2.68 | 78.34 | 79.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | +| PSPNet | R-50-D8 | 769x769 | 40000 | 6.9 | 1.76 | 78.26 | 79.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725-86638686.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725.log.json) | +| PSPNet | R-101-D8 | 769x769 | 40000 | 10.9 | 1.15 | 79.08 | 80.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753-61c6f5be.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753.log.json) | +| PSPNet | R-18-D8 | 512x1024 | 80000 | 1.7 | 15.71 | 74.87 | 76.04 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes_20201225_021458-09ffa746.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes-20201225_021458.log.json) | +| PSPNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131.log.json) | +| PSPNet | R-50b-D8 rsb | 512x1024 | 80000 | 6.2 | 3.82 | 78.47 | 79.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220315_123238-588c30be.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220315_123238.log.json) | +| PSPNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.76 | 81.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211.log.json) | +| PSPNet (FP16) | R-101-D8 | 512x1024 | 80000 | 5.34 | 8.77 | 79.46 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes/pspnet_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230919-a0875e5c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes/pspnet_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230919.log.json) | +| PSPNet | R-18-D8 | 769x769 | 80000 | 1.9 | 6.20 | 75.90 | 77.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes_20201225_021458-3deefc62.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes-20201225_021458.log.json) | +| PSPNet | R-50-D8 | 769x769 | 80000 | - | - | 79.59 | 80.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121-5ccf03dd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121.log.json) | +| PSPNet | R-101-D8 | 769x769 | 80000 | - | - | 79.77 | 81.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055-dba412fa.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055.log.json) | +| PSPNet | R-18b-D8 | 512x1024 | 80000 | 1.5 | 16.28 | 74.23 | 75.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes_20201226_063116-26928a60.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes-20201226_063116.log.json) | +| PSPNet | R-50b-D8 | 512x1024 | 80000 | 6.0 | 4.30 | 78.22 | 79.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes_20201225_094315-6344287a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes-20201225_094315.log.json) | +| PSPNet | R-101b-D8 | 512x1024 | 80000 | 9.5 | 2.76 | 79.69 | 80.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | +| PSPNet | R-18b-D8 | 769x769 | 80000 | 1.7 | 6.41 | 74.92 | 76.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes_20201226_080942-bf98d186.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes-20201226_080942.log.json) | +| PSPNet | R-50b-D8 | 769x769 | 80000 | 6.8 | 1.88 | 78.50 | 79.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes_20201225_094316-4c643cf6.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes-20201225_094316.log.json) | +| PSPNet | R-101b-D8 | 769x769 | 80000 | 10.8 | 1.17 | 78.87 | 80.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes_20201226_171823-f0e7c293.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes-20201226_171823.log.json) | +| PSPNet | R-50-D32 | 512x1024 | 80000 | 3.0 | 15.21 | 73.88 | 76.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes/pspnet_r50-d32_512x1024_80k_cityscapes_20220316_224840-9092b254.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes/pspnet_r50-d32_512x1024_80k_cityscapes_20220316_224840.log.json) | +| PSPNet | R-50b-D32 rsb | 512x1024 | 80000 | 3.1 | 16.08 | 74.09 | 77.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220316_141229-dd9c9610.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220316_141229.log.json) | +| PSPNet | R-50b-D32 | 512x1024 | 80000 | 2.9 | 15.41 | 72.61 | 75.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes/pspnet_r50b-d32_512x1024_80k_cityscapes_20220311_152152-23bcaf8c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes/pspnet_r50b-d32_512x1024_80k_cityscapes_20220311_152152.log.json) | + + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.53 | 41.13 | 41.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 12 | 15.30 | 43.57 | 44.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423-b6e782f0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423.log.json) | +| PSPNet | R-50-D8 | 512x512 | 160000 | - | - | 42.48 | 43.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358.log.json) | +| PSPNet | R-101-D8 | 512x512 | 160000 | - | - | 44.39 | 45.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-50-D8 | 512x512 | 20000 | 6.1 | 23.59 | 76.78 | 77.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958-ed5dfbd9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958.log.json) | +| PSPNet | R-101-D8 | 512x512 | 20000 | 9.6 | 15.02 | 78.47 | 79.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003-4aef3c9a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003.log.json) | +| PSPNet | R-50-D8 | 512x512 | 40000 | - | - | 77.29 | 78.48 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222-ae9c1b8c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) | +| PSPNet | R-101-D8 | 512x512 | 40000 | - | - | 78.52 | 79.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222-bc933b18.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222.log.json) | + +### Pascal Context + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-101-D8 | 480x480 | 40000 | 8.8 | 9.68 | 46.60 | 47.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context_20200911_211210-bf0f5d7c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context-20200911_211210.log.json) | +| PSPNet | R-101-D8 | 480x480 | 80000 | - | - | 46.03 | 47.15 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context_20200911_190530-c86d6233.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context-20200911_190530.log.json) | + +### Pascal Context 59 + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet 
| R-101-D8 | 480x480 | 40000 | - | - | 52.02 | 53.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59_20210416_114524-86d44cd4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59-20210416_114524.log.json) | +| PSPNet | R-101-D8 | 480x480 | 80000 | - | - | 52.47 | 53.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59_20210416_114418-fa6caaa2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59-20210416_114418.log.json) | + +### Dark Zurich and Nighttime Driving + +We support evaluation results on these two datasets using models above trained on Cityscapes training set. + + |Method|Backbone |Training Dataset |Test Dataset |mIoU |config| evaluation checkpoint| + |------ |------ |------ |----- |-----|-----|-----| + |PSPNet|R-50-D8 |Cityscapes Training set |Dark Zurich |10.91|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_40k_dark.py)|[model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | + |PSPNet|R-50-D8 |Cityscapes Training set |Nighttime Driving|23.02|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_40k_night_driving.py)| [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | + |PSPNet|R-50-D8 |Cityscapes Training set |Cityscapes Validation set|77.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | + |PSPNet|R-101-D8 |Cityscapes Training set |Dark Zurich |10.16|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_40k_dark.py)| [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | + 
|PSPNet|R-101-D8 |Cityscapes Training set |Nighttime Driving|20.25|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_40k_night_driving.py)| [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | + |PSPNet|R-101-D8 |Cityscapes Training set |Cityscapes Validation set|78.34|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | + |PSPNet|R-101b-D8|Cityscapes Training set |Dark Zurich |15.54|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_512x1024_80k_dark.py)| [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | + |PSPNet|R-101b-D8|Cityscapes Training set |Nighttime Driving|22.25|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_512x1024_80k_night_driving.py)| [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | + |PSPNet|R-101b-D8|Cityscapes Training set |Cityscapes Validation set|79.69|[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | + +### COCO-Stuff 10k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-50-D8 | 512x512 | 20000 | 9.6 | 20.5 | 35.69 | 36.62 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k_20210820_203258-b88df27f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k_20210820_203258.log.json) | +| PSPNet | R-101-D8 | 512x512 | 20000 | 13.2 | 11.1 | 37.26 | 38.52 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k_20210820_232135-76aae482.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k_20210820_232135.log.json) | +| PSPNet | R-50-D8 | 512x512 | 40000 | - | - | 36.33 | 37.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_030857-92e2902b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_030857.log.json) | +| PSPNet | R-101-D8 | 512x512 | 40000 | - | - | 37.76 | 38.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_014022-831aec95.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_014022.log.json) | + +### COCO-Stuff 164k + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| --------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PSPNet | R-50-D8 | 512x512 | 80000 | 9.6 | 20.5 | 38.80 | 39.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034-0e41b2db.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 13.2 | 11.1 | 40.34 | 
40.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034-7eb41789.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034.log.json) | +| PSPNet | R-50-D8 | 512x512 | 160000 | - | - | 39.64 | 39.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004-51276a57.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004.log.json) | +| PSPNet | R-101-D8 | 512x512 | 160000 | - | - | 41.28 | 41.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004-4af9621b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004.log.json) | +| PSPNet | R-50-D8 | 512x512 | 320000 | - | - | 40.53 | 40.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004-be9610cc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004.log.json) | +| PSPNet | R-101-D8 | 512x512 | 320000 | - | - | 41.95 | 42.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004-72220c60.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004.log.json) | + +### LoveDA + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-18-D8 | 512x512 | 80000 | 1.45 | 26.87 | 48.62 | 47.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x512_80k_loveda/pspnet_r18-d8_512x512_80k_loveda_20211105_052100-b97697f1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x512_80k_loveda/pspnet_r18-d8_512x512_80k_loveda_20211105_052100.log.json) | +| PSPNet | R-50-D8 | 512x512 | 80000 | 6.14 | 6.60 | 50.46 | 50.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_loveda/pspnet_r50-d8_512x512_80k_loveda_20211104_155728-88610f9f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_loveda/pspnet_r50-d8_512x512_80k_loveda_20211104_155728.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 9.61 | 4.58 | 51.86 | 51.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_80k_loveda.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_loveda/pspnet_r101-d8_512x512_80k_loveda_20211104_153212-1c06c6a8.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_loveda/pspnet_r101-d8_512x512_80k_loveda_20211104_153212.log.json) | + +### Potsdam + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-18-D8 | 512x512 | 80000 | 1.50 | 85.12 | 77.09 | 78.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam/pspnet_r18-d8_4x4_512x512_80k_potsdam_20211220_125612-7cd046e1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam/pspnet_r18-d8_4x4_512x512_80k_potsdam_20211220_125612.log.json) | +| PSPNet | R-50-D8 | 512x512 | 80000 | 6.14 | 30.21 | 78.12 | 78.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam/pspnet_r50-d8_4x4_512x512_80k_potsdam_20211219_043541-2dd5fe67.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam/pspnet_r50-d8_4x4_512x512_80k_potsdam_20211219_043541.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 9.61 | 19.40 | 78.62 | 79.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam/pspnet_r101-d8_4x4_512x512_80k_potsdam_20211220_125612-aed036c4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam/pspnet_r101-d8_4x4_512x512_80k_potsdam_20211220_125612.log.json) | + +### Vaihingen + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-18-D8 | 512x512 | 80000 | 1.45 | 85.06 | 71.46 | 73.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen/pspnet_r18-d8_4x4_512x512_80k_vaihingen_20211228_160355-52a8a6f6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen/pspnet_r18-d8_4x4_512x512_80k_vaihingen_20211228_160355.log.json) | +| PSPNet | R-50-D8 | 512x512 | 80000 | 6.14 | 30.29 | 72.36 | 73.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen/pspnet_r50-d8_4x4_512x512_80k_vaihingen_20211228_160355-382f8f5b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen/pspnet_r50-d8_4x4_512x512_80k_vaihingen_20211228_160355.log.json) | +| PSPNet | R-101-D8 | 512x512 | 80000 | 9.61 | 19.97 | 72.61 | 74.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen/pspnet_r101-d8_4x4_512x512_80k_vaihingen_20211231_230806-8eba0a09.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen/pspnet_r101-d8_4x4_512x512_80k_vaihingen_20211231_230806.log.json) | + +### iSAID + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | 
-------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PSPNet | R-18-D8 | 896x896 | 80000 | 4.52 | 26.91 | 60.22 | 61.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid/pspnet_r18-d8_4x4_896x896_80k_isaid_20220110_180526-e84c0b6a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid/pspnet_r18-d8_4x4_896x896_80k_isaid_20220110_180526.log.json) | +| PSPNet | R-50-D8 | 896x896 | 80000 | 16.58 | 8.88 | 65.36 | 66.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid/pspnet_r50-d8_4x4_896x896_80k_isaid_20220110_180629-1f21dc32.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid/pspnet_r50-d8_4x4_896x896_80k_isaid_20220110_180629.log.json) | + +Note: + +- `FP16` means mixed precision (FP16) is adopted in training. +- `896x896` is the crop size of the iSAID dataset, following the implementation of [PointFlow: Flowing Semantics Through Points for Aerial Image Segmentation](https://arxiv.org/pdf/2103.06564.pdf). +- `rsb` is short for 'ResNet Strikes Back'. +- The `b` in `R-50b` denotes ResNetV1b, the standard ResNet backbone. In MMSegmentation, the default backbone is ResNetV1c, which usually performs better on semantic segmentation tasks.
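+For reference, the config files and checkpoints linked in the tables above can be loaded directly with the MMSegmentation (0.x) Python API. The snippet below is a minimal sketch, assuming `mmsegmentation` and `mmcv-full` are installed; the config/checkpoint pair is taken from the iSAID table above, and `demo.png` is a placeholder image path.
+
+```python
+from mmseg.apis import inference_segmentor, init_segmentor
+
+# Config and checkpoint as listed in the iSAID table above.
+config_file = 'configs/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid.py'
+checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid/pspnet_r50-d8_4x4_896x896_80k_isaid_20220110_180629-1f21dc32.pth'
+
+# Build the model from the config and load the released weights
+# (remote checkpoint URLs are resolved and cached automatically).
+model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
+
+# Run single-image inference; the result is a list containing one
+# per-pixel label map for the input image.
+result = inference_segmentor(model, 'demo.png')
+```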
diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet.yml b/downstream/mmsegmentation/configs/pspnet/pspnet.yml new file mode 100644 index 0000000..2a1fa88 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet.yml @@ -0,0 +1,1077 @@ +Collections: +- Name: PSPNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + - Pascal Context + - Pascal Context 59 + - Dark Zurich and Nighttime Driving + - COCO-Stuff 10k + - COCO-Stuff 164k + - LoveDA + - Potsdam + - Vaihingen + - iSAID + Paper: + URL: https://arxiv.org/abs/1612.01105 + Title: Pyramid Scene Parsing Network + README: configs/pspnet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/psp_head.py#L63 + Version: v0.17.0 + Converted From: + Code: https://github.com/hszhao/PSPNet +Models: +- Name: pspnet_r50-d8_512x1024_40k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 245.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.85 + mIoU(ms+flip): 79.18 + Config: configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth +- Name: pspnet_r101-d8_512x1024_40k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 373.13 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.34 + mIoU(ms+flip): 79.74 + Config: configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth +- Name: pspnet_r50-d8_769x769_40k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 568.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.26 + mIoU(ms+flip): 79.88 + Config: configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725-86638686.pth +- Name: pspnet_r101-d8_769x769_40k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 869.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.08 + mIoU(ms+flip): 80.28 + Config: configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753-61c6f5be.pth +- Name: pspnet_r18-d8_512x1024_80k_cityscapes + In Collection: 
PSPNet + Metadata: + backbone: R-18-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 63.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.87 + mIoU(ms+flip): 76.04 + Config: configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes_20201225_021458-09ffa746.pth +- Name: pspnet_r50-d8_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.55 + mIoU(ms+flip): 79.79 + Config: configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth +- Name: pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50b-D8 rsb + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 261.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.47 + mIoU(ms+flip): 79.45 + Config: configs/pspnet/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220315_123238-588c30be.pth +- Name: pspnet_r101-d8_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.76 + mIoU(ms+flip): 81.01 + Config: configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth +- Name: pspnet_r101-d8_fp16_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 114.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (512,1024) + Training Memory (GB): 5.34 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.46 + Config: configs/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes/pspnet_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230919-a0875e5c.pth +- Name: pspnet_r18-d8_769x769_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 161.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.9 + mIoU(ms+flip): 77.86 + Config: configs/pspnet/pspnet_r18-d8_769x769_80k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes_20201225_021458-3deefc62.pth +- Name: pspnet_r50-d8_769x769_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.59 + mIoU(ms+flip): 80.69 + Config: configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121-5ccf03dd.pth +- Name: pspnet_r101-d8_769x769_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.77 + mIoU(ms+flip): 81.06 + Config: configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055-dba412fa.pth +- Name: pspnet_r18b-d8_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-18b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 61.43 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 1.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.23 + mIoU(ms+flip): 75.79 + Config: configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes_20201226_063116-26928a60.pth +- Name: pspnet_r50b-d8_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 232.56 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.22 + mIoU(ms+flip): 79.46 + Config: configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes_20201225_094315-6344287a.pth +- Name: pspnet_r101b-d8_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-101b-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 362.32 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 9.5 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.69 + mIoU(ms+flip): 80.79 + Config: configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth +- Name: pspnet_r18b-d8_769x769_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-18b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 156.01 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 1.7 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.92 + 
mIoU(ms+flip): 76.9 + Config: configs/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes_20201226_080942-bf98d186.pth +- Name: pspnet_r50b-d8_769x769_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 531.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 6.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.5 + mIoU(ms+flip): 79.96 + Config: configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes_20201225_094316-4c643cf6.pth +- Name: pspnet_r101b-d8_769x769_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-101b-D8 + crop size: (769,769) + lr schd: 80000 + inference time (ms/im): + - value: 854.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 10.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.87 + mIoU(ms+flip): 80.04 + Config: configs/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes_20201226_171823-f0e7c293.pth +- Name: pspnet_r50-d32_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 65.75 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.88 + mIoU(ms+flip): 76.85 + Config: configs/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes/pspnet_r50-d32_512x1024_80k_cityscapes_20220316_224840-9092b254.pth +- Name: pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50b-D32 rsb + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 62.19 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.09 + mIoU(ms+flip): 77.18 + Config: configs/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes_20220316_141229-dd9c9610.pth +- Name: pspnet_r50b-d32_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: R-50b-D32 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 64.89 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 72.61 + mIoU(ms+flip): 75.51 + Config: configs/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes/pspnet_r50b-d32_512x1024_80k_cityscapes_20220311_152152-23bcaf8c.pth +- Name: pspnet_r50-d8_512x512_80k_ade20k + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 42.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 41.13 + mIoU(ms+flip): 41.94 + Config: configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth +- Name: pspnet_r101-d8_512x512_80k_ade20k + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 65.36 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 12.0 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.57 + mIoU(ms+flip): 44.35 + Config: configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423-b6e782f0.pth +- Name: pspnet_r50-d8_512x512_160k_ade20k + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.48 + mIoU(ms+flip): 43.44 + Config: configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth +- Name: pspnet_r101-d8_512x512_160k_ade20k + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.39 + mIoU(ms+flip): 45.35 + Config: configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth +- Name: pspnet_r50-d8_512x512_20k_voc12aug + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 42.39 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 76.78 + mIoU(ms+flip): 77.61 + Config: configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958-ed5dfbd9.pth +- Name: pspnet_r101-d8_512x512_20k_voc12aug + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 66.58 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.47 + mIoU(ms+flip): 79.25 + Config: configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003-4aef3c9a.pth +- Name: pspnet_r50-d8_512x512_40k_voc12aug + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.29 + mIoU(ms+flip): 78.48 + Config: configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222-ae9c1b8c.pth +- Name: pspnet_r101-d8_512x512_40k_voc12aug + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 78.52 + mIoU(ms+flip): 79.57 + Config: configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222-bc933b18.pth +- Name: pspnet_r101-d8_480x480_40k_pascal_context + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + inference time (ms/im): + - value: 103.31 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (480,480) + Training Memory (GB): 8.8 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 46.6 + mIoU(ms+flip): 47.78 + Config: configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context_20200911_211210-bf0f5d7c.pth +- Name: pspnet_r101-d8_480x480_80k_pascal_context + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context + Metrics: + mIoU: 46.03 + mIoU(ms+flip): 47.15 + Config: configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context_20200911_190530-c86d6233.pth +- Name: pspnet_r101-d8_480x480_40k_pascal_context_59 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.02 + mIoU(ms+flip): 53.54 + Config: configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59_20210416_114524-86d44cd4.pth +- Name: pspnet_r101-d8_480x480_80k_pascal_context_59 + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (480,480) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal Context 59 + Metrics: + mIoU: 52.47 + mIoU(ms+flip): 53.99 + Config: configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59_20210416_114418-fa6caaa2.pth +- Name: pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 
20000 + inference time (ms/im): + - value: 48.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 35.69 + mIoU(ms+flip): 36.62 + Config: configs/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k_20210820_203258-b88df27f.pth +- Name: pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 90.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 37.26 + mIoU(ms+flip): 38.52 + Config: configs/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k_20210820_232135-76aae482.pth +- Name: pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 36.33 + mIoU(ms+flip): 37.24 + Config: configs/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k_20210821_030857-92e2902b.pth +- Name: pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 10k + Metrics: + mIoU: 37.76 + mIoU(ms+flip): 38.86 + Config: configs/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k_20210821_014022-831aec95.pth +- Name: pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 48.78 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.6 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 38.8 + mIoU(ms+flip): 39.19 + Config: configs/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034-0e41b2db.pth +- Name: pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 90.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 40.34 + mIoU(ms+flip): 40.79 + Config: configs/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k_20210707_152034-7eb41789.pth +- Name: pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 39.64 + mIoU(ms+flip): 39.97 + Config: configs/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004-51276a57.pth +- Name: pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.28 + mIoU(ms+flip): 41.66 + Config: configs/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k_20210707_152004-4af9621b.pth +- Name: pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 320000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 40.53 + mIoU(ms+flip): 40.75 + Config: configs/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004-be9610cc.pth +- Name: pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 320000 + Results: + - Task: Semantic Segmentation + Dataset: COCO-Stuff 164k + Metrics: + mIoU: 41.95 + mIoU(ms+flip): 42.42 + Config: configs/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k_20210707_152004-72220c60.pth +- Name: pspnet_r18-d8_512x512_80k_loveda + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 37.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.45 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 48.62 + mIoU(ms+flip): 47.57 + Config: configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x512_80k_loveda/pspnet_r18-d8_512x512_80k_loveda_20211105_052100-b97697f1.pth +- Name: pspnet_r50-d8_512x512_80k_loveda + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 151.52 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.14 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 50.46 + mIoU(ms+flip): 50.19 + Config: configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_loveda/pspnet_r50-d8_512x512_80k_loveda_20211104_155728-88610f9f.pth +- Name: pspnet_r101-d8_512x512_80k_loveda + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 218.34 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.61 + Results: + - Task: Semantic Segmentation + Dataset: LoveDA + Metrics: + mIoU: 51.86 + mIoU(ms+flip): 51.34 + Config: configs/pspnet/pspnet_r101-d8_512x512_80k_loveda.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_loveda/pspnet_r101-d8_512x512_80k_loveda_20211104_153212-1c06c6a8.pth +- Name: pspnet_r18-d8_4x4_512x512_80k_potsdam + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 11.75 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.5 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 77.09 + mIoU(ms+flip): 78.3 + Config: configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam/pspnet_r18-d8_4x4_512x512_80k_potsdam_20211220_125612-7cd046e1.pth +- Name: pspnet_r50-d8_4x4_512x512_80k_potsdam + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 33.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.14 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.12 + mIoU(ms+flip): 78.98 + Config: configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam/pspnet_r50-d8_4x4_512x512_80k_potsdam_20211219_043541-2dd5fe67.pth +- Name: pspnet_r101-d8_4x4_512x512_80k_potsdam + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 51.55 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.61 + Results: + - Task: Semantic Segmentation + Dataset: Potsdam + Metrics: + mIoU: 78.62 + mIoU(ms+flip): 79.47 + Config: configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam/pspnet_r101-d8_4x4_512x512_80k_potsdam_20211220_125612-aed036c4.pth +- Name: pspnet_r18-d8_4x4_512x512_80k_vaihingen + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 11.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.45 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 71.46 + mIoU(ms+flip): 73.36 + Config: configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen/pspnet_r18-d8_4x4_512x512_80k_vaihingen_20211228_160355-52a8a6f6.pth +- Name: pspnet_r50-d8_4x4_512x512_80k_vaihingen + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (512,512) + lr 
schd: 80000 + inference time (ms/im): + - value: 33.01 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.14 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.36 + mIoU(ms+flip): 73.75 + Config: configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen/pspnet_r50-d8_4x4_512x512_80k_vaihingen_20211228_160355-382f8f5b.pth +- Name: pspnet_r101-d8_4x4_512x512_80k_vaihingen + In Collection: PSPNet + Metadata: + backbone: R-101-D8 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 50.08 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.61 + Results: + - Task: Semantic Segmentation + Dataset: Vaihingen + Metrics: + mIoU: 72.61 + mIoU(ms+flip): 74.18 + Config: configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen/pspnet_r101-d8_4x4_512x512_80k_vaihingen_20211231_230806-8eba0a09.pth +- Name: pspnet_r18-d8_4x4_896x896_80k_isaid + In Collection: PSPNet + Metadata: + backbone: R-18-D8 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 37.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 4.52 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 60.22 + mIoU(ms+flip): 61.25 + Config: configs/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid/pspnet_r18-d8_4x4_896x896_80k_isaid_20220110_180526-e84c0b6a.pth +- Name: pspnet_r50-d8_4x4_896x896_80k_isaid + In Collection: PSPNet + Metadata: + backbone: R-50-D8 + crop size: (896,896) + lr schd: 80000 + inference time (ms/im): + - value: 112.61 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (896,896) + Training Memory (GB): 16.58 + Results: + - Task: Semantic Segmentation + Dataset: iSAID + Metrics: + mIoU: 65.36 + mIoU(ms+flip): 66.48 + Config: configs/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid/pspnet_r50-d8_4x4_896x896_80k_isaid_20220110_180629-1f21dc32.pth diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py new file mode 100644 index 0000000..0b5a990 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_480x480_40k_pascal_context.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..081cb37 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_480x480_40k_pascal_context_59.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git 
a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context.py new file mode 100644 index 0000000..fda9110 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_480x480_80k_pascal_context.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..795c51f --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_480x480_80k_pascal_context_59.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam.py new file mode 100644 index 0000000..98343dd --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4x4_512x512_80k_potsdam.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..fd79492 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_4x4_512x512_80k_vaihingen.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..38fee11 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_dark.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_dark.py new file mode 100644 index 0000000..1057639 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_dark.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x1024_40k_dark.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_night_driving.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_night_driving.py new file mode 100644 index 0000000..0ecb930 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_night_driving.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x1024_40k_night_driving.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py 
b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..9931a07 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..6107b41 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..2221b20 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..15f578b --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k.py new file mode 100644 index 0000000..7ae2061 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k.py new file mode 100644 index 0000000..a448496 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k.py new file mode 100644 index 0000000..90512b8 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k.py new file mode 100644 index 0000000..36aa443 --- 
/dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k.py new file mode 100644 index 0000000..fdddec4 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..fb7c3d5 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_loveda.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_loveda.py new file mode 100644 index 0000000..03c0251 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_loveda.py @@ -0,0 +1,6 @@ +_base_ = './pspnet_r50-d8_512x512_80k_loveda.py' +model = dict( + backbone=dict( + depth=101, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet101_v1c'))) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..c6e7e58 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..59b8c6d --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..c71b7f6 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = './pspnet_r101-d8_512x1024_80k_cityscapes.py' +# fp16 settings +optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) 
+# fp16 placeholder +fp16 = dict() diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..ab8a3d3 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_dark.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_dark.py new file mode 100644 index 0000000..49231d8 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_dark.py @@ -0,0 +1,4 @@ +_base_ = './pspnet_r50-d8_512x1024_80k_dark.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_night_driving.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_night_driving.py new file mode 100644 index 0000000..c3ed2f1 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_night_driving.py @@ -0,0 +1,4 @@ +_base_ = './pspnet_r50-d8_512x1024_80k_night_driving.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..1a7cb70 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet101', + backbone=dict(type='ResNet', depth=101)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam.py new file mode 100644 index 0000000..be9dc72 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_potsdam.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4x4_512x512_80k_potsdam.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..2cb6922 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_4x4_512x512_80k_vaihingen.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid.py new file mode 100644 index 0000000..4f6f9ab --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_4x4_896x896_80k_isaid.py @@ -0,0 +1,9 @@ 
+_base_ = './pspnet_r50-d8_4x4_896x896_80k_isaid.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..d914f93 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py new file mode 100644 index 0000000..dbb832b --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py @@ -0,0 +1,11 @@ +_base_ = './pspnet_r50-d8_512x512_80k_loveda.py' +model = dict( + backbone=dict( + depth=18, + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..5893e66 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r18-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnet18_v1c', + backbone=dict(depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..abeeedf --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..284be6d --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' +model = dict( + pretrained='torchvision://resnet18', + backbone=dict(type='ResNet', depth=18), + decode_head=dict( + in_channels=512, + channels=128, + ), + auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..6bfeef3 --- /dev/null +++ 
b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d32_512x1024_80k_cityscapes.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict(backbone=dict(dilations=(1, 1, 2, 4), strides=(1, 2, 2, 2))) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes.py new file mode 100644 index 0000000..0283876 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d32_rsb-pretrain_512x1024_adamw_80k_cityscapes.py @@ -0,0 +1,25 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + pretrained=None, + backbone=dict( + type='ResNet', + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint), + dilations=(1, 1, 2, 4), + strides=(1, 2, 2, 2))) + +optimizer = dict(_delete_=True, type='AdamW', lr=0.0005, weight_decay=0.05) +optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2)) +# learning policy +lr_config = dict( + _delete_=True, + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[60000, 72000], + by_epoch=False) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context.py new file mode 100644 index 0000000..30abe46 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context_59.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context_59.py new file mode 100644 index 0000000..88041c6 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_80k_pascal_context.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_80k_pascal_context.py new file mode 100644 index 0000000..09e96da --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_80k_pascal_context.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_context.py', 
'../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=60), + auxiliary_head=dict(num_classes=60), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_80k_pascal_context_59.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_80k_pascal_context_59.py new file mode 100644 index 0000000..d4065ec --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_480x480_80k_pascal_context_59.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=59), + auxiliary_head=dict(num_classes=59), + test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) +optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam.py new file mode 100644 index 0000000..f78faff --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_potsdam.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/potsdam.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=6), auxiliary_head=dict(num_classes=6)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen.py new file mode 100644 index 0000000..dfdd294 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_512x512_80k_vaihingen.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/vaihingen.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=6), auxiliary_head=dict(num_classes=6)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid.py new file mode 100644 index 0000000..ef7eb99 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/isaid.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=16), auxiliary_head=dict(num_classes=16)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..5deb587 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_dark.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_dark.py new file mode 100644 index 0000000..9abb511 
--- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_dark.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1920, 1080), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + test=dict( + type='DarkZurichDataset', + data_root='data/dark_zurich/', + img_dir='rgb_anon/val/night/GOPR0356', + ann_dir='gt/val/night/GOPR0356', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_night_driving.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_night_driving.py new file mode 100644 index 0000000..195aeea --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_night_driving.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1920, 1080), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + test=dict( + type='NightDrivingDataset', + data_root='data/NighttimeDrivingTest/', + img_dir='leftImg8bit/test/night', + ann_dir='gtCoarse_daytime_trainvaltest/test/night', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..4e99728 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_dark.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_dark.py new file mode 100644 index 0000000..2f16171 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_dark.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1920, 1080), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + 
dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + test=dict( + type='DarkZurichDataset', + data_root='data/dark_zurich/', + img_dir='rgb_anon/val/night/GOPR0356', + ann_dir='gt/val/night/GOPR0356', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_night_driving.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_night_driving.py new file mode 100644 index 0000000..ecc5d99 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_night_driving.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1920, 1080), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + test=dict( + type='NightDrivingDataset', + data_root='data/NighttimeDrivingTest/', + img_dir='leftImg8bit/test/night', + ann_dir='gtCoarse_daytime_trainvaltest/test/night', + pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..8658457 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py new file mode 100644 index 0000000..cd88154 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py new file mode 100644 index 0000000..f0c20c1 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py new file mode 100644 index 0000000..e1f8887 --- 
/dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py new file mode 100644 index 0000000..6cd94f9 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/coco-stuff10k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py new file mode 100644 index 0000000..32b3281 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_320k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py new file mode 100644 index 0000000..c792bb4 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/coco-stuff10k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py new file mode 100644 index 0000000..7f7bc64 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py new file mode 100644 index 0000000..52efdf5 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py 
b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py new file mode 100644 index 0000000..830af48 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/loveda.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=7), auxiliary_head=dict(num_classes=7)) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py new file mode 100644 index 0000000..145cadb --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..23a81eb --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes.py new file mode 100644 index 0000000..a8a80bf --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa +model = dict( + pretrained=None, + backbone=dict( + type='ResNet', + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) + +optimizer = dict(_delete_=True, type='AdamW', lr=0.0005, weight_decay=0.05) +optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2)) +# learning policy +lr_config = dict( + _delete_=True, + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=0.001, + step=[60000, 72000], + by_epoch=False) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..7f4f6c9 --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + 
pretrained='torchvision://resnet50', + backbone=dict(type='ResNet', dilations=(1, 1, 2, 4), strides=(1, 2, 2, 2))) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..946bf4f --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py new file mode 100644 index 0000000..b6087dc --- /dev/null +++ b/downstream/mmsegmentation/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' +model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/downstream/mmsegmentation/configs/resnest/README.md b/downstream/mmsegmentation/configs/resnest/README.md new file mode 100644 index 0000000..fbabf98 --- /dev/null +++ b/downstream/mmsegmentation/configs/resnest/README.md @@ -0,0 +1,53 @@ +# ResNeSt + +[ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. The source code for complete system and pretrained models are publicly available. + + +
    + +## Citation + +```bibtex +@article{zhang2020resnest, +title={ResNeSt: Split-Attention Networks}, +author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, +journal={arXiv preprint arXiv:2004.08955}, +year={2020} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| FCN | S-101-D8 | 512x1024 | 80000 | 11.4 | 2.39 | 77.56 | 78.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes_20200807_140631-f8d155b3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes-20200807_140631.log.json) | +| PSPNet | S-101-D8 | 512x1024 | 80000 | 11.8 | 2.52 | 78.57 | 79.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes-20200807_140631.log.json) | +| DeepLabV3 | S-101-D8 | 512x1024 | 80000 | 11.9 | 1.88 | 79.67 | 80.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes_20200807_144429-b73c4270.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes-20200807_144429.log.json) | +| DeepLabV3+ | S-101-D8 | 512x1024 | 80000 | 13.2 | 2.36 | 79.62 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes_20200807_144429-1239eb43.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes-20200807_144429.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ---------- | -------- | --------- | ------: | 
-------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FCN | S-101-D8 | 512x512 | 160000 | 14.2 | 12.86 | 45.62 | 46.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k_20200807_145416-d3160329.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k-20200807_145416.log.json) | +| PSPNet | S-101-D8 | 512x512 | 160000 | 14.2 | 13.02 | 45.44 | 46.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k-20200807_145416.log.json) | +| DeepLabV3 | S-101-D8 | 512x512 | 160000 | 14.6 | 9.28 | 45.71 | 46.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k_20200807_144503-17ecabe5.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k-20200807_144503.log.json) | +| DeepLabV3+ | S-101-D8 | 512x512 | 160000 | 16.2 | 11.96 | 46.47 | 47.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k_20200807_144503-27b26226.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k-20200807_144503.log.json) | diff --git a/downstream/mmsegmentation/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..f983986 --- /dev/null +++ b/downstream/mmsegmentation/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = '../deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/downstream/mmsegmentation/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..e3924ad --- /dev/null +++ 
b/downstream/mmsegmentation/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = '../deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/downstream/mmsegmentation/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..69bef72 --- /dev/null +++ b/downstream/mmsegmentation/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/downstream/mmsegmentation/configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..d51bccb --- /dev/null +++ b/downstream/mmsegmentation/configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/downstream/mmsegmentation/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..33fa025 --- /dev/null +++ b/downstream/mmsegmentation/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = '../fcn/fcn_r101-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/downstream/mmsegmentation/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..dcee8c2 --- /dev/null +++ b/downstream/mmsegmentation/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = '../fcn/fcn_r101-d8_512x512_160k_ade20k.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/downstream/mmsegmentation/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..9737849 --- /dev/null +++ b/downstream/mmsegmentation/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = '../pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/downstream/mmsegmentation/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py new file mode 100644 index 0000000..6a622ea --- /dev/null +++ 
b/downstream/mmsegmentation/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = '../pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' +model = dict( + pretrained='open-mmlab://resnest101', + backbone=dict( + type='ResNeSt', + stem_channels=128, + radix=2, + reduction_factor=4, + avg_down_stride=True)) diff --git a/downstream/mmsegmentation/configs/resnest/resnest.yml b/downstream/mmsegmentation/configs/resnest/resnest.yml new file mode 100644 index 0000000..b2ca259 --- /dev/null +++ b/downstream/mmsegmentation/configs/resnest/resnest.yml @@ -0,0 +1,177 @@ +Models: +- Name: fcn_s101-d8_512x1024_80k_cityscapes + In Collection: FCN + Metadata: + backbone: S-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 418.41 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.56 + mIoU(ms+flip): 78.98 + Config: configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes_20200807_140631-f8d155b3.pth +- Name: pspnet_s101-d8_512x1024_80k_cityscapes + In Collection: PSPNet + Metadata: + backbone: S-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 396.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.57 + mIoU(ms+flip): 79.19 + Config: configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth +- Name: deeplabv3_s101-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3 + Metadata: + backbone: S-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 531.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 11.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.67 + mIoU(ms+flip): 80.51 + Config: configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes_20200807_144429-b73c4270.pth +- Name: deeplabv3plus_s101-d8_512x1024_80k_cityscapes + In Collection: DeepLabV3+ + Metadata: + backbone: S-101-D8 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 423.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 13.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.62 + mIoU(ms+flip): 80.27 + Config: configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes_20200807_144429-1239eb43.pth +- Name: fcn_s101-d8_512x512_160k_ade20k + In Collection: FCN + Metadata: + backbone: S-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 77.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: 
(512,512) + Training Memory (GB): 14.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.62 + mIoU(ms+flip): 46.16 + Config: configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k_20200807_145416-d3160329.pth +- Name: pspnet_s101-d8_512x512_160k_ade20k + In Collection: PSPNet + Metadata: + backbone: S-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 76.8 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 14.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.44 + mIoU(ms+flip): 46.28 + Config: configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth +- Name: deeplabv3_s101-d8_512x512_160k_ade20k + In Collection: DeepLabV3 + Metadata: + backbone: S-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 107.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 14.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.71 + mIoU(ms+flip): 46.59 + Config: configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k_20200807_144503-17ecabe5.pth +- Name: deeplabv3plus_s101-d8_512x512_160k_ade20k + In Collection: DeepLabV3+ + Metadata: + backbone: S-101-D8 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 83.61 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 16.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.47 + mIoU(ms+flip): 47.27 + Config: configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k_20200807_144503-27b26226.pth diff --git a/downstream/mmsegmentation/configs/segformer/README.md b/downstream/mmsegmentation/configs/segformer/README.md new file mode 100644 index 0000000..790c0f5 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/README.md @@ -0,0 +1,107 @@ +# SegFormer + +[SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +We present SegFormer, a simple, efficient yet powerful semantic segmentation framework which unifies Transformers with lightweight multilayer perception (MLP) decoders. SegFormer has two appealing features: 1) SegFormer comprises a novel hierarchically structured Transformer encoder which outputs multiscale features. It does not need positional encoding, thereby avoiding the interpolation of positional codes which leads to decreased performance when the testing resolution differs from training. 2) SegFormer avoids complex decoders. The proposed MLP decoder aggregates information from different layers, and thus combining both local attention and global attention to render powerful representations. 
We show that this simple and lightweight design is the key to efficient segmentation on Transformers. We scale our approach up to obtain a series of models from SegFormer-B0 to SegFormer-B5, reaching significantly better performance and efficiency than previous counterparts. For example, SegFormer-B4 achieves 50.3% mIoU on ADE20K with 64M parameters, being 5x smaller and 2.2% better than the previous best method. Our best model, SegFormer-B5, achieves 84.0% mIoU on Cityscapes validation set and shows excellent zero-shot robustness on Cityscapes-C. Code will be released at: [this http URL](https://github.com/NVlabs/SegFormer). + + +
    + +## Citation + +```bibtex +@article{xie2021segformer, + title={SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers}, + author={Xie, Enze and Wang, Wenhai and Yu, Zhiding and Anandkumar, Anima and Alvarez, Jose M and Luo, Ping}, + journal={arXiv preprint arXiv:2105.15203}, + year={2021} +} +``` + +## Usage + +To use other repositories' pre-trained models, it is necessary to convert keys. + +We provide a script [`mit2mmseg.py`](../../tools/model_converters/mit2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/NVlabs/SegFormer) to MMSegmentation style. + +```shell +python tools/model_converters/mit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ---: | ------------- | ------ | -------- | +|Segformer | MIT-B0 | 512x512 | 160000 | 2.1 | 51.32 | 37.41 | 38.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b0_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20210726_101530-8ffa8fda.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20210726_101530.log.json) | +|Segformer | MIT-B1 | 512x512 | 160000 | 2.6 | 47.66 | 40.97 | 42.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20210726_112106-d70e859d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20210726_112106.log.json) | +|Segformer | MIT-B2 | 512x512 | 160000 | 3.6 | 30.88 | 45.58 | 47.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20210726_112103-cbd414ac.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20210726_112103.log.json) | +|Segformer | MIT-B3 | 512x512 | 160000 | 4.8 | 22.11 | 47.82 | 48.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b3_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20210726_081410-962b98d2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20210726_081410.log.json) | +|Segformer | MIT-B4 | 512x512 | 160000 | 6.1 | 15.45 | 48.46 | 49.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b4_512x512_160k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20210728_183055-7f509d7d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20210728_183055.log.json) | +|Segformer | MIT-B5 | 512x512 | 160000 | 7.2 | 11.89 | 49.13 | 50.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b5_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235-94cedf59.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235.log.json) | +|Segformer | MIT-B5 | 640x640 | 160000 | 11.5 | 11.30 | 49.62 | 50.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b5_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20210801_121243-41d2845b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20210801_121243.log.json) | + +Evaluation with AlignedResize: + + | Method | Backbone | Crop Size | Lr schd | mIoU | mIoU(ms+flip) | + | ------ | -------- | --------- | ------: | ---: | ------------- | + |Segformer | MIT-B0 | 512x512 | 160000 | 38.1 | 38.57 | + |Segformer | MIT-B1 | 512x512 | 160000 | 41.64 | 42.76 | + |Segformer | MIT-B2 | 512x512 | 160000 | 46.53 | 47.49 | + |Segformer | MIT-B3 | 512x512 | 160000 | 48.46 | 49.14 | + |Segformer | MIT-B4 | 512x512 | 160000 | 49.34 | 50.29 | + |Segformer | MIT-B5 | 512x512 | 160000 | 50.08 | 50.72 | + |Segformer | MIT-B5 | 640x640 | 160000 | 50.58 | 50.8 | + +We replace `AlignedResize` in the original implementation with `Resize + ResizeToMultiple`. If you want to test by +using `AlignedResize`, you can change the dataset pipeline like this: + +```python +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + # resize image to multiple of 32, improve SegFormer by 0.5-1.0 mIoU. + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +``` + +### Cityscapes + +The lower fps result is caused by the sliding window inference scheme (window size: 1024x1024). 
+ +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ---: | ------------- | ------ | -------- | +|Segformer | MIT-B0 | 1024x1024 | 160000 | 3.64 | 4.74 | 76.54 | 78.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857-e7f88502.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857.log.json) | +|Segformer | MIT-B1 | 1024x1024 | 160000 | 4.49 | 4.3 | 78.56 | 79.73 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213-655c7b3f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213.log.json) | +|Segformer | MIT-B2 | 1024x1024 | 160000 | 7.42 | 3.36 | 81.08 | 82.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205.log.json) | +|Segformer | MIT-B3 | 1024x1024 | 160000 | 10.86 | 2.53 | 81.94 | 83.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823-a8f8a177.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823.log.json) | +|Segformer | MIT-B4 | 1024x1024 | 160000 | 15.07 | 1.88 | 81.89 | 83.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709-07f6c333.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709.log.json) | +|Segformer | MIT-B5 | 1024x1024 | 160000 | 18.00 | 1.39 | 82.25 | 83.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934-87a052ec.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934.log.json) | diff --git a/downstream/mmsegmentation/configs/segformer/segformer.yml b/downstream/mmsegmentation/configs/segformer/segformer.yml new file mode 100644 index 0000000..d28cb16 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer.yml @@ -0,0 +1,303 @@ +Collections: +- Name: Segformer + Metadata: + Training Data: + - ADE20K + - Cityscapes + Paper: + URL: https://arxiv.org/abs/2105.15203 + Title: 'SegFormer: Simple and Efficient Design for Semantic Segmentation with + Transformers' + README: configs/segformer/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/mit.py#L246 + Version: v0.17.0 + Converted From: + Code: https://github.com/NVlabs/SegFormer +Models: +- Name: segformer_mit-b0_512x512_160k_ade20k + In Collection: Segformer + Metadata: + backbone: MIT-B0 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 19.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.41 + mIoU(ms+flip): 38.34 + Config: configs/segformer/segformer_mit-b0_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20210726_101530-8ffa8fda.pth +- Name: segformer_mit-b1_512x512_160k_ade20k + In Collection: Segformer + Metadata: + backbone: MIT-B1 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 20.98 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.97 + mIoU(ms+flip): 42.54 + Config: configs/segformer/segformer_mit-b1_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20210726_112106-d70e859d.pth +- Name: segformer_mit-b2_512x512_160k_ade20k + In Collection: Segformer + Metadata: + backbone: MIT-B2 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 32.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 3.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.58 + mIoU(ms+flip): 47.03 + Config: configs/segformer/segformer_mit-b2_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20210726_112103-cbd414ac.pth +- Name: segformer_mit-b3_512x512_160k_ade20k + In Collection: Segformer + Metadata: + backbone: MIT-B3 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 45.23 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.82 + mIoU(ms+flip): 48.81 + Config: 
configs/segformer/segformer_mit-b3_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20210726_081410-962b98d2.pth +- Name: segformer_mit-b4_512x512_160k_ade20k + In Collection: Segformer + Metadata: + backbone: MIT-B4 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 64.72 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.46 + mIoU(ms+flip): 49.76 + Config: configs/segformer/segformer_mit-b4_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20210728_183055-7f509d7d.pth +- Name: segformer_mit-b5_512x512_160k_ade20k + In Collection: Segformer + Metadata: + backbone: MIT-B5 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 84.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.13 + mIoU(ms+flip): 50.22 + Config: configs/segformer/segformer_mit-b5_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235-94cedf59.pth +- Name: segformer_mit-b5_640x640_160k_ade20k + In Collection: Segformer + Metadata: + backbone: MIT-B5 + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 88.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 11.5 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.62 + mIoU(ms+flip): 50.36 + Config: configs/segformer/segformer_mit-b5_640x640_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20210801_121243-41d2845b.pth +- Name: segformer_mit-b0_8x1_1024x1024_160k_cityscapes + In Collection: Segformer + Metadata: + backbone: MIT-B0 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 210.97 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 3.64 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.54 + mIoU(ms+flip): 78.22 + Config: configs/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857-e7f88502.pth +- Name: segformer_mit-b1_8x1_1024x1024_160k_cityscapes + In Collection: Segformer + Metadata: + backbone: MIT-B1 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 232.56 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 4.49 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.56 + mIoU(ms+flip): 79.73 + Config: configs/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213-655c7b3f.pth +- Name: segformer_mit-b2_8x1_1024x1024_160k_cityscapes + In Collection: Segformer + Metadata: + backbone: MIT-B2 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 297.62 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 7.42 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.08 + mIoU(ms+flip): 82.18 + Config: configs/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth +- Name: segformer_mit-b3_8x1_1024x1024_160k_cityscapes + In Collection: Segformer + Metadata: + backbone: MIT-B3 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 395.26 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 10.86 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.94 + mIoU(ms+flip): 83.14 + Config: configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823-a8f8a177.pth +- Name: segformer_mit-b4_8x1_1024x1024_160k_cityscapes + In Collection: Segformer + Metadata: + backbone: MIT-B4 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 531.91 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 15.07 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 81.89 + mIoU(ms+flip): 83.38 + Config: configs/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709-07f6c333.pth +- Name: segformer_mit-b5_8x1_1024x1024_160k_cityscapes + In Collection: Segformer + Metadata: + backbone: MIT-B5 + crop size: (1024,1024) + lr schd: 160000 + inference time (ms/im): + - value: 719.42 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1024,1024) + Training Memory (GB): 18.0 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 82.25 + mIoU(ms+flip): 83.48 + Config: configs/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934-87a052ec.pth diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b0_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b0_512x512_160k_ade20k.py new file mode 100644 index 0000000..03065a7 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b0_512x512_160k_ade20k.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/models/segformer_mit-b0.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] + +model = dict( + 
pretrained='pretrain/mit_b0.pth', decode_head=dict(num_classes=150)) + +# optimizer +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'head': dict(lr_mult=10.) + })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..6444500 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/segformer_mit-b0.py', + '../_base_/datasets/cityscapes_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b0.pth')), + test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768))) + +# optimizer +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.), + 'head': dict(lr_mult=10.) + })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +data = dict(samples_per_gpu=1, workers_per_gpu=1) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b1_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b1_512x512_160k_ade20k.py new file mode 100644 index 0000000..5fce602 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b1_512x512_160k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b1.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[2, 2, 2, 2]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..a93e33b --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py @@ -0,0 +1,7 @@ +_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b1.pth'), + embed_dims=64), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b2_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b2_512x512_160k_ade20k.py new file mode 100644 index 0000000..afb24b0 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b2_512x512_160k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b2.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 4, 6, 3]), + 
decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..fab6be2 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b2.pth'), + embed_dims=64, + num_layers=[3, 4, 6, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b3_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b3_512x512_160k_ade20k.py new file mode 100644 index 0000000..52348f6 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b3_512x512_160k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b3.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 4, 18, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..479ce04 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b3.pth'), + embed_dims=64, + num_layers=[3, 4, 18, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b4_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b4_512x512_160k_ade20k.py new file mode 100644 index 0000000..7b50b75 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b4_512x512_160k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b4.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 8, 27, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..808a1eb --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b4.pth'), + embed_dims=64, + num_layers=[3, 8, 27, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_512x512_160k_ade20k.py new file mode 100644 index 0000000..5212fb1 --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_512x512_160k_ade20k.py @@ -0,0 
+1,8 @@ +_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py'] + +# model settings +model = dict( + pretrained='pretrain/mit_b5.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 6, 40, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_640x640_160k_ade20k.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_640x640_160k_ade20k.py new file mode 100644 index 0000000..d21774c --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_640x640_160k_ade20k.py @@ -0,0 +1,44 @@ +_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py'] + +# dataset settings +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (640, 640) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 640), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 640), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# model settings +model = dict( + pretrained='pretrain/mit_b5.pth', + backbone=dict( + embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 6, 40, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py new file mode 100644 index 0000000..1c9422d --- /dev/null +++ b/downstream/mmsegmentation/configs/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py @@ -0,0 +1,8 @@ +_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py'] + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b5.pth'), + embed_dims=64, + num_layers=[3, 6, 40, 3]), + decode_head=dict(in_channels=[64, 128, 320, 512])) diff --git a/downstream/mmsegmentation/configs/segmenter/README.md b/downstream/mmsegmentation/configs/segmenter/README.md new file mode 100644 index 0000000..c072956 --- /dev/null +++ b/downstream/mmsegmentation/configs/segmenter/README.md @@ -0,0 +1,73 @@ +# Segmenter + +[Segmenter: Transformer for Semantic Segmentation](https://arxiv.org/abs/2105.05633) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Image segmentation is often ambiguous at the level of individual image patches and requires contextual information to reach label consensus. In this paper we introduce Segmenter, a transformer model for semantic segmentation. In contrast to convolution-based methods, our approach allows to model global context already at the first layer and throughout the network. 
We build on the recent Vision Transformer (ViT) and extend it to semantic segmentation. To do so, we rely on the output embeddings corresponding to image patches and obtain class labels from these embeddings with a point-wise linear decoder or a mask transformer decoder. We leverage models pre-trained for image classification and show that we can fine-tune them on moderate sized datasets available for semantic segmentation. The linear decoder allows to obtain excellent results already, but the performance can be further improved by a mask transformer generating class masks. We conduct an extensive ablation study to show the impact of the different parameters, in particular the performance is better for large models and small patch sizes. Segmenter attains excellent results for semantic segmentation. It outperforms the state of the art on both ADE20K and Pascal Context datasets and is competitive on Cityscapes. + + +
    + +
+ +```bibtex +@article{strudel2021Segmenter, + title={Segmenter: Transformer for Semantic Segmentation}, + author={Strudel, Robin and Garcia, Ricardo and Laptev, Ivan and Schmid, Cordelia}, + journal={arXiv preprint arXiv:2105.05633}, + year={2021} +} +``` + + +## Usage + +We have provided pretrained models converted from [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106). + +If you want to convert keys on your own to use the pre-trained ViT model from [Segmenter](https://github.com/rstrudel/segmenter), we also provide a script [`vitjax2mmseg.py`](../../tools/model_converters/vitjax2mmseg.py) in the tools directory to convert the keys of models from [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106) to MMSegmentation style. + +```shell +python tools/model_converters/vitjax2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/vitjax2mmseg.py \ +Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz \ +pretrain/vit_tiny_p16_384.pth +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +In our default setting, the pretrained models and their corresponding [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106) models are listed below: + + | pretrained models | original models | + | ------ | -------- | + |vit_tiny_p16_384.pth | ['vit_tiny_patch16_384'](https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz) | + |vit_small_p16_384.pth | ['vit_small_patch16_384'](https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz) | + |vit_base_p16_384.pth | ['vit_base_patch16_384'](https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz) | + |vit_large_p16_384.pth | ['vit_large_patch16_384'](https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz) | + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ---------- | ------- | -------- | --- | --- | -------------- | ----- | +| Segmenter Mask | ViT-T_16 | 512x512 | 160000 | 1.21 | 27.98 | 39.99 | 40.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706-ffcf7509.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | +| Segmenter Linear | ViT-S_16 | 512x512 | 160000 | 1.78 | 28.07 | 45.75 | 46.82 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713-39658c46.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713.log.json) | +| Segmenter Mask | ViT-S_16 | 512x512 | 160000 | 2.03 | 24.80 | 46.19 | 47.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706-511bb103.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | +| Segmenter Mask | ViT-B_16 |512x512 | 160000 | 4.20 | 13.20 | 49.60 | 51.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706-bc533b08.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | +| Segmenter Mask | ViT-L_16 |640x640 | 160000 | 16.56 | 2.62 | 52.16 | 53.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k/segmenter_vit-l_mask_8x1_512x512_160k_ade20k_20220105_162750-7ef345be.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k/segmenter_vit-l_mask_8x1_512x512_160k_ade20k_20220105_162750.log.json) | diff --git a/downstream/mmsegmentation/configs/segmenter/segmenter.yml b/downstream/mmsegmentation/configs/segmenter/segmenter.yml new file mode 100644 index 0000000..dc6e68d --- /dev/null +++ b/downstream/mmsegmentation/configs/segmenter/segmenter.yml @@ -0,0 +1,125 @@ +Collections: +- Name: Segmenter + Metadata: + Training Data: + - ADE20K + Paper: + URL: https://arxiv.org/abs/2105.05633 + Title: 'Segmenter: Transformer for Semantic Segmentation' + README: configs/segmenter/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.21.0/mmseg/models/decode_heads/segmenter_mask_head.py#L15 + Version: v0.21.0 + Converted From: + Code: https://github.com/rstrudel/segmenter +Models: +- Name: segmenter_vit-t_mask_8x1_512x512_160k_ade20k + In Collection: Segmenter + Metadata: + backbone: ViT-T_16 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 35.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.21 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.99 + mIoU(ms+flip): 40.83 + Config: configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706-ffcf7509.pth +- Name: segmenter_vit-s_linear_8x1_512x512_160k_ade20k + In Collection: Segmenter + Metadata: + backbone: ViT-S_16 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 35.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 1.78 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.75 + mIoU(ms+flip): 46.82 + Config: configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713-39658c46.pth +- Name: segmenter_vit-s_mask_8x1_512x512_160k_ade20k + In Collection: Segmenter + Metadata: + backbone: ViT-S_16 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 40.32 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 2.03 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.19 + mIoU(ms+flip): 47.85 + Config: configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706-511bb103.pth +- Name: segmenter_vit-b_mask_8x1_512x512_160k_ade20k + In Collection: Segmenter + Metadata: + backbone: ViT-B_16 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 75.76 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.6 + mIoU(ms+flip): 51.07 + Config: configs/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706-bc533b08.pth +- Name: segmenter_vit-l_mask_8x1_512x512_160k_ade20k + In Collection: Segmenter + Metadata: + backbone: ViT-L_16 + crop size: (640,640) + lr schd: 160000 + inference time (ms/im): + - value: 381.68 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (640,640) + Training Memory (GB): 16.56 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 52.16 + mIoU(ms+flip): 53.65 + Config: configs/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k/segmenter_vit-l_mask_8x1_512x512_160k_ade20k_20220105_162750-7ef345be.pth diff --git a/downstream/mmsegmentation/configs/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py new file mode 100644 index 0000000..766a99f --- /dev/null +++ b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/segmenter_vit-b16_mask.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +optimizer = dict(lr=0.001, weight_decay=0.0) + 
+img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + # num_gpus: 8 -> batch_size: 8 + samples_per_gpu=1, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k.py new file mode 100644 index 0000000..7186570 --- /dev/null +++ b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-l_mask_8x1_512x512_160k_ade20k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/segmenter_vit-b16_mask.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_large_p16_384_20220308-d4efb41d.pth' # noqa + +model = dict( + pretrained=checkpoint, + backbone=dict( + type='VisionTransformer', + img_size=(640, 640), + embed_dims=1024, + num_layers=24, + num_heads=16), + decode_head=dict( + type='SegmenterMaskTransformerHead', + in_channels=1024, + channels=1024, + num_heads=16, + embed_dims=1024), + test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(608, 608))) + +optimizer = dict(lr=0.001, weight_decay=0.0) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +crop_size = (640, 640) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 640), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 640), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + # num_gpus: 8 -> batch_size: 8 + samples_per_gpu=1, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git 
a/downstream/mmsegmentation/configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py new file mode 100644 index 0000000..adc8c1b --- /dev/null +++ b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py @@ -0,0 +1,14 @@ +_base_ = './segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py' + +model = dict( + decode_head=dict( + _delete_=True, + type='FCNHead', + in_channels=384, + channels=384, + num_convs=0, + dropout_ratio=0.0, + concat_input=False, + num_classes=150, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) diff --git a/downstream/mmsegmentation/configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py new file mode 100644 index 0000000..7e0eeb1 --- /dev/null +++ b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py @@ -0,0 +1,66 @@ +_base_ = [ + '../_base_/models/segmenter_vit-b16_mask.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_small_p16_384_20220308-410f6037.pth' # noqa + +backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True) +model = dict( + pretrained=checkpoint, + backbone=dict( + img_size=(512, 512), + embed_dims=384, + num_heads=6, + ), + decode_head=dict( + type='SegmenterMaskTransformerHead', + in_channels=384, + channels=384, + num_classes=150, + num_layers=2, + num_heads=6, + embed_dims=384, + dropout_ratio=0.0, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))) + +optimizer = dict(lr=0.001, weight_decay=0.0) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + # num_gpus: 8 -> batch_size: 8 + samples_per_gpu=1, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py new file mode 100644 index 0000000..ec0107d --- /dev/null +++ b/downstream/mmsegmentation/configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/segmenter_vit-b16_mask.py', + 
'../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_tiny_p16_384_20220308-cce8c795.pth' # noqa + +model = dict( + pretrained=checkpoint, + backbone=dict(embed_dims=192, num_heads=3), + decode_head=dict( + type='SegmenterMaskTransformerHead', + in_channels=192, + channels=192, + num_heads=3, + embed_dims=192)) + +optimizer = dict(lr=0.001, weight_decay=0.0) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + # num_gpus: 8 -> batch_size: 8 + samples_per_gpu=1, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/downstream/mmsegmentation/configs/sem_fpn/README.md b/downstream/mmsegmentation/configs/sem_fpn/README.md new file mode 100644 index 0000000..a3732fd --- /dev/null +++ b/downstream/mmsegmentation/configs/sem_fpn/README.md @@ -0,0 +1,50 @@ +# Semantic FPN + +[Panoptic Feature Pyramid Networks](https://arxiv.org/abs/1901.02446) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +The recently introduced panoptic segmentation task has renewed our community's interest in unifying the tasks of instance segmentation (for thing classes) and semantic segmentation (for stuff classes). However, current state-of-the-art methods for this joint task use separate and dissimilar networks for instance and semantic segmentation, without performing any shared computation. In this work, we aim to unify these methods at the architectural level, designing a single network for both tasks. Our approach is to endow Mask R-CNN, a popular instance segmentation method, with a semantic segmentation branch using a shared Feature Pyramid Network (FPN) backbone. Surprisingly, this simple baseline not only remains effective for instance segmentation, but also yields a lightweight, top-performing method for semantic segmentation. In this work, we perform a detailed study of this minimally extended version of Mask R-CNN with FPN, which we refer to as Panoptic FPN, and show it is a robust and accurate baseline for both tasks. Given its effectiveness and conceptual simplicity, we hope our method can serve as a strong baseline and aid future research in panoptic segmentation. + + +
    + +
    + +## Citation + +```bibtex +@inproceedings{kirillov2019panoptic, + title={Panoptic feature pyramid networks}, + author={Kirillov, Alexander and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={6399--6408}, + year={2019} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FPN | R-50 | 512x1024 | 80000 | 2.8 | 13.54 | 74.52 | 76.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes_20200717_021437-94018a0d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes-20200717_021437.log.json) | +| FPN | R-101 | 512x1024 | 80000 | 3.9 | 10.29 | 75.80 | 77.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes_20200717_012416-c5800d4c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes-20200717_012416.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FPN | R-50 | 512x512 | 160000 | 4.9 | 55.77 | 37.49 | 39.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k_20200718_131734-5b5a6ab9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k-20200718_131734.log.json) | +| FPN | R-101 | 512x512 | 160000 | 5.9 | 40.58 | 39.35 | 40.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k_20200718_131734-306b5004.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k-20200718_131734.log.json) | diff --git a/downstream/mmsegmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..7f8710d --- /dev/null +++ b/downstream/mmsegmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './fpn_r50_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py new file mode 100644 index 0000000..2654096 --- /dev/null +++ b/downstream/mmsegmentation/configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './fpn_r50_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..4bf3edd --- /dev/null +++ b/downstream/mmsegmentation/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/fpn_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py new file mode 100644 index 0000000..5cdfc8c --- /dev/null +++ b/downstream/mmsegmentation/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/fpn_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict(decode_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/sem_fpn/sem_fpn.yml b/downstream/mmsegmentation/configs/sem_fpn/sem_fpn.yml new file mode 100644 index 0000000..d7ebdfe --- /dev/null +++ b/downstream/mmsegmentation/configs/sem_fpn/sem_fpn.yml @@ -0,0 +1,104 @@ +Collections: +- Name: FPN + Metadata: + Training Data: + - Cityscapes + - ADE20K + Paper: + URL: https://arxiv.org/abs/1901.02446 + Title: Panoptic Feature Pyramid Networks + README: configs/sem_fpn/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/fpn_head.py#L12 + Version: v0.17.0 + Converted From: + Code: https://github.com/facebookresearch/detectron2 +Models: +- Name: fpn_r50_512x1024_80k_cityscapes + In Collection: FPN + Metadata: + backbone: R-50 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 73.86 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 2.8 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.52 + mIoU(ms+flip): 76.08 + Config: configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes_20200717_021437-94018a0d.pth +- Name: fpn_r101_512x1024_80k_cityscapes + In Collection: FPN + Metadata: + backbone: R-101 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 97.18 + hardware: 
V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 3.9 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 75.8 + mIoU(ms+flip): 77.4 + Config: configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes_20200717_012416-c5800d4c.pth +- Name: fpn_r50_512x512_160k_ade20k + In Collection: FPN + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 17.93 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 37.49 + mIoU(ms+flip): 39.09 + Config: configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k_20200718_131734-5b5a6ab9.pth +- Name: fpn_r101_512x512_160k_ade20k + In Collection: FPN + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 24.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.9 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 39.35 + mIoU(ms+flip): 40.72 + Config: configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k_20200718_131734-306b5004.pth diff --git a/downstream/mmsegmentation/configs/setr/README.md b/downstream/mmsegmentation/configs/setr/README.md new file mode 100644 index 0000000..e42be7e --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/README.md @@ -0,0 +1,73 @@ +# SETR + +[Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers](https://arxiv.org/abs/2012.15840) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Most recent semantic segmentation methods adopt a fully-convolutional network (FCN) with an encoder-decoder architecture. The encoder progressively reduces the spatial resolution and learns more abstract/semantic visual concepts with larger receptive fields. Since context modeling is critical for segmentation, the latest efforts have been focused on increasing the receptive field, through either dilated/atrous convolutions or inserting attention modules. However, the encoder-decoder based FCN architecture remains unchanged. In this paper, we aim to provide an alternative perspective by treating semantic segmentation as a sequence-to-sequence prediction task. Specifically, we deploy a pure transformer (ie, without convolution and resolution reduction) to encode an image as a sequence of patches. With the global context modeled in every layer of the transformer, this encoder can be combined with a simple decoder to provide a powerful segmentation model, termed SEgmentation TRansformer (SETR). Extensive experiments show that SETR achieves new state of the art on ADE20K (50.28% mIoU), Pascal Context (55.83% mIoU) and competitive results on Cityscapes. Particularly, we achieve the first position in the highly competitive ADE20K test server leaderboard on the day of submission. + + +
    + +
+ +```{note} +This head has two versions. +``` + +## Citation + +```bibtex +@article{zheng2020rethinking, + title={Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers}, + author={Zheng, Sixiao and Lu, Jiachen and Zhao, Hengshuang and Zhu, Xiatian and Luo, Zekun and Wang, Yabiao and Fu, Yanwei and Feng, Jianfeng and Xiang, Tao and Torr, Philip HS and others}, + journal={arXiv preprint arXiv:2012.15840}, + year={2020} +} +``` + +## Usage + +You can download the pretrained model from [here](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth). Then you can convert its keys with the script `vit2mmseg.py` in the tools directory. + +```shell +python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/vit2mmseg.py \ +jx_vit_large_p16_384-b3be5167.pth pretrain/vit_large_p16.pth +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ------ | -------- | +| SETR Naive | ViT-L | 512x512 | 16 | 160000 | 18.40 | 4.72 | 48.28 | 49.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_naive_512x512_160k_b16_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258-061f24f5.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258.log.json) | +| SETR PUP | ViT-L | 512x512 | 16 | 160000 | 19.54 | 4.50 | 48.24 | 49.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_pup_512x512_160k_b16_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343-7e0ce826.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343.log.json) | +| SETR MLA | ViT-L | 512x512 | 8 | 160000 | 10.96 | - | 47.34 | 49.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_mla_512x512_160k_b8_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118-c6d21df0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118.log.json) | +| SETR MLA | ViT-L | 512x512 | 16 | 160000 | 17.30 | 5.25 | 47.54 | 49.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_mla_512x512_160k_b16_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057-f9741de7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057.log.json) | + +### Cityscapes + +| Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| SETR Naive | ViT-L | 768x768 | 8 | 80000 | 24.06 | 0.39 | 78.10 | 80.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505-20728e80.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505.log.json) | +| SETR PUP | ViT-L | 768x768 | 8 | 80000 | 27.96 | 0.37 | 79.21 | 81.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115-f6f37b8f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115.log.json) | +| SETR MLA | ViT-L | 768x768 | 8 | 80000 | 24.10 | 0.41 | 77.00 | 79.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003-7f8dccbe.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003.log.json) | diff --git a/downstream/mmsegmentation/configs/setr/setr.yml b/downstream/mmsegmentation/configs/setr/setr.yml new file mode 100644 index 0000000..27f58e4 --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/setr.yml @@ -0,0 +1,164 @@ +Collections: +- Name: SETR + Metadata: + Training Data: + - ADE20K + - Cityscapes + Paper: + URL: https://arxiv.org/abs/2012.15840 + Title: Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective + with Transformers + README: configs/setr/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/setr_up_head.py#L11 + Version: v0.17.0 + Converted From: + Code: https://github.com/fudan-zvg/SETR +Models: +- Name: 
setr_naive_512x512_160k_b16_ade20k + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 211.86 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 18.4 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.28 + mIoU(ms+flip): 49.56 + Config: configs/setr/setr_naive_512x512_160k_b16_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258-061f24f5.pth +- Name: setr_pup_512x512_160k_b16_ade20k + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 222.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 19.54 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.24 + mIoU(ms+flip): 49.99 + Config: configs/setr/setr_pup_512x512_160k_b16_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343-7e0ce826.pth +- Name: setr_mla_512x512_160k_b8_ade20k + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (512,512) + lr schd: 160000 + Training Memory (GB): 10.96 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.34 + mIoU(ms+flip): 49.05 + Config: configs/setr/setr_mla_512x512_160k_b8_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118-c6d21df0.pth +- Name: setr_mla_512x512_160k_b16_ade20k + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 190.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 17.3 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.54 + mIoU(ms+flip): 49.37 + Config: configs/setr/setr_mla_512x512_160k_b16_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057-f9741de7.pth +- Name: setr_vit-large_naive_8x1_768x768_80k_cityscapes + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (768,768) + lr schd: 80000 + inference time (ms/im): + - value: 2564.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (768,768) + Training Memory (GB): 24.06 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.1 + mIoU(ms+flip): 80.22 + Config: configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505-20728e80.pth +- Name: setr_vit-large_pup_8x1_768x768_80k_cityscapes + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (768,768) + lr schd: 80000 + inference time (ms/im): + - value: 2702.7 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (768,768) + Training Memory (GB): 27.96 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.21 + mIoU(ms+flip): 81.02 + Config: 
configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115-f6f37b8f.pth +- Name: setr_vit-large_mla_8x1_768x768_80k_cityscapes + In Collection: SETR + Metadata: + backbone: ViT-L + crop size: (768,768) + lr schd: 80000 + inference time (ms/im): + - value: 2439.02 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (768,768) + Training Memory (GB): 24.1 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.0 + mIoU(ms+flip): 79.59 + Config: configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003-7f8dccbe.pth diff --git a/downstream/mmsegmentation/configs/setr/setr_mla_512x512_160k_b16_ade20k.py b/downstream/mmsegmentation/configs/setr/setr_mla_512x512_160k_b16_ade20k.py new file mode 100644 index 0000000..c8418c6 --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/setr_mla_512x512_160k_b16_ade20k.py @@ -0,0 +1,4 @@ +_base_ = ['./setr_mla_512x512_160k_b8_ade20k.py'] + +# num_gpus: 8 -> batch_size: 16 +data = dict(samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/setr/setr_mla_512x512_160k_b8_ade20k.py b/downstream/mmsegmentation/configs/setr/setr_mla_512x512_160k_b8_ade20k.py new file mode 100644 index 0000000..e1a07ce --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/setr_mla_512x512_160k_b8_ade20k.py @@ -0,0 +1,85 @@ +_base_ = [ + '../_base_/models/setr_mla.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained=None, + backbone=dict( + img_size=(512, 512), + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + decode_head=dict(num_classes=150), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=0, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=1, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=2, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=256, + channels=256, + in_index=3, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=0, + kernel_size=1, + concat_input=False, + num_classes=150, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)), +) + +optimizer = dict( + lr=0.001, + weight_decay=0.0, 
+ paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) + +# num_gpus: 8 -> batch_size: 8 +data = dict(samples_per_gpu=1) diff --git a/downstream/mmsegmentation/configs/setr/setr_naive_512x512_160k_b16_ade20k.py b/downstream/mmsegmentation/configs/setr/setr_naive_512x512_160k_b16_ade20k.py new file mode 100644 index 0000000..8ad8c9f --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/setr_naive_512x512_160k_b16_ade20k.py @@ -0,0 +1,67 @@ +_base_ = [ + '../_base_/models/setr_naive.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained=None, + backbone=dict( + img_size=(512, 512), + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + decode_head=dict(num_classes=150), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=1, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)) + ], + test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)), +) + +optimizer = dict( + lr=0.01, + weight_decay=0.0, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) + +# num_gpus: 8 -> batch_size: 16 +data = dict(samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/setr/setr_pup_512x512_160k_b16_ade20k.py b/downstream/mmsegmentation/configs/setr/setr_pup_512x512_160k_b16_ade20k.py new file mode 100644 index 0000000..83997a2 --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/setr_pup_512x512_160k_b16_ade20k.py @@ -0,0 +1,67 @@ +_base_ = [ + '../_base_/models/setr_pup.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + pretrained=None, + backbone=dict( + img_size=(512, 512), + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + decode_head=dict(num_classes=150), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=150, + dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=150, + 
dropout_ratio=0, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + num_convs=2, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + ], + test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)), +) + +optimizer = dict( + lr=0.001, + weight_decay=0.0, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) + +# num_gpus: 8 -> batch_size: 16 +data = dict(samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py b/downstream/mmsegmentation/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py new file mode 100644 index 0000000..4237cd5 --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/setr_mla.py', '../_base_/datasets/cityscapes_768x768.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + pretrained=None, + backbone=dict( + drop_rate=0, + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512))) + +optimizer = dict( + lr=0.002, + weight_decay=0.0, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) +data = dict(samples_per_gpu=1) diff --git a/downstream/mmsegmentation/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py b/downstream/mmsegmentation/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py new file mode 100644 index 0000000..0c6621e --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py @@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/setr_naive.py', + '../_base_/datasets/cityscapes_768x768.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + pretrained=None, + backbone=dict( + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512))) + +optimizer = dict( + weight_decay=0.0, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) + +data = dict(samples_per_gpu=1) diff --git a/downstream/mmsegmentation/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py b/downstream/mmsegmentation/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py new file mode 100644 index 0000000..e108988 --- /dev/null +++ b/downstream/mmsegmentation/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py @@ -0,0 +1,64 @@ +_base_ = [ + '../_base_/models/setr_pup.py', '../_base_/datasets/cityscapes_768x768.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] + +norm_cfg = dict(type='SyncBN', requires_grad=True) +crop_size = (768, 768) +model = dict( + pretrained=None, + backbone=dict( + drop_rate=0., + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), + auxiliary_head=[ + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=0, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=2, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=1, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=2, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='SETRUPHead', + in_channels=1024, + channels=256, + in_index=2, + num_classes=19, + dropout_ratio=0, + norm_cfg=norm_cfg, + num_convs=2, + up_scale=4, + kernel_size=3, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)) + ], + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(512, 512))) + +optimizer = dict( + weight_decay=0.0, + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})) + +data = dict(samples_per_gpu=1) diff --git a/downstream/mmsegmentation/configs/stdc/README.md b/downstream/mmsegmentation/configs/stdc/README.md new file mode 100644 index 0000000..466a91a --- /dev/null +++ b/downstream/mmsegmentation/configs/stdc/README.md @@ -0,0 +1,72 @@ +# STDC + +[Rethinking BiSeNet For Real-time Semantic Segmentation](https://arxiv.org/abs/2104.13188) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +BiSeNet has been proved to be a popular two-stream network for real-time segmentation. However, its principle of adding an extra path to encode spatial information is time-consuming, and the backbones borrowed from pretrained tasks, e.g., image classification, may be inefficient for image segmentation due to the deficiency of task-specific design. To handle these problems, we propose a novel and efficient structure named Short-Term Dense Concatenate network (STDC network) by removing structure redundancy. Specifically, we gradually reduce the dimension of feature maps and use the aggregation of them for image representation, which forms the basic module of STDC network. In the decoder, we propose a Detail Aggregation module by integrating the learning of spatial information into low-level layers in single-stream manner. Finally, the low-level features and deep features are fused to predict the final segmentation results. Extensive experiments on Cityscapes and CamVid dataset demonstrate the effectiveness of our method by achieving promising trade-off between segmentation accuracy and inference speed. On Cityscapes, we achieve 71.9% mIoU on the test set with a speed of 250.4 FPS on NVIDIA GTX 1080Ti, which is 45.2% faster than the latest methods, and achieve 76.8% mIoU with 97.0 FPS while inferring on higher resolution images. + + +
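+The sketch below is only meant to make the short-term dense concatenate idea from the abstract concrete. It is not the STDC backbone implementation in `mmseg/models/backbones/stdc.py`; strides, the fusion variants and the detail head are omitted, and every name in it is illustrative.
+
+```python
+# Rough sketch of the short-term dense concatenate idea: successive convs
+# halve the channel width, and the outputs of every step are concatenated
+# to form the block output.
+import torch
+import torch.nn as nn
+
+
+class STDCLikeBlock(nn.Module):
+
+    def __init__(self, in_channels, out_channels, num_convs=4):
+        super().__init__()
+        self.convs = nn.ModuleList()
+        channels, remaining = in_channels, out_channels
+        for i in range(num_convs):
+            # The last conv keeps whatever is left so that the widths of all
+            # steps sum exactly to out_channels.
+            step = remaining if i == num_convs - 1 else out_channels // (2**(i + 1))
+            self.convs.append(
+                nn.Sequential(
+                    nn.Conv2d(
+                        channels,
+                        step,
+                        kernel_size=3 if i else 1,
+                        padding=1 if i else 0,
+                        bias=False), nn.BatchNorm2d(step), nn.ReLU()))
+            channels, remaining = step, remaining - step
+
+    def forward(self, x):
+        outs = []
+        for conv in self.convs:
+            x = conv(x)
+            outs.append(x)
+        # Feature maps of decreasing width are aggregated by concatenation.
+        return torch.cat(outs, dim=1)
+
+
+x = torch.randn(2, 32, 64, 64)
+print(STDCLikeBlock(32, 256)(x).shape)  # torch.Size([2, 256, 64, 64])
+```
+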
    + +
+
+## Citation
+
+```bibtex
+@inproceedings{fan2021rethinking,
+ title={Rethinking BiSeNet For Real-time Semantic Segmentation},
+ author={Fan, Mingyuan and Lai, Shenqi and Huang, Junshi and Wei, Xiaoming and Chai, Zhenhua and Luo, Junfeng and Wei, Xiaolin},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+ pages={9716--9725},
+ year={2021}
+}
+```
+
+## Usage
+
+We have provided [ImageNet Pretrained STDCNet Weights](https://drive.google.com/drive/folders/1wROFwRt8qWHD4jSo8Zu1gp1d6oYJ3ns1) converted from the [official repo](https://github.com/MichaelFan01/STDC-Seg).
+
+If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`stdc2mmseg.py`](../../tools/model_converters/stdc2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/MichaelFan01/STDC-Seg) to MMSegmentation style.
+
+```shell
+python tools/model_converters/stdc2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} ${STDC_TYPE}
+```
+
+E.g.
+
+```shell
+python tools/model_converters/stdc2mmseg.py ./STDCNet813M_73.91.tar ./pretrained/stdc1.pth STDC1
+
+python tools/model_converters/stdc2mmseg.py ./STDCNet1446_76.47.tar ./pretrained/stdc2.pth STDC2
+```
+
+This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
+
+## Results and models
+
+### Cityscapes
+
+| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
+| --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------ | -------- |
+| STDC 1 (No Pretrain) | STDC1 | 512x1024 | 80000 | 7.15 | 23.06 | 71.82 | 73.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/stdc/stdc1_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048-74e6920a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048.log.json) |
+| STDC 1 | STDC1 | 512x1024 | 80000 | - | - | 74.94 | 76.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648-3d4c2981.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648.log.json) |
+| STDC 2 (No Pretrain) | STDC2 | 512x1024 | 80000 | 8.27 | 23.71 | 73.15 | 76.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/stdc/stdc2_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015-fb1e3a1a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015.log.json) |
+| STDC 2 | STDC2 | 512x1024 
| 80000 | - | - | 76.67 | 78.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048-1f8f0f6c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048.log.json) | + +Note: + +- For STDC on Cityscapes dataset, default setting is 4 GPUs with 12 samples per GPU in training. +- `No Pretrain` means the model is trained from scratch. +- The FPS is for reference only. The environment is also different from paper setting, whose input size is `512x1024` and `768x1536`, i.e., 50% and 75% of our input size, respectively and using TensorRT. +- The parameter `fusion_kernel` in `STDCHead` is not learnable. In official repo, `find_unused_parameters=True` is set [here](https://github.com/MichaelFan01/STDC-Seg/blob/59ff37fbd693b99972c76fcefe97caa14aeb619f/train.py#L220). You may check it by printing model parameters of original repo on your own. diff --git a/downstream/mmsegmentation/configs/stdc/stdc.yml b/downstream/mmsegmentation/configs/stdc/stdc.yml new file mode 100644 index 0000000..f584b74 --- /dev/null +++ b/downstream/mmsegmentation/configs/stdc/stdc.yml @@ -0,0 +1,87 @@ +Collections: +- Name: STDC + Metadata: + Training Data: + - Cityscapes + Paper: + URL: https://arxiv.org/abs/2104.13188 + Title: Rethinking BiSeNet For Real-time Semantic Segmentation + README: configs/stdc/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/stdc.py#L394 + Version: v0.20.0 + Converted From: + Code: https://github.com/MichaelFan01/STDC-Seg +Models: +- Name: stdc1_512x1024_80k_cityscapes + In Collection: STDC + Metadata: + backbone: STDC1 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 43.37 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.15 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 71.82 + mIoU(ms+flip): 73.89 + Config: configs/stdc/stdc1_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048-74e6920a.pth +- Name: stdc1_in1k-pre_512x1024_80k_cityscapes + In Collection: STDC + Metadata: + backbone: STDC1 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 74.94 + mIoU(ms+flip): 76.97 + Config: configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648-3d4c2981.pth +- Name: stdc2_512x1024_80k_cityscapes + In Collection: STDC + Metadata: + backbone: STDC2 + crop size: (512,1024) + lr schd: 80000 + inference time (ms/im): + - value: 42.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 8.27 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 73.15 + mIoU(ms+flip): 76.13 + Config: configs/stdc/stdc2_512x1024_80k_cityscapes.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015-fb1e3a1a.pth +- Name: stdc2_in1k-pre_512x1024_80k_cityscapes + In Collection: STDC + Metadata: + backbone: STDC2 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 76.67 + mIoU(ms+flip): 78.67 + Config: configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048-1f8f0f6c.pth diff --git a/downstream/mmsegmentation/configs/stdc/stdc1_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/stdc/stdc1_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..849e771 --- /dev/null +++ b/downstream/mmsegmentation/configs/stdc/stdc1_512x1024_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/stdc.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +lr_config = dict(warmup='linear', warmup_iters=1000) +data = dict( + samples_per_gpu=12, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..f295bf4 --- /dev/null +++ b/downstream/mmsegmentation/configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py @@ -0,0 +1,6 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/stdc/stdc1_20220308-5368626c.pth' # noqa +_base_ = './stdc1_512x1024_80k_cityscapes.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))) diff --git a/downstream/mmsegmentation/configs/stdc/stdc2_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/stdc/stdc2_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..f7afb50 --- /dev/null +++ b/downstream/mmsegmentation/configs/stdc/stdc2_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './stdc1_512x1024_80k_cityscapes.py' +model = dict(backbone=dict(backbone_cfg=dict(stdc_type='STDCNet2'))) diff --git a/downstream/mmsegmentation/configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..4148ac4 --- /dev/null +++ b/downstream/mmsegmentation/configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py @@ -0,0 +1,6 @@ +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/stdc/stdc2_20220308-7dbd9127.pth' # noqa +_base_ = './stdc2_512x1024_80k_cityscapes.py' +model = dict( + backbone=dict( + backbone_cfg=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))) diff --git a/downstream/mmsegmentation/configs/swin/README.md b/downstream/mmsegmentation/configs/swin/README.md new file mode 100644 index 0000000..0f9acd4 --- /dev/null +++ b/downstream/mmsegmentation/configs/swin/README.md @@ -0,0 +1,75 @@ +# Swin Transformer + +[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. 
Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. The code and models are publicly available at [this https URL](https://github.com/microsoft/Swin-Transformer). + + +
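+As a minimal illustration of the shifted-window scheme described above (and not the MMSegmentation Swin backbone itself), the sketch below shows how a token map can be cut into non-overlapping windows for local self-attention, and how a half-window cyclic shift creates cross-window connections; the shapes are example values only.
+
+```python
+# Illustrative only: split a (B, H, W, C) token map into non-overlapping
+# window_size x window_size windows, the unit inside which self-attention
+# is computed; shifting the map by half a window before partitioning is
+# what lets neighbouring windows exchange information in alternating blocks.
+import torch
+
+
+def window_partition(x, window_size):
+    """(B, H, W, C) -> (num_windows * B, window_size * window_size, C)."""
+    B, H, W, C = x.shape
+    x = x.view(B, H // window_size, window_size, W // window_size,
+               window_size, C)
+    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size**2, C)
+
+
+x = torch.randn(1, 56, 56, 96)  # e.g. a stage-1 token map
+local = window_partition(x, 7)  # attention runs inside each 7x7 window
+shifted = torch.roll(x, shifts=(-3, -3), dims=(1, 2))  # half-window shift
+cross = window_partition(shifted, 7)  # windows now straddle old borders
+print(local.shape, cross.shape)  # torch.Size([64, 49, 96]) twice
+```
+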
    + +
+
+## Citation
+
+```bibtex
+@article{liu2021Swin,
+ title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows},
+ author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
+ journal={arXiv preprint arXiv:2103.14030},
+ year={2021}
+}
+```
+
+## Usage
+
+We have provided pretrained models converted from [official repo](https://github.com/microsoft/Swin-Transformer).
+
+If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`swin2mmseg.py`](../../tools/model_converters/swin2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation) to MMSegmentation style.
+
+```shell
+python tools/model_converters/swin2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
+```
+
+E.g.
+
+```shell
+python tools/model_converters/swin2mmseg.py https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth pretrain/swin_base_patch4_window7_224.pth
+```
+
+This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
+
+In our default setting, the pretrained models and their corresponding [original models](https://github.com/microsoft/Swin-Transformer) are listed below:
+
+ | pretrained models | original models |
+ | ------ | -------- |
+ | pretrain/swin_tiny_patch4_window7_224.pth | [swin_tiny_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth) |
+ | pretrain/swin_small_patch4_window7_224.pth | [swin_small_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth) |
+ | pretrain/swin_base_patch4_window7_224.pth | [swin_base_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth) |
+ | pretrain/swin_base_patch4_window7_224_22k.pth | [swin_base_patch4_window7_224_22k.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth) |
+ | pretrain/swin_base_patch4_window12_384.pth | [swin_base_patch4_window12_384.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth) |
+ | pretrain/swin_base_patch4_window12_384_22k.pth | [swin_base_patch4_window12_384_22k.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth) |
+
+## Results and models
+
+### ADE20K
+
+| Method | Backbone | Crop Size | pretrain | pretrain img size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
+| ------ | -------- | --------- | ---------- | ------- | -------- | --- | --- | -------------- | ----- | ------------: | ------ | -------- |
+| UperNet | Swin-T | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 5.02 | 21.06 | 44.41 | 45.79 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542.log.json) | +| UperNet | Swin-S | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 6.17 | 14.72 | 47.72 | 49.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015.log.json) | +| UperNet | Swin-B | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 7.61 | 12.65 | 47.99 | 49.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340-593b0e13.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340.log.json) | +| UperNet | Swin-B | 512x512 | ImageNet-22K | 224x224 | 16 | 160000 | - | - | 50.31 | 51.9 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650-762e2178.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650.log.json) | +| UperNet | Swin-B | 512x512 | ImageNet-1K | 384x384 | 16 | 160000 | 8.52 | 12.10 | 48.35 | 49.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020-05b22ea4.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020.log.json) | +| UperNet | Swin-B | 512x512 | ImageNet-22K | 384x384 | 16 | 160000 | - | - | 50.76 | 52.4 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459.log.json) | diff --git a/downstream/mmsegmentation/configs/swin/swin.yml b/downstream/mmsegmentation/configs/swin/swin.yml new file mode 100644 index 0000000..cf7c465 --- /dev/null +++ b/downstream/mmsegmentation/configs/swin/swin.yml @@ -0,0 +1,117 @@ +Models: +- Name: upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K + In Collection: UperNet + Metadata: + backbone: Swin-T + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 47.48 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.02 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.41 + mIoU(ms+flip): 45.79 + Config: configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth +- Name: upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K + In Collection: UperNet + Metadata: + backbone: Swin-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 67.93 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.17 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.72 + mIoU(ms+flip): 49.24 + Config: configs/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth +- Name: upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K + In Collection: UperNet + Metadata: + backbone: Swin-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 79.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.61 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.99 + mIoU(ms+flip): 49.57 + Config: configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340-593b0e13.pth +- Name: upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K + In Collection: UperNet + Metadata: + backbone: Swin-B + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 50.31 + mIoU(ms+flip): 51.9 + Config: configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650-762e2178.pth +- Name: upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K + In Collection: UperNet + Metadata: + backbone: Swin-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 82.64 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.52 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.35 + mIoU(ms+flip): 49.65 + Config: configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020-05b22ea4.pth +- Name: upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K + In Collection: UperNet + Metadata: + backbone: Swin-B + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 50.76 + mIoU(ms+flip): 52.4 + Config: configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth diff --git a/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py b/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py new file mode 100644 index 0000000..027bd6f --- /dev/null +++ b/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py @@ -0,0 +1,15 @@ +_base_ = [ + 'upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_' + 'pretrain_224x224_1K.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_20220317-55b0104a.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + pretrain_img_size=384, + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + window_size=12), + decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=150), + auxiliary_head=dict(in_channels=512, num_classes=150)) diff --git a/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py 
b/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py new file mode 100644 index 0000000..e662d4f --- /dev/null +++ b/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py @@ -0,0 +1,8 @@ +_base_ = [ + './upernet_swin_base_patch4_window12_512x512_160k_ade20k_' + 'pretrain_384x384_1K.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_22k_20220317-e5c09f74.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file))) diff --git a/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py b/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py new file mode 100644 index 0000000..6e05677 --- /dev/null +++ b/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py @@ -0,0 +1,13 @@ +_base_ = [ + './upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_' + 'pretrain_224x224_1K.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window7_224_20220317-e9b98025.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32]), + decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=150), + auxiliary_head=dict(in_channels=512, num_classes=150)) diff --git a/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py b/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py new file mode 100644 index 0000000..7a9c506 --- /dev/null +++ b/downstream/mmsegmentation/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py @@ -0,0 +1,8 @@ +_base_ = [ + './upernet_swin_base_patch4_window7_512x512_160k_ade20k_' + 'pretrain_224x224_1K.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window7_224_22k_20220317-4f79f7c0.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file))) diff --git a/downstream/mmsegmentation/configs/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py b/downstream/mmsegmentation/configs/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py new file mode 100644 index 0000000..1958e0e --- /dev/null +++ b/downstream/mmsegmentation/configs/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py @@ -0,0 +1,11 @@ +_base_ = [ + './upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_' + 'pretrain_224x224_1K.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + depths=[2, 2, 18, 2]), + decode_head=dict(in_channels=[96, 192, 384, 768], num_classes=150), + auxiliary_head=dict(in_channels=384, num_classes=150)) diff --git a/downstream/mmsegmentation/configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py 
b/downstream/mmsegmentation/configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py new file mode 100644 index 0000000..6d8c413 --- /dev/null +++ b/downstream/mmsegmentation/configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py @@ -0,0 +1,45 @@ +_base_ = [ + '../_base_/models/upernet_swin.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth' # noqa +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file), + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + use_abs_pos_embed=False, + drop_path_rate=0.3, + patch_norm=True), + decode_head=dict(in_channels=[96, 192, 384, 768], num_classes=150), + auxiliary_head=dict(in_channels=384, num_classes=150)) + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/twins/README.md b/downstream/mmsegmentation/configs/twins/README.md new file mode 100644 index 0000000..e221835 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/README.md @@ -0,0 +1,76 @@ +# Twins + +[Twins: Revisiting the Design of Spatial Attention in Vision Transformers](https://arxiv.org/pdf/2104.13840.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Very recently, a variety of vision transformer architectures for dense prediction tasks have been proposed and they show that the design of spatial attention is critical to their success in these tasks. In this work, we revisit the design of the spatial attention and demonstrate that a carefully-devised yet simple spatial attention mechanism performs favourably against the state-of-the-art schemes. As a result, we propose two vision transformer architectures, namely, Twins-PCPVT and Twins-SVT. Our proposed architectures are highly-efficient and easy to implement, only involving matrix multiplications that are highly optimized in modern deep learning frameworks. More importantly, the proposed architectures achieve excellent performance on a wide range of visual tasks, including image level classification as well as dense detection and segmentation. The simplicity and strong performance suggest that our proposed architectures may serve as stronger backbones for many vision tasks. Our code is released at [this https URL](https://github.com/Meituan-AutoML/Twins). + + +
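+As a loose illustration of the kind of spatial attention built purely from matrix multiplications that the abstract mentions, the sketch below lets every position attend to a spatially sub-sampled set of keys and values. It is a simplification under our own assumptions (no projection layers, no multi-head split) and is not the Twins or MMSegmentation implementation; shapes and the sub-sampling ratio are arbitrary examples.
+
+```python
+# Loose sketch: queries come from every token, while keys/values are taken
+# from a spatially sub-sampled copy of the feature map, so the attention
+# matrix stays small and everything reduces to plain matrix multiplications.
+import torch
+import torch.nn.functional as F
+
+B, H, W, C, sr = 2, 32, 32, 64, 4  # sr: assumed sub-sampling ratio
+x = torch.randn(B, H * W, C)
+
+q = x  # queries from every token, (B, N, C)
+kv = x.transpose(1, 2).reshape(B, C, H, W)
+kv = F.avg_pool2d(kv, kernel_size=sr)  # sub-sample the spatial map
+kv = kv.flatten(2).transpose(1, 2)  # (B, N / sr**2, C)
+
+attn = torch.softmax(q @ kv.transpose(1, 2) / C**0.5, dim=-1)
+out = attn @ kv  # (B, N, C)
+print(out.shape)  # torch.Size([2, 1024, 64])
+```
+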
    + +
+
+## Citation
+
+```bibtex
+@article{chu2021twins,
+ title={Twins: Revisiting spatial attention design in vision transformers},
+ author={Chu, Xiangxiang and Tian, Zhi and Wang, Yuqing and Zhang, Bo and Ren, Haibing and Wei, Xiaolin and Xia, Huaxia and Shen, Chunhua},
+ journal={arXiv preprint arXiv:2104.13840},
+ year={2021}
+}
+```
+
+## Usage
+
+We have provided pretrained models converted from [official repo](https://github.com/Meituan-AutoML/Twins).
+
+If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`twins2mmseg.py`](../../tools/model_converters/twins2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/Meituan-AutoML/Twins) to MMSegmentation style.
+
+```shell
+python tools/model_converters/twins2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} ${MODEL_TYPE}
+```
+
+This script converts a `pcpvt` or `svt` pretrained model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
+
+For example,
+
+```shell
+python tools/model_converters/twins2mmseg.py ./alt_gvt_base.pth ./pretrained/alt_gvt_base.pth svt
+```
+
+## Results and models
+
+### ADE20K
+
+| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
+| ----- | ------- | --------- | ------ | ------ | -------------- | ----- | ------------- | ------ | -------- |
+| Twins-FPN | PCPVT-S | 512x512 | 80000| 6.60 | 27.15 | 43.26 | 44.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132-41acd132.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132.log.json) |
+| Twins-UPerNet | PCPVT-S | 512x512 | 160000| 9.67 | 14.24 | 46.04 | 46.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537-8e99c07a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537.log.json) |
+| Twins-FPN | PCPVT-B | 512x512 | 80000| 8.41 | 19.67 | 45.66 | 46.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019-d396db72.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019.log.json) |
+| Twins-UPerNet 
(8x2) | PCPVT-B | 512x512 | 160000| 6.46 | 12.04 | 47.91 | 48.64 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020-02094ea5.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020.log.json) | +| Twins-FPN | PCPVT-L | 512x512 | 80000| 10.78 | 14.32 | 45.94 | 46.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226-bc6d61dc.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226.log.json) | +| Twins-UPerNet (8x2) | PCPVT-L | 512x512 | 160000| 7.82 | 10.70 | 49.35 | 50.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py) |[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053-c6095c07.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053.log.json)| +| Twins-FPN | SVT-S| 512x512 | 80000| 5.80 | 29.79 | 44.47 | 45.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py) |[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006-0a0d3317.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006.log.json)| +| Twins-UPerNet (8x2) | SVT-S| 512x512 | 160000| 4.93 | 15.09 | 46.08 | 46.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py) |[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005-e48a2d94.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005.log.json)| +| Twins-FPN | SVT-B| 512x512 | 80000| 8.75 | 21.10 | 46.77 | 47.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py) |[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849-88b2907c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849.log.json)| +| Twins-UPerNet (8x2) | SVT-B| 512x512 | 160000| 6.77 | 12.66 
| 48.04 | 48.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k.py) |[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826-0943a1f1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826.log.json)| +| Twins-FPN | SVT-L| 512x512 | 80000| 11.20 | 17.80 | 46.55 | 47.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py) |[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005-1d59bee2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005.log.json)| +| Twins-UPerNet (8x2) | SVT-L| 512x512 | 160000| 8.41 | 10.73 | 49.65 | 50.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k.py) |[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005-3e2cae61.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005.log.json)| + + +Note: + +- `8x2` means 8 GPUs with 2 samples per GPU in training. Default setting of Twins on ADE20K is 8 GPUs with 4 samples per GPU in training. +- `UPerNet` and `FPN` are decoder heads utilized in corresponding Twins model, which is `UPerHead` and `FPNHead`, respectively. Specifically, models in [official repo](https://github.com/Meituan-AutoML/Twins) all use `UPerHead`. 
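The `(8x2)` configs in this diff follow a simple pattern: they inherit the corresponding `8x4` base config and lower the per-GPU batch size, so the total batch size drops from 32 (8 GPUs x 4 samples) to 16 (8 GPUs x 2 samples). A trimmed, illustrative sketch of that pattern is shown below; the actual configs added later in this diff additionally override backbone settings (pretrained checkpoint, `depths`, `drop_path_rate`), so this is not a complete config on its own.

```python
# Illustrative sketch only: how the "(8x2)" UPerNet variants are derived from
# the 8x4 base config. The real configs in this diff also override the
# backbone (init_cfg checkpoint, depths, drop_path_rate).
_base_ = ['./twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py']

# 8 GPUs x 2 samples per GPU -> total batch size 16 (default is 8 x 4 = 32).
data = dict(samples_per_gpu=2, workers_per_gpu=2)
```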
diff --git a/downstream/mmsegmentation/configs/twins/twins.yml b/downstream/mmsegmentation/configs/twins/twins.yml new file mode 100644 index 0000000..6b5f5c1 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins.yml @@ -0,0 +1,265 @@ +Models: +- Name: twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k + In Collection: FPN + Metadata: + backbone: PCPVT-S + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 36.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.6 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.26 + mIoU(ms+flip): 44.11 + Config: configs/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132-41acd132.pth +- Name: twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: PCPVT-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 70.22 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.67 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.04 + mIoU(ms+flip): 46.92 + Config: configs/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537-8e99c07a.pth +- Name: twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k + In Collection: FPN + Metadata: + backbone: PCPVT-B + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 50.84 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.41 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.66 + mIoU(ms+flip): 46.48 + Config: configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019-d396db72.pth +- Name: twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: PCPVT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 83.06 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.46 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.91 + mIoU(ms+flip): 48.64 + Config: configs/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020-02094ea5.pth +- Name: twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k + In Collection: FPN + Metadata: + backbone: PCPVT-L + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 69.83 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 10.78 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.94 + mIoU(ms+flip): 46.7 + Config: configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226-bc6d61dc.pth +- Name: twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: PCPVT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 93.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.82 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.35 + mIoU(ms+flip): 50.08 + Config: configs/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053-c6095c07.pth +- Name: twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k + In Collection: FPN + Metadata: + backbone: SVT-S + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 33.57 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.8 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 44.47 + mIoU(ms+flip): 45.42 + Config: configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006-0a0d3317.pth +- Name: twins_svt-s_uperhead_8x2_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: SVT-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 66.27 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.93 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.08 + mIoU(ms+flip): 46.96 + Config: configs/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005-e48a2d94.pth +- Name: twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k + In Collection: FPN + Metadata: + backbone: SVT-B + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 47.39 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.75 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.77 + mIoU(ms+flip): 47.47 + Config: configs/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849-88b2907c.pth +- Name: twins_svt-b_uperhead_8x2_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: SVT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 78.99 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.77 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 48.04 + mIoU(ms+flip): 48.87 + Config: configs/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826-0943a1f1.pth +- Name: twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k + In Collection: FPN + Metadata: + backbone: SVT-L + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 56.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 11.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.55 + mIoU(ms+flip): 47.74 + Config: configs/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005-1d59bee2.pth +- Name: twins_svt-l_uperhead_8x2_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: SVT-L + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 93.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.41 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 49.65 + mIoU(ms+flip): 50.63 + Config: configs/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005-3e2cae61.pth diff --git a/downstream/mmsegmentation/configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py new file mode 100644 index 0000000..b79fefd --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = ['./twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_base_20220308-0621964c.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + depths=[3, 4, 18, 3]), ) diff --git a/downstream/mmsegmentation/configs/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py new file mode 100644 index 0000000..8c299d3 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py @@ -0,0 +1,11 @@ +_base_ = ['./twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_base_20220308-0621964c.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + depths=[3, 4, 18, 3], + drop_path_rate=0.3)) + +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py new file mode 100644 index 0000000..abb652e --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = ['./twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_large_20220308-37579dc6.pth' # noqa + +model = dict( + 
backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + depths=[3, 8, 27, 3])) diff --git a/downstream/mmsegmentation/configs/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py new file mode 100644 index 0000000..f6f7d27 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py @@ -0,0 +1,11 @@ +_base_ = ['./twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_large_20220308-37579dc6.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + depths=[3, 8, 27, 3], + drop_path_rate=0.3)) + +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py new file mode 100644 index 0000000..3d7be96 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/twins_pcpvt-s_fpn.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] + +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py new file mode 100644 index 0000000..c888b92 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py @@ -0,0 +1,26 @@ +_base_ = [ + '../_base_/models/twins_pcpvt-s_upernet.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict(custom_keys={ + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) diff --git a/downstream/mmsegmentation/configs/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py new file mode 100644 index 0000000..00d8957 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py @@ -0,0 +1,12 @@ +_base_ = ['./twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_base_20220308-1b7eb711.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[96, 192, 384, 768], + num_heads=[3, 6, 12, 24], + depths=[2, 2, 18, 2]), + neck=dict(in_channels=[96, 192, 384, 768]), +) diff --git a/downstream/mmsegmentation/configs/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k.py new file mode 100644 index 0000000..a969fed --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k.py @@ -0,0 +1,12 @@ +_base_ = ['./twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_base_20220308-1b7eb711.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[96, 192, 384, 768], + num_heads=[3, 6, 12, 24], + depths=[2, 2, 18, 2]), + decode_head=dict(in_channels=[96, 192, 384, 768]), + auxiliary_head=dict(in_channels=384)) diff --git a/downstream/mmsegmentation/configs/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py new file mode 100644 index 0000000..c68bfd4 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py @@ -0,0 +1,13 @@ +_base_ = ['./twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_large_20220308-fb5936f3.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[128, 256, 512, 1024], + num_heads=[4, 8, 16, 32], + depths=[2, 2, 18, 2], + drop_path_rate=0.3), + neck=dict(in_channels=[128, 256, 512, 1024]), +) diff --git a/downstream/mmsegmentation/configs/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k.py new file mode 100644 index 0000000..f98c070 --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k.py @@ -0,0 +1,13 @@ +_base_ = ['./twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py'] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_large_20220308-fb5936f3.pth' # noqa + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[128, 256, 512, 1024], + num_heads=[4, 8, 16, 32], + depths=[2, 2, 18, 2], + drop_path_rate=0.3), + decode_head=dict(in_channels=[128, 256, 512, 1024]), + auxiliary_head=dict(in_channels=512)) diff --git a/downstream/mmsegmentation/configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py 
b/downstream/mmsegmentation/configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py new file mode 100644 index 0000000..dbb944c --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/twins_pcpvt-s_fpn.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_small_20220308-7e1c3695.pth' # noqa + +model = dict( + backbone=dict( + type='SVT', + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[64, 128, 256, 512], + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], + windiow_sizes=[7, 7, 7, 7], + norm_after_stage=True), + neck=dict(in_channels=[64, 128, 256, 512], out_channels=256, num_outs=4), + decode_head=dict(num_classes=150), +) + +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) diff --git a/downstream/mmsegmentation/configs/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py new file mode 100644 index 0000000..44bf60b --- /dev/null +++ b/downstream/mmsegmentation/configs/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/twins_pcpvt-s_upernet.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] + +checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/alt_gvt_small_20220308-7e1c3695.pth' # noqa + +model = dict( + backbone=dict( + type='SVT', + init_cfg=dict(type='Pretrained', checkpoint=checkpoint), + embed_dims=[64, 128, 256, 512], + num_heads=[2, 4, 8, 16], + mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], + windiow_sizes=[7, 7, 7, 7], + norm_after_stage=True), + decode_head=dict(in_channels=[64, 128, 256, 512]), + auxiliary_head=dict(in_channels=256)) + +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict(custom_keys={ + 'pos_block': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +data = dict(samples_per_gpu=2, workers_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/unet/README.md b/downstream/mmsegmentation/configs/unet/README.md new file mode 100644 index 0000000..96e50e0 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/README.md @@ -0,0 +1,92 @@ +# UNet + +[U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +There is large consent that successful training of deep networks requires many thousand annotated training samples. In this paper, we present a network and training strategy that relies on the strong use of data augmentation to use the available annotated samples more efficiently. The architecture consists of a contracting path to capture context and a symmetric expanding path that enables precise localization. 
We show that such a network can be trained end-to-end from very few images and outperforms the prior best method (a sliding-window convolutional network) on the ISBI challenge for segmentation of neuronal structures in electron microscopic stacks. Using the same network trained on transmitted light microscopy images (phase contrast and DIC) we won the ISBI cell tracking challenge 2015 in these categories by a large margin. Moreover, the network is fast. Segmentation of a 512x512 image takes less than a second on a recent GPU. The full implementation (based on Caffe) and the trained networks are available at [this http URL](https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/). + + +
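For readers who want the contracting/expanding structure with skip connections described above in code form, a minimal sketch in plain PyTorch follows. This is an illustration only, not the MMSegmentation `UNet-S5-D16` backbone used by the configs in this diff (those come from `../_base_/models/fcn_unet_s5-d16.py` and related base files).

```python
# Minimal illustrative sketch of the U-Net idea: a contracting path, a
# symmetric expanding path, and a skip connection between them. Not the
# MMSegmentation UNet backbone; shapes and depths are kept tiny on purpose.
import torch
import torch.nn as nn


def conv_block(in_ch, out_ch):
    return nn.Sequential(
        nn.Conv2d(in_ch, out_ch, 3, padding=1), nn.ReLU(inplace=True),
        nn.Conv2d(out_ch, out_ch, 3, padding=1), nn.ReLU(inplace=True))


class TinyUNet(nn.Module):

    def __init__(self, in_ch=3, num_classes=2, base=16):
        super().__init__()
        self.enc1 = conv_block(in_ch, base)          # contracting path
        self.enc2 = conv_block(base, base * 2)
        self.pool = nn.MaxPool2d(2)
        self.up = nn.Upsample(
            scale_factor=2, mode='bilinear', align_corners=False)
        self.dec1 = conv_block(base * 2 + base, base)  # expanding path
        self.head = nn.Conv2d(base, num_classes, 1)

    def forward(self, x):
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        # upsample, then concatenate the high-resolution encoder feature
        d1 = self.dec1(torch.cat([self.up(e2), e1], dim=1))
        return self.head(d1)


if __name__ == '__main__':
    logits = TinyUNet()(torch.randn(1, 3, 64, 64))
    print(logits.shape)  # torch.Size([1, 2, 64, 64])
```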
    + +## Citation + +```bibtex +@inproceedings{ronneberger2015u, + title={U-net: Convolutional networks for biomedical image segmentation}, + author={Ronneberger, Olaf and Fischer, Philipp and Brox, Thomas}, + booktitle={International Conference on Medical image computing and computer-assisted intervention}, + pages={234--241}, + year={2015}, + organization={Springer} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Loss | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------ | --------- | --- |--------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 512x1024 | 160000 | 17.91 | 3.05 | 69.10 | 71.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes_20211210_145204-6860854e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes_20211210_145204.log.json) | + + +### DRIVE + +| Method | Backbone | Loss | Image Size | Crop Size | Stride | Lr schd | Mem (GB) | Inf time (fps) | mDice | Dice | config | download | +| ----------- | --------- | -------------------- |---------- | --------- | -----: | ------- | -------- | -------------: | --: |----: | ------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 584x565 | 64x64 | 42x42 | 40000 | 0.680 | - | 88.38 | 78.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_64x64_40k_drive.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_64x64_40k_drive/fcn_unet_s5-d16_64x64_40k_drive_20201223_191051-5daf6d3b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/unet_s5-d16_64x64_40k_drive/unet_s5-d16_64x64_40k_drive-20201223_191051.log.json) | +| UNet + FCN | UNet-S5-D16 | Cross Entropy + Dice | 584x565 | 64x64 | 42x42 | 40000 | 0.582 | - | 88.71 | 79.32 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201820-785de5c2.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201820.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy | 584x565 | 64x64 | 42x42 | 40000 | 0.599 | - | 88.35 | 78.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/pspnet_unet_s5-d16_64x64_40k_drive.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_64x64_40k_drive/pspnet_unet_s5-d16_64x64_40k_drive_20201227_181818-aac73387.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_64x64_40k_drive/pspnet_unet_s5-d16_64x64_40k_drive-20201227_181818.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy + Dice | 584x565 | 64x64 | 42x42 | 40000 | 0.585 | - | 88.76 | 79.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201821-22b3e3ba.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201821.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy | 584x565 | 64x64 | 42x42 | 40000 | 0.596 | - | 88.38 |78.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/deeplabv3_unet_s5-d16_64x64_40k_drive.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_64x64_40k_drive/deeplabv3_unet_s5-d16_64x64_40k_drive_20201226_094047-0671ff20.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_64x64_40k_drive/deeplabv3_unet_s5-d16_64x64_40k_drive-20201226_094047.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy + Dice | 584x565 | 64x64 | 42x42 | 40000 | 0.582 | - | 88.84 | 79.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201825-6bf0efd7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201825.log.json) | + +### STARE + +| Method | Backbone | Loss | Image Size | Crop Size | Stride | Lr schd | Mem (GB) | Inf time (fps) | mDice | Dice | config | download | +| ----------- | --------| --------------- | ---------- | --------- | -----: | ------- | -------- | -------------: | --: |----: | -------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 605x700 | 128x128 | 85x85 | 40000 | 0.968 | - | 89.78 | 81.02 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_128x128_40k_stare.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_128x128_40k_stare/fcn_unet_s5-d16_128x128_40k_stare_20201223_191051-7d77e78b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/unet_s5-d16_128x128_40k_stare/unet_s5-d16_128x128_40k_stare-20201223_191051.log.json) | +| UNet + FCN | UNet-S5-D16 | Cross Entropy + Dice | 605x700 | 128x128 | 85x85 | 40000 | 0.986 | - | 90.65 | 82.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201821-f75705a9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201821.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy | 605x700 | 128x128 | 85x85 | 40000 | 0.982 | - | 89.89 | 81.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/pspnet_unet_s5-d16_128x128_40k_stare.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_stare/pspnet_unet_s5-d16_128x128_40k_stare_20201227_181818-3c2923c4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_stare/pspnet_unet_s5-d16_128x128_40k_stare-20201227_181818.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy + Dice | 605x700 | 128x128 | 85x85 | 40000 | 1.028 | - | 90.72 | 82.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201823-f1063ef7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201823.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy | 605x700 | 128x128 | 85x85 | 40000 | 0.999 | - | 89.73 | 80.93 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/deeplabv3_unet_s5-d16_128x128_40k_stare.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_stare/deeplabv3_unet_s5-d16_128x128_40k_stare_20201226_094047-93dcb93c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_stare/deeplabv3_unet_s5-d16_128x128_40k_stare-20201226_094047.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy + Dice | 605x700 | 128x128 | 85x85 | 40000 | 1.010 | - | 90.65 | 82.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201825-21db614c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201825.log.json) 
| + +### CHASE_DB1 + +| Method | Backbone | Loss | Image Size | Crop Size | Stride | Lr schd | Mem (GB) | Inf time (fps) | mDice | Dice | config | download | +| ----------- | --------- | --------------- | ---------- | --------- | -----: | ------- | -------- | -------------: | --: |----: | -------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 960x999 | 128x128 | 85x85 | 40000 | 0.968 | - | 89.46 |80.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_128x128_40k_chase_db1.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_128x128_40k_chase_db1/fcn_unet_s5-d16_128x128_40k_chase_db1_20201223_191051-11543527.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/unet_s5-d16_128x128_40k_chase_db1/unet_s5-d16_128x128_40k_chase_db1-20201223_191051.log.json) | +| UNet + FCN | UNet-S5-D16 | Cross Entropy + Dice | 960x999 | 128x128 | 85x85 | 40000 | 0.986 | - | 89.52 | 80.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201821-1c4eb7cf.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201821.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy | 960x999 | 128x128 | 85x85 | 40000 | 0.982 | - | 89.52 |80.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1/pspnet_unet_s5-d16_128x128_40k_chase_db1_20201227_181818-68d4e609.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1/pspnet_unet_s5-d16_128x128_40k_chase_db1-20201227_181818.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy + Dice | 960x999 | 128x128 | 85x85 | 40000 | 1.028 | - | 89.45 | 80.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201823-c0802c4d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201823.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy | 960x999 | 128x128 | 85x85 | 40000 | 0.999 | - | 89.57 |80.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1/deeplabv3_unet_s5-d16_128x128_40k_chase_db1_20201226_094047-4c5aefa3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1/deeplabv3_unet_s5-d16_128x128_40k_chase_db1-20201226_094047.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16 | Cross Entropy + Dice | 960x999 | 128x128 | 85x85 | 40000 | 1.010 | - | 89.49 | 80.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201825-4ef29df5.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201825.log.json) | + +### HRF + +| Method | Backbone | Loss | Image Size | Crop Size | Stride | Lr schd | Mem (GB) | Inf time (fps) | mDice | Dice | config | download | +| ----------- | --------- | --------------- | ---------- | --------- | -----: | ------- | -------- | -------------: | --: |----: | -------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| UNet + FCN | UNet-S5-D16 | Cross Entropy | 2336x3504 | 256x256 | 170x170 | 40000 | 2.525 | - | 88.92 |79.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_256x256_40k_hrf/fcn_unet_s5-d16_256x256_40k_hrf_20201223_173724-d89cf1ed.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/unet_s5-d16_256x256_40k_hrf/unet_s5-d16_256x256_40k_hrf-20201223_173724.log.json) | +| UNet + FCN | UNet-S5-D16 | Cross Entropy + Dice | 2336x3504 | 256x256 | 170x170 | 40000 | 2.623 | - | 89.64 | 80.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201821-c314da8a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201821.log.json) | +| UNet + PSPNet | UNet-S5-D16 | Cross Entropy | 2336x3504 | 256x256 | 170x170 | 40000 | 2.588 | - | 89.24 |80.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/pspnet_unet_s5-d16_256x256_40k_hrf.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_256x256_40k_hrf/pspnet_unet_s5-d16_256x256_40k_hrf_20201227_181818-fdb7e29b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_256x256_40k_hrf/pspnet_unet_s5-d16_256x256_40k_hrf-20201227_181818.log.json) | +| UNet + 
PSPNet | UNet-S5-D16 | Cross Entropy + Dice | 2336x3504 | 256x256 | 170x170 | 40000 | 2.798 | - | 89.69 | 80.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201823-53d492fa.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201823.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16| Cross Entropy | 2336x3504 | 256x256 | 170x170 | 40000 | 2.604 | - | 89.32 |80.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf/deeplabv3_unet_s5-d16_256x256_40k_hrf_20201226_094047-3a1fdf85.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf/deeplabv3_unet_s5-d16_256x256_40k_hrf-20201226_094047.log.json) | +| UNet + DeepLabV3 | UNet-S5-D16| Cross Entropy + Dice | 2336x3504 | 256x256 | 170x170 | 40000 | 2.607 | - | 89.56 | 80.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_202032-59daf7a4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_202032.log.json) | + +Note: + +- In `DRIVE`, `STARE`, `CHASE_DB1`, and `HRF` dataset, `mDice` is mean dice of background and vessel, while `Dice` is dice metric of vessel(foreground) only. 
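As a quick check on how the two metric columns relate for these two-class (background/vessel) datasets, the background dice implied by any row can be recovered from `mDice` and `Dice`. A small worked example using the UNet + FCN (Cross Entropy) row on DRIVE above:

```python
# mDice is the mean of the background and vessel dice, while Dice is the
# vessel (foreground) dice only, so the background dice is implied by the
# two reported numbers. Values from the UNet + FCN Cross Entropy DRIVE row:
dice_vessel = 78.67   # "Dice" column
m_dice = 88.38        # "mDice" column
dice_background = 2 * m_dice - dice_vessel   # since mDice = (bg + vessel) / 2
print(round(dice_background, 2))             # -> 98.09
```

The configs added below select this metric at evaluation time with `evaluation = dict(metric='mDice')`.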
diff --git a/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py new file mode 100644 index 0000000..c706cf3 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/deeplabv3_unet_s5-d16.py', + '../_base_/datasets/chase_db1.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_128x128_40k_stare.py b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_128x128_40k_stare.py new file mode 100644 index 0000000..0ef02dc --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_128x128_40k_stare.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3_unet_s5-d16.py', '../_base_/datasets/stare.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf.py b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf.py new file mode 100644 index 0000000..118428b --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3_unet_s5-d16.py', '../_base_/datasets/hrf.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(256, 256), stride=(170, 170))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_64x64_40k_drive.py b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_64x64_40k_drive.py new file mode 100644 index 0000000..1f8862a --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_64x64_40k_drive.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/deeplabv3_unet_s5-d16.py', '../_base_/datasets/drive.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(64, 64), stride=(42, 42))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py new file mode 100644 index 0000000..1c48cbc --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py @@ -0,0 +1,6 @@ +_base_ = './deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py new file mode 100644 index 0000000..1022ede --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py @@ -0,0 +1,6 @@ +_base_ = './deeplabv3_unet_s5-d16_128x128_40k_stare.py' +model = dict( + 
decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py new file mode 100644 index 0000000..fc17da7 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py @@ -0,0 +1,6 @@ +_base_ = './deeplabv3_unet_s5-d16_256x256_40k_hrf.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py new file mode 100644 index 0000000..3f1f12e --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py @@ -0,0 +1,6 @@ +_base_ = './deeplabv3_unet_s5-d16_64x64_40k_drive.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_128x128_40k_chase_db1.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_128x128_40k_chase_db1.py new file mode 100644 index 0000000..2bc52d9 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_128x128_40k_chase_db1.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/chase_db1.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_128x128_40k_stare.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_128x128_40k_stare.py new file mode 100644 index 0000000..5d836c6 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_128x128_40k_stare.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/stare.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py new file mode 100644 index 0000000..be8eec7 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/hrf.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(256, 256), stride=(170, 170))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes.py new file mode 100644 index 0000000..a2f7dbe --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes.py @@ -0,0 +1,16 @@ 
+_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] + +model = dict( + decode_head=dict(num_classes=19), + auxiliary_head=dict(num_classes=19), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, +) diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_64x64_40k_drive.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_64x64_40k_drive.py new file mode 100644 index 0000000..80483ad --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_64x64_40k_drive.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/drive.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(64, 64), stride=(42, 42))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py new file mode 100644 index 0000000..5264866 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py @@ -0,0 +1,6 @@ +_base_ = './fcn_unet_s5-d16_128x128_40k_chase_db1.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py new file mode 100644 index 0000000..cf5fa1f --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py @@ -0,0 +1,6 @@ +_base_ = './fcn_unet_s5-d16_128x128_40k_stare.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py new file mode 100644 index 0000000..a154d7e --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py @@ -0,0 +1,6 @@ +_base_ = './fcn_unet_s5-d16_256x256_40k_hrf.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py new file mode 100644 index 0000000..1b8f860 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py @@ -0,0 +1,6 @@ +_base_ = './fcn_unet_s5-d16_64x64_40k_drive.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1.py 
b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1.py new file mode 100644 index 0000000..b085a17 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/pspnet_unet_s5-d16.py', + '../_base_/datasets/chase_db1.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_128x128_40k_stare.py b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_128x128_40k_stare.py new file mode 100644 index 0000000..9d729ce --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_128x128_40k_stare.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_unet_s5-d16.py', '../_base_/datasets/stare.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(128, 128), stride=(85, 85))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_256x256_40k_hrf.py b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_256x256_40k_hrf.py new file mode 100644 index 0000000..f57c916 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_256x256_40k_hrf.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_unet_s5-d16.py', '../_base_/datasets/hrf.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(256, 256), stride=(170, 170))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_64x64_40k_drive.py b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_64x64_40k_drive.py new file mode 100644 index 0000000..7b5421a --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_64x64_40k_drive.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/pspnet_unet_s5-d16.py', '../_base_/datasets/drive.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] +model = dict(test_cfg=dict(crop_size=(64, 64), stride=(42, 42))) +evaluation = dict(metric='mDice') diff --git a/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py new file mode 100644 index 0000000..a63dc11 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py @@ -0,0 +1,6 @@ +_base_ = './pspnet_unet_s5-d16_128x128_40k_chase_db1.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py new file mode 100644 index 0000000..1a3b665 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py @@ -0,0 +1,6 @@ +_base_ = './pspnet_unet_s5-d16_128x128_40k_stare.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + 
])) diff --git a/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py new file mode 100644 index 0000000..e19d6cf --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py @@ -0,0 +1,6 @@ +_base_ = './pspnet_unet_s5-d16_256x256_40k_hrf.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py new file mode 100644 index 0000000..7934923 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py @@ -0,0 +1,6 @@ +_base_ = './pspnet_unet_s5-d16_64x64_40k_drive.py' +model = dict( + decode_head=dict(loss_decode=[ + dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0), + dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0) + ])) diff --git a/downstream/mmsegmentation/configs/unet/unet.yml b/downstream/mmsegmentation/configs/unet/unet.yml new file mode 100644 index 0000000..5bb5014 --- /dev/null +++ b/downstream/mmsegmentation/configs/unet/unet.yml @@ -0,0 +1,377 @@ +Collections: +- Name: UNet + Metadata: + Training Data: + - Cityscapes + - DRIVE + - STARE + - CHASE_DB1 + - HRF + Paper: + URL: https://arxiv.org/abs/1505.04597 + Title: 'U-Net: Convolutional Networks for Biomedical Image Segmentation' + README: configs/unet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/unet.py#L225 + Version: v0.17.0 + Converted From: + Code: http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net +Models: +- Name: fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (512,1024) + lr schd: 160000 + inference time (ms/im): + - value: 327.87 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 17.91 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 69.1 + mIoU(ms+flip): 71.05 + Config: configs/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes/fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes_20211210_145204-6860854e.pth +- Name: fcn_unet_s5-d16_64x64_40k_drive + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.68 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 78.67 + Config: configs/unet/fcn_unet_s5-d16_64x64_40k_drive.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_64x64_40k_drive/fcn_unet_s5-d16_64x64_40k_drive_20201223_191051-5daf6d3b.pth +- Name: fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.582 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 79.32 + Config: configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201820-785de5c2.pth +- Name: pspnet_unet_s5-d16_64x64_40k_drive + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.599 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 78.62 + Config: configs/unet/pspnet_unet_s5-d16_64x64_40k_drive.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_64x64_40k_drive/pspnet_unet_s5-d16_64x64_40k_drive_20201227_181818-aac73387.pth +- Name: pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.585 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 79.42 + Config: configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201821-22b3e3ba.pth +- Name: deeplabv3_unet_s5-d16_64x64_40k_drive + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.596 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 78.69 + Config: configs/unet/deeplabv3_unet_s5-d16_64x64_40k_drive.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_64x64_40k_drive/deeplabv3_unet_s5-d16_64x64_40k_drive_20201226_094047-0671ff20.pth +- Name: deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (64,64) + lr schd: 40000 + Training Memory (GB): 0.582 + Results: + - Task: Semantic Segmentation + Dataset: DRIVE + Metrics: + Dice: 79.56 + Config: configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive_20211210_201825-6bf0efd7.pth +- Name: fcn_unet_s5-d16_128x128_40k_stare + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.968 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 81.02 + Config: configs/unet/fcn_unet_s5-d16_128x128_40k_stare.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_128x128_40k_stare/fcn_unet_s5-d16_128x128_40k_stare_20201223_191051-7d77e78b.pth +- Name: fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.986 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 82.7 + Config: configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201821-f75705a9.pth +- Name: pspnet_unet_s5-d16_128x128_40k_stare + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.982 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 81.22 + Config: 
configs/unet/pspnet_unet_s5-d16_128x128_40k_stare.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_stare/pspnet_unet_s5-d16_128x128_40k_stare_20201227_181818-3c2923c4.pth +- Name: pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 1.028 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 82.84 + Config: configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201823-f1063ef7.pth +- Name: deeplabv3_unet_s5-d16_128x128_40k_stare + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.999 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 80.93 + Config: configs/unet/deeplabv3_unet_s5-d16_128x128_40k_stare.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_stare/deeplabv3_unet_s5-d16_128x128_40k_stare_20201226_094047-93dcb93c.pth +- Name: deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 1.01 + Results: + - Task: Semantic Segmentation + Dataset: STARE + Metrics: + Dice: 82.71 + Config: configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare_20211210_201825-21db614c.pth +- Name: fcn_unet_s5-d16_128x128_40k_chase_db1 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.968 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.24 + Config: configs/unet/fcn_unet_s5-d16_128x128_40k_chase_db1.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_128x128_40k_chase_db1/fcn_unet_s5-d16_128x128_40k_chase_db1_20201223_191051-11543527.pth +- Name: fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.986 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.4 + Config: configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201821-1c4eb7cf.pth +- Name: pspnet_unet_s5-d16_128x128_40k_chase_db1 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.982 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.36 + Config: configs/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_128x128_40k_chase_db1/pspnet_unet_s5-d16_128x128_40k_chase_db1_20201227_181818-68d4e609.pth +- Name: pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + 
crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 1.028 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.28 + Config: configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201823-c0802c4d.pth +- Name: deeplabv3_unet_s5-d16_128x128_40k_chase_db1 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 0.999 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.47 + Config: configs/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_128x128_40k_chase_db1/deeplabv3_unet_s5-d16_128x128_40k_chase_db1_20201226_094047-4c5aefa3.pth +- Name: deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1 + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (128,128) + lr schd: 40000 + Training Memory (GB): 1.01 + Results: + - Task: Semantic Segmentation + Dataset: CHASE_DB1 + Metrics: + Dice: 80.37 + Config: configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1_20211210_201825-4ef29df5.pth +- Name: fcn_unet_s5-d16_256x256_40k_hrf + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.525 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 79.45 + Config: configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_256x256_40k_hrf/fcn_unet_s5-d16_256x256_40k_hrf_20201223_173724-d89cf1ed.pth +- Name: fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.623 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.87 + Config: configs/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201821-c314da8a.pth +- Name: pspnet_unet_s5-d16_256x256_40k_hrf + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.588 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.07 + Config: configs/unet/pspnet_unet_s5-d16_256x256_40k_hrf.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_256x256_40k_hrf/pspnet_unet_s5-d16_256x256_40k_hrf_20201227_181818-fdb7e29b.pth +- Name: pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.798 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.96 + Config: configs/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/unet/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_201823-53d492fa.pth +- Name: deeplabv3_unet_s5-d16_256x256_40k_hrf + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.604 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.21 + Config: configs/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_256x256_40k_hrf/deeplabv3_unet_s5-d16_256x256_40k_hrf_20201226_094047-3a1fdf85.pth +- Name: deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf + In Collection: UNet + Metadata: + backbone: UNet-S5-D16 + crop size: (256,256) + lr schd: 40000 + Training Memory (GB): 2.607 + Results: + - Task: Semantic Segmentation + Dataset: HRF + Metrics: + Dice: 80.71 + Config: configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_202032-59daf7a4.pth diff --git a/downstream/mmsegmentation/configs/upernet/README.md b/downstream/mmsegmentation/configs/upernet/README.md new file mode 100644 index 0000000..0ab3cb3 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/README.md @@ -0,0 +1,67 @@ +# UPerNet + +[Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/pdf/1807.10221.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +Humans recognize the visual world at multiple levels: we effortlessly categorize scenes and detect objects inside, while also identifying the textures and surfaces of the objects along with their different compositional parts. In this paper, we study a new task called Unified Perceptual Parsing, which requires the machine vision systems to recognize as many visual concepts as possible from a given image. A multi-task framework called UPerNet and a training strategy are developed to learn from heterogeneous image annotations. We benchmark our framework on Unified Perceptual Parsing and show that it is able to effectively segment a wide range of concepts from images. The trained networks are further applied to discover visual knowledge in natural scenes. Models are available at [this https URL](https://github.com/CSAILVision/unifiedparsing). + + +
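The UNet configs earlier in this diff override `decode_head.loss_decode` with a list that combines `CrossEntropyLoss` (weight 1.0) and `DiceLoss` (weight 3.0). As a rough sketch of what such a weighted combination computes, here is a plain-PyTorch version for the binary vessel-segmentation case; `ce_plus_dice` is a hypothetical helper and this is not MMSegmentation's actual `DiceLoss` implementation.

```python
# Sketch of a weighted CE + Dice loss (assumption: binary task, class 1 is
# the foreground). Not MMSegmentation's implementation.
import torch
import torch.nn.functional as F


def ce_plus_dice(logits, target, ce_weight=1.0, dice_weight=3.0, eps=1e-5):
    """logits: (N, C, H, W) raw scores, target: (N, H, W) class indices."""
    loss_ce = F.cross_entropy(logits, target)

    # Soft Dice on the foreground probability, averaged over the batch.
    prob = logits.softmax(dim=1)[:, 1]
    fg = (target == 1).float()
    inter = (prob * fg).sum(dim=(1, 2))
    union = prob.sum(dim=(1, 2)) + fg.sum(dim=(1, 2))
    loss_dice = 1 - ((2 * inter + eps) / (union + eps)).mean()

    return ce_weight * loss_ce + dice_weight * loss_dice


logits = torch.randn(2, 2, 64, 64)        # e.g. DRIVE-style 64x64 crops
target = torch.randint(0, 2, (2, 64, 64))
print(ce_plus_dice(logits, target))
```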
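Almost every config added in this diff is only a few lines long: it lists `_base_` files and overrides a handful of keys (the UPerNet R-101 variants, for instance, change only the pretrained checkpoint and the backbone depth). The sketch below illustrates the recursive dict-merge idea behind that pattern; `merge_cfg` is a hypothetical helper and the base values are abbreviated, so this is not mmcv's actual `Config` machinery.

```python
# Simplified illustration of the `_base_` override pattern used by these
# configs. `merge_cfg` is a hypothetical helper, not mmcv's Config class.
import copy


def merge_cfg(base: dict, override: dict) -> dict:
    """Recursively merge `override` into a copy of `base`."""
    merged = copy.deepcopy(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_cfg(merged[key], value)
        else:
            merged[key] = value
    return merged


# Pretend this (abbreviated) dict came from '../_base_/models/upernet_r50.py'.
base_model = dict(pretrained='open-mmlab://resnet50_v1c',
                  backbone=dict(type='ResNetV1c', depth=50),
                  decode_head=dict(type='UPerHead', num_classes=19))

# The R-101 configs above only state what differs from the base.
r101_override = dict(pretrained='open-mmlab://resnet101_v1c',
                     backbone=dict(depth=101))

model = merge_cfg(base_model, r101_override)
assert model['backbone'] == dict(type='ResNetV1c', depth=101)
assert model['decode_head']['num_classes'] == 19  # untouched keys survive
```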
    + +## Citation + +```bibtex +@inproceedings{xiao2018unified, + title={Unified perceptual parsing for scene understanding}, + author={Xiao, Tete and Liu, Yingcheng and Zhou, Bolei and Jiang, Yuning and Sun, Jian}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + pages={418--434}, + year={2018} +} +``` + +## Results and models + +### Cityscapes + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | -------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| UPerNet | R-50 | 512x1024 | 40000 | 6.4 | 4.25 | 77.10 | 78.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r50_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827-aa54cb54.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827.log.json) | +| UPerNet | R-101 | 512x1024 | 40000 | 7.4 | 3.79 | 78.69 | 80.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r101_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933-ebce3b10.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933.log.json) | +| UPerNet | R-50 | 769x769 | 40000 | 7.2 | 1.76 | 77.98 | 79.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r50_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048-92d21539.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048.log.json) | +| UPerNet | R-101 | 769x769 | 40000 | 8.4 | 1.56 | 79.03 | 80.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r101_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819-83c95d01.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819.log.json) | +| UPerNet | R-50 | 512x1024 | 80000 | - | - | 78.19 | 79.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r50_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207-848beca8.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207.log.json) | +| UPerNet | R-101 | 512x1024 | 80000 | - | - | 79.40 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403-f05f2345.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403.log.json) | +| UPerNet | R-50 | 769x769 | 80000 | - | - | 79.39 | 80.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r50_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107-82ae7d15.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107.log.json) | +| UPerNet | R-101 | 769x769 | 80000 | - | - | 80.10 | 81.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r101_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014-082fc334.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014.log.json) | + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | R-50 | 512x512 | 80000 | 8.1 | 23.40 | 40.70 | 41.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r50_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127-ecc8377b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127.log.json) | +| UPerNet | R-101 | 512x512 | 80000 | 9.1 | 20.34 | 42.91 | 43.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r101_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117-32e4db94.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117.log.json) | +| UPerNet | R-50 | 512x512 | 160000 | - | - | 42.05 | 42.78 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r50_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328-8534de8d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328.log.json) | +| UPerNet | R-101 | 512x512 | 160000 | - | - | 43.82 | 44.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r101_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951-91b32684.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951.log.json) | + +### Pascal VOC 2012 + Aug + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| UPerNet | R-50 | 512x512 | 20000 | 6.4 | 23.17 | 74.82 | 76.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r50_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330-5b5890a7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330.log.json) | +| UPerNet | R-101 | 512x512 | 20000 | 7.5 | 19.98 | 77.10 | 78.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r101_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629-f14e7f27.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629.log.json) | +| UPerNet | R-50 | 512x512 | 40000 | - | - | 75.92 | 77.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r50_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257-ca9bcc6b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257.log.json) | +| UPerNet | R-101 | 512x512 | 40000 | - | - | 77.43 | 78.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/upernet/upernet_r101_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549-e26476ac.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549.log.json) | diff --git a/downstream/mmsegmentation/configs/upernet/upernet.yml b/downstream/mmsegmentation/configs/upernet/upernet.yml new file mode 100644 index 0000000..7c3872a --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet.yml @@ -0,0 +1,305 @@ +Collections: +- Name: UPerNet + Metadata: + Training Data: + - Cityscapes + - ADE20K + - Pascal VOC 2012 + Aug + Paper: + URL: https://arxiv.org/pdf/1807.10221.pdf + Title: Unified Perceptual Parsing for Scene Understanding + README: configs/upernet/README.md + Code: + URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/uper_head.py#L13 + Version: v0.17.0 + Converted From: + Code: https://github.com/CSAILVision/unifiedparsing +Models: +- Name: upernet_r50_512x1024_40k_cityscapes + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 235.29 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 6.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.1 + mIoU(ms+flip): 78.37 + Config: configs/upernet/upernet_r50_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_40k_cityscapes/upernet_r50_512x1024_40k_cityscapes_20200605_094827-aa54cb54.pth +- Name: upernet_r101_512x1024_40k_cityscapes + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,1024) + lr schd: 40000 + inference time (ms/im): + - value: 263.85 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,1024) + Training Memory (GB): 7.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.69 + mIoU(ms+flip): 80.11 + Config: configs/upernet/upernet_r101_512x1024_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_40k_cityscapes/upernet_r101_512x1024_40k_cityscapes_20200605_094933-ebce3b10.pth +- Name: upernet_r50_769x769_40k_cityscapes + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 568.18 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 7.2 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 77.98 + mIoU(ms+flip): 79.7 + Config: configs/upernet/upernet_r50_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_40k_cityscapes/upernet_r50_769x769_40k_cityscapes_20200530_033048-92d21539.pth +- Name: upernet_r101_769x769_40k_cityscapes + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (769,769) + lr schd: 40000 + inference time (ms/im): + - value: 641.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (769,769) + Training Memory (GB): 8.4 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.03 + mIoU(ms+flip): 80.77 + Config: configs/upernet/upernet_r101_769x769_40k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_40k_cityscapes/upernet_r101_769x769_40k_cityscapes_20200530_040819-83c95d01.pth +- Name: upernet_r50_512x1024_80k_cityscapes + In 
Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 78.19 + mIoU(ms+flip): 79.19 + Config: configs/upernet/upernet_r50_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x1024_80k_cityscapes/upernet_r50_512x1024_80k_cityscapes_20200607_052207-848beca8.pth +- Name: upernet_r101_512x1024_80k_cityscapes + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,1024) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.4 + mIoU(ms+flip): 80.46 + Config: configs/upernet/upernet_r101_512x1024_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x1024_80k_cityscapes/upernet_r101_512x1024_80k_cityscapes_20200607_002403-f05f2345.pth +- Name: upernet_r50_769x769_80k_cityscapes + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 79.39 + mIoU(ms+flip): 80.92 + Config: configs/upernet/upernet_r50_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_769x769_80k_cityscapes/upernet_r50_769x769_80k_cityscapes_20200607_005107-82ae7d15.pth +- Name: upernet_r101_769x769_80k_cityscapes + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (769,769) + lr schd: 80000 + Results: + - Task: Semantic Segmentation + Dataset: Cityscapes + Metrics: + mIoU: 80.1 + mIoU(ms+flip): 81.49 + Config: configs/upernet/upernet_r101_769x769_80k_cityscapes.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_769x769_80k_cityscapes/upernet_r101_769x769_80k_cityscapes_20200607_001014-082fc334.pth +- Name: upernet_r50_512x512_80k_ade20k + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 42.74 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 8.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 40.7 + mIoU(ms+flip): 41.81 + Config: configs/upernet/upernet_r50_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_80k_ade20k/upernet_r50_512x512_80k_ade20k_20200614_144127-ecc8377b.pth +- Name: upernet_r101_512x512_80k_ade20k + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 49.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.1 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.91 + mIoU(ms+flip): 43.96 + Config: configs/upernet/upernet_r101_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_80k_ade20k/upernet_r101_512x512_80k_ade20k_20200614_185117-32e4db94.pth +- Name: upernet_r50_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.05 + mIoU(ms+flip): 42.78 + Config: configs/upernet/upernet_r50_512x512_160k_ade20k.py + Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_160k_ade20k/upernet_r50_512x512_160k_ade20k_20200615_184328-8534de8d.pth +- Name: upernet_r101_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 160000 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.82 + mIoU(ms+flip): 44.85 + Config: configs/upernet/upernet_r101_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_160k_ade20k/upernet_r101_512x512_160k_ade20k_20200615_161951-91b32684.pth +- Name: upernet_r50_512x512_20k_voc12aug + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 43.16 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 6.4 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 74.82 + mIoU(ms+flip): 76.35 + Config: configs/upernet/upernet_r50_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_20k_voc12aug/upernet_r50_512x512_20k_voc12aug_20200617_165330-5b5890a7.pth +- Name: upernet_r101_512x512_20k_voc12aug + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 20000 + inference time (ms/im): + - value: 50.05 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.5 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.1 + mIoU(ms+flip): 78.29 + Config: configs/upernet/upernet_r101_512x512_20k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_20k_voc12aug/upernet_r101_512x512_20k_voc12aug_20200617_165629-f14e7f27.pth +- Name: upernet_r50_512x512_40k_voc12aug + In Collection: UPerNet + Metadata: + backbone: R-50 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 75.92 + mIoU(ms+flip): 77.44 + Config: configs/upernet/upernet_r50_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r50_512x512_40k_voc12aug/upernet_r50_512x512_40k_voc12aug_20200613_162257-ca9bcc6b.pth +- Name: upernet_r101_512x512_40k_voc12aug + In Collection: UPerNet + Metadata: + backbone: R-101 + crop size: (512,512) + lr schd: 40000 + Results: + - Task: Semantic Segmentation + Dataset: Pascal VOC 2012 + Aug + Metrics: + mIoU: 77.43 + mIoU(ms+flip): 78.56 + Config: configs/upernet/upernet_r101_512x512_40k_voc12aug.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/upernet/upernet_r101_512x512_40k_voc12aug/upernet_r101_512x512_40k_voc12aug_20200613_163549-e26476ac.pth diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r101_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..b90b597 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x1024_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_512x1024_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py 
b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..420ca2e --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_512x1024_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_160k_ade20k.py new file mode 100644 index 0000000..146f13e --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_160k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_512x512_160k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_20k_voc12aug.py new file mode 100644 index 0000000..56345d1 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_20k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_512x512_20k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_40k_voc12aug.py new file mode 100644 index 0000000..0669b74 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_40k_voc12aug.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_512x512_40k_voc12aug.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_80k_ade20k.py new file mode 100644 index 0000000..abfb9c5 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r101_512x512_80k_ade20k.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_512x512_80k_ade20k.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r101_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/upernet/upernet_r101_769x769_40k_cityscapes.py new file mode 100644 index 0000000..e5f3a3f --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r101_769x769_40k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_769x769_40k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r101_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/upernet/upernet_r101_769x769_80k_cityscapes.py new file mode 100644 index 0000000..a709165 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r101_769x769_80k_cityscapes.py @@ -0,0 +1,2 @@ +_base_ = './upernet_r50_769x769_80k_cityscapes.py' +model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r50_512x1024_40k_cityscapes.py b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x1024_40k_cityscapes.py new file mode 100644 index 0000000..d621e89 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x1024_40k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', 
'../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' +] diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r50_512x1024_80k_cityscapes.py b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x1024_80k_cityscapes.py new file mode 100644 index 0000000..95fffcc --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x1024_80k_cityscapes.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_160k_ade20k.py new file mode 100644 index 0000000..f5dd9aa --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_20k_voc12aug.py b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_20k_voc12aug.py new file mode 100644 index 0000000..95f5c09 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_20k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_20k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_40k_voc12aug.py b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_40k_voc12aug.py new file mode 100644 index 0000000..9621fd1 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_40k_voc12aug.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_80k_ade20k.py new file mode 100644 index 0000000..f561e30 --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r50_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r50_769x769_40k_cityscapes.py b/downstream/mmsegmentation/configs/upernet/upernet_r50_769x769_40k_cityscapes.py new file mode 100644 index 0000000..89b18aa --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r50_769x769_40k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_40k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', 
crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/upernet/upernet_r50_769x769_80k_cityscapes.py b/downstream/mmsegmentation/configs/upernet/upernet_r50_769x769_80k_cityscapes.py new file mode 100644 index 0000000..29af98f --- /dev/null +++ b/downstream/mmsegmentation/configs/upernet/upernet_r50_769x769_80k_cityscapes.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/upernet_r50.py', + '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] +model = dict( + decode_head=dict(align_corners=True), + auxiliary_head=dict(align_corners=True), + test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/downstream/mmsegmentation/configs/vit/README.md b/downstream/mmsegmentation/configs/vit/README.md new file mode 100644 index 0000000..eec65b5 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/README.md @@ -0,0 +1,69 @@ +# Vision Transformer + +[An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/pdf/2010.11929.pdf) + +## Introduction + + + +Official Repo + +Code Snippet + +## Abstract + + + +While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train. + + +
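The abstract above hinges on one preprocessing step: an image is flattened into a sequence of 16x16 patches before being fed to a standard Transformer. A minimal tensor-level sketch of that patchification (plain PyTorch, not the library's actual patch-embedding code) might look like this:

```python
# Sketch only: turn one image into a 196-token patch sequence, ViT-B sizes
# assumed (224x224 input, 16x16 patches, 768-dim embeddings).
import torch
from torch import nn

image = torch.randn(3, 224, 224)                    # C x H x W
p = 16                                              # patch size

# Split H and W into 16x16 windows, then flatten each window into a token.
patches = image.unfold(1, p, p).unfold(2, p, p)     # (3, 14, 14, 16, 16)
tokens = patches.permute(1, 2, 0, 3, 4).reshape(-1, 3 * p * p)  # (196, 768)
assert tokens.shape == (196, 768)

# ViT then projects each flattened patch to the embedding dimension before
# adding the cls token and position embeddings.
embeddings = nn.Linear(3 * p * p, 768)(tokens)      # (196, 768)
```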
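The ViT and DeiT configs later in this diff pair AdamW with a `paramwise_cfg` whose `custom_keys` set `decay_mult=0.` for `pos_embed`, `cls_token`, and `norm`, i.e. those parameters receive no weight decay. Below is a rough plain-PyTorch sketch of that parameter-grouping idea; `adamw_param_groups` and `TinyBackbone` are hypothetical stand-ins, not MMSegmentation's optimizer constructor.

```python
# Sketch of "no weight decay for pos_embed / cls_token / norm" under AdamW.
# Hypothetical helper; the real configs rely on mmcv's paramwise_cfg.
import torch
from torch import nn


class TinyBackbone(nn.Module):
    """Stand-in whose parameter names match the config's custom_keys."""

    def __init__(self):
        super().__init__()
        self.pos_embed = nn.Parameter(torch.zeros(1, 16, 8))
        self.cls_token = nn.Parameter(torch.zeros(1, 1, 8))
        self.proj = nn.Linear(8, 8)
        self.norm = nn.LayerNorm(8)


def adamw_param_groups(model, lr=6e-5, weight_decay=0.01,
                       no_decay_keys=('pos_embed', 'cls_token', 'norm')):
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if any(key in name for key in no_decay_keys):
            no_decay.append(param)    # decay_mult=0. in the config
        else:
            decay.append(param)
    return torch.optim.AdamW(
        [{'params': decay, 'weight_decay': weight_decay},
         {'params': no_decay, 'weight_decay': 0.0}],
        lr=lr, betas=(0.9, 0.999))


optimizer = adamw_param_groups(TinyBackbone())
```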
    + +## Citation + +```bibtex +@article{dosoViTskiy2020, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={DosoViTskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil}, + journal={arXiv preprint arXiv:2010.11929}, + year={2020} +} +``` + +## Usage + +To use other repositories' pre-trained models, it is necessary to convert keys. + +We provide a script [`vit2mmseg.py`](../../tools/model_converters/vit2mmseg.py) in the tools directory to convert the key of models from [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py) to MMSegmentation style. + +```shell +python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/vit2mmseg.py https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth pretrain/jx_vit_base_p16_224-80ecf9dd.pth +``` + +This script convert model from `PRETRAIN_PATH` and store the converted model in `STORE_PATH`. + +## Results and models + +### ADE20K + +| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | +| ------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UPerNet | ViT-B + MLN | 512x512 | 80000 | 9.20 | 6.94 | 47.71 | 49.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_vit-b16_mln_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_80k_ade20k/upernet_vit-b16_mln_512x512_80k_ade20k_20210624_130547-0403cee1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_80k_ade20k/20210624_130547.log.json) | +| UPerNet | ViT-B + MLN | 512x512 | 160000 | 9.20 | 7.58 | 46.75 | 48.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_vit-b16_mln_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_160k_ade20k/upernet_vit-b16_mln_512x512_160k_ade20k_20210624_130547-852fa768.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_160k_ade20k/20210623_192432.log.json) | +| UPerNet | ViT-B + LN + MLN | 512x512 | 160000 | 9.21 | 6.82 | 47.73 | 49.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k/upernet_vit-b16_ln_mln_512x512_160k_ade20k_20210621_172828-f444c077.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k/20210621_172828.log.json) | +| UPerNet | DeiT-S | 512x512 | 80000 | 4.68 | 29.85 | 42.96 | 43.79 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_deit-s16_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_80k_ade20k/upernet_deit-s16_512x512_80k_ade20k_20210624_095228-afc93ec2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_80k_ade20k/20210624_095228.log.json) | +| UPerNet | DeiT-S | 512x512 | 160000 | 4.68 | 29.19 | 42.87 | 43.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_deit-s16_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_160k_ade20k/upernet_deit-s16_512x512_160k_ade20k_20210621_160903-5110d916.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_160k_ade20k/20210621_160903.log.json) | +| UPerNet | DeiT-S + MLN | 512x512 | 160000 | 5.69 | 11.18 | 43.82 | 45.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_deit-s16_mln_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_mln_512x512_160k_ade20k/upernet_deit-s16_mln_512x512_160k_ade20k_20210621_161021-fb9a5dfb.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_mln_512x512_160k_ade20k/20210621_161021.log.json) | +| UPerNet | DeiT-S + LN + MLN | 512x512 | 160000 | 5.69 | 12.39 | 43.52 | 45.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k/upernet_deit-s16_ln_mln_512x512_160k_ade20k_20210621_161021-c0cd652f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k/20210621_161021.log.json) | +| UPerNet | DeiT-B | 512x512 | 80000 | 7.75 | 9.69 | 45.24 | 46.73 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_deit-b16_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_80k_ade20k/upernet_deit-b16_512x512_80k_ade20k_20210624_130529-1e090789.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_80k_ade20k/20210624_130529.log.json) | +| UPerNet | DeiT-B | 512x512 | 160000 | 7.75 | 10.39 | 45.36 | 47.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_deit-b16_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_160k_ade20k/upernet_deit-b16_512x512_160k_ade20k_20210621_180100-828705d7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_160k_ade20k/20210621_180100.log.json) | +| UPerNet | DeiT-B + MLN | 512x512 | 160000 | 9.21 | 7.78 | 45.46 | 47.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_deit-b16_mln_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_mln_512x512_160k_ade20k/upernet_deit-b16_mln_512x512_160k_ade20k_20210621_191949-4e1450f3.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_mln_512x512_160k_ade20k/20210621_191949.log.json) | +| UPerNet | DeiT-B + LN + MLN | 512x512 | 160000 | 9.21 | 7.75 | 45.37 | 47.23 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k/upernet_deit-b16_ln_mln_512x512_160k_ade20k_20210623_153535-8a959c14.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k/20210623_153535.log.json) | diff --git a/downstream/mmsegmentation/configs/vit/upernet_deit-b16_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_deit-b16_512x512_160k_ade20k.py new file mode 100644 index 0000000..68f4bd4 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_deit-b16_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' + +model = dict( + pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', + backbone=dict(drop_path_rate=0.1), + neck=None) diff --git a/downstream/mmsegmentation/configs/vit/upernet_deit-b16_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_deit-b16_512x512_80k_ade20k.py new file mode 100644 index 0000000..7204826 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_deit-b16_512x512_80k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = './upernet_vit-b16_mln_512x512_80k_ade20k.py' + +model = dict( + pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', + backbone=dict(drop_path_rate=0.1), + neck=None) diff --git a/downstream/mmsegmentation/configs/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k.py new file mode 100644 index 0000000..32909ff --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k.py @@ -0,0 +1,5 @@ +_base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' + +model = dict( + pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', + backbone=dict(drop_path_rate=0.1, final_norm=True)) diff --git a/downstream/mmsegmentation/configs/vit/upernet_deit-b16_mln_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_deit-b16_mln_512x512_160k_ade20k.py new file mode 100644 index 0000000..4abefe8 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_deit-b16_mln_512x512_160k_ade20k.py @@ -0,0 +1,6 @@ +_base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' + +model = dict( + pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', + backbone=dict(drop_path_rate=0.1), +) diff --git a/downstream/mmsegmentation/configs/vit/upernet_deit-s16_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_deit-s16_512x512_160k_ade20k.py new file mode 100644 index 0000000..290ff19 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_deit-s16_512x512_160k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' + +model = dict( + pretrained='pretrain/deit_small_patch16_224-cd65a155.pth', + backbone=dict(num_heads=6, embed_dims=384, drop_path_rate=0.1), + decode_head=dict(num_classes=150, in_channels=[384, 384, 384, 384]), + neck=None, + auxiliary_head=dict(num_classes=150, in_channels=384)) diff --git a/downstream/mmsegmentation/configs/vit/upernet_deit-s16_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_deit-s16_512x512_80k_ade20k.py new file mode 100644 index 0000000..605d264 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_deit-s16_512x512_80k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = 
'./upernet_vit-b16_mln_512x512_80k_ade20k.py' + +model = dict( + pretrained='pretrain/deit_small_patch16_224-cd65a155.pth', + backbone=dict(num_heads=6, embed_dims=384, drop_path_rate=0.1), + decode_head=dict(num_classes=150, in_channels=[384, 384, 384, 384]), + neck=None, + auxiliary_head=dict(num_classes=150, in_channels=384)) diff --git a/downstream/mmsegmentation/configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py new file mode 100644 index 0000000..ef743a2 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py @@ -0,0 +1,9 @@ +_base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' + +model = dict( + pretrained='pretrain/deit_small_patch16_224-cd65a155.pth', + backbone=dict( + num_heads=6, embed_dims=384, drop_path_rate=0.1, final_norm=True), + decode_head=dict(num_classes=150, in_channels=[384, 384, 384, 384]), + neck=dict(in_channels=[384, 384, 384, 384], out_channels=384), + auxiliary_head=dict(num_classes=150, in_channels=384)) diff --git a/downstream/mmsegmentation/configs/vit/upernet_deit-s16_mln_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_deit-s16_mln_512x512_160k_ade20k.py new file mode 100644 index 0000000..069cab7 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_deit-s16_mln_512x512_160k_ade20k.py @@ -0,0 +1,8 @@ +_base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' + +model = dict( + pretrained='pretrain/deit_small_patch16_224-cd65a155.pth', + backbone=dict(num_heads=6, embed_dims=384, drop_path_rate=0.1), + decode_head=dict(num_classes=150, in_channels=[384, 384, 384, 384]), + neck=dict(in_channels=[384, 384, 384, 384], out_channels=384), + auxiliary_head=dict(num_classes=150, in_channels=384)) diff --git a/downstream/mmsegmentation/configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py new file mode 100644 index 0000000..51eeda0 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/upernet_vit-b16_ln_mln.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] + +model = dict( + pretrained='pretrain/vit_base_patch16_224.pth', + backbone=dict(drop_path_rate=0.1, final_norm=True), + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/vit/upernet_vit-b16_mln_512x512_160k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_vit-b16_mln_512x512_160k_ade20k.py new file mode 100644 index 0000000..5b148d7 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_vit-b16_mln_512x512_160k_ade20k.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/upernet_vit-b16_ln_mln.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_160k.py' +] + +model = dict( + pretrained='pretrain/vit_base_patch16_224.pth', + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) + })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/vit/upernet_vit-b16_mln_512x512_80k_ade20k.py b/downstream/mmsegmentation/configs/vit/upernet_vit-b16_mln_512x512_80k_ade20k.py new file mode 100644 index 0000000..f893500 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/upernet_vit-b16_mln_512x512_80k_ade20k.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/upernet_vit-b16_ln_mln.py', + '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py', + '../_base_/schedules/schedule_80k.py' +] + +model = dict( + pretrained='pretrain/vit_base_patch16_224.pth', + decode_head=dict(num_classes=150), + auxiliary_head=dict(num_classes=150)) + +# AdamW optimizer, no weight decay for position embedding & layer norm +# in backbone +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.00006, + betas=(0.9, 0.999), + weight_decay=0.01, + paramwise_cfg=dict( + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.) 
+ })) + +lr_config = dict( + _delete_=True, + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, + min_lr=0.0, + by_epoch=False) + +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) diff --git a/downstream/mmsegmentation/configs/vit/vit.yml b/downstream/mmsegmentation/configs/vit/vit.yml new file mode 100644 index 0000000..35e4952 --- /dev/null +++ b/downstream/mmsegmentation/configs/vit/vit.yml @@ -0,0 +1,243 @@ +Models: +- Name: upernet_vit-b16_mln_512x512_80k_ade20k + In Collection: UPerNet + Metadata: + backbone: ViT-B + MLN + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 144.09 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.71 + mIoU(ms+flip): 49.51 + Config: configs/vit/upernet_vit-b16_mln_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_80k_ade20k/upernet_vit-b16_mln_512x512_80k_ade20k_20210624_130547-0403cee1.pth +- Name: upernet_vit-b16_mln_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: ViT-B + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 131.93 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.2 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 46.75 + mIoU(ms+flip): 48.46 + Config: configs/vit/upernet_vit-b16_mln_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_mln_512x512_160k_ade20k/upernet_vit-b16_mln_512x512_160k_ade20k_20210624_130547-852fa768.pth +- Name: upernet_vit-b16_ln_mln_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: ViT-B + LN + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 146.63 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.21 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 47.73 + mIoU(ms+flip): 49.95 + Config: configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k/upernet_vit-b16_ln_mln_512x512_160k_ade20k_20210621_172828-f444c077.pth +- Name: upernet_deit-s16_512x512_80k_ade20k + In Collection: UPerNet + Metadata: + backbone: DeiT-S + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 33.5 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.68 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.96 + mIoU(ms+flip): 43.79 + Config: configs/vit/upernet_deit-s16_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_80k_ade20k/upernet_deit-s16_512x512_80k_ade20k_20210624_095228-afc93ec2.pth +- Name: upernet_deit-s16_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: DeiT-S + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 34.26 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 4.68 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 42.87 + mIoU(ms+flip): 
43.79 + Config: configs/vit/upernet_deit-s16_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_512x512_160k_ade20k/upernet_deit-s16_512x512_160k_ade20k_20210621_160903-5110d916.pth +- Name: upernet_deit-s16_mln_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: DeiT-S + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 89.45 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.69 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.82 + mIoU(ms+flip): 45.07 + Config: configs/vit/upernet_deit-s16_mln_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_mln_512x512_160k_ade20k/upernet_deit-s16_mln_512x512_160k_ade20k_20210621_161021-fb9a5dfb.pth +- Name: upernet_deit-s16_ln_mln_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: DeiT-S + LN + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 80.71 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 5.69 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 43.52 + mIoU(ms+flip): 45.01 + Config: configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k/upernet_deit-s16_ln_mln_512x512_160k_ade20k_20210621_161021-c0cd652f.pth +- Name: upernet_deit-b16_512x512_80k_ade20k + In Collection: UPerNet + Metadata: + backbone: DeiT-B + crop size: (512,512) + lr schd: 80000 + inference time (ms/im): + - value: 103.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.75 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.24 + mIoU(ms+flip): 46.73 + Config: configs/vit/upernet_deit-b16_512x512_80k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_80k_ade20k/upernet_deit-b16_512x512_80k_ade20k_20210624_130529-1e090789.pth +- Name: upernet_deit-b16_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: DeiT-B + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 96.25 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 7.75 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.36 + mIoU(ms+flip): 47.16 + Config: configs/vit/upernet_deit-b16_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_512x512_160k_ade20k/upernet_deit-b16_512x512_160k_ade20k_20210621_180100-828705d7.pth +- Name: upernet_deit-b16_mln_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: DeiT-B + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 128.53 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.21 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.46 + mIoU(ms+flip): 47.16 + Config: configs/vit/upernet_deit-b16_mln_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_mln_512x512_160k_ade20k/upernet_deit-b16_mln_512x512_160k_ade20k_20210621_191949-4e1450f3.pth +- Name: 
upernet_deit-b16_ln_mln_512x512_160k_ade20k + In Collection: UPerNet + Metadata: + backbone: DeiT-B + LN + MLN + crop size: (512,512) + lr schd: 160000 + inference time (ms/im): + - value: 129.03 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (512,512) + Training Memory (GB): 9.21 + Results: + - Task: Semantic Segmentation + Dataset: ADE20K + Metrics: + mIoU: 45.37 + mIoU(ms+flip): 47.23 + Config: configs/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k.py + Weights: https://download.openmmlab.com/mmsegmentation/v0.5/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k/upernet_deit-b16_ln_mln_512x512_160k_ade20k_20210623_153535-8a959c14.pth diff --git a/downstream/mmsegmentation/mmseg/__init__.py b/downstream/mmsegmentation/mmseg/__init__.py new file mode 100644 index 0000000..8da9bc6 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/__init__.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +from packaging.version import parse + +from .version import __version__, version_info + +MMCV_MIN = '1.3.13' +MMCV_MAX = '1.5.0' + + +def digit_version(version_str: str, length: int = 4): + """Convert a version string into a tuple of integers. + + This method is usually used for comparing two versions. For pre-release + versions: alpha < beta < rc. + + Args: + version_str (str): The version string. + length (int): The maximum number of version levels. Default: 4. + + Returns: + tuple[int]: The version info in digits (integers). + """ + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + mapping = {'a': -3, 'b': -2, 'rc': -1} + val = -4 + # version.pre can be None + if version.pre: + if version.pre[0] not in mapping: + warnings.warn(f'unknown prerelease version {version.pre[0]}, ' + 'version checking may go wrong') + else: + val = mapping[version.pre[0]] + release.extend([val, version.pre[-1]]) + else: + release.extend([val, 0]) + + elif version.is_postrelease: + release.extend([1, version.post]) + else: + release.extend([0, 0]) + return tuple(release) + + +mmcv_min_version = digit_version(MMCV_MIN) +mmcv_max_version = digit_version(MMCV_MAX) +mmcv_version = digit_version(mmcv.__version__) + + +assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.' + +__all__ = ['__version__', 'version_info', 'digit_version'] diff --git a/downstream/mmsegmentation/mmseg/apis/__init__.py b/downstream/mmsegmentation/mmseg/apis/__init__.py new file mode 100644 index 0000000..c688180 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/apis/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
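+# Convenience re-exports for mmseg.apis: model construction and single-image inference helpers, the single- and multi-GPU test loops, and the training entry point.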
+from .inference import inference_segmentor, init_segmentor, show_result_pyplot +from .test import multi_gpu_test, single_gpu_test +from .train import (get_root_logger, init_random_seed, set_random_seed, + train_segmentor) + +__all__ = [ + 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor', + 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test', + 'show_result_pyplot', 'init_random_seed' +] diff --git a/downstream/mmsegmentation/mmseg/apis/inference.py b/downstream/mmsegmentation/mmseg/apis/inference.py new file mode 100644 index 0000000..9069438 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/apis/inference.py @@ -0,0 +1,136 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import matplotlib.pyplot as plt +import mmcv +import torch +from mmcv.parallel import collate, scatter +from mmcv.runner import load_checkpoint + +from mmseg.datasets.pipelines import Compose +from mmseg.models import build_segmentor + + +def init_segmentor(config, checkpoint=None, device='cuda:0'): + """Initialize a segmentor from config file. + + Args: + config (str or :obj:`mmcv.Config`): Config file path or the config + object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. + device (str, optional) CPU/CUDA device option. Default 'cuda:0'. + Use 'cpu' for loading model on CPU. + Returns: + nn.Module: The constructed segmentor. + """ + if isinstance(config, str): + config = mmcv.Config.fromfile(config) + elif not isinstance(config, mmcv.Config): + raise TypeError('config must be a filename or Config object, ' + 'but got {}'.format(type(config))) + config.model.pretrained = None + config.model.train_cfg = None + model = build_segmentor(config.model, test_cfg=config.get('test_cfg')) + if checkpoint is not None: + checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') + model.CLASSES = checkpoint['meta']['CLASSES'] + model.PALETTE = checkpoint['meta']['PALETTE'] + model.cfg = config # save the config in the model for convenience + model.to(device) + model.eval() + return model + + +class LoadImage: + """A simple pipeline to load image.""" + + def __call__(self, results): + """Call function to load images into results. + + Args: + results (dict): A result dict contains the file name + of the image to be read. + + Returns: + dict: ``results`` will be returned containing loaded image. + """ + + if isinstance(results['img'], str): + results['filename'] = results['img'] + results['ori_filename'] = results['img'] + else: + results['filename'] = None + results['ori_filename'] = None + img = mmcv.imread(results['img']) + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + return results + + +def inference_segmentor(model, img): + """Inference image(s) with the segmentor. + + Args: + model (nn.Module): The loaded segmentor. + imgs (str/ndarray or list[str/ndarray]): Either image files or loaded + images. + + Returns: + (list[Tensor]): The segmentation result. 
+ """ + cfg = model.cfg + device = next(model.parameters()).device # model device + # build the data pipeline + test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] + test_pipeline = Compose(test_pipeline) + # prepare data + data = dict(img=img) + data = test_pipeline(data) + data = collate([data], samples_per_gpu=1) + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + else: + data['img_metas'] = [i.data[0] for i in data['img_metas']] + + # forward the model + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + return result + + +def show_result_pyplot(model, + img, + result, + palette=None, + fig_size=(15, 10), + opacity=0.5, + title='', + block=True): + """Visualize the segmentation results on the image. + + Args: + model (nn.Module): The loaded segmentor. + img (str or np.ndarray): Image filename or loaded image. + result (list): The segmentation result. + palette (list[list[int]]] | None): The palette of segmentation + map. If None is given, random palette will be generated. + Default: None + fig_size (tuple): Figure size of the pyplot figure. + opacity(float): Opacity of painted segmentation map. + Default 0.5. + Must be in (0, 1] range. + title (str): The title of pyplot figure. + Default is ''. + block (bool): Whether to block the pyplot figure. + Default is True. + """ + if hasattr(model, 'module'): + model = model.module + img = model.show_result( + img, result, palette=palette, show=False, opacity=opacity) + plt.figure(figsize=fig_size) + plt.imshow(mmcv.bgr2rgb(img)) + plt.title(title) + plt.tight_layout() + plt.show(block=block) diff --git a/downstream/mmsegmentation/mmseg/apis/test.py b/downstream/mmsegmentation/mmseg/apis/test.py new file mode 100644 index 0000000..cc4fcc9 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/apis/test.py @@ -0,0 +1,233 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import tempfile +import warnings + +import mmcv +import numpy as np +import torch +from mmcv.engine import collect_results_cpu, collect_results_gpu +from mmcv.image import tensor2imgs +from mmcv.runner import get_dist_info + + +def np2tmp(array, temp_file_name=None, tmpdir=None): + """Save ndarray to local numpy file. + + Args: + array (ndarray): Ndarray to save. + temp_file_name (str): Numpy file name. If 'temp_file_name=None', this + function will generate a file name with tempfile.NamedTemporaryFile + to save ndarray. Default: None. + tmpdir (str): Temporary directory to save Ndarray files. Default: None. + Returns: + str: The numpy file name. + """ + + if temp_file_name is None: + temp_file_name = tempfile.NamedTemporaryFile( + suffix='.npy', delete=False, dir=tmpdir).name + np.save(temp_file_name, array) + return temp_file_name + + +def single_gpu_test(model, + data_loader, + show=False, + out_dir=None, + efficient_test=False, + opacity=0.5, + pre_eval=False, + format_only=False, + format_args={}): + """Test with single GPU by progressive mode. + + Args: + model (nn.Module): Model to be tested. + data_loader (utils.data.Dataloader): Pytorch data loader. + show (bool): Whether show results during inference. Default: False. + out_dir (str, optional): If specified, the results will be dumped into + the directory to save output results. + efficient_test (bool): Whether save the results as local numpy files to + save CPU memory during evaluation. Mutually exclusive with + pre_eval and format_results. Default: False. 
+ opacity(float): Opacity of painted segmentation map. + Default 0.5. + Must be in (0, 1] range. + pre_eval (bool): Use dataset.pre_eval() function to generate + pre_results for metric evaluation. Mutually exclusive with + efficient_test and format_results. Default: False. + format_only (bool): Only format result for results commit. + Mutually exclusive with pre_eval and efficient_test. + Default: False. + format_args (dict): The args for format_results. Default: {}. + Returns: + list: list of evaluation pre-results or list of save file names. + """ + if efficient_test: + warnings.warn( + 'DeprecationWarning: ``efficient_test`` will be deprecated, the ' + 'evaluation is CPU memory friendly with pre_eval=True') + mmcv.mkdir_or_exist('.efficient_test') + # when none of them is set true, return segmentation results as + # a list of np.array. + assert [efficient_test, pre_eval, format_only].count(True) <= 1, \ + '``efficient_test``, ``pre_eval`` and ``format_only`` are mutually ' \ + 'exclusive, only one of them could be true .' + + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + # The pipeline about how the data_loader retrieval samples from dataset: + # sampler -> batch_sampler -> indices + # The indices are passed to dataset_fetcher to get data from dataset. + # data_fetcher -> collate_fn(dataset[index]) -> data_sample + # we use batch_sampler to get correct data idx + loader_indices = data_loader.batch_sampler + + for batch_indices, data in zip(loader_indices, data_loader): + with torch.no_grad(): + result = model(return_loss=False, **data) + + if show or out_dir: + img_tensor = data['img'][0] + img_metas = data['img_metas'][0].data[0] + imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) + assert len(imgs) == len(img_metas) + + for img, img_meta in zip(imgs, img_metas): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + + ori_h, ori_w = img_meta['ori_shape'][:-1] + img_show = mmcv.imresize(img_show, (ori_w, ori_h)) + + if out_dir: + out_file = osp.join(out_dir, img_meta['ori_filename']) + else: + out_file = None + + model.module.show_result( + img_show, + result, + palette=dataset.PALETTE, + show=show, + out_file=out_file, + opacity=opacity) + + if efficient_test: + result = [np2tmp(_, tmpdir='.efficient_test') for _ in result] + + if format_only: + result = dataset.format_results( + result, indices=batch_indices, **format_args) + if pre_eval: + # TODO: adapt samples_per_gpu > 1. + # only samples_per_gpu=1 valid now + result = dataset.pre_eval(result, indices=batch_indices) + results.extend(result) + else: + results.extend(result) + + batch_size = len(result) + for _ in range(batch_size): + prog_bar.update() + + return results + + +def multi_gpu_test(model, + data_loader, + tmpdir=None, + gpu_collect=False, + efficient_test=False, + pre_eval=False, + format_only=False, + format_args={}): + """Test model with multiple gpus by progressive mode. + + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + + Args: + model (nn.Module): Model to be tested. + data_loader (utils.data.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. 
The same path is used for efficient + test. Default: None. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + Default: False. + efficient_test (bool): Whether save the results as local numpy files to + save CPU memory during evaluation. Mutually exclusive with + pre_eval and format_results. Default: False. + pre_eval (bool): Use dataset.pre_eval() function to generate + pre_results for metric evaluation. Mutually exclusive with + efficient_test and format_results. Default: False. + format_only (bool): Only format result for results commit. + Mutually exclusive with pre_eval and efficient_test. + Default: False. + format_args (dict): The args for format_results. Default: {}. + + Returns: + list: list of evaluation pre-results or list of save file names. + """ + if efficient_test: + warnings.warn( + 'DeprecationWarning: ``efficient_test`` will be deprecated, the ' + 'evaluation is CPU memory friendly with pre_eval=True') + mmcv.mkdir_or_exist('.efficient_test') + # when none of them is set true, return segmentation results as + # a list of np.array. + assert [efficient_test, pre_eval, format_only].count(True) <= 1, \ + '``efficient_test``, ``pre_eval`` and ``format_only`` are mutually ' \ + 'exclusive, only one of them could be true .' + + model.eval() + results = [] + dataset = data_loader.dataset + # The pipeline about how the data_loader retrieval samples from dataset: + # sampler -> batch_sampler -> indices + # The indices are passed to dataset_fetcher to get data from dataset. + # data_fetcher -> collate_fn(dataset[index]) -> data_sample + # we use batch_sampler to get correct data idx + + # batch_sampler based on DistributedSampler, the indices only point to data + # samples of related machine. + loader_indices = data_loader.batch_sampler + + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + + for batch_indices, data in zip(loader_indices, data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + + if efficient_test: + result = [np2tmp(_, tmpdir='.efficient_test') for _ in result] + + if format_only: + result = dataset.format_results( + result, indices=batch_indices, **format_args) + if pre_eval: + # TODO: adapt samples_per_gpu > 1. + # only samples_per_gpu=1 valid now + result = dataset.pre_eval(result, indices=batch_indices) + + results.extend(result) + + if rank == 0: + batch_size = len(result) * world_size + for _ in range(batch_size): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + results = collect_results_gpu(results, len(dataset)) + else: + results = collect_results_cpu(results, len(dataset), tmpdir) + return results diff --git a/downstream/mmsegmentation/mmseg/apis/train.py b/downstream/mmsegmentation/mmseg/apis/train.py new file mode 100644 index 0000000..3563e36 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/apis/train.py @@ -0,0 +1,191 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
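+# Training entry point for segmentors: builds dataloaders, the optimizer and the runner from the config, registers lr/checkpoint/logging/evaluation and custom hooks, then resumes or loads a checkpoint before running the workflow.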
+import random +import warnings + +import mmcv +import numpy as np +import torch +import torch.distributed as dist +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner, + build_runner, get_dist_info) +from mmcv.utils import build_from_cfg + +from mmseg import digit_version +from mmseg.core import DistEvalHook, EvalHook, build_optimizer +from mmseg.datasets import build_dataloader, build_dataset +from mmseg.utils import find_latest_checkpoint, get_root_logger + + +def init_random_seed(seed=None, device='cuda'): + """Initialize random seed. + + If the seed is not set, the seed will be automatically randomized, + and then broadcast to all processes to prevent some potential bugs. + Args: + seed (int, Optional): The seed. Default to None. + device (str): The device where the seed will be put on. + Default to 'cuda'. + Returns: + int: Seed to be used. + """ + if seed is not None: + return seed + + # Make sure all ranks share the same random seed to prevent + # some potential bugs. Please refer to + # https://github.com/open-mmlab/mmdetection/issues/6339 + rank, world_size = get_dist_info() + seed = np.random.randint(2**31) + if world_size == 1: + return seed + + if rank == 0: + random_num = torch.tensor(seed, dtype=torch.int32, device=device) + else: + random_num = torch.tensor(0, dtype=torch.int32, device=device) + dist.broadcast(random_num, src=0) + return random_num.item() + + +def set_random_seed(seed, deterministic=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def train_segmentor(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + meta=None): + """Launch segmentor training.""" + logger = get_root_logger(cfg.log_level) + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + # The default loader config + loader_cfg = dict( + # cfg.gpus will be ignored if distributed + num_gpus=len(cfg.gpu_ids), + dist=distributed, + seed=cfg.seed, + drop_last=True) + # The overall dataloader settings + loader_cfg.update({ + k: v + for k, v in cfg.data.items() if k not in [ + 'train', 'val', 'test', 'train_dataloader', 'val_dataloader', + 'test_dataloader' + ] + }) + + # The specific dataloader settings + train_loader_cfg = {**loader_cfg, **cfg.data.get('train_dataloader', {})} + data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] + + # put model on gpus + if distributed: + find_unused_parameters = cfg.get('find_unused_parameters', False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + else: + if not torch.cuda.is_available(): + assert digit_version(mmcv.__version__) >= digit_version('1.4.4'), \ + 'Please use MMCV >= 1.4.4 for CPU training!' 
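+ # Non-distributed training: wrap the model with MMDataParallel on the GPUs in cfg.gpu_ids (CPU-only runs additionally require MMCV >= 1.4.4, checked above).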
+ model = MMDataParallel(model, device_ids=cfg.gpu_ids) + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + + if cfg.get('runner') is None: + cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters} + warnings.warn( + 'config is now expected to have a `runner` section, ' + 'please set `runner` in your config.', UserWarning) + + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + + # register hooks + runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, + cfg.checkpoint_config, cfg.log_config, + cfg.get('momentum_config', None)) + if distributed: + # when distributed training by epoch, using`DistSamplerSeedHook` to set + # the different seed to distributed sampler for each epoch, it will + # shuffle dataset at each epoch and avoid overfitting. + if isinstance(runner, EpochBasedRunner): + runner.register_hook(DistSamplerSeedHook()) + + # an ugly walkaround to make the .log and .log.json filenames the same + runner.timestamp = timestamp + + # register eval hooks + if validate: + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + # The specific dataloader settings + val_loader_cfg = { + **loader_cfg, + 'samples_per_gpu': 1, + 'shuffle': False, # Not shuffle by default + **cfg.data.get('val_dataloader', {}), + } + val_dataloader = build_dataloader(val_dataset, **val_loader_cfg) + eval_cfg = cfg.get('evaluation', {}) + eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' + eval_hook = DistEvalHook if distributed else EvalHook + # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the + # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'. + runner.register_hook( + eval_hook(val_dataloader, **eval_cfg), priority='LOW') + + # user-defined hooks + if cfg.get('custom_hooks', None): + custom_hooks = cfg.custom_hooks + assert isinstance(custom_hooks, list), \ + f'custom_hooks expect list type, but got {type(custom_hooks)}' + for hook_cfg in cfg.custom_hooks: + assert isinstance(hook_cfg, dict), \ + 'Each item in custom_hooks expects dict type, but got ' \ + f'{type(hook_cfg)}' + hook_cfg = hook_cfg.copy() + priority = hook_cfg.pop('priority', 'NORMAL') + hook = build_from_cfg(hook_cfg, HOOKS) + runner.register_hook(hook, priority=priority) + + if cfg.resume_from is None and cfg.get('auto_resume'): + resume_from = find_latest_checkpoint(cfg.work_dir) + if resume_from is not None: + cfg.resume_from = resume_from + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) diff --git a/downstream/mmsegmentation/mmseg/core/__init__.py b/downstream/mmsegmentation/mmseg/core/__init__.py new file mode 100644 index 0000000..c913349 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .builder import (OPTIMIZER_BUILDERS, build_optimizer, + build_optimizer_constructor) +from .evaluation import * # noqa: F401, F403 +from .layer_decay_optimizer_constructor import \ + LayerDecayOptimizerConstructor # noqa: F401 +from .seg import * # noqa: F401, F403 +from .utils import * # noqa: F401, F403 + +__all__ = [ + 'LayerDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS', 'build_optimizer', + 'build_optimizer_constructor' +] diff --git a/downstream/mmsegmentation/mmseg/core/builder.py b/downstream/mmsegmentation/mmseg/core/builder.py new file mode 100644 index 0000000..406dd9b --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/builder.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +from mmcv.runner.optimizer import OPTIMIZER_BUILDERS as MMCV_OPTIMIZER_BUILDERS +from mmcv.utils import Registry, build_from_cfg + +OPTIMIZER_BUILDERS = Registry( + 'optimizer builder', parent=MMCV_OPTIMIZER_BUILDERS) + + +def build_optimizer_constructor(cfg): + constructor_type = cfg.get('type') + if constructor_type in OPTIMIZER_BUILDERS: + return build_from_cfg(cfg, OPTIMIZER_BUILDERS) + elif constructor_type in MMCV_OPTIMIZER_BUILDERS: + return build_from_cfg(cfg, MMCV_OPTIMIZER_BUILDERS) + else: + raise KeyError(f'{constructor_type} is not registered ' + 'in the optimizer builder registry.') + + +def build_optimizer(model, cfg): + optimizer_cfg = copy.deepcopy(cfg) + constructor_type = optimizer_cfg.pop('constructor', + 'DefaultOptimizerConstructor') + paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) + optim_constructor = build_optimizer_constructor( + dict( + type=constructor_type, + optimizer_cfg=optimizer_cfg, + paramwise_cfg=paramwise_cfg)) + optimizer = optim_constructor(model) + return optimizer diff --git a/downstream/mmsegmentation/mmseg/core/evaluation/__init__.py b/downstream/mmsegmentation/mmseg/core/evaluation/__init__.py new file mode 100644 index 0000000..3d16d17 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/evaluation/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .class_names import get_classes, get_palette +from .eval_hooks import DistEvalHook, EvalHook +from .metrics import (eval_metrics, intersect_and_union, mean_dice, + mean_fscore, mean_iou, pre_eval_to_metrics) + +__all__ = [ + 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore', + 'eval_metrics', 'get_classes', 'get_palette', 'pre_eval_to_metrics', + 'intersect_and_union' +] diff --git a/downstream/mmsegmentation/mmseg/core/evaluation/class_names.py b/downstream/mmsegmentation/mmseg/core/evaluation/class_names.py new file mode 100644 index 0000000..e3bff62 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/evaluation/class_names.py @@ -0,0 +1,316 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
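+# Per-dataset class-name and RGB-palette tables, resolved by dataset name (including aliases) through get_classes() and get_palette() at the bottom of this file.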
+import mmcv + + +def cityscapes_classes(): + """Cityscapes class names for external use.""" + return [ + 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', + 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', + 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle' + ] + + +def ade_classes(): + """ADE20K class names for external use.""" + return [ + 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', + 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', + 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', + 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', + 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', + 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', + 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', + 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', + 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', + 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', + 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', + 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', + 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', + 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', + 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', + 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', + 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', + 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', + 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', + 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', + 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', + 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', + 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', + 'clock', 'flag' + ] + + +def voc_classes(): + """Pascal VOC class names for external use.""" + return [ + 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor' + ] + + +def cocostuff_classes(): + """CocoStuff class names for external use.""" + return [ + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', + 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', + 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', + 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', + 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', + 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', + 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', + 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', + 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', + 'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet', + 'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile', + 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', + 'desk-stuff', 'dirt', 'door-stuff', 'fence', 
'floor-marble', + 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower', + 'fog', 'food-other', 'fruit', 'furniture-other', 'grass', 'gravel', + 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', + 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', + 'pavement', 'pillow', 'plant-other', 'plastic', 'platform', + 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', + 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper', + 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', + 'table', 'tent', 'textile-other', 'towel', 'tree', 'vegetable', + 'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel', + 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops', + 'window-blind', 'window-other', 'wood' + ] + + +def loveda_classes(): + """LoveDA class names for external use.""" + return [ + 'background', 'building', 'road', 'water', 'barren', 'forest', + 'agricultural' + ] + + +def potsdam_classes(): + """Potsdam class names for external use.""" + return [ + 'impervious_surface', 'building', 'low_vegetation', 'tree', 'car', + 'clutter' + ] + + +def vaihingen_classes(): + """Vaihingen class names for external use.""" + return [ + 'impervious_surface', 'building', 'low_vegetation', 'tree', 'car', + 'clutter' + ] + + +def isaid_classes(): + """iSAID class names for external use.""" + return [ + 'background', 'ship', 'store_tank', 'baseball_diamond', 'tennis_court', + 'basketball_court', 'Ground_Track_Field', 'Bridge', 'Large_Vehicle', + 'Small_Vehicle', 'Helicopter', 'Swimming_pool', 'Roundabout', + 'Soccer_ball_field', 'plane', 'Harbor' + ] + + +def stare_classes(): + """stare class names for external use.""" + return ['background', 'vessel'] + + +def cityscapes_palette(): + """Cityscapes palette for external use.""" + return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], + [0, 0, 230], [119, 11, 32]] + + +def ade_palette(): + """ADE20K palette for external use.""" + return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], + [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], + [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], + [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], + [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], + [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], + [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], + [255, 184, 184], [0, 31, 255], [0, 255, 
61], [0, 71, 255], + [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], + [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], + [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], + [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], + [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], + [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], + [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], + [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], + [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], + [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], + [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], + [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], + [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], + [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], + [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], + [102, 255, 0], [92, 0, 255]] + + +def voc_palette(): + """Pascal VOC palette for external use.""" + return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], + [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], + [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], + [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], + [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] + + +def cocostuff_palette(): + """CocoStuff palette for external use.""" + return [[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192], + [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64], + [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224], + [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192], + [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192], + [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128], + [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], [0, 32, 0], + [0, 128, 128], [64, 128, 160], [128, 160, 0], [0, 128, 0], + [192, 128, 32], [128, 96, 128], [0, 0, 128], [64, 0, 32], + [0, 224, 128], [128, 0, 0], [192, 0, 160], [0, 96, 128], + [128, 128, 128], [64, 0, 160], [128, 224, 128], [128, 128, 64], + [192, 0, 32], [128, 96, 0], [128, 0, 192], [0, 128, 32], + [64, 224, 0], [0, 0, 64], [128, 128, 160], [64, 96, 0], + [0, 128, 192], [0, 128, 160], [192, 224, 0], [0, 128, 64], + [128, 128, 32], [192, 32, 128], [0, 64, 192], [0, 0, 32], + [64, 160, 128], [128, 64, 64], [128, 0, 160], [64, 32, 128], + [128, 192, 192], [0, 0, 160], [192, 160, 128], [128, 192, 0], + [128, 0, 96], [192, 32, 0], [128, 64, 128], [64, 128, 96], + [64, 160, 0], [0, 64, 0], [192, 128, 224], [64, 32, 0], + [0, 192, 128], [64, 128, 224], [192, 160, 0], [0, 192, 0], + [192, 128, 96], [192, 96, 128], [0, 64, 128], [64, 0, 96], + [64, 224, 128], [128, 64, 0], [192, 0, 224], [64, 96, 128], + [128, 192, 128], [64, 0, 224], [192, 224, 128], [128, 192, 64], + [192, 0, 96], [192, 96, 0], [128, 64, 192], [0, 128, 96], + [0, 224, 0], [64, 64, 64], [128, 128, 224], [0, 96, 0], + [64, 192, 192], [0, 128, 224], [128, 224, 0], [64, 192, 64], + [128, 128, 96], [128, 32, 128], [64, 0, 192], [0, 64, 96], + [0, 160, 128], [192, 0, 64], [128, 64, 224], [0, 32, 128], + [192, 128, 192], [0, 64, 224], [128, 160, 128], [192, 128, 0], + [128, 64, 32], [128, 32, 64], [192, 0, 128], [64, 192, 32], + [0, 160, 64], [64, 0, 0], [192, 192, 160], [0, 32, 64], + [64, 128, 128], [64, 192, 160], [128, 160, 64], [64, 128, 0], + [192, 192, 32], [128, 96, 192], [64, 0, 128], [64, 64, 32], + [0, 224, 192], [192, 0, 0], [192, 64, 160], [0, 96, 192], + [192, 128, 128], [64, 64, 
160], [128, 224, 192], [192, 128, 64], + [192, 64, 32], [128, 96, 64], [192, 0, 192], [0, 192, 32], + [64, 224, 64], [64, 0, 64], [128, 192, 160], [64, 96, 64], + [64, 128, 192], [0, 192, 160], [192, 224, 64], [64, 128, 64], + [128, 192, 32], [192, 32, 192], [64, 64, 192], [0, 64, 32], + [64, 160, 192], [192, 64, 64], [128, 64, 160], [64, 32, 192], + [192, 192, 192], [0, 64, 160], [192, 160, 192], [192, 192, 0], + [128, 64, 96], [192, 32, 64], [192, 64, 128], [64, 192, 96], + [64, 160, 64], [64, 64, 0]] + + +def loveda_palette(): + """LoveDA palette for external use.""" + return [[255, 255, 255], [255, 0, 0], [255, 255, 0], [0, 0, 255], + [159, 129, 183], [0, 255, 0], [255, 195, 128]] + + +def potsdam_palette(): + """Potsdam palette for external use.""" + return [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], + [255, 255, 0], [255, 0, 0]] + + +def vaihingen_palette(): + """Vaihingen palette for external use.""" + return [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], + [255, 255, 0], [255, 0, 0]] + + +def isaid_palette(): + """iSAID palette for external use.""" + return [[0, 0, 0], [0, 0, 63], [0, 63, 63], [0, 63, 0], [0, 63, 127], + [0, 63, 191], [0, 63, 255], [0, 127, 63], [0, 127, + 127], [0, 0, 127], + [0, 0, 191], [0, 0, 255], [0, 191, 127], [0, 127, 191], + [0, 127, 255], [0, 100, 155]] + + +def stare_palette(): + """STARE palette for external use.""" + return [[120, 120, 120], [6, 230, 230]] + + +dataset_aliases = { + 'cityscapes': ['cityscapes'], + 'ade': ['ade', 'ade20k'], + 'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'], + 'loveda': ['loveda'], + 'potsdam': ['potsdam'], + 'vaihingen': ['vaihingen'], + 'cocostuff': [ + 'cocostuff', 'cocostuff10k', 'cocostuff164k', 'coco-stuff', + 'coco-stuff10k', 'coco-stuff164k', 'coco_stuff', 'coco_stuff10k', + 'coco_stuff164k' + ], + 'isaid': ['isaid', 'iSAID'], + 'stare': ['stare', 'STARE'] +} + + +def get_classes(dataset): + """Get class names of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if mmcv.is_str(dataset): + if dataset in alias2name: + labels = eval(alias2name[dataset] + '_classes()') + else: + raise ValueError(f'Unrecognized dataset: {dataset}') + else: + raise TypeError(f'dataset must a str, but got {type(dataset)}') + return labels + + +def get_palette(dataset): + """Get class palette (RGB) of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if mmcv.is_str(dataset): + if dataset in alias2name: + labels = eval(alias2name[dataset] + '_palette()') + else: + raise ValueError(f'Unrecognized dataset: {dataset}') + else: + raise TypeError(f'dataset must a str, but got {type(dataset)}') + return labels diff --git a/downstream/mmsegmentation/mmseg/core/evaluation/eval_hooks.py b/downstream/mmsegmentation/mmseg/core/evaluation/eval_hooks.py new file mode 100644 index 0000000..952db3b --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/evaluation/eval_hooks.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings + +import torch.distributed as dist +from mmcv.runner import DistEvalHook as _DistEvalHook +from mmcv.runner import EvalHook as _EvalHook +from torch.nn.modules.batchnorm import _BatchNorm + + +class EvalHook(_EvalHook): + """Single GPU EvalHook, with efficient test support. + + Args: + by_epoch (bool): Determine perform evaluation by epoch or by iteration. 
+ If set to True, it will perform by epoch. Otherwise, by iteration. + Default: False. + efficient_test (bool): Whether save the results as local numpy files to + save CPU memory during evaluation. Default: False. + pre_eval (bool): Whether to use progressive mode to evaluate model. + Default: False. + Returns: + list: The prediction results. + """ + + greater_keys = ['mIoU', 'mAcc', 'aAcc'] + + def __init__(self, + *args, + by_epoch=False, + efficient_test=False, + pre_eval=False, + **kwargs): + super().__init__(*args, by_epoch=by_epoch, **kwargs) + self.pre_eval = pre_eval + if efficient_test: + warnings.warn( + 'DeprecationWarning: ``efficient_test`` for evaluation hook ' + 'is deprecated, the evaluation hook is CPU memory friendly ' + 'with ``pre_eval=True`` as argument for ``single_gpu_test()`` ' + 'function') + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + if not self._should_evaluate(runner): + return + + from mmseg.apis import single_gpu_test + results = single_gpu_test( + runner.model, self.dataloader, show=False, pre_eval=self.pre_eval) + runner.log_buffer.clear() + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + if self.save_best: + self._save_ckpt(runner, key_score) + + +class DistEvalHook(_DistEvalHook): + """Distributed EvalHook, with efficient test support. + + Args: + by_epoch (bool): Determine perform evaluation by epoch or by iteration. + If set to True, it will perform by epoch. Otherwise, by iteration. + Default: False. + efficient_test (bool): Whether save the results as local numpy files to + save CPU memory during evaluation. Default: False. + pre_eval (bool): Whether to use progressive mode to evaluate model. + Default: False. + Returns: + list: The prediction results. + """ + + greater_keys = ['mIoU', 'mAcc', 'aAcc'] + + def __init__(self, + *args, + by_epoch=False, + efficient_test=False, + pre_eval=False, + **kwargs): + super().__init__(*args, by_epoch=by_epoch, **kwargs) + self.pre_eval = pre_eval + if efficient_test: + warnings.warn( + 'DeprecationWarning: ``efficient_test`` for evaluation hook ' + 'is deprecated, the evaluation hook is CPU memory friendly ' + 'with ``pre_eval=True`` as argument for ``multi_gpu_test()`` ' + 'function') + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + # Synchronization of BatchNorm's buffer (running_mean + # and running_var) is not supported in the DDP of pytorch, + # which may cause the inconsistent performance of models in + # different ranks, so we broadcast BatchNorm's buffers + # of rank 0 to other ranks to avoid this. 
+ if self.broadcast_bn_buffer: + model = runner.model + for name, module in model.named_modules(): + if isinstance(module, + _BatchNorm) and module.track_running_stats: + dist.broadcast(module.running_var, 0) + dist.broadcast(module.running_mean, 0) + + if not self._should_evaluate(runner): + return + + tmpdir = self.tmpdir + if tmpdir is None: + tmpdir = osp.join(runner.work_dir, '.eval_hook') + + from mmseg.apis import multi_gpu_test + results = multi_gpu_test( + runner.model, + self.dataloader, + tmpdir=tmpdir, + gpu_collect=self.gpu_collect, + pre_eval=self.pre_eval) + + runner.log_buffer.clear() + + if runner.rank == 0: + print('\n') + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + + if self.save_best: + self._save_ckpt(runner, key_score) diff --git a/downstream/mmsegmentation/mmseg/core/evaluation/metrics.py b/downstream/mmsegmentation/mmseg/core/evaluation/metrics.py new file mode 100644 index 0000000..a1c0908 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/evaluation/metrics.py @@ -0,0 +1,395 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict + +import mmcv +import numpy as np +import torch + + +def f_score(precision, recall, beta=1): + """calculate the f-score value. + + Args: + precision (float | torch.Tensor): The precision value. + recall (float | torch.Tensor): The recall value. + beta (int): Determines the weight of recall in the combined score. + Default: False. + + Returns: + [torch.tensor]: The f-score value. + """ + score = (1 + beta**2) * (precision * recall) / ( + (beta**2 * precision) + recall) + return score + + +def intersect_and_union(pred_label, + label, + num_classes, + ignore_index, + label_map=dict(), + reduce_zero_label=False): + """Calculate intersection and Union. + + Args: + pred_label (ndarray | str): Prediction segmentation map + or predict result filename. + label (ndarray | str): Ground truth segmentation map + or label filename. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + label_map (dict): Mapping old labels to new labels. The parameter will + work only when label is str. Default: dict(). + reduce_zero_label (bool): Whether ignore zero label. The parameter will + work only when label is str. Default: False. + + Returns: + torch.Tensor: The intersection of prediction and ground truth + histogram on all classes. + torch.Tensor: The union of prediction and ground truth histogram on + all classes. + torch.Tensor: The prediction histogram on all classes. + torch.Tensor: The ground truth histogram on all classes. 
+ """ + + if isinstance(pred_label, str): + pred_label = torch.from_numpy(np.load(pred_label)) + else: + pred_label = torch.from_numpy((pred_label)) + + if isinstance(label, str): + label = torch.from_numpy( + mmcv.imread(label, flag='unchanged', backend='pillow')) + else: + label = torch.from_numpy(label) + + if label_map is not None: + for old_id, new_id in label_map.items(): + label[label == old_id] = new_id + if reduce_zero_label: + label[label == 0] = 255 + label = label - 1 + label[label == 254] = 255 + + mask = (label != ignore_index) + pred_label = pred_label[mask] + label = label[mask] + + intersect = pred_label[pred_label == label] + area_intersect = torch.histc( + intersect.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_pred_label = torch.histc( + pred_label.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_label = torch.histc( + label.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_union = area_pred_label + area_label - area_intersect + return area_intersect, area_union, area_pred_label, area_label + + +def total_intersect_and_union(results, + gt_seg_maps, + num_classes, + ignore_index, + label_map=dict(), + reduce_zero_label=False): + """Calculate Total Intersection and Union. + + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str] | Iterables): list of ground + truth segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + label_map (dict): Mapping old labels to new labels. Default: dict(). + reduce_zero_label (bool): Whether ignore zero label. Default: False. + + Returns: + ndarray: The intersection of prediction and ground truth histogram + on all classes. + ndarray: The union of prediction and ground truth histogram on all + classes. + ndarray: The prediction histogram on all classes. + ndarray: The ground truth histogram on all classes. + """ + total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_union = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_label = torch.zeros((num_classes, ), dtype=torch.float64) + for result, gt_seg_map in zip(results, gt_seg_maps): + area_intersect, area_union, area_pred_label, area_label = \ + intersect_and_union( + result, gt_seg_map, num_classes, ignore_index, + label_map, reduce_zero_label) + total_area_intersect += area_intersect + total_area_union += area_union + total_area_pred_label += area_pred_label + total_area_label += area_label + return total_area_intersect, total_area_union, total_area_pred_label, \ + total_area_label + + +def mean_iou(results, + gt_seg_maps, + num_classes, + ignore_index, + nan_to_num=None, + label_map=dict(), + reduce_zero_label=False): + """Calculate Mean Intersection and Union (mIoU) + + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + label_map (dict): Mapping old labels to new labels. Default: dict(). 
+ reduce_zero_label (bool): Whether ignore zero label. Default: False. + + Returns: + dict[str, float | ndarray]: + float: Overall accuracy on all images. + ndarray: Per category accuracy, shape (num_classes, ). + ndarray: Per category IoU, shape (num_classes, ). + """ + iou_result = eval_metrics( + results=results, + gt_seg_maps=gt_seg_maps, + num_classes=num_classes, + ignore_index=ignore_index, + metrics=['mIoU'], + nan_to_num=nan_to_num, + label_map=label_map, + reduce_zero_label=reduce_zero_label) + return iou_result + + +def mean_dice(results, + gt_seg_maps, + num_classes, + ignore_index, + nan_to_num=None, + label_map=dict(), + reduce_zero_label=False): + """Calculate Mean Dice (mDice) + + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + label_map (dict): Mapping old labels to new labels. Default: dict(). + reduce_zero_label (bool): Whether ignore zero label. Default: False. + + Returns: + dict[str, float | ndarray]: Default metrics. + float: Overall accuracy on all images. + ndarray: Per category accuracy, shape (num_classes, ). + ndarray: Per category dice, shape (num_classes, ). + """ + + dice_result = eval_metrics( + results=results, + gt_seg_maps=gt_seg_maps, + num_classes=num_classes, + ignore_index=ignore_index, + metrics=['mDice'], + nan_to_num=nan_to_num, + label_map=label_map, + reduce_zero_label=reduce_zero_label) + return dice_result + + +def mean_fscore(results, + gt_seg_maps, + num_classes, + ignore_index, + nan_to_num=None, + label_map=dict(), + reduce_zero_label=False, + beta=1): + """Calculate Mean Intersection and Union (mIoU) + + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + label_map (dict): Mapping old labels to new labels. Default: dict(). + reduce_zero_label (bool): Whether ignore zero label. Default: False. + beta (int): Determines the weight of recall in the combined score. + Default: False. + + + Returns: + dict[str, float | ndarray]: Default metrics. + float: Overall accuracy on all images. + ndarray: Per category recall, shape (num_classes, ). + ndarray: Per category precision, shape (num_classes, ). + ndarray: Per category f-score, shape (num_classes, ). 
+ """ + fscore_result = eval_metrics( + results=results, + gt_seg_maps=gt_seg_maps, + num_classes=num_classes, + ignore_index=ignore_index, + metrics=['mFscore'], + nan_to_num=nan_to_num, + label_map=label_map, + reduce_zero_label=reduce_zero_label, + beta=beta) + return fscore_result + + +def eval_metrics(results, + gt_seg_maps, + num_classes, + ignore_index, + metrics=['mIoU'], + nan_to_num=None, + label_map=dict(), + reduce_zero_label=False, + beta=1): + """Calculate evaluation metrics + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str] | Iterables): list of ground + truth segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + label_map (dict): Mapping old labels to new labels. Default: dict(). + reduce_zero_label (bool): Whether ignore zero label. Default: False. + Returns: + float: Overall accuracy on all images. + ndarray: Per category accuracy, shape (num_classes, ). + ndarray: Per category evaluation metrics, shape (num_classes, ). + """ + + total_area_intersect, total_area_union, total_area_pred_label, \ + total_area_label = total_intersect_and_union( + results, gt_seg_maps, num_classes, ignore_index, label_map, + reduce_zero_label) + ret_metrics = total_area_to_metrics(total_area_intersect, total_area_union, + total_area_pred_label, + total_area_label, metrics, nan_to_num, + beta) + + return ret_metrics + + +def pre_eval_to_metrics(pre_eval_results, + metrics=['mIoU'], + nan_to_num=None, + beta=1): + """Convert pre-eval results to metrics. + + Args: + pre_eval_results (list[tuple[torch.Tensor]]): per image eval results + for computing evaluation metric + metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + Returns: + float: Overall accuracy on all images. + ndarray: Per category accuracy, shape (num_classes, ). + ndarray: Per category evaluation metrics, shape (num_classes, ). + """ + + # convert list of tuples to tuple of lists, e.g. + # [(A_1, B_1, C_1, D_1), ..., (A_n, B_n, C_n, D_n)] to + # ([A_1, ..., A_n], ..., [D_1, ..., D_n]) + pre_eval_results = tuple(zip(*pre_eval_results)) + assert len(pre_eval_results) == 4 + + total_area_intersect = sum(pre_eval_results[0]) + total_area_union = sum(pre_eval_results[1]) + total_area_pred_label = sum(pre_eval_results[2]) + total_area_label = sum(pre_eval_results[3]) + + ret_metrics = total_area_to_metrics(total_area_intersect, total_area_union, + total_area_pred_label, + total_area_label, metrics, nan_to_num, + beta) + + return ret_metrics + + +def total_area_to_metrics(total_area_intersect, + total_area_union, + total_area_pred_label, + total_area_label, + metrics=['mIoU'], + nan_to_num=None, + beta=1): + """Calculate evaluation metrics + Args: + total_area_intersect (ndarray): The intersection of prediction and + ground truth histogram on all classes. + total_area_union (ndarray): The union of prediction and ground truth + histogram on all classes. + total_area_pred_label (ndarray): The prediction histogram on all + classes. 
+ total_area_label (ndarray): The ground truth histogram on all classes. + metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + Returns: + float: Overall accuracy on all images. + ndarray: Per category accuracy, shape (num_classes, ). + ndarray: Per category evaluation metrics, shape (num_classes, ). + """ + if isinstance(metrics, str): + metrics = [metrics] + allowed_metrics = ['mIoU', 'mDice', 'mFscore'] + if not set(metrics).issubset(set(allowed_metrics)): + raise KeyError('metrics {} is not supported'.format(metrics)) + + all_acc = total_area_intersect.sum() / total_area_label.sum() + ret_metrics = OrderedDict({'aAcc': all_acc}) + for metric in metrics: + if metric == 'mIoU': + iou = total_area_intersect / total_area_union + acc = total_area_intersect / total_area_label + ret_metrics['IoU'] = iou + ret_metrics['Acc'] = acc + elif metric == 'mDice': + dice = 2 * total_area_intersect / ( + total_area_pred_label + total_area_label) + acc = total_area_intersect / total_area_label + ret_metrics['Dice'] = dice + ret_metrics['Acc'] = acc + elif metric == 'mFscore': + precision = total_area_intersect / total_area_pred_label + recall = total_area_intersect / total_area_label + f_value = torch.tensor( + [f_score(x[0], x[1], beta) for x in zip(precision, recall)]) + ret_metrics['Fscore'] = f_value + ret_metrics['Precision'] = precision + ret_metrics['Recall'] = recall + + ret_metrics = { + metric: value.numpy() + for metric, value in ret_metrics.items() + } + if nan_to_num is not None: + ret_metrics = OrderedDict({ + metric: np.nan_to_num(metric_value, nan=nan_to_num) + for metric, metric_value in ret_metrics.items() + }) + return ret_metrics diff --git a/downstream/mmsegmentation/mmseg/core/layer_decay_optimizer_constructor.py b/downstream/mmsegmentation/mmseg/core/layer_decay_optimizer_constructor.py new file mode 100644 index 0000000..bd3db92 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/layer_decay_optimizer_constructor.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.runner import DefaultOptimizerConstructor, get_dist_info + +from mmseg.utils import get_root_logger +from .builder import OPTIMIZER_BUILDERS + + +def get_num_layer_for_vit(var_name, num_max_layer): + """Get the layer id to set the different learning rates. + + Args: + var_name (str): The key of the model. + num_max_layer (int): Maximum number of backbone layers. + Returns: + layer id (int): Returns the layer id of the key. + """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.patch_embed'): + return 0 + elif var_name.startswith('backbone.layers'): + layer_id = int(var_name.split('.')[2]) + return layer_id + 1 + else: + return num_max_layer - 1 + + +@OPTIMIZER_BUILDERS.register_module() +class LayerDecayOptimizerConstructor(DefaultOptimizerConstructor): + """Different learning rates are set for different layers of backbone.""" + + def add_params(self, params, module): + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. 
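Stepping back to `total_area_to_metrics` above: once the four totals are accumulated, each supported metric reduces to a simple element-wise ratio. The sketch below spells out that arithmetic on made-up totals for a 3-class problem; the F-score line assumes the usual F-beta combination of precision and recall that the `f_score` helper earlier in this file is expected to implement.

```python
# Element-wise reductions implemented by total_area_to_metrics;
# the per-class totals below are made up for illustration.
import torch

total_intersect = torch.tensor([50., 30., 10.])
total_union = torch.tensor([80., 60., 40.])
total_pred = torch.tensor([60., 45., 25.])
total_label = torch.tensor([70., 45., 25.])

aAcc = total_intersect.sum() / total_label.sum()         # overall pixel accuracy
iou = total_intersect / total_union                      # 'mIoU'
dice = 2 * total_intersect / (total_pred + total_label)  # 'mDice'
precision = total_intersect / total_pred                 # 'mFscore'
recall = total_intersect / total_label
beta = 1
# assuming the standard F-beta definition
fscore = (1 + beta**2) * precision * recall / (beta**2 * precision + recall)

print(aAcc.item(), iou.mean().item(), dice.mean().item(), fscore.mean().item())
```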
+ """ + parameter_groups = {} + logger = get_root_logger() + logger.info(self.paramwise_cfg) + num_layers = self.paramwise_cfg.get('num_layers') + 2 + layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate') + logger.info(f'Build LayerDecayOptimizerConstructor ' + f'{layer_decay_rate} - {num_layers}') + weight_decay = self.base_wd + for name, param in module.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith('.bias') or name in ( + 'pos_embed', 'cls_token'): + group_name = 'no_decay' + this_weight_decay = 0. + else: + group_name = 'decay' + this_weight_decay = weight_decay + layer_id = get_num_layer_for_vit(name, num_layers) + group_name = f'layer_{layer_id}_{group_name}' + if group_name not in parameter_groups: + scale = layer_decay_rate**(num_layers - layer_id - 1) + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr + } + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + rank, _ = get_dist_info() + if rank == 0: + to_display = {} + for key in parameter_groups: + to_display[key] = { + 'param_names': parameter_groups[key]['param_names'], + 'lr_scale': parameter_groups[key]['lr_scale'], + 'lr': parameter_groups[key]['lr'], + 'weight_decay': parameter_groups[key]['weight_decay'] + } + logger.info(f'Param groups ={to_display}') + params.extend(parameter_groups.values()) diff --git a/downstream/mmsegmentation/mmseg/core/seg/__init__.py b/downstream/mmsegmentation/mmseg/core/seg/__init__.py new file mode 100644 index 0000000..5206b96 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/seg/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import build_pixel_sampler +from .sampler import BasePixelSampler, OHEMPixelSampler + +__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] diff --git a/downstream/mmsegmentation/mmseg/core/seg/builder.py b/downstream/mmsegmentation/mmseg/core/seg/builder.py new file mode 100644 index 0000000..1cecd34 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/seg/builder.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import Registry, build_from_cfg + +PIXEL_SAMPLERS = Registry('pixel sampler') + + +def build_pixel_sampler(cfg, **default_args): + """Build pixel sampler for segmentation map.""" + return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args) diff --git a/downstream/mmsegmentation/mmseg/core/seg/sampler/__init__.py b/downstream/mmsegmentation/mmseg/core/seg/sampler/__init__.py new file mode 100644 index 0000000..5a76485 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/seg/sampler/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_pixel_sampler import BasePixelSampler +from .ohem_pixel_sampler import OHEMPixelSampler + +__all__ = ['BasePixelSampler', 'OHEMPixelSampler'] diff --git a/downstream/mmsegmentation/mmseg/core/seg/sampler/base_pixel_sampler.py b/downstream/mmsegmentation/mmseg/core/seg/sampler/base_pixel_sampler.py new file mode 100644 index 0000000..03672cd --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/seg/sampler/base_pixel_sampler.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
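For the `LayerDecayOptimizerConstructor` just above, the key rule is `lr_scale = layer_decay_rate ** (num_layers - layer_id - 1)`: parameters mapped to lower layer ids by `get_num_layer_for_vit` receive exponentially smaller learning rates. A small self-contained sketch of that schedule follows; the base LR, decay rate and depth are made-up values, not taken from any config in this diff.

```python
# Sketch of the per-layer LR scaling rule used in add_params above.
base_lr = 1e-3
layer_decay_rate = 0.9
num_layers = 12 + 2  # paramwise_cfg['num_layers'] + 2, as computed in add_params

for layer_id in range(num_layers):
    lr_scale = layer_decay_rate ** (num_layers - layer_id - 1)
    print(f'layer {layer_id:2d}: lr_scale={lr_scale:.4f}  lr={base_lr * lr_scale:.2e}')
# layer 0 (patch embedding / cls token) gets the smallest LR,
# while parameters outside the backbone end up close to base_lr.
```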
+from abc import ABCMeta, abstractmethod + + +class BasePixelSampler(metaclass=ABCMeta): + """Base class of pixel sampler.""" + + def __init__(self, **kwargs): + pass + + @abstractmethod + def sample(self, seg_logit, seg_label): + """Placeholder for sample function.""" diff --git a/downstream/mmsegmentation/mmseg/core/seg/sampler/ohem_pixel_sampler.py b/downstream/mmsegmentation/mmseg/core/seg/sampler/ohem_pixel_sampler.py new file mode 100644 index 0000000..833a287 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/seg/sampler/ohem_pixel_sampler.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import PIXEL_SAMPLERS +from .base_pixel_sampler import BasePixelSampler + + +@PIXEL_SAMPLERS.register_module() +class OHEMPixelSampler(BasePixelSampler): + """Online Hard Example Mining Sampler for segmentation. + + Args: + context (nn.Module): The context of sampler, subclass of + :obj:`BaseDecodeHead`. + thresh (float, optional): The threshold for hard example selection. + Below which, are prediction with low confidence. If not + specified, the hard examples will be pixels of top ``min_kept`` + loss. Default: None. + min_kept (int, optional): The minimum number of predictions to keep. + Default: 100000. + """ + + def __init__(self, context, thresh=None, min_kept=100000): + super(OHEMPixelSampler, self).__init__() + self.context = context + assert min_kept > 1 + self.thresh = thresh + self.min_kept = min_kept + + def sample(self, seg_logit, seg_label): + """Sample pixels that have high loss or with low prediction confidence. + + Args: + seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W) + seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W) + + Returns: + torch.Tensor: segmentation weight, shape (N, H, W) + """ + with torch.no_grad(): + assert seg_logit.shape[2:] == seg_label.shape[2:] + assert seg_label.shape[1] == 1 + seg_label = seg_label.squeeze(1).long() + batch_kept = self.min_kept * seg_label.size(0) + valid_mask = seg_label != self.context.ignore_index + seg_weight = seg_logit.new_zeros(size=seg_label.size()) + valid_seg_weight = seg_weight[valid_mask] + if self.thresh is not None: + seg_prob = F.softmax(seg_logit, dim=1) + + tmp_seg_label = seg_label.clone().unsqueeze(1) + tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0 + seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1) + sort_prob, sort_indices = seg_prob[valid_mask].sort() + + if sort_prob.numel() > 0: + min_threshold = sort_prob[min(batch_kept, + sort_prob.numel() - 1)] + else: + min_threshold = 0.0 + threshold = max(min_threshold, self.thresh) + valid_seg_weight[seg_prob[valid_mask] < threshold] = 1. + else: + if not isinstance(self.context.loss_decode, nn.ModuleList): + losses_decode = [self.context.loss_decode] + else: + losses_decode = self.context.loss_decode + losses = 0.0 + for loss_module in losses_decode: + losses += loss_module( + seg_logit, + seg_label, + weight=None, + ignore_index=self.context.ignore_index, + reduction_override='none') + + # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa + _, sort_indices = losses[valid_mask].sort(descending=True) + valid_seg_weight[sort_indices[:batch_kept]] = 1. 
+ + seg_weight[valid_mask] = valid_seg_weight + + return seg_weight diff --git a/downstream/mmsegmentation/mmseg/core/utils/__init__.py b/downstream/mmsegmentation/mmseg/core/utils/__init__.py new file mode 100644 index 0000000..cb5a0c3 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .dist_util import check_dist_init, sync_random_seed +from .layer_decay_optimizer_constructor import \ + LearningRateDecayOptimizerConstructor +from .misc import add_prefix + +__all__ = [ + 'add_prefix', 'LearningRateDecayOptimizerConstructor', 'check_dist_init', + 'sync_random_seed' +] diff --git a/downstream/mmsegmentation/mmseg/core/utils/dist_util.py b/downstream/mmsegmentation/mmseg/core/utils/dist_util.py new file mode 100644 index 0000000..b328851 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/utils/dist_util.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import get_dist_info + + +def check_dist_init(): + return dist.is_available() and dist.is_initialized() + + +def sync_random_seed(seed=None, device='cuda'): + """Make sure different ranks share the same seed. All workers must call + this function, otherwise it will deadlock. This method is generally used in + `DistributedSampler`, because the seed should be identical across all + processes in the distributed group. + + In distributed sampling, different ranks should sample non-overlapped + data in the dataset. Therefore, this function is used to make sure that + each rank shuffles the data indices in the same order based + on the same seed. Then different ranks could use different indices + to select non-overlapped data from the same data list. + + Args: + seed (int, Optional): The seed. Default to None. + device (str): The device where the seed will be put on. + Default to 'cuda'. + Returns: + int: Seed to be used. + """ + + if seed is None: + seed = np.random.randint(2**31) + assert isinstance(seed, int) + + rank, world_size = get_dist_info() + + if world_size == 1: + return seed + + if rank == 0: + random_num = torch.tensor(seed, dtype=torch.int32, device=device) + else: + random_num = torch.tensor(0, dtype=torch.int32, device=device) + dist.broadcast(random_num, src=0) + return random_num.item() diff --git a/downstream/mmsegmentation/mmseg/core/utils/layer_decay_optimizer_constructor.py b/downstream/mmsegmentation/mmseg/core/utils/layer_decay_optimizer_constructor.py new file mode 100644 index 0000000..2980487 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/utils/layer_decay_optimizer_constructor.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json + +from mmcv.runner import DefaultOptimizerConstructor, get_dist_info + +from mmseg.utils import get_root_logger +from ..builder import OPTIMIZER_BUILDERS + + +def get_num_layer_layer_wise(var_name, num_max_layer=12): + """Get the layer id to set the different learning rates in ``layer_wise`` + decay_type. + + Args: + var_name (str): The key of the model. + num_max_layer (int): Maximum number of backbone layers. + + Returns: + int: The id number corresponding to different learning rate in + ``LearningRateDecayOptimizerConstructor``. 
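Note that `sync_random_seed` above is a collective call: every rank must execute it, and all ranks end up with rank 0's seed. The following is a hedged sketch of the intended call pattern inside a distributed sampler; `ToyDistributedSampler` is a hypothetical stand-in used only to show where the call goes, not a class from this diff.

```python
# Hypothetical sampler illustrating where sync_random_seed is meant to be called.
import torch
from torch.utils.data import Sampler

from mmseg.core.utils import sync_random_seed


class ToyDistributedSampler(Sampler):

    def __init__(self, dataset, num_replicas, rank, seed=None):
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Every rank calls this; all of them receive rank 0's seed, so each
        # rank shuffles the indices identically before slicing them.
        self.seed = sync_random_seed(seed)

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.seed + self.epoch)
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
        # disjoint, strided subsets of the same permutation per rank
        return iter(indices[self.rank::self.num_replicas])

    def __len__(self):
        return len(self.dataset) // self.num_replicas
```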
+ """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.downsample_layers'): + stage_id = int(var_name.split('.')[2]) + if stage_id == 0: + layer_id = 0 + elif stage_id == 1: + layer_id = 2 + elif stage_id == 2: + layer_id = 3 + elif stage_id == 3: + layer_id = num_max_layer + return layer_id + elif var_name.startswith('backbone.stages'): + stage_id = int(var_name.split('.')[2]) + block_id = int(var_name.split('.')[3]) + if stage_id == 0: + layer_id = 1 + elif stage_id == 1: + layer_id = 2 + elif stage_id == 2: + layer_id = 3 + block_id // 3 + elif stage_id == 3: + layer_id = num_max_layer + return layer_id + else: + return num_max_layer + 1 + + +def get_num_layer_stage_wise(var_name, num_max_layer): + """Get the layer id to set the different learning rates in ``stage_wise`` + decay_type. + + Args: + var_name (str): The key of the model. + num_max_layer (int): Maximum number of backbone layers. + Returns: + int: The id number corresponding to different learning rate in + ``LearningRateDecayOptimizerConstructor``. + """ + + if var_name in ('backbone.cls_token', 'backbone.mask_token', + 'backbone.pos_embed'): + return 0 + elif var_name.startswith('backbone.downsample_layers'): + return 0 + elif var_name.startswith('backbone.stages'): + stage_id = int(var_name.split('.')[2]) + return stage_id + 1 + else: + return num_max_layer - 1 + + +@OPTIMIZER_BUILDERS.register_module() +class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor): + """Different learning rates are set for different layers of backbone.""" + + def add_params(self, params, module): + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + """ + logger = get_root_logger() + + parameter_groups = {} + logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}') + num_layers = self.paramwise_cfg.get('num_layers') + 2 + decay_rate = self.paramwise_cfg.get('decay_rate') + decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise') + logger.info('Build LearningRateDecayOptimizerConstructor ' + f'{decay_type} {decay_rate} - {num_layers}') + weight_decay = self.base_wd + + for name, param in module.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith('.bias') or name in ( + 'pos_embed', 'cls_token'): + group_name = 'no_decay' + this_weight_decay = 0. 
+ else: + group_name = 'decay' + this_weight_decay = weight_decay + + if decay_type == 'layer_wise': + layer_id = get_num_layer_layer_wise( + name, self.paramwise_cfg.get('num_layers')) + logger.info(f'set param {name} as id {layer_id}') + elif decay_type == 'stage_wise': + layer_id = get_num_layer_stage_wise(name, num_layers) + logger.info(f'set param {name} as id {layer_id}') + group_name = f'layer_{layer_id}_{group_name}' + + if group_name not in parameter_groups: + scale = decay_rate**(num_layers - layer_id - 1) + + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr, + } + + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + rank, _ = get_dist_info() + if rank == 0: + to_display = {} + for key in parameter_groups: + to_display[key] = { + 'param_names': parameter_groups[key]['param_names'], + 'lr_scale': parameter_groups[key]['lr_scale'], + 'lr': parameter_groups[key]['lr'], + 'weight_decay': parameter_groups[key]['weight_decay'], + } + logger.info(f'Param groups = {json.dumps(to_display, indent=2)}') + params.extend(parameter_groups.values()) diff --git a/downstream/mmsegmentation/mmseg/core/utils/misc.py b/downstream/mmsegmentation/mmseg/core/utils/misc.py new file mode 100644 index 0000000..282bb8d --- /dev/null +++ b/downstream/mmsegmentation/mmseg/core/utils/misc.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def add_prefix(inputs, prefix): + """Add prefix for dict. + + Args: + inputs (dict): The input dict with str keys. + prefix (str): The prefix to add. + + Returns: + + dict: The dict with keys updated with ``prefix``. + """ + + outputs = dict() + for name, value in inputs.items(): + outputs[f'{prefix}.{name}'] = value + + return outputs diff --git a/downstream/mmsegmentation/mmseg/datasets/__init__.py b/downstream/mmsegmentation/mmseg/datasets/__init__.py new file mode 100644 index 0000000..5d42a11 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/__init__.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
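To put `LearningRateDecayOptimizerConstructor` to work, a config points the optimizer builder at it through mmcv's `constructor`/`paramwise_cfg` convention (assumed here); the concrete numbers below are illustrative and not taken from any config in this diff.

```python
# Hedged config fragment routing optimizer construction through
# LearningRateDecayOptimizerConstructor; values are illustrative.
optimizer = dict(
    type='AdamW',
    lr=1e-4,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    constructor='LearningRateDecayOptimizerConstructor',
    paramwise_cfg=dict(
        num_layers=12,  # backbone depth; add_params adds 2 internally
        decay_rate=0.9,  # per-layer LR decay factor
        # 'layer_wise' or 'stage_wise', matching the two helpers above
        decay_type='layer_wise'))
```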
+from .ade import ADE20KDataset +from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset +from .chase_db1 import ChaseDB1Dataset +from .cityscapes import CityscapesDataset +from .coco_stuff import COCOStuffDataset +from .custom import CustomDataset +from .dark_zurich import DarkZurichDataset +from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset, + RepeatDataset) +from .drive import DRIVEDataset +from .hrf import HRFDataset +from .isaid import iSAIDDataset +from .isprs import ISPRSDataset +from .loveda import LoveDADataset +from .night_driving import NightDrivingDataset +from .pascal_context import PascalContextDataset, PascalContextDataset59 +from .potsdam import PotsdamDataset +from .stare import STAREDataset +from .voc import PascalVOCDataset + +__all__ = [ + 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', + 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', + 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', + 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', + 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset', + 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset', + 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset' +] diff --git a/downstream/mmsegmentation/mmseg/datasets/ade.py b/downstream/mmsegmentation/mmseg/datasets/ade.py new file mode 100644 index 0000000..db94ceb --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/ade.py @@ -0,0 +1,167 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import mmcv +import numpy as np +from PIL import Image + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class ADE20KDataset(CustomDataset): + """ADE20K dataset. + + In segmentation map annotation for ADE20K, 0 stands for background, which + is not included in 150 categories. ``reduce_zero_label`` is fixed to True. + The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to + '.png'. 
+ """ + CLASSES = ( + 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', + 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', + 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', + 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', + 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', + 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', + 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', + 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', + 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', + 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', + 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', + 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', + 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', + 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', + 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', + 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', + 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', + 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', + 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', + 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', + 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', + 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', + 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', + 'clock', 'flag') + + PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], + [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], + [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], + [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], + [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], + [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], + [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], + [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], + [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], + [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], + [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], + [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], + [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], + [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], + [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], + [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], + [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], + 
[255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], + [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], + [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], + [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], + [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], + [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], + [102, 255, 0], [92, 0, 255]] + + def __init__(self, **kwargs): + super(ADE20KDataset, self).__init__( + img_suffix='.jpg', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) + + def results2img(self, results, imgfile_prefix, to_label_id, indices=None): + """Write the segmentation results to images. + + Args: + results (list[ndarray]): Testing results of the + dataset. + imgfile_prefix (str): The filename prefix of the png files. + If the prefix is "somepath/xxx", + the png files will be named "somepath/xxx.png". + to_label_id (bool): whether convert output to label_id for + submission. + indices (list[int], optional): Indices of input results, if not + set, all the indices of the dataset will be used. + Default: None. + + Returns: + list[str: str]: result txt files which contains corresponding + semantic segmentation images. + """ + if indices is None: + indices = list(range(len(self))) + + mmcv.mkdir_or_exist(imgfile_prefix) + result_files = [] + for result, idx in zip(results, indices): + + filename = self.img_infos[idx]['filename'] + basename = osp.splitext(osp.basename(filename))[0] + + png_filename = osp.join(imgfile_prefix, f'{basename}.png') + + # The index range of official requirement is from 0 to 150. + # But the index range of output is from 0 to 149. + # That is because we set reduce_zero_label=True. + result = result + 1 + + output = Image.fromarray(result.astype(np.uint8)) + output.save(png_filename) + result_files.append(png_filename) + + return result_files + + def format_results(self, + results, + imgfile_prefix, + to_label_id=True, + indices=None): + """Format the results into dir (standard format for ade20k evaluation). + + Args: + results (list): Testing results of the dataset. + imgfile_prefix (str | None): The prefix of images files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". + to_label_id (bool): whether convert output to label_id for + submission. Default: False + indices (list[int], optional): Indices of input results, if not + set, all the indices of the dataset will be used. + Default: None. + + Returns: + tuple: (result_files, tmp_dir), result_files is a list containing + the image paths, tmp_dir is the temporal directory created + for saving json/png files when img_prefix is not specified. + """ + + if indices is None: + indices = list(range(len(self))) + + assert isinstance(results, list), 'results must be a list.' + assert isinstance(indices, list), 'indices must be a list.' + + result_files = self.results2img(results, imgfile_prefix, to_label_id, + indices) + return result_files diff --git a/downstream/mmsegmentation/mmseg/datasets/builder.py b/downstream/mmsegmentation/mmseg/datasets/builder.py new file mode 100644 index 0000000..4d852d3 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/builder.py @@ -0,0 +1,191 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
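One detail worth spelling out from `ADE20KDataset.results2img` above: because `reduce_zero_label=True` shifts labels down by one at load time, model predictions live in the 0–149 range, and the method adds 1 back before saving, so the submission PNGs use the official 1–150 range. A tiny self-contained illustration of that offset:

```python
# Illustration of the +1 offset applied in results2img before saving PNGs.
import numpy as np

# train IDs produced by a model trained with reduce_zero_label=True: 0..149
pred_train_ids = np.array([[0, 1], [149, 37]], dtype=np.uint8)

# the saved submission uses the official 1..150 range (0 = unlabeled)
submission = pred_train_ids + 1
assert submission.min() >= 1 and submission.max() <= 150
```

`format_results` simply applies this per prediction and writes one PNG per image.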
+import copy +import platform +import random +from functools import partial + +import numpy as np +import torch +from mmcv.parallel import collate +from mmcv.runner import get_dist_info +from mmcv.utils import Registry, build_from_cfg, digit_version +from torch.utils.data import DataLoader + +from .samplers import DistributedSampler + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + base_soft_limit = rlimit[0] + hard_limit = rlimit[1] + soft_limit = min(max(4096, base_soft_limit), hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + +DATASETS = Registry('dataset') +PIPELINES = Registry('pipeline') + + +def _concat_dataset(cfg, default_args=None): + """Build :obj:`ConcatDataset by.""" + from .dataset_wrappers import ConcatDataset + img_dir = cfg['img_dir'] + ann_dir = cfg.get('ann_dir', None) + split = cfg.get('split', None) + # pop 'separate_eval' since it is not a valid key for common datasets. + separate_eval = cfg.pop('separate_eval', True) + num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1 + if ann_dir is not None: + num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1 + else: + num_ann_dir = 0 + if split is not None: + num_split = len(split) if isinstance(split, (list, tuple)) else 1 + else: + num_split = 0 + if num_img_dir > 1: + assert num_img_dir == num_ann_dir or num_ann_dir == 0 + assert num_img_dir == num_split or num_split == 0 + else: + assert num_split == num_ann_dir or num_ann_dir <= 1 + num_dset = max(num_split, num_img_dir) + + datasets = [] + for i in range(num_dset): + data_cfg = copy.deepcopy(cfg) + if isinstance(img_dir, (list, tuple)): + data_cfg['img_dir'] = img_dir[i] + if isinstance(ann_dir, (list, tuple)): + data_cfg['ann_dir'] = ann_dir[i] + if isinstance(split, (list, tuple)): + data_cfg['split'] = split[i] + datasets.append(build_dataset(data_cfg, default_args)) + + return ConcatDataset(datasets, separate_eval) + + +def build_dataset(cfg, default_args=None): + """Build datasets.""" + from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset, + RepeatDataset) + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + elif cfg['type'] == 'MultiImageMixDataset': + cp_cfg = copy.deepcopy(cfg) + cp_cfg['dataset'] = build_dataset(cp_cfg['dataset']) + cp_cfg.pop('type') + dataset = MultiImageMixDataset(**cp_cfg) + elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance( + cfg.get('split', None), (list, tuple)): + dataset = _concat_dataset(cfg, default_args) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + + return dataset + + +def build_dataloader(dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + shuffle=True, + seed=None, + drop_last=False, + pin_memory=True, + persistent_workers=True, + **kwargs): + """Build PyTorch DataLoader. + + In distributed training, each GPU/process has a dataloader. + In non-distributed training, there is only one dataloader for all GPUs. + + Args: + dataset (Dataset): A PyTorch dataset. + samples_per_gpu (int): Number of training samples on each GPU, i.e., + batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data loading + for each GPU. + num_gpus (int): Number of GPUs. 
Only used in non-distributed training. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: True. + seed (int | None): Seed to be used. Default: None. + drop_last (bool): Whether to drop the last incomplete batch in epoch. + Default: False + pin_memory (bool): Whether to use pin_memory in DataLoader. + Default: True + persistent_workers (bool): If True, the data loader will not shutdown + the worker processes after a dataset has been consumed once. + This allows to maintain the workers Dataset instances alive. + The argument also has effect in PyTorch>=1.7.0. + Default: True + kwargs: any keyword argument to be used to initialize DataLoader + + Returns: + DataLoader: A PyTorch dataloader. + """ + rank, world_size = get_dist_info() + if dist: + sampler = DistributedSampler( + dataset, world_size, rank, shuffle=shuffle, seed=seed) + shuffle = False + batch_size = samples_per_gpu + num_workers = workers_per_gpu + else: + sampler = None + batch_size = num_gpus * samples_per_gpu + num_workers = num_gpus * workers_per_gpu + + init_fn = partial( + worker_init_fn, num_workers=num_workers, rank=rank, + seed=seed) if seed is not None else None + + if digit_version(torch.__version__) >= digit_version('1.8.0'): + data_loader = DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), + pin_memory=pin_memory, + shuffle=shuffle, + worker_init_fn=init_fn, + drop_last=drop_last, + persistent_workers=persistent_workers, + **kwargs) + else: + data_loader = DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), + pin_memory=pin_memory, + shuffle=shuffle, + worker_init_fn=init_fn, + drop_last=drop_last, + **kwargs) + + return data_loader + + +def worker_init_fn(worker_id, num_workers, rank, seed): + """Worker init func for dataloader. + + The seed of each worker equals to num_worker * rank + worker_id + user_seed + + Args: + worker_id (int): Worker id. + num_workers (int): Number of workers. + rank (int): The rank of current process. + seed (int): The random seed to use. + """ + + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) + torch.manual_seed(worker_seed) diff --git a/downstream/mmsegmentation/mmseg/datasets/chase_db1.py b/downstream/mmsegmentation/mmseg/datasets/chase_db1.py new file mode 100644 index 0000000..5cdc8d8 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/chase_db1.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class ChaseDB1Dataset(CustomDataset): + """Chase_db1 dataset. + + In segmentation map annotation for Chase_db1, 0 stands for background, + which is included in 2 categories. ``reduce_zero_label`` is fixed to False. + The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '_1stHO.png'. 
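A hedged usage sketch for `build_dataloader` above with `dist=False`; `ToyDataset` is a placeholder (real configs pass a dataset built by `build_dataset`), and the loader options are chosen only to keep the snippet easy to run in a single process.

```python
# Hedged sketch: building a dataloader outside distributed training.
import torch
from torch.utils.data import Dataset

from mmseg.datasets import build_dataloader


class ToyDataset(Dataset):

    def __len__(self):
        return 8

    def __getitem__(self, idx):
        return dict(img=torch.zeros(3, 4, 4), idx=idx)


loader = build_dataloader(
    ToyDataset(),
    samples_per_gpu=2,         # batch size per GPU
    workers_per_gpu=0,         # no worker processes for this quick check
    dist=False,
    shuffle=False,
    seed=0,
    persistent_workers=False)  # requires num_workers > 0, so disabled here

for batch in loader:
    print(batch['idx'])  # two samples per batch, collated by mmcv's collate
```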
+ """ + + CLASSES = ('background', 'vessel') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(ChaseDB1Dataset, self).__init__( + img_suffix='.png', + seg_map_suffix='_1stHO.png', + reduce_zero_label=False, + **kwargs) + assert self.file_client.exists(self.img_dir) diff --git a/downstream/mmsegmentation/mmseg/datasets/cityscapes.py b/downstream/mmsegmentation/mmseg/datasets/cityscapes.py new file mode 100644 index 0000000..ed633d0 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/cityscapes.py @@ -0,0 +1,214 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import mmcv +import numpy as np +from mmcv.utils import print_log +from PIL import Image + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class CityscapesDataset(CustomDataset): + """Cityscapes dataset. + + The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is + fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset. + """ + + CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', + 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', + 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle') + + PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], + [0, 80, 100], [0, 0, 230], [119, 11, 32]] + + def __init__(self, + img_suffix='_leftImg8bit.png', + seg_map_suffix='_gtFine_labelTrainIds.png', + **kwargs): + super(CityscapesDataset, self).__init__( + img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs) + + @staticmethod + def _convert_to_label_id(result): + """Convert trainId to id for cityscapes.""" + if isinstance(result, str): + result = np.load(result) + import cityscapesscripts.helpers.labels as CSLabels + result_copy = result.copy() + for trainId, label in CSLabels.trainId2label.items(): + result_copy[result == trainId] = label.id + + return result_copy + + def results2img(self, results, imgfile_prefix, to_label_id, indices=None): + """Write the segmentation results to images. + + Args: + results (list[ndarray]): Testing results of the + dataset. + imgfile_prefix (str): The filename prefix of the png files. + If the prefix is "somepath/xxx", + the png files will be named "somepath/xxx.png". + to_label_id (bool): whether convert output to label_id for + submission. + indices (list[int], optional): Indices of input results, + if not set, all the indices of the dataset will be used. + Default: None. + + Returns: + list[str: str]: result txt files which contains corresponding + semantic segmentation images. 
+ """ + if indices is None: + indices = list(range(len(self))) + + mmcv.mkdir_or_exist(imgfile_prefix) + result_files = [] + for result, idx in zip(results, indices): + if to_label_id: + result = self._convert_to_label_id(result) + filename = self.img_infos[idx]['filename'] + basename = osp.splitext(osp.basename(filename))[0] + + png_filename = osp.join(imgfile_prefix, f'{basename}.png') + + output = Image.fromarray(result.astype(np.uint8)).convert('P') + import cityscapesscripts.helpers.labels as CSLabels + palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8) + for label_id, label in CSLabels.id2label.items(): + palette[label_id] = label.color + + output.putpalette(palette) + output.save(png_filename) + result_files.append(png_filename) + + return result_files + + def format_results(self, + results, + imgfile_prefix, + to_label_id=True, + indices=None): + """Format the results into dir (standard format for Cityscapes + evaluation). + + Args: + results (list): Testing results of the dataset. + imgfile_prefix (str): The prefix of images files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". + to_label_id (bool): whether convert output to label_id for + submission. Default: False + indices (list[int], optional): Indices of input results, + if not set, all the indices of the dataset will be used. + Default: None. + + Returns: + tuple: (result_files, tmp_dir), result_files is a list containing + the image paths, tmp_dir is the temporal directory created + for saving json/png files when img_prefix is not specified. + """ + if indices is None: + indices = list(range(len(self))) + + assert isinstance(results, list), 'results must be a list.' + assert isinstance(indices, list), 'indices must be a list.' + + result_files = self.results2img(results, imgfile_prefix, to_label_id, + indices) + + return result_files + + def evaluate(self, + results, + metric='mIoU', + logger=None, + imgfile_prefix=None): + """Evaluation in Cityscapes/default protocol. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | None | str): Logger used for printing + related information during evaluation. Default: None. + imgfile_prefix (str | None): The prefix of output image file, + for cityscapes evaluation only. It includes the file path and + the prefix of filename, e.g., "a/b/prefix". + If results are evaluated with cityscapes protocol, it would be + the prefix of output png files. The output files would be + png images under folder "a/b/prefix/xxx.png", where "xxx" is + the image name of cityscapes. If not specified, a temp file + will be created for evaluation. + Default: None. + + Returns: + dict[str, float]: Cityscapes/default metrics. + """ + + eval_results = dict() + metrics = metric.copy() if isinstance(metric, list) else [metric] + if 'cityscapes' in metrics: + eval_results.update( + self._evaluate_cityscapes(results, logger, imgfile_prefix)) + metrics.remove('cityscapes') + if len(metrics) > 0: + eval_results.update( + super(CityscapesDataset, + self).evaluate(results, metrics, logger)) + + return eval_results + + def _evaluate_cityscapes(self, results, logger, imgfile_prefix): + """Evaluation in Cityscapes protocol. + + Args: + results (list): Testing results of the dataset. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. 
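The `_convert_to_label_id` helper above remaps the 19 train IDs back to the raw Cityscapes label IDs that the official evaluator expects (the full mapping comes from `cityscapesscripts`). A self-contained illustration of that remapping, using only a few standard entries of the mapping for road, sidewalk, building and car:

```python
# Illustration of the trainId -> labelId remapping done by _convert_to_label_id;
# only a few entries of the Cityscapes mapping are shown here.
import numpy as np

trainid_to_id = {0: 7, 1: 8, 2: 11, 13: 26}  # road, sidewalk, building, car

result = np.array([[0, 1], [2, 13]], dtype=np.uint8)  # model output (train IDs)
result_copy = result.copy()
for train_id, label_id in trainid_to_id.items():
    result_copy[result == train_id] = label_id

print(result_copy)  # [[ 7  8] [11 26]] -- IDs the official evaluator expects
```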
+ imgfile_prefix (str | None): The prefix of output image file + + Returns: + dict[str: float]: Cityscapes evaluation results. + """ + try: + import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa + except ImportError: + raise ImportError('Please run "pip install cityscapesscripts" to ' + 'install cityscapesscripts first.') + msg = 'Evaluating in Cityscapes style' + if logger is None: + msg = '\n' + msg + print_log(msg, logger=logger) + + result_dir = imgfile_prefix + + eval_results = dict() + print_log(f'Evaluating results under {result_dir} ...', logger=logger) + + CSEval.args.evalInstLevelScore = True + CSEval.args.predictionPath = osp.abspath(result_dir) + CSEval.args.evalPixelAccuracy = True + CSEval.args.JSONOutput = False + + seg_map_list = [] + pred_list = [] + + # when evaluating with official cityscapesscripts, + # **_gtFine_labelIds.png is used + for seg_map in mmcv.scandir( + self.ann_dir, 'gtFine_labelIds.png', recursive=True): + seg_map_list.append(osp.join(self.ann_dir, seg_map)) + pred_list.append(CSEval.getPrediction(CSEval.args, seg_map)) + + eval_results.update( + CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args)) + + return eval_results diff --git a/downstream/mmsegmentation/mmseg/datasets/coco_stuff.py b/downstream/mmsegmentation/mmseg/datasets/coco_stuff.py new file mode 100644 index 0000000..24d0895 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/coco_stuff.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class COCOStuffDataset(CustomDataset): + """COCO-Stuff dataset. + + In segmentation map annotation for COCO-Stuff, Train-IDs of the 10k version + are from 1 to 171, where 0 is the ignore index, and Train-ID of COCO Stuff + 164k is from 0 to 170, where 255 is the ignore index. So, they are all 171 + semantic categories. ``reduce_zero_label`` is set to True and False for the + 10k and 164k versions, respectively. The ``img_suffix`` is fixed to '.jpg', + and ``seg_map_suffix`` is fixed to '.png'. 
+ """ + CLASSES = ( + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', + 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', + 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', + 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', + 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', + 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', + 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', + 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', + 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', + 'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet', + 'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile', + 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', + 'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', + 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', + 'flower', 'fog', 'food-other', 'fruit', 'furniture-other', 'grass', + 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', + 'metal', 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', + 'paper', 'pavement', 'pillow', 'plant-other', 'plastic', 'platform', + 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', + 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper', + 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', + 'table', 'tent', 'textile-other', 'towel', 'tree', 'vegetable', + 'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel', + 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops', + 'window-blind', 'window-other', 'wood') + + PALETTE = [[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192], + [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64], + [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224], + [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192], + [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192], + [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128], + [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], + [0, 32, 0], [0, 128, 128], [64, 128, 160], [128, 160, 0], + [0, 128, 0], [192, 128, 32], [128, 96, 128], [0, 0, 128], + [64, 0, 32], [0, 224, 128], [128, 0, 0], [192, 0, 160], + [0, 96, 128], [128, 128, 128], [64, 0, 160], [128, 224, 128], + [128, 128, 64], [192, 0, 32], [128, 96, 0], [128, 0, 192], + [0, 128, 32], [64, 224, 0], [0, 0, 64], [128, 128, 160], + [64, 96, 0], [0, 128, 192], [0, 128, 160], [192, 224, 0], + [0, 128, 64], [128, 128, 32], [192, 32, 128], [0, 64, 192], + [0, 0, 32], [64, 160, 128], [128, 64, 64], [128, 0, 160], + [64, 32, 128], [128, 192, 192], [0, 0, 160], [192, 160, 128], + [128, 192, 0], [128, 0, 96], [192, 32, 0], [128, 64, 128], + [64, 128, 96], [64, 160, 0], [0, 64, 0], [192, 128, 224], + [64, 32, 0], [0, 192, 128], [64, 128, 224], [192, 160, 0], + [0, 192, 0], [192, 128, 96], [192, 96, 128], [0, 64, 128], + [64, 0, 96], [64, 224, 128], [128, 64, 0], [192, 0, 224], + [64, 96, 128], [128, 192, 128], [64, 0, 224], [192, 224, 128], + [128, 192, 64], [192, 0, 96], [192, 96, 0], [128, 64, 192], + [0, 128, 96], [0, 224, 0], [64, 64, 64], [128, 128, 224], + [0, 96, 0], [64, 192, 192], [0, 128, 
224], [128, 224, 0], + [64, 192, 64], [128, 128, 96], [128, 32, 128], [64, 0, 192], + [0, 64, 96], [0, 160, 128], [192, 0, 64], [128, 64, 224], + [0, 32, 128], [192, 128, 192], [0, 64, 224], [128, 160, 128], + [192, 128, 0], [128, 64, 32], [128, 32, 64], [192, 0, 128], + [64, 192, 32], [0, 160, 64], [64, 0, 0], [192, 192, 160], + [0, 32, 64], [64, 128, 128], [64, 192, 160], [128, 160, 64], + [64, 128, 0], [192, 192, 32], [128, 96, 192], [64, 0, 128], + [64, 64, 32], [0, 224, 192], [192, 0, 0], [192, 64, 160], + [0, 96, 192], [192, 128, 128], [64, 64, 160], [128, 224, 192], + [192, 128, 64], [192, 64, 32], [128, 96, 64], [192, 0, 192], + [0, 192, 32], [64, 224, 64], [64, 0, 64], [128, 192, 160], + [64, 96, 64], [64, 128, 192], [0, 192, 160], [192, 224, 64], + [64, 128, 64], [128, 192, 32], [192, 32, 192], [64, 64, 192], + [0, 64, 32], [64, 160, 192], [192, 64, 64], [128, 64, 160], + [64, 32, 192], [192, 192, 192], [0, 64, 160], [192, 160, 192], + [192, 192, 0], [128, 64, 96], [192, 32, 64], [192, 64, 128], + [64, 192, 96], [64, 160, 64], [64, 64, 0]] + + def __init__(self, **kwargs): + super(COCOStuffDataset, self).__init__( + img_suffix='.jpg', seg_map_suffix='_labelTrainIds.png', **kwargs) diff --git a/downstream/mmsegmentation/mmseg/datasets/custom.py b/downstream/mmsegmentation/mmseg/datasets/custom.py new file mode 100644 index 0000000..4615d41 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/custom.py @@ -0,0 +1,487 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings +from collections import OrderedDict + +import mmcv +import numpy as np +from mmcv.utils import print_log +from prettytable import PrettyTable +from torch.utils.data import Dataset + +from mmseg.core import eval_metrics, intersect_and_union, pre_eval_to_metrics +from mmseg.utils import get_root_logger +from .builder import DATASETS +from .pipelines import Compose, LoadAnnotations + + +@DATASETS.register_module() +class CustomDataset(Dataset): + """Custom dataset for semantic segmentation. An example of file structure + is as followed. + + .. code-block:: none + + ├── data + │ ├── my_dataset + │ │ ├── img_dir + │ │ │ ├── train + │ │ │ │ ├── xxx{img_suffix} + │ │ │ │ ├── yyy{img_suffix} + │ │ │ │ ├── zzz{img_suffix} + │ │ │ ├── val + │ │ ├── ann_dir + │ │ │ ├── train + │ │ │ │ ├── xxx{seg_map_suffix} + │ │ │ │ ├── yyy{seg_map_suffix} + │ │ │ │ ├── zzz{seg_map_suffix} + │ │ │ ├── val + + The img/gt_semantic_seg pair of CustomDataset should be of the same + except suffix. A valid img/gt_semantic_seg filename pair should be like + ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included + in the suffix). If split is given, then ``xxx`` is specified in txt file. + Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded. + Please refer to ``docs/en/tutorials/new_dataset.md`` for more details. + + + Args: + pipeline (list[dict]): Processing pipeline + img_dir (str): Path to image directory + img_suffix (str): Suffix of images. Default: '.jpg' + ann_dir (str, optional): Path to annotation directory. Default: None + seg_map_suffix (str): Suffix of segmentation maps. Default: '.png' + split (str, optional): Split txt file. If split is specified, only + file with suffix in the splits will be loaded. Otherwise, all + images in img_dir/ann_dir will be loaded. Default: None + data_root (str, optional): Data root for img_dir/ann_dir. Default: + None. + test_mode (bool): If test_mode=True, gt wouldn't be loaded. 
+ ignore_index (int): The label index to be ignored. Default: 255 + reduce_zero_label (bool): Whether to mark label zero as ignored. + Default: False + classes (str | Sequence[str], optional): Specify classes to load. + If is None, ``cls.CLASSES`` will be used. Default: None. + palette (Sequence[Sequence[int]]] | np.ndarray | None): + The palette of segmentation map. If None is given, and + self.PALETTE is None, random palette will be generated. + Default: None + gt_seg_map_loader_cfg (dict, optional): build LoadAnnotations to + load gt for evaluation, load from disk by default. Default: None. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + CLASSES = None + + PALETTE = None + + def __init__(self, + pipeline, + img_dir, + img_suffix='.jpg', + ann_dir=None, + seg_map_suffix='.png', + split=None, + data_root=None, + test_mode=False, + ignore_index=255, + reduce_zero_label=False, + classes=None, + palette=None, + gt_seg_map_loader_cfg=None, + file_client_args=dict(backend='disk')): + self.pipeline = Compose(pipeline) + self.img_dir = img_dir + self.img_suffix = img_suffix + self.ann_dir = ann_dir + self.seg_map_suffix = seg_map_suffix + self.split = split + self.data_root = data_root + self.test_mode = test_mode + self.ignore_index = ignore_index + self.reduce_zero_label = reduce_zero_label + self.label_map = None + self.CLASSES, self.PALETTE = self.get_classes_and_palette( + classes, palette) + self.gt_seg_map_loader = LoadAnnotations( + ) if gt_seg_map_loader_cfg is None else LoadAnnotations( + **gt_seg_map_loader_cfg) + + self.file_client_args = file_client_args + self.file_client = mmcv.FileClient.infer_client(self.file_client_args) + + if test_mode: + assert self.CLASSES is not None, \ + '`cls.CLASSES` or `classes` should be specified when testing' + + # join paths if data_root is specified + if self.data_root is not None: + if not osp.isabs(self.img_dir): + self.img_dir = osp.join(self.data_root, self.img_dir) + if not (self.ann_dir is None or osp.isabs(self.ann_dir)): + self.ann_dir = osp.join(self.data_root, self.ann_dir) + if not (self.split is None or osp.isabs(self.split)): + self.split = osp.join(self.data_root, self.split) + + # load annotations + self.img_infos = self.load_annotations(self.img_dir, self.img_suffix, + self.ann_dir, + self.seg_map_suffix, self.split) + + def __len__(self): + """Total number of samples of data.""" + return len(self.img_infos) + + def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix, + split): + """Load annotation from directory. + + Args: + img_dir (str): Path to image directory + img_suffix (str): Suffix of images. + ann_dir (str|None): Path to annotation directory. + seg_map_suffix (str|None): Suffix of segmentation maps. + split (str|None): Split txt file. If split is specified, only file + with suffix in the splits will be loaded. Otherwise, all images + in img_dir/ann_dir will be loaded. Default: None + + Returns: + list[dict]: All image info of dataset. 
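Given the directory layout documented in the `CustomDataset` docstring above, a dataset is normally described by a config dict and built through the `DATASETS` registry. The following is a hedged sketch of such a fragment; the paths, class names, palette and pipeline steps are placeholders, and the pipeline type names follow common mmseg configs rather than anything defined in this diff.

```python
# Hedged config sketch for instantiating CustomDataset through the registry.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]

train_dataset = dict(
    type='CustomDataset',
    data_root='data/my_dataset',
    img_dir='img_dir/train',
    ann_dir='ann_dir/train',
    img_suffix='.jpg',
    seg_map_suffix='.png',
    classes=('background', 'foreground'),
    palette=[[0, 0, 0], [255, 0, 0]],
    pipeline=train_pipeline)

# mmseg.datasets.build_dataset(train_dataset) would then return the instance.
```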
+ """ + + img_infos = [] + if split is not None: + lines = mmcv.list_from_file( + split, file_client_args=self.file_client_args) + for line in lines: + img_name = line.strip() + img_info = dict(filename=img_name + img_suffix) + if ann_dir is not None: + seg_map = img_name + seg_map_suffix + img_info['ann'] = dict(seg_map=seg_map) + img_infos.append(img_info) + else: + for img in self.file_client.list_dir_or_file( + dir_path=img_dir, + list_dir=False, + suffix=img_suffix, + recursive=True): + img_info = dict(filename=img) + if ann_dir is not None: + seg_map = img.replace(img_suffix, seg_map_suffix) + img_info['ann'] = dict(seg_map=seg_map) + img_infos.append(img_info) + img_infos = sorted(img_infos, key=lambda x: x['filename']) + + print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger()) + return img_infos + + def get_ann_info(self, idx): + """Get annotation by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + + return self.img_infos[idx]['ann'] + + def pre_pipeline(self, results): + """Prepare results dict for pipeline.""" + results['seg_fields'] = [] + results['img_prefix'] = self.img_dir + results['seg_prefix'] = self.ann_dir + if self.custom_classes: + results['label_map'] = self.label_map + + def __getitem__(self, idx): + """Get training/test data after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Training/test data (with annotation if `test_mode` is set + False). + """ + + if self.test_mode: + return self.prepare_test_img(idx) + else: + return self.prepare_train_img(idx) + + def prepare_train_img(self, idx): + """Get training data and annotations after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Training data and annotation after pipeline with new keys + introduced by pipeline. + """ + + img_info = self.img_infos[idx] + ann_info = self.get_ann_info(idx) + results = dict(img_info=img_info, ann_info=ann_info) + self.pre_pipeline(results) + return self.pipeline(results) + + def prepare_test_img(self, idx): + """Get testing data after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Testing data after pipeline with new keys introduced by + pipeline. + """ + + img_info = self.img_infos[idx] + results = dict(img_info=img_info) + self.pre_pipeline(results) + return self.pipeline(results) + + def format_results(self, results, imgfile_prefix, indices=None, **kwargs): + """Place holder to format result to dataset specific output.""" + raise NotImplementedError + + def get_gt_seg_map_by_idx(self, index): + """Get one ground truth segmentation map for evaluation.""" + ann_info = self.get_ann_info(index) + results = dict(ann_info=ann_info) + self.pre_pipeline(results) + self.gt_seg_map_loader(results) + return results['gt_semantic_seg'] + + def get_gt_seg_maps(self, efficient_test=None): + """Get ground truth segmentation maps for evaluation.""" + if efficient_test is not None: + warnings.warn( + 'DeprecationWarning: ``efficient_test`` has been deprecated ' + 'since MMSeg v0.16, the ``get_gt_seg_maps()`` is CPU memory ' + 'friendly by default. ') + + for idx in range(len(self)): + ann_info = self.get_ann_info(idx) + results = dict(ann_info=ann_info) + self.pre_pipeline(results) + self.gt_seg_map_loader(results) + yield results['gt_semantic_seg'] + + def pre_eval(self, preds, indices): + """Collect eval result from each iteration. + + Args: + preds (list[torch.Tensor] | torch.Tensor): the segmentation logit + after argmax, shape (N, H, W). 
+            indices (list[int] | int): the prediction related ground truth
+                indices.
+
+        Returns:
+            list[torch.Tensor]: (area_intersect, area_union, area_prediction,
+                area_ground_truth).
+        """
+        # In order to be compatible with batch inference
+        if not isinstance(indices, list):
+            indices = [indices]
+        if not isinstance(preds, list):
+            preds = [preds]
+
+        pre_eval_results = []
+
+        for pred, index in zip(preds, indices):
+            seg_map = self.get_gt_seg_map_by_idx(index)
+            pre_eval_results.append(
+                intersect_and_union(
+                    pred,
+                    seg_map,
+                    len(self.CLASSES),
+                    self.ignore_index,
+                    # as the labels have been converted when the dataset was
+                    # initialized in `get_palette_for_custom_classes`, this
+                    # `label_map` should be `dict()`, see
+                    # https://github.com/open-mmlab/mmsegmentation/issues/1415
+                    # for more details
+                    label_map=dict(),
+                    reduce_zero_label=self.reduce_zero_label))
+
+        return pre_eval_results
+
+    def get_classes_and_palette(self, classes=None, palette=None):
+        """Get class names of the current dataset.
+
+        Args:
+            classes (Sequence[str] | str | None): If classes is None, use
+                default CLASSES defined by builtin dataset. If classes is a
+                string, take it as a file name. The file contains the name of
+                classes where each line contains one class name. If classes is
+                a tuple or list, override the CLASSES defined by the dataset.
+            palette (Sequence[Sequence[int]] | np.ndarray | None):
+                The palette of segmentation map. If None is given, a random
+                palette will be generated. Default: None
+        """
+        if classes is None:
+            self.custom_classes = False
+            return self.CLASSES, self.PALETTE
+
+        self.custom_classes = True
+        if isinstance(classes, str):
+            # take it as a file path
+            class_names = mmcv.list_from_file(classes)
+        elif isinstance(classes, (tuple, list)):
+            class_names = classes
+        else:
+            raise ValueError(f'Unsupported type {type(classes)} of classes.')
+
+        if self.CLASSES:
+            if not set(class_names).issubset(self.CLASSES):
+                raise ValueError('classes is not a subset of CLASSES.')
+
+            # dictionary, its keys are the old label ids and its values
+            # are the new label ids.
+            # used for changing pixel labels in load_annotations.
+            self.label_map = {}
+            for i, c in enumerate(self.CLASSES):
+                if c not in class_names:
+                    self.label_map[i] = -1
+                else:
+                    self.label_map[i] = class_names.index(c)
+
+        palette = self.get_palette_for_custom_classes(class_names, palette)
+
+        return class_names, palette
+
+    def get_palette_for_custom_classes(self, class_names, palette=None):
+
+        if self.label_map is not None:
+            # return subset of palette
+            palette = []
+            for old_id, new_id in sorted(
+                    self.label_map.items(), key=lambda x: x[1]):
+                if new_id != -1:
+                    palette.append(self.PALETTE[old_id])
+            palette = type(self.PALETTE)(palette)
+
+        elif palette is None:
+            if self.PALETTE is None:
+                # Get the random state before setting the seed, and restore
+                # it later. This prevents loss of randomness, as the palette
+                # may be different in each iteration if not specified.
+                # See: https://github.com/open-mmlab/mmdetection/issues/5844
+                state = np.random.get_state()
+                np.random.seed(42)
+                # random palette
+                palette = np.random.randint(0, 255, size=(len(class_names), 3))
+                np.random.set_state(state)
+            else:
+                palette = self.PALETTE
+
+        return palette
+
+    def evaluate(self,
+                 results,
+                 metric='mIoU',
+                 logger=None,
+                 gt_seg_maps=None,
+                 **kwargs):
+        """Evaluate the dataset.
+
+        Args:
+            results (list[tuple[torch.Tensor]] | list[str]): per image pre_eval
+                results or predicted segmentation maps for computing evaluation
+                metric.
+ metric (str | list[str]): Metrics to be evaluated. 'mIoU', + 'mDice' and 'mFscore' are supported. + logger (logging.Logger | None | str): Logger used for printing + related information during evaluation. Default: None. + gt_seg_maps (generator[ndarray]): Custom gt seg maps as input, + used in ConcatDataset + + Returns: + dict[str, float]: Default metrics. + """ + if isinstance(metric, str): + metric = [metric] + allowed_metrics = ['mIoU', 'mDice', 'mFscore'] + if not set(metric).issubset(set(allowed_metrics)): + raise KeyError('metric {} is not supported'.format(metric)) + + eval_results = {} + # test a list of files + if mmcv.is_list_of(results, np.ndarray) or mmcv.is_list_of( + results, str): + if gt_seg_maps is None: + gt_seg_maps = self.get_gt_seg_maps() + num_classes = len(self.CLASSES) + ret_metrics = eval_metrics( + results, + gt_seg_maps, + num_classes, + self.ignore_index, + metric, + label_map=dict(), + reduce_zero_label=self.reduce_zero_label) + # test a list of pre_eval_results + else: + ret_metrics = pre_eval_to_metrics(results, metric) + + # Because dataset.CLASSES is required for per-eval. + if self.CLASSES is None: + class_names = tuple(range(num_classes)) + else: + class_names = self.CLASSES + + # summary table + ret_metrics_summary = OrderedDict({ + ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2) + for ret_metric, ret_metric_value in ret_metrics.items() + }) + + # each class table + ret_metrics.pop('aAcc', None) + ret_metrics_class = OrderedDict({ + ret_metric: np.round(ret_metric_value * 100, 2) + for ret_metric, ret_metric_value in ret_metrics.items() + }) + ret_metrics_class.update({'Class': class_names}) + ret_metrics_class.move_to_end('Class', last=False) + + # for logger + class_table_data = PrettyTable() + for key, val in ret_metrics_class.items(): + class_table_data.add_column(key, val) + + summary_table_data = PrettyTable() + for key, val in ret_metrics_summary.items(): + if key == 'aAcc': + summary_table_data.add_column(key, [val]) + else: + summary_table_data.add_column('m' + key, [val]) + + print_log('per class results:', logger) + print_log('\n' + class_table_data.get_string(), logger=logger) + print_log('Summary:', logger) + print_log('\n' + summary_table_data.get_string(), logger=logger) + + # each metric dict + for key, value in ret_metrics_summary.items(): + if key == 'aAcc': + eval_results[key] = value / 100.0 + else: + eval_results['m' + key] = value / 100.0 + + ret_metrics_class.pop('Class', None) + for key, value in ret_metrics_class.items(): + eval_results.update({ + key + '.' + str(name): value[idx] / 100.0 + for idx, name in enumerate(class_names) + }) + + return eval_results diff --git a/downstream/mmsegmentation/mmseg/datasets/dark_zurich.py b/downstream/mmsegmentation/mmseg/datasets/dark_zurich.py new file mode 100644 index 0000000..0b6fda6 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/dark_zurich.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
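+# Because the class below is registered in ``DATASETS``, configs can refer to
+# it by name. A hypothetical config entry (the directory names are assumptions
+# and are not defined in this file) might look like:
+#
+#     dict(
+#         type='DarkZurichDataset',
+#         data_root='data/dark_zurich/',
+#         img_dir='rgb_anon/val',
+#         ann_dir='gt/val',
+#         pipeline=test_pipeline)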
+from .builder import DATASETS +from .cityscapes import CityscapesDataset + + +@DATASETS.register_module() +class DarkZurichDataset(CityscapesDataset): + """DarkZurichDataset dataset.""" + + def __init__(self, **kwargs): + super().__init__( + img_suffix='_rgb_anon.png', + seg_map_suffix='_gt_labelTrainIds.png', + **kwargs) diff --git a/downstream/mmsegmentation/mmseg/datasets/dataset_wrappers.py b/downstream/mmsegmentation/mmseg/datasets/dataset_wrappers.py new file mode 100644 index 0000000..1fb089f --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/dataset_wrappers.py @@ -0,0 +1,277 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import bisect +import collections +import copy +from itertools import chain + +import mmcv +import numpy as np +from mmcv.utils import build_from_cfg, print_log +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + +from .builder import DATASETS, PIPELINES +from .cityscapes import CityscapesDataset + + +@DATASETS.register_module() +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + support evaluation and formatting results + + Args: + datasets (list[:obj:`Dataset`]): A list of datasets. + separate_eval (bool): Whether to evaluate the concatenated + dataset results separately, Defaults to True. + """ + + def __init__(self, datasets, separate_eval=True): + super(ConcatDataset, self).__init__(datasets) + self.CLASSES = datasets[0].CLASSES + self.PALETTE = datasets[0].PALETTE + self.separate_eval = separate_eval + assert separate_eval in [True, False], \ + f'separate_eval can only be True or False,' \ + f'but get {separate_eval}' + if any([isinstance(ds, CityscapesDataset) for ds in datasets]): + raise NotImplementedError( + 'Evaluating ConcatDataset containing CityscapesDataset' + 'is not supported!') + + def evaluate(self, results, logger=None, **kwargs): + """Evaluate the results. + + Args: + results (list[tuple[torch.Tensor]] | list[str]]): per image + pre_eval results or predict segmentation map for + computing evaluation metric. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + + Returns: + dict[str: float]: evaluate results of the total dataset + or each separate + dataset if `self.separate_eval=True`. + """ + assert len(results) == self.cumulative_sizes[-1], \ + ('Dataset and results have different sizes: ' + f'{self.cumulative_sizes[-1]} v.s. 
{len(results)}') + + # Check whether all the datasets support evaluation + for dataset in self.datasets: + assert hasattr(dataset, 'evaluate'), \ + f'{type(dataset)} does not implement evaluate function' + + if self.separate_eval: + dataset_idx = -1 + total_eval_results = dict() + for size, dataset in zip(self.cumulative_sizes, self.datasets): + start_idx = 0 if dataset_idx == -1 else \ + self.cumulative_sizes[dataset_idx] + end_idx = self.cumulative_sizes[dataset_idx + 1] + + results_per_dataset = results[start_idx:end_idx] + print_log( + f'\nEvaluateing {dataset.img_dir} with ' + f'{len(results_per_dataset)} images now', + logger=logger) + + eval_results_per_dataset = dataset.evaluate( + results_per_dataset, logger=logger, **kwargs) + dataset_idx += 1 + for k, v in eval_results_per_dataset.items(): + total_eval_results.update({f'{dataset_idx}_{k}': v}) + + return total_eval_results + + if len(set([type(ds) for ds in self.datasets])) != 1: + raise NotImplementedError( + 'All the datasets should have same types when ' + 'self.separate_eval=False') + else: + if mmcv.is_list_of(results, np.ndarray) or mmcv.is_list_of( + results, str): + # merge the generators of gt_seg_maps + gt_seg_maps = chain( + *[dataset.get_gt_seg_maps() for dataset in self.datasets]) + else: + # if the results are `pre_eval` results, + # we do not need gt_seg_maps to evaluate + gt_seg_maps = None + eval_results = self.datasets[0].evaluate( + results, gt_seg_maps=gt_seg_maps, logger=logger, **kwargs) + return eval_results + + def get_dataset_idx_and_sample_idx(self, indice): + """Return dataset and sample index when given an indice of + ConcatDataset. + + Args: + indice (int): indice of sample in ConcatDataset + + Returns: + int: the index of sub dataset the sample belong to + int: the index of sample in its corresponding subset + """ + if indice < 0: + if -indice > len(self): + raise ValueError( + 'absolute value of index should not exceed dataset length') + indice = len(self) + indice + dataset_idx = bisect.bisect_right(self.cumulative_sizes, indice) + if dataset_idx == 0: + sample_idx = indice + else: + sample_idx = indice - self.cumulative_sizes[dataset_idx - 1] + return dataset_idx, sample_idx + + def format_results(self, results, imgfile_prefix, indices=None, **kwargs): + """format result for every sample of ConcatDataset.""" + if indices is None: + indices = list(range(len(self))) + + assert isinstance(results, list), 'results must be a list.' + assert isinstance(indices, list), 'indices must be a list.' + + ret_res = [] + for i, indice in enumerate(indices): + dataset_idx, sample_idx = self.get_dataset_idx_and_sample_idx( + indice) + res = self.datasets[dataset_idx].format_results( + [results[i]], + imgfile_prefix + f'/{dataset_idx}', + indices=[sample_idx], + **kwargs) + ret_res.append(res) + return sum(ret_res, []) + + def pre_eval(self, preds, indices): + """do pre eval for every sample of ConcatDataset.""" + # In order to compat with batch inference + if not isinstance(indices, list): + indices = [indices] + if not isinstance(preds, list): + preds = [preds] + ret_res = [] + for i, indice in enumerate(indices): + dataset_idx, sample_idx = self.get_dataset_idx_and_sample_idx( + indice) + res = self.datasets[dataset_idx].pre_eval(preds[i], sample_idx) + ret_res.append(res) + return sum(ret_res, []) + + +@DATASETS.register_module() +class RepeatDataset(object): + """A wrapper of repeated dataset. + + The length of repeated dataset will be `times` larger than the original + dataset. 
This is useful when the data loading time is long but the dataset + is small. Using RepeatDataset can reduce the data loading time between + epochs. + + Args: + dataset (:obj:`Dataset`): The dataset to be repeated. + times (int): Repeat times. + """ + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + self.CLASSES = dataset.CLASSES + self.PALETTE = dataset.PALETTE + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + """Get item from original dataset.""" + return self.dataset[idx % self._ori_len] + + def __len__(self): + """The length is multiplied by ``times``""" + return self.times * self._ori_len + + +@DATASETS.register_module() +class MultiImageMixDataset: + """A wrapper of multiple images mixed dataset. + + Suitable for training on multiple images mixed data augmentation like + mosaic and mixup. For the augmentation pipeline of mixed image data, + the `get_indexes` method needs to be provided to obtain the image + indexes, and you can set `skip_flags` to change the pipeline running + process. + + + Args: + dataset (:obj:`CustomDataset`): The dataset to be mixed. + pipeline (Sequence[dict]): Sequence of transform object or + config dict to be composed. + skip_type_keys (list[str], optional): Sequence of type string to + be skip pipeline. Default to None. + """ + + def __init__(self, dataset, pipeline, skip_type_keys=None): + assert isinstance(pipeline, collections.abc.Sequence) + if skip_type_keys is not None: + assert all([ + isinstance(skip_type_key, str) + for skip_type_key in skip_type_keys + ]) + self._skip_type_keys = skip_type_keys + + self.pipeline = [] + self.pipeline_types = [] + for transform in pipeline: + if isinstance(transform, dict): + self.pipeline_types.append(transform['type']) + transform = build_from_cfg(transform, PIPELINES) + self.pipeline.append(transform) + else: + raise TypeError('pipeline must be a dict') + + self.dataset = dataset + self.CLASSES = dataset.CLASSES + self.PALETTE = dataset.PALETTE + self.num_samples = len(dataset) + + def __len__(self): + return self.num_samples + + def __getitem__(self, idx): + results = copy.deepcopy(self.dataset[idx]) + for (transform, transform_type) in zip(self.pipeline, + self.pipeline_types): + if self._skip_type_keys is not None and \ + transform_type in self._skip_type_keys: + continue + + if hasattr(transform, 'get_indexes'): + indexes = transform.get_indexes(self.dataset) + if not isinstance(indexes, collections.abc.Sequence): + indexes = [indexes] + mix_results = [ + copy.deepcopy(self.dataset[index]) for index in indexes + ] + results['mix_results'] = mix_results + + results = transform(results) + + if 'mix_results' in results: + results.pop('mix_results') + + return results + + def update_skip_type_keys(self, skip_type_keys): + """Update skip_type_keys. + + It is called by an external hook. + + Args: + skip_type_keys (list[str], optional): Sequence of type + string to be skip pipeline. + """ + assert all([ + isinstance(skip_type_key, str) for skip_type_key in skip_type_keys + ]) + self._skip_type_keys = skip_type_keys diff --git a/downstream/mmsegmentation/mmseg/datasets/drive.py b/downstream/mmsegmentation/mmseg/datasets/drive.py new file mode 100644 index 0000000..d44fb0d --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/drive.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class DRIVEDataset(CustomDataset): + """DRIVE dataset. 
+ + In segmentation map annotation for DRIVE, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '_manual1.png'. + """ + + CLASSES = ('background', 'vessel') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(DRIVEDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='_manual1.png', + reduce_zero_label=False, + **kwargs) + assert self.file_client.exists(self.img_dir) diff --git a/downstream/mmsegmentation/mmseg/datasets/hrf.py b/downstream/mmsegmentation/mmseg/datasets/hrf.py new file mode 100644 index 0000000..cf3ea8d --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/hrf.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class HRFDataset(CustomDataset): + """HRF dataset. + + In segmentation map annotation for HRF, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '.png'. + """ + + CLASSES = ('background', 'vessel') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(HRFDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=False, + **kwargs) + assert self.file_client.exists(self.img_dir) diff --git a/downstream/mmsegmentation/mmseg/datasets/isaid.py b/downstream/mmsegmentation/mmseg/datasets/isaid.py new file mode 100644 index 0000000..db24f93 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/isaid.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import mmcv +from mmcv.utils import print_log + +from ..utils import get_root_logger +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class iSAIDDataset(CustomDataset): + """ iSAID: A Large-scale Dataset for Instance Segmentation in Aerial Images + In segmentation map annotation for iSAID dataset, which is included + in 16 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '_manual1.png'. + """ + + CLASSES = ('background', 'ship', 'store_tank', 'baseball_diamond', + 'tennis_court', 'basketball_court', 'Ground_Track_Field', + 'Bridge', 'Large_Vehicle', 'Small_Vehicle', 'Helicopter', + 'Swimming_pool', 'Roundabout', 'Soccer_ball_field', 'plane', + 'Harbor') + + PALETTE = [[0, 0, 0], [0, 0, 63], [0, 63, 63], [0, 63, 0], [0, 63, 127], + [0, 63, 191], [0, 63, 255], [0, 127, 63], [0, 127, 127], + [0, 0, 127], [0, 0, 191], [0, 0, 255], [0, 191, 127], + [0, 127, 191], [0, 127, 255], [0, 100, 155]] + + def __init__(self, **kwargs): + super(iSAIDDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.png', + ignore_index=255, + **kwargs) + assert self.file_client.exists(self.img_dir) + + def load_annotations(self, + img_dir, + img_suffix, + ann_dir, + seg_map_suffix=None, + split=None): + """Load annotation from directory. + + Args: + img_dir (str): Path to image directory + img_suffix (str): Suffix of images. + ann_dir (str|None): Path to annotation directory. + seg_map_suffix (str|None): Suffix of segmentation maps. + split (str|None): Split txt file. If split is specified, only file + with suffix in the splits will be loaded. Otherwise, all images + in img_dir/ann_dir will be loaded. 
Default: None + + Returns: + list[dict]: All image info of dataset. + """ + + img_infos = [] + if split is not None: + with open(split) as f: + for line in f: + name = line.strip() + img_info = dict(filename=name + img_suffix) + if ann_dir is not None: + ann_name = name + '_instance_color_RGB' + seg_map = ann_name + seg_map_suffix + img_info['ann'] = dict(seg_map=seg_map) + img_infos.append(img_info) + else: + for img in mmcv.scandir(img_dir, img_suffix, recursive=True): + img_info = dict(filename=img) + if ann_dir is not None: + seg_img = img + seg_map = seg_img.replace( + img_suffix, '_instance_color_RGB' + seg_map_suffix) + img_info['ann'] = dict(seg_map=seg_map) + img_infos.append(img_info) + + print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger()) + return img_infos diff --git a/downstream/mmsegmentation/mmseg/datasets/isprs.py b/downstream/mmsegmentation/mmseg/datasets/isprs.py new file mode 100644 index 0000000..5f23e1a --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/isprs.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class ISPRSDataset(CustomDataset): + """ISPRS dataset. + + In segmentation map annotation for LoveDA, 0 is the ignore index. + ``reduce_zero_label`` should be set to True. The ``img_suffix`` and + ``seg_map_suffix`` are both fixed to '.png'. + """ + CLASSES = ('impervious_surface', 'building', 'low_vegetation', 'tree', + 'car', 'clutter') + + PALETTE = [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], + [255, 255, 0], [255, 0, 0]] + + def __init__(self, **kwargs): + super(ISPRSDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) diff --git a/downstream/mmsegmentation/mmseg/datasets/loveda.py b/downstream/mmsegmentation/mmseg/datasets/loveda.py new file mode 100644 index 0000000..90d654f --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/loveda.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import mmcv +import numpy as np +from PIL import Image + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class LoveDADataset(CustomDataset): + """LoveDA dataset. + + In segmentation map annotation for LoveDA, 0 is the ignore index. + ``reduce_zero_label`` should be set to True. The ``img_suffix`` and + ``seg_map_suffix`` are both fixed to '.png'. + """ + CLASSES = ('background', 'building', 'road', 'water', 'barren', 'forest', + 'agricultural') + + PALETTE = [[255, 255, 255], [255, 0, 0], [255, 255, 0], [0, 0, 255], + [159, 129, 183], [0, 255, 0], [255, 195, 128]] + + def __init__(self, **kwargs): + super(LoveDADataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) + + def results2img(self, results, imgfile_prefix, indices=None): + """Write the segmentation results to images. + + Args: + results (list[ndarray]): Testing results of the + dataset. + imgfile_prefix (str): The filename prefix of the png files. + If the prefix is "somepath/xxx", + the png files will be named "somepath/xxx.png". + indices (list[int], optional): Indices of input results, if not + set, all the indices of the dataset will be used. + Default: None. + + Returns: + list[str: str]: result txt files which contains corresponding + semantic segmentation images. 
+ """ + + mmcv.mkdir_or_exist(imgfile_prefix) + result_files = [] + for result, idx in zip(results, indices): + + filename = self.img_infos[idx]['filename'] + basename = osp.splitext(osp.basename(filename))[0] + + png_filename = osp.join(imgfile_prefix, f'{basename}.png') + + # The index range of official requirement is from 0 to 6. + output = Image.fromarray(result.astype(np.uint8)) + output.save(png_filename) + result_files.append(png_filename) + + return result_files + + def format_results(self, results, imgfile_prefix, indices=None): + """Format the results into dir (standard format for LoveDA evaluation). + + Args: + results (list): Testing results of the dataset. + imgfile_prefix (str): The prefix of images files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". + indices (list[int], optional): Indices of input results, + if not set, all the indices of the dataset will be used. + Default: None. + + Returns: + tuple: (result_files, tmp_dir), result_files is a list containing + the image paths, tmp_dir is the temporal directory created + for saving json/png files when img_prefix is not specified. + """ + if indices is None: + indices = list(range(len(self))) + + assert isinstance(results, list), 'results must be a list.' + assert isinstance(indices, list), 'indices must be a list.' + + result_files = self.results2img(results, imgfile_prefix, indices) + + return result_files diff --git a/downstream/mmsegmentation/mmseg/datasets/night_driving.py b/downstream/mmsegmentation/mmseg/datasets/night_driving.py new file mode 100644 index 0000000..6620586 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/night_driving.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import DATASETS +from .cityscapes import CityscapesDataset + + +@DATASETS.register_module() +class NightDrivingDataset(CityscapesDataset): + """NightDrivingDataset dataset.""" + + def __init__(self, **kwargs): + super().__init__( + img_suffix='_leftImg8bit.png', + seg_map_suffix='_gtCoarse_labelTrainIds.png', + **kwargs) diff --git a/downstream/mmsegmentation/mmseg/datasets/pascal_context.py b/downstream/mmsegmentation/mmseg/datasets/pascal_context.py new file mode 100644 index 0000000..efacee0 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/pascal_context.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class PascalContextDataset(CustomDataset): + """PascalContext dataset. + + In segmentation map annotation for PascalContext, 0 stands for background, + which is included in 60 categories. ``reduce_zero_label`` is fixed to + False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is + fixed to '.png'. + + Args: + split (str): Split txt file for PascalContext. 
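+
+    A hypothetical instantiation sketch (the directory layout follows the
+    usual VOCdevkit convention and ``test_pipeline`` is assumed to be defined
+    elsewhere):
+
+    .. code-block:: python
+
+        dataset = PascalContextDataset(
+            split='ImageSets/SegmentationContext/val.txt',
+            pipeline=test_pipeline,
+            data_root='data/VOCdevkit/VOC2010',
+            img_dir='JPEGImages',
+            ann_dir='SegmentationClassContext')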
+ """ + + CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench', + 'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus', + 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth', + 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence', + 'floor', 'flower', 'food', 'grass', 'ground', 'horse', + 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person', + 'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep', + 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table', + 'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water', + 'window', 'wood') + + PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]] + + def __init__(self, split, **kwargs): + super(PascalContextDataset, self).__init__( + img_suffix='.jpg', + seg_map_suffix='.png', + split=split, + reduce_zero_label=False, + **kwargs) + assert self.file_client.exists(self.img_dir) and self.split is not None + + +@DATASETS.register_module() +class PascalContextDataset59(CustomDataset): + """PascalContext dataset. + + In segmentation map annotation for PascalContext, 0 stands for background, + which is included in 60 categories. ``reduce_zero_label`` is fixed to + False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is + fixed to '.png'. + + Args: + split (str): Split txt file for PascalContext. 
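+
+    Compared with :class:`PascalContextDataset`, this variant sets
+    ``reduce_zero_label=True``, so background pixels are ignored. A simplified
+    sketch of the resulting label shift (the array values are illustrative):
+
+    .. code-block:: python
+
+        import numpy as np
+
+        raw = np.array([0, 1, 60], dtype=np.uint8)
+        shifted = raw.copy()
+        shifted[raw == 0] = 255   # background becomes the ignore index
+        shifted[raw != 0] -= 1    # the remaining ids shift down by one
+        # shifted -> array([255, 0, 59], dtype=np.uint8)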
+ """ + + CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle', + 'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet', + 'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow', + 'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower', + 'food', 'grass', 'ground', 'horse', 'keyboard', 'light', + 'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform', + 'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk', + 'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train', + 'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood') + + PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], + [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], + [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], + [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], + [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], + [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], + [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], + [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], + [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], + [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], + [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], + [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], + [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], + [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], + [0, 235, 255], [0, 173, 255], [31, 0, 255]] + + def __init__(self, split, **kwargs): + super(PascalContextDataset59, self).__init__( + img_suffix='.jpg', + seg_map_suffix='.png', + split=split, + reduce_zero_label=True, + **kwargs) + assert self.file_client.exists(self.img_dir) and self.split is not None diff --git a/downstream/mmsegmentation/mmseg/datasets/pipelines/__init__.py b/downstream/mmsegmentation/mmseg/datasets/pipelines/__init__.py new file mode 100644 index 0000000..2317d92 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/pipelines/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .compose import Compose +from .formatting import (Collect, ImageToTensor, ToDataContainer, ToTensor, + Transpose, to_tensor) +from .loading import LoadAnnotations, LoadImageFromFile +from .test_time_aug import MultiScaleFlipAug +from .transforms import (AlignedResize, CLAHE, AdjustGamma, Normalize, Pad, + PhotoMetricDistortion, RandomCrop, RandomCutOut, + RandomFlip, RandomMosaic, RandomRotate, Rerange, + Resize, RGB2Gray, SegRescale) + +__all__ = [ + 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', + 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', + 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', + 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', + 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', 'RandomCutOut', + 'RandomMosaic', 'AlignedResize' +] diff --git a/downstream/mmsegmentation/mmseg/datasets/pipelines/compose.py b/downstream/mmsegmentation/mmseg/datasets/pipelines/compose.py new file mode 100644 index 0000000..30280c1 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/pipelines/compose.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import collections + +from mmcv.utils import build_from_cfg + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class Compose(object): + """Compose multiple transforms sequentially. 
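+
+    A minimal sketch of typical usage (both transforms are registered in this
+    package):
+
+    .. code-block:: python
+
+        pipeline = Compose([
+            dict(type='LoadImageFromFile'),
+            dict(type='LoadAnnotations'),
+        ])
+        # calling ``pipeline(results)`` applies each transform in order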
+ + Args: + transforms (Sequence[dict | callable]): Sequence of transform object or + config dict to be composed. + """ + + def __init__(self, transforms): + assert isinstance(transforms, collections.abc.Sequence) + self.transforms = [] + for transform in transforms: + if isinstance(transform, dict): + transform = build_from_cfg(transform, PIPELINES) + self.transforms.append(transform) + elif callable(transform): + self.transforms.append(transform) + else: + raise TypeError('transform must be callable or a dict') + + def __call__(self, data): + """Call function to apply transforms sequentially. + + Args: + data (dict): A result dict contains the data to transform. + + Returns: + dict: Transformed data. + """ + + for t in self.transforms: + data = t(data) + if data is None: + return None + return data + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += f' {t}' + format_string += '\n)' + return format_string diff --git a/downstream/mmsegmentation/mmseg/datasets/pipelines/formating.py b/downstream/mmsegmentation/mmseg/datasets/pipelines/formating.py new file mode 100644 index 0000000..f6e53bf --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/pipelines/formating.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# flake8: noqa +import warnings + +from .formatting import * + +warnings.warn('DeprecationWarning: mmseg.datasets.pipelines.formating will be ' + 'deprecated in 2021, please replace it with ' + 'mmseg.datasets.pipelines.formatting.') diff --git a/downstream/mmsegmentation/mmseg/datasets/pipelines/formatting.py b/downstream/mmsegmentation/mmseg/datasets/pipelines/formatting.py new file mode 100644 index 0000000..4e057c1 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/pipelines/formatting.py @@ -0,0 +1,289 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections.abc import Sequence + +import mmcv +import numpy as np +import torch +from mmcv.parallel import DataContainer as DC + +from ..builder import PIPELINES + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. + + Args: + data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to + be converted. + """ + + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not mmcv.is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError(f'type {type(data)} cannot be converted to tensor.') + + +@PIPELINES.register_module() +class ToTensor(object): + """Convert some results to :obj:`torch.Tensor` by given keys. + + Args: + keys (Sequence[str]): Keys that need to be converted to Tensor. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Call function to convert data in results to :obj:`torch.Tensor`. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data converted + to :obj:`torch.Tensor`. 
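+
+        Examples:
+            >>> # a minimal, self-contained sketch of the conversion
+            >>> import numpy as np
+            >>> transform = ToTensor(keys=['gt_semantic_seg'])
+            >>> out = transform({'gt_semantic_seg': np.zeros((4, 4))})
+            >>> out['gt_semantic_seg'].shape
+            torch.Size([4, 4])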
+ """ + + for key in self.keys: + results[key] = to_tensor(results[key]) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class ImageToTensor(object): + """Convert image to :obj:`torch.Tensor` by given keys. + + The dimension order of input image is (H, W, C). The pipeline will convert + it to (C, H, W). If only 2 dimension (H, W) is given, the output would be + (1, H, W). + + Args: + keys (Sequence[str]): Key of images to be converted to Tensor. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Call function to convert image in results to :obj:`torch.Tensor` and + transpose the channel order. + + Args: + results (dict): Result dict contains the image data to convert. + + Returns: + dict: The result dict contains the image converted + to :obj:`torch.Tensor` and transposed to (C, H, W) order. + """ + + for key in self.keys: + img = results[key] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + results[key] = to_tensor(img.transpose(2, 0, 1)) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class Transpose(object): + """Transpose some results by given keys. + + Args: + keys (Sequence[str]): Keys of results to be transposed. + order (Sequence[int]): Order of transpose. + """ + + def __init__(self, keys, order): + self.keys = keys + self.order = order + + def __call__(self, results): + """Call function to convert image in results to :obj:`torch.Tensor` and + transpose the channel order. + + Args: + results (dict): Result dict contains the image data to convert. + + Returns: + dict: The result dict contains the image converted + to :obj:`torch.Tensor` and transposed to (C, H, W) order. + """ + + for key in self.keys: + results[key] = results[key].transpose(self.order) + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, order={self.order})' + + +@PIPELINES.register_module() +class ToDataContainer(object): + """Convert results to :obj:`mmcv.DataContainer` by given fields. + + Args: + fields (Sequence[dict]): Each field is a dict like + ``dict(key='xxx', **kwargs)``. The ``key`` in result will + be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. + Default: ``(dict(key='img', stack=True), + dict(key='gt_semantic_seg'))``. + """ + + def __init__(self, + fields=(dict(key='img', + stack=True), dict(key='gt_semantic_seg'))): + self.fields = fields + + def __call__(self, results): + """Call function to convert data in results to + :obj:`mmcv.DataContainer`. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data converted to + :obj:`mmcv.DataContainer`. + """ + + for field in self.fields: + field = field.copy() + key = field.pop('key') + results[key] = DC(results[key], **field) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(fields={self.fields})' + + +@PIPELINES.register_module() +class DefaultFormatBundle(object): + """Default formatting bundle. + + It simplifies the pipeline of formatting common fields, including "img" + and "gt_semantic_seg". These fields are formatted as follows. 
+ + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, + (3)to DataContainer (stack=True) + """ + + def __call__(self, results): + """Call function to transform and format common fields in results. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data that is formatted with + default bundle. + """ + + if 'img' in results: + img = results['img'] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 1)) + results['img'] = DC(to_tensor(img), stack=True) + if 'gt_semantic_seg' in results: + # convert to long + results['gt_semantic_seg'] = DC( + to_tensor(results['gt_semantic_seg'][None, + ...].astype(np.int64)), + stack=True) + return results + + def __repr__(self): + return self.__class__.__name__ + + +@PIPELINES.register_module() +class Collect(object): + """Collect data from the loader relevant to the specific task. + + This is usually the last stage of the data loader pipeline. Typically keys + is set to some subset of "img", "gt_semantic_seg". + + The "img_meta" item is always populated. The contents of the "img_meta" + dictionary depends on "meta_keys". By default this includes: + + - "img_shape": shape of the image input to the network as a tuple + (h, w, c). Note that images may be zero padded on the bottom/right + if the batch tensor is larger than this shape. + + - "scale_factor": a float indicating the preprocessing scale + + - "flip": a boolean indicating if image flip transform was used + + - "filename": path to the image file + + - "ori_shape": original shape of the image as a tuple (h, w, c) + + - "pad_shape": image shape after padding + + - "img_norm_cfg": a dict of normalization information: + - mean - per channel mean subtraction + - std - per channel std divisor + - to_rgb - bool indicating if bgr was converted to rgb + + Args: + keys (Sequence[str]): Keys of results to be collected in ``data``. + meta_keys (Sequence[str], optional): Meta keys to be converted to + ``mmcv.DataContainer`` and collected in ``data[img_metas]``. + Default: (``filename``, ``ori_filename``, ``ori_shape``, + ``img_shape``, ``pad_shape``, ``scale_factor``, ``flip``, + ``flip_direction``, ``img_norm_cfg``) + """ + + def __init__(self, + keys, + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction', 'img_norm_cfg')): + self.keys = keys + self.meta_keys = meta_keys + + def __call__(self, results): + """Call function to collect keys in results. The keys in ``meta_keys`` + will be converted to :obj:mmcv.DataContainer. + + Args: + results (dict): Result dict contains the data to collect. + + Returns: + dict: The result dict contains the following keys + - keys in``self.keys`` + - ``img_metas`` + """ + + data = {} + img_meta = {} + for key in self.meta_keys: + img_meta[key] = results[key] + data['img_metas'] = DC(img_meta, cpu_only=True) + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, meta_keys={self.meta_keys})' diff --git a/downstream/mmsegmentation/mmseg/datasets/pipelines/loading.py b/downstream/mmsegmentation/mmseg/datasets/pipelines/loading.py new file mode 100644 index 0000000..572e434 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/pipelines/loading.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
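+# The two transforms defined in this module are usually the first entries of a
+# data pipeline config, e.g. (sketch only; later transforms are omitted):
+#
+#     train_pipeline = [
+#         dict(type='LoadImageFromFile'),
+#         dict(type='LoadAnnotations', reduce_zero_label=False),
+#     ]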
+import os.path as osp + +import mmcv +import numpy as np + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class LoadImageFromFile(object): + """Load an image from file. + + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename"). Added or updated keys are "filename", "img", "img_shape", + "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), + "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:`mmcv.imfrombytes`. + Defaults to 'color'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default: + 'cv2' + """ + + def __init__(self, + to_float32=False, + color_type='color', + file_client_args=dict(backend='disk'), + imdecode_backend='cv2'): + self.to_float32 = to_float32 + self.color_type = color_type + self.file_client_args = file_client_args.copy() + self.file_client = None + self.imdecode_backend = imdecode_backend + + def __call__(self, results): + """Call functions to load image and get image meta information. + + Args: + results (dict): Result dict from :obj:`mmseg.CustomDataset`. + + Returns: + dict: The dict contains loaded image and meta information. + """ + + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + if results.get('img_prefix') is not None: + filename = osp.join(results['img_prefix'], + results['img_info']['filename']) + else: + filename = results['img_info']['filename'] + img_bytes = self.file_client.get(filename) + img = mmcv.imfrombytes( + img_bytes, flag=self.color_type, backend=self.imdecode_backend) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + results['ori_filename'] = results['img_info']['filename'] + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(to_float32={self.to_float32},' + repr_str += f"color_type='{self.color_type}'," + repr_str += f"imdecode_backend='{self.imdecode_backend}')" + return repr_str + + +@PIPELINES.register_module() +class LoadAnnotations(object): + """Load annotations for semantic segmentation. + + Args: + reduce_zero_label (bool): Whether reduce all label value by 1. + Usually used for datasets where 0 is background label. + Default: False. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + imdecode_backend (str): Backend for :func:`mmcv.imdecode`. 
Default: + 'pillow' + """ + + def __init__(self, + reduce_zero_label=False, + file_client_args=dict(backend='disk'), + imdecode_backend='pillow'): + self.reduce_zero_label = reduce_zero_label + self.file_client_args = file_client_args.copy() + self.file_client = None + self.imdecode_backend = imdecode_backend + + def __call__(self, results): + """Call function to load multiple types annotations. + + Args: + results (dict): Result dict from :obj:`mmseg.CustomDataset`. + + Returns: + dict: The dict contains loaded semantic segmentation annotations. + """ + + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + if results.get('seg_prefix', None) is not None: + filename = osp.join(results['seg_prefix'], + results['ann_info']['seg_map']) + else: + filename = results['ann_info']['seg_map'] + img_bytes = self.file_client.get(filename) + gt_semantic_seg = mmcv.imfrombytes( + img_bytes, flag='unchanged', + backend=self.imdecode_backend).squeeze().astype(np.uint8) + # modify if custom classes + if results.get('label_map', None) is not None: + # Add deep copy to solve bug of repeatedly + # replace `gt_semantic_seg`, which is reported in + # https://github.com/open-mmlab/mmsegmentation/pull/1445/ + gt_semantic_seg_copy = gt_semantic_seg.copy() + for old_id, new_id in results['label_map'].items(): + gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id + # reduce zero_label + if self.reduce_zero_label: + # avoid using underflow conversion + gt_semantic_seg[gt_semantic_seg == 0] = 255 + gt_semantic_seg = gt_semantic_seg - 1 + gt_semantic_seg[gt_semantic_seg == 254] = 255 + results['gt_semantic_seg'] = gt_semantic_seg + results['seg_fields'].append('gt_semantic_seg') + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(reduce_zero_label={self.reduce_zero_label},' + repr_str += f"imdecode_backend='{self.imdecode_backend}')" + return repr_str diff --git a/downstream/mmsegmentation/mmseg/datasets/pipelines/test_time_aug.py b/downstream/mmsegmentation/mmseg/datasets/pipelines/test_time_aug.py new file mode 100644 index 0000000..5c17cbb --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/pipelines/test_time_aug.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv + +from ..builder import PIPELINES +from .compose import Compose + + +@PIPELINES.register_module() +class MultiScaleFlipAug(object): + """Test-time augmentation with multiple scales and flipping. + + An example configuration is as followed: + + .. code-block:: + + img_scale=(2048, 1024), + img_ratios=[0.5, 1.0], + flip=True, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ] + + After MultiScaleFLipAug with above configuration, the results are wrapped + into lists of the same length as followed: + + .. code-block:: + + dict( + img=[...], + img_shape=[...], + scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)] + flip=[False, True, False, True] + ... + ) + + Args: + transforms (list[dict]): Transforms to apply in each augmentation. + img_scale (None | tuple | list[tuple]): Images scales for resizing. + img_ratios (float | list[float]): Image ratios for resizing + flip (bool): Whether apply flip augmentation. Default: False. 
+ flip_direction (str | list[str]): Flip augmentation directions, + options are "horizontal" and "vertical". If flip_direction is list, + multiple flip augmentations will be applied. + It has no effect when flip == False. Default: "horizontal". + """ + + def __init__(self, + transforms, + img_scale, + img_ratios=None, + flip=False, + flip_direction='horizontal'): + self.transforms = Compose(transforms) + if img_ratios is not None: + img_ratios = img_ratios if isinstance(img_ratios, + list) else [img_ratios] + assert mmcv.is_list_of(img_ratios, float) + if img_scale is None: + # mode 1: given img_scale=None and a range of image ratio + self.img_scale = None + assert mmcv.is_list_of(img_ratios, float) + elif isinstance(img_scale, tuple) and mmcv.is_list_of( + img_ratios, float): + assert len(img_scale) == 2 + # mode 2: given a scale and a range of image ratio + self.img_scale = [(int(img_scale[0] * ratio), + int(img_scale[1] * ratio)) + for ratio in img_ratios] + else: + # mode 3: given multiple scales + self.img_scale = img_scale if isinstance(img_scale, + list) else [img_scale] + assert mmcv.is_list_of(self.img_scale, tuple) or self.img_scale is None + self.flip = flip + self.img_ratios = img_ratios + self.flip_direction = flip_direction if isinstance( + flip_direction, list) else [flip_direction] + assert mmcv.is_list_of(self.flip_direction, str) + if not self.flip and self.flip_direction != ['horizontal']: + warnings.warn( + 'flip_direction has no effect when flip is set to False') + if (self.flip + and not any([t['type'] == 'RandomFlip' for t in transforms])): + warnings.warn( + 'flip has no effect when RandomFlip is not in transforms') + + def __call__(self, results): + """Call function to apply test time augment transforms on results. + + Args: + results (dict): Result dict contains the data to transform. + + Returns: + dict[str: list]: The augmented data, where each value is wrapped + into a list. + """ + + aug_data = [] + if self.img_scale is None and mmcv.is_list_of(self.img_ratios, float): + h, w = results['img'].shape[:2] + img_scale = [(int(w * ratio), int(h * ratio)) + for ratio in self.img_ratios] + else: + img_scale = self.img_scale + flip_aug = [False, True] if self.flip else [False] + for scale in img_scale: + for flip in flip_aug: + for direction in self.flip_direction: + _results = results.copy() + _results['scale'] = scale + _results['flip'] = flip + _results['flip_direction'] = direction + data = self.transforms(_results) + aug_data.append(data) + # list of dict to dict of list + aug_data_dict = {key: [] for key in aug_data[0]} + for data in aug_data: + for key, val in data.items(): + aug_data_dict[key].append(val) + return aug_data_dict + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(transforms={self.transforms}, ' + repr_str += f'img_scale={self.img_scale}, flip={self.flip})' + repr_str += f'flip_direction={self.flip_direction}' + return repr_str diff --git a/downstream/mmsegmentation/mmseg/datasets/pipelines/transforms.py b/downstream/mmsegmentation/mmseg/datasets/pipelines/transforms.py new file mode 100644 index 0000000..4fdfaef --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/pipelines/transforms.py @@ -0,0 +1,1546 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import mmcv +import numpy as np +from mmcv.utils import deprecated_api_warning, is_tuple_of +from numpy import random + +from ..builder import PIPELINES + +@PIPELINES.register_module() +class AlignedResize(object): + """Resize images & seg. 
Align + """ + + def __init__(self, + img_scale=None, + multiscale_mode='range', + ratio_range=None, + keep_ratio=True, + size_divisor=32): + if img_scale is None: + self.img_scale = None + else: + if isinstance(img_scale, list): + self.img_scale = img_scale + else: + self.img_scale = [img_scale] + assert mmcv.is_list_of(self.img_scale, tuple) + + if ratio_range is not None: + # mode 1: given img_scale=None and a range of image ratio + # mode 2: given a scale and a range of image ratio + assert self.img_scale is None or len(self.img_scale) == 1 + else: + # mode 3 and 4: given multiple scales or a range of scales + assert multiscale_mode in ['value', 'range'] + + self.multiscale_mode = multiscale_mode + self.ratio_range = ratio_range + self.keep_ratio = keep_ratio + self.size_divisor = size_divisor + + @staticmethod + def random_select(img_scales): + """Randomly select an img_scale from given candidates. + Args: + img_scales (list[tuple]): Images scales for selection. + Returns: + (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, + where ``img_scale`` is the selected image scale and + ``scale_idx`` is the selected index in the given candidates. + """ + + assert mmcv.is_list_of(img_scales, tuple) + scale_idx = np.random.randint(len(img_scales)) + img_scale = img_scales[scale_idx] + return img_scale, scale_idx + + @staticmethod + def random_sample(img_scales): + """Randomly sample an img_scale when ``multiscale_mode=='range'``. + Args: + img_scales (list[tuple]): Images scale range for sampling. + There must be two tuples in img_scales, which specify the lower + and uper bound of image scales. + Returns: + (tuple, None): Returns a tuple ``(img_scale, None)``, where + ``img_scale`` is sampled scale and None is just a placeholder + to be consistent with :func:`random_select`. + """ + + assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 + img_scale_long = [max(s) for s in img_scales] + img_scale_short = [min(s) for s in img_scales] + long_edge = np.random.randint( + min(img_scale_long), + max(img_scale_long) + 1) + short_edge = np.random.randint( + min(img_scale_short), + max(img_scale_short) + 1) + img_scale = (long_edge, short_edge) + return img_scale, None + + @staticmethod + def random_sample_ratio(img_scale, ratio_range): + """Randomly sample an img_scale when ``ratio_range`` is specified. + A ratio will be randomly sampled from the range specified by + ``ratio_range``. Then it would be multiplied with ``img_scale`` to + generate sampled scale. + Args: + img_scale (tuple): Images scale base to multiply with ratio. + ratio_range (tuple[float]): The minimum and maximum ratio to scale + the ``img_scale``. + Returns: + (tuple, None): Returns a tuple ``(scale, None)``, where + ``scale`` is sampled ratio multiplied with ``img_scale`` and + None is just a placeholder to be consistent with + :func:`random_select`. + """ + + assert isinstance(img_scale, tuple) and len(img_scale) == 2 + min_ratio, max_ratio = ratio_range + assert min_ratio <= max_ratio + ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio + scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) + return scale, None + + def _random_scale(self, results): + """Randomly sample an img_scale according to ``ratio_range`` and + ``multiscale_mode``. + If ``ratio_range`` is specified, a ratio will be sampled and be + multiplied with ``img_scale``. + If multiple scales are specified by ``img_scale``, a scale will be + sampled according to ``multiscale_mode``. 
+ Otherwise, single scale will be used. + Args: + results (dict): Result dict from :obj:`dataset`. + Returns: + dict: Two new keys 'scale` and 'scale_idx` are added into + ``results``, which would be used by subsequent pipelines. + """ + + if self.ratio_range is not None: + if self.img_scale is None: + h, w = results['img'].shape[:2] + scale, scale_idx = self.random_sample_ratio((w, h), + self.ratio_range) + else: + scale, scale_idx = self.random_sample_ratio( + self.img_scale[0], self.ratio_range) + elif len(self.img_scale) == 1: + scale, scale_idx = self.img_scale[0], 0 + elif self.multiscale_mode == 'range': + scale, scale_idx = self.random_sample(self.img_scale) + elif self.multiscale_mode == 'value': + scale, scale_idx = self.random_select(self.img_scale) + else: + raise NotImplementedError + + results['scale'] = scale + results['scale_idx'] = scale_idx + + def _align(self, img, size_divisor, interpolation=None): + align_h = int(np.ceil(img.shape[0] / size_divisor)) * size_divisor + align_w = int(np.ceil(img.shape[1] / size_divisor)) * size_divisor + if interpolation == None: + img = mmcv.imresize(img, (align_w, align_h)) + else: + img = mmcv.imresize(img, (align_w, align_h), interpolation=interpolation) + return img + + def _resize_img(self, results): + """Resize images with ``results['scale']``.""" + if self.keep_ratio: + img, scale_factor = mmcv.imrescale( + results['img'], results['scale'], return_scale=True) + #### align #### + img = self._align(img, self.size_divisor) + # the w_scale and h_scale has minor difference + # a real fix should be done in the mmcv.imrescale in the future + new_h, new_w = img.shape[:2] + h, w = results['img'].shape[:2] + w_scale = new_w / w + h_scale = new_h / h + else: + img, w_scale, h_scale = mmcv.imresize( + results['img'], results['scale'], return_scale=True) + + h, w = img.shape[:2] + assert int(np.ceil(h / self.size_divisor)) * self.size_divisor == h and \ + int(np.ceil(w / self.size_divisor)) * self.size_divisor == w, \ + "img size not align. h:{} w:{}".format(h,w) + scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], + dtype=np.float32) + results['img'] = img + results['img_shape'] = img.shape + results['pad_shape'] = img.shape # in case that there is no padding + results['scale_factor'] = scale_factor + results['keep_ratio'] = self.keep_ratio + + def _resize_seg(self, results): + """Resize semantic segmentation map with ``results['scale']``.""" + for key in results.get('seg_fields', []): + if self.keep_ratio: + gt_seg = mmcv.imrescale( + results[key], results['scale'], interpolation='nearest') + gt_seg = self._align(gt_seg, self.size_divisor, interpolation='nearest') + else: + gt_seg = mmcv.imresize( + results[key], results['scale'], interpolation='nearest') + h, w = gt_seg.shape[:2] + assert int(np.ceil(h / self.size_divisor)) * self.size_divisor == h and \ + int(np.ceil(w / self.size_divisor)) * self.size_divisor == w, \ + "gt_seg size not align. h:{} w:{}".format(h, w) + results[key] = gt_seg + + def __call__(self, results): + """Call function to resize images, bounding boxes, masks, semantic + segmentation map. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', + 'keep_ratio' keys are added into result dict. 
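+
+        Examples:
+            >>> # minimal sketch: both spatial dims of the output are
+            >>> # multiples of ``size_divisor`` (32 by default)
+            >>> import numpy as np
+            >>> aligned = AlignedResize(img_scale=(100, 60), keep_ratio=True)
+            >>> out = aligned(dict(img=np.zeros((60, 100, 3), dtype=np.uint8)))
+            >>> out['img'].shape
+            (64, 96, 3)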
+ """ + + if 'scale' not in results: + self._random_scale(results) + self._resize_img(results) + self._resize_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(img_scale={self.img_scale}, ' + f'multiscale_mode={self.multiscale_mode}, ' + f'ratio_range={self.ratio_range}, ' + f'keep_ratio={self.keep_ratio})') + return repr_str + + +@PIPELINES.register_module() +class ResizeToMultiple(object): + """Resize images & seg to multiple of divisor. + + Args: + size_divisor (int): images and gt seg maps need to resize to multiple + of size_divisor. Default: 32. + interpolation (str, optional): The interpolation mode of image resize. + Default: None + """ + + def __init__(self, size_divisor=32, interpolation=None): + self.size_divisor = size_divisor + self.interpolation = interpolation + + def __call__(self, results): + """Call function to resize images, semantic segmentation map to + multiple of size divisor. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img_shape', 'pad_shape' keys are updated. + """ + # Align image to multiple of size divisor. + img = results['img'] + img = mmcv.imresize_to_multiple( + img, + self.size_divisor, + scale_factor=1, + interpolation=self.interpolation + if self.interpolation else 'bilinear') + + results['img'] = img + results['img_shape'] = img.shape + results['pad_shape'] = img.shape + + # Align segmentation map to multiple of size divisor. + for key in results.get('seg_fields', []): + gt_seg = results[key] + gt_seg = mmcv.imresize_to_multiple( + gt_seg, + self.size_divisor, + scale_factor=1, + interpolation='nearest') + results[key] = gt_seg + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(size_divisor={self.size_divisor}, ' + f'interpolation={self.interpolation})') + return repr_str + + +@PIPELINES.register_module() +class Resize(object): + """Resize images & seg. + + This transform resizes the input image to some scale. If the input dict + contains the key "scale", then the scale in the input dict is used, + otherwise the specified scale in the init method is used. + + ``img_scale`` can be None, a tuple (single-scale) or a list of tuple + (multi-scale). There are 4 multiscale modes: + + - ``ratio_range is not None``: + 1. When img_scale is None, img_scale is the shape of image in results + (img_scale = results['img'].shape[:2]) and the image is resized based + on the original size. (mode 1) + 2. When img_scale is a tuple (single-scale), randomly sample a ratio from + the ratio range and multiply it with the image scale. (mode 2) + + - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a + scale from the a range. (mode 3) + + - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a + scale from multiple scales. (mode 4) + + Args: + img_scale (tuple or list[tuple]): Images scales for resizing. + Default:None. + multiscale_mode (str): Either "range" or "value". + Default: 'range' + ratio_range (tuple[float]): (min_ratio, max_ratio). + Default: None + keep_ratio (bool): Whether to keep the aspect ratio when resizing the + image. Default: True + min_size (int, optional): The minimum size for input and the shape + of the image and seg map will not be less than ``min_size``. + As the shape of model input is fixed like 'SETR' and 'BEiT'. + Following the setting in these models, resized images must be + bigger than the crop size in ``slide_inference``. 
Default: None + """ + + def __init__(self, + img_scale=None, + multiscale_mode='range', + ratio_range=None, + keep_ratio=True, + min_size=None): + if img_scale is None: + self.img_scale = None + else: + if isinstance(img_scale, list): + self.img_scale = img_scale + else: + self.img_scale = [img_scale] + assert mmcv.is_list_of(self.img_scale, tuple) + + if ratio_range is not None: + # mode 1: given img_scale=None and a range of image ratio + # mode 2: given a scale and a range of image ratio + assert self.img_scale is None or len(self.img_scale) == 1 + else: + # mode 3 and 4: given multiple scales or a range of scales + assert multiscale_mode in ['value', 'range'] + + self.multiscale_mode = multiscale_mode + self.ratio_range = ratio_range + self.keep_ratio = keep_ratio + self.min_size = min_size + + @staticmethod + def random_select(img_scales): + """Randomly select an img_scale from given candidates. + + Args: + img_scales (list[tuple]): Images scales for selection. + + Returns: + (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, + where ``img_scale`` is the selected image scale and + ``scale_idx`` is the selected index in the given candidates. + """ + + assert mmcv.is_list_of(img_scales, tuple) + scale_idx = np.random.randint(len(img_scales)) + img_scale = img_scales[scale_idx] + return img_scale, scale_idx + + @staticmethod + def random_sample(img_scales): + """Randomly sample an img_scale when ``multiscale_mode=='range'``. + + Args: + img_scales (list[tuple]): Images scale range for sampling. + There must be two tuples in img_scales, which specify the lower + and upper bound of image scales. + + Returns: + (tuple, None): Returns a tuple ``(img_scale, None)``, where + ``img_scale`` is sampled scale and None is just a placeholder + to be consistent with :func:`random_select`. + """ + + assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 + img_scale_long = [max(s) for s in img_scales] + img_scale_short = [min(s) for s in img_scales] + long_edge = np.random.randint( + min(img_scale_long), + max(img_scale_long) + 1) + short_edge = np.random.randint( + min(img_scale_short), + max(img_scale_short) + 1) + img_scale = (long_edge, short_edge) + return img_scale, None + + @staticmethod + def random_sample_ratio(img_scale, ratio_range): + """Randomly sample an img_scale when ``ratio_range`` is specified. + + A ratio will be randomly sampled from the range specified by + ``ratio_range``. Then it would be multiplied with ``img_scale`` to + generate sampled scale. + + Args: + img_scale (tuple): Images scale base to multiply with ratio. + ratio_range (tuple[float]): The minimum and maximum ratio to scale + the ``img_scale``. + + Returns: + (tuple, None): Returns a tuple ``(scale, None)``, where + ``scale`` is sampled ratio multiplied with ``img_scale`` and + None is just a placeholder to be consistent with + :func:`random_select`. + """ + + assert isinstance(img_scale, tuple) and len(img_scale) == 2 + min_ratio, max_ratio = ratio_range + assert min_ratio <= max_ratio + ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio + scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) + return scale, None + + def _random_scale(self, results): + """Randomly sample an img_scale according to ``ratio_range`` and + ``multiscale_mode``. + + If ``ratio_range`` is specified, a ratio will be sampled and be + multiplied with ``img_scale``. + If multiple scales are specified by ``img_scale``, a scale will be + sampled according to ``multiscale_mode``. 
+ Otherwise, single scale will be used. + + Args: + results (dict): Result dict from :obj:`dataset`. + + Returns: + dict: Two new keys 'scale` and 'scale_idx` are added into + ``results``, which would be used by subsequent pipelines. + """ + + if self.ratio_range is not None: + if self.img_scale is None: + h, w = results['img'].shape[:2] + scale, scale_idx = self.random_sample_ratio((w, h), + self.ratio_range) + else: + scale, scale_idx = self.random_sample_ratio( + self.img_scale[0], self.ratio_range) + elif len(self.img_scale) == 1: + scale, scale_idx = self.img_scale[0], 0 + elif self.multiscale_mode == 'range': + scale, scale_idx = self.random_sample(self.img_scale) + elif self.multiscale_mode == 'value': + scale, scale_idx = self.random_select(self.img_scale) + else: + raise NotImplementedError + + results['scale'] = scale + results['scale_idx'] = scale_idx + + def _resize_img(self, results): + """Resize images with ``results['scale']``.""" + if self.keep_ratio: + if self.min_size is not None: + # TODO: Now 'min_size' is an 'int' which means the minimum + # shape of images is (min_size, min_size, 3). 'min_size' + # with tuple type will be supported, i.e. the width and + # height are not equal. + if min(results['scale']) < self.min_size: + new_short = self.min_size + else: + new_short = min(results['scale']) + + h, w = results['img'].shape[:2] + if h > w: + new_h, new_w = new_short * h / w, new_short + else: + new_h, new_w = new_short, new_short * w / h + results['scale'] = (new_h, new_w) + + img, scale_factor = mmcv.imrescale( + results['img'], results['scale'], return_scale=True) + # the w_scale and h_scale has minor difference + # a real fix should be done in the mmcv.imrescale in the future + new_h, new_w = img.shape[:2] + h, w = results['img'].shape[:2] + w_scale = new_w / w + h_scale = new_h / h + else: + img, w_scale, h_scale = mmcv.imresize( + results['img'], results['scale'], return_scale=True) + scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], + dtype=np.float32) + results['img'] = img + results['img_shape'] = img.shape + results['pad_shape'] = img.shape # in case that there is no padding + results['scale_factor'] = scale_factor + results['keep_ratio'] = self.keep_ratio + + def _resize_seg(self, results): + """Resize semantic segmentation map with ``results['scale']``.""" + for key in results.get('seg_fields', []): + if self.keep_ratio: + gt_seg = mmcv.imrescale( + results[key], results['scale'], interpolation='nearest') + else: + gt_seg = mmcv.imresize( + results[key], results['scale'], interpolation='nearest') + results[key] = gt_seg + + def __call__(self, results): + """Call function to resize images, bounding boxes, masks, semantic + segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', + 'keep_ratio' keys are added into result dict. + """ + + if 'scale' not in results: + self._random_scale(results) + self._resize_img(results) + self._resize_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(img_scale={self.img_scale}, ' + f'multiscale_mode={self.multiscale_mode}, ' + f'ratio_range={self.ratio_range}, ' + f'keep_ratio={self.keep_ratio})') + return repr_str + + +@PIPELINES.register_module() +class RandomFlip(object): + """Flip the image & seg. + + If the input dict contains the key "flip", then the flag will be used, + otherwise it will be randomly decided by a ratio specified in the init + method. 
+ + Args: + prob (float, optional): The flipping probability. Default: None. + direction(str, optional): The flipping direction. Options are + 'horizontal' and 'vertical'. Default: 'horizontal'. + """ + + @deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip') + def __init__(self, prob=None, direction='horizontal'): + self.prob = prob + self.direction = direction + if prob is not None: + assert prob >= 0 and prob <= 1 + assert direction in ['horizontal', 'vertical'] + + def __call__(self, results): + """Call function to flip bounding boxes, masks, semantic segmentation + maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction' keys are added into + result dict. + """ + + if 'flip' not in results: + flip = True if np.random.rand() < self.prob else False + results['flip'] = flip + if 'flip_direction' not in results: + results['flip_direction'] = self.direction + if results['flip']: + # flip image + results['img'] = mmcv.imflip( + results['img'], direction=results['flip_direction']) + + # flip segs + for key in results.get('seg_fields', []): + # use copy() to make numpy stride positive + results[key] = mmcv.imflip( + results[key], direction=results['flip_direction']).copy() + return results + + def __repr__(self): + return self.__class__.__name__ + f'(prob={self.prob})' + + +@PIPELINES.register_module() +class Pad(object): + """Pad the image & mask. + + There are two padding modes: (1) pad to a fixed size and (2) pad to the + minimum size that is divisible by some number. + Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", + + Args: + size (tuple, optional): Fixed padding size. + size_divisor (int, optional): The divisor of padded size. + pad_val (float, optional): Padding value. Default: 0. + seg_pad_val (float, optional): Padding value of segmentation map. + Default: 255. + """ + + def __init__(self, + size=None, + size_divisor=None, + pad_val=0, + seg_pad_val=255): + self.size = size + self.size_divisor = size_divisor + self.pad_val = pad_val + self.seg_pad_val = seg_pad_val + # only one of size and size_divisor should be valid + assert size is not None or size_divisor is not None + assert size is None or size_divisor is None + + def _pad_img(self, results): + """Pad images according to ``self.size``.""" + if self.size is not None: + padded_img = mmcv.impad( + results['img'], shape=self.size, pad_val=self.pad_val) + elif self.size_divisor is not None: + padded_img = mmcv.impad_to_multiple( + results['img'], self.size_divisor, pad_val=self.pad_val) + results['img'] = padded_img + results['pad_shape'] = padded_img.shape + results['pad_fixed_size'] = self.size + results['pad_size_divisor'] = self.size_divisor + + def _pad_seg(self, results): + """Pad masks according to ``results['pad_shape']``.""" + for key in results.get('seg_fields', []): + results[key] = mmcv.impad( + results[key], + shape=results['pad_shape'][:2], + pad_val=self.seg_pad_val) + + def __call__(self, results): + """Call function to pad images, masks, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Updated result dict. + """ + + self._pad_img(results) + self._pad_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, size_divisor={self.size_divisor}, ' \ + f'pad_val={self.pad_val})' + return repr_str + + +@PIPELINES.register_module() +class Normalize(object): + """Normalize the image. 
+ + Added key is "img_norm_cfg". + + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + def __call__(self, results): + """Call function to normalize images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Normalized results, 'img_norm_cfg' key is added into + result dict. + """ + + results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std, + self.to_rgb) + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={self.mean}, std={self.std}, to_rgb=' \ + f'{self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class Rerange(object): + """Rerange the image pixel value. + + Args: + min_value (float or int): Minimum value of the reranged image. + Default: 0. + max_value (float or int): Maximum value of the reranged image. + Default: 255. + """ + + def __init__(self, min_value=0, max_value=255): + assert isinstance(min_value, float) or isinstance(min_value, int) + assert isinstance(max_value, float) or isinstance(max_value, int) + assert min_value < max_value + self.min_value = min_value + self.max_value = max_value + + def __call__(self, results): + """Call function to rerange images. + + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Reranged results. + """ + + img = results['img'] + img_min_value = np.min(img) + img_max_value = np.max(img) + + assert img_min_value < img_max_value + # rerange to [0, 1] + img = (img - img_min_value) / (img_max_value - img_min_value) + # rerange to [min_value, max_value] + img = img * (self.max_value - self.min_value) + self.min_value + results['img'] = img + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(min_value={self.min_value}, max_value={self.max_value})' + return repr_str + + +@PIPELINES.register_module() +class CLAHE(object): + """Use CLAHE method to process the image. + + See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J]. + Graphics Gems, 1994:474-485.` for more information. + + Args: + clip_limit (float): Threshold for contrast limiting. Default: 40.0. + tile_grid_size (tuple[int]): Size of grid for histogram equalization. + Input image will be divided into equally sized rectangular tiles. + It defines the number of tiles in row and column. Default: (8, 8). + """ + + def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)): + assert isinstance(clip_limit, (float, int)) + self.clip_limit = clip_limit + assert is_tuple_of(tile_grid_size, int) + assert len(tile_grid_size) == 2 + self.tile_grid_size = tile_grid_size + + def __call__(self, results): + """Call function to Use CLAHE method process images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Processed results. 
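The `Rerange` transform above is a two-step linear map: normalize the image to `[0, 1]` by its own min/max, then scale and shift into `[min_value, max_value]`. A compact NumPy sketch of the same mapping (the function name is illustrative):

```python
import numpy as np

def rerange(img, min_value=0.0, max_value=255.0):
    """Linearly map ``img`` onto [min_value, max_value], mirroring Rerange.__call__."""
    img = img.astype(np.float32)
    lo, hi = img.min(), img.max()
    assert lo < hi, 'a constant image cannot be reranged'
    img = (img - lo) / (hi - lo)                        # -> [0, 1]
    return img * (max_value - min_value) + min_value    # -> [min_value, max_value]

x = np.array([[10., 20.], [30., 50.]])
y = rerange(x, 0, 255)
assert y.min() == 0 and y.max() == 255
```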
+ """ + + for i in range(results['img'].shape[2]): + results['img'][:, :, i] = mmcv.clahe( + np.array(results['img'][:, :, i], dtype=np.uint8), + self.clip_limit, self.tile_grid_size) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(clip_limit={self.clip_limit}, '\ + f'tile_grid_size={self.tile_grid_size})' + return repr_str + + +@PIPELINES.register_module() +class RandomCrop(object): + """Random crop the image & seg. + + Args: + crop_size (tuple): Expected size after cropping, (h, w). + cat_max_ratio (float): The maximum ratio that single category could + occupy. + """ + + def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255): + assert crop_size[0] > 0 and crop_size[1] > 0 + self.crop_size = crop_size + self.cat_max_ratio = cat_max_ratio + self.ignore_index = ignore_index + + def get_crop_bbox(self, img): + """Randomly get a crop bounding box.""" + margin_h = max(img.shape[0] - self.crop_size[0], 0) + margin_w = max(img.shape[1] - self.crop_size[1], 0) + offset_h = np.random.randint(0, margin_h + 1) + offset_w = np.random.randint(0, margin_w + 1) + crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0] + crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1] + + return crop_y1, crop_y2, crop_x1, crop_x2 + + def crop(self, img, crop_bbox): + """Crop from ``img``""" + crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox + img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] + return img + + def __call__(self, results): + """Call function to randomly crop images, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. + """ + + img = results['img'] + crop_bbox = self.get_crop_bbox(img) + if self.cat_max_ratio < 1.: + # Repeat 10 times + for _ in range(10): + seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox) + labels, cnt = np.unique(seg_temp, return_counts=True) + cnt = cnt[labels != self.ignore_index] + if len(cnt) > 1 and np.max(cnt) / np.sum( + cnt) < self.cat_max_ratio: + break + crop_bbox = self.get_crop_bbox(img) + + # crop the image + img = self.crop(img, crop_bbox) + img_shape = img.shape + results['img'] = img + results['img_shape'] = img_shape + + # crop semantic seg + for key in results.get('seg_fields', []): + results[key] = self.crop(results[key], crop_bbox) + + return results + + def __repr__(self): + return self.__class__.__name__ + f'(crop_size={self.crop_size})' + + +@PIPELINES.register_module() +class RandomRotate(object): + """Rotate the image & seg. + + Args: + prob (float): The rotation probability. + degree (float, tuple[float]): Range of degrees to select from. If + degree is a number instead of tuple like (min, max), + the range of degree will be (``-degree``, ``+degree``) + pad_val (float, optional): Padding value of image. Default: 0. + seg_pad_val (float, optional): Padding value of segmentation map. + Default: 255. + center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If not specified, the center of the image will be + used. Default: None. + auto_bound (bool): Whether to adjust the image size to cover the whole + rotated image. 
Default: False + """ + + def __init__(self, + prob, + degree, + pad_val=0, + seg_pad_val=255, + center=None, + auto_bound=False): + self.prob = prob + assert prob >= 0 and prob <= 1 + if isinstance(degree, (float, int)): + assert degree > 0, f'degree {degree} should be positive' + self.degree = (-degree, degree) + else: + self.degree = degree + assert len(self.degree) == 2, f'degree {self.degree} should be a ' \ + f'tuple of (min, max)' + self.pal_val = pad_val + self.seg_pad_val = seg_pad_val + self.center = center + self.auto_bound = auto_bound + + def __call__(self, results): + """Call function to rotate image, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Rotated results. + """ + + rotate = True if np.random.rand() < self.prob else False + degree = np.random.uniform(min(*self.degree), max(*self.degree)) + if rotate: + # rotate image + results['img'] = mmcv.imrotate( + results['img'], + angle=degree, + border_value=self.pal_val, + center=self.center, + auto_bound=self.auto_bound) + + # rotate segs + for key in results.get('seg_fields', []): + results[key] = mmcv.imrotate( + results[key], + angle=degree, + border_value=self.seg_pad_val, + center=self.center, + auto_bound=self.auto_bound, + interpolation='nearest') + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' \ + f'degree={self.degree}, ' \ + f'pad_val={self.pal_val}, ' \ + f'seg_pad_val={self.seg_pad_val}, ' \ + f'center={self.center}, ' \ + f'auto_bound={self.auto_bound})' + return repr_str + + +@PIPELINES.register_module() +class RGB2Gray(object): + """Convert RGB image to grayscale image. + + This transform calculate the weighted mean of input image channels with + ``weights`` and then expand the channels to ``out_channels``. When + ``out_channels`` is None, the number of output channels is the same as + input channels. + + Args: + out_channels (int): Expected number of output channels after + transforming. Default: None. + weights (tuple[float]): The weights to calculate the weighted mean. + Default: (0.299, 0.587, 0.114). + """ + + def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)): + assert out_channels is None or out_channels > 0 + self.out_channels = out_channels + assert isinstance(weights, tuple) + for item in weights: + assert isinstance(item, (float, int)) + self.weights = weights + + def __call__(self, results): + """Call function to convert RGB image to grayscale image. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with grayscale image. + """ + img = results['img'] + assert len(img.shape) == 3 + assert img.shape[2] == len(self.weights) + weights = np.array(self.weights).reshape((1, 1, -1)) + img = (img * weights).sum(2, keepdims=True) + if self.out_channels is None: + img = img.repeat(weights.shape[2], axis=2) + else: + img = img.repeat(self.out_channels, axis=2) + + results['img'] = img + results['img_shape'] = img.shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(out_channels={self.out_channels}, ' \ + f'weights={self.weights})' + return repr_str + + +@PIPELINES.register_module() +class AdjustGamma(object): + """Using gamma correction to process the image. + + Args: + gamma (float or int): Gamma value used in gamma correction. + Default: 1.0. 
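`AdjustGamma` above precomputes a 256-entry lookup table `i -> (i / 255) ** (1 / gamma) * 255` at construction time and applies it to `uint8` images via `mmcv.lut_transform`. A standalone sketch of the same table, applied by plain NumPy indexing as a stand-in for the LUT call (helper names are illustrative):

```python
import numpy as np

def gamma_lut(gamma=2.2):
    """256-entry LUT implementing (i / 255) ** (1 / gamma) * 255, as in AdjustGamma."""
    inv_gamma = 1.0 / gamma
    return np.array([(i / 255.0) ** inv_gamma * 255 for i in range(256)],
                    dtype=np.uint8)

def apply_gamma(img_u8, gamma=2.2):
    """Apply the LUT by direct indexing (stand-in for mmcv.lut_transform on uint8)."""
    return gamma_lut(gamma)[img_u8]

img = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
out = apply_gamma(img, gamma=2.2)
assert out.shape == img.shape and out.dtype == np.uint8
```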
+ """ + + def __init__(self, gamma=1.0): + assert isinstance(gamma, float) or isinstance(gamma, int) + assert gamma > 0 + self.gamma = gamma + inv_gamma = 1.0 / gamma + self.table = np.array([(i / 255.0)**inv_gamma * 255 + for i in np.arange(256)]).astype('uint8') + + def __call__(self, results): + """Call function to process the image with gamma correction. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Processed results. + """ + + results['img'] = mmcv.lut_transform( + np.array(results['img'], dtype=np.uint8), self.table) + + return results + + def __repr__(self): + return self.__class__.__name__ + f'(gamma={self.gamma})' + + +@PIPELINES.register_module() +class SegRescale(object): + """Rescale semantic segmentation maps. + + Args: + scale_factor (float): The scale factor of the final output. + """ + + def __init__(self, scale_factor=1): + self.scale_factor = scale_factor + + def __call__(self, results): + """Call function to scale the semantic segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with semantic segmentation map scaled. + """ + for key in results.get('seg_fields', []): + if self.scale_factor != 1: + results[key] = mmcv.imrescale( + results[key], self.scale_factor, interpolation='nearest') + return results + + def __repr__(self): + return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' + + +@PIPELINES.register_module() +class PhotoMetricDistortion(object): + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + + Args: + brightness_delta (int): delta of brightness. + contrast_range (tuple): range of contrast. + saturation_range (tuple): range of saturation. + hue_delta (int): delta of hue. 
+ """ + + def __init__(self, + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18): + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + def convert(self, img, alpha=1, beta=0): + """Multiple with alpha and add beat with clip.""" + img = img.astype(np.float32) * alpha + beta + img = np.clip(img, 0, 255) + return img.astype(np.uint8) + + def brightness(self, img): + """Brightness distortion.""" + if random.randint(2): + return self.convert( + img, + beta=random.uniform(-self.brightness_delta, + self.brightness_delta)) + return img + + def contrast(self, img): + """Contrast distortion.""" + if random.randint(2): + return self.convert( + img, + alpha=random.uniform(self.contrast_lower, self.contrast_upper)) + return img + + def saturation(self, img): + """Saturation distortion.""" + if random.randint(2): + img = mmcv.bgr2hsv(img) + img[:, :, 1] = self.convert( + img[:, :, 1], + alpha=random.uniform(self.saturation_lower, + self.saturation_upper)) + img = mmcv.hsv2bgr(img) + return img + + def hue(self, img): + """Hue distortion.""" + if random.randint(2): + img = mmcv.bgr2hsv(img) + img[:, :, + 0] = (img[:, :, 0].astype(int) + + random.randint(-self.hue_delta, self.hue_delta)) % 180 + img = mmcv.hsv2bgr(img) + return img + + def __call__(self, results): + """Call function to perform photometric distortion on images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + img = results['img'] + # random brightness + img = self.brightness(img) + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + mode = random.randint(2) + if mode == 1: + img = self.contrast(img) + + # random saturation + img = self.saturation(img) + + # random hue + img = self.hue(img) + + # random contrast + if mode == 0: + img = self.contrast(img) + + results['img'] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(brightness_delta={self.brightness_delta}, ' + f'contrast_range=({self.contrast_lower}, ' + f'{self.contrast_upper}), ' + f'saturation_range=({self.saturation_lower}, ' + f'{self.saturation_upper}), ' + f'hue_delta={self.hue_delta})') + return repr_str + + +@PIPELINES.register_module() +class RandomCutOut(object): + """CutOut operation. + + Randomly drop some regions of image used in + `Cutout `_. + Args: + prob (float): cutout probability. + n_holes (int | tuple[int, int]): Number of regions to be dropped. + If it is given as a list, number of holes will be randomly + selected from the closed interval [`n_holes[0]`, `n_holes[1]`]. + cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate + shape of dropped regions. It can be `tuple[int, int]` to use a + fixed cutout shape, or `list[tuple[int, int]]` to randomly choose + shape from the list. + cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The + candidate ratio of dropped regions. It can be `tuple[float, float]` + to use a fixed ratio or `list[tuple[float, float]]` to randomly + choose ratio from the list. Please note that `cutout_shape` + and `cutout_ratio` cannot be both given at the same time. + fill_in (tuple[float, float, float] | tuple[int, int, int]): The value + of pixel to fill in the dropped regions. Default: (0, 0, 0). 
+ seg_fill_in (int): The labels of pixel to fill in the dropped regions. + If seg_fill_in is None, skip. Default: None. + """ + + def __init__(self, + prob, + n_holes, + cutout_shape=None, + cutout_ratio=None, + fill_in=(0, 0, 0), + seg_fill_in=None): + + assert 0 <= prob and prob <= 1 + assert (cutout_shape is None) ^ (cutout_ratio is None), \ + 'Either cutout_shape or cutout_ratio should be specified.' + assert (isinstance(cutout_shape, (list, tuple)) + or isinstance(cutout_ratio, (list, tuple))) + if isinstance(n_holes, tuple): + assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1] + else: + n_holes = (n_holes, n_holes) + if seg_fill_in is not None: + assert (isinstance(seg_fill_in, int) and 0 <= seg_fill_in + and seg_fill_in <= 255) + self.prob = prob + self.n_holes = n_holes + self.fill_in = fill_in + self.seg_fill_in = seg_fill_in + self.with_ratio = cutout_ratio is not None + self.candidates = cutout_ratio if self.with_ratio else cutout_shape + if not isinstance(self.candidates, list): + self.candidates = [self.candidates] + + def __call__(self, results): + """Call function to drop some regions of image.""" + cutout = True if np.random.rand() < self.prob else False + if cutout: + h, w, c = results['img'].shape + n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1) + for _ in range(n_holes): + x1 = np.random.randint(0, w) + y1 = np.random.randint(0, h) + index = np.random.randint(0, len(self.candidates)) + if not self.with_ratio: + cutout_w, cutout_h = self.candidates[index] + else: + cutout_w = int(self.candidates[index][0] * w) + cutout_h = int(self.candidates[index][1] * h) + + x2 = np.clip(x1 + cutout_w, 0, w) + y2 = np.clip(y1 + cutout_h, 0, h) + results['img'][y1:y2, x1:x2, :] = self.fill_in + + if self.seg_fill_in is not None: + for key in results.get('seg_fields', []): + results[key][y1:y2, x1:x2] = self.seg_fill_in + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'n_holes={self.n_holes}, ' + repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio + else f'cutout_shape={self.candidates}, ') + repr_str += f'fill_in={self.fill_in}, ' + repr_str += f'seg_fill_in={self.seg_fill_in})' + return repr_str + + +@PIPELINES.register_module() +class RandomMosaic(object): + """Mosaic augmentation. Given 4 images, mosaic transform combines them into + one output image. The output image is composed of the parts from each sub- + image. + + .. code:: text + + mosaic transform + center_x + +------------------------------+ + | pad | pad | + | +-----------+ | + | | | | + | | image1 |--------+ | + | | | | | + | | | image2 | | + center_y |----+-------------+-----------| + | | cropped | | + |pad | image3 | image4 | + | | | | + +----|-------------+-----------+ + | | + +-------------+ + + The mosaic transform steps are as follows: + 1. Choose the mosaic center as the intersections of 4 images + 2. Get the left top image according to the index, and randomly + sample another 3 images from the custom dataset. + 3. Sub image will be cropped if image is larger than mosaic patch + + Args: + prob (float): mosaic probability. + img_scale (Sequence[int]): Image size after mosaic pipeline of + a single image. The size of the output image is four times + that of a single image. The output image comprises 4 single images. + Default: (640, 640). + center_ratio_range (Sequence[float]): Center ratio range of mosaic + output. Default: (0.5, 1.5). + pad_val (int): Pad value. Default: 0. 
+ seg_pad_val (int): Pad value of segmentation map. Default: 255. + """ + + def __init__(self, + prob, + img_scale=(640, 640), + center_ratio_range=(0.5, 1.5), + pad_val=0, + seg_pad_val=255): + assert 0 <= prob and prob <= 1 + assert isinstance(img_scale, tuple) + self.prob = prob + self.img_scale = img_scale + self.center_ratio_range = center_ratio_range + self.pad_val = pad_val + self.seg_pad_val = seg_pad_val + + def __call__(self, results): + """Call function to make a mosaic of image. + + Args: + results (dict): Result dict. + + Returns: + dict: Result dict with mosaic transformed. + """ + mosaic = True if np.random.rand() < self.prob else False + if mosaic: + results = self._mosaic_transform_img(results) + results = self._mosaic_transform_seg(results) + return results + + def get_indexes(self, dataset): + """Call function to collect indexes. + + Args: + dataset (:obj:`MultiImageMixDataset`): The dataset. + + Returns: + list: indexes. + """ + + indexes = [random.randint(0, len(dataset)) for _ in range(3)] + return indexes + + def _mosaic_transform_img(self, results): + """Mosaic transform function. + + Args: + results (dict): Result dict. + + Returns: + dict: Updated result dict. + """ + + assert 'mix_results' in results + if len(results['img'].shape) == 3: + mosaic_img = np.full( + (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3), + self.pad_val, + dtype=results['img'].dtype) + else: + mosaic_img = np.full( + (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)), + self.pad_val, + dtype=results['img'].dtype) + + # mosaic center x, y + self.center_x = int( + random.uniform(*self.center_ratio_range) * self.img_scale[1]) + self.center_y = int( + random.uniform(*self.center_ratio_range) * self.img_scale[0]) + center_position = (self.center_x, self.center_y) + + loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') + for i, loc in enumerate(loc_strs): + if loc == 'top_left': + result_patch = copy.deepcopy(results) + else: + result_patch = copy.deepcopy(results['mix_results'][i - 1]) + + img_i = result_patch['img'] + h_i, w_i = img_i.shape[:2] + # keep_ratio resize + scale_ratio_i = min(self.img_scale[0] / h_i, + self.img_scale[1] / w_i) + img_i = mmcv.imresize( + img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) + + # compute the combine parameters + paste_coord, crop_coord = self._mosaic_combine( + loc, center_position, img_i.shape[:2][::-1]) + x1_p, y1_p, x2_p, y2_p = paste_coord + x1_c, y1_c, x2_c, y2_c = crop_coord + + # crop and paste image + mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] + + results['img'] = mosaic_img + results['img_shape'] = mosaic_img.shape + results['ori_shape'] = mosaic_img.shape + + return results + + def _mosaic_transform_seg(self, results): + """Mosaic transform function for label annotations. + + Args: + results (dict): Result dict. + + Returns: + dict: Updated result dict. 
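Both mosaic branches share the same bookkeeping: a canvas of twice `img_scale`, a centre sampled from `center_ratio_range` (x against the width, y against the height), and a keep-ratio scale per sub-image. A minimal standalone sketch of that arithmetic (Python's `random` module and the helper names are used purely for illustration):

```python
import random

def mosaic_layout(img_scale=(640, 640), center_ratio_range=(0.5, 1.5), seed=0):
    """Canvas size and mosaic centre as computed in RandomMosaic._mosaic_transform_img."""
    random.seed(seed)
    canvas_hw = (img_scale[0] * 2, img_scale[1] * 2)
    center_x = int(random.uniform(*center_ratio_range) * img_scale[1])
    center_y = int(random.uniform(*center_ratio_range) * img_scale[0])
    return canvas_hw, (center_x, center_y)

def keep_ratio_scale(h, w, img_scale=(640, 640)):
    """Scale factor applied to a sub-image before it is pasted into the canvas."""
    return min(img_scale[0] / h, img_scale[1] / w)

canvas, center = mosaic_layout()
print(canvas, center)               # 1280x1280 canvas, centre inside the ratio range
print(keep_ratio_scale(480, 720))   # 640 / 720, the limiting side wins
```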
+ """ + + assert 'mix_results' in results + for key in results.get('seg_fields', []): + mosaic_seg = np.full( + (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)), + self.seg_pad_val, + dtype=results[key].dtype) + + # mosaic center x, y + center_position = (self.center_x, self.center_y) + + loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') + for i, loc in enumerate(loc_strs): + if loc == 'top_left': + result_patch = copy.deepcopy(results) + else: + result_patch = copy.deepcopy(results['mix_results'][i - 1]) + + gt_seg_i = result_patch[key] + h_i, w_i = gt_seg_i.shape[:2] + # keep_ratio resize + scale_ratio_i = min(self.img_scale[0] / h_i, + self.img_scale[1] / w_i) + gt_seg_i = mmcv.imresize( + gt_seg_i, + (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)), + interpolation='nearest') + + # compute the combine parameters + paste_coord, crop_coord = self._mosaic_combine( + loc, center_position, gt_seg_i.shape[:2][::-1]) + x1_p, y1_p, x2_p, y2_p = paste_coord + x1_c, y1_c, x2_c, y2_c = crop_coord + + # crop and paste image + mosaic_seg[y1_p:y2_p, x1_p:x2_p] = gt_seg_i[y1_c:y2_c, + x1_c:x2_c] + + results[key] = mosaic_seg + + return results + + def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): + """Calculate global coordinate of mosaic image and local coordinate of + cropped sub-image. + + Args: + loc (str): Index for the sub-image, loc in ('top_left', + 'top_right', 'bottom_left', 'bottom_right'). + center_position_xy (Sequence[float]): Mixing center for 4 images, + (x, y). + img_shape_wh (Sequence[int]): Width and height of sub-image + + Returns: + tuple[tuple[float]]: Corresponding coordinate of pasting and + cropping + - paste_coord (tuple): paste corner coordinate in mosaic image. + - crop_coord (tuple): crop corner coordinate in mosaic image. 
+ """ + + assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') + if loc == 'top_left': + # index0 to top left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + center_position_xy[0], \ + center_position_xy[1] + crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( + y2 - y1), img_shape_wh[0], img_shape_wh[1] + + elif loc == 'top_right': + # index1 to top right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[1] * 2), \ + center_position_xy[1] + crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( + img_shape_wh[0], x2 - x1), img_shape_wh[1] + + elif loc == 'bottom_left': + # index2 to bottom left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + center_position_xy[1], \ + center_position_xy[0], \ + min(self.img_scale[0] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( + y2 - y1, img_shape_wh[1]) + + else: + # index3 to bottom right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + center_position_xy[1], \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[1] * 2), \ + min(self.img_scale[0] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = 0, 0, min(img_shape_wh[0], + x2 - x1), min(y2 - y1, img_shape_wh[1]) + + paste_coord = x1, y1, x2, y2 + return paste_coord, crop_coord + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' + repr_str += f'img_scale={self.img_scale}, ' + repr_str += f'center_ratio_range={self.center_ratio_range}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'seg_pad_val={self.pad_val})' + return repr_str diff --git a/downstream/mmsegmentation/mmseg/datasets/potsdam.py b/downstream/mmsegmentation/mmseg/datasets/potsdam.py new file mode 100644 index 0000000..2986b8f --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/potsdam.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class PotsdamDataset(CustomDataset): + """ISPRS Potsdam dataset. + + In segmentation map annotation for Potsdam dataset, 0 is the ignore index. + ``reduce_zero_label`` should be set to True. The ``img_suffix`` and + ``seg_map_suffix`` are both fixed to '.png'. + """ + CLASSES = ('impervious_surface', 'building', 'low_vegetation', 'tree', + 'car', 'clutter') + + PALETTE = [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], + [255, 255, 0], [255, 0, 0]] + + def __init__(self, **kwargs): + super(PotsdamDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) diff --git a/downstream/mmsegmentation/mmseg/datasets/samplers/__init__.py b/downstream/mmsegmentation/mmseg/datasets/samplers/__init__.py new file mode 100644 index 0000000..da09eff --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/samplers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .distributed_sampler import DistributedSampler + +__all__ = ['DistributedSampler'] diff --git a/downstream/mmsegmentation/mmseg/datasets/samplers/distributed_sampler.py b/downstream/mmsegmentation/mmseg/datasets/samplers/distributed_sampler.py new file mode 100644 index 0000000..d1a13c7 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/samplers/distributed_sampler.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from __future__ import division +from typing import Iterator, Optional + +import torch +from torch.utils.data import Dataset +from torch.utils.data import DistributedSampler as _DistributedSampler + +from mmseg.core.utils import sync_random_seed + + +class DistributedSampler(_DistributedSampler): + """DistributedSampler inheriting from + `torch.utils.data.DistributedSampler`. + + Args: + datasets (Dataset): the dataset will be loaded. + num_replicas (int, optional): Number of processes participating in + distributed training. By default, world_size is retrieved from the + current distributed group. + rank (int, optional): Rank of the current process within num_replicas. + By default, rank is retrieved from the current distributed group. + shuffle (bool): If True (default), sampler will shuffle the indices. + seed (int): random seed used to shuffle the sampler if + :attr:`shuffle=True`. This number should be identical across all + processes in the distributed group. Default: ``0``. + """ + + def __init__(self, + dataset: Dataset, + num_replicas: Optional[int] = None, + rank: Optional[int] = None, + shuffle: bool = True, + seed=0) -> None: + super().__init__( + dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) + + # In distributed sampling, different ranks should sample + # non-overlapped data in the dataset. Therefore, this function + # is used to make sure that each rank shuffles the data indices + # in the same order based on the same seed. Then different ranks + # could use different indices to select non-overlapped data from the + # same data list. + self.seed = sync_random_seed(seed) + + def __iter__(self) -> Iterator: + """ + Yields: + Iterator: iterator of indices for rank. + """ + # deterministically shuffle based on epoch + if self.shuffle: + g = torch.Generator() + # When :attr:`shuffle=True`, this ensures all replicas + # use a different random ordering for each epoch. + # Otherwise, the next iteration of this sampler will + # yield the same ordering. + g.manual_seed(self.epoch + self.seed) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) diff --git a/downstream/mmsegmentation/mmseg/datasets/stare.py b/downstream/mmsegmentation/mmseg/datasets/stare.py new file mode 100644 index 0000000..a24d1d9 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/stare.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class STAREDataset(CustomDataset): + """STARE dataset. + + In segmentation map annotation for STARE, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. 
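As a quick standalone check of `DistributedSampler.__iter__` above: every rank shuffles with the same `epoch + seed`, pads the index list to an even split, and then takes a strided slice, so the ranks see disjoint subsets of the dataset. A minimal sketch, with `sync_random_seed` replaced by a plain integer seed and the helper name purely illustrative:

```python
import torch

def rank_indices(dataset_len, num_replicas, rank, epoch=0, seed=0, shuffle=True):
    """Reproduce the index selection of DistributedSampler.__iter__ in pure torch."""
    if shuffle:
        g = torch.Generator()
        g.manual_seed(epoch + seed)            # identical ordering on every rank
        indices = torch.randperm(dataset_len, generator=g).tolist()
    else:
        indices = list(range(dataset_len))
    num_samples = -(-dataset_len // num_replicas)   # ceil division
    total_size = num_samples * num_replicas
    indices += indices[:total_size - len(indices)]  # pad so the split is even
    return indices[rank:total_size:num_replicas]    # strided, non-overlapping slice

parts = [rank_indices(10, num_replicas=4, rank=r) for r in range(4)]
assert all(len(p) == 3 for p in parts)              # ceil(10 / 4) == 3 samples per rank
print(parts)
```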
The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '.ah.png'. + """ + + CLASSES = ('background', 'vessel') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(STAREDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.ah.png', + reduce_zero_label=False, + **kwargs) + assert osp.exists(self.img_dir) diff --git a/downstream/mmsegmentation/mmseg/datasets/voc.py b/downstream/mmsegmentation/mmseg/datasets/voc.py new file mode 100644 index 0000000..3cec9e3 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/datasets/voc.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class PascalVOCDataset(CustomDataset): + """Pascal VOC dataset. + + Args: + split (str): Split txt file for Pascal VOC. + """ + + CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', + 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', + 'train', 'tvmonitor') + + PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], + [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], + [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], + [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], + [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] + + def __init__(self, split, **kwargs): + super(PascalVOCDataset, self).__init__( + img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) + assert osp.exists(self.img_dir) and self.split is not None diff --git a/downstream/mmsegmentation/mmseg/dev/custom_opt_constructor.py b/downstream/mmsegmentation/mmseg/dev/custom_opt_constructor.py new file mode 100644 index 0000000..18a6d0a --- /dev/null +++ b/downstream/mmsegmentation/mmseg/dev/custom_opt_constructor.py @@ -0,0 +1,258 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn +from torch.nn import GroupNorm, LayerNorm + +from mmcv.utils import _BatchNorm, _InstanceNorm, SyncBatchNorm, build_from_cfg, is_list_of +from mmcv.utils.ext_loader import check_ops_exist +from mmcv.runner.optimizer.builder import OPTIMIZER_BUILDERS, OPTIMIZERS + + +@OPTIMIZER_BUILDERS.register_module() +class CustomOptimizerConstructor: + """Default constructor for optimizers. + + By default each parameter share the same optimizer settings, and we + provide an argument ``paramwise_cfg`` to specify parameter-wise settings. + It is a dict and may contain the following fields: + + - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If + one of the keys in ``custom_keys`` is a substring of the name of one + parameter, then the setting of the parameter will be specified by + ``custom_keys[key]`` and other setting like ``bias_lr_mult`` etc. will + be ignored. It should be noted that the aforementioned ``key`` is the + longest key that is a substring of the name of the parameter. If there + are multiple matched keys with the same length, then the key with lower + alphabet order will be chosen. + ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult`` + and ``decay_mult``. See Example 2 below. + - ``bias_lr_mult`` (float): It will be multiplied to the learning + rate for all bias parameters (except for those in normalization + layers and offset layers of DCN). 
+ - ``bias_decay_mult`` (float): It will be multiplied to the weight + decay for all bias parameters (except for those in + normalization layers, depthwise conv layers, offset layers of DCN). + - ``norm_decay_mult`` (float): It will be multiplied to the weight + decay for all weight and bias parameters of normalization + layers. + - ``dwconv_decay_mult`` (float): It will be multiplied to the weight + decay for all weight and bias parameters of depthwise conv + layers. + - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning + rate for parameters of offset layer in the deformable convs + of a model. + - ``bypass_duplicate`` (bool): If true, the duplicate parameters + would not be added into optimizer. Default: False. + + Note: + + 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will + override the effect of ``bias_lr_mult`` in the bias of offset layer. + So be careful when using both ``bias_lr_mult`` and + ``dcn_offset_lr_mult``. If you wish to apply both of them to the offset + layer in deformable convs, set ``dcn_offset_lr_mult`` to the original + ``dcn_offset_lr_mult`` * ``bias_lr_mult``. + + 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will + apply it to all the DCN layers in the model. So be careful when the + model contains multiple DCN layers in places other than backbone. + + Args: + model (:obj:`nn.Module`): The model with parameters to be optimized. + optimizer_cfg (dict): The config dict of the optimizer. + Positional fields are + + - `type`: class name of the optimizer. + + Optional fields are + + - any arguments of the corresponding optimizer type, e.g., + lr, weight_decay, momentum, etc. + paramwise_cfg (dict, optional): Parameter-wise options. + + Example 1: + >>> model = torch.nn.modules.Conv1d(1, 1, 1) + >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9, + >>> weight_decay=0.0001) + >>> paramwise_cfg = dict(norm_decay_mult=0.) + >>> optim_builder = DefaultOptimizerConstructor( + >>> optimizer_cfg, paramwise_cfg) + >>> optimizer = optim_builder(model) + + Example 2: + >>> # assume model have attribute model.backbone and model.cls_head + >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95) + >>> paramwise_cfg = dict(custom_keys={ + 'backbone': dict(lr_mult=0.1, decay_mult=0.9)}) + >>> optim_builder = DefaultOptimizerConstructor( + >>> optimizer_cfg, paramwise_cfg) + >>> optimizer = optim_builder(model) + >>> # Then the `lr` and `weight_decay` for model.backbone is + >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for + >>> # model.cls_head is (0.01, 0.95). 
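The `custom_keys` rule described above (the longest key that is a substring of the parameter name wins, with ties broken alphabetically) is implemented in `add_params` below by sorting the keys alphabetically and then by descending length. A small sketch of just that lookup (the function name and keys are illustrative):

```python
def match_custom_key(param_name, custom_keys):
    """Return the custom_keys entry that the constructor would apply to a parameter."""
    # alphabetical sort first, then stable sort by descending length, so the first
    # matching key is the longest one (ties resolved alphabetically)
    sorted_keys = sorted(sorted(custom_keys), key=len, reverse=True)
    for key in sorted_keys:
        if key in param_name:
            return key, custom_keys[key]
    return None, {}

custom_keys = {
    'backbone': dict(lr_mult=0.1),
    'backbone.patch_embed': dict(lr_mult=0.0),   # e.g. keep the patch embedding frozen
}
print(match_custom_key('backbone.patch_embed.proj.weight', custom_keys))
# ('backbone.patch_embed', {'lr_mult': 0.0})  -- the longer match wins over 'backbone'
print(match_custom_key('decode_head.conv_seg.weight', custom_keys))
# (None, {})  -- falls back to the generic paramwise rules
```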
+ """ + + def __init__(self, + optimizer_cfg: Dict, + paramwise_cfg: Optional[Dict] = None): + if not isinstance(optimizer_cfg, dict): + raise TypeError('optimizer_cfg should be a dict', + f'but got {type(optimizer_cfg)}') + self.optimizer_cfg = optimizer_cfg + self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg + self.base_lr = optimizer_cfg.get('lr', None) + self.base_wd = optimizer_cfg.get('weight_decay', None) + self._validate_cfg() + + def _validate_cfg(self) -> None: + if not isinstance(self.paramwise_cfg, dict): + raise TypeError('paramwise_cfg should be None or a dict, ' + f'but got {type(self.paramwise_cfg)}') + + if 'custom_keys' in self.paramwise_cfg: + if not isinstance(self.paramwise_cfg['custom_keys'], dict): + raise TypeError( + 'If specified, custom_keys must be a dict, ' + f'but got {type(self.paramwise_cfg["custom_keys"])}') + if self.base_wd is None: + for key in self.paramwise_cfg['custom_keys']: + if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]: + raise ValueError('base_wd should not be None') + + # get base lr and weight decay + # weight_decay must be explicitly specified if mult is specified + if ('bias_decay_mult' in self.paramwise_cfg + or 'norm_decay_mult' in self.paramwise_cfg + or 'dwconv_decay_mult' in self.paramwise_cfg): + if self.base_wd is None: + raise ValueError('base_wd should not be None') + + def _is_in(self, param_group: Dict, param_group_list: List) -> bool: + assert is_list_of(param_group_list, dict) + param = set(param_group['params']) + param_set = set() + for group in param_group_list: + param_set.update(set(group['params'])) + + return not param.isdisjoint(param_set) + + def add_params(self, + params: List[Dict], + module: nn.Module, + prefix: str = '', + is_dcn_module: Union[int, float, None] = None) -> None: + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + prefix (str): The prefix of the module + is_dcn_module (int|float|None): If the current module is a + submodule of DCN, `is_dcn_module` will be passed to + control conv_offset layer's learning rate. Defaults to None. + """ + # get param-wise options + custom_keys = self.paramwise_cfg.get('custom_keys', {}) + # first sort with alphabet order and then sort with reversed len of str + sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True) + + bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.) + bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.) + norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.) + dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.) + bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False) + dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.) + + # special rules for norm layers and depth-wise conv layers + is_norm = isinstance(module, + (_BatchNorm, _InstanceNorm, SyncBatchNorm, GroupNorm, LayerNorm)) + is_dwconv = ( + isinstance(module, torch.nn.Conv2d) + and module.in_channels == module.groups) + + for name, param in module.named_parameters(recurse=False): + param_group = {'params': [param]} + if not param.requires_grad: + params.append(param_group) + continue + if bypass_duplicate and self._is_in(param_group, params): + warnings.warn(f'{prefix} is duplicate. 
It is skipped since ' + f'bypass_duplicate={bypass_duplicate}') + continue + # if the parameter match one of the custom keys, ignore other rules + is_custom = False + for key in sorted_keys: + if key in f'{prefix}.{name}': + is_custom = True + lr_mult = custom_keys[key].get('lr_mult', 1.) + param_group['lr'] = self.base_lr * lr_mult + if self.base_wd is not None: + decay_mult = custom_keys[key].get('decay_mult', 1.) + param_group['weight_decay'] = self.base_wd * decay_mult + break + + if not is_custom: + # bias_lr_mult affects all bias parameters + # except for norm.bias dcn.conv_offset.bias + if name == 'bias' and not (is_norm or is_dcn_module): + param_group['lr'] = self.base_lr * bias_lr_mult + + if (prefix.find('conv_offset') != -1 and is_dcn_module + and isinstance(module, torch.nn.Conv2d)): + # deal with both dcn_offset's bias & weight + param_group['lr'] = self.base_lr * dcn_offset_lr_mult + + # apply weight decay policies + if self.base_wd is not None: + # norm decay + if is_norm: + param_group[ + 'weight_decay'] = self.base_wd * norm_decay_mult + # depth-wise conv + elif is_dwconv: + param_group[ + 'weight_decay'] = self.base_wd * dwconv_decay_mult + # bias lr and decay + elif name == 'bias' and not is_dcn_module: + # TODO: current bias_decay_mult will have affect on DCN + param_group[ + 'weight_decay'] = self.base_wd * bias_decay_mult + params.append(param_group) + + if check_ops_exist(): + from mmcv.ops import DeformConv2d, ModulatedDeformConv2d + is_dcn_module = isinstance(module, + (DeformConv2d, ModulatedDeformConv2d)) + else: + is_dcn_module = False + for child_name, child_mod in module.named_children(): + child_prefix = f'{prefix}.{child_name}' if prefix else child_name + self.add_params( + params, + child_mod, + prefix=child_prefix, + is_dcn_module=is_dcn_module) + + def __call__(self, model: nn.Module): + if hasattr(model, 'module'): + model = model.module + + optimizer_cfg = self.optimizer_cfg.copy() + # if no paramwise option is specified, just use the global setting + if not self.paramwise_cfg: + optimizer_cfg['params'] = model.parameters() + return build_from_cfg(optimizer_cfg, OPTIMIZERS) + + # set param-wise lr and weight decay recursively + params: List[Dict] = [] + self.add_params(params, model) + optimizer_cfg['params'] = params + + return build_from_cfg(optimizer_cfg, OPTIMIZERS) \ No newline at end of file diff --git a/downstream/mmsegmentation/mmseg/models/__init__.py b/downstream/mmsegmentation/mmseg/models/__init__.py new file mode 100644 index 0000000..87d8108 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .backbones import * # noqa: F401,F403 +from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, + build_head, build_loss, build_segmentor) +from .decode_heads import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 +from .segmentors import * # noqa: F401,F403 + +__all__ = [ + 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', + 'build_head', 'build_loss', 'build_segmentor' +] diff --git a/downstream/mmsegmentation/mmseg/models/backbones/__init__.py b/downstream/mmsegmentation/mmseg/models/backbones/__init__.py new file mode 100644 index 0000000..167155e --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/__init__.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .beit import BEiT +from .bisenetv1 import BiSeNetV1 +from .bisenetv2 import BiSeNetV2 +from .cgnet import CGNet +from .erfnet import ERFNet +from .fast_scnn import FastSCNN +from .hrnet import HRNet +from .icnet import ICNet +from .mit import MixVisionTransformer +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .resnest import ResNeSt +from .resnet import ResNet, ResNetV1c, ResNetV1d +from .resnext import ResNeXt +from .stdc import STDCContextPathNet, STDCNet +from .swin import SwinTransformer +from .timm_backbone import TIMMBackbone +from .twins import PCPVT, SVT +from .unet import UNet +from .vit import VisionTransformer + +from .gpvit import GPViTSeg + +__all__ = [ + 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', 'FastSCNN', + 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3', + 'VisionTransformer', 'SwinTransformer', 'MixVisionTransformer', + 'BiSeNetV1', 'BiSeNetV2', 'ICNet', 'TIMMBackbone', 'ERFNet', 'PCPVT', + 'SVT', 'STDCNet', 'STDCContextPathNet', 'BEiT', 'GPViTSeg' +] diff --git a/downstream/mmsegmentation/mmseg/models/backbones/beit.py b/downstream/mmsegmentation/mmseg/models/backbones/beit.py new file mode 100644 index 0000000..26be315 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/beit.py @@ -0,0 +1,532 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import FFN +from mmcv.cnn.utils.weight_init import (constant_init, kaiming_init, + trunc_normal_) +from mmcv.runner import BaseModule, ModuleList, _load_checkpoint +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.utils import _pair as to_2tuple + +from mmseg.utils import get_root_logger +from ..builder import BACKBONES +from ..utils import PatchEmbed + +try: + from scipy import interpolate +except ImportError: + interpolate = None + + +class BEiTAttention(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qv_bias (bool): If True, add a learnable bias to q, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float): Dropout ratio of output. Default: 0. + init_cfg (dict | None, optional): The Config for initialization. + Default: None. 
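+
+    Example (an illustrative sketch; the embedding size, head count and
+        window size below are arbitrary, and the token dimension follows the
+        ``window_size[0] * window_size[1] + 1`` layout expected by
+        ``forward``):
+        >>> import torch
+        >>> attn = BEiTAttention(
+        ...     embed_dims=768, num_heads=12, window_size=(14, 14))
+        >>> x = torch.randn(2, 14 * 14 + 1, 768)  # patch tokens + cls token
+        >>> attn(x).shape
+        torch.Size([2, 197, 768])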
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + if qv_bias: + self.q_bias = nn.Parameter(torch.zeros(embed_dims)) + self.v_bias = nn.Parameter(torch.zeros(embed_dims)) + else: + self.q_bias = None + self.v_bias = None + + self.window_size = window_size + # cls to token & token 2 cls & cls to cls + self.num_relative_distance = (2 * window_size[0] - + 1) * (2 * window_size[1] - 1) + 3 + # relative_position_bias_table shape is (2*Wh-1 * 2*Ww-1 + 3, nH) + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) + + # get pair-wise relative position index for + # each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + # coords shape is (2, Wh, Ww) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) + # coords_flatten shape is (2, Wh*Ww) + coords_flatten = torch.flatten(coords, 1) + relative_coords = ( + coords_flatten[:, :, None] - coords_flatten[:, None, :]) + # relative_coords shape is (Wh*Ww, Wh*Ww, 2) + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + # shift to start from 0 + relative_coords[:, :, 0] += window_size[0] - 1 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = torch.zeros( + size=(window_size[0] * window_size[1] + 1, ) * 2, + dtype=relative_coords.dtype) + # relative_position_index shape is (Wh*Ww, Wh*Ww) + relative_position_index[1:, 1:] = relative_coords.sum(-1) + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer('relative_position_index', + relative_position_index) + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=False) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x): + """ + Args: + x (tensor): input features with shape of (num_windows*B, N, C). 
+ """ + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + k_bias = torch.zeros_like(self.v_bias, requires_grad=False) + qkv_bias = torch.cat((self.q_bias, k_bias, self.v_bias)) + + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + if self.relative_position_bias_table is not None: + Wh = self.window_size[0] + Ww = self.window_size[1] + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + Wh * Ww + 1, Wh * Ww + 1, -1) + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in Vision Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qv_bias (bool): Enable bias for qv if True. Default: True + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + window_size (tuple[int], optional): The height and width of the window. + Default: None. + init_values (float, optional): Initialize the values of BEiTAttention + and FFN with learnable scaling. Default: None. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + window_size=None, + init_values=None): + super(TransformerEncoderLayer, self).__init__() + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + self.attn = BEiTAttention( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + qv_bias=qv_bias, + qk_scale=None, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=0., + init_cfg=None) + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=0., + dropout_layer=None, + act_cfg=act_cfg, + add_identity=False) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + # NOTE: drop path for stochastic depth, we shall see if + # this is better than dropout here + dropout_layer = dict(type='DropPath', drop_prob=drop_path_rate) + self.drop_path = build_dropout( + dropout_layer) if dropout_layer else nn.Identity() + self.gamma_1 = nn.Parameter( + init_values * torch.ones((embed_dims)), requires_grad=True) + self.gamma_2 = nn.Parameter( + init_values * torch.ones((embed_dims)), requires_grad=True) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.ffn(self.norm2(x))) + return x + + +@BACKBONES.register_module() +class BEiT(BaseModule): + """BERT Pre-Training of Image Transformers. + + Args: + img_size (int | tuple): Input image size. Default: 224. + patch_size (int): The patch size. Default: 16. + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): Embedding dimension. Default: 768. + num_layers (int): Depth of transformer. Default: 12. + num_heads (int): Number of attention heads. Default: 12. + mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. + Default: 4. + out_indices (list | tuple | int): Output from which stages. + Default: -1. + qv_bias (bool): Enable bias for qv if True. Default: True. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + patch_norm (bool): Whether to add a norm in PatchEmbed Block. + Default: False. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Default: False. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + pretrained (str, optional): Model pretrained path. Default: None. + init_values (float): Initialize the values of BEiTAttention and FFN + with learnable scaling. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=-1, + qv_bias=True, + attn_drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + patch_norm=False, + final_norm=False, + num_fcs=2, + norm_eval=False, + pretrained=None, + init_values=0.1, + init_cfg=None): + super(BEiT, self).__init__(init_cfg=init_cfg) + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be set at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is not None: + raise TypeError('pretrained must be a str or None') + + self.img_size = img_size + self.patch_size = patch_size + self.norm_eval = norm_eval + self.pretrained = pretrained + + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + padding=0, + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + + window_size = (img_size[0] // patch_size, img_size[1] // patch_size) + self.patch_shape = window_size + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + + if isinstance(out_indices, int): + if out_indices == -1: + out_indices = num_layers - 1 + self.out_indices = [out_indices] + elif isinstance(out_indices, list) or isinstance(out_indices, tuple): + self.out_indices = out_indices + else: + raise TypeError('out_indices must be type of int, list or tuple') + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_layers)] + self.layers = ModuleList() + for i in range(num_layers): + self.layers.append( + TransformerEncoderLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=mlp_ratio * embed_dims, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[i], + num_fcs=num_fcs, + qv_bias=qv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + window_size=window_size, + init_values=init_values)) + + self.final_norm = final_norm + if final_norm: + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def _geometric_sequence_interpolation(self, src_size, dst_size, sequence, + num): + """Get new sequence via geometric sequence interpolation. + + Args: + src_size (int): Pos_embedding size in pre-trained model. + dst_size (int): Pos_embedding size in the current model. + sequence (tensor): The relative position bias of the pretrain + model after removing the extra tokens. + num (int): Number of attention heads. + Returns: + new_sequence (tensor): Geometric sequence interpolate the + pre-trained relative position bias to the size of + the current model. + """ + + def geometric_progression(a, r, n): + return a * (1.0 - r**n) / (1.0 - r) + + # Here is a binary function. 
+ left, right = 1.01, 1.5 + while right - left > 1e-6: + q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_size // 2) + if gp > dst_size // 2: + right = q + else: + left = q + # The position of each interpolated point is determined + # by the ratio obtained by dichotomy. + dis = [] + cur = 1 + for i in range(src_size // 2): + dis.append(cur) + cur += q**(i + 1) + r_ids = [-_ for _ in reversed(dis)] + x = r_ids + [0] + dis + y = r_ids + [0] + dis + t = dst_size // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + # Interpolation functions are being executed and called. + new_sequence = [] + for i in range(num): + z = sequence[:, i].view(src_size, src_size).float().numpy() + f = interpolate.interp2d(x, y, z, kind='cubic') + new_sequence.append( + torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(sequence)) + new_sequence = torch.cat(new_sequence, dim=-1) + return new_sequence + + def resize_rel_pos_embed(self, checkpoint): + """Resize relative pos_embed weights. + + This function is modified from + https://github.com/microsoft/unilm/blob/master/beit/semantic_segmentation/mmcv_custom/checkpoint.py. # noqa: E501 + Copyright (c) Microsoft Corporation + Licensed under the MIT License + + Args: + checkpoint (dict): Key and value of the pretrain model. + Returns: + state_dict (dict): Interpolate the relative pos_embed weights + in the pre-train model to the current model size. + """ + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + all_keys = list(state_dict.keys()) + for key in all_keys: + if 'relative_position_index' in key: + state_dict.pop(key) + # In order to keep the center of pos_bias as consistent as + # possible after interpolation, and vice versa in the edge + # area, the geometric sequence interpolation method is adopted. + if 'relative_position_bias_table' in key: + rel_pos_bias = state_dict[key] + src_num_pos, num_attn_heads = rel_pos_bias.size() + dst_num_pos, _ = self.state_dict()[key].size() + dst_patch_shape = self.patch_shape + if dst_patch_shape[0] != dst_patch_shape[1]: + raise NotImplementedError() + # Count the number of extra tokens. 
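+                # The bias table stores (2*Wh - 1) * (2*Ww - 1) patch-to-patch
+                # relative positions plus 3 special entries (cls-to-token,
+                # token-to-cls and cls-to-cls). Only the patch-to-patch part
+                # is interpolated; the special entries are re-appended below.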
+ num_extra_tokens = dst_num_pos - ( + dst_patch_shape[0] * 2 - 1) * ( + dst_patch_shape[1] * 2 - 1) + src_size = int((src_num_pos - num_extra_tokens)**0.5) + dst_size = int((dst_num_pos - num_extra_tokens)**0.5) + if src_size != dst_size: + extra_tokens = rel_pos_bias[-num_extra_tokens:, :] + rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] + new_rel_pos_bias = self._geometric_sequence_interpolation( + src_size, dst_size, rel_pos_bias, num_attn_heads) + new_rel_pos_bias = torch.cat( + (new_rel_pos_bias, extra_tokens), dim=0) + state_dict[key] = new_rel_pos_bias + + return state_dict + + def init_weights(self): + + def _init_weights(m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + self.apply(_init_weights) + + if (isinstance(self.init_cfg, dict) + and self.init_cfg.get('type') == 'Pretrained'): + logger = get_root_logger() + checkpoint = _load_checkpoint( + self.init_cfg['checkpoint'], logger=logger, map_location='cpu') + state_dict = self.resize_rel_pos_embed(checkpoint) + self.load_state_dict(state_dict, False) + elif self.init_cfg is not None: + super(BEiT, self).init_weights() + else: + # We only implement the 'jax_impl' initialization implemented at + # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501 + # Copyright 2019 Ross Wightman + # Licensed under the Apache License, Version 2.0 (the "License") + trunc_normal_(self.cls_token, std=.02) + for n, m in self.named_modules(): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + if 'ffn' in n: + nn.init.normal_(m.bias, mean=0., std=1e-6) + else: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + kaiming_init(m, mode='fan_in', bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) + + def forward(self, inputs): + B = inputs.shape[0] + + x, hw_shape = self.patch_embed(inputs) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i == len(self.layers) - 1: + if self.final_norm: + x = self.norm1(x) + if i in self.out_indices: + # Remove class token and reshape token for decoder head + out = x[:, 1:] + B, _, C = out.shape + out = out.reshape(B, hw_shape[0], hw_shape[1], + C).permute(0, 3, 1, 2).contiguous() + outs.append(out) + + return tuple(outs) + + def train(self, mode=True): + super(BEiT, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.LayerNorm): + m.eval() diff --git a/downstream/mmsegmentation/mmseg/models/backbones/bisenetv1.py b/downstream/mmsegmentation/mmseg/models/backbones/bisenetv1.py new file mode 100644 index 0000000..4beb7b3 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/bisenetv1.py @@ -0,0 +1,332 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from mmseg.ops import resize +from ..builder import BACKBONES, build_backbone + + +class SpatialPath(BaseModule): + """Spatial Path to preserve the spatial size of the original input image + and encode affluent spatial information. 
+ + Args: + in_channels(int): The number of channels of input + image. Default: 3. + num_channels (Tuple[int]): The number of channels of + each layers in Spatial Path. + Default: (64, 64, 64, 128). + Returns: + x (torch.Tensor): Feature map for Feature Fusion Module. + """ + + def __init__(self, + in_channels=3, + num_channels=(64, 64, 64, 128), + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(SpatialPath, self).__init__(init_cfg=init_cfg) + assert len(num_channels) == 4, 'Length of input channels \ + of Spatial Path must be 4!' + + self.layers = [] + for i in range(len(num_channels)): + layer_name = f'layer{i + 1}' + self.layers.append(layer_name) + if i == 0: + self.add_module( + layer_name, + ConvModule( + in_channels=in_channels, + out_channels=num_channels[i], + kernel_size=7, + stride=2, + padding=3, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + elif i == len(num_channels) - 1: + self.add_module( + layer_name, + ConvModule( + in_channels=num_channels[i - 1], + out_channels=num_channels[i], + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + else: + self.add_module( + layer_name, + ConvModule( + in_channels=num_channels[i - 1], + out_channels=num_channels[i], + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + for i, layer_name in enumerate(self.layers): + layer_stage = getattr(self, layer_name) + x = layer_stage(x) + return x + + +class AttentionRefinementModule(BaseModule): + """Attention Refinement Module (ARM) to refine the features of each stage. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + Returns: + x_out (torch.Tensor): Feature map of Attention Refinement Module. + """ + + def __init__(self, + in_channels, + out_channel, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(AttentionRefinementModule, self).__init__(init_cfg=init_cfg) + self.conv_layer = ConvModule( + in_channels=in_channels, + out_channels=out_channel, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.atten_conv_layer = nn.Sequential( + nn.AdaptiveAvgPool2d((1, 1)), + ConvModule( + in_channels=out_channel, + out_channels=out_channel, + kernel_size=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), nn.Sigmoid()) + + def forward(self, x): + x = self.conv_layer(x) + x_atten = self.atten_conv_layer(x) + x_out = x * x_atten + return x_out + + +class ContextPath(BaseModule): + """Context Path to provide sufficient receptive field. + + Args: + backbone_cfg:(dict): Config of backbone of + Context Path. + context_channels (Tuple[int]): The number of channel numbers + of various modules in Context Path. + Default: (128, 256, 512). + align_corners (bool, optional): The align_corners argument of + resize operation. Default: False. + Returns: + x_16_up, x_32_up (torch.Tensor, torch.Tensor): Two feature maps + undergoing upsampling from 1/16 and 1/32 downsampling + feature maps. These two feature maps are used for Feature + Fusion Module and Auxiliary Head. 
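+
+    Example (an illustrative sketch; ``backbone_cfg`` only needs to describe
+        a registered backbone that returns four feature maps at 1/4, 1/8,
+        1/16 and 1/32 of the input resolution, e.g. a ResNet-18 whose stage
+        channels match the default ``context_channels``):
+        >>> import torch
+        >>> backbone_cfg = dict(type='ResNet', depth=18,
+        ...                     out_indices=(0, 1, 2, 3))
+        >>> context_path = ContextPath(backbone_cfg)
+        >>> x_16_up, x_32_up = context_path(torch.randn(2, 3, 256, 512))
+        >>> x_16_up.shape, x_32_up.shape
+        (torch.Size([2, 128, 32, 64]), torch.Size([2, 128, 16, 32]))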
+ """ + + def __init__(self, + backbone_cfg, + context_channels=(128, 256, 512), + align_corners=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(ContextPath, self).__init__(init_cfg=init_cfg) + assert len(context_channels) == 3, 'Length of input channels \ + of Context Path must be 3!' + + self.backbone = build_backbone(backbone_cfg) + + self.align_corners = align_corners + self.arm16 = AttentionRefinementModule(context_channels[1], + context_channels[0]) + self.arm32 = AttentionRefinementModule(context_channels[2], + context_channels[0]) + self.conv_head32 = ConvModule( + in_channels=context_channels[0], + out_channels=context_channels[0], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv_head16 = ConvModule( + in_channels=context_channels[0], + out_channels=context_channels[0], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.gap_conv = nn.Sequential( + nn.AdaptiveAvgPool2d((1, 1)), + ConvModule( + in_channels=context_channels[2], + out_channels=context_channels[0], + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + x_4, x_8, x_16, x_32 = self.backbone(x) + x_gap = self.gap_conv(x_32) + + x_32_arm = self.arm32(x_32) + x_32_sum = x_32_arm + x_gap + x_32_up = resize(input=x_32_sum, size=x_16.shape[2:], mode='nearest') + x_32_up = self.conv_head32(x_32_up) + + x_16_arm = self.arm16(x_16) + x_16_sum = x_16_arm + x_32_up + x_16_up = resize(input=x_16_sum, size=x_8.shape[2:], mode='nearest') + x_16_up = self.conv_head16(x_16_up) + + return x_16_up, x_32_up + + +class FeatureFusionModule(BaseModule): + """Feature Fusion Module to fuse low level output feature of Spatial Path + and high level output feature of Context Path. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + Returns: + x_out (torch.Tensor): Feature map of Feature Fusion Module. + """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(FeatureFusionModule, self).__init__(init_cfg=init_cfg) + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + self.conv_atten = nn.Sequential( + ConvModule( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), nn.Sigmoid()) + + def forward(self, x_sp, x_cp): + x_concat = torch.cat([x_sp, x_cp], dim=1) + x_fuse = self.conv1(x_concat) + x_atten = self.gap(x_fuse) + # Note: No BN and more 1x1 conv in paper. + x_atten = self.conv_atten(x_atten) + x_atten = x_fuse * x_atten + x_out = x_atten + x_fuse + return x_out + + +@BACKBONES.register_module() +class BiSeNetV1(BaseModule): + """BiSeNetV1 backbone. + + This backbone is the implementation of `BiSeNet: Bilateral + Segmentation Network for Real-time Semantic + Segmentation `_. + + Args: + backbone_cfg:(dict): Config of backbone of + Context Path. + in_channels (int): The number of channels of input + image. Default: 3. + spatial_channels (Tuple[int]): Size of channel numbers of + various layers in Spatial Path. 
+ Default: (64, 64, 64, 128). + context_channels (Tuple[int]): Size of channel numbers of + various modules in Context Path. + Default: (128, 256, 512). + out_indices (Tuple[int] | int, optional): Output from which stages. + Default: (0, 1, 2). + align_corners (bool, optional): The align_corners argument of + resize operation in Bilateral Guided Aggregation Layer. + Default: False. + out_channels(int): The number of channels of output. + It must be the same with `in_channels` of decode_head. + Default: 256. + """ + + def __init__(self, + backbone_cfg, + in_channels=3, + spatial_channels=(64, 64, 64, 128), + context_channels=(128, 256, 512), + out_indices=(0, 1, 2), + align_corners=False, + out_channels=256, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + init_cfg=None): + + super(BiSeNetV1, self).__init__(init_cfg=init_cfg) + assert len(spatial_channels) == 4, 'Length of input channels \ + of Spatial Path must be 4!' + + assert len(context_channels) == 3, 'Length of input channels \ + of Context Path must be 3!' + + self.out_indices = out_indices + self.align_corners = align_corners + self.context_path = ContextPath(backbone_cfg, context_channels, + self.align_corners) + self.spatial_path = SpatialPath(in_channels, spatial_channels) + self.ffm = FeatureFusionModule(context_channels[1], out_channels) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + def forward(self, x): + # stole refactoring code from Coin Cheung, thanks + x_context8, x_context16 = self.context_path(x) + x_spatial = self.spatial_path(x) + x_fuse = self.ffm(x_spatial, x_context8) + + outs = [x_fuse, x_context8, x_context16] + outs = [outs[i] for i in self.out_indices] + return tuple(outs) diff --git a/downstream/mmsegmentation/mmseg/models/backbones/bisenetv2.py b/downstream/mmsegmentation/mmseg/models/backbones/bisenetv2.py new file mode 100644 index 0000000..d908b32 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/bisenetv2.py @@ -0,0 +1,622 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, + build_activation_layer, build_norm_layer) +from mmcv.runner import BaseModule + +from mmseg.ops import resize +from ..builder import BACKBONES + + +class DetailBranch(BaseModule): + """Detail Branch with wide channels and shallow layers to capture low-level + details and generate high-resolution feature representation. + + Args: + detail_channels (Tuple[int]): Size of channel numbers of each stage + in Detail Branch, in paper it has 3 stages. + Default: (64, 64, 128). + in_channels (int): Number of channels of input image. Default: 3. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + x (torch.Tensor): Feature map of Detail Branch. 
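+
+    Example (an illustrative sketch; the input resolution is arbitrary, and
+        the three stages each downsample by 2x for an overall stride of 8):
+        >>> import torch
+        >>> detail = DetailBranch()
+        >>> detail(torch.randn(1, 3, 512, 1024)).shape
+        torch.Size([1, 128, 64, 128])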
+ """ + + def __init__(self, + detail_channels=(64, 64, 128), + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(DetailBranch, self).__init__(init_cfg=init_cfg) + detail_branch = [] + for i in range(len(detail_channels)): + if i == 0: + detail_branch.append( + nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=detail_channels[i], + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=detail_channels[i], + out_channels=detail_channels[i], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg))) + else: + detail_branch.append( + nn.Sequential( + ConvModule( + in_channels=detail_channels[i - 1], + out_channels=detail_channels[i], + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=detail_channels[i], + out_channels=detail_channels[i], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=detail_channels[i], + out_channels=detail_channels[i], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg))) + self.detail_branch = nn.ModuleList(detail_branch) + + def forward(self, x): + for stage in self.detail_branch: + x = stage(x) + return x + + +class StemBlock(BaseModule): + """Stem Block at the beginning of Semantic Branch. + + Args: + in_channels (int): Number of input channels. + Default: 3. + out_channels (int): Number of output channels. + Default: 16. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + x (torch.Tensor): First feature map in Semantic Branch. + """ + + def __init__(self, + in_channels=3, + out_channels=16, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(StemBlock, self).__init__(init_cfg=init_cfg) + + self.conv_first = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.convs = nn.Sequential( + ConvModule( + in_channels=out_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=out_channels // 2, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.pool = nn.MaxPool2d( + kernel_size=3, stride=2, padding=1, ceil_mode=False) + self.fuse_last = ConvModule( + in_channels=out_channels * 2, + out_channels=out_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + x = self.conv_first(x) + x_left = self.convs(x) + x_right = self.pool(x) + x = self.fuse_last(torch.cat([x_left, x_right], dim=1)) + return x + + +class GELayer(BaseModule): + """Gather-and-Expansion Layer. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + exp_ratio (int): Expansion ratio for middle channels. + Default: 6. 
+ stride (int): Stride of GELayer. Default: 1 + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + x (torch.Tensor): Intermediate feature map in + Semantic Branch. + """ + + def __init__(self, + in_channels, + out_channels, + exp_ratio=6, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(GELayer, self).__init__(init_cfg=init_cfg) + mid_channel = in_channels * exp_ratio + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if stride == 1: + self.dwconv = nn.Sequential( + # ReLU in ConvModule not shown in paper + ConvModule( + in_channels=in_channels, + out_channels=mid_channel, + kernel_size=3, + stride=stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.shortcut = None + else: + self.dwconv = nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=mid_channel, + kernel_size=3, + stride=stride, + padding=1, + groups=in_channels, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + # ReLU in ConvModule not shown in paper + ConvModule( + in_channels=mid_channel, + out_channels=mid_channel, + kernel_size=3, + stride=1, + padding=1, + groups=mid_channel, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + self.shortcut = nn.Sequential( + DepthwiseSeparableConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + padding=1, + dw_norm_cfg=norm_cfg, + dw_act_cfg=None, + pw_norm_cfg=norm_cfg, + pw_act_cfg=None, + )) + + self.conv2 = nn.Sequential( + ConvModule( + in_channels=mid_channel, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + )) + + self.act = build_activation_layer(act_cfg) + + def forward(self, x): + identity = x + x = self.conv1(x) + x = self.dwconv(x) + x = self.conv2(x) + if self.shortcut is not None: + shortcut = self.shortcut(identity) + x = x + shortcut + else: + x = x + identity + x = self.act(x) + return x + + +class CEBlock(BaseModule): + """Context Embedding Block for large receptive filed in Semantic Branch. + + Args: + in_channels (int): Number of input channels. + Default: 3. + out_channels (int): Number of output channels. + Default: 16. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + x (torch.Tensor): Last feature map in Semantic Branch. 
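+
+    Example (an illustrative sketch; batch size and spatial size are
+        arbitrary; note the residual addition of the pooled context implies
+        ``in_channels`` and ``out_channels`` should match, as they do where
+        ``SemanticBranch`` builds this block):
+        >>> import torch
+        >>> ce = CEBlock(in_channels=128, out_channels=128)
+        >>> ce(torch.randn(2, 128, 16, 32)).shape
+        torch.Size([2, 128, 16, 32])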
+ """ + + def __init__(self, + in_channels=3, + out_channels=16, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(CEBlock, self).__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.gap = nn.Sequential( + nn.AdaptiveAvgPool2d((1, 1)), + build_norm_layer(norm_cfg, self.in_channels)[1]) + self.conv_gap = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + # Note: in paper here is naive conv2d, no bn-relu + self.conv_last = ConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + identity = x + x = self.gap(x) + x = self.conv_gap(x) + x = identity + x + x = self.conv_last(x) + return x + + +class SemanticBranch(BaseModule): + """Semantic Branch which is lightweight with narrow channels and deep + layers to obtain high-level semantic context. + + Args: + semantic_channels(Tuple[int]): Size of channel numbers of + various stages in Semantic Branch. + Default: (16, 32, 64, 128). + in_channels (int): Number of channels of input image. Default: 3. + exp_ratio (int): Expansion ratio for middle channels. + Default: 6. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + semantic_outs (List[torch.Tensor]): List of several feature maps + for auxiliary heads (Booster) and Bilateral + Guided Aggregation Layer. + """ + + def __init__(self, + semantic_channels=(16, 32, 64, 128), + in_channels=3, + exp_ratio=6, + init_cfg=None): + super(SemanticBranch, self).__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.semantic_channels = semantic_channels + self.semantic_stages = [] + for i in range(len(semantic_channels)): + stage_name = f'stage{i + 1}' + self.semantic_stages.append(stage_name) + if i == 0: + self.add_module( + stage_name, + StemBlock(self.in_channels, semantic_channels[i])) + elif i == (len(semantic_channels) - 1): + self.add_module( + stage_name, + nn.Sequential( + GELayer(semantic_channels[i - 1], semantic_channels[i], + exp_ratio, 2), + GELayer(semantic_channels[i], semantic_channels[i], + exp_ratio, 1), + GELayer(semantic_channels[i], semantic_channels[i], + exp_ratio, 1), + GELayer(semantic_channels[i], semantic_channels[i], + exp_ratio, 1))) + else: + self.add_module( + stage_name, + nn.Sequential( + GELayer(semantic_channels[i - 1], semantic_channels[i], + exp_ratio, 2), + GELayer(semantic_channels[i], semantic_channels[i], + exp_ratio, 1))) + + self.add_module(f'stage{len(semantic_channels)}_CEBlock', + CEBlock(semantic_channels[-1], semantic_channels[-1])) + self.semantic_stages.append(f'stage{len(semantic_channels)}_CEBlock') + + def forward(self, x): + semantic_outs = [] + for stage_name in self.semantic_stages: + semantic_stage = getattr(self, stage_name) + x = semantic_stage(x) + semantic_outs.append(x) + return semantic_outs + + +class BGALayer(BaseModule): + """Bilateral Guided Aggregation Layer to fuse the complementary information + from both Detail Branch and Semantic Branch. + + Args: + out_channels (int): Number of output channels. + Default: 128. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + conv_cfg (dict | None): Config of conv layers. + Default: None. 
+ norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Returns: + output (torch.Tensor): Output feature map for Segment heads. + """ + + def __init__(self, + out_channels=128, + align_corners=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(BGALayer, self).__init__(init_cfg=init_cfg) + self.out_channels = out_channels + self.align_corners = align_corners + self.detail_dwconv = nn.Sequential( + DepthwiseSeparableConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + dw_norm_cfg=norm_cfg, + dw_act_cfg=None, + pw_norm_cfg=None, + pw_act_cfg=None, + )) + self.detail_down = nn.Sequential( + ConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)) + self.semantic_conv = nn.Sequential( + ConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None)) + self.semantic_dwconv = nn.Sequential( + DepthwiseSeparableConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + dw_norm_cfg=norm_cfg, + dw_act_cfg=None, + pw_norm_cfg=None, + pw_act_cfg=None, + )) + self.conv = ConvModule( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + inplace=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + + def forward(self, x_d, x_s): + detail_dwconv = self.detail_dwconv(x_d) + detail_down = self.detail_down(x_d) + semantic_conv = self.semantic_conv(x_s) + semantic_dwconv = self.semantic_dwconv(x_s) + semantic_conv = resize( + input=semantic_conv, + size=detail_dwconv.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + fuse_1 = detail_dwconv * torch.sigmoid(semantic_conv) + fuse_2 = detail_down * torch.sigmoid(semantic_dwconv) + fuse_2 = resize( + input=fuse_2, + size=fuse_1.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + output = self.conv(fuse_1 + fuse_2) + return output + + +@BACKBONES.register_module() +class BiSeNetV2(BaseModule): + """BiSeNetV2: Bilateral Network with Guided Aggregation for + Real-time Semantic Segmentation. + + This backbone is the implementation of + `BiSeNetV2 `_. + + Args: + in_channels (int): Number of channel of input image. Default: 3. + detail_channels (Tuple[int], optional): Channels of each stage + in Detail Branch. Default: (64, 64, 128). + semantic_channels (Tuple[int], optional): Channels of each stage + in Semantic Branch. Default: (16, 32, 64, 128). + See Table 1 and Figure 3 of paper for more details. + semantic_expansion_ratio (int, optional): The expansion factor + expanding channel number of middle channels in Semantic Branch. + Default: 6. + bga_channels (int, optional): Number of middle channels in + Bilateral Guided Aggregation Layer. Default: 128. + out_indices (Tuple[int] | int, optional): Output from which stages. + Default: (0, 1, 2, 3, 4). 
+ align_corners (bool, optional): The align_corners argument of + resize operation in Bilateral Guided Aggregation Layer. + Default: False. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels=3, + detail_channels=(64, 64, 128), + semantic_channels=(16, 32, 64, 128), + semantic_expansion_ratio=6, + bga_channels=128, + out_indices=(0, 1, 2, 3, 4), + align_corners=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + if init_cfg is None: + init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ] + super(BiSeNetV2, self).__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_indices = out_indices + self.detail_channels = detail_channels + self.semantic_channels = semantic_channels + self.semantic_expansion_ratio = semantic_expansion_ratio + self.bga_channels = bga_channels + self.align_corners = align_corners + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.detail = DetailBranch(self.detail_channels, self.in_channels) + self.semantic = SemanticBranch(self.semantic_channels, + self.in_channels, + self.semantic_expansion_ratio) + self.bga = BGALayer(self.bga_channels, self.align_corners) + + def forward(self, x): + # stole refactoring code from Coin Cheung, thanks + x_detail = self.detail(x) + x_semantic_lst = self.semantic(x) + x_head = self.bga(x_detail, x_semantic_lst[-1]) + outs = [x_head] + x_semantic_lst[:-1] + outs = [outs[i] for i in self.out_indices] + return tuple(outs) diff --git a/downstream/mmsegmentation/mmseg/models/backbones/cgnet.py b/downstream/mmsegmentation/mmseg/models/backbones/cgnet.py new file mode 100644 index 0000000..168194c --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/cgnet.py @@ -0,0 +1,372 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES + + +class GlobalContextExtractor(nn.Module): + """Global Context Extractor for CGNet. + + This class is employed to refine the joint feature of both local feature + and surrounding context. + + Args: + channel (int): Number of input feature channels. + reduction (int): Reductions for global context extractor. Default: 16. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. 
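+
+    Example (an illustrative sketch; channel count and spatial size are
+        arbitrary):
+        >>> import torch
+        >>> gce = GlobalContextExtractor(channel=64, reduction=16)
+        >>> gce(torch.randn(2, 64, 32, 32)).shape
+        torch.Size([2, 64, 32, 32])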
+ """ + + def __init__(self, channel, reduction=16, with_cp=False): + super(GlobalContextExtractor, self).__init__() + self.channel = channel + self.reduction = reduction + assert reduction >= 1 and channel >= reduction + self.with_cp = with_cp + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction), nn.ReLU(inplace=True), + nn.Linear(channel // reduction, channel), nn.Sigmoid()) + + def forward(self, x): + + def _inner_forward(x): + num_batch, num_channel = x.size()[:2] + y = self.avg_pool(x).view(num_batch, num_channel) + y = self.fc(y).view(num_batch, num_channel, 1, 1) + return x * y + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class ContextGuidedBlock(nn.Module): + """Context Guided Block for CGNet. + + This class consists of four components: local feature extractor, + surrounding feature extractor, joint feature extractor and global + context extractor. + + Args: + in_channels (int): Number of input feature channels. + out_channels (int): Number of output feature channels. + dilation (int): Dilation rate for surrounding context extractor. + Default: 2. + reduction (int): Reduction for global context extractor. Default: 16. + skip_connect (bool): Add input to output or not. Default: True. + downsample (bool): Downsample the input to 1/2 or not. Default: False. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='PReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. 
+ """ + + def __init__(self, + in_channels, + out_channels, + dilation=2, + reduction=16, + skip_connect=True, + downsample=False, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='PReLU'), + with_cp=False): + super(ContextGuidedBlock, self).__init__() + self.with_cp = with_cp + self.downsample = downsample + + channels = out_channels if downsample else out_channels // 2 + if 'type' in act_cfg and act_cfg['type'] == 'PReLU': + act_cfg['num_parameters'] = channels + kernel_size = 3 if downsample else 1 + stride = 2 if downsample else 1 + padding = (kernel_size - 1) // 2 + + self.conv1x1 = ConvModule( + in_channels, + channels, + kernel_size, + stride, + padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.f_loc = build_conv_layer( + conv_cfg, + channels, + channels, + kernel_size=3, + padding=1, + groups=channels, + bias=False) + self.f_sur = build_conv_layer( + conv_cfg, + channels, + channels, + kernel_size=3, + padding=dilation, + groups=channels, + dilation=dilation, + bias=False) + + self.bn = build_norm_layer(norm_cfg, 2 * channels)[1] + self.activate = nn.PReLU(2 * channels) + + if downsample: + self.bottleneck = build_conv_layer( + conv_cfg, + 2 * channels, + out_channels, + kernel_size=1, + bias=False) + + self.skip_connect = skip_connect and not downsample + self.f_glo = GlobalContextExtractor(out_channels, reduction, with_cp) + + def forward(self, x): + + def _inner_forward(x): + out = self.conv1x1(x) + loc = self.f_loc(out) + sur = self.f_sur(out) + + joi_feat = torch.cat([loc, sur], 1) # the joint feature + joi_feat = self.bn(joi_feat) + joi_feat = self.activate(joi_feat) + if self.downsample: + joi_feat = self.bottleneck(joi_feat) # channel = out_channels + # f_glo is employed to refine the joint feature + out = self.f_glo(joi_feat) + + if self.skip_connect: + return x + out + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class InputInjection(nn.Module): + """Downsampling module for CGNet.""" + + def __init__(self, num_downsampling): + super(InputInjection, self).__init__() + self.pool = nn.ModuleList() + for i in range(num_downsampling): + self.pool.append(nn.AvgPool2d(3, stride=2, padding=1)) + + def forward(self, x): + for pool in self.pool: + x = pool(x) + return x + + +@BACKBONES.register_module() +class CGNet(BaseModule): + """CGNet backbone. + + This backbone is the implementation of `A Light-weight Context Guided + Network for Semantic Segmentation `_. + + Args: + in_channels (int): Number of input image channels. Normally 3. + num_channels (tuple[int]): Numbers of feature channels at each stages. + Default: (32, 64, 128). + num_blocks (tuple[int]): Numbers of CG blocks at stage 1 and stage 2. + Default: (3, 21). + dilations (tuple[int]): Dilation rate for surrounding context + extractors at stage 1 and stage 2. Default: (2, 4). + reductions (tuple[int]): Reductions for global context extractors at + stage 1 and stage 2. Default: (8, 16). + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='PReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. 
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels=3, + num_channels=(32, 64, 128), + num_blocks=(3, 21), + dilations=(2, 4), + reductions=(8, 16), + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='PReLU'), + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + + super(CGNet, self).__init__(init_cfg) + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer=['Conv2d', 'Linear']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']), + dict(type='Constant', val=0, layer='PReLU') + ] + else: + raise TypeError('pretrained must be a str or None') + + self.in_channels = in_channels + self.num_channels = num_channels + assert isinstance(self.num_channels, tuple) and len( + self.num_channels) == 3 + self.num_blocks = num_blocks + assert isinstance(self.num_blocks, tuple) and len(self.num_blocks) == 2 + self.dilations = dilations + assert isinstance(self.dilations, tuple) and len(self.dilations) == 2 + self.reductions = reductions + assert isinstance(self.reductions, tuple) and len(self.reductions) == 2 + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + if 'type' in self.act_cfg and self.act_cfg['type'] == 'PReLU': + self.act_cfg['num_parameters'] = num_channels[0] + self.norm_eval = norm_eval + self.with_cp = with_cp + + cur_channels = in_channels + self.stem = nn.ModuleList() + for i in range(3): + self.stem.append( + ConvModule( + cur_channels, + num_channels[0], + 3, + 2 if i == 0 else 1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + cur_channels = num_channels[0] + + self.inject_2x = InputInjection(1) # down-sample for Input, factor=2 + self.inject_4x = InputInjection(2) # down-sample for Input, factor=4 + + cur_channels += in_channels + self.norm_prelu_0 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + # stage 1 + self.level1 = nn.ModuleList() + for i in range(num_blocks[0]): + self.level1.append( + ContextGuidedBlock( + cur_channels if i == 0 else num_channels[1], + num_channels[1], + dilations[0], + reductions[0], + downsample=(i == 0), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) # CG block + + cur_channels = 2 * num_channels[1] + in_channels + self.norm_prelu_1 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + # stage 2 + self.level2 = nn.ModuleList() + for i in range(num_blocks[1]): + self.level2.append( + ContextGuidedBlock( + cur_channels if i == 0 else num_channels[2], + num_channels[2], + dilations[1], + reductions[1], + downsample=(i == 0), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) # CG block + + cur_channels = 2 * num_channels[2] + self.norm_prelu_2 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + def forward(self, x): + 
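+        # Three stages are computed in sequence and each concatenated
+        # feature map is collected as an output:
+        #   stage 0: stem convs (1/2 resolution) concatenated with the
+        #            2x-downsampled input image;
+        #   stage 1: CG blocks at 1/4 resolution, concatenated with the
+        #            output of the stage's first (downsampling) block and
+        #            the 4x-downsampled input image;
+        #   stage 2: CG blocks at 1/8 resolution, concatenated with the
+        #            output of the stage's first (downsampling) block.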
output = [] + + # stage 0 + inp_2x = self.inject_2x(x) + inp_4x = self.inject_4x(x) + for layer in self.stem: + x = layer(x) + x = self.norm_prelu_0(torch.cat([x, inp_2x], 1)) + output.append(x) + + # stage 1 + for i, layer in enumerate(self.level1): + x = layer(x) + if i == 0: + down1 = x + x = self.norm_prelu_1(torch.cat([x, down1, inp_4x], 1)) + output.append(x) + + # stage 2 + for i, layer in enumerate(self.level2): + x = layer(x) + if i == 0: + down2 = x + x = self.norm_prelu_2(torch.cat([down2, x], 1)) + output.append(x) + + return output + + def train(self, mode=True): + """Convert the model into training mode will keeping the normalization + layer freezed.""" + super(CGNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/downstream/mmsegmentation/mmseg/models/backbones/erfnet.py b/downstream/mmsegmentation/mmseg/models/backbones/erfnet.py new file mode 100644 index 0000000..8921c18 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/erfnet.py @@ -0,0 +1,329 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule + +from mmseg.ops import resize +from ..builder import BACKBONES + + +class DownsamplerBlock(BaseModule): + """Downsampler block of ERFNet. + + This module is a little different from basical ConvModule. + The features from Conv and MaxPool layers are + concatenated before BatchNorm. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(DownsamplerBlock, self).__init__(init_cfg=init_cfg) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.conv = build_conv_layer( + self.conv_cfg, + in_channels, + out_channels - in_channels, + kernel_size=3, + stride=2, + padding=1) + self.pool = nn.MaxPool2d(kernel_size=2, stride=2) + self.bn = build_norm_layer(self.norm_cfg, out_channels)[1] + self.act = build_activation_layer(self.act_cfg) + + def forward(self, input): + conv_out = self.conv(input) + pool_out = self.pool(input) + pool_out = resize( + input=pool_out, + size=conv_out.size()[2:], + mode='bilinear', + align_corners=False) + output = torch.cat([conv_out, pool_out], 1) + output = self.bn(output) + output = self.act(output) + return output + + +class NonBottleneck1d(BaseModule): + """Non-bottleneck block of ERFNet. + + Args: + channels (int): Number of channels in Non-bottleneck block. + drop_rate (float): Probability of an element to be zeroed. + Default 0. + dilation (int): Dilation rate for last two conv layers. + Default 1. + num_conv_layer (int): Number of 3x1 and 1x3 convolution layers. + Default 2. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). 
+ init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + channels, + drop_rate=0, + dilation=1, + num_conv_layer=2, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(NonBottleneck1d, self).__init__(init_cfg=init_cfg) + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.act = build_activation_layer(self.act_cfg) + + self.convs_layers = nn.ModuleList() + for conv_layer in range(num_conv_layer): + first_conv_padding = (1, 0) if conv_layer == 0 else (dilation, 0) + first_conv_dilation = 1 if conv_layer == 0 else (dilation, 1) + second_conv_padding = (0, 1) if conv_layer == 0 else (0, dilation) + second_conv_dilation = 1 if conv_layer == 0 else (1, dilation) + + self.convs_layers.append( + build_conv_layer( + self.conv_cfg, + channels, + channels, + kernel_size=(3, 1), + stride=1, + padding=first_conv_padding, + bias=True, + dilation=first_conv_dilation)) + self.convs_layers.append(self.act) + self.convs_layers.append( + build_conv_layer( + self.conv_cfg, + channels, + channels, + kernel_size=(1, 3), + stride=1, + padding=second_conv_padding, + bias=True, + dilation=second_conv_dilation)) + self.convs_layers.append( + build_norm_layer(self.norm_cfg, channels)[1]) + if conv_layer == 0: + self.convs_layers.append(self.act) + else: + self.convs_layers.append(nn.Dropout(p=drop_rate)) + + def forward(self, input): + output = input + for conv in self.convs_layers: + output = conv(output) + output = self.act(output + input) + return output + + +class UpsamplerBlock(BaseModule): + """Upsampler block of ERFNet. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(UpsamplerBlock, self).__init__(init_cfg=init_cfg) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.conv = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + output_padding=1, + bias=True) + self.bn = build_norm_layer(self.norm_cfg, out_channels)[1] + self.act = build_activation_layer(self.act_cfg) + + def forward(self, input): + output = self.conv(input) + output = self.bn(output) + output = self.act(output) + return output + + +@BACKBONES.register_module() +class ERFNet(BaseModule): + """ERFNet backbone. + + This backbone is the implementation of `ERFNet: Efficient Residual + Factorized ConvNet for Real-time SemanticSegmentation + `_. + + Args: + in_channels (int): The number of channels of input + image. Default: 3. + enc_downsample_channels (Tuple[int]): Size of channel + numbers of various Downsampler block in encoder. + Default: (16, 64, 128). + enc_stage_non_bottlenecks (Tuple[int]): Number of stages of + Non-bottleneck block in encoder. + Default: (5, 8). + enc_non_bottleneck_dilations (Tuple[int]): Dilation rate of each + stage of Non-bottleneck block of encoder. + Default: (2, 4, 8, 16). 
+ enc_non_bottleneck_channels (Tuple[int]): Size of channel + numbers of various Non-bottleneck block in encoder. + Default: (64, 128). + dec_upsample_channels (Tuple[int]): Size of channel numbers of + various Deconvolution block in decoder. + Default: (64, 16). + dec_stages_non_bottleneck (Tuple[int]): Number of stages of + Non-bottleneck block in decoder. + Default: (2, 2). + dec_non_bottleneck_channels (Tuple[int]): Size of channel + numbers of various Non-bottleneck block in decoder. + Default: (64, 16). + drop_rate (float): Probability of an element to be zeroed. + Default 0.1. + """ + + def __init__(self, + in_channels=3, + enc_downsample_channels=(16, 64, 128), + enc_stage_non_bottlenecks=(5, 8), + enc_non_bottleneck_dilations=(2, 4, 8, 16), + enc_non_bottleneck_channels=(64, 128), + dec_upsample_channels=(64, 16), + dec_stages_non_bottleneck=(2, 2), + dec_non_bottleneck_channels=(64, 16), + dropout_ratio=0.1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + init_cfg=None): + + super(ERFNet, self).__init__(init_cfg=init_cfg) + assert len(enc_downsample_channels) \ + == len(dec_upsample_channels)+1, 'Number of downsample\ + block of encoder does not \ + match number of upsample block of decoder!' + assert len(enc_downsample_channels) \ + == len(enc_stage_non_bottlenecks)+1, 'Number of \ + downsample block of encoder does not match \ + number of Non-bottleneck block of encoder!' + assert len(enc_downsample_channels) \ + == len(enc_non_bottleneck_channels)+1, 'Number of \ + downsample block of encoder does not match \ + number of channels of Non-bottleneck block of encoder!' + assert enc_stage_non_bottlenecks[-1] \ + % len(enc_non_bottleneck_dilations) == 0, 'Number of \ + Non-bottleneck block of encoder does not match \ + number of Non-bottleneck block of encoder!' + assert len(dec_upsample_channels) \ + == len(dec_stages_non_bottleneck), 'Number of \ + upsample block of decoder does not match \ + number of Non-bottleneck block of decoder!' + assert len(dec_stages_non_bottleneck) \ + == len(dec_non_bottleneck_channels), 'Number of \ + Non-bottleneck block of decoder does not match \ + number of channels of Non-bottleneck block of decoder!' + + self.in_channels = in_channels + self.enc_downsample_channels = enc_downsample_channels + self.enc_stage_non_bottlenecks = enc_stage_non_bottlenecks + self.enc_non_bottleneck_dilations = enc_non_bottleneck_dilations + self.enc_non_bottleneck_channels = enc_non_bottleneck_channels + self.dec_upsample_channels = dec_upsample_channels + self.dec_stages_non_bottleneck = dec_stages_non_bottleneck + self.dec_non_bottleneck_channels = dec_non_bottleneck_channels + self.dropout_ratio = dropout_ratio + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.encoder.append( + DownsamplerBlock(self.in_channels, enc_downsample_channels[0])) + + for i in range(len(enc_downsample_channels) - 1): + self.encoder.append( + DownsamplerBlock(enc_downsample_channels[i], + enc_downsample_channels[i + 1])) + # Last part of encoder is some dilated NonBottleneck1d blocks. 
+ if i == len(enc_downsample_channels) - 2: + iteration_times = int(enc_stage_non_bottlenecks[-1] / + len(enc_non_bottleneck_dilations)) + for j in range(iteration_times): + for k in range(len(enc_non_bottleneck_dilations)): + self.encoder.append( + NonBottleneck1d(enc_downsample_channels[-1], + self.dropout_ratio, + enc_non_bottleneck_dilations[k])) + else: + for j in range(enc_stage_non_bottlenecks[i]): + self.encoder.append( + NonBottleneck1d(enc_downsample_channels[i + 1], + self.dropout_ratio)) + + for i in range(len(dec_upsample_channels)): + if i == 0: + self.decoder.append( + UpsamplerBlock(enc_downsample_channels[-1], + dec_non_bottleneck_channels[i])) + else: + self.decoder.append( + UpsamplerBlock(dec_non_bottleneck_channels[i - 1], + dec_non_bottleneck_channels[i])) + for j in range(dec_stages_non_bottleneck[i]): + self.decoder.append( + NonBottleneck1d(dec_non_bottleneck_channels[i])) + + def forward(self, x): + for enc in self.encoder: + x = enc(x) + for dec in self.decoder: + x = dec(x) + return [x] diff --git a/downstream/mmsegmentation/mmseg/models/backbones/fast_scnn.py b/downstream/mmsegmentation/mmseg/models/backbones/fast_scnn.py new file mode 100644 index 0000000..cbfbcaf --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/fast_scnn.py @@ -0,0 +1,409 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.runner import BaseModule + +from mmseg.models.decode_heads.psp_head import PPM +from mmseg.ops import resize +from ..builder import BACKBONES +from ..utils import InvertedResidual + + +class LearningToDownsample(nn.Module): + """Learning to downsample module. + + Args: + in_channels (int): Number of input channels. + dw_channels (tuple[int]): Number of output channels of the first and + the second depthwise conv (dwconv) layers. + out_channels (int): Number of output channels of the whole + 'learning to downsample' module. + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + dw_act_cfg (dict): In DepthwiseSeparableConvModule, activation config + of depthwise ConvModule. If it is 'default', it will be the same + as `act_cfg`. Default: None. + """ + + def __init__(self, + in_channels, + dw_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + dw_act_cfg=None): + super(LearningToDownsample, self).__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.dw_act_cfg = dw_act_cfg + dw_channels1 = dw_channels[0] + dw_channels2 = dw_channels[1] + + self.conv = ConvModule( + in_channels, + dw_channels1, + 3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.dsconv1 = DepthwiseSeparableConvModule( + dw_channels1, + dw_channels2, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + dw_act_cfg=self.dw_act_cfg) + + self.dsconv2 = DepthwiseSeparableConvModule( + dw_channels2, + out_channels, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + dw_act_cfg=self.dw_act_cfg) + + def forward(self, x): + x = self.conv(x) + x = self.dsconv1(x) + x = self.dsconv2(x) + return x + + +class GlobalFeatureExtractor(nn.Module): + """Global feature extractor module. 
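For orientation, the ERFNet assembly above yields a fairly deep encoder and a light decoder from the default arguments. The sketch below is a rough illustration, assuming those defaults and an installed mmsegmentation; it only reports the module counts implied by the constructor loops shown above.

```python
import torch

from mmseg.models import build_backbone

erfnet = build_backbone(dict(type='ERFNet'))  # default arguments
erfnet.eval()

# With the defaults, the encoder holds 3 DownsamplerBlocks plus 5 + 8
# NonBottleneck1d blocks, and the decoder 2 UpsamplerBlocks plus 2 + 2
# NonBottleneck1d blocks (see the constructor loops above).
print(len(erfnet.encoder), len(erfnet.decoder))

with torch.no_grad():
    out = erfnet(torch.rand(1, 3, 512, 1024))[0]
print(tuple(out.shape))
```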
+ + Args: + in_channels (int): Number of input channels of the GFE module. + Default: 64 + block_channels (tuple[int]): Tuple of ints. Each int specifies the + number of output channels of each Inverted Residual module. + Default: (64, 96, 128) + out_channels(int): Number of output channels of the GFE module. + Default: 128 + expand_ratio (int): Adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + Default: 6 + num_blocks (tuple[int]): Tuple of ints. Each int specifies the + number of times each Inverted Residual module is repeated. + The repeated Inverted Residual modules are called a 'group'. + Default: (3, 3, 3) + strides (tuple[int]): Tuple of ints. Each int specifies + the downsampling factor of each 'group'. + Default: (2, 2, 1) + pool_scales (tuple[int]): Tuple of ints. Each int specifies + the parameter required in 'global average pooling' within PPM. + Default: (1, 2, 3, 6) + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + align_corners (bool): align_corners argument of F.interpolate. + Default: False + """ + + def __init__(self, + in_channels=64, + block_channels=(64, 96, 128), + out_channels=128, + expand_ratio=6, + num_blocks=(3, 3, 3), + strides=(2, 2, 1), + pool_scales=(1, 2, 3, 6), + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False): + super(GlobalFeatureExtractor, self).__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + assert len(block_channels) == len(num_blocks) == 3 + self.bottleneck1 = self._make_layer(in_channels, block_channels[0], + num_blocks[0], strides[0], + expand_ratio) + self.bottleneck2 = self._make_layer(block_channels[0], + block_channels[1], num_blocks[1], + strides[1], expand_ratio) + self.bottleneck3 = self._make_layer(block_channels[1], + block_channels[2], num_blocks[2], + strides[2], expand_ratio) + self.ppm = PPM( + pool_scales, + block_channels[2], + block_channels[2] // 4, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=align_corners) + + self.out = ConvModule( + block_channels[2] * 2, + out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _make_layer(self, + in_channels, + out_channels, + blocks, + stride=1, + expand_ratio=6): + layers = [ + InvertedResidual( + in_channels, + out_channels, + stride, + expand_ratio, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + ] + for i in range(1, blocks): + layers.append( + InvertedResidual( + out_channels, + out_channels, + 1, + expand_ratio, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + return nn.Sequential(*layers) + + def forward(self, x): + x = self.bottleneck1(x) + x = self.bottleneck2(x) + x = self.bottleneck3(x) + x = torch.cat([x, *self.ppm(x)], dim=1) + x = self.out(x) + return x + + +class FeatureFusionModule(nn.Module): + """Feature fusion module. + + Args: + higher_in_channels (int): Number of input channels of the + higher-resolution branch. + lower_in_channels (int): Number of input channels of the + lower-resolution branch. + out_channels (int): Number of output channels. + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + dwconv_act_cfg (dict): Config of activation layers in 3x3 conv. 
+ Default: dict(type='ReLU'). + conv_act_cfg (dict): Config of activation layers in the two 1x1 conv. + Default: None. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + """ + + def __init__(self, + higher_in_channels, + lower_in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dwconv_act_cfg=dict(type='ReLU'), + conv_act_cfg=None, + align_corners=False): + super(FeatureFusionModule, self).__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dwconv_act_cfg = dwconv_act_cfg + self.conv_act_cfg = conv_act_cfg + self.align_corners = align_corners + self.dwconv = ConvModule( + lower_in_channels, + out_channels, + 3, + padding=1, + groups=out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.dwconv_act_cfg) + self.conv_lower_res = ConvModule( + out_channels, + out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.conv_act_cfg) + + self.conv_higher_res = ConvModule( + higher_in_channels, + out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.conv_act_cfg) + + self.relu = nn.ReLU(True) + + def forward(self, higher_res_feature, lower_res_feature): + lower_res_feature = resize( + lower_res_feature, + size=higher_res_feature.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + lower_res_feature = self.dwconv(lower_res_feature) + lower_res_feature = self.conv_lower_res(lower_res_feature) + + higher_res_feature = self.conv_higher_res(higher_res_feature) + out = higher_res_feature + lower_res_feature + return self.relu(out) + + +@BACKBONES.register_module() +class FastSCNN(BaseModule): + """Fast-SCNN Backbone. + + This backbone is the implementation of `Fast-SCNN: Fast Semantic + Segmentation Network `_. + + Args: + in_channels (int): Number of input image channels. Default: 3. + downsample_dw_channels (tuple[int]): Number of output channels after + the first conv layer & the second conv layer in + Learning-To-Downsample (LTD) module. + Default: (32, 48). + global_in_channels (int): Number of input channels of + Global Feature Extractor(GFE). + Equal to number of output channels of LTD. + Default: 64. + global_block_channels (tuple[int]): Tuple of integers that describe + the output channels for each of the MobileNet-v2 bottleneck + residual blocks in GFE. + Default: (64, 96, 128). + global_block_strides (tuple[int]): Tuple of integers + that describe the strides (downsampling factors) for each of the + MobileNet-v2 bottleneck residual blocks in GFE. + Default: (2, 2, 1). + global_out_channels (int): Number of output channels of GFE. + Default: 128. + higher_in_channels (int): Number of input channels of the higher + resolution branch in FFM. + Equal to global_in_channels. + Default: 64. + lower_in_channels (int): Number of input channels of the lower + resolution branch in FFM. + Equal to global_out_channels. + Default: 128. + fusion_out_channels (int): Number of output channels of FFM. + Default: 128. + out_indices (tuple): Tuple of indices of list + [higher_res_features, lower_res_features, fusion_output]. + Often set to (0,1,2) to enable aux. heads. + Default: (0, 1, 2). + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + align_corners (bool): align_corners argument of F.interpolate. 
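The FeatureFusionModule defined above merges the two Fast-SCNN branches by resizing the low-resolution path to the high-resolution path's size, projecting both to a common width, and summing them. A small, hedged sketch of using it standalone follows; the import path reflects the module location in this diff, and the feature sizes are illustrative.

```python
import torch

from mmseg.models.backbones.fast_scnn import FeatureFusionModule

ffm = FeatureFusionModule(
    higher_in_channels=64, lower_in_channels=128, out_channels=128)

higher = torch.rand(1, 64, 64, 128)   # e.g. 1/8-resolution branch
lower = torch.rand(1, 128, 16, 32)    # e.g. 1/32-resolution branch

# Lower-resolution features are resized to the higher branch's size, both
# branches are projected to `out_channels`, summed and passed through ReLU.
fused = ffm(higher, lower)
print(tuple(fused.shape))  # expected: (1, 128, 64, 128)
```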
+ Default: False + dw_act_cfg (dict): In DepthwiseSeparableConvModule, activation config + of depthwise ConvModule. If it is 'default', it will be the same + as `act_cfg`. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels=3, + downsample_dw_channels=(32, 48), + global_in_channels=64, + global_block_channels=(64, 96, 128), + global_block_strides=(2, 2, 1), + global_out_channels=128, + higher_in_channels=64, + lower_in_channels=128, + fusion_out_channels=128, + out_indices=(0, 1, 2), + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + dw_act_cfg=None, + init_cfg=None): + + super(FastSCNN, self).__init__(init_cfg) + + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ] + + if global_in_channels != higher_in_channels: + raise AssertionError('Global Input Channels must be the same \ + with Higher Input Channels!') + elif global_out_channels != lower_in_channels: + raise AssertionError('Global Output Channels must be the same \ + with Lower Input Channels!') + + self.in_channels = in_channels + self.downsample_dw_channels1 = downsample_dw_channels[0] + self.downsample_dw_channels2 = downsample_dw_channels[1] + self.global_in_channels = global_in_channels + self.global_block_channels = global_block_channels + self.global_block_strides = global_block_strides + self.global_out_channels = global_out_channels + self.higher_in_channels = higher_in_channels + self.lower_in_channels = lower_in_channels + self.fusion_out_channels = fusion_out_channels + self.out_indices = out_indices + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.align_corners = align_corners + self.learning_to_downsample = LearningToDownsample( + in_channels, + downsample_dw_channels, + global_in_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + dw_act_cfg=dw_act_cfg) + self.global_feature_extractor = GlobalFeatureExtractor( + global_in_channels, + global_block_channels, + global_out_channels, + strides=self.global_block_strides, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + self.feature_fusion = FeatureFusionModule( + higher_in_channels, + lower_in_channels, + fusion_out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dwconv_act_cfg=self.act_cfg, + align_corners=self.align_corners) + + def forward(self, x): + higher_res_features = self.learning_to_downsample(x) + lower_res_features = self.global_feature_extractor(higher_res_features) + fusion_output = self.feature_fusion(higher_res_features, + lower_res_features) + + outs = [higher_res_features, lower_res_features, fusion_output] + outs = [outs[i] for i in self.out_indices] + return tuple(outs) diff --git a/downstream/mmsegmentation/mmseg/models/backbones/gpvit.py b/downstream/mmsegmentation/mmseg/models/backbones/gpvit.py new file mode 100644 index 0000000..8156cc7 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/gpvit.py @@ -0,0 +1,51 @@ +from mmcls.models.backbones import GPViT + +from ..builder import BACKBONES + + + +@BACKBONES.register_module() +class GPViTSeg(GPViT): + def __init__(self, + arch='', + img_size=224, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + 
interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None, + test_cfg=dict(vis_group=False), + convert_syncbn=False, + freeze_patch_embed=False, + **kwargs): + + self.att_with_cp = False + self.group_with_cp = False + + super(GPViTSeg, self).__init__( + arch, + img_size, + in_channels, + out_indices, + drop_rate, + drop_path_rate, + qkv_bias, + norm_cfg, + final_norm, + interpolate_mode, + patch_cfg, + layer_cfgs, + init_cfg, + test_cfg, + convert_syncbn, + freeze_patch_embed) + + def dummy(self): + pass + diff --git a/downstream/mmsegmentation/mmseg/models/backbones/hrnet.py b/downstream/mmsegmentation/mmseg/models/backbones/hrnet.py new file mode 100644 index 0000000..90feadc --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/hrnet.py @@ -0,0 +1,642 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule, ModuleList, Sequential +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmseg.ops import Upsample, resize +from ..builder import BACKBONES +from .resnet import BasicBlock, Bottleneck + + +class HRModule(BaseModule): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange + is in this module. + """ + + def __init__(self, + num_branches, + blocks, + num_blocks, + in_channels, + num_channels, + multiscale_output=True, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + block_init_cfg=None, + init_cfg=None): + super(HRModule, self).__init__(init_cfg) + self.block_init_cfg = block_init_cfg + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, blocks, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=False) + + def _check_branches(self, num_branches, num_blocks, in_channels, + num_channels): + """Check branches configuration.""" + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS(' \ + f'{len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS(' \ + f'{len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS(' \ + f'{len(in_channels)})' + raise ValueError(error_msg) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + """Build one branch.""" + downsample = None + if stride != 1 or \ + self.in_channels[branch_index] != \ + num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.in_channels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, num_channels[branch_index] * + block.expansion)[1]) + + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + init_cfg=self.block_init_cfg)) + 
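Looking back at the GPViTSeg wrapper above: because it is registered in BACKBONES, it can be selected from a segmentor config purely by its type name. The snippet below is only a hedged sketch; the arch value is a placeholder assumption, since valid arch names are defined by the upstream mmcls GPViT implementation rather than by this diff.

```python
from mmseg.models import build_backbone

# Hypothetical arch name; substitute whichever archs mmcls's GPViT defines.
backbone = build_backbone(dict(type='GPViTSeg', arch='L1'))
```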
self.in_channels[branch_index] = \ + num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + init_cfg=self.block_init_cfg)) + + return Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + """Build multiple branch.""" + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return ModuleList(branches) + + def _make_fuse_layers(self): + """Build fuse layer.""" + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + # we set align_corners=False for HRNet + Upsample( + scale_factor=2**(j - i), + mode='bilinear', + align_corners=False))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=False))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + elif j > i: + y = y + resize( + self.fuse_layers[i][j](x[j]), + size=x[i].shape[2:], + mode='bilinear', + align_corners=False) + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@BACKBONES.register_module() +class HRNet(BaseModule): + """HRNet backbone. + + This backbone is the implementation of `High-Resolution Representations + for Labeling Pixels and Regions `_. + + Args: + extra (dict): Detailed configuration for each stage of HRNet. + There must be 4 stages, the configuration for each stage must have + 5 keys: + + - num_modules (int): The number of HRModule in this stage. + - num_branches (int): The number of branches in the HRModule. + - block (str): The type of convolution block. + - num_blocks (tuple): The number of blocks in each branch. + The length must be equal to num_branches. + - num_channels (tuple): The number of channels in each branch. + The length must be equal to num_branches. + in_channels (int): Number of input image channels. Normally 3. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. 
+ norm_cfg (dict): Dictionary to construct and config norm layer. + Use `BN` by default. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: False. + multiscale_output (bool): Whether to output multi-level features + produced by multiple branches. If False, only the first level + feature will be output. Default: True. + pretrained (str, optional): Model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Example: + >>> from mmseg.models import HRNet + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 32, 8, 8) + (1, 64, 4, 4) + (1, 128, 2, 2) + (1, 256, 1, 1) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + + def __init__(self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + frozen_stages=-1, + zero_init_residual=False, + multiscale_output=True, + pretrained=None, + init_cfg=None): + super(HRNet, self).__init__(init_cfg) + + self.pretrained = pretrained + self.zero_init_residual = zero_init_residual + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + # Assert configurations of 4 stages are in extra + assert 'stage1' in extra and 'stage2' in extra \ + and 'stage3' in extra and 'stage4' in extra + # Assert whether the length of `num_blocks` and `num_channels` are + # equal to `num_branches` + for i in range(4): + cfg = extra[f'stage{i + 1}'] + assert len(cfg['num_blocks']) == cfg['num_branches'] and \ + len(cfg['num_channels']) == cfg['num_branches'] + + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.frozen_stages = frozen_stages + + # stem net + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + 64, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + # stage 1 + self.stage1_cfg = self.extra['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'][0] + + block = self.blocks_dict[block_type] + stage1_out_channels = num_channels * block.expansion + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + + # stage 2 + self.stage2_cfg = self.extra['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = self.stage2_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition1 = self._make_transition_layer([stage1_out_channels], + num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = self.extra['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = self.stage3_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition2 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = self.extra['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type 
= self.stage4_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition3 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, num_channels, multiscale_output=multiscale_output) + + self._freeze_stages() + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + """Make transition layer.""" + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + """Make each layer.""" + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) + + layers = [] + block_init_cfg = None + if self.pretrained is None and not hasattr( + self, 'init_cfg') and self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm3')) + + layers.append( + block( + inplanes, + planes, + stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + init_cfg=block_init_cfg)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + init_cfg=block_init_cfg)) + + return Sequential(*layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + """Make each stage.""" + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + block_init_cfg = None + if self.pretrained is None and not hasattr( + self, 'init_cfg') and self.zero_init_residual: + 
if block is BasicBlock: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm3')) + + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + block_init_cfg=block_init_cfg)) + + return Sequential(*hr_modules), in_channels + + def _freeze_stages(self): + """Freeze stages param and norm stats.""" + if self.frozen_stages >= 0: + + self.norm1.eval() + self.norm2.eval() + for m in [self.conv1, self.norm1, self.conv2, self.norm2]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + if i == 1: + m = getattr(self, f'layer{i}') + t = getattr(self, f'transition{i}') + elif i == 4: + m = getattr(self, f'stage{i}') + else: + m = getattr(self, f'stage{i}') + t = getattr(self, f'transition{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + t.eval() + for param in t.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + return y_list + + def train(self, mode=True): + """Convert the model into training mode will keeping the normalization + layer freezed.""" + super(HRNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/downstream/mmsegmentation/mmseg/models/backbones/icnet.py b/downstream/mmsegmentation/mmseg/models/backbones/icnet.py new file mode 100644 index 0000000..6faaeab --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/icnet.py @@ -0,0 +1,166 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from mmseg.ops import resize +from ..builder import BACKBONES, build_backbone +from ..decode_heads.psp_head import PPM + + +@BACKBONES.register_module() +class ICNet(BaseModule): + """ICNet for Real-Time Semantic Segmentation on High-Resolution Images. + + This backbone is the implementation of + `ICNet `_. + + Args: + backbone_cfg (dict): Config dict to build backbone. Usually it is + ResNet but it can also be other backbones. + in_channels (int): The number of input image channels. Default: 3. 
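To tie the HRNet pieces above together, the sketch below builds the small configuration from the class docstring's example and freezes the stem plus stage 1, which is what `frozen_stages=1` does in `_freeze_stages`. This is a hedged illustration, not taken from a reference config.

```python
from mmseg.models import HRNet

extra = dict(
    stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK',
                num_blocks=(4, ), num_channels=(64, )),
    stage2=dict(num_modules=1, num_branches=2, block='BASIC',
                num_blocks=(4, 4), num_channels=(32, 64)),
    stage3=dict(num_modules=4, num_branches=3, block='BASIC',
                num_blocks=(4, 4, 4), num_channels=(32, 64, 128)),
    stage4=dict(num_modules=3, num_branches=4, block='BASIC',
                num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)))

# frozen_stages=1 puts the stem convs, layer1 and transition1 in eval mode
# and sets requires_grad=False on their parameters.
net = HRNet(extra, in_channels=3, frozen_stages=1)
num_frozen = sum(1 for p in net.parameters() if not p.requires_grad)
print(f'{num_frozen} parameter tensors frozen')
```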
+ layer_channels (Sequence[int]): The numbers of feature channels at + layer 2 and layer 4 in ResNet. It can also be other backbones. + Default: (512, 2048). + light_branch_middle_channels (int): The number of channels of the + middle layer in light branch. Default: 32. + psp_out_channels (int): The number of channels of the output of PSP + module. Default: 512. + out_channels (Sequence[int]): The numbers of output feature channels + at each branches. Default: (64, 256, 256). + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. Default: (1, 2, 3, 6). + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN'). + act_cfg (dict): Dictionary to construct and config act layer. + Default: dict(type='ReLU'). + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + backbone_cfg, + in_channels=3, + layer_channels=(512, 2048), + light_branch_middle_channels=32, + psp_out_channels=512, + out_channels=(64, 256, 256), + pool_scales=(1, 2, 3, 6), + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU'), + align_corners=False, + init_cfg=None): + if backbone_cfg is None: + raise TypeError('backbone_cfg must be passed from config file!') + if init_cfg is None: + init_cfg = [ + dict(type='Kaiming', mode='fan_out', layer='Conv2d'), + dict(type='Constant', val=1, layer='_BatchNorm'), + dict(type='Normal', mean=0.01, layer='Linear') + ] + super(ICNet, self).__init__(init_cfg=init_cfg) + self.align_corners = align_corners + self.backbone = build_backbone(backbone_cfg) + + # Note: Default `ceil_mode` is false in nn.MaxPool2d, set + # `ceil_mode=True` to keep information in the corner of feature map. 
+ self.backbone.maxpool = nn.MaxPool2d( + kernel_size=3, stride=2, padding=1, ceil_mode=True) + + self.psp_modules = PPM( + pool_scales=pool_scales, + in_channels=layer_channels[1], + channels=psp_out_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + align_corners=align_corners) + + self.psp_bottleneck = ConvModule( + layer_channels[1] + len(pool_scales) * psp_out_channels, + psp_out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.conv_sub1 = nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=light_branch_middle_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + ConvModule( + in_channels=light_branch_middle_channels, + out_channels=light_branch_middle_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg), + ConvModule( + in_channels=light_branch_middle_channels, + out_channels=out_channels[0], + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + + self.conv_sub2 = ConvModule( + layer_channels[0], + out_channels[1], + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + self.conv_sub4 = ConvModule( + psp_out_channels, + out_channels[2], + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + def forward(self, x): + output = [] + + # sub 1 + output.append(self.conv_sub1(x)) + + # sub 2 + x = resize( + x, + scale_factor=0.5, + mode='bilinear', + align_corners=self.align_corners) + x = self.backbone.stem(x) + x = self.backbone.maxpool(x) + x = self.backbone.layer1(x) + x = self.backbone.layer2(x) + output.append(self.conv_sub2(x)) + + # sub 4 + x = resize( + x, + scale_factor=0.5, + mode='bilinear', + align_corners=self.align_corners) + x = self.backbone.layer3(x) + x = self.backbone.layer4(x) + psp_outs = self.psp_modules(x) + [x] + psp_outs = torch.cat(psp_outs, dim=1) + x = self.psp_bottleneck(psp_outs) + + output.append(self.conv_sub4(x)) + + return output diff --git a/downstream/mmsegmentation/mmseg/models/backbones/mit.py b/downstream/mmsegmentation/mmseg/models/backbones/mit.py new file mode 100644 index 0000000..4417cf1 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/mit.py @@ -0,0 +1,450 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import MultiheadAttention +from mmcv.cnn.utils.weight_init import (constant_init, normal_init, + trunc_normal_init) +from mmcv.runner import BaseModule, ModuleList, Sequential + +from ..builder import BACKBONES +from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw + + +class MixFFN(BaseModule): + """An implementation of MixFFN of Segformer. + + The differences between MixFFN & FFN: + 1. Use 1X1 Conv to replace Linear layer. + 2. Introduce 3X3 Conv to encode positional information. + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. Defaults: 256. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 1024. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='ReLU') + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. 
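For completeness, the ICNet backbone finished above is normally wrapped around a dilated ResNet. The configuration below is a hedged sketch in the spirit of the reference configs rather than an exact copy; the ResNetV1c settings in particular are an assumption.

```python
import torch

from mmseg.models import build_backbone

icnet = build_backbone(
    dict(
        type='ICNet',
        backbone_cfg=dict(
            type='ResNetV1c',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            dilations=(1, 1, 2, 4),
            strides=(1, 2, 1, 1),
            norm_cfg=dict(type='BN', requires_grad=True),
            contract_dilation=True),
        layer_channels=(512, 2048),
        out_channels=(64, 256, 256)))
icnet.eval()

with torch.no_grad():
    # forward() returns the three branch outputs (sub1, sub2, sub4).
    outs = icnet(torch.rand(1, 3, 512, 1024))
print([tuple(o.shape) for o in outs])
```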
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + feedforward_channels, + act_cfg=dict(type='GELU'), + ffn_drop=0., + dropout_layer=None, + init_cfg=None): + super(MixFFN, self).__init__(init_cfg) + + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.act_cfg = act_cfg + self.activate = build_activation_layer(act_cfg) + + in_channels = embed_dims + fc1 = Conv2d( + in_channels=in_channels, + out_channels=feedforward_channels, + kernel_size=1, + stride=1, + bias=True) + # 3x3 depth wise conv to provide positional encode information + pe_conv = Conv2d( + in_channels=feedforward_channels, + out_channels=feedforward_channels, + kernel_size=3, + stride=1, + padding=(3 - 1) // 2, + bias=True, + groups=feedforward_channels) + fc2 = Conv2d( + in_channels=feedforward_channels, + out_channels=in_channels, + kernel_size=1, + stride=1, + bias=True) + drop = nn.Dropout(ffn_drop) + layers = [fc1, pe_conv, self.activate, drop, fc2, drop] + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + + def forward(self, x, hw_shape, identity=None): + out = nlc_to_nchw(x, hw_shape) + out = self.layers(out) + out = nchw_to_nlc(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +class EfficientMultiheadAttention(MultiheadAttention): + """An implementation of Efficient Multi-head Attention of Segformer. + + This module is modified from MultiheadAttention which is a module from + mmcv.cnn.bricks.transformer. + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. Default: None. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: False. + qkv_bias (bool): enable bias for qkv if True. Default True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of Efficient Multi-head + Attention of Segformer. Default: 1. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=None, + init_cfg=None, + batch_first=True, + qkv_bias=False, + norm_cfg=dict(type='LN'), + sr_ratio=1): + super().__init__( + embed_dims, + num_heads, + attn_drop, + proj_drop, + dropout_layer=dropout_layer, + init_cfg=init_cfg, + batch_first=batch_first, + bias=qkv_bias) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=sr_ratio, + stride=sr_ratio) + # The ret[0] of build_norm_layer is norm name. + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + + # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa + from mmseg import digit_version, mmcv_version + if mmcv_version < digit_version('1.3.17'): + warnings.warn('The legacy version of forward function in' + 'EfficientMultiheadAttention is deprecated in' + 'mmcv>=1.3.17 and will no longer support in the' + 'future. 
Please upgrade your mmcv.') + self.forward = self.legacy_forward + + def forward(self, x, hw_shape, identity=None): + + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + x_q = x_q.transpose(0, 1) + x_kv = x_kv.transpose(0, 1) + + out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + def legacy_forward(self, x, hw_shape, identity=None): + """multi head attention forward in mmcv version < 1.3.17.""" + + x_q = x + if self.sr_ratio > 1: + x_kv = nlc_to_nchw(x, hw_shape) + x_kv = self.sr(x_kv) + x_kv = nchw_to_nlc(x_kv) + x_kv = self.norm(x_kv) + else: + x_kv = x + + if identity is None: + identity = x_q + + # `need_weights=True` will let nn.MultiHeadAttention + # `return attn_output, attn_output_weights.sum(dim=1) / num_heads` + # The `attn_output_weights.sum(dim=1)` may cause cuda error. So, we set + # `need_weights=False` to ignore `attn_output_weights.sum(dim=1)`. + # This issue - `https://github.com/pytorch/pytorch/issues/37583` report + # the error that large scale tensor sum operation may cause cuda error. + out = self.attn(query=x_q, key=x_kv, value=x_kv, need_weights=False)[0] + + return identity + self.dropout_layer(self.proj_drop(out)) + + +class TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in Segformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed. + after the feed forward layer. Default 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0. + drop_path_rate (float): stochastic depth rate. Default 0.0. + qkv_bias (bool): enable bias for qkv if True. + Default: True. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: False. + init_cfg (dict, optional): Initialization config dict. + Default:None. + sr_ratio (int): The ratio of spatial reduction of Efficient Multi-head + Attention of Segformer. Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + batch_first=True, + sr_ratio=1, + with_cp=False): + super(TransformerEncoderLayer, self).__init__() + + # The ret[0] of build_norm_layer is norm name. 
+ self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.attn = EfficientMultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + batch_first=batch_first, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + # The ret[0] of build_norm_layer is norm name. + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.ffn = MixFFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + self.with_cp = with_cp + + def forward(self, x, hw_shape): + + def _inner_forward(x): + x = self.attn(self.norm1(x), hw_shape, identity=x) + x = self.ffn(self.norm2(x), hw_shape, identity=x) + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + return x + + +@BACKBONES.register_module() +class MixVisionTransformer(BaseModule): + """The backbone of Segformer. + + This backbone is the implementation of `SegFormer: Simple and + Efficient Design for Semantic Segmentation with + Transformers `_. + Args: + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): Embedding dimension. Default: 768. + num_stags (int): The num of stages. Default: 4. + num_layers (Sequence[int]): The layer number of each transformer encode + layer. Default: [3, 4, 6, 3]. + num_heads (Sequence[int]): The attention heads of each transformer + encode layer. Default: [1, 2, 4, 8]. + patch_sizes (Sequence[int]): The patch_size of each overlapped patch + embedding. Default: [7, 3, 3, 3]. + strides (Sequence[int]): The stride of each overlapped patch embedding. + Default: [4, 2, 2, 2]. + sr_ratios (Sequence[int]): The spatial reduction rate of each + transformer encode layer. Default: [8, 4, 2, 1]. + out_indices (Sequence[int] | int): Output from which stages. + Default: (0, 1, 2, 3). + mlp_ratio (int): ratio of mlp hidden dim to embedding dim. + Default: 4. + qkv_bias (bool): Enable bias for qkv if True. Default: True. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0 + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): stochastic depth rate. Default 0.0 + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + pretrained (str, optional): model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. Default: False. 
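One detail of the transformer layers above that is easy to miss: `sr_ratio` shrinks only the key/value sequence in EfficientMultiheadAttention, not the queries. A quick back-of-the-envelope sketch of the effect for the first stage of the default configuration (the 512x512 input size is illustrative):

```python
# Stride-4 patch embedding on a 512x512 image gives a 128x128 token map,
# and sr_ratio=8 reduces the key/value map with a stride-8 convolution
# before attention is computed.
h, w = 512 // 4, 512 // 4
sr_ratio = 8
query_tokens = h * w
kv_tokens = (h // sr_ratio) * (w // sr_ratio)
print(query_tokens, kv_tokens)  # 16384 queries vs. 256 keys/values
```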
+ """ + + def __init__(self, + in_channels=3, + embed_dims=64, + num_stages=4, + num_layers=[3, 4, 6, 3], + num_heads=[1, 2, 4, 8], + patch_sizes=[7, 3, 3, 3], + strides=[4, 2, 2, 2], + sr_ratios=[8, 4, 2, 1], + out_indices=(0, 1, 2, 3), + mlp_ratio=4, + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN', eps=1e-6), + pretrained=None, + init_cfg=None, + with_cp=False): + super(MixVisionTransformer, self).__init__(init_cfg=init_cfg) + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be set at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is not None: + raise TypeError('pretrained must be a str or None') + + self.embed_dims = embed_dims + self.num_stages = num_stages + self.num_layers = num_layers + self.num_heads = num_heads + self.patch_sizes = patch_sizes + self.strides = strides + self.sr_ratios = sr_ratios + self.with_cp = with_cp + assert num_stages == len(num_layers) == len(num_heads) \ + == len(patch_sizes) == len(strides) == len(sr_ratios) + + self.out_indices = out_indices + assert max(out_indices) < self.num_stages + + # transformer encoder + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(num_layers)) + ] # stochastic num_layer decay rule + + cur = 0 + self.layers = ModuleList() + for i, num_layer in enumerate(num_layers): + embed_dims_i = embed_dims * num_heads[i] + patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims_i, + kernel_size=patch_sizes[i], + stride=strides[i], + padding=patch_sizes[i] // 2, + norm_cfg=norm_cfg) + layer = ModuleList([ + TransformerEncoderLayer( + embed_dims=embed_dims_i, + num_heads=num_heads[i], + feedforward_channels=mlp_ratio * embed_dims_i, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[cur + idx], + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + sr_ratio=sr_ratios[i]) for idx in range(num_layer) + ]) + in_channels = embed_dims_i + # The ret[0] of build_norm_layer is norm name. + norm = build_norm_layer(norm_cfg, embed_dims_i)[1] + self.layers.append(ModuleList([patch_embed, layer, norm])) + cur += num_layer + + def init_weights(self): + if self.init_cfg is None: + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, nn.LayerNorm): + constant_init(m, val=1.0, bias=0.) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[ + 1] * m.out_channels + fan_out //= m.groups + normal_init( + m, mean=0, std=math.sqrt(2.0 / fan_out), bias=0) + else: + super(MixVisionTransformer, self).init_weights() + + def forward(self, x): + outs = [] + + for i, layer in enumerate(self.layers): + x, hw_shape = layer[0](x) + for block in layer[1]: + x = block(x, hw_shape) + x = layer[2](x) + x = nlc_to_nchw(x, hw_shape) + if i in self.out_indices: + outs.append(x) + + return outs diff --git a/downstream/mmsegmentation/mmseg/models/backbones/mobilenet_v2.py b/downstream/mmsegmentation/mmseg/models/backbones/mobilenet_v2.py new file mode 100644 index 0000000..cbb9c6c --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/mobilenet_v2.py @@ -0,0 +1,197 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
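Putting the MixVisionTransformer above to work only needs the registry. A minimal, hedged sketch with the default (non-pretrained) settings, assuming mmsegmentation is installed:

```python
import torch

from mmseg.models import build_backbone

mit = build_backbone(dict(type='MixVisionTransformer'))
mit.init_weights()
mit.eval()

with torch.no_grad():
    feats = mit(torch.rand(1, 3, 512, 512))

# One feature map per stage, downsampled by the patch-embedding strides
# (4, then 2, 2, 2), i.e. overall strides of 4, 8, 16 and 32.
print([tuple(f.shape) for f in feats])
```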
+import warnings + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import InvertedResidual, make_divisible + + +@BACKBONES.register_module() +class MobileNetV2(BaseModule): + """MobileNetV2 backbone. + + This backbone is the implementation of + `MobileNetV2: Inverted Residuals and Linear Bottlenecks + `_. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + strides (Sequence[int], optional): Strides of the first block of each + layer. If not specified, default config in ``arch_setting`` will + be used. + dilations (Sequence[int]): Dilation of each layer. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + # Parameters to build layers. 3 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks. + arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4], + [6, 96, 3], [6, 160, 3], [6, 320, 1]] + + def __init__(self, + widen_factor=1., + strides=(1, 2, 2, 2, 1, 2, 1), + dilations=(1, 1, 1, 1, 1, 1, 1), + out_indices=(1, 2, 4, 6), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + super(MobileNetV2, self).__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + self.widen_factor = widen_factor + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == len(self.arch_settings) + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 7): + raise ValueError('the item in out_indices must in ' + f'range(0, 7). But received {index}') + + if frozen_stages not in range(-1, 7): + raise ValueError('frozen_stages must be in range(-1, 7). 
' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks = layer_cfg + stride = self.strides[i] + dilation = self.dilations[i] + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + def make_layer(self, out_channels, num_blocks, stride, dilation, + expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): Number of blocks. + stride (int): Stride of the first block. + dilation (int): Dilation of the first block. + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. + """ + layers = [] + for i in range(num_blocks): + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride if i == 0 else 1, + expand_ratio=expand_ratio, + dilation=dilation if i == 0 else 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/downstream/mmsegmentation/mmseg/models/backbones/mobilenet_v3.py b/downstream/mmsegmentation/mmseg/models/backbones/mobilenet_v3.py new file mode 100644 index 0000000..dd3d6eb --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/mobilenet_v3.py @@ -0,0 +1,267 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks import Conv2dAdaptivePadding +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import InvertedResidualV3 as InvertedResidual + + +@BACKBONES.register_module() +class MobileNetV3(BaseModule): + """MobileNetV3 backbone. + + This backbone is the improved implementation of `Searching for MobileNetV3 + `_. + + Args: + arch (str): Architecture of mobilnetv3, from {'small', 'large'}. + Default: 'small'. 
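A minimal usage sketch for the `MobileNetV2` backbone above (the import path is assumed to match upstream MMSegmentation). With the defaults `widen_factor=1.0` and `out_indices=(1, 2, 4, 6)`, the four selected stages carry 24/32/96/320 channels at output strides 4/8/16/32.

```python
import torch

from mmseg.models import MobileNetV2  # assumed export path

model = MobileNetV2().eval()
inputs = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    outs = model(inputs)
for out in outs:
    print(tuple(out.shape))
# Expected: (1, 24, 56, 56) (1, 32, 28, 28) (1, 96, 14, 14) (1, 320, 7, 7)
```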
+ conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (tuple[int]): Output from which layer. + Default: (0, 1, 12). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], # block0 layer1 os=4 + [3, 72, 24, False, 'ReLU', 2], # block1 layer2 os=8 + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], # block2 layer4 os=16 + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], # block3 layer7 os=16 + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], # block4 layer9 os=32 + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'large': [[3, 16, 16, False, 'ReLU', 1], # block0 layer1 os=2 + [3, 64, 24, False, 'ReLU', 2], # block1 layer2 os=4 + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], # block2 layer4 os=8 + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], # block3 layer7 os=16 + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], # block4 layer11 os=16 + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], # block5 layer13 os=32 + [5, 960, 160, True, 'HSwish', 1], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN'), + out_indices=(0, 1, 12), + frozen_stages=-1, + reduction_factor=1, + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + super(MobileNetV3, self).__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + assert arch in self.arch_settings + assert isinstance(reduction_factor, int) and reduction_factor > 0 + assert mmcv.is_tuple_of(out_indices, int) + for index in out_indices: + if index not in range(0, len(self.arch_settings[arch]) + 2): + raise ValueError( + 'the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch])+2}). 
' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch])+2}). ' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.reduction_factor = reduction_factor + self.norm_eval = norm_eval + self.with_cp = with_cp + self.layers = self._make_layer() + + def _make_layer(self): + layers = [] + + # build the first layer (layer0) + in_channels = 16 + layer = ConvModule( + in_channels=3, + out_channels=in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=dict(type='Conv2dAdaptivePadding'), + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + self.add_module('layer0', layer) + layers.append('layer0') + + layer_setting = self.arch_settings[self.arch] + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + + if self.arch == 'large' and i >= 12 or self.arch == 'small' and \ + i >= 8: + mid_channels = mid_channels // self.reduction_factor + out_channels = out_channels // self.reduction_factor + + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=3.0, divisor=6.0))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + with_expand_conv=(in_channels != mid_channels), + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + in_channels = out_channels + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + # build the last layer + # block5 layer12 os=32 for small model + # block6 layer16 os=32 for large model + layer = ConvModule( + in_channels=in_channels, + out_channels=576 if self.arch == 'small' else 960, + kernel_size=1, + stride=1, + dilation=4, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + layer_name = 'layer{}'.format(len(layer_setting) + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + # next, convert backbone MobileNetV3 to a semantic segmentation version + if self.arch == 'small': + self.layer4.depthwise_conv.conv.stride = (1, 1) + self.layer9.depthwise_conv.conv.stride = (1, 1) + for i in range(4, len(layers)): + layer = getattr(self, layers[i]) + if isinstance(layer, InvertedResidual): + modified_module = layer.depthwise_conv.conv + else: + modified_module = layer.conv + + if i < 9: + modified_module.dilation = (2, 2) + pad = 2 + else: + modified_module.dilation = (4, 4) + pad = 4 + + if not isinstance(modified_module, Conv2dAdaptivePadding): + # Adjust padding + pad *= (modified_module.kernel_size[0] - 1) // 2 + modified_module.padding = (pad, pad) + else: + self.layer7.depthwise_conv.conv.stride = (1, 1) + self.layer13.depthwise_conv.conv.stride = (1, 1) + for i in range(7, len(layers)): + layer = getattr(self, layers[i]) + if isinstance(layer, InvertedResidual): + modified_module = layer.depthwise_conv.conv + else: + modified_module = layer.conv + + if i < 13: + modified_module.dilation = (2, 2) + pad = 2 + else: + modified_module.dilation = (4, 4) + pad = 4 + + if not isinstance(modified_module, Conv2dAdaptivePadding): + # Adjust padding + pad *= 
(modified_module.kernel_size[0] - 1) // 2 + modified_module.padding = (pad, pad) + + return layers + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + return outs + + def _freeze_stages(self): + for i in range(self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV3, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/downstream/mmsegmentation/mmseg/models/backbones/resnest.py b/downstream/mmsegmentation/mmseg/models/backbones/resnest.py new file mode 100644 index 0000000..91952c2 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/resnest.py @@ -0,0 +1,318 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from ..utils import ResLayer +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. + """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(nn.Module): + """Split-Attention Conv2d in ResNeSt. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels. Default: 4. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + dcn (dict): Config dict for DCN. Default: None. 
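A usage sketch for the `MobileNetV3` backbone above (import path assumed). Because `_make_layer` resets the strides of `layer4`/`layer9` for the small arch and substitutes dilations, the deepest feature map stays at output stride 8, which is what the segmentation heads expect.

```python
import torch

from mmseg.models import MobileNetV3  # assumed export path

model = MobileNetV3(arch='small').eval()   # default out_indices=(0, 1, 12)
inputs = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    outs = model(inputs)
for out in outs:
    print(tuple(out.shape))
# Expected: (1, 16, 112, 112) (1, 16, 56, 56) (1, 576, 28, 28)
```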
+ """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None): + super(SplitAttentionConv2d, self).__init__() + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.with_dcn = dcn is not None + self.dcn = dcn + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if self.with_dcn and not fallback_on_stride: + assert conv_cfg is None, 'conv_cfg must be None for DCN' + conv_cfg = dcn + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + """nn.Module: the normalization layer named "norm0" """ + return getattr(self, self.norm0_name) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + batch = x.size(0) + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + inplane (int): Input planes of this block. + planes (int): Middle planes of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels in + SplitAttentionConv2d. Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + kwargs (dict): Key word arguments for base class. 
+ """ + expansion = 4 + + def __init__(self, + inplanes, + planes, + groups=1, + base_width=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + """Bottleneck block for ResNeSt.""" + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * + (base_width / base_channels)) * groups + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.with_modulated_dcn = False + self.conv2 = SplitAttentionConv2d( + width, + width, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=self.dcn) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + This backbone is the implementation of `ResNeSt: + Split-Attention Networks `_. + + Args: + groups (int): Number of groups of Bottleneck. Default: 1 + base_width (int): Base width of Bottleneck. Default: 4 + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels in + SplitAttentionConv2d. Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + kwargs (dict): Keyword arguments for ResNet. 
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)) + } + + def __init__(self, + groups=1, + base_width=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.base_width = base_width + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super(ResNeSt, self).__init__(**kwargs) + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer( + groups=self.groups, + base_width=self.base_width, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/downstream/mmsegmentation/mmseg/models/backbones/resnet.py b/downstream/mmsegmentation/mmseg/models/backbones/resnet.py new file mode 100644 index 0000000..e8b961d --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/resnet.py @@ -0,0 +1,714 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer +from mmcv.runner import BaseModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from ..utils import ResLayer + + +class BasicBlock(BaseModule): + """Basic block for ResNet.""" + + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg) + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + """Bottleneck block for ResNet. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is + "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + super(Bottleneck, self).__init__(init_cfg) + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert plugins is None or isinstance(plugins, list) + if plugins is not None: + allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] + assert all(p['position'] in allowed_position for p in plugins) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.plugins = plugins + self.with_plugins = plugins is not None + + if self.with_plugins: + # collect plugins for conv1/conv2/conv3 + self.after_conv1_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv1' + ] + self.after_conv2_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv2' + ] + self.after_conv3_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv3' + ] + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + dcn, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + if self.with_plugins: + self.after_conv1_plugin_names = self.make_block_plugins( + planes, self.after_conv1_plugins) + self.after_conv2_plugin_names = self.make_block_plugins( + planes, self.after_conv2_plugins) + self.after_conv3_plugin_names = self.make_block_plugins( + planes * self.expansion, self.after_conv3_plugins) + + def make_block_plugins(self, in_channels, plugins): + """make plugins for block. + + Args: + in_channels (int): Input channels of plugin. + plugins (list[dict]): List of plugins cfg to build. + + Returns: + list[str]: List of the names of plugin. 
+ """ + assert isinstance(plugins, list) + plugin_names = [] + for plugin in plugins: + plugin = plugin.copy() + name, layer = build_plugin_layer( + plugin, + in_channels=in_channels, + postfix=plugin.pop('postfix', '')) + assert not hasattr(self, name), f'duplicate plugin {name}' + self.add_module(name, layer) + plugin_names.append(name) + return plugin_names + + def forward_plugin(self, x, plugin_names): + """Forward function for plugins.""" + out = x + for name in plugin_names: + out = getattr(self, name)(x) + return out + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: normalization layer after the third convolution layer""" + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNet(BaseModule): + """ResNet backbone. + + This backbone is the improved implementation of `Deep Residual Learning + for Image Recognition `_. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Number of stem channels. Default: 64. + base_channels (int): Number of base channels of res layer. Default: 64. + num_stages (int): Resnet stages, normally 4. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: (1, 2, 2, 2). + dilations (Sequence[int]): Dilation of each stage. + Default: (1, 1, 1, 1). + out_indices (Sequence[int]): Output from which stages. + Default: (0, 1, 2, 3). + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: 'pytorch'. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): Dictionary to construct and config conv layer. + When conv_cfg is None, cfg will be set to dict(type='Conv2d'). + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. 
+ dcn (dict | None): Dictionary to construct and config DCN conv layer. + When dcn is not None, conv_cfg must be None. Default: None. + stage_with_dcn (Sequence[bool]): Whether to set DCN conv for each + stage. The length of stage_with_dcn is equal to num_stages. + Default: (False, False, False, False). + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + + - position (str, required): Position inside block to insert plugin, + options: 'after_conv1', 'after_conv2', 'after_conv3'. + + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + Default: None. + multi_grid (Sequence[int]|None): Multi grid dilation rates of last + stage. Default: None. + contract_dilation (bool): Whether contract first dilation of each layer + Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + pretrained (str, optional): model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Example: + >>> from mmseg.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=64, + base_channels=64, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + dcn=None, + stage_with_dcn=(False, False, False, False), + plugins=None, + multi_grid=None, + contract_dilation=False, + with_cp=False, + zero_init_residual=True, + pretrained=None, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + + self.pretrained = pretrained + self.zero_init_residual = zero_init_residual + block_init_cfg = None + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + block = self.arch_settings[depth][0] + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm3')) + else: + raise TypeError('pretrained must be a str or None') + + self.depth = depth + self.stem_channels = stem_channels + self.base_channels = 
base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.plugins = plugins + self.multi_grid = multi_grid + self.contract_dilation = contract_dilation + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = stem_channels + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + if plugins is not None: + stage_plugins = self.make_stage_plugins(plugins, i) + else: + stage_plugins = None + # multi grid is applied to last layer only + stage_multi_grid = multi_grid if i == len( + self.stage_blocks) - 1 else None + planes = base_channels * 2**i + res_layer = self.make_res_layer( + block=self.block, + inplanes=self.inplanes, + planes=planes, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=stage_plugins, + multi_grid=stage_multi_grid, + contract_dilation=contract_dilation, + init_cfg=block_init_cfg) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i+1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * base_channels * 2**( + len(self.stage_blocks) - 1) + + def make_stage_plugins(self, plugins, stage_idx): + """make plugins for ResNet 'stage_idx'th stage . + + Currently we support to insert 'context_block', + 'empirical_attention_block', 'nonlocal_block' into the backbone like + ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of + Bottleneck. + + An example of plugins format could be : + >>> plugins=[ + ... dict(cfg=dict(type='xxx', arg1='xxx'), + ... stages=(False, True, True, True), + ... position='after_conv2'), + ... dict(cfg=dict(type='yyy'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='1'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='2'), + ... stages=(True, True, True, True), + ... position='after_conv3') + ... ] + >>> self = ResNet(depth=18) + >>> stage_plugins = self.make_stage_plugins(plugins, 0) + >>> assert len(stage_plugins) == 3 + + Suppose 'stage_idx=0', the structure of blocks in the stage would be: + conv1-> conv2->conv3->yyy->zzz1->zzz2 + Suppose 'stage_idx=1', the structure of blocks in the stage would be: + conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 + + If stages is missing, the plugin would be applied to all stages. + + Args: + plugins (list[dict]): List of plugins cfg to build. The postfix is + required if multiple same type plugins are inserted. 
+ stage_idx (int): Index of stage to build + + Returns: + list[dict]: Plugins for current stage + """ + stage_plugins = [] + for plugin in plugins: + plugin = plugin.copy() + stages = plugin.pop('stages', None) + assert stages is None or len(stages) == self.num_stages + # whether to insert plugin into current stage + if stages is None or stages[stage_idx]: + stage_plugins.append(plugin) + + return stage_plugins + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + """Make stem layer for ResNet.""" + if self.deep_stem: + self.stem = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels)[1], + nn.ReLU(inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + """Freeze stages param and norm stats.""" + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@BACKBONES.register_module() +class ResNetV1c(ResNet): + """ResNetV1c variant described in [1]_. + + Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv in + the input stem with three 3x3 convs. For more details please refer to `Bag + of Tricks for Image Classification with Convolutional Neural Networks + `_. 
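Segmentation configs usually keep this ResNet at output stride 8 by trading the strides of the last two stages for dilations. A minimal sketch of that setting (the values below follow the common dilated-ResNet convention and are illustrative rather than a prescribed config):

```python
import torch

from mmseg.models import ResNet  # assumed export path

model = ResNet(
    depth=50,
    strides=(1, 2, 1, 1),
    dilations=(1, 1, 2, 4),
    contract_dilation=True).eval()
inputs = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    outs = model(inputs)
for out in outs:
    print(tuple(out.shape))
# Expected: (1, 256, 56, 56) (1, 512, 28, 28) (1, 1024, 28, 28) (1, 2048, 28, 28)
```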
+ """ + + def __init__(self, **kwargs): + super(ResNetV1c, self).__init__( + deep_stem=True, avg_down=False, **kwargs) + + +@BACKBONES.register_module() +class ResNetV1d(ResNet): + """ResNetV1d variant described in [1]_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. + """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) diff --git a/downstream/mmsegmentation/mmseg/models/backbones/resnext.py b/downstream/mmsegmentation/mmseg/models/backbones/resnext.py new file mode 100644 index 0000000..805c27b --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/resnext.py @@ -0,0 +1,150 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from ..utils import ResLayer +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeXt. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is + "caffe", the stride-two layer is the first 1x1 conv layer. + """ + + def __init__(self, + inplanes, + planes, + groups=1, + base_width=4, + base_channels=64, + **kwargs): + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * + (base_width / base_channels)) * groups + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, width, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + self.dcn, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@BACKBONES.register_module() +class ResNeXt(ResNet): + """ResNeXt backbone. + + This backbone is the implementation of `Aggregated + Residual Transformations for Deep Neural + Networks `_. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Normally 3. + num_stages (int): Resnet stages, normally 4. + groups (int): Group of resnext. + base_width (int): Base width of resnext. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. 
+ out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + + Example: + >>> from mmseg.models import ResNeXt + >>> import torch + >>> self = ResNeXt(depth=50) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 8, 8) + (1, 512, 4, 4) + (1, 1024, 2, 2) + (1, 2048, 1, 1) + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, groups=1, base_width=4, **kwargs): + self.groups = groups + self.base_width = base_width + super(ResNeXt, self).__init__(**kwargs) + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``""" + return ResLayer( + groups=self.groups, + base_width=self.base_width, + base_channels=self.base_channels, + **kwargs) diff --git a/downstream/mmsegmentation/mmseg/models/backbones/stdc.py b/downstream/mmsegmentation/mmseg/models/backbones/stdc.py new file mode 100644 index 0000000..04f2f7a --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/stdc.py @@ -0,0 +1,422 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Modified from https://github.com/MichaelFan01/STDC-Seg.""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner.base_module import BaseModule, ModuleList, Sequential + +from mmseg.ops import resize +from ..builder import BACKBONES, build_backbone +from .bisenetv1 import AttentionRefinementModule + + +class STDCModule(BaseModule): + """STDCModule. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels before scaling. + stride (int): The number of stride for the first conv layer. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): The activation config for conv layers. + num_convs (int): Numbers of conv layers. + fusion_type (str): Type of fusion operation. Default: 'add'. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
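The conv2 width bookkeeping in the ResNeXt `Bottleneck` above follows the cardinality convention from the paper; a worked example for the usual 32x4d setting (`groups=32`, `base_width=4`, `base_channels=64`):

```python
import math

groups, base_width, base_channels = 32, 4, 64
for planes in (64, 128, 256, 512):   # mid planes of stage 1..4
    width = math.floor(planes * (base_width / base_channels)) * groups
    print(planes, '->', width)
# 64 -> 128, 128 -> 256, 256 -> 512, 512 -> 1024
```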
+ """ + + def __init__(self, + in_channels, + out_channels, + stride, + norm_cfg=None, + act_cfg=None, + num_convs=4, + fusion_type='add', + init_cfg=None): + super(STDCModule, self).__init__(init_cfg=init_cfg) + assert num_convs > 1 + assert fusion_type in ['add', 'cat'] + self.stride = stride + self.with_downsample = True if self.stride == 2 else False + self.fusion_type = fusion_type + + self.layers = ModuleList() + conv_0 = ConvModule( + in_channels, out_channels // 2, kernel_size=1, norm_cfg=norm_cfg) + + if self.with_downsample: + self.downsample = ConvModule( + out_channels // 2, + out_channels // 2, + kernel_size=3, + stride=2, + padding=1, + groups=out_channels // 2, + norm_cfg=norm_cfg, + act_cfg=None) + + if self.fusion_type == 'add': + self.layers.append(nn.Sequential(conv_0, self.downsample)) + self.skip = Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=1, + groups=in_channels, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + out_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=None)) + else: + self.layers.append(conv_0) + self.skip = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) + else: + self.layers.append(conv_0) + + for i in range(1, num_convs): + out_factor = 2**(i + 1) if i != num_convs - 1 else 2**i + self.layers.append( + ConvModule( + out_channels // 2**i, + out_channels // out_factor, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs): + if self.fusion_type == 'add': + out = self.forward_add(inputs) + else: + out = self.forward_cat(inputs) + return out + + def forward_add(self, inputs): + layer_outputs = [] + x = inputs.clone() + for layer in self.layers: + x = layer(x) + layer_outputs.append(x) + if self.with_downsample: + inputs = self.skip(inputs) + + return torch.cat(layer_outputs, dim=1) + inputs + + def forward_cat(self, inputs): + x0 = self.layers[0](inputs) + layer_outputs = [x0] + for i, layer in enumerate(self.layers[1:]): + if i == 0: + if self.with_downsample: + x = layer(self.downsample(x0)) + else: + x = layer(x0) + else: + x = layer(x) + layer_outputs.append(x) + if self.with_downsample: + layer_outputs[0] = self.skip(x0) + return torch.cat(layer_outputs, dim=1) + + +class FeatureFusionModule(BaseModule): + """Feature Fusion Module. This module is different from FeatureFusionModule + in BiSeNetV1. It uses two ConvModules in `self.attention` whose inter + channel number is calculated by given `scale_factor`, while + FeatureFusionModule in BiSeNetV1 only uses one ConvModule in + `self.conv_atten`. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + scale_factor (int): The number of channel scale factor. + Default: 4. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): The activation config for conv layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + in_channels, + out_channels, + scale_factor=4, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(FeatureFusionModule, self).__init__(init_cfg=init_cfg) + channels = out_channels // scale_factor + self.conv0 = ConvModule( + in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=act_cfg) + self.attention = nn.Sequential( + nn.AdaptiveAvgPool2d((1, 1)), + ConvModule( + out_channels, + channels, + 1, + norm_cfg=None, + bias=False, + act_cfg=act_cfg), + ConvModule( + channels, + out_channels, + 1, + norm_cfg=None, + bias=False, + act_cfg=None), nn.Sigmoid()) + + def forward(self, spatial_inputs, context_inputs): + inputs = torch.cat([spatial_inputs, context_inputs], dim=1) + x = self.conv0(inputs) + attn = self.attention(x) + x_attn = x * attn + return x_attn + x + + +@BACKBONES.register_module() +class STDCNet(BaseModule): + """This backbone is the implementation of `Rethinking BiSeNet For Real-time + Semantic Segmentation `_. + + Args: + stdc_type (int): The type of backbone structure, + `STDCNet1` and`STDCNet2` denotes two main backbones in paper, + whose FLOPs is 813M and 1446M, respectively. + in_channels (int): The num of input_channels. + channels (tuple[int]): The output channels for each stage. + bottleneck_type (str): The type of STDC Module type, the value must + be 'add' or 'cat'. + norm_cfg (dict): Config dict for normalization layer. + act_cfg (dict): The activation config for conv layers. + num_convs (int): Numbers of conv layer at each STDC Module. + Default: 4. + with_final_conv (bool): Whether add a conv layer at the Module output. + Default: True. + pretrained (str, optional): Model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Example: + >>> import torch + >>> stdc_type = 'STDCNet1' + >>> in_channels = 3 + >>> channels = (32, 64, 256, 512, 1024) + >>> bottleneck_type = 'cat' + >>> inputs = torch.rand(1, 3, 1024, 2048) + >>> self = STDCNet(stdc_type, in_channels, + ... channels, bottleneck_type).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 256, 128, 256]) + outputs[1].shape = torch.Size([1, 512, 64, 128]) + outputs[2].shape = torch.Size([1, 1024, 32, 64]) + """ + + arch_settings = { + 'STDCNet1': [(2, 1), (2, 1), (2, 1)], + 'STDCNet2': [(2, 1, 1, 1), (2, 1, 1, 1, 1), (2, 1, 1)] + } + + def __init__(self, + stdc_type, + in_channels, + channels, + bottleneck_type, + norm_cfg, + act_cfg, + num_convs=4, + with_final_conv=False, + pretrained=None, + init_cfg=None): + super(STDCNet, self).__init__(init_cfg=init_cfg) + assert stdc_type in self.arch_settings, \ + f'invalid structure {stdc_type} for STDCNet.' + assert bottleneck_type in ['add', 'cat'],\ + f'bottleneck_type must be `add` or `cat`, got {bottleneck_type}' + + assert len(channels) == 5,\ + f'invalid channels length {len(channels)} for STDCNet.' 
+ + self.in_channels = in_channels + self.channels = channels + self.stage_strides = self.arch_settings[stdc_type] + self.prtrained = pretrained + self.num_convs = num_convs + self.with_final_conv = with_final_conv + + self.stages = ModuleList([ + ConvModule( + self.in_channels, + self.channels[0], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + self.channels[0], + self.channels[1], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + ]) + # `self.num_shallow_features` is the number of shallow modules in + # `STDCNet`, which is noted as `Stage1` and `Stage2` in original paper. + # They are both not used for following modules like Attention + # Refinement Module and Feature Fusion Module. + # Thus they would be cut from `outs`. Please refer to Figure 4 + # of original paper for more details. + self.num_shallow_features = len(self.stages) + + for strides in self.stage_strides: + idx = len(self.stages) - 1 + self.stages.append( + self._make_stage(self.channels[idx], self.channels[idx + 1], + strides, norm_cfg, act_cfg, bottleneck_type)) + # After appending, `self.stages` is a ModuleList including several + # shallow modules and STDCModules. + # (len(self.stages) == + # self.num_shallow_features + len(self.stage_strides)) + if self.with_final_conv: + self.final_conv = ConvModule( + self.channels[-1], + max(1024, self.channels[-1]), + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def _make_stage(self, in_channels, out_channels, strides, norm_cfg, + act_cfg, bottleneck_type): + layers = [] + for i, stride in enumerate(strides): + layers.append( + STDCModule( + in_channels if i == 0 else out_channels, + out_channels, + stride, + norm_cfg, + act_cfg, + num_convs=self.num_convs, + fusion_type=bottleneck_type)) + return Sequential(*layers) + + def forward(self, x): + outs = [] + for stage in self.stages: + x = stage(x) + outs.append(x) + if self.with_final_conv: + outs[-1] = self.final_conv(outs[-1]) + outs = outs[self.num_shallow_features:] + return tuple(outs) + + +@BACKBONES.register_module() +class STDCContextPathNet(BaseModule): + """STDCNet with Context Path. The `outs` below is a list of three feature + maps from deep to shallow, whose height and width is from small to big, + respectively. The biggest feature map of `outs` is outputted for + `STDCHead`, where Detail Loss would be calculated by Detail Ground-truth. + The other two feature maps are used for Attention Refinement Module, + respectively. Besides, the biggest feature map of `outs` and the last + output of Attention Refinement Module are concatenated for Feature Fusion + Module. Then, this fusion feature map `feat_fuse` would be outputted for + `decode_head`. More details please refer to Figure 4 of original paper. + + Args: + backbone_cfg (dict): Config dict for stdc backbone. + last_in_channels (tuple(int)), The number of channels of last + two feature maps from stdc backbone. Default: (1024, 512). + out_channels (int): The channels of output feature maps. + Default: 128. + ffm_cfg (dict): Config dict for Feature Fusion Module. Default: + `dict(in_channels=512, out_channels=256, scale_factor=4)`. + upsample_mode (str): Algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. Default: ``'nearest'``. + align_corners (str): align_corners argument of F.interpolate. It + must be `None` if upsample_mode is ``'nearest'``. Default: None. + norm_cfg (dict): Config dict for normalization layer. 
+ Default: dict(type='BN'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Return: + outputs (tuple): The tuple of list of output feature map for + auxiliary heads and decoder head. + """ + + def __init__(self, + backbone_cfg, + last_in_channels=(1024, 512), + out_channels=128, + ffm_cfg=dict( + in_channels=512, out_channels=256, scale_factor=4), + upsample_mode='nearest', + align_corners=None, + norm_cfg=dict(type='BN'), + init_cfg=None): + super(STDCContextPathNet, self).__init__(init_cfg=init_cfg) + self.backbone = build_backbone(backbone_cfg) + self.arms = ModuleList() + self.convs = ModuleList() + for channels in last_in_channels: + self.arms.append(AttentionRefinementModule(channels, out_channels)) + self.convs.append( + ConvModule( + out_channels, + out_channels, + 3, + padding=1, + norm_cfg=norm_cfg)) + self.conv_avg = ConvModule( + last_in_channels[0], out_channels, 1, norm_cfg=norm_cfg) + + self.ffm = FeatureFusionModule(**ffm_cfg) + + self.upsample_mode = upsample_mode + self.align_corners = align_corners + + def forward(self, x): + outs = list(self.backbone(x)) + avg = F.adaptive_avg_pool2d(outs[-1], 1) + avg_feat = self.conv_avg(avg) + + feature_up = resize( + avg_feat, + size=outs[-1].shape[2:], + mode=self.upsample_mode, + align_corners=self.align_corners) + arms_out = [] + for i in range(len(self.arms)): + x_arm = self.arms[i](outs[len(outs) - 1 - i]) + feature_up + feature_up = resize( + x_arm, + size=outs[len(outs) - 1 - i - 1].shape[2:], + mode=self.upsample_mode, + align_corners=self.align_corners) + feature_up = self.convs[i](feature_up) + arms_out.append(feature_up) + + feat_fuse = self.ffm(outs[0], arms_out[1]) + + # The `outputs` has four feature maps. + # `outs[0]` is outputted for `STDCHead` auxiliary head. + # Two feature maps of `arms_out` are outputted for auxiliary head. + # `feat_fuse` is outputted for decoder head. + outputs = [outs[0]] + list(arms_out) + [feat_fuse] + return tuple(outputs) diff --git a/downstream/mmsegmentation/mmseg/models/backbones/swin.py b/downstream/mmsegmentation/mmseg/models/backbones/swin.py new file mode 100644 index 0000000..cbf1328 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/swin.py @@ -0,0 +1,756 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, build_dropout +from mmcv.cnn.utils.weight_init import (constant_init, trunc_normal_, + trunc_normal_init) +from mmcv.runner import (BaseModule, CheckpointLoader, ModuleList, + load_state_dict) +from mmcv.utils import to_2tuple + +from ...utils import get_root_logger +from ..builder import BACKBONES +from ..utils.embed import PatchEmbed, PatchMerging + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. 
+ Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + init_cfg (dict | None, optional): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor | None, Optional): mask with shape of (num_windows, + Wh*Ww, Wh*Ww), value should be between (-inf, 0]. + """ + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + # make torchscript happy (cannot use tensor as tuple) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class ShiftWindowMSA(BaseModule): + """Shifted Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Defaults: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. 
+ Defaults: 0. + proj_drop_rate (float, optional): Dropout ratio of output. + Defaults: 0. + dropout_layer (dict, optional): The dropout_layer used before output. + Defaults: dict(type='DropPath', drop_prob=0.). + init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + shift_size=0, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0, + proj_drop_rate=0, + dropout_layer=dict(type='DropPath', drop_prob=0.), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.window_size = window_size + self.shift_size = shift_size + assert 0 <= self.shift_size < self.window_size + + self.w_msa = WindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=to_2tuple(window_size), + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate, + init_cfg=None) + + self.drop = build_dropout(dropout_layer) + + def forward(self, query, hw_shape): + B, L, C = query.shape + H, W = hw_shape + assert L == H * W, 'input feature has wrong size' + query = query.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) + H_pad, W_pad = query.shape[1], query.shape[2] + + # cyclic shift + if self.shift_size > 0: + shifted_query = torch.roll( + query, + shifts=(-self.shift_size, -self.shift_size), + dims=(1, 2)) + + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = self.window_partition(img_mask) + mask_windows = mask_windows.view( + -1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-100.0)).masked_fill( + attn_mask == 0, float(0.0)) + else: + shifted_query = query + attn_mask = None + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(shifted_query) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, self.window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, + self.window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + return x + + def window_reverse(self, windows, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + window_size = self.window_size + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, 
window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + def window_partition(self, x): + """ + Args: + x: (B, H, W, C) + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + window_size = self.window_size + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + +class SwinBlock(BaseModule): + """" + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + window_size (int, optional): The local window scale. Default: 7. + shift (bool, optional): whether to shift window or not. Default False. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float, optional): Stochastic depth rate. Default: 0. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict | list | None, optional): The init config. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + window_size=7, + shift=False, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super(SwinBlock, self).__init__(init_cfg=init_cfg) + + self.with_cp = with_cp + + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ShiftWindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + shift_size=window_size // 2 if shift else 0, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + init_cfg=None) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=2, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=True, + init_cfg=None) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockSequence(BaseModule): + """Implements one stage in Swin Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + depth (int): The number of blocks in this stage. + window_size (int, optional): The local window scale. Default: 7. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. 
+ qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float | list[float], optional): Stochastic depth + rate. Default: 0. + downsample (BaseModule | None, optional): The downsample operation + module. Default: None. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + init_cfg (dict | list | None, optional): The init config. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + depth, + window_size=7, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + downsample=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(drop_path_rate, list): + drop_path_rates = drop_path_rate + assert len(drop_path_rates) == depth + else: + drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] + + self.blocks = ModuleList() + for i in range(depth): + block = SwinBlock( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + window_size=window_size, + shift=False if i % 2 == 0 else True, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rates[i], + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + init_cfg=None) + self.blocks.append(block) + + self.downsample = downsample + + def forward(self, x, hw_shape): + for block in self.blocks: + x = block(x, hw_shape) + + if self.downsample: + x_down, down_hw_shape = self.downsample(x, hw_shape) + return x_down, down_hw_shape, x, hw_shape + else: + return x, hw_shape, x, hw_shape + + +@BACKBONES.register_module() +class SwinTransformer(BaseModule): + """Swin Transformer backbone. + + This backbone is the implementation of `Swin Transformer: + Hierarchical Vision Transformer using Shifted + Windows `_. + Inspiration from https://github.com/microsoft/Swin-Transformer. + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): The num of input channels. + Defaults: 3. + embed_dims (int): The feature dimension. Default: 96. + patch_size (int | tuple[int]): Patch size. Default: 4. + window_size (int): Window size. Default: 7. + mlp_ratio (int | float): Ratio of mlp hidden dim to embedding dim. + Default: 4. + depths (tuple[int]): Depths of each Swin Transformer stage. + Default: (2, 2, 6, 2). + num_heads (tuple[int]): Parallel attention heads of each Swin + Transformer stage. Default: (3, 6, 12, 24). + strides (tuple[int]): The patch merging or patch embedding stride of + each Swin Transformer stage. (In swin, we set kernel size equal to + stride.) Default: (4, 2, 2, 2). + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool, optional): If True, add a learnable bias to query, key, + value. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. 
+ patch_norm (bool): If add a norm layer for patch embed and patch + merging. Default: True. + drop_rate (float): Dropout rate. Defaults: 0. + attn_drop_rate (float): Attention dropout rate. Default: 0. + drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults: False. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LN'). + norm_cfg (dict): Config dict for normalization layer at + output of backone. Defaults: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + pretrained (str, optional): model pretrained path. Default: None. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + pretrain_img_size=224, + in_channels=3, + embed_dims=96, + patch_size=4, + window_size=7, + mlp_ratio=4, + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + strides=(4, 2, 2, 2), + out_indices=(0, 1, 2, 3), + qkv_bias=True, + qk_scale=None, + patch_norm=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + use_abs_pos_embed=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False, + pretrained=None, + frozen_stages=-1, + init_cfg=None): + self.frozen_stages = frozen_stages + + if isinstance(pretrain_img_size, int): + pretrain_img_size = to_2tuple(pretrain_img_size) + elif isinstance(pretrain_img_size, tuple): + if len(pretrain_img_size) == 1: + pretrain_img_size = to_2tuple(pretrain_img_size[0]) + assert len(pretrain_img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(pretrain_img_size)}' + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + init_cfg = init_cfg + else: + raise TypeError('pretrained must be a str or None') + + super(SwinTransformer, self).__init__(init_cfg=init_cfg) + + num_layers = len(depths) + self.out_indices = out_indices + self.use_abs_pos_embed = use_abs_pos_embed + + assert strides[0] == patch_size, 'Use non-overlapping patch embed.' 
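+        # With non-overlapping patch embedding, an (H, W) input is split into
+        # (H // patch_size) x (W // patch_size) tokens of dimension
+        # embed_dims, e.g. a 224x224 image with patch_size=4 yields a 56x56
+        # token grid.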
+ + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=strides[0], + padding='corner', + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + + if self.use_abs_pos_embed: + patch_row = pretrain_img_size[0] // patch_size + patch_col = pretrain_img_size[1] // patch_size + num_patches = patch_row * patch_col + self.absolute_pos_embed = nn.Parameter( + torch.zeros((1, num_patches, embed_dims))) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + # set stochastic depth decay rule + total_depth = sum(depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] + + self.stages = ModuleList() + in_channels = embed_dims + for i in range(num_layers): + if i < num_layers - 1: + downsample = PatchMerging( + in_channels=in_channels, + out_channels=2 * in_channels, + stride=strides[i + 1], + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None) + else: + downsample = None + + stage = SwinBlockSequence( + embed_dims=in_channels, + num_heads=num_heads[i], + feedforward_channels=int(mlp_ratio * in_channels), + depth=depths[i], + window_size=window_size, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], + downsample=downsample, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + init_cfg=None) + self.stages.append(stage) + if downsample: + in_channels = downsample.out_channels + + self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] + # Add a norm layer for each output + for i in out_indices: + layer = build_norm_layer(norm_cfg, self.num_features[i])[1] + layer_name = f'norm{i}' + self.add_module(layer_name, layer) + + def train(self, mode=True): + """Convert the model into training mode while keep layers freezed.""" + super(SwinTransformer, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + if self.use_abs_pos_embed: + self.absolute_pos_embed.requires_grad = False + self.drop_after_pos.eval() + + for i in range(1, self.frozen_stages + 1): + + if (i - 1) in self.out_indices: + norm_layer = getattr(self, f'norm{i-1}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + m = self.stages[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + logger = get_root_logger() + if self.init_cfg is None: + logger.warn(f'No pre-trained weights for ' + f'{self.__class__.__name__}, ' + f'training start from scratch') + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, nn.LayerNorm): + constant_init(m, val=1.0, bias=0.) 
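+        # Otherwise load the checkpoint given in `init_cfg['checkpoint']`,
+        # remapping parameter names and resizing the absolute position
+        # embedding and relative position bias tables when the pretraining
+        # resolution differs from the current configuration.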
+ else: + assert 'checkpoint' in self.init_cfg, f'Only support ' \ + f'specify `Pretrained` in ' \ + f'`init_cfg` in ' \ + f'{self.__class__.__name__} ' + ckpt = CheckpointLoader.load_checkpoint( + self.init_cfg['checkpoint'], logger=logger, map_location='cpu') + if 'state_dict' in ckpt: + _state_dict = ckpt['state_dict'] + elif 'model' in ckpt: + _state_dict = ckpt['model'] + else: + _state_dict = ckpt + + state_dict = OrderedDict() + for k, v in _state_dict.items(): + if k.startswith('backbone.'): + state_dict[k[9:]] = v + else: + state_dict[k] = v + + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + # reshape absolute position embedding + if state_dict.get('absolute_pos_embed') is not None: + absolute_pos_embed = state_dict['absolute_pos_embed'] + N1, L, C1 = absolute_pos_embed.size() + N2, C2, H, W = self.absolute_pos_embed.size() + if N1 != N2 or C1 != C2 or L != H * W: + logger.warning('Error in loading absolute_pos_embed, pass') + else: + state_dict['absolute_pos_embed'] = absolute_pos_embed.view( + N2, H, W, C2).permute(0, 3, 1, 2).contiguous() + + # interpolate position bias table if needed + relative_position_bias_table_keys = [ + k for k in state_dict.keys() + if 'relative_position_bias_table' in k + ] + for table_key in relative_position_bias_table_keys: + table_pretrained = state_dict[table_key] + table_current = self.state_dict()[table_key] + L1, nH1 = table_pretrained.size() + L2, nH2 = table_current.size() + if nH1 != nH2: + logger.warning(f'Error in loading {table_key}, pass') + elif L1 != L2: + S1 = int(L1**0.5) + S2 = int(L2**0.5) + table_pretrained_resized = F.interpolate( + table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), + size=(S2, S2), + mode='bicubic') + state_dict[table_key] = table_pretrained_resized.view( + nH2, L2).permute(1, 0).contiguous() + + # load state_dict + load_state_dict(self, state_dict, strict=False, logger=logger) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape, out, out_hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(out) + out = out.view(-1, *out_hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return outs diff --git a/downstream/mmsegmentation/mmseg/models/backbones/timm_backbone.py b/downstream/mmsegmentation/mmseg/models/backbones/timm_backbone.py new file mode 100644 index 0000000..01b29fc --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/timm_backbone.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +try: + import timm +except ImportError: + timm = None + +from mmcv.cnn.bricks.registry import NORM_LAYERS +from mmcv.runner import BaseModule + +from ..builder import BACKBONES + + +@BACKBONES.register_module() +class TIMMBackbone(BaseModule): + """Wrapper to use backbones from timm library. More details can be found in + `timm `_ . + + Args: + model_name (str): Name of timm model to instantiate. + pretrained (bool): Load pretrained weights if True. + checkpoint_path (str): Path of checkpoint to load after + model is initialized. + in_channels (int): Number of input image channels. Default: 3. + init_cfg (dict, optional): Initialization config dict + **kwargs: Other timm & model specific arguments. 
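+
+    Example (an illustrative sketch; it assumes the optional ``timm`` package
+    is installed and uses ``resnet18`` purely as an example model name):
+        >>> import torch
+        >>> from mmseg.models.backbones.timm_backbone import TIMMBackbone
+        >>> self = TIMMBackbone(
+        ...     model_name='resnet18',
+        ...     features_only=True,
+        ...     pretrained=False,
+        ...     in_channels=3)
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> # Returns the list of multi-scale feature maps from timm.
+        >>> outputs = self(inputs)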
+ """ + + def __init__( + self, + model_name, + features_only=True, + pretrained=True, + checkpoint_path='', + in_channels=3, + init_cfg=None, + **kwargs, + ): + if timm is None: + raise RuntimeError('timm is not installed') + super(TIMMBackbone, self).__init__(init_cfg) + if 'norm_layer' in kwargs: + kwargs['norm_layer'] = NORM_LAYERS.get(kwargs['norm_layer']) + self.timm_model = timm.create_model( + model_name=model_name, + features_only=features_only, + pretrained=pretrained, + in_chans=in_channels, + checkpoint_path=checkpoint_path, + **kwargs, + ) + + # Make unused parameters None + self.timm_model.global_pool = None + self.timm_model.fc = None + self.timm_model.classifier = None + + # Hack to use pretrained weights from timm + if pretrained or checkpoint_path: + self._is_init = True + + def forward(self, x): + features = self.timm_model(x) + return features diff --git a/downstream/mmsegmentation/mmseg/models/backbones/twins.py b/downstream/mmsegmentation/mmseg/models/backbones/twins.py new file mode 100644 index 0000000..6bd9469 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/twins.py @@ -0,0 +1,588 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import FFN +from mmcv.cnn.utils.weight_init import (constant_init, normal_init, + trunc_normal_init) +from mmcv.runner import BaseModule, ModuleList +from torch.nn.modules.batchnorm import _BatchNorm + +from mmseg.models.backbones.mit import EfficientMultiheadAttention +from mmseg.models.builder import BACKBONES +from ..utils.embed import PatchEmbed + + +class GlobalSubsampledAttention(EfficientMultiheadAttention): + """Global Sub-sampled Attention (Spatial Reduction Attention) + + This module is modified from EfficientMultiheadAttention, + which is a module from mmseg.models.backbones.mit.py. + Specifically, there is no difference between + `GlobalSubsampledAttention` and `EfficientMultiheadAttention`, + `GlobalSubsampledAttention` is built as a brand new class + because it is renamed as `Global sub-sampled attention (GSA)` + in paper. + + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. Default: None. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dims) + or (n, batch, embed_dims). Default: False. + qkv_bias (bool): enable bias for qkv if True. Default: True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (int): The ratio of spatial reduction of GSA of PCPVT. + Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=None, + batch_first=True, + qkv_bias=True, + norm_cfg=dict(type='LN'), + sr_ratio=1, + init_cfg=None): + super(GlobalSubsampledAttention, self).__init__( + embed_dims, + num_heads, + attn_drop=attn_drop, + proj_drop=proj_drop, + dropout_layer=dropout_layer, + batch_first=batch_first, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio, + init_cfg=init_cfg) + + +class GSAEncoderLayer(BaseModule): + """Implements one encoder layer with GSA. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (float): Kernel_size of conv in Attention modules. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=1., + init_cfg=None): + super(GSAEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1] + self.attn = GlobalSubsampledAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate) + ) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x, hw_shape): + x = x + self.drop_path(self.attn(self.norm1(x), hw_shape, identity=0.)) + x = x + self.drop_path(self.ffn(self.norm2(x))) + return x + + +class LocallyGroupedSelfAttention(BaseModule): + """Locally-grouped Self Attention (LSA) module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. Default: 8 + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: False. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + window_size(int): Window size of LSA. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + window_size=1, + init_cfg=None): + super(LocallyGroupedSelfAttention, self).__init__(init_cfg=init_cfg) + + assert embed_dims % num_heads == 0, f'dim {embed_dims} should be ' \ + f'divided by num_heads ' \ + f'{num_heads}.' + self.embed_dims = embed_dims + self.num_heads = num_heads + head_dim = embed_dims // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + self.window_size = window_size + + def forward(self, x, hw_shape): + b, n, c = x.shape + h, w = hw_shape + x = x.view(b, h, w, c) + + # pad feature maps to multiples of Local-groups + pad_l = pad_t = 0 + pad_r = (self.window_size - w % self.window_size) % self.window_size + pad_b = (self.window_size - h % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + + # calculate attention mask for LSA + Hp, Wp = x.shape[1:-1] + _h, _w = Hp // self.window_size, Wp // self.window_size + mask = torch.zeros((1, Hp, Wp), device=x.device) + mask[:, -pad_b:, :].fill_(1) + mask[:, :, -pad_r:].fill_(1) + + # [B, _h, _w, window_size, window_size, C] + x = x.reshape(b, _h, self.window_size, _w, self.window_size, + c).transpose(2, 3) + mask = mask.reshape(1, _h, self.window_size, _w, + self.window_size).transpose(2, 3).reshape( + 1, _h * _w, + self.window_size * self.window_size) + # [1, _h*_w, window_size*window_size, window_size*window_size] + attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-1000.0)).masked_fill( + attn_mask == 0, float(0.0)) + + # [3, B, _w*_h, nhead, window_size*window_size, dim] + qkv = self.qkv(x).reshape(b, _h * _w, + self.window_size * self.window_size, 3, + self.num_heads, c // self.num_heads).permute( + 3, 0, 1, 4, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] + # [B, _h*_w, n_head, window_size*window_size, window_size*window_size] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn + attn_mask.unsqueeze(2) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + attn = (attn @ v).transpose(2, 3).reshape(b, _h, _w, self.window_size, + self.window_size, c) + x = attn.transpose(2, 3).reshape(b, _h * self.window_size, + _w * self.window_size, c) + if pad_r > 0 or pad_b > 0: + x = x[:, :h, :w, :].contiguous() + + x = x.reshape(b, n, c) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LSAEncoderLayer(BaseModule): + """Implements one encoder layer in Twins-SVT. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). 
+ window_size (int): Window size of LSA. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + qk_scale=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + window_size=1, + init_cfg=None): + + super(LSAEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1] + self.attn = LocallyGroupedSelfAttention(embed_dims, num_heads, + qkv_bias, qk_scale, + attn_drop_rate, drop_rate, + window_size) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate) + ) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x, hw_shape): + x = x + self.drop_path(self.attn(self.norm1(x), hw_shape)) + x = x + self.drop_path(self.ffn(self.norm2(x))) + return x + + +class ConditionalPositionEncoding(BaseModule): + """The Conditional Position Encoding (CPE) module. + + The CPE is the implementation of 'Conditional Positional Encodings + for Vision Transformers '_. + + Args: + in_channels (int): Number of input channels. + embed_dims (int): The feature dimension. Default: 768. + stride (int): Stride of conv layer. Default: 1. + """ + + def __init__(self, in_channels, embed_dims=768, stride=1, init_cfg=None): + super(ConditionalPositionEncoding, self).__init__(init_cfg=init_cfg) + self.proj = nn.Conv2d( + in_channels, + embed_dims, + kernel_size=3, + stride=stride, + padding=1, + bias=True, + groups=embed_dims) + self.stride = stride + + def forward(self, x, hw_shape): + b, n, c = x.shape + h, w = hw_shape + feat_token = x + cnn_feat = feat_token.transpose(1, 2).view(b, c, h, w) + if self.stride == 1: + x = self.proj(cnn_feat) + cnn_feat + else: + x = self.proj(cnn_feat) + x = x.flatten(2).transpose(1, 2) + return x + + +@BACKBONES.register_module() +class PCPVT(BaseModule): + """The backbone of Twins-PCPVT. + + This backbone is the implementation of `Twins: Revisiting the Design + of Spatial Attention in Vision Transformers + `_. + + Args: + in_channels (int): Number of input channels. Default: 3. + embed_dims (list): Embedding dimension. Default: [64, 128, 256, 512]. + patch_sizes (list): The patch sizes. Default: [4, 2, 2, 2]. + strides (list): The strides. Default: [4, 2, 2, 2]. + num_heads (int): Number of attention heads. Default: [1, 2, 4, 8]. + mlp_ratios (int): Ratio of mlp hidden dim to embedding dim. + Default: [4, 4, 4, 4]. + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool): Enable bias for qkv if True. Default: False. + drop_rate (float): Probability of an element to be zeroed. + Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0 + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + depths (list): Depths of each stage. Default [3, 4, 6, 3] + sr_ratios (list): Kernel_size of conv in each Attn module in + Transformer encoder layer. Default: [8, 4, 2, 1]. + norm_after_stage(bool): Add extra norm. Default False. 
+ init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + in_channels=3, + embed_dims=[64, 128, 256, 512], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + num_heads=[1, 2, 4, 8], + mlp_ratios=[4, 4, 4, 4], + out_indices=(0, 1, 2, 3), + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + depths=[3, 4, 6, 3], + sr_ratios=[8, 4, 2, 1], + norm_after_stage=False, + pretrained=None, + init_cfg=None): + super(PCPVT, self).__init__(init_cfg=init_cfg) + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be set at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is not None: + raise TypeError('pretrained must be a str or None') + self.depths = depths + + # patch_embed + self.patch_embeds = ModuleList() + self.position_encoding_drops = ModuleList() + self.layers = ModuleList() + + for i in range(len(depths)): + self.patch_embeds.append( + PatchEmbed( + in_channels=in_channels if i == 0 else embed_dims[i - 1], + embed_dims=embed_dims[i], + conv_type='Conv2d', + kernel_size=patch_sizes[i], + stride=strides[i], + padding='corner', + norm_cfg=norm_cfg)) + + self.position_encoding_drops.append(nn.Dropout(p=drop_rate)) + + self.position_encodings = ModuleList([ + ConditionalPositionEncoding(embed_dim, embed_dim) + for embed_dim in embed_dims + ]) + + # transformer encoder + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + cur = 0 + + for k in range(len(depths)): + _block = ModuleList([ + GSAEncoderLayer( + embed_dims=embed_dims[k], + num_heads=num_heads[k], + feedforward_channels=mlp_ratios[k] * embed_dims[k], + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate, + drop_path_rate=dpr[cur + i], + num_fcs=2, + qkv_bias=qkv_bias, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=sr_ratios[k]) for i in range(depths[k]) + ]) + self.layers.append(_block) + cur += depths[k] + + self.norm_name, norm = build_norm_layer( + norm_cfg, embed_dims[-1], postfix=1) + + self.out_indices = out_indices + self.norm_after_stage = norm_after_stage + if self.norm_after_stage: + self.norm_list = ModuleList() + for dim in embed_dims: + self.norm_list.append(build_norm_layer(norm_cfg, dim)[1]) + + def init_weights(self): + if self.init_cfg is not None: + super(PCPVT, self).init_weights() + else: + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) 
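+                # Conv layers use a fan-out scaled (He-style) normal
+                # initialization.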
+ elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[ + 1] * m.out_channels + fan_out //= m.groups + normal_init( + m, mean=0, std=math.sqrt(2.0 / fan_out), bias=0) + + def forward(self, x): + outputs = list() + + b = x.shape[0] + + for i in range(len(self.depths)): + x, hw_shape = self.patch_embeds[i](x) + h, w = hw_shape + x = self.position_encoding_drops[i](x) + for j, blk in enumerate(self.layers[i]): + x = blk(x, hw_shape) + if j == 0: + x = self.position_encodings[i](x, hw_shape) + if self.norm_after_stage: + x = self.norm_list[i](x) + x = x.reshape(b, h, w, -1).permute(0, 3, 1, 2).contiguous() + + if i in self.out_indices: + outputs.append(x) + + return tuple(outputs) + + +@BACKBONES.register_module() +class SVT(PCPVT): + """The backbone of Twins-SVT. + + This backbone is the implementation of `Twins: Revisiting the Design + of Spatial Attention in Vision Transformers + `_. + + Args: + in_channels (int): Number of input channels. Default: 3. + embed_dims (list): Embedding dimension. Default: [64, 128, 256, 512]. + patch_sizes (list): The patch sizes. Default: [4, 2, 2, 2]. + strides (list): The strides. Default: [4, 2, 2, 2]. + num_heads (int): Number of attention heads. Default: [1, 2, 4]. + mlp_ratios (int): Ratio of mlp hidden dim to embedding dim. + Default: [4, 4, 4]. + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool): Enable bias for qkv if True. Default: False. + drop_rate (float): Dropout rate. Default 0. + attn_drop_rate (float): Dropout ratio of attention weight. + Default 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.2. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + depths (list): Depths of each stage. Default [4, 4, 4]. + sr_ratios (list): Kernel_size of conv in each Attn module in + Transformer encoder layer. Default: [4, 2, 1]. + windiow_sizes (list): Window size of LSA. Default: [7, 7, 7], + input_features_slice(bool): Input features need slice. Default: False. + norm_after_stage(bool): Add extra norm. Default False. + strides (list): Strides in patch-Embedding modules. Default: (2, 2, 2) + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
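+
+    Example (an illustrative run with the class defaults; any input size
+    works as long as each stage's token grid is divisible by its window
+    size):
+        >>> import torch
+        >>> from mmseg.models.backbones.twins import SVT
+        >>> self = SVT().eval()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> outputs = self.forward(inputs)
+        >>> for i in range(len(outputs)):
+        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
+        outputs[0].shape = torch.Size([1, 64, 56, 56])
+        outputs[1].shape = torch.Size([1, 128, 28, 28])
+        outputs[2].shape = torch.Size([1, 256, 14, 14])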
+ """ + + def __init__(self, + in_channels=3, + embed_dims=[64, 128, 256], + patch_sizes=[4, 2, 2, 2], + strides=[4, 2, 2, 2], + num_heads=[1, 2, 4], + mlp_ratios=[4, 4, 4], + out_indices=(0, 1, 2, 3), + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + norm_cfg=dict(type='LN'), + depths=[4, 4, 4], + sr_ratios=[4, 2, 1], + windiow_sizes=[7, 7, 7], + norm_after_stage=True, + pretrained=None, + init_cfg=None): + super(SVT, self).__init__(in_channels, embed_dims, patch_sizes, + strides, num_heads, mlp_ratios, out_indices, + qkv_bias, drop_rate, attn_drop_rate, + drop_path_rate, norm_cfg, depths, sr_ratios, + norm_after_stage, pretrained, init_cfg) + # transformer encoder + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + for k in range(len(depths)): + for i in range(depths[k]): + if i % 2 == 0: + self.layers[k][i] = \ + LSAEncoderLayer( + embed_dims=embed_dims[k], + num_heads=num_heads[k], + feedforward_channels=mlp_ratios[k] * embed_dims[k], + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[sum(depths[:k])+i], + qkv_bias=qkv_bias, + window_size=windiow_sizes[k]) diff --git a/downstream/mmsegmentation/mmseg/models/backbones/unet.py b/downstream/mmsegmentation/mmseg/models/backbones/unet.py new file mode 100644 index 0000000..c2d3366 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/unet.py @@ -0,0 +1,438 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (UPSAMPLE_LAYERS, ConvModule, build_activation_layer, + build_norm_layer) +from mmcv.runner import BaseModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmseg.ops import Upsample +from ..builder import BACKBONES +from ..utils import UpConvBlock + + +class BasicConvBlock(nn.Module): + """Basic convolutional block for UNet. + + This module consists of several plain convolutional layers. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + num_convs (int): Number of convolutional layers. Default: 2. + stride (int): Whether use stride convolution to downsample + the input feature map. If stride=2, it only uses stride convolution + in the first convolutional layer to downsample the input feature + map. Options are 1 or 2. Default: 1. + dilation (int): Whether use dilated convolution to expand the + receptive field. Set dilation rate of each convolutional layer and + the dilation rate of the first convolutional layer is always 1. + Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + num_convs=2, + stride=1, + dilation=1, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + dcn=None, + plugins=None): + super(BasicConvBlock, self).__init__() + assert dcn is None, 'Not implemented yet.' 
+ assert plugins is None, 'Not implemented yet.' + + self.with_cp = with_cp + convs = [] + for i in range(num_convs): + convs.append( + ConvModule( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride if i == 0 else 1, + dilation=1 if i == 0 else dilation, + padding=1 if i == 0 else dilation, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.convs = nn.Sequential(*convs) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.convs, x) + else: + out = self.convs(x) + return out + + +@UPSAMPLE_LAYERS.register_module() +class DeconvModule(nn.Module): + """Deconvolution upsample module in decoder for UNet (2X upsample). + + This module uses deconvolution to upsample feature map in the decoder + of UNet. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + kernel_size (int): Kernel size of the convolutional layer. Default: 4. + """ + + def __init__(self, + in_channels, + out_channels, + with_cp=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + *, + kernel_size=4, + scale_factor=2): + super(DeconvModule, self).__init__() + + assert (kernel_size - scale_factor >= 0) and\ + (kernel_size - scale_factor) % 2 == 0,\ + f'kernel_size should be greater than or equal to scale_factor '\ + f'and (kernel_size - scale_factor) should be even numbers, '\ + f'while the kernel size is {kernel_size} and scale_factor is '\ + f'{scale_factor}.' + + stride = scale_factor + padding = (kernel_size - scale_factor) // 2 + self.with_cp = with_cp + deconv = nn.ConvTranspose2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding) + + norm_name, norm = build_norm_layer(norm_cfg, out_channels) + activate = build_activation_layer(act_cfg) + self.deconv_upsamping = nn.Sequential(deconv, norm, activate) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.deconv_upsamping, x) + else: + out = self.deconv_upsamping(x) + return out + + +@UPSAMPLE_LAYERS.register_module() +class InterpConv(nn.Module): + """Interpolation upsample module in decoder for UNet. + + This module uses interpolation to upsample feature map in the decoder + of UNet. It consists of one interpolation upsample layer and one + convolutional layer. It can be one interpolation upsample layer followed + by one convolutional layer (conv_first=False) or one convolutional layer + followed by one interpolation upsample layer (conv_first=True). + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. 
+ conv_first (bool): Whether convolutional layer or interpolation + upsample layer first. Default: False. It means interpolation + upsample layer followed by one convolutional layer. + kernel_size (int): Kernel size of the convolutional layer. Default: 1. + stride (int): Stride of the convolutional layer. Default: 1. + padding (int): Padding of the convolutional layer. Default: 1. + upsample_cfg (dict): Interpolation config of the upsample layer. + Default: dict( + scale_factor=2, mode='bilinear', align_corners=False). + """ + + def __init__(self, + in_channels, + out_channels, + with_cp=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + *, + conv_cfg=None, + conv_first=False, + kernel_size=1, + stride=1, + padding=0, + upsample_cfg=dict( + scale_factor=2, mode='bilinear', align_corners=False)): + super(InterpConv, self).__init__() + + self.with_cp = with_cp + conv = ConvModule( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + upsample = Upsample(**upsample_cfg) + if conv_first: + self.interp_upsample = nn.Sequential(conv, upsample) + else: + self.interp_upsample = nn.Sequential(upsample, conv) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.interp_upsample, x) + else: + out = self.interp_upsample(x) + return out + + +@BACKBONES.register_module() +class UNet(BaseModule): + """UNet backbone. + + This backbone is the implementation of `U-Net: Convolutional Networks + for Biomedical Image Segmentation `_. + + Args: + in_channels (int): Number of input image channels. Default" 3. + base_channels (int): Number of base channels of each stage. + The output channels of the first stage. Default: 64. + num_stages (int): Number of stages in encoder, normally 5. Default: 5. + strides (Sequence[int 1 | 2]): Strides of each stage in encoder. + len(strides) is equal to num_stages. Normally the stride of the + first stage in encoder is 1. If strides[i]=2, it uses stride + convolution to downsample in the correspondence encoder stage. + Default: (1, 1, 1, 1, 1). + enc_num_convs (Sequence[int]): Number of convolutional layers in the + convolution block of the correspondence encoder stage. + Default: (2, 2, 2, 2, 2). + dec_num_convs (Sequence[int]): Number of convolutional layers in the + convolution block of the correspondence decoder stage. + Default: (2, 2, 2, 2). + downsamples (Sequence[int]): Whether use MaxPool to downsample the + feature map after the first stage of encoder + (stages: [1, num_stages)). If the correspondence encoder stage use + stride convolution (strides[i]=2), it will never use MaxPool to + downsample, even downsamples[i-1]=True. + Default: (True, True, True, True). + enc_dilations (Sequence[int]): Dilation rate of each stage in encoder. + Default: (1, 1, 1, 1, 1). + dec_dilations (Sequence[int]): Dilation rate of each stage in decoder. + Default: (1, 1, 1, 1). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + upsample_cfg (dict): The upsample config of the upsample module in + decoder. Default: dict(type='InterpConv'). 
+ norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Notice: + The input image size should be divisible by the whole downsample rate + of the encoder. More detail of the whole downsample rate can be found + in UNet._check_input_divisible. + """ + + def __init__(self, + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False, + dcn=None, + plugins=None, + pretrained=None, + init_cfg=None): + super(UNet, self).__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + assert len(strides) == num_stages, \ + 'The length of strides should be equal to num_stages, '\ + f'while the strides is {strides}, the length of '\ + f'strides is {len(strides)}, and the num_stages is '\ + f'{num_stages}.' + assert len(enc_num_convs) == num_stages, \ + 'The length of enc_num_convs should be equal to num_stages, '\ + f'while the enc_num_convs is {enc_num_convs}, the length of '\ + f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is '\ + f'{num_stages}.' + assert len(dec_num_convs) == (num_stages-1), \ + 'The length of dec_num_convs should be equal to (num_stages-1), '\ + f'while the dec_num_convs is {dec_num_convs}, the length of '\ + f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is '\ + f'{num_stages}.' + assert len(downsamples) == (num_stages-1), \ + 'The length of downsamples should be equal to (num_stages-1), '\ + f'while the downsamples is {downsamples}, the length of '\ + f'downsamples is {len(downsamples)}, and the num_stages is '\ + f'{num_stages}.' + assert len(enc_dilations) == num_stages, \ + 'The length of enc_dilations should be equal to num_stages, '\ + f'while the enc_dilations is {enc_dilations}, the length of '\ + f'enc_dilations is {len(enc_dilations)}, and the num_stages is '\ + f'{num_stages}.' + assert len(dec_dilations) == (num_stages-1), \ + 'The length of dec_dilations should be equal to (num_stages-1), '\ + f'while the dec_dilations is {dec_dilations}, the length of '\ + f'dec_dilations is {len(dec_dilations)}, and the num_stages is '\ + f'{num_stages}.' 
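+        # Encoder stage i outputs base_channels * 2**i channels; each decoder
+        # stage upsamples the deeper feature map and fuses it with the skip
+        # connection from the corresponding encoder stage via `UpConvBlock`.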
+ self.num_stages = num_stages + self.strides = strides + self.downsamples = downsamples + self.norm_eval = norm_eval + self.base_channels = base_channels + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + for i in range(num_stages): + enc_conv_block = [] + if i != 0: + if strides[i] == 1 and downsamples[i - 1]: + enc_conv_block.append(nn.MaxPool2d(kernel_size=2)) + upsample = (strides[i] != 1 or downsamples[i - 1]) + self.decoder.append( + UpConvBlock( + conv_block=BasicConvBlock, + in_channels=base_channels * 2**i, + skip_channels=base_channels * 2**(i - 1), + out_channels=base_channels * 2**(i - 1), + num_convs=dec_num_convs[i - 1], + stride=1, + dilation=dec_dilations[i - 1], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + upsample_cfg=upsample_cfg if upsample else None, + dcn=None, + plugins=None)) + + enc_conv_block.append( + BasicConvBlock( + in_channels=in_channels, + out_channels=base_channels * 2**i, + num_convs=enc_num_convs[i], + stride=strides[i], + dilation=enc_dilations[i], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dcn=None, + plugins=None)) + self.encoder.append((nn.Sequential(*enc_conv_block))) + in_channels = base_channels * 2**i + + def forward(self, x): + self._check_input_divisible(x) + enc_outs = [] + for enc in self.encoder: + x = enc(x) + enc_outs.append(x) + dec_outs = [x] + for i in reversed(range(len(self.decoder))): + x = self.decoder[i](enc_outs[i], x) + dec_outs.append(x) + + return dec_outs + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(UNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _check_input_divisible(self, x): + h, w = x.shape[-2:] + whole_downsample_rate = 1 + for i in range(1, self.num_stages): + if self.strides[i] == 2 or self.downsamples[i - 1]: + whole_downsample_rate *= 2 + assert (h % whole_downsample_rate == 0) \ + and (w % whole_downsample_rate == 0),\ + f'The input image size {(h, w)} should be divisible by the whole '\ + f'downsample rate {whole_downsample_rate}, when num_stages is '\ + f'{self.num_stages}, strides is {self.strides}, and downsamples '\ + f'is {self.downsamples}.' diff --git a/downstream/mmsegmentation/mmseg/models/backbones/vit.py b/downstream/mmsegmentation/mmseg/models/backbones/vit.py new file mode 100644 index 0000000..fe65039 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/backbones/vit.py @@ -0,0 +1,428 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmcv.cnn.utils.weight_init import (constant_init, kaiming_init, + trunc_normal_) +from mmcv.runner import (BaseModule, CheckpointLoader, ModuleList, + load_state_dict) +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.utils import _pair as to_2tuple + +from mmseg.ops import resize +from mmseg.utils import get_root_logger +from ..builder import BACKBONES +from ..utils import PatchEmbed + + +class TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in Vision Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. 
+ feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): enable bias for qkv if True. Default: True + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default: True. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + batch_first=True, + with_cp=False): + super(TransformerEncoderLayer, self).__init__() + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + batch_first=batch_first, + bias=qkv_bias) + + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + self.with_cp = with_cp + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + + def _inner_forward(x): + x = self.attn(self.norm1(x), identity=x) + x = self.ffn(self.norm2(x), identity=x) + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + return x + + +@BACKBONES.register_module() +class VisionTransformer(BaseModule): + """Vision Transformer. + + This backbone is the implementation of `An Image is Worth 16x16 Words: + Transformers for Image Recognition at + Scale `_. + + Args: + img_size (int | tuple): Input image size. Default: 224. + patch_size (int): The patch size. Default: 16. + in_channels (int): Number of input channels. Default: 3. + embed_dims (int): embedding dimension. Default: 768. + num_layers (int): depth of transformer. Default: 12. + num_heads (int): number of attention heads. Default: 12. + mlp_ratio (int): ratio of mlp hidden dim to embedding dim. + Default: 4. + out_indices (list | tuple | int): Output from which stages. + Default: -1. + qkv_bias (bool): enable bias for qkv if True. Default: True. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0 + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): stochastic depth rate. Default 0.0 + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Default: True. + output_cls_token (bool): Whether output the cls_token. 
If set True, + `with_cls_token` must be True. Default: False. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + patch_norm (bool): Whether to add a norm in PatchEmbed Block. + Default: False. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Default: False. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Default: bicubic. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. Default: False. + pretrained (str, optional): model pretrained path. Default: None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dims=768, + num_layers=12, + num_heads=12, + mlp_ratio=4, + out_indices=-1, + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + with_cls_token=True, + output_cls_token=False, + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + patch_norm=False, + final_norm=False, + interpolate_mode='bicubic', + num_fcs=2, + norm_eval=False, + with_cp=False, + pretrained=None, + init_cfg=None): + super(VisionTransformer, self).__init__(init_cfg=init_cfg) + + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + if output_cls_token: + assert with_cls_token is True, f'with_cls_token must be True if' \ + f'set output_cls_token to True, but got {with_cls_token}' + + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be set at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is not None: + raise TypeError('pretrained must be a str or None') + + self.img_size = img_size + self.patch_size = patch_size + self.interpolate_mode = interpolate_mode + self.norm_eval = norm_eval + self.with_cp = with_cp + self.pretrained = pretrained + + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + padding='corner', + norm_cfg=norm_cfg if patch_norm else None, + init_cfg=None, + ) + + num_patches = (img_size[0] // patch_size) * \ + (img_size[1] // patch_size) + + self.with_cls_token = with_cls_token + self.output_cls_token = output_cls_token + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + 1, embed_dims)) + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + if out_indices == -1: + out_indices = num_layers - 1 + self.out_indices = [out_indices] + elif isinstance(out_indices, list) or isinstance(out_indices, tuple): + self.out_indices = out_indices + else: + raise TypeError('out_indices must be type of int, list or tuple') + + dpr = [ 
+ x.item() for x in torch.linspace(0, drop_path_rate, num_layers) + ] # stochastic depth decay rule + + self.layers = ModuleList() + for i in range(num_layers): + self.layers.append( + TransformerEncoderLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=mlp_ratio * embed_dims, + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate, + drop_path_rate=dpr[i], + num_fcs=num_fcs, + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp, + batch_first=True)) + + self.final_norm = final_norm + if final_norm: + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def init_weights(self): + if (isinstance(self.init_cfg, dict) + and self.init_cfg.get('type') == 'Pretrained'): + logger = get_root_logger() + checkpoint = CheckpointLoader.load_checkpoint( + self.init_cfg['checkpoint'], logger=logger, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + if 'pos_embed' in state_dict.keys(): + if self.pos_embed.shape != state_dict['pos_embed'].shape: + logger.info(msg=f'Resize the pos_embed shape from ' + f'{state_dict["pos_embed"].shape} to ' + f'{self.pos_embed.shape}') + h, w = self.img_size + pos_size = int( + math.sqrt(state_dict['pos_embed'].shape[1] - 1)) + state_dict['pos_embed'] = self.resize_pos_embed( + state_dict['pos_embed'], + (h // self.patch_size, w // self.patch_size), + (pos_size, pos_size), self.interpolate_mode) + + load_state_dict(self, state_dict, strict=False, logger=logger) + elif self.init_cfg is not None: + super(VisionTransformer, self).init_weights() + else: + # We only implement the 'jax_impl' initialization implemented at + # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501 + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + for n, m in self.named_modules(): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + if 'ffn' in n: + nn.init.normal_(m.bias, mean=0., std=1e-6) + else: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + kaiming_init(m, mode='fan_in', bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) + + def _pos_embeding(self, patched_img, hw_shape, pos_embed): + """Positiong embeding method. + + Resize the pos_embed, if the input image size doesn't match + the training size. + Args: + patched_img (torch.Tensor): The patched image, it should be + shape of [B, L1, C]. + hw_shape (tuple): The downsampled image resolution. + pos_embed (torch.Tensor): The pos_embed weighs, it should be + shape of [B, L2, c]. + Return: + torch.Tensor: The pos encoded image feature. 
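+
+        Note:
+            Illustrative example (added, with assumed sizes): for a model
+            built with ``img_size=224`` and ``patch_size=16`` the stored
+            pos_embed has 14 * 14 + 1 = 197 tokens; when a 512x512 image is
+            fed in, ``hw_shape`` is (32, 32) and the 14x14 grid part of
+            pos_embed is resized to 32x32 (using ``self.interpolate_mode``)
+            before being added to the patch tokens.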
+ """ + assert patched_img.ndim == 3 and pos_embed.ndim == 3, \ + 'the shapes of patched_img and pos_embed must be [B, L, C]' + x_len, pos_len = patched_img.shape[1], pos_embed.shape[1] + if x_len != pos_len: + if pos_len == (self.img_size[0] // self.patch_size) * ( + self.img_size[1] // self.patch_size) + 1: + pos_h = self.img_size[0] // self.patch_size + pos_w = self.img_size[1] // self.patch_size + else: + raise ValueError( + 'Unexpected shape of pos_embed, got {}.'.format( + pos_embed.shape)) + pos_embed = self.resize_pos_embed(pos_embed, hw_shape, + (pos_h, pos_w), + self.interpolate_mode) + return self.drop_after_pos(patched_img + pos_embed) + + @staticmethod + def resize_pos_embed(pos_embed, input_shpae, pos_shape, mode): + """Resize pos_embed weights. + + Resize pos_embed using bicubic interpolate method. + Args: + pos_embed (torch.Tensor): Position embedding weights. + input_shpae (tuple): Tuple for (downsampled input image height, + downsampled input image width). + pos_shape (tuple): The resolution of downsampled origin training + image. + mode (str): Algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. Default: ``'nearest'`` + Return: + torch.Tensor: The resized pos_embed of shape [B, L_new, C] + """ + assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' + pos_h, pos_w = pos_shape + cls_token_weight = pos_embed[:, 0] + pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] + pos_embed_weight = pos_embed_weight.reshape( + 1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2) + pos_embed_weight = resize( + pos_embed_weight, size=input_shpae, align_corners=False, mode=mode) + cls_token_weight = cls_token_weight.unsqueeze(1) + pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2) + pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1) + return pos_embed + + def forward(self, inputs): + B = inputs.shape[0] + + x, hw_shape = self.patch_embed(inputs) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + x = self._pos_embeding(x, hw_shape, self.pos_embed) + + if not self.with_cls_token: + # Remove class token for transformer encoder input + x = x[:, 1:] + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i == len(self.layers) - 1: + if self.final_norm: + x = self.norm1(x) + if i in self.out_indices: + if self.with_cls_token: + # Remove class token and reshape token for decoder head + out = x[:, 1:] + else: + out = x + B, _, C = out.shape + out = out.reshape(B, hw_shape[0], hw_shape[1], + C).permute(0, 3, 1, 2).contiguous() + if self.output_cls_token: + out = [out, x[:, 0]] + outs.append(out) + + return tuple(outs) + + def train(self, mode=True): + super(VisionTransformer, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.LayerNorm): + m.eval() diff --git a/downstream/mmsegmentation/mmseg/models/builder.py b/downstream/mmsegmentation/mmseg/models/builder.py new file mode 100644 index 0000000..5e18e4e --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/builder.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
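+# Illustrative note (added commentary, not part of the original file):
+# BACKBONES, NECKS, HEADS, LOSSES and SEGMENTORS below all alias the same
+# MODELS registry, and each build_* helper resolves the ``type`` key of a
+# config dict against that registry, e.g.
+#     backbone = build_backbone(dict(type='UNet'))
+# builds the UNet backbone registered in this package.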
+import warnings + +from mmcv.cnn import MODELS as MMCV_MODELS +from mmcv.cnn.bricks.registry import ATTENTION as MMCV_ATTENTION +from mmcv.utils import Registry + +MODELS = Registry('models', parent=MMCV_MODELS) +ATTENTION = Registry('attention', parent=MMCV_ATTENTION) + +BACKBONES = MODELS +NECKS = MODELS +HEADS = MODELS +LOSSES = MODELS +SEGMENTORS = MODELS + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_segmentor(cfg, train_cfg=None, test_cfg=None): + """Build segmentor.""" + if train_cfg is not None or test_cfg is not None: + warnings.warn( + 'train_cfg and test_cfg is deprecated, ' + 'please specify them in model', UserWarning) + assert cfg.get('train_cfg') is None or train_cfg is None, \ + 'train_cfg specified in both outer field and model field ' + assert cfg.get('test_cfg') is None or test_cfg is None, \ + 'test_cfg specified in both outer field and model field ' + return SEGMENTORS.build( + cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/__init__.py b/downstream/mmsegmentation/mmseg/models/decode_heads/__init__.py new file mode 100644 index 0000000..fda8f05 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ann_head import ANNHead +from .apc_head import APCHead +from .aspp_head import ASPPHead +from .cc_head import CCHead +from .da_head import DAHead +from .dm_head import DMHead +from .dnl_head import DNLHead +from .dpt_head import DPTHead +from .ema_head import EMAHead +from .enc_head import EncHead +from .fcn_head import FCNHead +from .fpn_head import FPNHead +from .gc_head import GCHead +from .isa_head import ISAHead +from .knet_head import IterativeDecodeHead, KernelUpdateHead, KernelUpdator +from .lraspp_head import LRASPPHead +from .nl_head import NLHead +from .ocr_head import OCRHead +from .point_head import PointHead +from .psa_head import PSAHead +from .psp_head import PSPHead +from .segformer_head import SegformerHead +from .segmenter_mask_head import SegmenterMaskTransformerHead +from .sep_aspp_head import DepthwiseSeparableASPPHead +from .sep_fcn_head import DepthwiseSeparableFCNHead +from .setr_mla_head import SETRMLAHead +from .setr_up_head import SETRUPHead +from .stdc_head import STDCHead +from .uper_head import UPerHead +from .segformer_head_ori import SegFormerHeadOri + +__all__ = [ + 'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead', + 'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead', + 'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead', + 'PointHead', 'APCHead', 'DMHead', 'LRASPPHead', 'SETRUPHead', + 'SETRMLAHead', 'DPTHead', 'SETRMLAHead', 'SegmenterMaskTransformerHead', + 'SegformerHead', 'ISAHead', 'STDCHead', 'IterativeDecodeHead', + 'KernelUpdateHead', 'KernelUpdator', 'SegFormerHeadOri' +] diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/ann_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/ann_head.py new file mode 100644 index 0000000..c8d882e --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/ann_head.py @@ -0,0 +1,246 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from ..builder import HEADS +from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from .decode_head import BaseDecodeHead + + +class PPMConcat(nn.ModuleList): + """Pyramid Pooling Module that only concat the features of each layer. + + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. + """ + + def __init__(self, pool_scales=(1, 3, 6, 8)): + super(PPMConcat, self).__init__( + [nn.AdaptiveAvgPool2d(pool_scale) for pool_scale in pool_scales]) + + def forward(self, feats): + """Forward function.""" + ppm_outs = [] + for ppm in self: + ppm_out = ppm(feats) + ppm_outs.append(ppm_out.view(*feats.shape[:2], -1)) + concat_outs = torch.cat(ppm_outs, dim=2) + return concat_outs + + +class SelfAttentionBlock(_SelfAttentionBlock): + """Make a ANN used SelfAttentionBlock. + + Args: + low_in_channels (int): Input channels of lower level feature, + which is the key feature for self-attention. + high_in_channels (int): Input channels of higher level feature, + which is the query feature for self-attention. + channels (int): Output channels of key/query transform. + out_channels (int): Output channels. + share_key_query (bool): Whether share projection weight between key + and query projection. + query_scale (int): The scale of query feature map. + key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module of key feature. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict|None): Config of activation layers. + """ + + def __init__(self, low_in_channels, high_in_channels, channels, + out_channels, share_key_query, query_scale, key_pool_scales, + conv_cfg, norm_cfg, act_cfg): + key_psp = PPMConcat(key_pool_scales) + if query_scale > 1: + query_downsample = nn.MaxPool2d(kernel_size=query_scale) + else: + query_downsample = None + super(SelfAttentionBlock, self).__init__( + key_in_channels=low_in_channels, + query_in_channels=high_in_channels, + channels=channels, + out_channels=out_channels, + share_key_query=share_key_query, + query_downsample=query_downsample, + key_downsample=key_psp, + key_query_num_convs=1, + key_query_norm=True, + value_out_num_convs=1, + value_out_norm=False, + matmul_norm=True, + with_out=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + +class AFNB(nn.Module): + """Asymmetric Fusion Non-local Block(AFNB) + + Args: + low_in_channels (int): Input channels of lower level feature, + which is the key feature for self-attention. + high_in_channels (int): Input channels of higher level feature, + which is the query feature for self-attention. + channels (int): Output channels of key/query transform. + out_channels (int): Output channels. + and query projection. + query_scales (tuple[int]): The scales of query feature map. + Default: (1,) + key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module of key feature. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict|None): Config of activation layers. 
+ """ + + def __init__(self, low_in_channels, high_in_channels, channels, + out_channels, query_scales, key_pool_scales, conv_cfg, + norm_cfg, act_cfg): + super(AFNB, self).__init__() + self.stages = nn.ModuleList() + for query_scale in query_scales: + self.stages.append( + SelfAttentionBlock( + low_in_channels=low_in_channels, + high_in_channels=high_in_channels, + channels=channels, + out_channels=out_channels, + share_key_query=False, + query_scale=query_scale, + key_pool_scales=key_pool_scales, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.bottleneck = ConvModule( + out_channels + high_in_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, low_feats, high_feats): + """Forward function.""" + priors = [stage(high_feats, low_feats) for stage in self.stages] + context = torch.stack(priors, dim=0).sum(dim=0) + output = self.bottleneck(torch.cat([context, high_feats], 1)) + return output + + +class APNB(nn.Module): + """Asymmetric Pyramid Non-local Block (APNB) + + Args: + in_channels (int): Input channels of key/query feature, + which is the key feature for self-attention. + channels (int): Output channels of key/query transform. + out_channels (int): Output channels. + query_scales (tuple[int]): The scales of query feature map. + Default: (1,) + key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module of key feature. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict|None): Config of activation layers. + """ + + def __init__(self, in_channels, channels, out_channels, query_scales, + key_pool_scales, conv_cfg, norm_cfg, act_cfg): + super(APNB, self).__init__() + self.stages = nn.ModuleList() + for query_scale in query_scales: + self.stages.append( + SelfAttentionBlock( + low_in_channels=in_channels, + high_in_channels=in_channels, + channels=channels, + out_channels=out_channels, + share_key_query=True, + query_scale=query_scale, + key_pool_scales=key_pool_scales, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.bottleneck = ConvModule( + 2 * in_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, feats): + """Forward function.""" + priors = [stage(feats, feats) for stage in self.stages] + context = torch.stack(priors, dim=0).sum(dim=0) + output = self.bottleneck(torch.cat([context, feats], 1)) + return output + + +@HEADS.register_module() +class ANNHead(BaseDecodeHead): + """Asymmetric Non-local Neural Networks for Semantic Segmentation. + + This head is the implementation of `ANNNet + `_. + + Args: + project_channels (int): Projection channels for Nonlocal. + query_scales (tuple[int]): The scales of query feature map. + Default: (1,) + key_pool_scales (tuple[int]): The pooling scales of key feature map. + Default: (1, 3, 6, 8). 
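+
+    Example:
+        Illustrative shape check; the channel sizes below are assumptions,
+        not values from the original configs.
+
+        >>> import torch
+        >>> head = ANNHead(project_channels=16, in_channels=[64, 128],
+        ...                in_index=[0, 1], channels=32, num_classes=19)
+        >>> low = torch.randn(1, 64, 32, 32)
+        >>> high = torch.randn(1, 128, 16, 16)
+        >>> head([low, high]).shape
+        torch.Size([1, 19, 16, 16])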
+ """ + + def __init__(self, + project_channels, + query_scales=(1, ), + key_pool_scales=(1, 3, 6, 8), + **kwargs): + super(ANNHead, self).__init__( + input_transform='multiple_select', **kwargs) + assert len(self.in_channels) == 2 + low_in_channels, high_in_channels = self.in_channels + self.project_channels = project_channels + self.fusion = AFNB( + low_in_channels=low_in_channels, + high_in_channels=high_in_channels, + out_channels=high_in_channels, + channels=project_channels, + query_scales=query_scales, + key_pool_scales=key_pool_scales, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck = ConvModule( + high_in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.context = APNB( + in_channels=self.channels, + out_channels=self.channels, + channels=project_channels, + query_scales=query_scales, + key_pool_scales=key_pool_scales, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + low_feats, high_feats = self._transform_inputs(inputs) + output = self.fusion(low_feats, high_feats) + output = self.dropout(output) + output = self.bottleneck(output) + output = self.context(output) + output = self.cls_seg(output) + + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/apc_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/apc_head.py new file mode 100644 index 0000000..3198fd1 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/apc_head.py @@ -0,0 +1,159 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class ACM(nn.Module): + """Adaptive Context Module used in APCNet. + + Args: + pool_scale (int): Pooling scale used in Adaptive Context + Module to extract region features. + fusion (bool): Add one conv to fuse residual feature. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict | None): Config of conv layers. + norm_cfg (dict | None): Config of norm layers. + act_cfg (dict): Config of activation layers. 
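+
+    Example:
+        Illustrative shape check; the channel sizes below are assumptions.
+
+        >>> import torch
+        >>> acm = ACM(pool_scale=2, fusion=True, in_channels=64, channels=32,
+        ...           conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'))
+        >>> acm(torch.randn(1, 64, 32, 32)).shape
+        torch.Size([1, 32, 32, 32])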
+ """ + + def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg, + norm_cfg, act_cfg): + super(ACM, self).__init__() + self.pool_scale = pool_scale + self.fusion = fusion + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.pooled_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.input_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.global_info = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.gla = nn.Conv2d(self.channels, self.pool_scale**2, 1, 1, 0) + + self.residual_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + if self.fusion: + self.fusion_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, x): + """Forward function.""" + pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale) + # [batch_size, channels, h, w] + x = self.input_redu_conv(x) + # [batch_size, channels, pool_scale, pool_scale] + pooled_x = self.pooled_redu_conv(pooled_x) + batch_size = x.size(0) + # [batch_size, pool_scale * pool_scale, channels] + pooled_x = pooled_x.view(batch_size, self.channels, + -1).permute(0, 2, 1).contiguous() + # [batch_size, h * w, pool_scale * pool_scale] + affinity_matrix = self.gla(x + resize( + self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:]) + ).permute(0, 2, 3, 1).reshape( + batch_size, -1, self.pool_scale**2) + affinity_matrix = F.sigmoid(affinity_matrix) + # [batch_size, h * w, channels] + z_out = torch.matmul(affinity_matrix, pooled_x) + # [batch_size, channels, h * w] + z_out = z_out.permute(0, 2, 1).contiguous() + # [batch_size, channels, h, w] + z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3)) + z_out = self.residual_conv(z_out) + z_out = F.relu(z_out + x) + if self.fusion: + z_out = self.fusion_conv(z_out) + + return z_out + + +@HEADS.register_module() +class APCHead(BaseDecodeHead): + """Adaptive Pyramid Context Network for Semantic Segmentation. + + This head is the implementation of + `APCNet `_. + + Args: + pool_scales (tuple[int]): Pooling scales used in Adaptive Context + Module. Default: (1, 2, 3, 6). + fusion (bool): Add one conv to fuse residual feature. 
+ """ + + def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs): + super(APCHead, self).__init__(**kwargs) + assert isinstance(pool_scales, (list, tuple)) + self.pool_scales = pool_scales + self.fusion = fusion + acm_modules = [] + for pool_scale in self.pool_scales: + acm_modules.append( + ACM(pool_scale, + self.fusion, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.acm_modules = nn.ModuleList(acm_modules) + self.bottleneck = ConvModule( + self.in_channels + len(pool_scales) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + acm_outs = [x] + for acm_module in self.acm_modules: + acm_outs.append(acm_module(x)) + acm_outs = torch.cat(acm_outs, dim=1) + output = self.bottleneck(acm_outs) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/aspp_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/aspp_head.py new file mode 100644 index 0000000..7059aee --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/aspp_head.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class ASPPModule(nn.ModuleList): + """Atrous Spatial Pyramid Pooling (ASPP) Module. + + Args: + dilations (tuple[int]): Dilation rate of each layer. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. + """ + + def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg, + act_cfg): + super(ASPPModule, self).__init__() + self.dilations = dilations + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + for dilation in dilations: + self.append( + ConvModule( + self.in_channels, + self.channels, + 1 if dilation == 1 else 3, + dilation=dilation, + padding=0 if dilation == 1 else dilation, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def forward(self, x): + """Forward function.""" + aspp_outs = [] + for aspp_module in self: + aspp_outs.append(aspp_module(x)) + + return aspp_outs + + +@HEADS.register_module() +class ASPPHead(BaseDecodeHead): + """Rethinking Atrous Convolution for Semantic Image Segmentation. + + This head is the implementation of `DeepLabV3 + `_. + + Args: + dilations (tuple[int]): Dilation rates for ASPP module. + Default: (1, 6, 12, 18). 
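+
+    Example:
+        Illustrative shape check; the channel sizes below are assumptions.
+
+        >>> import torch
+        >>> head = ASPPHead(dilations=(1, 6, 12, 18), in_channels=64,
+        ...                 channels=32, num_classes=19)
+        >>> head([torch.randn(1, 64, 16, 16)]).shape
+        torch.Size([1, 19, 16, 16])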
+ """ + + def __init__(self, dilations=(1, 6, 12, 18), **kwargs): + super(ASPPHead, self).__init__(**kwargs) + assert isinstance(dilations, (list, tuple)) + self.dilations = dilations + self.image_pool = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.aspp_modules = ASPPModule( + dilations, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck = ConvModule( + (len(dilations) + 1) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each pixel with + ``self.cls_seg`` fc. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. + """ + x = self._transform_inputs(inputs) + aspp_outs = [ + resize( + self.image_pool(x), + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + ] + aspp_outs.extend(self.aspp_modules(x)) + aspp_outs = torch.cat(aspp_outs, dim=1) + feats = self.bottleneck(aspp_outs) + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/cascade_decode_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/cascade_decode_head.py new file mode 100644 index 0000000..f7c3da0 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/cascade_decode_head.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from .decode_head import BaseDecodeHead + + +class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta): + """Base class for cascade decode head used in + :class:`CascadeEncoderDecoder.""" + + def __init__(self, *args, **kwargs): + super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs) + + @abstractmethod + def forward(self, inputs, prev_output): + """Placeholder of forward function.""" + pass + + def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, + train_cfg): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + seg_logits = self.forward(inputs, prev_output) + losses = self.losses(seg_logits, gt_semantic_seg) + + return losses + + def forward_test(self, inputs, prev_output, img_metas, test_cfg): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. 
+ img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. + + Returns: + Tensor: Output segmentation map. + """ + return self.forward(inputs, prev_output) diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/cc_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/cc_head.py new file mode 100644 index 0000000..ed19eb4 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/cc_head.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import HEADS +from .fcn_head import FCNHead + +try: + from mmcv.ops import CrissCrossAttention +except ModuleNotFoundError: + CrissCrossAttention = None + + +@HEADS.register_module() +class CCHead(FCNHead): + """CCNet: Criss-Cross Attention for Semantic Segmentation. + + This head is the implementation of `CCNet + `_. + + Args: + recurrence (int): Number of recurrence of Criss Cross Attention + module. Default: 2. + """ + + def __init__(self, recurrence=2, **kwargs): + if CrissCrossAttention is None: + raise RuntimeError('Please install mmcv-full for ' + 'CrissCrossAttention ops') + super(CCHead, self).__init__(num_convs=2, **kwargs) + self.recurrence = recurrence + self.cca = CrissCrossAttention(self.channels) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs[0](x) + for _ in range(self.recurrence): + output = self.cca(output) + output = self.convs[1](output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/da_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/da_head.py new file mode 100644 index 0000000..77fd663 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/da_head.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F +from mmcv.cnn import ConvModule, Scale +from torch import nn + +from mmseg.core import add_prefix +from ..builder import HEADS +from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from .decode_head import BaseDecodeHead + + +class PAM(_SelfAttentionBlock): + """Position Attention Module (PAM) + + Args: + in_channels (int): Input channels of key/query feature. + channels (int): Output channels of key/query transform. 
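+
+    Example:
+        Illustrative shape check; the channel sizes below are assumptions.
+
+        >>> import torch
+        >>> pam = PAM(in_channels=64, channels=16)
+        >>> pam(torch.randn(2, 64, 32, 32)).shape  # residual keeps the shape
+        torch.Size([2, 64, 32, 32])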
+ """ + + def __init__(self, in_channels, channels): + super(PAM, self).__init__( + key_in_channels=in_channels, + query_in_channels=in_channels, + channels=channels, + out_channels=in_channels, + share_key_query=False, + query_downsample=None, + key_downsample=None, + key_query_num_convs=1, + key_query_norm=False, + value_out_num_convs=1, + value_out_norm=False, + matmul_norm=False, + with_out=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None) + + self.gamma = Scale(0) + + def forward(self, x): + """Forward function.""" + out = super(PAM, self).forward(x, x) + + out = self.gamma(out) + x + return out + + +class CAM(nn.Module): + """Channel Attention Module (CAM)""" + + def __init__(self): + super(CAM, self).__init__() + self.gamma = Scale(0) + + def forward(self, x): + """Forward function.""" + batch_size, channels, height, width = x.size() + proj_query = x.view(batch_size, channels, -1) + proj_key = x.view(batch_size, channels, -1).permute(0, 2, 1) + energy = torch.bmm(proj_query, proj_key) + energy_new = torch.max( + energy, -1, keepdim=True)[0].expand_as(energy) - energy + attention = F.softmax(energy_new, dim=-1) + proj_value = x.view(batch_size, channels, -1) + + out = torch.bmm(attention, proj_value) + out = out.view(batch_size, channels, height, width) + + out = self.gamma(out) + x + return out + + +@HEADS.register_module() +class DAHead(BaseDecodeHead): + """Dual Attention Network for Scene Segmentation. + + This head is the implementation of `DANet + `_. + + Args: + pam_channels (int): The channels of Position Attention Module(PAM). + """ + + def __init__(self, pam_channels, **kwargs): + super(DAHead, self).__init__(**kwargs) + self.pam_channels = pam_channels + self.pam_in_conv = ConvModule( + self.in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.pam = PAM(self.channels, pam_channels) + self.pam_out_conv = ConvModule( + self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.pam_conv_seg = nn.Conv2d( + self.channels, self.num_classes, kernel_size=1) + + self.cam_in_conv = ConvModule( + self.in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.cam = CAM() + self.cam_out_conv = ConvModule( + self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.cam_conv_seg = nn.Conv2d( + self.channels, self.num_classes, kernel_size=1) + + def pam_cls_seg(self, feat): + """PAM feature classification.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.pam_conv_seg(feat) + return output + + def cam_cls_seg(self, feat): + """CAM feature classification.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.cam_conv_seg(feat) + return output + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + pam_feat = self.pam_in_conv(x) + pam_feat = self.pam(pam_feat) + pam_feat = self.pam_out_conv(pam_feat) + pam_out = self.pam_cls_seg(pam_feat) + + cam_feat = self.cam_in_conv(x) + cam_feat = self.cam(cam_feat) + cam_feat = self.cam_out_conv(cam_feat) + cam_out = self.cam_cls_seg(cam_feat) + + feat_sum = pam_feat + cam_feat + pam_cam_out = self.cls_seg(feat_sum) + + return pam_cam_out, pam_out, cam_out + + def forward_test(self, inputs, img_metas, test_cfg): + """Forward function for testing, only 
``pam_cam`` is used.""" + return self.forward(inputs)[0] + + def losses(self, seg_logit, seg_label): + """Compute ``pam_cam``, ``pam``, ``cam`` loss.""" + pam_cam_seg_logit, pam_seg_logit, cam_seg_logit = seg_logit + loss = dict() + loss.update( + add_prefix( + super(DAHead, self).losses(pam_cam_seg_logit, seg_label), + 'pam_cam')) + loss.update( + add_prefix( + super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam')) + loss.update( + add_prefix( + super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam')) + return loss diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/decode_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/decode_head.py new file mode 100644 index 0000000..274d8a6 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/decode_head.py @@ -0,0 +1,267 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import torch +import torch.nn as nn +from mmcv.runner import BaseModule, auto_fp16, force_fp32 + +from mmseg.core import build_pixel_sampler +from mmseg.ops import resize +from ..builder import build_loss +from ..losses import accuracy + + +class BaseDecodeHead(BaseModule, metaclass=ABCMeta): + """Base class for BaseDecodeHead. + + Args: + in_channels (int|Sequence[int]): Input channels. + channels (int): Channels after modules, before conv_seg. + num_classes (int): Number of classes. + dropout_ratio (float): Ratio of dropout layer. Default: 0.1. + conv_cfg (dict|None): Config of conv layers. Default: None. + norm_cfg (dict|None): Config of norm layers. Default: None. + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU') + in_index (int|Sequence[int]): Input feature index. Default: -1 + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + 'resize_concat': Multiple feature maps will be resize to the + same size as first one and than concat together. + Usually used in FCN head of HRNet. + 'multiple_select': Multiple feature maps will be bundle into + a list and passed into decode head. + None: Only one select feature map is allowed. + Default: None. + loss_decode (dict | Sequence[dict]): Config of decode loss. + The `loss_name` is property of corresponding loss function which + could be shown in training log. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_ce'. + e.g. dict(type='CrossEntropyLoss'), + [dict(type='CrossEntropyLoss', loss_name='loss_ce'), + dict(type='DiceLoss', loss_name='loss_dice')] + Default: dict(type='CrossEntropyLoss'). + ignore_index (int | None): The label index to be ignored. When using + masked BCE loss, ignore_index should be set to None. Default: 255. + sampler (dict|None): The config of segmentation map sampler. + Default: None. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
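+
+    Example:
+        Illustrative sketch of the ``input_transform`` handling;
+        BaseDecodeHead is abstract, so the concrete FCNHead subclass from
+        this package is used and the channel sizes are assumptions.
+
+        >>> from mmseg.models.decode_heads import FCNHead
+        >>> head = FCNHead(in_channels=[64, 128], in_index=[0, 1],
+        ...                input_transform='resize_concat', channels=32,
+        ...                num_classes=19)
+        >>> head.in_channels  # summed because of 'resize_concat'
+        192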
+ """ + + def __init__(self, + in_channels, + channels, + *, + num_classes, + dropout_ratio=0.1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + in_index=-1, + input_transform=None, + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + ignore_index=255, + sampler=None, + align_corners=False, + init_cfg=dict( + type='Normal', std=0.01, override=dict(name='conv_seg')), + **kwargs): + super(BaseDecodeHead, self).__init__(init_cfg) + self._init_inputs(in_channels, in_index, input_transform) + self.channels = channels + self.num_classes = num_classes + self.dropout_ratio = dropout_ratio + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.in_index = in_index + + self.ignore_index = ignore_index + self.align_corners = align_corners + + if isinstance(loss_decode, dict): + self.loss_decode = build_loss(loss_decode) + elif isinstance(loss_decode, (list, tuple)): + self.loss_decode = nn.ModuleList() + for loss in loss_decode: + self.loss_decode.append(build_loss(loss)) + else: + raise TypeError(f'loss_decode must be a dict or sequence of dict,\ + but got {type(loss_decode)}') + + if sampler is not None: + self.sampler = build_pixel_sampler(sampler, context=self) + else: + self.sampler = None + + self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1) + if dropout_ratio > 0: + self.dropout = nn.Dropout2d(dropout_ratio) + else: + self.dropout = None + self.fp16_enabled = False + + def extra_repr(self): + """Extra repr.""" + s = f'input_transform={self.input_transform}, ' \ + f'ignore_index={self.ignore_index}, ' \ + f'align_corners={self.align_corners}' + return s + + def _init_inputs(self, in_channels, in_index, input_transform): + """Check and initialize input transforms. + + The in_channels, in_index and input_transform must match. + Specifically, when input_transform is None, only single feature map + will be selected. So in_channels and in_index must be of type int. + When input_transform + + Args: + in_channels (int|Sequence[int]): Input channels. + in_index (int|Sequence[int]): Input feature index. + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + 'resize_concat': Multiple feature maps will be resize to the + same size as first one and than concat together. + Usually used in FCN head of HRNet. + 'multiple_select': Multiple feature maps will be bundle into + a list and passed into decode head. + None: Only one select feature map is allowed. + """ + + if input_transform is not None: + assert input_transform in ['resize_concat', 'multiple_select'] + self.input_transform = input_transform + self.in_index = in_index + if input_transform is not None: + assert isinstance(in_channels, (list, tuple)) + assert isinstance(in_index, (list, tuple)) + assert len(in_channels) == len(in_index) + if input_transform == 'resize_concat': + self.in_channels = sum(in_channels) + else: + self.in_channels = in_channels + else: + assert isinstance(in_channels, int) + assert isinstance(in_index, int) + self.in_channels = in_channels + + def _transform_inputs(self, inputs): + """Transform inputs for decoder. + + Args: + inputs (list[Tensor]): List of multi-level img features. 
+ + Returns: + Tensor: The transformed inputs + """ + + if self.input_transform == 'resize_concat': + inputs = [inputs[i] for i in self.in_index] + upsampled_inputs = [ + resize( + input=x, + size=inputs[0].shape[2:], + mode='bilinear', + align_corners=self.align_corners) for x in inputs + ] + inputs = torch.cat(upsampled_inputs, dim=1) + elif self.input_transform == 'multiple_select': + inputs = [inputs[i] for i in self.in_index] + else: + inputs = inputs[self.in_index] + + return inputs + + @auto_fp16() + @abstractmethod + def forward(self, inputs): + """Placeholder of forward function.""" + pass + + def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + seg_logits = self.forward(inputs) + losses = self.losses(seg_logits, gt_semantic_seg) + return losses + + def forward_test(self, inputs, img_metas, test_cfg): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. + + Returns: + Tensor: Output segmentation map. + """ + return self.forward(inputs) + + def cls_seg(self, feat): + """Classify each pixel.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.conv_seg(feat) + return output + + @force_fp32(apply_to=('seg_logit', )) + def losses(self, seg_logit, seg_label): + """Compute segmentation loss.""" + loss = dict() + seg_logit = resize( + input=seg_logit, + size=seg_label.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + if self.sampler is not None: + seg_weight = self.sampler.sample(seg_logit, seg_label) + else: + seg_weight = None + seg_label = seg_label.squeeze(1) + + if not isinstance(self.loss_decode, nn.ModuleList): + losses_decode = [self.loss_decode] + else: + losses_decode = self.loss_decode + for loss_decode in losses_decode: + if loss_decode.loss_name not in loss: + loss[loss_decode.loss_name] = loss_decode( + seg_logit, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + else: + loss[loss_decode.loss_name] += loss_decode( + seg_logit, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + + loss['acc_seg'] = accuracy( + seg_logit, seg_label, ignore_index=self.ignore_index) + return loss diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/dm_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/dm_head.py new file mode 100644 index 0000000..ffaa870 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/dm_head.py @@ -0,0 +1,141 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
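+# Illustrative note (added commentary, not part of the original file): the
+# Dynamic Convolutional Module below generates a small filter per sample
+# from a pooled feature and applies it with a grouped convolution, folding
+# the batch into the channel dimension ([B, C, H, W] -> [1, B*C, H, W] with
+# groups=B*C) so that every channel of every sample is convolved with its
+# own generated kernel.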
+import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer + +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class DCM(nn.Module): + """Dynamic Convolutional Module used in DMNet. + + Args: + filter_size (int): The filter size of generated convolution kernel + used in Dynamic Convolutional Module. + fusion (bool): Add one conv to fuse DCM output feature. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict | None): Config of conv layers. + norm_cfg (dict | None): Config of norm layers. + act_cfg (dict): Config of activation layers. + """ + + def __init__(self, filter_size, fusion, in_channels, channels, conv_cfg, + norm_cfg, act_cfg): + super(DCM, self).__init__() + self.filter_size = filter_size + self.fusion = fusion + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.filter_gen_conv = nn.Conv2d(self.in_channels, self.channels, 1, 1, + 0) + + self.input_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + if self.norm_cfg is not None: + self.norm = build_norm_layer(self.norm_cfg, self.channels)[1] + else: + self.norm = None + self.activate = build_activation_layer(self.act_cfg) + + if self.fusion: + self.fusion_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, x): + """Forward function.""" + generated_filter = self.filter_gen_conv( + F.adaptive_avg_pool2d(x, self.filter_size)) + x = self.input_redu_conv(x) + b, c, h, w = x.shape + # [1, b * c, h, w], c = self.channels + x = x.view(1, b * c, h, w) + # [b * c, 1, filter_size, filter_size] + generated_filter = generated_filter.view(b * c, 1, self.filter_size, + self.filter_size) + pad = (self.filter_size - 1) // 2 + if (self.filter_size - 1) % 2 == 0: + p2d = (pad, pad, pad, pad) + else: + p2d = (pad + 1, pad, pad + 1, pad) + x = F.pad(input=x, pad=p2d, mode='constant', value=0) + # [1, b * c, h, w] + output = F.conv2d(input=x, weight=generated_filter, groups=b * c) + # [b, c, h, w] + output = output.view(b, c, h, w) + if self.norm is not None: + output = self.norm(output) + output = self.activate(output) + + if self.fusion: + output = self.fusion_conv(output) + + return output + + +@HEADS.register_module() +class DMHead(BaseDecodeHead): + """Dynamic Multi-scale Filters for Semantic Segmentation. + + This head is the implementation of + `DMNet `_. + + Args: + filter_sizes (tuple[int]): The size of generated convolutional filters + used in Dynamic Convolutional Module. Default: (1, 3, 5, 7). + fusion (bool): Add one conv to fuse DCM output feature. 
+ """ + + def __init__(self, filter_sizes=(1, 3, 5, 7), fusion=False, **kwargs): + super(DMHead, self).__init__(**kwargs) + assert isinstance(filter_sizes, (list, tuple)) + self.filter_sizes = filter_sizes + self.fusion = fusion + dcm_modules = [] + for filter_size in self.filter_sizes: + dcm_modules.append( + DCM(filter_size, + self.fusion, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.dcm_modules = nn.ModuleList(dcm_modules) + self.bottleneck = ConvModule( + self.in_channels + len(filter_sizes) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + dcm_outs = [x] + for dcm_module in self.dcm_modules: + dcm_outs.append(dcm_module(x)) + dcm_outs = torch.cat(dcm_outs, dim=1) + output = self.bottleneck(dcm_outs) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/dnl_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/dnl_head.py new file mode 100644 index 0000000..dabf154 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/dnl_head.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import NonLocal2d +from torch import nn + +from ..builder import HEADS +from .fcn_head import FCNHead + + +class DisentangledNonLocal2d(NonLocal2d): + """Disentangled Non-Local Blocks. + + Args: + temperature (float): Temperature to adjust attention. Default: 0.05 + """ + + def __init__(self, *arg, temperature, **kwargs): + super().__init__(*arg, **kwargs) + self.temperature = temperature + self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1) + + def embedded_gaussian(self, theta_x, phi_x): + """Embedded gaussian with temperature.""" + + # NonLocal2d pairwise_weight: [N, HxW, HxW] + pairwise_weight = torch.matmul(theta_x, phi_x) + if self.use_scale: + # theta_x.shape[-1] is `self.inter_channels` + pairwise_weight /= torch.tensor( + theta_x.shape[-1], + dtype=torch.float, + device=pairwise_weight.device)**torch.tensor( + 0.5, device=pairwise_weight.device) + pairwise_weight /= torch.tensor( + self.temperature, device=pairwise_weight.device) + pairwise_weight = pairwise_weight.softmax(dim=-1) + return pairwise_weight + + def forward(self, x): + # x: [N, C, H, W] + n = x.size(0) + + # g_x: [N, HxW, C] + g_x = self.g(x).view(n, self.inter_channels, -1) + g_x = g_x.permute(0, 2, 1) + + # theta_x: [N, HxW, C], phi_x: [N, C, HxW] + if self.mode == 'gaussian': + theta_x = x.view(n, self.in_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + if self.sub_sample: + phi_x = self.phi(x).view(n, self.in_channels, -1) + else: + phi_x = x.view(n, self.in_channels, -1) + elif self.mode == 'concatenation': + theta_x = self.theta(x).view(n, self.inter_channels, -1, 1) + phi_x = self.phi(x).view(n, self.inter_channels, 1, -1) + else: + theta_x = self.theta(x).view(n, self.inter_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + phi_x = self.phi(x).view(n, self.inter_channels, -1) + + # subtract mean + theta_x -= theta_x.mean(dim=-2, keepdim=True) + phi_x -= phi_x.mean(dim=-1, keepdim=True) + + pairwise_func = getattr(self, self.mode) + # pairwise_weight: [N, HxW, HxW] + pairwise_weight = pairwise_func(theta_x, phi_x) + + # y: [N, HxW, C] + y = torch.matmul(pairwise_weight, g_x) + # y: [N, C, H, W] + y = y.permute(0, 2, 
1).contiguous().reshape(n, self.inter_channels, + *x.size()[2:]) + + # unary_mask: [N, 1, HxW] + unary_mask = self.conv_mask(x) + unary_mask = unary_mask.view(n, 1, -1) + unary_mask = unary_mask.softmax(dim=-1) + # unary_x: [N, 1, C] + unary_x = torch.matmul(unary_mask, g_x) + # unary_x: [N, C, 1, 1] + unary_x = unary_x.permute(0, 2, 1).contiguous().reshape( + n, self.inter_channels, 1, 1) + + output = x + self.conv_out(y + unary_x) + + return output + + +@HEADS.register_module() +class DNLHead(FCNHead): + """Disentangled Non-Local Neural Networks. + + This head is the implementation of `DNLNet + `_. + + Args: + reduction (int): Reduction factor of projection transform. Default: 2. + use_scale (bool): Whether to scale pairwise_weight by + sqrt(1/inter_channels). Default: False. + mode (str): The nonlocal mode. Options are 'embedded_gaussian', + 'dot_product'. Default: 'embedded_gaussian.'. + temperature (float): Temperature to adjust attention. Default: 0.05 + """ + + def __init__(self, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + temperature=0.05, + **kwargs): + super(DNLHead, self).__init__(num_convs=2, **kwargs) + self.reduction = reduction + self.use_scale = use_scale + self.mode = mode + self.temperature = temperature + self.dnl_block = DisentangledNonLocal2d( + in_channels=self.channels, + reduction=self.reduction, + use_scale=self.use_scale, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + mode=self.mode, + temperature=self.temperature) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs[0](x) + output = self.dnl_block(output) + output = self.convs[1](output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/dpt_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/dpt_head.py new file mode 100644 index 0000000..6c895d0 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/dpt_head.py @@ -0,0 +1,294 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, Linear, build_activation_layer +from mmcv.runner import BaseModule + +from mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class ReassembleBlocks(BaseModule): + """ViTPostProcessBlock, process cls_token in ViT backbone output and + rearrange the feature vector to feature map. + + Args: + in_channels (int): ViT feature channels. Default: 768. + out_channels (List): output channels of each stage. + Default: [96, 192, 384, 768]. + readout_type (str): Type of readout operation. Default: 'ignore'. + patch_size (int): The patch size. Default: 16. + init_cfg (dict, optional): Initialization config dict. Default: None. 
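The readout variants described above only differ in how the ViT `cls_token` is merged back into the spatial tokens. A toy sketch of the 'add' readout (the 'project' variant concatenates instead and applies a linear layer); shapes are illustrative only:

```python
import torch

B, C, H, W = 1, 8, 4, 4
x = torch.rand(B, C, H, W)       # patch tokens already reshaped to a map
cls_token = torch.rand(B, C)

# 'add' readout: broadcast the class token over all H*W positions.
out = x.flatten(2) + cls_token.unsqueeze(-1)
out = out.reshape(B, C, H, W)
print(out.shape)  # torch.Size([1, 8, 4, 4])
```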
+ """ + + def __init__(self, + in_channels=768, + out_channels=[96, 192, 384, 768], + readout_type='ignore', + patch_size=16, + init_cfg=None): + super(ReassembleBlocks, self).__init__(init_cfg) + + assert readout_type in ['ignore', 'add', 'project'] + self.readout_type = readout_type + self.patch_size = patch_size + + self.projects = nn.ModuleList([ + ConvModule( + in_channels=in_channels, + out_channels=out_channel, + kernel_size=1, + act_cfg=None, + ) for out_channel in out_channels + ]) + + self.resize_layers = nn.ModuleList([ + nn.ConvTranspose2d( + in_channels=out_channels[0], + out_channels=out_channels[0], + kernel_size=4, + stride=4, + padding=0), + nn.ConvTranspose2d( + in_channels=out_channels[1], + out_channels=out_channels[1], + kernel_size=2, + stride=2, + padding=0), + nn.Identity(), + nn.Conv2d( + in_channels=out_channels[3], + out_channels=out_channels[3], + kernel_size=3, + stride=2, + padding=1) + ]) + if self.readout_type == 'project': + self.readout_projects = nn.ModuleList() + for _ in range(len(self.projects)): + self.readout_projects.append( + nn.Sequential( + Linear(2 * in_channels, in_channels), + build_activation_layer(dict(type='GELU')))) + + def forward(self, inputs): + assert isinstance(inputs, list) + out = [] + for i, x in enumerate(inputs): + assert len(x) == 2 + x, cls_token = x[0], x[1] + feature_shape = x.shape + if self.readout_type == 'project': + x = x.flatten(2).permute((0, 2, 1)) + readout = cls_token.unsqueeze(1).expand_as(x) + x = self.readout_projects[i](torch.cat((x, readout), -1)) + x = x.permute(0, 2, 1).reshape(feature_shape) + elif self.readout_type == 'add': + x = x.flatten(2) + cls_token.unsqueeze(-1) + x = x.reshape(feature_shape) + else: + pass + x = self.projects[i](x) + x = self.resize_layers[i](x) + out.append(x) + return out + + +class PreActResidualConvUnit(BaseModule): + """ResidualConvUnit, pre-activate residual unit. + + Args: + in_channels (int): number of channels in the input feature map. + act_cfg (dict): dictionary to construct and config activation layer. + norm_cfg (dict): dictionary to construct and config norm layer. + stride (int): stride of the first block. Default: 1 + dilation (int): dilation rate for convs layers. Default: 1. + init_cfg (dict, optional): Initialization config dict. Default: None. + """ + + def __init__(self, + in_channels, + act_cfg, + norm_cfg, + stride=1, + dilation=1, + init_cfg=None): + super(PreActResidualConvUnit, self).__init__(init_cfg) + + self.conv1 = ConvModule( + in_channels, + in_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=False, + order=('act', 'conv', 'norm')) + + self.conv2 = ConvModule( + in_channels, + in_channels, + 3, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=False, + order=('act', 'conv', 'norm')) + + def forward(self, inputs): + inputs_ = inputs.clone() + x = self.conv1(inputs) + x = self.conv2(x) + return x + inputs_ + + +class FeatureFusionBlock(BaseModule): + """FeatureFusionBlock, merge feature map from different stages. + + Args: + in_channels (int): Input channels. + act_cfg (dict): The activation config for ResidualConvUnit. + norm_cfg (dict): Config dict for normalization layer. + expand (bool): Whether expand the channels in post process block. + Default: False. + align_corners (bool): align_corner setting for bilinear upsample. + Default: True. + init_cfg (dict, optional): Initialization config dict. Default: None. 
+ """ + + def __init__(self, + in_channels, + act_cfg, + norm_cfg, + expand=False, + align_corners=True, + init_cfg=None): + super(FeatureFusionBlock, self).__init__(init_cfg) + + self.in_channels = in_channels + self.expand = expand + self.align_corners = align_corners + + self.out_channels = in_channels + if self.expand: + self.out_channels = in_channels // 2 + + self.project = ConvModule( + self.in_channels, + self.out_channels, + kernel_size=1, + act_cfg=None, + bias=True) + + self.res_conv_unit1 = PreActResidualConvUnit( + in_channels=self.in_channels, act_cfg=act_cfg, norm_cfg=norm_cfg) + self.res_conv_unit2 = PreActResidualConvUnit( + in_channels=self.in_channels, act_cfg=act_cfg, norm_cfg=norm_cfg) + + def forward(self, *inputs): + x = inputs[0] + if len(inputs) == 2: + if x.shape != inputs[1].shape: + res = resize( + inputs[1], + size=(x.shape[2], x.shape[3]), + mode='bilinear', + align_corners=False) + else: + res = inputs[1] + x = x + self.res_conv_unit1(res) + x = self.res_conv_unit2(x) + x = resize( + x, + scale_factor=2, + mode='bilinear', + align_corners=self.align_corners) + x = self.project(x) + return x + + +@HEADS.register_module() +class DPTHead(BaseDecodeHead): + """Vision Transformers for Dense Prediction. + + This head is implemented of `DPT `_. + + Args: + embed_dims (int): The embed dimension of the ViT backbone. + Default: 768. + post_process_channels (List): Out channels of post process conv + layers. Default: [96, 192, 384, 768]. + readout_type (str): Type of readout operation. Default: 'ignore'. + patch_size (int): The patch size. Default: 16. + expand_channels (bool): Whether expand the channels in post process + block. Default: False. + act_cfg (dict): The activation config for residual conv unit. + Default dict(type='ReLU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). 
+ """ + + def __init__(self, + embed_dims=768, + post_process_channels=[96, 192, 384, 768], + readout_type='ignore', + patch_size=16, + expand_channels=False, + act_cfg=dict(type='ReLU'), + norm_cfg=dict(type='BN'), + **kwargs): + super(DPTHead, self).__init__(**kwargs) + + self.in_channels = self.in_channels + self.expand_channels = expand_channels + self.reassemble_blocks = ReassembleBlocks(embed_dims, + post_process_channels, + readout_type, patch_size) + + self.post_process_channels = [ + channel * math.pow(2, i) if expand_channels else channel + for i, channel in enumerate(post_process_channels) + ] + self.convs = nn.ModuleList() + for channel in self.post_process_channels: + self.convs.append( + ConvModule( + channel, + self.channels, + kernel_size=3, + padding=1, + act_cfg=None, + bias=False)) + self.fusion_blocks = nn.ModuleList() + for _ in range(len(self.convs)): + self.fusion_blocks.append( + FeatureFusionBlock(self.channels, act_cfg, norm_cfg)) + self.fusion_blocks[0].res_conv_unit1 = None + self.project = ConvModule( + self.channels, + self.channels, + kernel_size=3, + padding=1, + norm_cfg=norm_cfg) + self.num_fusion_blocks = len(self.fusion_blocks) + self.num_reassemble_blocks = len(self.reassemble_blocks.resize_layers) + self.num_post_process_channels = len(self.post_process_channels) + assert self.num_fusion_blocks == self.num_reassemble_blocks + assert self.num_reassemble_blocks == self.num_post_process_channels + + def forward(self, inputs): + assert len(inputs) == self.num_reassemble_blocks + x = self._transform_inputs(inputs) + x = self.reassemble_blocks(x) + x = [self.convs[i](feature) for i, feature in enumerate(x)] + out = self.fusion_blocks[0](x[-1]) + for i in range(1, len(self.fusion_blocks)): + out = self.fusion_blocks[i](out, x[-(i + 1)]) + out = self.project(out) + out = self.cls_seg(out) + return out diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/ema_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/ema_head.py new file mode 100644 index 0000000..f6de167 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/ema_head.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +def reduce_mean(tensor): + """Reduce mean when distributed training.""" + if not (dist.is_available() and dist.is_initialized()): + return tensor + tensor = tensor.clone() + dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) + return tensor + + +class EMAModule(nn.Module): + """Expectation Maximization Attention Module used in EMANet. + + Args: + channels (int): Channels of the whole module. + num_bases (int): Number of bases. + num_stages (int): Number of the EM iterations. + """ + + def __init__(self, channels, num_bases, num_stages, momentum): + super(EMAModule, self).__init__() + assert num_stages >= 1, 'num_stages must be at least 1!' + self.num_bases = num_bases + self.num_stages = num_stages + self.momentum = momentum + + bases = torch.zeros(1, channels, self.num_bases) + bases.normal_(0, math.sqrt(2. 
/ self.num_bases)) + # [1, channels, num_bases] + bases = F.normalize(bases, dim=1, p=2) + self.register_buffer('bases', bases) + + def forward(self, feats): + """Forward function.""" + batch_size, channels, height, width = feats.size() + # [batch_size, channels, height*width] + feats = feats.view(batch_size, channels, height * width) + # [batch_size, channels, num_bases] + bases = self.bases.repeat(batch_size, 1, 1) + + with torch.no_grad(): + for i in range(self.num_stages): + # [batch_size, height*width, num_bases] + attention = torch.einsum('bcn,bck->bnk', feats, bases) + attention = F.softmax(attention, dim=2) + # l1 norm + attention_normed = F.normalize(attention, dim=1, p=1) + # [batch_size, channels, num_bases] + bases = torch.einsum('bcn,bnk->bck', feats, attention_normed) + # l2 norm + bases = F.normalize(bases, dim=1, p=2) + + feats_recon = torch.einsum('bck,bnk->bcn', bases, attention) + feats_recon = feats_recon.view(batch_size, channels, height, width) + + if self.training: + bases = bases.mean(dim=0, keepdim=True) + bases = reduce_mean(bases) + # l2 norm + bases = F.normalize(bases, dim=1, p=2) + self.bases = (1 - + self.momentum) * self.bases + self.momentum * bases + + return feats_recon + + +@HEADS.register_module() +class EMAHead(BaseDecodeHead): + """Expectation Maximization Attention Networks for Semantic Segmentation. + + This head is the implementation of `EMANet + `_. + + Args: + ema_channels (int): EMA module channels + num_bases (int): Number of bases. + num_stages (int): Number of the EM iterations. + concat_input (bool): Whether concat the input and output of convs + before classification layer. Default: True + momentum (float): Momentum to update the base. Default: 0.1. + """ + + def __init__(self, + ema_channels, + num_bases, + num_stages, + concat_input=True, + momentum=0.1, + **kwargs): + super(EMAHead, self).__init__(**kwargs) + self.ema_channels = ema_channels + self.num_bases = num_bases + self.num_stages = num_stages + self.concat_input = concat_input + self.momentum = momentum + self.ema_module = EMAModule(self.ema_channels, self.num_bases, + self.num_stages, self.momentum) + + self.ema_in_conv = ConvModule( + self.in_channels, + self.ema_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + # project (0, inf) -> (-inf, inf) + self.ema_mid_conv = ConvModule( + self.ema_channels, + self.ema_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=None, + act_cfg=None) + for param in self.ema_mid_conv.parameters(): + param.requires_grad = False + + self.ema_out_conv = ConvModule( + self.ema_channels, + self.ema_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=None) + self.bottleneck = ConvModule( + self.ema_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if self.concat_input: + self.conv_cat = ConvModule( + self.in_channels + self.channels, + self.channels, + kernel_size=3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + feats = self.ema_in_conv(x) + identity = feats + feats = self.ema_mid_conv(feats) + recon = self.ema_module(feats) + recon = F.relu(recon, inplace=True) + recon = self.ema_out_conv(recon) + output = F.relu(identity + recon, inplace=True) + output = self.bottleneck(output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + 
output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/enc_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/enc_head.py new file mode 100644 index 0000000..648c890 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/enc_head.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_norm_layer + +from mmseg.ops import Encoding, resize +from ..builder import HEADS, build_loss +from .decode_head import BaseDecodeHead + + +class EncModule(nn.Module): + """Encoding Module used in EncNet. + + Args: + in_channels (int): Input channels. + num_codes (int): Number of code words. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. + """ + + def __init__(self, in_channels, num_codes, conv_cfg, norm_cfg, act_cfg): + super(EncModule, self).__init__() + self.encoding_project = ConvModule( + in_channels, + in_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + # TODO: resolve this hack + # change to 1d + if norm_cfg is not None: + encoding_norm_cfg = norm_cfg.copy() + if encoding_norm_cfg['type'] in ['BN', 'IN']: + encoding_norm_cfg['type'] += '1d' + else: + encoding_norm_cfg['type'] = encoding_norm_cfg['type'].replace( + '2d', '1d') + else: + # fallback to BN1d + encoding_norm_cfg = dict(type='BN1d') + self.encoding = nn.Sequential( + Encoding(channels=in_channels, num_codes=num_codes), + build_norm_layer(encoding_norm_cfg, num_codes)[1], + nn.ReLU(inplace=True)) + self.fc = nn.Sequential( + nn.Linear(in_channels, in_channels), nn.Sigmoid()) + + def forward(self, x): + """Forward function.""" + encoding_projection = self.encoding_project(x) + encoding_feat = self.encoding(encoding_projection).mean(dim=1) + batch_size, channels, _, _ = x.size() + gamma = self.fc(encoding_feat) + y = gamma.view(batch_size, channels, 1, 1) + output = F.relu_(x + x * y) + return encoding_feat, output + + +@HEADS.register_module() +class EncHead(BaseDecodeHead): + """Context Encoding for Semantic Segmentation. + + This head is the implementation of `EncNet + `_. + + Args: + num_codes (int): Number of code words. Default: 32. + use_se_loss (bool): Whether use Semantic Encoding Loss (SE-loss) to + regularize the training. Default: True. + add_lateral (bool): Whether use lateral connection to fuse features. + Default: False. + loss_se_decode (dict): Config of decode loss. + Default: dict(type='CrossEntropyLoss', use_sigmoid=True). 
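The `EncModule` defined above ends with a squeeze-and-excitation style gate: the pooled codeword statistics predict one scaling factor per channel. Stripped of the learned layers, the re-weighting is just the following (toy shapes):

```python
import torch
import torch.nn.functional as F

batch, channels = 2, 8
x = torch.rand(batch, channels, 16, 16)
encoding_feat = torch.rand(batch, channels)   # stands in for the pooled encoding
gamma = torch.sigmoid(encoding_feat)          # fc + Sigmoid in the real module
y = gamma.view(batch, channels, 1, 1)
output = F.relu_(x + x * y)                   # residual channel re-weighting
print(output.shape)  # torch.Size([2, 8, 16, 16])
```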
+ """ + + def __init__(self, + num_codes=32, + use_se_loss=True, + add_lateral=False, + loss_se_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=0.2), + **kwargs): + super(EncHead, self).__init__( + input_transform='multiple_select', **kwargs) + self.use_se_loss = use_se_loss + self.add_lateral = add_lateral + self.num_codes = num_codes + self.bottleneck = ConvModule( + self.in_channels[-1], + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if add_lateral: + self.lateral_convs = nn.ModuleList() + for in_channels in self.in_channels[:-1]: # skip the last one + self.lateral_convs.append( + ConvModule( + in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.fusion = ConvModule( + len(self.in_channels) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.enc_module = EncModule( + self.channels, + num_codes=num_codes, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if self.use_se_loss: + self.loss_se_decode = build_loss(loss_se_decode) + self.se_layer = nn.Linear(self.channels, self.num_classes) + + def forward(self, inputs): + """Forward function.""" + inputs = self._transform_inputs(inputs) + feat = self.bottleneck(inputs[-1]) + if self.add_lateral: + laterals = [ + resize( + lateral_conv(inputs[i]), + size=feat.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + feat = self.fusion(torch.cat([feat, *laterals], 1)) + encode_feat, output = self.enc_module(feat) + output = self.cls_seg(output) + if self.use_se_loss: + se_output = self.se_layer(encode_feat) + return output, se_output + else: + return output + + def forward_test(self, inputs, img_metas, test_cfg): + """Forward function for testing, ignore se_loss.""" + if self.use_se_loss: + return self.forward(inputs)[0] + else: + return self.forward(inputs) + + @staticmethod + def _convert_to_onehot_labels(seg_label, num_classes): + """Convert segmentation label to onehot. + + Args: + seg_label (Tensor): Segmentation label of shape (N, H, W). + num_classes (int): Number of classes. + + Returns: + Tensor: Onehot labels of shape (N, num_classes). + """ + + batch_size = seg_label.size(0) + onehot_labels = seg_label.new_zeros((batch_size, num_classes)) + for i in range(batch_size): + hist = seg_label[i].float().histc( + bins=num_classes, min=0, max=num_classes - 1) + onehot_labels[i] = hist > 0 + return onehot_labels + + def losses(self, seg_logit, seg_label): + """Compute segmentation and semantic encoding loss.""" + seg_logit, se_seg_logit = seg_logit + loss = dict() + loss.update(super(EncHead, self).losses(seg_logit, seg_label)) + se_loss = self.loss_se_decode( + se_seg_logit, + self._convert_to_onehot_labels(seg_label, self.num_classes)) + loss['loss_se'] = se_loss + return loss diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/fcn_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/fcn_head.py new file mode 100644 index 0000000..fb79a0d --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/fcn_head.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +@HEADS.register_module() +class FCNHead(BaseDecodeHead): + """Fully Convolution Networks for Semantic Segmentation. + + This head is implemented of `FCNNet `_. + + Args: + num_convs (int): Number of convs in the head. Default: 2. + kernel_size (int): The kernel size for convs in the head. Default: 3. + concat_input (bool): Whether concat the input and output of convs + before classification layer. + dilation (int): The dilation rate for convs in the head. Default: 1. + """ + + def __init__(self, + num_convs=2, + kernel_size=3, + concat_input=True, + dilation=1, + **kwargs): + assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int) + self.num_convs = num_convs + self.concat_input = concat_input + self.kernel_size = kernel_size + super(FCNHead, self).__init__(**kwargs) + if num_convs == 0: + assert self.in_channels == self.channels + + conv_padding = (kernel_size // 2) * dilation + convs = [] + convs.append( + ConvModule( + self.in_channels, + self.channels, + kernel_size=kernel_size, + padding=conv_padding, + dilation=dilation, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + for i in range(num_convs - 1): + convs.append( + ConvModule( + self.channels, + self.channels, + kernel_size=kernel_size, + padding=conv_padding, + dilation=dilation, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + if num_convs == 0: + self.convs = nn.Identity() + else: + self.convs = nn.Sequential(*convs) + if self.concat_input: + self.conv_cat = ConvModule( + self.in_channels + self.channels, + self.channels, + kernel_size=kernel_size, + padding=kernel_size // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each pixel with + ``self.cls_seg`` fc. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. + """ + x = self._transform_inputs(inputs) + feats = self.convs(x) + if self.concat_input: + feats = self.conv_cat(torch.cat([x, feats], dim=1)) + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/fpn_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/fpn_head.py new file mode 100644 index 0000000..e41f324 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/fpn_head.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmseg.ops import Upsample, resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +@HEADS.register_module() +class FPNHead(BaseDecodeHead): + """Panoptic Feature Pyramid Networks. + + This head is the implementation of `Semantic FPN + `_. + + Args: + feature_strides (tuple[int]): The strides for input feature maps. + stack_lateral. All strides suppose to be power of 2. The first + one is of largest resolution. 
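Each branch of the FPN head stacks `head_length` conv (+2x upsample) stages so that every input level reaches the resolution of the first one. For a typical stride tuple (values assumed here, not taken from a specific config):

```python
import numpy as np

feature_strides = (4, 8, 16, 32)
head_lengths = [max(1, int(np.log2(s) - np.log2(feature_strides[0])))
                for s in feature_strides]
print(head_lengths)  # [1, 1, 2, 3]
```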
+ """ + + def __init__(self, feature_strides, **kwargs): + super(FPNHead, self).__init__( + input_transform='multiple_select', **kwargs) + assert len(feature_strides) == len(self.in_channels) + assert min(feature_strides) == feature_strides[0] + self.feature_strides = feature_strides + + self.scale_heads = nn.ModuleList() + for i in range(len(feature_strides)): + head_length = max( + 1, + int(np.log2(feature_strides[i]) - np.log2(feature_strides[0]))) + scale_head = [] + for k in range(head_length): + scale_head.append( + ConvModule( + self.in_channels[i] if k == 0 else self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + if feature_strides[i] != feature_strides[0]: + scale_head.append( + Upsample( + scale_factor=2, + mode='bilinear', + align_corners=self.align_corners)) + self.scale_heads.append(nn.Sequential(*scale_head)) + + def forward(self, inputs): + + x = self._transform_inputs(inputs) + + output = self.scale_heads[0](x[0]) + for i in range(1, len(self.feature_strides)): + # non inplace + output = output + resize( + self.scale_heads[i](x[i]), + size=output.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/gc_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/gc_head.py new file mode 100644 index 0000000..eed5074 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/gc_head.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import ContextBlock + +from ..builder import HEADS +from .fcn_head import FCNHead + + +@HEADS.register_module() +class GCHead(FCNHead): + """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond. + + This head is the implementation of `GCNet + `_. + + Args: + ratio (float): Multiplier of channels ratio. Default: 1/4. + pooling_type (str): The pooling type of context aggregation. + Options are 'att', 'avg'. Default: 'avg'. + fusion_types (tuple[str]): The fusion type for feature fusion. + Options are 'channel_add', 'channel_mul'. Default: ('channel_add',) + """ + + def __init__(self, + ratio=1 / 4., + pooling_type='att', + fusion_types=('channel_add', ), + **kwargs): + super(GCHead, self).__init__(num_convs=2, **kwargs) + self.ratio = ratio + self.pooling_type = pooling_type + self.fusion_types = fusion_types + self.gc_block = ContextBlock( + in_channels=self.channels, + ratio=self.ratio, + pooling_type=self.pooling_type, + fusion_types=self.fusion_types) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs[0](x) + output = self.gc_block(output) + output = self.convs[1](output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/isa_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/isa_head.py new file mode 100644 index 0000000..0bf3455 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/isa_head.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import math + +import torch +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from ..builder import HEADS +from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from .decode_head import BaseDecodeHead + + +class SelfAttentionBlock(_SelfAttentionBlock): + """Self-Attention Module. + + Args: + in_channels (int): Input channels of key/query feature. + channels (int): Output channels of key/query transform. + conv_cfg (dict | None): Config of conv layers. + norm_cfg (dict | None): Config of norm layers. + act_cfg (dict | None): Config of activation layers. + """ + + def __init__(self, in_channels, channels, conv_cfg, norm_cfg, act_cfg): + super(SelfAttentionBlock, self).__init__( + key_in_channels=in_channels, + query_in_channels=in_channels, + channels=channels, + out_channels=in_channels, + share_key_query=False, + query_downsample=None, + key_downsample=None, + key_query_num_convs=2, + key_query_norm=True, + value_out_num_convs=1, + value_out_norm=False, + matmul_norm=True, + with_out=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.output_project = self.build_project( + in_channels, + in_channels, + num_convs=1, + use_conv_module=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + """Forward function.""" + context = super(SelfAttentionBlock, self).forward(x, x) + return self.output_project(context) + + +@HEADS.register_module() +class ISAHead(BaseDecodeHead): + """Interlaced Sparse Self-Attention for Semantic Segmentation. + + This head is the implementation of `ISA + `_. + + Args: + isa_channels (int): The channels of ISA Module. + down_factor (tuple[int]): The local group size of ISA. + """ + + def __init__(self, isa_channels, down_factor=(8, 8), **kwargs): + super(ISAHead, self).__init__(**kwargs) + self.down_factor = down_factor + + self.in_conv = ConvModule( + self.in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.global_relation = SelfAttentionBlock( + self.channels, + isa_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.local_relation = SelfAttentionBlock( + self.channels, + isa_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.out_conv = ConvModule( + self.channels * 2, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x_ = self._transform_inputs(inputs) + x = self.in_conv(x_) + residual = x + + n, c, h, w = x.size() + loc_h, loc_w = self.down_factor # size of local group in H- and W-axes + glb_h, glb_w = math.ceil(h / loc_h), math.ceil(w / loc_w) + pad_h, pad_w = glb_h * loc_h - h, glb_w * loc_w - w + if pad_h > 0 or pad_w > 0: # pad if the size is not divisible + padding = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2) + x = F.pad(x, padding) + + # global relation + x = x.view(n, c, glb_h, loc_h, glb_w, loc_w) + # do permutation to gather global group + x = x.permute(0, 3, 5, 1, 2, 4) # (n, loc_h, loc_w, c, glb_h, glb_w) + x = x.reshape(-1, c, glb_h, glb_w) + # apply attention within each global group + x = self.global_relation(x) # (n * loc_h * loc_w, c, glb_h, glb_w) + + # local relation + x = x.view(n, loc_h, loc_w, c, glb_h, glb_w) + # do permutation to gather local group + x = x.permute(0, 4, 5, 3, 1, 2) # (n, glb_h, glb_w, c, loc_h, loc_w) + x = x.reshape(-1, c, loc_h, loc_w) + # apply 
attention within each local group + x = self.local_relation(x) # (n * glb_h * glb_w, c, loc_h, loc_w) + + # permute each pixel back to its original position + x = x.view(n, glb_h, glb_w, c, loc_h, loc_w) + x = x.permute(0, 3, 1, 4, 2, 5) # (n, c, glb_h, loc_h, glb_w, loc_w) + x = x.reshape(n, c, glb_h * loc_h, glb_w * loc_w) + if pad_h > 0 or pad_w > 0: # remove padding + x = x[:, :, pad_h // 2:pad_h // 2 + h, pad_w // 2:pad_w // 2 + w] + + x = self.out_conv(torch.cat([x, residual], dim=1)) + out = self.cls_seg(x) + + return out diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/knet_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/knet_head.py new file mode 100644 index 0000000..f73dacc --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/knet_head.py @@ -0,0 +1,453 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer +from mmcv.cnn.bricks.transformer import (FFN, TRANSFORMER_LAYER, + MultiheadAttention, + build_transformer_layer) + +from mmseg.models.builder import HEADS, build_head +from mmseg.models.decode_heads.decode_head import BaseDecodeHead +from mmseg.utils import get_root_logger + + +@TRANSFORMER_LAYER.register_module() +class KernelUpdator(nn.Module): + """Dynamic Kernel Updator in Kernel Update Head. + + Args: + in_channels (int): The number of channels of input feature map. + Default: 256. + feat_channels (int): The number of middle-stage channels in + the kernel updator. Default: 64. + out_channels (int): The number of output channels. + gate_sigmoid (bool): Whether use sigmoid function in gate + mechanism. Default: True. + gate_norm_act (bool): Whether add normalization and activation + layer in gate mechanism. Default: False. + activate_out: Whether add activation after gate mechanism. + Default: False. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='LN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). 
+ """ + + def __init__( + self, + in_channels=256, + feat_channels=64, + out_channels=None, + gate_sigmoid=True, + gate_norm_act=False, + activate_out=False, + norm_cfg=dict(type='LN'), + act_cfg=dict(type='ReLU', inplace=True), + ): + super(KernelUpdator, self).__init__() + self.in_channels = in_channels + self.feat_channels = feat_channels + self.out_channels_raw = out_channels + self.gate_sigmoid = gate_sigmoid + self.gate_norm_act = gate_norm_act + self.activate_out = activate_out + self.act_cfg = act_cfg + self.norm_cfg = norm_cfg + self.out_channels = out_channels if out_channels else in_channels + + self.num_params_in = self.feat_channels + self.num_params_out = self.feat_channels + self.dynamic_layer = nn.Linear( + self.in_channels, self.num_params_in + self.num_params_out) + self.input_layer = nn.Linear(self.in_channels, + self.num_params_in + self.num_params_out, + 1) + self.input_gate = nn.Linear(self.in_channels, self.feat_channels, 1) + self.update_gate = nn.Linear(self.in_channels, self.feat_channels, 1) + if self.gate_norm_act: + self.gate_norm = build_norm_layer(norm_cfg, self.feat_channels)[1] + + self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.input_norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.input_norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1] + + self.activation = build_activation_layer(act_cfg) + + self.fc_layer = nn.Linear(self.feat_channels, self.out_channels, 1) + self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1] + + def forward(self, update_feature, input_feature): + """Forward function of KernelUpdator. + + Args: + update_feature (torch.Tensor): Feature map assembled from + each group. It would be reshaped with last dimension + shape: `self.in_channels`. + input_feature (torch.Tensor): Intermediate feature + with shape: (N, num_classes, conv_kernel_size**2, channels). + Returns: + Tensor: The output tensor of shape (N*C1/C2, K*K, C2), where N is + the number of classes, C1 and C2 are the feature map channels of + KernelUpdateHead and KernelUpdator, respectively. + """ + + update_feature = update_feature.reshape(-1, self.in_channels) + num_proposals = update_feature.size(0) + # dynamic_layer works for + # phi_1 and psi_3 in Eq.(4) and (5) of K-Net paper + parameters = self.dynamic_layer(update_feature) + param_in = parameters[:, :self.num_params_in].view( + -1, self.feat_channels) + param_out = parameters[:, -self.num_params_out:].view( + -1, self.feat_channels) + + # input_layer works for + # phi_2 and psi_4 in Eq.(4) and (5) of K-Net paper + input_feats = self.input_layer( + input_feature.reshape(num_proposals, -1, self.feat_channels)) + input_in = input_feats[..., :self.num_params_in] + input_out = input_feats[..., -self.num_params_out:] + + # `gate_feats` is F^G in K-Net paper + gate_feats = input_in * param_in.unsqueeze(-2) + if self.gate_norm_act: + gate_feats = self.activation(self.gate_norm(gate_feats)) + + input_gate = self.input_norm_in(self.input_gate(gate_feats)) + update_gate = self.norm_in(self.update_gate(gate_feats)) + if self.gate_sigmoid: + input_gate = input_gate.sigmoid() + update_gate = update_gate.sigmoid() + param_out = self.norm_out(param_out) + input_out = self.input_norm_out(input_out) + + if self.activate_out: + param_out = self.activation(param_out) + input_out = self.activation(input_out) + + # Gate mechanism. Eq.(5) in original paper. 
+ # param_out has shape (batch_size, feat_channels, out_channels) + features = update_gate * param_out.unsqueeze( + -2) + input_gate * input_out + + features = self.fc_layer(features) + features = self.fc_norm(features) + features = self.activation(features) + + return features + + +@HEADS.register_module() +class KernelUpdateHead(nn.Module): + """Kernel Update Head in K-Net. + + Args: + num_classes (int): Number of classes. Default: 150. + num_ffn_fcs (int): The number of fully-connected layers in + FFNs. Default: 2. + num_heads (int): The number of parallel attention heads. + Default: 8. + num_mask_fcs (int): The number of fully connected layers for + mask prediction. Default: 3. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 2048. + in_channels (int): The number of channels of input feature map. + Default: 256. + out_channels (int): The number of output channels. + Default: 256. + dropout (float): The Probability of an element to be + zeroed in MultiheadAttention and FFN. Default 0.0. + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + ffn_act_cfg (dict): Config of activation layers in FFN. + Default: dict(type='ReLU'). + conv_kernel_size (int): The kernel size of convolution in + Kernel Update Head for dynamic kernel updation. + Default: 1. + feat_transform_cfg (dict | None): Config of feature transform. + Default: None. + kernel_init (bool): Whether initiate mask kernel in mask head. + Default: False. + with_ffn (bool): Whether add FFN in kernel update head. + Default: True. + feat_gather_stride (int): Stride of convolution in feature transform. + Default: 1. + mask_transform_stride (int): Stride of mask transform. + Default: 1. + kernel_updator_cfg (dict): Config of kernel updator. + Default: dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')). 
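Putting the documented defaults together, one kernel update stage would be configured roughly as follows (this only restates the defaults listed above; it is not a tuned setting):

```python
kernel_update_head_cfg = dict(
    type='KernelUpdateHead',
    num_classes=150,
    num_ffn_fcs=2,
    num_heads=8,
    num_mask_fcs=3,
    feedforward_channels=2048,
    in_channels=256,
    out_channels=256,
    dropout=0.0,
    conv_kernel_size=1,
    with_ffn=True,
    kernel_updator_cfg=dict(
        type='DynamicConv',
        in_channels=256,
        feat_channels=64,
        out_channels=256,
        act_cfg=dict(type='ReLU', inplace=True),
        norm_cfg=dict(type='LN')))
```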
+ """ + + def __init__(self, + num_classes=150, + num_ffn_fcs=2, + num_heads=8, + num_mask_fcs=3, + feedforward_channels=2048, + in_channels=256, + out_channels=256, + dropout=0.0, + act_cfg=dict(type='ReLU', inplace=True), + ffn_act_cfg=dict(type='ReLU', inplace=True), + conv_kernel_size=1, + feat_transform_cfg=None, + kernel_init=False, + with_ffn=True, + feat_gather_stride=1, + mask_transform_stride=1, + kernel_updator_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'))): + super(KernelUpdateHead, self).__init__() + self.num_classes = num_classes + self.in_channels = in_channels + self.out_channels = out_channels + self.fp16_enabled = False + self.dropout = dropout + self.num_heads = num_heads + self.kernel_init = kernel_init + self.with_ffn = with_ffn + self.conv_kernel_size = conv_kernel_size + self.feat_gather_stride = feat_gather_stride + self.mask_transform_stride = mask_transform_stride + + self.attention = MultiheadAttention(in_channels * conv_kernel_size**2, + num_heads, dropout) + self.attention_norm = build_norm_layer( + dict(type='LN'), in_channels * conv_kernel_size**2)[1] + self.kernel_update_conv = build_transformer_layer(kernel_updator_cfg) + + if feat_transform_cfg is not None: + kernel_size = feat_transform_cfg.pop('kernel_size', 1) + transform_channels = in_channels + self.feat_transform = ConvModule( + transform_channels, + in_channels, + kernel_size, + stride=feat_gather_stride, + padding=int(feat_gather_stride // 2), + **feat_transform_cfg) + else: + self.feat_transform = None + + if self.with_ffn: + self.ffn = FFN( + in_channels, + feedforward_channels, + num_ffn_fcs, + act_cfg=ffn_act_cfg, + dropout=dropout) + self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1] + + self.mask_fcs = nn.ModuleList() + for _ in range(num_mask_fcs): + self.mask_fcs.append( + nn.Linear(in_channels, in_channels, bias=False)) + self.mask_fcs.append( + build_norm_layer(dict(type='LN'), in_channels)[1]) + self.mask_fcs.append(build_activation_layer(act_cfg)) + + self.fc_mask = nn.Linear(in_channels, out_channels) + + def init_weights(self): + """Use xavier initialization for all weight parameter and set + classification head bias as a specific value when use focal loss.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + else: + # adopt the default initialization for + # the weight and bias of the layer norm + pass + if self.kernel_init: + logger = get_root_logger() + logger.info( + 'mask kernel in mask head is normal initialized by std 0.01') + nn.init.normal_(self.fc_mask.weight, mean=0, std=0.01) + + def forward(self, x, proposal_feat, mask_preds, mask_shape=None): + """Forward function of Dynamic Instance Interactive Head. + + Args: + x (Tensor): Feature map from FPN with shape + (batch_size, feature_dimensions, H , W). + proposal_feat (Tensor): Intermediate feature get from + diihead in last stage, has shape + (batch_size, num_proposals, feature_dimensions) + mask_preds (Tensor): mask prediction from the former stage in shape + (batch_size, num_proposals, H, W). + + Returns: + Tuple: The first tensor is predicted mask with shape + (N, num_classes, H, W), the second tensor is dynamic kernel + with shape (N, num_classes, channels, K, K). 
+ """ + N, num_proposals = proposal_feat.shape[:2] + if self.feat_transform is not None: + x = self.feat_transform(x) + + C, H, W = x.shape[-3:] + + mask_h, mask_w = mask_preds.shape[-2:] + if mask_h != H or mask_w != W: + gather_mask = F.interpolate( + mask_preds, (H, W), align_corners=False, mode='bilinear') + else: + gather_mask = mask_preds + + sigmoid_masks = gather_mask.softmax(dim=1) + + # Group Feature Assembling. Eq.(3) in original paper. + # einsum is faster than bmm by 30% + x_feat = torch.einsum('bnhw,bchw->bnc', sigmoid_masks, x) + + # obj_feat in shape [B, N, C, K, K] -> [B, N, C, K*K] -> [B, N, K*K, C] + proposal_feat = proposal_feat.reshape(N, num_proposals, + self.in_channels, + -1).permute(0, 1, 3, 2) + obj_feat = self.kernel_update_conv(x_feat, proposal_feat) + + # [B, N, K*K, C] -> [B, N, K*K*C] -> [N, B, K*K*C] + obj_feat = obj_feat.reshape(N, num_proposals, -1).permute(1, 0, 2) + obj_feat = self.attention_norm(self.attention(obj_feat)) + # [N, B, K*K*C] -> [B, N, K*K*C] + obj_feat = obj_feat.permute(1, 0, 2) + + # obj_feat in shape [B, N, K*K*C] -> [B, N, K*K, C] + obj_feat = obj_feat.reshape(N, num_proposals, -1, self.in_channels) + + # FFN + if self.with_ffn: + obj_feat = self.ffn_norm(self.ffn(obj_feat)) + + mask_feat = obj_feat + + for reg_layer in self.mask_fcs: + mask_feat = reg_layer(mask_feat) + + # [B, N, K*K, C] -> [B, N, C, K*K] + mask_feat = self.fc_mask(mask_feat).permute(0, 1, 3, 2) + + if (self.mask_transform_stride == 2 and self.feat_gather_stride == 1): + mask_x = F.interpolate( + x, scale_factor=0.5, mode='bilinear', align_corners=False) + H, W = mask_x.shape[-2:] + else: + mask_x = x + # group conv is 5x faster than unfold and uses about 1/5 memory + # Group conv vs. unfold vs. concat batch, 2.9ms :13.5ms :3.8ms + # Group conv vs. unfold vs. concat batch, 278 : 1420 : 369 + # but in real training group conv is slower than concat batch + # so we keep using concat batch. + # fold_x = F.unfold( + # mask_x, + # self.conv_kernel_size, + # padding=int(self.conv_kernel_size // 2)) + # mask_feat = mask_feat.reshape(N, num_proposals, -1) + # new_mask_preds = torch.einsum('bnc,bcl->bnl', mask_feat, fold_x) + # [B, N, C, K*K] -> [B*N, C, K, K] + mask_feat = mask_feat.reshape(N, num_proposals, C, + self.conv_kernel_size, + self.conv_kernel_size) + # [B, C, H, W] -> [1, B*C, H, W] + new_mask_preds = [] + for i in range(N): + new_mask_preds.append( + F.conv2d( + mask_x[i:i + 1], + mask_feat[i], + padding=int(self.conv_kernel_size // 2))) + + new_mask_preds = torch.cat(new_mask_preds, dim=0) + new_mask_preds = new_mask_preds.reshape(N, num_proposals, H, W) + if self.mask_transform_stride == 2: + new_mask_preds = F.interpolate( + new_mask_preds, + scale_factor=2, + mode='bilinear', + align_corners=False) + + if mask_shape is not None and mask_shape[0] != H: + new_mask_preds = F.interpolate( + new_mask_preds, + mask_shape, + align_corners=False, + mode='bilinear') + + return new_mask_preds, obj_feat.permute(0, 1, 3, 2).reshape( + N, num_proposals, self.in_channels, self.conv_kernel_size, + self.conv_kernel_size) + + +@HEADS.register_module() +class IterativeDecodeHead(BaseDecodeHead): + """K-Net: Towards Unified Image Segmentation. + + This head is the implementation of + `K-Net: `_. + + Args: + num_stages (int): The number of stages (kernel update heads) + in IterativeDecodeHead. Default: 3. + kernel_generate_head:(dict): Config of kernel generate head which + generate mask predictions, dynamic kernels and class predictions + for next kernel update heads. 
+ kernel_update_head (dict): Config of kernel update head which refine + dynamic kernels and class predictions iteratively. + + """ + + def __init__(self, num_stages, kernel_generate_head, kernel_update_head, + **kwargs): + super(BaseDecodeHead, self).__init__(**kwargs) + assert num_stages == len(kernel_update_head) + self.num_stages = num_stages + self.kernel_generate_head = build_head(kernel_generate_head) + self.kernel_update_head = nn.ModuleList() + self.align_corners = self.kernel_generate_head.align_corners + self.num_classes = self.kernel_generate_head.num_classes + self.input_transform = self.kernel_generate_head.input_transform + self.ignore_index = self.kernel_generate_head.ignore_index + + for head_cfg in kernel_update_head: + self.kernel_update_head.append(build_head(head_cfg)) + + def forward(self, inputs): + """Forward function.""" + feats = self.kernel_generate_head._forward_feature(inputs) + sem_seg = self.kernel_generate_head.cls_seg(feats) + seg_kernels = self.kernel_generate_head.conv_seg.weight.clone() + seg_kernels = seg_kernels[None].expand( + feats.size(0), *seg_kernels.size()) + + stage_segs = [sem_seg] + for i in range(self.num_stages): + sem_seg, seg_kernels = self.kernel_update_head[i](feats, + seg_kernels, + sem_seg) + stage_segs.append(sem_seg) + if self.training: + return stage_segs + # only return the prediction of the last stage during testing + return stage_segs[-1] + + def losses(self, seg_logit, seg_label): + losses = dict() + for i, logit in enumerate(seg_logit): + loss = self.kernel_generate_head.losses(logit, seg_label) + for k, v in loss.items(): + losses[f'{k}.s{i}'] = v + + return losses diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/lraspp_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/lraspp_head.py new file mode 100644 index 0000000..c10ff0d --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/lraspp_head.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv import is_tuple_of +from mmcv.cnn import ConvModule + +from mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +@HEADS.register_module() +class LRASPPHead(BaseDecodeHead): + """Lite R-ASPP (LRASPP) head is proposed in Searching for MobileNetV3. + + This head is the improved implementation of `Searching for MobileNetV3 + `_. + + Args: + branch_channels (tuple[int]): The number of output channels in every + each branch. Default: (32, 64). + """ + + def __init__(self, branch_channels=(32, 64), **kwargs): + super(LRASPPHead, self).__init__(**kwargs) + if self.input_transform != 'multiple_select': + raise ValueError('in Lite R-ASPP (LRASPP) head, input_transform ' + f'must be \'multiple_select\'. 
But received ' + f'\'{self.input_transform}\'') + assert is_tuple_of(branch_channels, int) + assert len(branch_channels) == len(self.in_channels) - 1 + self.branch_channels = branch_channels + + self.convs = nn.Sequential() + self.conv_ups = nn.Sequential() + for i in range(len(branch_channels)): + self.convs.add_module( + f'conv{i}', + nn.Conv2d( + self.in_channels[i], branch_channels[i], 1, bias=False)) + self.conv_ups.add_module( + f'conv_up{i}', + ConvModule( + self.channels + branch_channels[i], + self.channels, + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=False)) + + self.conv_up_input = nn.Conv2d(self.channels, self.channels, 1) + + self.aspp_conv = ConvModule( + self.in_channels[-1], + self.channels, + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=False) + self.image_pool = nn.Sequential( + nn.AvgPool2d(kernel_size=49, stride=(16, 20)), + ConvModule( + self.in_channels[2], + self.channels, + 1, + act_cfg=dict(type='Sigmoid'), + bias=False)) + + def forward(self, inputs): + """Forward function.""" + inputs = self._transform_inputs(inputs) + + x = inputs[-1] + + x = self.aspp_conv(x) * resize( + self.image_pool(x), + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + x = self.conv_up_input(x) + + for i in range(len(self.branch_channels) - 1, -1, -1): + x = resize( + x, + size=inputs[i].size()[2:], + mode='bilinear', + align_corners=self.align_corners) + x = torch.cat([x, self.convs[i](inputs[i])], 1) + x = self.conv_ups[i](x) + + return self.cls_seg(x) diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/nl_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/nl_head.py new file mode 100644 index 0000000..637517e --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/nl_head.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import NonLocal2d + +from ..builder import HEADS +from .fcn_head import FCNHead + + +@HEADS.register_module() +class NLHead(FCNHead): + """Non-local Neural Networks. + + This head is the implementation of `NLNet + `_. + + Args: + reduction (int): Reduction factor of projection transform. Default: 2. + use_scale (bool): Whether to scale pairwise_weight by + sqrt(1/inter_channels). Default: True. + mode (str): The nonlocal mode. Options are 'embedded_gaussian', + 'dot_product'. Default: 'embedded_gaussian.'. + """ + + def __init__(self, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + **kwargs): + super(NLHead, self).__init__(num_convs=2, **kwargs) + self.reduction = reduction + self.use_scale = use_scale + self.mode = mode + self.nl_block = NonLocal2d( + in_channels=self.channels, + reduction=self.reduction, + use_scale=self.use_scale, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + mode=self.mode) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs[0](x) + output = self.nl_block(output) + output = self.convs[1](output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/ocr_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/ocr_head.py new file mode 100644 index 0000000..09eadfb --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/ocr_head.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from mmseg.ops import resize +from ..builder import HEADS +from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from .cascade_decode_head import BaseCascadeDecodeHead + + +class SpatialGatherModule(nn.Module): + """Aggregate the context features according to the initial predicted + probability distribution. + + Employ the soft-weighted method to aggregate the context. + """ + + def __init__(self, scale): + super(SpatialGatherModule, self).__init__() + self.scale = scale + + def forward(self, feats, probs): + """Forward function.""" + batch_size, num_classes, height, width = probs.size() + channels = feats.size(1) + probs = probs.view(batch_size, num_classes, -1) + feats = feats.view(batch_size, channels, -1) + # [batch_size, height*width, num_classes] + feats = feats.permute(0, 2, 1) + # [batch_size, channels, height*width] + probs = F.softmax(self.scale * probs, dim=2) + # [batch_size, channels, num_classes] + ocr_context = torch.matmul(probs, feats) + ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3) + return ocr_context + + +class ObjectAttentionBlock(_SelfAttentionBlock): + """Make a OCR used SelfAttentionBlock.""" + + def __init__(self, in_channels, channels, scale, conv_cfg, norm_cfg, + act_cfg): + if scale > 1: + query_downsample = nn.MaxPool2d(kernel_size=scale) + else: + query_downsample = None + super(ObjectAttentionBlock, self).__init__( + key_in_channels=in_channels, + query_in_channels=in_channels, + channels=channels, + out_channels=in_channels, + share_key_query=False, + query_downsample=query_downsample, + key_downsample=None, + key_query_num_convs=2, + key_query_norm=True, + value_out_num_convs=1, + value_out_norm=True, + matmul_norm=True, + with_out=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.bottleneck = ConvModule( + in_channels * 2, + in_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, query_feats, key_feats): + """Forward function.""" + context = super(ObjectAttentionBlock, + self).forward(query_feats, key_feats) + output = self.bottleneck(torch.cat([context, query_feats], dim=1)) + if self.query_downsample is not None: + output = resize(query_feats) + + return output + + +@HEADS.register_module() +class OCRHead(BaseCascadeDecodeHead): + """Object-Contextual Representations for Semantic Segmentation. + + This head is the implementation of `OCRNet + `_. + + Args: + ocr_channels (int): The intermediate channels of OCR block. + scale (int): The scale of probability map in SpatialGatherModule in + Default: 1. 
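The `SpatialGatherModule` above condenses the whole image into one context vector per class by using the softmaxed class probabilities as pooling weights. A toy walk-through with arbitrary shapes:

```python
import torch
import torch.nn.functional as F

feats = torch.rand(1, 4, 2, 2)    # [B, C, H, W]
probs = torch.rand(1, 3, 2, 2)    # [B, num_classes, H, W]

batch, num_classes = probs.shape[:2]
channels = feats.size(1)
probs = F.softmax(probs.view(batch, num_classes, -1), dim=2)   # weights over pixels
feats = feats.view(batch, channels, -1).permute(0, 2, 1)       # [B, HxW, C]
ocr_context = torch.matmul(probs, feats)
print(ocr_context.shape)  # torch.Size([1, 3, 4]); one context vector per class
```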
+ """ + + def __init__(self, ocr_channels, scale=1, **kwargs): + super(OCRHead, self).__init__(**kwargs) + self.ocr_channels = ocr_channels + self.scale = scale + self.object_context_block = ObjectAttentionBlock( + self.channels, + self.ocr_channels, + self.scale, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.spatial_gather_module = SpatialGatherModule(self.scale) + + self.bottleneck = ConvModule( + self.in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs, prev_output): + """Forward function.""" + x = self._transform_inputs(inputs) + feats = self.bottleneck(x) + context = self.spatial_gather_module(feats, prev_output) + object_context = self.object_context_block(feats, context) + output = self.cls_seg(object_context) + + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/point_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/point_head.py new file mode 100644 index 0000000..5e60527 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/point_head.py @@ -0,0 +1,364 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +try: + from mmcv.ops import point_sample +except ModuleNotFoundError: + point_sample = None + +from mmseg.models.builder import HEADS +from mmseg.ops import resize +from ..losses import accuracy +from .cascade_decode_head import BaseCascadeDecodeHead + + +def calculate_uncertainty(seg_logits): + """Estimate uncertainty based on seg logits. + + For each location of the prediction ``seg_logits`` we estimate + uncertainty as the difference between top first and top second + predicted logits. + + Args: + seg_logits (Tensor): Semantic segmentation logits, + shape (batch_size, num_classes, height, width). + + Returns: + scores (Tensor): T uncertainty scores with the most uncertain + locations having the highest uncertainty score, shape ( + batch_size, 1, height, width) + """ + top2_scores = torch.topk(seg_logits, k=2, dim=1)[0] + return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1) + + +@HEADS.register_module() +class PointHead(BaseCascadeDecodeHead): + """A mask point head use in PointRend. + + This head is implemented of `PointRend: Image Segmentation as + Rendering `_. + ``PointHead`` use shared multi-layer perceptron (equivalent to + nn.Conv1d) to predict the logit of input points. The fine-grained feature + and coarse feature will be concatenate together for predication. + + Args: + num_fcs (int): Number of fc layers in the head. Default: 3. + in_channels (int): Number of input channels. Default: 256. + fc_channels (int): Number of fc channels. Default: 256. + num_classes (int): Number of classes for logits. Default: 80. + class_agnostic (bool): Whether use class agnostic classification. + If so, the output channels of logits will be 1. Default: False. + coarse_pred_each_layer (bool): Whether concatenate coarse feature with + the output of each fc layer. Default: True. + conv_cfg (dict|None): Dictionary to construct and config conv layer. + Default: dict(type='Conv1d')) + norm_cfg (dict|None): Dictionary to construct and config norm layer. + Default: None. + loss_point (dict): Dictionary to construct and config loss layer of + point head. 
Default: dict(type='CrossEntropyLoss', use_mask=True, + loss_weight=1.0). + """ + + def __init__(self, + num_fcs=3, + coarse_pred_each_layer=True, + conv_cfg=dict(type='Conv1d'), + norm_cfg=None, + act_cfg=dict(type='ReLU', inplace=False), + **kwargs): + super(PointHead, self).__init__( + input_transform='multiple_select', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=dict( + type='Normal', std=0.01, override=dict(name='fc_seg')), + **kwargs) + if point_sample is None: + raise RuntimeError('Please install mmcv-full for ' + 'point_sample ops') + + self.num_fcs = num_fcs + self.coarse_pred_each_layer = coarse_pred_each_layer + + fc_in_channels = sum(self.in_channels) + self.num_classes + fc_channels = self.channels + self.fcs = nn.ModuleList() + for k in range(num_fcs): + fc = ConvModule( + fc_in_channels, + fc_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.fcs.append(fc) + fc_in_channels = fc_channels + fc_in_channels += self.num_classes if self.coarse_pred_each_layer \ + else 0 + self.fc_seg = nn.Conv1d( + fc_in_channels, + self.num_classes, + kernel_size=1, + stride=1, + padding=0) + if self.dropout_ratio > 0: + self.dropout = nn.Dropout(self.dropout_ratio) + delattr(self, 'conv_seg') + + def cls_seg(self, feat): + """Classify each pixel with fc.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.fc_seg(feat) + return output + + def forward(self, fine_grained_point_feats, coarse_point_feats): + x = torch.cat([fine_grained_point_feats, coarse_point_feats], dim=1) + for fc in self.fcs: + x = fc(x) + if self.coarse_pred_each_layer: + x = torch.cat((x, coarse_point_feats), dim=1) + return self.cls_seg(x) + + def _get_fine_grained_point_feats(self, x, points): + """Sample from fine grained features. + + Args: + x (list[Tensor]): Feature pyramid from by neck or backbone. + points (Tensor): Point coordinates, shape (batch_size, + num_points, 2). + + Returns: + fine_grained_feats (Tensor): Sampled fine grained feature, + shape (batch_size, sum(channels of x), num_points). + """ + + fine_grained_feats_list = [ + point_sample(_, points, align_corners=self.align_corners) + for _ in x + ] + if len(fine_grained_feats_list) > 1: + fine_grained_feats = torch.cat(fine_grained_feats_list, dim=1) + else: + fine_grained_feats = fine_grained_feats_list[0] + + return fine_grained_feats + + def _get_coarse_point_feats(self, prev_output, points): + """Sample from fine grained features. + + Args: + prev_output (list[Tensor]): Prediction of previous decode head. + points (Tensor): Point coordinates, shape (batch_size, + num_points, 2). + + Returns: + coarse_feats (Tensor): Sampled coarse feature, shape (batch_size, + num_classes, num_points). + """ + + coarse_feats = point_sample( + prev_output, points, align_corners=self.align_corners) + + return coarse_feats + + def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, + train_cfg): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. 
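Both samplers above go through `mmcv.ops.point_sample`, which reads feature values at normalized [0, 1] point coordinates by bilinear interpolation. A rough stand-alone equivalent built on `F.grid_sample` is sketched below; the name `naive_point_sample` and all tensor sizes are illustrative only and the sketch ignores the `align_corners` subtleties of the real op.

```python
import torch
import torch.nn.functional as F


def naive_point_sample(feats, points, align_corners=False):
    """Sample feats (N, C, H, W) at points (N, P, 2) given in [0, 1] x [0, 1]."""
    grid = 2.0 * points - 1.0     # map [0, 1] coords to grid_sample's [-1, 1]
    grid = grid.unsqueeze(2)      # (N, P, 1, 2): one "column" of query points
    out = F.grid_sample(feats, grid, mode='bilinear',
                        align_corners=align_corners)
    return out.squeeze(3)         # (N, C, P)


feats = torch.randn(2, 8, 32, 32)
points = torch.rand(2, 48, 2)
print(naive_point_sample(feats, points).shape)  # torch.Size([2, 8, 48])
```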
+ gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + x = self._transform_inputs(inputs) + with torch.no_grad(): + points = self.get_points_train( + prev_output, calculate_uncertainty, cfg=train_cfg) + fine_grained_point_feats = self._get_fine_grained_point_feats( + x, points) + coarse_point_feats = self._get_coarse_point_feats(prev_output, points) + point_logits = self.forward(fine_grained_point_feats, + coarse_point_feats) + point_label = point_sample( + gt_semantic_seg.float(), + points, + mode='nearest', + align_corners=self.align_corners) + point_label = point_label.squeeze(1).long() + + losses = self.losses(point_logits, point_label) + + return losses + + def forward_test(self, inputs, prev_output, img_metas, test_cfg): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. + + Returns: + Tensor: Output segmentation map. + """ + + x = self._transform_inputs(inputs) + refined_seg_logits = prev_output.clone() + for _ in range(test_cfg.subdivision_steps): + refined_seg_logits = resize( + refined_seg_logits, + scale_factor=test_cfg.scale_factor, + mode='bilinear', + align_corners=self.align_corners) + batch_size, channels, height, width = refined_seg_logits.shape + point_indices, points = self.get_points_test( + refined_seg_logits, calculate_uncertainty, cfg=test_cfg) + fine_grained_point_feats = self._get_fine_grained_point_feats( + x, points) + coarse_point_feats = self._get_coarse_point_feats( + prev_output, points) + point_logits = self.forward(fine_grained_point_feats, + coarse_point_feats) + + point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) + refined_seg_logits = refined_seg_logits.reshape( + batch_size, channels, height * width) + refined_seg_logits = refined_seg_logits.scatter_( + 2, point_indices, point_logits) + refined_seg_logits = refined_seg_logits.view( + batch_size, channels, height, width) + + return refined_seg_logits + + def losses(self, point_logits, point_label): + """Compute segmentation loss.""" + loss = dict() + if not isinstance(self.loss_decode, nn.ModuleList): + losses_decode = [self.loss_decode] + else: + losses_decode = self.loss_decode + for loss_module in losses_decode: + loss['point' + loss_module.loss_name] = loss_module( + point_logits, point_label, ignore_index=self.ignore_index) + + loss['acc_point'] = accuracy( + point_logits, point_label, ignore_index=self.ignore_index) + return loss + + def get_points_train(self, seg_logits, uncertainty_func, cfg): + """Sample points for training. + + Sample points in [0, 1] x [0, 1] coordinate space based on their + uncertainty. The uncertainties are calculated for each point using + 'uncertainty_func' function that takes point's logit prediction as + input. + + Args: + seg_logits (Tensor): Semantic segmentation logits, shape ( + batch_size, num_classes, height, width). + uncertainty_func (func): uncertainty calculation function. + cfg (dict): Training config of point head. 
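The refinement loop in `forward_test` above boils down to: upsample the coarse logits, find the most uncertain pixels, re-predict only those, and scatter the new logits back in place. A toy version of that scatter step, with random tensors standing in for the real features and point head, might look like this:

```python
import torch


def calculate_uncertainty(seg_logits):
    """Negative margin between the two largest class logits per pixel."""
    top2 = torch.topk(seg_logits, k=2, dim=1)[0]
    return (top2[:, 1] - top2[:, 0]).unsqueeze(1)


batch, classes, height, width = 2, 4, 8, 8
seg_logits = torch.randn(batch, classes, height, width)

# Pick the k most uncertain pixels of the (already upsampled) logit map ...
k = 16
uncertainty = calculate_uncertainty(seg_logits).view(batch, -1)
point_indices = uncertainty.topk(k, dim=1)[1]            # (batch, k)

# ... and overwrite them with re-predicted point logits (random stand-ins).
point_logits = torch.randn(batch, classes, k)
refined = seg_logits.clone().view(batch, classes, -1)
refined.scatter_(2, point_indices.unsqueeze(1).expand(-1, classes, -1),
                 point_logits)
refined = refined.view(batch, classes, height, width)
print(refined.shape)  # torch.Size([2, 4, 8, 8])
```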
+ + Returns: + point_coords (Tensor): A tensor of shape (batch_size, num_points, + 2) that contains the coordinates of ``num_points`` sampled + points. + """ + num_points = cfg.num_points + oversample_ratio = cfg.oversample_ratio + importance_sample_ratio = cfg.importance_sample_ratio + assert oversample_ratio >= 1 + assert 0 <= importance_sample_ratio <= 1 + batch_size = seg_logits.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand( + batch_size, num_sampled, 2, device=seg_logits.device) + point_logits = point_sample(seg_logits, point_coords) + # It is crucial to calculate uncertainty based on the sampled + # prediction value for the points. Calculating uncertainties of the + # coarse predictions first and sampling them for points leads to + # incorrect results. To illustrate this: assume uncertainty func( + # logits)=-abs(logits), a sampled point between two coarse + # predictions with -1 and 1 logits has 0 logits, and therefore 0 + # uncertainty value. However, if we calculate uncertainties for the + # coarse predictions first, both will have -1 uncertainty, + # and sampled point will get -1 uncertainty. + point_uncertainties = uncertainty_func(point_logits) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk( + point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_sampled * torch.arange( + batch_size, dtype=torch.long, device=seg_logits.device) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + batch_size, num_uncertain_points, 2) + if num_random_points > 0: + rand_point_coords = torch.rand( + batch_size, num_random_points, 2, device=seg_logits.device) + point_coords = torch.cat((point_coords, rand_point_coords), dim=1) + return point_coords + + def get_points_test(self, seg_logits, uncertainty_func, cfg): + """Sample points for testing. + + Find ``num_points`` most uncertain points from ``uncertainty_map``. + + Args: + seg_logits (Tensor): A tensor of shape (batch_size, num_classes, + height, width) for class-specific or class-agnostic prediction. + uncertainty_func (func): uncertainty calculation function. + cfg (dict): Testing config of point head. + + Returns: + point_indices (Tensor): A tensor of shape (batch_size, num_points) + that contains indices from [0, height x width) of the most + uncertain points. + point_coords (Tensor): A tensor of shape (batch_size, num_points, + 2) that contains [0, 1] x [0, 1] normalized coordinates of the + most uncertain points from the ``height x width`` grid . 
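The long comment in `get_points_train` about computing uncertainty on the sampled logits (rather than sampling a precomputed uncertainty map) can be checked with two coarse logits of -1 and +1; the numbers below are just that worked example.

```python
import torch
import torch.nn.functional as F

# Two neighbouring coarse logits, -1 and +1. The point halfway between them
# interpolates to logit 0, i.e. it is maximally uncertain under -abs(logit).
coarse = torch.tensor([[[[-1.0, 1.0]]]])                  # (1, 1, 1, 2)
sampled = F.interpolate(coarse, size=(1, 3), mode='bilinear',
                        align_corners=True)
print(sampled[0, 0, 0])                                   # tensor([-1., 0., 1.])

# Interpolating a precomputed uncertainty map instead hides that point:
precomputed = -coarse.abs()                               # both cells: -1
sampled_unc = F.interpolate(precomputed, size=(1, 3), mode='bilinear',
                            align_corners=True)
print(sampled_unc[0, 0, 0])                               # tensor([-1., -1., -1.])
```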
+ """ + + num_points = cfg.subdivision_num_points + uncertainty_map = uncertainty_func(seg_logits) + batch_size, _, height, width = uncertainty_map.shape + h_step = 1.0 / height + w_step = 1.0 / width + + uncertainty_map = uncertainty_map.view(batch_size, height * width) + num_points = min(height * width, num_points) + point_indices = uncertainty_map.topk(num_points, dim=1)[1] + point_coords = torch.zeros( + batch_size, + num_points, + 2, + dtype=torch.float, + device=seg_logits.device) + point_coords[:, :, 0] = w_step / 2.0 + (point_indices % + width).float() * w_step + point_coords[:, :, 1] = h_step / 2.0 + (point_indices // + width).float() * h_step + return point_indices, point_coords diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/psa_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/psa_head.py new file mode 100644 index 0000000..df7593c --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/psa_head.py @@ -0,0 +1,197 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +from mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + +try: + from mmcv.ops import PSAMask +except ModuleNotFoundError: + PSAMask = None + + +@HEADS.register_module() +class PSAHead(BaseDecodeHead): + """Point-wise Spatial Attention Network for Scene Parsing. + + This head is the implementation of `PSANet + `_. + + Args: + mask_size (tuple[int]): The PSA mask size. It usually equals input + size. + psa_type (str): The type of psa module. Options are 'collect', + 'distribute', 'bi-direction'. Default: 'bi-direction' + compact (bool): Whether use compact map for 'collect' mode. + Default: True. + shrink_factor (int): The downsample factors of psa mask. Default: 2. + normalization_factor (float): The normalize factor of attention. + psa_softmax (bool): Whether use softmax for attention. 
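For the coordinate bookkeeping in `get_points_test` above: flat top-k indices into the H x W grid are converted to cell-centre coordinates in [0, 1]. A small numeric check, with a grid size chosen arbitrarily:

```python
import torch

height, width = 4, 6
h_step, w_step = 1.0 / height, 1.0 / width

# Flattened row-major indices into the H x W grid, e.g. returned by topk.
point_indices = torch.tensor([0, 5, 13, 23])

x = w_step / 2.0 + (point_indices % width).float() * w_step
y = h_step / 2.0 + (point_indices // width).float() * h_step
print(torch.stack([x, y], dim=-1))
# index 0  -> centre of cell (row 0, col 0): (0.0833, 0.1250)
# index 23 -> centre of cell (row 3, col 5): (0.9167, 0.8750)
```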
+ """ + + def __init__(self, + mask_size, + psa_type='bi-direction', + compact=False, + shrink_factor=2, + normalization_factor=1.0, + psa_softmax=True, + **kwargs): + if PSAMask is None: + raise RuntimeError('Please install mmcv-full for PSAMask ops') + super(PSAHead, self).__init__(**kwargs) + assert psa_type in ['collect', 'distribute', 'bi-direction'] + self.psa_type = psa_type + self.compact = compact + self.shrink_factor = shrink_factor + self.mask_size = mask_size + mask_h, mask_w = mask_size + self.psa_softmax = psa_softmax + if normalization_factor is None: + normalization_factor = mask_h * mask_w + self.normalization_factor = normalization_factor + + self.reduce = ConvModule( + self.in_channels, + self.channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.attention = nn.Sequential( + ConvModule( + self.channels, + self.channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d( + self.channels, mask_h * mask_w, kernel_size=1, bias=False)) + if psa_type == 'bi-direction': + self.reduce_p = ConvModule( + self.in_channels, + self.channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.attention_p = nn.Sequential( + ConvModule( + self.channels, + self.channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d( + self.channels, mask_h * mask_w, kernel_size=1, bias=False)) + self.psamask_collect = PSAMask('collect', mask_size) + self.psamask_distribute = PSAMask('distribute', mask_size) + else: + self.psamask = PSAMask(psa_type, mask_size) + self.proj = ConvModule( + self.channels * (2 if psa_type == 'bi-direction' else 1), + self.in_channels, + kernel_size=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck = ConvModule( + self.in_channels * 2, + self.channels, + kernel_size=3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + identity = x + align_corners = self.align_corners + if self.psa_type in ['collect', 'distribute']: + out = self.reduce(x) + n, c, h, w = out.size() + if self.shrink_factor != 1: + if h % self.shrink_factor and w % self.shrink_factor: + h = (h - 1) // self.shrink_factor + 1 + w = (w - 1) // self.shrink_factor + 1 + align_corners = True + else: + h = h // self.shrink_factor + w = w // self.shrink_factor + align_corners = False + out = resize( + out, + size=(h, w), + mode='bilinear', + align_corners=align_corners) + y = self.attention(out) + if self.compact: + if self.psa_type == 'collect': + y = y.view(n, h * w, + h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y = self.psamask(y) + if self.psa_softmax: + y = F.softmax(y, dim=1) + out = torch.bmm( + out.view(n, c, h * w), y.view(n, h * w, h * w)).view( + n, c, h, w) * (1.0 / self.normalization_factor) + else: + x_col = self.reduce(x) + x_dis = self.reduce_p(x) + n, c, h, w = x_col.size() + if self.shrink_factor != 1: + if h % self.shrink_factor and w % self.shrink_factor: + h = (h - 1) // self.shrink_factor + 1 + w = (w - 1) // self.shrink_factor + 1 + align_corners = True + else: + h = h // self.shrink_factor + w = w // self.shrink_factor + align_corners = False + x_col = resize( + x_col, + size=(h, w), + mode='bilinear', + align_corners=align_corners) + x_dis = resize( + x_dis, + size=(h, w), + 
mode='bilinear', + align_corners=align_corners) + y_col = self.attention(x_col) + y_dis = self.attention_p(x_dis) + if self.compact: + y_dis = y_dis.view(n, h * w, + h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y_col = self.psamask_collect(y_col) + y_dis = self.psamask_distribute(y_dis) + if self.psa_softmax: + y_col = F.softmax(y_col, dim=1) + y_dis = F.softmax(y_dis, dim=1) + x_col = torch.bmm( + x_col.view(n, c, h * w), y_col.view(n, h * w, h * w)).view( + n, c, h, w) * (1.0 / self.normalization_factor) + x_dis = torch.bmm( + x_dis.view(n, c, h * w), y_dis.view(n, h * w, h * w)).view( + n, c, h, w) * (1.0 / self.normalization_factor) + out = torch.cat([x_col, x_dis], 1) + out = self.proj(out) + out = resize( + out, + size=identity.shape[2:], + mode='bilinear', + align_corners=align_corners) + out = self.bottleneck(torch.cat((identity, out), dim=1)) + out = self.cls_seg(out) + return out diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/psp_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/psp_head.py new file mode 100644 index 0000000..6990676 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/psp_head.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class PPM(nn.ModuleList): + """Pooling Pyramid Module used in PSPNet. + + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. + align_corners (bool): align_corners argument of F.interpolate. + """ + + def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, + act_cfg, align_corners, **kwargs): + super(PPM, self).__init__() + self.pool_scales = pool_scales + self.align_corners = align_corners + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + for pool_scale in pool_scales: + self.append( + nn.Sequential( + nn.AdaptiveAvgPool2d(pool_scale), + ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + **kwargs))) + + def forward(self, x): + """Forward function.""" + ppm_outs = [] + for ppm in self: + ppm_out = ppm(x) + upsampled_ppm_out = resize( + ppm_out, + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + ppm_outs.append(upsampled_ppm_out) + return ppm_outs + + +@HEADS.register_module() +class PSPHead(BaseDecodeHead): + """Pyramid Scene Parsing Network. + + This head is the implementation of + `PSPNet `_. + + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. Default: (1, 2, 3, 6). 
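The PPM above is adaptive average pooling at several output sizes, a 1x1 projection per scale, and bilinear upsampling back to the input resolution before concatenation. A minimal sketch without ConvModule's norm and activation layers; the class name `TinyPPM` and the channel counts are made up for the example:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyPPM(nn.Module):
    """Minimal pyramid pooling: pool -> 1x1 conv -> upsample, per scale."""

    def __init__(self, in_channels, channels, pool_scales=(1, 2, 3, 6)):
        super().__init__()
        self.stages = nn.ModuleList(
            nn.Sequential(nn.AdaptiveAvgPool2d(scale),
                          nn.Conv2d(in_channels, channels, 1))
            for scale in pool_scales)

    def forward(self, x):
        outs = [x]
        for stage in self.stages:
            out = F.interpolate(stage(x), size=x.shape[2:],
                                mode='bilinear', align_corners=False)
            outs.append(out)
        return torch.cat(outs, dim=1)


x = torch.randn(1, 64, 32, 32)
print(TinyPPM(64, 16)(x).shape)  # torch.Size([1, 128, 32, 32])
```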
+ """ + + def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): + super(PSPHead, self).__init__(**kwargs) + assert isinstance(pool_scales, (list, tuple)) + self.pool_scales = pool_scales + self.psp_modules = PPM( + self.pool_scales, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + self.bottleneck = ConvModule( + self.in_channels + len(pool_scales) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each pixel with + ``self.cls_seg`` fc. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. + """ + x = self._transform_inputs(inputs) + psp_outs = [x] + psp_outs.extend(self.psp_modules(x)) + psp_outs = torch.cat(psp_outs, dim=1) + feats = self.bottleneck(psp_outs) + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/segformer_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/segformer_head.py new file mode 100644 index 0000000..2e75d50 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/segformer_head.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmseg.models.builder import HEADS +from mmseg.models.decode_heads.decode_head import BaseDecodeHead +from mmseg.ops import resize + + +@HEADS.register_module() +class SegformerHead(BaseDecodeHead): + """The all mlp Head of segformer. + + This head is the implementation of + `Segformer ` _. + + Args: + interpolate_mode: The interpolate mode of MLP head upsample operation. + Default: 'bilinear'. 
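The `SegformerHead` that follows implements the same recipe as this sketch: project every backbone stage to a common width with 1x1 convs, upsample everything to the resolution of the first (1/4) stage, concatenate and fuse, then classify. `TinyAllMLPDecoder` and the channel widths are illustrative, and the norm layers used by ConvModule are omitted:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyAllMLPDecoder(nn.Module):
    """Project each stage to a common width, upsample to 1/4, concat, fuse."""

    def __init__(self, in_channels=(32, 64, 160, 256), channels=128,
                 num_classes=19):
        super().__init__()
        self.projs = nn.ModuleList(
            nn.Conv2d(c, channels, 1) for c in in_channels)
        self.fuse = nn.Conv2d(channels * len(in_channels), channels, 1)
        self.cls = nn.Conv2d(channels, num_classes, 1)

    def forward(self, feats):
        target = feats[0].shape[2:]
        outs = [F.interpolate(proj(f), size=target, mode='bilinear',
                              align_corners=False)
                for proj, f in zip(self.projs, feats)]
        return self.cls(self.fuse(torch.cat(outs, dim=1)))


feats = [torch.randn(1, c, 64 // 2**i, 64 // 2**i)
         for i, c in enumerate((32, 64, 160, 256))]
print(TinyAllMLPDecoder()(feats).shape)  # torch.Size([1, 19, 64, 64])
```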
+ """ + + def __init__(self, interpolate_mode='bilinear', **kwargs): + super().__init__(input_transform='multiple_select', **kwargs) + + self.interpolate_mode = interpolate_mode + num_inputs = len(self.in_channels) + + assert num_inputs == len(self.in_index) + + self.convs = nn.ModuleList() + for i in range(num_inputs): + self.convs.append( + ConvModule( + in_channels=self.in_channels[i], + out_channels=self.channels, + kernel_size=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + self.fusion_conv = ConvModule( + in_channels=self.channels * num_inputs, + out_channels=self.channels, + kernel_size=1, + norm_cfg=self.norm_cfg) + + def forward(self, inputs): + # Receive 4 stage backbone feature map: 1/4, 1/8, 1/16, 1/32 + inputs = self._transform_inputs(inputs) + outs = [] + for idx in range(len(inputs)): + x = inputs[idx] + conv = self.convs[idx] + outs.append( + resize( + input=conv(x), + size=inputs[0].shape[2:], + mode=self.interpolate_mode, + align_corners=self.align_corners)) + + out = self.fusion_conv(torch.cat(outs, dim=1)) + + out = self.cls_seg(out) + + return out diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/segformer_head_ori.py b/downstream/mmsegmentation/mmseg/models/decode_heads/segformer_head_ori.py new file mode 100644 index 0000000..446ba4c --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/segformer_head_ori.py @@ -0,0 +1,85 @@ +# --------------------------------------------------------------- +# Copyright (c) 2021, NVIDIA Corporation. All rights reserved. +# +# This work is licensed under the NVIDIA Source Code License +# --------------------------------------------------------------- +import numpy as np +import torch.nn as nn +import torch +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from collections import OrderedDict + +from mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead +# from mmseg.models.utils import * + + +class MLP(nn.Module): + """ + Linear Embedding + """ + def __init__(self, input_dim=2048, embed_dim=768): + super().__init__() + self.proj = nn.Linear(input_dim, embed_dim) + + def forward(self, x): + x = x.flatten(2).transpose(1, 2) + x = self.proj(x) + return x + + +@HEADS.register_module() +class SegFormerHeadOri(BaseDecodeHead): + """ + SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers + """ + def __init__(self, feature_strides, **kwargs): + super(SegFormerHeadOri, self).__init__(input_transform='multiple_select', **kwargs) + assert len(feature_strides) == len(self.in_channels) + assert min(feature_strides) == feature_strides[0] + self.feature_strides = feature_strides + + c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels + + decoder_params = kwargs['decoder_params'] + embedding_dim = decoder_params['embed_dim'] + + self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=embedding_dim) + self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=embedding_dim) + self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=embedding_dim) + self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=embedding_dim) + + self.linear_fuse = ConvModule( + in_channels=embedding_dim*4, + out_channels=embedding_dim, + kernel_size=1, + norm_cfg=dict(type='SyncBN', requires_grad=True) + ) + + # self.linear_pred = nn.Conv2d(embedding_dim, self.num_classes, kernel_size=(1,1)) + + def forward(self, inputs): + x = self._transform_inputs(inputs) # len=4, 1/4,1/8,1/16,1/32 + c1, c2, c3, c4 = x + + 
############## MLP decoder on C1-C4 ########### + n, _, h, w = c4.shape + + _c4 = self.linear_c4(c4).permute(0,2,1).reshape(n, -1, c4.shape[2], c4.shape[3]) + _c4 = resize(_c4, size=c1.size()[2:],mode='bilinear',align_corners=False) + + _c3 = self.linear_c3(c3).permute(0,2,1).reshape(n, -1, c3.shape[2], c3.shape[3]) + _c3 = resize(_c3, size=c1.size()[2:],mode='bilinear',align_corners=False) + + _c2 = self.linear_c2(c2).permute(0,2,1).reshape(n, -1, c2.shape[2], c2.shape[3]) + _c2 = resize(_c2, size=c1.size()[2:],mode='bilinear',align_corners=False) + + _c1 = self.linear_c1(c1).permute(0,2,1).reshape(n, -1, c1.shape[2], c1.shape[3]) + + _c = self.linear_fuse(torch.cat([_c4, _c3, _c2, _c1], dim=1)) + + x = self.dropout(_c) + x = self.conv_seg(x) + + return x \ No newline at end of file diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/segmenter_mask_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/segmenter_mask_head.py new file mode 100644 index 0000000..6a9b3d4 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/segmenter_mask_head.py @@ -0,0 +1,133 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmcv.cnn.utils.weight_init import (constant_init, trunc_normal_, + trunc_normal_init) +from mmcv.runner import ModuleList + +from mmseg.models.backbones.vit import TransformerEncoderLayer +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +@HEADS.register_module() +class SegmenterMaskTransformerHead(BaseDecodeHead): + """Segmenter: Transformer for Semantic Segmentation. + + This head is the implementation of + `Segmenter: `_. + + Args: + backbone_cfg:(dict): Config of backbone of + Context Path. + in_channels (int): The number of channels of input image. + num_layers (int): The depth of transformer. + num_heads (int): The number of attention heads. + embed_dims (int): The number of embedding dimension. + mlp_ratio (int): ratio of mlp hidden dim to embedding dim. + Default: 4. + drop_path_rate (float): stochastic depth rate. Default 0.1. + drop_rate (float): Probability of an element to be zeroed. + Default 0.0 + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + init_std (float): The value of std in weight initialization. + Default: 0.02. 
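The mask computation of the Segmenter head below reduces to appending learnable class embeddings to the patch tokens, running the joint sequence through transformer layers, and taking cosine similarities between patch and class tokens as per-class masks. The sketch skips the transformer stack and the two linear projections of the real head; all sizes are arbitrary:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

b, n_patches, embed_dims, num_classes, h, w = 2, 64, 32, 5, 8, 8

patch_tokens = torch.randn(b, n_patches, embed_dims)
cls_emb = nn.Parameter(torch.randn(1, num_classes, embed_dims))

# In the real head, a stack of transformer layers runs on the concatenation
# of patch and class tokens; omitted here to keep only the mask step.
tokens = torch.cat([patch_tokens, cls_emb.expand(b, -1, -1)], dim=1)

patches = F.normalize(tokens[:, :-num_classes], dim=2, p=2)
cls_seg_feat = F.normalize(tokens[:, -num_classes:], dim=2, p=2)

masks = patches @ cls_seg_feat.transpose(1, 2)      # cosine similarity
masks = masks.permute(0, 2, 1).reshape(b, num_classes, h, w)
print(masks.shape)  # torch.Size([2, 5, 8, 8])
```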
+ """ + + def __init__( + self, + in_channels, + num_layers, + num_heads, + embed_dims, + mlp_ratio=4, + drop_path_rate=0.1, + drop_rate=0.0, + attn_drop_rate=0.0, + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_std=0.02, + **kwargs, + ): + super(SegmenterMaskTransformerHead, self).__init__( + in_channels=in_channels, **kwargs) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_layers)] + self.layers = ModuleList() + for i in range(num_layers): + self.layers.append( + TransformerEncoderLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=mlp_ratio * embed_dims, + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate, + drop_path_rate=dpr[i], + num_fcs=num_fcs, + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + batch_first=True, + )) + + self.dec_proj = nn.Linear(in_channels, embed_dims) + + self.cls_emb = nn.Parameter( + torch.randn(1, self.num_classes, embed_dims)) + self.patch_proj = nn.Linear(embed_dims, embed_dims, bias=False) + self.classes_proj = nn.Linear(embed_dims, embed_dims, bias=False) + + self.decoder_norm = build_norm_layer( + norm_cfg, embed_dims, postfix=1)[1] + self.mask_norm = build_norm_layer( + norm_cfg, self.num_classes, postfix=2)[1] + + self.init_std = init_std + + delattr(self, 'conv_seg') + + def init_weights(self): + trunc_normal_(self.cls_emb, std=self.init_std) + trunc_normal_init(self.patch_proj, std=self.init_std) + trunc_normal_init(self.classes_proj, std=self.init_std) + for n, m in self.named_modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=self.init_std, bias=0) + elif isinstance(m, nn.LayerNorm): + constant_init(m, val=1.0, bias=0.0) + + def forward(self, inputs): + x = self._transform_inputs(inputs) + b, c, h, w = x.shape + x = x.permute(0, 2, 3, 1).contiguous().view(b, -1, c) + + x = self.dec_proj(x) + cls_emb = self.cls_emb.expand(x.size(0), -1, -1) + x = torch.cat((x, cls_emb), 1) + for layer in self.layers: + x = layer(x) + x = self.decoder_norm(x) + + patches = self.patch_proj(x[:, :-self.num_classes]) + cls_seg_feat = self.classes_proj(x[:, -self.num_classes:]) + + patches = F.normalize(patches, dim=2, p=2) + cls_seg_feat = F.normalize(cls_seg_feat, dim=2, p=2) + + masks = patches @ cls_seg_feat.transpose(1, 2) + masks = self.mask_norm(masks) + masks = masks.permute(0, 2, 1).contiguous().view(b, -1, h, w) + + return masks diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/sep_aspp_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/sep_aspp_head.py new file mode 100644 index 0000000..4e894e2 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/sep_aspp_head.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule + +from mmseg.ops import resize +from ..builder import HEADS +from .aspp_head import ASPPHead, ASPPModule + + +class DepthwiseSeparableASPPModule(ASPPModule): + """Atrous Spatial Pyramid Pooling (ASPP) Module with depthwise separable + conv.""" + + def __init__(self, **kwargs): + super(DepthwiseSeparableASPPModule, self).__init__(**kwargs) + for i, dilation in enumerate(self.dilations): + if dilation > 1: + self[i] = DepthwiseSeparableConvModule( + self.in_channels, + self.channels, + 3, + dilation=dilation, + padding=dilation, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + +@HEADS.register_module() +class DepthwiseSeparableASPPHead(ASPPHead): + """Encoder-Decoder with Atrous Separable Convolution for Semantic Image + Segmentation. + + This head is the implementation of `DeepLabV3+ + `_. + + Args: + c1_in_channels (int): The input channels of c1 decoder. If is 0, + the no decoder will be used. + c1_channels (int): The intermediate channels of c1 decoder. + """ + + def __init__(self, c1_in_channels, c1_channels, **kwargs): + super(DepthwiseSeparableASPPHead, self).__init__(**kwargs) + assert c1_in_channels >= 0 + self.aspp_modules = DepthwiseSeparableASPPModule( + dilations=self.dilations, + in_channels=self.in_channels, + channels=self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if c1_in_channels > 0: + self.c1_bottleneck = ConvModule( + c1_in_channels, + c1_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + self.c1_bottleneck = None + self.sep_bottleneck = nn.Sequential( + DepthwiseSeparableConvModule( + self.channels + c1_channels, + self.channels, + 3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + DepthwiseSeparableConvModule( + self.channels, + self.channels, + 3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + aspp_outs = [ + resize( + self.image_pool(x), + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + ] + aspp_outs.extend(self.aspp_modules(x)) + aspp_outs = torch.cat(aspp_outs, dim=1) + output = self.bottleneck(aspp_outs) + if self.c1_bottleneck is not None: + c1_output = self.c1_bottleneck(inputs[0]) + output = resize( + input=output, + size=c1_output.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + output = torch.cat([output, c1_output], dim=1) + output = self.sep_bottleneck(output) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/sep_fcn_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/sep_fcn_head.py new file mode 100644 index 0000000..7f9658e --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/sep_fcn_head.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import DepthwiseSeparableConvModule + +from ..builder import HEADS +from .fcn_head import FCNHead + + +@HEADS.register_module() +class DepthwiseSeparableFCNHead(FCNHead): + """Depthwise-Separable Fully Convolutional Network for Semantic + Segmentation. + + This head is implemented according to `Fast-SCNN: Fast Semantic + Segmentation Network `_. + + Args: + in_channels(int): Number of output channels of FFM. + channels(int): Number of middle-stage channels in the decode head. 
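The point of the depthwise separable modules used in these heads is the parameter (and FLOP) saving of factorising a k x k convolution into a depthwise k x k plus a pointwise 1x1. A quick comparison for one 128-channel 3x3 layer, using plain `nn.Conv2d` without the norm and activation that `DepthwiseSeparableConvModule` adds:

```python
import torch
import torch.nn as nn

in_ch, out_ch, k = 128, 128, 3

standard = nn.Conv2d(in_ch, out_ch, k, padding=1)
separable = nn.Sequential(
    nn.Conv2d(in_ch, in_ch, k, padding=1, groups=in_ch),  # depthwise
    nn.Conv2d(in_ch, out_ch, 1),                          # pointwise
)


def n_params(m):
    return sum(p.numel() for p in m.parameters())


print(n_params(standard), n_params(separable))  # 147584 vs 17792
x = torch.randn(1, in_ch, 32, 32)
print(separable(x).shape)                       # torch.Size([1, 128, 32, 32])
```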
+        concat_input(bool): Whether to concatenate original decode input into
+            the result of several consecutive convolution layers.
+            Default: True.
+        num_classes(int): Used to determine the dimension of
+            final prediction tensor.
+        in_index(int): Correspond with 'out_indices' in FastSCNN backbone.
+        norm_cfg (dict | None): Config of norm layers.
+        align_corners (bool): align_corners argument of F.interpolate.
+            Default: False.
+        loss_decode(dict): Config of loss type and some
+            relevant additional options.
+        dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
+            'default', it will be the same as `act_cfg`. Default: None.
+    """
+
+    def __init__(self, dw_act_cfg=None, **kwargs):
+        super(DepthwiseSeparableFCNHead, self).__init__(**kwargs)
+        self.convs[0] = DepthwiseSeparableConvModule(
+            self.in_channels,
+            self.channels,
+            kernel_size=self.kernel_size,
+            padding=self.kernel_size // 2,
+            norm_cfg=self.norm_cfg,
+            dw_act_cfg=dw_act_cfg)
+
+        for i in range(1, self.num_convs):
+            self.convs[i] = DepthwiseSeparableConvModule(
+                self.channels,
+                self.channels,
+                kernel_size=self.kernel_size,
+                padding=self.kernel_size // 2,
+                norm_cfg=self.norm_cfg,
+                dw_act_cfg=dw_act_cfg)
+
+        if self.concat_input:
+            self.conv_cat = DepthwiseSeparableConvModule(
+                self.in_channels + self.channels,
+                self.channels,
+                kernel_size=self.kernel_size,
+                padding=self.kernel_size // 2,
+                norm_cfg=self.norm_cfg,
+                dw_act_cfg=dw_act_cfg)
diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/setr_mla_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/setr_mla_head.py
new file mode 100644
index 0000000..6bb94ae
--- /dev/null
+++ b/downstream/mmsegmentation/mmseg/models/decode_heads/setr_mla_head.py
@@ -0,0 +1,63 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule
+
+from mmseg.ops import Upsample
+from ..builder import HEADS
+from .decode_head import BaseDecodeHead
+
+
+@HEADS.register_module()
+class SETRMLAHead(BaseDecodeHead):
+    """Multi level feature aggregation head of SETR.
+
+    MLA head of `SETR `_.
+
+    Args:
+        mla_channels (int): Channels of conv-conv-4x of multi-level feature
+            aggregation. Default: 128.
+        up_scale (int): The scale factor of interpolate. Default: 4.
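Each MLA branch below is conv 3x3, conv 3x3, then a fixed 4x upsample, and the branch outputs are concatenated before `cls_seg`; that is why the head asserts `channels == num_inputs * mla_channels`. A shape-only sketch with plain convolutions (no norm or activation) and arbitrary input widths:

```python
import torch
import torch.nn as nn

in_channels = [256, 256, 256, 256]   # arbitrary MLA branch widths
mla_channels, up_scale = 128, 4
channels = len(in_channels) * mla_channels   # what cls_seg will consume

branches = nn.ModuleList(
    nn.Sequential(nn.Conv2d(c, mla_channels, 3, padding=1),
                  nn.Conv2d(mla_channels, mla_channels, 3, padding=1),
                  nn.Upsample(scale_factor=up_scale, mode='bilinear',
                              align_corners=False))
    for c in in_channels)

feats = [torch.randn(1, c, 16, 16) for c in in_channels]
out = torch.cat([branch(f) for branch, f in zip(branches, feats)], dim=1)
print(out.shape)  # torch.Size([1, 512, 64, 64]) -> fed to a 1x1 cls_seg conv
```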
+ """ + + def __init__(self, mla_channels=128, up_scale=4, **kwargs): + super(SETRMLAHead, self).__init__( + input_transform='multiple_select', **kwargs) + self.mla_channels = mla_channels + + num_inputs = len(self.in_channels) + + # Refer to self.cls_seg settings of BaseDecodeHead + assert self.channels == num_inputs * mla_channels + + self.up_convs = nn.ModuleList() + for i in range(num_inputs): + self.up_convs.append( + nn.Sequential( + ConvModule( + in_channels=self.in_channels[i], + out_channels=mla_channels, + kernel_size=3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + in_channels=mla_channels, + out_channels=mla_channels, + kernel_size=3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + Upsample( + scale_factor=up_scale, + mode='bilinear', + align_corners=self.align_corners))) + + def forward(self, inputs): + inputs = self._transform_inputs(inputs) + outs = [] + for x, up_conv in zip(inputs, self.up_convs): + outs.append(up_conv(x)) + out = torch.cat(outs, dim=1) + out = self.cls_seg(out) + return out diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/setr_up_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/setr_up_head.py new file mode 100644 index 0000000..87e7ea7 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/setr_up_head.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule, build_norm_layer + +from mmseg.ops import Upsample +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +@HEADS.register_module() +class SETRUPHead(BaseDecodeHead): + """Naive upsampling head and Progressive upsampling head of SETR. + + Naive or PUP head of `SETR `_. + + Args: + norm_layer (dict): Config dict for input normalization. + Default: norm_layer=dict(type='LN', eps=1e-6, requires_grad=True). + num_convs (int): Number of decoder convolutions. Default: 1. + up_scale (int): The scale factor of interpolate. Default:4. + kernel_size (int): The kernel size of convolution when decoding + feature information from backbone. Default: 3. + init_cfg (dict | list[dict] | None): Initialization config dict. + Default: dict( + type='Constant', val=1.0, bias=0, layer='LayerNorm'). + """ + + def __init__(self, + norm_layer=dict(type='LN', eps=1e-6, requires_grad=True), + num_convs=1, + up_scale=4, + kernel_size=3, + init_cfg=[ + dict(type='Constant', val=1.0, bias=0, layer='LayerNorm'), + dict( + type='Normal', + std=0.01, + override=dict(name='conv_seg')) + ], + **kwargs): + + assert kernel_size in [1, 3], 'kernel_size must be 1 or 3.' 
+ + super(SETRUPHead, self).__init__(init_cfg=init_cfg, **kwargs) + + assert isinstance(self.in_channels, int) + + _, self.norm = build_norm_layer(norm_layer, self.in_channels) + + self.up_convs = nn.ModuleList() + in_channels = self.in_channels + out_channels = self.channels + for _ in range(num_convs): + self.up_convs.append( + nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1, + padding=int(kernel_size - 1) // 2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + Upsample( + scale_factor=up_scale, + mode='bilinear', + align_corners=self.align_corners))) + in_channels = out_channels + + def forward(self, x): + x = self._transform_inputs(x) + + n, c, h, w = x.shape + x = x.reshape(n, c, h * w).transpose(2, 1).contiguous() + x = self.norm(x) + x = x.transpose(1, 2).reshape(n, c, h, w).contiguous() + + for up_conv in self.up_convs: + x = up_conv(x) + out = self.cls_seg(x) + return out diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/stdc_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/stdc_head.py new file mode 100644 index 0000000..1cf3732 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/stdc_head.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F + +from ..builder import HEADS +from .fcn_head import FCNHead + + +@HEADS.register_module() +class STDCHead(FCNHead): + """This head is the implementation of `Rethinking BiSeNet For Real-time + Semantic Segmentation `_. + + Args: + boundary_threshold (float): The threshold of calculating boundary. + Default: 0.1. + """ + + def __init__(self, boundary_threshold=0.1, **kwargs): + super(STDCHead, self).__init__(**kwargs) + self.boundary_threshold = boundary_threshold + # Using register buffer to make laplacian kernel on the same + # device of `seg_label`. + self.register_buffer( + 'laplacian_kernel', + torch.tensor([-1, -1, -1, -1, 8, -1, -1, -1, -1], + dtype=torch.float32, + requires_grad=False).reshape((1, 1, 3, 3))) + self.fusion_kernel = torch.nn.Parameter( + torch.tensor([[6. / 10], [3. / 10], [1. / 10]], + dtype=torch.float32).reshape(1, 3, 1, 1), + requires_grad=False) + + def losses(self, seg_logit, seg_label): + """Compute Detail Aggregation Loss.""" + # Note: The paper claims `fusion_kernel` is a trainable 1x1 conv + # parameters. However, it is a constant in original repo and other + # codebase because it would not be added into computation graph + # after threshold operation. 
+ seg_label = seg_label.to(self.laplacian_kernel) + boundary_targets = F.conv2d( + seg_label, self.laplacian_kernel, padding=1) + boundary_targets = boundary_targets.clamp(min=0) + boundary_targets[boundary_targets > self.boundary_threshold] = 1 + boundary_targets[boundary_targets <= self.boundary_threshold] = 0 + + boundary_targets_x2 = F.conv2d( + seg_label, self.laplacian_kernel, stride=2, padding=1) + boundary_targets_x2 = boundary_targets_x2.clamp(min=0) + + boundary_targets_x4 = F.conv2d( + seg_label, self.laplacian_kernel, stride=4, padding=1) + boundary_targets_x4 = boundary_targets_x4.clamp(min=0) + + boundary_targets_x4_up = F.interpolate( + boundary_targets_x4, boundary_targets.shape[2:], mode='nearest') + boundary_targets_x2_up = F.interpolate( + boundary_targets_x2, boundary_targets.shape[2:], mode='nearest') + + boundary_targets_x2_up[ + boundary_targets_x2_up > self.boundary_threshold] = 1 + boundary_targets_x2_up[ + boundary_targets_x2_up <= self.boundary_threshold] = 0 + + boundary_targets_x4_up[ + boundary_targets_x4_up > self.boundary_threshold] = 1 + boundary_targets_x4_up[ + boundary_targets_x4_up <= self.boundary_threshold] = 0 + + boudary_targets_pyramids = torch.stack( + (boundary_targets, boundary_targets_x2_up, boundary_targets_x4_up), + dim=1) + + boudary_targets_pyramids = boudary_targets_pyramids.squeeze(2) + boudary_targets_pyramid = F.conv2d(boudary_targets_pyramids, + self.fusion_kernel) + + boudary_targets_pyramid[ + boudary_targets_pyramid > self.boundary_threshold] = 1 + boudary_targets_pyramid[ + boudary_targets_pyramid <= self.boundary_threshold] = 0 + + loss = super(STDCHead, self).losses(seg_logit, + boudary_targets_pyramid.long()) + return loss diff --git a/downstream/mmsegmentation/mmseg/models/decode_heads/uper_head.py b/downstream/mmsegmentation/mmseg/models/decode_heads/uper_head.py new file mode 100644 index 0000000..06b152a --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/decode_heads/uper_head.py @@ -0,0 +1,140 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead +from .psp_head import PPM + + +@HEADS.register_module() +class UPerHead(BaseDecodeHead): + """Unified Perceptual Parsing for Scene Understanding. + + This head is the implementation of `UPerNet + `_. + + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module applied on the last feature. Default: (1, 2, 3, 6). 
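The `UPerHead` that follows combines a PPM on the coarsest feature with an FPN-style top-down pathway over the remaining levels. The core of that top-down pass is just 1x1 laterals plus upsample-and-add, as in this sketch (the backbone widths are invented):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

channels = 64
in_channels = [96, 192, 384, 768]    # arbitrary backbone widths

lateral_convs = nn.ModuleList(nn.Conv2d(c, channels, 1) for c in in_channels)
feats = [torch.randn(1, c, 64 // 2**i, 64 // 2**i)
         for i, c in enumerate(in_channels)]

# 1x1 laterals, then top-down: upsample the coarser map and add it in.
laterals = [conv(f) for conv, f in zip(lateral_convs, feats)]
for i in range(len(laterals) - 1, 0, -1):
    laterals[i - 1] = laterals[i - 1] + F.interpolate(
        laterals[i], size=laterals[i - 1].shape[2:],
        mode='bilinear', align_corners=False)

print([tuple(t.shape) for t in laterals])
# [(1, 64, 64, 64), (1, 64, 32, 32), (1, 64, 16, 16), (1, 64, 8, 8)]
```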
+ """ + + def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): + super(UPerHead, self).__init__( + input_transform='multiple_select', **kwargs) + # PSP Module + self.psp_modules = PPM( + pool_scales, + self.in_channels[-1], + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + self.bottleneck = ConvModule( + self.in_channels[-1] + len(pool_scales) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + # FPN Module + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + for in_channels in self.in_channels[:-1]: # skip the top layer + l_conv = ConvModule( + in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + inplace=False) + fpn_conv = ConvModule( + self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + inplace=False) + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + self.fpn_bottleneck = ConvModule( + len(self.in_channels) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def psp_forward(self, inputs): + """Forward function of PSP module.""" + x = inputs[-1] + psp_outs = [x] + psp_outs.extend(self.psp_modules(x)) + psp_outs = torch.cat(psp_outs, dim=1) + output = self.bottleneck(psp_outs) + + return output + + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each pixel with + ``self.cls_seg`` fc. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. + """ + inputs = self._transform_inputs(inputs) + + # build laterals + laterals = [ + lateral_conv(inputs[i]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + laterals.append(self.psp_forward(inputs)) + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] = laterals[i - 1] + resize( + laterals[i], + size=prev_shape, + mode='bilinear', + align_corners=self.align_corners) + + # build outputs + fpn_outs = [ + self.fpn_convs[i](laterals[i]) + for i in range(used_backbone_levels - 1) + ] + # append psp feature + fpn_outs.append(laterals[-1]) + + for i in range(used_backbone_levels - 1, 0, -1): + fpn_outs[i] = resize( + fpn_outs[i], + size=fpn_outs[0].shape[2:], + mode='bilinear', + align_corners=self.align_corners) + fpn_outs = torch.cat(fpn_outs, dim=1) + feats = self.fpn_bottleneck(fpn_outs) + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) + output = self.cls_seg(output) + return output diff --git a/downstream/mmsegmentation/mmseg/models/losses/__init__.py b/downstream/mmsegmentation/mmseg/models/losses/__init__.py new file mode 100644 index 0000000..fbc5b2d --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/losses/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .accuracy import Accuracy, accuracy +from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, + cross_entropy, mask_cross_entropy) +from .dice_loss import DiceLoss +from .focal_loss import FocalLoss +from .lovasz_loss import LovaszLoss +from .utils import reduce_loss, weight_reduce_loss, weighted_loss + +__all__ = [ + 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', + 'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss', + 'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss', + 'FocalLoss' +] diff --git a/downstream/mmsegmentation/mmseg/models/losses/accuracy.py b/downstream/mmsegmentation/mmseg/models/losses/accuracy.py new file mode 100644 index 0000000..28d55c4 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/losses/accuracy.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + + +def accuracy(pred, target, topk=1, thresh=None, ignore_index=None): + """Calculate accuracy according to the prediction and target. + + Args: + pred (torch.Tensor): The model prediction, shape (N, num_class, ...) + target (torch.Tensor): The target of each prediction, shape (N, , ...) + ignore_index (int | None): The label index to be ignored. Default: None + topk (int | tuple[int], optional): If the predictions in ``topk`` + matches the target, the predictions will be regarded as + correct ones. Defaults to 1. + thresh (float, optional): If not None, predictions with scores under + this threshold are considered incorrect. Default to None. + + Returns: + float | tuple[float]: If the input ``topk`` is a single integer, + the function will return a single float as accuracy. If + ``topk`` is a tuple containing multiple integers, the + function will return a tuple containing accuracies of + each ``topk`` number. + """ + assert isinstance(topk, (int, tuple)) + if isinstance(topk, int): + topk = (topk, ) + return_single = True + else: + return_single = False + + maxk = max(topk) + if pred.size(0) == 0: + accu = [pred.new_tensor(0.) for i in range(len(topk))] + return accu[0] if return_single else accu + assert pred.ndim == target.ndim + 1 + assert pred.size(0) == target.size(0) + assert maxk <= pred.size(1), \ + f'maxk {maxk} exceeds pred dimension {pred.size(1)}' + pred_value, pred_label = pred.topk(maxk, dim=1) + # transpose to shape (maxk, N, ...) + pred_label = pred_label.transpose(0, 1) + correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label)) + if thresh is not None: + # Only prediction values larger than thresh are counted as correct + correct = correct & (pred_value > thresh).t() + correct = correct[:, target != ignore_index] + res = [] + eps = torch.finfo(torch.float32).eps + for k in topk: + # Avoid causing ZeroDivisionError when all pixels + # of an image are ignored + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + eps + total_num = target[target != ignore_index].numel() + eps + res.append(correct_k.mul_(100.0 / total_num)) + return res[0] if return_single else res + + +class Accuracy(nn.Module): + """Accuracy calculation module.""" + + def __init__(self, topk=(1, ), thresh=None, ignore_index=None): + """Module to calculate the accuracy. + + Args: + topk (tuple, optional): The criterion used to calculate the + accuracy. Defaults to (1,). + thresh (float, optional): If not None, predictions with scores + under this threshold are considered incorrect. Default to None. 
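For the common top-1 case, the `accuracy` helper above is pixel-wise argmax agreement with the label, with ignored pixels excluded from both numerator and denominator. A hand-checked miniature with 3 classes, a 2x2 image, and 255 as the ignore label:

```python
import torch

# 1 image, 3 classes, 2x2 pixels; class logits along dim=1.
pred = torch.tensor([[[[2.0, 0.1], [0.2, 0.3]],
                      [[0.5, 1.5], [0.1, 0.9]],
                      [[0.1, 0.2], [3.0, 0.2]]]])
target = torch.tensor([[[0, 1], [2, 255]]])   # 255 = ignored pixel

pred_label = pred.argmax(dim=1)               # top-1 prediction per pixel
valid = target != 255
acc = (pred_label[valid] == target[valid]).float().mean() * 100
print(acc)  # tensor(100.) -- the ignored pixel does not count
```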
+ """ + super().__init__() + self.topk = topk + self.thresh = thresh + self.ignore_index = ignore_index + + def forward(self, pred, target): + """Forward function to calculate accuracy. + + Args: + pred (torch.Tensor): Prediction of models. + target (torch.Tensor): Target for each prediction. + + Returns: + tuple[float]: The accuracies under different topk criterions. + """ + return accuracy(pred, target, self.topk, self.thresh, + self.ignore_index) diff --git a/downstream/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py b/downstream/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000..cd4cfc4 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py @@ -0,0 +1,295 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.runner import force_fp32 + +from ..builder import LOSSES +from .utils import get_class_weight, weight_reduce_loss + + +def cross_entropy(pred, + label, + weight=None, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=-100, + avg_non_ignore=False): + """cross_entropy. The wrapper function for :func:`F.cross_entropy` + + Args: + pred (torch.Tensor): The prediction with shape (N, 1). + label (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + Default: None. + class_weight (list[float], optional): The weight for each class. + Default: None. + reduction (str, optional): The method used to reduce the loss. + Options are 'none', 'mean' and 'sum'. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Default: None. + ignore_index (int): Specifies a target value that is ignored and + does not contribute to the input gradients. When + ``avg_non_ignore `` is ``True``, and the ``reduction`` is + ``''mean''``, the loss is averaged over non-ignored targets. + Defaults: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + `New in version 0.23.0.` + """ + + # class_weight is a manual rescaling weight given to each class. 
+ # If given, has to be a Tensor of size C element-wise losses + loss = F.cross_entropy( + pred, + label, + weight=class_weight, + reduction='none', + ignore_index=ignore_index) + + # apply weights and do the reduction + # average loss over non-ignored elements + # pytorch's official cross_entropy average loss over non-ignored elements + # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa + if (avg_factor is None) and avg_non_ignore and reduction == 'mean': + avg_factor = label.numel() - (label == ignore_index).sum().item() + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): + """Expand onehot labels to match the size of prediction.""" + bin_labels = labels.new_zeros(target_shape) + valid_mask = (labels >= 0) & (labels != ignore_index) + inds = torch.nonzero(valid_mask, as_tuple=True) + + if inds[0].numel() > 0: + if labels.dim() == 3: + bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 + else: + bin_labels[inds[0], labels[valid_mask]] = 1 + + valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() + + if label_weights is None: + bin_label_weights = valid_mask + else: + bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) + bin_label_weights = bin_label_weights * valid_mask + + return bin_labels, bin_label_weights, valid_mask + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=-100, + avg_non_ignore=False, + **kwargs): + """Calculate the binary CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, 1). + label (torch.Tensor): The learning label of the prediction. + Note: In bce loss, label < 0 is invalid. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (int): The label index to be ignored. Default: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + `New in version 0.23.0.` + + Returns: + torch.Tensor: The calculated loss + """ + if pred.size(1) == 1: + # For binary class segmentation, the shape of pred is + # [N, 1, H, W] and that of label is [N, H, W]. 
+ assert label.max() <= 1, \ + 'For pred with shape [N, 1, H, W], its label must have at ' \ + 'most 2 classes' + pred = pred.squeeze() + if pred.dim() != label.dim(): + assert (pred.dim() == 2 and label.dim() == 1) or ( + pred.dim() == 4 and label.dim() == 3), \ + 'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \ + 'H, W], label shape [N, H, W] are supported' + # `weight` returned from `_expand_onehot_labels` + # has been treated for valid (non-ignore) pixels + label, weight, valid_mask = _expand_onehot_labels( + label, weight, pred.shape, ignore_index) + else: + # should mask out the ignored elements + valid_mask = ((label >= 0) & (label != ignore_index)).float() + if weight is not None: + weight = weight * valid_mask + else: + weight = valid_mask + # average loss over non-ignored and valid elements + if reduction == 'mean' and avg_factor is None and avg_non_ignore: + avg_factor = valid_mask.sum().item() + + loss = F.binary_cross_entropy_with_logits( + pred, label.float(), pos_weight=class_weight, reduction='none') + # do the reduction for the weighted loss + loss = weight_reduce_loss( + loss, weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def mask_cross_entropy(pred, + target, + label, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=None, + **kwargs): + """Calculate the CrossEntropy loss for masks. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + target (torch.Tensor): The learning label of the prediction. + label (torch.Tensor): ``label`` indicates the class label of the mask' + corresponding object. This will be used to select the mask in the + of the class which the object belongs to when the mask prediction + if not class-agnostic. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (None): Placeholder, to be consistent with other loss. + Default: None. + + Returns: + torch.Tensor: The calculated loss + """ + assert ignore_index is None, 'BCE loss does not support ignore_index' + # TODO: handle these two reserved arguments + assert reduction == 'mean' and avg_factor is None + num_rois = pred.size()[0] + inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) + pred_slice = pred[inds, label].squeeze(1) + return F.binary_cross_entropy_with_logits( + pred_slice, target, weight=class_weight, reduction='mean')[None] + + +@LOSSES.register_module() +class CrossEntropyLoss(nn.Module): + """CrossEntropyLoss. + + Args: + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + of softmax. Defaults to False. + use_mask (bool, optional): Whether to use mask cross entropy loss. + Defaults to False. + reduction (str, optional): . Defaults to 'mean'. + Options are "none", "mean" and "sum". + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + loss_name (str, optional): Name of the loss item. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_ce'. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. 
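`avg_non_ignore` in the loss below only changes the denominator of the 'mean' reduction: the summed per-pixel loss is divided by all pixels versus by non-ignored pixels only, the latter matching PyTorch's own `F.cross_entropy` behaviour. A small sketch of the difference, with random logits and 255 as the ignore label:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(1, 3, 2, 2)
label = torch.tensor([[[0, 1], [2, 255]]])    # one ignored pixel

per_pixel = F.cross_entropy(logits, label, ignore_index=255, reduction='none')

# avg_non_ignore=False: divide by every pixel (here 4)
print(per_pixel.sum() / label.numel())
# avg_non_ignore=True: divide by non-ignored pixels only (here 3), which
# matches PyTorch's reduction='mean' when ignore_index is set:
print(per_pixel.sum() / (label != 255).sum())
print(F.cross_entropy(logits, label, ignore_index=255, reduction='mean'))
```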
+ `New in version 0.23.0.` + """ + + def __init__(self, + use_sigmoid=False, + use_mask=False, + reduction='mean', + class_weight=None, + loss_weight=1.0, + loss_name='loss_ce', + avg_non_ignore=False): + super(CrossEntropyLoss, self).__init__() + assert (use_sigmoid is False) or (use_mask is False) + self.use_sigmoid = use_sigmoid + self.use_mask = use_mask + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = get_class_weight(class_weight) + self.avg_non_ignore = avg_non_ignore + if not self.avg_non_ignore and self.reduction == 'mean': + warnings.warn( + 'Default ``avg_non_ignore`` is False, if you would like to ' + 'ignore the certain label and average loss over non-ignore ' + 'labels, which is the same with PyTorch official ' + 'cross_entropy, set ``avg_non_ignore=True``.') + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_mask: + self.cls_criterion = mask_cross_entropy + else: + self.cls_criterion = cross_entropy + self._loss_name = loss_name + + def extra_repr(self): + """Extra repr.""" + s = f'avg_non_ignore={self.avg_non_ignore}' + return s + + @force_fp32(apply_to='cls_score') + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + ignore_index=-100, + **kwargs): + """Forward function.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + # Note: for BCE loss, label < 0 is invalid. + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + avg_non_ignore=self.avg_non_ignore, + ignore_index=ignore_index, + **kwargs) + return loss_cls + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/downstream/mmsegmentation/mmseg/models/losses/dice_loss.py b/downstream/mmsegmentation/mmseg/models/losses/dice_loss.py new file mode 100644 index 0000000..79a3abf --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/losses/dice_loss.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
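Editor's note, before the dice loss below: the `avg_non_ignore` flag implemented above in `cross_entropy_loss.py` only changes the denominator of the mean reduction. A minimal, self-contained sketch of both behaviours in plain PyTorch (no mmseg imports; the shapes and ignore value are illustrative only):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
ignore_index = 255
logits = torch.randn(2, 4, 8, 8)           # (N, C, H, W)
labels = torch.randint(0, 4, (2, 8, 8))    # (N, H, W)
labels[0, :4] = ignore_index               # mark a block of pixels as ignored

per_pixel = F.cross_entropy(
    logits, labels, reduction='none', ignore_index=ignore_index)

# avg_non_ignore=False: divide by *all* labels; ignored pixels contribute
# zero to the numerator but still enlarge the denominator.
loss_all = per_pixel.sum() / labels.numel()

# avg_non_ignore=True: divide only by the non-ignored labels.
num_valid = (labels != ignore_index).sum()
loss_valid = per_pixel.sum() / num_valid

print(loss_all.item(), loss_valid.item())
assert torch.allclose(
    loss_valid, F.cross_entropy(logits, labels, ignore_index=ignore_index))
```

With `avg_non_ignore=True` and no explicit `avg_factor`, the `cross_entropy` and `binary_cross_entropy` wrappers above reduce to the second form, which matches PyTorch's own `reduction='mean'` handling of `ignore_index`.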
+"""Modified from https://github.com/LikeLy-Journey/SegmenTron/blob/master/ +segmentron/solver/loss.py (Apache-2.0 License)""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import get_class_weight, weighted_loss + + +@weighted_loss +def dice_loss(pred, + target, + valid_mask, + smooth=1, + exponent=2, + class_weight=None, + ignore_index=255): + assert pred.shape[0] == target.shape[0] + total_loss = 0 + num_classes = pred.shape[1] + for i in range(num_classes): + if i != ignore_index: + dice_loss = binary_dice_loss( + pred[:, i], + target[..., i], + valid_mask=valid_mask, + smooth=smooth, + exponent=exponent) + if class_weight is not None: + dice_loss *= class_weight[i] + total_loss += dice_loss + return total_loss / num_classes + + +@weighted_loss +def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards): + assert pred.shape[0] == target.shape[0] + pred = pred.reshape(pred.shape[0], -1) + target = target.reshape(target.shape[0], -1) + valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) + + num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth + den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth + + return 1 - num / den + + +@LOSSES.register_module() +class DiceLoss(nn.Module): + """DiceLoss. + + This loss is proposed in `V-Net: Fully Convolutional Neural Networks for + Volumetric Medical Image Segmentation `_. + + Args: + smooth (float): A float number to smooth loss, and avoid NaN error. + Default: 1 + exponent (float): An float number to calculate denominator + value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Default to 1.0. + ignore_index (int | None): The label index to be ignored. Default: 255. + loss_name (str, optional): Name of the loss item. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_dice'. 
+ """ + + def __init__(self, + smooth=1, + exponent=2, + reduction='mean', + class_weight=None, + loss_weight=1.0, + ignore_index=255, + loss_name='loss_dice', + **kwards): + super(DiceLoss, self).__init__() + self.smooth = smooth + self.exponent = exponent + self.reduction = reduction + self.class_weight = get_class_weight(class_weight) + self.loss_weight = loss_weight + self.ignore_index = ignore_index + self._loss_name = loss_name + + def forward(self, + pred, + target, + avg_factor=None, + reduction_override=None, + **kwards): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = pred.new_tensor(self.class_weight) + else: + class_weight = None + + pred = F.softmax(pred, dim=1) + num_classes = pred.shape[1] + one_hot_target = F.one_hot( + torch.clamp(target.long(), 0, num_classes - 1), + num_classes=num_classes) + valid_mask = (target != self.ignore_index).long() + + loss = self.loss_weight * dice_loss( + pred, + one_hot_target, + valid_mask=valid_mask, + reduction=reduction, + avg_factor=avg_factor, + smooth=self.smooth, + exponent=self.exponent, + class_weight=class_weight, + ignore_index=self.ignore_index) + return loss + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/downstream/mmsegmentation/mmseg/models/losses/focal_loss.py b/downstream/mmsegmentation/mmseg/models/losses/focal_loss.py new file mode 100644 index 0000000..af1c711 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/losses/focal_loss.py @@ -0,0 +1,327 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/open-mmlab/mmdetection +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +# This method is used when cuda is not available +def py_sigmoid_focal_loss(pred, + target, + one_hot_target=None, + weight=None, + gamma=2.0, + alpha=0.5, + class_weight=None, + valid_mask=None, + reduction='mean', + avg_factor=None): + """PyTorch version of `Focal Loss `_. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the + number of classes + target (torch.Tensor): The learning label of the prediction with + shape (N, C) + one_hot_target (None): Placeholder. It should be None. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float | list[float], optional): A balanced form for Focal Loss. + Defaults to 0.5. + class_weight (list[float], optional): Weight of each class. + Defaults to None. + valid_mask (torch.Tensor, optional): A mask uses 1 to mark the valid + samples and uses 0 to mark the ignored samples. Default: None. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. 
+ """ + if isinstance(alpha, list): + alpha = pred.new_tensor(alpha) + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + one_minus_pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * one_minus_pt.pow(gamma) + + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + final_weight = torch.ones(1, pred.size(1)).type_as(loss) + if weight is not None: + if weight.shape != loss.shape and weight.size(0) == loss.size(0): + # For most cases, weight is of shape (N, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + assert weight.dim() == loss.dim() + final_weight = final_weight * weight + if class_weight is not None: + final_weight = final_weight * pred.new_tensor(class_weight) + if valid_mask is not None: + final_weight = final_weight * valid_mask + loss = weight_reduce_loss(loss, final_weight, reduction, avg_factor) + return loss + + +def sigmoid_focal_loss(pred, + target, + one_hot_target, + weight=None, + gamma=2.0, + alpha=0.5, + class_weight=None, + valid_mask=None, + reduction='mean', + avg_factor=None): + r"""A warpper of cuda version `Focal Loss + `_. + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + target (torch.Tensor): The learning label of the prediction. It's shape + should be (N, ) + one_hot_target (torch.Tensor): The learning label with shape (N, C) + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float | list[float], optional): A balanced form for Focal Loss. + Defaults to 0.5. + class_weight (list[float], optional): Weight of each class. + Defaults to None. + valid_mask (torch.Tensor, optional): A mask uses 1 to mark the valid + samples and uses 0 to mark the ignored samples. Default: None. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + # Function.apply does not accept keyword arguments, so the decorator + # "weighted_loss" is not applicable + final_weight = torch.ones(1, pred.size(1)).type_as(pred) + if isinstance(alpha, list): + # _sigmoid_focal_loss doesn't accept alpha of list type. Therefore, if + # a list is given, we set the input alpha as 0.5. This means setting + # equal weight for foreground class and background class. By + # multiplying the loss by 2, the effect of setting alpha as 0.5 is + # undone. The alpha of type list is used to regulate the loss in the + # post-processing process. 
+ loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), + gamma, 0.5, None, 'none') * 2 + alpha = pred.new_tensor(alpha) + final_weight = final_weight * ( + alpha * one_hot_target + (1 - alpha) * (1 - one_hot_target)) + else: + loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), + gamma, alpha, None, 'none') + if weight is not None: + if weight.shape != loss.shape and weight.size(0) == loss.size(0): + # For most cases, weight is of shape (N, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + assert weight.dim() == loss.dim() + final_weight = final_weight * weight + if class_weight is not None: + final_weight = final_weight * pred.new_tensor(class_weight) + if valid_mask is not None: + final_weight = final_weight * valid_mask + loss = weight_reduce_loss(loss, final_weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class FocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.5, + reduction='mean', + class_weight=None, + loss_weight=1.0, + loss_name='loss_focal'): + """`Focal Loss `_ + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float | list[float], optional): A balanced form for Focal + Loss. Defaults to 0.5. When a list is provided, the length + of the list should be equal to the number of classes. + Please be careful that this parameter is not the + class-wise weight but the weight of a binary classification + problem. This binary classification problem regards the + pixels which belong to one class as the foreground + and the other pixels as the background, each element in + the list is the weight of the corresponding foreground class. + The value of alpha or each element of alpha should be a float + in the interval [0, 1]. If you want to specify the class-wise + weight, please use `class_weight` parameter. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and + "sum". + class_weight (list[float], optional): Weight of each class. + Defaults to None. + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + loss_name (str, optional): Name of the loss item. If you want this + loss item to be included into the backward graph, `loss_` must + be the prefix of the name. Defaults to 'loss_focal'. + """ + super(FocalLoss, self).__init__() + assert use_sigmoid is True, \ + 'AssertionError: Only sigmoid focal loss supported now.' 
+ assert reduction in ('none', 'mean', 'sum'), \ + "AssertionError: reduction should be 'none', 'mean' or " \ + "'sum'" + assert isinstance(alpha, (float, list)), \ + 'AssertionError: alpha should be of type float' + assert isinstance(gamma, float), \ + 'AssertionError: gamma should be of type float' + assert isinstance(loss_weight, float), \ + 'AssertionError: loss_weight should be of type float' + assert isinstance(loss_name, str), \ + 'AssertionError: loss_name should be of type str' + assert isinstance(class_weight, list) or class_weight is None, \ + 'AssertionError: class_weight must be None or of type list' + self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.class_weight = class_weight + self.loss_weight = loss_weight + self._loss_name = loss_name + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + ignore_index=255, + **kwargs): + """Forward function. + + Args: + pred (torch.Tensor): The prediction with shape + (N, C) where C = number of classes, or + (N, C, d_1, d_2, ..., d_K) with K≥1 in the + case of K-dimensional loss. + target (torch.Tensor): The ground truth. If containing class + indices, shape (N) where each value is 0≤targets[i]≤C−1, + or (N, d_1, d_2, ..., d_K) with K≥1 in the case of + K-dimensional loss. If containing class probabilities, + same shape as the input. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to + average the loss. Defaults to None. + reduction_override (str, optional): The reduction method used + to override the original reduction method of the loss. + Options are "none", "mean" and "sum". + ignore_index (int, optional): The label index to be ignored. + Default: 255 + Returns: + torch.Tensor: The calculated loss + """ + assert isinstance(ignore_index, int), \ + 'ignore_index must be of type int' + assert reduction_override in (None, 'none', 'mean', 'sum'), \ + "AssertionError: reduction should be 'none', 'mean' or " \ + "'sum'" + assert pred.shape == target.shape or \ + (pred.size(0) == target.size(0) and + pred.shape[2:] == target.shape[1:]), \ + "The shape of pred doesn't match the shape of target" + + original_shape = pred.shape + + # [B, C, d_1, d_2, ..., d_k] -> [C, B, d_1, d_2, ..., d_k] + pred = pred.transpose(0, 1) + # [C, B, d_1, d_2, ..., d_k] -> [C, N] + pred = pred.reshape(pred.size(0), -1) + # [C, N] -> [N, C] + pred = pred.transpose(0, 1).contiguous() + + if original_shape == target.shape: + # target with shape [B, C, d_1, d_2, ...] + # transform it's shape into [N, C] + # [B, C, d_1, d_2, ...] -> [C, B, d_1, d_2, ..., d_k] + target = target.transpose(0, 1) + # [C, B, d_1, d_2, ..., d_k] -> [C, N] + target = target.reshape(target.size(0), -1) + # [C, N] -> [N, C] + target = target.transpose(0, 1).contiguous() + else: + # target with shape [B, d_1, d_2, ...] 
+ # transform it's shape into [N, ] + target = target.view(-1).contiguous() + valid_mask = (target != ignore_index).view(-1, 1) + # avoid raising error when using F.one_hot() + target = torch.where(target == ignore_index, target.new_tensor(0), + target) + + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + num_classes = pred.size(1) + if torch.cuda.is_available() and pred.is_cuda: + if target.dim() == 1: + one_hot_target = F.one_hot(target, num_classes=num_classes) + else: + one_hot_target = target + target = target.argmax(dim=1) + valid_mask = (target != ignore_index).view(-1, 1) + calculate_loss_func = sigmoid_focal_loss + else: + one_hot_target = None + if target.dim() == 1: + target = F.one_hot(target, num_classes=num_classes) + else: + valid_mask = (target.argmax(dim=1) != ignore_index).view( + -1, 1) + calculate_loss_func = py_sigmoid_focal_loss + + loss_cls = self.loss_weight * calculate_loss_func( + pred, + target, + one_hot_target, + weight, + gamma=self.gamma, + alpha=self.alpha, + class_weight=self.class_weight, + valid_mask=valid_mask, + reduction=reduction, + avg_factor=avg_factor) + + if reduction == 'none': + # [N, C] -> [C, N] + loss_cls = loss_cls.transpose(0, 1) + # [C, N] -> [C, B, d1, d2, ...] + # original_shape: [B, C, d1, d2, ...] + loss_cls = loss_cls.reshape(original_shape[1], + original_shape[0], + *original_shape[2:]) + # [C, B, d1, d2, ...] -> [B, C, d1, d2, ...] + loss_cls = loss_cls.transpose(0, 1).contiguous() + else: + raise NotImplementedError + return loss_cls + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/downstream/mmsegmentation/mmseg/models/losses/lovasz_loss.py b/downstream/mmsegmentation/mmseg/models/losses/lovasz_loss.py new file mode 100644 index 0000000..2bb0fad --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/losses/lovasz_loss.py @@ -0,0 +1,323 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Modified from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytor +ch/lovasz_losses.py Lovasz-Softmax and Jaccard hinge loss in PyTorch Maxim +Berman 2018 ESAT-PSI KU Leuven (MIT License)""" + +import mmcv +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import get_class_weight, weight_reduce_loss + + +def lovasz_grad(gt_sorted): + """Computes gradient of the Lovasz extension w.r.t sorted errors. + + See Alg. 1 in paper. + """ + p = len(gt_sorted) + gts = gt_sorted.sum() + intersection = gts - gt_sorted.float().cumsum(0) + union = gts + (1 - gt_sorted).float().cumsum(0) + jaccard = 1. 
- intersection / union + if p > 1: # cover 1-pixel case + jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] + return jaccard + + +def flatten_binary_logits(logits, labels, ignore_index=None): + """Flattens predictions in the batch (binary case) Remove labels equal to + 'ignore_index'.""" + logits = logits.view(-1) + labels = labels.view(-1) + if ignore_index is None: + return logits, labels + valid = (labels != ignore_index) + vlogits = logits[valid] + vlabels = labels[valid] + return vlogits, vlabels + + +def flatten_probs(probs, labels, ignore_index=None): + """Flattens predictions in the batch.""" + if probs.dim() == 3: + # assumes output of a sigmoid layer + B, H, W = probs.size() + probs = probs.view(B, 1, H, W) + B, C, H, W = probs.size() + probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C) # B*H*W, C=P,C + labels = labels.view(-1) + if ignore_index is None: + return probs, labels + valid = (labels != ignore_index) + vprobs = probs[valid.nonzero().squeeze()] + vlabels = labels[valid] + return vprobs, vlabels + + +def lovasz_hinge_flat(logits, labels): + """Binary Lovasz hinge loss. + + Args: + logits (torch.Tensor): [P], logits at each prediction + (between -infty and +infty). + labels (torch.Tensor): [P], binary ground truth labels (0 or 1). + + Returns: + torch.Tensor: The calculated loss. + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1. - logits * signs) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + loss = torch.dot(F.relu(errors_sorted), grad) + return loss + + +def lovasz_hinge(logits, + labels, + classes='present', + per_image=False, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=255): + """Binary Lovasz hinge loss. + + Args: + logits (torch.Tensor): [B, H, W], logits at each pixel + (between -infty and +infty). + labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1). + classes (str | list[int], optional): Placeholder, to be consistent with + other loss. Default: None. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. + class_weight (list[float], optional): Placeholder, to be consistent + with other loss. Default: None. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. This parameter only works when per_image is True. + Default: None. + ignore_index (int | None): The label index to be ignored. Default: 255. + + Returns: + torch.Tensor: The calculated loss. + """ + if per_image: + loss = [ + lovasz_hinge_flat(*flatten_binary_logits( + logit.unsqueeze(0), label.unsqueeze(0), ignore_index)) + for logit, label in zip(logits, labels) + ] + loss = weight_reduce_loss( + torch.stack(loss), None, reduction, avg_factor) + else: + loss = lovasz_hinge_flat( + *flatten_binary_logits(logits, labels, ignore_index)) + return loss + + +def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None): + """Multi-class Lovasz-Softmax loss. + + Args: + probs (torch.Tensor): [P, C], class probabilities at each prediction + (between 0 and 1). + labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1). 
+ classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + class_weight (list[float], optional): The weight for each class. + Default: None. + + Returns: + torch.Tensor: The calculated loss. + """ + if probs.numel() == 0: + # only void pixels, the gradients should be 0 + return probs * 0. + C = probs.size(1) + losses = [] + class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes + for c in class_to_sum: + fg = (labels == c).float() # foreground for class c + if (classes == 'present' and fg.sum() == 0): + continue + if C == 1: + if len(classes) > 1: + raise ValueError('Sigmoid output possible only with 1 class') + class_pred = probs[:, 0] + else: + class_pred = probs[:, c] + errors = (fg - class_pred).abs() + errors_sorted, perm = torch.sort(errors, 0, descending=True) + perm = perm.data + fg_sorted = fg[perm] + loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted)) + if class_weight is not None: + loss *= class_weight[c] + losses.append(loss) + return torch.stack(losses).mean() + + +def lovasz_softmax(probs, + labels, + classes='present', + per_image=False, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=255): + """Multi-class Lovasz-Softmax loss. + + Args: + probs (torch.Tensor): [B, C, H, W], class probabilities at each + prediction (between 0 and 1). + labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and + C - 1). + classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. + class_weight (list[float], optional): The weight for each class. + Default: None. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. This parameter only works when per_image is True. + Default: None. + ignore_index (int | None): The label index to be ignored. Default: 255. + + Returns: + torch.Tensor: The calculated loss. + """ + + if per_image: + loss = [ + lovasz_softmax_flat( + *flatten_probs( + prob.unsqueeze(0), label.unsqueeze(0), ignore_index), + classes=classes, + class_weight=class_weight) + for prob, label in zip(probs, labels) + ] + loss = weight_reduce_loss( + torch.stack(loss), None, reduction, avg_factor) + else: + loss = lovasz_softmax_flat( + *flatten_probs(probs, labels, ignore_index), + classes=classes, + class_weight=class_weight) + return loss + + +@LOSSES.register_module() +class LovaszLoss(nn.Module): + """LovaszLoss. + + This loss is proposed in `The Lovasz-Softmax loss: A tractable surrogate + for the optimization of the intersection-over-union measure in neural + networks `_. + + Args: + loss_type (str, optional): Binary or multi-class loss. + Default: 'multi_class'. Options are "binary" and "multi_class". + classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. 
+ reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + loss_name (str, optional): Name of the loss item. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_lovasz'. + """ + + def __init__(self, + loss_type='multi_class', + classes='present', + per_image=False, + reduction='mean', + class_weight=None, + loss_weight=1.0, + loss_name='loss_lovasz'): + super(LovaszLoss, self).__init__() + assert loss_type in ('binary', 'multi_class'), "loss_type should be \ + 'binary' or 'multi_class'." + + if loss_type == 'binary': + self.cls_criterion = lovasz_hinge + else: + self.cls_criterion = lovasz_softmax + assert classes in ('all', 'present') or mmcv.is_list_of(classes, int) + if not per_image: + assert reduction == 'none', "reduction should be 'none' when \ + per_image is False." + + self.classes = classes + self.per_image = per_image + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = get_class_weight(class_weight) + self._loss_name = loss_name + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + + # if multi-class loss, transform logits to probs + if self.cls_criterion == lovasz_softmax: + cls_score = F.softmax(cls_score, dim=1) + + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + self.classes, + self.per_image, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/downstream/mmsegmentation/mmseg/models/losses/utils.py b/downstream/mmsegmentation/mmseg/models/losses/utils.py new file mode 100644 index 0000000..621f57c --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/losses/utils.py @@ -0,0 +1,126 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools + +import mmcv +import numpy as np +import torch +import torch.nn.functional as F + + +def get_class_weight(class_weight): + """Get class weight for loss function. + + Args: + class_weight (list[float] | str | None): If class_weight is a str, + take it as a file name and read from it. + """ + if isinstance(class_weight, str): + # take it as a file path + if class_weight.endswith('.npy'): + class_weight = np.load(class_weight) + else: + # pkl, json or yaml + class_weight = mmcv.load(class_weight) + + return class_weight + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. 
+ reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. + avg_factor (float): Average factor when computing the mean of losses. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + assert weight.dim() == loss.dim() + if weight.dim() > 1: + assert weight.size(1) == 1 or weight.size(1) == loss.size(1) + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + # Avoid causing ZeroDivisionError when avg_factor is 0.0, + # i.e., all labels of an image belong to ignore index. + eps = torch.finfo(torch.float32).eps + loss = loss.sum() / (avg_factor + eps) + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + `loss_func(pred, target, **kwargs)`. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like `loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)`. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred, + target, + weight=None, + reduction='mean', + avg_factor=None, + **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper diff --git a/downstream/mmsegmentation/mmseg/models/necks/__init__.py b/downstream/mmsegmentation/mmseg/models/necks/__init__.py new file mode 100644 index 0000000..ff03186 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/necks/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
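Editor's note: the `weighted_loss` decorator defined above in `losses/utils.py` is what adds the `weight`, `reduction` and `avg_factor` handling to `dice_loss` and `binary_dice_loss` earlier in this patch. A short sketch of the pattern, assuming the `mmseg` package from this patch is importable (otherwise the decorator can be copied locally); `squared_error` is only a toy loss for illustration:

```python
import torch
from mmseg.models.losses.utils import weighted_loss


@weighted_loss
def squared_error(pred, target):
    # Only the element-wise term is written here; weight, reduction and
    # avg_factor handling is injected by the decorator.
    return (pred - target)**2


pred = torch.tensor([0., 2., 3.])
target = torch.tensor([1., 1., 1.])
weight = torch.tensor([1., 0., 1.])

print(squared_error(pred, target))                  # 2.0000 -> mean of [1, 1, 4]
print(squared_error(pred, target, weight))          # 1.6667 -> mean of [1, 0, 4]
print(squared_error(pred, target, reduction='sum')) # 6.0000 -> 1 + 1 + 4
print(squared_error(pred, target, weight, avg_factor=2))  # 2.5000 -> (1 + 4) / 2
```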
+from .featurepyramid import Feature2Pyramid +from .fpn import FPN +from .ic_neck import ICNeck +from .jpu import JPU +from .mla_neck import MLANeck +from .multilevel_neck import MultiLevelNeck + +__all__ = [ + 'FPN', 'MultiLevelNeck', 'MLANeck', 'ICNeck', 'JPU', 'Feature2Pyramid' +] diff --git a/downstream/mmsegmentation/mmseg/models/necks/featurepyramid.py b/downstream/mmsegmentation/mmseg/models/necks/featurepyramid.py new file mode 100644 index 0000000..82a00ce --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/necks/featurepyramid.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import build_norm_layer + +from ..builder import NECKS + + +@NECKS.register_module() +class Feature2Pyramid(nn.Module): + """Feature2Pyramid. + + A neck structure connect ViT backbone and decoder_heads. + + Args: + embed_dims (int): Embedding dimension. + rescales (list[float]): Different sampling multiples were + used to obtain pyramid features. Default: [4, 2, 1, 0.5]. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='SyncBN', requires_grad=True). + """ + + def __init__(self, + embed_dim, + rescales=[4, 2, 1, 0.5], + norm_cfg=dict(type='SyncBN', requires_grad=True)): + super(Feature2Pyramid, self).__init__() + self.rescales = rescales + self.upsample_4x = None + for k in self.rescales: + if k == 4: + self.upsample_4x = nn.Sequential( + nn.ConvTranspose2d( + embed_dim, embed_dim, kernel_size=2, stride=2), + build_norm_layer(norm_cfg, embed_dim)[1], + nn.GELU(), + nn.ConvTranspose2d( + embed_dim, embed_dim, kernel_size=2, stride=2), + ) + elif k == 2: + self.upsample_2x = nn.Sequential( + nn.ConvTranspose2d( + embed_dim, embed_dim, kernel_size=2, stride=2)) + elif k == 1: + self.identity = nn.Identity() + elif k == 0.5: + self.downsample_2x = nn.MaxPool2d(kernel_size=2, stride=2) + elif k == 0.25: + self.downsample_4x = nn.MaxPool2d(kernel_size=4, stride=4) + else: + raise KeyError(f'invalid {k} for feature2pyramid') + + def forward(self, inputs): + assert len(inputs) == len(self.rescales) + outputs = [] + if self.upsample_4x is not None: + ops = [ + self.upsample_4x, self.upsample_2x, self.identity, + self.downsample_2x + ] + else: + ops = [ + self.upsample_2x, self.identity, self.downsample_2x, + self.downsample_4x + ] + for i in range(len(inputs)): + outputs.append(ops[i](inputs[i])) + return tuple(outputs) diff --git a/downstream/mmsegmentation/mmseg/models/necks/fpn.py b/downstream/mmsegmentation/mmseg/models/necks/fpn.py new file mode 100644 index 0000000..6997de9 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/necks/fpn.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, auto_fp16 + +from mmseg.ops import resize +from ..builder import NECKS + + +@NECKS.register_module() +class FPN(BaseModule): + """Feature Pyramid Network. + + This neck is the implementation of `Feature Pyramid Networks for Object + Detection `_. + + Args: + in_channels (list[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. 
+ add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, its actual mode is specified by `extra_convs_on_inputs`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs + on the original feature from the backbone. If True, + it is equivalent to `add_extra_convs='on_input'`. If False, it is + equivalent to set `add_extra_convs='on_output'`. Default to True. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. + upsample_cfg (dict): Config dict for interpolate layer. + Default: dict(mode='nearest'). + init_cfg (dict or list[dict], optional): Initialization config dict. + + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = FPN(in_channels, 11, len(in_channels)).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + extra_convs_on_inputs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest'), + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(FPN, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + if extra_convs_on_inputs: + # For compatibility with previous release + # TODO: deprecate `extra_convs_on_inputs` + self.add_extra_convs = 'on_input' + else: + self.add_extra_convs = 'on_output' + + self.lateral_convs = 
nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + @auto_fp16() + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + laterals[i - 1] = laterals[i - 1] + resize( + laterals[i], **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] = laterals[i - 1] + resize( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/downstream/mmsegmentation/mmseg/models/necks/ic_neck.py b/downstream/mmsegmentation/mmseg/models/necks/ic_neck.py new file mode 100644 index 0000000..a5d81ce --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/necks/ic_neck.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from mmseg.ops import resize +from ..builder import NECKS + + +class CascadeFeatureFusion(BaseModule): + """Cascade Feature Fusion Unit in ICNet. + + Args: + low_channels (int): The number of input channels for + low resolution feature map. 
+ high_channels (int): The number of input channels for + high resolution feature map. + out_channels (int): The number of output channels. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN'). + act_cfg (dict): Dictionary to construct and config act layer. + Default: dict(type='ReLU'). + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Returns: + x (Tensor): The output tensor of shape (N, out_channels, H, W). + x_low (Tensor): The output tensor of shape (N, out_channels, H, W) + for Cascade Label Guidance in auxiliary heads. + """ + + def __init__(self, + low_channels, + high_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + init_cfg=None): + super(CascadeFeatureFusion, self).__init__(init_cfg=init_cfg) + self.align_corners = align_corners + self.conv_low = ConvModule( + low_channels, + out_channels, + 3, + padding=2, + dilation=2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv_high = ConvModule( + high_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x_low, x_high): + x_low = resize( + x_low, + size=x_high.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + # Note: Different from original paper, `x_low` is underwent + # `self.conv_low` rather than another 1x1 conv classifier + # before being used for auxiliary head. + x_low = self.conv_low(x_low) + x_high = self.conv_high(x_high) + x = x_low + x_high + x = F.relu(x, inplace=True) + return x, x_low + + +@NECKS.register_module() +class ICNeck(BaseModule): + """ICNet for Real-Time Semantic Segmentation on High-Resolution Images. + + This head is the implementation of `ICHead + `_. + + Args: + in_channels (int): The number of input image channels. Default: 3. + out_channels (int): The numbers of output feature channels. + Default: 128. + conv_cfg (dict): Dictionary to construct and config conv layer. + Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN'). + act_cfg (dict): Dictionary to construct and config act layer. + Default: dict(type='ReLU'). + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels=(64, 256, 256), + out_channels=128, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False, + init_cfg=None): + super(ICNeck, self).__init__(init_cfg=init_cfg) + assert len(in_channels) == 3, 'Length of input channels \ + must be 3!' 
+ + self.in_channels = in_channels + self.out_channels = out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.align_corners = align_corners + self.cff_24 = CascadeFeatureFusion( + self.in_channels[2], + self.in_channels[1], + self.out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + + self.cff_12 = CascadeFeatureFusion( + self.out_channels, + self.in_channels[0], + self.out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + + def forward(self, inputs): + assert len(inputs) == 3, 'Length of input feature \ + maps must be 3!' + + x_sub1, x_sub2, x_sub4 = inputs + x_cff_24, x_24 = self.cff_24(x_sub4, x_sub2) + x_cff_12, x_12 = self.cff_12(x_cff_24, x_sub1) + # Note: `x_cff_12` is used for decode_head, + # `x_24` and `x_12` are used for auxiliary head. + return x_24, x_12, x_cff_12 diff --git a/downstream/mmsegmentation/mmseg/models/necks/jpu.py b/downstream/mmsegmentation/mmseg/models/necks/jpu.py new file mode 100644 index 0000000..3cc6b9f --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/necks/jpu.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.runner import BaseModule + +from mmseg.ops import resize +from ..builder import NECKS + + +@NECKS.register_module() +class JPU(BaseModule): + """FastFCN: Rethinking Dilated Convolution in the Backbone + for Semantic Segmentation. + + This Joint Pyramid Upsampling (JPU) neck is the implementation of + `FastFCN `_. + + Args: + in_channels (Tuple[int], optional): The number of input channels + for each convolution operations before upsampling. + Default: (512, 1024, 2048). + mid_channels (int): The number of output channels of JPU. + Default: 512. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + dilations (tuple[int]): Dilation rate of each Depthwise + Separable ConvModule. Default: (1, 2, 4, 8). + align_corners (bool, optional): The align_corners argument of + resize operation. Default: False. + conv_cfg (dict | None): Config of conv layers. + Default: None. + norm_cfg (dict | None): Config of norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + in_channels=(512, 1024, 2048), + mid_channels=512, + start_level=0, + end_level=-1, + dilations=(1, 2, 4, 8), + align_corners=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(JPU, self).__init__(init_cfg=init_cfg) + assert isinstance(in_channels, tuple) + assert isinstance(dilations, tuple) + self.in_channels = in_channels + self.mid_channels = mid_channels + self.start_level = start_level + self.num_ins = len(in_channels) + if end_level == -1: + self.backbone_end_level = self.num_ins + else: + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + + self.dilations = dilations + self.align_corners = align_corners + + self.conv_layers = nn.ModuleList() + self.dilation_layers = nn.ModuleList() + for i in range(self.start_level, self.backbone_end_level): + conv_layer = nn.Sequential( + ConvModule( + self.in_channels[i], + self.mid_channels, + kernel_size=3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.conv_layers.append(conv_layer) + for i in range(len(dilations)): + dilation_layer = nn.Sequential( + DepthwiseSeparableConvModule( + in_channels=(self.backbone_end_level - self.start_level) * + self.mid_channels, + out_channels=self.mid_channels, + kernel_size=3, + stride=1, + padding=dilations[i], + dilation=dilations[i], + dw_norm_cfg=norm_cfg, + dw_act_cfg=None, + pw_norm_cfg=norm_cfg, + pw_act_cfg=act_cfg)) + self.dilation_layers.append(dilation_layer) + + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels), 'Length of inputs must \ + be the same with self.in_channels!' + + feats = [ + self.conv_layers[i - self.start_level](inputs[i]) + for i in range(self.start_level, self.backbone_end_level) + ] + + h, w = feats[0].shape[2:] + for i in range(1, len(feats)): + feats[i] = resize( + feats[i], + size=(h, w), + mode='bilinear', + align_corners=self.align_corners) + + feat = torch.cat(feats, dim=1) + concat_feat = torch.cat([ + self.dilation_layers[i](feat) for i in range(len(self.dilations)) + ], + dim=1) + + outs = [] + + # Default: outs[2] is the output of JPU for decoder head, outs[1] is + # the feature map from backbone for auxiliary head. Additionally, + # outs[0] can also be used for auxiliary head. + for i in range(self.start_level, self.backbone_end_level - 1): + outs.append(inputs[i]) + outs.append(concat_feat) + return tuple(outs) diff --git a/downstream/mmsegmentation/mmseg/models/necks/mla_neck.py b/downstream/mmsegmentation/mmseg/models/necks/mla_neck.py new file mode 100644 index 0000000..1513e29 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/necks/mla_neck.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
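Editor's note: as a quick sanity check of the JPU neck added above in `jpu.py`, the sketch below feeds it ResNet-style C3-C5 features and prints the output shapes. It assumes `mmcv-full` and this `mmseg` tree are installed; the input sizes are illustrative only.

```python
import torch

from mmseg.models.necks import JPU

# Typical C3-C5 feature maps for a 512x512 input (strides 8, 16, 32).
inputs = (
    torch.randn(1, 512, 64, 64),
    torch.randn(1, 1024, 32, 32),
    torch.randn(1, 2048, 16, 16),
)
neck = JPU(in_channels=(512, 1024, 2048), mid_channels=512).eval()
with torch.no_grad():
    outs = neck(inputs)
for i, out in enumerate(outs):
    print(i, tuple(out.shape))
# The first two outputs are the untouched backbone features; the last one
# concatenates the four dilated branches at stride 8: (1, 2048, 64, 64).
```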
+import torch.nn as nn +from mmcv.cnn import ConvModule, build_norm_layer + +from ..builder import NECKS + + +class MLAModule(nn.Module): + + def __init__(self, + in_channels=[1024, 1024, 1024, 1024], + out_channels=256, + norm_cfg=None, + act_cfg=None): + super(MLAModule, self).__init__() + self.channel_proj = nn.ModuleList() + for i in range(len(in_channels)): + self.channel_proj.append( + ConvModule( + in_channels=in_channels[i], + out_channels=out_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.feat_extract = nn.ModuleList() + for i in range(len(in_channels)): + self.feat_extract.append( + ConvModule( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs): + + # feat_list -> [p2, p3, p4, p5] + feat_list = [] + for x, conv in zip(inputs, self.channel_proj): + feat_list.append(conv(x)) + + # feat_list -> [p5, p4, p3, p2] + # mid_list -> [m5, m4, m3, m2] + feat_list = feat_list[::-1] + mid_list = [] + for feat in feat_list: + if len(mid_list) == 0: + mid_list.append(feat) + else: + mid_list.append(mid_list[-1] + feat) + + # mid_list -> [m5, m4, m3, m2] + # out_list -> [o2, o3, o4, o5] + out_list = [] + for mid, conv in zip(mid_list, self.feat_extract): + out_list.append(conv(mid)) + + return tuple(out_list) + + +@NECKS.register_module() +class MLANeck(nn.Module): + """Multi-level Feature Aggregation. + + This neck is `The Multi-level Feature Aggregation construction of + SETR `_. + + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + norm_layer (dict): Config dict for input normalization. + Default: norm_layer=dict(type='LN', eps=1e-6, requires_grad=True). + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + norm_layer=dict(type='LN', eps=1e-6, requires_grad=True), + norm_cfg=None, + act_cfg=None): + super(MLANeck, self).__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + + # In order to build general vision transformer backbone, we have to + # move MLA to neck. + self.norm = nn.ModuleList([ + build_norm_layer(norm_layer, in_channels[i])[1] + for i in range(len(in_channels)) + ]) + + self.mla = MLAModule( + in_channels=in_channels, + out_channels=out_channels, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + + # Convert from nchw to nlc + outs = [] + for i in range(len(inputs)): + x = inputs[i] + n, c, h, w = x.shape + x = x.reshape(n, c, h * w).transpose(2, 1).contiguous() + x = self.norm[i](x) + x = x.transpose(1, 2).reshape(n, c, h, w).contiguous() + outs.append(x) + + outs = self.mla(outs) + return tuple(outs) diff --git a/downstream/mmsegmentation/mmseg/models/necks/multilevel_neck.py b/downstream/mmsegmentation/mmseg/models/necks/multilevel_neck.py new file mode 100644 index 0000000..9ac9627 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/necks/multilevel_neck.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule, xavier_init + +from mmseg.ops import resize +from ..builder import NECKS + + +@NECKS.register_module() +class MultiLevelNeck(nn.Module): + """MultiLevelNeck. 
+ + A neck structure connect vit backbone and decoder_heads. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + scales (List[float]): Scale factors for each input feature map. + Default: [0.5, 1, 2, 4] + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer in ConvModule. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + scales=[0.5, 1, 2, 4], + norm_cfg=None, + act_cfg=None): + super(MultiLevelNeck, self).__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.scales = scales + self.num_outs = len(scales) + self.lateral_convs = nn.ModuleList() + self.convs = nn.ModuleList() + for in_channel in in_channels: + self.lateral_convs.append( + ConvModule( + in_channel, + out_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + for _ in range(self.num_outs): + self.convs.append( + ConvModule( + out_channels, + out_channels, + kernel_size=3, + padding=1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + def forward(self, inputs): + # assert len(inputs) == len(self.in_channels) + if len(inputs) == 1: + inputs = [ + lateral_conv(inputs[0]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + else: + inputs = [ + lateral_conv(inputs[i]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + outs = [] + for i in range(self.num_outs): + x_resize = resize( + inputs[i], scale_factor=self.scales[i], mode='bilinear') + outs.append(self.convs[i](x_resize)) + return tuple(outs) diff --git a/downstream/mmsegmentation/mmseg/models/segmentors/__init__.py b/downstream/mmsegmentation/mmseg/models/segmentors/__init__.py new file mode 100644 index 0000000..387c858 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/segmentors/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import BaseSegmentor +from .cascade_encoder_decoder import CascadeEncoderDecoder +from .encoder_decoder import EncoderDecoder + +__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder'] diff --git a/downstream/mmsegmentation/mmseg/models/segmentors/base.py b/downstream/mmsegmentation/mmseg/models/segmentors/base.py new file mode 100644 index 0000000..9b22a7c --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/segmentors/base.py @@ -0,0 +1,286 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
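Editor's note: similarly, a shape sketch for `MultiLevelNeck` from `multilevel_neck.py` above, which turns four same-resolution ViT feature maps into a small pyramid. It again assumes this `mmseg` tree is importable; the embedding size and resolutions are illustrative.

```python
import torch

from mmseg.models.necks import MultiLevelNeck

# Four ViT outputs at 1/16 resolution of a 512x512 image.
feats = [torch.randn(1, 768, 32, 32) for _ in range(4)]
neck = MultiLevelNeck(
    in_channels=[768, 768, 768, 768],
    out_channels=256,
    scales=[0.5, 1, 2, 4])
with torch.no_grad():
    outs = neck(feats)
for out in outs:
    print(tuple(out.shape))
# (1, 256, 16, 16), (1, 256, 32, 32), (1, 256, 64, 64), (1, 256, 128, 128)
```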
+import warnings +from abc import ABCMeta, abstractmethod +from collections import OrderedDict + +import mmcv +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import BaseModule, auto_fp16 + + +class BaseSegmentor(BaseModule, metaclass=ABCMeta): + """Base class for segmentors.""" + + def __init__(self, init_cfg=None): + super(BaseSegmentor, self).__init__(init_cfg) + self.fp16_enabled = False + + @property + def with_neck(self): + """bool: whether the segmentor has neck""" + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_auxiliary_head(self): + """bool: whether the segmentor has auxiliary head""" + return hasattr(self, + 'auxiliary_head') and self.auxiliary_head is not None + + @property + def with_decode_head(self): + """bool: whether the segmentor has decode head""" + return hasattr(self, 'decode_head') and self.decode_head is not None + + @abstractmethod + def extract_feat(self, imgs): + """Placeholder for extract features from images.""" + pass + + @abstractmethod + def encode_decode(self, img, img_metas): + """Placeholder for encode images with backbone and decode into a + semantic segmentation map of the same size as input.""" + pass + + @abstractmethod + def forward_train(self, imgs, img_metas, **kwargs): + """Placeholder for Forward function for training.""" + pass + + @abstractmethod + def simple_test(self, img, img_meta, **kwargs): + """Placeholder for single image test.""" + pass + + @abstractmethod + def aug_test(self, imgs, img_metas, **kwargs): + """Placeholder for augmentation test.""" + pass + + def forward_test(self, imgs, img_metas, **kwargs): + """ + Args: + imgs (List[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + img_metas (List[List[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. + """ + for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got ' + f'{type(var)}') + + num_augs = len(imgs) + if num_augs != len(img_metas): + raise ValueError(f'num of augmentations ({len(imgs)}) != ' + f'num of image meta ({len(img_metas)})') + # all images in the same aug batch all of the same ori_shape and pad + # shape + for img_meta in img_metas: + ori_shapes = [_['ori_shape'] for _ in img_meta] + assert all(shape == ori_shapes[0] for shape in ori_shapes) + img_shapes = [_['img_shape'] for _ in img_meta] + assert all(shape == img_shapes[0] for shape in img_shapes) + pad_shapes = [_['pad_shape'] for _ in img_meta] + assert all(shape == pad_shapes[0] for shape in pad_shapes) + + if num_augs == 1: + return self.simple_test(imgs[0], img_metas[0], **kwargs) + else: + return self.aug_test(imgs, img_metas, **kwargs) + + @auto_fp16(apply_to=('img', )) + def forward(self, img, img_metas, return_loss=True, **kwargs): + """Calls either :func:`forward_train` or :func:`forward_test` depending + on whether ``return_loss`` is ``True``. + + Note this setting will change the expected inputs. When + ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor + and List[dict]), and when ``resturn_loss=False``, img and img_meta + should be double nested (i.e. List[Tensor], List[List[dict]]), with + the outer list indicating test time augmentations. 
+ """ + if return_loss: + return self.forward_train(img, img_metas, **kwargs) + else: + return self.forward_test(img, img_metas, **kwargs) + + def train_step(self, data_batch, optimizer, **kwargs): + """The iteration step during training. + + This method defines an iteration step during training, except for the + back propagation and optimizer updating, which are done in an optimizer + hook. Note that in some complicated cases or models, the whole process + including back propagation and optimizer updating is also defined in + this method, such as GAN. + + Args: + data (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of + runner is passed to ``train_step()``. This argument is unused + and reserved. + + Returns: + dict: It should contain at least 3 keys: ``loss``, ``log_vars``, + ``num_samples``. + ``loss`` is a tensor for back propagation, which can be a + weighted sum of multiple losses. + ``log_vars`` contains all the variables to be sent to the + logger. + ``num_samples`` indicates the batch size (when the model is + DDP, it means the batch size on each GPU), which is used for + averaging the logs. + """ + losses = self(**data_batch) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, + log_vars=log_vars, + num_samples=len(data_batch['img_metas'])) + + return outputs + + def val_step(self, data_batch, optimizer=None, **kwargs): + """The iteration step during validation. + + This method shares the same signature as :func:`train_step`, but used + during val epochs. Note that the evaluation after training epochs is + not implemented with this method, but an evaluation hook. + """ + losses = self(**data_batch) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, + log_vars=log_vars, + num_samples=len(data_batch['img_metas'])) + + return outputs + + @staticmethod + def _parse_losses(losses): + """Parse the raw outputs (losses) of the network. + + Args: + losses (dict): Raw output of the network, which usually contain + losses and other necessary information. + + Returns: + tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor + which may be a weighted sum of all losses, log_vars contains + all the variables to be sent to the logger. + """ + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + else: + raise TypeError( + f'{loss_name} is not a tensor or list of tensors') + + loss = sum(_value for _key, _value in log_vars.items() + if 'loss' in _key) + + # If the loss_vars has different length, raise assertion error + # to prevent GPUs from infinite waiting. 
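+        # Each rank contributes len(log_vars) to the all_reduce below; if the
+        # summed count differs from len(log_vars) * world_size, some rank
+        # produced a different set of losses and the later per-value
+        # all_reduce calls would hang.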
+ if dist.is_available() and dist.is_initialized(): + log_var_length = torch.tensor(len(log_vars), device=loss.device) + dist.all_reduce(log_var_length) + message = (f'rank {dist.get_rank()}' + + f' len(log_vars): {len(log_vars)}' + ' keys: ' + + ','.join(log_vars.keys()) + '\n') + assert log_var_length == len(log_vars) * dist.get_world_size(), \ + 'loss log variables are different across GPUs!\n' + message + + log_vars['loss'] = loss + for loss_name, loss_value in log_vars.items(): + # reduce loss when distributed training + if dist.is_available() and dist.is_initialized(): + loss_value = loss_value.data.clone() + dist.all_reduce(loss_value.div_(dist.get_world_size())) + log_vars[loss_name] = loss_value.item() + + return loss, log_vars + + def show_result(self, + img, + result, + palette=None, + win_name='', + show=False, + wait_time=0, + out_file=None, + opacity=0.5): + """Draw `result` over `img`. + + Args: + img (str or Tensor): The image to be displayed. + result (Tensor): The semantic segmentation results to draw over + `img`. + palette (list[list[int]]] | np.ndarray | None): The palette of + segmentation map. If None is given, random palette will be + generated. Default: None + win_name (str): The window name. + wait_time (int): Value of waitKey param. + Default: 0. + show (bool): Whether to show the image. + Default: False. + out_file (str or None): The filename to write the image. + Default: None. + opacity(float): Opacity of painted segmentation map. + Default 0.5. + Must be in (0, 1] range. + Returns: + img (Tensor): Only if not `show` or `out_file` + """ + img = mmcv.imread(img) + img = img.copy() + seg = result[0] + if palette is None: + if self.PALETTE is None: + # Get random state before set seed, + # and restore random state later. + # It will prevent loss of randomness, as the palette + # may be different in each iteration if not specified. + # See: https://github.com/open-mmlab/mmdetection/issues/5844 + state = np.random.get_state() + np.random.seed(42) + # random palette + palette = np.random.randint( + 0, 255, size=(len(self.CLASSES), 3)) + np.random.set_state(state) + else: + palette = self.PALETTE + palette = np.array(palette) + assert palette.shape[0] == len(self.CLASSES) + assert palette.shape[1] == 3 + assert len(palette.shape) == 2 + assert 0 < opacity <= 1.0 + color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) + for label, color in enumerate(palette): + color_seg[seg == label, :] = color + # convert to BGR + color_seg = color_seg[..., ::-1] + + img = img * (1 - opacity) + color_seg * opacity + img = img.astype(np.uint8) + # if out_file specified, do not show image in window + if out_file is not None: + show = False + + if show: + mmcv.imshow(img, win_name, wait_time) + if out_file is not None: + mmcv.imwrite(img, out_file) + + if not (show or out_file): + warnings.warn('show==False and out_file is not specified, only ' + 'result image will be returned') + return img diff --git a/downstream/mmsegmentation/mmseg/models/segmentors/cascade_encoder_decoder.py b/downstream/mmsegmentation/mmseg/models/segmentors/cascade_encoder_decoder.py new file mode 100644 index 0000000..1913a22 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/segmentors/cascade_encoder_decoder.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from torch import nn + +from mmseg.core import add_prefix +from mmseg.ops import resize +from .. 
import builder +from ..builder import SEGMENTORS +from .encoder_decoder import EncoderDecoder + + +@SEGMENTORS.register_module() +class CascadeEncoderDecoder(EncoderDecoder): + """Cascade Encoder Decoder segmentors. + + CascadeEncoderDecoder almost the same as EncoderDecoder, while decoders of + CascadeEncoderDecoder are cascaded. The output of previous decoder_head + will be the input of next decoder_head. + """ + + def __init__(self, + num_stages, + backbone, + decode_head, + neck=None, + auxiliary_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + self.num_stages = num_stages + super(CascadeEncoderDecoder, self).__init__( + backbone=backbone, + decode_head=decode_head, + neck=neck, + auxiliary_head=auxiliary_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + init_cfg=init_cfg) + + def _init_decode_head(self, decode_head): + """Initialize ``decode_head``""" + assert isinstance(decode_head, list) + assert len(decode_head) == self.num_stages + self.decode_head = nn.ModuleList() + for i in range(self.num_stages): + self.decode_head.append(builder.build_head(decode_head[i])) + self.align_corners = self.decode_head[-1].align_corners + self.num_classes = self.decode_head[-1].num_classes + + def encode_decode(self, img, img_metas): + """Encode images with backbone and decode into a semantic segmentation + map of the same size as input.""" + x = self.extract_feat(img) + out = self.decode_head[0].forward_test(x, img_metas, self.test_cfg) + for i in range(1, self.num_stages): + out = self.decode_head[i].forward_test(x, out, img_metas, + self.test_cfg) + out = resize( + input=out, + size=img.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + return out + + def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): + """Run forward function and calculate loss for decode head in + training.""" + losses = dict() + + loss_decode = self.decode_head[0].forward_train( + x, img_metas, gt_semantic_seg, self.train_cfg) + + losses.update(add_prefix(loss_decode, 'decode_0')) + + for i in range(1, self.num_stages): + # forward test again, maybe unnecessary for most methods. + if i == 1: + prev_outputs = self.decode_head[0].forward_test( + x, img_metas, self.test_cfg) + else: + prev_outputs = self.decode_head[i - 1].forward_test( + x, prev_outputs, img_metas, self.test_cfg) + loss_decode = self.decode_head[i].forward_train( + x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg) + losses.update(add_prefix(loss_decode, f'decode_{i}')) + + return losses diff --git a/downstream/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py b/downstream/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py new file mode 100644 index 0000000..72467b4 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py @@ -0,0 +1,284 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmseg.core import add_prefix +from mmseg.ops import resize +from .. import builder +from ..builder import SEGMENTORS +from .base import BaseSegmentor + + +@SEGMENTORS.register_module() +class EncoderDecoder(BaseSegmentor): + """Encoder Decoder segmentors. + + EncoderDecoder typically consists of backbone, decode_head, auxiliary_head. + Note that auxiliary_head is only used for deep supervision during training, + which could be dumped during inference. 
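+
+    During inference, :meth:`encode_decode` runs the input through
+    ``backbone -> (optional) neck -> decode_head`` and bilinearly resizes the
+    resulting logits back to the input resolution.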
+ """ + + def __init__(self, + backbone, + decode_head, + neck=None, + auxiliary_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(EncoderDecoder, self).__init__(init_cfg) + if pretrained is not None: + assert backbone.get('pretrained') is None, \ + 'both backbone and segmentor set pretrained weight' + backbone.pretrained = pretrained + self.backbone = builder.build_backbone(backbone) + if neck is not None: + self.neck = builder.build_neck(neck) + self._init_decode_head(decode_head) + self._init_auxiliary_head(auxiliary_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + assert self.with_decode_head + + def _init_decode_head(self, decode_head): + """Initialize ``decode_head``""" + self.decode_head = builder.build_head(decode_head) + self.align_corners = self.decode_head.align_corners + self.num_classes = self.decode_head.num_classes + + def _init_auxiliary_head(self, auxiliary_head): + """Initialize ``auxiliary_head``""" + if auxiliary_head is not None: + if isinstance(auxiliary_head, list): + self.auxiliary_head = nn.ModuleList() + for head_cfg in auxiliary_head: + self.auxiliary_head.append(builder.build_head(head_cfg)) + else: + self.auxiliary_head = builder.build_head(auxiliary_head) + + def extract_feat(self, img): + """Extract features from images.""" + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def encode_decode(self, img, img_metas): + """Encode images with backbone and decode into a semantic segmentation + map of the same size as input.""" + x = self.extract_feat(img) + out = self._decode_head_forward_test(x, img_metas) + out = resize( + input=out, + size=img.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + return out + + def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): + """Run forward function and calculate loss for decode head in + training.""" + losses = dict() + loss_decode = self.decode_head.forward_train(x, img_metas, + gt_semantic_seg, + self.train_cfg) + + losses.update(add_prefix(loss_decode, 'decode')) + return losses + + def _decode_head_forward_test(self, x, img_metas): + """Run forward function and calculate loss for decode head in + inference.""" + seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg) + return seg_logits + + def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg): + """Run forward function and calculate loss for auxiliary head in + training.""" + losses = dict() + if isinstance(self.auxiliary_head, nn.ModuleList): + for idx, aux_head in enumerate(self.auxiliary_head): + loss_aux = aux_head.forward_train(x, img_metas, + gt_semantic_seg, + self.train_cfg) + losses.update(add_prefix(loss_aux, f'aux_{idx}')) + else: + loss_aux = self.auxiliary_head.forward_train( + x, img_metas, gt_semantic_seg, self.train_cfg) + losses.update(add_prefix(loss_aux, 'aux')) + + return losses + + def forward_dummy(self, img): + """Dummy forward function.""" + seg_logit = self.encode_decode(img, None) + + return seg_logit + + def forward_train(self, img, img_metas, gt_semantic_seg): + """Forward function for training. + + Args: + img (Tensor): Input images. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. 
+ gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + + x = self.extract_feat(img) + + losses = dict() + + loss_decode = self._decode_head_forward_train(x, img_metas, + gt_semantic_seg) + losses.update(loss_decode) + + if self.with_auxiliary_head: + loss_aux = self._auxiliary_head_forward_train( + x, img_metas, gt_semantic_seg) + losses.update(loss_aux) + + return losses + + # TODO refactor + def slide_inference(self, img, img_meta, rescale): + """Inference by sliding-window with overlap. + + If h_crop > h_img or w_crop > w_img, the small patch will be used to + decode without padding. + """ + + h_stride, w_stride = self.test_cfg.stride + h_crop, w_crop = self.test_cfg.crop_size + batch_size, _, h_img, w_img = img.size() + num_classes = self.num_classes + h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1 + w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1 + preds = img.new_zeros((batch_size, num_classes, h_img, w_img)) + count_mat = img.new_zeros((batch_size, 1, h_img, w_img)) + for h_idx in range(h_grids): + for w_idx in range(w_grids): + y1 = h_idx * h_stride + x1 = w_idx * w_stride + y2 = min(y1 + h_crop, h_img) + x2 = min(x1 + w_crop, w_img) + y1 = max(y2 - h_crop, 0) + x1 = max(x2 - w_crop, 0) + crop_img = img[:, :, y1:y2, x1:x2] + crop_seg_logit = self.encode_decode(crop_img, img_meta) + preds += F.pad(crop_seg_logit, + (int(x1), int(preds.shape[3] - x2), int(y1), + int(preds.shape[2] - y2))) + + count_mat[:, :, y1:y2, x1:x2] += 1 + assert (count_mat == 0).sum() == 0 + if torch.onnx.is_in_onnx_export(): + # cast count_mat to constant while exporting to ONNX + count_mat = torch.from_numpy( + count_mat.cpu().detach().numpy()).to(device=img.device) + preds = preds / count_mat + if rescale: + preds = resize( + preds, + size=img_meta[0]['ori_shape'][:2], + mode='bilinear', + align_corners=self.align_corners, + warning=False) + return preds + + def whole_inference(self, img, img_meta, rescale): + """Inference with full image.""" + + seg_logit = self.encode_decode(img, img_meta) + if rescale: + # support dynamic shape for onnx + if torch.onnx.is_in_onnx_export(): + size = img.shape[2:] + else: + size = img_meta[0]['ori_shape'][:2] + seg_logit = resize( + seg_logit, + size=size, + mode='bilinear', + align_corners=self.align_corners, + warning=False) + + return seg_logit + + def inference(self, img, img_meta, rescale): + """Inference with slide/whole style. + + Args: + img (Tensor): The input image of shape (N, 3, H, W). + img_meta (dict): Image info dict where each dict has: 'img_shape', + 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + rescale (bool): Whether rescale back to original shape. + + Returns: + Tensor: The output segmentation map. 
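+
+        The returned tensor holds per-class softmax probabilities; if the
+        image meta indicates the input was flipped, the prediction is flipped
+        back before it is returned.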
+ """ + + assert self.test_cfg.mode in ['slide', 'whole'] + ori_shape = img_meta[0]['ori_shape'] + assert all(_['ori_shape'] == ori_shape for _ in img_meta) + if self.test_cfg.mode == 'slide': + seg_logit = self.slide_inference(img, img_meta, rescale) + else: + seg_logit = self.whole_inference(img, img_meta, rescale) + output = F.softmax(seg_logit, dim=1) + flip = img_meta[0]['flip'] + if flip: + flip_direction = img_meta[0]['flip_direction'] + assert flip_direction in ['horizontal', 'vertical'] + if flip_direction == 'horizontal': + output = output.flip(dims=(3, )) + elif flip_direction == 'vertical': + output = output.flip(dims=(2, )) + + return output + + def simple_test(self, img, img_meta, rescale=True): + """Simple test with single image.""" + seg_logit = self.inference(img, img_meta, rescale) + seg_pred = seg_logit.argmax(dim=1) + if torch.onnx.is_in_onnx_export(): + # our inference backend only support 4D output + seg_pred = seg_pred.unsqueeze(0) + return seg_pred + seg_pred = seg_pred.cpu().numpy() + # unravel batch dim + seg_pred = list(seg_pred) + return seg_pred + + def aug_test(self, imgs, img_metas, rescale=True): + """Test with augmentations. + + Only rescale=True is supported. + """ + # aug_test rescale all imgs back to ori_shape for now + assert rescale + # to save memory, we get augmented seg logit inplace + seg_logit = self.inference(imgs[0], img_metas[0], rescale) + for i in range(1, len(imgs)): + cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale) + seg_logit += cur_seg_logit + seg_logit /= len(imgs) + seg_pred = seg_logit.argmax(dim=1) + seg_pred = seg_pred.cpu().numpy() + # unravel batch dim + seg_pred = list(seg_pred) + return seg_pred diff --git a/downstream/mmsegmentation/mmseg/models/utils/__init__.py b/downstream/mmsegmentation/mmseg/models/utils/__init__.py new file mode 100644 index 0000000..6d83290 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .embed import PatchEmbed +from .inverted_residual import InvertedResidual, InvertedResidualV3 +from .make_divisible import make_divisible +from .res_layer import ResLayer +from .se_layer import SELayer +from .self_attention_block import SelfAttentionBlock +from .shape_convert import (nchw2nlc2nchw, nchw_to_nlc, nlc2nchw2nlc, + nlc_to_nchw) +from .up_conv_block import UpConvBlock + +__all__ = [ + 'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual', + 'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'PatchEmbed', + 'nchw_to_nlc', 'nlc_to_nchw', 'nchw2nlc2nchw', 'nlc2nchw2nlc' +] diff --git a/downstream/mmsegmentation/mmseg/models/utils/embed.py b/downstream/mmsegmentation/mmseg/models/utils/embed.py new file mode 100644 index 0000000..1515675 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/embed.py @@ -0,0 +1,330 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Sequence + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner.base_module import BaseModule +from mmcv.utils import to_2tuple + + +class AdaptivePadding(nn.Module): + """Applies padding to input (if needed) so that input can get fully covered + by filter you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around + input. The "corner" mode would pad zero to bottom right. 
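+    For each spatial dimension, the required padding is
+    ``max((ceil(in/stride) - 1) * stride + (kernel - 1) * dilation + 1 - in, 0)``,
+    which mirrors the computation in :meth:`get_pad_shape` below.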
+ + Args: + kernel_size (int | tuple): Size of the kernel: + stride (int | tuple): Stride of the filter. Default: 1: + dilation (int | tuple): Spacing between kernel elements. + Default: 1. + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. Default: "corner". + Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + + super(AdaptivePadding, self).__init__() + + assert padding in ('same', 'corner') + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + input_h, input_w = input_shape + kernel_h, kernel_w = self.kernel_size + stride_h, stride_w = self.stride + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) + return pad_h, pad_w + + def forward(self, x): + pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ]) + return x + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The config dict for embedding + conv layer type selection. Default: "Conv2d". + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int, optional): The slide stride of embedding conv. + Default: None (Would be set as `kernel_size`). + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only work when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. 
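+
+    Example:
+        >>> # Illustrative only: a 224x224 input with the default 16x16
+        >>> # patches gives 14 * 14 = 196 tokens of size ``embed_dims``.
+        >>> import torch
+        >>> patch_embed = PatchEmbed(in_channels=3, embed_dims=768)
+        >>> x = torch.rand(1, 3, 224, 224)
+        >>> tokens, out_size = patch_embed(x)
+        >>> assert tuple(tokens.shape) == (1, 196, 768)
+        >>> assert out_size == (14, 14)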
+ """ + + def __init__(self, + in_channels=3, + embed_dims=768, + conv_type='Conv2d', + kernel_size=16, + stride=None, + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None): + super(PatchEmbed, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + if stride is None: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adap_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adap_padding: + pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) + input_h, input_w = input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adap_padding: + x = self.adap_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +class PatchMerging(BaseModule): + """Merge patch feature map. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map. Our implementation uses `nn.Unfold` to + merge patch, which is about 25% faster than original implementation. + Instead, we need to modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Default: None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults: False. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='LN'). 
+ init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adap_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + + if self.adap_padding: + x = self.adap_padding(x) + H, W = x.shape[-2:] + + x = self.sampler(x) + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + x = self.norm(x) if self.norm else x + x = self.reduction(x) + return x, output_size diff --git a/downstream/mmsegmentation/mmseg/models/utils/inverted_residual.py b/downstream/mmsegmentation/mmseg/models/utils/inverted_residual.py new file mode 100644 index 0000000..c9cda76 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/inverted_residual.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import ConvModule +from torch import nn +from torch.utils import checkpoint as cp + +from .se_layer import SELayer + + +class InvertedResidual(nn.Module): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): Adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + dilation (int): Dilation rate of depthwise conv. 
Default: 1 + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + dilation=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False, + **kwargs): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + **kwargs)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=dilation, + dilation=dilation, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + **kwargs), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + **kwargs) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class InvertedResidualV3(nn.Module): + """Inverted Residual Block for MobileNetV3. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Default: 3. + stride (int): The stride of the depthwise convolution. Default: 1. + se_cfg (dict): Config dict for se layer. Default: None, which means no + se layer. + with_expand_conv (bool): Use expand conv or not. If set False, + mid_channels must be the same with in_channels. Default: True. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. 
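+
+    Note:
+        When ``stride`` is 2, the depthwise conv is built with
+        ``dict(type='Conv2dAdaptivePadding')`` instead of the given
+        ``conv_cfg``, so the input of the downsampling convolution is padded
+        adaptively.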
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + with_expand_conv=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False): + super(InvertedResidualV3, self).__init__() + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2] + self.with_cp = with_cp + self.with_se = se_cfg is not None + self.with_expand_conv = with_expand_conv + + if self.with_se: + assert isinstance(se_cfg, dict) + if not self.with_expand_conv: + assert mid_channels == in_channels + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=mid_channels, + conv_cfg=dict( + type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if self.with_se: + self.se = SELayer(**se_cfg) + + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + out + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/downstream/mmsegmentation/mmseg/models/utils/make_divisible.py b/downstream/mmsegmentation/mmseg/models/utils/make_divisible.py new file mode 100644 index 0000000..ed42c2e --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/make_divisible.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. + + This function rounds the channel number to the nearest value that can be + divisible by the divisor. It is taken from the original tf repo. It ensures + that all layers have a channel number that is divisible by divisor. It can + be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa + + Args: + value (int): The original channel number. + divisor (int): The divisor to fully divide the channel number. + min_value (int): The minimum value of the output channel. + Default: None, means that the minimum value equal to the divisor. + min_ratio (float): The minimum ratio of the rounded channel number to + the original channel number. Default: 0.9. + + Returns: + int: The modified output channel number. + """ + + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than (1-min_ratio). 
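+    # e.g. make_divisible(40, 32): the nearest multiple of 32 is 32, but
+    # 32 < 0.9 * 40, so the result is bumped up to the next multiple, 64.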
+ if new_value < min_ratio * value: + new_value += divisor + return new_value diff --git a/downstream/mmsegmentation/mmseg/models/utils/res_layer.py b/downstream/mmsegmentation/mmseg/models/utils/res_layer.py new file mode 100644 index 0000000..190a0c5 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/res_layer.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import Sequential +from torch import nn as nn + + +class ResLayer(Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + multi_grid (int | None): Multi grid dilation rates of last + stage. Default: None + contract_dilation (bool): Whether contract first dilation of each layer + Default: False + """ + + def __init__(self, + block, + inplanes, + planes, + num_blocks, + stride=1, + dilation=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + multi_grid=None, + contract_dilation=False, + **kwargs): + self.block = block + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + if avg_down: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if multi_grid is None: + if dilation > 1 and contract_dilation: + first_dilation = dilation // 2 + else: + first_dilation = dilation + else: + first_dilation = multi_grid[0] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + dilation=first_dilation, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + inplanes = planes * block.expansion + for i in range(1, num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + dilation=dilation if multi_grid is None else multi_grid[i], + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) diff --git a/downstream/mmsegmentation/mmseg/models/utils/se_layer.py b/downstream/mmsegmentation/mmseg/models/utils/se_layer.py new file mode 100644 index 0000000..16f52aa --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/se_layer.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch.nn as nn +from mmcv.cnn import ConvModule + +from .make_divisible import make_divisible + + +class SELayer(nn.Module): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will be + ``int(channels/ratio)``. Default: 16. + conv_cfg (None or dict): Config dict for convolution layer. + Default: None, which means using conv2d. 
+ act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configured + by this dict. If act_cfg is a sequence of dicts, the first + activation layer will be configured by the first dict and the + second activation layer will be configured by the second dict. + Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, + divisor=6.0)). + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=3.0, divisor=6.0))): + super(SELayer, self).__init__() + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmcv.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=make_divisible(channels // ratio, 8), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=make_divisible(channels // ratio, 8), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out diff --git a/downstream/mmsegmentation/mmseg/models/utils/self_attention_block.py b/downstream/mmsegmentation/mmseg/models/utils/self_attention_block.py new file mode 100644 index 0000000..c945fa7 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/self_attention_block.py @@ -0,0 +1,160 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.cnn import ConvModule, constant_init +from torch import nn as nn +from torch.nn import functional as F + + +class SelfAttentionBlock(nn.Module): + """General self-attention block/non-local block. + + Please refer to https://arxiv.org/abs/1706.03762 for details about key, + query and value. + + Args: + key_in_channels (int): Input channels of key feature. + query_in_channels (int): Input channels of query feature. + channels (int): Output channels of key/query transform. + out_channels (int): Output channels. + share_key_query (bool): Whether share projection weight between key + and query projection. + query_downsample (nn.Module): Query downsample module. + key_downsample (nn.Module): Key downsample module. + key_query_num_convs (int): Number of convs for key/query projection. + value_num_convs (int): Number of convs for value projection. + matmul_norm (bool): Whether normalize attention map with sqrt of + channels + with_out (bool): Whether use out projection. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict|None): Config of activation layers. 
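+
+    The attention computed in :meth:`forward` is
+    ``softmax(query @ key * channels ** -0.5) @ value`` when ``matmul_norm``
+    is True, and the same product without the scaling otherwise.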
+ """ + + def __init__(self, key_in_channels, query_in_channels, channels, + out_channels, share_key_query, query_downsample, + key_downsample, key_query_num_convs, value_out_num_convs, + key_query_norm, value_out_norm, matmul_norm, with_out, + conv_cfg, norm_cfg, act_cfg): + super(SelfAttentionBlock, self).__init__() + if share_key_query: + assert key_in_channels == query_in_channels + self.key_in_channels = key_in_channels + self.query_in_channels = query_in_channels + self.out_channels = out_channels + self.channels = channels + self.share_key_query = share_key_query + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.key_project = self.build_project( + key_in_channels, + channels, + num_convs=key_query_num_convs, + use_conv_module=key_query_norm, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if share_key_query: + self.query_project = self.key_project + else: + self.query_project = self.build_project( + query_in_channels, + channels, + num_convs=key_query_num_convs, + use_conv_module=key_query_norm, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.value_project = self.build_project( + key_in_channels, + channels if with_out else out_channels, + num_convs=value_out_num_convs, + use_conv_module=value_out_norm, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if with_out: + self.out_project = self.build_project( + channels, + out_channels, + num_convs=value_out_num_convs, + use_conv_module=value_out_norm, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.out_project = None + + self.query_downsample = query_downsample + self.key_downsample = key_downsample + self.matmul_norm = matmul_norm + + self.init_weights() + + def init_weights(self): + """Initialize weight of later layer.""" + if self.out_project is not None: + if not isinstance(self.out_project, ConvModule): + constant_init(self.out_project, 0) + + def build_project(self, in_channels, channels, num_convs, use_conv_module, + conv_cfg, norm_cfg, act_cfg): + """Build projection layer for key/query/value/out.""" + if use_conv_module: + convs = [ + ConvModule( + in_channels, + channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + ] + for _ in range(num_convs - 1): + convs.append( + ConvModule( + channels, + channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + else: + convs = [nn.Conv2d(in_channels, channels, 1)] + for _ in range(num_convs - 1): + convs.append(nn.Conv2d(channels, channels, 1)) + if len(convs) > 1: + convs = nn.Sequential(*convs) + else: + convs = convs[0] + return convs + + def forward(self, query_feats, key_feats): + """Forward function.""" + batch_size = query_feats.size(0) + query = self.query_project(query_feats) + if self.query_downsample is not None: + query = self.query_downsample(query) + query = query.reshape(*query.shape[:2], -1) + query = query.permute(0, 2, 1).contiguous() + + key = self.key_project(key_feats) + value = self.value_project(key_feats) + if self.key_downsample is not None: + key = self.key_downsample(key) + value = self.key_downsample(value) + key = key.reshape(*key.shape[:2], -1) + value = value.reshape(*value.shape[:2], -1) + value = value.permute(0, 2, 1).contiguous() + + sim_map = torch.matmul(query, key) + if self.matmul_norm: + sim_map = (self.channels**-.5) * sim_map + sim_map = F.softmax(sim_map, dim=-1) + + context = torch.matmul(sim_map, value) + context = context.permute(0, 2, 1).contiguous() + context = 
context.reshape(batch_size, -1, *query_feats.shape[2:]) + if self.out_project is not None: + context = self.out_project(context) + return context diff --git a/downstream/mmsegmentation/mmseg/models/utils/shape_convert.py b/downstream/mmsegmentation/mmseg/models/utils/shape_convert.py new file mode 100644 index 0000000..cce1e22 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/shape_convert.py @@ -0,0 +1,107 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def nlc_to_nchw(x, hw_shape): + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. + + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len doesn\'t match H, W' + return x.transpose(1, 2).reshape(B, C, H, W) + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +def nchw2nlc2nchw(module, x, contiguous=False, **kwargs): + """Flatten [N, C, H, W] shape tensor `x` to [N, L, C] shape tensor. Use the + reshaped tensor as the input of `module`, and the convert the output of + `module`, whose shape is. + + [N, L, C], to [N, C, H, W]. + + Args: + module (Callable): A callable object the takes a tensor + with shape [N, L, C] as input. + x (Tensor): The input tensor of shape [N, C, H, W]. + contiguous: + contiguous (Bool): Whether to make the tensor contiguous + after each shape transform. + + Returns: + Tensor: The output tensor of shape [N, C, H, W]. + + Example: + >>> import torch + >>> import torch.nn as nn + >>> norm = nn.LayerNorm(4) + >>> feature_map = torch.rand(4, 4, 5, 5) + >>> output = nchw2nlc2nchw(norm, feature_map) + """ + B, C, H, W = x.shape + if not contiguous: + x = x.flatten(2).transpose(1, 2) + x = module(x, **kwargs) + x = x.transpose(1, 2).reshape(B, C, H, W) + else: + x = x.flatten(2).transpose(1, 2).contiguous() + x = module(x, **kwargs) + x = x.transpose(1, 2).reshape(B, C, H, W).contiguous() + return x + + +def nlc2nchw2nlc(module, x, hw_shape, contiguous=False, **kwargs): + """Convert [N, L, C] shape tensor `x` to [N, C, H, W] shape tensor. Use the + reshaped tensor as the input of `module`, and convert the output of + `module`, whose shape is. + + [N, C, H, W], to [N, L, C]. + + Args: + module (Callable): A callable object the takes a tensor + with shape [N, C, H, W] as input. + x (Tensor): The input tensor of shape [N, L, C]. + hw_shape: (Sequence[int]): The height and width of the + feature map with shape [N, C, H, W]. + contiguous (Bool): Whether to make the tensor contiguous + after each shape transform. + + Returns: + Tensor: The output tensor of shape [N, L, C]. 
+ + Example: + >>> import torch + >>> import torch.nn as nn + >>> conv = nn.Conv2d(16, 16, 3, 1, 1) + >>> feature_map = torch.rand(4, 25, 16) + >>> output = nlc2nchw2nlc(conv, feature_map, (5, 5)) + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len doesn\'t match H, W' + if not contiguous: + x = x.transpose(1, 2).reshape(B, C, H, W) + x = module(x, **kwargs) + x = x.flatten(2).transpose(1, 2) + else: + x = x.transpose(1, 2).reshape(B, C, H, W).contiguous() + x = module(x, **kwargs) + x = x.flatten(2).transpose(1, 2).contiguous() + return x diff --git a/downstream/mmsegmentation/mmseg/models/utils/up_conv_block.py b/downstream/mmsegmentation/mmseg/models/utils/up_conv_block.py new file mode 100644 index 0000000..d8396d9 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/models/utils/up_conv_block.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, build_upsample_layer + + +class UpConvBlock(nn.Module): + """Upsample convolution block in decoder for UNet. + + This upsample convolution block consists of one upsample module + followed by one convolution block. The upsample module expands the + high-level low-resolution feature map and the convolution block fuses + the upsampled high-level low-resolution feature map and the low-level + high-resolution feature map from encoder. + + Args: + conv_block (nn.Sequential): Sequential of convolutional layers. + in_channels (int): Number of input channels of the high-level + skip_channels (int): Number of input channels of the low-level + high-resolution feature map from encoder. + out_channels (int): Number of output channels. + num_convs (int): Number of convolutional layers in the conv_block. + Default: 2. + stride (int): Stride of convolutional layer in conv_block. Default: 1. + dilation (int): Dilation rate of convolutional layer in conv_block. + Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + upsample_cfg (dict): The upsample config of the upsample module in + decoder. Default: dict(type='InterpConv'). If the size of + high-level feature map is the same as that of skip feature map + (low-level feature map from encoder), it does not need upsample the + high-level feature map and the upsample_cfg is None. + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + """ + + def __init__(self, + conv_block, + in_channels, + skip_channels, + out_channels, + num_convs=2, + stride=1, + dilation=1, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + dcn=None, + plugins=None): + super(UpConvBlock, self).__init__() + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
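+
+        # ``forward`` concatenates the upsampled high-level feature map
+        # (projected to ``skip_channels``) with the encoder skip feature map,
+        # so the fused conv block takes ``2 * skip_channels`` input channels.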
+ + self.conv_block = conv_block( + in_channels=2 * skip_channels, + out_channels=out_channels, + num_convs=num_convs, + stride=stride, + dilation=dilation, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dcn=None, + plugins=None) + if upsample_cfg is not None: + self.upsample = build_upsample_layer( + cfg=upsample_cfg, + in_channels=in_channels, + out_channels=skip_channels, + with_cp=with_cp, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.upsample = ConvModule( + in_channels, + skip_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, skip, x): + """Forward function.""" + + x = self.upsample(x) + out = torch.cat([skip, x], dim=1) + out = self.conv_block(out) + + return out diff --git a/downstream/mmsegmentation/mmseg/ops/__init__.py b/downstream/mmsegmentation/mmseg/ops/__init__.py new file mode 100644 index 0000000..bc075cd --- /dev/null +++ b/downstream/mmsegmentation/mmseg/ops/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .encoding import Encoding +from .wrappers import Upsample, resize + +__all__ = ['Upsample', 'resize', 'Encoding'] diff --git a/downstream/mmsegmentation/mmseg/ops/encoding.py b/downstream/mmsegmentation/mmseg/ops/encoding.py new file mode 100644 index 0000000..f397cc5 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/ops/encoding.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn +from torch.nn import functional as F + + +class Encoding(nn.Module): + """Encoding Layer: a learnable residual encoder. + + Input is of shape (batch_size, channels, height, width). + Output is of shape (batch_size, num_codes, channels). + + Args: + channels: dimension of the features or feature channels + num_codes: number of code words + """ + + def __init__(self, channels, num_codes): + super(Encoding, self).__init__() + # init codewords and smoothing factor + self.channels, self.num_codes = channels, num_codes + std = 1. 
/ ((num_codes * channels)**0.5) + # [num_codes, channels] + self.codewords = nn.Parameter( + torch.empty(num_codes, channels, + dtype=torch.float).uniform_(-std, std), + requires_grad=True) + # [num_codes] + self.scale = nn.Parameter( + torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0), + requires_grad=True) + + @staticmethod + def scaled_l2(x, codewords, scale): + num_codes, channels = codewords.size() + batch_size = x.size(0) + reshaped_scale = scale.view((1, 1, num_codes)) + expanded_x = x.unsqueeze(2).expand( + (batch_size, x.size(1), num_codes, channels)) + reshaped_codewords = codewords.view((1, 1, num_codes, channels)) + + scaled_l2_norm = reshaped_scale * ( + expanded_x - reshaped_codewords).pow(2).sum(dim=3) + return scaled_l2_norm + + @staticmethod + def aggregate(assignment_weights, x, codewords): + num_codes, channels = codewords.size() + reshaped_codewords = codewords.view((1, 1, num_codes, channels)) + batch_size = x.size(0) + + expanded_x = x.unsqueeze(2).expand( + (batch_size, x.size(1), num_codes, channels)) + encoded_feat = (assignment_weights.unsqueeze(3) * + (expanded_x - reshaped_codewords)).sum(dim=1) + return encoded_feat + + def forward(self, x): + assert x.dim() == 4 and x.size(1) == self.channels + # [batch_size, channels, height, width] + batch_size = x.size(0) + # [batch_size, height x width, channels] + x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous() + # assignment_weights: [batch_size, channels, num_codes] + assignment_weights = F.softmax( + self.scaled_l2(x, self.codewords, self.scale), dim=2) + # aggregate + encoded_feat = self.aggregate(assignment_weights, x, self.codewords) + return encoded_feat + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}' \ + f'x{self.channels})' + return repr_str diff --git a/downstream/mmsegmentation/mmseg/ops/wrappers.py b/downstream/mmsegmentation/mmseg/ops/wrappers.py new file mode 100644 index 0000000..ce67e4b --- /dev/null +++ b/downstream/mmsegmentation/mmseg/ops/wrappers.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
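As a quick sanity check on the shapes documented for the `Encoding` layer above (input `(B, C, H, W)`, output `(B, num_codes, C)`), a small sketch using the export from `ops/__init__.py`:

```python
import torch
from mmseg.ops import Encoding  # exported by ops/__init__.py above

enc = Encoding(channels=32, num_codes=8)
x = torch.randn(2, 32, 16, 16)  # (batch, channels, H, W)
out = enc(x)                    # soft-assign pixels to codewords, aggregate residuals
print(out.shape)                # torch.Size([2, 8, 32])
```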
+import warnings + +import torch.nn as nn +import torch.nn.functional as F + + +def resize(input, + size=None, + scale_factor=None, + mode='nearest', + align_corners=None, + warning=True): + if warning: + if size is not None and align_corners: + input_h, input_w = tuple(int(x) for x in input.shape[2:]) + output_h, output_w = tuple(int(x) for x in size) + if output_h > input_h or output_w > output_h: + if ((output_h > 1 and output_w > 1 and input_h > 1 + and input_w > 1) and (output_h - 1) % (input_h - 1) + and (output_w - 1) % (input_w - 1)): + warnings.warn( + f'When align_corners={align_corners}, ' + 'the output would more aligned if ' + f'input size {(input_h, input_w)} is `x+1` and ' + f'out size {(output_h, output_w)} is `nx+1`') + return F.interpolate(input, size, scale_factor, mode, align_corners) + + +class Upsample(nn.Module): + + def __init__(self, + size=None, + scale_factor=None, + mode='nearest', + align_corners=None): + super(Upsample, self).__init__() + self.size = size + if isinstance(scale_factor, tuple): + self.scale_factor = tuple(float(factor) for factor in scale_factor) + else: + self.scale_factor = float(scale_factor) if scale_factor else None + self.mode = mode + self.align_corners = align_corners + + def forward(self, x): + if not self.size: + size = [int(t * self.scale_factor) for t in x.shape[-2:]] + else: + size = self.size + return resize(x, size, None, self.mode, self.align_corners) diff --git a/downstream/mmsegmentation/mmseg/utils/__init__.py b/downstream/mmsegmentation/mmseg/utils/__init__.py new file mode 100644 index 0000000..ed002c7 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .collect_env import collect_env +from .logger import get_root_logger +from .misc import find_latest_checkpoint +from .set_env import setup_multi_processes + +__all__ = [ + 'get_root_logger', 'collect_env', 'find_latest_checkpoint', + 'setup_multi_processes' +] diff --git a/downstream/mmsegmentation/mmseg/utils/collect_env.py b/downstream/mmsegmentation/mmseg/utils/collect_env.py new file mode 100644 index 0000000..3379ecb --- /dev/null +++ b/downstream/mmsegmentation/mmseg/utils/collect_env.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import collect_env as collect_base_env +from mmcv.utils import get_git_hash + +import mmseg + + +def collect_env(): + """Collect the information of the running environments.""" + env_info = collect_base_env() + env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}' + + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print('{}: {}'.format(name, val)) diff --git a/downstream/mmsegmentation/mmseg/utils/logger.py b/downstream/mmsegmentation/mmseg/utils/logger.py new file mode 100644 index 0000000..0cb3c78 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/utils/logger.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +from mmcv.utils import get_logger + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Get the root logger. + + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. The name of the root logger is the top-level package name, + e.g., "mmseg". + + Args: + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the root logger. 
+ log_level (int): The root logger level. Note that only the process of + rank 0 is affected, while other processes will set the level to + "Error" and be silent most of the time. + + Returns: + logging.Logger: The root logger. + """ + + logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level) + + return logger diff --git a/downstream/mmsegmentation/mmseg/utils/misc.py b/downstream/mmsegmentation/mmseg/utils/misc.py new file mode 100644 index 0000000..bd1b6b1 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/utils/misc.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import glob +import os.path as osp +import warnings + + +def find_latest_checkpoint(path, suffix='pth'): + """This function is for finding the latest checkpoint. + + It will be used when automatically resume, modified from + https://github.com/open-mmlab/mmdetection/blob/dev-v2.20.0/mmdet/utils/misc.py + + Args: + path (str): The path to find checkpoints. + suffix (str): File extension for the checkpoint. Defaults to pth. + + Returns: + latest_path(str | None): File path of the latest checkpoint. + """ + if not osp.exists(path): + warnings.warn("The path of the checkpoints doesn't exist.") + return None + if osp.exists(osp.join(path, f'latest.{suffix}')): + return osp.join(path, f'latest.{suffix}') + + checkpoints = glob.glob(osp.join(path, f'*.{suffix}')) + if len(checkpoints) == 0: + warnings.warn('The are no checkpoints in the path') + return None + latest = -1 + latest_path = '' + for checkpoint in checkpoints: + if len(checkpoint) < len(latest_path): + continue + # `count` is iteration number, as checkpoints are saved as + # 'iter_xx.pth' or 'epoch_xx.pth' and xx is iteration number. + count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0]) + if count > latest: + latest = count + latest_path = checkpoint + return latest_path diff --git a/downstream/mmsegmentation/mmseg/utils/set_env.py b/downstream/mmsegmentation/mmseg/utils/set_env.py new file mode 100644 index 0000000..b2d3aaf --- /dev/null +++ b/downstream/mmsegmentation/mmseg/utils/set_env.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import platform + +import cv2 +import torch.multiprocessing as mp + +from ..utils import get_root_logger + + +def setup_multi_processes(cfg): + """Setup multi-processing environment variables.""" + logger = get_root_logger() + + # set multi-process start method + if platform.system() != 'Windows': + mp_start_method = cfg.get('mp_start_method', None) + current_method = mp.get_start_method(allow_none=True) + if mp_start_method in ('fork', 'spawn', 'forkserver'): + logger.info( + f'Multi-processing start method `{mp_start_method}` is ' + f'different from the previous setting `{current_method}`.' 
+ f'It will be force set to `{mp_start_method}`.') + mp.set_start_method(mp_start_method, force=True) + else: + logger.info( + f'Multi-processing start method is `{mp_start_method}`') + + # disable opencv multithreading to avoid system being overloaded + opencv_num_threads = cfg.get('opencv_num_threads', None) + if isinstance(opencv_num_threads, int): + logger.info(f'OpenCV num_threads is `{opencv_num_threads}`') + cv2.setNumThreads(opencv_num_threads) + else: + logger.info(f'OpenCV num_threads is `{cv2.getNumThreads}') + + if cfg.data.workers_per_gpu > 1: + # setup OMP threads + # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa + omp_num_threads = cfg.get('omp_num_threads', None) + if 'OMP_NUM_THREADS' not in os.environ: + if isinstance(omp_num_threads, int): + logger.info(f'OMP num threads is {omp_num_threads}') + os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) + else: + logger.info(f'OMP num threads is {os.environ["OMP_NUM_THREADS"] }') + + # setup MKL threads + if 'MKL_NUM_THREADS' not in os.environ: + mkl_num_threads = cfg.get('mkl_num_threads', None) + if isinstance(mkl_num_threads, int): + logger.info(f'MKL num threads is {mkl_num_threads}') + os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) + else: + logger.info(f'MKL num threads is {os.environ["MKL_NUM_THREADS"]}') diff --git a/downstream/mmsegmentation/mmseg/version.py b/downstream/mmsegmentation/mmseg/version.py new file mode 100644 index 0000000..85c6bf4 --- /dev/null +++ b/downstream/mmsegmentation/mmseg/version.py @@ -0,0 +1,18 @@ +# Copyright (c) Open-MMLab. All rights reserved. + +__version__ = '0.23.0' + + +def parse_version_info(version_str): + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) diff --git a/downstream/mmsegmentation/model-index.yml b/downstream/mmsegmentation/model-index.yml new file mode 100644 index 0000000..d8e9516 --- /dev/null +++ b/downstream/mmsegmentation/model-index.yml @@ -0,0 +1,44 @@ +Import: +- configs/ann/ann.yml +- configs/apcnet/apcnet.yml +- configs/beit/beit.yml +- configs/bisenetv1/bisenetv1.yml +- configs/bisenetv2/bisenetv2.yml +- configs/ccnet/ccnet.yml +- configs/cgnet/cgnet.yml +- configs/convnext/convnext.yml +- configs/danet/danet.yml +- configs/deeplabv3/deeplabv3.yml +- configs/deeplabv3plus/deeplabv3plus.yml +- configs/dmnet/dmnet.yml +- configs/dnlnet/dnlnet.yml +- configs/dpt/dpt.yml +- configs/emanet/emanet.yml +- configs/encnet/encnet.yml +- configs/erfnet/erfnet.yml +- configs/fastfcn/fastfcn.yml +- configs/fastscnn/fastscnn.yml +- configs/fcn/fcn.yml +- configs/gcnet/gcnet.yml +- configs/hrnet/hrnet.yml +- configs/icnet/icnet.yml +- configs/isanet/isanet.yml +- configs/knet/knet.yml +- configs/mobilenet_v2/mobilenet_v2.yml +- configs/mobilenet_v3/mobilenet_v3.yml +- configs/nonlocal_net/nonlocal_net.yml +- configs/ocrnet/ocrnet.yml +- configs/point_rend/point_rend.yml +- configs/psanet/psanet.yml +- configs/pspnet/pspnet.yml +- configs/resnest/resnest.yml +- configs/segformer/segformer.yml +- configs/segmenter/segmenter.yml +- configs/sem_fpn/sem_fpn.yml +- configs/setr/setr.yml +- configs/stdc/stdc.yml +- configs/swin/swin.yml +- configs/twins/twins.yml +- configs/unet/unet.yml +- configs/upernet/upernet.yml +- 
configs/vit/vit.yml diff --git a/downstream/mmsegmentation/pytest.ini b/downstream/mmsegmentation/pytest.ini new file mode 100644 index 0000000..9796e87 --- /dev/null +++ b/downstream/mmsegmentation/pytest.ini @@ -0,0 +1,7 @@ +[pytest] +addopts = --xdoctest --xdoctest-style=auto +norecursedirs = .git ignore build __pycache__ data docker docs .eggs + +filterwarnings= default + ignore:.*No cfgstr given in Cacher constructor or call.*:Warning + ignore:.*Define the __nice__ method for.*:Warning diff --git a/downstream/mmsegmentation/requirements.txt b/downstream/mmsegmentation/requirements.txt new file mode 100644 index 0000000..6da5ade --- /dev/null +++ b/downstream/mmsegmentation/requirements.txt @@ -0,0 +1,3 @@ +-r requirements/optional.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/downstream/mmsegmentation/requirements/docs.txt b/downstream/mmsegmentation/requirements/docs.txt new file mode 100644 index 0000000..2017084 --- /dev/null +++ b/downstream/mmsegmentation/requirements/docs.txt @@ -0,0 +1,6 @@ +docutils==0.16.0 +myst-parser +-e git+https://github.com/gaotongxiao/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==4.0.2 +sphinx_copybutton +sphinx_markdown_tables diff --git a/downstream/mmsegmentation/requirements/mminstall.txt b/downstream/mmsegmentation/requirements/mminstall.txt new file mode 100644 index 0000000..131b2b8 --- /dev/null +++ b/downstream/mmsegmentation/requirements/mminstall.txt @@ -0,0 +1,2 @@ +mmcls>=0.20.1 +mmcv-full>=1.4.4,<=1.5.0 diff --git a/downstream/mmsegmentation/requirements/optional.txt b/downstream/mmsegmentation/requirements/optional.txt new file mode 100644 index 0000000..47fa593 --- /dev/null +++ b/downstream/mmsegmentation/requirements/optional.txt @@ -0,0 +1 @@ +cityscapesscripts diff --git a/downstream/mmsegmentation/requirements/readthedocs.txt b/downstream/mmsegmentation/requirements/readthedocs.txt new file mode 100644 index 0000000..22a894b --- /dev/null +++ b/downstream/mmsegmentation/requirements/readthedocs.txt @@ -0,0 +1,4 @@ +mmcv +prettytable +torch +torchvision diff --git a/downstream/mmsegmentation/requirements/runtime.txt b/downstream/mmsegmentation/requirements/runtime.txt new file mode 100644 index 0000000..520408f --- /dev/null +++ b/downstream/mmsegmentation/requirements/runtime.txt @@ -0,0 +1,5 @@ +matplotlib +mmcls>=0.20.1 +numpy +packaging +prettytable diff --git a/downstream/mmsegmentation/requirements/tests.txt b/downstream/mmsegmentation/requirements/tests.txt new file mode 100644 index 0000000..74fc761 --- /dev/null +++ b/downstream/mmsegmentation/requirements/tests.txt @@ -0,0 +1,6 @@ +codecov +flake8 +interrogate +pytest +xdoctest>=0.10.0 +yapf diff --git a/downstream/mmsegmentation/setup.cfg b/downstream/mmsegmentation/setup.cfg new file mode 100644 index 0000000..23cb09e --- /dev/null +++ b/downstream/mmsegmentation/setup.cfg @@ -0,0 +1,19 @@ +[yapf] +based_on_style = pep8 +blank_line_before_nested_class_or_def = true +split_before_expression_after_opening_paren = true + +[isort] +line_length = 79 +multi_line_output = 0 +extra_standard_library = setuptools +known_first_party = mmseg +known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,torch,ts +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[codespell] +skip = *.po,*.ts,*.ipynb +count = +quiet-level = 3 +ignore-words-list = formating,sur,hist,dota diff --git a/downstream/mmsegmentation/setup.py 
b/downstream/mmsegmentation/setup.py new file mode 100755 index 0000000..91afefb --- /dev/null +++ b/downstream/mmsegmentation/setup.py @@ -0,0 +1,200 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import platform +import shutil +import sys +import warnings +from setuptools import find_packages, setup + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +version_file = 'mmseg/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. + + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. + """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + if platform.system() == 'Windows': + # set `copy` mode here since symlink fails on Windows. 
+ mode = 'copy' + else: + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv or \ + platform.system() == 'Windows': + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + # set `copy` mode here since symlink fails with WinError on Windows. + mode = 'copy' + else: + return + + filenames = ['tools', 'configs', 'model-index.yml'] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmseg', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + try: + os.symlink(src_relpath, tar_path) + except OSError: + # Creating a symbolic link on windows may raise an + # `OSError: [WinError 1314]` due to privilege. If + # the error happens, the src file will be copied + mode = 'copy' + warnings.warn( + f'Failed to create a symbolic link for {src_relpath}, ' + f'and it will be copied to {tar_path}') + else: + continue + + if mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extension() + setup( + name='mmsegmentation', + version=get_version(), + description='Open MMLab Semantic Segmentation Toolbox and Benchmark', + long_description=readme(), + long_description_content_type='text/markdown', + author='MMSegmentation Contributors', + author_email='openmmlab@gmail.com', + keywords='computer vision, semantic segmentation', + url='http://github.com/open-mmlab/mmsegmentation', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + include_package_data=True, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + ], + license='Apache License 2.0', + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'build': parse_requirements('requirements/build.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + }, + ext_modules=[], + zip_safe=False) diff --git a/downstream/mmsegmentation/tools/analyze_logs.py b/downstream/mmsegmentation/tools/analyze_logs.py new file mode 100644 index 0000000..e2127d4 --- /dev/null +++ b/downstream/mmsegmentation/tools/analyze_logs.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
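A rough illustration of what the `parse_requirements()` helper in `setup.py` above yields for the runtime requirements listed earlier; the exact output is inferred from the parsing logic, not verified against a build.

```python
import setup  # run from downstream/mmsegmentation so setup.py is importable

# Versions are kept because with_version defaults to True; for the
# requirements/runtime.txt shown earlier this should print roughly:
# ['matplotlib', 'mmcls>=0.20.1', 'numpy', 'packaging', 'prettytable']
print(setup.parse_requirements('requirements/runtime.txt'))
```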
+"""Modified from https://github.com/open- +mmlab/mmdetection/blob/master/tools/analysis_tools/analyze_logs.py.""" +import argparse +import json +from collections import defaultdict + +import matplotlib.pyplot as plt +import seaborn as sns + + +def plot_curve(log_dicts, args): + if args.backend is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append(f'{json_log}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + plot_epochs = [] + plot_iters = [] + plot_values = [] + # In some log files exist lines of validation, + # `mode` list is used to only collect iter number + # of training line. + for epoch in epochs: + epoch_logs = log_dict[epoch] + if metric not in epoch_logs.keys(): + continue + if metric in ['mIoU', 'mAcc', 'aAcc']: + plot_epochs.append(epoch) + plot_values.append(epoch_logs[metric][0]) + else: + for idx in range(len(epoch_logs[metric])): + if epoch_logs['mode'][idx] == 'train': + plot_iters.append(epoch_logs['iter'][idx]) + plot_values.append(epoch_logs[metric][idx]) + ax = plt.gca() + label = legend[i * num_metrics + j] + if metric in ['mIoU', 'mAcc', 'aAcc']: + ax.set_xticks(plot_epochs) + plt.xlabel('epoch') + plt.plot(plot_epochs, plot_values, label=label, marker='o') + else: + plt.xlabel('iter') + plt.plot(plot_iters, plot_values, label=label, linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + parser.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser.add_argument( + '--keys', + type=str, + nargs='+', + default=['mIoU'], + help='the metric that you want to plot') + parser.add_argument('--title', type=str, help='title of figure') + parser.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser.add_argument('--out', type=str, default=None) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for line in log_file: + log = json.loads(line.strip()) + # skip lines without `epoch` field + if 'epoch' not in log: + continue + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + log_dicts = load_json_logs(json_logs) + 
plot_curve(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/benchmark.py b/downstream/mmsegmentation/tools/benchmark.py new file mode 100644 index 0000000..f6d6888 --- /dev/null +++ b/downstream/mmsegmentation/tools/benchmark.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import time + +import mmcv +import numpy as np +import torch +from mmcv import Config +from mmcv.parallel import MMDataParallel +from mmcv.runner import load_checkpoint, wrap_fp16_model + +from mmseg.datasets import build_dataloader, build_dataset +from mmseg.models import build_segmentor + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMSeg benchmark a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--log-interval', type=int, default=50, help='interval of logging') + parser.add_argument( + '--work-dir', + help=('if specified, the results will be dumped ' + 'into the directory as json')) + parser.add_argument('--repeat-times', type=int, default=1) + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + if args.work_dir is not None: + mmcv.mkdir_or_exist(osp.abspath(args.work_dir)) + json_file = osp.join(args.work_dir, f'fps_{timestamp}.json') + else: + # use config filename as default work_dir if cfg.work_dir is None + work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + mmcv.mkdir_or_exist(osp.abspath(work_dir)) + json_file = osp.join(work_dir, f'fps_{timestamp}.json') + + repeat_times = args.repeat_times + # set cudnn_benchmark + torch.backends.cudnn.benchmark = False + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + benchmark_dict = dict(config=args.config, unit='img / s') + overall_fps_list = [] + for time_index in range(repeat_times): + print(f'Run {time_index + 1}:') + # build the dataloader + # TODO: support multiple images per gpu (only minor changes are needed) + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + samples_per_gpu=1, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=False, + shuffle=False) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg')) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + if 'checkpoint' in args and osp.exists(args.checkpoint): + load_checkpoint(model, args.checkpoint, map_location='cpu') + + model = MMDataParallel(model, device_ids=[0]) + + model.eval() + + # the first several iterations may be very slow so skip them + num_warmup = 5 + pure_inf_time = 0 + total_iters = 200 + + # benchmark with 200 image and take the average + for i, data in enumerate(data_loader): + + torch.cuda.synchronize() + start_time = time.perf_counter() + + with torch.no_grad(): + model(return_loss=False, rescale=True, **data) + + torch.cuda.synchronize() + elapsed = time.perf_counter() - start_time + + if i >= num_warmup: + pure_inf_time += elapsed + if (i + 1) % args.log_interval == 0: + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Done image [{i + 1:<3}/ {total_iters}], ' + f'fps: {fps:.2f} img / s') + + if (i + 1) == total_iters: + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Overall fps: {fps:.2f} img / s\n') 
+ benchmark_dict[f'overall_fps_{time_index + 1}'] = round(fps, 2) + overall_fps_list.append(fps) + break + benchmark_dict['average_fps'] = round(np.mean(overall_fps_list), 2) + benchmark_dict['fps_variance'] = round(np.var(overall_fps_list), 4) + print(f'Average fps of {repeat_times} evaluations: ' + f'{benchmark_dict["average_fps"]}') + print(f'The variance of {repeat_times} evaluations: ' + f'{benchmark_dict["fps_variance"]}') + mmcv.dump(benchmark_dict, json_file, indent=4) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/browse_dataset.py b/downstream/mmsegmentation/tools/browse_dataset.py new file mode 100644 index 0000000..0aa9430 --- /dev/null +++ b/downstream/mmsegmentation/tools/browse_dataset.py @@ -0,0 +1,182 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import warnings +from pathlib import Path + +import mmcv +import numpy as np +from mmcv import Config, DictAction + +from mmseg.datasets.builder import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--show-origin', + default=False, + action='store_true', + help='if True, omit all augmentation in pipeline,' + ' show origin image and seg map') + parser.add_argument( + '--skip-type', + type=str, + nargs='+', + default=['DefaultFormatBundle', 'Normalize', 'Collect'], + help='skip some useless pipeline,if `show-origin` is true, ' + 'all pipeline except `Load` will be skipped') + parser.add_argument( + '--output-dir', + default='./output', + type=str, + help='If there is no display interface, you can save it') + parser.add_argument('--show', default=False, action='store_true') + parser.add_argument( + '--show-interval', + type=int, + default=999, + help='the interval of show (ms)') + parser.add_argument( + '--opacity', + type=float, + default=0.5, + help='the opacity of semantic map') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def imshow_semantic(img, + seg, + class_names, + palette=None, + win_name='', + show=False, + wait_time=0, + out_file=None, + opacity=0.5): + """Draw `result` over `img`. + + Args: + img (str or Tensor): The image to be displayed. + seg (Tensor): The semantic segmentation results to draw over + `img`. + class_names (list[str]): Names of each classes. + palette (list[list[int]]] | np.ndarray | None): The palette of + segmentation map. If None is given, random palette will be + generated. Default: None + win_name (str): The window name. + wait_time (int): Value of waitKey param. + Default: 0. + show (bool): Whether to show the image. + Default: False. + out_file (str or None): The filename to write the image. + Default: None. + opacity(float): Opacity of painted segmentation map. + Default 0.5. + Must be in (0, 1] range. 
+ Returns: + img (Tensor): Only if not `show` or `out_file` + """ + img = mmcv.imread(img) + img = img.copy() + if palette is None: + palette = np.random.randint(0, 255, size=(len(class_names), 3)) + palette = np.array(palette) + assert palette.shape[0] == len(class_names) + assert palette.shape[1] == 3 + assert len(palette.shape) == 2 + assert 0 < opacity <= 1.0 + color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) + for label, color in enumerate(palette): + color_seg[seg == label, :] = color + # convert to BGR + color_seg = color_seg[..., ::-1] + + img = img * (1 - opacity) + color_seg * opacity + img = img.astype(np.uint8) + # if out_file specified, do not show image in window + if out_file is not None: + show = False + + if show: + mmcv.imshow(img, win_name, wait_time) + if out_file is not None: + mmcv.imwrite(img, out_file) + + if not (show or out_file): + warnings.warn('show==False and out_file is not specified, only ' + 'result image will be returned') + return img + + +def _retrieve_data_cfg(_data_cfg, skip_type, show_origin): + if show_origin is True: + # only keep pipeline of Loading data and ann + _data_cfg['pipeline'] = [ + x for x in _data_cfg.pipeline if 'Load' in x['type'] + ] + else: + _data_cfg['pipeline'] = [ + x for x in _data_cfg.pipeline if x['type'] not in skip_type + ] + + +def retrieve_data_cfg(config_path, skip_type, cfg_options, show_origin=False): + cfg = Config.fromfile(config_path) + if cfg_options is not None: + cfg.merge_from_dict(cfg_options) + train_data_cfg = cfg.data.train + if isinstance(train_data_cfg, list): + for _data_cfg in train_data_cfg: + while 'dataset' in _data_cfg and _data_cfg[ + 'type'] != 'MultiImageMixDataset': + _data_cfg = _data_cfg['dataset'] + if 'pipeline' in _data_cfg: + _retrieve_data_cfg(_data_cfg, skip_type, show_origin) + else: + raise ValueError + else: + while 'dataset' in train_data_cfg and train_data_cfg[ + 'type'] != 'MultiImageMixDataset': + train_data_cfg = train_data_cfg['dataset'] + _retrieve_data_cfg(train_data_cfg, skip_type, show_origin) + return cfg + + +def main(): + args = parse_args() + cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options, + args.show_origin) + dataset = build_dataset(cfg.data.train) + progress_bar = mmcv.ProgressBar(len(dataset)) + for item in dataset: + filename = os.path.join(args.output_dir, + Path(item['filename']).name + ) if args.output_dir is not None else None + imshow_semantic( + item['img'], + item['gt_semantic_seg'], + dataset.CLASSES, + dataset.PALETTE, + show=args.show, + wait_time=args.show_interval, + out_file=filename, + opacity=args.opacity, + ) + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/confusion_matrix.py b/downstream/mmsegmentation/tools/confusion_matrix.py new file mode 100644 index 0000000..2c5b64c --- /dev/null +++ b/downstream/mmsegmentation/tools/confusion_matrix.py @@ -0,0 +1,184 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
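As a side note on `imshow_semantic()` above, the palette colouring and opacity blend reduce to a few NumPy operations. A toy sketch with made-up values:

```python
import numpy as np

seg = np.array([[0, 1], [1, 2]])                             # toy 2x2 label map
palette = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])  # one RGB row per class
color_seg = np.zeros((*seg.shape, 3), dtype=np.uint8)
for label, color in enumerate(palette):
    color_seg[seg == label, :] = color
img = np.full((2, 2, 3), 128, dtype=np.float64)              # toy grey BGR image
opacity = 0.5
blended = (img * (1 - opacity) + color_seg[..., ::-1] * opacity).astype(np.uint8)
print(blended[0, 0])  # half grey, half the class-0 colour (in BGR order)
```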
+import argparse +import os + +import matplotlib.pyplot as plt +import mmcv +import numpy as np +from matplotlib.ticker import MultipleLocator +from mmcv import Config, DictAction + +from mmseg.datasets import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate confusion matrix from segmentation results') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'prediction_path', help='prediction path where test .pkl result') + parser.add_argument( + 'save_dir', help='directory where confusion matrix will be saved') + parser.add_argument( + '--show', action='store_true', help='show confusion matrix') + parser.add_argument( + '--color-theme', + default='winter', + help='theme of the matrix color map') + parser.add_argument( + '--title', + default='Normalized Confusion Matrix', + help='title of the matrix color map') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def calculate_confusion_matrix(dataset, results): + """Calculate the confusion matrix. + + Args: + dataset (Dataset): Test or val dataset. + results (list[ndarray]): A list of segmentation results in each image. + """ + n = len(dataset.CLASSES) + confusion_matrix = np.zeros(shape=[n, n]) + assert len(dataset) == len(results) + prog_bar = mmcv.ProgressBar(len(results)) + for idx, per_img_res in enumerate(results): + res_segm = per_img_res + gt_segm = dataset.get_gt_seg_map_by_idx(idx) + inds = n * gt_segm + res_segm + inds = inds.flatten() + mat = np.bincount(inds, minlength=n**2).reshape(n, n) + confusion_matrix += mat + prog_bar.update() + return confusion_matrix + + +def plot_confusion_matrix(confusion_matrix, + labels, + save_dir=None, + show=True, + title='Normalized Confusion Matrix', + color_theme='winter'): + """Draw confusion matrix with matplotlib. + + Args: + confusion_matrix (ndarray): The confusion matrix. + labels (list[str]): List of class names. + save_dir (str|optional): If set, save the confusion matrix plot to the + given path. Default: None. + show (bool): Whether to show the plot. Default: True. + title (str): Title of the plot. Default: `Normalized Confusion Matrix`. + color_theme (str): Theme of the matrix color map. Default: `winter`. 
+ """ + # normalize the confusion matrix + per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis] + confusion_matrix = \ + confusion_matrix.astype(np.float32) / per_label_sums * 100 + + num_classes = len(labels) + fig, ax = plt.subplots( + figsize=(2 * num_classes, 2 * num_classes * 0.8), dpi=180) + cmap = plt.get_cmap(color_theme) + im = ax.imshow(confusion_matrix, cmap=cmap) + plt.colorbar(mappable=im, ax=ax) + + title_font = {'weight': 'bold', 'size': 12} + ax.set_title(title, fontdict=title_font) + label_font = {'size': 10} + plt.ylabel('Ground Truth Label', fontdict=label_font) + plt.xlabel('Prediction Label', fontdict=label_font) + + # draw locator + xmajor_locator = MultipleLocator(1) + xminor_locator = MultipleLocator(0.5) + ax.xaxis.set_major_locator(xmajor_locator) + ax.xaxis.set_minor_locator(xminor_locator) + ymajor_locator = MultipleLocator(1) + yminor_locator = MultipleLocator(0.5) + ax.yaxis.set_major_locator(ymajor_locator) + ax.yaxis.set_minor_locator(yminor_locator) + + # draw grid + ax.grid(True, which='minor', linestyle='-') + + # draw label + ax.set_xticks(np.arange(num_classes)) + ax.set_yticks(np.arange(num_classes)) + ax.set_xticklabels(labels) + ax.set_yticklabels(labels) + + ax.tick_params( + axis='x', bottom=False, top=True, labelbottom=False, labeltop=True) + plt.setp( + ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor') + + # draw confusion matrix value + for i in range(num_classes): + for j in range(num_classes): + ax.text( + j, + i, + '{}%'.format( + round(confusion_matrix[i, j], 2 + ) if not np.isnan(confusion_matrix[i, j]) else -1), + ha='center', + va='center', + color='w', + size=7) + + ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1 + + fig.tight_layout() + if save_dir is not None: + plt.savefig( + os.path.join(save_dir, 'confusion_matrix.png'), format='png') + if show: + plt.show() + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + results = mmcv.load(args.prediction_path) + + assert isinstance(results, list) + if isinstance(results[0], np.ndarray): + pass + else: + raise TypeError('invalid type of prediction results') + + if isinstance(cfg.data.test, dict): + cfg.data.test.test_mode = True + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + ds_cfg.test_mode = True + + dataset = build_dataset(cfg.data.test) + confusion_matrix = calculate_confusion_matrix(dataset, results) + plot_confusion_matrix( + confusion_matrix, + dataset.CLASSES, + save_dir=args.save_dir, + show=args.show, + title=args.title, + color_theme=args.color_theme) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/chase_db1.py b/downstream/mmsegmentation/tools/convert_datasets/chase_db1.py new file mode 100644 index 0000000..580e6e7 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/chase_db1.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
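The `n * gt + pred` indexing in `calculate_confusion_matrix()` above is compact enough to be easy to misread; a tiny check with three classes and four pixels:

```python
import numpy as np

n = 3                        # number of classes
gt = np.array([0, 1, 2, 2])  # ground-truth labels of four pixels
pred = np.array([0, 2, 2, 1])  # predicted labels of the same pixels
mat = np.bincount(n * gt + pred, minlength=n**2).reshape(n, n)
# Rows are ground truth, columns are predictions:
# [[1, 0, 0],
#  [0, 0, 1],
#  [0, 1, 1]]
print(mat)
```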
+import argparse +import os +import os.path as osp +import tempfile +import zipfile + +import mmcv + +CHASE_DB1_LEN = 28 * 3 +TRAINING_LEN = 60 + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert CHASE_DB1 dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='path of CHASEDB1.zip') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + dataset_path = args.dataset_path + if args.out_dir is None: + out_dir = osp.join('data', 'CHASE_DB1') + else: + out_dir = args.out_dir + + print('Making directories...') + mmcv.mkdir_or_exist(out_dir) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + print('Extracting CHASEDB1.zip...') + zip_file = zipfile.ZipFile(dataset_path) + zip_file.extractall(tmp_dir) + + print('Generating training dataset...') + + assert len(os.listdir(tmp_dir)) == CHASE_DB1_LEN, \ + 'len(os.listdir(tmp_dir)) != {}'.format(CHASE_DB1_LEN) + + for img_name in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(tmp_dir, img_name)) + if osp.splitext(img_name)[1] == '.jpg': + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'training', + osp.splitext(img_name)[0] + '.png')) + else: + # The annotation img should be divided by 128, because some of + # the annotation imgs are not standard. We should set a + # threshold to convert the nonstandard annotation imgs. The + # value divided by 128 is equivalent to '1 if value >= 128 + # else 0' + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(img_name)[0] + '.png')) + + for img_name in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(tmp_dir, img_name)) + if osp.splitext(img_name)[1] == '.jpg': + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'validation', + osp.splitext(img_name)[0] + '.png')) + else: + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(img_name)[0] + '.png')) + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/cityscapes.py b/downstream/mmsegmentation/tools/convert_datasets/cityscapes.py new file mode 100644 index 0000000..17b6168 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/cityscapes.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
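The `// 128` trick used by the CHASE_DB1 converter above (and repeated in the other retinal-vessel converters) is plain integer thresholding:

```python
import numpy as np

ann = np.array([0, 17, 127, 128, 200, 255], dtype=np.uint8)
print(ann // 128)  # -> [0 0 0 1 1 1], i.e. `1 if value >= 128 else 0`
```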
+import argparse +import os.path as osp + +import mmcv +from cityscapesscripts.preparation.json2labelImg import json2labelImg + + +def convert_json_to_label(json_file): + label_file = json_file.replace('_polygons.json', '_labelTrainIds.png') + json2labelImg(json_file, label_file, 'trainIds') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert Cityscapes annotations to TrainIds') + parser.add_argument('cityscapes_path', help='cityscapes data path') + parser.add_argument('--gt-dir', default='gtFine', type=str) + parser.add_argument('-o', '--out-dir', help='output path') + parser.add_argument( + '--nproc', default=1, type=int, help='number of process') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + cityscapes_path = args.cityscapes_path + out_dir = args.out_dir if args.out_dir else cityscapes_path + mmcv.mkdir_or_exist(out_dir) + + gt_dir = osp.join(cityscapes_path, args.gt_dir) + + poly_files = [] + for poly in mmcv.scandir(gt_dir, '_polygons.json', recursive=True): + poly_file = osp.join(gt_dir, poly) + poly_files.append(poly_file) + if args.nproc > 1: + mmcv.track_parallel_progress(convert_json_to_label, poly_files, + args.nproc) + else: + mmcv.track_progress(convert_json_to_label, poly_files) + + split_names = ['train', 'val', 'test'] + + for split in split_names: + filenames = [] + for poly in mmcv.scandir( + osp.join(gt_dir, split), '_polygons.json', recursive=True): + filenames.append(poly.replace('_gtFine_polygons.json', '')) + with open(osp.join(out_dir, f'{split}.txt'), 'w') as f: + f.writelines(f + '\n' for f in filenames) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/coco_stuff10k.py b/downstream/mmsegmentation/tools/convert_datasets/coco_stuff10k.py new file mode 100644 index 0000000..374f819 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/coco_stuff10k.py @@ -0,0 +1,307 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
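To make the string handling in the Cityscapes converter above concrete, here is how one polygon annotation maps to its label image and its split-file entry; the path is illustrative only, not taken from real data.

```python
poly = 'frankfurt/frankfurt_000000_000294_gtFine_polygons.json'

# Name of the *_labelTrainIds.png written next to the polygon file:
print(poly.replace('_polygons.json', '_labelTrainIds.png'))
# -> frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png

# Entry written into val.txt (the '_gtFine_polygons.json' suffix stripped):
print(poly.replace('_gtFine_polygons.json', ''))
# -> frankfurt/frankfurt_000000_000294
```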
+import argparse +import os.path as osp +import shutil +from functools import partial + +import mmcv +import numpy as np +from PIL import Image +from scipy.io import loadmat + +COCO_LEN = 10000 + +clsID_to_trID = { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 7, + 8: 8, + 9: 9, + 10: 10, + 11: 11, + 13: 12, + 14: 13, + 15: 14, + 16: 15, + 17: 16, + 18: 17, + 19: 18, + 20: 19, + 21: 20, + 22: 21, + 23: 22, + 24: 23, + 25: 24, + 27: 25, + 28: 26, + 31: 27, + 32: 28, + 33: 29, + 34: 30, + 35: 31, + 36: 32, + 37: 33, + 38: 34, + 39: 35, + 40: 36, + 41: 37, + 42: 38, + 43: 39, + 44: 40, + 46: 41, + 47: 42, + 48: 43, + 49: 44, + 50: 45, + 51: 46, + 52: 47, + 53: 48, + 54: 49, + 55: 50, + 56: 51, + 57: 52, + 58: 53, + 59: 54, + 60: 55, + 61: 56, + 62: 57, + 63: 58, + 64: 59, + 65: 60, + 67: 61, + 70: 62, + 72: 63, + 73: 64, + 74: 65, + 75: 66, + 76: 67, + 77: 68, + 78: 69, + 79: 70, + 80: 71, + 81: 72, + 82: 73, + 84: 74, + 85: 75, + 86: 76, + 87: 77, + 88: 78, + 89: 79, + 90: 80, + 92: 81, + 93: 82, + 94: 83, + 95: 84, + 96: 85, + 97: 86, + 98: 87, + 99: 88, + 100: 89, + 101: 90, + 102: 91, + 103: 92, + 104: 93, + 105: 94, + 106: 95, + 107: 96, + 108: 97, + 109: 98, + 110: 99, + 111: 100, + 112: 101, + 113: 102, + 114: 103, + 115: 104, + 116: 105, + 117: 106, + 118: 107, + 119: 108, + 120: 109, + 121: 110, + 122: 111, + 123: 112, + 124: 113, + 125: 114, + 126: 115, + 127: 116, + 128: 117, + 129: 118, + 130: 119, + 131: 120, + 132: 121, + 133: 122, + 134: 123, + 135: 124, + 136: 125, + 137: 126, + 138: 127, + 139: 128, + 140: 129, + 141: 130, + 142: 131, + 143: 132, + 144: 133, + 145: 134, + 146: 135, + 147: 136, + 148: 137, + 149: 138, + 150: 139, + 151: 140, + 152: 141, + 153: 142, + 154: 143, + 155: 144, + 156: 145, + 157: 146, + 158: 147, + 159: 148, + 160: 149, + 161: 150, + 162: 151, + 163: 152, + 164: 153, + 165: 154, + 166: 155, + 167: 156, + 168: 157, + 169: 158, + 170: 159, + 171: 160, + 172: 161, + 173: 162, + 174: 163, + 175: 164, + 176: 165, + 177: 166, + 178: 167, + 179: 168, + 180: 169, + 181: 170, + 182: 171 +} + + +def convert_to_trainID(tuple_path, in_img_dir, in_ann_dir, out_img_dir, + out_mask_dir, is_train): + imgpath, maskpath = tuple_path + shutil.copyfile( + osp.join(in_img_dir, imgpath), + osp.join(out_img_dir, 'train2014', imgpath) if is_train else osp.join( + out_img_dir, 'test2014', imgpath)) + annotate = loadmat(osp.join(in_ann_dir, maskpath)) + mask = annotate['S'].astype(np.uint8) + mask_copy = mask.copy() + for clsID, trID in clsID_to_trID.items(): + mask_copy[mask == clsID] = trID + seg_filename = osp.join(out_mask_dir, 'train2014', + maskpath.split('.')[0] + + '_labelTrainIds.png') if is_train else osp.join( + out_mask_dir, 'test2014', + maskpath.split('.')[0] + '_labelTrainIds.png') + Image.fromarray(mask_copy).save(seg_filename, 'PNG') + + +def generate_coco_list(folder): + train_list = osp.join(folder, 'imageLists', 'train.txt') + test_list = osp.join(folder, 'imageLists', 'test.txt') + train_paths = [] + test_paths = [] + + with open(train_list) as f: + for filename in f: + basename = filename.strip() + imgpath = basename + '.jpg' + maskpath = basename + '.mat' + train_paths.append((imgpath, maskpath)) + + with open(test_list) as f: + for filename in f: + basename = filename.strip() + imgpath = basename + '.jpg' + maskpath = basename + '.mat' + test_paths.append((imgpath, maskpath)) + + return train_paths, test_paths + + +def parse_args(): + parser = argparse.ArgumentParser( + description=\ + 'Convert COCO Stuff 10k annotations to 
mmsegmentation format') # noqa + parser.add_argument('coco_path', help='coco stuff path') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--nproc', default=16, type=int, help='number of process') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + coco_path = args.coco_path + nproc = args.nproc + + out_dir = args.out_dir or coco_path + out_img_dir = osp.join(out_dir, 'images') + out_mask_dir = osp.join(out_dir, 'annotations') + + mmcv.mkdir_or_exist(osp.join(out_img_dir, 'train2014')) + mmcv.mkdir_or_exist(osp.join(out_img_dir, 'test2014')) + mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'train2014')) + mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'test2014')) + + train_list, test_list = generate_coco_list(coco_path) + assert (len(train_list) + + len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format( + len(train_list), len(test_list)) + + if args.nproc > 1: + mmcv.track_parallel_progress( + partial( + convert_to_trainID, + in_img_dir=osp.join(coco_path, 'images'), + in_ann_dir=osp.join(coco_path, 'annotations'), + out_img_dir=out_img_dir, + out_mask_dir=out_mask_dir, + is_train=True), + train_list, + nproc=nproc) + mmcv.track_parallel_progress( + partial( + convert_to_trainID, + in_img_dir=osp.join(coco_path, 'images'), + in_ann_dir=osp.join(coco_path, 'annotations'), + out_img_dir=out_img_dir, + out_mask_dir=out_mask_dir, + is_train=False), + test_list, + nproc=nproc) + else: + mmcv.track_progress( + partial( + convert_to_trainID, + in_img_dir=osp.join(coco_path, 'images'), + in_ann_dir=osp.join(coco_path, 'annotations'), + out_img_dir=out_img_dir, + out_mask_dir=out_mask_dir, + is_train=True), train_list) + mmcv.track_progress( + partial( + convert_to_trainID, + in_img_dir=osp.join(coco_path, 'images'), + in_ann_dir=osp.join(coco_path, 'annotations'), + out_img_dir=out_img_dir, + out_mask_dir=out_mask_dir, + is_train=False), test_list) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/coco_stuff164k.py b/downstream/mmsegmentation/tools/convert_datasets/coco_stuff164k.py new file mode 100644 index 0000000..6d8e2f2 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/coco_stuff164k.py @@ -0,0 +1,264 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
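The core of both COCO-Stuff converters is the dictionary-driven relabelling loop shown above; a toy example using a three-entry subset of the mapping table:

```python
import numpy as np

clsID_to_trID = {0: 0, 13: 12, 182: 171}  # subset of the table above
mask = np.array([[0, 13], [182, 13]], dtype=np.uint8)
out = mask.copy()
for clsID, trID in clsID_to_trID.items():
    out[mask == clsID] = trID
print(out)  # -> [[  0  12]
            #     [171  12]]
```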
+import argparse +import os.path as osp +import shutil +from functools import partial +from glob import glob + +import mmcv +import numpy as np +from PIL import Image + +COCO_LEN = 123287 + +clsID_to_trID = { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 7, + 8: 8, + 9: 9, + 10: 10, + 12: 11, + 13: 12, + 14: 13, + 15: 14, + 16: 15, + 17: 16, + 18: 17, + 19: 18, + 20: 19, + 21: 20, + 22: 21, + 23: 22, + 24: 23, + 26: 24, + 27: 25, + 30: 26, + 31: 27, + 32: 28, + 33: 29, + 34: 30, + 35: 31, + 36: 32, + 37: 33, + 38: 34, + 39: 35, + 40: 36, + 41: 37, + 42: 38, + 43: 39, + 45: 40, + 46: 41, + 47: 42, + 48: 43, + 49: 44, + 50: 45, + 51: 46, + 52: 47, + 53: 48, + 54: 49, + 55: 50, + 56: 51, + 57: 52, + 58: 53, + 59: 54, + 60: 55, + 61: 56, + 62: 57, + 63: 58, + 64: 59, + 66: 60, + 69: 61, + 71: 62, + 72: 63, + 73: 64, + 74: 65, + 75: 66, + 76: 67, + 77: 68, + 78: 69, + 79: 70, + 80: 71, + 81: 72, + 83: 73, + 84: 74, + 85: 75, + 86: 76, + 87: 77, + 88: 78, + 89: 79, + 91: 80, + 92: 81, + 93: 82, + 94: 83, + 95: 84, + 96: 85, + 97: 86, + 98: 87, + 99: 88, + 100: 89, + 101: 90, + 102: 91, + 103: 92, + 104: 93, + 105: 94, + 106: 95, + 107: 96, + 108: 97, + 109: 98, + 110: 99, + 111: 100, + 112: 101, + 113: 102, + 114: 103, + 115: 104, + 116: 105, + 117: 106, + 118: 107, + 119: 108, + 120: 109, + 121: 110, + 122: 111, + 123: 112, + 124: 113, + 125: 114, + 126: 115, + 127: 116, + 128: 117, + 129: 118, + 130: 119, + 131: 120, + 132: 121, + 133: 122, + 134: 123, + 135: 124, + 136: 125, + 137: 126, + 138: 127, + 139: 128, + 140: 129, + 141: 130, + 142: 131, + 143: 132, + 144: 133, + 145: 134, + 146: 135, + 147: 136, + 148: 137, + 149: 138, + 150: 139, + 151: 140, + 152: 141, + 153: 142, + 154: 143, + 155: 144, + 156: 145, + 157: 146, + 158: 147, + 159: 148, + 160: 149, + 161: 150, + 162: 151, + 163: 152, + 164: 153, + 165: 154, + 166: 155, + 167: 156, + 168: 157, + 169: 158, + 170: 159, + 171: 160, + 172: 161, + 173: 162, + 174: 163, + 175: 164, + 176: 165, + 177: 166, + 178: 167, + 179: 168, + 180: 169, + 181: 170, + 255: 255 +} + + +def convert_to_trainID(maskpath, out_mask_dir, is_train): + mask = np.array(Image.open(maskpath)) + mask_copy = mask.copy() + for clsID, trID in clsID_to_trID.items(): + mask_copy[mask == clsID] = trID + seg_filename = osp.join( + out_mask_dir, 'train2017', + osp.basename(maskpath).split('.')[0] + + '_labelTrainIds.png') if is_train else osp.join( + out_mask_dir, 'val2017', + osp.basename(maskpath).split('.')[0] + '_labelTrainIds.png') + Image.fromarray(mask_copy).save(seg_filename, 'PNG') + + +def parse_args(): + parser = argparse.ArgumentParser( + description=\ + 'Convert COCO Stuff 164k annotations to mmsegmentation format') # noqa + parser.add_argument('coco_path', help='coco stuff path') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--nproc', default=16, type=int, help='number of process') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + coco_path = args.coco_path + nproc = args.nproc + + out_dir = args.out_dir or coco_path + out_img_dir = osp.join(out_dir, 'images') + out_mask_dir = osp.join(out_dir, 'annotations') + + mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'train2017')) + mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'val2017')) + + if out_dir != coco_path: + shutil.copytree(osp.join(coco_path, 'images'), out_img_dir) + + train_list = glob(osp.join(coco_path, 'annotations', 'train2017', '*.png')) + train_list = [file for file in train_list if '_labelTrainIds' not in file] + 
test_list = glob(osp.join(coco_path, 'annotations', 'val2017', '*.png')) + test_list = [file for file in test_list if '_labelTrainIds' not in file] + assert (len(train_list) + + len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format( + len(train_list), len(test_list)) + + if args.nproc > 1: + mmcv.track_parallel_progress( + partial( + convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True), + train_list, + nproc=nproc) + mmcv.track_parallel_progress( + partial( + convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False), + test_list, + nproc=nproc) + else: + mmcv.track_progress( + partial( + convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True), + train_list) + mmcv.track_progress( + partial( + convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False), + test_list) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/drive.py b/downstream/mmsegmentation/tools/convert_datasets/drive.py new file mode 100644 index 0000000..f547579 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/drive.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import tempfile +import zipfile + +import cv2 +import mmcv + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert DRIVE dataset to mmsegmentation format') + parser.add_argument( + 'training_path', help='the training part of DRIVE dataset') + parser.add_argument( + 'testing_path', help='the testing part of DRIVE dataset') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + training_path = args.training_path + testing_path = args.testing_path + if args.out_dir is None: + out_dir = osp.join('data', 'DRIVE') + else: + out_dir = args.out_dir + + print('Making directories...') + mmcv.mkdir_or_exist(out_dir) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + print('Extracting training.zip...') + zip_file = zipfile.ZipFile(training_path) + zip_file.extractall(tmp_dir) + + print('Generating training dataset...') + now_dir = osp.join(tmp_dir, 'training', 'images') + for img_name in os.listdir(now_dir): + img = mmcv.imread(osp.join(now_dir, img_name)) + mmcv.imwrite( + img, + osp.join( + out_dir, 'images', 'training', + osp.splitext(img_name)[0].replace('_training', '') + + '.png')) + + now_dir = osp.join(tmp_dir, 'training', '1st_manual') + for img_name in os.listdir(now_dir): + cap = cv2.VideoCapture(osp.join(now_dir, img_name)) + ret, img = cap.read() + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(img_name)[0] + '.png')) + + print('Extracting test.zip...') + zip_file = zipfile.ZipFile(testing_path) + zip_file.extractall(tmp_dir) + + print('Generating validation dataset...') + now_dir = osp.join(tmp_dir, 'test', 'images') + for img_name in os.listdir(now_dir): + img = mmcv.imread(osp.join(now_dir, img_name)) + mmcv.imwrite( + img, + osp.join( + 
out_dir, 'images', 'validation', + osp.splitext(img_name)[0].replace('_test', '') + '.png')) + + now_dir = osp.join(tmp_dir, 'test', '1st_manual') + if osp.exists(now_dir): + for img_name in os.listdir(now_dir): + cap = cv2.VideoCapture(osp.join(now_dir, img_name)) + ret, img = cap.read() + # The annotation img should be divided by 128, because some of + # the annotation imgs are not standard. We should set a + # threshold to convert the nonstandard annotation imgs. The + # value divided by 128 is equivalent to '1 if value >= 128 + # else 0' + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(img_name)[0] + '.png')) + + now_dir = osp.join(tmp_dir, 'test', '2nd_manual') + if osp.exists(now_dir): + for img_name in os.listdir(now_dir): + cap = cv2.VideoCapture(osp.join(now_dir, img_name)) + ret, img = cap.read() + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(img_name)[0] + '.png')) + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/hrf.py b/downstream/mmsegmentation/tools/convert_datasets/hrf.py new file mode 100644 index 0000000..5e016e3 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/hrf.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import tempfile +import zipfile + +import mmcv + +HRF_LEN = 15 +TRAINING_LEN = 5 + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert HRF dataset to mmsegmentation format') + parser.add_argument('healthy_path', help='the path of healthy.zip') + parser.add_argument( + 'healthy_manualsegm_path', help='the path of healthy_manualsegm.zip') + parser.add_argument('glaucoma_path', help='the path of glaucoma.zip') + parser.add_argument( + 'glaucoma_manualsegm_path', help='the path of glaucoma_manualsegm.zip') + parser.add_argument( + 'diabetic_retinopathy_path', + help='the path of diabetic_retinopathy.zip') + parser.add_argument( + 'diabetic_retinopathy_manualsegm_path', + help='the path of diabetic_retinopathy_manualsegm.zip') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + images_path = [ + args.healthy_path, args.glaucoma_path, args.diabetic_retinopathy_path + ] + annotations_path = [ + args.healthy_manualsegm_path, args.glaucoma_manualsegm_path, + args.diabetic_retinopathy_manualsegm_path + ] + if args.out_dir is None: + out_dir = osp.join('data', 'HRF') + else: + out_dir = args.out_dir + + print('Making directories...') + mmcv.mkdir_or_exist(out_dir) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) + + print('Generating images...') + for now_path in images_path: + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + zip_file = zipfile.ZipFile(now_path) + zip_file.extractall(tmp_dir) + + assert len(os.listdir(tmp_dir)) == HRF_LEN, \ + 'len(os.listdir(tmp_dir)) != {}'.format(HRF_LEN) + + for filename in 
sorted(os.listdir(tmp_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(tmp_dir, filename)) + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'training', + osp.splitext(filename)[0] + '.png')) + for filename in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(tmp_dir, filename)) + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Generating annotations...') + for now_path in annotations_path: + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + zip_file = zipfile.ZipFile(now_path) + zip_file.extractall(tmp_dir) + + assert len(os.listdir(tmp_dir)) == HRF_LEN, \ + 'len(os.listdir(tmp_dir)) != {}'.format(HRF_LEN) + + for filename in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(tmp_dir, filename)) + # The annotation img should be divided by 128, because some of + # the annotation imgs are not standard. We should set a + # threshold to convert the nonstandard annotation imgs. The + # value divided by 128 is equivalent to '1 if value >= 128 + # else 0' + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(filename)[0] + '.png')) + for filename in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(tmp_dir, filename)) + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/isaid.py b/downstream/mmsegmentation/tools/convert_datasets/isaid.py new file mode 100644 index 0000000..314fb89 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/isaid.py @@ -0,0 +1,245 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
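Aside on the retinal-vessel converters above (drive.py and hrf.py): the comment about dividing annotations by 128 can be verified directly. For 8-bit annotation values, integer division by 128 is exactly a threshold at 128, which is why nonstandard masks (with values other than 0/255) still binarize correctly. A quick standalone check:

```python
import numpy as np

# For uint8 annotation values, `value // 128` equals `1 if value >= 128 else 0`:
ann = np.array([0, 5, 127, 128, 200, 255], dtype=np.uint8)
binary = ann // 128
assert np.array_equal(binary, (ann >= 128).astype(np.uint8))
print(binary)  # [0 0 0 1 1 1]
```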
+import argparse +import glob +import os +import os.path as osp +import shutil +import tempfile +import zipfile + +import mmcv +import numpy as np +from PIL import Image + +iSAID_palette = \ + { + 0: (0, 0, 0), + 1: (0, 0, 63), + 2: (0, 63, 63), + 3: (0, 63, 0), + 4: (0, 63, 127), + 5: (0, 63, 191), + 6: (0, 63, 255), + 7: (0, 127, 63), + 8: (0, 127, 127), + 9: (0, 0, 127), + 10: (0, 0, 191), + 11: (0, 0, 255), + 12: (0, 191, 127), + 13: (0, 127, 191), + 14: (0, 127, 255), + 15: (0, 100, 155) + } + +iSAID_invert_palette = {v: k for k, v in iSAID_palette.items()} + + +def iSAID_convert_from_color(arr_3d, palette=iSAID_invert_palette): + """RGB-color encoding to grayscale labels.""" + arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8) + + for c, i in palette.items(): + m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2) + arr_2d[m] = i + + return arr_2d + + +def slide_crop_image(src_path, out_dir, mode, patch_H, patch_W, overlap): + img = np.asarray(Image.open(src_path).convert('RGB')) + + img_H, img_W, _ = img.shape + + if img_H < patch_H and img_W > patch_W: + + img = mmcv.impad(img, shape=(patch_H, img_W), pad_val=0) + + img_H, img_W, _ = img.shape + + elif img_H > patch_H and img_W < patch_W: + + img = mmcv.impad(img, shape=(img_H, patch_W), pad_val=0) + + img_H, img_W, _ = img.shape + + elif img_H < patch_H and img_W < patch_W: + + img = mmcv.impad(img, shape=(patch_H, patch_W), pad_val=0) + + img_H, img_W, _ = img.shape + + for x in range(0, img_W, patch_W - overlap): + for y in range(0, img_H, patch_H - overlap): + x_str = x + x_end = x + patch_W + if x_end > img_W: + diff_x = x_end - img_W + x_str -= diff_x + x_end = img_W + y_str = y + y_end = y + patch_H + if y_end > img_H: + diff_y = y_end - img_H + y_str -= diff_y + y_end = img_H + + img_patch = img[y_str:y_end, x_str:x_end, :] + img_patch = Image.fromarray(img_patch.astype(np.uint8)) + image = osp.basename(src_path).split('.')[0] + '_' + str( + y_str) + '_' + str(y_end) + '_' + str(x_str) + '_' + str( + x_end) + '.png' + # print(image) + save_path_image = osp.join(out_dir, 'img_dir', mode, str(image)) + img_patch.save(save_path_image) + + +def slide_crop_label(src_path, out_dir, mode, patch_H, patch_W, overlap): + label = mmcv.imread(src_path, channel_order='rgb') + label = iSAID_convert_from_color(label) + img_H, img_W = label.shape + + if img_H < patch_H and img_W > patch_W: + + label = mmcv.impad(label, shape=(patch_H, img_W), pad_val=255) + + img_H = patch_H + + elif img_H > patch_H and img_W < patch_W: + + label = mmcv.impad(label, shape=(img_H, patch_W), pad_val=255) + + img_W = patch_W + + elif img_H < patch_H and img_W < patch_W: + + label = mmcv.impad(label, shape=(patch_H, patch_W), pad_val=255) + + img_H = patch_H + img_W = patch_W + + for x in range(0, img_W, patch_W - overlap): + for y in range(0, img_H, patch_H - overlap): + x_str = x + x_end = x + patch_W + if x_end > img_W: + diff_x = x_end - img_W + x_str -= diff_x + x_end = img_W + y_str = y + y_end = y + patch_H + if y_end > img_H: + diff_y = y_end - img_H + y_str -= diff_y + y_end = img_H + + lab_patch = label[y_str:y_end, x_str:x_end] + lab_patch = Image.fromarray(lab_patch.astype(np.uint8), mode='P') + + image = osp.basename(src_path).split('.')[0].split( + '_')[0] + '_' + str(y_str) + '_' + str(y_end) + '_' + str( + x_str) + '_' + str(x_end) + '_instance_color_RGB' + '.png' + lab_patch.save(osp.join(out_dir, 'ann_dir', mode, str(image))) + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert iSAID 
dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='iSAID folder path') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + + parser.add_argument( + '--patch_width', + default=896, + type=int, + help='Width of the cropped image patch') + parser.add_argument( + '--patch_height', + default=896, + type=int, + help='Height of the cropped image patch') + parser.add_argument( + '--overlap_area', default=384, type=int, help='Overlap area') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + dataset_path = args.dataset_path + # image patch width and height + patch_H, patch_W = args.patch_width, args.patch_height + + overlap = args.overlap_area # overlap area + + if args.out_dir is None: + out_dir = osp.join('data', 'iSAID') + else: + out_dir = args.out_dir + + print('Making directories...') + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'test')) + + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'test')) + + assert os.path.exists(os.path.join(dataset_path, 'train')), \ + 'train is not in {}'.format(dataset_path) + assert os.path.exists(os.path.join(dataset_path, 'val')), \ + 'val is not in {}'.format(dataset_path) + assert os.path.exists(os.path.join(dataset_path, 'test')), \ + 'test is not in {}'.format(dataset_path) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + for dataset_mode in ['train', 'val', 'test']: + + # for dataset_mode in [ 'test']: + print('Extracting {}ing.zip...'.format(dataset_mode)) + img_zipp_list = glob.glob( + os.path.join(dataset_path, dataset_mode, 'images', '*.zip')) + print('Find the data', img_zipp_list) + for img_zipp in img_zipp_list: + zip_file = zipfile.ZipFile(img_zipp) + zip_file.extractall(os.path.join(tmp_dir, dataset_mode, 'img')) + src_path_list = glob.glob( + os.path.join(tmp_dir, dataset_mode, 'img', 'images', '*.png')) + + src_prog_bar = mmcv.ProgressBar(len(src_path_list)) + for i, img_path in enumerate(src_path_list): + if dataset_mode != 'test': + slide_crop_image(img_path, out_dir, dataset_mode, patch_H, + patch_W, overlap) + + else: + shutil.move(img_path, + os.path.join(out_dir, 'img_dir', dataset_mode)) + src_prog_bar.update() + + if dataset_mode != 'test': + label_zipp_list = glob.glob( + os.path.join(dataset_path, dataset_mode, 'Semantic_masks', + '*.zip')) + for label_zipp in label_zipp_list: + zip_file = zipfile.ZipFile(label_zipp) + zip_file.extractall( + os.path.join(tmp_dir, dataset_mode, 'lab')) + + lab_path_list = glob.glob( + os.path.join(tmp_dir, dataset_mode, 'lab', 'images', + '*.png')) + lab_prog_bar = mmcv.ProgressBar(len(lab_path_list)) + for i, lab_path in enumerate(lab_path_list): + slide_crop_label(lab_path, out_dir, dataset_mode, patch_H, + patch_W, overlap) + lab_prog_bar.update() + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/loveda.py b/downstream/mmsegmentation/tools/convert_datasets/loveda.py new file mode 100644 index 0000000..3a06268 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/loveda.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
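Aside on the iSAID converter above: `slide_crop_image` and `slide_crop_label` walk the image with a stride of `patch - overlap` and shift the last window back inside the image instead of padding it. The standalone sketch below reproduces only the coordinate logic (it is not called by the script) so the clamping behaviour is easy to inspect.

```python
def patch_coords(img_h, img_w, patch_h, patch_w, overlap):
    """Patch grid used by the iSAID slide-crop functions above."""
    coords = []
    for x in range(0, img_w, patch_w - overlap):
        for y in range(0, img_h, patch_h - overlap):
            x0, x1 = x, x + patch_w
            if x1 > img_w:                      # last column: shift window back
                x0, x1 = img_w - patch_w, img_w
            y0, y1 = y, y + patch_h
            if y1 > img_h:                      # last row: shift window back
                y0, y1 = img_h - patch_h, img_h
            coords.append((y0, y1, x0, x1))
    return coords


# A 1000x1000 image with 896x896 patches and 384 overlap gives windows
# starting at 0 and 512 per axis, with the second window clamped to 104..1000.
print(patch_coords(1000, 1000, 896, 896, 384))
```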
+import argparse +import os +import os.path as osp +import shutil +import tempfile +import zipfile + +import mmcv + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert LoveDA dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='LoveDA folder path') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + dataset_path = args.dataset_path + if args.out_dir is None: + out_dir = osp.join('data', 'loveDA') + else: + out_dir = args.out_dir + + print('Making directories...') + mmcv.mkdir_or_exist(out_dir) + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'test')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val')) + + assert 'Train.zip' in os.listdir(dataset_path), \ + 'Train.zip is not in {}'.format(dataset_path) + assert 'Val.zip' in os.listdir(dataset_path), \ + 'Val.zip is not in {}'.format(dataset_path) + assert 'Test.zip' in os.listdir(dataset_path), \ + 'Test.zip is not in {}'.format(dataset_path) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + for dataset in ['Train', 'Val', 'Test']: + zip_file = zipfile.ZipFile( + os.path.join(dataset_path, dataset + '.zip')) + zip_file.extractall(tmp_dir) + data_type = dataset.lower() + for location in ['Rural', 'Urban']: + for image_type in ['images_png', 'masks_png']: + if image_type == 'images_png': + dst = osp.join(out_dir, 'img_dir', data_type) + else: + dst = osp.join(out_dir, 'ann_dir', data_type) + if dataset == 'Test' and image_type == 'masks_png': + continue + else: + src_dir = osp.join(tmp_dir, dataset, location, + image_type) + src_lst = os.listdir(src_dir) + for file in src_lst: + shutil.move(osp.join(src_dir, file), dst) + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/pascal_context.py b/downstream/mmsegmentation/tools/convert_datasets/pascal_context.py new file mode 100644 index 0000000..03b79d5 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/pascal_context.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
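Aside on the LoveDA converter above: test masks are skipped on purpose (`Test` has no public annotations), so after conversion `ann_dir` should only contain `train` and `val`. A small, optional sanity check of the resulting layout, assuming the script's default output path:

```python
import os
import os.path as osp

out_dir = osp.join('data', 'loveDA')   # default output path used by the script
for sub in ('img_dir', 'ann_dir'):
    root = osp.join(out_dir, sub)
    if osp.isdir(root):
        for split in sorted(os.listdir(root)):
            n = len(os.listdir(osp.join(root, split)))
            print(f'{sub}/{split}: {n} files')  # expect no ann_dir/test split
```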
+import argparse +import os.path as osp +from functools import partial + +import mmcv +import numpy as np +from detail import Detail +from PIL import Image + +_mapping = np.sort( + np.array([ + 0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22, 23, 397, 25, 284, + 158, 159, 416, 33, 162, 420, 454, 295, 296, 427, 44, 45, 46, 308, 59, + 440, 445, 31, 232, 65, 354, 424, 68, 326, 72, 458, 34, 207, 80, 355, + 85, 347, 220, 349, 360, 98, 187, 104, 105, 366, 189, 368, 113, 115 + ])) +_key = np.array(range(len(_mapping))).astype('uint8') + + +def generate_labels(img_id, detail, out_dir): + + def _class_to_index(mask, _mapping, _key): + # assert the values + values = np.unique(mask) + for i in range(len(values)): + assert (values[i] in _mapping) + index = np.digitize(mask.ravel(), _mapping, right=True) + return _key[index].reshape(mask.shape) + + mask = Image.fromarray( + _class_to_index(detail.getMask(img_id), _mapping=_mapping, _key=_key)) + filename = img_id['file_name'] + mask.save(osp.join(out_dir, filename.replace('jpg', 'png'))) + return osp.splitext(osp.basename(filename))[0] + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert PASCAL VOC annotations to mmsegmentation format') + parser.add_argument('devkit_path', help='pascal voc devkit path') + parser.add_argument('json_path', help='annoation json filepath') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + devkit_path = args.devkit_path + if args.out_dir is None: + out_dir = osp.join(devkit_path, 'VOC2010', 'SegmentationClassContext') + else: + out_dir = args.out_dir + json_path = args.json_path + mmcv.mkdir_or_exist(out_dir) + img_dir = osp.join(devkit_path, 'VOC2010', 'JPEGImages') + + train_detail = Detail(json_path, img_dir, 'train') + train_ids = train_detail.getImgs() + + val_detail = Detail(json_path, img_dir, 'val') + val_ids = val_detail.getImgs() + + mmcv.mkdir_or_exist( + osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext')) + + train_list = mmcv.track_progress( + partial(generate_labels, detail=train_detail, out_dir=out_dir), + train_ids) + with open( + osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext', + 'train.txt'), 'w') as f: + f.writelines(line + '\n' for line in sorted(train_list)) + + val_list = mmcv.track_progress( + partial(generate_labels, detail=val_detail, out_dir=out_dir), val_ids) + with open( + osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext', + 'val.txt'), 'w') as f: + f.writelines(line + '\n' for line in sorted(val_list)) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/potsdam.py b/downstream/mmsegmentation/tools/convert_datasets/potsdam.py new file mode 100644 index 0000000..87e67d5 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/potsdam.py @@ -0,0 +1,157 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
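Aside on the PASCAL-Context converter above: `_class_to_index` relies on `np.digitize(..., right=True)` returning, for each pixel value, its position in the sorted `_mapping` array, so the sparse Detail class ids collapse onto contiguous train ids 0..59. A shortened illustration (the mapping here is truncated for readability):

```python
import numpy as np

_mapping = np.sort(np.array([0, 2, 9, 18, 19, 22]))       # shortened for the example
_key = np.arange(len(_mapping)).astype('uint8')

mask = np.array([[0, 2], [22, 9]])
index = np.digitize(mask.ravel(), _mapping, right=True)   # position of each id
print(_key[index].reshape(mask.shape))                    # -> [[0 1], [5 2]]
```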
+import argparse +import glob +import math +import os +import os.path as osp +import tempfile +import zipfile + +import mmcv +import numpy as np + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert potsdam dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='potsdam folder path') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--clip_size', + type=int, + help='clipped size of image after preparation', + default=512) + parser.add_argument( + '--stride_size', + type=int, + help='stride of clipping original images', + default=256) + args = parser.parse_args() + return args + + +def clip_big_image(image_path, clip_save_dir, args, to_label=False): + # Original image of Potsdam dataset is very large, thus pre-processing + # of them is adopted. Given fixed clip size and stride size to generate + # clipped image, the intersection of width and height is determined. + # For example, given one 5120 x 5120 original image, the clip size is + # 512 and stride size is 256, thus it would generate 20x20 = 400 images + # whose size are all 512x512. + image = mmcv.imread(image_path) + + h, w, c = image.shape + clip_size = args.clip_size + stride_size = args.stride_size + + num_rows = math.ceil((h - clip_size) / stride_size) if math.ceil( + (h - clip_size) / + stride_size) * stride_size + clip_size >= h else math.ceil( + (h - clip_size) / stride_size) + 1 + num_cols = math.ceil((w - clip_size) / stride_size) if math.ceil( + (w - clip_size) / + stride_size) * stride_size + clip_size >= w else math.ceil( + (w - clip_size) / stride_size) + 1 + + x, y = np.meshgrid(np.arange(num_cols + 1), np.arange(num_rows + 1)) + xmin = x * clip_size + ymin = y * clip_size + + xmin = xmin.ravel() + ymin = ymin.ravel() + xmin_offset = np.where(xmin + clip_size > w, w - xmin - clip_size, + np.zeros_like(xmin)) + ymin_offset = np.where(ymin + clip_size > h, h - ymin - clip_size, + np.zeros_like(ymin)) + boxes = np.stack([ + xmin + xmin_offset, ymin + ymin_offset, + np.minimum(xmin + clip_size, w), + np.minimum(ymin + clip_size, h) + ], + axis=1) + + if to_label: + color_map = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0], + [255, 255, 0], [0, 255, 0], [0, 255, 255], + [0, 0, 255]]) + flatten_v = np.matmul( + image.reshape(-1, c), + np.array([2, 3, 4]).reshape(3, 1)) + out = np.zeros_like(flatten_v) + for idx, class_color in enumerate(color_map): + value_idx = np.matmul(class_color, + np.array([2, 3, 4]).reshape(3, 1)) + out[flatten_v == value_idx] = idx + image = out.reshape(h, w) + + for box in boxes: + start_x, start_y, end_x, end_y = box + clipped_image = image[start_y:end_y, + start_x:end_x] if to_label else image[ + start_y:end_y, start_x:end_x, :] + idx_i, idx_j = osp.basename(image_path).split('_')[2:4] + mmcv.imwrite( + clipped_image.astype(np.uint8), + osp.join( + clip_save_dir, + f'{idx_i}_{idx_j}_{start_x}_{start_y}_{end_x}_{end_y}.png')) + + +def main(): + args = parse_args() + splits = { + 'train': [ + '2_10', '2_11', '2_12', '3_10', '3_11', '3_12', '4_10', '4_11', + '4_12', '5_10', '5_11', '5_12', '6_10', '6_11', '6_12', '6_7', + '6_8', '6_9', '7_10', '7_11', '7_12', '7_7', '7_8', '7_9' + ], + 'val': [ + '5_15', '6_15', '6_13', '3_13', '4_14', '6_14', '5_14', '2_13', + '4_15', '2_14', '5_13', '4_13', '3_14', '7_13' + ] + } + + dataset_path = args.dataset_path + if args.out_dir is None: + out_dir = osp.join('data', 'potsdam') + else: + out_dir = 
args.out_dir + + print('Making directories...') + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val')) + + zipp_list = glob.glob(os.path.join(dataset_path, '*.zip')) + print('Find the data', zipp_list) + + for zipp in zipp_list: + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + zip_file = zipfile.ZipFile(zipp) + zip_file.extractall(tmp_dir) + src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif')) + if not len(src_path_list): + sub_tmp_dir = os.path.join(tmp_dir, os.listdir(tmp_dir)[0]) + src_path_list = glob.glob(os.path.join(sub_tmp_dir, '*.tif')) + + prog_bar = mmcv.ProgressBar(len(src_path_list)) + for i, src_path in enumerate(src_path_list): + idx_i, idx_j = osp.basename(src_path).split('_')[2:4] + data_type = 'train' if f'{idx_i}_{idx_j}' in splits[ + 'train'] else 'val' + if 'label' in src_path: + dst_dir = osp.join(out_dir, 'ann_dir', data_type) + clip_big_image(src_path, dst_dir, args, to_label=True) + else: + dst_dir = osp.join(out_dir, 'img_dir', data_type) + clip_big_image(src_path, dst_dir, args, to_label=False) + prog_bar.update() + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/stare.py b/downstream/mmsegmentation/tools/convert_datasets/stare.py new file mode 100644 index 0000000..29b78c0 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/stare.py @@ -0,0 +1,166 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import gzip +import os +import os.path as osp +import tarfile +import tempfile + +import mmcv + +STARE_LEN = 20 +TRAINING_LEN = 10 + + +def un_gz(src, dst): + g_file = gzip.GzipFile(src) + with open(dst, 'wb+') as f: + f.write(g_file.read()) + g_file.close() + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert STARE dataset to mmsegmentation format') + parser.add_argument('image_path', help='the path of stare-images.tar') + parser.add_argument('labels_ah', help='the path of labels-ah.tar') + parser.add_argument('labels_vk', help='the path of labels-vk.tar') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + image_path = args.image_path + labels_ah = args.labels_ah + labels_vk = args.labels_vk + if args.out_dir is None: + out_dir = osp.join('data', 'STARE') + else: + out_dir = args.out_dir + + print('Making directories...') + mmcv.mkdir_or_exist(out_dir) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation')) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + mmcv.mkdir_or_exist(osp.join(tmp_dir, 'gz')) + mmcv.mkdir_or_exist(osp.join(tmp_dir, 'files')) + + print('Extracting stare-images.tar...') + with tarfile.open(image_path) as f: + f.extractall(osp.join(tmp_dir, 'gz')) + + for filename in os.listdir(osp.join(tmp_dir, 'gz')): + un_gz( + osp.join(tmp_dir, 'gz', filename), + osp.join(tmp_dir, 
'files', + osp.splitext(filename)[0])) + + now_dir = osp.join(tmp_dir, 'files') + + assert len(os.listdir(now_dir)) == STARE_LEN, \ + 'len(os.listdir(now_dir)) != {}'.format(STARE_LEN) + + for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'training', + osp.splitext(filename)[0] + '.png')) + + for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img, + osp.join(out_dir, 'images', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Removing the temporary files...') + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + mmcv.mkdir_or_exist(osp.join(tmp_dir, 'gz')) + mmcv.mkdir_or_exist(osp.join(tmp_dir, 'files')) + + print('Extracting labels-ah.tar...') + with tarfile.open(labels_ah) as f: + f.extractall(osp.join(tmp_dir, 'gz')) + + for filename in os.listdir(osp.join(tmp_dir, 'gz')): + un_gz( + osp.join(tmp_dir, 'gz', filename), + osp.join(tmp_dir, 'files', + osp.splitext(filename)[0])) + + now_dir = osp.join(tmp_dir, 'files') + + assert len(os.listdir(now_dir)) == STARE_LEN, \ + 'len(os.listdir(now_dir)) != {}'.format(STARE_LEN) + + for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(now_dir, filename)) + # The annotation img should be divided by 128, because some of + # the annotation imgs are not standard. We should set a threshold + # to convert the nonstandard annotation imgs. The value divided by + # 128 equivalent to '1 if value >= 128 else 0' + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(filename)[0] + '.png')) + + for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Removing the temporary files...') + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + mmcv.mkdir_or_exist(osp.join(tmp_dir, 'gz')) + mmcv.mkdir_or_exist(osp.join(tmp_dir, 'files')) + + print('Extracting labels-vk.tar...') + with tarfile.open(labels_vk) as f: + f.extractall(osp.join(tmp_dir, 'gz')) + + for filename in os.listdir(osp.join(tmp_dir, 'gz')): + un_gz( + osp.join(tmp_dir, 'gz', filename), + osp.join(tmp_dir, 'files', + osp.splitext(filename)[0])) + + now_dir = osp.join(tmp_dir, 'files') + + assert len(os.listdir(now_dir)) == STARE_LEN, \ + 'len(os.listdir(now_dir)) != {}'.format(STARE_LEN) + + for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'training', + osp.splitext(filename)[0] + '.png')) + + for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]: + img = mmcv.imread(osp.join(now_dir, filename)) + mmcv.imwrite( + img[:, :, 0] // 128, + osp.join(out_dir, 'annotations', 'validation', + osp.splitext(filename)[0] + '.png')) + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/vaihingen.py b/downstream/mmsegmentation/tools/convert_datasets/vaihingen.py new file mode 100644 index 0000000..b025ae5 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/vaihingen.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
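Aside on the STARE converter above: `un_gz` reads the whole decompressed file into memory before writing it out. For these small files that is fine; a streaming variant (a sketch, not used by the script) would look like this:

```python
import gzip
import shutil


def un_gz_streamed(src, dst):
    """Streaming alternative to un_gz: decompress without buffering in memory."""
    with gzip.open(src, 'rb') as f_in, open(dst, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
```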
+import argparse +import glob +import math +import os +import os.path as osp +import tempfile +import zipfile + +import mmcv +import numpy as np + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert vaihingen dataset to mmsegmentation format') + parser.add_argument('dataset_path', help='vaihingen folder path') + parser.add_argument('--tmp_dir', help='path of the temporary directory') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--clip_size', + type=int, + help='clipped size of image after preparation', + default=512) + parser.add_argument( + '--stride_size', + type=int, + help='stride of clipping original images', + default=256) + args = parser.parse_args() + return args + + +def clip_big_image(image_path, clip_save_dir, to_label=False): + # Original image of Vaihingen dataset is very large, thus pre-processing + # of them is adopted. Given fixed clip size and stride size to generate + # clipped image, the intersection of width and height is determined. + # For example, given one 5120 x 5120 original image, the clip size is + # 512 and stride size is 256, thus it would generate 20x20 = 400 images + # whose size are all 512x512. + image = mmcv.imread(image_path) + + h, w, c = image.shape + cs = args.clip_size + ss = args.stride_size + + num_rows = math.ceil((h - cs) / ss) if math.ceil( + (h - cs) / ss) * ss + cs >= h else math.ceil((h - cs) / ss) + 1 + num_cols = math.ceil((w - cs) / ss) if math.ceil( + (w - cs) / ss) * ss + cs >= w else math.ceil((w - cs) / ss) + 1 + + x, y = np.meshgrid(np.arange(num_cols + 1), np.arange(num_rows + 1)) + xmin = x * cs + ymin = y * cs + + xmin = xmin.ravel() + ymin = ymin.ravel() + xmin_offset = np.where(xmin + cs > w, w - xmin - cs, np.zeros_like(xmin)) + ymin_offset = np.where(ymin + cs > h, h - ymin - cs, np.zeros_like(ymin)) + boxes = np.stack([ + xmin + xmin_offset, ymin + ymin_offset, + np.minimum(xmin + cs, w), + np.minimum(ymin + cs, h) + ], + axis=1) + + if to_label: + color_map = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0], + [255, 255, 0], [0, 255, 0], [0, 255, 255], + [0, 0, 255]]) + flatten_v = np.matmul( + image.reshape(-1, c), + np.array([2, 3, 4]).reshape(3, 1)) + out = np.zeros_like(flatten_v) + for idx, class_color in enumerate(color_map): + value_idx = np.matmul(class_color, + np.array([2, 3, 4]).reshape(3, 1)) + out[flatten_v == value_idx] = idx + image = out.reshape(h, w) + + for box in boxes: + start_x, start_y, end_x, end_y = box + clipped_image = image[start_y:end_y, + start_x:end_x] if to_label else image[ + start_y:end_y, start_x:end_x, :] + area_idx = osp.basename(image_path).split('_')[3].strip('.tif') + mmcv.imwrite( + clipped_image.astype(np.uint8), + osp.join(clip_save_dir, + f'{area_idx}_{start_x}_{start_y}_{end_x}_{end_y}.png')) + + +def main(): + splits = { + 'train': [ + 'area1', 'area11', 'area13', 'area15', 'area17', 'area21', + 'area23', 'area26', 'area28', 'area3', 'area30', 'area32', + 'area34', 'area37', 'area5', 'area7' + ], + 'val': [ + 'area6', 'area24', 'area35', 'area16', 'area14', 'area22', + 'area10', 'area4', 'area2', 'area20', 'area8', 'area31', 'area33', + 'area27', 'area38', 'area12', 'area29' + ], + } + + dataset_path = args.dataset_path + if args.out_dir is None: + out_dir = osp.join('data', 'vaihingen') + else: + out_dir = args.out_dir + + print('Making directories...') + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val')) + mmcv.mkdir_or_exist(osp.join(out_dir, 
'ann_dir', 'train')) + mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val')) + + zipp_list = glob.glob(os.path.join(dataset_path, '*.zip')) + print('Find the data', zipp_list) + + with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir: + for zipp in zipp_list: + zip_file = zipfile.ZipFile(zipp) + zip_file.extractall(tmp_dir) + src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif')) + if 'ISPRS_semantic_labeling_Vaihingen' in zipp: + src_path_list = glob.glob( + os.path.join(os.path.join(tmp_dir, 'top'), '*.tif')) + if 'ISPRS_semantic_labeling_Vaihingen_ground_truth_eroded_COMPLETE' in zipp: # noqa + src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif')) + # delete unused area9 ground truth + for area_ann in src_path_list: + if 'area9' in area_ann: + src_path_list.remove(area_ann) + prog_bar = mmcv.ProgressBar(len(src_path_list)) + for i, src_path in enumerate(src_path_list): + area_idx = osp.basename(src_path).split('_')[3].strip('.tif') + data_type = 'train' if area_idx in splits['train'] else 'val' + if 'noBoundary' in src_path: + dst_dir = osp.join(out_dir, 'ann_dir', data_type) + clip_big_image(src_path, dst_dir, to_label=True) + else: + dst_dir = osp.join(out_dir, 'img_dir', data_type) + clip_big_image(src_path, dst_dir, to_label=False) + prog_bar.update() + + print('Removing the temporary files...') + + print('Done!') + + +if __name__ == '__main__': + args = parse_args() + main() diff --git a/downstream/mmsegmentation/tools/convert_datasets/voc_aug.py b/downstream/mmsegmentation/tools/convert_datasets/voc_aug.py new file mode 100644 index 0000000..1d42c27 --- /dev/null +++ b/downstream/mmsegmentation/tools/convert_datasets/voc_aug.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from functools import partial + +import mmcv +import numpy as np +from PIL import Image +from scipy.io import loadmat + +AUG_LEN = 10582 + + +def convert_mat(mat_file, in_dir, out_dir): + data = loadmat(osp.join(in_dir, mat_file)) + mask = data['GTcls'][0]['Segmentation'][0].astype(np.uint8) + seg_filename = osp.join(out_dir, mat_file.replace('.mat', '.png')) + Image.fromarray(mask).save(seg_filename, 'PNG') + + +def generate_aug_list(merged_list, excluded_list): + return list(set(merged_list) - set(excluded_list)) + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert PASCAL VOC annotations to mmsegmentation format') + parser.add_argument('devkit_path', help='pascal voc devkit path') + parser.add_argument('aug_path', help='pascal voc aug path') + parser.add_argument('-o', '--out_dir', help='output path') + parser.add_argument( + '--nproc', default=1, type=int, help='number of process') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + devkit_path = args.devkit_path + aug_path = args.aug_path + nproc = args.nproc + if args.out_dir is None: + out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug') + else: + out_dir = args.out_dir + mmcv.mkdir_or_exist(out_dir) + in_dir = osp.join(aug_path, 'dataset', 'cls') + + mmcv.track_parallel_progress( + partial(convert_mat, in_dir=in_dir, out_dir=out_dir), + list(mmcv.scandir(in_dir, suffix='.mat')), + nproc=nproc) + + full_aug_list = [] + with open(osp.join(aug_path, 'dataset', 'train.txt')) as f: + full_aug_list += [line.strip() for line in f] + with open(osp.join(aug_path, 'dataset', 'val.txt')) as f: + full_aug_list += [line.strip() for line in f] + + with open( + osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', + 
'train.txt')) as f: + ori_train_list = [line.strip() for line in f] + with open( + osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', + 'val.txt')) as f: + val_list = [line.strip() for line in f] + + aug_train_list = generate_aug_list(ori_train_list + full_aug_list, + val_list) + assert len(aug_train_list) == AUG_LEN, 'len(aug_train_list) != {}'.format( + AUG_LEN) + + with open( + osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', + 'trainaug.txt'), 'w') as f: + f.writelines(line + '\n' for line in aug_train_list) + + aug_list = generate_aug_list(full_aug_list, ori_train_list + val_list) + assert len(aug_list) == AUG_LEN - len( + ori_train_list), 'len(aug_list) != {}'.format(AUG_LEN - + len(ori_train_list)) + with open( + osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'), + 'w') as f: + f.writelines(line + '\n' for line in aug_list) + + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/deploy_test.py b/downstream/mmsegmentation/tools/deploy_test.py new file mode 100644 index 0000000..eca5430 --- /dev/null +++ b/downstream/mmsegmentation/tools/deploy_test.py @@ -0,0 +1,338 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import shutil +import warnings +from typing import Any, Iterable + +import mmcv +import numpy as np +import torch +from mmcv.parallel import MMDataParallel +from mmcv.runner import get_dist_info +from mmcv.utils import DictAction + +from mmseg.apis import single_gpu_test +from mmseg.datasets import build_dataloader, build_dataset +from mmseg.models.segmentors.base import BaseSegmentor +from mmseg.ops import resize + + +class ONNXRuntimeSegmentor(BaseSegmentor): + + def __init__(self, onnx_file: str, cfg: Any, device_id: int): + super(ONNXRuntimeSegmentor, self).__init__() + import onnxruntime as ort + + # get the custom op path + ort_custom_op_path = '' + try: + from mmcv.ops import get_onnxruntime_op_path + ort_custom_op_path = get_onnxruntime_op_path() + except (ImportError, ModuleNotFoundError): + warnings.warn('If input model has custom op from mmcv, \ + you may have to build mmcv with ONNXRuntime from source.') + session_options = ort.SessionOptions() + # register custom op for onnxruntime + if osp.exists(ort_custom_op_path): + session_options.register_custom_ops_library(ort_custom_op_path) + sess = ort.InferenceSession(onnx_file, session_options) + providers = ['CPUExecutionProvider'] + options = [{}] + is_cuda_available = ort.get_device() == 'GPU' + if is_cuda_available: + providers.insert(0, 'CUDAExecutionProvider') + options.insert(0, {'device_id': device_id}) + + sess.set_providers(providers, options) + + self.sess = sess + self.device_id = device_id + self.io_binding = sess.io_binding() + self.output_names = [_.name for _ in sess.get_outputs()] + for name in self.output_names: + self.io_binding.bind_output(name) + self.cfg = cfg + self.test_mode = cfg.model.test_cfg.mode + self.is_cuda_available = is_cuda_available + + def extract_feat(self, imgs): + raise NotImplementedError('This method is not implemented.') + + def encode_decode(self, img, img_metas): + raise NotImplementedError('This method is not implemented.') + + def forward_train(self, imgs, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def simple_test(self, img: torch.Tensor, img_meta: Iterable, + **kwargs) -> list: + if not self.is_cuda_available: + img = img.detach().cpu() + elif self.device_id >= 0: + img = 
img.cuda(self.device_id) + device_type = img.device.type + self.io_binding.bind_input( + name='input', + device_type=device_type, + device_id=self.device_id, + element_type=np.float32, + shape=img.shape, + buffer_ptr=img.data_ptr()) + self.sess.run_with_iobinding(self.io_binding) + seg_pred = self.io_binding.copy_outputs_to_cpu()[0] + # whole might support dynamic reshape + ori_shape = img_meta[0]['ori_shape'] + if not (ori_shape[0] == seg_pred.shape[-2] + and ori_shape[1] == seg_pred.shape[-1]): + seg_pred = torch.from_numpy(seg_pred).float() + seg_pred = resize( + seg_pred, size=tuple(ori_shape[:2]), mode='nearest') + seg_pred = seg_pred.long().detach().cpu().numpy() + seg_pred = seg_pred[0] + seg_pred = list(seg_pred) + return seg_pred + + def aug_test(self, imgs, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + +class TensorRTSegmentor(BaseSegmentor): + + def __init__(self, trt_file: str, cfg: Any, device_id: int): + super(TensorRTSegmentor, self).__init__() + from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin + try: + load_tensorrt_plugin() + except (ImportError, ModuleNotFoundError): + warnings.warn('If input model has custom op from mmcv, \ + you may have to build mmcv with TensorRT from source.') + model = TRTWraper( + trt_file, input_names=['input'], output_names=['output']) + + self.model = model + self.device_id = device_id + self.cfg = cfg + self.test_mode = cfg.model.test_cfg.mode + + def extract_feat(self, imgs): + raise NotImplementedError('This method is not implemented.') + + def encode_decode(self, img, img_metas): + raise NotImplementedError('This method is not implemented.') + + def forward_train(self, imgs, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def simple_test(self, img: torch.Tensor, img_meta: Iterable, + **kwargs) -> list: + with torch.cuda.device(self.device_id), torch.no_grad(): + seg_pred = self.model({'input': img})['output'] + seg_pred = seg_pred.detach().cpu().numpy() + # whole might support dynamic reshape + ori_shape = img_meta[0]['ori_shape'] + if not (ori_shape[0] == seg_pred.shape[-2] + and ori_shape[1] == seg_pred.shape[-1]): + seg_pred = torch.from_numpy(seg_pred).float() + seg_pred = resize( + seg_pred, size=tuple(ori_shape[:2]), mode='nearest') + seg_pred = seg_pred.long().detach().cpu().numpy() + seg_pred = seg_pred[0] + seg_pred = list(seg_pred) + return seg_pred + + def aug_test(self, imgs, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description='mmseg backend test (and eval)') + parser.add_argument('config', help='test config file path') + parser.add_argument('model', help='Input model file') + parser.add_argument( + '--backend', + help='Backend of the model.', + choices=['onnxruntime', 'tensorrt']) + parser.add_argument('--out', help='output result file in pickle format') + parser.add_argument( + '--format-only', + action='store_true', + help='Format the output results without perform evaluation. 
It is' + 'useful when you want to format the result to a specific format and ' + 'submit it to the test server') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., "mIoU"' + ' for generic datasets, and "cityscapes" for Cityscapes') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where painted images will be saved') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help="--options is deprecated in favor of --cfg_options' and it will " + 'not be supported in version v0.22.0. Override some settings in the ' + 'used config, the key-value pair in xxx=yyy format will be merged ' + 'into config file. If the value to be overwritten is a list, it ' + 'should be like key="[a,b]" or key=a,b It also allows nested ' + 'list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation ' + 'marks are necessary and that no white space is allowed.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation') + parser.add_argument( + '--opacity', + type=float, + default=0.5, + help='Opacity of painted segmentation map. In (0, 1] range.') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options. ' + '--options will not be supported in version v0.22.0.') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options. ' + '--options will not be supported in version v0.22.0.') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + assert args.out or args.eval or args.format_only or args.show \ + or args.show_dir, \ + ('Please specify at least one operation (save/eval/format/show the ' + 'results / save the results) with the argument "--out", "--eval"' + ', "--format-only", "--show" or "--show-dir"') + + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + # init distributed env first, since logger depends on the dist info. 
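Aside on `ONNXRuntimeSegmentor` above: it uses `io_binding` so the input tensor can stay on the GPU between PyTorch and ONNX Runtime. For a quick CPU-only sanity check of an exported model, plain `session.run` is enough; the file name and input shape below are assumptions for illustration.

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
input_name = sess.get_inputs()[0].name        # 'input' for models exported here
dummy = np.random.randn(1, 3, 512, 512).astype(np.float32)  # assumed input shape
seg = sess.run(None, {input_name: dummy})[0]
print(seg.shape)                              # output shape depends on the export
```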
+ distributed = False + + # build the dataloader + # TODO: support multiple images per gpu (only minor changes are needed) + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + samples_per_gpu=1, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False) + + # load onnx config and meta + cfg.model.train_cfg = None + + if args.backend == 'onnxruntime': + model = ONNXRuntimeSegmentor(args.model, cfg=cfg, device_id=0) + elif args.backend == 'tensorrt': + model = TensorRTSegmentor(args.model, cfg=cfg, device_id=0) + + model.CLASSES = dataset.CLASSES + model.PALETTE = dataset.PALETTE + + # clean gpu memory when starting a new evaluation. + torch.cuda.empty_cache() + eval_kwargs = {} if args.eval_options is None else args.eval_options + + # Deprecated + efficient_test = eval_kwargs.get('efficient_test', False) + if efficient_test: + warnings.warn( + '``efficient_test=True`` does not have effect in tools/test.py, ' + 'the evaluation and format results are CPU memory efficient by ' + 'default') + + eval_on_format_results = ( + args.eval is not None and 'cityscapes' in args.eval) + if eval_on_format_results: + assert len(args.eval) == 1, 'eval on format results is not ' \ + 'applicable for metrics other than ' \ + 'cityscapes' + if args.format_only or eval_on_format_results: + if 'imgfile_prefix' in eval_kwargs: + tmpdir = eval_kwargs['imgfile_prefix'] + else: + tmpdir = '.format_cityscapes' + eval_kwargs.setdefault('imgfile_prefix', tmpdir) + mmcv.mkdir_or_exist(tmpdir) + else: + tmpdir = None + + model = MMDataParallel(model, device_ids=[0]) + results = single_gpu_test( + model, + data_loader, + args.show, + args.show_dir, + False, + args.opacity, + pre_eval=args.eval is not None and not eval_on_format_results, + format_only=args.format_only or eval_on_format_results, + format_args=eval_kwargs) + + rank, _ = get_dist_info() + if rank == 0: + if args.out: + warnings.warn( + 'The behavior of ``args.out`` has been changed since MMSeg ' + 'v0.16, the pickled outputs could be seg map as type of ' + 'np.array, pre-eval results or file paths for ' + '``dataset.format_results()``.') + print(f'\nwriting results to {args.out}') + mmcv.dump(results, args.out) + if args.eval: + dataset.evaluate(results, args.eval, **eval_kwargs) + if tmpdir is not None and eval_on_format_results: + # remove tmp dir when cityscapes evaluation + shutil.rmtree(tmpdir) + + +if __name__ == '__main__': + main() + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. 
' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/downstream/mmsegmentation/tools/dist_test.sh b/downstream/mmsegmentation/tools/dist_test.sh new file mode 100755 index 0000000..89711fd --- /dev/null +++ b/downstream/mmsegmentation/tools/dist_test.sh @@ -0,0 +1,20 @@ +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/downstream/mmsegmentation/tools/dist_train.sh b/downstream/mmsegmentation/tools/dist_train.sh new file mode 100755 index 0000000..b76fbbb --- /dev/null +++ b/downstream/mmsegmentation/tools/dist_train.sh @@ -0,0 +1,18 @@ +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --seed 0 \ + --launcher pytorch ${@:3} diff --git a/downstream/mmsegmentation/tools/get_flops.py b/downstream/mmsegmentation/tools/get_flops.py new file mode 100644 index 0000000..d193a9e --- /dev/null +++ b/downstream/mmsegmentation/tools/get_flops.py @@ -0,0 +1,131 @@ +import argparse + +from mmcv import Config +from mmcv.cnn import get_model_complexity_info +from mmcv.cnn.utils.flops_counter import flops_to_string, params_to_string + +from mmseg.models import build_segmentor +import torch + +# Copyright (c) OpenMMLab. All rights reserved. 
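Aside on get_flops.py: the `sra_flops` helper defined just below counts the two matrix multiplications of spatial-reduction attention, `Q @ K^T` and `Attn @ V`, each costing `N1 * N2 * d_head` multiply-accumulates per head, where `N1 = H*W` is the token count and `N2 = (H/r)*(W/r)` the reduced token count. A worked example (an aside, not part of the script):

```python
H, W, r, dim, heads = 128, 128, 8, 64, 1
n1, n2, d_head = H * W, (H // r) * (W // r), dim // heads
flops = 2 * n1 * n2 * d_head * heads   # matches sra_flops(H, W, r, dim, heads)
print(flops)                           # 536870912 multiply-accumulates
```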
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Get the FLOPs of a segmentor')
+    parser.add_argument('config', help='train config file path')
+    parser.add_argument(
+        '--shape',
+        type=int,
+        nargs='+',
+        default=[2048, 1024],
+        help='input image size')
+    args = parser.parse_args()
+    return args
+
+
+def sra_flops(h, w, r, dim, num_heads):
+    dim_h = dim / num_heads
+    n1 = h * w
+    n2 = h / r * w / r
+
+    f1 = n1 * dim_h * n2 * num_heads
+    f2 = n1 * n2 * dim_h * num_heads
+
+    return f1 + f2
+
+
+def get_tr_flops(net, input_shape):
+    flops, params = get_model_complexity_info(
+        net, input_shape, as_strings=False)
+    _, H, W = input_shape
+    net = net.backbone
+    try:
+        stage1 = sra_flops(H // 4, W // 4,
+                           net.block1[0].attn.sr_ratio,
+                           net.block1[0].attn.dim,
+                           net.block1[0].attn.num_heads) * len(net.block1)
+        stage2 = sra_flops(H // 8, W // 8,
+                           net.block2[0].attn.sr_ratio,
+                           net.block2[0].attn.dim,
+                           net.block2[0].attn.num_heads) * len(net.block2)
+        stage3 = sra_flops(H // 16, W // 16,
+                           net.block3[0].attn.sr_ratio,
+                           net.block3[0].attn.dim,
+                           net.block3[0].attn.num_heads) * len(net.block3)
+        stage4 = sra_flops(H // 32, W // 32,
+                           net.block4[0].attn.sr_ratio,
+                           net.block4[0].attn.dim,
+                           net.block4[0].attn.num_heads) * len(net.block4)
+    except AttributeError:
+        # Fall back to fixed embedding dims for backbones whose attention
+        # modules expose `squeeze_ratio` instead of `sr_ratio`/`dim`.
+        stage1 = sra_flops(H // 4, W // 4,
+                           net.block1[0].attn.squeeze_ratio,
+                           64,
+                           net.block1[0].attn.num_heads) * len(net.block1)
+        stage2 = sra_flops(H // 8, W // 8,
+                           net.block2[0].attn.squeeze_ratio,
+                           128,
+                           net.block2[0].attn.num_heads) * len(net.block2)
+        stage3 = sra_flops(H // 16, W // 16,
+                           net.block3[0].attn.squeeze_ratio,
+                           320,
+                           net.block3[0].attn.num_heads) * len(net.block3)
+        stage4 = sra_flops(H // 32, W // 32,
+                           net.block4[0].attn.squeeze_ratio,
+                           512,
+                           net.block4[0].attn.num_heads) * len(net.block4)
+
+    print(stage1 + stage2 + stage3 + stage4)  # extra attention FLOPs
+    flops += stage1 + stage2 + stage3 + stage4
+    return flops_to_string(flops), params_to_string(params)
+
+
+def main():
+
+    args = parse_args()
+
+    if len(args.shape) == 1:
+        input_shape = (3, args.shape[0], args.shape[0])
+    elif len(args.shape) == 2:
+        input_shape = (3, ) + tuple(args.shape)
+    else:
+        raise ValueError('invalid input shape')
+
+    cfg = Config.fromfile(args.config)
+    cfg.model.pretrained = None
+    model = build_segmentor(
+        cfg.model,
+        train_cfg=cfg.get('train_cfg'),
+        test_cfg=cfg.get('test_cfg')).cuda()
+    model.eval()
+
+    if hasattr(model, 'forward_dummy'):
+        model.forward = model.forward_dummy
+    else:
+        raise NotImplementedError(
+            'FLOPs counter is currently not supported with {}'.format(
+                model.__class__.__name__))
+
+    # from IPython import embed; embed()
+    # if hasattr(model.backbone, 'block1'):
+    #     print('#### get transformer flops ####')
+    #     with torch.no_grad():
+    #         flops, params = get_tr_flops(model, input_shape)
+    # else:
+    #     print('#### get CNN flops ####')
+    #     flops, params = get_model_complexity_info(model, input_shape)
+    flops, params = get_tr_flops(model, input_shape)
+
+    split_line = '=' * 30
+    print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
+        split_line, input_shape, flops, params))
+    print('!!!Please be cautious if you use the results in papers. 
' + 'You may need to check if all ops are supported and verify that the ' + 'flops computation is correct.') + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/downstream/mmsegmentation/tools/model_converters/beit2mmseg.py b/downstream/mmsegmentation/tools/model_converters/beit2mmseg.py new file mode 100644 index 0000000..d23cfdb --- /dev/null +++ b/downstream/mmsegmentation/tools/model_converters/beit2mmseg.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_beit(ckpt): + new_ckpt = OrderedDict() + + for k, v in ckpt.items(): + if k.startswith('patch_embed'): + new_key = k.replace('patch_embed.proj', 'patch_embed.projection') + new_ckpt[new_key] = v + if k.startswith('blocks'): + new_key = k.replace('blocks', 'layers') + if 'norm' in new_key: + new_key = new_key.replace('norm', 'ln') + elif 'mlp.fc1' in new_key: + new_key = new_key.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in new_key: + new_key = new_key.replace('mlp.fc2', 'ffn.layers.1') + new_ckpt[new_key] = v + else: + new_key = k + new_ckpt[new_key] = v + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained beit models to' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + weight = convert_beit(state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/model_converters/mit2mmseg.py b/downstream/mmsegmentation/tools/model_converters/mit2mmseg.py new file mode 100644 index 0000000..2eff1f7 --- /dev/null +++ b/downstream/mmsegmentation/tools/model_converters/mit2mmseg.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_mit(ckpt): + new_ckpt = OrderedDict() + # Process the concat between q linear weights and kv linear weights + for k, v in ckpt.items(): + if k.startswith('head'): + continue + # patch embedding conversion + elif k.startswith('patch_embed'): + stage_i = int(k.split('.')[0].replace('patch_embed', '')) + new_k = k.replace(f'patch_embed{stage_i}', f'layers.{stage_i-1}.0') + new_v = v + if 'proj.' in new_k: + new_k = new_k.replace('proj.', 'projection.') + # transformer encoder layer conversion + elif k.startswith('block'): + stage_i = int(k.split('.')[0].replace('block', '')) + new_k = k.replace(f'block{stage_i}', f'layers.{stage_i-1}.1') + new_v = v + if 'attn.q.' in new_k: + sub_item_k = k.replace('q.', 'kv.') + new_k = new_k.replace('q.', 'attn.in_proj_') + new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) + elif 'attn.kv.' in new_k: + continue + elif 'attn.proj.' in new_k: + new_k = new_k.replace('proj.', 'attn.out_proj.') + elif 'attn.sr.' in new_k: + new_k = new_k.replace('sr.', 'sr.') + elif 'mlp.' 
in new_k: + string = f'{new_k}-' + new_k = new_k.replace('mlp.', 'ffn.layers.') + if 'fc1.weight' in new_k or 'fc2.weight' in new_k: + new_v = v.reshape((*v.shape, 1, 1)) + new_k = new_k.replace('fc1.', '0.') + new_k = new_k.replace('dwconv.dwconv.', '1.') + new_k = new_k.replace('fc2.', '4.') + string += f'{new_k} {v.shape}-{new_v.shape}' + # norm layer conversion + elif k.startswith('norm'): + stage_i = int(k.split('.')[0].replace('norm', '')) + new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i-1}.2') + new_v = v + else: + new_k = k + new_v = v + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained segformer to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + weight = convert_mit(state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/model_converters/stdc2mmseg.py b/downstream/mmsegmentation/tools/model_converters/stdc2mmseg.py new file mode 100644 index 0000000..9241f86 --- /dev/null +++ b/downstream/mmsegmentation/tools/model_converters/stdc2mmseg.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_stdc(ckpt, stdc_type): + new_state_dict = {} + if stdc_type == 'STDC1': + stage_lst = ['0', '1', '2.0', '2.1', '3.0', '3.1', '4.0', '4.1'] + else: + stage_lst = [ + '0', '1', '2.0', '2.1', '2.2', '2.3', '3.0', '3.1', '3.2', '3.3', + '3.4', '4.0', '4.1', '4.2' + ] + for k, v in ckpt.items(): + ori_k = k + flag = False + if 'cp.' in k: + k = k.replace('cp.', '') + if 'features.' in k: + num_layer = int(k.split('.')[1]) + feature_key_lst = 'features.' + str(num_layer) + '.' + stages_key_lst = 'stages.' + stage_lst[num_layer] + '.' + k = k.replace(feature_key_lst, stages_key_lst) + flag = True + if 'conv_list' in k: + k = k.replace('conv_list', 'layers') + flag = True + if 'avd_layer.' in k: + if 'avd_layer.0' in k: + k = k.replace('avd_layer.0', 'downsample.conv') + elif 'avd_layer.1' in k: + k = k.replace('avd_layer.1', 'downsample.bn') + flag = True + if flag: + new_state_dict[k] = ckpt[ori_k] + + return new_state_dict + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained STDC1/2 to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + parser.add_argument('type', help='model type: STDC1 or STDC2') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + assert args.type in ['STDC1', + 'STDC2'], 'STD type should be STDC1 or STDC2!' 
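+    # Illustrative invocation (paths are placeholders, not files from this
+    # repo):
+    #   python tools/model_converters/stdc2mmseg.py stdc1_official.pth \
+    #       pretrain/stdc1_mmseg.pth STDC1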
+ weight = convert_stdc(state_dict, args.type) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/model_converters/swin2mmseg.py b/downstream/mmsegmentation/tools/model_converters/swin2mmseg.py new file mode 100644 index 0000000..03b24ce --- /dev/null +++ b/downstream/mmsegmentation/tools/model_converters/swin2mmseg.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_swin(ckpt): + new_ckpt = OrderedDict() + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + for k, v in ckpt.items(): + if k.startswith('head'): + continue + elif k.startswith('layers'): + new_v = v + if 'attn.' in k: + new_k = k.replace('attn.', 'attn.w_msa.') + elif 'mlp.' in k: + if 'mlp.fc1.' in k: + new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') + elif 'mlp.fc2.' in k: + new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') + else: + new_k = k.replace('mlp.', 'ffn.') + elif 'downsample' in k: + new_k = k + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(v) + elif 'norm.' in k: + new_v = correct_unfold_norm_order(v) + else: + new_k = k + new_k = new_k.replace('layers', 'stages', 1) + elif k.startswith('patch_embed'): + new_v = v + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + else: + new_v = v + new_k = k + + new_ckpt[new_k] = new_v + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained swin models to' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + weight = convert_swin(state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/model_converters/twins2mmseg.py b/downstream/mmsegmentation/tools/model_converters/twins2mmseg.py new file mode 100644 index 0000000..ab64aa5 --- /dev/null +++ b/downstream/mmsegmentation/tools/model_converters/twins2mmseg.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_twins(args, ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + continue + elif k.startswith('patch_embeds'): + if 'proj.' in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('blocks'): + # Union + if 'attn.q.' 
in k: + new_k = k.replace('q.', 'attn.in_proj_') + new_v = torch.cat([v, ckpt[k.replace('attn.q.', 'attn.kv.')]], + dim=0) + elif 'mlp.fc1' in k: + new_k = k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = k.replace('mlp.fc2', 'ffn.layers.1') + # Only pcpvt + elif args.model == 'pcpvt': + if 'attn.proj.' in k: + new_k = k.replace('proj.', 'attn.out_proj.') + else: + new_k = k + + # Only svt + else: + if 'attn.proj.' in k: + k_lst = k.split('.') + if int(k_lst[2]) % 2 == 1: + new_k = k.replace('proj.', 'attn.out_proj.') + else: + new_k = k + else: + new_k = k + new_k = new_k.replace('blocks.', 'layers.') + elif k.startswith('pos_block'): + new_k = k.replace('pos_block', 'position_encodings') + if 'proj.0.' in new_k: + new_k = new_k.replace('proj.0.', 'proj.') + else: + new_k = k + if 'attn.kv.' not in k: + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in timm pretrained vit models to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + parser.add_argument('model', help='model: pcpvt or svt') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + # timm checkpoint + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_twins(args, state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/model_converters/vit2mmseg.py b/downstream/mmsegmentation/tools/model_converters/vit2mmseg.py new file mode 100644 index 0000000..bc18ebe --- /dev/null +++ b/downstream/mmsegmentation/tools/model_converters/vit2mmseg.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_vit(ckpt): + + new_ckpt = OrderedDict() + + for k, v in ckpt.items(): + if k.startswith('head'): + continue + if k.startswith('norm'): + new_k = k.replace('norm.', 'ln1.') + elif k.startswith('patch_embed'): + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + elif k.startswith('blocks'): + if 'norm' in k: + new_k = k.replace('norm', 'ln') + elif 'mlp.fc1' in k: + new_k = k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = k.replace('mlp.fc2', 'ffn.layers.1') + elif 'attn.qkv' in k: + new_k = k.replace('attn.qkv.', 'attn.attn.in_proj_') + elif 'attn.proj' in k: + new_k = k.replace('attn.proj', 'attn.attn.out_proj') + else: + new_k = k + new_k = new_k.replace('blocks.', 'layers.') + else: + new_k = k + new_ckpt[new_k] = v + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in timm pretrained vit models to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. 
+ parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + # timm checkpoint + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + # deit checkpoint + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + weight = convert_vit(state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/model_converters/vitjax2mmseg.py b/downstream/mmsegmentation/tools/model_converters/vitjax2mmseg.py new file mode 100644 index 0000000..585f408 --- /dev/null +++ b/downstream/mmsegmentation/tools/model_converters/vitjax2mmseg.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp + +import mmcv +import numpy as np +import torch + + +def vit_jax_to_torch(jax_weights, num_layer=12): + torch_weights = dict() + + # patch embedding + conv_filters = jax_weights['embedding/kernel'] + conv_filters = conv_filters.permute(3, 2, 0, 1) + torch_weights['patch_embed.projection.weight'] = conv_filters + torch_weights['patch_embed.projection.bias'] = jax_weights[ + 'embedding/bias'] + + # pos embedding + torch_weights['pos_embed'] = jax_weights[ + 'Transformer/posembed_input/pos_embedding'] + + # cls token + torch_weights['cls_token'] = jax_weights['cls'] + + # head + torch_weights['ln1.weight'] = jax_weights['Transformer/encoder_norm/scale'] + torch_weights['ln1.bias'] = jax_weights['Transformer/encoder_norm/bias'] + + # transformer blocks + for i in range(num_layer): + jax_block = f'Transformer/encoderblock_{i}' + torch_block = f'layers.{i}' + + # attention norm + torch_weights[f'{torch_block}.ln1.weight'] = jax_weights[ + f'{jax_block}/LayerNorm_0/scale'] + torch_weights[f'{torch_block}.ln1.bias'] = jax_weights[ + f'{jax_block}/LayerNorm_0/bias'] + + # attention + query_weight = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/query/kernel'] + query_bias = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/query/bias'] + key_weight = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/key/kernel'] + key_bias = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/key/bias'] + value_weight = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/value/kernel'] + value_bias = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/value/bias'] + + qkv_weight = torch.from_numpy( + np.stack((query_weight, key_weight, value_weight), 1)) + qkv_weight = torch.flatten(qkv_weight, start_dim=1) + qkv_bias = torch.from_numpy( + np.stack((query_bias, key_bias, value_bias), 0)) + qkv_bias = torch.flatten(qkv_bias, start_dim=0) + + torch_weights[f'{torch_block}.attn.attn.in_proj_weight'] = qkv_weight + torch_weights[f'{torch_block}.attn.attn.in_proj_bias'] = qkv_bias + to_out_weight = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/out/kernel'] + to_out_weight = torch.flatten(to_out_weight, start_dim=0, end_dim=1) + torch_weights[ + f'{torch_block}.attn.attn.out_proj.weight'] = to_out_weight + torch_weights[f'{torch_block}.attn.attn.out_proj.bias'] = jax_weights[ + f'{jax_block}/MultiHeadDotProductAttention_1/out/bias'] + + # mlp norm + torch_weights[f'{torch_block}.ln2.weight'] = jax_weights[ + f'{jax_block}/LayerNorm_2/scale'] + torch_weights[f'{torch_block}.ln2.bias'] = jax_weights[ + f'{jax_block}/LayerNorm_2/bias'] + + # mlp + 
torch_weights[f'{torch_block}.ffn.layers.0.0.weight'] = jax_weights[ + f'{jax_block}/MlpBlock_3/Dense_0/kernel'] + torch_weights[f'{torch_block}.ffn.layers.0.0.bias'] = jax_weights[ + f'{jax_block}/MlpBlock_3/Dense_0/bias'] + torch_weights[f'{torch_block}.ffn.layers.1.weight'] = jax_weights[ + f'{jax_block}/MlpBlock_3/Dense_1/kernel'] + torch_weights[f'{torch_block}.ffn.layers.1.bias'] = jax_weights[ + f'{jax_block}/MlpBlock_3/Dense_1/bias'] + + # transpose weights + for k, v in torch_weights.items(): + if 'weight' in k and 'patch_embed' not in k and 'ln' not in k: + v = v.permute(1, 0) + torch_weights[k] = v + + return torch_weights + + +def main(): + # stole refactoring code from Robin Strudel, thanks + parser = argparse.ArgumentParser( + description='Convert keys from jax official pretrained vit models to ' + 'MMSegmentation style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + jax_weights = np.load(args.src) + jax_weights_tensor = {} + for key in jax_weights.files: + value = torch.from_numpy(jax_weights[key]) + jax_weights_tensor[key] = value + if 'L_16-i21k' in args.src: + num_layer = 24 + else: + num_layer = 12 + torch_weights = vit_jax_to_torch(jax_weights_tensor, num_layer) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(torch_weights, args.dst) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/onnx2tensorrt.py b/downstream/mmsegmentation/tools/onnx2tensorrt.py new file mode 100644 index 0000000..0f60dce --- /dev/null +++ b/downstream/mmsegmentation/tools/onnx2tensorrt.py @@ -0,0 +1,289 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import warnings +from typing import Iterable, Optional, Union + +import matplotlib.pyplot as plt +import mmcv +import numpy as np +import onnxruntime as ort +import torch +from mmcv.ops import get_onnxruntime_op_path +from mmcv.tensorrt import (TRTWraper, is_tensorrt_plugin_loaded, onnx2trt, + save_trt_engine) + +from mmseg.apis.inference import LoadImage +from mmseg.datasets import DATASETS +from mmseg.datasets.pipelines import Compose + + +def get_GiB(x: int): + """return x GiB.""" + return x * (1 << 30) + + +def _prepare_input_img(img_path: str, + test_pipeline: Iterable[dict], + shape: Optional[Iterable] = None, + rescale_shape: Optional[Iterable] = None) -> dict: + # build the data pipeline + if shape is not None: + test_pipeline[1]['img_scale'] = (shape[1], shape[0]) + test_pipeline[1]['transforms'][0]['keep_ratio'] = False + test_pipeline = [LoadImage()] + test_pipeline[1:] + test_pipeline = Compose(test_pipeline) + # prepare data + data = dict(img=img_path) + data = test_pipeline(data) + imgs = data['img'] + img_metas = [i.data for i in data['img_metas']] + + if rescale_shape is not None: + for img_meta in img_metas: + img_meta['ori_shape'] = tuple(rescale_shape) + (3, ) + + mm_inputs = {'imgs': imgs, 'img_metas': img_metas} + + return mm_inputs + + +def _update_input_img(img_list: Iterable, img_meta_list: Iterable): + # update img and its meta list + N = img_list[0].size(0) + img_meta = img_meta_list[0][0] + img_shape = img_meta['img_shape'] + ori_shape = img_meta['ori_shape'] + pad_shape = img_meta['pad_shape'] + new_img_meta_list = [[{ + 'img_shape': + img_shape, + 'ori_shape': + ori_shape, + 'pad_shape': + pad_shape, + 'filename': + img_meta['filename'], + 'scale_factor': + (img_shape[1] / 
ori_shape[1], img_shape[0] / ori_shape[0]) * 2, + 'flip': + False, + } for _ in range(N)]] + + return img_list, new_img_meta_list + + +def show_result_pyplot(img: Union[str, np.ndarray], + result: np.ndarray, + palette: Optional[Iterable] = None, + fig_size: Iterable[int] = (15, 10), + opacity: float = 0.5, + title: str = '', + block: bool = True): + img = mmcv.imread(img) + img = img.copy() + seg = result[0] + seg = mmcv.imresize(seg, img.shape[:2][::-1]) + palette = np.array(palette) + assert palette.shape[1] == 3 + assert len(palette.shape) == 2 + assert 0 < opacity <= 1.0 + color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) + for label, color in enumerate(palette): + color_seg[seg == label, :] = color + # convert to BGR + color_seg = color_seg[..., ::-1] + + img = img * (1 - opacity) + color_seg * opacity + img = img.astype(np.uint8) + + plt.figure(figsize=fig_size) + plt.imshow(mmcv.bgr2rgb(img)) + plt.title(title) + plt.tight_layout() + plt.show(block=block) + + +def onnx2tensorrt(onnx_file: str, + trt_file: str, + config: dict, + input_config: dict, + fp16: bool = False, + verify: bool = False, + show: bool = False, + dataset: str = 'CityscapesDataset', + workspace_size: int = 1, + verbose: bool = False): + import tensorrt as trt + min_shape = input_config['min_shape'] + max_shape = input_config['max_shape'] + # create trt engine and wrapper + opt_shape_dict = {'input': [min_shape, min_shape, max_shape]} + max_workspace_size = get_GiB(workspace_size) + trt_engine = onnx2trt( + onnx_file, + opt_shape_dict, + log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR, + fp16_mode=fp16, + max_workspace_size=max_workspace_size) + save_dir, _ = osp.split(trt_file) + if save_dir: + os.makedirs(save_dir, exist_ok=True) + save_trt_engine(trt_engine, trt_file) + print(f'Successfully created TensorRT engine: {trt_file}') + + if verify: + inputs = _prepare_input_img( + input_config['input_path'], + config.data.test.pipeline, + shape=min_shape[2:]) + + imgs = inputs['imgs'] + img_metas = inputs['img_metas'] + img_list = [img[None, :] for img in imgs] + img_meta_list = [[img_meta] for img_meta in img_metas] + # update img_meta + img_list, img_meta_list = _update_input_img(img_list, img_meta_list) + + if max_shape[0] > 1: + # concate flip image for batch test + flip_img_list = [_.flip(-1) for _ in img_list] + img_list = [ + torch.cat((ori_img, flip_img), 0) + for ori_img, flip_img in zip(img_list, flip_img_list) + ] + + # Get results from ONNXRuntime + ort_custom_op_path = get_onnxruntime_op_path() + session_options = ort.SessionOptions() + if osp.exists(ort_custom_op_path): + session_options.register_custom_ops_library(ort_custom_op_path) + sess = ort.InferenceSession(onnx_file, session_options) + sess.set_providers(['CPUExecutionProvider'], [{}]) # use cpu mode + onnx_output = sess.run(['output'], + {'input': img_list[0].detach().numpy()})[0][0] + + # Get results from TensorRT + trt_model = TRTWraper(trt_file, ['input'], ['output']) + with torch.no_grad(): + trt_outputs = trt_model({'input': img_list[0].contiguous().cuda()}) + trt_output = trt_outputs['output'][0].cpu().detach().numpy() + + if show: + dataset = DATASETS.get(dataset) + assert dataset is not None + palette = dataset.PALETTE + + show_result_pyplot( + input_config['input_path'], + (onnx_output[0].astype(np.uint8), ), + palette=palette, + title='ONNXRuntime', + block=False) + show_result_pyplot( + input_config['input_path'], (trt_output[0].astype(np.uint8), ), + palette=palette, + title='TensorRT') + + 
np.testing.assert_allclose( + onnx_output, trt_output, rtol=1e-03, atol=1e-05) + print('TensorRT and ONNXRuntime output all close.') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMSegmentation models from ONNX to TensorRT') + parser.add_argument('config', help='Config file of the model') + parser.add_argument('model', help='Path to the input ONNX model') + parser.add_argument( + '--trt-file', type=str, help='Path to the output TensorRT engine') + parser.add_argument( + '--max-shape', + type=int, + nargs=4, + default=[1, 3, 400, 600], + help='Maximum shape of model input.') + parser.add_argument( + '--min-shape', + type=int, + nargs=4, + default=[1, 3, 400, 600], + help='Minimum shape of model input.') + parser.add_argument('--fp16', action='store_true', help='Enable fp16 mode') + parser.add_argument( + '--workspace-size', + type=int, + default=1, + help='Max workspace size in GiB') + parser.add_argument( + '--input-img', type=str, default='', help='Image for test') + parser.add_argument( + '--show', action='store_true', help='Whether to show output results') + parser.add_argument( + '--dataset', + type=str, + default='CityscapesDataset', + help='Dataset name') + parser.add_argument( + '--verify', + action='store_true', + help='Verify the outputs of ONNXRuntime and TensorRT') + parser.add_argument( + '--verbose', + action='store_true', + help='Whether to verbose logging messages while creating \ + TensorRT engine.') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + + assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.' + args = parse_args() + + if not args.input_img: + args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.png') + + # check arguments + assert osp.exists(args.config), 'Config {} not found.'.format(args.config) + assert osp.exists(args.model), \ + 'ONNX model {} not found.'.format(args.model) + assert args.workspace_size >= 0, 'Workspace size less than 0.' + assert DATASETS.get(args.dataset) is not None, \ + 'Dataset {} does not found.'.format(args.dataset) + for max_value, min_value in zip(args.max_shape, args.min_shape): + assert max_value >= min_value, \ + 'max_shape should be larger than min shape' + + input_config = { + 'min_shape': args.min_shape, + 'max_shape': args.max_shape, + 'input_path': args.input_img + } + + cfg = mmcv.Config.fromfile(args.config) + onnx2tensorrt( + args.model, + args.trt_file, + cfg, + input_config, + fp16=args.fp16, + verify=args.verify, + show=args.show, + dataset=args.dataset, + workspace_size=args.workspace_size, + verbose=args.verbose) + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. ' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/downstream/mmsegmentation/tools/print_config.py b/downstream/mmsegmentation/tools/print_config.py new file mode 100644 index 0000000..3f9c08d --- /dev/null +++ b/downstream/mmsegmentation/tools/print_config.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
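+# Illustrative usage (the config path is a placeholder):
+#   python tools/print_config.py configs/my_config.py --graph \
+#       --cfg-options model.pretrained=None
+# The resolved config is printed and also dumped to ``example.py``;
+# ``--graph`` additionally prints the model and writes ``example-graph.txt``.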
+import argparse +import warnings + +from mmcv import Config, DictAction + +from mmseg.apis import init_segmentor + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--graph', action='store_true', help='print the models graph') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help="--options is deprecated in favor of --cfg_options' and it will " + 'not be supported in version v0.22.0. Override some settings in the ' + 'used config, the key-value pair in xxx=yyy format will be merged ' + 'into config file. If the value to be overwritten is a list, it ' + 'should be like key="[a,b]" or key=a,b It also allows nested ' + 'list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation ' + 'marks are necessary and that no white space is allowed.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options. ' + '--options will not be supported in version v0.22.0.') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options, ' + '--options will not be supported in version v0.22.0.') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + print(f'Config:\n{cfg.pretty_text}') + # dump config + cfg.dump('example.py') + # dump models graph + if args.graph: + model = init_segmentor(args.config, device='cpu') + print(f'Model graph:\n{str(model)}') + with open('example-graph.txt', 'w') as f: + f.writelines(str(model)) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/publish_model.py b/downstream/mmsegmentation/tools/publish_model.py new file mode 100644 index 0000000..e266057 --- /dev/null +++ b/downstream/mmsegmentation/tools/publish_model.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import subprocess + +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. 
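+    # The checkpoint is re-saved without the optimizer state and then renamed
+    # with the first 8 characters of its sha256 digest appended, e.g.
+    # (illustrative) ``my_model.pth`` -> ``my_model-ef32a09c.pth``.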
+    torch.save(checkpoint, out_file)
+    sha = subprocess.check_output(['sha256sum', out_file]).decode()
+    # str.rstrip('.pth') would strip any trailing '.', 'p', 't' or 'h'
+    # characters from the stem, so remove the extension explicitly before
+    # appending the hash suffix.
+    if out_file.endswith('.pth'):
+        out_file_name = out_file[:-4]
+    else:
+        out_file_name = out_file
+    final_file = out_file_name + '-{}.pth'.format(sha[:8])
+    subprocess.Popen(['mv', out_file, final_file])
+
+
+def main():
+    args = parse_args()
+    process_checkpoint(args.in_file, args.out_file)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/downstream/mmsegmentation/tools/pytorch2onnx.py b/downstream/mmsegmentation/tools/pytorch2onnx.py
new file mode 100644
index 0000000..060d187
--- /dev/null
+++ b/downstream/mmsegmentation/tools/pytorch2onnx.py
@@ -0,0 +1,405 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import warnings
+from functools import partial
+
+import mmcv
+import numpy as np
+import onnxruntime as rt
+import torch
+import torch._C
+import torch.serialization
+from mmcv import DictAction
+from mmcv.onnx import register_extra_symbolics
+from mmcv.runner import load_checkpoint
+from torch import nn
+
+from mmseg.apis import show_result_pyplot
+from mmseg.apis.inference import LoadImage
+from mmseg.datasets.pipelines import Compose
+from mmseg.models import build_segmentor
+from mmseg.ops import resize
+
+torch.manual_seed(3)
+
+
+def _convert_batchnorm(module):
+    module_output = module
+    if isinstance(module, torch.nn.SyncBatchNorm):
+        module_output = torch.nn.BatchNorm2d(module.num_features, module.eps,
+                                             module.momentum, module.affine,
+                                             module.track_running_stats)
+        if module.affine:
+            module_output.weight.data = module.weight.data.clone().detach()
+            module_output.bias.data = module.bias.data.clone().detach()
+            # keep requires_grad unchanged
+            module_output.weight.requires_grad = module.weight.requires_grad
+            module_output.bias.requires_grad = module.bias.requires_grad
+        module_output.running_mean = module.running_mean
+        module_output.running_var = module.running_var
+        module_output.num_batches_tracked = module.num_batches_tracked
+    for name, child in module.named_children():
+        module_output.add_module(name, _convert_batchnorm(child))
+    del module
+    return module_output
+
+
+def _demo_mm_inputs(input_shape, num_classes):
+    """Create a superset of inputs needed to run test or train batches.
+ + Args: + input_shape (tuple): + input batch dimensions + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + rng = np.random.RandomState(0) + imgs = rng.rand(*input_shape) + segs = rng.randint( + low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8) + img_metas = [{ + 'img_shape': (H, W, C), + 'ori_shape': (H, W, C), + 'pad_shape': (H, W, C), + 'filename': '.png', + 'scale_factor': 1.0, + 'flip': False, + } for _ in range(N)] + mm_inputs = { + 'imgs': torch.FloatTensor(imgs).requires_grad_(True), + 'img_metas': img_metas, + 'gt_semantic_seg': torch.LongTensor(segs) + } + return mm_inputs + + +def _prepare_input_img(img_path, + test_pipeline, + shape=None, + rescale_shape=None): + # build the data pipeline + if shape is not None: + test_pipeline[1]['img_scale'] = (shape[1], shape[0]) + test_pipeline[1]['transforms'][0]['keep_ratio'] = False + test_pipeline = [LoadImage()] + test_pipeline[1:] + test_pipeline = Compose(test_pipeline) + # prepare data + data = dict(img=img_path) + data = test_pipeline(data) + imgs = data['img'] + img_metas = [i.data for i in data['img_metas']] + + if rescale_shape is not None: + for img_meta in img_metas: + img_meta['ori_shape'] = tuple(rescale_shape) + (3, ) + + mm_inputs = {'imgs': imgs, 'img_metas': img_metas} + + return mm_inputs + + +def _update_input_img(img_list, img_meta_list, update_ori_shape=False): + # update img and its meta list + N, C, H, W = img_list[0].shape + img_meta = img_meta_list[0][0] + img_shape = (H, W, C) + if update_ori_shape: + ori_shape = img_shape + else: + ori_shape = img_meta['ori_shape'] + pad_shape = img_shape + new_img_meta_list = [[{ + 'img_shape': + img_shape, + 'ori_shape': + ori_shape, + 'pad_shape': + pad_shape, + 'filename': + img_meta['filename'], + 'scale_factor': + (img_shape[1] / ori_shape[1], img_shape[0] / ori_shape[0]) * 2, + 'flip': + False, + } for _ in range(N)]] + + return img_list, new_img_meta_list + + +def pytorch2onnx(model, + mm_inputs, + opset_version=11, + show=False, + output_file='tmp.onnx', + verify=False, + dynamic_export=False): + """Export Pytorch model to ONNX model and verify the outputs are same + between Pytorch and ONNX. + + Args: + model (nn.Module): Pytorch model we want to export. + mm_inputs (dict): Contain the input tensors and img_metas information. + opset_version (int): The onnx op version. Default: 11. + show (bool): Whether print the computation graph. Default: False. + output_file (string): The path to where we store the output ONNX model. + Default: `tmp.onnx`. + verify (bool): Whether compare the outputs between Pytorch and ONNX. + Default: False. + dynamic_export (bool): Whether to export ONNX with dynamic axis. + Default: False. 
+ """ + model.cpu().eval() + test_mode = model.test_cfg.mode + + if isinstance(model.decode_head, nn.ModuleList): + num_classes = model.decode_head[-1].num_classes + else: + num_classes = model.decode_head.num_classes + + imgs = mm_inputs.pop('imgs') + img_metas = mm_inputs.pop('img_metas') + + img_list = [img[None, :] for img in imgs] + img_meta_list = [[img_meta] for img_meta in img_metas] + # update img_meta + img_list, img_meta_list = _update_input_img(img_list, img_meta_list) + + # replace original forward function + origin_forward = model.forward + model.forward = partial( + model.forward, + img_metas=img_meta_list, + return_loss=False, + rescale=True) + dynamic_axes = None + if dynamic_export: + if test_mode == 'slide': + dynamic_axes = {'input': {0: 'batch'}, 'output': {1: 'batch'}} + else: + dynamic_axes = { + 'input': { + 0: 'batch', + 2: 'height', + 3: 'width' + }, + 'output': { + 1: 'batch', + 2: 'height', + 3: 'width' + } + } + + register_extra_symbolics(opset_version) + with torch.no_grad(): + torch.onnx.export( + model, (img_list, ), + output_file, + input_names=['input'], + output_names=['output'], + export_params=True, + keep_initializers_as_inputs=False, + verbose=show, + opset_version=opset_version, + dynamic_axes=dynamic_axes) + print(f'Successfully exported ONNX model: {output_file}') + model.forward = origin_forward + + if verify: + # check by onnx + import onnx + onnx_model = onnx.load(output_file) + onnx.checker.check_model(onnx_model) + + if dynamic_export and test_mode == 'whole': + # scale image for dynamic shape test + img_list = [resize(_, scale_factor=1.5) for _ in img_list] + # concate flip image for batch test + flip_img_list = [_.flip(-1) for _ in img_list] + img_list = [ + torch.cat((ori_img, flip_img), 0) + for ori_img, flip_img in zip(img_list, flip_img_list) + ] + + # update img_meta + img_list, img_meta_list = _update_input_img( + img_list, img_meta_list, test_mode == 'whole') + + # check the numerical value + # get pytorch output + with torch.no_grad(): + pytorch_result = model(img_list, img_meta_list, return_loss=False) + pytorch_result = np.stack(pytorch_result, 0) + + # get onnx output + input_all = [node.name for node in onnx_model.graph.input] + input_initializer = [ + node.name for node in onnx_model.graph.initializer + ] + net_feed_input = list(set(input_all) - set(input_initializer)) + assert (len(net_feed_input) == 1) + sess = rt.InferenceSession(output_file) + onnx_result = sess.run( + None, {net_feed_input[0]: img_list[0].detach().numpy()})[0][0] + # show segmentation results + if show: + import os.path as osp + + import cv2 + img = img_meta_list[0][0]['filename'] + if not osp.exists(img): + img = imgs[0][:3, ...].permute(1, 2, 0) * 255 + img = img.detach().numpy().astype(np.uint8) + ori_shape = img.shape[:2] + else: + ori_shape = LoadImage()({'img': img})['ori_shape'] + + # resize onnx_result to ori_shape + onnx_result_ = cv2.resize(onnx_result[0].astype(np.uint8), + (ori_shape[1], ori_shape[0])) + show_result_pyplot( + model, + img, (onnx_result_, ), + palette=model.PALETTE, + block=False, + title='ONNXRuntime', + opacity=0.5) + + # resize pytorch_result to ori_shape + pytorch_result_ = cv2.resize(pytorch_result[0].astype(np.uint8), + (ori_shape[1], ori_shape[0])) + show_result_pyplot( + model, + img, (pytorch_result_, ), + title='PyTorch', + palette=model.PALETTE, + opacity=0.5) + # compare results + np.testing.assert_allclose( + pytorch_result.astype(np.float32) / num_classes, + onnx_result.astype(np.float32) / num_classes, + rtol=1e-5, 
+ atol=1e-5, + err_msg='The outputs are different between Pytorch and ONNX') + print('The outputs are same between Pytorch and ONNX') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Convert MMSeg to ONNX') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file', default=None) + parser.add_argument( + '--input-img', type=str, help='Images for input', default=None) + parser.add_argument( + '--show', + action='store_true', + help='show onnx graph and segmentation results') + parser.add_argument( + '--verify', action='store_true', help='verify the onnx model') + parser.add_argument('--output-file', type=str, default='tmp.onnx') + parser.add_argument('--opset-version', type=int, default=11) + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=None, + help='input image height and width.') + parser.add_argument( + '--rescale_shape', + type=int, + nargs='+', + default=None, + help='output image rescale height and width, work for slide mode.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='Override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--dynamic-export', + action='store_true', + help='Whether to export onnx with dynamic axis.') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + cfg.model.pretrained = None + + if args.shape is None: + img_scale = cfg.test_pipeline[1]['img_scale'] + input_shape = (1, 3, img_scale[1], img_scale[0]) + elif len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + test_mode = cfg.model.test_cfg.mode + + # build the model and load checkpoint + cfg.model.train_cfg = None + segmentor = build_segmentor( + cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg')) + # convert SyncBN to BN + segmentor = _convert_batchnorm(segmentor) + + if args.checkpoint: + checkpoint = load_checkpoint( + segmentor, args.checkpoint, map_location='cpu') + segmentor.CLASSES = checkpoint['meta']['CLASSES'] + segmentor.PALETTE = checkpoint['meta']['PALETTE'] + + # read input or create dummpy input + if args.input_img is not None: + preprocess_shape = (input_shape[2], input_shape[3]) + rescale_shape = None + if args.rescale_shape is not None: + rescale_shape = [args.rescale_shape[0], args.rescale_shape[1]] + mm_inputs = _prepare_input_img( + args.input_img, + cfg.data.test.pipeline, + shape=preprocess_shape, + rescale_shape=rescale_shape) + else: + if isinstance(segmentor.decode_head, nn.ModuleList): + num_classes = segmentor.decode_head[-1].num_classes + else: + num_classes = segmentor.decode_head.num_classes + mm_inputs = _demo_mm_inputs(input_shape, num_classes) + + # convert model to onnx file + pytorch2onnx( + segmentor, + mm_inputs, + opset_version=args.opset_version, + show=args.show, + output_file=args.output_file, + verify=args.verify, + dynamic_export=args.dynamic_export) + + # Following 
strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. ' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/downstream/mmsegmentation/tools/pytorch2torchscript.py b/downstream/mmsegmentation/tools/pytorch2torchscript.py new file mode 100644 index 0000000..d76f5ec --- /dev/null +++ b/downstream/mmsegmentation/tools/pytorch2torchscript.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import mmcv +import numpy as np +import torch +import torch._C +import torch.serialization +from mmcv.runner import load_checkpoint +from torch import nn + +from mmseg.models import build_segmentor + +torch.manual_seed(3) + + +def digit_version(version_str): + digit_version = [] + for x in version_str.split('.'): + if x.isdigit(): + digit_version.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + digit_version.append(int(patch_version[0]) - 1) + digit_version.append(int(patch_version[1])) + return digit_version + + +def check_torch_version(): + torch_minimum_version = '1.8.0' + torch_version = digit_version(torch.__version__) + + assert (torch_version >= digit_version(torch_minimum_version)), \ + f'Torch=={torch.__version__} is not support for converting to ' \ + f'torchscript. Please install pytorch>={torch_minimum_version}.' + + +def _convert_batchnorm(module): + module_output = module + if isinstance(module, torch.nn.SyncBatchNorm): + module_output = torch.nn.BatchNorm2d(module.num_features, module.eps, + module.momentum, module.affine, + module.track_running_stats) + if module.affine: + module_output.weight.data = module.weight.data.clone().detach() + module_output.bias.data = module.bias.data.clone().detach() + # keep requires_grad unchanged + module_output.weight.requires_grad = module.weight.requires_grad + module_output.bias.requires_grad = module.bias.requires_grad + module_output.running_mean = module.running_mean + module_output.running_var = module.running_var + module_output.num_batches_tracked = module.num_batches_tracked + for name, child in module.named_children(): + module_output.add_module(name, _convert_batchnorm(child)) + del module + return module_output + + +def _demo_mm_inputs(input_shape, num_classes): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): + input batch dimensions + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + rng = np.random.RandomState(0) + imgs = rng.rand(*input_shape) + segs = rng.randint( + low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8) + img_metas = [{ + 'img_shape': (H, W, C), + 'ori_shape': (H, W, C), + 'pad_shape': (H, W, C), + 'filename': '.png', + 'scale_factor': 1.0, + 'flip': False, + } for _ in range(N)] + mm_inputs = { + 'imgs': torch.FloatTensor(imgs).requires_grad_(True), + 'img_metas': img_metas, + 'gt_semantic_seg': torch.LongTensor(segs) + } + return mm_inputs + + +def pytorch2libtorch(model, + input_shape, + show=False, + output_file='tmp.pt', + verify=False): + """Export Pytorch model to TorchScript model and verify the outputs are + same between Pytorch and TorchScript. 
+ + Args: + model (nn.Module): Pytorch model we want to export. + input_shape (tuple): Use this input shape to construct + the corresponding dummy input and execute the model. + show (bool): Whether print the computation graph. Default: False. + output_file (string): The path to where we store the + output TorchScript model. Default: `tmp.pt`. + verify (bool): Whether compare the outputs between + Pytorch and TorchScript. Default: False. + """ + if isinstance(model.decode_head, nn.ModuleList): + num_classes = model.decode_head[-1].num_classes + else: + num_classes = model.decode_head.num_classes + + mm_inputs = _demo_mm_inputs(input_shape, num_classes) + + imgs = mm_inputs.pop('imgs') + + # replace the original forword with forward_dummy + model.forward = model.forward_dummy + model.eval() + traced_model = torch.jit.trace( + model, + example_inputs=imgs, + check_trace=verify, + ) + + if show: + print(traced_model.graph) + + traced_model.save(output_file) + print('Successfully exported TorchScript model: {}'.format(output_file)) + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMSeg to TorchScript') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file', default=None) + parser.add_argument( + '--show', action='store_true', help='show TorchScript graph') + parser.add_argument( + '--verify', action='store_true', help='verify the TorchScript model') + parser.add_argument('--output-file', type=str, default='tmp.pt') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[512, 512], + help='input image size (height, width)') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + check_torch_version() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = mmcv.Config.fromfile(args.config) + cfg.model.pretrained = None + + # build the model and load checkpoint + cfg.model.train_cfg = None + segmentor = build_segmentor( + cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg')) + # convert SyncBN to BN + segmentor = _convert_batchnorm(segmentor) + + if args.checkpoint: + load_checkpoint(segmentor, args.checkpoint, map_location='cpu') + + # convert the PyTorch model to LibTorch model + pytorch2libtorch( + segmentor, + input_shape, + show=args.show, + output_file=args.output_file, + verify=args.verify) diff --git a/downstream/mmsegmentation/tools/slurm_test.sh b/downstream/mmsegmentation/tools/slurm_test.sh new file mode 100755 index 0000000..4e6f7bf --- /dev/null +++ b/downstream/mmsegmentation/tools/slurm_test.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-4} +GPUS_PER_NODE=${GPUS_PER_NODE:-4} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/downstream/mmsegmentation/tools/slurm_train.sh b/downstream/mmsegmentation/tools/slurm_train.sh new file mode 100755 index 0000000..ab23210 --- /dev/null +++ 
b/downstream/mmsegmentation/tools/slurm_train.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +GPUS=${GPUS:-4} +GPUS_PER_NODE=${GPUS_PER_NODE:-4} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} diff --git a/downstream/mmsegmentation/tools/test.py b/downstream/mmsegmentation/tools/test.py new file mode 100644 index 0000000..12892ec --- /dev/null +++ b/downstream/mmsegmentation/tools/test.py @@ -0,0 +1,319 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import shutil +import time +import warnings + +import mmcv +import torch +from mmcv.cnn.utils import revert_sync_batchnorm +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, + wrap_fp16_model) +from mmcv.utils import DictAction + +from mmseg import digit_version +from mmseg.apis import multi_gpu_test, single_gpu_test +from mmseg.datasets import build_dataloader, build_dataset +from mmseg.models import build_segmentor +from mmseg.utils import setup_multi_processes + + +def parse_args(): + parser = argparse.ArgumentParser( + description='mmseg test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--work-dir', + help=('if specified, the evaluation metric results will be dumped' + 'into the directory as json')) + parser.add_argument( + '--aug-test', action='store_true', help='Use Flip and Multi scale aug') + parser.add_argument('--out', help='output result file in pickle format') + parser.add_argument( + '--format-only', + action='store_true', + help='Format the output results without perform evaluation. It is' + 'useful when you want to format the result to a specific format and ' + 'submit it to the test server') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., "mIoU"' + ' for generic datasets, and "cityscapes" for Cityscapes') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where painted images will be saved') + parser.add_argument( + '--gpu-collect', + action='store_true', + help='whether to use gpu to collect results.') + parser.add_argument( + '--gpu-id', + type=int, + default=0, + help='id of gpu to use ' + '(only applicable to non-distributed testing)') + parser.add_argument( + '--tmpdir', + help='tmp directory used for collecting results from multiple ' + 'workers, available when gpu_collect is not specified') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help="--options is deprecated in favor of --cfg_options' and it will " + 'not be supported in version v0.22.0. Override some settings in the ' + 'used config, the key-value pair in xxx=yyy format will be merged ' + 'into config file. If the value to be overwritten is a list, it ' + 'should be like key="[a,b]" or key=a,b It also allows nested ' + 'list/tuple values, e.g. 
key="[(a,b),(c,d)]" Note that the quotation ' + 'marks are necessary and that no white space is allowed.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument( + '--opacity', + type=float, + default=0.5, + help='Opacity of painted segmentation map. In (0, 1] range.') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options. ' + '--options will not be supported in version v0.22.0.') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options. ' + '--options will not be supported in version v0.22.0.') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + assert args.out or args.eval or args.format_only or args.show \ + or args.show_dir, \ + ('Please specify at least one operation (save/eval/format/show the ' + 'results / save the results) with the argument "--out", "--eval"' + ', "--format-only", "--show" or "--show-dir"') + + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # set multi-process settings + setup_multi_processes(cfg) + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + if args.aug_test: + # hard code index + cfg.data.test.pipeline[1].img_ratios = [ + 0.5, 0.75, 1.0, 1.25, 1.5, 1.75 + ] + cfg.data.test.pipeline[1].flip = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + if args.gpu_id is not None: + cfg.gpu_ids = [args.gpu_id] + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + cfg.gpu_ids = [args.gpu_id] + distributed = False + if len(cfg.gpu_ids) > 1: + warnings.warn(f'The gpu-ids is reset from {cfg.gpu_ids} to ' + f'{cfg.gpu_ids[0:1]} to avoid potential error in ' + 'non-distribute testing time.') + cfg.gpu_ids = cfg.gpu_ids[0:1] + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + rank, _ = get_dist_info() + # allows not to create + if args.work_dir is not None and rank == 0: + mmcv.mkdir_or_exist(osp.abspath(args.work_dir)) + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + if args.aug_test: + json_file = osp.join(args.work_dir, + f'eval_multi_scale_{timestamp}.json') + else: + json_file = osp.join(args.work_dir, + f'eval_single_scale_{timestamp}.json') + elif rank == 0: + work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + mmcv.mkdir_or_exist(osp.abspath(work_dir)) + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + if args.aug_test: + json_file = osp.join(work_dir, + f'eval_multi_scale_{timestamp}.json') + else: + json_file = osp.join(work_dir, + f'eval_single_scale_{timestamp}.json') + + # build the dataloader + # TODO: support multiple images per gpu (only minor changes are needed) + dataset = build_dataset(cfg.data.test) + # The default loader config + loader_cfg = dict( + # cfg.gpus will be ignored if distributed + num_gpus=len(cfg.gpu_ids), + dist=distributed, + shuffle=False) + # The overall dataloader settings + loader_cfg.update({ + k: v + for k, v in cfg.data.items() if k not in [ + 'train', 'val', 'test', 'train_dataloader', 'val_dataloader', + 'test_dataloader' + ] + }) + test_loader_cfg = { + **loader_cfg, + 'samples_per_gpu': 1, + 'shuffle': False, # Not shuffle by default + **cfg.data.get('test_dataloader', {}) + } + # build the dataloader + data_loader = build_dataloader(dataset, **test_loader_cfg) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg')) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') + if 'CLASSES' in checkpoint.get('meta', {}): + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + print('"CLASSES" not found in meta, use dataset.CLASSES instead') + model.CLASSES = dataset.CLASSES + if 'PALETTE' in checkpoint.get('meta', {}): + model.PALETTE = checkpoint['meta']['PALETTE'] + else: + print('"PALETTE" not found in meta, use dataset.PALETTE instead') + model.PALETTE = dataset.PALETTE + + # clean gpu memory when starting a new evaluation. 
+ torch.cuda.empty_cache() + eval_kwargs = {} if args.eval_options is None else args.eval_options + + # Deprecated + efficient_test = eval_kwargs.get('efficient_test', False) + if efficient_test: + warnings.warn( + '``efficient_test=True`` does not have effect in tools/test.py, ' + 'the evaluation and format results are CPU memory efficient by ' + 'default') + + eval_on_format_results = ( + args.eval is not None and 'cityscapes' in args.eval) + if eval_on_format_results: + assert len(args.eval) == 1, 'eval on format results is not ' \ + 'applicable for metrics other than ' \ + 'cityscapes' + if args.format_only or eval_on_format_results: + if 'imgfile_prefix' in eval_kwargs: + tmpdir = eval_kwargs['imgfile_prefix'] + else: + tmpdir = '.format_cityscapes' + eval_kwargs.setdefault('imgfile_prefix', tmpdir) + mmcv.mkdir_or_exist(tmpdir) + else: + tmpdir = None + + if not distributed: + warnings.warn( + 'SyncBN is only supported with DDP. To be compatible with DP, ' + 'we convert SyncBN to BN. Please use dist_train.sh which can ' + 'avoid this error.') + if not torch.cuda.is_available(): + assert digit_version(mmcv.__version__) >= digit_version('1.4.4'), \ + 'Please use MMCV >= 1.4.4 for CPU training!' + model = revert_sync_batchnorm(model) + model = MMDataParallel(model, device_ids=cfg.gpu_ids) + results = single_gpu_test( + model, + data_loader, + args.show, + args.show_dir, + False, + args.opacity, + pre_eval=args.eval is not None and not eval_on_format_results, + format_only=args.format_only or eval_on_format_results, + format_args=eval_kwargs) + else: + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False) + results = multi_gpu_test( + model, + data_loader, + args.tmpdir, + args.gpu_collect, + False, + pre_eval=args.eval is not None and not eval_on_format_results, + format_only=args.format_only or eval_on_format_results, + format_args=eval_kwargs) + + rank, _ = get_dist_info() + if rank == 0: + if args.out: + warnings.warn( + 'The behavior of ``args.out`` has been changed since MMSeg ' + 'v0.16, the pickled outputs could be seg map as type of ' + 'np.array, pre-eval results or file paths for ' + '``dataset.format_results()``.') + print(f'\nwriting results to {args.out}') + mmcv.dump(results, args.out) + if args.eval: + eval_kwargs.update(metric=args.eval) + metric = dataset.evaluate(results, **eval_kwargs) + metric_dict = dict(config=args.config, metric=metric) + mmcv.dump(metric_dict, json_file, indent=4) + if tmpdir is not None and eval_on_format_results: + # remove tmp dir when cityscapes evaluation + shutil.rmtree(tmpdir) + + +if __name__ == '__main__': + main() diff --git a/downstream/mmsegmentation/tools/torchserve/mmseg2torchserve.py b/downstream/mmsegmentation/tools/torchserve/mmseg2torchserve.py new file mode 100644 index 0000000..9063634 --- /dev/null +++ b/downstream/mmsegmentation/tools/torchserve/mmseg2torchserve.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
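+# Illustrative usage (paths and names are placeholders):
+#   python tools/torchserve/mmseg2torchserve.py configs/my_config.py \
+#       work_dirs/my_exp/latest.pth --output-folder model_store \
+#       --model-name my_segmentor
+# The resulting ``my_segmentor.mar`` can then be served, for example with
+# ``torchserve --start --model-store model_store --models my_segmentor.mar``.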
+from argparse import ArgumentParser, Namespace +from pathlib import Path +from tempfile import TemporaryDirectory + +import mmcv + +try: + from model_archiver.model_packaging import package_model + from model_archiver.model_packaging_utils import ModelExportUtils +except ImportError: + package_model = None + + +def mmseg2torchserve( + config_file: str, + checkpoint_file: str, + output_folder: str, + model_name: str, + model_version: str = '1.0', + force: bool = False, +): + """Converts mmsegmentation model (config + checkpoint) to TorchServe + `.mar`. + + Args: + config_file: + In MMSegmentation config format. + The contents vary for each task repository. + checkpoint_file: + In MMSegmentation checkpoint format. + The contents vary for each task repository. + output_folder: + Folder where `{model_name}.mar` will be created. + The file created will be in TorchServe archive format. + model_name: + If not None, used for naming the `{model_name}.mar` file + that will be created under `output_folder`. + If None, `{Path(checkpoint_file).stem}` will be used. + model_version: + Model's version. + force: + If True, if there is an existing `{model_name}.mar` + file under `output_folder` it will be overwritten. + """ + mmcv.mkdir_or_exist(output_folder) + + config = mmcv.Config.fromfile(config_file) + + with TemporaryDirectory() as tmpdir: + config.dump(f'{tmpdir}/config.py') + + args = Namespace( + **{ + 'model_file': f'{tmpdir}/config.py', + 'serialized_file': checkpoint_file, + 'handler': f'{Path(__file__).parent}/mmseg_handler.py', + 'model_name': model_name or Path(checkpoint_file).stem, + 'version': model_version, + 'export_path': output_folder, + 'force': force, + 'requirements_file': None, + 'extra_files': None, + 'runtime': 'python', + 'archive_format': 'default' + }) + manifest = ModelExportUtils.generate_manifest_json(args) + package_model(args, manifest) + + +def parse_args(): + parser = ArgumentParser( + description='Convert mmseg models to TorchServe `.mar` format.') + parser.add_argument('config', type=str, help='config file path') + parser.add_argument('checkpoint', type=str, help='checkpoint file path') + parser.add_argument( + '--output-folder', + type=str, + required=True, + help='Folder where `{model_name}.mar` will be created.') + parser.add_argument( + '--model-name', + type=str, + default=None, + help='If not None, used for naming the `{model_name}.mar`' + 'file that will be created under `output_folder`.' + 'If None, `{Path(checkpoint_file).stem}` will be used.') + parser.add_argument( + '--model-version', + type=str, + default='1.0', + help='Number used for versioning.') + parser.add_argument( + '-f', + '--force', + action='store_true', + help='overwrite the existing `{model_name}.mar`') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if package_model is None: + raise ImportError('`torch-model-archiver` is required.' + 'Try: pip install torch-model-archiver') + + mmseg2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/downstream/mmsegmentation/tools/torchserve/mmseg_handler.py b/downstream/mmsegmentation/tools/torchserve/mmseg_handler.py new file mode 100644 index 0000000..28fe501 --- /dev/null +++ b/downstream/mmsegmentation/tools/torchserve/mmseg_handler.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
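For reference, a hypothetical invocation of the converter defined above; the config, checkpoint and output paths are placeholders rather than files shipped with this patch, and the import path assumes the script is importable from the repository root:

from tools.torchserve.mmseg2torchserve import mmseg2torchserve  # assumed import path

mmseg2torchserve(
    config_file='configs/my_model/my_config.py',       # placeholder config
    checkpoint_file='work_dirs/my_config/latest.pth',   # placeholder checkpoint
    output_folder='serve',                              # `{model_name}.mar` is written here
    model_name='my_segmentor',
    model_version='1.0',
    force=True)
# Equivalent CLI (same placeholders):
#   python tools/torchserve/mmseg2torchserve.py configs/my_model/my_config.py \
#       work_dirs/my_config/latest.pth --output-folder serve \
#       --model-name my_segmentor --force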
+import base64 +import os + +import cv2 +import mmcv +import torch +from mmcv.cnn.utils.sync_bn import revert_sync_batchnorm +from ts.torch_handler.base_handler import BaseHandler + +from mmseg.apis import inference_segmentor, init_segmentor + + +class MMsegHandler(BaseHandler): + + def initialize(self, context): + properties = context.system_properties + self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = torch.device(self.map_location + ':' + + str(properties.get('gpu_id')) if torch.cuda. + is_available() else self.map_location) + self.manifest = context.manifest + + model_dir = properties.get('model_dir') + serialized_file = self.manifest['model']['serializedFile'] + checkpoint = os.path.join(model_dir, serialized_file) + self.config_file = os.path.join(model_dir, 'config.py') + + self.model = init_segmentor(self.config_file, checkpoint, self.device) + self.model = revert_sync_batchnorm(self.model) + self.initialized = True + + def preprocess(self, data): + images = [] + + for row in data: + image = row.get('data') or row.get('body') + if isinstance(image, str): + image = base64.b64decode(image) + image = mmcv.imfrombytes(image) + images.append(image) + + return images + + def inference(self, data, *args, **kwargs): + results = [inference_segmentor(self.model, img) for img in data] + return results + + def postprocess(self, data): + output = [] + + for image_result in data: + _, buffer = cv2.imencode('.png', image_result[0].astype('uint8')) + content = buffer.tobytes() + output.append(content) + return output diff --git a/downstream/mmsegmentation/tools/torchserve/test_torchserve.py b/downstream/mmsegmentation/tools/torchserve/test_torchserve.py new file mode 100644 index 0000000..432834a --- /dev/null +++ b/downstream/mmsegmentation/tools/torchserve/test_torchserve.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
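MMsegHandler.preprocess above accepts either raw image bytes or a base64-encoded string, and postprocess returns the predicted label map as PNG bytes; a small sketch of building such a payload (the endpoint address and model name are placeholders):

import base64

import cv2
import numpy as np

img = np.zeros((64, 64, 3), dtype=np.uint8)  # dummy image instead of a real photo
ok, buf = cv2.imencode('.png', img)
payload = base64.b64encode(buf.tobytes()).decode('utf-8')  # handled by the str branch of preprocess
# requests.post('http://127.0.0.1:8080/predictions/my_segmentor', data=payload)
# Posting buf.tobytes() directly exercises the raw-bytes branch instead.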
+from argparse import ArgumentParser +from io import BytesIO + +import matplotlib.pyplot as plt +import mmcv +import requests + +from mmseg.apis import inference_segmentor, init_segmentor + + +def parse_args(): + parser = ArgumentParser( + description='Compare result of torchserve and pytorch,' + 'and visualize them.') + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('model_name', help='The model name in the server') + parser.add_argument( + '--inference-addr', + default='127.0.0.1:8080', + help='Address and port of the inference server') + parser.add_argument( + '--result-image', + type=str, + default=None, + help='save server output in result-image') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + + args = parser.parse_args() + return args + + +def main(args): + url = 'http://' + args.inference_addr + '/predictions/' + args.model_name + with open(args.img, 'rb') as image: + tmp_res = requests.post(url, image) + content = tmp_res.content + if args.result_image: + with open(args.result_image, 'wb') as out_image: + out_image.write(content) + plt.imshow(mmcv.imread(args.result_image, 'grayscale')) + plt.show() + else: + plt.imshow(plt.imread(BytesIO(content))) + plt.show() + model = init_segmentor(args.config, args.checkpoint, args.device) + image = mmcv.imread(args.img) + result = inference_segmentor(model, image) + plt.imshow(result[0]) + plt.show() + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/downstream/mmsegmentation/tools/train.py b/downstream/mmsegmentation/tools/train.py new file mode 100644 index 0000000..fcf30a8 --- /dev/null +++ b/downstream/mmsegmentation/tools/train.py @@ -0,0 +1,392 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import copy +import os +import os.path as osp +import time +import warnings + +import mmcv +import torch +import torch.distributed as dist +from mmcv.cnn.utils import revert_sync_batchnorm +from mmcv.runner import get_dist_info, init_dist +from mmcv.utils import Config, DictAction, get_git_hash + +from mmseg import __version__ +from mmseg.apis import init_random_seed, set_random_seed, train_segmentor +from mmseg.datasets import build_dataset +from mmseg.models import build_segmentor +from mmseg.utils import collect_env, get_root_logger, setup_multi_processes + + +# -------------------------------------------------------------------------------------------- +from typing import Dict, List, Union +import torch.nn as nn +from torch.nn import GroupNorm, LayerNorm + +from mmcv.utils import _BatchNorm, _InstanceNorm, SyncBatchNorm +from mmcv.utils.ext_loader import check_ops_exist +from mmcv.runner.optimizer.builder import OPTIMIZER_BUILDERS +from mmcv.runner.optimizer.default_constructor import DefaultOptimizerConstructor + +from mmcls.gpvit_dev.amp.runner import AmpEpochBasedRunner, AmpIterBasedRunner + +@OPTIMIZER_BUILDERS.register_module() +class CustomOptimizerConstructor(DefaultOptimizerConstructor): + def add_params(self, + params: List[Dict], + module: nn.Module, + prefix: str = '', + is_dcn_module: Union[int, float, None] = None) -> None: + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. 
+ module (nn.Module): The module to be added. + prefix (str): The prefix of the module + is_dcn_module (int|float|None): If the current module is a + submodule of DCN, `is_dcn_module` will be passed to + control conv_offset layer's learning rate. Defaults to None. + """ + # get param-wise options + custom_keys = self.paramwise_cfg.get('custom_keys', {}) + # first sort with alphabet order and then sort with reversed len of str + sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True) + + bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.) + bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.) + norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.) + dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.) + bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False) + dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.) + + # special rules for norm layers and depth-wise conv layers + is_norm = isinstance(module, + (_BatchNorm, _InstanceNorm, SyncBatchNorm, GroupNorm, LayerNorm)) + is_dwconv = ( + isinstance(module, torch.nn.Conv2d) + and module.in_channels == module.groups) + + for name, param in module.named_parameters(recurse=False): + param_group = {'params': [param]} + if not param.requires_grad: + params.append(param_group) + continue + if bypass_duplicate and self._is_in(param_group, params): + warnings.warn(f'{prefix} is duplicate. It is skipped since ' + f'bypass_duplicate={bypass_duplicate}') + continue + # if the parameter match one of the custom keys, ignore other rules + is_custom = False + for key in sorted_keys: + if key in f'{prefix}.{name}': + is_custom = True + lr_mult = custom_keys[key].get('lr_mult', 1.) + param_group['lr'] = self.base_lr * lr_mult + if self.base_wd is not None: + decay_mult = custom_keys[key].get('decay_mult', 1.) 
+ param_group['weight_decay'] = self.base_wd * decay_mult + break + + if not is_custom: + # bias_lr_mult affects all bias parameters + # except for norm.bias dcn.conv_offset.bias + if name == 'bias' and not (is_norm or is_dcn_module): + param_group['lr'] = self.base_lr * bias_lr_mult + + if (prefix.find('conv_offset') != -1 and is_dcn_module + and isinstance(module, torch.nn.Conv2d)): + # deal with both dcn_offset's bias & weight + param_group['lr'] = self.base_lr * dcn_offset_lr_mult + + # apply weight decay policies + if self.base_wd is not None: + # norm decay + if is_norm: + param_group[ + 'weight_decay'] = self.base_wd * norm_decay_mult + # depth-wise conv + elif is_dwconv: + param_group[ + 'weight_decay'] = self.base_wd * dwconv_decay_mult + # bias lr and decay + elif name == 'bias' and not is_dcn_module: + # TODO: current bias_decay_mult will have affect on DCN + param_group[ + 'weight_decay'] = self.base_wd * bias_decay_mult + params.append(param_group) + + if check_ops_exist(): + from mmcv.ops import DeformConv2d, ModulatedDeformConv2d + is_dcn_module = isinstance(module, + (DeformConv2d, ModulatedDeformConv2d)) + else: + is_dcn_module = False + for child_name, child_mod in module.named_children(): + child_prefix = f'{prefix}.{child_name}' if prefix else child_name + self.add_params( + params, + child_mod, + prefix=child_prefix, + is_dcn_module=is_dcn_module) +# -------------------------------------------------------------------------------------------- + + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a segmentor') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--load-from', help='the checkpoint file to load weights from') + parser.add_argument( + '--resume-from', help='the checkpoint file to resume from') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument( + '--gpus', + type=int, + help='(Deprecated, please use --gpu-id) number of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='(Deprecated, please use --gpu-id) ids of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-id', + type=int, + default=0, + help='id of gpu to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--diff_seed', + action='store_true', + help='Whether or not set different seeds for different ranks') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help="--options is deprecated in favor of --cfg_options' and it will " + 'not be supported in version v0.22.0. Override some settings in the ' + 'used config, the key-value pair in xxx=yyy format will be merged ' + 'into config file. If the value to be overwritten is a list, it ' + 'should be like key="[a,b]" or key=a,b It also allows nested ' + 'list/tuple values, e.g. 
key="[(a,b),(c,d)]" Note that the quotation ' + 'marks are necessary and that no white space is allowed.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument( + '--auto-resume', + action='store_true', + help='resume from the latest checkpoint automatically.') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options. ' + '--options will not be supported in version v0.22.0.') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options. ' + '--options will not be supported in version v0.22.0.') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + if args.load_from is not None: + cfg.load_from = args.load_from + if args.resume_from is not None: + cfg.resume_from = args.resume_from + if args.gpus is not None: + cfg.gpu_ids = range(1) + warnings.warn('`--gpus` is deprecated because we only support ' + 'single GPU mode in non-distributed training. ' + 'Use `gpus=1` now.') + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids[0:1] + warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' + 'Because we only support single GPU mode in ' + 'non-distributed training. Use the first GPU ' + 'in `gpu_ids` now.') + if args.gpus is None and args.gpu_ids is None: + cfg.gpu_ids = [args.gpu_id] + + cfg.auto_resume = args.auto_resume + + # init distributed env first, since logger depends on the dist info. + if args.launcher == 'none': + distributed = False + else: + distributed = True + # NOTE: launching with "dist_train.sh" from env only works for torch<1.10 somehow + # for torch>=1.10, the dist launch will hang forever. + # So we explicitly set init args here. 
+ if "NGC_MASTER_ADDR" in os.environ: + init_dict = { + "init_method": f'tcp://{os.environ["NGC_MASTER_ADDR"]}:{os.environ["MASTER_PORT"]}', + "world_size": int(os.environ["WORLD_SIZE"]), + "rank": int(os.environ["RANK"]), + } + # we also enable wandb on the fly here + wandb_cfg = dict( + type='WandbLoggerHook', + with_step=False, + init_kwargs=dict( + project='HRGViT', + name=osp.splitext(osp.basename(args.config))[0], + resume=True, + tags=['seg'], + dir=cfg.work_dir, + # config=cfg.self._cfg_dict.to_dict(), + )) + cfg.log_config.hooks.append(mmcv.ConfigDict(wandb_cfg)) + else: + init_dict = {} + warnings.warn(f"override init_dict: {init_dict}") + init_dist(args.launcher, **cfg.dist_params, **init_dict) + # gpu_ids is used to calculate iter when resuming checkpoint + _, world_size = get_dist_info() + cfg.gpu_ids = range(world_size) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init the logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) + + # set multi-process settings + setup_multi_processes(cfg) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + + # log some basic info + logger.info(f'Distributed training: {distributed}') + logger.info(f'Config:\n{cfg.pretty_text}') + + # set random seeds + seed = init_random_seed(args.seed) + seed = seed + dist.get_rank() if args.diff_seed else seed + logger.info(f'Set random seed to {seed}, ' + f'deterministic: {args.deterministic}') + set_random_seed(seed, deterministic=args.deterministic) + cfg.seed = seed + meta['seed'] = seed + meta['exp_name'] = osp.basename(args.config) + + model = build_segmentor( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + model.init_weights() + + if distributed and hasattr(model.backbone, 'convert_syncbn'): + if model.backbone.convert_syncbn: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + # the converted SyncBNs may be in training mode + if hasattr(model.backbone, 'set_freeze_patch_embed'): + if model.backbone.set_freeze_patch_embed: + model.backbone.set_freeze_patch_embed() + + # SyncBN is not support for DP + if not distributed: + warnings.warn( + 'SyncBN is only supported with DDP. To be compatible with DP, ' + 'we convert SyncBN to BN. 
Please use dist_train.sh which can ' + 'avoid this error.') + model = revert_sync_batchnorm(model) + + logger.info(model) + + datasets = [build_dataset(cfg.data.train)] + if len(cfg.workflow) == 2: + val_dataset = copy.deepcopy(cfg.data.val) + val_dataset.pipeline = cfg.data.train.pipeline + datasets.append(build_dataset(val_dataset)) + if cfg.checkpoint_config is not None: + # save mmseg version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmseg_version=f'{__version__}+{get_git_hash()[:7]}', + config=cfg.pretty_text, + CLASSES=datasets[0].CLASSES, + PALETTE=datasets[0].PALETTE) + # add an attribute for visualization convenience + model.CLASSES = datasets[0].CLASSES + # passing checkpoint meta for saving best checkpoint + meta.update(cfg.checkpoint_config.meta) + train_segmentor( + model, + datasets, + cfg, + distributed=distributed, + validate=(not args.no_validate), + timestamp=timestamp, + meta=meta) + + +if __name__ == '__main__': + main() diff --git a/mmcls/__init__.py b/mmcls/__init__.py new file mode 100644 index 0000000..097c8fe --- /dev/null +++ b/mmcls/__init__.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +from packaging.version import parse + +from .version import __version__ + + +def digit_version(version_str: str, length: int = 4): + """Convert a version string into a tuple of integers. + + This method is usually used for comparing two versions. For pre-release + versions: alpha < beta < rc. + + Args: + version_str (str): The version string. + length (int): The maximum number of version levels. Default: 4. + + Returns: + tuple[int]: The version info in digits (integers). + """ + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + mapping = {'a': -3, 'b': -2, 'rc': -1} + val = -4 + # version.pre can be None + if version.pre: + if version.pre[0] not in mapping: + warnings.warn(f'unknown prerelease version {version.pre[0]}, ' + 'version checking may go wrong') + else: + val = mapping[version.pre[0]] + release.extend([val, version.pre[-1]]) + else: + release.extend([val, 0]) + + elif version.is_postrelease: + release.extend([1, version.post]) + else: + release.extend([0, 0]) + return tuple(release) + + +mmcv_minimum_version = '1.4.2' +mmcv_maximum_version = '1.9.0' +mmcv_version = digit_version(mmcv.__version__) + + +assert (mmcv_version >= digit_version(mmcv_minimum_version) + and mmcv_version <= digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' + +__all__ = ['__version__', 'digit_version'] diff --git a/mmcls/apis/__init__.py b/mmcls/apis/__init__.py new file mode 100644 index 0000000..b632f2a --- /dev/null +++ b/mmcls/apis/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
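A quick sanity check of the ordering produced by `digit_version` above; pre-release versions sort below the corresponding release because 'rc' maps to -1:

from mmcls import digit_version

assert digit_version('1.4.2') == (1, 4, 2, 0, 0, 0)
assert digit_version('1.5.0rc1') == (1, 5, 0, 0, -1, 1)
assert digit_version('1.5.0rc1') < digit_version('1.5.0')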
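The `CustomOptimizerConstructor` registered in tools/train.py above is driven entirely by `paramwise_cfg`; a hypothetical optimizer config (the module-name substrings below are placeholders, not names guaranteed to exist in this codebase) might look like:

optimizer = dict(
    type='AdamW',
    lr=1e-3,
    weight_decay=0.05,
    constructor='CustomOptimizerConstructor',
    paramwise_cfg=dict(
        norm_decay_mult=0.0,  # drop weight decay on BN/GN/LN parameters
        custom_keys={
            # matched as substrings of f'{prefix}.{name}'; the longest matching key wins
            'group_token': dict(decay_mult=0.0),
            'backbone': dict(lr_mult=0.1),
        }))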
+from .inference import inference_model, init_model, show_result_pyplot +from .test import multi_gpu_test, single_gpu_test +from .train import init_random_seed, set_random_seed, train_model + +__all__ = [ + 'set_random_seed', 'train_model', 'init_model', 'inference_model', + 'multi_gpu_test', 'single_gpu_test', 'show_result_pyplot', + 'init_random_seed' +] diff --git a/mmcls/apis/inference.py b/mmcls/apis/inference.py new file mode 100644 index 0000000..09e0041 --- /dev/null +++ b/mmcls/apis/inference.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +import numpy as np +import torch +from mmcv.parallel import collate, scatter +from mmcv.runner import load_checkpoint + +from mmcls.datasets.pipelines import Compose +from mmcls.models import build_classifier + + +def init_model(config, checkpoint=None, device='cuda:0', options=None): + """Initialize a classifier from config file. + + Args: + config (str or :obj:`mmcv.Config`): Config file path or the config + object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. + options (dict): Options to override some settings in the used config. + + Returns: + nn.Module: The constructed classifier. + """ + if isinstance(config, str): + config = mmcv.Config.fromfile(config) + elif not isinstance(config, mmcv.Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if options is not None: + config.merge_from_dict(options) + config.model.pretrained = None + model = build_classifier(config.model) + if checkpoint is not None: + # Mapping the weights to GPU may cause unexpected video memory leak + # which refers to https://github.com/open-mmlab/mmdetection/pull/6405 + checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') + if 'CLASSES' in checkpoint.get('meta', {}): + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + from mmcls.datasets import ImageNet + warnings.simplefilter('once') + warnings.warn('Class names are not saved in the checkpoint\'s ' + 'meta data, use imagenet by default.') + model.CLASSES = ImageNet.CLASSES + model.cfg = config # save the config in the model for convenience + model.to(device) + model.eval() + return model + + +def inference_model(model, img): + """Inference image(s) with the classifier. + + Args: + model (nn.Module): The loaded classifier. + img (str/ndarray): The image filename or loaded image. + + Returns: + result (dict): The classification results that contains + `class_name`, `pred_label` and `pred_score`. 
+ """ + cfg = model.cfg + device = next(model.parameters()).device # model device + # build the data pipeline + if isinstance(img, str): + if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile': + cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile')) + data = dict(img_info=dict(filename=img), img_prefix=None) + else: + if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile': + cfg.data.test.pipeline.pop(0) + data = dict(img=img) + test_pipeline = Compose(cfg.data.test.pipeline) + data = test_pipeline(data) + data = collate([data], samples_per_gpu=1) + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + + # forward the model + with torch.no_grad(): + scores = model(return_loss=False, **data) + pred_score = np.max(scores, axis=1)[0] + pred_label = np.argmax(scores, axis=1)[0] + result = {'pred_label': pred_label, 'pred_score': float(pred_score)} + result['pred_class'] = model.CLASSES[result['pred_label']] + return result + + +def show_result_pyplot(model, + img, + result, + fig_size=(15, 10), + title='result', + wait_time=0): + """Visualize the classification results on the image. + + Args: + model (nn.Module): The loaded classifier. + img (str or np.ndarray): Image filename or loaded image. + result (list): The classification result. + fig_size (tuple): Figure size of the pyplot figure. + Defaults to (15, 10). + title (str): Title of the pyplot figure. + Defaults to 'result'. + wait_time (int): How many seconds to display the image. + Defaults to 0. + """ + if hasattr(model, 'module'): + model = model.module + model.show_result( + img, + result, + show=True, + fig_size=fig_size, + win_name=title, + wait_time=wait_time) diff --git a/mmcls/apis/test.py b/mmcls/apis/test.py new file mode 100644 index 0000000..621962c --- /dev/null +++ b/mmcls/apis/test.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import pickle +import shutil +import tempfile +import time + +import mmcv +import numpy as np +import torch +import torch.distributed as dist +from mmcv.image import tensor2imgs +from mmcv.runner import get_dist_info + + +def single_gpu_test(model, + data_loader, + show=False, + out_dir=None, + **show_kwargs): + """Test model with local single gpu. + + This method tests model with a single gpu and supports showing results. + + Args: + model (:obj:`torch.nn.Module`): Model to be tested. + data_loader (:obj:`torch.utils.data.DataLoader`): Pytorch data loader. + show (bool): Whether to show the test results. Defaults to False. + out_dir (str): The output directory of result plots of all samples. + Defaults to None, which means not to write output files. + **show_kwargs: Any other keyword arguments for showing results. + + Returns: + list: The prediction results. 
+ """ + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, **data) + + batch_size = len(result) + results.extend(result) + + if show or out_dir: + scores = np.vstack(result) + pred_score = np.max(scores, axis=1) + pred_label = np.argmax(scores, axis=1) + pred_class = [model.CLASSES[lb] for lb in pred_label] + + img_metas = data['img_metas'].data[0] + imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg']) + assert len(imgs) == len(img_metas) + + for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + + ori_h, ori_w = img_meta['ori_shape'][:-1] + img_show = mmcv.imresize(img_show, (ori_w, ori_h)) + + if out_dir: + out_file = osp.join(out_dir, img_meta['ori_filename']) + else: + out_file = None + + result_show = { + 'pred_score': pred_score[i], + 'pred_label': pred_label[i], + 'pred_class': pred_class[i] + } + model.module.show_result( + img_show, + result_show, + show=show, + out_file=out_file, + **show_kwargs) + + batch_size = data['img'].size(0) + prog_bar.update(batch_size) + return results + + +def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + + Returns: + list: The prediction results. 
+ """ + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + # Check if tmpdir is valid for cpu_collect + if (not gpu_collect) and (tmpdir is not None and osp.exists(tmpdir)): + raise OSError((f'The tmpdir {tmpdir} already exists.', + ' Since tmpdir will be deleted after testing,', + ' please make sure you specify an empty one.')) + prog_bar = mmcv.ProgressBar(len(dataset)) + time.sleep(2) + dist.barrier() + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, **data) + if isinstance(result, list): + results.extend(result) + else: + results.append(result) + + if rank == 0: + batch_size = data['img'].size(0) + for _ in range(batch_size * world_size): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + results = collect_results_gpu(results, len(dataset)) + else: + results = collect_results_cpu(results, len(dataset), tmpdir) + return results + + +def collect_results_cpu(result_part, size, tmpdir=None): + rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full((MAX_LEN, ), + 32, + dtype=torch.uint8, + device='cuda') + if rank == 0: + mmcv.mkdir_or_exist('.dist_test') + tmpdir = tempfile.mkdtemp(dir='.dist_test') + tmpdir = torch.tensor( + bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mmcv.mkdir_or_exist(tmpdir) + # dump the part result to the dir + mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) + dist.barrier() + # collect all parts + if rank != 0: + return None + else: + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, f'part_{i}.pkl') + part_result = mmcv.load(part_file) + part_list.append(part_result) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + shutil.rmtree(tmpdir) + return ordered_results + + +def collect_results_gpu(result_part, size): + rank, world_size = get_dist_info() + # dump result part to tensor with pickle + part_tensor = torch.tensor( + bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') + # gather all result part tensor shape + shape_tensor = torch.tensor(part_tensor.shape, device='cuda') + shape_list = [shape_tensor.clone() for _ in range(world_size)] + dist.all_gather(shape_list, shape_tensor) + # padding result part tensor to max length + shape_max = torch.tensor(shape_list).max() + part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') + part_send[:shape_tensor[0]] = part_tensor + part_recv_list = [ + part_tensor.new_zeros(shape_max) for _ in range(world_size) + ] + # gather all result part + dist.all_gather(part_recv_list, part_send) + + if rank == 0: + part_list = [] + for recv, shape in zip(part_recv_list, shape_list): + part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()) + part_list.append(part_result) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + return ordered_results diff --git a/mmcls/apis/train.py b/mmcls/apis/train.py new file 
mode 100644 index 0000000..c240632 --- /dev/null +++ b/mmcls/apis/train.py @@ -0,0 +1,233 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +import warnings + +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import (DistSamplerSeedHook, Fp16OptimizerHook, + build_optimizer, build_runner, get_dist_info) + +from mmcls.core import DistEvalHook, DistOptimizerHook, EvalHook +from mmcls.datasets import build_dataloader, build_dataset +from mmcls.utils import (auto_select_device, get_root_logger, + wrap_distributed_model, wrap_non_distributed_model) + + +def init_random_seed(seed=None, device=None): + """Initialize random seed. + + If the seed is not set, the seed will be automatically randomized, + and then broadcast to all processes to prevent some potential bugs. + + Args: + seed (int, Optional): The seed. Default to None. + device (str): The device where the seed will be put on. + Default to 'cuda'. + + Returns: + int: Seed to be used. + """ + if seed is not None: + return seed + if device is None: + device = auto_select_device() + # Make sure all ranks share the same random seed to prevent + # some potential bugs. Please refer to + # https://github.com/open-mmlab/mmdetection/issues/6339 + rank, world_size = get_dist_info() + seed = np.random.randint(2**31) + if world_size == 1: + return seed + + if rank == 0: + random_num = torch.tensor(seed, dtype=torch.int32, device=device) + else: + random_num = torch.tensor(0, dtype=torch.int32, device=device) + dist.broadcast(random_num, src=0) + return random_num.item() + + +def set_random_seed(seed, deterministic=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def train_model(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + device=None, + meta=None): + """Train a model. + + This method will build dataloaders, wrap the model and build a runner + according to the provided config. + + Args: + model (:obj:`torch.nn.Module`): The model to be run. + dataset (:obj:`mmcls.datasets.BaseDataset` | List[BaseDataset]): + The dataset used to train the model. It can be a single dataset, + or a list of dataset with the same length as workflow. + cfg (:obj:`mmcv.utils.Config`): The configs of the experiment. + distributed (bool): Whether to train the model in a distributed + environment. Defaults to False. + validate (bool): Whether to do validation with + :obj:`mmcv.runner.EvalHook`. Defaults to False. + timestamp (str, optional): The timestamp string to auto generate the + name of log files. Defaults to None. + device (str, optional): TODO + meta (dict, optional): A dict records some import information such as + environment info and seed, which will be logged in logger hook. + Defaults to None. 
+ """ + logger = get_root_logger() + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + + # The default loader config + loader_cfg = dict( + # cfg.gpus will be ignored if distributed + num_gpus=cfg.ipu_replicas if device == 'ipu' else len(cfg.gpu_ids), + dist=distributed, + round_up=True, + seed=cfg.get('seed'), + sampler_cfg=cfg.get('sampler', None), + ) + # The overall dataloader settings + loader_cfg.update({ + k: v + for k, v in cfg.data.items() if k not in [ + 'train', 'val', 'test', 'train_dataloader', 'val_dataloader', + 'test_dataloader' + ] + }) + # The specific dataloader settings + train_loader_cfg = {**loader_cfg, **cfg.data.get('train_dataloader', {})} + + data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] + + # put model on gpus + if distributed: + find_unused_parameters = cfg.get('find_unused_parameters', False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = wrap_distributed_model( + model, + cfg.device, + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + else: + model = wrap_non_distributed_model( + model, cfg.device, device_ids=cfg.gpu_ids) + + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + + if cfg.get('runner') is None: + cfg.runner = { + 'type': 'EpochBasedRunner', + 'max_epochs': cfg.total_epochs + } + warnings.warn( + 'config is now expected to have a `runner` section, ' + 'please set `runner` in your config.', UserWarning) + + if device == 'ipu': + if not cfg.runner['type'].startswith('IPU'): + cfg.runner['type'] = 'IPU' + cfg.runner['type'] + if 'options_cfg' not in cfg.runner: + cfg.runner['options_cfg'] = {} + cfg.runner['options_cfg']['replicationFactor'] = cfg.ipu_replicas + cfg.runner['fp16_cfg'] = cfg.get('fp16', None) + + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + + # an ugly walkaround to make the .log and .log.json filenames the same + runner.timestamp = timestamp + + # fp16 setting + fp16_cfg = cfg.get('fp16', None) + + if fp16_cfg is None and device == 'npu': + fp16_cfg = {'loss_scale': 'dynamic'} + + if fp16_cfg is not None: + if device == 'ipu': + from mmcv.device.ipu import IPUFp16OptimizerHook + optimizer_config = IPUFp16OptimizerHook( + **cfg.optimizer_config, + loss_scale=fp16_cfg['loss_scale'], + distributed=distributed) + else: + optimizer_config = Fp16OptimizerHook( + **cfg.optimizer_config, + loss_scale=fp16_cfg['loss_scale'], + distributed=distributed) + elif distributed and 'type' not in cfg.optimizer_config: + optimizer_config = DistOptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks( + cfg.lr_config, + optimizer_config, + cfg.checkpoint_config, + cfg.log_config, + cfg.get('momentum_config', None), + custom_hooks_config=cfg.get('custom_hooks', None)) + if distributed and cfg.runner['type'] == 'EpochBasedRunner': + runner.register_hook(DistSamplerSeedHook()) + + # register eval hooks + if validate: + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + # The specific dataloader settings + val_loader_cfg = { + **loader_cfg, + 'shuffle': False, # Not shuffle by default + 'sampler_cfg': None, # Not use sampler by default + 'drop_last': False, # Not drop last by default + **cfg.data.get('val_dataloader', {}), + } + val_dataloader = 
build_dataloader(val_dataset, **val_loader_cfg) + eval_cfg = cfg.get('evaluation', {}) + eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' + eval_hook = DistEvalHook if distributed else EvalHook + # `EvalHook` needs to be executed after `IterTimerHook`. + # Otherwise, it will cause a bug if use `IterBasedRunner`. + # Refers to https://github.com/open-mmlab/mmcv/issues/1261 + runner.register_hook( + eval_hook(val_dataloader, **eval_cfg), priority='LOW') + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) diff --git a/mmcls/core/__init__.py b/mmcls/core/__init__.py new file mode 100644 index 0000000..dd10803 --- /dev/null +++ b/mmcls/core/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .evaluation import * # noqa: F401, F403 +from .hook import * # noqa: F401, F403 +from .optimizers import * # noqa: F401, F403 +from .utils import * # noqa: F401, F403 diff --git a/mmcls/core/evaluation/__init__.py b/mmcls/core/evaluation/__init__.py new file mode 100644 index 0000000..dd4e57c --- /dev/null +++ b/mmcls/core/evaluation/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .eval_hooks import DistEvalHook, EvalHook +from .eval_metrics import (calculate_confusion_matrix, f1_score, precision, + precision_recall_f1, recall, support) +from .mean_ap import average_precision, mAP +from .multilabel_eval_metrics import average_performance + +__all__ = [ + 'precision', 'recall', 'f1_score', 'support', 'average_precision', 'mAP', + 'average_performance', 'calculate_confusion_matrix', 'precision_recall_f1', + 'EvalHook', 'DistEvalHook' +] diff --git a/mmcls/core/evaluation/eval_hooks.py b/mmcls/core/evaluation/eval_hooks.py new file mode 100644 index 0000000..412eab4 --- /dev/null +++ b/mmcls/core/evaluation/eval_hooks.py @@ -0,0 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import torch.distributed as dist +from mmcv.runner import DistEvalHook as BaseDistEvalHook +from mmcv.runner import EvalHook as BaseEvalHook +from torch.nn.modules.batchnorm import _BatchNorm + + +class EvalHook(BaseEvalHook): + """Non-Distributed evaluation hook. + + Comparing with the ``EvalHook`` in MMCV, this hook will save the latest + evaluation results as an attribute for other hooks to use (like + `MMClsWandbHook`). + """ + + def __init__(self, dataloader, **kwargs): + super(EvalHook, self).__init__(dataloader, **kwargs) + self.latest_results = None + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + results = self.test_fn(runner.model, self.dataloader) + self.latest_results = results + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + # the key_score may be `None` so it needs to skip the action to save + # the best checkpoint + if self.save_best and key_score: + self._save_ckpt(runner, key_score) + + +class DistEvalHook(BaseDistEvalHook): + """Non-Distributed evaluation hook. + + Comparing with the ``EvalHook`` in MMCV, this hook will save the latest + evaluation results as an attribute for other hooks to use (like + `MMClsWandbHook`). 
+ """ + + def __init__(self, dataloader, **kwargs): + super(DistEvalHook, self).__init__(dataloader, **kwargs) + self.latest_results = None + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + # Synchronization of BatchNorm's buffer (running_mean + # and running_var) is not supported in the DDP of pytorch, + # which may cause the inconsistent performance of models in + # different ranks, so we broadcast BatchNorm's buffers + # of rank 0 to other ranks to avoid this. + if self.broadcast_bn_buffer: + model = runner.model + for name, module in model.named_modules(): + if isinstance(module, + _BatchNorm) and module.track_running_stats: + dist.broadcast(module.running_var, 0) + dist.broadcast(module.running_mean, 0) + + tmpdir = self.tmpdir + if tmpdir is None: + tmpdir = osp.join(runner.work_dir, '.eval_hook') + + results = self.test_fn( + runner.model, + self.dataloader, + tmpdir=tmpdir, + gpu_collect=self.gpu_collect) + self.latest_results = results + if runner.rank == 0: + print('\n') + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + # the key_score may be `None` so it needs to skip the action to + # save the best checkpoint + if self.save_best and key_score: + self._save_ckpt(runner, key_score) diff --git a/mmcls/core/evaluation/eval_metrics.py b/mmcls/core/evaluation/eval_metrics.py new file mode 100644 index 0000000..365b408 --- /dev/null +++ b/mmcls/core/evaluation/eval_metrics.py @@ -0,0 +1,259 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from numbers import Number + +import numpy as np +import torch +from torch.nn.functional import one_hot + + +def calculate_confusion_matrix(pred, target): + """Calculate confusion matrix according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + + Returns: + torch.Tensor: Confusion matrix + The shape is (C, C), where C is the number of classes. + """ + + if isinstance(pred, np.ndarray): + pred = torch.from_numpy(pred) + if isinstance(target, np.ndarray): + target = torch.from_numpy(target) + assert ( + isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor)), \ + (f'pred and target should be torch.Tensor or np.ndarray, ' + f'but got {type(pred)} and {type(target)}.') + + # Modified from PyTorch-Ignite + num_classes = pred.size(1) + pred_label = torch.argmax(pred, dim=1).flatten() + target_label = target.flatten() + assert len(pred_label) == len(target_label) + + with torch.no_grad(): + indices = num_classes * target_label + pred_label + matrix = torch.bincount(indices, minlength=num_classes**2) + matrix = matrix.reshape(num_classes, num_classes) + return matrix + + +def precision_recall_f1(pred, target, average_mode='macro', thrs=0.): + """Calculate precision, recall and f1 score according to the prediction and + target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted mean. + Defaults to 'macro'. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. 
+ + Returns: + tuple: tuple containing precision, recall, f1 score. + + The type of precision, recall, f1 score is one of the following: + + +----------------------------+--------------------+-------------------+ + | Args | ``thrs`` is number | ``thrs`` is tuple | + +============================+====================+===================+ + | ``average_mode`` = "macro" | float | list[float] | + +----------------------------+--------------------+-------------------+ + | ``average_mode`` = "none" | np.array | list[np.array] | + +----------------------------+--------------------+-------------------+ + """ + + allowed_average_mode = ['macro', 'none'] + if average_mode not in allowed_average_mode: + raise ValueError(f'Unsupport type of averaging {average_mode}.') + + if isinstance(pred, np.ndarray): + pred = torch.from_numpy(pred) + assert isinstance(pred, torch.Tensor), \ + (f'pred should be torch.Tensor or np.ndarray, but got {type(pred)}.') + if isinstance(target, np.ndarray): + target = torch.from_numpy(target).long() + assert isinstance(target, torch.Tensor), \ + f'target should be torch.Tensor or np.ndarray, ' \ + f'but got {type(target)}.' + + if isinstance(thrs, Number): + thrs = (thrs, ) + return_single = True + elif isinstance(thrs, tuple): + return_single = False + else: + raise TypeError( + f'thrs should be a number or tuple, but got {type(thrs)}.') + + num_classes = pred.size(1) + pred_score, pred_label = torch.topk(pred, k=1) + pred_score = pred_score.flatten() + pred_label = pred_label.flatten() + + gt_positive = one_hot(target.flatten(), num_classes) + + precisions = [] + recalls = [] + f1_scores = [] + for thr in thrs: + # Only prediction values larger than thr are counted as positive + pred_positive = one_hot(pred_label, num_classes) + if thr is not None: + pred_positive[pred_score <= thr] = 0 + class_correct = (pred_positive & gt_positive).sum(0) + precision = class_correct / np.maximum(pred_positive.sum(0), 1.) * 100 + recall = class_correct / np.maximum(gt_positive.sum(0), 1.) * 100 + f1_score = 2 * precision * recall / np.maximum( + precision + recall, + torch.finfo(torch.float32).eps) + if average_mode == 'macro': + precision = float(precision.mean()) + recall = float(recall.mean()) + f1_score = float(f1_score.mean()) + elif average_mode == 'none': + precision = precision.detach().cpu().numpy() + recall = recall.detach().cpu().numpy() + f1_score = f1_score.detach().cpu().numpy() + else: + raise ValueError(f'Unsupport type of averaging {average_mode}.') + precisions.append(precision) + recalls.append(recall) + f1_scores.append(f1_score) + + if return_single: + return precisions[0], recalls[0], f1_scores[0] + else: + return precisions, recalls, f1_scores + + +def precision(pred, target, average_mode='macro', thrs=0.): + """Calculate precision according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted mean. + Defaults to 'macro'. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + float | np.array | list[float | np.array]: Precision. 
+ + +----------------------------+--------------------+-------------------+ + | Args | ``thrs`` is number | ``thrs`` is tuple | + +============================+====================+===================+ + | ``average_mode`` = "macro" | float | list[float] | + +----------------------------+--------------------+-------------------+ + | ``average_mode`` = "none" | np.array | list[np.array] | + +----------------------------+--------------------+-------------------+ + """ + precisions, _, _ = precision_recall_f1(pred, target, average_mode, thrs) + return precisions + + +def recall(pred, target, average_mode='macro', thrs=0.): + """Calculate recall according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted mean. + Defaults to 'macro'. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + float | np.array | list[float | np.array]: Recall. + + +----------------------------+--------------------+-------------------+ + | Args | ``thrs`` is number | ``thrs`` is tuple | + +============================+====================+===================+ + | ``average_mode`` = "macro" | float | list[float] | + +----------------------------+--------------------+-------------------+ + | ``average_mode`` = "none" | np.array | list[np.array] | + +----------------------------+--------------------+-------------------+ + """ + _, recalls, _ = precision_recall_f1(pred, target, average_mode, thrs) + return recalls + + +def f1_score(pred, target, average_mode='macro', thrs=0.): + """Calculate F1 score according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted mean. + Defaults to 'macro'. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + float | np.array | list[float | np.array]: F1 score. + + +----------------------------+--------------------+-------------------+ + | Args | ``thrs`` is number | ``thrs`` is tuple | + +============================+====================+===================+ + | ``average_mode`` = "macro" | float | list[float] | + +----------------------------+--------------------+-------------------+ + | ``average_mode`` = "none" | np.array | list[np.array] | + +----------------------------+--------------------+-------------------+ + """ + _, _, f1_scores = precision_recall_f1(pred, target, average_mode, thrs) + return f1_scores + + +def support(pred, target, average_mode='macro'): + """Calculate the total number of occurrences of each label according to the + prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). 
+ average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted sum. + Defaults to 'macro'. + + Returns: + float | np.array: Support. + + - If the ``average_mode`` is set to macro, the function returns + a single float. + - If the ``average_mode`` is set to none, the function returns + a np.array with shape C. + """ + confusion_matrix = calculate_confusion_matrix(pred, target) + with torch.no_grad(): + res = confusion_matrix.sum(1) + if average_mode == 'macro': + res = float(res.sum().numpy()) + elif average_mode == 'none': + res = res.numpy() + else: + raise ValueError(f'Unsupport type of averaging {average_mode}.') + return res diff --git a/mmcls/core/evaluation/mean_ap.py b/mmcls/core/evaluation/mean_ap.py new file mode 100644 index 0000000..2771a2a --- /dev/null +++ b/mmcls/core/evaluation/mean_ap.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + + +def average_precision(pred, target): + r"""Calculate the average precision for a single class. + + AP summarizes a precision-recall curve as the weighted mean of maximum + precisions obtained for any r'>r, where r is the recall: + + .. math:: + \text{AP} = \sum_n (R_n - R_{n-1}) P_n + + Note that no approximation is involved since the curve is piecewise + constant. + + Args: + pred (np.ndarray): The model prediction with shape (N, ). + target (np.ndarray): The target of each prediction with shape (N, ). + + Returns: + float: a single float as average precision value. + """ + eps = np.finfo(np.float32).eps + + # sort examples + sort_inds = np.argsort(-pred) + sort_target = target[sort_inds] + + # count true positive examples + pos_inds = sort_target == 1 + tp = np.cumsum(pos_inds) + total_pos = tp[-1] + + # count not difficult examples + pn_inds = sort_target != -1 + pn = np.cumsum(pn_inds) + + tp[np.logical_not(pos_inds)] = 0 + precision = tp / np.maximum(pn, eps) + ap = np.sum(precision) / np.maximum(total_pos, eps) + return ap + + +def mAP(pred, target): + """Calculate the mean average precision with respect of classes. + + Args: + pred (torch.Tensor | np.ndarray): The model prediction with shape + (N, C), where C is the number of classes. + target (torch.Tensor | np.ndarray): The target of each prediction with + shape (N, C), where C is the number of classes. 1 stands for + positive examples, 0 stands for negative examples and -1 stands for + difficult examples. + + Returns: + float: A single float as mAP value. + """ + if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor): + pred = pred.detach().cpu().numpy() + target = target.detach().cpu().numpy() + elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)): + raise TypeError('pred and target should both be torch.Tensor or' + 'np.ndarray') + + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' + num_classes = pred.shape[1] + ap = np.zeros(num_classes) + for k in range(num_classes): + ap[k] = average_precision(pred[:, k], target[:, k]) + mean_ap = ap.mean() * 100.0 + return mean_ap diff --git a/mmcls/core/evaluation/multilabel_eval_metrics.py b/mmcls/core/evaluation/multilabel_eval_metrics.py new file mode 100644 index 0000000..1d34e2b --- /dev/null +++ b/mmcls/core/evaluation/multilabel_eval_metrics.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
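A worked check of `precision_recall_f1` above with toy scores and the default macro averaging: class 0 precision is 1/2 and class 1 precision is 1/1, so the macro precision is 75.0 (recall works out the same way):

import torch

from mmcls.core.evaluation import precision_recall_f1

pred = torch.tensor([[0.7, 0.3], [0.4, 0.6], [0.8, 0.2]])  # 3 samples, 2 classes
target = torch.tensor([0, 1, 1])
p, r, f1 = precision_recall_f1(pred, target)
print(p, r, round(f1, 2))  # 75.0 75.0 66.67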
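And a tiny example for `average_precision` above: with these scores the two positives are ranked 1st and 3rd, so AP = (1/1 + 2/3) / 2 ≈ 0.833:

import numpy as np

from mmcls.core.evaluation import average_precision

pred = np.array([0.9, 0.1, 0.8, 0.4])
target = np.array([1, 0, 0, 1])
print(round(average_precision(pred, target), 3))  # 0.833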
+import warnings + +import numpy as np +import torch + + +def average_performance(pred, target, thr=None, k=None): + """Calculate CP, CR, CF1, OP, OR, OF1, where C stands for per-class + average, O stands for overall average, P stands for precision, R stands for + recall and F1 stands for F1-score. + + Args: + pred (torch.Tensor | np.ndarray): The model prediction with shape + (N, C), where C is the number of classes. + target (torch.Tensor | np.ndarray): The target of each prediction with + shape (N, C), where C is the number of classes. 1 stands for + positive examples, 0 stands for negative examples and -1 stands for + difficult examples. + thr (float): The confidence threshold. Defaults to None. + k (int): Top-k performance. Note that if thr and k are both given, k + will be ignored. Defaults to None. + + Returns: + tuple: (CP, CR, CF1, OP, OR, OF1) + """ + if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor): + pred = pred.detach().cpu().numpy() + target = target.detach().cpu().numpy() + elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)): + raise TypeError('pred and target should both be torch.Tensor or' + 'np.ndarray') + if thr is None and k is None: + thr = 0.5 + warnings.warn('Neither thr nor k is given, set thr as 0.5 by ' + 'default.') + elif thr is not None and k is not None: + warnings.warn('Both thr and k are given, use threshold in favor of ' + 'top-k.') + + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' + + eps = np.finfo(np.float32).eps + target[target == -1] = 0 + if thr is not None: + # a label is predicted positive if the confidence is no lower than thr + pos_inds = pred >= thr + + else: + # top-k labels will be predicted positive for any example + sort_inds = np.argsort(-pred, axis=1) + sort_inds_ = sort_inds[:, :k] + inds = np.indices(sort_inds_.shape) + pos_inds = np.zeros_like(pred) + pos_inds[inds[0], sort_inds_] = 1 + + tp = (pos_inds * target) == 1 + fp = (pos_inds * (1 - target)) == 1 + fn = ((1 - pos_inds) * target) == 1 + + precision_class = tp.sum(axis=0) / np.maximum( + tp.sum(axis=0) + fp.sum(axis=0), eps) + recall_class = tp.sum(axis=0) / np.maximum( + tp.sum(axis=0) + fn.sum(axis=0), eps) + CP = precision_class.mean() * 100.0 + CR = recall_class.mean() * 100.0 + CF1 = 2 * CP * CR / np.maximum(CP + CR, eps) + OP = tp.sum() / np.maximum(tp.sum() + fp.sum(), eps) * 100.0 + OR = tp.sum() / np.maximum(tp.sum() + fn.sum(), eps) * 100.0 + OF1 = 2 * OP * OR / np.maximum(OP + OR, eps) + return CP, CR, CF1, OP, OR, OF1 diff --git a/mmcls/core/export/__init__.py b/mmcls/core/export/__init__.py new file mode 100644 index 0000000..1c6ec1b --- /dev/null +++ b/mmcls/core/export/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .test import ONNXRuntimeClassifier, TensorRTClassifier + +__all__ = ['ONNXRuntimeClassifier', 'TensorRTClassifier'] diff --git a/mmcls/core/export/test.py b/mmcls/core/export/test.py new file mode 100644 index 0000000..f7caed6 --- /dev/null +++ b/mmcls/core/export/test.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
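A small, self-contained sketch of ``average_performance`` before the deployment wrappers below; the direct module import is assumed from the file path introduced in this patch.

```python
import numpy as np

# Direct module import, assumed from the file path introduced above.
from mmcls.core.evaluation.multilabel_eval_metrics import average_performance

# 3 samples, 4 classes; scores are sigmoid-style confidences in [0, 1].
pred = np.array([
    [0.9, 0.1, 0.8, 0.3],
    [0.2, 0.7, 0.6, 0.1],
    [0.4, 0.4, 0.9, 0.8],
])
# 1 = positive, 0 = negative, -1 = difficult. Note that the helper flips
# difficult (-1) labels to 0 in place on the array you pass in.
target = np.array([
    [1, 0, 1, 0],
    [0, 1, 1, 0],
    [0, 0, 1, 1],
])

# Threshold-based decision: a class is predicted positive if score >= 0.5.
CP, CR, CF1, OP, OR, OF1 = average_performance(pred, target, thr=0.5)

# Top-k decision: the 2 highest-scoring classes per sample are positive.
CP2, CR2, CF12, OP2, OR2, OF12 = average_performance(pred, target, k=2)
```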
+import warnings + +import numpy as np +import onnxruntime as ort +import torch + +from mmcls.models.classifiers import BaseClassifier + + +class ONNXRuntimeClassifier(BaseClassifier): + """Wrapper for classifier's inference with ONNXRuntime.""" + + def __init__(self, onnx_file, class_names, device_id): + super(ONNXRuntimeClassifier, self).__init__() + sess = ort.InferenceSession(onnx_file) + + providers = ['CPUExecutionProvider'] + options = [{}] + is_cuda_available = ort.get_device() == 'GPU' + if is_cuda_available: + providers.insert(0, 'CUDAExecutionProvider') + options.insert(0, {'device_id': device_id}) + sess.set_providers(providers, options) + + self.sess = sess + self.CLASSES = class_names + self.device_id = device_id + self.io_binding = sess.io_binding() + self.output_names = [_.name for _ in sess.get_outputs()] + self.is_cuda_available = is_cuda_available + + def simple_test(self, img, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def extract_feat(self, imgs): + raise NotImplementedError('This method is not implemented.') + + def forward_train(self, imgs, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def forward_test(self, imgs, img_metas, **kwargs): + input_data = imgs + # set io binding for inputs/outputs + device_type = 'cuda' if self.is_cuda_available else 'cpu' + if not self.is_cuda_available: + input_data = input_data.cpu() + self.io_binding.bind_input( + name='input', + device_type=device_type, + device_id=self.device_id, + element_type=np.float32, + shape=input_data.shape, + buffer_ptr=input_data.data_ptr()) + + for name in self.output_names: + self.io_binding.bind_output(name) + # run session to get outputs + self.sess.run_with_iobinding(self.io_binding) + results = self.io_binding.copy_outputs_to_cpu()[0] + return list(results) + + +class TensorRTClassifier(BaseClassifier): + + def __init__(self, trt_file, class_names, device_id): + super(TensorRTClassifier, self).__init__() + from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin + try: + load_tensorrt_plugin() + except (ImportError, ModuleNotFoundError): + warnings.warn('If input model has custom op from mmcv, \ + you may have to build mmcv with TensorRT from source.') + model = TRTWraper( + trt_file, input_names=['input'], output_names=['probs']) + + self.model = model + self.device_id = device_id + self.CLASSES = class_names + + def simple_test(self, img, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def extract_feat(self, imgs): + raise NotImplementedError('This method is not implemented.') + + def forward_train(self, imgs, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def forward_test(self, imgs, img_metas, **kwargs): + input_data = imgs + with torch.cuda.device(self.device_id), torch.no_grad(): + results = self.model({'input': input_data})['probs'] + results = results.detach().cpu().numpy() + + return list(results) diff --git a/mmcls/core/hook/__init__.py b/mmcls/core/hook/__init__.py new file mode 100644 index 0000000..4212dcf --- /dev/null +++ b/mmcls/core/hook/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
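A hedged usage sketch of the ONNX Runtime wrapper above. ``classifier.onnx`` is a hypothetical file exported with an input tensor named ``input`` (the name this wrapper binds to), and the metas list is only passed to satisfy the signature.

```python
import numpy as np
import torch

# Import path taken from mmcls/core/export/__init__.py above.
from mmcls.core.export import ONNXRuntimeClassifier

# 'classifier.onnx' is a hypothetical exported model whose input is named
# 'input', matching the name this wrapper binds to.
model = ONNXRuntimeClassifier('classifier.onnx',
                              class_names=['cat', 'dog'],
                              device_id=0)

imgs = torch.randn(2, 3, 224, 224)          # dummy, already-normalized batch
img_metas = [{} for _ in range(len(imgs))]  # not used by this wrapper

scores = model.forward_test(imgs, img_metas)  # list of per-image score arrays
labels = [model.CLASSES[int(np.argmax(s))] for s in scores]
print(labels)
```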
+from .class_num_check_hook import ClassNumCheckHook +from .lr_updater import CosineAnnealingCooldownLrUpdaterHook +from .precise_bn_hook import PreciseBNHook +from .wandblogger_hook import MMClsWandbHook + +__all__ = [ + 'ClassNumCheckHook', 'PreciseBNHook', + 'CosineAnnealingCooldownLrUpdaterHook', 'MMClsWandbHook' +] diff --git a/mmcls/core/hook/class_num_check_hook.py b/mmcls/core/hook/class_num_check_hook.py new file mode 100644 index 0000000..52c2c9a --- /dev/null +++ b/mmcls/core/hook/class_num_check_hook.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved +from mmcv.runner import IterBasedRunner +from mmcv.runner.hooks import HOOKS, Hook +from mmcv.utils import is_seq_of + + +@HOOKS.register_module() +class ClassNumCheckHook(Hook): + + def _check_head(self, runner, dataset): + """Check whether the `num_classes` in head matches the length of + `CLASSES` in `dataset`. + + Args: + runner (obj:`EpochBasedRunner`, `IterBasedRunner`): runner object. + dataset (obj: `BaseDataset`): the dataset to check. + """ + model = runner.model + if dataset.CLASSES is None: + runner.logger.warning( + f'Please set `CLASSES` ' + f'in the {dataset.__class__.__name__} and' + f'check if it is consistent with the `num_classes` ' + f'of head') + else: + assert is_seq_of(dataset.CLASSES, str), \ + (f'`CLASSES` in {dataset.__class__.__name__}' + f'should be a tuple of str.') + for name, module in model.named_modules(): + if hasattr(module, 'num_classes'): + assert module.num_classes == len(dataset.CLASSES), \ + (f'The `num_classes` ({module.num_classes}) in ' + f'{module.__class__.__name__} of ' + f'{model.__class__.__name__} does not matches ' + f'the length of `CLASSES` ' + f'{len(dataset.CLASSES)}) in ' + f'{dataset.__class__.__name__}') + + def before_train_iter(self, runner): + """Check whether the training dataset is compatible with head. + + Args: + runner (obj: `IterBasedRunner`): Iter based Runner. + """ + if not isinstance(runner, IterBasedRunner): + return + self._check_head(runner, runner.data_loader._dataloader.dataset) + + def before_val_iter(self, runner): + """Check whether the eval dataset is compatible with head. + + Args: + runner (obj:`IterBasedRunner`): Iter based Runner. + """ + if not isinstance(runner, IterBasedRunner): + return + self._check_head(runner, runner.data_loader._dataloader.dataset) + + def before_train_epoch(self, runner): + """Check whether the training dataset is compatible with head. + + Args: + runner (obj:`EpochBasedRunner`): Epoch based Runner. + """ + self._check_head(runner, runner.data_loader.dataset) + + def before_val_epoch(self, runner): + """Check whether the eval dataset is compatible with head. + + Args: + runner (obj:`EpochBasedRunner`): Epoch based Runner. + """ + self._check_head(runner, runner.data_loader.dataset) diff --git a/mmcls/core/hook/lr_updater.py b/mmcls/core/hook/lr_updater.py new file mode 100644 index 0000000..021f66b --- /dev/null +++ b/mmcls/core/hook/lr_updater.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from math import cos, pi + +from mmcv.runner.hooks import HOOKS, LrUpdaterHook + + +@HOOKS.register_module() +class CosineAnnealingCooldownLrUpdaterHook(LrUpdaterHook): + """Cosine annealing learning rate scheduler with cooldown. + + Args: + min_lr (float, optional): The minimum learning rate after annealing. + Defaults to None. + min_lr_ratio (float, optional): The minimum learning ratio after + nnealing. Defaults to None. + cool_down_ratio (float): The cooldown ratio. Defaults to 0.1. 
+ cool_down_time (int): The cooldown time. Defaults to 10. + by_epoch (bool): If True, the learning rate changes epoch by epoch. If + False, the learning rate changes iter by iter. Defaults to True. + warmup (string, optional): Type of warmup used. It can be None (use no + warmup), 'constant', 'linear' or 'exp'. Defaults to None. + warmup_iters (int): The number of iterations or epochs that warmup + lasts. Defaults to 0. + warmup_ratio (float): LR used at the beginning of warmup equals to + ``warmup_ratio * initial_lr``. Defaults to 0.1. + warmup_by_epoch (bool): If True, the ``warmup_iters`` + means the number of epochs that warmup lasts, otherwise means the + number of iteration that warmup lasts. Defaults to False. + + Note: + You need to set one and only one of ``min_lr`` and ``min_lr_ratio``. + """ + + def __init__(self, + min_lr=None, + min_lr_ratio=None, + cool_down_ratio=0.1, + cool_down_time=10, + **kwargs): + assert (min_lr is None) ^ (min_lr_ratio is None) + self.min_lr = min_lr + self.min_lr_ratio = min_lr_ratio + self.cool_down_time = cool_down_time + self.cool_down_ratio = cool_down_ratio + super(CosineAnnealingCooldownLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + if self.by_epoch: + progress = runner.epoch + max_progress = runner.max_epochs + else: + progress = runner.iter + max_progress = runner.max_iters + + if self.min_lr_ratio is not None: + target_lr = base_lr * self.min_lr_ratio + else: + target_lr = self.min_lr + + if progress > max_progress - self.cool_down_time: + return target_lr * self.cool_down_ratio + else: + max_progress = max_progress - self.cool_down_time + + return annealing_cos(base_lr, target_lr, progress / max_progress) + + +def annealing_cos(start, end, factor, weight=1): + """Calculate annealing cos learning rate. + + Cosine anneal from `weight * start + (1 - weight) * end` to `end` as + percentage goes from 0.0 to 1.0. + + Args: + start (float): The starting learning rate of the cosine annealing. + end (float): The ending learing rate of the cosine annealing. + factor (float): The coefficient of `pi` when calculating the current + percentage. Range from 0.0 to 1.0. + weight (float, optional): The combination factor of `start` and `end` + when calculating the actual starting learning rate. Default to 1. + """ + cos_out = cos(pi * factor) + 1 + return end + 0.5 * weight * (start - end) * cos_out diff --git a/mmcls/core/hook/precise_bn_hook.py b/mmcls/core/hook/precise_bn_hook.py new file mode 100644 index 0000000..e6d4598 --- /dev/null +++ b/mmcls/core/hook/precise_bn_hook.py @@ -0,0 +1,180 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from https://github.com/facebookresearch/pycls/blob/f8cd962737e33ce9e19b3083a33551da95c2d9c0/pycls/core/net.py # noqa: E501 +# Original licence: Copyright (c) 2019 Facebook, Inc under the Apache License 2.0 # noqa: E501 + +import itertools +import logging +from typing import List, Optional + +import mmcv +import torch +import torch.nn as nn +from mmcv.runner import EpochBasedRunner, get_dist_info +from mmcv.runner.hooks import HOOKS, Hook +from mmcv.utils import print_log +from torch.functional import Tensor +from torch.nn import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.instancenorm import _InstanceNorm +from torch.utils.data import DataLoader + + +def scaled_all_reduce(tensors: List[Tensor], num_gpus: int) -> List[Tensor]: + """Performs the scaled all_reduce operation on the provided tensors. 
+ + The input tensors are modified in-place. Currently supports only the sum + reduction operator. The reduced values are scaled by the inverse size of + the process group. + + Args: + tensors (List[torch.Tensor]): The tensors to process. + num_gpus (int): The number of gpus to use + Returns: + List[torch.Tensor]: The processed tensors. + """ + # There is no need for reduction in the single-proc case + if num_gpus == 1: + return tensors + # Queue the reductions + reductions = [] + for tensor in tensors: + reduction = torch.distributed.all_reduce(tensor, async_op=True) + reductions.append(reduction) + # Wait for reductions to finish + for reduction in reductions: + reduction.wait() + # Scale the results + for tensor in tensors: + tensor.mul_(1.0 / num_gpus) + return tensors + + +@torch.no_grad() +def update_bn_stats(model: nn.Module, + loader: DataLoader, + num_samples: int = 8192, + logger: Optional[logging.Logger] = None) -> None: + """Computes precise BN stats on training data. + + Args: + model (nn.module): The model whose bn stats will be recomputed. + loader (DataLoader): PyTorch dataloader._dataloader + num_samples (int): The number of samples to update the bn stats. + Defaults to 8192. + logger (:obj:`logging.Logger` | None): Logger for logging. + Default: None. + """ + # get dist info + rank, world_size = get_dist_info() + # Compute the number of mini-batches to use, if the size of dataloader is + # less than num_iters, use all the samples in dataloader. + num_iter = num_samples // (loader.batch_size * world_size) + num_iter = min(num_iter, len(loader)) + # Retrieve the BN layers + bn_layers = [ + m for m in model.modules() + if m.training and isinstance(m, (_BatchNorm)) + ] + + if len(bn_layers) == 0: + print_log('No BN found in model', logger=logger, level=logging.WARNING) + return + print_log( + f'{len(bn_layers)} BN found, run {num_iter} iters...', logger=logger) + + # Finds all the other norm layers with training=True. + other_norm_layers = [ + m for m in model.modules() + if m.training and isinstance(m, (_InstanceNorm, GroupNorm)) + ] + if len(other_norm_layers) > 0: + print_log( + 'IN/GN stats will not be updated in PreciseHook.', + logger=logger, + level=logging.INFO) + + # Initialize BN stats storage for computing + # mean(mean(batch)) and mean(var(batch)) + running_means = [torch.zeros_like(bn.running_mean) for bn in bn_layers] + running_vars = [torch.zeros_like(bn.running_var) for bn in bn_layers] + # Remember momentum values + momentums = [bn.momentum for bn in bn_layers] + # Set momentum to 1.0 to compute BN stats that reflect the current batch + for bn in bn_layers: + bn.momentum = 1.0 + # Average the BN stats for each BN layer over the batches + if rank == 0: + prog_bar = mmcv.ProgressBar(num_iter) + + for data in itertools.islice(loader, num_iter): + model.train_step(data) + for i, bn in enumerate(bn_layers): + running_means[i] += bn.running_mean / num_iter + running_vars[i] += bn.running_var / num_iter + if rank == 0: + prog_bar.update() + + # Sync BN stats across GPUs (no reduction if 1 GPU used) + running_means = scaled_all_reduce(running_means, world_size) + running_vars = scaled_all_reduce(running_vars, world_size) + # Set BN stats and restore original momentum values + for i, bn in enumerate(bn_layers): + bn.running_mean = running_means[i] + bn.running_var = running_vars[i] + bn.momentum = momentums[i] + + +@HOOKS.register_module() +class PreciseBNHook(Hook): + """Precise BN hook. + + Recompute and update the batch norm stats to make them more precise. 
During + training both BN stats and the weight are changing after every iteration, + so the running average can not precisely reflect the actual stats of the + current model. + + With this hook, the BN stats are recomputed with fixed weights, to make the + running average more precise. Specifically, it computes the true average of + per-batch mean/variance instead of the running average. See Sec. 3 of the + paper `Rethinking Batch in BatchNorm ` + for details. + + This hook will update BN stats, so it should be executed before + ``CheckpointHook`` and ``EMAHook``, generally set its priority to + "ABOVE_NORMAL". + + Args: + num_samples (int): The number of samples to update the bn stats. + Defaults to 8192. + interval (int): Perform precise bn interval. Defaults to 1. + """ + + def __init__(self, num_samples: int = 8192, interval: int = 1) -> None: + assert interval > 0 and num_samples > 0 + + self.interval = interval + self.num_samples = num_samples + + def _perform_precise_bn(self, runner: EpochBasedRunner) -> None: + print_log( + f'Running Precise BN for {self.num_samples} items...', + logger=runner.logger) + update_bn_stats( + runner.model, + runner.data_loader, + self.num_samples, + logger=runner.logger) + print_log('Finish Precise BN, BN stats updated.', logger=runner.logger) + + def after_train_epoch(self, runner: EpochBasedRunner) -> None: + """Calculate prcise BN and broadcast BN stats across GPUs. + + Args: + runner (obj:`EpochBasedRunner`): runner object. + """ + assert isinstance(runner, EpochBasedRunner), \ + 'PreciseBN only supports `EpochBasedRunner` by now' + + # if by epoch, do perform precise every `self.interval` epochs; + if self.every_n_epochs(runner, self.interval): + self._perform_precise_bn(runner) diff --git a/mmcls/core/hook/wandblogger_hook.py b/mmcls/core/hook/wandblogger_hook.py new file mode 100644 index 0000000..ef67ee4 --- /dev/null +++ b/mmcls/core/hook/wandblogger_hook.py @@ -0,0 +1,343 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import numpy as np +from mmcv.runner import HOOKS, BaseRunner +from mmcv.runner.dist_utils import get_dist_info, master_only +from mmcv.runner.hooks.checkpoint import CheckpointHook +from mmcv.runner.hooks.evaluation import DistEvalHook, EvalHook +from mmcv.runner.hooks.logger.wandb import WandbLoggerHook + + +@HOOKS.register_module() +class MMClsWandbHook(WandbLoggerHook): + """Enhanced Wandb logger hook for classification. + + Comparing with the :cls:`mmcv.runner.WandbLoggerHook`, this hook can not + only automatically log all information in ``log_buffer`` but also log + the following extra information. + + - **Checkpoints**: If ``log_checkpoint`` is True, the checkpoint saved at + every checkpoint interval will be saved as W&B Artifacts. This depends on + the : class:`mmcv.runner.CheckpointHook` whose priority is higher than + this hook. Please refer to + https://docs.wandb.ai/guides/artifacts/model-versioning to learn more + about model versioning with W&B Artifacts. + + - **Checkpoint Metadata**: If ``log_checkpoint_metadata`` is True, every + checkpoint artifact will have a metadata associated with it. The metadata + contains the evaluation metrics computed on validation data with that + checkpoint along with the current epoch/iter. It depends on + :class:`EvalHook` whose priority is higher than this hook. + + - **Evaluation**: At every interval, this hook logs the model prediction as + interactive W&B Tables. The number of samples logged is given by + ``num_eval_images``. 
Currently, this hook logs the predicted labels along + with the ground truth at every evaluation interval. This depends on the + :class:`EvalHook` whose priority is higher than this hook. Also note that + the data is just logged once and subsequent evaluation tables uses + reference to the logged data to save memory usage. Please refer to + https://docs.wandb.ai/guides/data-vis to learn more about W&B Tables. + + Here is a config example: + + .. code:: python + + checkpoint_config = dict(interval=10) + + # To log checkpoint metadata, the interval of checkpoint saving should + # be divisible by the interval of evaluation. + evaluation = dict(interval=5) + + log_config = dict( + ... + hooks=[ + ... + dict(type='MMClsWandbHook', + init_kwargs={ + 'entity': "YOUR_ENTITY", + 'project': "YOUR_PROJECT_NAME" + }, + log_checkpoint=True, + log_checkpoint_metadata=True, + num_eval_images=100) + ]) + + Args: + init_kwargs (dict): A dict passed to wandb.init to initialize + a W&B run. Please refer to https://docs.wandb.ai/ref/python/init + for possible key-value pairs. + interval (int): Logging interval (every k iterations). Defaults to 10. + log_checkpoint (bool): Save the checkpoint at every checkpoint interval + as W&B Artifacts. Use this for model versioning where each version + is a checkpoint. Defaults to False. + log_checkpoint_metadata (bool): Log the evaluation metrics computed + on the validation data with the checkpoint, along with current + epoch as a metadata to that checkpoint. + Defaults to True. + num_eval_images (int): The number of validation images to be logged. + If zero, the evaluation won't be logged. Defaults to 100. + """ + + def __init__(self, + init_kwargs=None, + interval=10, + log_checkpoint=False, + log_checkpoint_metadata=False, + num_eval_images=100, + **kwargs): + super(MMClsWandbHook, self).__init__(init_kwargs, interval, **kwargs) + + self.log_checkpoint = log_checkpoint + self.log_checkpoint_metadata = ( + log_checkpoint and log_checkpoint_metadata) + self.num_eval_images = num_eval_images + self.log_evaluation = (num_eval_images > 0) + self.ckpt_hook: CheckpointHook = None + self.eval_hook: EvalHook = None + + @master_only + def before_run(self, runner: BaseRunner): + super(MMClsWandbHook, self).before_run(runner) + + # Inspect CheckpointHook and EvalHook + for hook in runner.hooks: + if isinstance(hook, CheckpointHook): + self.ckpt_hook = hook + if isinstance(hook, (EvalHook, DistEvalHook)): + self.eval_hook = hook + + # Check conditions to log checkpoint + if self.log_checkpoint: + if self.ckpt_hook is None: + self.log_checkpoint = False + self.log_checkpoint_metadata = False + runner.logger.warning( + 'To log checkpoint in MMClsWandbHook, `CheckpointHook` is' + 'required, please check hooks in the runner.') + else: + self.ckpt_interval = self.ckpt_hook.interval + + # Check conditions to log evaluation + if self.log_evaluation or self.log_checkpoint_metadata: + if self.eval_hook is None: + self.log_evaluation = False + self.log_checkpoint_metadata = False + runner.logger.warning( + 'To log evaluation or checkpoint metadata in ' + 'MMClsWandbHook, `EvalHook` or `DistEvalHook` in mmcls ' + 'is required, please check whether the validation ' + 'is enabled.') + else: + self.eval_interval = self.eval_hook.interval + self.val_dataset = self.eval_hook.dataloader.dataset + if (self.log_evaluation + and self.num_eval_images > len(self.val_dataset)): + self.num_eval_images = len(self.val_dataset) + runner.logger.warning( + f'The num_eval_images ({self.num_eval_images}) is ' 
+ 'greater than the total number of validation samples ' + f'({len(self.val_dataset)}). The complete validation ' + 'dataset will be logged.') + + # Check conditions to log checkpoint metadata + if self.log_checkpoint_metadata: + assert self.ckpt_interval % self.eval_interval == 0, \ + 'To log checkpoint metadata in MMClsWandbHook, the interval ' \ + f'of checkpoint saving ({self.ckpt_interval}) should be ' \ + 'divisible by the interval of evaluation ' \ + f'({self.eval_interval}).' + + # Initialize evaluation table + if self.log_evaluation: + # Initialize data table + self._init_data_table() + # Add ground truth to the data table + self._add_ground_truth() + # Log ground truth data + self._log_data_table() + + @master_only + def after_train_epoch(self, runner): + super(MMClsWandbHook, self).after_train_epoch(runner) + + if not self.by_epoch: + return + + # Save checkpoint and metadata + if (self.log_checkpoint + and self.every_n_epochs(runner, self.ckpt_interval) + or (self.ckpt_hook.save_last and self.is_last_epoch(runner))): + if self.log_checkpoint_metadata and self.eval_hook: + metadata = { + 'epoch': runner.epoch + 1, + **self._get_eval_results() + } + else: + metadata = None + aliases = [f'epoch_{runner.epoch+1}', 'latest'] + model_path = osp.join(self.ckpt_hook.out_dir, + f'epoch_{runner.epoch+1}.pth') + self._log_ckpt_as_artifact(model_path, aliases, metadata) + + # Save prediction table + if self.log_evaluation and self.eval_hook._should_evaluate(runner): + results = self.eval_hook.latest_results + # Initialize evaluation table + self._init_pred_table() + # Add predictions to evaluation table + self._add_predictions(results, runner.epoch + 1) + # Log the evaluation table + self._log_eval_table(runner.epoch + 1) + + def after_train_iter(self, runner): + if self.get_mode(runner) == 'train': + # An ugly patch. The iter-based eval hook will call the + # `after_train_iter` method of all logger hooks before evaluation. + # Use this trick to skip that call. + # Don't call super method at first, it will clear the log_buffer + return super(MMClsWandbHook, self).after_train_iter(runner) + else: + super(MMClsWandbHook, self).after_train_iter(runner) + + rank, _ = get_dist_info() + if rank != 0: + return + + if self.by_epoch: + return + + # Save checkpoint and metadata + if (self.log_checkpoint + and self.every_n_iters(runner, self.ckpt_interval) + or (self.ckpt_hook.save_last and self.is_last_iter(runner))): + if self.log_checkpoint_metadata and self.eval_hook: + metadata = { + 'iter': runner.iter + 1, + **self._get_eval_results() + } + else: + metadata = None + aliases = [f'iter_{runner.iter+1}', 'latest'] + model_path = osp.join(self.ckpt_hook.out_dir, + f'iter_{runner.iter+1}.pth') + self._log_ckpt_as_artifact(model_path, aliases, metadata) + + # Save prediction table + if self.log_evaluation and self.eval_hook._should_evaluate(runner): + results = self.eval_hook.latest_results + # Initialize evaluation table + self._init_pred_table() + # Log predictions + self._add_predictions(results, runner.iter + 1) + # Log the table + self._log_eval_table(runner.iter + 1) + + @master_only + def after_run(self, runner): + self.wandb.finish() + + def _log_ckpt_as_artifact(self, model_path, aliases, metadata=None): + """Log model checkpoint as W&B Artifact. + + Args: + model_path (str): Path of the checkpoint to log. + aliases (list): List of the aliases associated with this artifact. + metadata (dict, optional): Metadata associated with this artifact. 
+ """ + model_artifact = self.wandb.Artifact( + f'run_{self.wandb.run.id}_model', type='model', metadata=metadata) + model_artifact.add_file(model_path) + self.wandb.log_artifact(model_artifact, aliases=aliases) + + def _get_eval_results(self): + """Get model evaluation results.""" + results = self.eval_hook.latest_results + eval_results = self.val_dataset.evaluate( + results, logger='silent', **self.eval_hook.eval_kwargs) + return eval_results + + def _init_data_table(self): + """Initialize the W&B Tables for validation data.""" + columns = ['image_name', 'image', 'ground_truth'] + self.data_table = self.wandb.Table(columns=columns) + + def _init_pred_table(self): + """Initialize the W&B Tables for model evaluation.""" + columns = ['epoch'] if self.by_epoch else ['iter'] + columns += ['image_name', 'image', 'ground_truth', 'prediction' + ] + list(self.val_dataset.CLASSES) + self.eval_table = self.wandb.Table(columns=columns) + + def _add_ground_truth(self): + # Get image loading pipeline + from mmcls.datasets.pipelines import LoadImageFromFile + img_loader = None + for t in self.val_dataset.pipeline.transforms: + if isinstance(t, LoadImageFromFile): + img_loader = t + + CLASSES = self.val_dataset.CLASSES + self.eval_image_indexs = np.arange(len(self.val_dataset)) + # Set seed so that same validation set is logged each time. + np.random.seed(42) + np.random.shuffle(self.eval_image_indexs) + self.eval_image_indexs = self.eval_image_indexs[:self.num_eval_images] + + for idx in self.eval_image_indexs: + img_info = self.val_dataset.data_infos[idx] + if img_loader is not None: + img_info = img_loader(img_info) + # Get image and convert from BGR to RGB + image = img_info['img'][..., ::-1] + else: + # For CIFAR dataset. + image = img_info['img'] + image_name = img_info.get('filename', f'img_{idx}') + gt_label = img_info.get('gt_label').item() + + self.data_table.add_data(image_name, self.wandb.Image(image), + CLASSES[gt_label]) + + def _add_predictions(self, results, idx): + table_idxs = self.data_table_ref.get_index() + assert len(table_idxs) == len(self.eval_image_indexs) + + for ndx, eval_image_index in enumerate(self.eval_image_indexs): + result = results[eval_image_index] + + self.eval_table.add_data( + idx, self.data_table_ref.data[ndx][0], + self.data_table_ref.data[ndx][1], + self.data_table_ref.data[ndx][2], + self.val_dataset.CLASSES[np.argmax(result)], *tuple(result)) + + def _log_data_table(self): + """Log the W&B Tables for validation data as artifact and calls + `use_artifact` on it so that the evaluation table can use the reference + of already uploaded images. + + This allows the data to be uploaded just once. + """ + data_artifact = self.wandb.Artifact('val', type='dataset') + data_artifact.add(self.data_table, 'val_data') + + self.wandb.run.use_artifact(data_artifact) + data_artifact.wait() + + self.data_table_ref = data_artifact.get('val_data') + + def _log_eval_table(self, idx): + """Log the W&B Tables for model evaluation. + + The table will be logged multiple times creating new version. Use this + to compare models at different intervals interactively. 
+ """ + pred_artifact = self.wandb.Artifact( + f'run_{self.wandb.run.id}_pred', type='evaluation') + pred_artifact.add(self.eval_table, 'eval_data') + if self.by_epoch: + aliases = ['latest', f'epoch_{idx}'] + else: + aliases = ['latest', f'iter_{idx}'] + self.wandb.run.log_artifact(pred_artifact, aliases=aliases) diff --git a/mmcls/core/optimizers/__init__.py b/mmcls/core/optimizers/__init__.py new file mode 100644 index 0000000..aa9cc43 --- /dev/null +++ b/mmcls/core/optimizers/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .lamb import Lamb + +__all__ = [ + 'Lamb', +] diff --git a/mmcls/core/optimizers/lamb.py b/mmcls/core/optimizers/lamb.py new file mode 100644 index 0000000..c65fbae --- /dev/null +++ b/mmcls/core/optimizers/lamb.py @@ -0,0 +1,227 @@ +"""PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb. + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/ +2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/ +LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb +is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or +cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support +PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2019 cybertronai +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
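In practice the hooks and the ``Lamb`` optimizer implemented below are enabled through the config system rather than instantiated by hand. The following is a minimal, assumed config sketch: registry names follow the class names in this patch, and ``policy='CosineAnnealingCooldown'`` relies on mmcv appending ``LrUpdaterHook`` to the policy name.

```python
# Assumed config sketch; all values are illustrative only.
optimizer = dict(type='Lamb', lr=5e-3, weight_decay=0.02)
optimizer_config = dict(grad_clip=None)

# Resolves to the CosineAnnealingCooldownLrUpdaterHook defined above.
lr_config = dict(
    policy='CosineAnnealingCooldown',
    min_lr_ratio=1e-2,
    cool_down_time=10,
    cool_down_ratio=0.1,
    warmup='linear',
    warmup_iters=5,
    warmup_ratio=1e-3,
    warmup_by_epoch=True)

# PreciseBNHook should run before CheckpointHook/EMAHook, hence the priority.
custom_hooks = [
    dict(type='PreciseBNHook', num_samples=8192, interval=1,
         priority='ABOVE_NORMAL'),
]

# The checkpoint interval must be divisible by the evaluation interval when
# checkpoint metadata logging is enabled in MMClsWandbHook.
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5)
log_config = dict(
    interval=100,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='MMClsWandbHook',
             init_kwargs=dict(project='YOUR_PROJECT_NAME'),
             log_checkpoint=True,
             log_checkpoint_metadata=True,
             num_eval_images=100),
    ])
```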
+import math + +import torch +from mmcv.runner import OPTIMIZERS +from torch.optim import Optimizer + + +@OPTIMIZERS.register_module() +class Lamb(Optimizer): + """A pure pytorch variant of FuseLAMB (NvLamb variant) optimizer. + + This class is copied from `timm`_. The LAMB was proposed in `Large Batch + Optimization for Deep Learning - Training BERT in 76 minutes`_. + + .. _timm: + https://github.com/rwightman/pytorch-image-models/blob/master/timm/optim/lamb.py + .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging (bool, optional): whether apply (1-beta2) to grad when + calculating running averages of gradient. (default: True) + max_grad_norm (float, optional): value used to clip global grad norm + (default: 1.0) + trust_clip (bool): enable LAMBC trust ratio clipping (default: False) + always_adapt (boolean, optional): Apply adaptive learning rate to 0.0 + weight decay parameter (default: False) + """ # noqa: E501 + + def __init__(self, + params, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-6, + weight_decay=0.01, + grad_averaging=True, + max_grad_norm=1.0, + trust_clip=False, + always_adapt=False): + defaults = dict( + lr=lr, + bias_correction=bias_correction, + betas=betas, + eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + max_grad_norm=max_grad_norm, + trust_clip=trust_clip, + always_adapt=always_adapt) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor( + 1.0, device=device + ) # because torch.where doesn't handle scalars correctly + global_grad_norm = torch.zeros(1, device=device) + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError( + 'Lamb does not support sparse gradients, consider ' + 'SparseAdam instead.') + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + # FIXME it'd be nice to remove explicit tensor conversion of scalars + # when torch.where promotes + # scalar types properly https://github.com/pytorch/pytorch/issues/9190 + max_grad_norm = torch.tensor( + self.defaults['max_grad_norm'], device=device) + clip_global_grad_norm = torch.where(global_grad_norm > max_grad_norm, + global_grad_norm / max_grad_norm, + one_tensor) + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or + # pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1**group['step'] + bias_correction2 = 1 - beta2**group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.div_(clip_global_grad_norm) + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_( + grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_( + group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on + # parameters that are + # excluded from weight decay, unless always_adapt == True, + # then always enabled. + w_norm = p.norm(2.0) + g_norm = update.norm(2.0) + # FIXME nested where required since logical and/or not + # working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, w_norm / g_norm, one_tensor), + one_tensor, + ) + if group['trust_clip']: + # LAMBC trust clipping, upper bound fixed at one + trust_ratio = torch.minimum(trust_ratio, one_tensor) + update.mul_(trust_ratio) + + p.add_(update, alpha=-group['lr']) + + return loss diff --git a/mmcls/core/utils/__init__.py b/mmcls/core/utils/__init__.py new file mode 100644 index 0000000..7170f23 --- /dev/null +++ b/mmcls/core/utils/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
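Outside the config system, the optimizer above behaves like any other ``torch.optim`` optimizer; a minimal sketch on a toy model:

```python
import torch
import torch.nn as nn

# Import path taken from mmcls/core/optimizers/__init__.py above.
from mmcls.core.optimizers import Lamb

model = nn.Linear(16, 4)
optimizer = Lamb(model.parameters(), lr=5e-3, weight_decay=0.02,
                 max_grad_norm=1.0, trust_clip=False)
criterion = nn.CrossEntropyLoss()

x = torch.randn(8, 16)
target = torch.randint(0, 4, (8,))

for _ in range(3):
    optimizer.zero_grad()
    loss = criterion(model(x), target)
    loss.backward()   # gradients are clipped globally inside step()
    optimizer.step()
```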
+from .dist_utils import DistOptimizerHook, allreduce_grads, sync_random_seed +from .misc import multi_apply + +__all__ = [ + 'allreduce_grads', 'DistOptimizerHook', 'multi_apply', 'sync_random_seed' +] diff --git a/mmcls/core/utils/dist_utils.py b/mmcls/core/utils/dist_utils.py new file mode 100644 index 0000000..15cf13c --- /dev/null +++ b/mmcls/core/utils/dist_utils.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict + +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import OptimizerHook, get_dist_info +from torch._utils import (_flatten_dense_tensors, _take_tensors, + _unflatten_dense_tensors) + +from mmcls.utils import auto_select_device + + +def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): + if bucket_size_mb > 0: + bucket_size_bytes = bucket_size_mb * 1024 * 1024 + buckets = _take_tensors(tensors, bucket_size_bytes) + else: + buckets = OrderedDict() + for tensor in tensors: + tp = tensor.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(tensor) + buckets = buckets.values() + + for bucket in buckets: + flat_tensors = _flatten_dense_tensors(bucket) + dist.all_reduce(flat_tensors) + flat_tensors.div_(world_size) + for tensor, synced in zip( + bucket, _unflatten_dense_tensors(flat_tensors, bucket)): + tensor.copy_(synced) + + +def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): + grads = [ + param.grad.data for param in params + if param.requires_grad and param.grad is not None + ] + world_size = dist.get_world_size() + if coalesce: + _allreduce_coalesced(grads, world_size, bucket_size_mb) + else: + for tensor in grads: + dist.all_reduce(tensor.div_(world_size)) + + +class DistOptimizerHook(OptimizerHook): + + def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + + def after_train_iter(self, runner): + runner.optimizer.zero_grad() + runner.outputs['loss'].backward() + if self.grad_clip is not None: + self.clip_grads(runner.model.parameters()) + runner.optimizer.step() + + +def sync_random_seed(seed=None, device=None): + """Make sure different ranks share the same seed. + + All workers must call this function, otherwise it will deadlock. + This method is generally used in `DistributedSampler`, + because the seed should be identical across all processes + in the distributed group. + + In distributed sampling, different ranks should sample non-overlapped + data in the dataset. Therefore, this function is used to make sure that + each rank shuffles the data indices in the same order based + on the same seed. Then different ranks could use different indices + to select non-overlapped data from the same data list. + + Args: + seed (int, Optional): The seed. Default to None. + device (str): The device where the seed will be put on. + Default to 'cuda'. + + Returns: + int: Seed to be used. 
+ """ + if device is None: + device = auto_select_device() + if seed is None: + seed = np.random.randint(2**31) + assert isinstance(seed, int) + + rank, world_size = get_dist_info() + + if world_size == 1: + return seed + + if rank == 0: + random_num = torch.tensor(seed, dtype=torch.int32, device=device) + else: + random_num = torch.tensor(0, dtype=torch.int32, device=device) + dist.broadcast(random_num, src=0) + return random_num.item() diff --git a/mmcls/core/utils/misc.py b/mmcls/core/utils/misc.py new file mode 100644 index 0000000..31f8463 --- /dev/null +++ b/mmcls/core/utils/misc.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial + + +def multi_apply(func, *args, **kwargs): + pfunc = partial(func, **kwargs) if kwargs else func + map_results = map(pfunc, *args) + return tuple(map(list, zip(*map_results))) diff --git a/mmcls/core/visualization/__init__.py b/mmcls/core/visualization/__init__.py new file mode 100644 index 0000000..bdd0c18 --- /dev/null +++ b/mmcls/core/visualization/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .image import (BaseFigureContextManager, ImshowInfosContextManager, + color_val_matplotlib, imshow_infos) + +__all__ = [ + 'BaseFigureContextManager', 'ImshowInfosContextManager', 'imshow_infos', + 'color_val_matplotlib' +] diff --git a/mmcls/core/visualization/image.py b/mmcls/core/visualization/image.py new file mode 100644 index 0000000..d016974 --- /dev/null +++ b/mmcls/core/visualization/image.py @@ -0,0 +1,343 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import matplotlib.pyplot as plt +import mmcv +import numpy as np +from matplotlib.backend_bases import CloseEvent + +# A small value +EPS = 1e-2 + + +def color_val_matplotlib(color): + """Convert various input in BGR order to normalized RGB matplotlib color + tuples, + + Args: + color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Color inputs + + Returns: + tuple[float]: A tuple of 3 normalized floats indicating RGB channels. + """ + color = mmcv.color_val(color) + color = [color / 255 for color in color[::-1]] + return tuple(color) + + +class BaseFigureContextManager: + """Context Manager to reuse matplotlib figure. + + It provides a figure for saving and a figure for showing to support + different settings. + + Args: + axis (bool): Whether to show the axis lines. + fig_save_cfg (dict): Keyword parameters of figure for saving. + Defaults to empty dict. + fig_show_cfg (dict): Keyword parameters of figure for showing. + Defaults to empty dict. + """ + + def __init__(self, axis=False, fig_save_cfg={}, fig_show_cfg={}) -> None: + self.is_inline = 'inline' in plt.get_backend() + + # Because save and show need different figure size + # We set two figure and axes to handle save and show + self.fig_save: plt.Figure = None + self.fig_save_cfg = fig_save_cfg + self.ax_save: plt.Axes = None + + self.fig_show: plt.Figure = None + self.fig_show_cfg = fig_show_cfg + self.ax_show: plt.Axes = None + + self.axis = axis + + def __enter__(self): + if not self.is_inline: + # If use inline backend, we cannot control which figure to show, + # so disable the interactive fig_show, and put the initialization + # of fig_save to `prepare` function. 
+ self._initialize_fig_save() + self._initialize_fig_show() + return self + + def _initialize_fig_save(self): + fig = plt.figure(**self.fig_save_cfg) + ax = fig.add_subplot() + + # remove white edges by set subplot margin + fig.subplots_adjust(left=0, right=1, bottom=0, top=1) + + self.fig_save, self.ax_save = fig, ax + + def _initialize_fig_show(self): + # fig_save will be resized to image size, only fig_show needs fig_size. + fig = plt.figure(**self.fig_show_cfg) + ax = fig.add_subplot() + + # remove white edges by set subplot margin + fig.subplots_adjust(left=0, right=1, bottom=0, top=1) + + self.fig_show, self.ax_show = fig, ax + + def __exit__(self, exc_type, exc_value, traceback): + if self.is_inline: + # If use inline backend, whether to close figure depends on if + # users want to show the image. + return + + plt.close(self.fig_save) + plt.close(self.fig_show) + + def prepare(self): + if self.is_inline: + # if use inline backend, just rebuild the fig_save. + self._initialize_fig_save() + self.ax_save.cla() + self.ax_save.axis(self.axis) + return + + # If users force to destroy the window, rebuild fig_show. + if not plt.fignum_exists(self.fig_show.number): + self._initialize_fig_show() + + # Clear all axes + self.ax_save.cla() + self.ax_save.axis(self.axis) + self.ax_show.cla() + self.ax_show.axis(self.axis) + + def wait_continue(self, timeout=0, continue_key=' ') -> int: + """Show the image and wait for the user's input. + + This implementation refers to + https://github.com/matplotlib/matplotlib/blob/v3.5.x/lib/matplotlib/_blocking_input.py + + Args: + timeout (int): If positive, continue after ``timeout`` seconds. + Defaults to 0. + continue_key (str): The key for users to continue. Defaults to + the space key. + + Returns: + int: If zero, means time out or the user pressed ``continue_key``, + and if one, means the user closed the show figure. + """ # noqa: E501 + if self.is_inline: + # If use inline backend, interactive input and timeout is no use. + return + + if self.fig_show.canvas.manager: + # Ensure that the figure is shown + self.fig_show.show() + + while True: + + # Connect the events to the handler function call. + event = None + + def handler(ev): + # Set external event variable + nonlocal event + # Qt backend may fire two events at the same time, + # use a condition to avoid missing close event. + event = ev if not isinstance(event, CloseEvent) else event + self.fig_show.canvas.stop_event_loop() + + cids = [ + self.fig_show.canvas.mpl_connect(name, handler) + for name in ('key_press_event', 'close_event') + ] + + try: + self.fig_show.canvas.start_event_loop(timeout) + finally: # Run even on exception like ctrl-c. + # Disconnect the callbacks. + for cid in cids: + self.fig_show.canvas.mpl_disconnect(cid) + + if isinstance(event, CloseEvent): + return 1 # Quit for close. + elif event is None or event.key == continue_key: + return 0 # Quit for continue. + + +class ImshowInfosContextManager(BaseFigureContextManager): + """Context Manager to reuse matplotlib figure and put infos on images. + + Args: + fig_size (tuple[int]): Size of the figure to show image. + + Examples: + >>> import mmcv + >>> from mmcls.core import visualization as vis + >>> img1 = mmcv.imread("./1.png") + >>> info1 = {'class': 'cat', 'label': 0} + >>> img2 = mmcv.imread("./2.png") + >>> info2 = {'class': 'dog', 'label': 1} + >>> with vis.ImshowInfosContextManager() as manager: + ... # Show img1 + ... manager.put_img_infos(img1, info1) + ... # Show img2 on the same figure and save output image. + ... 
manager.put_img_infos( + ... img2, info2, out_file='./2_out.png') + """ + + def __init__(self, fig_size=(15, 10)): + super().__init__( + axis=False, + # A proper dpi for image save with default font size. + fig_save_cfg=dict(frameon=False, dpi=36), + fig_show_cfg=dict(frameon=False, figsize=fig_size)) + + def _put_text(self, ax, text, x, y, text_color, font_size): + ax.text( + x, + y, + f'{text}', + bbox={ + 'facecolor': 'black', + 'alpha': 0.7, + 'pad': 0.2, + 'edgecolor': 'none', + 'boxstyle': 'round' + }, + color=text_color, + fontsize=font_size, + family='monospace', + verticalalignment='top', + horizontalalignment='left') + + def put_img_infos(self, + img, + infos, + text_color='white', + font_size=26, + row_width=20, + win_name='', + show=True, + wait_time=0, + out_file=None): + """Show image with extra information. + + Args: + img (str | ndarray): The image to be displayed. + infos (dict): Extra infos to display in the image. + text_color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Extra infos + display color. Defaults to 'white'. + font_size (int): Extra infos display font size. Defaults to 26. + row_width (int): width between each row of results on the image. + win_name (str): The image title. Defaults to '' + show (bool): Whether to show the image. Defaults to True. + wait_time (int): How many seconds to display the image. + Defaults to 0. + out_file (Optional[str]): The filename to write the image. + Defaults to None. + + Returns: + np.ndarray: The image with extra infomations. + """ + self.prepare() + + text_color = color_val_matplotlib(text_color) + img = mmcv.imread(img).astype(np.uint8) + + x, y = 3, row_width // 2 + img = mmcv.bgr2rgb(img) + width, height = img.shape[1], img.shape[0] + img = np.ascontiguousarray(img) + + # add a small EPS to avoid precision lost due to matplotlib's + # truncation (https://github.com/matplotlib/matplotlib/issues/15363) + dpi = self.fig_save.get_dpi() + self.fig_save.set_size_inches((width + EPS) / dpi, + (height + EPS) / dpi) + + for k, v in infos.items(): + if isinstance(v, float): + v = f'{v:.2f}' + label_text = f'{k}: {v}' + self._put_text(self.ax_save, label_text, x, y, text_color, + font_size) + if show and not self.is_inline: + self._put_text(self.ax_show, label_text, x, y, text_color, + font_size) + y += row_width + + self.ax_save.imshow(img) + stream, _ = self.fig_save.canvas.print_to_buffer() + buffer = np.frombuffer(stream, dtype='uint8') + img_rgba = buffer.reshape(height, width, 4) + rgb, _ = np.split(img_rgba, [3], axis=2) + img_save = rgb.astype('uint8') + img_save = mmcv.rgb2bgr(img_save) + + if out_file is not None: + mmcv.imwrite(img_save, out_file) + + ret = 0 + if show and not self.is_inline: + # Reserve some space for the tip. + self.ax_show.set_title(win_name) + self.ax_show.set_ylim(height + 20) + self.ax_show.text( + width // 2, + height + 18, + 'Press SPACE to continue.', + ha='center', + fontsize=font_size) + self.ax_show.imshow(img) + + # Refresh canvas, necessary for Qt5 backend. + self.fig_show.canvas.draw() + + ret = self.wait_continue(timeout=wait_time) + elif (not show) and self.is_inline: + # If use inline backend, we use fig_save to show the image + # So we need to close it if users don't want to show. + plt.close(self.fig_save) + + return ret, img_save + + +def imshow_infos(img, + infos, + text_color='white', + font_size=26, + row_width=20, + win_name='', + show=True, + fig_size=(15, 10), + wait_time=0, + out_file=None): + """Show image with extra information. 
+ + Args: + img (str | ndarray): The image to be displayed. + infos (dict): Extra infos to display in the image. + text_color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Extra infos + display color. Defaults to 'white'. + font_size (int): Extra infos display font size. Defaults to 26. + row_width (int): width between each row of results on the image. + win_name (str): The image title. Defaults to '' + show (bool): Whether to show the image. Defaults to True. + fig_size (tuple): Image show figure size. Defaults to (15, 10). + wait_time (int): How many seconds to display the image. Defaults to 0. + out_file (Optional[str]): The filename to write the image. + Defaults to None. + + Returns: + np.ndarray: The image with extra infomations. + """ + with ImshowInfosContextManager(fig_size=fig_size) as manager: + _, img = manager.put_img_infos( + img, + infos, + text_color=text_color, + font_size=font_size, + row_width=row_width, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + return img diff --git a/mmcls/datasets/__init__.py b/mmcls/datasets/__init__.py new file mode 100644 index 0000000..095077e --- /dev/null +++ b/mmcls/datasets/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_dataset import BaseDataset +from .builder import (DATASETS, PIPELINES, SAMPLERS, build_dataloader, + build_dataset, build_sampler) +from .cifar import CIFAR10, CIFAR100 +from .cub import CUB +from .custom import CustomDataset +from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, + KFoldDataset, RepeatDataset) +from .imagenet import ImageNet +from .imagenet21k import ImageNet21k +from .mnist import MNIST, FashionMNIST +from .multi_label import MultiLabelDataset +from .samplers import DistributedSampler, RepeatAugSampler +from .stanford_cars import StanfordCars +from .voc import VOC + +__all__ = [ + 'BaseDataset', 'ImageNet', 'CIFAR10', 'CIFAR100', 'MNIST', 'FashionMNIST', + 'VOC', 'MultiLabelDataset', 'build_dataloader', 'build_dataset', + 'DistributedSampler', 'ConcatDataset', 'RepeatDataset', + 'ClassBalancedDataset', 'DATASETS', 'PIPELINES', 'ImageNet21k', 'SAMPLERS', + 'build_sampler', 'RepeatAugSampler', 'KFoldDataset', 'CUB', + 'CustomDataset', 'StanfordCars' +] diff --git a/mmcls/datasets/base_dataset.py b/mmcls/datasets/base_dataset.py new file mode 100644 index 0000000..fb6578a --- /dev/null +++ b/mmcls/datasets/base_dataset.py @@ -0,0 +1,221 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +from abc import ABCMeta, abstractmethod +from os import PathLike +from typing import List + +import mmcv +import numpy as np +from torch.utils.data import Dataset + +from mmcls.core.evaluation import precision_recall_f1, support +from mmcls.models.losses import accuracy +from .pipelines import Compose + + +def expanduser(path): + if isinstance(path, (str, PathLike)): + return osp.expanduser(path) + else: + return path + + +class BaseDataset(Dataset, metaclass=ABCMeta): + """Base dataset. + + Args: + data_prefix (str): the prefix of data path + pipeline (list): a list of dict, where each element represents + a operation defined in `mmcls.datasets.pipelines` + ann_file (str | None): the annotation file. When ann_file is str, + the subclass is expected to read from the ann_file. 
When ann_file + is None, the subclass is expected to read according to data_prefix + test_mode (bool): in train mode or test mode + """ + + CLASSES = None + + def __init__(self, + data_prefix, + pipeline, + classes=None, + ann_file=None, + test_mode=False): + super(BaseDataset, self).__init__() + self.data_prefix = expanduser(data_prefix) + self.pipeline = Compose(pipeline) + self.CLASSES = self.get_classes(classes) + self.ann_file = expanduser(ann_file) + self.test_mode = test_mode + self.data_infos = self.load_annotations() + + @abstractmethod + def load_annotations(self): + pass + + @property + def class_to_idx(self): + """Map mapping class name to class index. + + Returns: + dict: mapping from class name to class index. + """ + + return {_class: i for i, _class in enumerate(self.CLASSES)} + + def get_gt_labels(self): + """Get all ground-truth labels (categories). + + Returns: + np.ndarray: categories for all images. + """ + + gt_labels = np.array([data['gt_label'] for data in self.data_infos]) + return gt_labels + + def get_cat_ids(self, idx: int) -> List[int]: + """Get category id by index. + + Args: + idx (int): Index of data. + + Returns: + cat_ids (List[int]): Image category of specified index. + """ + + return [int(self.data_infos[idx]['gt_label'])] + + def prepare_data(self, idx): + results = copy.deepcopy(self.data_infos[idx]) + return self.pipeline(results) + + def __len__(self): + return len(self.data_infos) + + def __getitem__(self, idx): + return self.prepare_data(idx) + + @classmethod + def get_classes(cls, classes=None): + """Get class names of current dataset. + + Args: + classes (Sequence[str] | str | None): If classes is None, use + default CLASSES defined by builtin dataset. If classes is a + string, take it as a file name. The file contains the name of + classes where each line contains one class name. If classes is + a tuple or list, override the CLASSES defined by the dataset. + + Returns: + tuple[str] or list[str]: Names of categories of the dataset. + """ + if classes is None: + return cls.CLASSES + + if isinstance(classes, str): + # take it as a file path + class_names = mmcv.list_from_file(expanduser(classes)) + elif isinstance(classes, (tuple, list)): + class_names = classes + else: + raise ValueError(f'Unsupported type {type(classes)} of classes.') + + return class_names + + def evaluate(self, + results, + metric='accuracy', + metric_options=None, + indices=None, + logger=None): + """Evaluate the dataset. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + Default value is `accuracy`. + metric_options (dict, optional): Options for calculating metrics. + Allowed keys are 'topk', 'thrs' and 'average_mode'. + Defaults to None. + indices (list, optional): The indices of samples corresponding to + the results. Defaults to None. + logger (logging.Logger | str, optional): Logger used for printing + related information during evaluation. Defaults to None. + Returns: + dict: evaluation results + """ + if metric_options is None: + metric_options = {'topk': (1, 5)} + if isinstance(metric, str): + metrics = [metric] + else: + metrics = metric + allowed_metrics = [ + 'accuracy', 'precision', 'recall', 'f1_score', 'support' + ] + eval_results = {} + results = np.vstack(results) + gt_labels = self.get_gt_labels() + if indices is not None: + gt_labels = gt_labels[indices] + num_imgs = len(results) + assert len(gt_labels) == num_imgs, 'dataset testing results should '\ + 'be of the same length as gt_labels.' 
+ + invalid_metrics = set(metrics) - set(allowed_metrics) + if len(invalid_metrics) != 0: + raise ValueError(f'metric {invalid_metrics} is not supported.') + + topk = metric_options.get('topk', (1, 5)) + thrs = metric_options.get('thrs') + average_mode = metric_options.get('average_mode', 'macro') + + if 'accuracy' in metrics: + if thrs is not None: + acc = accuracy(results, gt_labels, topk=topk, thrs=thrs) + else: + acc = accuracy(results, gt_labels, topk=topk) + if isinstance(topk, tuple): + eval_results_ = { + f'accuracy_top-{k}': a + for k, a in zip(topk, acc) + } + else: + eval_results_ = {'accuracy': acc} + if isinstance(thrs, tuple): + for key, values in eval_results_.items(): + eval_results.update({ + f'{key}_thr_{thr:.2f}': value.item() + for thr, value in zip(thrs, values) + }) + else: + eval_results.update( + {k: v.item() + for k, v in eval_results_.items()}) + + if 'support' in metrics: + support_value = support( + results, gt_labels, average_mode=average_mode) + eval_results['support'] = support_value + + precision_recall_f1_keys = ['precision', 'recall', 'f1_score'] + if len(set(metrics) & set(precision_recall_f1_keys)) != 0: + if thrs is not None: + precision_recall_f1_values = precision_recall_f1( + results, gt_labels, average_mode=average_mode, thrs=thrs) + else: + precision_recall_f1_values = precision_recall_f1( + results, gt_labels, average_mode=average_mode) + for key, values in zip(precision_recall_f1_keys, + precision_recall_f1_values): + if key in metrics: + if isinstance(thrs, tuple): + eval_results.update({ + f'{key}_thr_{thr:.2f}': value + for thr, value in zip(thrs, values) + }) + else: + eval_results[key] = values + + return eval_results diff --git a/mmcls/datasets/builder.py b/mmcls/datasets/builder.py new file mode 100644 index 0000000..1b626b4 --- /dev/null +++ b/mmcls/datasets/builder.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import platform +import random +from functools import partial + +import numpy as np +import torch +from mmcv.parallel import collate +from mmcv.runner import get_dist_info +from mmcv.utils import Registry, build_from_cfg, digit_version +from torch.utils.data import DataLoader + +try: + from mmcv.utils import IS_IPU_AVAILABLE +except ImportError: + IS_IPU_AVAILABLE = False + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + hard_limit = rlimit[1] + soft_limit = min(4096, hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + +DATASETS = Registry('dataset') +PIPELINES = Registry('pipeline') +SAMPLERS = Registry('sampler') + + +def build_dataset(cfg, default_args=None): + from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, + KFoldDataset, RepeatDataset) + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'ConcatDataset': + dataset = ConcatDataset( + [build_dataset(c, default_args) for c in cfg['datasets']], + separate_eval=cfg.get('separate_eval', True)) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + elif cfg['type'] == 'ClassBalancedDataset': + dataset = ClassBalancedDataset( + build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) + elif cfg['type'] == 'KFoldDataset': + cp_cfg = copy.deepcopy(cfg) + if cp_cfg.get('test_mode', None) is None: + cp_cfg['test_mode'] = (default_args or {}).pop('test_mode', False) + cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'], default_args) + cp_cfg.pop('type') + dataset = KFoldDataset(**cp_cfg) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + + return dataset + + +def build_dataloader(dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + shuffle=True, + round_up=True, + seed=None, + pin_memory=True, + persistent_workers=True, + sampler_cfg=None, + **kwargs): + """Build PyTorch DataLoader. + + In distributed training, each GPU/process has a dataloader. + In non-distributed training, there is only one dataloader for all GPUs. + + Args: + dataset (Dataset): A PyTorch dataset. + samples_per_gpu (int): Number of training samples on each GPU, i.e., + batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data loading + for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed training. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: True. + round_up (bool): Whether to round up the length of dataset by adding + extra samples to make it evenly divisible. Default: True. + pin_memory (bool): Whether to use pin_memory in DataLoader. + Default: True + persistent_workers (bool): If True, the data loader will not shutdown + the worker processes after a dataset has been consumed once. + This allows to maintain the workers Dataset instances alive. + The argument also has effect in PyTorch>=1.7.0. + Default: True + sampler_cfg (dict): sampler configuration to override the default + sampler + kwargs: any keyword argument to be used to initialize DataLoader + + Returns: + DataLoader: A PyTorch dataloader. 
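+
+    Example:
+        A minimal, illustrative call (``dataset`` stands for any dataset
+        instance built beforehand, e.g. with ``build_dataset``; the batch
+        size and worker count below are placeholders):
+
+        >>> loader = build_dataloader(
+        ...     dataset,
+        ...     samples_per_gpu=32,
+        ...     workers_per_gpu=2,
+        ...     num_gpus=1,
+        ...     dist=False,
+        ...     shuffle=True)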
+ """ + rank, world_size = get_dist_info() + + # Custom sampler logic + if sampler_cfg: + # shuffle=False when val and test + sampler_cfg.update(shuffle=shuffle) + sampler = build_sampler( + sampler_cfg, + default_args=dict( + dataset=dataset, num_replicas=world_size, rank=rank, + seed=seed)) + # Default sampler logic + elif dist: + sampler = build_sampler( + dict( + type='DistributedSampler', + dataset=dataset, + num_replicas=world_size, + rank=rank, + shuffle=shuffle, + round_up=round_up, + seed=seed)) + else: + sampler = None + + # If sampler exists, turn off dataloader shuffle + if sampler is not None: + shuffle = False + + if dist: + batch_size = samples_per_gpu + num_workers = workers_per_gpu + else: + batch_size = num_gpus * samples_per_gpu + num_workers = num_gpus * workers_per_gpu + + init_fn = partial( + worker_init_fn, num_workers=num_workers, rank=rank, + seed=seed) if seed is not None else None + + if digit_version(torch.__version__) >= digit_version('1.8.0'): + kwargs['persistent_workers'] = persistent_workers + if IS_IPU_AVAILABLE: + from mmcv.device.ipu import IPUDataLoader + data_loader = IPUDataLoader( + dataset, + None, + batch_size=samples_per_gpu, + num_workers=num_workers, + shuffle=shuffle, + worker_init_fn=init_fn, + **kwargs) + else: + data_loader = DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), + pin_memory=pin_memory, + shuffle=shuffle, + worker_init_fn=init_fn, + **kwargs) + + return data_loader + + +def worker_init_fn(worker_id, num_workers, rank, seed): + # The seed of each worker equals to + # num_worker * rank + worker_id + user_seed + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) + torch.manual_seed(worker_seed) + + +def build_sampler(cfg, default_args=None): + if cfg is None: + return None + else: + return build_from_cfg(cfg, SAMPLERS, default_args=default_args) diff --git a/mmcls/datasets/cifar.py b/mmcls/datasets/cifar.py new file mode 100644 index 0000000..453b8d9 --- /dev/null +++ b/mmcls/datasets/cifar.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path +import pickle + +import numpy as np +import torch.distributed as dist +from mmcv.runner import get_dist_info + +from .base_dataset import BaseDataset +from .builder import DATASETS +from .utils import check_integrity, download_and_extract_archive + + +@DATASETS.register_module() +class CIFAR10(BaseDataset): + """`CIFAR10 `_ Dataset. 
+ + This implementation is modified from + https://github.com/pytorch/vision/blob/master/torchvision/datasets/cifar.py + """ # noqa: E501 + + base_folder = 'cifar-10-batches-py' + url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' + filename = 'cifar-10-python.tar.gz' + tgz_md5 = 'c58f30108f718f92721af3b95e74349a' + train_list = [ + ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], + ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], + ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], + ['data_batch_4', '634d18415352ddfa80567beed471001a'], + ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], + ] + + test_list = [ + ['test_batch', '40351d587109b95175f43aff81a1287e'], + ] + meta = { + 'filename': 'batches.meta', + 'key': 'label_names', + 'md5': '5ff9c542aee3614f3951f8cda6e48888', + } + CLASSES = [ + 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', + 'horse', 'ship', 'truck' + ] + + def load_annotations(self): + + rank, world_size = get_dist_info() + + if rank == 0 and not self._check_integrity(): + download_and_extract_archive( + self.url, + self.data_prefix, + filename=self.filename, + md5=self.tgz_md5) + + if world_size > 1: + dist.barrier() + assert self._check_integrity(), \ + 'Shared storage seems unavailable. ' \ + f'Please download the dataset manually through {self.url}.' + + if not self.test_mode: + downloaded_list = self.train_list + else: + downloaded_list = self.test_list + + self.imgs = [] + self.gt_labels = [] + + # load the picked numpy arrays + for file_name, checksum in downloaded_list: + file_path = os.path.join(self.data_prefix, self.base_folder, + file_name) + with open(file_path, 'rb') as f: + entry = pickle.load(f, encoding='latin1') + self.imgs.append(entry['data']) + if 'labels' in entry: + self.gt_labels.extend(entry['labels']) + else: + self.gt_labels.extend(entry['fine_labels']) + + self.imgs = np.vstack(self.imgs).reshape(-1, 3, 32, 32) + self.imgs = self.imgs.transpose((0, 2, 3, 1)) # convert to HWC + + self._load_meta() + + data_infos = [] + for img, gt_label in zip(self.imgs, self.gt_labels): + gt_label = np.array(gt_label, dtype=np.int64) + info = {'img': img, 'gt_label': gt_label} + data_infos.append(info) + return data_infos + + def _load_meta(self): + path = os.path.join(self.data_prefix, self.base_folder, + self.meta['filename']) + if not check_integrity(path, self.meta['md5']): + raise RuntimeError( + 'Dataset metadata file not found or corrupted.' 
+ + ' You can use download=True to download it') + with open(path, 'rb') as infile: + data = pickle.load(infile, encoding='latin1') + self.CLASSES = data[self.meta['key']] + + def _check_integrity(self): + root = self.data_prefix + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + +@DATASETS.register_module() +class CIFAR100(CIFAR10): + """`CIFAR100 `_ Dataset.""" + + base_folder = 'cifar-100-python' + url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' + filename = 'cifar-100-python.tar.gz' + tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' + train_list = [ + ['train', '16019d7e3df5f24257cddd939b257f8d'], + ] + + test_list = [ + ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], + ] + meta = { + 'filename': 'meta', + 'key': 'fine_label_names', + 'md5': '7973b15100ade9c7d40fb424638fde48', + } + CLASSES = [ + 'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', + 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', + 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', + 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', + 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', + 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'keyboard', + 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', + 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', + 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', + 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', + 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', + 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', + 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', + 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', + 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', + 'willow_tree', 'wolf', 'woman', 'worm' + ] diff --git a/mmcls/datasets/cub.py b/mmcls/datasets/cub.py new file mode 100644 index 0000000..6199bc7 --- /dev/null +++ b/mmcls/datasets/cub.py @@ -0,0 +1,129 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np + +from .base_dataset import BaseDataset +from .builder import DATASETS + + +@DATASETS.register_module() +class CUB(BaseDataset): + """The CUB-200-2011 Dataset. + + Support the `CUB-200-2011 `_ Dataset. + Comparing with the `CUB-200 `_ Dataset, + there are much more pictures in `CUB-200-2011`. + + Args: + ann_file (str): the annotation file. + images.txt in CUB. + image_class_labels_file (str): the label file. + image_class_labels.txt in CUB. + train_test_split_file (str): the split file. + train_test_split_file.txt in CUB. 
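+
+    Example:
+        An illustrative config (the paths below are placeholders and depend
+        on where the CUB-200-2011 archive is unpacked):
+
+        >>> data_root = 'data/CUB_200_2011'  # placeholder path
+        >>> train_cfg = dict(
+        ...     type='CUB',
+        ...     data_prefix=f'{data_root}/images',
+        ...     ann_file=f'{data_root}/images.txt',
+        ...     image_class_labels_file=f'{data_root}/image_class_labels.txt',
+        ...     train_test_split_file=f'{data_root}/train_test_split.txt',
+        ...     pipeline=[])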
+ """ # noqa: E501 + + CLASSES = [ + 'Black_footed_Albatross', 'Laysan_Albatross', 'Sooty_Albatross', + 'Groove_billed_Ani', 'Crested_Auklet', 'Least_Auklet', + 'Parakeet_Auklet', 'Rhinoceros_Auklet', 'Brewer_Blackbird', + 'Red_winged_Blackbird', 'Rusty_Blackbird', 'Yellow_headed_Blackbird', + 'Bobolink', 'Indigo_Bunting', 'Lazuli_Bunting', 'Painted_Bunting', + 'Cardinal', 'Spotted_Catbird', 'Gray_Catbird', 'Yellow_breasted_Chat', + 'Eastern_Towhee', 'Chuck_will_Widow', 'Brandt_Cormorant', + 'Red_faced_Cormorant', 'Pelagic_Cormorant', 'Bronzed_Cowbird', + 'Shiny_Cowbird', 'Brown_Creeper', 'American_Crow', 'Fish_Crow', + 'Black_billed_Cuckoo', 'Mangrove_Cuckoo', 'Yellow_billed_Cuckoo', + 'Gray_crowned_Rosy_Finch', 'Purple_Finch', 'Northern_Flicker', + 'Acadian_Flycatcher', 'Great_Crested_Flycatcher', 'Least_Flycatcher', + 'Olive_sided_Flycatcher', 'Scissor_tailed_Flycatcher', + 'Vermilion_Flycatcher', 'Yellow_bellied_Flycatcher', 'Frigatebird', + 'Northern_Fulmar', 'Gadwall', 'American_Goldfinch', + 'European_Goldfinch', 'Boat_tailed_Grackle', 'Eared_Grebe', + 'Horned_Grebe', 'Pied_billed_Grebe', 'Western_Grebe', 'Blue_Grosbeak', + 'Evening_Grosbeak', 'Pine_Grosbeak', 'Rose_breasted_Grosbeak', + 'Pigeon_Guillemot', 'California_Gull', 'Glaucous_winged_Gull', + 'Heermann_Gull', 'Herring_Gull', 'Ivory_Gull', 'Ring_billed_Gull', + 'Slaty_backed_Gull', 'Western_Gull', 'Anna_Hummingbird', + 'Ruby_throated_Hummingbird', 'Rufous_Hummingbird', 'Green_Violetear', + 'Long_tailed_Jaeger', 'Pomarine_Jaeger', 'Blue_Jay', 'Florida_Jay', + 'Green_Jay', 'Dark_eyed_Junco', 'Tropical_Kingbird', 'Gray_Kingbird', + 'Belted_Kingfisher', 'Green_Kingfisher', 'Pied_Kingfisher', + 'Ringed_Kingfisher', 'White_breasted_Kingfisher', + 'Red_legged_Kittiwake', 'Horned_Lark', 'Pacific_Loon', 'Mallard', + 'Western_Meadowlark', 'Hooded_Merganser', 'Red_breasted_Merganser', + 'Mockingbird', 'Nighthawk', 'Clark_Nutcracker', + 'White_breasted_Nuthatch', 'Baltimore_Oriole', 'Hooded_Oriole', + 'Orchard_Oriole', 'Scott_Oriole', 'Ovenbird', 'Brown_Pelican', + 'White_Pelican', 'Western_Wood_Pewee', 'Sayornis', 'American_Pipit', + 'Whip_poor_Will', 'Horned_Puffin', 'Common_Raven', + 'White_necked_Raven', 'American_Redstart', 'Geococcyx', + 'Loggerhead_Shrike', 'Great_Grey_Shrike', 'Baird_Sparrow', + 'Black_throated_Sparrow', 'Brewer_Sparrow', 'Chipping_Sparrow', + 'Clay_colored_Sparrow', 'House_Sparrow', 'Field_Sparrow', + 'Fox_Sparrow', 'Grasshopper_Sparrow', 'Harris_Sparrow', + 'Henslow_Sparrow', 'Le_Conte_Sparrow', 'Lincoln_Sparrow', + 'Nelson_Sharp_tailed_Sparrow', 'Savannah_Sparrow', 'Seaside_Sparrow', + 'Song_Sparrow', 'Tree_Sparrow', 'Vesper_Sparrow', + 'White_crowned_Sparrow', 'White_throated_Sparrow', + 'Cape_Glossy_Starling', 'Bank_Swallow', 'Barn_Swallow', + 'Cliff_Swallow', 'Tree_Swallow', 'Scarlet_Tanager', 'Summer_Tanager', + 'Artic_Tern', 'Black_Tern', 'Caspian_Tern', 'Common_Tern', + 'Elegant_Tern', 'Forsters_Tern', 'Least_Tern', 'Green_tailed_Towhee', + 'Brown_Thrasher', 'Sage_Thrasher', 'Black_capped_Vireo', + 'Blue_headed_Vireo', 'Philadelphia_Vireo', 'Red_eyed_Vireo', + 'Warbling_Vireo', 'White_eyed_Vireo', 'Yellow_throated_Vireo', + 'Bay_breasted_Warbler', 'Black_and_white_Warbler', + 'Black_throated_Blue_Warbler', 'Blue_winged_Warbler', 'Canada_Warbler', + 'Cape_May_Warbler', 'Cerulean_Warbler', 'Chestnut_sided_Warbler', + 'Golden_winged_Warbler', 'Hooded_Warbler', 'Kentucky_Warbler', + 'Magnolia_Warbler', 'Mourning_Warbler', 'Myrtle_Warbler', + 'Nashville_Warbler', 'Orange_crowned_Warbler', 'Palm_Warbler', + 
'Pine_Warbler', 'Prairie_Warbler', 'Prothonotary_Warbler', + 'Swainson_Warbler', 'Tennessee_Warbler', 'Wilson_Warbler', + 'Worm_eating_Warbler', 'Yellow_Warbler', 'Northern_Waterthrush', + 'Louisiana_Waterthrush', 'Bohemian_Waxwing', 'Cedar_Waxwing', + 'American_Three_toed_Woodpecker', 'Pileated_Woodpecker', + 'Red_bellied_Woodpecker', 'Red_cockaded_Woodpecker', + 'Red_headed_Woodpecker', 'Downy_Woodpecker', 'Bewick_Wren', + 'Cactus_Wren', 'Carolina_Wren', 'House_Wren', 'Marsh_Wren', + 'Rock_Wren', 'Winter_Wren', 'Common_Yellowthroat' + ] + + def __init__(self, *args, ann_file, image_class_labels_file, + train_test_split_file, **kwargs): + self.image_class_labels_file = image_class_labels_file + self.train_test_split_file = train_test_split_file + super(CUB, self).__init__(*args, ann_file=ann_file, **kwargs) + + def load_annotations(self): + with open(self.ann_file) as f: + samples = [x.strip().split(' ')[1] for x in f.readlines()] + + with open(self.image_class_labels_file) as f: + gt_labels = [ + # in the official CUB-200-2011 dataset, labels in + # image_class_labels_file are started from 1, so + # here we need to '- 1' to let them start from 0. + int(x.strip().split(' ')[1]) - 1 for x in f.readlines() + ] + + with open(self.train_test_split_file) as f: + splits = [int(x.strip().split(' ')[1]) for x in f.readlines()] + + assert len(samples) == len(gt_labels) == len(splits),\ + f'samples({len(samples)}), gt_labels({len(gt_labels)}) and ' \ + f'splits({len(splits)}) should have same length.' + + data_infos = [] + for filename, gt_label, split in zip(samples, gt_labels, splits): + if split and self.test_mode: + # skip train samples when test_mode=True + continue + elif not split and not self.test_mode: + # skip test samples when test_mode=False + continue + info = {'img_prefix': self.data_prefix} + info['img_info'] = {'filename': filename} + info['gt_label'] = np.array(gt_label, dtype=np.int64) + data_infos.append(info) + return data_infos diff --git a/mmcls/datasets/custom.py b/mmcls/datasets/custom.py new file mode 100644 index 0000000..61458f6 --- /dev/null +++ b/mmcls/datasets/custom.py @@ -0,0 +1,229 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +from mmcv import FileClient + +from .base_dataset import BaseDataset +from .builder import DATASETS + + +def find_folders(root: str, + file_client: FileClient) -> Tuple[List[str], Dict[str, int]]: + """Find classes by folders under a root. + + Args: + root (string): root directory of folders + + Returns: + Tuple[List[str], Dict[str, int]]: + + - folders: The name of sub folders under the root. + - folder_to_idx: The map from folder name to class idx. + """ + folders = list( + file_client.list_dir_or_file( + root, + list_dir=True, + list_file=False, + recursive=False, + )) + folders.sort() + folder_to_idx = {folders[i]: i for i in range(len(folders))} + return folders, folder_to_idx + + +def get_samples(root: str, folder_to_idx: Dict[str, int], + is_valid_file: Callable, file_client: FileClient): + """Make dataset by walking all images under a root. + + Args: + root (string): root directory of folders + folder_to_idx (dict): the map from class name to class idx + is_valid_file (Callable): A function that takes path of a file + and check if the file is a valid sample file. 
+ + Returns: + Tuple[list, set]: + + - samples: a list of tuple where each element is (image, class_idx) + - empty_folders: The folders don't have any valid files. + """ + samples = [] + available_classes = set() + + for folder_name in sorted(list(folder_to_idx.keys())): + _dir = file_client.join_path(root, folder_name) + files = list( + file_client.list_dir_or_file( + _dir, + list_dir=False, + list_file=True, + recursive=True, + )) + for file in sorted(list(files)): + if is_valid_file(file): + path = file_client.join_path(folder_name, file) + item = (path, folder_to_idx[folder_name]) + samples.append(item) + available_classes.add(folder_name) + + empty_folders = set(folder_to_idx.keys()) - available_classes + + return samples, empty_folders + + +@DATASETS.register_module() +class CustomDataset(BaseDataset): + """Custom dataset for classification. + + The dataset supports two kinds of annotation format. + + 1. An annotation file is provided, and each line indicates a sample: + + The sample files: :: + + data_prefix/ + ├── folder_1 + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + └── folder_2 + ├── 123.png + ├── nsdf3.png + └── ... + + The annotation file (the first column is the image path and the second + column is the index of category): :: + + folder_1/xxx.png 0 + folder_1/xxy.png 1 + folder_2/123.png 5 + folder_2/nsdf3.png 3 + ... + + Please specify the name of categories by the argument ``classes``. + + 2. The samples are arranged in the specific way: :: + + data_prefix/ + ├── class_x + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + │ └── xxz.png + └── class_y + ├── 123.png + ├── nsdf3.png + ├── ... + └── asd932_.png + + If the ``ann_file`` is specified, the dataset will be generated by the + first way, otherwise, try the second way. + + Args: + data_prefix (str): The path of data directory. + pipeline (Sequence[dict]): A list of dict, where each element + represents a operation defined in :mod:`mmcls.datasets.pipelines`. + Defaults to an empty tuple. + classes (str | Sequence[str], optional): Specify names of classes. + + - If is string, it should be a file path, and the every line of + the file is a name of a class. + - If is a sequence of string, every item is a name of class. + - If is None, use ``cls.CLASSES`` or the names of sub folders + (If use the second way to arrange samples). + + Defaults to None. + ann_file (str, optional): The annotation file. If is string, read + samples paths from the ann_file. If is None, find samples in + ``data_prefix``. Defaults to None. + extensions (Sequence[str]): A sequence of allowed extensions. Defaults + to ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif'). + test_mode (bool): In train mode or test mode. It's only a mark and + won't be used in this class. Defaults to False. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + If None, automatically inference from the specified path. + Defaults to None. 
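+
+    Examples:
+        An illustrative instantiation using the second (sub-folder) layout
+        described above; ``data/custom`` is a placeholder path:
+
+        >>> dataset = CustomDataset(
+        ...     data_prefix='data/custom',
+        ...     pipeline=[])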
+ """ + + def __init__(self, + data_prefix: str, + pipeline: Sequence = (), + classes: Union[str, Sequence[str], None] = None, + ann_file: Optional[str] = None, + extensions: Sequence[str] = ('.jpg', '.jpeg', '.png', '.ppm', + '.bmp', '.pgm', '.tif'), + test_mode: bool = False, + file_client_args: Optional[dict] = None): + self.extensions = tuple(set([i.lower() for i in extensions])) + self.file_client_args = file_client_args + + super().__init__( + data_prefix=data_prefix, + pipeline=pipeline, + classes=classes, + ann_file=ann_file, + test_mode=test_mode) + + def _find_samples(self): + """find samples from ``data_prefix``.""" + file_client = FileClient.infer_client(self.file_client_args, + self.data_prefix) + classes, folder_to_idx = find_folders(self.data_prefix, file_client) + samples, empty_classes = get_samples( + self.data_prefix, + folder_to_idx, + is_valid_file=self.is_valid_file, + file_client=file_client, + ) + + if len(samples) == 0: + raise RuntimeError( + f'Found 0 files in subfolders of: {self.data_prefix}. ' + f'Supported extensions are: {",".join(self.extensions)}') + + if self.CLASSES is not None: + assert len(self.CLASSES) == len(classes), \ + f"The number of subfolders ({len(classes)}) doesn't match " \ + f'the number of specified classes ({len(self.CLASSES)}). ' \ + 'Please check the data folder.' + else: + self.CLASSES = classes + + if empty_classes: + warnings.warn( + 'Found no valid file in the folder ' + f'{", ".join(empty_classes)}. ' + f"Supported extensions are: {', '.join(self.extensions)}", + UserWarning) + + self.folder_to_idx = folder_to_idx + + return samples + + def load_annotations(self): + """Load image paths and gt_labels.""" + if self.ann_file is None: + samples = self._find_samples() + elif isinstance(self.ann_file, str): + lines = mmcv.list_from_file( + self.ann_file, file_client_args=self.file_client_args) + samples = [x.strip().rsplit(' ', 1) for x in lines] + else: + raise TypeError('ann_file must be a str or None') + + data_infos = [] + for filename, gt_label in samples: + info = {'img_prefix': self.data_prefix} + info['img_info'] = {'filename': filename} + info['gt_label'] = np.array(gt_label, dtype=np.int64) + data_infos.append(info) + return data_infos + + def is_valid_file(self, filename: str) -> bool: + """Check if a file is a valid sample.""" + return filename.lower().endswith(self.extensions) diff --git a/mmcls/datasets/dataset_wrappers.py b/mmcls/datasets/dataset_wrappers.py new file mode 100644 index 0000000..86e257c --- /dev/null +++ b/mmcls/datasets/dataset_wrappers.py @@ -0,0 +1,332 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import bisect +import math +from collections import defaultdict + +import numpy as np +from mmcv.utils import print_log +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + +from .builder import DATASETS + + +@DATASETS.register_module() +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + add `get_cat_ids` function. + + Args: + datasets (list[:obj:`BaseDataset`]): A list of datasets. + separate_eval (bool): Whether to evaluate the results + separately if it is used as validation dataset. + Defaults to True. 
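+
+    Example:
+        Typically built through ``build_dataset`` from a config; the inner
+        dataset configs below are illustrative:
+
+        >>> concat_cfg = dict(
+        ...     type='ConcatDataset',
+        ...     datasets=[
+        ...         dict(type='CIFAR10', data_prefix='data/cifar10',
+        ...              pipeline=[]),
+        ...         dict(type='CIFAR100', data_prefix='data/cifar100',
+        ...              pipeline=[]),
+        ...     ],
+        ...     separate_eval=True)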
+    """
+
+    def __init__(self, datasets, separate_eval=True):
+        super(ConcatDataset, self).__init__(datasets)
+        self.separate_eval = separate_eval
+
+        self.CLASSES = datasets[0].CLASSES
+
+        if not separate_eval:
+            if len(set([type(ds) for ds in datasets])) != 1:
+                raise NotImplementedError(
+                    'To evaluate a concat dataset non-separately, '
+                    'all the datasets should have the same type.')
+
+    def get_cat_ids(self, idx):
+        if idx < 0:
+            if -idx > len(self):
+                raise ValueError(
+                    'absolute value of index should not exceed dataset length')
+            idx = len(self) + idx
+        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
+        if dataset_idx == 0:
+            sample_idx = idx
+        else:
+            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
+        return self.datasets[dataset_idx].get_cat_ids(sample_idx)
+
+    def evaluate(self, results, *args, indices=None, logger=None, **kwargs):
+        """Evaluate the results.
+
+        Args:
+            results (list[list | tuple]): Testing results of the dataset.
+            indices (list, optional): The indices of samples corresponding to
+                the results. It is not supported on ConcatDataset.
+                Defaults to None.
+            logger (logging.Logger | str, optional): Logger used for printing
+                related information during evaluation. Defaults to None.
+
+        Returns:
+            dict[str: float]: Evaluation results of the whole dataset, or of
+            each separate dataset if ``self.separate_eval=True``.
+        """
+        if indices is not None:
+            raise NotImplementedError(
+                'Using indices to evaluate specific samples in a '
+                'ConcatDataset is not supported for now.')
+
+        assert len(results) == len(self), \
+            ('Dataset and results have different sizes: '
+             f'{len(self)} vs. {len(results)}')
+
+        # Check whether all the datasets support evaluation
+        for dataset in self.datasets:
+            assert hasattr(dataset, 'evaluate'), \
+                f"{type(dataset)} hasn't implemented the evaluate function."
+
+        if self.separate_eval:
+            total_eval_results = dict()
+            for dataset_idx, dataset in enumerate(self.datasets):
+                start_idx = 0 if dataset_idx == 0 else \
+                    self.cumulative_sizes[dataset_idx-1]
+                end_idx = self.cumulative_sizes[dataset_idx]
+
+                results_per_dataset = results[start_idx:end_idx]
+                print_log(
+                    f'Evaluating dataset-{dataset_idx} with '
+                    f'{len(results_per_dataset)} images now',
+                    logger=logger)
+
+                eval_results_per_dataset = dataset.evaluate(
+                    results_per_dataset, *args, logger=logger, **kwargs)
+                for k, v in eval_results_per_dataset.items():
+                    total_eval_results.update({f'{dataset_idx}_{k}': v})
+
+            return total_eval_results
+        else:
+            original_data_infos = self.datasets[0].data_infos
+            self.datasets[0].data_infos = sum(
+                [dataset.data_infos for dataset in self.datasets], [])
+            eval_results = self.datasets[0].evaluate(
+                results, logger=logger, **kwargs)
+            self.datasets[0].data_infos = original_data_infos
+            return eval_results
+
+
+@DATASETS.register_module()
+class RepeatDataset(object):
+    """A wrapper of repeated dataset.
+
+    The length of the repeated dataset will be ``times`` times the length of
+    the original dataset. This is useful when the data loading time is long
+    but the dataset is small. Using RepeatDataset can reduce the data loading
+    time between epochs.
+
+    Args:
+        dataset (:obj:`BaseDataset`): The dataset to be repeated.
+        times (int): Repeat times.
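+
+    Example:
+        Typically built through ``build_dataset``; the inner dataset config
+        below is illustrative:
+
+        >>> repeat_cfg = dict(
+        ...     type='RepeatDataset',
+        ...     dataset=dict(type='CIFAR10', data_prefix='data/cifar10',
+        ...                  pipeline=[]),
+        ...     times=10)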
+    """
+
+    def __init__(self, dataset, times):
+        self.dataset = dataset
+        self.times = times
+        self.CLASSES = dataset.CLASSES
+
+        self._ori_len = len(self.dataset)
+
+    def __getitem__(self, idx):
+        return self.dataset[idx % self._ori_len]
+
+    def get_cat_ids(self, idx):
+        return self.dataset.get_cat_ids(idx % self._ori_len)
+
+    def __len__(self):
+        return self.times * self._ori_len
+
+    def evaluate(self, *args, **kwargs):
+        raise NotImplementedError(
+            'Evaluating results on a repeated dataset is not supported. '
+            'Please run inference and evaluation on the original dataset.')
+
+    def __repr__(self):
+        """Return the dataset type and the number of samples."""
+        # The wrapper itself has no ``test_mode``; query the wrapped dataset.
+        test_mode = getattr(self.dataset, 'test_mode', False)
+        dataset_type = 'Test' if test_mode else 'Train'
+        result = (
+            f'\n{self.__class__.__name__} ({self.dataset.__class__.__name__}) '
+            f'{dataset_type} dataset with total number of samples {len(self)}.'
+        )
+        return result
+
+
+# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57  # noqa
+@DATASETS.register_module()
+class ClassBalancedDataset(object):
+    r"""A wrapper of repeated dataset with repeat factor.
+
+    Suitable for training on class-imbalanced datasets like LVIS. Following
+    the sampling strategy in `this paper`_, in each epoch, an image may appear
+    multiple times based on its "repeat factor".
+
+    .. _this paper: https://arxiv.org/pdf/1908.03195.pdf
+
+    The repeat factor for an image is a function of the frequency of the
+    rarest category labeled in that image. The "frequency of category c" in
+    [0, 1] is defined as the fraction of images in the training set (without
+    repeats) in which category c appears.
+
+    The dataset needs to implement :func:`self.get_cat_ids` to support
+    ClassBalancedDataset.
+
+    The repeat factor is computed as follows.
+
+    1. For each category c, compute the fraction :math:`f(c)` of images that
+       contain it.
+    2. For each category c, compute the category-level repeat factor.
+
+       .. math::
+           r(c) = \max(1, \sqrt{\frac{t}{f(c)}})
+
+       where :math:`t` is ``oversample_thr``.
+    3. For each image I and its labels :math:`L(I)`, compute the image-level
+       repeat factor.
+
+       .. math::
+           r(I) = \max_{c \in L(I)} r(c)
+
+    Each image repeats :math:`\lceil r(I) \rceil` times.
+
+    Args:
+        dataset (:obj:`BaseDataset`): The dataset to be repeated.
+        oversample_thr (float): frequency threshold below which data is
+            repeated. For categories with ``f_c`` >= ``oversample_thr``, there
+            is no oversampling. For categories with ``f_c`` <
+            ``oversample_thr``, the degree of oversampling follows the
+            square-root inverse frequency heuristic above.
+    """
+
+    def __init__(self, dataset, oversample_thr):
+        self.dataset = dataset
+        self.oversample_thr = oversample_thr
+        self.CLASSES = dataset.CLASSES
+
+        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
+        repeat_indices = []
+        for dataset_index, repeat_factor in enumerate(repeat_factors):
+            repeat_indices.extend([dataset_index] * math.ceil(repeat_factor))
+        self.repeat_indices = repeat_indices
+
+        flags = []
+        if hasattr(self.dataset, 'flag'):
+            for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
+                flags.extend([flag] * int(math.ceil(repeat_factor)))
+            assert len(flags) == len(repeat_indices)
+        self.flag = np.asarray(flags, dtype=np.uint8)
+
+    def _get_repeat_factors(self, dataset, repeat_thr):
+        # 1. For each category c, compute the fraction of images
+        #    that contain it: f(c)
+        category_freq = defaultdict(int)
+        num_images = len(dataset)
+        for idx in range(num_images):
+            cat_ids = set(self.dataset.get_cat_ids(idx))
+            for cat_id in cat_ids:
+                category_freq[cat_id] += 1
+        for k, v in category_freq.items():
+            assert v > 0, f'category {k} does not contain any images'
+            category_freq[k] = v / num_images
+
+        # 2. For each category c, compute the category-level repeat factor:
+        #    r(c) = max(1, sqrt(t/f(c)))
+        category_repeat = {
+            cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
+            for cat_id, cat_freq in category_freq.items()
+        }
+
+        # 3. For each image I and its labels L(I), compute the image-level
+        #    repeat factor:
+        #    r(I) = max_{c in L(I)} r(c)
+        repeat_factors = []
+        for idx in range(num_images):
+            cat_ids = set(self.dataset.get_cat_ids(idx))
+            repeat_factor = max(
+                {category_repeat[cat_id]
+                 for cat_id in cat_ids})
+            repeat_factors.append(repeat_factor)
+
+        return repeat_factors
+
+    def __getitem__(self, idx):
+        ori_index = self.repeat_indices[idx]
+        return self.dataset[ori_index]
+
+    def __len__(self):
+        return len(self.repeat_indices)
+
+    def evaluate(self, *args, **kwargs):
+        raise NotImplementedError(
+            'Evaluating results on a class-balanced dataset is not '
+            'supported. Please run inference and evaluation on the original '
+            'dataset.')
+
+    def __repr__(self):
+        """Return the dataset type and the number of samples."""
+        # The wrapper itself has no ``test_mode``; query the wrapped dataset.
+        test_mode = getattr(self.dataset, 'test_mode', False)
+        dataset_type = 'Test' if test_mode else 'Train'
+        result = (
+            f'\n{self.__class__.__name__} ({self.dataset.__class__.__name__}) '
+            f'{dataset_type} dataset with total number of samples {len(self)}.'
+        )
+        return result
+
+
+@DATASETS.register_module()
+class KFoldDataset:
+    """A wrapper of dataset for K-Fold cross-validation.
+
+    K-Fold cross-validation divides the samples into k groups of almost equal
+    size, called folds. k-1 folds are used for training, and the remaining
+    fold is used for validation.
+
+    Args:
+        dataset (:obj:`BaseDataset`): The dataset to be divided.
+        fold (int): The fold used to do validation. Defaults to 0.
+        num_splits (int): The total number of folds. Defaults to 5.
+        test_mode (bool): Whether to use the validation fold (True) or the
+            training folds (False). Defaults to False.
+        seed (int, optional): The seed to shuffle the dataset before
+            splitting. If None, the dataset is not shuffled. Defaults to None.
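+
+    Example:
+        Typically built through ``build_dataset``; the inner dataset config
+        below is illustrative. This config selects fold 0 of a 5-fold split
+        as the validation subset:
+
+        >>> val_fold0_cfg = dict(
+        ...     type='KFoldDataset',
+        ...     dataset=dict(type='CIFAR10', data_prefix='data/cifar10',
+        ...                  pipeline=[]),
+        ...     fold=0,
+        ...     num_splits=5,
+        ...     test_mode=True,
+        ...     seed=42)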
+ """ + + def __init__(self, + dataset, + fold=0, + num_splits=5, + test_mode=False, + seed=None): + self.dataset = dataset + self.CLASSES = dataset.CLASSES + self.test_mode = test_mode + self.num_splits = num_splits + + length = len(dataset) + indices = list(range(length)) + if isinstance(seed, int): + rng = np.random.default_rng(seed) + rng.shuffle(indices) + + test_start = length * fold // num_splits + test_end = length * (fold + 1) // num_splits + if test_mode: + self.indices = indices[test_start:test_end] + else: + self.indices = indices[:test_start] + indices[test_end:] + + def get_cat_ids(self, idx): + return self.dataset.get_cat_ids(self.indices[idx]) + + def get_gt_labels(self): + dataset_gt_labels = self.dataset.get_gt_labels() + gt_labels = np.array([dataset_gt_labels[idx] for idx in self.indices]) + return gt_labels + + def __getitem__(self, idx): + return self.dataset[self.indices[idx]] + + def __len__(self): + return len(self.indices) + + def evaluate(self, *args, **kwargs): + kwargs['indices'] = self.indices + return self.dataset.evaluate(*args, **kwargs) diff --git a/mmcls/datasets/imagenet.py b/mmcls/datasets/imagenet.py new file mode 100644 index 0000000..84341dc --- /dev/null +++ b/mmcls/datasets/imagenet.py @@ -0,0 +1,1059 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Union + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class ImageNet(CustomDataset): + """`ImageNet `_ Dataset. + + The dataset supports two kinds of annotation format. More details can be + found in :class:`CustomDataset`. + + Args: + data_prefix (str): The path of data directory. + pipeline (Sequence[dict]): A list of dict, where each element + represents a operation defined in :mod:`mmcls.datasets.pipelines`. + Defaults to an empty tuple. + classes (str | Sequence[str], optional): Specify names of classes. + + - If is string, it should be a file path, and the every line of + the file is a name of a class. + - If is a sequence of string, every item is a name of class. + - If is None, use the default ImageNet-1k classes names. + + Defaults to None. + ann_file (str, optional): The annotation file. If is string, read + samples paths from the ann_file. If is None, find samples in + ``data_prefix``. Defaults to None. + extensions (Sequence[str]): A sequence of allowed extensions. Defaults + to ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif'). + test_mode (bool): In train mode or test mode. It's only a mark and + won't be used in this class. Defaults to False. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + If None, automatically inference from the specified path. + Defaults to None. 
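+
+    Examples:
+        An illustrative instantiation; ``data/imagenet/train`` is a
+        placeholder path that should contain one sub-folder per class when
+        no ``ann_file`` is given:
+
+        >>> train_set = ImageNet(
+        ...     data_prefix='data/imagenet/train',
+        ...     pipeline=[])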
+ """ # noqa: E501 + + IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif') + CLASSES = [ + 'tench, Tinca tinca', + 'goldfish, Carassius auratus', + 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', # noqa: E501 + 'tiger shark, Galeocerdo cuvieri', + 'hammerhead, hammerhead shark', + 'electric ray, crampfish, numbfish, torpedo', + 'stingray', + 'cock', + 'hen', + 'ostrich, Struthio camelus', + 'brambling, Fringilla montifringilla', + 'goldfinch, Carduelis carduelis', + 'house finch, linnet, Carpodacus mexicanus', + 'junco, snowbird', + 'indigo bunting, indigo finch, indigo bird, Passerina cyanea', + 'robin, American robin, Turdus migratorius', + 'bulbul', + 'jay', + 'magpie', + 'chickadee', + 'water ouzel, dipper', + 'kite', + 'bald eagle, American eagle, Haliaeetus leucocephalus', + 'vulture', + 'great grey owl, great gray owl, Strix nebulosa', + 'European fire salamander, Salamandra salamandra', + 'common newt, Triturus vulgaris', + 'eft', + 'spotted salamander, Ambystoma maculatum', + 'axolotl, mud puppy, Ambystoma mexicanum', + 'bullfrog, Rana catesbeiana', + 'tree frog, tree-frog', + 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui', + 'loggerhead, loggerhead turtle, Caretta caretta', + 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea', # noqa: E501 + 'mud turtle', + 'terrapin', + 'box turtle, box tortoise', + 'banded gecko', + 'common iguana, iguana, Iguana iguana', + 'American chameleon, anole, Anolis carolinensis', + 'whiptail, whiptail lizard', + 'agama', + 'frilled lizard, Chlamydosaurus kingi', + 'alligator lizard', + 'Gila monster, Heloderma suspectum', + 'green lizard, Lacerta viridis', + 'African chameleon, Chamaeleo chamaeleon', + 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis', # noqa: E501 + 'African crocodile, Nile crocodile, Crocodylus niloticus', + 'American alligator, Alligator mississipiensis', + 'triceratops', + 'thunder snake, worm snake, Carphophis amoenus', + 'ringneck snake, ring-necked snake, ring snake', + 'hognose snake, puff adder, sand viper', + 'green snake, grass snake', + 'king snake, kingsnake', + 'garter snake, grass snake', + 'water snake', + 'vine snake', + 'night snake, Hypsiglena torquata', + 'boa constrictor, Constrictor constrictor', + 'rock python, rock snake, Python sebae', + 'Indian cobra, Naja naja', + 'green mamba', + 'sea snake', + 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus', + 'diamondback, diamondback rattlesnake, Crotalus adamanteus', + 'sidewinder, horned rattlesnake, Crotalus cerastes', + 'trilobite', + 'harvestman, daddy longlegs, Phalangium opilio', + 'scorpion', + 'black and gold garden spider, Argiope aurantia', + 'barn spider, Araneus cavaticus', + 'garden spider, Aranea diademata', + 'black widow, Latrodectus mactans', + 'tarantula', + 'wolf spider, hunting spider', + 'tick', + 'centipede', + 'black grouse', + 'ptarmigan', + 'ruffed grouse, partridge, Bonasa umbellus', + 'prairie chicken, prairie grouse, prairie fowl', + 'peacock', + 'quail', + 'partridge', + 'African grey, African gray, Psittacus erithacus', + 'macaw', + 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita', + 'lorikeet', + 'coucal', + 'bee eater', + 'hornbill', + 'hummingbird', + 'jacamar', + 'toucan', + 'drake', + 'red-breasted merganser, Mergus serrator', + 'goose', + 'black swan, Cygnus atratus', + 'tusker', + 'echidna, spiny anteater, anteater', + 'platypus, duckbill, duckbilled platypus, 
duck-billed platypus, Ornithorhynchus anatinus', # noqa: E501 + 'wallaby, brush kangaroo', + 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', # noqa: E501 + 'wombat', + 'jellyfish', + 'sea anemone, anemone', + 'brain coral', + 'flatworm, platyhelminth', + 'nematode, nematode worm, roundworm', + 'conch', + 'snail', + 'slug', + 'sea slug, nudibranch', + 'chiton, coat-of-mail shell, sea cradle, polyplacophore', + 'chambered nautilus, pearly nautilus, nautilus', + 'Dungeness crab, Cancer magister', + 'rock crab, Cancer irroratus', + 'fiddler crab', + 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica', # noqa: E501 + 'American lobster, Northern lobster, Maine lobster, Homarus americanus', # noqa: E501 + 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish', # noqa: E501 + 'crayfish, crawfish, crawdad, crawdaddy', + 'hermit crab', + 'isopod', + 'white stork, Ciconia ciconia', + 'black stork, Ciconia nigra', + 'spoonbill', + 'flamingo', + 'little blue heron, Egretta caerulea', + 'American egret, great white heron, Egretta albus', + 'bittern', + 'crane', + 'limpkin, Aramus pictus', + 'European gallinule, Porphyrio porphyrio', + 'American coot, marsh hen, mud hen, water hen, Fulica americana', + 'bustard', + 'ruddy turnstone, Arenaria interpres', + 'red-backed sandpiper, dunlin, Erolia alpina', + 'redshank, Tringa totanus', + 'dowitcher', + 'oystercatcher, oyster catcher', + 'pelican', + 'king penguin, Aptenodytes patagonica', + 'albatross, mollymawk', + 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus', # noqa: E501 + 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca', + 'dugong, Dugong dugon', + 'sea lion', + 'Chihuahua', + 'Japanese spaniel', + 'Maltese dog, Maltese terrier, Maltese', + 'Pekinese, Pekingese, Peke', + 'Shih-Tzu', + 'Blenheim spaniel', + 'papillon', + 'toy terrier', + 'Rhodesian ridgeback', + 'Afghan hound, Afghan', + 'basset, basset hound', + 'beagle', + 'bloodhound, sleuthhound', + 'bluetick', + 'black-and-tan coonhound', + 'Walker hound, Walker foxhound', + 'English foxhound', + 'redbone', + 'borzoi, Russian wolfhound', + 'Irish wolfhound', + 'Italian greyhound', + 'whippet', + 'Ibizan hound, Ibizan Podenco', + 'Norwegian elkhound, elkhound', + 'otterhound, otter hound', + 'Saluki, gazelle hound', + 'Scottish deerhound, deerhound', + 'Weimaraner', + 'Staffordshire bullterrier, Staffordshire bull terrier', + 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier', # noqa: E501 + 'Bedlington terrier', + 'Border terrier', + 'Kerry blue terrier', + 'Irish terrier', + 'Norfolk terrier', + 'Norwich terrier', + 'Yorkshire terrier', + 'wire-haired fox terrier', + 'Lakeland terrier', + 'Sealyham terrier, Sealyham', + 'Airedale, Airedale terrier', + 'cairn, cairn terrier', + 'Australian terrier', + 'Dandie Dinmont, Dandie Dinmont terrier', + 'Boston bull, Boston terrier', + 'miniature schnauzer', + 'giant schnauzer', + 'standard schnauzer', + 'Scotch terrier, Scottish terrier, Scottie', + 'Tibetan terrier, chrysanthemum dog', + 'silky terrier, Sydney silky', + 'soft-coated wheaten terrier', + 'West Highland white terrier', + 'Lhasa, Lhasa apso', + 'flat-coated retriever', + 'curly-coated retriever', + 'golden retriever', + 'Labrador retriever', + 'Chesapeake Bay retriever', + 'German short-haired pointer', + 'vizsla, Hungarian pointer', + 'English setter', + 'Irish setter, red setter', + 'Gordon setter', + 'Brittany 
spaniel', + 'clumber, clumber spaniel', + 'English springer, English springer spaniel', + 'Welsh springer spaniel', + 'cocker spaniel, English cocker spaniel, cocker', + 'Sussex spaniel', + 'Irish water spaniel', + 'kuvasz', + 'schipperke', + 'groenendael', + 'malinois', + 'briard', + 'kelpie', + 'komondor', + 'Old English sheepdog, bobtail', + 'Shetland sheepdog, Shetland sheep dog, Shetland', + 'collie', + 'Border collie', + 'Bouvier des Flandres, Bouviers des Flandres', + 'Rottweiler', + 'German shepherd, German shepherd dog, German police dog, alsatian', + 'Doberman, Doberman pinscher', + 'miniature pinscher', + 'Greater Swiss Mountain dog', + 'Bernese mountain dog', + 'Appenzeller', + 'EntleBucher', + 'boxer', + 'bull mastiff', + 'Tibetan mastiff', + 'French bulldog', + 'Great Dane', + 'Saint Bernard, St Bernard', + 'Eskimo dog, husky', + 'malamute, malemute, Alaskan malamute', + 'Siberian husky', + 'dalmatian, coach dog, carriage dog', + 'affenpinscher, monkey pinscher, monkey dog', + 'basenji', + 'pug, pug-dog', + 'Leonberg', + 'Newfoundland, Newfoundland dog', + 'Great Pyrenees', + 'Samoyed, Samoyede', + 'Pomeranian', + 'chow, chow chow', + 'keeshond', + 'Brabancon griffon', + 'Pembroke, Pembroke Welsh corgi', + 'Cardigan, Cardigan Welsh corgi', + 'toy poodle', + 'miniature poodle', + 'standard poodle', + 'Mexican hairless', + 'timber wolf, grey wolf, gray wolf, Canis lupus', + 'white wolf, Arctic wolf, Canis lupus tundrarum', + 'red wolf, maned wolf, Canis rufus, Canis niger', + 'coyote, prairie wolf, brush wolf, Canis latrans', + 'dingo, warrigal, warragal, Canis dingo', + 'dhole, Cuon alpinus', + 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus', + 'hyena, hyaena', + 'red fox, Vulpes vulpes', + 'kit fox, Vulpes macrotis', + 'Arctic fox, white fox, Alopex lagopus', + 'grey fox, gray fox, Urocyon cinereoargenteus', + 'tabby, tabby cat', + 'tiger cat', + 'Persian cat', + 'Siamese cat, Siamese', + 'Egyptian cat', + 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', # noqa: E501 + 'lynx, catamount', + 'leopard, Panthera pardus', + 'snow leopard, ounce, Panthera uncia', + 'jaguar, panther, Panthera onca, Felis onca', + 'lion, king of beasts, Panthera leo', + 'tiger, Panthera tigris', + 'cheetah, chetah, Acinonyx jubatus', + 'brown bear, bruin, Ursus arctos', + 'American black bear, black bear, Ursus americanus, Euarctos americanus', # noqa: E501 + 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus', + 'sloth bear, Melursus ursinus, Ursus ursinus', + 'mongoose', + 'meerkat, mierkat', + 'tiger beetle', + 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', + 'ground beetle, carabid beetle', + 'long-horned beetle, longicorn, longicorn beetle', + 'leaf beetle, chrysomelid', + 'dung beetle', + 'rhinoceros beetle', + 'weevil', + 'fly', + 'bee', + 'ant, emmet, pismire', + 'grasshopper, hopper', + 'cricket', + 'walking stick, walkingstick, stick insect', + 'cockroach, roach', + 'mantis, mantid', + 'cicada, cicala', + 'leafhopper', + 'lacewing, lacewing fly', + "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", # noqa: E501 + 'damselfly', + 'admiral', + 'ringlet, ringlet butterfly', + 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus', + 'cabbage butterfly', + 'sulphur butterfly, sulfur butterfly', + 'lycaenid, lycaenid butterfly', + 'starfish, sea star', + 'sea urchin', + 'sea cucumber, holothurian', + 'wood rabbit, cottontail, cottontail 
rabbit', + 'hare', + 'Angora, Angora rabbit', + 'hamster', + 'porcupine, hedgehog', + 'fox squirrel, eastern fox squirrel, Sciurus niger', + 'marmot', + 'beaver', + 'guinea pig, Cavia cobaya', + 'sorrel', + 'zebra', + 'hog, pig, grunter, squealer, Sus scrofa', + 'wild boar, boar, Sus scrofa', + 'warthog', + 'hippopotamus, hippo, river horse, Hippopotamus amphibius', + 'ox', + 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis', + 'bison', + 'ram, tup', + 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis', # noqa: E501 + 'ibex, Capra ibex', + 'hartebeest', + 'impala, Aepyceros melampus', + 'gazelle', + 'Arabian camel, dromedary, Camelus dromedarius', + 'llama', + 'weasel', + 'mink', + 'polecat, fitch, foulmart, foumart, Mustela putorius', + 'black-footed ferret, ferret, Mustela nigripes', + 'otter', + 'skunk, polecat, wood pussy', + 'badger', + 'armadillo', + 'three-toed sloth, ai, Bradypus tridactylus', + 'orangutan, orang, orangutang, Pongo pygmaeus', + 'gorilla, Gorilla gorilla', + 'chimpanzee, chimp, Pan troglodytes', + 'gibbon, Hylobates lar', + 'siamang, Hylobates syndactylus, Symphalangus syndactylus', + 'guenon, guenon monkey', + 'patas, hussar monkey, Erythrocebus patas', + 'baboon', + 'macaque', + 'langur', + 'colobus, colobus monkey', + 'proboscis monkey, Nasalis larvatus', + 'marmoset', + 'capuchin, ringtail, Cebus capucinus', + 'howler monkey, howler', + 'titi, titi monkey', + 'spider monkey, Ateles geoffroyi', + 'squirrel monkey, Saimiri sciureus', + 'Madagascar cat, ring-tailed lemur, Lemur catta', + 'indri, indris, Indri indri, Indri brevicaudatus', + 'Indian elephant, Elephas maximus', + 'African elephant, Loxodonta africana', + 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens', + 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca', + 'barracouta, snoek', + 'eel', + 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch', # noqa: E501 + 'rock beauty, Holocanthus tricolor', + 'anemone fish', + 'sturgeon', + 'gar, garfish, garpike, billfish, Lepisosteus osseus', + 'lionfish', + 'puffer, pufferfish, blowfish, globefish', + 'abacus', + 'abaya', + "academic gown, academic robe, judge's robe", + 'accordion, piano accordion, squeeze box', + 'acoustic guitar', + 'aircraft carrier, carrier, flattop, attack aircraft carrier', + 'airliner', + 'airship, dirigible', + 'altar', + 'ambulance', + 'amphibian, amphibious vehicle', + 'analog clock', + 'apiary, bee house', + 'apron', + 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin', # noqa: E501 + 'assault rifle, assault gun', + 'backpack, back pack, knapsack, packsack, rucksack, haversack', + 'bakery, bakeshop, bakehouse', + 'balance beam, beam', + 'balloon', + 'ballpoint, ballpoint pen, ballpen, Biro', + 'Band Aid', + 'banjo', + 'bannister, banister, balustrade, balusters, handrail', + 'barbell', + 'barber chair', + 'barbershop', + 'barn', + 'barometer', + 'barrel, cask', + 'barrow, garden cart, lawn cart, wheelbarrow', + 'baseball', + 'basketball', + 'bassinet', + 'bassoon', + 'bathing cap, swimming cap', + 'bath towel', + 'bathtub, bathing tub, bath, tub', + 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', # noqa: E501 + 'beacon, lighthouse, beacon light, pharos', + 'beaker', + 'bearskin, busby, shako', + 'beer bottle', + 'beer glass', + 'bell cote, bell cot', + 'bib', + 'bicycle-built-for-two, tandem bicycle, tandem', + 'bikini, 
two-piece', + 'binder, ring-binder', + 'binoculars, field glasses, opera glasses', + 'birdhouse', + 'boathouse', + 'bobsled, bobsleigh, bob', + 'bolo tie, bolo, bola tie, bola', + 'bonnet, poke bonnet', + 'bookcase', + 'bookshop, bookstore, bookstall', + 'bottlecap', + 'bow', + 'bow tie, bow-tie, bowtie', + 'brass, memorial tablet, plaque', + 'brassiere, bra, bandeau', + 'breakwater, groin, groyne, mole, bulwark, seawall, jetty', + 'breastplate, aegis, egis', + 'broom', + 'bucket, pail', + 'buckle', + 'bulletproof vest', + 'bullet train, bullet', + 'butcher shop, meat market', + 'cab, hack, taxi, taxicab', + 'caldron, cauldron', + 'candle, taper, wax light', + 'cannon', + 'canoe', + 'can opener, tin opener', + 'cardigan', + 'car mirror', + 'carousel, carrousel, merry-go-round, roundabout, whirligig', + "carpenter's kit, tool kit", + 'carton', + 'car wheel', + 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM', # noqa: E501 + 'cassette', + 'cassette player', + 'castle', + 'catamaran', + 'CD player', + 'cello, violoncello', + 'cellular telephone, cellular phone, cellphone, cell, mobile phone', + 'chain', + 'chainlink fence', + 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour', # noqa: E501 + 'chain saw, chainsaw', + 'chest', + 'chiffonier, commode', + 'chime, bell, gong', + 'china cabinet, china closet', + 'Christmas stocking', + 'church, church building', + 'cinema, movie theater, movie theatre, movie house, picture palace', + 'cleaver, meat cleaver, chopper', + 'cliff dwelling', + 'cloak', + 'clog, geta, patten, sabot', + 'cocktail shaker', + 'coffee mug', + 'coffeepot', + 'coil, spiral, volute, whorl, helix', + 'combination lock', + 'computer keyboard, keypad', + 'confectionery, confectionary, candy store', + 'container ship, containership, container vessel', + 'convertible', + 'corkscrew, bottle screw', + 'cornet, horn, trumpet, trump', + 'cowboy boot', + 'cowboy hat, ten-gallon hat', + 'cradle', + 'crane', + 'crash helmet', + 'crate', + 'crib, cot', + 'Crock Pot', + 'croquet ball', + 'crutch', + 'cuirass', + 'dam, dike, dyke', + 'desk', + 'desktop computer', + 'dial telephone, dial phone', + 'diaper, nappy, napkin', + 'digital clock', + 'digital watch', + 'dining table, board', + 'dishrag, dishcloth', + 'dishwasher, dish washer, dishwashing machine', + 'disk brake, disc brake', + 'dock, dockage, docking facility', + 'dogsled, dog sled, dog sleigh', + 'dome', + 'doormat, welcome mat', + 'drilling platform, offshore rig', + 'drum, membranophone, tympan', + 'drumstick', + 'dumbbell', + 'Dutch oven', + 'electric fan, blower', + 'electric guitar', + 'electric locomotive', + 'entertainment center', + 'envelope', + 'espresso maker', + 'face powder', + 'feather boa, boa', + 'file, file cabinet, filing cabinet', + 'fireboat', + 'fire engine, fire truck', + 'fire screen, fireguard', + 'flagpole, flagstaff', + 'flute, transverse flute', + 'folding chair', + 'football helmet', + 'forklift', + 'fountain', + 'fountain pen', + 'four-poster', + 'freight car', + 'French horn, horn', + 'frying pan, frypan, skillet', + 'fur coat', + 'garbage truck, dustcart', + 'gasmask, respirator, gas helmet', + 'gas pump, gasoline pump, petrol pump, island dispenser', + 'goblet', + 'go-kart', + 'golf ball', + 'golfcart, golf cart', + 'gondola', + 'gong, tam-tam', + 'gown', + 'grand piano, grand', + 'greenhouse, nursery, glasshouse', + 'grille, radiator grille', + 'grocery store, grocery, food market, market', + 
'guillotine', + 'hair slide', + 'hair spray', + 'half track', + 'hammer', + 'hamper', + 'hand blower, blow dryer, blow drier, hair dryer, hair drier', + 'hand-held computer, hand-held microcomputer', + 'handkerchief, hankie, hanky, hankey', + 'hard disc, hard disk, fixed disk', + 'harmonica, mouth organ, harp, mouth harp', + 'harp', + 'harvester, reaper', + 'hatchet', + 'holster', + 'home theater, home theatre', + 'honeycomb', + 'hook, claw', + 'hoopskirt, crinoline', + 'horizontal bar, high bar', + 'horse cart, horse-cart', + 'hourglass', + 'iPod', + 'iron, smoothing iron', + "jack-o'-lantern", + 'jean, blue jean, denim', + 'jeep, landrover', + 'jersey, T-shirt, tee shirt', + 'jigsaw puzzle', + 'jinrikisha, ricksha, rickshaw', + 'joystick', + 'kimono', + 'knee pad', + 'knot', + 'lab coat, laboratory coat', + 'ladle', + 'lampshade, lamp shade', + 'laptop, laptop computer', + 'lawn mower, mower', + 'lens cap, lens cover', + 'letter opener, paper knife, paperknife', + 'library', + 'lifeboat', + 'lighter, light, igniter, ignitor', + 'limousine, limo', + 'liner, ocean liner', + 'lipstick, lip rouge', + 'Loafer', + 'lotion', + 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system', # noqa: E501 + "loupe, jeweler's loupe", + 'lumbermill, sawmill', + 'magnetic compass', + 'mailbag, postbag', + 'mailbox, letter box', + 'maillot', + 'maillot, tank suit', + 'manhole cover', + 'maraca', + 'marimba, xylophone', + 'mask', + 'matchstick', + 'maypole', + 'maze, labyrinth', + 'measuring cup', + 'medicine chest, medicine cabinet', + 'megalith, megalithic structure', + 'microphone, mike', + 'microwave, microwave oven', + 'military uniform', + 'milk can', + 'minibus', + 'miniskirt, mini', + 'minivan', + 'missile', + 'mitten', + 'mixing bowl', + 'mobile home, manufactured home', + 'Model T', + 'modem', + 'monastery', + 'monitor', + 'moped', + 'mortar', + 'mortarboard', + 'mosque', + 'mosquito net', + 'motor scooter, scooter', + 'mountain bike, all-terrain bike, off-roader', + 'mountain tent', + 'mouse, computer mouse', + 'mousetrap', + 'moving van', + 'muzzle', + 'nail', + 'neck brace', + 'necklace', + 'nipple', + 'notebook, notebook computer', + 'obelisk', + 'oboe, hautboy, hautbois', + 'ocarina, sweet potato', + 'odometer, hodometer, mileometer, milometer', + 'oil filter', + 'organ, pipe organ', + 'oscilloscope, scope, cathode-ray oscilloscope, CRO', + 'overskirt', + 'oxcart', + 'oxygen mask', + 'packet', + 'paddle, boat paddle', + 'paddlewheel, paddle wheel', + 'padlock', + 'paintbrush', + "pajama, pyjama, pj's, jammies", + 'palace', + 'panpipe, pandean pipe, syrinx', + 'paper towel', + 'parachute, chute', + 'parallel bars, bars', + 'park bench', + 'parking meter', + 'passenger car, coach, carriage', + 'patio, terrace', + 'pay-phone, pay-station', + 'pedestal, plinth, footstall', + 'pencil box, pencil case', + 'pencil sharpener', + 'perfume, essence', + 'Petri dish', + 'photocopier', + 'pick, plectrum, plectron', + 'pickelhaube', + 'picket fence, paling', + 'pickup, pickup truck', + 'pier', + 'piggy bank, penny bank', + 'pill bottle', + 'pillow', + 'ping-pong ball', + 'pinwheel', + 'pirate, pirate ship', + 'pitcher, ewer', + "plane, carpenter's plane, woodworking plane", + 'planetarium', + 'plastic bag', + 'plate rack', + 'plow, plough', + "plunger, plumber's helper", + 'Polaroid camera, Polaroid Land camera', + 'pole', + 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', # noqa: E501 + 'poncho', + 'pool table, billiard table, snooker table', + 'pop bottle, soda 
bottle', + 'pot, flowerpot', + "potter's wheel", + 'power drill', + 'prayer rug, prayer mat', + 'printer', + 'prison, prison house', + 'projectile, missile', + 'projector', + 'puck, hockey puck', + 'punching bag, punch bag, punching ball, punchball', + 'purse', + 'quill, quill pen', + 'quilt, comforter, comfort, puff', + 'racer, race car, racing car', + 'racket, racquet', + 'radiator', + 'radio, wireless', + 'radio telescope, radio reflector', + 'rain barrel', + 'recreational vehicle, RV, R.V.', + 'reel', + 'reflex camera', + 'refrigerator, icebox', + 'remote control, remote', + 'restaurant, eating house, eating place, eatery', + 'revolver, six-gun, six-shooter', + 'rifle', + 'rocking chair, rocker', + 'rotisserie', + 'rubber eraser, rubber, pencil eraser', + 'rugby ball', + 'rule, ruler', + 'running shoe', + 'safe', + 'safety pin', + 'saltshaker, salt shaker', + 'sandal', + 'sarong', + 'sax, saxophone', + 'scabbard', + 'scale, weighing machine', + 'school bus', + 'schooner', + 'scoreboard', + 'screen, CRT screen', + 'screw', + 'screwdriver', + 'seat belt, seatbelt', + 'sewing machine', + 'shield, buckler', + 'shoe shop, shoe-shop, shoe store', + 'shoji', + 'shopping basket', + 'shopping cart', + 'shovel', + 'shower cap', + 'shower curtain', + 'ski', + 'ski mask', + 'sleeping bag', + 'slide rule, slipstick', + 'sliding door', + 'slot, one-armed bandit', + 'snorkel', + 'snowmobile', + 'snowplow, snowplough', + 'soap dispenser', + 'soccer ball', + 'sock', + 'solar dish, solar collector, solar furnace', + 'sombrero', + 'soup bowl', + 'space bar', + 'space heater', + 'space shuttle', + 'spatula', + 'speedboat', + "spider web, spider's web", + 'spindle', + 'sports car, sport car', + 'spotlight, spot', + 'stage', + 'steam locomotive', + 'steel arch bridge', + 'steel drum', + 'stethoscope', + 'stole', + 'stone wall', + 'stopwatch, stop watch', + 'stove', + 'strainer', + 'streetcar, tram, tramcar, trolley, trolley car', + 'stretcher', + 'studio couch, day bed', + 'stupa, tope', + 'submarine, pigboat, sub, U-boat', + 'suit, suit of clothes', + 'sundial', + 'sunglass', + 'sunglasses, dark glasses, shades', + 'sunscreen, sunblock, sun blocker', + 'suspension bridge', + 'swab, swob, mop', + 'sweatshirt', + 'swimming trunks, bathing trunks', + 'swing', + 'switch, electric switch, electrical switch', + 'syringe', + 'table lamp', + 'tank, army tank, armored combat vehicle, armoured combat vehicle', + 'tape player', + 'teapot', + 'teddy, teddy bear', + 'television, television system', + 'tennis ball', + 'thatch, thatched roof', + 'theater curtain, theatre curtain', + 'thimble', + 'thresher, thrasher, threshing machine', + 'throne', + 'tile roof', + 'toaster', + 'tobacco shop, tobacconist shop, tobacconist', + 'toilet seat', + 'torch', + 'totem pole', + 'tow truck, tow car, wrecker', + 'toyshop', + 'tractor', + 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi', # noqa: E501 + 'tray', + 'trench coat', + 'tricycle, trike, velocipede', + 'trimaran', + 'tripod', + 'triumphal arch', + 'trolleybus, trolley coach, trackless trolley', + 'trombone', + 'tub, vat', + 'turnstile', + 'typewriter keyboard', + 'umbrella', + 'unicycle, monocycle', + 'upright, upright piano', + 'vacuum, vacuum cleaner', + 'vase', + 'vault', + 'velvet', + 'vending machine', + 'vestment', + 'viaduct', + 'violin, fiddle', + 'volleyball', + 'waffle iron', + 'wall clock', + 'wallet, billfold, notecase, pocketbook', + 'wardrobe, closet, press', + 'warplane, military plane', + 'washbasin, handbasin, washbowl, lavabo, 
wash-hand basin', + 'washer, automatic washer, washing machine', + 'water bottle', + 'water jug', + 'water tower', + 'whiskey jug', + 'whistle', + 'wig', + 'window screen', + 'window shade', + 'Windsor tie', + 'wine bottle', + 'wing', + 'wok', + 'wooden spoon', + 'wool, woolen, woollen', + 'worm fence, snake fence, snake-rail fence, Virginia fence', + 'wreck', + 'yawl', + 'yurt', + 'web site, website, internet site, site', + 'comic book', + 'crossword puzzle, crossword', + 'street sign', + 'traffic light, traffic signal, stoplight', + 'book jacket, dust cover, dust jacket, dust wrapper', + 'menu', + 'plate', + 'guacamole', + 'consomme', + 'hot pot, hotpot', + 'trifle', + 'ice cream, icecream', + 'ice lolly, lolly, lollipop, popsicle', + 'French loaf', + 'bagel, beigel', + 'pretzel', + 'cheeseburger', + 'hotdog, hot dog, red hot', + 'mashed potato', + 'head cabbage', + 'broccoli', + 'cauliflower', + 'zucchini, courgette', + 'spaghetti squash', + 'acorn squash', + 'butternut squash', + 'cucumber, cuke', + 'artichoke, globe artichoke', + 'bell pepper', + 'cardoon', + 'mushroom', + 'Granny Smith', + 'strawberry', + 'orange', + 'lemon', + 'fig', + 'pineapple, ananas', + 'banana', + 'jackfruit, jak, jack', + 'custard apple', + 'pomegranate', + 'hay', + 'carbonara', + 'chocolate sauce, chocolate syrup', + 'dough', + 'meat loaf, meatloaf', + 'pizza, pizza pie', + 'potpie', + 'burrito', + 'red wine', + 'espresso', + 'cup', + 'eggnog', + 'alp', + 'bubble', + 'cliff, drop, drop-off', + 'coral reef', + 'geyser', + 'lakeside, lakeshore', + 'promontory, headland, head, foreland', + 'sandbar, sand bar', + 'seashore, coast, seacoast, sea-coast', + 'valley, vale', + 'volcano', + 'ballplayer, baseball player', + 'groom, bridegroom', + 'scuba diver', + 'rapeseed', + 'daisy', + "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", # noqa: E501 + 'corn', + 'acorn', + 'hip, rose hip, rosehip', + 'buckeye, horse chestnut, conker', + 'coral fungus', + 'agaric', + 'gyromitra', + 'stinkhorn, carrion fungus', + 'earthstar', + 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa', # noqa: E501 + 'bolete', + 'ear, spike, capitulum', + 'toilet tissue, toilet paper, bathroom tissue' + ] + + def __init__(self, + data_prefix: str, + pipeline: Sequence = (), + classes: Union[str, Sequence[str], None] = None, + ann_file: Optional[str] = None, + test_mode: bool = False, + file_client_args: Optional[dict] = None): + super().__init__( + data_prefix=data_prefix, + pipeline=pipeline, + classes=classes, + ann_file=ann_file, + extensions=self.IMG_EXTENSIONS, + test_mode=test_mode, + file_client_args=file_client_args) diff --git a/mmcls/datasets/imagenet21k.py b/mmcls/datasets/imagenet21k.py new file mode 100644 index 0000000..864e215 --- /dev/null +++ b/mmcls/datasets/imagenet21k.py @@ -0,0 +1,174 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import gc +import pickle +import warnings +from typing import List, Optional, Sequence, Tuple, Union + +import numpy as np + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class ImageNet21k(CustomDataset): + """ImageNet21k Dataset. + + Since the dataset ImageNet21k is extremely big, cantains 21k+ classes + and 1.4B files. This class has improved the following points on the + basis of the class ``ImageNet``, in order to save memory, we enable the + ``serialize_data`` optional by default. 
With this option, the annotations
+    won't be stored in the list ``data_infos``, but will be serialized as an
+    array.
+
+    Args:
+        data_prefix (str): The path of the data directory.
+        pipeline (Sequence[dict]): A list of dicts, where each element
+            represents an operation defined in :mod:`mmcls.datasets.pipelines`.
+            Defaults to an empty tuple.
+        classes (str | Sequence[str], optional): Specify names of classes.
+
+            - If it is a string, it should be a file path, and every line of
+              the file is the name of a class.
+            - If it is a sequence of strings, every item is the name of a class.
+            - If it is None, the object won't have category information.
+              (Not recommended)
+
+            Defaults to None.
+        ann_file (str, optional): The annotation file. If it is a string, read
+            sample paths from the ann_file. If it is None, find samples in
+            ``data_prefix``. Defaults to None.
+        serialize_data (bool): Whether to hold memory using serialized objects.
+            When enabled, data loader workers can use shared RAM from the
+            master process instead of making a copy. Defaults to True.
+        multi_label (bool): Use multi-label annotations or not. Not implemented
+            yet. Defaults to False.
+        recursion_subdir (bool): Deprecated; the dataset now always recursively
+            gets all images.
+        test_mode (bool): In train mode or test mode. It's only a mark and
+            won't be used in this class. Defaults to False.
+        file_client_args (dict, optional): Arguments to instantiate a
+            FileClient. See :class:`mmcv.fileio.FileClient` for details.
+            If None, automatically infer it from the specified path.
+            Defaults to None.
+    """
+
+    IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif')
+    CLASSES = None
+
+    def __init__(self,
+                 data_prefix: str,
+                 pipeline: Sequence = (),
+                 classes: Union[str, Sequence[str], None] = None,
+                 ann_file: Optional[str] = None,
+                 serialize_data: bool = True,
+                 multi_label: bool = False,
+                 recursion_subdir: bool = True,
+                 test_mode=False,
+                 file_client_args: Optional[dict] = None):
+        assert recursion_subdir, 'The `recursion_subdir` option is ' \
+            'deprecated. Now the dataset will recursively get all images.'
+        if multi_label:
+            raise NotImplementedError(
+                'The `multi_label` option is not supported yet.')
+        self.multi_label = multi_label
+        self.serialize_data = serialize_data
+
+        if ann_file is None:
+            warnings.warn(
+                'The ImageNet21k dataset is large, and scanning the directory '
+                'may take a long time. Consider specifying the `ann_file` to '
+                'accelerate initialization.', UserWarning)
+
+        if classes is None:
+            warnings.warn(
+                'CLASSES is not stored in the `ImageNet21k` class. '
+                'Consider specifying the `classes` argument if you need to '
+                'do inference on the ImageNet-21k dataset.', UserWarning)
+
+        super().__init__(
+            data_prefix=data_prefix,
+            pipeline=pipeline,
+            classes=classes,
+            ann_file=ann_file,
+            extensions=self.IMG_EXTENSIONS,
+            test_mode=test_mode,
+            file_client_args=file_client_args)
+
+        if self.serialize_data:
+            self.data_infos_bytes, self.data_address = self._serialize_data()
+            # Empty the cache to prevent making multiple copies of
+            # `self.data_infos` when loading data with multiple processes.
+            self.data_infos.clear()
+            gc.collect()
+
+    def get_cat_ids(self, idx: int) -> List[int]:
+        """Get category id by index.
+
+        Args:
+            idx (int): Index of data.
+
+        Returns:
+            cat_ids (List[int]): Image category of specified index.
+        """
+
+        return [int(self.get_data_info(idx)['gt_label'])]
+
+    def get_data_info(self, idx: int) -> dict:
+        """Get annotation by index.
+
+        Args:
+            idx (int): The index of data.
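As a usage sketch, a dataset entry for this class in a config file might look like the following. The data path, annotation file, and pipeline are placeholders introduced here for illustration, not part of this patch.

```python
# Hypothetical config entry for ImageNet21k; paths and pipeline are placeholders.
train = dict(
    type='ImageNet21k',
    data_prefix='data/imagenet21k/train',    # placeholder directory
    ann_file='data/imagenet21k/train.txt',   # placeholder annotation list
    serialize_data=True,                     # default: keep annotations as one byte array
    pipeline=[
        dict(type='LoadImageFromFile'),
        dict(type='RandomResizedCrop', size=224),
        dict(type='ImageToTensor', keys=['img']),
        dict(type='ToTensor', keys=['gt_label']),
        dict(type='Collect', keys=['img', 'gt_label']),
    ])
```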
+ + Returns: + dict: The idx-th annotation of the dataset. + """ + if self.serialize_data: + start_addr = 0 if idx == 0 else self.data_address[idx - 1].item() + end_addr = self.data_address[idx].item() + bytes = memoryview(self.data_infos_bytes[start_addr:end_addr]) + data_info = pickle.loads(bytes) + else: + data_info = self.data_infos[idx] + + return data_info + + def prepare_data(self, idx): + data_info = self.get_data_info(idx) + return self.pipeline(data_info) + + def _serialize_data(self) -> Tuple[np.ndarray, np.ndarray]: + """Serialize ``self.data_infos`` to save memory when launching multiple + workers in data loading. This function will be called in ``full_init``. + + Hold memory using serialized objects, and data loader workers can use + shared RAM from master process instead of making a copy. + + Returns: + Tuple[np.ndarray, np.ndarray]: serialize result and corresponding + address. + """ + + def _serialize(data): + buffer = pickle.dumps(data, protocol=4) + return np.frombuffer(buffer, dtype=np.uint8) + + serialized_data_infos_list = [_serialize(x) for x in self.data_infos] + address_list = np.asarray([len(x) for x in serialized_data_infos_list], + dtype=np.int64) + data_address: np.ndarray = np.cumsum(address_list) + serialized_data_infos = np.concatenate(serialized_data_infos_list) + + return serialized_data_infos, data_address + + def __len__(self) -> int: + """Get the length of filtered dataset and automatically call + ``full_init`` if the dataset has not been fully init. + + Returns: + int: The length of filtered dataset. + """ + if self.serialize_data: + return len(self.data_address) + else: + return len(self.data_infos) diff --git a/mmcls/datasets/mnist.py b/mmcls/datasets/mnist.py new file mode 100644 index 0000000..4065e0d --- /dev/null +++ b/mmcls/datasets/mnist.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import codecs +import os +import os.path as osp + +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import get_dist_info, master_only + +from .base_dataset import BaseDataset +from .builder import DATASETS +from .utils import download_and_extract_archive, rm_suffix + + +@DATASETS.register_module() +class MNIST(BaseDataset): + """`MNIST `_ Dataset. 
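A minimal, self-contained sketch of the serialization trick implemented by ``_serialize_data`` and ``get_data_info`` above: records are pickled into one flat ``uint8`` array, and the cumulative end offsets are enough to slice a single record back out. The toy records below are illustrative, not the real annotation format.

```python
import pickle

import numpy as np

# Toy stand-ins for ``data_infos``; real entries hold image paths and labels.
data_infos = [{'img_info': {'filename': f'{i}.jpg'}, 'gt_label': i}
              for i in range(3)]

# Serialize each record, concatenate into one uint8 array, and keep the
# cumulative end offsets so single records can be sliced back out later.
buffers = [np.frombuffer(pickle.dumps(x, protocol=4), dtype=np.uint8)
           for x in data_infos]
data_address = np.cumsum([len(b) for b in buffers])
data_bytes = np.concatenate(buffers)


def get_data_info(idx):
    start = 0 if idx == 0 else data_address[idx - 1].item()
    end = data_address[idx].item()
    return pickle.loads(memoryview(data_bytes[start:end]))


assert get_data_info(1)['gt_label'] == 1
```

Because the concatenated array lives in a single buffer, data loader workers forked from the master process can read it via shared memory instead of copying a long Python list of dicts.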
+ + This implementation is modified from + https://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py + """ # noqa: E501 + + resource_prefix = 'http://yann.lecun.com/exdb/mnist/' + resources = { + 'train_image_file': + ('train-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873'), + 'train_label_file': + ('train-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432'), + 'test_image_file': + ('t10k-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3'), + 'test_label_file': + ('t10k-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c') + } + + CLASSES = [ + '0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', '5 - five', + '6 - six', '7 - seven', '8 - eight', '9 - nine' + ] + + def load_annotations(self): + train_image_file = osp.join( + self.data_prefix, rm_suffix(self.resources['train_image_file'][0])) + train_label_file = osp.join( + self.data_prefix, rm_suffix(self.resources['train_label_file'][0])) + test_image_file = osp.join( + self.data_prefix, rm_suffix(self.resources['test_image_file'][0])) + test_label_file = osp.join( + self.data_prefix, rm_suffix(self.resources['test_label_file'][0])) + + if not osp.exists(train_image_file) or not osp.exists( + train_label_file) or not osp.exists( + test_image_file) or not osp.exists(test_label_file): + self.download() + + _, world_size = get_dist_info() + if world_size > 1: + dist.barrier() + assert osp.exists(train_image_file) and osp.exists( + train_label_file) and osp.exists( + test_image_file) and osp.exists(test_label_file), \ + 'Shared storage seems unavailable. Please download dataset ' \ + f'manually through {self.resource_prefix}.' + + train_set = (read_image_file(train_image_file), + read_label_file(train_label_file)) + test_set = (read_image_file(test_image_file), + read_label_file(test_label_file)) + + if not self.test_mode: + imgs, gt_labels = train_set + else: + imgs, gt_labels = test_set + + data_infos = [] + for img, gt_label in zip(imgs, gt_labels): + gt_label = np.array(gt_label, dtype=np.int64) + info = {'img': img.numpy(), 'gt_label': gt_label} + data_infos.append(info) + return data_infos + + @master_only + def download(self): + os.makedirs(self.data_prefix, exist_ok=True) + + # download files + for url, md5 in self.resources.values(): + url = osp.join(self.resource_prefix, url) + filename = url.rpartition('/')[2] + download_and_extract_archive( + url, + download_root=self.data_prefix, + filename=filename, + md5=md5) + + +@DATASETS.register_module() +class FashionMNIST(MNIST): + """`Fashion-MNIST `_ + Dataset.""" + + resource_prefix = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/' # noqa: E501 + resources = { + 'train_image_file': + ('train-images-idx3-ubyte.gz', '8d4fb7e6c68d591d4c3dfef9ec88bf0d'), + 'train_label_file': + ('train-labels-idx1-ubyte.gz', '25c81989df183df01b3e8a0aad5dffbe'), + 'test_image_file': + ('t10k-images-idx3-ubyte.gz', 'bef4ecab320f06d8554ea6380940ec79'), + 'test_label_file': + ('t10k-labels-idx1-ubyte.gz', 'bb300cfdad3c16e7a12a480ee83cd310') + } + CLASSES = [ + 'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', + 'Shirt', 'Sneaker', 'Bag', 'Ankle boot' + ] + + +def get_int(b): + return int(codecs.encode(b, 'hex'), 16) + + +def open_maybe_compressed_file(path): + """Return a file object that possibly decompresses 'path' on the fly. + + Decompression occurs when argument `path` is a string and ends with '.gz' + or '.xz'. 
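For reference, a minimal config fragment using the ``MNIST``/``FashionMNIST`` datasets above might look like this. The data roots are placeholders; no ``LoadImageFromFile`` step is needed because ``load_annotations`` already puts the decoded array into ``results['img']``.

```python
# Hypothetical config fragment; data roots and pipeline are illustrative.
mnist_pipeline = [
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label']),
]
data = dict(
    train=dict(type='MNIST', data_prefix='data/mnist', pipeline=mnist_pipeline),
    test=dict(type='FashionMNIST', data_prefix='data/fashion_mnist',
              pipeline=mnist_pipeline, test_mode=True),
)
```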
+ """ + if not isinstance(path, str): + return path + if path.endswith('.gz'): + import gzip + return gzip.open(path, 'rb') + if path.endswith('.xz'): + import lzma + return lzma.open(path, 'rb') + return open(path, 'rb') + + +def read_sn3_pascalvincent_tensor(path, strict=True): + """Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx- + io.lsh'). + + Argument may be a filename, compressed filename, or file object. + """ + # typemap + if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'): + read_sn3_pascalvincent_tensor.typemap = { + 8: (torch.uint8, np.uint8, np.uint8), + 9: (torch.int8, np.int8, np.int8), + 11: (torch.int16, np.dtype('>i2'), 'i2'), + 12: (torch.int32, np.dtype('>i4'), 'i4'), + 13: (torch.float32, np.dtype('>f4'), 'f4'), + 14: (torch.float64, np.dtype('>f8'), 'f8') + } + # read + with open_maybe_compressed_file(path) as f: + data = f.read() + # parse + magic = get_int(data[0:4]) + nd = magic % 256 + ty = magic // 256 + assert nd >= 1 and nd <= 3 + assert ty >= 8 and ty <= 14 + m = read_sn3_pascalvincent_tensor.typemap[ty] + s = [get_int(data[4 * (i + 1):4 * (i + 2)]) for i in range(nd)] + parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1))) + assert parsed.shape[0] == np.prod(s) or not strict + return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s) + + +def read_label_file(path): + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert (x.dtype == torch.uint8) + assert (x.ndimension() == 1) + return x.long() + + +def read_image_file(path): + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert (x.dtype == torch.uint8) + assert (x.ndimension() == 3) + return x diff --git a/mmcls/datasets/multi_label.py b/mmcls/datasets/multi_label.py new file mode 100644 index 0000000..02480f0 --- /dev/null +++ b/mmcls/datasets/multi_label.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import numpy as np + +from mmcls.core import average_performance, mAP +from .base_dataset import BaseDataset + + +class MultiLabelDataset(BaseDataset): + """Multi-label Dataset.""" + + def get_cat_ids(self, idx: int) -> List[int]: + """Get category ids by index. + + Args: + idx (int): Index of data. + + Returns: + cat_ids (List[int]): Image categories of specified index. + """ + gt_labels = self.data_infos[idx]['gt_label'] + cat_ids = np.where(gt_labels == 1)[0].tolist() + return cat_ids + + def evaluate(self, + results, + metric='mAP', + metric_options=None, + indices=None, + logger=None): + """Evaluate the dataset. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + Default value is 'mAP'. Options are 'mAP', 'CP', 'CR', 'CF1', + 'OP', 'OR' and 'OF1'. + metric_options (dict, optional): Options for calculating metrics. + Allowed keys are 'k' and 'thr'. Defaults to None + logger (logging.Logger | str, optional): Logger used for printing + related information during evaluation. Defaults to None. 
+ + Returns: + dict: evaluation results + """ + if metric_options is None or metric_options == {}: + metric_options = {'thr': 0.5} + + if isinstance(metric, str): + metrics = [metric] + else: + metrics = metric + allowed_metrics = ['mAP', 'CP', 'CR', 'CF1', 'OP', 'OR', 'OF1'] + eval_results = {} + results = np.vstack(results) + gt_labels = self.get_gt_labels() + if indices is not None: + gt_labels = gt_labels[indices] + num_imgs = len(results) + assert len(gt_labels) == num_imgs, 'dataset testing results should '\ + 'be of the same length as gt_labels.' + + invalid_metrics = set(metrics) - set(allowed_metrics) + if len(invalid_metrics) != 0: + raise ValueError(f'metric {invalid_metrics} is not supported.') + + if 'mAP' in metrics: + mAP_value = mAP(results, gt_labels) + eval_results['mAP'] = mAP_value + if len(set(metrics) - {'mAP'}) != 0: + performance_keys = ['CP', 'CR', 'CF1', 'OP', 'OR', 'OF1'] + performance_values = average_performance(results, gt_labels, + **metric_options) + for k, v in zip(performance_keys, performance_values): + if k in metrics: + eval_results[k] = v + + return eval_results diff --git a/mmcls/datasets/pipelines/__init__.py b/mmcls/datasets/pipelines/__init__.py new file mode 100644 index 0000000..929891b --- /dev/null +++ b/mmcls/datasets/pipelines/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .auto_augment import (AutoAugment, AutoContrast, Brightness, + ColorTransform, Contrast, Cutout, Equalize, Invert, + Posterize, RandAugment, Rotate, Sharpness, Shear, + Solarize, SolarizeAdd, Translate) +from .compose import Compose +from .formatting import (Collect, ImageToTensor, ToNumpy, ToPIL, ToTensor, + Transpose, to_tensor) +from .loading import LoadImageFromFile, LoadImageFromFileLMDB +from .transforms import (CenterCrop, ColorJitter, Lighting, Normalize, Pad, + RandomCrop, RandomErasing, RandomFlip, + RandomGrayscale, RandomResizedCrop, Resize) + +__all__ = [ + 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToPIL', 'ToNumpy', + 'Transpose', 'Collect', 'LoadImageFromFile', 'Resize', 'CenterCrop', + 'RandomFlip', 'Normalize', 'RandomCrop', 'RandomResizedCrop', + 'RandomGrayscale', 'Shear', 'Translate', 'Rotate', 'Invert', + 'ColorTransform', 'Solarize', 'Posterize', 'AutoContrast', 'Equalize', + 'Contrast', 'Brightness', 'Sharpness', 'AutoAugment', 'SolarizeAdd', + 'Cutout', 'RandAugment', 'Lighting', 'ColorJitter', 'RandomErasing', 'Pad', 'LoadImageFromFileLMDB' +] diff --git a/mmcls/datasets/pipelines/auto_augment.py b/mmcls/datasets/pipelines/auto_augment.py new file mode 100644 index 0000000..e7fffd6 --- /dev/null +++ b/mmcls/datasets/pipelines/auto_augment.py @@ -0,0 +1,921 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import inspect +import random +from math import ceil +from numbers import Number +from typing import Sequence + +import mmcv +import numpy as np + +from ..builder import PIPELINES +from .compose import Compose + +# Default hyperparameters for all Ops +_HPARAMS_DEFAULT = dict(pad_val=128) + + +def random_negative(value, random_negative_prob): + """Randomly negate value based on random_negative_prob.""" + return -value if np.random.rand() < random_negative_prob else value + + +def merge_hparams(policy: dict, hparams: dict): + """Merge hyperparameters into policy config. + + Only merge partial hyperparameters required of the policy. + + Args: + policy (dict): Original policy config dict. + hparams (dict): Hyperparameters need to be merged. 
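The multi-label ``evaluate`` method above delegates to the metric functions imported at the top of ``multi_label.py``. A small sketch of the expected input shapes, with made-up scores and labels:

```python
import numpy as np

from mmcls.core import average_performance, mAP  # imported in multi_label.py above

# Scores for 4 images over 3 classes and the matching multi-hot labels.
pred = np.array([[0.9, 0.1, 0.2],
                 [0.2, 0.8, 0.3],
                 [0.1, 0.2, 0.7],
                 [0.6, 0.7, 0.1]])
target = np.array([[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1],
                   [1, 1, 0]])

print(mAP(pred, target))                           # mean average precision
print(average_performance(pred, target, thr=0.5))  # CP, CR, CF1, OP, OR, OF1
```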
+ + Returns: + dict: Policy config dict after adding ``hparams``. + """ + op = PIPELINES.get(policy['type']) + assert op is not None, f'Invalid policy type "{policy["type"]}".' + for key, value in hparams.items(): + if policy.get(key, None) is not None: + continue + if key in inspect.getfullargspec(op.__init__).args: + policy[key] = value + return policy + + +@PIPELINES.register_module() +class AutoAugment(object): + """Auto augmentation. + + This data augmentation is proposed in `AutoAugment: Learning Augmentation + Policies from Data `_. + + Args: + policies (list[list[dict]]): The policies of auto augmentation. Each + policy in ``policies`` is a specific augmentation policy, and is + composed by several augmentations (dict). When AutoAugment is + called, a random policy in ``policies`` will be selected to + augment images. + hparams (dict): Configs of hyperparameters. Hyperparameters will be + used in policies that require these arguments if these arguments + are not set in policy dicts. Defaults to use _HPARAMS_DEFAULT. + """ + + def __init__(self, policies, hparams=_HPARAMS_DEFAULT): + assert isinstance(policies, list) and len(policies) > 0, \ + 'Policies must be a non-empty list.' + for policy in policies: + assert isinstance(policy, list) and len(policy) > 0, \ + 'Each policy in policies must be a non-empty list.' + for augment in policy: + assert isinstance(augment, dict) and 'type' in augment, \ + 'Each specific augmentation must be a dict with key' \ + ' "type".' + + self.hparams = hparams + policies = copy.deepcopy(policies) + self.policies = [] + for sub in policies: + merged_sub = [merge_hparams(policy, hparams) for policy in sub] + self.policies.append(merged_sub) + + self.sub_policy = [Compose(policy) for policy in self.policies] + + def __call__(self, results): + sub_policy = random.choice(self.sub_policy) + return sub_policy(results) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(policies={self.policies})' + return repr_str + + +@PIPELINES.register_module() +class RandAugment(object): + r"""Random augmentation. + + This data augmentation is proposed in `RandAugment: Practical automated + data augmentation with a reduced search space + `_. + + Args: + policies (list[dict]): The policies of random augmentation. Each + policy in ``policies`` is one specific augmentation policy (dict). + The policy shall at least have key `type`, indicating the type of + augmentation. For those which have magnitude, (given to the fact + they are named differently in different augmentation, ) + `magnitude_key` and `magnitude_range` shall be the magnitude + argument (str) and the range of magnitude (tuple in the format of + (val1, val2)), respectively. Note that val1 is not necessarily + less than val2. + num_policies (int): Number of policies to select from policies each + time. + magnitude_level (int | float): Magnitude level for all the augmentation + selected. + total_level (int | float): Total level for the magnitude. Defaults to + 30. + magnitude_std (Number | str): Deviation of magnitude noise applied. + + - If positive number, magnitude is sampled from normal distribution + (mean=magnitude, std=magnitude_std). + - If 0 or negative number, magnitude remains unchanged. + - If str "inf", magnitude is sampled from uniform distribution + (range=[min, magnitude]). + hparams (dict): Configs of hyperparameters. Hyperparameters will be + used in policies that require these arguments if these arguments + are not set in policy dicts. Defaults to use _HPARAMS_DEFAULT. 
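To make the expected ``policies`` structure of the ``AutoAugment`` wrapper above concrete, here is an illustrative two-sub-policy setup. The magnitudes and probabilities are made up and are not the published ImageNet policy.

```python
# Two made-up sub-policies, only to show the list[list[dict]] structure.
policies = [
    [dict(type='Posterize', bits=4, prob=0.4),
     dict(type='Rotate', angle=30., prob=0.6)],
    [dict(type='Solarize', thr=128, prob=0.6),
     dict(type='Equalize', prob=0.6)],
]
auto_augment = dict(type='AutoAugment', policies=policies)
```

When called, one of the two sub-policies is chosen at random and applied as a ``Compose`` of its ops.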
+ + Note: + `magnitude_std` will introduce some randomness to policy, modified by + https://github.com/rwightman/pytorch-image-models. + + When magnitude_std=0, we calculate the magnitude as follows: + + .. math:: + \text{magnitude} = \frac{\text{magnitude_level}} + {\text{totallevel}} \times (\text{val2} - \text{val1}) + + \text{val1} + """ + + def __init__(self, + policies, + num_policies, + magnitude_level, + magnitude_std=0., + total_level=30, + hparams=_HPARAMS_DEFAULT): + assert isinstance(num_policies, int), 'Number of policies must be ' \ + f'of int type, got {type(num_policies)} instead.' + assert isinstance(magnitude_level, (int, float)), \ + 'Magnitude level must be of int or float type, ' \ + f'got {type(magnitude_level)} instead.' + assert isinstance(total_level, (int, float)), 'Total level must be ' \ + f'of int or float type, got {type(total_level)} instead.' + assert isinstance(policies, list) and len(policies) > 0, \ + 'Policies must be a non-empty list.' + + assert isinstance(magnitude_std, (Number, str)), \ + 'Magnitude std must be of number or str type, ' \ + f'got {type(magnitude_std)} instead.' + if isinstance(magnitude_std, str): + assert magnitude_std == 'inf', \ + 'Magnitude std must be of number or "inf", ' \ + f'got "{magnitude_std}" instead.' + + assert num_policies > 0, 'num_policies must be greater than 0.' + assert magnitude_level >= 0, 'magnitude_level must be no less than 0.' + assert total_level > 0, 'total_level must be greater than 0.' + + self.num_policies = num_policies + self.magnitude_level = magnitude_level + self.magnitude_std = magnitude_std + self.total_level = total_level + self.hparams = hparams + policies = copy.deepcopy(policies) + self._check_policies(policies) + self.policies = [merge_hparams(policy, hparams) for policy in policies] + + def _check_policies(self, policies): + for policy in policies: + assert isinstance(policy, dict) and 'type' in policy, \ + 'Each policy must be a dict with key "type".' + type_name = policy['type'] + + magnitude_key = policy.get('magnitude_key', None) + if magnitude_key is not None: + assert 'magnitude_range' in policy, \ + f'RandAugment policy {type_name} needs `magnitude_range`.' + magnitude_range = policy['magnitude_range'] + assert (isinstance(magnitude_range, Sequence) + and len(magnitude_range) == 2), \ + f'`magnitude_range` of RandAugment policy {type_name} ' \ + f'should be a Sequence with two numbers.' + + def _process_policies(self, policies): + processed_policies = [] + for policy in policies: + processed_policy = copy.deepcopy(policy) + magnitude_key = processed_policy.pop('magnitude_key', None) + if magnitude_key is not None: + magnitude = self.magnitude_level + # if magnitude_std is positive number or 'inf', move + # magnitude_value randomly. 
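Putting the pieces above together, a hypothetical ``RandAugment`` configuration could look like the following; all values are illustrative. With ``magnitude_std`` left at 0, the mapping from the note above is exact: a policy with ``magnitude_range=(0, 0.3)`` receives 9 / 30 * (0.3 - 0) + 0 = 0.09, and the ``Rotate`` policy receives 9 / 30 * (30 - 0) + 0 = 9.0 degrees, before the per-op random sign flip.

```python
# Illustrative values; magnitude_std defaults to 0, so the mapping is exact.
rand_augment = dict(
    type='RandAugment',
    policies=[
        dict(type='Shear', magnitude_key='magnitude', magnitude_range=(0, 0.3)),
        dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)),
        dict(type='SolarizeAdd', magnitude_key='magnitude', magnitude_range=(0, 110)),
    ],
    num_policies=2,
    magnitude_level=9,
    total_level=30)
```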
+ if self.magnitude_std == 'inf': + magnitude = random.uniform(0, magnitude) + elif self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(self.total_level, max(0, magnitude)) + + val1, val2 = processed_policy.pop('magnitude_range') + magnitude = (magnitude / self.total_level) * (val2 - + val1) + val1 + + processed_policy.update({magnitude_key: magnitude}) + processed_policies.append(processed_policy) + return processed_policies + + def __call__(self, results): + if self.num_policies == 0: + return results + sub_policy = random.choices(self.policies, k=self.num_policies) + sub_policy = self._process_policies(sub_policy) + sub_policy = Compose(sub_policy) + return sub_policy(results) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(policies={self.policies}, ' + repr_str += f'num_policies={self.num_policies}, ' + repr_str += f'magnitude_level={self.magnitude_level}, ' + repr_str += f'total_level={self.total_level})' + return repr_str + + +@PIPELINES.register_module() +class Shear(object): + """Shear images. + + Args: + magnitude (int | float): The magnitude used for shear. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing Shear therefore should be + in range [0, 1]. Defaults to 0.5. + direction (str): The shearing direction. Options are 'horizontal' and + 'vertical'. Defaults to 'horizontal'. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'bicubic'. + """ + + def __init__(self, + magnitude, + pad_val=128, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert direction in ('horizontal', 'vertical'), 'direction must be ' \ + f'either "horizontal" or "vertical", got {direction} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' 
+ + self.magnitude = magnitude + self.pad_val = tuple(pad_val) + self.prob = prob + self.direction = direction + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_sheared = mmcv.imshear( + img, + magnitude, + direction=self.direction, + border_value=self.pad_val, + interpolation=self.interpolation) + results[key] = img_sheared.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class Translate(object): + """Translate images. + + Args: + magnitude (int | float): The magnitude used for translate. Note that + the offset is calculated by magnitude * size in the corresponding + direction. With a magnitude of 1, the whole image will be moved out + of the range. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing translate therefore should + be in range [0, 1]. Defaults to 0.5. + direction (str): The translating direction. Options are 'horizontal' + and 'vertical'. Defaults to 'horizontal'. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'. + """ + + def __init__(self, + magnitude, + pad_val=128, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='nearest'): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert direction in ('horizontal', 'vertical'), 'direction must be ' \ + f'either "horizontal" or "vertical", got {direction} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' 
+ + self.magnitude = magnitude + self.pad_val = tuple(pad_val) + self.prob = prob + self.direction = direction + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + height, width = img.shape[:2] + if self.direction == 'horizontal': + offset = magnitude * width + else: + offset = magnitude * height + img_translated = mmcv.imtranslate( + img, + offset, + direction=self.direction, + border_value=self.pad_val, + interpolation=self.interpolation) + results[key] = img_translated.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class Rotate(object): + """Rotate images. + + Args: + angle (float): The angle used for rotate. Positive values stand for + clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If None, the center of the image will be used. + Defaults to None. + scale (float): Isotropic scale factor. Defaults to 1.0. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing Rotate therefore should be + in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the angle + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'. + """ + + def __init__(self, + angle, + center=None, + scale=1.0, + pad_val=128, + prob=0.5, + random_negative_prob=0.5, + interpolation='nearest'): + assert isinstance(angle, float), 'The angle type must be float, but ' \ + f'got {type(angle)} instead.' + if isinstance(center, tuple): + assert len(center) == 2, 'center as a tuple must have 2 ' \ + f'elements, got {len(center)} elements instead.' + else: + assert center is None, 'The center type' \ + f'must be tuple or None, got {type(center)} instead.' + assert isinstance(scale, float), 'the scale type must be float, but ' \ + f'got {type(scale)} instead.' + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' 
+ + self.angle = angle + self.center = center + self.scale = scale + self.pad_val = tuple(pad_val) + self.prob = prob + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + angle = random_negative(self.angle, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_rotated = mmcv.imrotate( + img, + angle, + center=self.center, + scale=self.scale, + border_value=self.pad_val, + interpolation=self.interpolation) + results[key] = img_rotated.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(angle={self.angle}, ' + repr_str += f'center={self.center}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class AutoContrast(object): + """Auto adjust image contrast. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_contrasted = mmcv.auto_contrast(img) + results[key] = img_contrasted.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Invert(object): + """Invert images. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_inverted = mmcv.iminvert(img) + results[key] = img_inverted.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Equalize(object): + """Equalize the image histogram. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_equalized = mmcv.imequalize(img) + results[key] = img_equalized.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Solarize(object): + """Solarize images (invert all pixel values above a threshold). + + Args: + thr (int | float): The threshold above which the pixels value will be + inverted. 
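The parameter-free ops defined above (``AutoContrast``, ``Invert``, ``Equalize``) are gated only by ``prob``. A hypothetical pipeline fragment mixing them, with made-up probabilities:

```python
# Each op fires independently with its own prob; values are illustrative.
pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='AutoContrast', prob=0.5),
    dict(type='Equalize', prob=0.5),
    dict(type='Invert', prob=0.1),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img']),
]
```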
+ prob (float): The probability for solarizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, thr, prob=0.5): + assert isinstance(thr, (int, float)), 'The thr type must '\ + f'be int or float, but got {type(thr)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.thr = thr + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_solarized = mmcv.solarize(img, thr=self.thr) + results[key] = img_solarized.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(thr={self.thr}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class SolarizeAdd(object): + """SolarizeAdd images (add a certain value to pixels below a threshold). + + Args: + magnitude (int | float): The value to be added to pixels below the thr. + thr (int | float): The threshold below which the pixels value will be + adjusted. + prob (float): The probability for solarizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, thr=128, prob=0.5): + assert isinstance(magnitude, (int, float)), 'The thr magnitude must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert isinstance(thr, (int, float)), 'The thr type must '\ + f'be int or float, but got {type(thr)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.magnitude = magnitude + self.thr = thr + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_solarized = np.where(img < self.thr, + np.minimum(img + self.magnitude, 255), + img) + results[key] = img_solarized.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'thr={self.thr}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Posterize(object): + """Posterize images (reduce the number of bits for each color channel). + + Args: + bits (int | float): Number of bits for each pixel in the output img, + which should be less or equal to 8. + prob (float): The probability for posterizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, bits, prob=0.5): + assert bits <= 8, f'The bits must be less than 8, got {bits} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + # To align timm version, we need to round up to integer here. + self.bits = ceil(bits) + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_posterized = mmcv.posterize(img, bits=self.bits) + results[key] = img_posterized.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(bits={self.bits}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Contrast(object): + """Adjust images contrast. + + Args: + magnitude (int | float): The magnitude used for adjusting contrast. 
A + positive magnitude would enhance the contrast and a negative + magnitude would make the image grayer. A magnitude=0 gives the + origin img. + prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_contrasted = mmcv.adjust_contrast(img, factor=1 + magnitude) + results[key] = img_contrasted.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class ColorTransform(object): + """Adjust images color balance. + + Args: + magnitude (int | float): The magnitude used for color transform. A + positive magnitude would enhance the color and a negative magnitude + would make the image grayer. A magnitude=0 gives the origin img. + prob (float): The probability for performing ColorTransform therefore + should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_color_adjusted = mmcv.adjust_color(img, alpha=1 + magnitude) + results[key] = img_color_adjusted.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class Brightness(object): + """Adjust images brightness. + + Args: + magnitude (int | float): The magnitude used for adjusting brightness. A + positive magnitude would enhance the brightness and a negative + magnitude would make the image darker. A magnitude=0 gives the + origin img. 
+ prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_brightened = mmcv.adjust_brightness(img, factor=1 + magnitude) + results[key] = img_brightened.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class Sharpness(object): + """Adjust images sharpness. + + Args: + magnitude (int | float): The magnitude used for adjusting sharpness. A + positive magnitude would enhance the sharpness and a negative + magnitude would make the image bulr. A magnitude=0 gives the + origin img. + prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_sharpened = mmcv.adjust_sharpness(img, factor=1 + magnitude) + results[key] = img_sharpened.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class Cutout(object): + """Cutout images. + + Args: + shape (int | float | tuple(int | float)): Expected cutout shape (h, w). + If given as a single value, the value will be used for + both h and w. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If it is a sequence, it must have the same length with the image + channels. Defaults to 128. 
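The four enhancement ops above (``Contrast``, ``ColorTransform``, ``Brightness``, ``Sharpness``) all map ``magnitude`` to ``1 + magnitude`` before calling the corresponding mmcv adjustment, so a magnitude of 0 is the identity. An illustrative set of policy dicts, with made-up magnitudes:

```python
# Illustrative values only; each op uses its default prob and sign-flip prob.
enhance_ops = [
    dict(type='Contrast', magnitude=0.3),
    dict(type='ColorTransform', magnitude=0.3),
    dict(type='Brightness', magnitude=0.3),
    dict(type='Sharpness', magnitude=0.3),
]
```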
+ prob (float): The probability for performing cutout therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, shape, pad_val=128, prob=0.5): + if isinstance(shape, float): + shape = int(shape) + elif isinstance(shape, tuple): + shape = tuple(int(i) for i in shape) + elif not isinstance(shape, int): + raise TypeError( + 'shape must be of ' + f'type int, float or tuple, got {type(shape)} instead') + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.shape = shape + self.pad_val = tuple(pad_val) + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_cutout = mmcv.cutout(img, self.shape, pad_val=self.pad_val) + results[key] = img_cutout.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(shape={self.shape}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob})' + return repr_str diff --git a/mmcls/datasets/pipelines/compose.py b/mmcls/datasets/pipelines/compose.py new file mode 100644 index 0000000..012d2b6 --- /dev/null +++ b/mmcls/datasets/pipelines/compose.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections.abc import Sequence + +from mmcv.utils import build_from_cfg + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class Compose(object): + """Compose a data pipeline with a sequence of transforms. + + Args: + transforms (list[dict | callable]): + Either config dicts of transforms or transform objects. + """ + + def __init__(self, transforms): + assert isinstance(transforms, Sequence) + self.transforms = [] + for transform in transforms: + if isinstance(transform, dict): + transform = build_from_cfg(transform, PIPELINES) + self.transforms.append(transform) + elif callable(transform): + self.transforms.append(transform) + else: + raise TypeError('transform must be callable or a dict, but got' + f' {type(transform)}') + + def __call__(self, data): + for t in self.transforms: + data = t(data) + if data is None: + return None + return data + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += f'\n {t}' + format_string += '\n)' + return format_string diff --git a/mmcls/datasets/pipelines/formatting.py b/mmcls/datasets/pipelines/formatting.py new file mode 100644 index 0000000..eeb1650 --- /dev/null +++ b/mmcls/datasets/pipelines/formatting.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections.abc import Sequence + +import mmcv +import numpy as np +import torch +from mmcv.parallel import DataContainer as DC +from PIL import Image + +from ..builder import PIPELINES + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. 
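A minimal sketch of using the ``Compose`` class above directly: config dicts and plain callables can be mixed. The lambda is a placeholder for any callable that takes and returns the results dict.

```python
import numpy as np

from mmcls.datasets.pipelines import Compose

# Mix a registered transform config with a plain callable.
pipeline = Compose([
    dict(type='Rotate', angle=15., prob=1.0),
    lambda results: results,  # placeholder callable
])
results = pipeline({'img': np.zeros((64, 64, 3), dtype=np.uint8)})
print(results['img'].shape)  # (64, 64, 3)
```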
+ """ + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not mmcv.is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError( + f'Type {type(data)} cannot be converted to tensor.' + 'Supported types are: `numpy.ndarray`, `torch.Tensor`, ' + '`Sequence`, `int` and `float`') + + +@PIPELINES.register_module() +class ToTensor(object): + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + for key in self.keys: + results[key] = to_tensor(results[key]) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class ImageToTensor(object): + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + for key in self.keys: + img = results[key] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + results[key] = to_tensor(img.transpose(2, 0, 1)) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class Transpose(object): + + def __init__(self, keys, order): + self.keys = keys + self.order = order + + def __call__(self, results): + for key in self.keys: + results[key] = results[key].transpose(self.order) + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, order={self.order})' + + +@PIPELINES.register_module() +class ToPIL(object): + + def __init__(self): + pass + + def __call__(self, results): + results['img'] = Image.fromarray(results['img']) + return results + + +@PIPELINES.register_module() +class ToNumpy(object): + + def __init__(self): + pass + + def __call__(self, results): + results['img'] = np.array(results['img'], dtype=np.float32) + return results + + +@PIPELINES.register_module() +class Collect(object): + """Collect data from the loader relevant to the specific task. + + This is usually the last stage of the data loader pipeline. Typically keys + is set to some subset of "img" and "gt_label". + + Args: + keys (Sequence[str]): Keys of results to be collected in ``data``. + meta_keys (Sequence[str], optional): Meta keys to be converted to + ``mmcv.DataContainer`` and collected in ``data[img_metas]``. + Default: ('filename', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'img_norm_cfg') + + Returns: + dict: The result dict contains the following keys + + - keys in ``self.keys`` + - ``img_metas`` if available + """ + + def __init__(self, + keys, + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'flip', 'flip_direction', + 'img_norm_cfg')): + self.keys = keys + self.meta_keys = meta_keys + + def __call__(self, results): + data = {} + img_meta = {} + for key in self.meta_keys: + if key in results: + img_meta[key] = results[key] + data['img_metas'] = DC(img_meta, cpu_only=True) + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, meta_keys={self.meta_keys})' + + +@PIPELINES.register_module() +class WrapFieldsToLists(object): + """Wrap fields of the data dictionary into lists for evaluation. + + This class can be used as a last step of a test or validation + pipeline for single image evaluation or inference. 
+ + Example: + >>> test_pipeline = [ + >>> dict(type='LoadImageFromFile'), + >>> dict(type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + >>> dict(type='ImageToTensor', keys=['img']), + >>> dict(type='Collect', keys=['img']), + >>> dict(type='WrapIntoLists') + >>> ] + """ + + def __call__(self, results): + # Wrap dict fields into lists + for key, val in results.items(): + results[key] = [val] + return results + + def __repr__(self): + return f'{self.__class__.__name__}()' + + +@PIPELINES.register_module() +class ToHalf(object): + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + for k in self.keys: + if isinstance(results[k], torch.Tensor): + results[k] = results[k].to(torch.half) + else: + results[k] = results[k].astype(np.float16) + return results diff --git a/mmcls/datasets/pipelines/loading.py b/mmcls/datasets/pipelines/loading.py new file mode 100644 index 0000000..887de01 --- /dev/null +++ b/mmcls/datasets/pipelines/loading.py @@ -0,0 +1,138 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import mmcv +import numpy as np + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class LoadImageFromFile(object): + """Load an image from file. + + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename"). Added or updated keys are "filename", "img", "img_shape", + "ori_shape" (same as `img_shape`) and "img_norm_cfg" (means=0 and stds=1). + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:`mmcv.imfrombytes()`. + Defaults to 'color'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + to_float32=False, + color_type='color', + file_client_args=dict(backend='disk')): + self.to_float32 = to_float32 + self.color_type = color_type + self.file_client_args = file_client_args.copy() + self.file_client = None + + def __call__(self, results): + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + if results['img_prefix'] is not None: + filename = osp.join(results['img_prefix'], + results['img_info']['filename']) + else: + filename = results['img_info']['filename'] + + img_bytes = self.file_client.get(filename) + img = mmcv.imfrombytes(img_bytes, flag=self.color_type) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + results['ori_filename'] = results['img_info']['filename'] + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'to_float32={self.to_float32}, ' + f"color_type='{self.color_type}', " + f'file_client_args={self.file_client_args})') + return repr_str + + +@PIPELINES.register_module() +class LoadImageFromFileLMDB(object): + """Load an image from file. + + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename"). 
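A sketch of how the plain ``LoadImageFromFile`` transform above is driven by a results dict (the paths are placeholders and must point to a real image on disk):

```python
from mmcls.datasets.pipelines.loading import LoadImageFromFile

load = LoadImageFromFile(to_float32=True)
results = dict(
    img_prefix='data/imagenet/train',               # placeholder directory
    img_info=dict(filename='n01440764/demo.JPEG'),  # placeholder file name
)
results = load(results)
# Adds 'img' (float32), 'img_shape', 'ori_shape' and a neutral 'img_norm_cfg'.
```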
Added or updated keys are "filename", "img", "img_shape", + "ori_shape" (same as `img_shape`) and "img_norm_cfg" (means=0 and stds=1). + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:`mmcv.imfrombytes()`. + Defaults to 'color'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + to_float32=False, + color_type='color', + file_client_args=dict(backend='disk')): + self.to_float32 = to_float32 + self.color_type = color_type + self.file_client_args = file_client_args.copy() + self.file_client = None + + + def __call__(self, results): + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + # assert isinstance(self.file_client.backend, LmdbBackend) + + if results['img_prefix'] is not None: + filename = osp.join(results['img_prefix'], + results['img_info']['filename']) + else: + filename = results['img_info']['filename'] + + _filename = osp.split(filename)[-1] + + img_bytes = self.file_client.get(_filename) + img = mmcv.imfrombytes(img_bytes, flag=self.color_type) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + results['ori_filename'] = results['img_info']['filename'] + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'to_float32={self.to_float32}, ' + f"color_type='{self.color_type}', " + f'file_client_args={self.file_client_args})') + return repr_str + diff --git a/mmcls/datasets/pipelines/transforms.py b/mmcls/datasets/pipelines/transforms.py new file mode 100644 index 0000000..a56ce3c --- /dev/null +++ b/mmcls/datasets/pipelines/transforms.py @@ -0,0 +1,1146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import inspect +import math +import random +from numbers import Number +from typing import Sequence + +import mmcv +import numpy as np + +from ..builder import PIPELINES +from .compose import Compose + +try: + import albumentations +except ImportError: + albumentations = None + + +@PIPELINES.register_module() +class RandomCrop(object): + """Crop the given Image at a random location. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + padding (int or sequence, optional): Optional padding on each border + of the image. If a sequence of length 4 is provided, it is used to + pad left, top, right, bottom borders respectively. If a sequence + of length 2 is provided, it is used to pad left/right, top/bottom + borders, respectively. Default: None, which means no padding. + pad_if_needed (boolean): It will pad the image if smaller than the + desired size to avoid raising an exception. Since cropping is done + after padding, the padding seems to be done at a random offset. + Default: False. + pad_val (Number | Sequence[Number]): Pixel pad_val value for constant + fill. If a tuple of length 3, it is used to pad_val R, G, B + channels respectively. Default: 0. 
+ padding_mode (str): Type of padding. Defaults to "constant". Should + be one of the following: + + - constant: Pads with a constant value, this value is specified \ + with pad_val. + - edge: pads with the last value at the edge of the image. + - reflect: Pads with reflection of image without repeating the \ + last value on the edge. For example, padding [1, 2, 3, 4] \ + with 2 elements on both sides in reflect mode will result \ + in [3, 2, 1, 2, 3, 4, 3, 2]. + - symmetric: Pads with reflection of image repeating the last \ + value on the edge. For example, padding [1, 2, 3, 4] with \ + 2 elements on both sides in symmetric mode will result in \ + [2, 1, 1, 2, 3, 4, 4, 3]. + """ + + def __init__(self, + size, + padding=None, + pad_if_needed=False, + pad_val=0, + padding_mode='constant'): + if isinstance(size, (tuple, list)): + self.size = size + else: + self.size = (size, size) + # check padding mode + assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] + self.padding = padding + self.pad_if_needed = pad_if_needed + self.pad_val = pad_val + self.padding_mode = padding_mode + + @staticmethod + def get_params(img, output_size): + """Get parameters for ``crop`` for a random crop. + + Args: + img (ndarray): Image to be cropped. + output_size (tuple): Expected output size of the crop. + + Returns: + tuple: Params (xmin, ymin, target_height, target_width) to be + passed to ``crop`` for random crop. + """ + height = img.shape[0] + width = img.shape[1] + target_height, target_width = output_size + if width == target_width and height == target_height: + return 0, 0, height, width + + ymin = random.randint(0, height - target_height) + xmin = random.randint(0, width - target_width) + return ymin, xmin, target_height, target_width + + def __call__(self, results): + """ + Args: + img (ndarray): Image to be cropped. + """ + for key in results.get('img_fields', ['img']): + img = results[key] + if self.padding is not None: + img = mmcv.impad( + img, padding=self.padding, pad_val=self.pad_val) + + # pad the height if needed + if self.pad_if_needed and img.shape[0] < self.size[0]: + img = mmcv.impad( + img, + padding=(0, self.size[0] - img.shape[0], 0, + self.size[0] - img.shape[0]), + pad_val=self.pad_val, + padding_mode=self.padding_mode) + + # pad the width if needed + if self.pad_if_needed and img.shape[1] < self.size[1]: + img = mmcv.impad( + img, + padding=(self.size[1] - img.shape[1], 0, + self.size[1] - img.shape[1], 0), + pad_val=self.pad_val, + padding_mode=self.padding_mode) + + ymin, xmin, height, width = self.get_params(img, self.size) + results[key] = mmcv.imcrop( + img, + np.array([ + xmin, + ymin, + xmin + width - 1, + ymin + height - 1, + ])) + return results + + def __repr__(self): + return (self.__class__.__name__ + + f'(size={self.size}, padding={self.padding})') + + +@PIPELINES.register_module() +class RandomResizedCrop(object): + """Crop the given image to random size and aspect ratio. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a + random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio + is made. This crop is finally resized to given size. + + Args: + size (sequence | int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + scale (tuple): Range of the random size of the cropped image compared + to the original image. Defaults to (0.08, 1.0). 
+ ratio (tuple): Range of the random aspect ratio of the cropped image + compared to the original image. Defaults to (3. / 4., 4. / 3.). + max_attempts (int): Maximum number of attempts before falling back to + Central Crop. Defaults to 10. + efficientnet_style (bool): Whether to use efficientnet style Random + ResizedCrop. Defaults to False. + min_covered (Number): Minimum ratio of the cropped area to the original + area. Only valid if efficientnet_style is true. Defaults to 0.1. + crop_padding (int): The crop padding parameter in efficientnet style + center crop. Only valid if efficientnet_style is true. + Defaults to 32. + interpolation (str): Interpolation method, accepted values are + 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to + 'bilinear'. + backend (str): The image resize backend type, accepted values are + `cv2` and `pillow`. Defaults to `cv2`. + """ + + def __init__(self, + size, + scale=(0.08, 1.0), + ratio=(3. / 4., 4. / 3.), + max_attempts=10, + efficientnet_style=False, + min_covered=0.1, + crop_padding=32, + interpolation='bilinear', + backend='cv2'): + if efficientnet_style: + assert isinstance(size, int) + self.size = (size, size) + assert crop_padding >= 0 + else: + if isinstance(size, (tuple, list)): + self.size = size + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + raise ValueError('range should be of kind (min, max). ' + f'But received scale {scale} and rato {ratio}.') + assert min_covered >= 0, 'min_covered should be no less than 0.' + assert isinstance(max_attempts, int) and max_attempts >= 0, \ + 'max_attempts mush be int and no less than 0.' + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'area', + 'lanczos') + if backend not in ['cv2', 'pillow']: + raise ValueError(f'backend: {backend} is not supported for resize.' + 'Supported backends are "cv2", "pillow"') + + self.scale = scale + self.ratio = ratio + self.max_attempts = max_attempts + self.efficientnet_style = efficientnet_style + self.min_covered = min_covered + self.crop_padding = crop_padding + self.interpolation = interpolation + self.backend = backend + + @staticmethod + def get_params(img, scale, ratio, max_attempts=10): + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (ndarray): Image to be cropped. + scale (tuple): Range of the random size of the cropped image + compared to the original image size. + ratio (tuple): Range of the random aspect ratio of the cropped + image compared to the original image area. + max_attempts (int): Maximum number of attempts before falling back + to central crop. Defaults to 10. + + Returns: + tuple: Params (ymin, xmin, ymax, xmax) to be passed to `crop` for + a random sized crop. 
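A minimal end-to-end sketch of this transform (assuming `mmcv` and the module added in this patch are installed):

```python
import numpy as np

from mmcls.datasets.pipelines.transforms import RandomResizedCrop

transform = RandomResizedCrop(size=224, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3))
results = dict(img=np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
results = transform(results)
print(results['img'].shape)  # (224, 224, 3)
```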
+ """ + height = img.shape[0] + width = img.shape[1] + area = height * width + + for _ in range(max_attempts): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + target_width = int(round(math.sqrt(target_area * aspect_ratio))) + target_height = int(round(math.sqrt(target_area / aspect_ratio))) + + if 0 < target_width <= width and 0 < target_height <= height: + ymin = random.randint(0, height - target_height) + xmin = random.randint(0, width - target_width) + ymax = ymin + target_height - 1 + xmax = xmin + target_width - 1 + return ymin, xmin, ymax, xmax + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + target_width = width + target_height = int(round(target_width / min(ratio))) + elif in_ratio > max(ratio): + target_height = height + target_width = int(round(target_height * max(ratio))) + else: # whole image + target_width = width + target_height = height + ymin = (height - target_height) // 2 + xmin = (width - target_width) // 2 + ymax = ymin + target_height - 1 + xmax = xmin + target_width - 1 + return ymin, xmin, ymax, xmax + + # https://github.com/kakaobrain/fast-autoaugment/blob/master/FastAutoAugment/data.py # noqa + @staticmethod + def get_params_efficientnet_style(img, + size, + scale, + ratio, + max_attempts=10, + min_covered=0.1, + crop_padding=32): + """Get parameters for ``crop`` for a random sized crop in efficientnet + style. + + Args: + img (ndarray): Image to be cropped. + size (sequence): Desired output size of the crop. + scale (tuple): Range of the random size of the cropped image + compared to the original image size. + ratio (tuple): Range of the random aspect ratio of the cropped + image compared to the original image area. + max_attempts (int): Maximum number of attempts before falling back + to central crop. Defaults to 10. + min_covered (Number): Minimum ratio of the cropped area to the + original area. Only valid if efficientnet_style is true. + Defaults to 0.1. + crop_padding (int): The crop padding parameter in efficientnet + style center crop. Defaults to 32. + + Returns: + tuple: Params (ymin, xmin, ymax, xmax) to be passed to `crop` for + a random sized crop. + """ + height, width = img.shape[:2] + area = height * width + min_target_area = scale[0] * area + max_target_area = scale[1] * area + + for _ in range(max_attempts): + aspect_ratio = random.uniform(*ratio) + min_target_height = int( + round(math.sqrt(min_target_area / aspect_ratio))) + max_target_height = int( + round(math.sqrt(max_target_area / aspect_ratio))) + + if max_target_height * aspect_ratio > width: + max_target_height = int((width + 0.5 - 1e-7) / aspect_ratio) + if max_target_height * aspect_ratio > width: + max_target_height -= 1 + + max_target_height = min(max_target_height, height) + min_target_height = min(max_target_height, min_target_height) + + # slightly differs from tf implementation + target_height = int( + round(random.uniform(min_target_height, max_target_height))) + target_width = int(round(target_height * aspect_ratio)) + target_area = target_height * target_width + + # slight differs from tf. 
In tf, if target_area > max_target_area, + # area will be recalculated + if (target_area < min_target_area or target_area > max_target_area + or target_width > width or target_height > height + or target_area < min_covered * area): + continue + + ymin = random.randint(0, height - target_height) + xmin = random.randint(0, width - target_width) + ymax = ymin + target_height - 1 + xmax = xmin + target_width - 1 + + return ymin, xmin, ymax, xmax + + # Fallback to central crop + img_short = min(height, width) + crop_size = size[0] / (size[0] + crop_padding) * img_short + + ymin = max(0, int(round((height - crop_size) / 2.))) + xmin = max(0, int(round((width - crop_size) / 2.))) + ymax = min(height, ymin + crop_size) - 1 + xmax = min(width, xmin + crop_size) - 1 + + return ymin, xmin, ymax, xmax + + def __call__(self, results): + for key in results.get('img_fields', ['img']): + img = results[key] + if self.efficientnet_style: + get_params_func = self.get_params_efficientnet_style + get_params_args = dict( + img=img, + size=self.size, + scale=self.scale, + ratio=self.ratio, + max_attempts=self.max_attempts, + min_covered=self.min_covered, + crop_padding=self.crop_padding) + else: + get_params_func = self.get_params + get_params_args = dict( + img=img, + scale=self.scale, + ratio=self.ratio, + max_attempts=self.max_attempts) + ymin, xmin, ymax, xmax = get_params_func(**get_params_args) + img = mmcv.imcrop(img, bboxes=np.array([xmin, ymin, xmax, ymax])) + results[key] = mmcv.imresize( + img, + tuple(self.size[::-1]), + interpolation=self.interpolation, + backend=self.backend) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + f'(size={self.size}' + repr_str += f', scale={tuple(round(s, 4) for s in self.scale)}' + repr_str += f', ratio={tuple(round(r, 4) for r in self.ratio)}' + repr_str += f', max_attempts={self.max_attempts}' + repr_str += f', efficientnet_style={self.efficientnet_style}' + repr_str += f', min_covered={self.min_covered}' + repr_str += f', crop_padding={self.crop_padding}' + repr_str += f', interpolation={self.interpolation}' + repr_str += f', backend={self.backend})' + return repr_str + + +@PIPELINES.register_module() +class RandomGrayscale(object): + """Randomly convert image to grayscale with a probability of gray_prob. + + Args: + gray_prob (float): Probability that image should be converted to + grayscale. Default: 0.1. + + Returns: + ndarray: Image after randomly grayscale transform. + + Notes: + - If input image is 1 channel: grayscale version is 1 channel. + - If input image is 3 channel: grayscale version is 3 channel + with r == g == b. + """ + + def __init__(self, gray_prob=0.1): + self.gray_prob = gray_prob + + def __call__(self, results): + """ + Args: + img (ndarray): Image to be converted to grayscale. + + Returns: + ndarray: Randomly grayscaled image. + """ + for key in results.get('img_fields', ['img']): + img = results[key] + num_output_channels = img.shape[2] + if random.random() < self.gray_prob: + if num_output_channels > 1: + img = mmcv.rgb2gray(img)[:, :, None] + results[key] = np.dstack( + [img for _ in range(num_output_channels)]) + return results + results[key] = img + return results + + def __repr__(self): + return self.__class__.__name__ + f'(gray_prob={self.gray_prob})' + + +@PIPELINES.register_module() +class RandomFlip(object): + """Flip the image randomly. + + Flip the image randomly based on flip probaility and flip direction. + + Args: + flip_prob (float): probability of the image being flipped. 
Default: 0.5 + direction (str): The flipping direction. Options are + 'horizontal' and 'vertical'. Default: 'horizontal'. + """ + + def __init__(self, flip_prob=0.5, direction='horizontal'): + assert 0 <= flip_prob <= 1 + assert direction in ['horizontal', 'vertical'] + self.flip_prob = flip_prob + self.direction = direction + + def __call__(self, results): + """Call function to flip image. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction' keys are added into + result dict. + """ + flip = True if np.random.rand() < self.flip_prob else False + results['flip'] = flip + results['flip_direction'] = self.direction + if results['flip']: + # flip image + for key in results.get('img_fields', ['img']): + results[key] = mmcv.imflip( + results[key], direction=results['flip_direction']) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(flip_prob={self.flip_prob})' + + +@PIPELINES.register_module() +class RandomErasing(object): + """Randomly selects a rectangle region in an image and erase pixels. + + Args: + erase_prob (float): Probability that image will be randomly erased. + Default: 0.5 + min_area_ratio (float): Minimum erased area / input image area + Default: 0.02 + max_area_ratio (float): Maximum erased area / input image area + Default: 0.4 + aspect_range (sequence | float): Aspect ratio range of erased area. + if float, it will be converted to (aspect_ratio, 1/aspect_ratio) + Default: (3/10, 10/3) + mode (str): Fill method in erased area, can be: + + - const (default): All pixels are assign with the same value. + - rand: each pixel is assigned with a random value in [0, 255] + + fill_color (sequence | Number): Base color filled in erased area. + Defaults to (128, 128, 128). + fill_std (sequence | Number, optional): If set and ``mode`` is 'rand', + fill erased area with random color from normal distribution + (mean=fill_color, std=fill_std); If not set, fill erased area with + random color from uniform distribution (0~255). Defaults to None. + + Note: + See `Random Erasing Data Augmentation + `_ + + This paper provided 4 modes: RE-R, RE-M, RE-0, RE-255, and use RE-M as + default. The config of these 4 modes are: + + - RE-R: RandomErasing(mode='rand') + - RE-M: RandomErasing(mode='const', fill_color=(123.67, 116.3, 103.5)) + - RE-0: RandomErasing(mode='const', fill_color=0) + - RE-255: RandomErasing(mode='const', fill_color=255) + """ + + def __init__(self, + erase_prob=0.5, + min_area_ratio=0.02, + max_area_ratio=0.4, + aspect_range=(3 / 10, 10 / 3), + mode='const', + fill_color=(128, 128, 128), + fill_std=None): + assert isinstance(erase_prob, float) and 0. <= erase_prob <= 1. + assert isinstance(min_area_ratio, float) and 0. <= min_area_ratio <= 1. + assert isinstance(max_area_ratio, float) and 0. <= max_area_ratio <= 1. + assert min_area_ratio <= max_area_ratio, \ + 'min_area_ratio should be smaller than max_area_ratio' + if isinstance(aspect_range, float): + aspect_range = min(aspect_range, 1 / aspect_range) + aspect_range = (aspect_range, 1 / aspect_range) + assert isinstance(aspect_range, Sequence) and len(aspect_range) == 2 \ + and all(isinstance(x, float) for x in aspect_range), \ + 'aspect_range should be a float or Sequence with two float.' + assert all(x > 0 for x in aspect_range), \ + 'aspect_range should be positive.' + assert aspect_range[0] <= aspect_range[1], \ + 'In aspect_range (min, max), min should be smaller than max.' 
+ assert mode in ['const', 'rand'] + if isinstance(fill_color, Number): + fill_color = [fill_color] * 3 + assert isinstance(fill_color, Sequence) and len(fill_color) == 3 \ + and all(isinstance(x, Number) for x in fill_color), \ + 'fill_color should be a float or Sequence with three int.' + if fill_std is not None: + if isinstance(fill_std, Number): + fill_std = [fill_std] * 3 + assert isinstance(fill_std, Sequence) and len(fill_std) == 3 \ + and all(isinstance(x, Number) for x in fill_std), \ + 'fill_std should be a float or Sequence with three int.' + + self.erase_prob = erase_prob + self.min_area_ratio = min_area_ratio + self.max_area_ratio = max_area_ratio + self.aspect_range = aspect_range + self.mode = mode + self.fill_color = fill_color + self.fill_std = fill_std + + def _fill_pixels(self, img, top, left, h, w): + if self.mode == 'const': + patch = np.empty((h, w, 3), dtype=np.uint8) + patch[:, :] = np.array(self.fill_color, dtype=np.uint8) + elif self.fill_std is None: + # Uniform distribution + patch = np.random.uniform(0, 256, (h, w, 3)).astype(np.uint8) + else: + # Normal distribution + patch = np.random.normal(self.fill_color, self.fill_std, (h, w, 3)) + patch = np.clip(patch.astype(np.int32), 0, 255).astype(np.uint8) + + img[top:top + h, left:left + w] = patch + return img + + def __call__(self, results): + """ + Args: + results (dict): Results dict from pipeline + + Returns: + dict: Results after the transformation. + """ + for key in results.get('img_fields', ['img']): + if np.random.rand() > self.erase_prob: + continue + img = results[key] + img_h, img_w = img.shape[:2] + + # convert to log aspect to ensure equal probability of aspect ratio + log_aspect_range = np.log( + np.array(self.aspect_range, dtype=np.float32)) + aspect_ratio = np.exp(np.random.uniform(*log_aspect_range)) + area = img_h * img_w + area *= np.random.uniform(self.min_area_ratio, self.max_area_ratio) + + h = min(int(round(np.sqrt(area * aspect_ratio))), img_h) + w = min(int(round(np.sqrt(area / aspect_ratio))), img_w) + top = np.random.randint(0, img_h - h) if img_h > h else 0 + left = np.random.randint(0, img_w - w) if img_w > w else 0 + img = self._fill_pixels(img, top, left, h, w) + + results[key] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(erase_prob={self.erase_prob}, ' + repr_str += f'min_area_ratio={self.min_area_ratio}, ' + repr_str += f'max_area_ratio={self.max_area_ratio}, ' + repr_str += f'aspect_range={self.aspect_range}, ' + repr_str += f'mode={self.mode}, ' + repr_str += f'fill_color={self.fill_color}, ' + repr_str += f'fill_std={self.fill_std})' + return repr_str + + +@PIPELINES.register_module() +class Pad(object): + """Pad images. + + Args: + size (tuple[int] | None): Expected padding size (h, w). Conflicts with + pad_to_square. Defaults to None. + pad_to_square (bool): Pad any image to square shape. Defaults to False. + pad_val (Number | Sequence[Number]): Values to be filled in padding + areas when padding_mode is 'constant'. Default to 0. + padding_mode (str): Type of padding. Should be: constant, edge, + reflect or symmetric. Default to "constant". 
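A small sketch of the square-padding mode described in the ``Pad`` docstring above (the input shape is hypothetical):

```python
import numpy as np

from mmcls.datasets.pipelines.transforms import Pad

pad = Pad(pad_to_square=True, pad_val=0)
results = dict(img=np.zeros((100, 150, 3), dtype=np.uint8))
results = pad(results)
print(results['img_shape'])  # (150, 150, 3)
```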
+ """ + + def __init__(self, + size=None, + pad_to_square=False, + pad_val=0, + padding_mode='constant'): + assert (size is None) ^ (pad_to_square is False), \ + 'Only one of [size, pad_to_square] should be given, ' \ + f'but get {(size is not None) + (pad_to_square is not False)}' + self.size = size + self.pad_to_square = pad_to_square + self.pad_val = pad_val + self.padding_mode = padding_mode + + def __call__(self, results): + for key in results.get('img_fields', ['img']): + img = results[key] + if self.pad_to_square: + target_size = tuple( + max(img.shape[0], img.shape[1]) for _ in range(2)) + else: + target_size = self.size + img = mmcv.impad( + img, + shape=target_size, + pad_val=self.pad_val, + padding_mode=self.padding_mode) + results[key] = img + results['img_shape'] = img.shape + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, ' + repr_str += f'(pad_val={self.pad_val}, ' + repr_str += f'padding_mode={self.padding_mode})' + return repr_str + + +@PIPELINES.register_module() +class Resize(object): + """Resize images. + + Args: + size (int | tuple): Images scales for resizing (h, w). + When size is int, the default behavior is to resize an image + to (size, size). When size is tuple and the second value is -1, + the image will be resized according to adaptive_side. For example, + when size is 224, the image is resized to 224x224. When size is + (224, -1) and adaptive_size is "short", the short side is resized + to 224 and the other side is computed based on the short side, + maintaining the aspect ratio. + interpolation (str): Interpolation method. For "cv2" backend, accepted + values are "nearest", "bilinear", "bicubic", "area", "lanczos". For + "pillow" backend, accepted values are "nearest", "bilinear", + "bicubic", "box", "lanczos", "hamming". + More details can be found in `mmcv.image.geometric`. + adaptive_side(str): Adaptive resize policy, accepted values are + "short", "long", "height", "width". Default to "short". + backend (str): The image resize backend type, accepted values are + `cv2` and `pillow`. Default: `cv2`. + """ + + def __init__(self, + size, + interpolation='bilinear', + adaptive_side='short', + backend='cv2'): + assert isinstance(size, int) or (isinstance(size, tuple) + and len(size) == 2) + assert adaptive_side in {'short', 'long', 'height', 'width'} + + self.adaptive_side = adaptive_side + self.adaptive_resize = False + if isinstance(size, int): + assert size > 0 + size = (size, size) + else: + assert size[0] > 0 and (size[1] > 0 or size[1] == -1) + if size[1] == -1: + self.adaptive_resize = True + if backend not in ['cv2', 'pillow']: + raise ValueError(f'backend: {backend} is not supported for resize.' 
+ 'Supported backends are "cv2", "pillow"') + if backend == 'cv2': + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'area', + 'lanczos') + else: + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'box', + 'lanczos', 'hamming') + self.size = size + self.interpolation = interpolation + self.backend = backend + + def _resize_img(self, results): + for key in results.get('img_fields', ['img']): + img = results[key] + ignore_resize = False + if self.adaptive_resize: + h, w = img.shape[:2] + target_size = self.size[0] + + condition_ignore_resize = { + 'short': min(h, w) == target_size, + 'long': max(h, w) == target_size, + 'height': h == target_size, + 'width': w == target_size + } + + if condition_ignore_resize[self.adaptive_side]: + ignore_resize = True + elif any([ + self.adaptive_side == 'short' and w < h, + self.adaptive_side == 'long' and w > h, + self.adaptive_side == 'width', + ]): + width = target_size + height = int(target_size * h / w) + else: + height = target_size + width = int(target_size * w / h) + else: + height, width = self.size + if not ignore_resize: + img = mmcv.imresize( + img, + size=(width, height), + interpolation=self.interpolation, + return_scale=False, + backend=self.backend) + results[key] = img + results['img_shape'] = img.shape + + def __call__(self, results): + self._resize_img(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class CenterCrop(object): + r"""Center crop the image. + + Args: + crop_size (int | tuple): Expected size after cropping with the format + of (h, w). + efficientnet_style (bool): Whether to use efficientnet style center + crop. Defaults to False. + crop_padding (int): The crop padding parameter in efficientnet style + center crop. Only valid if efficientnet style is True. Defaults to + 32. + interpolation (str): Interpolation method, accepted values are + 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Only valid if + ``efficientnet_style`` is True. Defaults to 'bilinear'. + backend (str): The image resize backend type, accepted values are + `cv2` and `pillow`. Only valid if efficientnet style is True. + Defaults to `cv2`. + + + Notes: + - If the image is smaller than the crop size, return the original + image. + - If efficientnet_style is set to False, the pipeline would be a simple + center crop using the crop_size. + - If efficientnet_style is set to True, the pipeline will be to first + to perform the center crop with the ``crop_size_`` as: + + .. math:: + \text{crop_size_} = \frac{\text{crop_size}}{\text{crop_size} + + \text{crop_padding}} \times \text{short_edge} + + And then the pipeline resizes the img to the input crop size. + """ + + def __init__(self, + crop_size, + efficientnet_style=False, + crop_padding=32, + interpolation='bilinear', + backend='cv2'): + if efficientnet_style: + assert isinstance(crop_size, int) + assert crop_padding >= 0 + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'area', + 'lanczos') + if backend not in ['cv2', 'pillow']: + raise ValueError( + f'backend: {backend} is not supported for ' + 'resize. 
Supported backends are "cv2", "pillow"') + else: + assert isinstance(crop_size, int) or (isinstance(crop_size, tuple) + and len(crop_size) == 2) + if isinstance(crop_size, int): + crop_size = (crop_size, crop_size) + assert crop_size[0] > 0 and crop_size[1] > 0 + self.crop_size = crop_size + self.efficientnet_style = efficientnet_style + self.crop_padding = crop_padding + self.interpolation = interpolation + self.backend = backend + + def __call__(self, results): + crop_height, crop_width = self.crop_size[0], self.crop_size[1] + for key in results.get('img_fields', ['img']): + img = results[key] + # img.shape has length 2 for grayscale, length 3 for color + img_height, img_width = img.shape[:2] + + # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/preprocessing.py#L118 # noqa + if self.efficientnet_style: + img_short = min(img_height, img_width) + crop_height = crop_height / (crop_height + + self.crop_padding) * img_short + crop_width = crop_width / (crop_width + + self.crop_padding) * img_short + + y1 = max(0, int(round((img_height - crop_height) / 2.))) + x1 = max(0, int(round((img_width - crop_width) / 2.))) + y2 = min(img_height, y1 + crop_height) - 1 + x2 = min(img_width, x1 + crop_width) - 1 + + # crop the image + img = mmcv.imcrop(img, bboxes=np.array([x1, y1, x2, y2])) + + if self.efficientnet_style: + img = mmcv.imresize( + img, + tuple(self.crop_size[::-1]), + interpolation=self.interpolation, + backend=self.backend) + img_shape = img.shape + results[key] = img + results['img_shape'] = img_shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + f'(crop_size={self.crop_size}' + repr_str += f', efficientnet_style={self.efficientnet_style}' + repr_str += f', crop_padding={self.crop_padding}' + repr_str += f', interpolation={self.interpolation}' + repr_str += f', backend={self.backend})' + return repr_str + + +@PIPELINES.register_module() +class Normalize(object): + """Normalize the image. + + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + def __call__(self, results): + for key in results.get('img_fields', ['img']): + results[key] = mmcv.imnormalize(results[key], self.mean, self.std, + self.to_rgb) + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={list(self.mean)}, ' + repr_str += f'std={list(self.std)}, ' + repr_str += f'to_rgb={self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class ColorJitter(object): + """Randomly change the brightness, contrast and saturation of an image. + + Args: + brightness (float): How much to jitter brightness. + brightness_factor is chosen uniformly from + [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. + contrast_factor is chosen uniformly from + [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. + saturation_factor is chosen uniformly from + [max(0, 1 - saturation), 1 + saturation]. 
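Stepping back to the ``Normalize`` transform defined just above, a minimal sketch of what it adds to the results dict (mean/std values are the usual ImageNet statistics, used here only for illustration):

```python
import numpy as np

from mmcls.datasets.pipelines.transforms import Normalize

norm = Normalize(mean=[123.675, 116.28, 103.53],
                 std=[58.395, 57.12, 57.375],
                 to_rgb=True)
results = dict(img=np.full((2, 2, 3), 128, dtype=np.uint8))
results = norm(results)
print(results['img'].dtype)     # float32
print(results['img_norm_cfg'])  # mean/std/to_rgb recorded for 'img_metas'
```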
+ """ + + def __init__(self, brightness, contrast, saturation): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + + def __call__(self, results): + brightness_factor = random.uniform(0, self.brightness) + contrast_factor = random.uniform(0, self.contrast) + saturation_factor = random.uniform(0, self.saturation) + color_jitter_transforms = [ + dict( + type='Brightness', + magnitude=brightness_factor, + prob=1., + random_negative_prob=0.5), + dict( + type='Contrast', + magnitude=contrast_factor, + prob=1., + random_negative_prob=0.5), + dict( + type='ColorTransform', + magnitude=saturation_factor, + prob=1., + random_negative_prob=0.5) + ] + random.shuffle(color_jitter_transforms) + transform = Compose(color_jitter_transforms) + return transform(results) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(brightness={self.brightness}, ' + repr_str += f'contrast={self.contrast}, ' + repr_str += f'saturation={self.saturation})' + return repr_str + + +@PIPELINES.register_module() +class Lighting(object): + """Adjust images lighting using AlexNet-style PCA jitter. + + Args: + eigval (list): the eigenvalue of the convariance matrix of pixel + values, respectively. + eigvec (list[list]): the eigenvector of the convariance matrix of pixel + values, respectively. + alphastd (float): The standard deviation for distribution of alpha. + Defaults to 0.1 + to_rgb (bool): Whether to convert img to rgb. + """ + + def __init__(self, eigval, eigvec, alphastd=0.1, to_rgb=True): + assert isinstance(eigval, list), \ + f'eigval must be of type list, got {type(eigval)} instead.' + assert isinstance(eigvec, list), \ + f'eigvec must be of type list, got {type(eigvec)} instead.' + for vec in eigvec: + assert isinstance(vec, list) and len(vec) == len(eigvec[0]), \ + 'eigvec must contains lists with equal length.' + self.eigval = np.array(eigval) + self.eigvec = np.array(eigvec) + self.alphastd = alphastd + self.to_rgb = to_rgb + + def __call__(self, results): + for key in results.get('img_fields', ['img']): + img = results[key] + results[key] = mmcv.adjust_lighting( + img, + self.eigval, + self.eigvec, + alphastd=self.alphastd, + to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(eigval={self.eigval.tolist()}, ' + repr_str += f'eigvec={self.eigvec.tolist()}, ' + repr_str += f'alphastd={self.alphastd}, ' + repr_str += f'to_rgb={self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class Albu(object): + """Albumentation augmentation. + + Adds custom transformations from Albumentations library. + Please, visit `https://albumentations.readthedocs.io` + to get more information. + An example of ``transforms`` is as followed: + + .. 
code-block:: + [ + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=0.5), + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), + ] + + Args: + transforms (list[dict]): A list of albu transformations + keymap (dict): Contains {'input key':'albumentation-style key'} + """ + + def __init__(self, transforms, keymap=None, update_pad_shape=False): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + else: + from albumentations import Compose + + self.transforms = transforms + self.filter_lost_elements = False + self.update_pad_shape = update_pad_shape + + self.aug = Compose([self.albu_builder(t) for t in self.transforms]) + + if not keymap: + self.keymap_to_albu = { + 'img': 'image', + } + else: + self.keymap_to_albu = keymap + self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} + + def albu_builder(self, cfg): + """Import a module from albumentations. + + It inherits some of :func:`build_from_cfg` logic. + Args: + cfg (dict): Config dict. It should at least contain the key "type". + Returns: + obj: The constructed object. + """ + + assert isinstance(cfg, dict) and 'type' in cfg + args = cfg.copy() + + obj_type = args.pop('type') + if mmcv.is_str(obj_type): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + obj_cls = getattr(albumentations, obj_type) + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError( + f'type must be a str or valid type, but got {type(obj_type)}') + + if 'transforms' in args: + args['transforms'] = [ + self.albu_builder(transform) + for transform in args['transforms'] + ] + + return obj_cls(**args) + + @staticmethod + def mapper(d, keymap): + """Dictionary mapper. + + Renames keys according to keymap provided. + Args: + d (dict): old dict + keymap (dict): {'old_key':'new_key'} + Returns: + dict: new dict. + """ + + updated_dict = {} + for k, v in zip(d.keys(), d.values()): + new_k = keymap.get(k, k) + updated_dict[new_k] = d[k] + return updated_dict + + def __call__(self, results): + + # backup gt_label in case Albu modify it. + _gt_label = copy.deepcopy(results.get('gt_label', None)) + + # dict to albumentations format + results = self.mapper(results, self.keymap_to_albu) + + # process aug + results = self.aug(**results) + + # back to the original format + results = self.mapper(results, self.keymap_back) + + if _gt_label is not None: + # recover backup gt_label + results.update({'gt_label': _gt_label}) + + # update final shape + if self.update_pad_shape: + results['pad_shape'] = results['img'].shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' + return repr_str diff --git a/mmcls/datasets/samplers/__init__.py b/mmcls/datasets/samplers/__init__.py new file mode 100644 index 0000000..7016288 --- /dev/null +++ b/mmcls/datasets/samplers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
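Looking back at the ``Albu`` wrapper above, it is typically configured with albumentations-style dicts, for example (an illustrative snippet that assumes ``albumentations`` is installed):

```python
# Illustrative Albu config; transform names follow the albumentations API.
albu_transform = dict(
    type='Albu',
    transforms=[
        dict(type='RandomBrightnessContrast',
             brightness_limit=0.2,
             contrast_limit=0.2,
             p=0.5),
        dict(type='Blur', blur_limit=3, p=0.1),
    ])
# The default keymap maps the pipeline's 'img' key to albumentations' 'image'.
```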
+from .distributed_sampler import DistributedSampler +from .repeat_aug import RepeatAugSampler + +__all__ = ('DistributedSampler', 'RepeatAugSampler') diff --git a/mmcls/datasets/samplers/distributed_sampler.py b/mmcls/datasets/samplers/distributed_sampler.py new file mode 100644 index 0000000..a38c5ac --- /dev/null +++ b/mmcls/datasets/samplers/distributed_sampler.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch.utils.data import DistributedSampler as _DistributedSampler + +from mmcls.core.utils import sync_random_seed +from mmcls.datasets import SAMPLERS + + +@SAMPLERS.register_module() +class DistributedSampler(_DistributedSampler): + + def __init__(self, + dataset, + num_replicas=None, + rank=None, + shuffle=True, + round_up=True, + seed=0): + super().__init__(dataset, num_replicas=num_replicas, rank=rank) + self.shuffle = shuffle + self.round_up = round_up + if self.round_up: + self.total_size = self.num_samples * self.num_replicas + else: + self.total_size = len(self.dataset) + + # In distributed sampling, different ranks should sample + # non-overlapped data in the dataset. Therefore, this function + # is used to make sure that each rank shuffles the data indices + # in the same order based on the same seed. Then different ranks + # could use different indices to select non-overlapped data from the + # same data list. + self.seed = sync_random_seed(seed) + + def __iter__(self): + # deterministically shuffle based on epoch + if self.shuffle: + g = torch.Generator() + # When :attr:`shuffle=True`, this ensures all replicas + # use a different random ordering for each epoch. + # Otherwise, the next iteration of this sampler will + # yield the same ordering. + g.manual_seed(self.epoch + self.seed) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + if self.round_up: + indices = ( + indices * + int(self.total_size / len(indices) + 1))[:self.total_size] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + if self.round_up: + assert len(indices) == self.num_samples + + return iter(indices) diff --git a/mmcls/datasets/samplers/repeat_aug.py b/mmcls/datasets/samplers/repeat_aug.py new file mode 100644 index 0000000..5de096b --- /dev/null +++ b/mmcls/datasets/samplers/repeat_aug.py @@ -0,0 +1,106 @@ +import math + +import torch +from mmcv.runner import get_dist_info +from torch.utils.data import Sampler + +from mmcls.core.utils import sync_random_seed +from mmcls.datasets import SAMPLERS + + +@SAMPLERS.register_module() +class RepeatAugSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset for + distributed, with repeated augmentation. It ensures that different each + augmented version of a sample will be visible to a different process (GPU). + Heavily based on torch.utils.data.DistributedSampler. + + This sampler was taken from + https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py + Used in + Copyright (c) 2015-present, Facebook, Inc. 
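A toy illustration of the repeat-and-interleave behaviour implemented below (ignoring the padding that makes the index list evenly divisible):

```python
num_repeats, num_replicas = 3, 2
indices = [2, 0, 1]  # a (shuffled) epoch ordering
repeated = [i for i in indices for _ in range(num_repeats)]
# repeated == [2, 2, 2, 0, 0, 0, 1, 1, 1]
rank0 = repeated[0::num_replicas]  # [2, 2, 0, 1, 1]
rank1 = repeated[1::num_replicas]  # [2, 0, 0, 1]
# Each rank therefore sees different augmented copies of the same samples.
```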
+ """ + + def __init__(self, + dataset, + num_replicas=None, + rank=None, + shuffle=True, + num_repeats=3, + selected_round=256, + selected_ratio=0, + seed=0): + default_rank, default_world_size = get_dist_info() + rank = default_rank if rank is None else rank + num_replicas = ( + default_world_size if num_replicas is None else num_replicas) + + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.shuffle = shuffle + self.num_repeats = num_repeats + self.epoch = 0 + self.num_samples = int( + math.ceil(len(self.dataset) * num_repeats / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + # Determine the number of samples to select per epoch for each rank. + # num_selected logic defaults to be the same as original RASampler + # impl, but this one can be tweaked + # via selected_ratio and selected_round args. + selected_ratio = selected_ratio or num_replicas # ratio to reduce + # selected samples by, num_replicas if 0 + if selected_round: + self.num_selected_samples = int( + math.floor( + len(self.dataset) // selected_round * selected_round / + selected_ratio)) + else: + self.num_selected_samples = int( + math.ceil(len(self.dataset) / selected_ratio)) + + # In distributed sampling, different ranks should sample + # non-overlapped data in the dataset. Therefore, this function + # is used to make sure that each rank shuffles the data indices + # in the same order based on the same seed. Then different ranks + # could use different indices to select non-overlapped data from the + # same data list. + self.seed = sync_random_seed(seed) + + def __iter__(self): + # deterministically shuffle based on epoch + if self.shuffle: + if self.num_replicas > 1: # In distributed environment + # deterministically shuffle based on epoch + g = torch.Generator() + # When :attr:`shuffle=True`, this ensures all replicas + # use a different random ordering for each epoch. + # Otherwise, the next iteration of this sampler will + # yield the same ordering. + g.manual_seed(self.epoch + self.seed) + indices = torch.randperm( + len(self.dataset), generator=g).tolist() + else: + indices = torch.randperm(len(self.dataset)).tolist() + else: + indices = list(range(len(self.dataset))) + + # produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....] + indices = [x for x in indices for _ in range(self.num_repeats)] + # add extra samples to make it evenly divisible + padding_size = self.total_size - len(indices) + indices += indices[:padding_size] + assert len(indices) == self.total_size + + # subsample per rank + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + # return up to num selected samples + return iter(indices[:self.num_selected_samples]) + + def __len__(self): + return self.num_selected_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/mmcls/datasets/stanford_cars.py b/mmcls/datasets/stanford_cars.py new file mode 100644 index 0000000..df1f951 --- /dev/null +++ b/mmcls/datasets/stanford_cars.py @@ -0,0 +1,210 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Optional + +import numpy as np + +from .base_dataset import BaseDataset +from .builder import DATASETS + + +@DATASETS.register_module() +class StanfordCars(BaseDataset): + """`Stanford Cars`_ Dataset. + + After downloading and decompression, the dataset + directory structure is as follows. 
+ + Stanford Cars dataset directory:: + + Stanford Cars + ├── cars_train + │ ├── 00001.jpg + │ ├── 00002.jpg + │ └── ... + ├── cars_test + │ ├── 00001.jpg + │ ├── 00002.jpg + │ └── ... + └── devkit + ├── cars_meta.mat + ├── cars_train_annos.mat + ├── cars_test_annos.mat + ├── cars_test_annoswithlabels.mat + ├── eval_train.m + └── train_perfect_preds.txt + + .. _Stanford Cars: https://ai.stanford.edu/~jkrause/cars/car_dataset.html + + Args: + data_prefix (str): the prefix of data path + test_mode (bool): ``test_mode=True`` means in test phase. It determines + to use the training set or test set. + ann_file (str, optional): The annotation file. If is string, read + samples paths from the ann_file. If is None, read samples path + from cars_{train|test}_annos.mat file. Defaults to None. + """ # noqa: E501 + + CLASSES = [ + 'AM General Hummer SUV 2000', 'Acura RL Sedan 2012', + 'Acura TL Sedan 2012', 'Acura TL Type-S 2008', 'Acura TSX Sedan 2012', + 'Acura Integra Type R 2001', 'Acura ZDX Hatchback 2012', + 'Aston Martin V8 Vantage Convertible 2012', + 'Aston Martin V8 Vantage Coupe 2012', + 'Aston Martin Virage Convertible 2012', + 'Aston Martin Virage Coupe 2012', 'Audi RS 4 Convertible 2008', + 'Audi A5 Coupe 2012', 'Audi TTS Coupe 2012', 'Audi R8 Coupe 2012', + 'Audi V8 Sedan 1994', 'Audi 100 Sedan 1994', 'Audi 100 Wagon 1994', + 'Audi TT Hatchback 2011', 'Audi S6 Sedan 2011', + 'Audi S5 Convertible 2012', 'Audi S5 Coupe 2012', 'Audi S4 Sedan 2012', + 'Audi S4 Sedan 2007', 'Audi TT RS Coupe 2012', + 'BMW ActiveHybrid 5 Sedan 2012', 'BMW 1 Series Convertible 2012', + 'BMW 1 Series Coupe 2012', 'BMW 3 Series Sedan 2012', + 'BMW 3 Series Wagon 2012', 'BMW 6 Series Convertible 2007', + 'BMW X5 SUV 2007', 'BMW X6 SUV 2012', 'BMW M3 Coupe 2012', + 'BMW M5 Sedan 2010', 'BMW M6 Convertible 2010', 'BMW X3 SUV 2012', + 'BMW Z4 Convertible 2012', + 'Bentley Continental Supersports Conv. 
Convertible 2012', + 'Bentley Arnage Sedan 2009', 'Bentley Mulsanne Sedan 2011', + 'Bentley Continental GT Coupe 2012', + 'Bentley Continental GT Coupe 2007', + 'Bentley Continental Flying Spur Sedan 2007', + 'Bugatti Veyron 16.4 Convertible 2009', + 'Bugatti Veyron 16.4 Coupe 2009', 'Buick Regal GS 2012', + 'Buick Rainier SUV 2007', 'Buick Verano Sedan 2012', + 'Buick Enclave SUV 2012', 'Cadillac CTS-V Sedan 2012', + 'Cadillac SRX SUV 2012', 'Cadillac Escalade EXT Crew Cab 2007', + 'Chevrolet Silverado 1500 Hybrid Crew Cab 2012', + 'Chevrolet Corvette Convertible 2012', 'Chevrolet Corvette ZR1 2012', + 'Chevrolet Corvette Ron Fellows Edition Z06 2007', + 'Chevrolet Traverse SUV 2012', 'Chevrolet Camaro Convertible 2012', + 'Chevrolet HHR SS 2010', 'Chevrolet Impala Sedan 2007', + 'Chevrolet Tahoe Hybrid SUV 2012', 'Chevrolet Sonic Sedan 2012', + 'Chevrolet Express Cargo Van 2007', + 'Chevrolet Avalanche Crew Cab 2012', 'Chevrolet Cobalt SS 2010', + 'Chevrolet Malibu Hybrid Sedan 2010', 'Chevrolet TrailBlazer SS 2009', + 'Chevrolet Silverado 2500HD Regular Cab 2012', + 'Chevrolet Silverado 1500 Classic Extended Cab 2007', + 'Chevrolet Express Van 2007', 'Chevrolet Monte Carlo Coupe 2007', + 'Chevrolet Malibu Sedan 2007', + 'Chevrolet Silverado 1500 Extended Cab 2012', + 'Chevrolet Silverado 1500 Regular Cab 2012', 'Chrysler Aspen SUV 2009', + 'Chrysler Sebring Convertible 2010', + 'Chrysler Town and Country Minivan 2012', 'Chrysler 300 SRT-8 2010', + 'Chrysler Crossfire Convertible 2008', + 'Chrysler PT Cruiser Convertible 2008', 'Daewoo Nubira Wagon 2002', + 'Dodge Caliber Wagon 2012', 'Dodge Caliber Wagon 2007', + 'Dodge Caravan Minivan 1997', 'Dodge Ram Pickup 3500 Crew Cab 2010', + 'Dodge Ram Pickup 3500 Quad Cab 2009', 'Dodge Sprinter Cargo Van 2009', + 'Dodge Journey SUV 2012', 'Dodge Dakota Crew Cab 2010', + 'Dodge Dakota Club Cab 2007', 'Dodge Magnum Wagon 2008', + 'Dodge Challenger SRT8 2011', 'Dodge Durango SUV 2012', + 'Dodge Durango SUV 2007', 'Dodge Charger Sedan 2012', + 'Dodge Charger SRT-8 2009', 'Eagle Talon Hatchback 1998', + 'FIAT 500 Abarth 2012', 'FIAT 500 Convertible 2012', + 'Ferrari FF Coupe 2012', 'Ferrari California Convertible 2012', + 'Ferrari 458 Italia Convertible 2012', 'Ferrari 458 Italia Coupe 2012', + 'Fisker Karma Sedan 2012', 'Ford F-450 Super Duty Crew Cab 2012', + 'Ford Mustang Convertible 2007', 'Ford Freestar Minivan 2007', + 'Ford Expedition EL SUV 2009', 'Ford Edge SUV 2012', + 'Ford Ranger SuperCab 2011', 'Ford GT Coupe 2006', + 'Ford F-150 Regular Cab 2012', 'Ford F-150 Regular Cab 2007', + 'Ford Focus Sedan 2007', 'Ford E-Series Wagon Van 2012', + 'Ford Fiesta Sedan 2012', 'GMC Terrain SUV 2012', + 'GMC Savana Van 2012', 'GMC Yukon Hybrid SUV 2012', + 'GMC Acadia SUV 2012', 'GMC Canyon Extended Cab 2012', + 'Geo Metro Convertible 1993', 'HUMMER H3T Crew Cab 2010', + 'HUMMER H2 SUT Crew Cab 2009', 'Honda Odyssey Minivan 2012', + 'Honda Odyssey Minivan 2007', 'Honda Accord Coupe 2012', + 'Honda Accord Sedan 2012', 'Hyundai Veloster Hatchback 2012', + 'Hyundai Santa Fe SUV 2012', 'Hyundai Tucson SUV 2012', + 'Hyundai Veracruz SUV 2012', 'Hyundai Sonata Hybrid Sedan 2012', + 'Hyundai Elantra Sedan 2007', 'Hyundai Accent Sedan 2012', + 'Hyundai Genesis Sedan 2012', 'Hyundai Sonata Sedan 2012', + 'Hyundai Elantra Touring Hatchback 2012', 'Hyundai Azera Sedan 2012', + 'Infiniti G Coupe IPL 2012', 'Infiniti QX56 SUV 2011', + 'Isuzu Ascender SUV 2008', 'Jaguar XK XKR 2012', + 'Jeep Patriot SUV 2012', 'Jeep Wrangler SUV 2012', + 'Jeep Liberty SUV 2012', 
'Jeep Grand Cherokee SUV 2012', + 'Jeep Compass SUV 2012', 'Lamborghini Reventon Coupe 2008', + 'Lamborghini Aventador Coupe 2012', + 'Lamborghini Gallardo LP 570-4 Superleggera 2012', + 'Lamborghini Diablo Coupe 2001', 'Land Rover Range Rover SUV 2012', + 'Land Rover LR2 SUV 2012', 'Lincoln Town Car Sedan 2011', + 'MINI Cooper Roadster Convertible 2012', + 'Maybach Landaulet Convertible 2012', 'Mazda Tribute SUV 2011', + 'McLaren MP4-12C Coupe 2012', + 'Mercedes-Benz 300-Class Convertible 1993', + 'Mercedes-Benz C-Class Sedan 2012', + 'Mercedes-Benz SL-Class Coupe 2009', + 'Mercedes-Benz E-Class Sedan 2012', 'Mercedes-Benz S-Class Sedan 2012', + 'Mercedes-Benz Sprinter Van 2012', 'Mitsubishi Lancer Sedan 2012', + 'Nissan Leaf Hatchback 2012', 'Nissan NV Passenger Van 2012', + 'Nissan Juke Hatchback 2012', 'Nissan 240SX Coupe 1998', + 'Plymouth Neon Coupe 1999', 'Porsche Panamera Sedan 2012', + 'Ram C/V Cargo Van Minivan 2012', + 'Rolls-Royce Phantom Drophead Coupe Convertible 2012', + 'Rolls-Royce Ghost Sedan 2012', 'Rolls-Royce Phantom Sedan 2012', + 'Scion xD Hatchback 2012', 'Spyker C8 Convertible 2009', + 'Spyker C8 Coupe 2009', 'Suzuki Aerio Sedan 2007', + 'Suzuki Kizashi Sedan 2012', 'Suzuki SX4 Hatchback 2012', + 'Suzuki SX4 Sedan 2012', 'Tesla Model S Sedan 2012', + 'Toyota Sequoia SUV 2012', 'Toyota Camry Sedan 2012', + 'Toyota Corolla Sedan 2012', 'Toyota 4Runner SUV 2012', + 'Volkswagen Golf Hatchback 2012', 'Volkswagen Golf Hatchback 1991', + 'Volkswagen Beetle Hatchback 2012', 'Volvo C30 Hatchback 2012', + 'Volvo 240 Sedan 1993', 'Volvo XC90 SUV 2007', + 'smart fortwo Convertible 2012' + ] + + def __init__(self, + data_prefix: str, + test_mode: bool, + ann_file: Optional[str] = None, + **kwargs): + if test_mode: + if ann_file is not None: + self.test_ann_file = ann_file + else: + self.test_ann_file = osp.join( + data_prefix, 'devkit/cars_test_annos_withlabels.mat') + data_prefix = osp.join(data_prefix, 'cars_test') + else: + if ann_file is not None: + self.train_ann_file = ann_file + else: + self.train_ann_file = osp.join(data_prefix, + 'devkit/cars_train_annos.mat') + data_prefix = osp.join(data_prefix, 'cars_train') + super(StanfordCars, self).__init__( + ann_file=ann_file, + data_prefix=data_prefix, + test_mode=test_mode, + **kwargs) + + def load_annotations(self): + try: + import scipy.io as sio + except ImportError: + raise ImportError( + 'please run `pip install scipy` to install package `scipy`.') + + data_infos = [] + if self.test_mode: + data = sio.loadmat(self.test_ann_file) + else: + data = sio.loadmat(self.train_ann_file) + for img in data['annotations'][0]: + info = {'img_prefix': self.data_prefix} + # The organization of each record is as follows, + # 0: bbox_x1 of each image + # 1: bbox_y1 of each image + # 2: bbox_x2 of each image + # 3: bbox_y2 of each image + # 4: class_id, start from 0, so + # here we need to '- 1' to let them start from 0 + # 5: file name of each image + info['img_info'] = {'filename': img[5][0]} + info['gt_label'] = np.array(img[4][0][0] - 1, dtype=np.int64) + data_infos.append(info) + return data_infos diff --git a/mmcls/datasets/utils.py b/mmcls/datasets/utils.py new file mode 100644 index 0000000..75070bc --- /dev/null +++ b/mmcls/datasets/utils.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
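A hypothetical dataset config for the ``StanfordCars`` class above (paths and pipelines are placeholders; the ``pipeline`` argument is the one accepted by ``BaseDataset``):

```python
train_dataset = dict(
    type='StanfordCars',
    data_prefix='data/stanford_cars',  # placeholder root directory
    test_mode=False,
    pipeline=[])                       # placeholder pipeline
test_dataset = dict(
    type='StanfordCars',
    data_prefix='data/stanford_cars',
    test_mode=True,
    pipeline=[])
```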
+import gzip +import hashlib +import os +import os.path +import shutil +import tarfile +import urllib.error +import urllib.request +import zipfile + +__all__ = ['rm_suffix', 'check_integrity', 'download_and_extract_archive'] + + +def rm_suffix(s, suffix=None): + if suffix is None: + return s[:s.rfind('.')] + else: + return s[:s.rfind(suffix)] + + +def calculate_md5(fpath, chunk_size=1024 * 1024): + md5 = hashlib.md5() + with open(fpath, 'rb') as f: + for chunk in iter(lambda: f.read(chunk_size), b''): + md5.update(chunk) + return md5.hexdigest() + + +def check_md5(fpath, md5, **kwargs): + return md5 == calculate_md5(fpath, **kwargs) + + +def check_integrity(fpath, md5=None): + if not os.path.isfile(fpath): + return False + if md5 is None: + return True + return check_md5(fpath, md5) + + +def download_url_to_file(url, fpath): + with urllib.request.urlopen(url) as resp, open(fpath, 'wb') as of: + shutil.copyfileobj(resp, of) + + +def download_url(url, root, filename=None, md5=None): + """Download a file from a url and place it in root. + + Args: + url (str): URL to download file from. + root (str): Directory to place downloaded file in. + filename (str | None): Name to save the file under. + If filename is None, use the basename of the URL. + md5 (str | None): MD5 checksum of the download. + If md5 is None, download without md5 check. + """ + root = os.path.expanduser(root) + if not filename: + filename = os.path.basename(url) + fpath = os.path.join(root, filename) + + os.makedirs(root, exist_ok=True) + + if check_integrity(fpath, md5): + print(f'Using downloaded and verified file: {fpath}') + else: + try: + print(f'Downloading {url} to {fpath}') + download_url_to_file(url, fpath) + except (urllib.error.URLError, IOError) as e: + if url[:5] == 'https': + url = url.replace('https:', 'http:') + print('Failed download. Trying https -> http instead.' 
+ f' Downloading {url} to {fpath}') + download_url_to_file(url, fpath) + else: + raise e + # check integrity of downloaded file + if not check_integrity(fpath, md5): + raise RuntimeError('File not found or corrupted.') + + +def _is_tarxz(filename): + return filename.endswith('.tar.xz') + + +def _is_tar(filename): + return filename.endswith('.tar') + + +def _is_targz(filename): + return filename.endswith('.tar.gz') + + +def _is_tgz(filename): + return filename.endswith('.tgz') + + +def _is_gzip(filename): + return filename.endswith('.gz') and not filename.endswith('.tar.gz') + + +def _is_zip(filename): + return filename.endswith('.zip') + + +def extract_archive(from_path, to_path=None, remove_finished=False): + if to_path is None: + to_path = os.path.dirname(from_path) + + if _is_tar(from_path): + with tarfile.open(from_path, 'r') as tar: + tar.extractall(path=to_path) + elif _is_targz(from_path) or _is_tgz(from_path): + with tarfile.open(from_path, 'r:gz') as tar: + tar.extractall(path=to_path) + elif _is_tarxz(from_path): + with tarfile.open(from_path, 'r:xz') as tar: + tar.extractall(path=to_path) + elif _is_gzip(from_path): + to_path = os.path.join( + to_path, + os.path.splitext(os.path.basename(from_path))[0]) + with open(to_path, 'wb') as out_f, gzip.GzipFile(from_path) as zip_f: + out_f.write(zip_f.read()) + elif _is_zip(from_path): + with zipfile.ZipFile(from_path, 'r') as z: + z.extractall(to_path) + else: + raise ValueError(f'Extraction of {from_path} not supported') + + if remove_finished: + os.remove(from_path) + + +def download_and_extract_archive(url, + download_root, + extract_root=None, + filename=None, + md5=None, + remove_finished=False): + download_root = os.path.expanduser(download_root) + if extract_root is None: + extract_root = download_root + if not filename: + filename = os.path.basename(url) + + download_url(url, download_root, filename, md5) + + archive = os.path.join(download_root, filename) + print(f'Extracting {archive} to {extract_root}') + extract_archive(archive, extract_root, remove_finished) diff --git a/mmcls/datasets/voc.py b/mmcls/datasets/voc.py new file mode 100644 index 0000000..e9c8bce --- /dev/null +++ b/mmcls/datasets/voc.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv +import numpy as np + +from .builder import DATASETS +from .multi_label import MultiLabelDataset + + +@DATASETS.register_module() +class VOC(MultiLabelDataset): + """`Pascal VOC `_ Dataset. + + Args: + data_prefix (str): the prefix of data path + pipeline (list): a list of dict, where each element represents + a operation defined in `mmcls.datasets.pipelines` + ann_file (str | None): the annotation file. When ann_file is str, + the subclass is expected to read from the ann_file. When ann_file + is None, the subclass is expected to read according to data_prefix + difficult_as_postive (Optional[bool]): Whether to map the difficult + labels as positive. If it set to True, map difficult examples to + positive ones(1), If it set to False, map difficult examples to + negative ones(0). Defaults to None, the difficult labels will be + set to '-1'. 
+ """ + + CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') + + def __init__(self, difficult_as_postive=None, **kwargs): + self.difficult_as_postive = difficult_as_postive + super(VOC, self).__init__(**kwargs) + if 'VOC2007' in self.data_prefix: + self.year = 2007 + else: + raise ValueError('Cannot infer dataset year from img_prefix.') + + def load_annotations(self): + """Load annotations. + + Returns: + list[dict]: Annotation info from XML file. + """ + data_infos = [] + img_ids = mmcv.list_from_file(self.ann_file) + for img_id in img_ids: + filename = f'JPEGImages/{img_id}.jpg' + xml_path = osp.join(self.data_prefix, 'Annotations', + f'{img_id}.xml') + tree = ET.parse(xml_path) + root = tree.getroot() + labels = [] + labels_difficult = [] + for obj in root.findall('object'): + label_name = obj.find('name').text + # in case customized dataset has wrong labels + # or CLASSES has been override. + if label_name not in self.CLASSES: + continue + label = self.class_to_idx[label_name] + difficult = int(obj.find('difficult').text) + if difficult: + labels_difficult.append(label) + else: + labels.append(label) + + gt_label = np.zeros(len(self.CLASSES)) + # set difficult example first, then set postivate examples. + # The order cannot be swapped for the case where multiple objects + # of the same kind exist and some are difficult. + if self.difficult_as_postive is None: + # map difficult examples to -1, + # it may be used in evaluation to ignore difficult targets. + gt_label[labels_difficult] = -1 + elif self.difficult_as_postive: + # map difficult examples to positive ones(1). + gt_label[labels_difficult] = 1 + else: + # map difficult examples to negative ones(0). 
+ gt_label[labels_difficult] = 0 + gt_label[labels] = 1 + + info = dict( + img_prefix=self.data_prefix, + img_info=dict(filename=filename), + gt_label=gt_label.astype(np.int8)) + data_infos.append(info) + + return data_infos diff --git a/mmcls/gpvit_dev/amp/runner.py b/mmcls/gpvit_dev/amp/runner.py new file mode 100644 index 0000000..8a30459 --- /dev/null +++ b/mmcls/gpvit_dev/amp/runner.py @@ -0,0 +1,140 @@ +# Auto Mixed Precision Trainer +import time +from typing import Any, Dict, List, Optional, Tuple + +import torch +from torch.utils.data import DataLoader +from torch.cuda.amp import GradScaler +from torch.cuda.amp import autocast + +from mmcv.runner.epoch_based_runner import EpochBasedRunner +from mmcv.runner.iter_based_runner import IterBasedRunner +from mmcv.runner.builder import RUNNERS +from mmcv.runner.utils import get_host_info +from mmcv.runner.hooks.optimizer import Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook + +from mmcls.core.utils.dist_utils import DistOptimizerHook + + + +@RUNNERS.register_module() +class AmpEpochBasedRunner(EpochBasedRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + + grad_scaler = GradScaler() + self.grad_scaler = grad_scaler + + def _amp_train_step(self, data_batch, **kwargs): + with autocast(): + self.run_iter(data_batch, train_mode=True, **kwargs) + losses = self.outputs['loss'] + self.optimizer.zero_grad() + self.grad_scaler.scale(losses).backward() + if self.grad_clip is not None: + self.grad_scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip['max_norm']) + + self.grad_scaler.step(self.optimizer) + self.grad_scaler.update() + + def train(self, data_loader, **kwargs): + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._max_iters = self._max_epochs * len(self.data_loader) + self.call_hook('before_train_epoch') + time.sleep(2) # Prevent possible deadlock during epoch transition + for i, data_batch in enumerate(self.data_loader): + self.data_batch = data_batch + self._inner_iter = i + self.call_hook('before_train_iter') + self._amp_train_step(data_batch, **kwargs) + self.call_hook('after_train_iter') + del self.data_batch + self._iter += 1 + + self.call_hook('after_train_epoch') + self._epoch += 1 + + def run(self, + data_loaders: List[DataLoader], + workflow: List[Tuple[str, int]], + max_epochs: Optional[int] = None, + **kwargs) -> None: + + self.grad_clip = None + _hooks = [] + for hook in self._hooks: + if isinstance(hook, Fp16OptimizerHook) or \ + isinstance(hook, GradientCumulativeFp16OptimizerHook): + raise AttributeError('MMCV based FP16 is not supported by %s' % self.__class__.__name__) + elif isinstance(hook, DistOptimizerHook): + self.grad_clip = hook.grad_clip + elif not isinstance(hook, DistOptimizerHook): + _hooks.append(hook) + self._hooks = _hooks + + super(AmpEpochBasedRunner, self).run(data_loaders, workflow, max_epochs, **kwargs) + + +@RUNNERS.register_module() +class AmpIterBasedRunner(IterBasedRunner): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + grad_scaler = GradScaler() + self.grad_scaler = grad_scaler + + def _amp_train_step(self, data_batch, **kwargs): + with autocast(): + outputs = self.model.train_step(data_batch, None, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('model.train_step() must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) + self.outputs = outputs + losses = 
self.outputs['loss'] + self.optimizer.zero_grad() + self.grad_scaler.scale(losses).backward() + if self.grad_clip is not None: + self.grad_scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip['max_norm']) + + self.grad_scaler.step(self.optimizer) + self.grad_scaler.update() + + def train(self, data_loader, **kwargs): + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._epoch = data_loader.epoch + data_batch = next(data_loader) + self.call_hook('before_train_iter') + self._amp_train_step(data_batch, **kwargs) + self.call_hook('after_train_iter') + self._inner_iter += 1 + self._iter += 1 + + def run(self, + data_loaders: List[DataLoader], + workflow: List[Tuple[str, int]], + max_epochs: Optional[int] = None, + **kwargs) -> None: + + self.grad_clip = None + + _hooks = [] + for hook in self._hooks: + if isinstance(hook, Fp16OptimizerHook) or \ + isinstance(hook, GradientCumulativeFp16OptimizerHook): + raise AttributeError('MMCV based FP16 is not supported by %s' % self.__class__.__name__) + elif isinstance(hook, DistOptimizerHook): + self.grad_clip = hook.grad_clip + elif not isinstance(hook, DistOptimizerHook): + _hooks.append(hook) + self._hooks = _hooks + + super(AmpIterBasedRunner, self).run(data_loaders, workflow, max_epochs, **kwargs) + diff --git a/mmcls/gpvit_dev/models/backbones/gpvit.py b/mmcls/gpvit_dev/models/backbones/gpvit.py new file mode 100644 index 0000000..3459d19 --- /dev/null +++ b/mmcls/gpvit_dev/models/backbones/gpvit.py @@ -0,0 +1,290 @@ +""" +Author: Chenhongyi Yang +""" +from typing import Sequence + +import copy +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmcv.cnn import build_norm_layer, build_conv_layer, build_activation_layer +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList +from mmcv.cnn.bricks import DropPath + +from mmcls.utils import get_root_logger +from mmcls.models.builder import BACKBONES +from mmcls.models.utils import resize_pos_embed, to_2tuple +from mmcls.models.backbones.base_backbone import BaseBackbone + +from mmcls.gpvit_dev.models.modules.patch_embed import PatchEmbed, ConvPatchEmbed +from mmcls.gpvit_dev.models.build import build_patch_embed +from mmcls.gpvit_dev.models.utils.attentions import LePEAttnSimpleDWBlock, GPBlock + +@BACKBONES.register_module() +class GPViT(BaseBackbone): + arch_zoo = { + **dict.fromkeys( + ['L1', 'L1'], { + 'embed_dims': 216, + 'patch_size': 8, + 'window_size': 2, + 'num_layers': 12, + 'num_heads': 12, + 'num_group_heads': 6, + 'num_group_forward_heads': 6, + 'num_ungroup_heads': 6, + 'ffn_ratio': 4., + 'patch_embed': dict(type='ConvPatchEmbed', num_convs=0), + 'mlpmixer_depth': 1, + 'group_layers': {1: 64, 4: 32, 7: 32, 10: 16}, + 'drop_path_rate': 0.2 + }), + **dict.fromkeys( + ['L2', 'L2'], { + 'embed_dims': 348, + 'patch_size': 8, + 'window_size': 2, + 'num_layers': 12, + 'num_heads': 12, + 'num_group_heads': 6, + 'num_group_forward_heads': 6, + 'num_ungroup_heads': 6, + 'ffn_ratio': 4., + 'patch_embed': dict(type='ConvPatchEmbed', num_convs=1), + 'mlpmixer_depth': 1, + 'group_layers': {1: 64, 4: 32, 7: 32, 10: 16}, + 'drop_path_rate': 0.2 + }), + **dict.fromkeys( + ['L3', 'L3'], { + 'embed_dims': 432, + 'patch_size': 8, + 'window_size': 2, + 'num_layers': 12, + 'num_heads': 12, + 'num_group_heads': 6, + 'num_group_forward_heads': 6, + 'num_ungroup_heads': 6, + 'ffn_ratio': 4., + 'patch_embed': 
dict(type='ConvPatchEmbed', num_convs=1), + 'mlpmixer_depth': 1, + 'group_layers': {1: 64, 4: 32, 7: 32, 10: 16}, + 'drop_path_rate': 0.3 + }), + **dict.fromkeys( + ['L4', 'L4'], { + 'embed_dims': 624, + 'patch_size': 8, + 'window_size': 2, + 'num_layers': 12, + 'num_heads': 12, + 'num_group_heads': 6, + 'num_group_forward_heads': 6, + 'num_ungroup_heads': 6, + 'ffn_ratio': 4., + 'patch_embed': dict(type='ConvPatchEmbed', num_convs=2), + 'mlpmixer_depth': 1, + 'group_layers': {1: 64, 4: 32, 7: 32, 10: 16}, + 'drop_path_rate': 0.3 + }), + } + def __init__(self, + arch='', + img_size=224, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None, + test_cfg=dict(vis_group=False), + convert_syncbn=False, + freeze_patch_embed=False, + **kwargs): + super(GPViT, self).__init__(init_cfg) + self.arch = arch + + if isinstance(arch, str): + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.test_cfg = test_cfg + if kwargs.get('embed_dims', None) is not None: + self.embed_dims = kwargs.get('embed_dims', None) + else: + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.img_size = to_2tuple(img_size) + self.convert_syncbn = convert_syncbn + + # set gradient checkpoint + _att_with_cp = False + if _att_with_cp is None: + if not hasattr(self, "att_with_cp"): + self.att_with_cp = self.arch_settings['with_cp'] + else: + self.att_with_cp = _att_with_cp + _group_with_cp = kwargs.pop('group_with_cp', None) + if _group_with_cp is None: + if not hasattr(self, "group_with_cp"): + self.group_with_cp = self.att_with_cp + else: + self.group_with_cp = _group_with_cp + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + patch_size=self.arch_settings['patch_size'], + stride=self.arch_settings['patch_size'], + ) + _patch_cfg.update(patch_cfg) + _patch_cfg.update(self.arch_settings['patch_embed']) + self.patch_embed = build_patch_embed(_patch_cfg) + self.freeze_patch_embed = freeze_patch_embed + + self.patch_size = self.arch_settings['patch_size'] + + self.patch_resolution = self.patch_embed.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + # Set position embedding + self.interpolate_mode = interpolate_mode + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches,self.embed_dims)) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' 
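To make the `arch_zoo` presets concrete, here is a rough usage sketch (illustrative, not part of the patch) of building the backbone defined above through MMClassification's `build_backbone` and running it on a dummy batch. It assumes the rest of this patch is installed so that `GPViT` and its attention modules are importable and registered; the shapes in the comments follow from the 'L1' settings (embed_dims=216, patch_size=8) and a 224x224 input.

```python
# Rough usage sketch for the GPViT backbone defined above.
import torch

from mmcls.models import build_backbone

cfg = dict(
    type='GPViT',
    arch='L1',           # one of the arch_zoo presets: L1 / L2 / L3 / L4
    img_size=224,
    out_indices=-1,      # take features from the last layer only
    drop_path_rate=-1.,  # a negative value falls back to the preset's rate
)
backbone = build_backbone(cfg)
backbone.init_weights()

x = torch.randn(1, 3, 224, 224)
feats = backbone(x)      # tuple of (B, C, H, W) patch-token maps
# For 'L1' (embed_dims=216, patch_size=8) this should be (1, 216, 28, 28).
print(feats[0].shape)
```

Note that the negative-index conversion in `__init__` writes back into `out_indices`, so a mutable container appears to be required: an int (which is wrapped into a list) or a list works, while a plain tuple would fail at that assignment.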
+ for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert 0 <= out_indices[i] <= self.num_layers, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + if drop_path_rate < 0: + _drop_path_rate = self.arch_settings.get('drop_path_rate', None) + if _drop_path_rate is None: + raise ValueError + else: + _drop_path_rate = drop_path_rate + + dpr = np.linspace(0, _drop_path_rate, self.num_layers) + self.drop_path_rate = _drop_path_rate + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + + for i in range(self.num_layers): + _arch_settings = copy.deepcopy(self.arch_settings) + if i not in _arch_settings['group_layers'].keys(): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=_arch_settings['num_heads'], + window_size=_arch_settings['window_size'], + ffn_ratio=_arch_settings['ffn_ratio'], + drop_rate=drop_rate, + drop_path=dpr[i], + norm_cfg=norm_cfg, + with_cp=self.att_with_cp) + _layer_cfg.update(layer_cfgs[i]) + attn_layer = LePEAttnSimpleDWBlock(**_layer_cfg) + self.layers.append(attn_layer) + else: + _layer_cfg = dict( + embed_dims=self.embed_dims, + depth=_arch_settings['mlpmixer_depth'], + num_group_heads=_arch_settings['num_group_heads'], + num_forward_heads=_arch_settings['num_group_forward_heads'], + num_ungroup_heads=_arch_settings['num_ungroup_heads'], + num_group_token=_arch_settings['group_layers'][i], + ffn_ratio=_arch_settings['ffn_ratio'], + drop_path=dpr[i], + with_cp=self.group_with_cp) + group_layer = GPBlock(**_layer_cfg) + self.layers.append(group_layer) + self.final_norm = final_norm + # assert final_norm + if final_norm: + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + for i in out_indices: + if i != self.num_layers - 1: + if norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, self.embed_dims)[1] + else: + norm_layer = nn.Identity() + self.add_module(f'norm{i}', norm_layer) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def init_weights(self): + super(GPViT, self).init_weights() + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + trunc_normal_(self.pos_embed, std=0.02) + self.set_freeze_patch_embed() + + def set_freeze_patch_embed(self): + if self.freeze_patch_embed: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + pos_embed = resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=0) + + x = x + pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x, hw_shape=patch_resolution) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.norm1(x) + + if i in self.out_indices: + B, _, C = x.shape + patch_token = x.reshape(B, *patch_resolution, C) + if i != self.num_layers - 1: + norm_layer = getattr(self, f'norm{i}') + patch_token = norm_layer(patch_token) + patch_token = patch_token.permute(0, 3, 1, 2) + outs.append(patch_token) + return tuple(outs) diff --git a/mmcls/gpvit_dev/models/build.py b/mmcls/gpvit_dev/models/build.py new file mode 100644 index 0000000..f5473ac --- /dev/null +++ b/mmcls/gpvit_dev/models/build.py @@ -0,0 +1,27 @@ +from mmcv.utils import Registry, 
build_from_cfg +from torch import nn +import warnings + + +PATCH_EMBED = Registry('patch_embed') +GROUP_LAYER = Registry('group_layer') +ATTN_LAYER = Registry('attn_layer') + +def build(cfg, registry, default_args=None): + if isinstance(cfg, list): + modules = [ + build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg + ] + return nn.Sequential(*modules) + else: + return build_from_cfg(cfg, registry, default_args) + +def build_patch_embed(cfg): + return build(cfg, PATCH_EMBED) + +def build_group_layer(cfg): + return build(cfg, GROUP_LAYER) + +def build_attn_layer(cfg): + return build(cfg, ATTN_LAYER) + diff --git a/mmcls/gpvit_dev/models/modules/patch_embed.py b/mmcls/gpvit_dev/models/modules/patch_embed.py new file mode 100644 index 0000000..f155c12 --- /dev/null +++ b/mmcls/gpvit_dev/models/modules/patch_embed.py @@ -0,0 +1,290 @@ +''' +Author: Chenhongyi Yang +Reference: https://github.com/OliverRensu/Shunted-Transformer +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmcv.cnn.bricks.transformer import FFN, AdaptivePadding, build_dropout +from mmcv.cnn import (Linear, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmcv.runner.base_module import BaseModule, ModuleList, Sequential +from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning, + to_2tuple) + + +from mmcls.gpvit_dev.models.build import PATCH_EMBED + +@PATCH_EMBED.register_module() +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The type of convolution + to generate patch embedding. Default: "Conv2d". + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: 16. + padding (int | tuple | string): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only works when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. 
+ """ + + def __init__(self, + in_channels=3, + embed_dims=768, + conv_type='Conv2d', + patch_size=16, + stride=16, + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None): + super(PatchEmbed, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + if stride is None: + stride = patch_size + + kernel_size = to_2tuple(patch_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adaptive_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adaptive_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # e.g. when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adaptive_padding: + pad_h, pad_w = self.adaptive_padding.get_pad_shape(input_size) + input_h, input_w = input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adaptive_padding: + x = self.adaptive_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size + +@PATCH_EMBED.register_module() +class ConvPatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The type of convolution + to generate patch embedding. Default: "Conv2d". + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: 16. + padding (int | tuple | string): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only works when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. 
+ """ + + def __init__(self, + in_channels=3, + embed_dims=768, + num_convs=0, + conv_type='Conv2d', + patch_size=16, + stride=16, + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None): + super(ConvPatchEmbed, self).__init__(init_cfg=init_cfg) + + assert patch_size % 2 == 0 + + self.embed_dims = embed_dims + if stride is None: + stride = patch_size // 2 + else: + stride = stride // 2 + + self.stem = torch.nn.Sequential( + nn.Conv2d(in_channels, 64, kernel_size=(7,7), stride=(2,2), padding=3, bias=False), + nn.BatchNorm2d(64), + nn.ReLU(True)) + + if num_convs > 0: + convs = [] + for _ in range(num_convs): + convs.append(torch.nn.Conv2d(64, 64, (3,3), (1,1), padding=1, bias=False)) + convs.append(torch.nn.BatchNorm2d(64)) + convs.append(torch.nn.ReLU(True)) + self.convs = torch.nn.Sequential(*convs) + else: + self.convs = None + + kernel_size = to_2tuple(patch_size//2) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adaptive_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adaptive_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=64, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # e.g. when `use_abs_pos_embed` outside + self.init_input_size = input_size + _input_size = (input_size[0] // 2, input_size[1] // 2) + if self.adaptive_padding: + pad_h, pad_w = self.adaptive_padding.get_pad_shape(_input_size) + input_h, input_w = _input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + _input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (_input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (_input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). 
+ """ + x = self.stem(x) + if self.convs is not None: + x = self.convs(x) + + if self.adaptive_padding: + x = self.adaptive_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size \ No newline at end of file diff --git a/mmcls/gpvit_dev/models/necks/group_neck.py b/mmcls/gpvit_dev/models/necks/group_neck.py new file mode 100644 index 0000000..375f74c --- /dev/null +++ b/mmcls/gpvit_dev/models/necks/group_neck.py @@ -0,0 +1,52 @@ +""" +Author: Chenhongyi Yang +""" +import torch +import torch.nn as nn + +from mmcv.cnn import build_norm_layer + +from mmcls.gpvit_dev.models.utils.attentions import LightAttModule +from mmcls.models.builder import NECKS + + +@NECKS.register_module() +class GroupNeck(nn.Module): + def __init__(self, + embed_dims, + num_heads=6, + qkv_bias=False, + qk_scale=None, + norm_cfg=dict(type='LN')): + super().__init__() + + self.group_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + self.norm_query = build_norm_layer(norm_cfg, embed_dims)[1] + + self.attn = LightAttModule( + embed_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=0., + proj_drop=0., + q_project=False, + k_project=True, + v_project=False, + proj_after_att=False) + + def forward(self, inputs): + # assume inputs are normalized + if isinstance(inputs, tuple): + assert len(inputs) == 1 + x = inputs[0] + if len(x.shape) == 4: + x = x.reshape(x.size(0), x.size(1), -1).permute(0, 2, 1).contiguous() + group_token = self.group_token.expand(x.size(0), -1, -1) + group_token = self.norm_query(group_token) + out = self.attn(query=group_token, key=x, value=x) + out = out.view(out.size(0), -1) + return (out,) + else: + raise TypeError('neck inputs should be tuple or torch.tensor') + diff --git a/mmcls/gpvit_dev/models/utils/attentions.py b/mmcls/gpvit_dev/models/utils/attentions.py new file mode 100644 index 0000000..c1febc4 --- /dev/null +++ b/mmcls/gpvit_dev/models/utils/attentions.py @@ -0,0 +1,599 @@ +""" +Author: Chenhongyi Yang +LePE attention References: https://github.com/microsoft/CSWin-Transformer +""" +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp + +from einops import rearrange + +from mmcv.cnn import build_norm_layer, build_conv_layer, build_activation_layer +from mmcv.cnn.bricks.transformer import FFN, AdaptivePadding, build_dropout +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList +from mmcv.cnn.bricks import DropPath + +def img2windows(img, H_sp, W_sp): + B, C, H, W = img.shape + img_reshape = img.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp) + img_perm = img_reshape.permute(0, 2, 4, 3, 5, 1).contiguous().reshape(-1, H_sp * W_sp, C) + return img_perm + +def windows2img(img_splits_hw, H_sp, W_sp, H, W): + B = int(img_splits_hw.shape[0] / (H * W / H_sp / W_sp)) + img = img_splits_hw.view(B, H // H_sp, W // W_sp, H_sp, W_sp, -1) + img = img.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return img + +class LePEAttention(nn.Module): + def __init__(self, dim, mode, split_size=7, dim_out=None, num_heads=8, attn_drop=0., proj_drop=0., qk_scale=None): + super().__init__() + self.dim = dim + self.dim_out = dim_out or dim + self.split_size = split_size + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat 
with prev weights + self.scale = qk_scale or head_dim ** -0.5 + assert mode in (0, 1) + self.mode = mode + self.get_v = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1, groups=dim) + self.attn_drop = nn.Dropout(attn_drop) + + def im2cswin(self, x, hw_shape): + B, N, C = x.shape + H, W = hw_shape + x = x.transpose(-2, -1).contiguous().view(B, C, H, W) + if self.mode == 0: + H_sp, W_sp = H, self.split_size + else: + H_sp, W_sp = self.split_size, W + x = img2windows(x, H_sp, W_sp) + x = x.reshape(-1, H_sp * W_sp, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3).contiguous() + return x + + def get_lepe(self, x, hw_shape, func): + B, N, C = x.shape + H, W = hw_shape + x = x.transpose(-2, -1).contiguous().view(B, C, H, W) + if self.mode == 0: + H_sp, W_sp = H, self.split_size + else: + H_sp, W_sp = self.split_size, W + x = x.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp) + x = x.permute(0, 2, 4, 1, 3, 5).contiguous().reshape(-1, C, H_sp, W_sp) ### B', C, H', W' + lepe = func(x) ### B', C, H', W' + lepe = lepe.reshape(-1, self.num_heads, C // self.num_heads, H_sp * W_sp).permute(0, 1, 3, 2).contiguous() + x = x.reshape(-1, self.num_heads, C // self.num_heads, H_sp * W_sp).permute(0, 1, 3, 2).contiguous() + return x, lepe + + def forward(self, qkv, hw_shape): + """ + x: B L C + """ + q, k, v = qkv[0], qkv[1], qkv[2] + ### Img2Window + H, W = hw_shape + B, L, C = q.shape + assert L == H * W, "flatten img_tokens has wrong size" + + q = self.im2cswin(q, hw_shape) + k = self.im2cswin(k, hw_shape) + v, lepe = self.get_lepe(v, hw_shape, self.get_v) + + if self.mode == 0: + H_sp, W_sp = H, self.split_size + else: + H_sp, W_sp = self.split_size, W + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) # B head N C @ B head C N --> B head N N + attn = nn.functional.softmax(attn, dim=-1, dtype=attn.dtype) + attn = self.attn_drop(attn) + + x = (attn @ v) + lepe + x = x.transpose(1, 2).reshape(-1, H_sp * W_sp, C) # B head N N @ B head N C + + ### Window2Img + x = windows2img(x, H_sp, W_sp, H, W).view(B, -1, C) # B H' W' C + + return x + +class LePEAttnSimpleDWBlock(BaseModule): + def __init__(self, + embed_dims, + num_heads, + window_size, # For convenience, we use window size to denote split size + ffn_ratio=4., + drop_rate=0., + drop_path=0., + attn_cfgs=dict(), + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super().__init__(init_cfg) + self.with_cp = with_cp + self.dim = embed_dims + self.num_heads = num_heads + self.split_size = window_size + self.ffn_ratio = ffn_ratio + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=True) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.branch_num = 2 + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(0.) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.attns = nn.ModuleList([ + LePEAttention( + embed_dims // 2, mode=i, + split_size=self.split_size, num_heads=num_heads // 2, dim_out=embed_dims // 2, + qk_scale=None, attn_drop=0., proj_drop=drop_rate) + for i in range(self.branch_num)]) + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': drop_rate, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': dict(type='GELU'), + **ffn_cfgs + } + self.ffn = FFN(**_ffn_cfgs) + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.dw = nn.Conv2d(embed_dims, embed_dims, kernel_size=(3, 3), padding=(1, 1), bias=False, groups=embed_dims) + + def forward(self, x, hw_shape): + """ + x: B, H*W, C + """ + def _inner_forward(x, hw_shape): + H, W = hw_shape + B, L, C = x.shape + assert L == H * W, "flatten img_tokens has wrong size" + img = self.norm1(x) + qkv = self.qkv(img).reshape(B, -1, 3, C).permute(2, 0, 1, 3).contiguous() + + x1 = self.attns[0](qkv[:, :, :, :C // 2], hw_shape) + x2 = self.attns[1](qkv[:, :, :, C // 2:], hw_shape) + attened_x = torch.cat([x1, x2], dim=2) + attened_x = self.proj(attened_x) + x = x + self.drop_path(attened_x) + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + B, L, C = x.shape + x = x.permute(0, 2, 1).contiguous().reshape(B, C, hw_shape[0], hw_shape[1]) + x = self.dw(x) + x = x.reshape(B, C, L).permute(0, 2, 1).contiguous() + return x + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x, hw_shape) + else: + x = _inner_forward(x, hw_shape) + return x + +class MLPMixerLayer(nn.Module): + def __init__(self, + num_patches, + embed_dims, + patch_expansion, + channel_expansion, + drop_path, + drop_out, + **kwargs): + + super(MLPMixerLayer, self).__init__() + + patch_mix_dims = int(patch_expansion * embed_dims) + channel_mix_dims = int(channel_expansion * embed_dims) + + self.patch_mixer = nn.Sequential( + nn.Linear(num_patches, patch_mix_dims), + nn.GELU(), + nn.Dropout(drop_out), + nn.Linear(patch_mix_dims, num_patches), + nn.Dropout(drop_out) + ) + + self.channel_mixer = nn.Sequential( + nn.Linear(embed_dims, channel_mix_dims), + nn.GELU(), + nn.Dropout(drop_out), + nn.Linear(channel_mix_dims, embed_dims), + nn.Dropout(drop_out) + ) + + self.drop_path1 = build_dropout(dict(type='DropPath', drop_prob=drop_path)) + self.drop_path2 = build_dropout(dict(type='DropPath', drop_prob=drop_path)) + + self.norm1 = nn.LayerNorm(embed_dims) + self.norm2 = nn.LayerNorm(embed_dims) + + def forward(self, x): + x = x + self.drop_path1(self.patch_mixer(self.norm1(x).transpose(1,2)).transpose(1,2)) + x = x + self.drop_path2(self.channel_mixer(self.norm2(x))) + return x + +class MLPMixer(BaseModule): + def __init__(self, + num_patches, + embed_dims, + patch_expansion=0.5, + channel_expansion=4.0, + depth=1, + drop_path=0., + drop_out=0., + init_cfg=None, + **kwargs): + super(MLPMixer, self).__init__(init_cfg) + layers = [ + MLPMixerLayer(num_patches, embed_dims, patch_expansion, channel_expansion, drop_path, drop_out) + for _ in range(depth) + ] + self.layers = nn.Sequential(*layers) + + def forward(self, x): + return self.layers(x) + +class LightAttModule(nn.Module): + def __init__(self, + dim, + num_heads, + out_dim=None, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + q_project=True, + k_project=True, + v_project=True, + proj_after_att=True): + super().__init__() + if out_dim is None: + out_dim = dim + self.num_heads = num_heads + 
head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.q_proj = nn.Linear(dim, dim, bias=qkv_bias) if q_project else None + self.k_proj = nn.Linear(dim, dim, bias=qkv_bias) if k_project else None + self.v_proj = nn.Linear(dim, dim, bias=qkv_bias) if v_project else None + + self.attn_drop = nn.Dropout(attn_drop) + + if proj_after_att: + self.proj = nn.Sequential(nn.Linear(dim, out_dim), nn.Dropout(proj_drop)) + else: + self.proj = None + + def forward(self, query, key, value, att_bias=None): + bq, nq, cq = query.shape + bk, nk, ck = key.shape + bv, nv, cv = value.shape + + # [bq, nh, nq, cq//nh] + if self.q_proj: + q = rearrange(self.q_proj(query), 'b n (h c)-> b h n c', h=self.num_heads, b=bq, n=nq, c=cq // self.num_heads) + else: + q = rearrange(query, 'b n (h c)-> b h n c', h=self.num_heads, b=bq, n=nq, c=cq // self.num_heads) + # [bk, nh, nk, ck//nh] + if self.k_proj: + k = rearrange(self.k_proj(key), 'b n (h c)-> b h n c', h=self.num_heads, b=bk, n=nk, c=ck // self.num_heads) + else: + k = rearrange(key, 'b n (h c)-> b h n c', h=self.num_heads, b=bk, n=nk, c=ck // self.num_heads) + # [bv, nh, nv, cv//nh] + if self.v_proj: + v = rearrange(self.v_proj(value), 'b n (h c)-> b h n c', h=self.num_heads, b=bv, n=nv, c=cv // self.num_heads) + else: + v = rearrange(value, 'b n (h c)-> b h n c', h=self.num_heads, b=bv, n=nv, c=cv // self.num_heads) + + # [B, nh, N, S] + attn = (q @ k.transpose(-2, -1)) * self.scale + if att_bias is not None: + attn = attn + att_bias.unsqueeze(dim=1) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + assert attn.shape == (bq, self.num_heads, nq, nk) + + # [B, nh, N, C//nh] -> [B, N, C] + # out = (attn @ v).transpose(1, 2).reshape(B, N, C) + out = rearrange(attn @ v, 'b h n c -> b n (h c)', h=self.num_heads, b=bq, n=nq, c=cv // self.num_heads) + if self.proj: + out = self.proj(out) + return out + +class FullAttnModule(nn.Module): + def __init__(self, + dim, + num_heads, + out_dim=None, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + q_project=True): + super().__init__() + if out_dim is None: + out_dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.q_proj = nn.Linear(dim, dim, bias=qkv_bias) if q_project else None + self.k_proj = nn.Linear(dim, dim, bias=qkv_bias) + self.v_proj = nn.Linear(dim, dim, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, out_dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, query, key, value, att_bias=None): + bq, nq, cq = query.shape + bk, nk, ck = key.shape + bv, nv, cv = value.shape + + # [bq, nh, nq, cq//nh] + if self.q_proj: + q = rearrange(self.q_proj(query), 'b n (h c)-> b h n c', h=self.num_heads, b=bq, n=nq, c=cq // self.num_heads) + else: + q = rearrange(query, 'b n (h c)-> b h n c', h=self.num_heads, b=bq, n=nq, c=cq // self.num_heads) + # [bk, nh, nk, ck//nh] + k = rearrange(self.k_proj(key), 'b n (h c)-> b h n c', h=self.num_heads, b=bk, n=nk, c=ck // self.num_heads) + # [bv, nh, nv, cv//nh] + v = rearrange(self.v_proj(value), 'b n (h c)-> b h n c', h=self.num_heads, b=bv, n=nv, c=cv // self.num_heads) + + # [B, nh, N, S] + attn = (q @ k.transpose(-2, -1)) * self.scale + if att_bias is not None: + attn = attn + att_bias.unsqueeze(dim=1) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + assert attn.shape == (bq, self.num_heads, nq, nk) + + # [B, nh, N, C//nh] -> [B, N, C] + # out = (attn @ v).transpose(1, 2).reshape(B, N, C) + out 
= rearrange(attn @ v, 'b h n c -> b n (h c)', h=self.num_heads, b=bq, n=nq, c=cv // self.num_heads) + out = self.proj(out) + out = self.proj_drop(out) + return out + +class FullAttnCatBlock(nn.Module): + def __init__(self, + embed_dims, + num_heads, + ffn_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + key_is_query=False, + value_is_key=False, + q_project=True, + with_cp=False, + **kwargs): + super().__init__() + self.with_cp = with_cp + + self.norm_query = build_norm_layer(norm_cfg, embed_dims)[1] + + if not key_is_query: + self.norm_key = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm_key = None + self.key_is_query = key_is_query + + if not value_is_key: + self.norm_value = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm_value = None + self.value_is_key = value_is_key + + self.attn = FullAttnModule( + embed_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + q_project=q_project) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': drop, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': act_cfg, + } + self.ffn = FFN(**_ffn_cfgs) + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + self.proj = nn.Linear(embed_dims * 2, embed_dims, bias=True) + + def forward(self, query, key, value, att_bias=None): + def _inner_forward(query, key, value, att_bias): + q = self.norm_query(query) + k = q if self.key_is_query else self.norm_key(key) + v = k if self.value_is_key else self.norm_value(value) + + x = torch.cat((query, self.drop_path(self.attn(q, k, v, att_bias=att_bias))), dim=-1) + x = self.proj(x) + x = self.ffn(self.norm2(x), identity=x) + return x + + if self.with_cp: + return cp.checkpoint(_inner_forward, query, key, value, att_bias) + else: + return _inner_forward(query, key, value, att_bias) + +class LightGroupAttnBlock(nn.Module): + def __init__(self, + embed_dims, + num_heads, + ffn_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + key_is_query=False, + value_is_key=False, + with_cp=False): + super().__init__() + + self.with_cp = with_cp + + self.norm_query = build_norm_layer(norm_cfg, embed_dims)[1] + + if not key_is_query: + self.norm_key = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm_key = None + self.key_is_query = key_is_query + + if not value_is_key: + self.norm_value = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm_value = None + self.value_is_key = value_is_key + + self.attn = LightAttModule( + embed_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + q_project=False, + k_project=True, + v_project=False, + proj_after_att=False) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, query, key, value, att_bias=None): + def _inner_forward(query, key, value, att_bias): + q = self.norm_query(query) + k = q if self.key_is_query else self.norm_key(key) + v = k if self.value_is_key else self.norm_value(value) + x = self.drop_path(self.attn(q, k, v, att_bias=att_bias)) + return x + + if self.with_cp: + return cp.checkpoint(_inner_forward, query, key, value, att_bias) + else: + return _inner_forward(query, key, value, att_bias) + +class GPBlock(nn.Module): + def __init__(self, + embed_dims, + depth, + num_group_heads, + num_ungroup_heads, + num_group_token, + ffn_ratio=4., + qkv_bias=True, + group_qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + with_cp=False, + group_att_cfg=dict(), + fwd_att_cfg=dict(), + ungroup_att_cfg=dict(), + **kwargs): + + super().__init__() + + self.embed_dims = embed_dims + self.num_group_token = num_group_token + self.with_cp = with_cp + + self.group_token = nn.Parameter(torch.zeros(1, num_group_token, embed_dims)) + trunc_normal_(self.group_token, std=.02) + + _group_att_cfg = dict( + embed_dims=embed_dims, + num_heads=num_group_heads, + ffn_ratio=ffn_ratio, + qkv_bias=qkv_bias, + qk_scale=group_qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=0., + key_is_query=False, + value_is_key=True, + with_cp=with_cp) + _group_att_cfg.update(group_att_cfg) + self.group_layer = LightGroupAttnBlock(**_group_att_cfg) + + _mixer_cfg = dict( + num_patches=num_group_token, + embed_dims=embed_dims, + patch_expansion=0.5, + channel_expansion=4.0, + depth=depth, + drop_path=drop_path) + _mixer_cfg.update(fwd_att_cfg) + self.mixer = MLPMixer(**_mixer_cfg) + + _ungroup_att_cfg = dict( + embed_dims=embed_dims, + num_heads=num_ungroup_heads, + ffn_ratio=ffn_ratio, + qkv_bias=qkv_bias, + qk_scale=None, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path, + key_is_query=False, + value_is_key=True, + with_cp=with_cp) + _ungroup_att_cfg.update(ungroup_att_cfg) + self.un_group_layer = FullAttnCatBlock(**_ungroup_att_cfg) + + self.dwconv = torch.nn.Sequential( + nn.Conv2d(embed_dims, embed_dims, kernel_size=(3,3), padding=(1,1), bias=False, groups=embed_dims), + nn.BatchNorm2d(num_features=embed_dims), + nn.ReLU(True)) + + def forward(self, x, hw_shape): + """ + Args: + x: image tokens, shape [B, L, C] + hw_shape: tuple or list (H, W) + Returns: + proj_tokens: shape [B, L, C] + """ + B, L, C = x.size() + group_token = self.group_token.expand(x.size(0), -1, -1) + gt = group_token + + gt = self.group_layer(query=gt, key=x, value=x) + gt = self.mixer(gt) + ungroup_tokens = self.un_group_layer(query=x, key=gt, value=gt) + ungroup_tokens = ungroup_tokens.permute(0,2,1).contiguous().reshape(B, C, hw_shape[0], hw_shape[1]) + proj_tokens = self.dwconv(ungroup_tokens).view(B, C, -1).permute(0,2,1).contiguous().view(B, L, C) + return proj_tokens diff --git a/mmcls/models/__init__.py b/mmcls/models/__init__.py new file mode 100644 index 0000000..b501833 --- /dev/null +++ b/mmcls/models/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
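As a quick shape walk-through of the group-propagation flow implemented by `GPBlock` above (group attention over the image tokens, an MLP-mixer update of the group tokens, ungrouping back to image tokens, then a depthwise convolution), the following sketch instantiates the block on dummy tokens. The hyper-parameters echo the 'L1'-style settings and the import path matches this patch, but the snippet itself is illustrative only.

```python
# Shape sanity-check for the GPBlock defined above (illustrative).
import torch

from mmcls.gpvit_dev.models.utils.attentions import GPBlock

block = GPBlock(
    embed_dims=216,
    depth=1,             # MLPMixer depth applied to the group tokens
    num_group_heads=6,
    num_ungroup_heads=6,
    num_group_token=64,  # number of learnable group tokens
    ffn_ratio=4.,
    drop_path=0.)

B, H, W, C = 2, 28, 28, 216
x = torch.randn(B, H * W, C)        # image tokens, [B, L, C]
out = block(x, hw_shape=(H, W))     # group -> mix -> ungroup -> depthwise conv
assert out.shape == (B, H * W, C)   # same shape as the input tokens
```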
+from .backbones import * # noqa: F401,F403 +from .builder import (BACKBONES, CLASSIFIERS, HEADS, LOSSES, NECKS, + build_backbone, build_classifier, build_head, build_loss, + build_neck) +from .classifiers import * # noqa: F401,F403 +from .heads import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 + +__all__ = [ + 'BACKBONES', 'HEADS', 'NECKS', 'LOSSES', 'CLASSIFIERS', 'build_backbone', + 'build_head', 'build_neck', 'build_loss', 'build_classifier' +] diff --git a/mmcls/models/backbones/__init__.py b/mmcls/models/backbones/__init__.py new file mode 100644 index 0000000..f39118e --- /dev/null +++ b/mmcls/models/backbones/__init__.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .alexnet import AlexNet +from .conformer import Conformer +from .convmixer import ConvMixer +from .convnext import ConvNeXt +from .cspnet import CSPDarkNet, CSPNet, CSPResNet, CSPResNeXt +from .deit import DistilledVisionTransformer +from .densenet import DenseNet +from .efficientformer import EfficientFormer +from .efficientnet import EfficientNet +from .hornet import HorNet +from .hrnet import HRNet +from .lenet import LeNet5 +from .mlp_mixer import MlpMixer +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .mvit import MViT +from .poolformer import PoolFormer +from .regnet import RegNet +from .repmlp import RepMLPNet +from .repvgg import RepVGG +from .res2net import Res2Net +from .resnest import ResNeSt +from .resnet import ResNet, ResNetV1c, ResNetV1d +from .resnet_cifar import ResNet_CIFAR +from .resnext import ResNeXt +from .seresnet import SEResNet +from .seresnext import SEResNeXt +from .shufflenet_v1 import ShuffleNetV1 +from .shufflenet_v2 import ShuffleNetV2 +from .swin_transformer import SwinTransformer +from .swin_transformer_v2 import SwinTransformerV2 +from .t2t_vit import T2T_ViT +from .timm_backbone import TIMMBackbone +from .tnt import TNT +from .twins import PCPVT, SVT +from .van import VAN +from .vgg import VGG +from .vision_transformer import VisionTransformer + +from ...gpvit_dev.models.backbones.gpvit import GPViT + +__all__ = [ + 'LeNet5', 'AlexNet', 'VGG', 'RegNet', 'ResNet', 'ResNeXt', 'ResNetV1d', + 'ResNeSt', 'ResNet_CIFAR', 'SEResNet', 'SEResNeXt', 'ShuffleNetV1', + 'ShuffleNetV2', 'MobileNetV2', 'MobileNetV3', 'VisionTransformer', + 'SwinTransformer', 'SwinTransformerV2', 'TNT', 'TIMMBackbone', 'T2T_ViT', + 'Res2Net', 'RepVGG', 'Conformer', 'MlpMixer', 'DistilledVisionTransformer', + 'PCPVT', 'SVT', 'EfficientNet', 'ConvNeXt', 'HRNet', 'ResNetV1c', + 'ConvMixer', 'CSPDarkNet', 'CSPResNet', 'CSPResNeXt', 'CSPNet', + 'RepMLPNet', 'PoolFormer', 'DenseNet', 'VAN', 'MViT', 'EfficientFormer', + 'HorNet', 'GPViT' +] diff --git a/mmcls/models/backbones/alexnet.py b/mmcls/models/backbones/alexnet.py new file mode 100644 index 0000000..1b74dc7 --- /dev/null +++ b/mmcls/models/backbones/alexnet.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class AlexNet(BaseBackbone): + """`AlexNet `_ backbone. + + The input for AlexNet is a 224x224 RGB image. + + Args: + num_classes (int): number of classes for classification. + The default value is -1, which uses the backbone as + a feature extractor without the top classifier. 
+ """ + + def __init__(self, num_classes=-1): + super(AlexNet, self).__init__() + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = x.view(x.size(0), 256 * 6 * 6) + x = self.classifier(x) + + return (x, ) diff --git a/mmcls/models/backbones/base_backbone.py b/mmcls/models/backbones/base_backbone.py new file mode 100644 index 0000000..c1050fa --- /dev/null +++ b/mmcls/models/backbones/base_backbone.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from mmcv.runner import BaseModule + + +class BaseBackbone(BaseModule, metaclass=ABCMeta): + """Base backbone. + + This class defines the basic functions of a backbone. Any backbone that + inherits this class should at least define its own `forward` function. + """ + + def __init__(self, init_cfg=None): + super(BaseBackbone, self).__init__(init_cfg) + + @abstractmethod + def forward(self, x): + """Forward computation. + + Args: + x (tensor | tuple[tensor]): x could be a Torch.tensor or a tuple of + Torch.tensor, containing input data for forward computation. + """ + pass + + def train(self, mode=True): + """Set module status before forward computation. + + Args: + mode (bool): Whether it is train_mode or test_mode + """ + super(BaseBackbone, self).train(mode) diff --git a/mmcls/models/backbones/conformer.py b/mmcls/models/backbones/conformer.py new file mode 100644 index 0000000..e70c62d --- /dev/null +++ b/mmcls/models/backbones/conformer.py @@ -0,0 +1,626 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmcv.cnn.bricks.drop import DropPath +from mmcv.cnn.bricks.transformer import AdaptivePadding +from mmcv.cnn.utils.weight_init import trunc_normal_ + +from mmcls.utils import get_root_logger +from ..builder import BACKBONES +from .base_backbone import BaseBackbone, BaseModule +from .vision_transformer import TransformerEncoderLayer + + +class ConvBlock(BaseModule): + """Basic convluation block used in Conformer. + + This block includes three convluation modules, and supports three new + functions: + 1. Returns the output of both the final layers and the second convluation + module. + 2. Fuses the input of the second convluation module with an extra input + feature map. + 3. Supports to add an extra convluation module to the identity connection. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + stride (int): The stride of the second convluation module. + Defaults to 1. + groups (int): The groups of the second convluation module. 
+ Defaults to 1. + drop_path_rate (float): The rate of the DropPath layer. Defaults to 0. + with_residual_conv (bool): Whether to add an extra convluation module + to the identity connection. Defaults to False. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='BN', eps=1e-6)``. + act_cfg (dict): The config of activative functions. + Defaults to ``dict(type='ReLU', inplace=True))``. + init_cfg (dict, optional): The extra config to initialize the module. + Defaults to None. + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + groups=1, + drop_path_rate=0., + with_residual_conv=False, + norm_cfg=dict(type='BN', eps=1e-6), + act_cfg=dict(type='ReLU', inplace=True), + init_cfg=None): + super(ConvBlock, self).__init__(init_cfg=init_cfg) + + expansion = 4 + mid_channels = out_channels // expansion + + self.conv1 = nn.Conv2d( + in_channels, + mid_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.bn1 = build_norm_layer(norm_cfg, mid_channels)[1] + self.act1 = build_activation_layer(act_cfg) + + self.conv2 = nn.Conv2d( + mid_channels, + mid_channels, + kernel_size=3, + stride=stride, + groups=groups, + padding=1, + bias=False) + self.bn2 = build_norm_layer(norm_cfg, mid_channels)[1] + self.act2 = build_activation_layer(act_cfg) + + self.conv3 = nn.Conv2d( + mid_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.bn3 = build_norm_layer(norm_cfg, out_channels)[1] + self.act3 = build_activation_layer(act_cfg) + + if with_residual_conv: + self.residual_conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + padding=0, + bias=False) + self.residual_bn = build_norm_layer(norm_cfg, out_channels)[1] + + self.with_residual_conv = with_residual_conv + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x, fusion_features=None, out_conv2=True): + identity = x + + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) if fusion_features is None else self.conv2( + x + fusion_features) + x = self.bn2(x) + x2 = self.act2(x) + + x = self.conv3(x2) + x = self.bn3(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.with_residual_conv: + identity = self.residual_conv(identity) + identity = self.residual_bn(identity) + + x += identity + x = self.act3(x) + + if out_conv2: + return x, x2 + else: + return x + + +class FCUDown(BaseModule): + """CNN feature maps -> Transformer patch embeddings.""" + + def __init__(self, + in_channels, + out_channels, + down_stride, + with_cls_token=True, + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + init_cfg=None): + super(FCUDown, self).__init__(init_cfg=init_cfg) + self.down_stride = down_stride + self.with_cls_token = with_cls_token + + self.conv_project = nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.sample_pooling = nn.AvgPool2d( + kernel_size=down_stride, stride=down_stride) + + self.ln = build_norm_layer(norm_cfg, out_channels)[1] + self.act = build_activation_layer(act_cfg) + + def forward(self, x, x_t): + x = self.conv_project(x) # [N, C, H, W] + + x = self.sample_pooling(x).flatten(2).transpose(1, 2) + x = self.ln(x) + x = self.act(x) + + if self.with_cls_token: + x = torch.cat([x_t[:, 0][:, None, :], x], dim=1) + + return x + + +class FCUUp(BaseModule): + """Transformer patch embeddings -> CNN feature maps.""" + + def __init__(self, + in_channels, + out_channels, + up_stride, + with_cls_token=True, + norm_cfg=dict(type='BN', eps=1e-6), + act_cfg=dict(type='ReLU', inplace=True), + init_cfg=None): + super(FCUUp, self).__init__(init_cfg=init_cfg) + + self.up_stride = up_stride + self.with_cls_token = with_cls_token + + self.conv_project = nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.bn = build_norm_layer(norm_cfg, out_channels)[1] + self.act = build_activation_layer(act_cfg) + + def forward(self, x, H, W): + B, _, C = x.shape + # [N, 197, 384] -> [N, 196, 384] -> [N, 384, 196] -> [N, 384, 14, 14] + if self.with_cls_token: + x_r = x[:, 1:].transpose(1, 2).reshape(B, C, H, W) + else: + x_r = x.transpose(1, 2).reshape(B, C, H, W) + + x_r = self.act(self.bn(self.conv_project(x_r))) + + return F.interpolate( + x_r, size=(H * self.up_stride, W * self.up_stride)) + + +class ConvTransBlock(BaseModule): + """Basic module for Conformer. + + This module is a fusion of CNN block transformer encoder block. + + Args: + in_channels (int): The number of input channels in conv blocks. + out_channels (int): The number of output channels in conv blocks. + embed_dims (int): The embedding dimension in transformer blocks. + conv_stride (int): The stride of conv2d layers. Defaults to 1. + groups (int): The groups of conv blocks. Defaults to 1. + with_residual_conv (bool): Whether to add a conv-bn layer to the + identity connect in the conv block. Defaults to False. + down_stride (int): The stride of the downsample pooling layer. + Defaults to 4. + num_heads (int): The number of heads in transformer attention layers. + Defaults to 12. + mlp_ratio (float): The expansion ratio in transformer FFN module. + Defaults to 4. + qkv_bias (bool): Enable bias for qkv if True. Defaults to False. + with_cls_token (bool): Whether use class token or not. 
+ Defaults to True. + drop_rate (float): The dropout rate of the output projection and + FFN in the transformer block. Defaults to 0. + attn_drop_rate (float): The dropout rate after the attention + calculation in the transformer block. Defaults to 0. + drop_path_rate (bloat): The drop path rate in both the conv block + and the transformer block. Defaults to 0. + last_fusion (bool): Whether this block is the last stage. If so, + downsample the fusion feature map. + init_cfg (dict, optional): The extra config to initialize the module. + Defaults to None. + """ + + def __init__(self, + in_channels, + out_channels, + embed_dims, + conv_stride=1, + groups=1, + with_residual_conv=False, + down_stride=4, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + with_cls_token=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + last_fusion=False, + init_cfg=None): + super(ConvTransBlock, self).__init__(init_cfg=init_cfg) + expansion = 4 + self.cnn_block = ConvBlock( + in_channels=in_channels, + out_channels=out_channels, + with_residual_conv=with_residual_conv, + stride=conv_stride, + groups=groups) + + if last_fusion: + self.fusion_block = ConvBlock( + in_channels=out_channels, + out_channels=out_channels, + stride=2, + with_residual_conv=True, + groups=groups, + drop_path_rate=drop_path_rate) + else: + self.fusion_block = ConvBlock( + in_channels=out_channels, + out_channels=out_channels, + groups=groups, + drop_path_rate=drop_path_rate) + + self.squeeze_block = FCUDown( + in_channels=out_channels // expansion, + out_channels=embed_dims, + down_stride=down_stride, + with_cls_token=with_cls_token) + + self.expand_block = FCUUp( + in_channels=embed_dims, + out_channels=out_channels // expansion, + up_stride=down_stride, + with_cls_token=with_cls_token) + + self.trans_block = TransformerEncoderLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=int(embed_dims * mlp_ratio), + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + attn_drop_rate=attn_drop_rate, + qkv_bias=qkv_bias, + norm_cfg=dict(type='LN', eps=1e-6)) + + self.down_stride = down_stride + self.embed_dim = embed_dims + self.last_fusion = last_fusion + + def forward(self, cnn_input, trans_input): + x, x_conv2 = self.cnn_block(cnn_input, out_conv2=True) + + _, _, H, W = x_conv2.shape + + # Convert the feature map of conv2 to transformer embedding + # and concat with class token. + conv2_embedding = self.squeeze_block(x_conv2, trans_input) + + trans_output = self.trans_block(conv2_embedding + trans_input) + + # Convert the transformer output embedding to feature map + trans_features = self.expand_block(trans_output, H // self.down_stride, + W // self.down_stride) + x = self.fusion_block( + x, fusion_features=trans_features, out_conv2=False) + + return x, trans_output + + +@BACKBONES.register_module() +class Conformer(BaseBackbone): + """Conformer backbone. + + A PyTorch implementation of : `Conformer: Local Features Coupling Global + Representations for Visual Recognition `_ + + Args: + arch (str | dict): Conformer architecture. Defaults to 'tiny'. + patch_size (int): The patch size. Defaults to 16. + base_channels (int): The base number of channels in CNN network. + Defaults to 64. + mlp_ratio (float): The expansion ratio of FFN network in transformer + block. Defaults to 4. + with_cls_token (bool): Whether use class token or not. + Defaults to True. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + out_indices (Sequence | int): Output from which stages. 
+ Defaults to -1, means the last stage. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': 384, + 'channel_ratio': 1, + 'num_heads': 6, + 'depths': 12 + }), + **dict.fromkeys(['s', 'small'], + {'embed_dims': 384, + 'channel_ratio': 4, + 'num_heads': 6, + 'depths': 12 + }), + **dict.fromkeys(['b', 'base'], + {'embed_dims': 576, + 'channel_ratio': 6, + 'num_heads': 9, + 'depths': 12 + }), + } # yapf: disable + + _version = 1 + + def __init__(self, + arch='tiny', + patch_size=16, + base_channels=64, + mlp_ratio=4., + qkv_bias=True, + with_cls_token=True, + drop_path_rate=0., + norm_eval=True, + frozen_stages=0, + out_indices=-1, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'depths', 'num_heads', 'channel_ratio' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.num_features = self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.channel_ratio = self.arch_settings['channel_ratio'] + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.depths + index + 1 + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.norm_eval = norm_eval + self.frozen_stages = frozen_stages + + self.with_cls_token = with_cls_token + if self.with_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + # stochastic depth decay rule + self.trans_dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, self.depths) + ] + + # Stem stage: get the feature maps by conv block + self.conv1 = nn.Conv2d( + 3, 64, kernel_size=7, stride=2, padding=3, + bias=False) # 1 / 2 [112, 112] + self.bn1 = nn.BatchNorm2d(64) + self.act1 = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d( + kernel_size=3, stride=2, padding=1) # 1 / 4 [56, 56] + + assert patch_size % 16 == 0, 'The patch size of Conformer must ' \ + 'be divisible by 16.' 
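+        # The stem conv (stride 2) and the max-pooling (stride 2) above
+        # already downsample the input by 4, so the transformer patch conv
+        # below only needs a stride of ``patch_size // 4`` to reach a total
+        # downsampling of ``patch_size`` (e.g. stride 4 for 16x16 patches).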
+ trans_down_stride = patch_size // 4 + + # To solve the issue #680 + # Auto pad the feature map to be divisible by trans_down_stride + self.auto_pad = AdaptivePadding(trans_down_stride, trans_down_stride) + + # 1 stage + stage1_channels = int(base_channels * self.channel_ratio) + self.conv_1 = ConvBlock( + in_channels=64, + out_channels=stage1_channels, + with_residual_conv=True, + stride=1) + self.trans_patch_conv = nn.Conv2d( + 64, + self.embed_dims, + kernel_size=trans_down_stride, + stride=trans_down_stride, + padding=0) + + self.trans_1 = TransformerEncoderLayer( + embed_dims=self.embed_dims, + num_heads=self.num_heads, + feedforward_channels=int(self.embed_dims * mlp_ratio), + drop_path_rate=self.trans_dpr[0], + qkv_bias=qkv_bias, + norm_cfg=dict(type='LN', eps=1e-6)) + + # 2~4 stage + init_stage = 2 + fin_stage = self.depths // 3 + 1 + for i in range(init_stage, fin_stage): + self.add_module( + f'conv_trans_{i}', + ConvTransBlock( + in_channels=stage1_channels, + out_channels=stage1_channels, + embed_dims=self.embed_dims, + conv_stride=1, + with_residual_conv=False, + down_stride=trans_down_stride, + num_heads=self.num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path_rate=self.trans_dpr[i - 1], + with_cls_token=self.with_cls_token)) + + stage2_channels = int(base_channels * self.channel_ratio * 2) + # 5~8 stage + init_stage = fin_stage # 5 + fin_stage = fin_stage + self.depths // 3 # 9 + for i in range(init_stage, fin_stage): + if i == init_stage: + conv_stride = 2 + in_channels = stage1_channels + else: + conv_stride = 1 + in_channels = stage2_channels + + with_residual_conv = True if i == init_stage else False + self.add_module( + f'conv_trans_{i}', + ConvTransBlock( + in_channels=in_channels, + out_channels=stage2_channels, + embed_dims=self.embed_dims, + conv_stride=conv_stride, + with_residual_conv=with_residual_conv, + down_stride=trans_down_stride // 2, + num_heads=self.num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path_rate=self.trans_dpr[i - 1], + with_cls_token=self.with_cls_token)) + + stage3_channels = int(base_channels * self.channel_ratio * 2 * 2) + # 9~12 stage + init_stage = fin_stage # 9 + fin_stage = fin_stage + self.depths // 3 # 13 + for i in range(init_stage, fin_stage): + if i == init_stage: + conv_stride = 2 + in_channels = stage2_channels + with_residual_conv = True + else: + conv_stride = 1 + in_channels = stage3_channels + with_residual_conv = False + + last_fusion = (i == self.depths) + + self.add_module( + f'conv_trans_{i}', + ConvTransBlock( + in_channels=in_channels, + out_channels=stage3_channels, + embed_dims=self.embed_dims, + conv_stride=conv_stride, + with_residual_conv=with_residual_conv, + down_stride=trans_down_stride // 4, + num_heads=self.num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path_rate=self.trans_dpr[i - 1], + with_cls_token=self.with_cls_token, + last_fusion=last_fusion)) + self.fin_stage = fin_stage + + self.pooling = nn.AdaptiveAvgPool2d(1) + self.trans_norm = nn.LayerNorm(self.embed_dims) + + if self.with_cls_token: + trunc_normal_(self.cls_token, std=.02) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + 
nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def init_weights(self): + super(Conformer, self).init_weights() + logger = get_root_logger() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + else: + logger.info(f'No pre-trained weights for ' + f'{self.__class__.__name__}, ' + f'training start from scratch') + self.apply(self._init_weights) + + def forward(self, x): + output = [] + B = x.shape[0] + if self.with_cls_token: + cls_tokens = self.cls_token.expand(B, -1, -1) + + # stem + x_base = self.maxpool(self.act1(self.bn1(self.conv1(x)))) + x_base = self.auto_pad(x_base) + + # 1 stage [N, 64, 56, 56] -> [N, 128, 56, 56] + x = self.conv_1(x_base, out_conv2=False) + x_t = self.trans_patch_conv(x_base).flatten(2).transpose(1, 2) + if self.with_cls_token: + x_t = torch.cat([cls_tokens, x_t], dim=1) + x_t = self.trans_1(x_t) + + # 2 ~ final + for i in range(2, self.fin_stage): + stage = getattr(self, f'conv_trans_{i}') + x, x_t = stage(x, x_t) + if i in self.out_indices: + if self.with_cls_token: + output.append([ + self.pooling(x).flatten(1), + self.trans_norm(x_t)[:, 0] + ]) + else: + # if no class token, use the mean patch token + # as the transformer feature. + output.append([ + self.pooling(x).flatten(1), + self.trans_norm(x_t).mean(dim=1) + ]) + + return tuple(output) diff --git a/mmcls/models/backbones/convmixer.py b/mmcls/models/backbones/convmixer.py new file mode 100644 index 0000000..cb33fbf --- /dev/null +++ b/mmcls/models/backbones/convmixer.py @@ -0,0 +1,176 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import (Conv2dAdaptivePadding, build_activation_layer, + build_norm_layer) +from mmcv.utils import digit_version + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class Residual(nn.Module): + + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x): + return self.fn(x) + x + + +@BACKBONES.register_module() +class ConvMixer(BaseBackbone): + """ConvMixer. . + + A PyTorch implementation of : `Patches Are All You Need? + `_ + + Modified from the `official repo + `_ + and `timm + `_. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``ConvMixer.arch_settings``. And if dict, it + should include the following two keys: + + - embed_dims (int): The dimensions of patch embedding. + - depth (int): Number of repetitions of ConvMixer Layer. + - patch_size (int): The patch size. + - kernel_size (int): The kernel size of depthwise conv layers. + + Defaults to '768/32'. + in_channels (int): Number of input image channels. Defaults to 3. + patch_size (int): The size of one patch in the patch embed layer. + Defaults to 7. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='BN')``. + act_cfg (dict): The config dict for activation after each convolution. + Defaults to ``dict(type='GELU')``. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + init_cfg (dict, optional): Initialization config dict. 
+ """ + arch_settings = { + '768/32': { + 'embed_dims': 768, + 'depth': 32, + 'patch_size': 7, + 'kernel_size': 7 + }, + '1024/20': { + 'embed_dims': 1024, + 'depth': 20, + 'patch_size': 14, + 'kernel_size': 9 + }, + '1536/20': { + 'embed_dims': 1536, + 'depth': 20, + 'patch_size': 7, + 'kernel_size': 9 + }, + } + + def __init__(self, + arch='768/32', + in_channels=3, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='GELU'), + out_indices=-1, + frozen_stages=0, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + essential_keys = { + 'embed_dims', 'depth', 'patch_size', 'kernel_size' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + + self.embed_dims = arch['embed_dims'] + self.depth = arch['depth'] + self.patch_size = arch['patch_size'] + self.kernel_size = arch['kernel_size'] + self.act = build_activation_layer(act_cfg) + + # check out indices and frozen stages + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.depth + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # Set stem layers + self.stem = nn.Sequential( + nn.Conv2d( + in_channels, + self.embed_dims, + kernel_size=self.patch_size, + stride=self.patch_size), self.act, + build_norm_layer(norm_cfg, self.embed_dims)[1]) + + # Set conv2d according to torch version + convfunc = nn.Conv2d + if digit_version(torch.__version__) < digit_version('1.9.0'): + convfunc = Conv2dAdaptivePadding + + # Repetitions of ConvMixer Layer + self.stages = nn.Sequential(*[ + nn.Sequential( + Residual( + nn.Sequential( + convfunc( + self.embed_dims, + self.embed_dims, + self.kernel_size, + groups=self.embed_dims, + padding='same'), self.act, + build_norm_layer(norm_cfg, self.embed_dims)[1])), + nn.Conv2d(self.embed_dims, self.embed_dims, kernel_size=1), + self.act, + build_norm_layer(norm_cfg, self.embed_dims)[1]) + for _ in range(self.depth) + ]) + + self._freeze_stages() + + def forward(self, x): + x = self.stem(x) + outs = [] + for i, stage in enumerate(self.stages): + x = stage(x) + if i in self.out_indices: + outs.append(x) + + # x = self.pooling(x).flatten(1) + return tuple(outs) + + def train(self, mode=True): + super(ConvMixer, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + for i in range(self.frozen_stages): + stage = self.stages[i] + stage.eval() + for param in stage.parameters(): + param.requires_grad = False diff --git a/mmcls/models/backbones/convnext.py b/mmcls/models/backbones/convnext.py new file mode 100644 index 0000000..02f2687 --- /dev/null +++ b/mmcls/models/backbones/convnext.py @@ -0,0 +1,349 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
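+#
+# This file implements the ConvNeXt backbone. The core unit is
+# ``ConvNeXtBlock``: a 7x7 depthwise conv, a LayerNorm, and two pointwise
+# convs with a GELU in between; with ``linear_pw_conv=True`` the pointwise
+# convs run as ``nn.Linear`` on a channels-last layout, matching the
+# official implementation.
+#
+# A rough usage sketch (assuming the class is exported via ``mmcls.models``;
+# the shape assumes the default 'tiny' arch, a 224x224 input and the default
+# global average pooling before the final norm):
+#
+#   >>> import torch
+#   >>> from mmcls.models import ConvNeXt
+#   >>> feats = ConvNeXt(arch='tiny')(torch.rand(1, 3, 224, 224))
+#   >>> feats[-1].shape  # torch.Size([1, 768])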
+from functools import partial +from itertools import chain +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn.bricks import (NORM_LAYERS, DropPath, build_activation_layer, + build_norm_layer) +from mmcv.runner import BaseModule +from mmcv.runner.base_module import ModuleList, Sequential + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +@NORM_LAYERS.register_module('LN2d') +class LayerNorm2d(nn.LayerNorm): + """LayerNorm on channels for 2d images. + + Args: + num_channels (int): The number of channels of the input tensor. + eps (float): a value added to the denominator for numerical stability. + Defaults to 1e-5. + elementwise_affine (bool): a boolean value that when set to ``True``, + this module has learnable per-element affine parameters initialized + to ones (for weights) and zeros (for biases). Defaults to True. + """ + + def __init__(self, num_channels: int, **kwargs) -> None: + super().__init__(num_channels, **kwargs) + self.num_channels = self.normalized_shape[0] + + def forward(self, x): + assert x.dim() == 4, 'LayerNorm2d only supports inputs with shape ' \ + f'(N, C, H, W), but got tensor with shape {x.shape}' + return F.layer_norm( + x.permute(0, 2, 3, 1).contiguous(), self.normalized_shape, + self.weight, self.bias, self.eps).permute(0, 3, 1, 2).contiguous() + + +class ConvNeXtBlock(BaseModule): + """ConvNeXt Block. + + Args: + in_channels (int): The number of input channels. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='LN2d', eps=1e-6)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + mlp_ratio (float): The expansion ratio in both pointwise convolution. + Defaults to 4. + linear_pw_conv (bool): Whether to use linear layer to do pointwise + convolution. More details can be found in the note. + Defaults to True. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-6. + + Note: + There are two equivalent implementations: + + 1. DwConv -> LayerNorm -> 1x1 Conv -> GELU -> 1x1 Conv; + all outputs are in (N, C, H, W). + 2. DwConv -> LayerNorm -> Permute to (N, H, W, C) -> Linear -> GELU + -> Linear; Permute back + + As default, we use the second to align with the official repository. + And it may be slightly faster. + """ + + def __init__(self, + in_channels, + norm_cfg=dict(type='LN2d', eps=1e-6), + act_cfg=dict(type='GELU'), + mlp_ratio=4., + linear_pw_conv=True, + drop_path_rate=0., + layer_scale_init_value=1e-6, + with_cp=False): + super().__init__() + self.with_cp = with_cp + + self.depthwise_conv = nn.Conv2d( + in_channels, + in_channels, + kernel_size=7, + padding=3, + groups=in_channels) + + self.linear_pw_conv = linear_pw_conv + self.norm = build_norm_layer(norm_cfg, in_channels)[1] + + mid_channels = int(mlp_ratio * in_channels) + if self.linear_pw_conv: + # Use linear layer to do pointwise conv. + pw_conv = nn.Linear + else: + pw_conv = partial(nn.Conv2d, kernel_size=1) + + self.pointwise_conv1 = pw_conv(in_channels, mid_channels) + self.act = build_activation_layer(act_cfg) + self.pointwise_conv2 = pw_conv(mid_channels, in_channels) + + self.gamma = nn.Parameter( + layer_scale_init_value * torch.ones((in_channels)), + requires_grad=True) if layer_scale_init_value > 0 else None + + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + + def forward(self, x): + + def _inner_forward(x): + shortcut = x + x = self.depthwise_conv(x) + x = self.norm(x) + + if self.linear_pw_conv: + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + + x = self.pointwise_conv1(x) + x = self.act(x) + x = self.pointwise_conv2(x) + + if self.linear_pw_conv: + x = x.permute(0, 3, 1, 2) # permute back + + if self.gamma is not None: + x = x.mul(self.gamma.view(1, -1, 1, 1)) + + x = shortcut + self.drop_path(x) + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +@BACKBONES.register_module() +class ConvNeXt(BaseBackbone): + """ConvNeXt. + + A PyTorch implementation of : `A ConvNet for the 2020s + `_ + + Modified from the `official repo + `_ + and `timm + `_. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``ConvNeXt.arch_settings``. And if dict, it + should include the following two keys: + + - depths (list[int]): Number of blocks at each stage. + - channels (list[int]): The number of channels at each stage. + + Defaults to 'tiny'. + in_channels (int): Number of input image channels. Defaults to 3. + stem_patch_size (int): The size of one patch in the stem layer. + Defaults to 4. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='LN2d', eps=1e-6)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + linear_pw_conv (bool): Whether to use linear layer to do pointwise + convolution. Defaults to True. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-6. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + gap_before_final_norm (bool): Whether to globally average the feature + map before the final norm layer. In the official repo, it's only + used in classification task. Defaults to True. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict, optional): Initialization config dict + """ # noqa: E501 + arch_settings = { + 'tiny': { + 'depths': [3, 3, 9, 3], + 'channels': [96, 192, 384, 768] + }, + 'small': { + 'depths': [3, 3, 27, 3], + 'channels': [96, 192, 384, 768] + }, + 'base': { + 'depths': [3, 3, 27, 3], + 'channels': [128, 256, 512, 1024] + }, + 'large': { + 'depths': [3, 3, 27, 3], + 'channels': [192, 384, 768, 1536] + }, + 'xlarge': { + 'depths': [3, 3, 27, 3], + 'channels': [256, 512, 1024, 2048] + }, + } + + def __init__(self, + arch='tiny', + in_channels=3, + stem_patch_size=4, + norm_cfg=dict(type='LN2d', eps=1e-6), + act_cfg=dict(type='GELU'), + linear_pw_conv=True, + drop_path_rate=0., + layer_scale_init_value=1e-6, + out_indices=-1, + frozen_stages=0, + gap_before_final_norm=True, + with_cp=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + assert 'depths' in arch and 'channels' in arch, \ + f'The arch dict must have "depths" and "channels", ' \ + f'but got {list(arch.keys())}.' 
+ + self.depths = arch['depths'] + self.channels = arch['channels'] + assert (isinstance(self.depths, Sequence) + and isinstance(self.channels, Sequence) + and len(self.depths) == len(self.channels)), \ + f'The "depths" ({self.depths}) and "channels" ({self.channels}) ' \ + 'should be both sequence with the same length.' + + self.num_stages = len(self.depths) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 4 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.frozen_stages = frozen_stages + self.gap_before_final_norm = gap_before_final_norm + + # stochastic depth decay rule + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(self.depths)) + ] + block_idx = 0 + + # 4 downsample layers between stages, including the stem layer. + self.downsample_layers = ModuleList() + stem = nn.Sequential( + nn.Conv2d( + in_channels, + self.channels[0], + kernel_size=stem_patch_size, + stride=stem_patch_size), + build_norm_layer(norm_cfg, self.channels[0])[1], + ) + self.downsample_layers.append(stem) + + # 4 feature resolution stages, each consisting of multiple residual + # blocks + self.stages = nn.ModuleList() + + for i in range(self.num_stages): + depth = self.depths[i] + channels = self.channels[i] + + if i >= 1: + downsample_layer = nn.Sequential( + LayerNorm2d(self.channels[i - 1]), + nn.Conv2d( + self.channels[i - 1], + channels, + kernel_size=2, + stride=2), + ) + self.downsample_layers.append(downsample_layer) + + stage = Sequential(*[ + ConvNeXtBlock( + in_channels=channels, + drop_path_rate=dpr[block_idx + j], + norm_cfg=norm_cfg, + act_cfg=act_cfg, + linear_pw_conv=linear_pw_conv, + layer_scale_init_value=layer_scale_init_value, + with_cp=with_cp) for j in range(depth) + ]) + block_idx += depth + + self.stages.append(stage) + + if i in self.out_indices: + norm_layer = build_norm_layer(norm_cfg, channels)[1] + self.add_module(f'norm{i}', norm_layer) + + self._freeze_stages() + + def forward(self, x): + outs = [] + for i, stage in enumerate(self.stages): + x = self.downsample_layers[i](x) + x = stage(x) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + if self.gap_before_final_norm: + gap = x.mean([-2, -1], keepdim=True) + outs.append(norm_layer(gap).flatten(1)) + else: + # The output of LayerNorm2d may be discontiguous, which + # may cause some problem in the downstream tasks + outs.append(norm_layer(x).contiguous()) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + downsample_layer = self.downsample_layers[i] + stage = self.stages[i] + downsample_layer.eval() + stage.eval() + for param in chain(downsample_layer.parameters(), + stage.parameters()): + param.requires_grad = False + + def train(self, mode=True): + super(ConvNeXt, self).train(mode) + self._freeze_stages() diff --git a/mmcls/models/backbones/cspnet.py b/mmcls/models/backbones/cspnet.py new file mode 100644 index 0000000..70aff4c --- /dev/null +++ b/mmcls/models/backbones/cspnet.py @@ -0,0 +1,679 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
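+#
+# This file implements CSPNet-style backbones (CSPDarkNet, CSPResNet and
+# CSPResNeXt) on top of the generic ``CSPNet`` class. The core idea,
+# realized in ``CSPStage``, is to split the expanded feature map into two
+# halves along the channel dimension, run the stage blocks on one half only,
+# and concatenate the result with the untouched half before a final
+# transition conv.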
+import math +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.cnn.bricks import DropPath +from mmcv.runner import BaseModule, Sequential +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import to_ntuple +from .resnet import Bottleneck as ResNetBottleneck +from .resnext import Bottleneck as ResNeXtBottleneck + +eps = 1.0e-5 + + +class DarknetBottleneck(BaseModule): + """The basic bottleneck block used in Darknet. Each DarknetBottleneck + consists of two ConvModules and the input is added to the final output. + Each ConvModule is composed of Conv, BN, and LeakyReLU. The first convLayer + has filter size of 1x1 and the second one has the filter size of 3x3. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. + Defaults to 4. + add_identity (bool): Whether to add identity to the out. + Defaults to True. + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False. + conv_cfg (dict): Config dict for convolution layer. Defaults to None, + which means using conv2d. + drop_path_rate (float): The ratio of the drop path layer. Default: 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN', eps=1e-5)``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='Swish')``. + """ + + def __init__(self, + in_channels, + out_channels, + expansion=2, + add_identity=True, + use_depthwise=False, + conv_cfg=None, + drop_path_rate=0, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + init_cfg=None): + super().__init__(init_cfg) + hidden_channels = int(out_channels / expansion) + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + self.conv1 = ConvModule( + in_channels, + hidden_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = conv( + hidden_channels, + out_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.add_identity = \ + add_identity and in_channels == out_channels + + self.drop_path = DropPath(drop_prob=drop_path_rate + ) if drop_path_rate > eps else nn.Identity() + + def forward(self, x): + identity = x + out = self.conv1(x) + out = self.conv2(out) + out = self.drop_path(out) + + if self.add_identity: + return out + identity + else: + return out + + +class CSPStage(BaseModule): + """Cross Stage Partial Stage. + + .. code:: text + + Downsample Convolution (optional) + | + | + Expand Convolution + | + | + Split to xa, xb + | \ + | \ + | blocks(xb) + | / + | / transition + | / + Concat xa, blocks(xb) + | + Transition Convolution + + Args: + block_fn (nn.module): The basic block function in the Stage. + in_channels (int): The input channels of the CSP layer. + out_channels (int): The output channels of the CSP layer. + has_downsampler (bool): Whether to add a downsampler in the stage. + Default: False. + down_growth (bool): Whether to expand the channels in the + downsampler layer of the stage. Default: False. + expand_ratio (float): The expand ratio to adjust the number of + channels of the expand conv layer. Default: 0.5 + bottle_ratio (float): Ratio to adjust the number of channels of the + hidden layer. 
Default: 0.5 + block_dpr (float): The ratio of the drop path layer in the + blocks of the stage. Default: 0. + num_blocks (int): Number of blocks. Default: 1 + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN') + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', inplace=True) + """ + + def __init__(self, + block_fn, + in_channels, + out_channels, + has_downsampler=True, + down_growth=False, + expand_ratio=0.5, + bottle_ratio=2, + num_blocks=1, + block_dpr=0, + block_args={}, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + init_cfg=None): + super().__init__(init_cfg) + # grow downsample channels to output channels + down_channels = out_channels if down_growth else in_channels + block_dpr = to_ntuple(num_blocks)(block_dpr) + + if has_downsampler: + self.downsample_conv = ConvModule( + in_channels=in_channels, + out_channels=down_channels, + kernel_size=3, + stride=2, + padding=1, + groups=32 if block_fn is ResNeXtBottleneck else 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.downsample_conv = nn.Identity() + + exp_channels = int(down_channels * expand_ratio) + self.expand_conv = ConvModule( + in_channels=down_channels, + out_channels=exp_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg if block_fn is DarknetBottleneck else None) + + assert exp_channels % 2 == 0, \ + 'The channel number before blocks must be divisible by 2.' + block_channels = exp_channels // 2 + blocks = [] + for i in range(num_blocks): + block_cfg = dict( + in_channels=block_channels, + out_channels=block_channels, + expansion=bottle_ratio, + drop_path_rate=block_dpr[i], + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + **block_args) + blocks.append(block_fn(**block_cfg)) + self.blocks = Sequential(*blocks) + self.atfer_blocks_conv = ConvModule( + block_channels, + block_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.final_conv = ConvModule( + 2 * block_channels, + out_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + x = self.downsample_conv(x) + x = self.expand_conv(x) + + split = x.shape[1] // 2 + xa, xb = x[:, :split], x[:, split:] + + xb = self.blocks(xb) + xb = self.atfer_blocks_conv(xb).contiguous() + + x_final = torch.cat((xa, xb), dim=1) + return self.final_conv(x_final) + + +class CSPNet(BaseModule): + """The abstract CSP Network class. + + A Pytorch implementation of `CSPNet: A New Backbone that can Enhance + Learning Capability of CNN `_ + + This class is an abstract class because the Cross Stage Partial Network + (CSPNet) is a kind of universal network structure, and you + network block to implement networks like CSPResNet, CSPResNeXt and + CSPDarkNet. + + Args: + arch (dict): The architecture of the CSPNet. + It should have the following keys: + + - block_fn (Callable): A function or class to return a block + module, and it should accept at least ``in_channels``, + ``out_channels``, ``expansion``, ``drop_path_rate``, ``norm_cfg`` + and ``act_cfg``. + - in_channels (Tuple[int]): The number of input channels of each + stage. + - out_channels (Tuple[int]): The number of output channels of each + stage. + - num_blocks (Tuple[int]): The number of blocks in each stage. + - expansion_ratio (float | Tuple[float]): The expansion ratio in + the expand convolution of each stage. Defaults to 0.5. 
+ - bottle_ratio (float | Tuple[float]): The expansion ratio of + blocks in each stage. Defaults to 2. + - has_downsampler (bool | Tuple[bool]): Whether to add a + downsample convolution in each stage. Defaults to True + - down_growth (bool | Tuple[bool]): Whether to expand the channels + in the downsampler layer of each stage. Defaults to False. + - block_args (dict | Tuple[dict], optional): The extra arguments to + the blocks in each stage. Defaults to None. + + stem_fn (Callable): A function or class to return a stem module. + And it should accept ``in_channels``. + in_channels (int): Number of input image channels. Defaults to 3. + out_indices (int | Sequence[int]): Output from which stages. + Defaults to -1, which means the last stage. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + conv_cfg (dict, optional): The config dict for conv layers in blocks. + Defaults to None, which means use Conv2d. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='BN', eps=1e-5)``. + act_cfg (dict): The config dict for activation functions. + Defaults to ``dict(type='LeakyReLU', inplace=True)``. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + init_cfg (dict, optional): The initialization settings. + Defaults to ``dict(type='Kaiming', layer='Conv2d'))``. + + Example: + >>> from functools import partial + >>> import torch + >>> import torch.nn as nn + >>> from mmcls.models import CSPNet + >>> from mmcls.models.backbones.resnet import Bottleneck + >>> + >>> # A simple example to build CSPNet. + >>> arch = dict( + ... block_fn=Bottleneck, + ... in_channels=[32, 64], + ... out_channels=[64, 128], + ... num_blocks=[3, 4] + ... ) + >>> stem_fn = partial(nn.Conv2d, out_channels=32, kernel_size=3) + >>> model = CSPNet(arch=arch, stem_fn=stem_fn, out_indices=(0, 1)) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> outs = model(inputs) + >>> for out in outs: + ... print(out.shape) + ... + (1, 64, 111, 111) + (1, 128, 56, 56) + """ + + def __init__(self, + arch, + stem_fn, + in_channels=3, + out_indices=-1, + frozen_stages=-1, + drop_path_rate=0., + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + norm_eval=False, + init_cfg=dict(type='Kaiming', layer='Conv2d')): + super().__init__(init_cfg=init_cfg) + self.arch = self.expand_arch(arch) + self.num_stages = len(self.arch['in_channels']) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + if frozen_stages not in range(-1, self.num_stages): + raise ValueError('frozen_stages must be in range(-1, ' + f'{self.num_stages}). 
But received ' + f'{frozen_stages}') + self.frozen_stages = frozen_stages + + self.stem = stem_fn(in_channels) + + stages = [] + depths = self.arch['num_blocks'] + dpr = torch.linspace(0, drop_path_rate, sum(depths)).split(depths) + + for i in range(self.num_stages): + stage_cfg = {k: v[i] for k, v in self.arch.items()} + csp_stage = CSPStage( + **stage_cfg, + block_dpr=dpr[i].tolist(), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + stages.append(csp_stage) + self.stages = Sequential(*stages) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + out_indices = list(out_indices) + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = len(self.stages) + index + assert 0 <= out_indices[i] <= len(self.stages), \ + f'Invalid out_indices {index}.' + self.out_indices = out_indices + + @staticmethod + def expand_arch(arch): + num_stages = len(arch['in_channels']) + + def to_tuple(x, name=''): + if isinstance(x, (list, tuple)): + assert len(x) == num_stages, \ + f'The length of {name} ({len(x)}) does not ' \ + f'equals to the number of stages ({num_stages})' + return tuple(x) + else: + return (x, ) * num_stages + + full_arch = {k: to_tuple(v, k) for k, v in arch.items()} + if 'block_args' not in full_arch: + full_arch['block_args'] = to_tuple({}) + return full_arch + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages + 1): + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(CSPNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def forward(self, x): + outs = [] + + x = self.stem(x) + for i, stage in enumerate(self.stages): + x = stage(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + +@BACKBONES.register_module() +class CSPDarkNet(CSPNet): + """CSP-Darknet backbone used in YOLOv4. + + Args: + depth (int): Depth of CSP-Darknet. Default: 53. + in_channels (int): Number of input image channels. Default: 3. + out_indices (Sequence[int]): Output from which stages. + Default: (3, ). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Default: -1. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Example: + >>> from mmcls.models import CSPDarkNet + >>> import torch + >>> model = CSPDarkNet(depth=53, out_indices=(0, 1, 2, 3, 4)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... 
+ (1, 64, 208, 208) + (1, 128, 104, 104) + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + arch_settings = { + 53: + dict( + block_fn=DarknetBottleneck, + in_channels=(32, 64, 128, 256, 512), + out_channels=(64, 128, 256, 512, 1024), + num_blocks=(1, 2, 8, 8, 4), + expand_ratio=(2, 1, 1, 1, 1), + bottle_ratio=(2, 1, 1, 1, 1), + has_downsampler=True, + down_growth=True, + ), + } + + def __init__(self, + depth, + in_channels=3, + out_indices=(4, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + norm_eval=False, + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu')): + + assert depth in self.arch_settings, 'depth must be one of ' \ + f'{list(self.arch_settings.keys())}, but get {depth}.' + + super().__init__( + arch=self.arch_settings[depth], + stem_fn=self._make_stem_layer, + in_channels=in_channels, + out_indices=out_indices, + frozen_stages=frozen_stages, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def _make_stem_layer(self, in_channels): + """using a stride=1 conv as the stem in CSPDarknet.""" + # `stem_channels` equals to the `in_channels` in the first stage. + stem_channels = self.arch['in_channels'][0] + stem = ConvModule( + in_channels=in_channels, + out_channels=stem_channels, + kernel_size=3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + return stem + + +@BACKBONES.register_module() +class CSPResNet(CSPNet): + """CSP-ResNet backbone. + + Args: + depth (int): Depth of CSP-ResNet. Default: 50. + out_indices (Sequence[int]): Output from which stages. + Default: (4, ). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Default: -1. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Example: + >>> from mmcls.models import CSPResNet + >>> import torch + >>> model = CSPResNet(depth=50, out_indices=(0, 1, 2, 3)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 128, 104, 104) + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + arch_settings = { + 50: + dict( + block_fn=ResNetBottleneck, + in_channels=(64, 128, 256, 512), + out_channels=(128, 256, 512, 1024), + num_blocks=(3, 3, 5, 2), + expand_ratio=4, + bottle_ratio=2, + has_downsampler=(False, True, True, True), + down_growth=False), + } + + def __init__(self, + depth, + in_channels=3, + out_indices=(3, ), + frozen_stages=-1, + deep_stem=False, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + norm_eval=False, + init_cfg=dict(type='Kaiming', layer='Conv2d')): + assert depth in self.arch_settings, 'depth must be one of ' \ + f'{list(self.arch_settings.keys())}, but get {depth}.' 
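+        # `deep_stem` replaces the default 7x7 stride-2 conv + max-pool stem
+        # with three stacked 3x3 convs (see `_make_stem_layer` below).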
+ self.deep_stem = deep_stem + + super().__init__( + arch=self.arch_settings[depth], + stem_fn=self._make_stem_layer, + in_channels=in_channels, + out_indices=out_indices, + frozen_stages=frozen_stages, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def _make_stem_layer(self, in_channels): + # `stem_channels` equals to the `in_channels` in the first stage. + stem_channels = self.arch['in_channels'][0] + if self.deep_stem: + stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + else: + stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + return stem + + +@BACKBONES.register_module() +class CSPResNeXt(CSPResNet): + """CSP-ResNeXt backbone. + + Args: + depth (int): Depth of CSP-ResNeXt. Default: 50. + out_indices (Sequence[int]): Output from which stages. + Default: (4, ). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Default: -1. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Example: + >>> from mmcls.models import CSPResNeXt + >>> import torch + >>> model = CSPResNeXt(depth=50, out_indices=(0, 1, 2, 3)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 256, 56, 56) + (1, 512, 28, 28) + (1, 1024, 14, 14) + (1, 2048, 7, 7) + """ + arch_settings = { + 50: + dict( + block_fn=ResNeXtBottleneck, + in_channels=(64, 256, 512, 1024), + out_channels=(256, 512, 1024, 2048), + num_blocks=(3, 3, 5, 2), + expand_ratio=(4, 2, 2, 2), + bottle_ratio=4, + has_downsampler=(False, True, True, True), + down_growth=False, + # the base_channels is changed from 64 to 32 in CSPNet + block_args=dict(base_channels=32), + ), + } + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) diff --git a/mmcls/models/backbones/deit.py b/mmcls/models/backbones/deit.py new file mode 100644 index 0000000..56e74e0 --- /dev/null +++ b/mmcls/models/backbones/deit.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
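+#
+# DeiT reuses the VisionTransformer implementation and only adds a learnable
+# distillation token next to the class token; both tokens are prepended to
+# the patch tokens and returned separately by ``forward``.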
+import torch +import torch.nn as nn +from mmcv.cnn.utils.weight_init import trunc_normal_ + +from ..builder import BACKBONES +from .vision_transformer import VisionTransformer + + +@BACKBONES.register_module() +class DistilledVisionTransformer(VisionTransformer): + """Distilled Vision Transformer. + + A PyTorch implement of : `Training data-efficient image transformers & + distillation through attention `_ + + Args: + arch (str | dict): Vision Transformer architecture. If use string, + choose from 'small', 'base', 'large', 'deit-tiny', 'deit-small' + and 'deit-base'. If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + + Defaults to 'deit-base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + output_cls_token (bool): Whether output the cls_token. If set True, + ``with_cls_token`` must be True. Defaults to True. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
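+
+    Example:
+        >>> # A rough usage sketch. The shapes below assume the 'deit-base'
+        >>> # settings (768-dim embedding, 16x16 patches) with a 224x224
+        >>> # input; they are illustrative, not verified outputs.
+        >>> import torch
+        >>> from mmcls.models import DistilledVisionTransformer
+        >>> model = DistilledVisionTransformer(arch='deit-base')
+        >>> outs = model(torch.rand(1, 3, 224, 224))
+        >>> patch_token, cls_token, dist_token = outs[-1]
+        >>> # patch_token: (1, 768, 14, 14)
+        >>> # cls_token:   (1, 768), dist_token: (1, 768)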
+ """ + num_extra_tokens = 2 # cls_token, dist_token + + def __init__(self, arch='deit-base', *args, **kwargs): + super(DistilledVisionTransformer, self).__init__( + arch=arch, *args, **kwargs) + self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + dist_token = self.dist_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, dist_token, x), dim=1) + x = x + self.resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + if not self.with_cls_token: + # Remove class token for transformer encoder input + x = x[:, 2:] + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.norm1(x) + + if i in self.out_indices: + B, _, C = x.shape + if self.with_cls_token: + patch_token = x[:, 2:].reshape(B, *patch_resolution, C) + patch_token = patch_token.permute(0, 3, 1, 2) + cls_token = x[:, 0] + dist_token = x[:, 1] + else: + patch_token = x.reshape(B, *patch_resolution, C) + patch_token = patch_token.permute(0, 3, 1, 2) + cls_token = None + dist_token = None + if self.output_cls_token: + out = [patch_token, cls_token, dist_token] + else: + out = patch_token + outs.append(out) + + return tuple(outs) + + def init_weights(self): + super(DistilledVisionTransformer, self).init_weights() + + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + trunc_normal_(self.dist_token, std=0.02) diff --git a/mmcls/models/backbones/densenet.py b/mmcls/models/backbones/densenet.py new file mode 100644 index 0000000..9947fbf --- /dev/null +++ b/mmcls/models/backbones/densenet.py @@ -0,0 +1,332 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
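+#
+# DenseNet grows features by concatenation: every ``DenseLayer`` receives
+# the list of all previous feature maps in its block and contributes
+# ``growth_rate`` new channels, while ``DenseTransition`` layers compress
+# the channels and halve the spatial resolution between blocks.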
+import math +from itertools import chain +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn.bricks import build_activation_layer, build_norm_layer +from torch.jit.annotations import List + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class DenseLayer(BaseBackbone): + """DenseBlock layers.""" + + def __init__(self, + in_channels, + growth_rate, + bn_size, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_rate=0., + memory_efficient=False): + super(DenseLayer, self).__init__() + + self.norm1 = build_norm_layer(norm_cfg, in_channels)[1] + self.conv1 = nn.Conv2d( + in_channels, + bn_size * growth_rate, + kernel_size=1, + stride=1, + bias=False) + self.act = build_activation_layer(act_cfg) + self.norm2 = build_norm_layer(norm_cfg, bn_size * growth_rate)[1] + self.conv2 = nn.Conv2d( + bn_size * growth_rate, + growth_rate, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.drop_rate = float(drop_rate) + self.memory_efficient = memory_efficient + + def bottleneck_fn(self, xs): + # type: (List[torch.Tensor]) -> torch.Tensor + concated_features = torch.cat(xs, 1) + bottleneck_output = self.conv1( + self.act(self.norm1(concated_features))) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, x): + # type: (List[torch.Tensor]) -> bool + for tensor in x: + if tensor.requires_grad: + return True + return False + + # This decorator indicates to the compiler that a function or method + # should be ignored and replaced with the raising of an exception. + # Here this function is incompatible with torchscript. + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, x): + # type: (List[torch.Tensor]) -> torch.Tensor + def closure(*xs): + return self.bottleneck_fn(xs) + + # Here use torch.utils.checkpoint to rerun a forward-pass during + # backward in bottleneck to save memories. 
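+            # The bottleneck activations are not kept for the backward pass;
+            # they are recomputed inside the checkpoint, trading extra
+            # compute for a smaller memory footprint.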
+ return cp.checkpoint(closure, *x) + + def forward(self, x): # noqa: F811 + # type: (List[torch.Tensor]) -> torch.Tensor + # assert input features is a list of Tensor + assert isinstance(x, list) + + if self.memory_efficient and self.any_requires_grad(x): + if torch.jit.is_scripting(): + raise Exception('Memory Efficient not supported in JIT') + bottleneck_output = self.call_checkpoint_bottleneck(x) + else: + bottleneck_output = self.bottleneck_fn(x) + + new_features = self.conv2(self.act(self.norm2(bottleneck_output))) + if self.drop_rate > 0: + new_features = F.dropout( + new_features, p=self.drop_rate, training=self.training) + return new_features + + +class DenseBlock(nn.Module): + """DenseNet Blocks.""" + + def __init__(self, + num_layers, + in_channels, + bn_size, + growth_rate, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_rate=0., + memory_efficient=False): + super(DenseBlock, self).__init__() + self.block = nn.ModuleList([ + DenseLayer( + in_channels + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop_rate=drop_rate, + memory_efficient=memory_efficient) for i in range(num_layers) + ]) + + def forward(self, init_features): + features = [init_features] + for layer in self.block: + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class DenseTransition(nn.Sequential): + """DenseNet Transition Layers.""" + + def __init__(self, + in_channels, + out_channels, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU')): + super(DenseTransition, self).__init__() + self.add_module('norm', build_norm_layer(norm_cfg, in_channels)[1]) + self.add_module('act', build_activation_layer(act_cfg)) + self.add_module( + 'conv', + nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, + bias=False)) + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +@BACKBONES.register_module() +class DenseNet(BaseBackbone): + """DenseNet. + + A PyTorch implementation of : `Densely Connected Convolutional Networks + `_ + + Modified from the `official repo + `_ + and `pytorch + `_. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``DenseNet.arch_settings``. And if dict, it + should include the following two keys: + + - growth_rate (int): Each layer of DenseBlock produce `k` feature + maps. Here refers `k` as the growth rate of the network. + - depths (list[int]): Number of repeated layers in each DenseBlock. + - init_channels (int): The output channels of stem layers. + + Defaults to '121'. + in_channels (int): Number of input image channels. Defaults to 3. + bn_size (int): Refers to channel expansion parameter of 1x1 + convolution layer. Defaults to 4. + drop_rate (float): Drop rate of Dropout Layer. Defaults to 0. + compression_factor (float): The reduction rate of transition layers. + Defaults to 0.5. + memory_efficient (bool): If True, uses checkpointing. Much more memory + efficient, but slower. Defaults to False. + See `"paper" `_. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='BN')``. + act_cfg (dict): The config dict for activation after each convolution. + Defaults to ``dict(type='ReLU')``. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + init_cfg (dict, optional): Initialization config dict. 
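+
+    Example:
+        >>> # A rough usage sketch, assuming the backbone is exported via
+        >>> # ``mmcls.models``; the shape assumes the default '121' arch and
+        >>> # a 224x224 input.
+        >>> import torch
+        >>> from mmcls.models import DenseNet
+        >>> model = DenseNet(arch='121')
+        >>> outs = model(torch.rand(1, 3, 224, 224))
+        >>> print(tuple(outs[-1].shape))
+        (1, 1024, 7, 7)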
+ """ + arch_settings = { + '121': { + 'growth_rate': 32, + 'depths': [6, 12, 24, 16], + 'init_channels': 64, + }, + '169': { + 'growth_rate': 32, + 'depths': [6, 12, 32, 32], + 'init_channels': 64, + }, + '201': { + 'growth_rate': 32, + 'depths': [6, 12, 48, 32], + 'init_channels': 64, + }, + '161': { + 'growth_rate': 48, + 'depths': [6, 12, 36, 24], + 'init_channels': 96, + }, + } + + def __init__(self, + arch='121', + in_channels=3, + bn_size=4, + drop_rate=0, + compression_factor=0.5, + memory_efficient=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + out_indices=-1, + frozen_stages=0, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + essential_keys = {'growth_rate', 'depths', 'init_channels'} + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + + self.growth_rate = arch['growth_rate'] + self.depths = arch['depths'] + self.init_channels = arch['init_channels'] + self.act = build_activation_layer(act_cfg) + + self.num_stages = len(self.depths) + + # check out indices and frozen stages + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_stages + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # Set stem layers + self.stem = nn.Sequential( + nn.Conv2d( + in_channels, + self.init_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False), + build_norm_layer(norm_cfg, self.init_channels)[1], self.act, + nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + + # Repetitions of DenseNet Blocks + self.stages = nn.ModuleList() + self.transitions = nn.ModuleList() + + channels = self.init_channels + for i in range(self.num_stages): + depth = self.depths[i] + + stage = DenseBlock( + num_layers=depth, + in_channels=channels, + bn_size=bn_size, + growth_rate=self.growth_rate, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop_rate=drop_rate, + memory_efficient=memory_efficient) + self.stages.append(stage) + channels += depth * self.growth_rate + + if i != self.num_stages - 1: + transition = DenseTransition( + in_channels=channels, + out_channels=math.floor(channels * compression_factor), + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + channels = math.floor(channels * compression_factor) + else: + # Final layers after dense block is just bn with act. + # Unlike the paper, the original repo also put this in + # transition layer, whereas torchvision take this out. + # We reckon this as transition layer here. 
+ transition = nn.Sequential( + build_norm_layer(norm_cfg, channels)[1], + self.act, + ) + self.transitions.append(transition) + + self._freeze_stages() + + def forward(self, x): + x = self.stem(x) + outs = [] + for i in range(self.num_stages): + x = self.stages[i](x) + x = self.transitions[i](x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + downsample_layer = self.transitions[i] + stage = self.stages[i] + downsample_layer.eval() + stage.eval() + for param in chain(downsample_layer.parameters(), + stage.parameters()): + param.requires_grad = False + + def train(self, mode=True): + super(DenseNet, self).train(mode) + self._freeze_stages() diff --git a/mmcls/models/backbones/efficientformer.py b/mmcls/models/backbones/efficientformer.py new file mode 100644 index 0000000..173444f --- /dev/null +++ b/mmcls/models/backbones/efficientformer.py @@ -0,0 +1,606 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +from typing import Optional, Sequence + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import (ConvModule, DropPath, build_activation_layer, + build_norm_layer) +from mmcv.runner import BaseModule, ModuleList, Sequential + +from ..builder import BACKBONES +from ..utils import LayerScale +from .base_backbone import BaseBackbone +from .poolformer import Pooling + + +class AttentionWithBias(BaseModule): + """Multi-head Attention Module with attention_bias. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. Defaults to 8. + key_dim (int): The dimension of q, k. Defaults to 32. + attn_ratio (float): The dimension of v equals to + ``key_dim * attn_ratio``. Defaults to 4. + resolution (int): The height and width of attention_bias. + Defaults to 7. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads=8, + key_dim=32, + attn_ratio=4., + resolution=7, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.attn_ratio = attn_ratio + self.key_dim = key_dim + self.nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + h = self.dh + self.nh_kd * 2 + self.qkv = nn.Linear(embed_dims, h) + self.proj = nn.Linear(self.dh, embed_dims) + + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter( + torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', + torch.LongTensor(idxs).view(N, N)) + + @torch.no_grad() + def train(self, mode=True): + """change the mode of model.""" + super().train(mode) + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = self.attention_biases[:, self.attention_bias_idxs] + + def forward(self, x): + """forward function. 
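+
+        The attention logits are ``q @ k^T`` scaled by ``key_dim ** -0.5``
+        plus a learned relative-position bias that is looked up through
+        ``attention_bias_idxs``.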
+ + Args: + x (tensor): input features with shape of (B, N, C) + """ + B, N, _ = x.shape + qkv = self.qkv(x) + qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + q, k, v = qkv.split([self.key_dim, self.key_dim, self.d], dim=-1) + + attn = ((q @ k.transpose(-2, -1)) * self.scale + + (self.attention_biases[:, self.attention_bias_idxs] + if self.training else self.ab)) + attn = attn.softmax(dim=-1) + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class Flat(nn.Module): + """Flat the input from (B, C, H, W) to (B, H*W, C).""" + + def __init__(self, ): + super().__init__() + + def forward(self, x: torch.Tensor): + x = x.flatten(2).transpose(1, 2) + return x + + +class LinearMlp(BaseModule): + """Mlp implemented with linear. + + The shape of input and output tensor are (B, N, C). + + Args: + in_features (int): Dimension of input features. + hidden_features (int): Dimension of hidden features. + out_features (int): Dimension of output features. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0.0. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_cfg=dict(type='GELU'), + drop=0., + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_features = out_features or in_features + hidden_features = hidden_features or in_features + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = build_activation_layer(act_cfg) + self.drop1 = nn.Dropout(drop) + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop2 = nn.Dropout(drop) + + def forward(self, x): + """ + Args: + x (torch.Tensor): input tensor with shape (B, N, C). + + Returns: + torch.Tensor: output tensor with shape (B, N, C). + """ + x = self.drop1(self.act(self.fc1(x))) + x = self.drop2(self.fc2(x)) + return x + + +class ConvMlp(BaseModule): + """Mlp implemented with 1*1 convolutions. + + Args: + in_features (int): Dimension of input features. + hidden_features (int): Dimension of hidden features. + out_features (int): Dimension of output features. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0.0. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='GELU'), + drop=0., + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, 1) + self.act = build_activation_layer(act_cfg) + self.fc2 = nn.Conv2d(hidden_features, out_features, 1) + self.norm1 = build_norm_layer(norm_cfg, hidden_features)[1] + self.norm2 = build_norm_layer(norm_cfg, out_features)[1] + + self.drop = nn.Dropout(drop) + + def forward(self, x): + """ + Args: + x (torch.Tensor): input tensor with shape (B, C, H, W). + + Returns: + torch.Tensor: output tensor with shape (B, C, H, W). 
+ """ + + x = self.act(self.norm1(self.fc1(x))) + x = self.drop(x) + x = self.norm2(self.fc2(x)) + x = self.drop(x) + return x + + +class Meta3D(BaseModule): + """Meta Former block using 3 dimensions inputs, ``torch.Tensor`` with shape + (B, N, C).""" + + def __init__(self, + dim, + mlp_ratio=4., + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + drop=0., + drop_path=0., + use_layer_scale=True, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.norm1 = build_norm_layer(norm_cfg, dim)[1] + self.token_mixer = AttentionWithBias(dim) + self.norm2 = build_norm_layer(norm_cfg, dim)[1] + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = LinearMlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. \ + else nn.Identity() + if use_layer_scale: + self.ls1 = LayerScale(dim) + self.ls2 = LayerScale(dim) + else: + self.ls1, self.ls2 = nn.Identity(), nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x)))) + x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class Meta4D(BaseModule): + """Meta Former block using 4 dimensions inputs, ``torch.Tensor`` with shape + (B, C, H, W).""" + + def __init__(self, + dim, + pool_size=3, + mlp_ratio=4., + act_cfg=dict(type='GELU'), + drop=0., + drop_path=0., + use_layer_scale=True, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.token_mixer = Pooling(pool_size=pool_size) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = ConvMlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. \ + else nn.Identity() + if use_layer_scale: + self.ls1 = LayerScale(dim, data_format='channels_first') + self.ls2 = LayerScale(dim, data_format='channels_first') + else: + self.ls1, self.ls2 = nn.Identity(), nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.ls1(self.token_mixer(x))) + x = x + self.drop_path(self.ls2(self.mlp(x))) + return x + + +def basic_blocks(in_channels, + out_channels, + index, + layers, + pool_size=3, + mlp_ratio=4., + act_cfg=dict(type='GELU'), + drop_rate=.0, + drop_path_rate=0., + use_layer_scale=True, + vit_num=1, + has_downsamper=False): + """generate EfficientFormer blocks for a stage.""" + blocks = [] + if has_downsamper: + blocks.append( + ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=True, + norm_cfg=dict(type='BN'), + act_cfg=None)) + if index == 3 and vit_num == layers[index]: + blocks.append(Flat()) + for block_idx in range(layers[index]): + block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / ( + sum(layers) - 1) + if index == 3 and layers[index] - block_idx <= vit_num: + blocks.append( + Meta3D( + out_channels, + mlp_ratio=mlp_ratio, + act_cfg=act_cfg, + drop=drop_rate, + drop_path=block_dpr, + use_layer_scale=use_layer_scale, + )) + else: + blocks.append( + Meta4D( + out_channels, + pool_size=pool_size, + act_cfg=act_cfg, + drop=drop_rate, + drop_path=block_dpr, + use_layer_scale=use_layer_scale)) + if index == 3 and layers[index] - block_idx - 1 == vit_num: + blocks.append(Flat()) + blocks = nn.Sequential(*blocks) + return blocks + + +@BACKBONES.register_module() +class EfficientFormer(BaseBackbone): + """EfficientFormer. 
+ + A PyTorch implementation of EfficientFormer introduced by: + `EfficientFormer: Vision Transformers at MobileNet Speed `_ + + Modified from the `official repo + `. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``EfficientFormer.arch_settings``. And if dict, + it should include the following 4 keys: + + - layers (list[int]): Number of blocks at each stage. + - embed_dims (list[int]): The number of channels at each stage. + - downsamples (list[int]): Has downsample or not in the four stages. + - vit_num (int): The num of vit blocks in the last stage. + + Defaults to 'l1'. + + in_channels (int): The num of input channels. Defaults to 3. + pool_size (int): The pooling size of ``Meta4D`` blocks. Defaults to 3. + mlp_ratios (int): The dimension ratio of multi-head attention mechanism + in ``Meta4D`` blocks. Defaults to 3. + reshape_last_feat (bool): Whether to reshape the feature map from + (B, N, C) to (B, C, H, W) in the last stage, when the ``vit-num`` + in ``arch`` is not 0. Defaults to False. Usually set to True + in downstream tasks. + out_indices (Sequence[int]): Output from which stages. + Defaults to -1. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop_rate (float): Dropout rate. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + use_layer_scale (bool): Whether to use use_layer_scale in MetaFormer + block. Defaults to True. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + + Example: + >>> from mmcls.models import EfficientFormer + >>> import torch + >>> inputs = torch.rand((1, 3, 224, 224)) + >>> # build EfficientFormer backbone for classification task + >>> model = EfficientFormer(arch="l1") + >>> model.eval() + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 448, 49) + >>> # build EfficientFormer backbone for downstream task + >>> model = EfficientFormer( + >>> arch="l3", + >>> out_indices=(0, 1, 2, 3), + >>> reshape_last_feat=True) + >>> model.eval() + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 64, 56, 56) + (1, 128, 28, 28) + (1, 320, 14, 14) + (1, 512, 7, 7) + """ # noqa: E501 + + # --layers: [x,x,x,x], numbers of layers for the four stages + # --embed_dims: [x,x,x,x], embedding dims for the four stages + # --downsamples: [x,x,x,x], has downsample or not in the four stages + # --vit_num:(int), the num of vit blocks in the last stage + arch_settings = { + 'l1': { + 'layers': [3, 2, 6, 4], + 'embed_dims': [48, 96, 224, 448], + 'downsamples': [False, True, True, True], + 'vit_num': 1, + }, + 'l3': { + 'layers': [4, 4, 12, 6], + 'embed_dims': [64, 128, 320, 512], + 'downsamples': [False, True, True, True], + 'vit_num': 4, + }, + 'l7': { + 'layers': [6, 6, 18, 8], + 'embed_dims': [96, 192, 384, 768], + 'downsamples': [False, True, True, True], + 'vit_num': 8, + }, + } + + def __init__(self, + arch='l1', + in_channels=3, + pool_size=3, + mlp_ratios=4, + reshape_last_feat=False, + out_indices=-1, + frozen_stages=-1, + act_cfg=dict(type='GELU'), + drop_rate=0., + drop_path_rate=0., + use_layer_scale=True, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + self.num_extra_tokens = 0 # no cls_token, no dist_token + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + default_keys = set(self.arch_settings['l1'].keys()) + assert set(arch.keys()) == default_keys, \ + f'The arch dict must have {default_keys}, ' \ + f'but got {list(arch.keys())}.' + + self.layers = arch['layers'] + self.embed_dims = arch['embed_dims'] + self.downsamples = arch['downsamples'] + assert isinstance(self.layers, list) and isinstance( + self.embed_dims, list) and isinstance(self.downsamples, list) + assert len(self.layers) == len(self.embed_dims) == len( + self.downsamples) + + self.vit_num = arch['vit_num'] + self.reshape_last_feat = reshape_last_feat + + assert self.vit_num >= 0, "'vit_num' must be an integer " \ + 'greater than or equal to 0.' + assert self.vit_num <= self.layers[-1], ( + "'vit_num' must be an integer smaller than layer number") + + self._make_stem(in_channels, self.embed_dims[0]) + + # set the main block in network + network = [] + for i in range(len(self.layers)): + if i != 0: + in_channels = self.embed_dims[i - 1] + else: + in_channels = self.embed_dims[i] + out_channels = self.embed_dims[i] + stage = basic_blocks( + in_channels, + out_channels, + i, + self.layers, + pool_size=pool_size, + mlp_ratio=mlp_ratios, + act_cfg=act_cfg, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + vit_num=self.vit_num, + use_layer_scale=use_layer_scale, + has_downsamper=self.downsamples[i]) + network.append(stage) + + self.network = ModuleList(network) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' 
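+        # Convert negative indices (e.g. -1 for the last stage) into absolute
+        # stage indices in [0, 3].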
+ for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 4 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + + self.out_indices = out_indices + for i_layer in self.out_indices: + if not self.reshape_last_feat and \ + i_layer == 3 and self.vit_num > 0: + layer = build_norm_layer( + dict(type='LN'), self.embed_dims[i_layer])[1] + else: + # use GN with 1 group as channel-first LN2D + layer = build_norm_layer( + dict(type='GN', num_groups=1), self.embed_dims[i_layer])[1] + + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + self.frozen_stages = frozen_stages + self._freeze_stages() + + def _make_stem(self, in_channels: int, stem_channels: int): + """make 2-ConvBNReLu stem layer.""" + self.patch_embed = Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=2, + padding=1, + bias=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + inplace=True)) + + def forward_tokens(self, x): + outs = [] + for idx, block in enumerate(self.network): + if idx == len(self.network) - 1: + N, _, H, W = x.shape + if self.downsamples[idx]: + H, W = H // 2, W // 2 + x = block(x) + if idx in self.out_indices: + norm_layer = getattr(self, f'norm{idx}') + + if idx == len(self.network) - 1 and x.dim() == 3: + # when ``vit-num`` > 0 and in the last stage, + # if `self.reshape_last_feat`` is True, reshape the + # features to `BCHW` format before the final normalization. + # if `self.reshape_last_feat`` is False, do + # normalization directly and permute the features to `BCN`. + if self.reshape_last_feat: + x = x.permute((0, 2, 1)).reshape(N, -1, H, W) + x_out = norm_layer(x) + else: + x_out = norm_layer(x).permute((0, 2, 1)) + else: + x_out = norm_layer(x) + + outs.append(x_out.contiguous()) + return tuple(outs) + + def forward(self, x): + # input embedding + x = self.patch_embed(x) + # through stages + x = self.forward_tokens(x) + return x + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + # Include both block and downsample layer. + module = self.network[i] + module.eval() + for param in module.parameters(): + param.requires_grad = False + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(EfficientFormer, self).train(mode) + self._freeze_stages() diff --git a/mmcls/models/backbones/efficientnet.py b/mmcls/models/backbones/efficientnet.py new file mode 100644 index 0000000..ede2c18 --- /dev/null +++ b/mmcls/models/backbones/efficientnet.py @@ -0,0 +1,407 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn.bricks import ConvModule, DropPath +from mmcv.runner import BaseModule, Sequential + +from mmcls.models.backbones.base_backbone import BaseBackbone +from mmcls.models.utils import InvertedResidual, SELayer, make_divisible +from ..builder import BACKBONES + + +class EdgeResidual(BaseModule): + """Edge Residual Block. + + Args: + in_channels (int): The input channels of this module. 
+ out_channels (int): The output channels of this module. + mid_channels (int): The input channels of the second convolution. + kernel_size (int): The kernel size of the first convolution. + Defaults to 3. + stride (int): The stride of the first convolution. Defaults to 1. + se_cfg (dict, optional): Config dict for se layer. Defaults to None, + which means no se layer. + with_residual (bool): Use residual connection. Defaults to True. + conv_cfg (dict, optional): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='ReLU')``. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict | list[dict], optional): Initialization config dict. + """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + with_residual=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_path_rate=0., + with_cp=False, + init_cfg=None): + super(EdgeResidual, self).__init__(init_cfg=init_cfg) + assert stride in [1, 2] + self.with_cp = with_cp + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.with_se = se_cfg is not None + self.with_residual = ( + stride == 1 and in_channels == out_channels and with_residual) + + if self.with_se: + assert isinstance(se_cfg, dict) + + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=1, + padding=kernel_size // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if self.with_se: + self.se = SELayer(**se_cfg) + + self.conv2 = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + out = self.conv1(out) + + if self.with_se: + out = self.se(out) + + out = self.conv2(out) + + if self.with_residual: + return x + self.drop_path(out) + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +def model_scaling(layer_setting, arch_setting): + """Scaling operation to the layer's parameters according to the + arch_setting.""" + # scale width + new_layer_setting = copy.deepcopy(layer_setting) + for layer_cfg in new_layer_setting: + for block_cfg in layer_cfg: + block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8) + + # scale depth + split_layer_setting = [new_layer_setting[0]] + for layer_cfg in new_layer_setting[1:-1]: + tmp_index = [0] + for i in range(len(layer_cfg) - 1): + if layer_cfg[i + 1][1] != layer_cfg[i][1]: + tmp_index.append(i + 1) + tmp_index.append(len(layer_cfg)) + for i in range(len(tmp_index) - 1): + split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i + + 1]]) + split_layer_setting.append(new_layer_setting[-1]) + + num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]] + new_layers = [ + int(math.ceil(arch_setting[1] * num)) for num in num_of_layers + ] + + merge_layer_setting = [split_layer_setting[0]] + for i, layer_cfg in enumerate(split_layer_setting[1:-1]): + if new_layers[i] <= 
num_of_layers[i]: + tmp_layer_cfg = layer_cfg[:new_layers[i]] + else: + tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * ( + new_layers[i] - num_of_layers[i]) + if tmp_layer_cfg[0][3] == 1 and i != 0: + merge_layer_setting[-1] += tmp_layer_cfg.copy() + else: + merge_layer_setting.append(tmp_layer_cfg.copy()) + merge_layer_setting.append(split_layer_setting[-1]) + + return merge_layer_setting + + +@BACKBONES.register_module() +class EfficientNet(BaseBackbone): + """EfficientNet backbone. + + Args: + arch (str): Architecture of efficientnet. Defaults to b0. + out_indices (Sequence[int]): Output from which stages. + Defaults to (6, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='Swish'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + """ + + # Parameters to build layers. + # 'b' represents the architecture of normal EfficientNet family includes + # 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'. + # 'e' represents the architecture of EfficientNet-EdgeTPU including 'es', + # 'em', 'el'. + # 6 parameters are needed to construct a layer, From left to right: + # - kernel_size: The kernel size of the block + # - out_channel: The number of out_channels of the block + # - se_ratio: The sequeeze ratio of SELayer. + # - stride: The stride of the block + # - expand_ratio: The expand_ratio of the mid_channels + # - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual + layer_settings = { + 'b': [[[3, 32, 0, 2, 0, -1]], + [[3, 16, 4, 1, 1, 0]], + [[3, 24, 4, 2, 6, 0], + [3, 24, 4, 1, 6, 0]], + [[5, 40, 4, 2, 6, 0], + [5, 40, 4, 1, 6, 0]], + [[3, 80, 4, 2, 6, 0], + [3, 80, 4, 1, 6, 0], + [3, 80, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0]], + [[5, 192, 4, 2, 6, 0], + [5, 192, 4, 1, 6, 0], + [5, 192, 4, 1, 6, 0], + [5, 192, 4, 1, 6, 0], + [3, 320, 4, 1, 6, 0]], + [[1, 1280, 0, 1, 0, -1]] + ], + 'e': [[[3, 32, 0, 2, 0, -1]], + [[3, 24, 0, 1, 3, 1]], + [[3, 32, 0, 2, 8, 1], + [3, 32, 0, 1, 8, 1]], + [[3, 48, 0, 2, 8, 1], + [3, 48, 0, 1, 8, 1], + [3, 48, 0, 1, 8, 1], + [3, 48, 0, 1, 8, 1]], + [[5, 96, 0, 2, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0]], + [[5, 192, 0, 2, 8, 0], + [5, 192, 0, 1, 8, 0]], + [[1, 1280, 0, 1, 0, -1]] + ] + } # yapf: disable + + # Parameters to build different kinds of architecture. + # From left to right: scaling factor for width, scaling factor for depth, + # resolution. 
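+    # For example, 'b1' = (1.0, 1.1, 240): the same channel widths as 'b0',
+    # about 1.1x as many blocks per stage, and a 240x240 input resolution.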
+ arch_settings = { + 'b0': (1.0, 1.0, 224), + 'b1': (1.0, 1.1, 240), + 'b2': (1.1, 1.2, 260), + 'b3': (1.2, 1.4, 300), + 'b4': (1.4, 1.8, 380), + 'b5': (1.6, 2.2, 456), + 'b6': (1.8, 2.6, 528), + 'b7': (2.0, 3.1, 600), + 'b8': (2.2, 3.6, 672), + 'es': (1.0, 1.0, 224), + 'em': (1.0, 1.1, 240), + 'el': (1.2, 1.4, 300) + } + + def __init__(self, + arch='b0', + drop_path_rate=0., + out_indices=(6, ), + frozen_stages=0, + conv_cfg=dict(type='Conv2dAdaptivePadding'), + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='Swish'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + layer=['_BatchNorm', 'GroupNorm'], + val=1) + ]): + super(EfficientNet, self).__init__(init_cfg) + assert arch in self.arch_settings, \ + f'"{arch}" is not one of the arch_settings ' \ + f'({", ".join(self.arch_settings.keys())})' + self.arch_setting = self.arch_settings[arch] + self.layer_setting = self.layer_settings[arch[:1]] + for index in out_indices: + if index not in range(0, len(self.layer_setting)): + raise ValueError('the item in out_indices must in ' + f'range(0, {len(self.layer_setting)}). ' + f'But received {index}') + + if frozen_stages not in range(len(self.layer_setting) + 1): + raise ValueError('frozen_stages must be in range(0, ' + f'{len(self.layer_setting) + 1}). ' + f'But received {frozen_stages}') + self.drop_path_rate = drop_path_rate + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.layer_setting = model_scaling(self.layer_setting, + self.arch_setting) + block_cfg_0 = self.layer_setting[0][0] + block_cfg_last = self.layer_setting[-1][0] + self.in_channels = make_divisible(block_cfg_0[1], 8) + self.out_channels = block_cfg_last[1] + self.layers = nn.ModuleList() + self.layers.append( + ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=block_cfg_0[0], + stride=block_cfg_0[3], + padding=block_cfg_0[0] // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.make_layer() + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=block_cfg_last[0], + stride=block_cfg_last[3], + padding=block_cfg_last[0] // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def make_layer(self): + # Without the first and the final conv block. 
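+        # ``self.layer_setting`` has already been scaled by ``model_scaling``,
+        # so each remaining entry describes one stage of InvertedResidual (or
+        # EdgeResidual) blocks built below.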
+ layer_setting = self.layer_setting[1:-1] + + total_num_blocks = sum([len(x) for x in layer_setting]) + block_idx = 0 + dpr = [ + x.item() + for x in torch.linspace(0, self.drop_path_rate, total_num_blocks) + ] # stochastic depth decay rule + + for layer_cfg in layer_setting: + layer = [] + for i, block_cfg in enumerate(layer_cfg): + (kernel_size, out_channels, se_ratio, stride, expand_ratio, + block_type) = block_cfg + + mid_channels = int(self.in_channels * expand_ratio) + out_channels = make_divisible(out_channels, 8) + if se_ratio <= 0: + se_cfg = None + else: + se_cfg = dict( + channels=mid_channels, + ratio=expand_ratio * se_ratio, + divisor=1, + act_cfg=(self.act_cfg, dict(type='Sigmoid'))) + if block_type == 1: # edge tpu + if i > 0 and expand_ratio == 3: + with_residual = False + expand_ratio = 4 + else: + with_residual = True + mid_channels = int(self.in_channels * expand_ratio) + if se_cfg is not None: + se_cfg = dict( + channels=mid_channels, + ratio=se_ratio * expand_ratio, + divisor=1, + act_cfg=(self.act_cfg, dict(type='Sigmoid'))) + block = partial(EdgeResidual, with_residual=with_residual) + else: + block = InvertedResidual + layer.append( + block( + in_channels=self.in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + drop_path_rate=dpr[block_idx], + with_cp=self.with_cp)) + self.in_channels = out_channels + block_idx += 1 + self.layers.append(Sequential(*layer)) + + def forward(self, x): + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(EfficientNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/mmcls/models/backbones/hornet.py b/mmcls/models/backbones/hornet.py new file mode 100644 index 0000000..1822b7c --- /dev/null +++ b/mmcls/models/backbones/hornet.py @@ -0,0 +1,499 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from official impl at https://github.com/raoyongming/HorNet. +try: + import torch.fft + fft = True +except ImportError: + fft = None + +import copy +from functools import partial +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from mmcv.cnn.bricks import DropPath + +from mmcls.models.builder import BACKBONES +from ..utils import LayerScale +from .base_backbone import BaseBackbone + + +def get_dwconv(dim, kernel_size, bias=True): + """build a pepth-wise convolution.""" + return nn.Conv2d( + dim, + dim, + kernel_size=kernel_size, + padding=(kernel_size - 1) // 2, + bias=bias, + groups=dim) + + +class HorNetLayerNorm(nn.Module): + """An implementation of LayerNorm of HorNet. + + The differences between HorNetLayerNorm & torch LayerNorm: + 1. Supports two data formats channels_last or channels_first. + + Args: + normalized_shape (int or list or torch.Size): input shape from an + expected input of size. + eps (float): a value added to the denominator for numerical stability. + Defaults to 1e-5. + data_format (str): The ordering of the dimensions in the inputs. 
+ channels_last corresponds to inputs with shape (batch_size, height, + width, channels) while channels_first corresponds to inputs with + shape (batch_size, channels, height, width). + Defaults to 'channels_last'. + """ + + def __init__(self, + normalized_shape, + eps=1e-6, + data_format='channels_last'): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ['channels_last', 'channels_first']: + raise ValueError( + 'data_format must be channels_last or channels_first') + self.normalized_shape = (normalized_shape, ) + + def forward(self, x): + if self.data_format == 'channels_last': + return F.layer_norm(x, self.normalized_shape, self.weight, + self.bias, self.eps) + elif self.data_format == 'channels_first': + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class GlobalLocalFilter(nn.Module): + """A GlobalLocalFilter of HorNet. + + Args: + dim (int): Number of input channels. + h (int): Height of complex_weight. + Defaults to 14. + w (int): Width of complex_weight. + Defaults to 8. + """ + + def __init__(self, dim, h=14, w=8): + super().__init__() + self.dw = nn.Conv2d( + dim // 2, + dim // 2, + kernel_size=3, + padding=1, + bias=False, + groups=dim // 2) + self.complex_weight = nn.Parameter( + torch.randn(dim // 2, h, w, 2, dtype=torch.float32) * 0.02) + self.pre_norm = HorNetLayerNorm( + dim, eps=1e-6, data_format='channels_first') + self.post_norm = HorNetLayerNorm( + dim, eps=1e-6, data_format='channels_first') + + def forward(self, x): + x = self.pre_norm(x) + x1, x2 = torch.chunk(x, 2, dim=1) + x1 = self.dw(x1) + + x2 = x2.to(torch.float32) + B, C, a, b = x2.shape + x2 = torch.fft.rfft2(x2, dim=(2, 3), norm='ortho') + + weight = self.complex_weight + if not weight.shape[1:3] == x2.shape[2:4]: + weight = F.interpolate( + weight.permute(3, 0, 1, 2), + size=x2.shape[2:4], + mode='bilinear', + align_corners=True).permute(1, 2, 3, 0) + + weight = torch.view_as_complex(weight.contiguous()) + + x2 = x2 * weight + x2 = torch.fft.irfft2(x2, s=(a, b), dim=(2, 3), norm='ortho') + + x = torch.cat([x1.unsqueeze(2), x2.unsqueeze(2)], + dim=2).reshape(B, 2 * C, a, b) + x = self.post_norm(x) + return x + + +class gnConv(nn.Module): + """A gnConv of HorNet. + + Args: + dim (int): Number of input channels. + order (int): Order of gnConv. + Defaults to 5. + dw_cfg (dict): The Config for dw conv. + Defaults to ``dict(type='DW', kernel_size=7)``. + scale (float): Scaling parameter of gflayer outputs. + Defaults to 1.0. 
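+
+    The input is projected to ``2 * dim`` channels and split into a gating
+    part and a depth-wise (or global-filter) part; the two are then combined
+    by ``order`` element-wise multiplications interleaved with 1x1
+    projections of increasing width.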
+ """ + + def __init__(self, + dim, + order=5, + dw_cfg=dict(type='DW', kernel_size=7), + scale=1.0): + super().__init__() + self.order = order + self.dims = [dim // 2**i for i in range(order)] + self.dims.reverse() + self.proj_in = nn.Conv2d(dim, 2 * dim, 1) + + cfg = copy.deepcopy(dw_cfg) + dw_type = cfg.pop('type') + assert dw_type in ['DW', 'GF'],\ + 'dw_type should be `DW` or `GF`' + if dw_type == 'DW': + self.dwconv = get_dwconv(sum(self.dims), **cfg) + elif dw_type == 'GF': + self.dwconv = GlobalLocalFilter(sum(self.dims), **cfg) + + self.proj_out = nn.Conv2d(dim, dim, 1) + + self.projs = nn.ModuleList([ + nn.Conv2d(self.dims[i], self.dims[i + 1], 1) + for i in range(order - 1) + ]) + + self.scale = scale + + def forward(self, x): + x = self.proj_in(x) + y, x = torch.split(x, (self.dims[0], sum(self.dims)), dim=1) + + x = self.dwconv(x) * self.scale + + dw_list = torch.split(x, self.dims, dim=1) + x = y * dw_list[0] + + for i in range(self.order - 1): + x = self.projs[i](x) * dw_list[i + 1] + + x = self.proj_out(x) + + return x + + +class HorNetBlock(nn.Module): + """A block of HorNet. + + Args: + dim (int): Number of input channels. + order (int): Order of gnConv. + Defaults to 5. + dw_cfg (dict): The Config for dw conv. + Defaults to ``dict(type='DW', kernel_size=7)``. + scale (float): Scaling parameter of gflayer outputs. + Defaults to 1.0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + use_layer_scale (bool): Whether to use use_layer_scale in HorNet + block. Defaults to True. + """ + + def __init__(self, + dim, + order=5, + dw_cfg=dict(type='DW', kernel_size=7), + scale=1.0, + drop_path_rate=0., + use_layer_scale=True): + super().__init__() + self.out_channels = dim + + self.norm1 = HorNetLayerNorm( + dim, eps=1e-6, data_format='channels_first') + self.gnconv = gnConv(dim, order, dw_cfg, scale) + self.norm2 = HorNetLayerNorm(dim, eps=1e-6) + self.pwconv1 = nn.Linear(dim, 4 * dim) + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * dim, dim) + + if use_layer_scale: + self.gamma1 = LayerScale(dim, data_format='channels_first') + self.gamma2 = LayerScale(dim) + else: + self.gamma1, self.gamma2 = nn.Identity(), nn.Identity() + + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.gamma1(self.gnconv(self.norm1(x)))) + + input = x + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.norm2(x) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + x = self.gamma2(x) + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = input + self.drop_path(x) + return x + + +@BACKBONES.register_module() +class HorNet(BaseBackbone): + """HorNet + A PyTorch impl of : `HorNet: Efficient High-Order Spatial Interactions + with Recursive Gated Convolutions` + + Inspiration from + https://github.com/raoyongming/HorNet + + Args: + arch (str | dict): HorNet architecture. + If use string, choose from 'tiny', 'small', 'base' and 'large'. + If use dict, it should have below keys: + - **base_dim** (int): The base dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **orders** (List[int]): The number of order of gnConv in each + stage. + - **dw_cfg** (List[dict]): The Config for dw conv. + + Defaults to 'tiny'. + in_channels (int): Number of input image channels. Defaults to 3. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + scale (float): Scaling parameter of gflayer outputs. Defaults to 1/3. 
+ use_layer_scale (bool): Whether to use use_layer_scale in HorNet + block. Defaults to True. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + gap_before_final_norm (bool): Whether to globally average the feature + map before the final norm layer. In the official repo, it's only + used in classification task. Defaults to True. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'base_dim': 64, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [dict(type='DW', kernel_size=7)] * 4}), + **dict.fromkeys(['t-gf', 'tiny-gf'], + {'base_dim': 64, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4)]}), + **dict.fromkeys(['s', 'small'], + {'base_dim': 96, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [dict(type='DW', kernel_size=7)] * 4}), + **dict.fromkeys(['s-gf', 'small-gf'], + {'base_dim': 96, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4)]}), + **dict.fromkeys(['b', 'base'], + {'base_dim': 128, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [dict(type='DW', kernel_size=7)] * 4}), + **dict.fromkeys(['b-gf', 'base-gf'], + {'base_dim': 128, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4)]}), + **dict.fromkeys(['b-gf384', 'base-gf384'], + {'base_dim': 128, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=24, w=12), + dict(type='GF', h=13, w=7)]}), + **dict.fromkeys(['l', 'large'], + {'base_dim': 192, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [dict(type='DW', kernel_size=7)] * 4}), + **dict.fromkeys(['l-gf', 'large-gf'], + {'base_dim': 192, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4)]}), + **dict.fromkeys(['l-gf384', 'large-gf384'], + {'base_dim': 192, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=24, w=12), + dict(type='GF', h=13, w=7)]}), + } # yapf: disable + + def __init__(self, + arch='tiny', + in_channels=3, + drop_path_rate=0., + scale=1 / 3, + use_layer_scale=True, + out_indices=(3, ), + frozen_stages=-1, + with_cp=False, + gap_before_final_norm=True, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if fft is None: + raise RuntimeError( + 'Failed to import torch.fft. 
Please install "torch>=1.7".') + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'base_dim', 'depths', 'orders', 'dw_cfg'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.scale = scale + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.with_cp = with_cp + self.gap_before_final_norm = gap_before_final_norm + + base_dim = self.arch_settings['base_dim'] + dims = list(map(lambda x: 2**x * base_dim, range(4))) + + self.downsample_layers = nn.ModuleList() + stem = nn.Sequential( + nn.Conv2d(in_channels, dims[0], kernel_size=4, stride=4), + HorNetLayerNorm(dims[0], eps=1e-6, data_format='channels_first')) + self.downsample_layers.append(stem) + for i in range(3): + downsample_layer = nn.Sequential( + HorNetLayerNorm( + dims[i], eps=1e-6, data_format='channels_first'), + nn.Conv2d(dims[i], dims[i + 1], kernel_size=2, stride=2), + ) + self.downsample_layers.append(downsample_layer) + + total_depth = sum(self.arch_settings['depths']) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + cur_block_idx = 0 + self.stages = nn.ModuleList() + for i in range(4): + stage = nn.Sequential(*[ + HorNetBlock( + dim=dims[i], + order=self.arch_settings['orders'][i], + dw_cfg=self.arch_settings['dw_cfg'][i], + scale=self.scale, + drop_path_rate=dpr[cur_block_idx + j], + use_layer_scale=use_layer_scale) + for j in range(self.arch_settings['depths'][i]) + ]) + self.stages.append(stage) + cur_block_idx += self.arch_settings['depths'][i] + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + out_indices = list(out_indices) + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = len(self.stages) + index + assert 0 <= out_indices[i] <= len(self.stages), \ + f'Invalid out_indices {index}.' 
+ self.out_indices = out_indices + + norm_layer = partial( + HorNetLayerNorm, eps=1e-6, data_format='channels_first') + for i_layer in out_indices: + layer = norm_layer(dims[i_layer]) + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + def train(self, mode=True): + super(HorNet, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + for i in range(0, self.frozen_stages + 1): + # freeze patch embed + m = self.downsample_layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # freeze blocks + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + if i in self.out_indices: + # freeze norm + m = getattr(self, f'norm{i + 1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + outs = [] + for i in range(4): + x = self.downsample_layers[i](x) + if self.with_cp: + x = checkpoint.checkpoint_sequential(self.stages[i], + len(self.stages[i]), x) + else: + x = self.stages[i](x) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + if self.gap_before_final_norm: + gap = x.mean([-2, -1], keepdim=True) + outs.append(norm_layer(gap).flatten(1)) + else: + # The output of LayerNorm2d may be discontiguous, which + # may cause some problem in the downstream tasks + outs.append(norm_layer(x).contiguous()) + return tuple(outs) diff --git a/mmcls/models/backbones/hrnet.py b/mmcls/models/backbones/hrnet.py new file mode 100644 index 0000000..57baf0c --- /dev/null +++ b/mmcls/models/backbones/hrnet.py @@ -0,0 +1,563 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule, ModuleList, Sequential +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from .resnet import BasicBlock, Bottleneck, ResLayer, get_expansion + + +class HRModule(BaseModule): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange + is in this module. + + Args: + num_branches (int): The number of branches. + block (``BaseModule``): Convolution block module. + num_blocks (tuple): The number of blocks in each branch. + The length must be equal to ``num_branches``. + num_channels (tuple): The number of base channels in each branch. + The length must be equal to ``num_branches``. + multiscale_output (bool): Whether to output multi-level features + produced by multiple branches. If False, only the first level + feature will be output. Defaults to True. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + conv_cfg (dict, optional): Dictionary to construct and config conv + layer. Defaults to None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to ``dict(type='BN')``. + block_init_cfg (dict, optional): The initialization configs of every + blocks. Defaults to None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + num_branches, + block, + num_blocks, + in_channels, + num_channels, + multiscale_output=True, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + block_init_cfg=None, + init_cfg=None): + super(HRModule, self).__init__(init_cfg) + self.block_init_cfg = block_init_cfg + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, block, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=False) + + def _check_branches(self, num_branches, num_blocks, in_channels, + num_channels): + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_BLOCKS({len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_CHANNELS({len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + out_channels = num_channels[i] * get_expansion(block) + branches.append( + ResLayer( + block=block, + num_blocks=num_blocks[i], + in_channels=self.in_channels[i], + out_channels=out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + with_cp=self.with_cp, + init_cfg=self.block_init_cfg, + )) + + return ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + # Upsample the feature maps of smaller scales. + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), mode='nearest'))) + elif j == i: + # Keep the feature map with the same scale. + fuse_layer.append(None) + else: + # Downsample the feature maps of larger scales. + conv_downsamples = [] + for k in range(i - j): + # Use stacked convolution layers to downsample. 
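+                        # Each 3x3 stride-2 conv halves the resolution, so
+                        # i - j convs bring branch j down to branch i's
+                        # resolution; only the last conv changes the channel
+                        # number (and has no ReLU).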
+ if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=False))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@BACKBONES.register_module() +class HRNet(BaseModule): + """HRNet backbone. + + `High-Resolution Representations for Labeling Pixels and Regions + `_. + + Args: + arch (str): The preset HRNet architecture, includes 'w18', 'w30', + 'w32', 'w40', 'w44', 'w48', 'w64'. It will only be used if + extra is ``None``. Defaults to 'w32'. + extra (dict, optional): Detailed configuration for each stage of HRNet. + There must be 4 stages, the configuration for each stage must have + 5 keys: + + - num_modules (int): The number of HRModule in this stage. + - num_branches (int): The number of branches in the HRModule. + - block (str): The type of convolution block. Please choose between + 'BOTTLENECK' and 'BASIC'. + - num_blocks (tuple): The number of blocks in each branch. + The length must be equal to num_branches. + - num_channels (tuple): The number of base channels in each branch. + The length must be equal to num_branches. + + Defaults to None. + in_channels (int): Number of input image channels. Defaults to 3. + conv_cfg (dict, optional): Dictionary to construct and config conv + layer. Defaults to None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to ``dict(type='BN')``. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Defaults to False. + multiscale_output (bool): Whether to output multi-level features + produced by multiple branches. If False, only the first level + feature will be output. Defaults to True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ + Example: + >>> import torch + >>> from mmcls.models import HRNet + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 32, 8, 8) + (1, 64, 4, 4) + (1, 128, 2, 2) + (1, 256, 1, 1) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + arch_zoo = { + # num_modules, num_branches, block, num_blocks, num_channels + 'w18': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (18, 36)], + [4, 3, 'BASIC', (4, 4, 4), (18, 36, 72)], + [3, 4, 'BASIC', (4, 4, 4, 4), (18, 36, 72, 144)]], + 'w30': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (30, 60)], + [4, 3, 'BASIC', (4, 4, 4), (30, 60, 120)], + [3, 4, 'BASIC', (4, 4, 4, 4), (30, 60, 120, 240)]], + 'w32': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (32, 64)], + [4, 3, 'BASIC', (4, 4, 4), (32, 64, 128)], + [3, 4, 'BASIC', (4, 4, 4, 4), (32, 64, 128, 256)]], + 'w40': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (40, 80)], + [4, 3, 'BASIC', (4, 4, 4), (40, 80, 160)], + [3, 4, 'BASIC', (4, 4, 4, 4), (40, 80, 160, 320)]], + 'w44': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (44, 88)], + [4, 3, 'BASIC', (4, 4, 4), (44, 88, 176)], + [3, 4, 'BASIC', (4, 4, 4, 4), (44, 88, 176, 352)]], + 'w48': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (48, 96)], + [4, 3, 'BASIC', (4, 4, 4), (48, 96, 192)], + [3, 4, 'BASIC', (4, 4, 4, 4), (48, 96, 192, 384)]], + 'w64': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (64, 128)], + [4, 3, 'BASIC', (4, 4, 4), (64, 128, 256)], + [3, 4, 'BASIC', (4, 4, 4, 4), (64, 128, 256, 512)]], + } # yapf:disable + + def __init__(self, + arch='w32', + extra=None, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=False, + with_cp=False, + zero_init_residual=False, + multiscale_output=True, + init_cfg=[ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(HRNet, self).__init__(init_cfg) + + extra = self.parse_arch(arch, extra) + + # Assert configurations of 4 stages are in extra + for i in range(1, 5): + assert f'stage{i}' in extra, f'Missing stage{i} config in "extra".' 
+ # Assert whether the length of `num_blocks` and `num_channels` are + # equal to `num_branches` + cfg = extra[f'stage{i}'] + assert len(cfg['num_blocks']) == cfg['num_branches'] and \ + len(cfg['num_channels']) == cfg['num_branches'] + + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + + # -------------------- stem net -------------------- + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + out_channels=64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.add_module(self.norm1_name, norm1) + + self.conv2 = build_conv_layer( + self.conv_cfg, + in_channels=64, + out_channels=64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + # -------------------- stage 1 -------------------- + self.stage1_cfg = self.extra['stage1'] + base_channels = self.stage1_cfg['num_channels'] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in base_channels + ] + # To align with the original code, use layer1 instead of stage1 here. + self.layer1 = ResLayer( + block, + in_channels=64, + out_channels=num_channels[0], + num_blocks=num_blocks[0]) + pre_num_channels = num_channels + + # -------------------- stage 2~4 -------------------- + for i in range(2, 5): + stage_cfg = self.extra[f'stage{i}'] + base_channels = stage_cfg['num_channels'] + block = self.blocks_dict[stage_cfg['block']] + multiscale_output_ = multiscale_output if i == 4 else True + + num_channels = [ + channel * get_expansion(block) for channel in base_channels + ] + # The transition layer from layer1 to stage2 + transition = self._make_transition_layer(pre_num_channels, + num_channels) + self.add_module(f'transition{i-1}', transition) + stage = self._make_stage( + stage_cfg, num_channels, multiscale_output=multiscale_output_) + self.add_module(f'stage{i}', stage) + + pre_num_channels = num_channels + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + # For existing scale branches, + # add conv block when the channels are not the same. + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(nn.Identity()) + else: + # For new scale branches, add stacked downsample conv blocks. + # For example, num_branches_pre = 2, for the 4th branch, add + # stacked two downsample conv blocks. 
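+                # Each downsample conv below halves the resolution
+                # (stride=2). Only the last conv in the stack maps to the
+                # new branch's channel number; the earlier ones keep the
+                # previous branch's channel number.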
+ conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + block_init_cfg = None + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm3')) + + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + block_init_cfg=block_init_cfg)) + + return Sequential(*hr_modules) + + def forward(self, x): + """Forward function.""" + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [x] + + for i in range(2, 5): + # Apply transition + transition = getattr(self, f'transition{i-1}') + inputs = [] + for j, layer in enumerate(transition): + if j < len(x_list): + inputs.append(layer(x_list[j])) + else: + inputs.append(layer(x_list[-1])) + # Forward HRModule + stage = getattr(self, f'stage{i}') + x_list = stage(inputs) + + return tuple(x_list) + + def train(self, mode=True): + """Convert the model into training mode will keeping the normalization + layer freezed.""" + super(HRNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def parse_arch(self, arch, extra=None): + if extra is not None: + return extra + + assert arch in self.arch_zoo, \ + ('Invalid arch, please choose arch from ' + f'{list(self.arch_zoo.keys())}, or specify `extra` ' + 'argument directly.') + + extra = dict() + for i, stage_setting in enumerate(self.arch_zoo[arch], start=1): + extra[f'stage{i}'] = dict( + num_modules=stage_setting[0], + num_branches=stage_setting[1], + block=stage_setting[2], + num_blocks=stage_setting[3], + num_channels=stage_setting[4], + ) + + return extra diff --git a/mmcls/models/backbones/lenet.py b/mmcls/models/backbones/lenet.py new file mode 100644 index 0000000..1168661 --- /dev/null +++ b/mmcls/models/backbones/lenet.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class LeNet5(BaseBackbone): + """`LeNet5 `_ backbone. + + The input for LeNet-5 is a 32×32 grayscale image. 
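+    That is, the expected input is a tensor of shape ``(N, 1, 32, 32)``,
+    which the convolutional features reduce to a 120-dim vector before
+    the optional classifier head.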
+ + Args: + num_classes (int): number of classes for classification. + The default value is -1, which uses the backbone as + a feature extractor without the top classifier. + """ + + def __init__(self, num_classes=-1): + super(LeNet5, self).__init__() + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(1, 6, kernel_size=5, stride=1), nn.Tanh(), + nn.AvgPool2d(kernel_size=2), + nn.Conv2d(6, 16, kernel_size=5, stride=1), nn.Tanh(), + nn.AvgPool2d(kernel_size=2), + nn.Conv2d(16, 120, kernel_size=5, stride=1), nn.Tanh()) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(120, 84), + nn.Tanh(), + nn.Linear(84, num_classes), + ) + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = self.classifier(x.squeeze()) + + return (x, ) diff --git a/mmcls/models/backbones/mlp_mixer.py b/mmcls/models/backbones/mlp_mixer.py new file mode 100644 index 0000000..13171a4 --- /dev/null +++ b/mmcls/models/backbones/mlp_mixer.py @@ -0,0 +1,263 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmcv.runner.base_module import BaseModule, ModuleList + +from ..builder import BACKBONES +from ..utils import to_2tuple +from .base_backbone import BaseBackbone + + +class MixerBlock(BaseModule): + """Mlp-Mixer basic block. + + Basic module of `MLP-Mixer: An all-MLP Architecture for Vision + `_ + + Args: + num_tokens (int): The number of patched tokens + embed_dims (int): The feature dimension + tokens_mlp_dims (int): The hidden dimension for tokens FFNs + channels_mlp_dims (int): The hidden dimension for channels FFNs + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + act_cfg (dict): The activation config for FFNs. + Defaluts to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
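+
+    Note:
+        The token-mixing FFN is applied to the transposed input, so it
+        mixes information across the ``num_tokens`` dimension, while the
+        channel-mixing FFN mixes across ``embed_dims``.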
+ """ + + def __init__(self, + num_tokens, + embed_dims, + tokens_mlp_dims, + channels_mlp_dims, + drop_rate=0., + drop_path_rate=0., + num_fcs=2, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(MixerBlock, self).__init__(init_cfg=init_cfg) + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + self.token_mix = FFN( + embed_dims=num_tokens, + feedforward_channels=tokens_mlp_dims, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + self.channel_mix = FFN( + embed_dims=embed_dims, + feedforward_channels=channels_mlp_dims, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def init_weights(self): + super(MixerBlock, self).init_weights() + for m in self.token_mix.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + for m in self.channel_mix.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + + def forward(self, x): + out = self.norm1(x).transpose(1, 2) + x = x + self.token_mix(out).transpose(1, 2) + x = self.channel_mix(self.norm2(x), identity=x) + return x + + +@BACKBONES.register_module() +class MlpMixer(BaseBackbone): + """Mlp-Mixer backbone. + + Pytorch implementation of `MLP-Mixer: An all-MLP Architecture for Vision + `_ + + Args: + arch (str | dict): MLP Mixer architecture. If use string, choose from + 'small', 'base' and 'large'. If use dict, it should have below + keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of MLP blocks. + - **tokens_mlp_dims** (int): The hidden dimensions for tokens FFNs. + - **channels_mlp_dims** (int): The The hidden dimensions for + channels FFNs. + + Defaults to 'base'. + img_size (int | tuple): The input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + out_indices (Sequence | int): Output from which layer. + Defaults to -1, means the last layer. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + act_cfg (dict): The activation config for FFNs. Default GELU. + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each mixer block layer. + Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
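+
+    Example:
+        An illustrative sketch (assumes ``MlpMixer`` is importable from
+        ``mmcls.models`` like the other backbones; the printed shape
+        corresponds to the default 'base' settings, a 224x224 input and
+        16x16 patches):
+
+        >>> import torch
+        >>> from mmcls.models import MlpMixer
+        >>> model = MlpMixer(arch='base', img_size=224, patch_size=16)
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> outputs = model(inputs)
+        >>> print(tuple(outputs[-1].shape))
+        (1, 768, 196)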
+ """ + + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 512, + 'num_layers': 8, + 'tokens_mlp_dims': 256, + 'channels_mlp_dims': 2048, + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'tokens_mlp_dims': 384, + 'channels_mlp_dims': 3072, + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'tokens_mlp_dims': 512, + 'channels_mlp_dims': 4096, + }), + } + + def __init__(self, + arch='base', + img_size=224, + patch_size=16, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None): + super(MlpMixer, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'tokens_mlp_dims', + 'channels_mlp_dims' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.tokens_mlp_dims = self.arch_settings['tokens_mlp_dims'] + self.channels_mlp_dims = self.arch_settings['channels_mlp_dims'] + + self.img_size = to_2tuple(img_size) + + _patch_cfg = dict( + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must be a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + else: + assert index >= self.num_layers, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + num_tokens=num_patches, + embed_dims=self.embed_dims, + tokens_mlp_dims=self.tokens_mlp_dims, + channels_mlp_dims=self.channels_mlp_dims, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(MixerBlock(**_layer_cfg)) + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def forward(self, x): + assert x.shape[2:] == self.img_size, \ + "The MLP-Mixer doesn't support dynamic input shape. 
" \ + f'Please input images with shape {self.img_size}' + x, _ = self.patch_embed(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1: + x = self.norm1(x) + + if i in self.out_indices: + out = x.transpose(1, 2) + outs.append(out) + + return tuple(outs) diff --git a/mmcls/models/backbones/mobilenet_v2.py b/mmcls/models/backbones/mobilenet_v2.py new file mode 100644 index 0000000..8f171ed --- /dev/null +++ b/mmcls/models/backbones/mobilenet_v2.py @@ -0,0 +1,264 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import make_divisible +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class InvertedResidual(BaseModule): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=1, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@BACKBONES.register_module() +class MobileNetV2(BaseBackbone): + """MobileNetV2 backbone. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. 
+ conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + # Parameters to build layers. 4 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks, stride. + arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], + [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], + [6, 320, 1, 1]] + + def __init__(self, + widen_factor=1., + out_indices=(7, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(MobileNetV2, self).__init__(init_cfg) + self.widen_factor = widen_factor + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 8): + raise ValueError('the item in out_indices must in ' + f'range(0, 8). But received {index}') + + if frozen_stages not in range(-1, 8): + raise ValueError('frozen_stages must be in range(-1, 8). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks, stride = layer_cfg + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + if widen_factor > 1.0: + self.out_channel = int(1280 * widen_factor) + else: + self.out_channel = 1280 + + layer = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channel, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.add_module('conv2', layer) + self.layers.append('conv2') + + def make_layer(self, out_channels, num_blocks, stride, expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. Default: 6. 
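+
+        Returns:
+            nn.Sequential: The stacked ``InvertedResidual`` blocks. Only
+                the first block uses the given ``stride``; the following
+                blocks always use stride 1.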
+ """ + layers = [] + for i in range(num_blocks): + if i >= 1: + stride = 1 + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride, + expand_ratio=expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmcls/models/backbones/mobilenet_v3.py b/mmcls/models/backbones/mobilenet_v3.py new file mode 100644 index 0000000..b612b88 --- /dev/null +++ b/mmcls/models/backbones/mobilenet_v3.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import ConvModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import InvertedResidual +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class MobileNetV3(BaseBackbone): + """MobileNetV3 backbone. + + Args: + arch (str): Architecture of mobilnetv3, from {small, large}. + Default: small. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (None or Sequence[int]): Output from which stages. + Default: None, which means output tensors from final stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. 
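+
+    Example:
+        An illustrative sketch (assumes ``MobileNetV3`` is importable from
+        ``mmcls.models``; the printed shape corresponds to the default
+        'small' arch with a 224x224 input):
+
+        >>> import torch
+        >>> from mmcls.models import MobileNetV3
+        >>> model = MobileNetV3(arch='small')
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> outputs = model(inputs)
+        >>> print(tuple(outputs[0].shape))
+        (1, 576, 7, 7)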
+ """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 2], + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'large': [[3, 16, 16, False, 'ReLU', 1], + [3, 64, 24, False, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], + [5, 960, 160, True, 'HSwish', 1], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.01), + out_indices=None, + frozen_stages=-1, + norm_eval=False, + with_cp=False, + init_cfg=[ + dict( + type='Kaiming', + layer=['Conv2d'], + nonlinearity='leaky_relu'), + dict(type='Normal', layer=['Linear'], std=0.01), + dict(type='Constant', layer=['BatchNorm2d'], val=1) + ]): + super(MobileNetV3, self).__init__(init_cfg) + assert arch in self.arch_settings + if out_indices is None: + out_indices = (12, ) if arch == 'small' else (16, ) + for order, index in enumerate(out_indices): + if index not in range(0, len(self.arch_settings[arch]) + 2): + raise ValueError( + 'the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch]) + 2}). ' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch]) + 2}). 
' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.layers = self._make_layer() + self.feat_dim = self.arch_settings[arch][-1][1] + + def _make_layer(self): + layers = [] + layer_setting = self.arch_settings[self.arch] + in_channels = 16 + + layer = ConvModule( + in_channels=3, + out_channels=in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + self.add_module('layer0', layer) + layers.append('layer0') + + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict( + type='HSigmoid', + bias=3, + divisor=6, + min_value=0, + max_value=1))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + in_channels = out_channels + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + # Build the last layer before pooling + # TODO: No dilation + layer = ConvModule( + in_channels=in_channels, + out_channels=576 if self.arch == 'small' else 960, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + layer_name = 'layer{}'.format(len(layer_setting) + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + return layers + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(0, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV3, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmcls/models/backbones/mvit.py b/mmcls/models/backbones/mvit.py new file mode 100644 index 0000000..b9e67df --- /dev/null +++ b/mmcls/models/backbones/mvit.py @@ -0,0 +1,700 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmcv.cnn.bricks.transformer import PatchEmbed, build_activation_layer +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner import BaseModule, ModuleList +from mmcv.utils import to_2tuple + +from ..builder import BACKBONES +from ..utils import resize_pos_embed +from .base_backbone import BaseBackbone + + +def resize_decomposed_rel_pos(rel_pos, q_size, k_size): + """Get relative positional embeddings according to the relative positions + of query and key sizes. + + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). 
+ + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + resized = F.interpolate( + # (L, C) -> (1, C, L) + rel_pos.transpose(0, 1).unsqueeze(0), + size=max_rel_dist, + mode='linear', + ) + # (1, C, L) -> (L, C) + resized = resized.squeeze(0).transpose(0, 1) + else: + resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_h_ratio = max(k_size / q_size, 1.0) + k_h_ratio = max(q_size / k_size, 1.0) + q_coords = torch.arange(q_size)[:, None] * q_h_ratio + k_coords = torch.arange(k_size)[None, :] * k_h_ratio + relative_coords = (q_coords - k_coords) + (k_size - 1) * k_h_ratio + + return resized[relative_coords.long()] + + +def add_decomposed_rel_pos(attn, + q, + q_shape, + k_shape, + rel_pos_h, + rel_pos_w, + has_cls_token=False): + """Spatial Relative Positional Embeddings.""" + sp_idx = 1 if has_cls_token else 0 + B, num_heads, _, C = q.shape + q_h, q_w = q_shape + k_h, k_w = k_shape + + Rh = resize_decomposed_rel_pos(rel_pos_h, q_h, k_h) + Rw = resize_decomposed_rel_pos(rel_pos_w, q_w, k_w) + + r_q = q[:, :, sp_idx:].reshape(B, num_heads, q_h, q_w, C) + rel_h = torch.einsum('byhwc,hkc->byhwk', r_q, Rh) + rel_w = torch.einsum('byhwc,wkc->byhwk', r_q, Rw) + rel_pos_embed = rel_h[:, :, :, :, :, None] + rel_w[:, :, :, :, None, :] + + attn_map = attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w) + attn_map += rel_pos_embed + attn[:, :, sp_idx:, sp_idx:] = attn_map.view(B, -1, q_h * q_w, k_h * k_w) + + return attn + + +class MLP(BaseModule): + """Two-layer multilayer perceptron. + + Comparing with :class:`mmcv.cnn.bricks.transformer.FFN`, this class allows + different input and output channel numbers. + + Args: + in_channels (int): The number of input channels. + hidden_channels (int, optional): The number of hidden layer channels. + If None, same as the ``in_channels``. Defaults to None. + out_channels (int, optional): The number of output channels. If None, + same as the ``in_channels``. Defaults to None. + act_cfg (dict): The config of activation function. + Defaults to ``dict(type='GELU')``. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. + """ + + def __init__(self, + in_channels, + hidden_channels=None, + out_channels=None, + act_cfg=dict(type='GELU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_channels = out_channels or in_channels + hidden_channels = hidden_channels or in_channels + self.fc1 = nn.Linear(in_channels, hidden_channels) + self.act = build_activation_layer(act_cfg) + self.fc2 = nn.Linear(hidden_channels, out_channels) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + +def attention_pool(x: torch.Tensor, + pool: nn.Module, + in_size: tuple, + norm: Optional[nn.Module] = None): + """Pooling the feature tokens. + + Args: + x (torch.Tensor): The input tensor, should be with shape + ``(B, num_heads, L, C)`` or ``(B, L, C)``. + pool (nn.Module): The pooling module. + in_size (Tuple[int]): The shape of the input feature map. + norm (nn.Module, optional): The normalization module. + Defaults to None. 
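+
+    Returns:
+        tuple: The pooled tokens, with the same number of dimensions as
+            the input ``x``, and the pooled spatial size ``(H', W')``.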
+ """ + ndim = x.ndim + if ndim == 4: + B, num_heads, L, C = x.shape + elif ndim == 3: + num_heads = 1 + B, L, C = x.shape + else: + raise RuntimeError(f'Unsupported input dimension {x.shape}') + + H, W = in_size + assert L == H * W + + # (B, num_heads, H*W, C) -> (B*num_heads, C, H, W) + x = x.reshape(B * num_heads, H, W, C).permute(0, 3, 1, 2).contiguous() + x = pool(x) + out_size = x.shape[-2:] + + # (B*num_heads, C, H', W') -> (B, num_heads, H'*W', C) + x = x.reshape(B, num_heads, C, -1).transpose(2, 3) + + if norm is not None: + x = norm(x) + + if ndim == 3: + x = x.squeeze(1) + + return x, out_size + + +class MultiScaleAttention(BaseModule): + """Multiscale Multi-head Attention block. + + Args: + in_dims (int): Number of input channels. + out_dims (int): Number of output channels. + num_heads (int): Number of attention heads. + qkv_bias (bool): If True, add a learnable bias to query, key and + value. Defaults to True. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='LN')``. + pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3). + stride_q (int): stride size for q pooling layer. Defaults to 1. + stride_kv (int): stride size for kv pooling layer. Defaults to 1. + rel_pos_spatial (bool): Whether to enable the spatial relative + position embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + input_size (Tuple[int], optional): The input resolution, necessary + if enable the ``rel_pos_spatial``. Defaults to None. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. + """ + + def __init__(self, + in_dims, + out_dims, + num_heads, + qkv_bias=True, + norm_cfg=dict(type='LN'), + pool_kernel=(3, 3), + stride_q=1, + stride_kv=1, + rel_pos_spatial=False, + residual_pooling=True, + input_size=None, + rel_pos_zero_init=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.num_heads = num_heads + self.in_dims = in_dims + self.out_dims = out_dims + + head_dim = out_dims // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(in_dims, out_dims * 3, bias=qkv_bias) + self.proj = nn.Linear(out_dims, out_dims) + + # qkv pooling + pool_padding = [k // 2 for k in pool_kernel] + pool_dims = out_dims // num_heads + + def build_pooling(stride): + pool = nn.Conv2d( + pool_dims, + pool_dims, + pool_kernel, + stride=stride, + padding=pool_padding, + groups=pool_dims, + bias=False, + ) + norm = build_norm_layer(norm_cfg, pool_dims)[1] + return pool, norm + + self.pool_q, self.norm_q = build_pooling(stride_q) + self.pool_k, self.norm_k = build_pooling(stride_kv) + self.pool_v, self.norm_v = build_pooling(stride_kv) + + self.residual_pooling = residual_pooling + + self.rel_pos_spatial = rel_pos_spatial + self.rel_pos_zero_init = rel_pos_zero_init + if self.rel_pos_spatial: + # initialize relative positional embeddings + assert input_size[0] == input_size[1] + + size = input_size[0] + rel_dim = 2 * max(size // stride_q, size // stride_kv) - 1 + self.rel_pos_h = nn.Parameter(torch.zeros(rel_dim, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_dim, head_dim)) + + def init_weights(self): + """Weight initialization.""" + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress rel_pos_zero_init if use pretrained model. 
+ return + + if not self.rel_pos_zero_init: + trunc_normal_(self.rel_pos_h, std=0.02) + trunc_normal_(self.rel_pos_w, std=0.02) + + def forward(self, x, in_size): + """Forward the MultiScaleAttention.""" + B, N, _ = x.shape # (B, H*W, C) + + # qkv: (B, H*W, 3, num_heads, C) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1) + # q, k, v: (B, num_heads, H*W, C) + q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0) + + q, q_shape = attention_pool(q, self.pool_q, in_size, norm=self.norm_q) + k, k_shape = attention_pool(k, self.pool_k, in_size, norm=self.norm_k) + v, v_shape = attention_pool(v, self.pool_v, in_size, norm=self.norm_v) + + attn = (q * self.scale) @ k.transpose(-2, -1) + if self.rel_pos_spatial: + attn = add_decomposed_rel_pos(attn, q, q_shape, k_shape, + self.rel_pos_h, self.rel_pos_w) + + attn = attn.softmax(dim=-1) + x = attn @ v + + if self.residual_pooling: + x = x + q + + # (B, num_heads, H'*W', C'//num_heads) -> (B, H'*W', C') + x = x.transpose(1, 2).reshape(B, -1, self.out_dims) + x = self.proj(x) + + return x, q_shape + + +class MultiScaleBlock(BaseModule): + """Multiscale Transformer blocks. + + Args: + in_dims (int): Number of input channels. + out_dims (int): Number of output channels. + num_heads (int): Number of attention heads. + mlp_ratio (float): Ratio of hidden dimensions in MLP layers. + Defaults to 4.0. + qkv_bias (bool): If True, add a learnable bias to query, key and + value. Defaults to True. + drop_path (float): Stochastic depth rate. Defaults to 0. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='LN')``. + act_cfg (dict): The config of activation function. + Defaults to ``dict(type='GELU')``. + qkv_pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3). + stride_q (int): stride size for q pooling layer. Defaults to 1. + stride_kv (int): stride size for kv pooling layer. Defaults to 1. + rel_pos_spatial (bool): Whether to enable the spatial relative + position embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + dim_mul_in_attention (bool): Whether to multiply the ``embed_dims`` in + attention layers. If False, multiply it in MLP layers. + Defaults to True. + input_size (Tuple[int], optional): The input resolution, necessary + if enable the ``rel_pos_spatial``. Defaults to None. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. 
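+
+    Note:
+        When ``stride_q > 1``, the identity branch is downsampled by a
+        max-pooling layer so that its resolution matches the pooled
+        attention output before the residual addition.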
+ """ + + def __init__( + self, + in_dims, + out_dims, + num_heads, + mlp_ratio=4.0, + qkv_bias=True, + drop_path=0.0, + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + qkv_pool_kernel=(3, 3), + stride_q=1, + stride_kv=1, + rel_pos_spatial=True, + residual_pooling=True, + dim_mul_in_attention=True, + input_size=None, + rel_pos_zero_init=False, + init_cfg=None, + ): + super().__init__(init_cfg=init_cfg) + self.in_dims = in_dims + self.out_dims = out_dims + self.norm1 = build_norm_layer(norm_cfg, in_dims)[1] + self.dim_mul_in_attention = dim_mul_in_attention + + attn_dims = out_dims if dim_mul_in_attention else in_dims + self.attn = MultiScaleAttention( + in_dims, + attn_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + pool_kernel=qkv_pool_kernel, + stride_q=stride_q, + stride_kv=stride_kv, + rel_pos_spatial=rel_pos_spatial, + residual_pooling=residual_pooling, + input_size=input_size, + rel_pos_zero_init=rel_pos_zero_init) + self.drop_path = DropPath( + drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = build_norm_layer(norm_cfg, attn_dims)[1] + + self.mlp = MLP( + in_channels=attn_dims, + hidden_channels=int(attn_dims * mlp_ratio), + out_channels=out_dims, + act_cfg=act_cfg) + + if in_dims != out_dims: + self.proj = nn.Linear(in_dims, out_dims) + else: + self.proj = None + + if stride_q > 1: + kernel_skip = stride_q + 1 + padding_skip = int(kernel_skip // 2) + self.pool_skip = nn.MaxPool2d( + kernel_skip, stride_q, padding_skip, ceil_mode=False) + + if input_size is not None: + input_size = to_2tuple(input_size) + out_size = [size // stride_q for size in input_size] + self.init_out_size = out_size + else: + self.init_out_size = None + else: + self.pool_skip = None + self.init_out_size = input_size + + def forward(self, x, in_size): + x_norm = self.norm1(x) + x_attn, out_size = self.attn(x_norm, in_size) + + if self.dim_mul_in_attention and self.proj is not None: + skip = self.proj(x_norm) + else: + skip = x + + if self.pool_skip is not None: + skip, _ = attention_pool(skip, self.pool_skip, in_size) + + x = skip + self.drop_path(x_attn) + x_norm = self.norm2(x) + x_mlp = self.mlp(x_norm) + + if not self.dim_mul_in_attention and self.proj is not None: + skip = self.proj(x_norm) + else: + skip = x + + x = skip + self.drop_path(x_mlp) + + return x, out_size + + +@BACKBONES.register_module() +class MViT(BaseBackbone): + """Multi-scale ViT v2. + + A PyTorch implement of : `MViTv2: Improved Multiscale Vision Transformers + for Classification and Detection `_ + + Inspiration from `the official implementation + `_ and `the detectron2 + implementation `_ + + Args: + arch (str | dict): MViT architecture. If use string, choose + from 'tiny', 'small', 'base' and 'large'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of layers. + - **num_heads** (int): The number of heads in attention + modules of the initial layer. + - **downscale_indices** (List[int]): The layer indices to downscale + the feature map. + + Defaults to 'base'. + img_size (int): The expected input image shape. Defaults to 224. + in_channels (int): The num of input channels. Defaults to 3. + out_scales (int | Sequence[int]): The output scale indices. + They should not exceed the length of ``downscale_indices``. + Defaults to -1, which means the last scale. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. 
+ use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + interpolate_mode (str): Select the interpolate mode for absolute + position embedding vector resize. Defaults to "bicubic". + pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3). + dim_mul (int): The magnification for ``embed_dims`` in the downscale + layers. Defaults to 2. + head_mul (int): The magnification for ``num_heads`` in the downscale + layers. Defaults to 2. + adaptive_kv_stride (int): The stride size for kv pooling in the initial + layer. Defaults to 4. + rel_pos_spatial (bool): Whether to enable the spatial relative position + embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + dim_mul_in_attention (bool): Whether to multiply the ``embed_dims`` in + attention layers. If False, multiply it in MLP layers. + Defaults to True. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + mlp_ratio (float): Ratio of hidden dimensions in MLP layers. + Defaults to 4.0. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN', eps=1e-6)``. + patch_cfg (dict): Config dict for the patch embedding layer. + Defaults to ``dict(kernel_size=7, stride=4, padding=3)``. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + + Examples: + >>> import torch + >>> from mmcls.models import build_backbone + >>> + >>> cfg = dict(type='MViT', arch='tiny', out_scales=[0, 1, 2, 3]) + >>> model = build_backbone(cfg) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> outputs = model(inputs) + >>> for i, output in enumerate(outputs): + >>> print(f'scale{i}: {output.shape}') + scale0: torch.Size([1, 96, 56, 56]) + scale1: torch.Size([1, 192, 28, 28]) + scale2: torch.Size([1, 384, 14, 14]) + scale3: torch.Size([1, 768, 7, 7]) + """ + arch_zoo = { + 'tiny': { + 'embed_dims': 96, + 'num_layers': 10, + 'num_heads': 1, + 'downscale_indices': [1, 3, 8] + }, + 'small': { + 'embed_dims': 96, + 'num_layers': 16, + 'num_heads': 1, + 'downscale_indices': [1, 3, 14] + }, + 'base': { + 'embed_dims': 96, + 'num_layers': 24, + 'num_heads': 1, + 'downscale_indices': [2, 5, 21] + }, + 'large': { + 'embed_dims': 144, + 'num_layers': 48, + 'num_heads': 2, + 'downscale_indices': [2, 8, 44] + }, + } + num_extra_tokens = 0 + + def __init__(self, + arch='base', + img_size=224, + in_channels=3, + out_scales=-1, + drop_path_rate=0., + use_abs_pos_embed=False, + interpolate_mode='bicubic', + pool_kernel=(3, 3), + dim_mul=2, + head_mul=2, + adaptive_kv_stride=4, + rel_pos_spatial=True, + residual_pooling=True, + dim_mul_in_attention=True, + rel_pos_zero_init=False, + mlp_ratio=4., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + patch_cfg=dict(kernel_size=7, stride=4, padding=3), + init_cfg=None): + super().__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'downscale_indices' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = 
self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.num_heads = self.arch_settings['num_heads'] + self.downscale_indices = self.arch_settings['downscale_indices'] + self.num_scales = len(self.downscale_indices) + 1 + self.stage_indices = { + index - 1: i + for i, index in enumerate(self.downscale_indices) + } + self.stage_indices[self.num_layers - 1] = self.num_scales - 1 + self.use_abs_pos_embed = use_abs_pos_embed + self.interpolate_mode = interpolate_mode + + if isinstance(out_scales, int): + out_scales = [out_scales] + assert isinstance(out_scales, Sequence), \ + f'"out_scales" must by a sequence or int, ' \ + f'get {type(out_scales)} instead.' + for i, index in enumerate(out_scales): + if index < 0: + out_scales[i] = self.num_scales + index + assert 0 <= out_scales[i] <= self.num_scales, \ + f'Invalid out_scales {index}' + self.out_scales = sorted(list(out_scales)) + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + # Set absolute position embedding + if self.use_abs_pos_embed: + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.embed_dims)) + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.blocks = ModuleList() + out_dims_list = [self.embed_dims] + num_heads = self.num_heads + stride_kv = adaptive_kv_stride + input_size = self.patch_resolution + for i in range(self.num_layers): + if i in self.downscale_indices: + num_heads *= head_mul + stride_q = 2 + stride_kv = max(stride_kv // 2, 1) + else: + stride_q = 1 + + # Set output embed_dims + if dim_mul_in_attention and i in self.downscale_indices: + # multiply embed_dims in downscale layers. + out_dims = out_dims_list[-1] * dim_mul + elif not dim_mul_in_attention and i + 1 in self.downscale_indices: + # multiply embed_dims before downscale layers. + out_dims = out_dims_list[-1] * dim_mul + else: + out_dims = out_dims_list[-1] + + attention_block = MultiScaleBlock( + in_dims=out_dims_list[-1], + out_dims=out_dims, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=dpr[i], + norm_cfg=norm_cfg, + qkv_pool_kernel=pool_kernel, + stride_q=stride_q, + stride_kv=stride_kv, + rel_pos_spatial=rel_pos_spatial, + residual_pooling=residual_pooling, + dim_mul_in_attention=dim_mul_in_attention, + input_size=input_size, + rel_pos_zero_init=rel_pos_zero_init) + self.blocks.append(attention_block) + + input_size = attention_block.init_out_size + out_dims_list.append(out_dims) + + if i in self.stage_indices: + stage_index = self.stage_indices[i] + if stage_index in self.out_scales: + norm_layer = build_norm_layer(norm_cfg, out_dims)[1] + self.add_module(f'norm{stage_index}', norm_layer) + + def init_weights(self): + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. 
+ return + + if self.use_abs_pos_embed: + trunc_normal_(self.pos_embed, std=0.02) + + def forward(self, x): + """Forward the MViT.""" + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + + outs = [] + for i, block in enumerate(self.blocks): + x, patch_resolution = block(x, patch_resolution) + + if i in self.stage_indices: + stage_index = self.stage_indices[i] + if stage_index in self.out_scales: + B, _, C = x.shape + x = getattr(self, f'norm{stage_index}')(x) + out = x.transpose(1, 2).reshape(B, C, *patch_resolution) + outs.append(out.contiguous()) + + return tuple(outs) diff --git a/mmcls/models/backbones/poolformer.py b/mmcls/models/backbones/poolformer.py new file mode 100644 index 0000000..e3fc4e1 --- /dev/null +++ b/mmcls/models/backbones/poolformer.py @@ -0,0 +1,416 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import DropPath, build_activation_layer, build_norm_layer +from mmcv.runner import BaseModule + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class PatchEmbed(nn.Module): + """Patch Embedding module implemented by a layer of convolution. + + Input: tensor in shape [B, C, H, W] + Output: tensor in shape [B, C, H/stride, W/stride] + Args: + patch_size (int): Patch size of the patch embedding. Defaults to 16. + stride (int): Stride of the patch embedding. Defaults to 16. + padding (int): Padding of the patch embedding. Defaults to 0. + in_chans (int): Input channels. Defaults to 3. + embed_dim (int): Output dimension of the patch embedding. + Defaults to 768. + norm_layer (module): Normalization module. Defaults to None (not use). + """ + + def __init__(self, + patch_size=16, + stride=16, + padding=0, + in_chans=3, + embed_dim=768, + norm_layer=None): + super().__init__() + self.proj = nn.Conv2d( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=stride, + padding=padding) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + x = self.proj(x) + x = self.norm(x) + return x + + +class Pooling(nn.Module): + """Pooling module. + + Args: + pool_size (int): Pooling size. Defaults to 3. + """ + + def __init__(self, pool_size=3): + super().__init__() + self.pool = nn.AvgPool2d( + pool_size, + stride=1, + padding=pool_size // 2, + count_include_pad=False) + + def forward(self, x): + return self.pool(x) - x + + +class Mlp(nn.Module): + """Mlp implemented by with 1*1 convolutions. + + Input: Tensor with shape [B, C, H, W]. + Output: Tensor with shape [B, C, H, W]. + Args: + in_features (int): Dimension of input features. + hidden_features (int): Dimension of hidden features. + out_features (int): Dimension of output features. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0.0. 
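+
+    Note:
+        The two 1*1 convolutions act per spatial location, so the module
+        is a position-wise MLP over the channel dimension.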
+ """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_cfg=dict(type='GELU'), + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, 1) + self.act = build_activation_layer(act_cfg) + self.fc2 = nn.Conv2d(hidden_features, out_features, 1) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class PoolFormerBlock(BaseModule): + """PoolFormer Block. + + Args: + dim (int): Embedding dim. + pool_size (int): Pooling size. Defaults to 3. + mlp_ratio (float): Mlp expansion ratio. Defaults to 4. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='GN', num_groups=1)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0. + drop_path (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-5. + """ + + def __init__(self, + dim, + pool_size=3, + mlp_ratio=4., + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + drop=0., + drop_path=0., + layer_scale_init_value=1e-5): + + super().__init__() + + self.norm1 = build_norm_layer(norm_cfg, dim)[1] + self.token_mixer = Pooling(pool_size=pool_size) + self.norm2 = build_norm_layer(norm_cfg, dim)[1] + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + # The following two techniques are useful to train deep PoolFormers. + self.drop_path = DropPath(drop_path) if drop_path > 0. \ + else nn.Identity() + self.layer_scale_1 = nn.Parameter( + layer_scale_init_value * torch.ones((dim)), requires_grad=True) + self.layer_scale_2 = nn.Parameter( + layer_scale_init_value * torch.ones((dim)), requires_grad=True) + + def forward(self, x): + x = x + self.drop_path( + self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * + self.token_mixer(self.norm1(x))) + x = x + self.drop_path( + self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * + self.mlp(self.norm2(x))) + return x + + +def basic_blocks(dim, + index, + layers, + pool_size=3, + mlp_ratio=4., + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + drop_rate=.0, + drop_path_rate=0., + layer_scale_init_value=1e-5): + """ + generate PoolFormer blocks for a stage + return: PoolFormer blocks + """ + blocks = [] + for block_idx in range(layers[index]): + block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / ( + sum(layers) - 1) + blocks.append( + PoolFormerBlock( + dim, + pool_size=pool_size, + mlp_ratio=mlp_ratio, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop=drop_rate, + drop_path=block_dpr, + layer_scale_init_value=layer_scale_init_value, + )) + blocks = nn.Sequential(*blocks) + + return blocks + + +@BACKBONES.register_module() +class PoolFormer(BaseBackbone): + """PoolFormer. + + A PyTorch implementation of PoolFormer introduced by: + `MetaFormer is Actually What You Need for Vision `_ + + Modified from the `official repo + `. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``PoolFormer.arch_settings``. And if dict, it + should include the following two keys: + + - layers (list[int]): Number of blocks at each stage. 
+ - embed_dims (list[int]): The number of channels at each stage. + - mlp_ratios (list[int]): Expansion ratio of MLPs. + - layer_scale_init_value (float): Init value for Layer Scale. + + Defaults to 'S12'. + + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='LN2d', eps=1e-6)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + in_patch_size (int): The patch size of input image patch embedding. + Defaults to 7. + in_stride (int): The stride of input image patch embedding. + Defaults to 4. + in_pad (int): The padding of input image patch embedding. + Defaults to 2. + down_patch_size (int): The patch size of downsampling patch embedding. + Defaults to 3. + down_stride (int): The stride of downsampling patch embedding. + Defaults to 2. + down_pad (int): The padding of downsampling patch embedding. + Defaults to 1. + drop_rate (float): Dropout rate. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + out_indices (Sequence | int): Output from which network position. + Index 0-6 respectively corresponds to + [stage1, downsampling, stage2, downsampling, stage3, downsampling, stage4] + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + init_cfg (dict, optional): Initialization config dict + """ # noqa: E501 + + # --layers: [x,x,x,x], numbers of layers for the four stages + # --embed_dims, --mlp_ratios: + # embedding dims and mlp ratios for the four stages + # --downsamples: flags to apply downsampling or not in four blocks + arch_settings = { + 's12': { + 'layers': [2, 2, 6, 2], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-5, + }, + 's24': { + 'layers': [4, 4, 12, 4], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-5, + }, + 's36': { + 'layers': [6, 6, 18, 6], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + 'm36': { + 'layers': [6, 6, 18, 6], + 'embed_dims': [96, 192, 384, 768], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + 'm48': { + 'layers': [8, 8, 24, 8], + 'embed_dims': [96, 192, 384, 768], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + } + + def __init__(self, + arch='s12', + pool_size=3, + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + in_patch_size=7, + in_stride=4, + in_pad=2, + down_patch_size=3, + down_stride=2, + down_pad=1, + drop_rate=0., + drop_path_rate=0., + out_indices=-1, + frozen_stages=0, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + assert 'layers' in arch and 'embed_dims' in arch, \ + f'The arch dict must have "layers" and "embed_dims", ' \ + f'but got {list(arch.keys())}.' 
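+        # An illustrative custom ``arch`` dict, equivalent to the built-in
+        # 's12' preset:
+        #   dict(layers=[2, 2, 6, 2], embed_dims=[64, 128, 320, 512],
+        #        mlp_ratios=[4, 4, 4, 4], layer_scale_init_value=1e-5)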
+ + layers = arch['layers'] + embed_dims = arch['embed_dims'] + mlp_ratios = arch['mlp_ratios'] \ + if 'mlp_ratios' in arch else [4, 4, 4, 4] + layer_scale_init_value = arch['layer_scale_init_value'] \ + if 'layer_scale_init_value' in arch else 1e-5 + + self.patch_embed = PatchEmbed( + patch_size=in_patch_size, + stride=in_stride, + padding=in_pad, + in_chans=3, + embed_dim=embed_dims[0]) + + # set the main block in network + network = [] + for i in range(len(layers)): + stage = basic_blocks( + embed_dims[i], + i, + layers, + pool_size=pool_size, + mlp_ratio=mlp_ratios[i], + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value) + network.append(stage) + if i >= len(layers) - 1: + break + if embed_dims[i] != embed_dims[i + 1]: + # downsampling between two stages + network.append( + PatchEmbed( + patch_size=down_patch_size, + stride=down_stride, + padding=down_pad, + in_chans=embed_dims[i], + embed_dim=embed_dims[i + 1])) + + self.network = nn.ModuleList(network) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 7 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + if self.out_indices: + for i_layer in self.out_indices: + layer = build_norm_layer(norm_cfg, + embed_dims[(i_layer + 1) // 2])[1] + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + self.frozen_stages = frozen_stages + self._freeze_stages() + + def forward_embeddings(self, x): + x = self.patch_embed(x) + return x + + def forward_tokens(self, x): + outs = [] + for idx, block in enumerate(self.network): + x = block(x) + if idx in self.out_indices: + norm_layer = getattr(self, f'norm{idx}') + x_out = norm_layer(x) + outs.append(x_out) + return tuple(outs) + + def forward(self, x): + # input embedding + x = self.forward_embeddings(x) + # through backbone + x = self.forward_tokens(x) + return x + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + # Include both block and downsample layer. + module = self.network[i] + module.eval() + for param in module.parameters(): + param.requires_grad = False + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(PoolFormer, self).train(mode) + self._freeze_stages() diff --git a/mmcls/models/backbones/regnet.py b/mmcls/models/backbones/regnet.py new file mode 100644 index 0000000..036b699 --- /dev/null +++ b/mmcls/models/backbones/regnet.py @@ -0,0 +1,323 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import ResNet +from .resnext import Bottleneck + + +@BACKBONES.register_module() +class RegNet(ResNet): + """RegNet backbone. + + More details can be found in `paper `_ . + + Args: + arch (dict): The parameter of RegNets. 
+ - w0 (int): initial width + - wa (float): slope of width + - wm (float): quantization parameter to quantize the width + - depth (int): depth of the backbone + - group_w (int): width of group + - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. + strides (Sequence[int]): Strides of the first block of each stage. + base_channels (int): Base channels after stem layer. + in_channels (int): Number of input image channels. Default: 3. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: "pytorch". + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmcls.models import RegNet + >>> import torch + >>> inputs = torch.rand(1, 3, 32, 32) + >>> # use str type 'arch' + >>> # Note that default out_indices is (3,) + >>> regnet_cfg = dict(arch='regnetx_4.0gf') + >>> model = RegNet(**regnet_cfg) + >>> model.eval() + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 1360, 1, 1) + >>> # use dict type 'arch' + >>> arch_cfg =dict(w0=88, wa=26.31, wm=2.25, + >>> group_w=48, depth=25, bot_mul=1.0) + >>> regnet_cfg = dict(arch=arch_cfg, out_indices=(0, 1, 2, 3)) + >>> model = RegNet(**regnet_cfg) + >>> model.eval() + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 96, 8, 8) + (1, 192, 4, 4) + (1, 432, 2, 2) + (1, 1008, 1, 1) + """ + + arch_settings = { + 'regnetx_400mf': + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + 'regnetx_800mf': + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), + 'regnetx_1.6gf': + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), + 'regnetx_3.2gf': + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), + 'regnetx_4.0gf': + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), + 'regnetx_6.4gf': + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), + 'regnetx_8.0gf': + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), + 'regnetx_12gf': + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), + } + + def __init__( + self, + arch, + in_channels=3, + stem_channels=32, + base_channels=32, + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=None, + ): + super(ResNet, self).__init__(init_cfg) + + # Generate RegNet parameters first + if isinstance(arch, str): + assert arch in self.arch_settings, ( + f'"arch": "{arch}" is not one of the' + ' arch_settings') + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + widths, num_stages = self.generate_regnet( + arch['w0'], + arch['wa'], + arch['wm'], + arch['depth'], + ) + # Convert to per stage format + stage_widths, stage_blocks = self.get_stages_from_blocks(widths) + # Generate group widths and bot muls + group_widths = [arch['group_w'] for _ in range(num_stages)] + self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] + # Adjust the compatibility of stage_widths and group_widths + stage_widths, group_widths = self.adjust_width_group( + stage_widths, self.bottleneck_ratio, group_widths) + + # Group params by stage + self.stage_widths = stage_widths + self.group_widths = group_widths + self.depth = sum(stage_blocks) + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + if self.deep_stem: + raise NotImplementedError( + 'deep_stem has not been implemented for RegNet') + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.stage_blocks = stage_blocks[:num_stages] + + self._make_stem_layer(in_channels, stem_channels) + + _in_channels = stem_channels + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + group_width = self.group_widths[i] + width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) + stage_groups = width // group_width + + res_layer = self.make_res_layer( + block=Bottleneck, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=self.stage_widths[i], + expansion=1, + 
stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + base_channels=self.stage_widths[i], + groups=stage_groups, + width_per_group=group_width, + ) + _in_channels = self.stage_widths[i] + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = stage_widths[-1] + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False, + ) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def generate_regnet(self, + initial_width, + width_slope, + width_parameter, + depth, + divisor=8): + """Generates per block width from RegNet parameters. + + Args: + initial_width ([int]): Initial width of the backbone + width_slope ([float]): Slope of the quantized linear function + width_parameter ([int]): Parameter used to quantize the width. + depth ([int]): Depth of the backbone. + divisor (int): The divisor of channels. Defaults to 8. + + Returns: + tuple: tuple containing: + - list: Widths of each stage. + - int: The number of stages. + """ + assert width_slope >= 0 + assert initial_width > 0 + assert width_parameter > 1 + assert initial_width % divisor == 0 + widths_cont = np.arange(depth) * width_slope + initial_width + ks = np.round( + np.log(widths_cont / initial_width) / np.log(width_parameter)) + widths = initial_width * np.power(width_parameter, ks) + widths = np.round(np.divide(widths, divisor)) * divisor + num_stages = len(np.unique(widths)) + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages + + @staticmethod + def quantize_float(number, divisor): + """Converts a float to closest non-zero int divisible by divior. + + Args: + number (int): Original number to be quantized. + divisor (int): Divisor used to quantize the number. + + Returns: + int: quantized number that is divisible by devisor. + """ + return int(round(number / divisor) * divisor) + + def adjust_width_group(self, widths, bottleneck_ratio, groups): + """Adjusts the compatibility of widths and groups. + + Args: + widths (list[int]): Width of each stage. + bottleneck_ratio (float): Bottleneck ratio. + groups (int): number of groups in each stage + + Returns: + tuple(list): The adjusted widths and groups of each stage. + """ + bottleneck_width = [ + int(w * b) for w, b in zip(widths, bottleneck_ratio) + ] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] + bottleneck_width = [ + self.quantize_float(w_bot, g) + for w_bot, g in zip(bottleneck_width, groups) + ] + widths = [ + int(w_bot / b) + for w_bot, b in zip(bottleneck_width, bottleneck_ratio) + ] + return widths, groups + + def get_stages_from_blocks(self, widths): + """Gets widths/stage_blocks of network at each stage. + + Args: + widths (list[int]): Width in each stage. 
+ + Returns: + tuple(list): width and depth of each stage + """ + width_diff = [ + width != width_prev + for width, width_prev in zip(widths + [0], [0] + widths) + ] + stage_widths = [ + width for width, diff in zip(widths, width_diff[:-1]) if diff + ] + stage_blocks = np.diff([ + depth for depth, diff in zip(range(len(width_diff)), width_diff) + if diff + ]).tolist() + return stage_widths, stage_blocks + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/mmcls/models/backbones/repmlp.py b/mmcls/models/backbones/repmlp.py new file mode 100644 index 0000000..9e6e2ed --- /dev/null +++ b/mmcls/models/backbones/repmlp.py @@ -0,0 +1,578 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from official impl at https://github.com/DingXiaoH/RepMLP. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmcv.cnn.bricks.transformer import PatchEmbed as _PatchEmbed +from mmcv.runner import BaseModule, ModuleList, Sequential + +from mmcls.models.builder import BACKBONES +from mmcls.models.utils import SELayer, to_2tuple + + +def fuse_bn(conv_or_fc, bn): + """fuse conv and bn.""" + std = (bn.running_var + bn.eps).sqrt() + tmp_weight = bn.weight / std + tmp_weight = tmp_weight.reshape(-1, 1, 1, 1) + + if len(tmp_weight) == conv_or_fc.weight.size(0): + return (conv_or_fc.weight * tmp_weight, + bn.bias - bn.running_mean * bn.weight / std) + else: + # in RepMLPBlock, dim0 of fc3 weights and fc3_bn weights + # are different. + repeat_times = conv_or_fc.weight.size(0) // len(tmp_weight) + repeated = tmp_weight.repeat_interleave(repeat_times, 0) + fused_weight = conv_or_fc.weight * repeated + bias = bn.bias - bn.running_mean * bn.weight / std + fused_bias = (bias).repeat_interleave(repeat_times, 0) + return (fused_weight, fused_bias) + + +class PatchEmbed(_PatchEmbed): + """Image to Patch Embedding. + + Compared with default Patch Embedding(in ViT), Patch Embedding of RepMLP + have ReLu and do not convert output tensor into shape (N, L, C). + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The type of convolution + to generate patch embedding. Default: "Conv2d". + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: 16. + padding (int | tuple | string): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only works when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. + """ + + def __init__(self, *args, **kwargs): + super(PatchEmbed, self).__init__(*args, **kwargs) + self.relu = nn.ReLU() + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. 
+        Returns:
+            tuple: Contains merged results and its spatial shape.
+
+            - x (Tensor): The output tensor.
+            - out_size (tuple[int]): Spatial shape of x, arranged as
+              (out_h, out_w).
+        """
+
+        if self.adaptive_padding:
+            x = self.adaptive_padding(x)
+
+        x = self.projection(x)
+        if self.norm is not None:
+            x = self.norm(x)
+        x = self.relu(x)
+        out_size = (x.shape[2], x.shape[3])
+        return x, out_size
+
+
+class GlobalPerceptron(SELayer):
+    """GlobalPerceptron implemented by using ``mmcls.models.SELayer``.
+
+    Args:
+        input_channels (int): The number of input (and output) channels
+            in the GlobalPerceptron.
+        ratio (int): Squeeze ratio in GlobalPerceptron, the intermediate
+            channel will be ``make_divisible(channels // ratio, divisor)``.
+    """
+
+    def __init__(self, input_channels: int, ratio: int, **kwargs) -> None:
+        super(GlobalPerceptron, self).__init__(
+            channels=input_channels,
+            ratio=ratio,
+            return_weight=True,
+            act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
+            **kwargs)
+
+
+class RepMLPBlock(BaseModule):
+    """Basic block of RepMLPNet, consisting of a PartitionPerceptron and a
+    GlobalPerceptron.
+
+    Args:
+        channels (int): The number of input and the output channels of the
+            block.
+        path_h (int): The height of patches.
+        path_w (int): The width of patches.
+        reparam_conv_kernels (Sequence[int] | None): The kernel sizes of the
+            conv branches in the Local Perceptron, which are re-parameterized
+            into the PartitionPerceptron at deployment. Default: None.
+        globalperceptron_ratio (int): The reduction ratio in the
+            GlobalPerceptron. Default: 4.
+        num_sharesets (int): The number of sharesets in the
+            PartitionPerceptron. Default: 1.
+        conv_cfg (dict, optional): Config dict for convolution layer.
+            Default: None, which means using conv2d.
+        norm_cfg (dict): dictionary to construct and config norm layer.
+            Default: dict(type='BN', requires_grad=True).
+        deploy (bool): Whether to switch the model structure to
+            deployment mode. Default: False.
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+ Default: None + """ + + def __init__(self, + channels, + path_h, + path_w, + reparam_conv_kernels=None, + globalperceptron_ratio=4, + num_sharesets=1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + deploy=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.deploy = deploy + self.channels = channels + self.num_sharesets = num_sharesets + self.path_h, self.path_w = path_h, path_w + # the input channel of fc3 + self._path_vec_channles = path_h * path_w * num_sharesets + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.gp = GlobalPerceptron( + input_channels=channels, ratio=globalperceptron_ratio) + + # using a conv layer to implement a fc layer + self.fc3 = build_conv_layer( + conv_cfg, + in_channels=self._path_vec_channles, + out_channels=self._path_vec_channles, + kernel_size=1, + stride=1, + padding=0, + bias=deploy, + groups=num_sharesets) + if deploy: + self.fc3_bn = nn.Identity() + else: + norm_layer = build_norm_layer(norm_cfg, num_sharesets)[1] + self.add_module('fc3_bn', norm_layer) + + self.reparam_conv_kernels = reparam_conv_kernels + if not deploy and reparam_conv_kernels is not None: + for k in reparam_conv_kernels: + conv_branch = ConvModule( + in_channels=num_sharesets, + out_channels=num_sharesets, + kernel_size=k, + stride=1, + padding=k // 2, + norm_cfg=dict(type='BN', requires_grad=True), + groups=num_sharesets, + act_cfg=None) + self.__setattr__('repconv{}'.format(k), conv_branch) + + def partition(self, x, h_parts, w_parts): + # convert (N, C, H, W) to (N, h_parts, w_parts, C, path_h, path_w) + x = x.reshape(-1, self.channels, h_parts, self.path_h, w_parts, + self.path_w) + x = x.permute(0, 2, 4, 1, 3, 5) + return x + + def partition_affine(self, x, h_parts, w_parts): + """perform Partition Perceptron.""" + fc_inputs = x.reshape(-1, self._path_vec_channles, 1, 1) + out = self.fc3(fc_inputs) + out = out.reshape(-1, self.num_sharesets, self.path_h, self.path_w) + out = self.fc3_bn(out) + out = out.reshape(-1, h_parts, w_parts, self.num_sharesets, + self.path_h, self.path_w) + return out + + def forward(self, inputs): + # Global Perceptron + global_vec = self.gp(inputs) + + origin_shape = inputs.size() + h_parts = origin_shape[2] // self.path_h + w_parts = origin_shape[3] // self.path_w + + partitions = self.partition(inputs, h_parts, w_parts) + + # Channel Perceptron + fc3_out = self.partition_affine(partitions, h_parts, w_parts) + + # perform Local Perceptron + if self.reparam_conv_kernels is not None and not self.deploy: + conv_inputs = partitions.reshape(-1, self.num_sharesets, + self.path_h, self.path_w) + conv_out = 0 + for k in self.reparam_conv_kernels: + conv_branch = self.__getattr__('repconv{}'.format(k)) + conv_out += conv_branch(conv_inputs) + conv_out = conv_out.reshape(-1, h_parts, w_parts, + self.num_sharesets, self.path_h, + self.path_w) + fc3_out += conv_out + + # N, h_parts, w_parts, num_sharesets, out_h, out_w + fc3_out = fc3_out.permute(0, 3, 1, 4, 2, 5) + out = fc3_out.reshape(*origin_shape) + out = out * global_vec + return out + + def get_equivalent_fc3(self): + """get the equivalent fc3 weight and bias.""" + fc_weight, fc_bias = fuse_bn(self.fc3, self.fc3_bn) + if self.reparam_conv_kernels is not None: + largest_k = max(self.reparam_conv_kernels) + largest_branch = self.__getattr__('repconv{}'.format(largest_k)) + total_kernel, total_bias = fuse_bn(largest_branch.conv, + largest_branch.bn) + for k in self.reparam_conv_kernels: + if k != largest_k: + k_branch = 
self.__getattr__('repconv{}'.format(k))
+                    kernel, bias = fuse_bn(k_branch.conv, k_branch.bn)
+                    total_kernel += F.pad(kernel, [(largest_k - k) // 2] * 4)
+                    total_bias += bias
+            rep_weight, rep_bias = self._convert_conv_to_fc(
+                total_kernel, total_bias)
+            final_fc3_weight = rep_weight.reshape_as(fc_weight) + fc_weight
+            final_fc3_bias = rep_bias + fc_bias
+        else:
+            final_fc3_weight = fc_weight
+            final_fc3_bias = fc_bias
+        return final_fc3_weight, final_fc3_bias
+
+    def local_inject(self):
+        """Inject the Local Perceptron into the Partition Perceptron."""
+        self.deploy = True
+        # Locality Injection
+        fc3_weight, fc3_bias = self.get_equivalent_fc3()
+        # Remove Local Perceptron
+        if self.reparam_conv_kernels is not None:
+            for k in self.reparam_conv_kernels:
+                self.__delattr__('repconv{}'.format(k))
+        self.__delattr__('fc3')
+        self.__delattr__('fc3_bn')
+        self.fc3 = build_conv_layer(
+            self.conv_cfg,
+            self._path_vec_channles,
+            self._path_vec_channles,
+            1,
+            1,
+            0,
+            bias=True,
+            groups=self.num_sharesets)
+        self.fc3_bn = nn.Identity()
+        self.fc3.weight.data = fc3_weight
+        self.fc3.bias.data = fc3_bias
+
+    def _convert_conv_to_fc(self, conv_kernel, conv_bias):
+        """Convert a conv kernel into the equivalent fc3 (1x1 group conv)
+        weight by convolving an identity tensor."""
+        in_channels = torch.eye(self.path_h * self.path_w).repeat(
+            1, self.num_sharesets).reshape(self.path_h * self.path_w,
+                                           self.num_sharesets, self.path_h,
+                                           self.path_w).to(conv_kernel.device)
+        fc_k = F.conv2d(
+            in_channels,
+            conv_kernel,
+            padding=(conv_kernel.size(2) // 2, conv_kernel.size(3) // 2),
+            groups=self.num_sharesets)
+        fc_k = fc_k.reshape(self.path_h * self.path_w, self.num_sharesets *
+                            self.path_h * self.path_w).t()
+        fc_bias = conv_bias.repeat_interleave(self.path_h * self.path_w)
+        return fc_k, fc_bias
+
+
+class RepMLPNetUnit(BaseModule):
+    """A basic unit of RepMLPNet: [RepMLPBlock + BN + ConvFFN + BN].
+
+    Args:
+        channels (int): The number of input and the output channels of the
+            unit.
+        path_h (int): The height of patches.
+        path_w (int): The width of patches.
+        reparam_conv_kernels (Sequence[int] | None): The kernel sizes of the
+            conv branches in the Local Perceptron. Default: None.
+        globalperceptron_ratio (int): The reduction ratio in the
+            GlobalPerceptron. Default: 4.
+        num_sharesets (int): The number of sharesets in the
+            PartitionPerceptron. Default: 1.
+        ffn_expand (int): The expand ratio of the hidden channels in the
+            ConvFFN. Default: 4.
+        norm_cfg (dict): dictionary to construct and config norm layer.
+            Default: dict(type='BN', requires_grad=True).
+        deploy (bool): Whether to switch the model structure to
+            deployment mode. Default: False.
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+ Default: None + """ + + def __init__(self, + channels, + path_h, + path_w, + reparam_conv_kernels, + globalperceptron_ratio, + norm_cfg=dict(type='BN', requires_grad=True), + ffn_expand=4, + num_sharesets=1, + deploy=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.repmlp_block = RepMLPBlock( + channels=channels, + path_h=path_h, + path_w=path_w, + reparam_conv_kernels=reparam_conv_kernels, + globalperceptron_ratio=globalperceptron_ratio, + num_sharesets=num_sharesets, + deploy=deploy) + self.ffn_block = ConvFFN(channels, channels * ffn_expand) + norm1 = build_norm_layer(norm_cfg, channels)[1] + self.add_module('norm1', norm1) + norm2 = build_norm_layer(norm_cfg, channels)[1] + self.add_module('norm2', norm2) + + def forward(self, x): + y = x + self.repmlp_block(self.norm1(x)) + out = y + self.ffn_block(self.norm2(y)) + return out + + +class ConvFFN(nn.Module): + """ConvFFN implemented by using point-wise convs.""" + + def __init__(self, + in_channels, + hidden_channels=None, + out_channels=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='GELU')): + super().__init__() + out_features = out_channels or in_channels + hidden_features = hidden_channels or in_channels + self.ffn_fc1 = ConvModule( + in_channels=in_channels, + out_channels=hidden_features, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=norm_cfg, + act_cfg=None) + self.ffn_fc2 = ConvModule( + in_channels=hidden_features, + out_channels=out_features, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=norm_cfg, + act_cfg=None) + self.act = build_activation_layer(act_cfg) + + def forward(self, x): + x = self.ffn_fc1(x) + x = self.act(x) + x = self.ffn_fc2(x) + return x + + +@BACKBONES.register_module() +class RepMLPNet(BaseModule): + """RepMLPNet backbone. + + A PyTorch impl of : `RepMLP: Re-parameterizing Convolutions into + Fully-connected Layers for Image Recognition + `_ + + Args: + arch (str | dict): RepMLP architecture. If use string, choose + from 'base' and 'b'. If use dict, it should have below keys: + + - channels (List[int]): Number of blocks in each stage. + - depths (List[int]): The number of blocks in each branch. + - sharesets_nums (List[int]): RepVGG Block that declares + the need to apply group convolution. + + img_size (int | tuple): The size of input image. Defaults: 224. + in_channels (int): Number of input image channels. Default: 3. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 4. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + reparam_conv_kernels (Squeue(int) | None): The conv kernels in the + GlobalPerceptron. Default: None. + globalperceptron_ratio (int): The reducation ratio in the + GlobalPerceptron. Default: 4. + num_sharesets (int): The number of sharesets in the + PartitionPerceptron. Default 1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + Default: dict(type='BN', requires_grad=True). + patch_cfg (dict): Extra config dict for patch embedding. + Defaults to an empty dict. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + deploy (bool): Whether to switch the model structure to deployment + mode. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
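+
+    Example:
+        >>> # A minimal usage sketch; the printed shape assumes the default
+        >>> # 224x224 input, the 'b' arch and ``out_indices=(3, )``.
+        >>> import torch
+        >>> from mmcls.models import RepMLPNet
+        >>> model = RepMLPNet(arch='b', img_size=224, out_indices=(3, ))
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> level_outputs = model(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 768, 7, 7)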
+ """ + arch_zoo = { + **dict.fromkeys(['b', 'base'], + {'channels': [96, 192, 384, 768], + 'depths': [2, 2, 12, 2], + 'sharesets_nums': [1, 4, 32, 128]}), + } # yapf: disable + + num_extra_tokens = 0 # there is no cls-token in RepMLP + + def __init__(self, + arch, + img_size=224, + in_channels=3, + patch_size=4, + out_indices=(3, ), + reparam_conv_kernels=(3, ), + globalperceptron_ratio=4, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + patch_cfg=dict(), + final_norm=True, + deploy=False, + init_cfg=None): + super(RepMLPNet, self).__init__(init_cfg=init_cfg) + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'channels', 'depths', 'sharesets_nums'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}.' + self.arch_settings = arch + + self.img_size = to_2tuple(img_size) + self.patch_size = to_2tuple(patch_size) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.num_stage = len(self.arch_settings['channels']) + for value in self.arch_settings.values(): + assert isinstance(value, list) and len(value) == self.num_stage, ( + 'Length of setting item in arch dict must be type of list and' + ' have the same length.') + + self.channels = self.arch_settings['channels'] + self.depths = self.arch_settings['depths'] + self.sharesets_nums = self.arch_settings['sharesets_nums'] + + _patch_cfg = dict( + in_channels=in_channels, + input_size=self.img_size, + embed_dims=self.channels[0], + conv_type='Conv2d', + kernel_size=self.patch_size, + stride=self.patch_size, + norm_cfg=self.norm_cfg, + bias=False) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + self.patch_hs = [ + self.patch_resolution[0] // 2**i for i in range(self.num_stage) + ] + self.patch_ws = [ + self.patch_resolution[1] // 2**i for i in range(self.num_stage) + ] + + self.stages = ModuleList() + self.downsample_layers = ModuleList() + for stage_idx in range(self.num_stage): + # make stage layers + _stage_cfg = dict( + channels=self.channels[stage_idx], + path_h=self.patch_hs[stage_idx], + path_w=self.patch_ws[stage_idx], + reparam_conv_kernels=reparam_conv_kernels, + globalperceptron_ratio=globalperceptron_ratio, + norm_cfg=self.norm_cfg, + ffn_expand=4, + num_sharesets=self.sharesets_nums[stage_idx], + deploy=deploy) + stage_blocks = [ + RepMLPNetUnit(**_stage_cfg) + for _ in range(self.depths[stage_idx]) + ] + self.stages.append(Sequential(*stage_blocks)) + + # make downsample layers + if stage_idx < self.num_stage - 1: + self.downsample_layers.append( + ConvModule( + in_channels=self.channels[stage_idx], + out_channels=self.channels[stage_idx + 1], + kernel_size=2, + stride=2, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + + self.out_indice = out_indices + + if final_norm: + norm_layer = build_norm_layer(norm_cfg, self.channels[-1])[1] + else: + norm_layer = nn.Identity() + self.add_module('final_norm', norm_layer) + + def forward(self, x): + assert x.shape[2:] == self.img_size, \ + "The Rep-MLP doesn't support dynamic input shape. 
" \ + f'Please input images with shape {self.img_size}' + + outs = [] + + x, _ = self.patch_embed(x) + for i, stage in enumerate(self.stages): + x = stage(x) + + # downsample after each stage except last stage + if i < len(self.stages) - 1: + downsample = self.downsample_layers[i] + x = downsample(x) + + if i in self.out_indice: + if self.final_norm and i == len(self.stages) - 1: + out = self.final_norm(x) + else: + out = x + outs.append(out) + + return tuple(outs) + + def switch_to_deploy(self): + for m in self.modules(): + if hasattr(m, 'local_inject'): + m.local_inject() diff --git a/mmcls/models/backbones/repvgg.py b/mmcls/models/backbones/repvgg.py new file mode 100644 index 0000000..bbdbda2 --- /dev/null +++ b/mmcls/models/backbones/repvgg.py @@ -0,0 +1,619 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmcv.runner import BaseModule, Sequential +from mmcv.utils.parrots_wrapper import _BatchNorm +from torch import nn + +from ..builder import BACKBONES +from ..utils.se_layer import SELayer +from .base_backbone import BaseBackbone + + +class RepVGGBlock(BaseModule): + """RepVGG block for RepVGG backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 and 1x1 convolution layer. Default: 1. + padding (int): Padding of the 3x3 convolution layer. + dilation (int): Dilation of the 3x3 convolution layer. + groups (int): Groups of the 3x3 and 1x1 convolution layer. Default: 1. + padding_mode (str): Padding mode of the 3x3 convolution layer. + Default: 'zeros'. + se_cfg (None or dict): The configuration of the se module. + Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + deploy (bool): Whether to switch the model structure to + deployment mode. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + padding=1, + dilation=1, + groups=1, + padding_mode='zeros', + se_cfg=None, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + deploy=False, + init_cfg=None): + super(RepVGGBlock, self).__init__(init_cfg) + + assert se_cfg is None or isinstance(se_cfg, dict) + + self.in_channels = in_channels + self.out_channels = out_channels + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.se_cfg = se_cfg + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.deploy = deploy + + if deploy: + self.branch_reparam = build_conv_layer( + conv_cfg, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=True, + padding_mode=padding_mode) + else: + # judge if input shape and output shape are the same. + # If true, add a normalized identity shortcut. 
+ if out_channels == in_channels and stride == 1 and \ + padding == dilation: + self.branch_norm = build_norm_layer(norm_cfg, in_channels)[1] + else: + self.branch_norm = None + + self.branch_3x3 = self.create_conv_bn( + kernel_size=3, + dilation=dilation, + padding=padding, + ) + self.branch_1x1 = self.create_conv_bn(kernel_size=1) + + if se_cfg is not None: + self.se_layer = SELayer(channels=out_channels, **se_cfg) + else: + self.se_layer = None + + self.act = build_activation_layer(act_cfg) + + def create_conv_bn(self, kernel_size, dilation=1, padding=0): + conv_bn = Sequential() + conv_bn.add_module( + 'conv', + build_conv_layer( + self.conv_cfg, + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=kernel_size, + stride=self.stride, + dilation=dilation, + padding=padding, + groups=self.groups, + bias=False)) + conv_bn.add_module( + 'norm', + build_norm_layer(self.norm_cfg, num_features=self.out_channels)[1]) + + return conv_bn + + def forward(self, x): + + def _inner_forward(inputs): + if self.deploy: + return self.branch_reparam(inputs) + + if self.branch_norm is None: + branch_norm_out = 0 + else: + branch_norm_out = self.branch_norm(inputs) + + inner_out = self.branch_3x3(inputs) + self.branch_1x1( + inputs) + branch_norm_out + + if self.se_cfg is not None: + inner_out = self.se_layer(inner_out) + + return inner_out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.act(out) + + return out + + def switch_to_deploy(self): + """Switch the model structure from training mode to deployment mode.""" + if self.deploy: + return + assert self.norm_cfg['type'] == 'BN', \ + "Switch is not allowed when norm_cfg['type'] != 'BN'." + + reparam_weight, reparam_bias = self.reparameterize() + self.branch_reparam = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.out_channels, + kernel_size=3, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + bias=True) + self.branch_reparam.weight.data = reparam_weight + self.branch_reparam.bias.data = reparam_bias + + for param in self.parameters(): + param.detach_() + delattr(self, 'branch_3x3') + delattr(self, 'branch_1x1') + delattr(self, 'branch_norm') + + self.deploy = True + + def reparameterize(self): + """Fuse all the parameters of all branches. + + Returns: + tuple[torch.Tensor, torch.Tensor]: Parameters after fusion of all + branches. the first element is the weights and the second is + the bias. + """ + weight_3x3, bias_3x3 = self._fuse_conv_bn(self.branch_3x3) + weight_1x1, bias_1x1 = self._fuse_conv_bn(self.branch_1x1) + # pad a conv1x1 weight to a conv3x3 weight + weight_1x1 = F.pad(weight_1x1, [1, 1, 1, 1], value=0) + + weight_norm, bias_norm = 0, 0 + if self.branch_norm: + tmp_conv_bn = self._norm_to_conv3x3(self.branch_norm) + weight_norm, bias_norm = self._fuse_conv_bn(tmp_conv_bn) + + return (weight_3x3 + weight_1x1 + weight_norm, + bias_3x3 + bias_1x1 + bias_norm) + + def _fuse_conv_bn(self, branch): + """Fuse the parameters in a branch with a conv and bn. + + Args: + branch (mmcv.runner.Sequential): A branch with conv and bn. + + Returns: + tuple[torch.Tensor, torch.Tensor]: The parameters obtained after + fusing the parameters of conv and bn in one branch. + The first element is the weight and the second is the bias. 
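+
+        Note:
+            This is the standard BN-folding identity computed below: with
+            ``std = sqrt(running_var + eps)``, the fused weight is
+            ``conv_weight * (gamma / std)`` (scaled per output channel) and
+            the fused bias is ``beta - running_mean * gamma / std``.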
+ """ + if branch is None: + return 0, 0 + conv_weight = branch.conv.weight + running_mean = branch.norm.running_mean + running_var = branch.norm.running_var + gamma = branch.norm.weight + beta = branch.norm.bias + eps = branch.norm.eps + + std = (running_var + eps).sqrt() + fused_weight = (gamma / std).reshape(-1, 1, 1, 1) * conv_weight + fused_bias = -running_mean * gamma / std + beta + + return fused_weight, fused_bias + + def _norm_to_conv3x3(self, branch_norm): + """Convert a norm layer to a conv3x3-bn sequence. + + Args: + branch (nn.BatchNorm2d): A branch only with bn in the block. + + Returns: + tmp_conv3x3 (mmcv.runner.Sequential): a sequential with conv3x3 and + bn. + """ + input_dim = self.in_channels // self.groups + conv_weight = torch.zeros((self.in_channels, input_dim, 3, 3), + dtype=branch_norm.weight.dtype) + + for i in range(self.in_channels): + conv_weight[i, i % input_dim, 1, 1] = 1 + conv_weight = conv_weight.to(branch_norm.weight.device) + + tmp_conv3x3 = self.create_conv_bn(kernel_size=3) + tmp_conv3x3.conv.weight.data = conv_weight + tmp_conv3x3.norm = branch_norm + return tmp_conv3x3 + + +class MTSPPF(nn.Module): + """MTSPPF block for YOLOX-PAI RepVGG backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + kernel_size (int): Kernel size of pooling. Default: 5. + """ + + def __init__(self, + in_channels, + out_channels, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + kernel_size=5): + super().__init__() + hidden_features = in_channels // 2 # hidden channels + self.conv1 = ConvModule( + in_channels, + hidden_features, + 1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = ConvModule( + hidden_features * 4, + out_channels, + 1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.maxpool = nn.MaxPool2d( + kernel_size=kernel_size, stride=1, padding=kernel_size // 2) + + def forward(self, x): + x = self.conv1(x) + y1 = self.maxpool(x) + y2 = self.maxpool(y1) + return self.conv2(torch.cat([x, y1, y2, self.maxpool(y2)], 1)) + + +@BACKBONES.register_module() +class RepVGG(BaseBackbone): + """RepVGG backbone. + + A PyTorch impl of : `RepVGG: Making VGG-style ConvNets Great Again + `_ + + Args: + arch (str | dict): RepVGG architecture. If use string, + choose from 'A0', 'A1`', 'A2', 'B0', 'B1', 'B1g2', 'B1g4', 'B2' + , 'B2g2', 'B2g4', 'B3', 'B3g2', 'B3g4' or 'D2se'. If use dict, + it should have below keys: + + - num_blocks (Sequence[int]): Number of blocks in each stage. + - width_factor (Sequence[float]): Width deflator in each stage. + - group_layer_map (dict | None): RepVGG Block that declares + the need to apply group convolution. + - se_cfg (dict | None): Se Layer config. + - stem_channels (int, optional): The stem channels, the final + stem channels will be + ``min(stem_channels, base_channels*width_factor[0])``. + If not set here, 64 is used by default in the code. + + in_channels (int): Number of input image channels. Default: 3. + base_channels (int): Base channels of RepVGG backbone, work with + width_factor together. Defaults to 64. + out_indices (Sequence[int]): Output from which stages. Default: (3, ). + strides (Sequence[int]): Strides of the first block of each stage. + Default: (2, 2, 2, 2). + dilations (Sequence[int]): Dilation of each stage. + Default: (1, 1, 1, 1). 
+ frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + deploy (bool): Whether to switch the model structure to deployment + mode. Default: False. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + add_ppf (bool): Whether to use the MTSPPF block. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + groupwise_layers = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26] + g2_layer_map = {layer: 2 for layer in groupwise_layers} + g4_layer_map = {layer: 4 for layer in groupwise_layers} + + arch_settings = { + 'A0': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75, 2.5], + group_layer_map=None, + se_cfg=None), + 'A1': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[1, 1, 1, 2.5], + group_layer_map=None, + se_cfg=None), + 'A2': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[1.5, 1.5, 1.5, 2.75], + group_layer_map=None, + se_cfg=None), + 'B0': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[1, 1, 1, 2.5], + group_layer_map=None, + se_cfg=None, + stem_channels=64), + 'B1': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=None, + se_cfg=None), + 'B1g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B1g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=g4_layer_map, + se_cfg=None), + 'B2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=None, + se_cfg=None), + 'B2g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B2g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=g4_layer_map, + se_cfg=None), + 'B3': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=None, + se_cfg=None), + 'B3g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B3g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=g4_layer_map, + se_cfg=None), + 'D2se': + dict( + num_blocks=[8, 14, 24, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=None, + se_cfg=dict(ratio=16, divisor=1)), + 'yolox-pai-small': + dict( + num_blocks=[3, 5, 7, 3], + width_factor=[1, 1, 1, 1], + group_layer_map=None, + se_cfg=None, + stem_channels=32), + } + + def __init__(self, + arch, + in_channels=3, + base_channels=64, + out_indices=(3, ), + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + deploy=False, + norm_eval=False, + add_ppf=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(RepVGG, self).__init__(init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": 
"{arch}" is not one of the arch_settings' + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + assert len(arch['num_blocks']) == len( + arch['width_factor']) == len(strides) == len(dilations) + assert max(out_indices) < len(arch['num_blocks']) + if arch['group_layer_map'] is not None: + assert max(arch['group_layer_map'].keys()) <= sum( + arch['num_blocks']) + + if arch['se_cfg'] is not None: + assert isinstance(arch['se_cfg'], dict) + + self.base_channels = base_channels + self.arch = arch + self.in_channels = in_channels + self.out_indices = out_indices + self.strides = strides + self.dilations = dilations + self.deploy = deploy + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + + # defaults to 64 to prevert BC-breaking if stem_channels + # not in arch dict; + # the stem channels should not be larger than that of stage1. + channels = min( + arch.get('stem_channels', 64), + int(self.base_channels * self.arch['width_factor'][0])) + self.stem = RepVGGBlock( + self.in_channels, + channels, + stride=2, + se_cfg=arch['se_cfg'], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + deploy=deploy) + + next_create_block_idx = 1 + self.stages = [] + for i in range(len(arch['num_blocks'])): + num_blocks = self.arch['num_blocks'][i] + stride = self.strides[i] + dilation = self.dilations[i] + out_channels = int(self.base_channels * 2**i * + self.arch['width_factor'][i]) + + stage, next_create_block_idx = self._make_stage( + channels, out_channels, num_blocks, stride, dilation, + next_create_block_idx, init_cfg) + stage_name = f'stage_{i + 1}' + self.add_module(stage_name, stage) + self.stages.append(stage_name) + + channels = out_channels + + if add_ppf: + self.ppf = MTSPPF( + out_channels, + out_channels, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + kernel_size=5) + else: + self.ppf = None + + def _make_stage(self, in_channels, out_channels, num_blocks, stride, + dilation, next_create_block_idx, init_cfg): + strides = [stride] + [1] * (num_blocks - 1) + dilations = [dilation] * num_blocks + + blocks = [] + for i in range(num_blocks): + groups = self.arch['group_layer_map'].get( + next_create_block_idx, + 1) if self.arch['group_layer_map'] is not None else 1 + blocks.append( + RepVGGBlock( + in_channels, + out_channels, + stride=strides[i], + padding=dilations[i], + dilation=dilations[i], + groups=groups, + se_cfg=self.arch['se_cfg'], + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + deploy=self.deploy, + init_cfg=init_cfg)) + in_channels = out_channels + next_create_block_idx += 1 + + return Sequential(*blocks), next_create_block_idx + + def forward(self, x): + x = self.stem(x) + outs = [] + for i, stage_name in enumerate(self.stages): + stage = getattr(self, stage_name) + x = stage(x) + if i + 1 == len(self.stages) and self.ppf is not None: + x = self.ppf(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + stage = getattr(self, f'stage_{i+1}') + stage.eval() + for param in stage.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(RepVGG, self).train(mode) + 
self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def switch_to_deploy(self): + for m in self.modules(): + if isinstance(m, RepVGGBlock): + m.switch_to_deploy() + self.deploy = True diff --git a/mmcls/models/backbones/res2net.py b/mmcls/models/backbones/res2net.py new file mode 100644 index 0000000..491b6f4 --- /dev/null +++ b/mmcls/models/backbones/res2net.py @@ -0,0 +1,306 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import ModuleList, Sequential + +from ..builder import BACKBONES +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet + + +class Bottle2neck(_Bottleneck): + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + scales=4, + base_width=26, + base_channels=64, + stage_type='normal', + **kwargs): + """Bottle2neck block for Res2Net.""" + super(Bottle2neck, self).__init__(in_channels, out_channels, **kwargs) + assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' + + mid_channels = out_channels // self.expansion + width = int(math.floor(mid_channels * (base_width / base_channels))) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width * scales, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + width * scales, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + + if stage_type == 'stage': + self.pool = nn.AvgPool2d( + kernel_size=3, stride=self.conv2_stride, padding=1) + + self.convs = ModuleList() + self.bns = ModuleList() + for i in range(scales - 1): + self.convs.append( + build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + bias=False)) + self.bns.append( + build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) + + self.conv3 = build_conv_layer( + self.conv_cfg, + width * scales, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.stage_type = stage_type + self.scales = scales + self.width = width + delattr(self, 'conv2') + delattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + sp = self.convs[0](spx[0].contiguous()) + sp = self.relu(self.bns[0](sp)) + out = sp + for i in range(1, self.scales - 1): + if self.stage_type == 'stage': + sp = spx[i] + else: + sp = sp + spx[i] + sp = self.convs[i](sp.contiguous()) + sp = self.relu(self.bns[i](sp)) + out = torch.cat((out, sp), 1) + + if self.stage_type == 'normal' and self.scales != 1: + out = torch.cat((out, spx[self.scales - 1]), 1) + elif self.stage_type == 'stage' and self.scales != 1: + out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Res2Layer(Sequential): + """Res2Layer to build Res2Net 
style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottle2neck. Defaults to True. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + scales (int): Scales used in Res2Net. Default: 4 + base_width (int): Basic width of each scale. Default: 26 + """ + + def __init__(self, + block, + in_channels, + out_channels, + num_blocks, + stride=1, + avg_down=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + scales=4, + base_width=26, + **kwargs): + self.block = block + + downsample = None + if stride != 1 or in_channels != out_channels: + if avg_down: + downsample = nn.Sequential( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False), + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=1, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1], + ) + else: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1], + ) + + layers = [] + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + scales=scales, + base_width=base_width, + stage_type='stage', + **kwargs)) + in_channels = out_channels + for _ in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + scales=scales, + base_width=base_width, + **kwargs)) + super(Res2Layer, self).__init__(*layers) + + +@BACKBONES.register_module() +class Res2Net(ResNet): + """Res2Net backbone. + + A PyTorch implement of : `Res2Net: A New Multi-scale Backbone + Architecture `_ + + Args: + depth (int): Depth of Res2Net, choose from {50, 101, 152}. + scales (int): Scales used in Res2Net. Defaults to 4. + base_width (int): Basic width of each scale. Defaults to 26. + in_channels (int): Number of input image channels. Defaults to 3. + num_stages (int): Number of Res2Net stages. Defaults to 4. + strides (Sequence[int]): Strides of the first block of each stage. + Defaults to ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Defaults to ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. + Defaults to ``(3, )``. + style (str): "pytorch" or "caffe". If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Defaults to "pytorch". + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Defaults to True. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottle2neck. Defaults to True. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to ``dict(type='BN', requires_grad=True)``. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. 
Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Defaults to True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + + Example: + >>> from mmcls.models import Res2Net + >>> import torch + >>> model = Res2Net(depth=50, + ... scales=4, + ... base_width=26, + ... out_indices=(0, 1, 2, 3)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = model.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 8, 8) + (1, 512, 4, 4) + (1, 1024, 2, 2) + (1, 2048, 1, 1) + """ + + arch_settings = { + 50: (Bottle2neck, (3, 4, 6, 3)), + 101: (Bottle2neck, (3, 4, 23, 3)), + 152: (Bottle2neck, (3, 8, 36, 3)) + } + + def __init__(self, + scales=4, + base_width=26, + style='pytorch', + deep_stem=True, + avg_down=True, + init_cfg=None, + **kwargs): + self.scales = scales + self.base_width = base_width + super(Res2Net, self).__init__( + style=style, + deep_stem=deep_stem, + avg_down=avg_down, + init_cfg=init_cfg, + **kwargs) + + def make_res_layer(self, **kwargs): + return Res2Layer( + scales=self.scales, + base_width=self.base_width, + base_channels=self.base_channels, + **kwargs) diff --git a/mmcls/models/backbones/resnest.py b/mmcls/models/backbones/resnest.py new file mode 100644 index 0000000..0a82398 --- /dev/null +++ b/mmcls/models/backbones/resnest.py @@ -0,0 +1,339 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. + """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(nn.Module): + """Split-Attention Conv2d. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. 
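+
+    Example:
+        >>> # An illustrative sketch with the default ``radix=2`` attention;
+        >>> # the spatial size is preserved because stride=1 and padding=1.
+        >>> import torch
+        >>> from mmcls.models.backbones.resnest import SplitAttentionConv2d
+        >>> conv = SplitAttentionConv2d(64, 64, kernel_size=3, padding=1)
+        >>> conv.eval()
+        >>> out = conv(torch.rand(1, 64, 56, 56))
+        >>> print(tuple(out.shape))
+        (1, 64, 56, 56)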
+ """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(SplitAttentionConv2d, self).__init__() + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + return getattr(self, self.norm0_name) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
+ """ + + def __init__(self, + in_channels, + out_channels, + groups=1, + width_per_group=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + super(Bottleneck, self).__init__(in_channels, out_channels, **kwargs) + + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = SplitAttentionConv2d( + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152, 200}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. 
If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)), + 269: (Bottleneck, (3, 30, 48, 8)) + } + + def __init__(self, + depth, + groups=1, + width_per_group=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.width_per_group = width_per_group + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super(ResNeSt, self).__init__(depth=depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/mmcls/models/backbones/resnet.py b/mmcls/models/backbones/resnet.py new file mode 100644 index 0000000..d01ebe0 --- /dev/null +++ b/mmcls/models/backbones/resnet.py @@ -0,0 +1,688 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, + build_norm_layer, constant_init) +from mmcv.cnn.bricks import DropPath +from mmcv.runner import BaseModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + +eps = 1.0e-5 + + +class BasicBlock(BaseModule): + """BasicBlock for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the output channels of conv1. This is a + reserved argument in BasicBlock and should always be 1. Default: 1. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None. + style (str): `pytorch` or `caffe`. It is unused and reserved for + unified API with Bottleneck. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. 
+ Default: dict(type='BN') + """ + + def __init__(self, + in_channels, + out_channels, + expansion=1, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + drop_path_rate=0.0, + act_cfg=dict(type='ReLU', inplace=True), + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert self.expansion == 1 + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, out_channels, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + 3, + padding=1, + bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = build_activation_layer(act_cfg) + self.downsample = downsample + self.drop_path = DropPath(drop_prob=drop_path_rate + ) if drop_path_rate > eps else nn.Identity() + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = self.drop_path(out) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + """Bottleneck block for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. Default: 4. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None. + style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: "pytorch". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. 
+ Default: dict(type='BN') + """ + + def __init__(self, + in_channels, + out_channels, + expansion=4, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU', inplace=True), + drop_path_rate=0.0, + init_cfg=None): + super(Bottleneck, self).__init__(init_cfg=init_cfg) + assert style in ['pytorch', 'caffe'] + + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = build_activation_layer(act_cfg) + self.downsample = downsample + self.drop_path = DropPath(drop_prob=drop_path_rate + ) if drop_path_rate > eps else nn.Identity() + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + @property + def norm3(self): + return getattr(self, self.norm3_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = self.drop_path(out) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def get_expansion(block, expansion=None): + """Get the expansion of a residual block. + + The block expansion will be obtained by the following order: + + 1. If ``expansion`` is given, just return it. + 2. If ``block`` has the attribute ``expansion``, then return + ``block.expansion``. + 3. Return the default value according the the block type: + 1 for ``BasicBlock`` and 4 for ``Bottleneck``. + + Args: + block (class): The block class. + expansion (int | None): The given expansion ratio. + + Returns: + int: The expansion of the block. 
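+
+    Example:
+        >>> # A small illustration of the lookup order above, using the block
+        >>> # classes defined in this module; the explicit expansion value in
+        >>> # the second call is an arbitrary example.
+        >>> get_expansion(BasicBlock)
+        1
+        >>> get_expansion(Bottleneck, expansion=2)
+        2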
+ """ + if isinstance(expansion, int): + assert expansion > 0 + elif expansion is None: + if hasattr(block, 'expansion'): + expansion = block.expansion + elif issubclass(block, BasicBlock): + expansion = 1 + elif issubclass(block, Bottleneck): + expansion = 4 + else: + raise TypeError(f'expansion is not specified for {block.__name__}') + else: + raise TypeError('expansion must be an integer or None') + + return expansion + + +class ResLayer(nn.Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): Residual block used to build ResLayer. + num_blocks (int): Number of blocks. + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int, optional): The expansion for BasicBlock/Bottleneck. + If not specified, it will firstly be obtained via + ``block.expansion``. If the block has no attribute "expansion", + the following default values will be used: 1 for BasicBlock and + 4 for Bottleneck. Default: None. + stride (int): stride of the first block. Default: 1. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + """ + + def __init__(self, + block, + num_blocks, + in_channels, + out_channels, + expansion=None, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + **kwargs): + self.block = block + self.expansion = get_expansion(block, expansion) + + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + in_channels = out_channels + for i in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) + + +@BACKBONES.register_module() +class ResNet(BaseBackbone): + """ResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. 
+ deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmcls.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=64, + base_channels=64, + expansion=None, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ], + drop_path_rate=0.0): + super(ResNet, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.expansion = get_expansion(self.block, expansion) + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + _in_channels = stem_channels + _out_channels = base_channels * self.expansion + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + res_layer = self.make_res_layer( + block=self.block, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=_out_channels, + expansion=self.expansion, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + drop_path_rate=drop_path_rate) + _in_channels = _out_channels + 
_out_channels *= 2 + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = res_layer[-1].out_channels + + def make_res_layer(self, **kwargs): + return ResLayer(**kwargs) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + if self.deep_stem: + self.stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ResNet, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. + return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@BACKBONES.register_module() +class ResNetV1c(ResNet): + """ResNetV1c backbone. + + This variant is described in `Bag of Tricks. + `_. + + Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv + in the input stem with three 3x3 convs. + """ + + def __init__(self, **kwargs): + super(ResNetV1c, self).__init__( + deep_stem=True, avg_down=False, **kwargs) + + +@BACKBONES.register_module() +class ResNetV1d(ResNet): + """ResNetV1d backbone. + + This variant is described in `Bag of Tricks. + `_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. 
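+
+    Example:
+        >>> # A minimal usage sketch; the 32x32 input size and the printed
+        >>> # shape are illustrative, assuming depth=50 and the default
+        >>> # ``out_indices=(3, )``.
+        >>> from mmcls.models import ResNetV1d
+        >>> import torch
+        >>> model = ResNetV1d(depth=50)
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = model.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 2048, 1, 1)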
+    """
+
+    def __init__(self, **kwargs):
+        super(ResNetV1d, self).__init__(
+            deep_stem=True, avg_down=True, **kwargs)
diff --git a/mmcls/models/backbones/resnet_cifar.py b/mmcls/models/backbones/resnet_cifar.py
new file mode 100644
index 0000000..54b8a48
--- /dev/null
+++ b/mmcls/models/backbones/resnet_cifar.py
@@ -0,0 +1,81 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from .resnet import ResNet
+
+
+@BACKBONES.register_module()
+class ResNet_CIFAR(ResNet):
+    """ResNet backbone for CIFAR.
+
+    Compared to standard ResNet, it uses `kernel_size=3` and `stride=1` in
+    conv1, and does not apply MaxPooling after the stem. It has proven more
+    efficient than the standard ResNet in other public codebases, e.g.,
+    `https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`.
+
+    Args:
+        depth (int): Network depth, from {18, 34, 50, 101, 152}.
+        in_channels (int): Number of input image channels. Default: 3.
+        stem_channels (int): Output channels of the stem layer. Default: 64.
+        base_channels (int): Middle channels of the first stage. Default: 64.
+        num_stages (int): Stages of the network. Default: 4.
+        strides (Sequence[int]): Strides of the first block of each stage.
+            Default: ``(1, 2, 2, 2)``.
+        dilations (Sequence[int]): Dilation of each stage.
+            Default: ``(1, 1, 1, 1)``.
+        out_indices (Sequence[int]): Output from which stages. If only one
+            stage is specified, a single tensor (feature map) is returned,
+            otherwise multiple stages are specified, a tuple of tensors will
+            be returned. Default: ``(3, )``.
+        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+            layer is the 3x3 conv layer, otherwise the stride-two layer is
+            the first 1x1 conv layer.
+        deep_stem (bool): This network has a specifically designed stem, thus
+            it must be False.
+        avg_down (bool): Use AvgPool instead of stride conv when
+            downsampling in the bottleneck. Default: False.
+        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+            -1 means not freezing any parameters. Default: -1.
+        conv_cfg (dict | None): The config dict for conv layers. Default: None.
+        norm_cfg (dict): The config dict for norm layers.
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only. Default: False.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed. Default: False.
+        zero_init_residual (bool): Whether to use zero init for last norm layer
+            in resblocks to let them behave as identity. Default: True.
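+
+    Example:
+        >>> # A minimal usage sketch; the CIFAR-sized 32x32 input and the
+        >>> # printed shapes are illustrative, assuming depth=18.
+        >>> from mmcls.models import ResNet_CIFAR
+        >>> import torch
+        >>> model = ResNet_CIFAR(depth=18, out_indices=(0, 1, 2, 3))
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = model.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 64, 32, 32)
+        (1, 128, 16, 16)
+        (1, 256, 8, 8)
+        (1, 512, 4, 4)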
+ """ + + def __init__(self, depth, deep_stem=False, **kwargs): + super(ResNet_CIFAR, self).__init__( + depth, deep_stem=deep_stem, **kwargs) + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/mmcls/models/backbones/resnext.py b/mmcls/models/backbones/resnext.py new file mode 100644 index 0000000..2370b71 --- /dev/null +++ b/mmcls/models/backbones/resnext.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNet + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + **kwargs): + super(Bottleneck, self).__init__(in_channels, out_channels, **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. 
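+        # For example (an illustrative case, not a value from any config):
+        # with groups=32, width_per_group=4 and base_channels=64, a stage-1
+        # bottleneck whose mid_channels starts at 64 is widened to
+        # 32 * 4 * 64 // 64 = 128 by the lines below.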
+ if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@BACKBONES.register_module() +class ResNeXt(ResNet): + """ResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. 
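+
+    Example:
+        >>> # A minimal usage sketch; the 32x32 input and printed shape are
+        >>> # illustrative assumptions for the default ``out_indices=(3, )``.
+        >>> from mmcls.models import ResNeXt
+        >>> import torch
+        >>> model = ResNeXt(depth=50, groups=32, width_per_group=4)
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = model.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 2048, 1, 1)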
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super(ResNeXt, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/mmcls/models/backbones/seresnet.py b/mmcls/models/backbones/seresnet.py new file mode 100644 index 0000000..0cfc5d1 --- /dev/null +++ b/mmcls/models/backbones/seresnet.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.utils.checkpoint as cp + +from ..builder import BACKBONES +from ..utils.se_layer import SELayer +from .resnet import Bottleneck, ResLayer, ResNet + + +class SEBottleneck(Bottleneck): + """SEBottleneck block for SEResNet. + + Args: + in_channels (int): The input channels of the SEBottleneck block. + out_channels (int): The output channel of the SEBottleneck block. + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + """ + + def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs): + super(SEBottleneck, self).__init__(in_channels, out_channels, **kwargs) + self.se_layer = SELayer(out_channels, ratio=se_ratio) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + out = self.se_layer(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class SEResNet(ResNet): + """SEResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). 
Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmcls.models import SEResNet + >>> import torch + >>> self = SEResNet(depth=50) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 56, 56) + (1, 128, 28, 28) + (1, 256, 14, 14) + (1, 512, 7, 7) + """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, se_ratio=16, **kwargs): + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for SEResNet') + self.se_ratio = se_ratio + super(SEResNet, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer(se_ratio=self.se_ratio, **kwargs) diff --git a/mmcls/models/backbones/seresnext.py b/mmcls/models/backbones/seresnext.py new file mode 100644 index 0000000..aff5cb4 --- /dev/null +++ b/mmcls/models/backbones/seresnext.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import ResLayer +from .seresnet import SEBottleneck as _SEBottleneck +from .seresnet import SEResNet + + +class SEBottleneck(_SEBottleneck): + """SEBottleneck block for SEResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + base_channels (int): Middle channels of the first stage. Default: 64. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + se_ratio=16, + **kwargs): + super(SEBottleneck, self).__init__(in_channels, out_channels, se_ratio, + **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # We follow the same rational of ResNext to compute mid_channels. + # For SEResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for SEResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. 
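+        # As an illustration (values assumed, not taken from a config): with
+        # groups=32, width_per_group=4 and base_channels=64, a first-stage
+        # bottleneck whose mid_channels starts at 64 becomes
+        # 32 * 4 * 64 // 64 = 128 after the adjustment below.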
+ if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@BACKBONES.register_module() +class SEResNeXt(SEResNet): + """SEResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. 
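+
+    Example:
+        >>> # A minimal usage sketch; the 224x224 input and printed shape are
+        >>> # illustrative assumptions for the default ``out_indices=(3, )``.
+        >>> from mmcls.models import SEResNeXt
+        >>> import torch
+        >>> model = SEResNeXt(depth=50, groups=32, width_per_group=4)
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> level_outputs = model.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 2048, 7, 7)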
+ """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super(SEResNeXt, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/mmcls/models/backbones/shufflenet_v1.py b/mmcls/models/backbones/shufflenet_v1.py new file mode 100644 index 0000000..0b6c70f --- /dev/null +++ b/mmcls/models/backbones/shufflenet_v1.py @@ -0,0 +1,321 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, build_activation_layer, constant_init, + normal_init) +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import channel_shuffle, make_divisible +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class ShuffleUnit(BaseModule): + """ShuffleUnit block. + + ShuffleNet unit with pointwise group convolution (GConv) and channel + shuffle. + + Args: + in_channels (int): The input channels of the ShuffleUnit. + out_channels (int): The output channels of the ShuffleUnit. + groups (int): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3 + first_block (bool): Whether it is the first ShuffleUnit of a + sequential ShuffleUnits. Default: True, which means not using the + grouped 1x1 convolution. + combine (str): The ways to combine the input and output + branches. Default: 'add'. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + groups=3, + first_block=True, + combine='add', + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False): + super(ShuffleUnit, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.first_block = first_block + self.combine = combine + self.groups = groups + self.bottleneck_channels = self.out_channels // 4 + self.with_cp = with_cp + + if self.combine == 'add': + self.depthwise_stride = 1 + self._combine_func = self._add + assert in_channels == out_channels, ( + 'in_channels must be equal to out_channels when combine ' + 'is add') + elif self.combine == 'concat': + self.depthwise_stride = 2 + self._combine_func = self._concat + self.out_channels -= self.in_channels + self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) + else: + raise ValueError(f'Cannot combine tensors with {self.combine}. 
' + 'Only "add" and "concat" are supported') + + self.first_1x1_groups = 1 if first_block else self.groups + self.g_conv_1x1_compress = ConvModule( + in_channels=self.in_channels, + out_channels=self.bottleneck_channels, + kernel_size=1, + groups=self.first_1x1_groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.depthwise_conv3x3_bn = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.bottleneck_channels, + kernel_size=3, + stride=self.depthwise_stride, + padding=1, + groups=self.bottleneck_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.g_conv_1x1_expand = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.out_channels, + kernel_size=1, + groups=self.groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.act = build_activation_layer(act_cfg) + + @staticmethod + def _add(x, out): + # residual connection + return x + out + + @staticmethod + def _concat(x, out): + # concatenate along channel axis + return torch.cat((x, out), 1) + + def forward(self, x): + + def _inner_forward(x): + residual = x + + out = self.g_conv_1x1_compress(x) + out = self.depthwise_conv3x3_bn(out) + + if self.groups > 1: + out = channel_shuffle(out, self.groups) + + out = self.g_conv_1x1_expand(out) + + if self.combine == 'concat': + residual = self.avgpool(residual) + out = self.act(out) + out = self._combine_func(residual, out) + else: + out = self._combine_func(residual, out) + out = self.act(out) + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@BACKBONES.register_module() +class ShuffleNetV1(BaseBackbone): + """ShuffleNetV1 backbone. + + Args: + groups (int): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3. + widen_factor (float): Width multiplier - adjusts the number + of channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (2, ) + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + groups=3, + widen_factor=1.0, + out_indices=(2, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=None): + super(ShuffleNetV1, self).__init__(init_cfg) + self.init_cfg = init_cfg + self.stage_blocks = [4, 8, 4] + self.groups = groups + + for index in out_indices: + if index not in range(0, 3): + raise ValueError('the item in out_indices must in ' + f'range(0, 3). But received {index}') + + if frozen_stages not in range(-1, 3): + raise ValueError('frozen_stages must be in range(-1, 3). 
' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if groups == 1: + channels = (144, 288, 576) + elif groups == 2: + channels = (200, 400, 800) + elif groups == 3: + channels = (240, 480, 960) + elif groups == 4: + channels = (272, 544, 1088) + elif groups == 8: + channels = (384, 768, 1536) + else: + raise ValueError(f'{groups} groups is not supported for 1x1 ' + 'Grouped Convolutions') + + channels = [make_divisible(ch * widen_factor, 8) for ch in channels] + + self.in_channels = int(24 * widen_factor) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + first_block = True if i == 0 else False + layer = self.make_layer(channels[i], num_blocks, first_block) + self.layers.append(layer) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + layer = self.layers[i] + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV1, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + if 'conv1' in name: + normal_init(m, mean=0, std=0.01) + else: + normal_init(m, mean=0, std=1.0 / m.weight.shape[1]) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, val=1, bias=0.0001) + if isinstance(m, _BatchNorm): + if m.running_mean is not None: + nn.init.constant_(m.running_mean, 0) + + def make_layer(self, out_channels, num_blocks, first_block=False): + """Stack ShuffleUnit blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): Number of blocks. + first_block (bool): Whether is the first ShuffleUnit of a + sequential ShuffleUnits. Default: False, which means using + the grouped 1x1 convolution. + """ + layers = [] + for i in range(num_blocks): + first_block = first_block if i == 0 else False + combine_mode = 'concat' if i == 0 else 'add' + layers.append( + ShuffleUnit( + self.in_channels, + out_channels, + groups=self.groups, + first_block=first_block, + combine=combine_mode, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super(ShuffleNetV1, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmcls/models/backbones/shufflenet_v2.py b/mmcls/models/backbones/shufflenet_v2.py new file mode 100644 index 0000000..bfe7ac8 --- /dev/null +++ b/mmcls/models/backbones/shufflenet_v2.py @@ -0,0 +1,304 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, constant_init, normal_init +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import channel_shuffle +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class InvertedResidual(BaseModule): + """InvertedResidual block for ShuffleNetV2 backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 convolution layer. Default: 1 + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.stride = stride + self.with_cp = with_cp + + branch_features = out_channels // 2 + if self.stride == 1: + assert in_channels == branch_features * 2, ( + f'in_channels ({in_channels}) should equal to ' + f'branch_features * 2 ({branch_features * 2}) ' + 'when stride is 1') + + if in_channels != branch_features * 2: + assert self.stride != 1, ( + f'stride ({self.stride}) should not equal 1 when ' + f'in_channels != branch_features * 2') + + if self.stride > 1: + self.branch1 = nn.Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=self.stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + + self.branch2 = nn.Sequential( + ConvModule( + in_channels if (self.stride > 1) else branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + branch_features, + branch_features, + kernel_size=3, + stride=self.stride, + padding=1, + groups=branch_features, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + + def _inner_forward(x): + if self.stride > 1: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + else: + # Channel Split operation. using these lines of code to replace + # ``chunk(x, 2, dim=1)`` can make it easier to deploy a + # shufflenetv2 model by using mmdeploy. + channels = x.shape[1] + c = channels // 2 + channels % 2 + x1 = x[:, :c, :, :] + x2 = x[:, c:, :, :] + + out = torch.cat((x1, self.branch2(x2)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@BACKBONES.register_module() +class ShuffleNetV2(BaseBackbone): + """ShuffleNetV2 backbone. 
+ + Args: + widen_factor (float): Width multiplier - adjusts the number of + channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (0, 1, 2, 3). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + widen_factor=1.0, + out_indices=(3, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=None): + super(ShuffleNetV2, self).__init__(init_cfg) + self.stage_blocks = [4, 8, 4] + for index in out_indices: + if index not in range(0, 4): + raise ValueError('the item in out_indices must in ' + f'range(0, 4). But received {index}') + + if frozen_stages not in range(-1, 4): + raise ValueError('frozen_stages must be in range(-1, 4). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if widen_factor == 0.5: + channels = [48, 96, 192, 1024] + elif widen_factor == 1.0: + channels = [116, 232, 464, 1024] + elif widen_factor == 1.5: + channels = [176, 352, 704, 1024] + elif widen_factor == 2.0: + channels = [244, 488, 976, 2048] + else: + raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. ' + f'But received {widen_factor}') + + self.in_channels = 24 + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + layer = self._make_layer(channels[i], num_blocks) + self.layers.append(layer) + + output_channels = channels[-1] + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=output_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def _make_layer(self, out_channels, num_blocks): + """Stack blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): number of blocks. 
+ """ + layers = [] + for i in range(num_blocks): + stride = 2 if i == 0 else 1 + layers.append( + InvertedResidual( + in_channels=self.in_channels, + out_channels=out_channels, + stride=stride, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + if 'conv1' in name: + normal_init(m, mean=0, std=0.01) + else: + normal_init(m, mean=0, std=1.0 / m.weight.shape[1]) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m.weight, val=1, bias=0.0001) + if isinstance(m, _BatchNorm): + if m.running_mean is not None: + nn.init.constant_(m.running_mean, 0) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super(ShuffleNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/mmcls/models/backbones/swin_transformer.py b/mmcls/models/backbones/swin_transformer.py new file mode 100644 index 0000000..962d41d --- /dev/null +++ b/mmcls/models/backbones/swin_transformer.py @@ -0,0 +1,548 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed, PatchMerging +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from ..utils import (ShiftWindowMSA, resize_pos_embed, + resize_relative_position_bias_table, to_2tuple) +from .base_backbone import BaseBackbone + + +class SwinBlock(BaseModule): + """Swin Transformer block. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + shift (bool): Shift the attention window or not. Defaults to False. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + drop_path (float): The drop path rate after attention and ffn. + Defaults to 0. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + attn_cfgs (dict): The extra config of Shift Window-MSA. + Defaults to empty dict. + ffn_cfgs (dict): The extra config of FFN. Defaults to empty dict. + norm_cfg (dict): The config of norm layers. + Defaults to ``dict(type='LN')``. 
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size=7, + shift=False, + ffn_ratio=4., + drop_path=0., + pad_small_map=False, + attn_cfgs=dict(), + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super(SwinBlock, self).__init__(init_cfg) + self.with_cp = with_cp + + _attn_cfgs = { + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'shift_size': window_size // 2 if shift else 0, + 'window_size': window_size, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'pad_small_map': pad_small_map, + **attn_cfgs + } + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ShiftWindowMSA(**_attn_cfgs) + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': 0, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': dict(type='GELU'), + **ffn_cfgs + } + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN(**_ffn_cfgs) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockSequence(BaseModule): + """Module with successive Swin Transformer blocks and downsample layer. + + Args: + embed_dims (int): Number of input channels. + depth (int): Number of successive swin transformer blocks. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + downsample (bool): Downsample the output of blocks by patch merging. + Defaults to False. + downsample_cfg (dict): The extra config of the patch merging layer. + Defaults to empty dict. + drop_paths (Sequence[float] | float): The drop path rate in each block. + Defaults to 0. + block_cfgs (Sequence[dict] | dict): The extra config of each block. + Defaults to empty dicts. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
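+
+    Examples:
+        A minimal sketch of using one stage in isolation (the 56x56 token
+        grid, the stage configuration and the printed shape are illustrative
+        assumptions, not fixed requirements):
+
+        >>> import torch
+        >>> from mmcls.models.backbones.swin_transformer import (
+        ...     SwinBlockSequence)
+        >>> stage = SwinBlockSequence(
+        ...     embed_dims=96, depth=2, num_heads=3, downsample=True)
+        >>> tokens = torch.rand(1, 56 * 56, 96)
+        >>> out, out_hw_shape = stage(tokens, (56, 56))
+        >>> # PatchMerging halves the resolution and doubles the channels.
+        >>> out.shape
+        torch.Size([1, 784, 192])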
+ """ + + def __init__(self, + embed_dims, + depth, + num_heads, + window_size=7, + downsample=False, + downsample_cfg=dict(), + drop_paths=0., + block_cfgs=dict(), + with_cp=False, + pad_small_map=False, + init_cfg=None): + super().__init__(init_cfg) + + if not isinstance(drop_paths, Sequence): + drop_paths = [drop_paths] * depth + + if not isinstance(block_cfgs, Sequence): + block_cfgs = [deepcopy(block_cfgs) for _ in range(depth)] + + self.embed_dims = embed_dims + self.blocks = ModuleList() + for i in range(depth): + _block_cfg = { + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'window_size': window_size, + 'shift': False if i % 2 == 0 else True, + 'drop_path': drop_paths[i], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + **block_cfgs[i] + } + block = SwinBlock(**_block_cfg) + self.blocks.append(block) + + if downsample: + _downsample_cfg = { + 'in_channels': embed_dims, + 'out_channels': 2 * embed_dims, + 'norm_cfg': dict(type='LN'), + **downsample_cfg + } + self.downsample = PatchMerging(**_downsample_cfg) + else: + self.downsample = None + + def forward(self, x, in_shape, do_downsample=True): + for block in self.blocks: + x = block(x, in_shape) + + if self.downsample is not None and do_downsample: + x, out_shape = self.downsample(x, in_shape) + else: + out_shape = in_shape + return x, out_shape + + @property + def out_channels(self): + if self.downsample: + return self.downsample.out_channels + else: + return self.embed_dims + + +@BACKBONES.register_module() +class SwinTransformer(BaseBackbone): + """Swin Transformer. + + A PyTorch implement of : `Swin Transformer: + Hierarchical Vision Transformer using Shifted Windows + `_ + + Inspiration from + https://github.com/microsoft/Swin-Transformer + + Args: + arch (str | dict): Swin Transformer architecture. If use string, choose + from 'tiny', 'small', 'base' and 'large'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **num_heads** (List[int]): The number of heads in attention + modules of each stage. + + Defaults to 'tiny'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 4. + in_channels (int): The num of input channels. Defaults to 3. + window_size (int): The height and width of the window. Defaults to 7. + drop_rate (float): Dropout rate after embedding. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + out_after_downsample (bool): Whether to output the feature map of a + stage after the following downsample layer. Defaults to False. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + interpolate_mode (str): Select the interpolate mode for absolute + position embeding vector resize. Defaults to "bicubic". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. 
+ pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN')`` + stage_cfgs (Sequence[dict] | dict): Extra config dict for each + stage. Defaults to an empty dict. + patch_cfg (dict): Extra config dict for patch embedding. + Defaults to an empty dict. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + + Examples: + >>> from mmcls.models import SwinTransformer + >>> import torch + >>> extra_config = dict( + >>> arch='tiny', + >>> stage_cfgs=dict(downsample_cfg={'kernel_size': 3, + >>> 'expansion_ratio': 3})) + >>> self = SwinTransformer(**extra_config) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> output = self.forward(inputs) + >>> print(output.shape) + (1, 2592, 4) + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': 96, + 'depths': [2, 2, 6, 2], + 'num_heads': [3, 6, 12, 24]}), + **dict.fromkeys(['s', 'small'], + {'embed_dims': 96, + 'depths': [2, 2, 18, 2], + 'num_heads': [3, 6, 12, 24]}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': 128, + 'depths': [2, 2, 18, 2], + 'num_heads': [4, 8, 16, 32]}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': 192, + 'depths': [2, 2, 18, 2], + 'num_heads': [6, 12, 24, 48]}), + } # yapf: disable + + _version = 3 + num_extra_tokens = 0 + + def __init__(self, + arch='tiny', + img_size=224, + patch_size=4, + in_channels=3, + window_size=7, + drop_rate=0., + drop_path_rate=0.1, + out_indices=(3, ), + out_after_downsample=False, + use_abs_pos_embed=False, + interpolate_mode='bicubic', + with_cp=False, + frozen_stages=-1, + norm_eval=False, + pad_small_map=False, + norm_cfg=dict(type='LN'), + stage_cfgs=dict(), + patch_cfg=dict(), + init_cfg=None): + super(SwinTransformer, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'embed_dims', 'depths', 'num_heads'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.num_layers = len(self.depths) + self.out_indices = out_indices + self.out_after_downsample = out_after_downsample + self.use_abs_pos_embed = use_abs_pos_embed + self.interpolate_mode = interpolate_mode + self.frozen_stages = frozen_stages + + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + norm_cfg=dict(type='LN'), + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + if self.use_abs_pos_embed: + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.embed_dims)) + self._register_load_state_dict_pre_hook( + self._prepare_abs_pos_embed) + + self._register_load_state_dict_pre_hook( + 
self._prepare_relative_position_bias_table) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + self.norm_eval = norm_eval + + # stochastic depth + total_depth = sum(self.depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + self.stages = ModuleList() + embed_dims = [self.embed_dims] + for i, (depth, + num_heads) in enumerate(zip(self.depths, self.num_heads)): + if isinstance(stage_cfgs, Sequence): + stage_cfg = stage_cfgs[i] + else: + stage_cfg = deepcopy(stage_cfgs) + downsample = True if i < self.num_layers - 1 else False + _stage_cfg = { + 'embed_dims': embed_dims[-1], + 'depth': depth, + 'num_heads': num_heads, + 'window_size': window_size, + 'downsample': downsample, + 'drop_paths': dpr[:depth], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + **stage_cfg + } + + stage = SwinBlockSequence(**_stage_cfg) + self.stages.append(stage) + + dpr = dpr[depth:] + embed_dims.append(stage.out_channels) + + if self.out_after_downsample: + self.num_features = embed_dims[1:] + else: + self.num_features = embed_dims[:-1] + + for i in out_indices: + if norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, + self.num_features[i])[1] + else: + norm_layer = nn.Identity() + + self.add_module(f'norm{i}', norm_layer) + + def init_weights(self): + super(SwinTransformer, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + if self.use_abs_pos_embed: + x = x + resize_pos_embed( + self.absolute_pos_embed, self.patch_resolution, hw_shape, + self.interpolate_mode, self.num_extra_tokens) + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape = stage( + x, hw_shape, do_downsample=self.out_after_downsample) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(x) + out = out.view(-1, *hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + if stage.downsample is not None and not self.out_after_downsample: + x, hw_shape = stage.downsample(x, hw_shape) + + return tuple(outs) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, *args, + **kwargs): + """load checkpoints.""" + # Names of some parameters in has been changed. 
+ version = local_metadata.get('version', None) + if (version is None + or version < 2) and self.__class__ is SwinTransformer: + final_stage_num = len(self.stages) - 1 + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + if k.startswith('norm.') or k.startswith('backbone.norm.'): + convert_key = k.replace('norm.', f'norm{final_stage_num}.') + state_dict[convert_key] = state_dict[k] + del state_dict[k] + if (version is None + or version < 3) and self.__class__ is SwinTransformer: + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + if 'attn_mask' in k: + del state_dict[k] + + super()._load_from_state_dict(state_dict, prefix, local_metadata, + *args, **kwargs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(0, self.frozen_stages + 1): + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + for i in self.out_indices: + if i <= self.frozen_stages: + for param in getattr(self, f'norm{i}').parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(SwinTransformer, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _prepare_abs_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'absolute_pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.absolute_pos_embed.shape != ckpt_pos_embed_shape: + from mmcls.utils import get_root_logger + logger = get_root_logger() + logger.info( + 'Resize the absolute_pos_embed shape from ' + f'{ckpt_pos_embed_shape} to {self.absolute_pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + def _prepare_relative_position_bias_table(self, state_dict, prefix, *args, + **kwargs): + state_dict_model = self.state_dict() + all_keys = list(state_dict_model.keys()) + for key in all_keys: + if 'relative_position_bias_table' in key: + ckpt_key = prefix + key + if ckpt_key not in state_dict: + continue + relative_position_bias_table_pretrained = state_dict[ckpt_key] + relative_position_bias_table_current = state_dict_model[key] + L1, nH1 = relative_position_bias_table_pretrained.size() + L2, nH2 = relative_position_bias_table_current.size() + if L1 != L2: + src_size = int(L1**0.5) + dst_size = int(L2**0.5) + new_rel_pos_bias = resize_relative_position_bias_table( + src_size, dst_size, + relative_position_bias_table_pretrained, nH1) + from mmcls.utils import get_root_logger + logger = get_root_logger() + logger.info('Resize the relative_position_bias_table from ' + f'{state_dict[ckpt_key].shape} to ' + f'{new_rel_pos_bias.shape}') + state_dict[ckpt_key] = new_rel_pos_bias + + # The index buffer need to be re-generated. 
+ index_buffer = ckpt_key.replace('bias_table', 'index') + del state_dict[index_buffer] diff --git a/mmcls/models/backbones/swin_transformer_v2.py b/mmcls/models/backbones/swin_transformer_v2.py new file mode 100644 index 0000000..c26b4e6 --- /dev/null +++ b/mmcls/models/backbones/swin_transformer_v2.py @@ -0,0 +1,560 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from ..utils import (PatchMerging, ShiftWindowMSA, WindowMSAV2, + resize_pos_embed, to_2tuple) +from .base_backbone import BaseBackbone + + +class SwinBlockV2(BaseModule): + """Swin Transformer V2 block. Use post normalization. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + shift (bool): Shift the attention window or not. Defaults to False. + extra_norm (bool): Whether add extra norm at the end of main branch. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + drop_path (float): The drop path rate after attention and ffn. + Defaults to 0. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + attn_cfgs (dict): The extra config of Shift Window-MSA. + Defaults to empty dict. + ffn_cfgs (dict): The extra config of FFN. Defaults to empty dict. + norm_cfg (dict): The config of norm layers. + Defaults to ``dict(type='LN')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + pretrained_window_size (int): Window size in pretrained. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
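+
+    Examples:
+        A minimal sketch (the 64x64 token grid matches the default 256x256
+        input with 4x4 patches and is only an illustrative assumption; the
+        block keeps the token shape unchanged):
+
+        >>> import torch
+        >>> from mmcls.models.backbones.swin_transformer_v2 import (
+        ...     SwinBlockV2)
+        >>> block = SwinBlockV2(embed_dims=96, num_heads=3, window_size=8)
+        >>> tokens = torch.rand(1, 64 * 64, 96)
+        >>> block(tokens, (64, 64)).shape
+        torch.Size([1, 4096, 96])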
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size=8, + shift=False, + extra_norm=False, + ffn_ratio=4., + drop_path=0., + pad_small_map=False, + attn_cfgs=dict(), + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + with_cp=False, + pretrained_window_size=0, + init_cfg=None): + + super(SwinBlockV2, self).__init__(init_cfg) + self.with_cp = with_cp + self.extra_norm = extra_norm + + _attn_cfgs = { + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'shift_size': window_size // 2 if shift else 0, + 'window_size': window_size, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'pad_small_map': pad_small_map, + **attn_cfgs + } + # use V2 attention implementation + _attn_cfgs.update( + window_msa=WindowMSAV2, + msa_cfg=dict( + pretrained_window_size=to_2tuple(pretrained_window_size))) + self.attn = ShiftWindowMSA(**_attn_cfgs) + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': 0, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': dict(type='GELU'), + 'add_identity': False, + **ffn_cfgs + } + self.ffn = FFN(**_ffn_cfgs) + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + # add extra norm for every n blocks in huge and giant model + if self.extra_norm: + self.norm3 = build_norm_layer(norm_cfg, embed_dims)[1] + + def forward(self, x, hw_shape): + + def _inner_forward(x): + # Use post normalization + identity = x + x = self.attn(x, hw_shape) + x = self.norm1(x) + x = x + identity + + identity = x + x = self.ffn(x) + x = self.norm2(x) + x = x + identity + + if self.extra_norm: + x = self.norm3(x) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockV2Sequence(BaseModule): + """Module with successive Swin Transformer blocks and downsample layer. + + Args: + embed_dims (int): Number of input channels. + depth (int): Number of successive swin transformer blocks. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + downsample (bool): Downsample the output of blocks by patch merging. + Defaults to False. + downsample_cfg (dict): The extra config of the patch merging layer. + Defaults to empty dict. + drop_paths (Sequence[float] | float): The drop path rate in each block. + Defaults to 0. + block_cfgs (Sequence[dict] | dict): The extra config of each block. + Defaults to empty dicts. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + extra_norm_every_n_blocks (int): Add extra norm at the end of main + branch every n blocks. Defaults to 0, which means no needs for + extra norm layer. + pretrained_window_size (int): Window size in pretrained. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + depth, + num_heads, + window_size=8, + downsample=False, + downsample_cfg=dict(), + drop_paths=0., + block_cfgs=dict(), + with_cp=False, + pad_small_map=False, + extra_norm_every_n_blocks=0, + pretrained_window_size=0, + init_cfg=None): + super().__init__(init_cfg) + + if not isinstance(drop_paths, Sequence): + drop_paths = [drop_paths] * depth + + if not isinstance(block_cfgs, Sequence): + block_cfgs = [deepcopy(block_cfgs) for _ in range(depth)] + + if downsample: + self.out_channels = 2 * embed_dims + _downsample_cfg = { + 'in_channels': embed_dims, + 'out_channels': self.out_channels, + 'norm_cfg': dict(type='LN'), + **downsample_cfg + } + self.downsample = PatchMerging(**_downsample_cfg) + else: + self.out_channels = embed_dims + self.downsample = None + + self.blocks = ModuleList() + for i in range(depth): + extra_norm = True if extra_norm_every_n_blocks and \ + (i + 1) % extra_norm_every_n_blocks == 0 else False + _block_cfg = { + 'embed_dims': self.out_channels, + 'num_heads': num_heads, + 'window_size': window_size, + 'shift': False if i % 2 == 0 else True, + 'extra_norm': extra_norm, + 'drop_path': drop_paths[i], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + 'pretrained_window_size': pretrained_window_size, + **block_cfgs[i] + } + block = SwinBlockV2(**_block_cfg) + self.blocks.append(block) + + def forward(self, x, in_shape): + if self.downsample: + x, out_shape = self.downsample(x, in_shape) + else: + out_shape = in_shape + + for block in self.blocks: + x = block(x, out_shape) + + return x, out_shape + + +@BACKBONES.register_module() +class SwinTransformerV2(BaseBackbone): + """Swin Transformer V2. + + A PyTorch implement of : `Swin Transformer V2: + Scaling Up Capacity and Resolution + `_ + + Inspiration from + https://github.com/microsoft/Swin-Transformer + + Args: + arch (str | dict): Swin Transformer architecture. If use string, choose + from 'tiny', 'small', 'base' and 'large'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **num_heads** (List[int]): The number of heads in attention + modules of each stage. + - **extra_norm_every_n_blocks** (int): Add extra norm at the end + of main branch every n blocks. + + Defaults to 'tiny'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 4. + in_channels (int): The num of input channels. Defaults to 3. + window_size (int | Sequence): The height and width of the window. + Defaults to 7. + drop_rate (float): Dropout rate after embedding. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + interpolate_mode (str): Select the interpolate mode for absolute + position embeding vector resize. Defaults to "bicubic". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). 
Note: Effect on Batch Norm + and its variants only. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN')`` + stage_cfgs (Sequence[dict] | dict): Extra config dict for each + stage. Defaults to an empty dict. + patch_cfg (dict): Extra config dict for patch embedding. + Defaults to an empty dict. + pretrained_window_sizes (tuple(int)): Pretrained window sizes of + each layer. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + + Examples: + >>> from mmcls.models import SwinTransformerV2 + >>> import torch + >>> extra_config = dict( + >>> arch='tiny', + >>> stage_cfgs=dict(downsample_cfg={'kernel_size': 3, + >>> 'padding': 'same'})) + >>> self = SwinTransformerV2(**extra_config) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> output = self.forward(inputs) + >>> print(output.shape) + (1, 2592, 4) + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': 96, + 'depths': [2, 2, 6, 2], + 'num_heads': [3, 6, 12, 24], + 'extra_norm_every_n_blocks': 0}), + **dict.fromkeys(['s', 'small'], + {'embed_dims': 96, + 'depths': [2, 2, 18, 2], + 'num_heads': [3, 6, 12, 24], + 'extra_norm_every_n_blocks': 0}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': 128, + 'depths': [2, 2, 18, 2], + 'num_heads': [4, 8, 16, 32], + 'extra_norm_every_n_blocks': 0}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': 192, + 'depths': [2, 2, 18, 2], + 'num_heads': [6, 12, 24, 48], + 'extra_norm_every_n_blocks': 0}), + # head count not certain for huge, and is employed for another + # parallel study about self-supervised learning. 
+ **dict.fromkeys(['h', 'huge'], + {'embed_dims': 352, + 'depths': [2, 2, 18, 2], + 'num_heads': [8, 16, 32, 64], + 'extra_norm_every_n_blocks': 6}), + **dict.fromkeys(['g', 'giant'], + {'embed_dims': 512, + 'depths': [2, 2, 42, 4], + 'num_heads': [16, 32, 64, 128], + 'extra_norm_every_n_blocks': 6}), + } # yapf: disable + + _version = 1 + num_extra_tokens = 0 + + def __init__(self, + arch='tiny', + img_size=256, + patch_size=4, + in_channels=3, + window_size=8, + drop_rate=0., + drop_path_rate=0.1, + out_indices=(3, ), + use_abs_pos_embed=False, + interpolate_mode='bicubic', + with_cp=False, + frozen_stages=-1, + norm_eval=False, + pad_small_map=False, + norm_cfg=dict(type='LN'), + stage_cfgs=dict(downsample_cfg=dict(is_post_norm=True)), + patch_cfg=dict(), + pretrained_window_sizes=[0, 0, 0, 0], + init_cfg=None): + super(SwinTransformerV2, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'depths', 'num_heads', + 'extra_norm_every_n_blocks' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.extra_norm_every_n_blocks = self.arch_settings[ + 'extra_norm_every_n_blocks'] + self.num_layers = len(self.depths) + self.out_indices = out_indices + self.use_abs_pos_embed = use_abs_pos_embed + self.interpolate_mode = interpolate_mode + self.frozen_stages = frozen_stages + + if isinstance(window_size, int): + self.window_sizes = [window_size for _ in range(self.num_layers)] + elif isinstance(window_size, Sequence): + assert len(window_size) == self.num_layers, \ + f'Length of window_sizes {len(window_size)} is not equal to '\ + f'length of stages {self.num_layers}.' 
+ self.window_sizes = window_size + else: + raise TypeError('window_size should be a Sequence or int.') + + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + norm_cfg=dict(type='LN'), + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + if self.use_abs_pos_embed: + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.embed_dims)) + self._register_load_state_dict_pre_hook( + self._prepare_abs_pos_embed) + + self._register_load_state_dict_pre_hook(self._delete_reinit_params) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + self.norm_eval = norm_eval + + # stochastic depth + total_depth = sum(self.depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + self.stages = ModuleList() + embed_dims = [self.embed_dims] + for i, (depth, + num_heads) in enumerate(zip(self.depths, self.num_heads)): + if isinstance(stage_cfgs, Sequence): + stage_cfg = stage_cfgs[i] + else: + stage_cfg = deepcopy(stage_cfgs) + downsample = True if i > 0 else False + _stage_cfg = { + 'embed_dims': embed_dims[-1], + 'depth': depth, + 'num_heads': num_heads, + 'window_size': self.window_sizes[i], + 'downsample': downsample, + 'drop_paths': dpr[:depth], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + 'extra_norm_every_n_blocks': self.extra_norm_every_n_blocks, + 'pretrained_window_size': pretrained_window_sizes[i], + **stage_cfg + } + + stage = SwinBlockV2Sequence(**_stage_cfg) + self.stages.append(stage) + + dpr = dpr[depth:] + embed_dims.append(stage.out_channels) + + for i in out_indices: + if norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, embed_dims[i + 1])[1] + else: + norm_layer = nn.Identity() + + self.add_module(f'norm{i}', norm_layer) + + def init_weights(self): + super(SwinTransformerV2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. 
+ return + + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + resize_pos_embed( + self.absolute_pos_embed, self.patch_resolution, hw_shape, + self.interpolate_mode, self.num_extra_tokens) + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(x) + out = out.view(-1, *hw_shape, + stage.out_channels).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(0, self.frozen_stages + 1): + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + for i in self.out_indices: + if i <= self.frozen_stages: + for param in getattr(self, f'norm{i}').parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(SwinTransformerV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _prepare_abs_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'absolute_pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.absolute_pos_embed.shape != ckpt_pos_embed_shape: + from mmcls.utils import get_root_logger + logger = get_root_logger() + logger.info( + 'Resize the absolute_pos_embed shape from ' + f'{ckpt_pos_embed_shape} to {self.absolute_pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + def _delete_reinit_params(self, state_dict, prefix, *args, **kwargs): + # delete relative_position_index since we always re-init it + relative_position_index_keys = [ + k for k in state_dict.keys() if 'relative_position_index' in k + ] + for k in relative_position_index_keys: + del state_dict[k] + + # delete relative_coords_table since we always re-init it + relative_position_index_keys = [ + k for k in state_dict.keys() if 'relative_coords_table' in k + ] + for k in relative_position_index_keys: + del state_dict[k] diff --git a/mmcls/models/backbones/t2t_vit.py b/mmcls/models/backbones/t2t_vit.py new file mode 100644 index 0000000..2edb991 --- /dev/null +++ b/mmcls/models/backbones/t2t_vit.py @@ -0,0 +1,440 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList + +from ..builder import BACKBONES +from ..utils import MultiheadAttention, resize_pos_embed, to_2tuple +from .base_backbone import BaseBackbone + + +class T2TTransformerLayer(BaseModule): + """Transformer Layer for T2T_ViT. + + Comparing with :obj:`TransformerEncoderLayer` in ViT, it supports + different ``input_dims`` and ``embed_dims``. 
+ + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs + input_dims (int, optional): The input token dimension. + Defaults to None. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``(input_dims // num_heads) ** -0.5`` if set. Defaults to None. + act_cfg (dict): The activation config for FFNs. + Defaluts to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + + Notes: + In general, ``qk_scale`` should be ``head_dims ** -0.5``, i.e. + ``(embed_dims // num_heads) ** -0.5``. However, in the official + code, it uses ``(input_dims // num_heads) ** -0.5``, so here we + keep the same with the official implementation. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + input_dims=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=False, + qk_scale=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(T2TTransformerLayer, self).__init__(init_cfg=init_cfg) + + self.v_shortcut = True if input_dims is not None else False + input_dims = input_dims or embed_dims + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, input_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + self.attn = MultiheadAttention( + input_dims=input_dims, + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + qk_scale=qk_scale or (input_dims // num_heads)**-0.5, + v_shortcut=self.v_shortcut) + + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + if self.v_shortcut: + x = self.attn(self.norm1(x)) + else: + x = x + self.attn(self.norm1(x)) + x = self.ffn(self.norm2(x), identity=x) + return x + + +class T2TModule(BaseModule): + """Tokens-to-Token module. + + "Tokens-to-Token module" (T2T Module) can model the local structure + information of images and reduce the length of tokens progressively. + + Args: + img_size (int): Input image size + in_channels (int): Number of input channels + embed_dims (int): Embedding dimension + token_dims (int): Tokens dimension in T2TModuleAttention. + use_performer (bool): If True, use Performer version self-attention to + adopt regular self-attention. Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Default: None. 
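+
+    Examples:
+        A minimal sketch (the 224x224 input and the printed values assume
+        the default three soft splits with strides 4, 2 and 2, i.e. a 14x14
+        token grid):
+
+        >>> import torch
+        >>> from mmcls.models.backbones.t2t_vit import T2TModule
+        >>> t2t = T2TModule(img_size=224, in_channels=3, embed_dims=384)
+        >>> tokens, hw_shape = t2t(torch.rand(1, 3, 224, 224))
+        >>> tokens.shape
+        torch.Size([1, 196, 384])
+        >>> hw_shape
+        (14, 14)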
+ + Notes: + Usually, ``token_dim`` is set as a small value (32 or 64) to reduce + MACs + """ + + def __init__( + self, + img_size=224, + in_channels=3, + embed_dims=384, + token_dims=64, + use_performer=False, + init_cfg=None, + ): + super(T2TModule, self).__init__(init_cfg) + + self.embed_dims = embed_dims + + self.soft_split0 = nn.Unfold( + kernel_size=(7, 7), stride=(4, 4), padding=(2, 2)) + self.soft_split1 = nn.Unfold( + kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + self.soft_split2 = nn.Unfold( + kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + + if not use_performer: + self.attention1 = T2TTransformerLayer( + input_dims=in_channels * 7 * 7, + embed_dims=token_dims, + num_heads=1, + feedforward_channels=token_dims) + + self.attention2 = T2TTransformerLayer( + input_dims=token_dims * 3 * 3, + embed_dims=token_dims, + num_heads=1, + feedforward_channels=token_dims) + + self.project = nn.Linear(token_dims * 3 * 3, embed_dims) + else: + raise NotImplementedError("Performer hasn't been implemented.") + + # there are 3 soft split, stride are 4,2,2 separately + out_side = img_size // (4 * 2 * 2) + self.init_out_size = [out_side, out_side] + self.num_patches = out_side**2 + + @staticmethod + def _get_unfold_size(unfold: nn.Unfold, input_size): + h, w = input_size + kernel_size = to_2tuple(unfold.kernel_size) + stride = to_2tuple(unfold.stride) + padding = to_2tuple(unfold.padding) + dilation = to_2tuple(unfold.dilation) + + h_out = (h + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (w + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + return (h_out, w_out) + + def forward(self, x): + # step0: soft split + hw_shape = self._get_unfold_size(self.soft_split0, x.shape[2:]) + x = self.soft_split0(x).transpose(1, 2) + + for step in [1, 2]: + # re-structurization/reconstruction + attn = getattr(self, f'attention{step}') + x = attn(x).transpose(1, 2) + B, C, _ = x.shape + x = x.reshape(B, C, hw_shape[0], hw_shape[1]) + + # soft split + soft_split = getattr(self, f'soft_split{step}') + hw_shape = self._get_unfold_size(soft_split, hw_shape) + x = soft_split(x).transpose(1, 2) + + # final tokens + x = self.project(x) + return x, hw_shape + + +def get_sinusoid_encoding(n_position, embed_dims): + """Generate sinusoid encoding table. + + Sinusoid encoding is a kind of relative position encoding method came from + `Attention Is All You Need`_. + Args: + n_position (int): The length of the input token. + embed_dims (int): The position embedding dimension. + Returns: + :obj:`torch.FloatTensor`: The sinusoid encoding table. + """ + + vec = torch.arange(embed_dims, dtype=torch.float64) + vec = (vec - vec % 2) / embed_dims + vec = torch.pow(10000, -vec).view(1, -1) + + sinusoid_table = torch.arange(n_position).view(-1, 1) * vec + sinusoid_table[:, 0::2].sin_() # dim 2i + sinusoid_table[:, 1::2].cos_() # dim 2i+1 + + sinusoid_table = sinusoid_table.to(torch.float32) + + return sinusoid_table.unsqueeze(0) + + +@BACKBONES.register_module() +class T2T_ViT(BaseBackbone): + """Tokens-to-Token Vision Transformer (T2T-ViT) + + A PyTorch implementation of `Tokens-to-Token ViT: Training Vision + Transformers from Scratch on ImageNet `_ + + Args: + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + in_channels (int): Number of input channels. + embed_dims (int): Embedding dimension. 
+ num_layers (int): Num of transformer layers in encoder. + Defaults to 14. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Dropout rate after position embedding. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. Defaults to + ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + output_cls_token (bool): Whether output the cls_token. If set True, + ``with_cls_token`` must be True. Defaults to True. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + t2t_cfg (dict): Extra config of Tokens-to-Token module. + Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + num_extra_tokens = 1 # cls_token + + def __init__(self, + img_size=224, + in_channels=3, + embed_dims=384, + num_layers=14, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + final_norm=True, + with_cls_token=True, + output_cls_token=True, + interpolate_mode='bicubic', + t2t_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None): + super(T2T_ViT, self).__init__(init_cfg) + + # Token-to-Token Module + self.tokens_to_token = T2TModule( + img_size=img_size, + in_channels=in_channels, + embed_dims=embed_dims, + **t2t_cfg) + self.patch_resolution = self.tokens_to_token.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + # Set cls token + if output_cls_token: + assert with_cls_token is True, f'with_cls_token must be True if' \ + f'set output_cls_token to True, but got {with_cls_token}' + self.with_cls_token = with_cls_token + self.output_cls_token = output_cls_token + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + + # Set position embedding + self.interpolate_mode = interpolate_mode + sinusoid_table = get_sinusoid_encoding( + num_patches + self.num_extra_tokens, embed_dims) + self.register_buffer('pos_embed', sinusoid_table) + self._register_load_state_dict_pre_hook(self._prepare_pos_embed) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must be a sequence or int, ' \ + f'get {type(out_indices)} instead.' 
+ for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = num_layers + index + assert 0 <= out_indices[i] <= num_layers, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + dpr = [x for x in np.linspace(0, drop_path_rate, num_layers)] + + self.encoder = ModuleList() + for i in range(num_layers): + if isinstance(layer_cfgs, Sequence): + layer_cfg = layer_cfgs[i] + else: + layer_cfg = deepcopy(layer_cfgs) + layer_cfg = { + 'embed_dims': embed_dims, + 'num_heads': 6, + 'feedforward_channels': 3 * embed_dims, + 'drop_path_rate': dpr[i], + 'qkv_bias': False, + 'norm_cfg': norm_cfg, + **layer_cfg + } + + layer = T2TTransformerLayer(**layer_cfg) + self.encoder.append(layer) + + self.final_norm = final_norm + if final_norm: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = nn.Identity() + + def init_weights(self): + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress custom init if use pretrained model. + return + + trunc_normal_(self.cls_token, std=.02) + + def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.pos_embed.shape != ckpt_pos_embed_shape: + from mmcls.utils import get_root_logger + logger = get_root_logger() + logger.info( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.tokens_to_token.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.tokens_to_token(x) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + if not self.with_cls_token: + # Remove class token for transformer encoder input + x = x[:, 1:] + + outs = [] + for i, layer in enumerate(self.encoder): + x = layer(x) + + if i == len(self.encoder) - 1 and self.final_norm: + x = self.norm(x) + + if i in self.out_indices: + B, _, C = x.shape + if self.with_cls_token: + patch_token = x[:, 1:].reshape(B, *patch_resolution, C) + patch_token = patch_token.permute(0, 3, 1, 2) + cls_token = x[:, 0] + else: + patch_token = x.reshape(B, *patch_resolution, C) + patch_token = patch_token.permute(0, 3, 1, 2) + cls_token = None + if self.output_cls_token: + out = [patch_token, cls_token] + else: + out = patch_token + outs.append(out) + + return tuple(outs) diff --git a/mmcls/models/backbones/timm_backbone.py b/mmcls/models/backbones/timm_backbone.py new file mode 100644 index 0000000..1506619 --- /dev/null +++ b/mmcls/models/backbones/timm_backbone.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
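+# A minimal usage sketch for the ``TIMMBackbone`` wrapper defined in this file
+# (illustrative only; it assumes the optional ``timm`` package is installed
+# and uses ``resnet18`` purely as an example model name):
+#
+#     >>> import torch
+#     >>> from mmcls.models.backbones.timm_backbone import TIMMBackbone
+#     >>> model = TIMMBackbone(model_name='resnet18', features_only=True)
+#     >>> feats = model(torch.rand(1, 3, 224, 224))
+#     >>> isinstance(feats, tuple)  # one tensor per extracted feature stage
+#     True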
+try: + import timm +except ImportError: + timm = None + +import warnings + +from mmcv.cnn.bricks.registry import NORM_LAYERS + +from ...utils import get_root_logger +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +def print_timm_feature_info(feature_info): + """Print feature_info of timm backbone to help development and debug. + + Args: + feature_info (list[dict] | timm.models.features.FeatureInfo | None): + feature_info of timm backbone. + """ + logger = get_root_logger() + if feature_info is None: + logger.warning('This backbone does not have feature_info') + elif isinstance(feature_info, list): + for feat_idx, each_info in enumerate(feature_info): + logger.info(f'backbone feature_info[{feat_idx}]: {each_info}') + else: + try: + logger.info(f'backbone out_indices: {feature_info.out_indices}') + logger.info(f'backbone out_channels: {feature_info.channels()}') + logger.info(f'backbone out_strides: {feature_info.reduction()}') + except AttributeError: + logger.warning('Unexpected format of backbone feature_info') + + +@BACKBONES.register_module() +class TIMMBackbone(BaseBackbone): + """Wrapper to use backbones from timm library. + + More details can be found in + `timm `_. + See especially the document for `feature extraction + `_. + + Args: + model_name (str): Name of timm model to instantiate. + features_only (bool): Whether to extract feature pyramid (multi-scale + feature maps from the deepest layer at each stride). For Vision + Transformer models that do not support this argument, + set this False. Defaults to False. + pretrained (bool): Whether to load pretrained weights. + Defaults to False. + checkpoint_path (str): Path of checkpoint to load at the last of + ``timm.create_model``. Defaults to empty string, which means + not loading. + in_channels (int): Number of input image channels. Defaults to 3. + init_cfg (dict or list[dict], optional): Initialization config dict of + OpenMMLab projects. Defaults to None. + **kwargs: Other timm & model specific arguments. + """ + + def __init__(self, + model_name, + features_only=False, + pretrained=False, + checkpoint_path='', + in_channels=3, + init_cfg=None, + **kwargs): + if timm is None: + raise RuntimeError( + 'Failed to import timm. Please run "pip install timm". ' + '"pip install dataclasses" may also be needed for Python 3.6.') + if not isinstance(pretrained, bool): + raise TypeError('pretrained must be bool, not str for model path') + if features_only and checkpoint_path: + warnings.warn( + 'Using both features_only and checkpoint_path will cause error' + ' in timm. 
See ' + 'https://github.com/rwightman/pytorch-image-models/issues/488') + + super(TIMMBackbone, self).__init__(init_cfg) + if 'norm_layer' in kwargs: + kwargs['norm_layer'] = NORM_LAYERS.get(kwargs['norm_layer']) + self.timm_model = timm.create_model( + model_name=model_name, + features_only=features_only, + pretrained=pretrained, + in_chans=in_channels, + checkpoint_path=checkpoint_path, + **kwargs) + + # reset classifier + if hasattr(self.timm_model, 'reset_classifier'): + self.timm_model.reset_classifier(0, '') + + # Hack to use pretrained weights from timm + if pretrained or checkpoint_path: + self._is_init = True + + feature_info = getattr(self.timm_model, 'feature_info', None) + print_timm_feature_info(feature_info) + + def forward(self, x): + features = self.timm_model(x) + if isinstance(features, (list, tuple)): + features = tuple(features) + else: + features = (features, ) + return features diff --git a/mmcls/models/backbones/tnt.py b/mmcls/models/backbones/tnt.py new file mode 100644 index 0000000..b03120b --- /dev/null +++ b/mmcls/models/backbones/tnt.py @@ -0,0 +1,368 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList + +from ..builder import BACKBONES +from ..utils import to_2tuple +from .base_backbone import BaseBackbone + + +class TransformerBlock(BaseModule): + """Implement a transformer block in TnTLayer. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + ffn_ratio (int): A ratio to calculate the hidden_dims in ffn layer. + Default: 4 + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0. + drop_path_rate (float): stochastic depth rate. Default 0. + num_fcs (int): The number of fully-connected layers for FFNs. Default 2 + qkv_bias (bool): Enable bias for qkv if True. Default False + act_cfg (dict): The activation config for FFNs. Defaults to GELU. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) or (n, batch, embed_dim). + (batch, n, embed_dim) is common case in CV. Default to False + init_cfg (dict, optional): Initialization config dict. 
Default to None + """ + + def __init__(self, + embed_dims, + num_heads, + ffn_ratio=4, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + batch_first=True, + init_cfg=None): + super(TransformerBlock, self).__init__(init_cfg=init_cfg) + + self.norm_attn = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + batch_first=batch_first) + + self.norm_ffn = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=embed_dims * ffn_ratio, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + if not qkv_bias: + self.attn.attn.in_proj_bias = None + + def forward(self, x): + x = self.attn(self.norm_attn(x), identity=x) + x = self.ffn(self.norm_ffn(x), identity=x) + return x + + +class TnTLayer(BaseModule): + """Implement one encoder layer in Transformer in Transformer. + + Args: + num_pixel (int): The pixel number in target patch transformed with + a linear projection in inner transformer + embed_dims_inner (int): Feature dimension in inner transformer block + embed_dims_outer (int): Feature dimension in outer transformer block + num_heads_inner (int): Parallel attention heads in inner transformer. + num_heads_outer (int): Parallel attention heads in outer transformer. + inner_block_cfg (dict): Extra config of inner transformer block. + Defaults to empty dict. + outer_block_cfg (dict): Extra config of outer transformer block. + Defaults to empty dict. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + init_cfg (dict, optional): Initialization config dict. Default to None + """ + + def __init__(self, + num_pixel, + embed_dims_inner, + embed_dims_outer, + num_heads_inner, + num_heads_outer, + inner_block_cfg=dict(), + outer_block_cfg=dict(), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(TnTLayer, self).__init__(init_cfg=init_cfg) + + self.inner_block = TransformerBlock( + embed_dims=embed_dims_inner, + num_heads=num_heads_inner, + **inner_block_cfg) + + self.norm_proj = build_norm_layer(norm_cfg, embed_dims_inner)[1] + self.projection = nn.Linear( + embed_dims_inner * num_pixel, embed_dims_outer, bias=True) + + self.outer_block = TransformerBlock( + embed_dims=embed_dims_outer, + num_heads=num_heads_outer, + **outer_block_cfg) + + def forward(self, pixel_embed, patch_embed): + pixel_embed = self.inner_block(pixel_embed) + + B, N, C = patch_embed.size() + patch_embed[:, 1:] = patch_embed[:, 1:] + self.projection( + self.norm_proj(pixel_embed).reshape(B, N - 1, -1)) + patch_embed = self.outer_block(patch_embed) + + return pixel_embed, patch_embed + + +class PixelEmbed(BaseModule): + """Image to Pixel Embedding. + + Args: + img_size (int | tuple): The size of input image + patch_size (int): The size of one patch + in_channels (int): The num of input channels + embed_dims_inner (int): The num of channels of the target patch + transformed with a linear projection in inner transformer + stride (int): The stride of the conv2d layer. We use a conv2d layer + and a unfold layer to implement image to pixel embedding. 
+ init_cfg (dict, optional): Initialization config dict + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dims_inner=48, + stride=4, + init_cfg=None): + super(PixelEmbed, self).__init__(init_cfg=init_cfg) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # patches_resolution property necessary for resizing + # positional embedding + patches_resolution = [ + img_size[0] // patch_size[0], img_size[1] // patch_size[1] + ] + num_patches = patches_resolution[0] * patches_resolution[1] + + self.img_size = img_size + self.num_patches = num_patches + self.embed_dims_inner = embed_dims_inner + + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d( + in_channels, + self.embed_dims_inner, + kernel_size=7, + padding=3, + stride=stride) + self.unfold = nn.Unfold( + kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model " \ + f'({self.img_size[0]}*{self.img_size[1]}).' + x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, + 2).reshape(B * self.num_patches, self.embed_dims_inner, + self.new_patch_size[0], + self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.embed_dims_inner, + -1).transpose(1, 2) + return x + + +@BACKBONES.register_module() +class TNT(BaseBackbone): + """Transformer in Transformer. + + A PyTorch implement of: `Transformer in Transformer + `_ + + Inspiration from + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tnt.py + + Args: + arch (str | dict): Vision Transformer architecture + Default: 'b' + img_size (int | tuple): Input image size. Default to 224 + patch_size (int | tuple): The patch size. Deault to 16 + in_channels (int): Number of input channels. Default to 3 + ffn_ratio (int): A ratio to calculate the hidden_dims in ffn layer. + Default: 4 + qkv_bias (bool): Enable bias for qkv if True. Default False + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0. + drop_path_rate (float): stochastic depth rate. Default 0. + act_cfg (dict): The activation config for FFNs. Defaults to GELU. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + first_stride (int): The stride of the conv2d layer. We use a conv2d + layer and a unfold layer to implement image to pixel embedding. + num_fcs (int): The number of fully-connected layers for FFNs. Default 2 + init_cfg (dict, optional): Initialization config dict + """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims_outer': 384, + 'embed_dims_inner': 24, + 'num_layers': 12, + 'num_heads_outer': 6, + 'num_heads_inner': 4 + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims_outer': 640, + 'embed_dims_inner': 40, + 'num_layers': 12, + 'num_heads_outer': 10, + 'num_heads_inner': 4 + }) + } + + def __init__(self, + arch='b', + img_size=224, + patch_size=16, + in_channels=3, + ffn_ratio=4, + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + first_stride=4, + num_fcs=2, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ]): + super(TNT, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims_outer', 'embed_dims_inner', 'num_layers', + 'num_heads_inner', 'num_heads_outer' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims_inner = self.arch_settings['embed_dims_inner'] + self.embed_dims_outer = self.arch_settings['embed_dims_outer'] + # embed_dims for consistency with other models + self.embed_dims = self.embed_dims_outer + self.num_layers = self.arch_settings['num_layers'] + self.num_heads_inner = self.arch_settings['num_heads_inner'] + self.num_heads_outer = self.arch_settings['num_heads_outer'] + + self.pixel_embed = PixelEmbed( + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dims_inner=self.embed_dims_inner, + stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = build_norm_layer(norm_cfg, num_pixel * + self.embed_dims_inner)[1] + self.projection = nn.Linear(num_pixel * self.embed_dims_inner, + self.embed_dims_outer) + self.norm2_proj = build_norm_layer(norm_cfg, self.embed_dims_outer)[1] + + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims_outer)) + self.patch_pos = nn.Parameter( + torch.zeros(1, num_patches + 1, self.embed_dims_outer)) + self.pixel_pos = nn.Parameter( + torch.zeros(1, self.embed_dims_inner, new_patch_size[0], + new_patch_size[1])) + self.drop_after_pos = nn.Dropout(p=drop_rate) + + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, self.num_layers) + ] # stochastic depth decay rule + self.layers = ModuleList() + for i in range(self.num_layers): + block_cfg = dict( + ffn_ratio=ffn_ratio, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[i], + num_fcs=num_fcs, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + batch_first=True) + self.layers.append( + TnTLayer( + num_pixel=num_pixel, + embed_dims_inner=self.embed_dims_inner, + embed_dims_outer=self.embed_dims_outer, + num_heads_inner=self.num_heads_inner, + num_heads_outer=self.num_heads_outer, + inner_block_cfg=block_cfg, + outer_block_cfg=block_cfg, + norm_cfg=norm_cfg)) + + self.norm = build_norm_layer(norm_cfg, self.embed_dims_outer)[1] + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + + def forward(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj( + self.projection( + self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat( + (self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.drop_after_pos(patch_embed) + + for layer in self.layers: + pixel_embed, patch_embed = layer(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return (patch_embed[:, 0], ) diff --git a/mmcls/models/backbones/twins.py b/mmcls/models/backbones/twins.py new file mode 100644 index 0000000..0e3c47a --- /dev/null +++ b/mmcls/models/backbones/twins.py @@ -0,0 +1,723 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
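# ---------------------------------------------------------------------------
# Annotation (editor's illustrative sketch, not part of the original patch):
# minimal usage of the TNT backbone defined in tnt.py above. With the 'base'
# preset (patch_size=16, first_stride=4), a 224x224 input yields 14 * 14 = 196
# patch tokens of dim 640, each split into ceil(16 / 4) ** 2 = 16 pixel tokens
# of dim 40; forward() returns a one-element tuple holding the class token.
import torch

from mmcls.models import TNT  # assumes TNT is exported like the other backbones

tnt = TNT(arch='b')
feats = tnt(torch.rand(1, 3, 224, 224))
print(feats[-1].shape)  # torch.Size([1, 640])
# ---------------------------------------------------------------------------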
+import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmcv.cnn.utils.weight_init import (constant_init, normal_init, + trunc_normal_init) +from mmcv.runner import BaseModule, ModuleList +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.builder import BACKBONES +from mmcls.models.utils.attention import MultiheadAttention +from mmcls.models.utils.position_encoding import ConditionalPositionEncoding + + +class GlobalSubsampledAttention(MultiheadAttention): + """Global Sub-sampled Attention (GSA) module. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + input_dims (int, optional): The input dimension, and if None, + use ``embed_dims``. Defaults to None. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + dropout_layer (dict): The dropout config before adding the shortcut. + Defaults to ``dict(type='Dropout', drop_prob=0.)``. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + v_shortcut (bool): Add a shortcut from value to output. It's usually + used if ``input_dims`` is different from ``embed_dims``. + Defaults to False. + sr_ratio (float): The ratio of spatial reduction in attention modules. + Defaults to 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + norm_cfg=dict(type='LN'), + qkv_bias=True, + sr_ratio=1, + **kwargs): + super(GlobalSubsampledAttention, + self).__init__(embed_dims, num_heads, **kwargs) + + self.qkv_bias = qkv_bias + self.q = nn.Linear(self.input_dims, embed_dims, bias=qkv_bias) + self.kv = nn.Linear(self.input_dims, embed_dims * 2, bias=qkv_bias) + + # remove self.qkv, here split into self.q, self.kv + delattr(self, 'qkv') + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + # use a conv as the spatial-reduction operation, the kernel_size + # and stride in conv are equal to the sr_ratio. + self.sr = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=sr_ratio, + stride=sr_ratio) + # The ret[0] of build_norm_layer is norm name. + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + + def forward(self, x, hw_shape): + B, N, C = x.shape + H, W = hw_shape + assert H * W == N, 'The product of h and w of hw_shape must be N, ' \ + 'which is the 2nd dim number of the input Tensor x.' 
+ + q = self.q(x).reshape(B, N, self.num_heads, + C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr_ratio > 1: + x = x.permute(0, 2, 1).reshape(B, C, *hw_shape) # BNC_2_BCHW + x = self.sr(x) + x = x.reshape(B, C, -1).permute(0, 2, 1) # BCHW_2_BNC + x = self.norm(x) + + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, + self.head_dims).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.out_drop(self.proj_drop(x)) + + if self.v_shortcut: + x = v.squeeze(1) + x + return x + + +class GSAEncoderLayer(BaseModule): + """Implements one encoder layer with GlobalSubsampledAttention(GSA). + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (float): The ratio of spatial reduction in attention modules. + Defaults to 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=1., + init_cfg=None): + super(GSAEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1] + self.attn = GlobalSubsampledAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate) + ) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x, hw_shape): + x = x + self.drop_path(self.attn(self.norm1(x), hw_shape)) + x = x + self.drop_path(self.ffn(self.norm2(x))) + return x + + +class LocallyGroupedSelfAttention(BaseModule): + """Locally-grouped Self Attention (LSA) module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. Default: 8 + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: False. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + window_size(int): Window size of LSA. Default: 1. 
+ init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + window_size=1, + init_cfg=None): + super(LocallyGroupedSelfAttention, self).__init__(init_cfg=init_cfg) + + assert embed_dims % num_heads == 0, \ + f'dim {embed_dims} should be divided by num_heads {num_heads}' + + self.embed_dims = embed_dims + self.num_heads = num_heads + head_dim = embed_dims // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + self.window_size = window_size + + def forward(self, x, hw_shape): + B, N, C = x.shape + H, W = hw_shape + x = x.view(B, H, W, C) + + # pad feature maps to multiples of Local-groups + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + + # calculate attention mask for LSA + Hp, Wp = x.shape[1:-1] + _h, _w = Hp // self.window_size, Wp // self.window_size + mask = torch.zeros((1, Hp, Wp), device=x.device) + mask[:, -pad_b:, :].fill_(1) + mask[:, :, -pad_r:].fill_(1) + + # [B, _h, _w, window_size, window_size, C] + x = x.reshape(B, _h, self.window_size, _w, self.window_size, + C).transpose(2, 3) + mask = mask.reshape(1, _h, self.window_size, _w, + self.window_size).transpose(2, 3).reshape( + 1, _h * _w, + self.window_size * self.window_size) + # [1, _h*_w, window_size*window_size, window_size*window_size] + attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-1000.0)).masked_fill( + attn_mask == 0, float(0.0)) + + # [3, B, _w*_h, nhead, window_size*window_size, dim] + qkv = self.qkv(x).reshape(B, _h * _w, + self.window_size * self.window_size, 3, + self.num_heads, C // self.num_heads).permute( + 3, 0, 1, 4, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] + # [B, _h*_w, n_head, window_size*window_size, window_size*window_size] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn + attn_mask.unsqueeze(2) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.window_size, + self.window_size, C) + x = attn.transpose(2, 3).reshape(B, _h * self.window_size, + _w * self.window_size, C) + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LSAEncoderLayer(BaseModule): + """Implements one encoder layer with LocallyGroupedSelfAttention(LSA). + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + act_cfg (dict): The activation config for FFNs. 
+ Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + window_size (int): Window size of LSA. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + qk_scale=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + window_size=1, + init_cfg=None): + + super(LSAEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1] + self.attn = LocallyGroupedSelfAttention(embed_dims, num_heads, + qkv_bias, qk_scale, + attn_drop_rate, drop_rate, + window_size) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate) + ) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x, hw_shape): + x = x + self.drop_path(self.attn(self.norm1(x), hw_shape)) + x = x + self.drop_path(self.ffn(self.norm2(x))) + return x + + +@BACKBONES.register_module() +class PCPVT(BaseModule): + """The backbone of Twins-PCPVT. + + This backbone is the implementation of `Twins: Revisiting the Design + of Spatial Attention in Vision Transformers + `_. + + Args: + arch (dict, str): PCPVT architecture, a str value in arch zoo or a + detailed configuration dict with 7 keys, and the length of all the + values in dict should be the same: + + - depths (List[int]): The number of encoder layers in each stage. + - embed_dims (List[int]): Embedding dimension in each stage. + - patch_sizes (List[int]): The patch sizes in each stage. + - num_heads (List[int]): Numbers of attention head in each stage. + - strides (List[int]): The strides in each stage. + - mlp_ratios (List[int]): The ratios of mlp in each stage. + - sr_ratios (List[int]): The ratios of GSA-encoder layers in each + stage. + + in_channels (int): Number of input channels. Default: 3. + out_indices (tuple[int]): Output from which stages. + Default: (3, ). + qkv_bias (bool): Enable bias for qkv if True. Default: False. + drop_rate (float): Probability of an element to be zeroed. + Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0 + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + norm_after_stage(bool, List[bool]): Add extra norm after each stage. + Default False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
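# Annotation (editor's note, not part of the original patch): a worked example
# of the spatial reduction described above. With the 'small' preset and a
# 224x224 input, stage 1 operates on a 56x56 token map (3136 tokens); its GSA
# layers use sr_ratio=8, so keys/values are downsampled by an 8x8 strided conv
# to 7x7 = 49 tokens while all 3136 queries stay at full resolution.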
+ + Examples: + >>> from mmcls.models import PCPVT + >>> import torch + >>> pcpvt_cfg = {'arch': "small", + >>> 'norm_after_stage': [False, False, False, True]} + >>> model = PCPVT(**pcpvt_cfg) + >>> x = torch.rand(1, 3, 224, 224) + >>> outputs = model(x) + >>> print(outputs[-1].shape) + torch.Size([1, 512, 7, 7]) + >>> pcpvt_cfg['norm_after_stage'] = [True, True, True, True] + >>> pcpvt_cfg['out_indices'] = (0, 1, 2, 3) + >>> model = PCPVT(**pcpvt_cfg) + >>> outputs = model(x) + >>> for feat in outputs: + >>> print(feat.shape) + torch.Size([1, 64, 56, 56]) + torch.Size([1, 128, 28, 28]) + torch.Size([1, 320, 14, 14]) + torch.Size([1, 512, 7, 7]) + """ + arch_zoo = { + **dict.fromkeys(['s', 'small'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 4, 6, 3], + 'num_heads': [1, 2, 5, 8], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [8, 8, 4, 4], + 'sr_ratios': [8, 4, 2, 1]}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 4, 18, 3], + 'num_heads': [1, 2, 5, 8], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [8, 8, 4, 4], + 'sr_ratios': [8, 4, 2, 1]}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 8, 27, 3], + 'num_heads': [1, 2, 5, 8], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [8, 8, 4, 4], + 'sr_ratios': [8, 4, 2, 1]}), + } # yapf: disable + + essential_keys = { + 'embed_dims', 'depths', 'num_heads', 'patch_sizes', 'strides', + 'mlp_ratios', 'sr_ratios' + } + + def __init__(self, + arch, + in_channels=3, + out_indices=(3, ), + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + norm_after_stage=False, + init_cfg=None): + super(PCPVT, self).__init__(init_cfg=init_cfg) + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + assert isinstance(arch, dict) and ( + set(arch) == self.essential_keys + ), f'Custom arch needs a dict with keys {self.essential_keys}.' 
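# Annotation (editor's note, not part of the original patch): a custom ``arch``
# dict must supply all seven keys with equal-length per-stage lists; the values
# below simply restate the 'small' preset for reference:
#     dict(embed_dims=[64, 128, 320, 512],
#          depths=[3, 4, 6, 3],
#          num_heads=[1, 2, 5, 8],
#          patch_sizes=[4, 2, 2, 2],
#          strides=[4, 2, 2, 2],
#          mlp_ratios=[8, 8, 4, 4],
#          sr_ratios=[8, 4, 2, 1])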
+ self.arch_settings = arch + + self.depths = self.arch_settings['depths'] + self.embed_dims = self.arch_settings['embed_dims'] + self.patch_sizes = self.arch_settings['patch_sizes'] + self.strides = self.arch_settings['strides'] + self.mlp_ratios = self.arch_settings['mlp_ratios'] + self.num_heads = self.arch_settings['num_heads'] + self.sr_ratios = self.arch_settings['sr_ratios'] + + self.num_extra_tokens = 0 # there is no cls-token in Twins + self.num_stage = len(self.depths) + for key, value in self.arch_settings.items(): + assert isinstance(value, list) and len(value) == self.num_stage, ( + 'Length of setting item in arch dict must be type of list and' + ' have the same length.') + + # patch_embeds + self.patch_embeds = ModuleList() + self.position_encoding_drops = ModuleList() + self.stages = ModuleList() + + for i in range(self.num_stage): + # use in_channels of the model in the first stage + if i == 0: + stage_in_channels = in_channels + else: + stage_in_channels = self.embed_dims[i - 1] + + self.patch_embeds.append( + PatchEmbed( + in_channels=stage_in_channels, + embed_dims=self.embed_dims[i], + conv_type='Conv2d', + kernel_size=self.patch_sizes[i], + stride=self.strides[i], + padding='corner', + norm_cfg=dict(type='LN'))) + + self.position_encoding_drops.append(nn.Dropout(p=drop_rate)) + + # PEGs + self.position_encodings = ModuleList([ + ConditionalPositionEncoding(embed_dim, embed_dim) + for embed_dim in self.embed_dims + ]) + + # stochastic depth + total_depth = sum(self.depths) + self.dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + cur = 0 + + for k in range(len(self.depths)): + _block = ModuleList([ + GSAEncoderLayer( + embed_dims=self.embed_dims[k], + num_heads=self.num_heads[k], + feedforward_channels=self.mlp_ratios[k] * + self.embed_dims[k], + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate, + drop_path_rate=self.dpr[cur + i], + num_fcs=2, + qkv_bias=qkv_bias, + act_cfg=dict(type='GELU'), + norm_cfg=norm_cfg, + sr_ratio=self.sr_ratios[k]) for i in range(self.depths[k]) + ]) + self.stages.append(_block) + cur += self.depths[k] + + self.out_indices = out_indices + + assert isinstance(norm_after_stage, (bool, list)) + if isinstance(norm_after_stage, bool): + self.norm_after_stage = [norm_after_stage] * self.num_stage + else: + self.norm_after_stage = norm_after_stage + assert len(self.norm_after_stage) == self.num_stage, \ + (f'Number of norm_after_stage({len(self.norm_after_stage)}) should' + f' be equal to the number of stages({self.num_stage}).') + + for i, has_norm in enumerate(self.norm_after_stage): + assert isinstance(has_norm, bool), 'norm_after_stage should be ' \ + 'bool or List[bool].' + if has_norm and norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, self.embed_dims[i])[1] + else: + norm_layer = nn.Identity() + + self.add_module(f'norm_after_stage{i}', norm_layer) + + def init_weights(self): + if self.init_cfg is not None: + super(PCPVT, self).init_weights() + else: + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) 
+                elif isinstance(m, nn.Conv2d):
+                    fan_out = m.kernel_size[0] * m.kernel_size[
+                        1] * m.out_channels
+                    fan_out //= m.groups
+                    normal_init(
+                        m, mean=0, std=math.sqrt(2.0 / fan_out), bias=0)
+
+    def forward(self, x):
+        outputs = list()
+
+        b = x.shape[0]
+
+        for i in range(self.num_stage):
+            x, hw_shape = self.patch_embeds[i](x)
+            h, w = hw_shape
+            x = self.position_encoding_drops[i](x)
+            for j, blk in enumerate(self.stages[i]):
+                x = blk(x, hw_shape)
+                if j == 0:
+                    x = self.position_encodings[i](x, hw_shape)
+
+            norm_layer = getattr(self, f'norm_after_stage{i}')
+            x = norm_layer(x)
+            x = x.reshape(b, h, w, -1).permute(0, 3, 1, 2).contiguous()
+
+            if i in self.out_indices:
+                outputs.append(x)
+
+        return tuple(outputs)
+
+
+@BACKBONES.register_module()
+class SVT(PCPVT):
+    """The backbone of Twins-SVT.
+
+    This backbone is the implementation of `Twins: Revisiting the Design
+    of Spatial Attention in Vision Transformers
+    `_.
+
+    Args:
+        arch (dict, str): SVT architecture, a str value in arch zoo or a
+            detailed configuration dict with 8 keys, and the length of all the
+            values in dict should be the same:
+
+            - depths (List[int]): The number of encoder layers in each stage.
+            - embed_dims (List[int]): Embedding dimension in each stage.
+            - patch_sizes (List[int]): The patch sizes in each stage.
+            - num_heads (List[int]): Numbers of attention head in each stage.
+            - strides (List[int]): The strides in each stage.
+            - mlp_ratios (List[int]): The ratios of mlp in each stage.
+            - sr_ratios (List[int]): The spatial-reduction ratios of the GSA
+              modules in each stage.
+            - window_sizes (List[int]): The window sizes in LSA-encoder layers
+              in each stage.
+
+        in_channels (int): Number of input channels. Default: 3.
+        out_indices (tuple[int]): Output from which stages.
+            Default: (3, ).
+        qkv_bias (bool): Enable bias for qkv if True. Default: False.
+        drop_rate (float): Dropout rate. Default 0.
+        attn_drop_rate (float): Dropout ratio of attention weight.
+            Default 0.0
+        drop_path_rate (float): Stochastic depth rate. Default 0.0.
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: dict(type='LN')
+        norm_after_stage(bool, List[bool]): Add extra norm after each stage.
+            Default False.
+        init_cfg (dict, optional): The Config for initialization.
+            Defaults to None.
+ + Examples: + >>> from mmcls.models import SVT + >>> import torch + >>> svt_cfg = {'arch': "small", + >>> 'norm_after_stage': [False, False, False, True]} + >>> model = SVT(**svt_cfg) + >>> x = torch.rand(1, 3, 224, 224) + >>> outputs = model(x) + >>> print(outputs[-1].shape) + torch.Size([1, 512, 7, 7]) + >>> svt_cfg["out_indices"] = (0, 1, 2, 3) + >>> svt_cfg["norm_after_stage"] = [True, True, True, True] + >>> model = SVT(**svt_cfg) + >>> output = model(x) + >>> for feat in output: + >>> print(feat.shape) + torch.Size([1, 64, 56, 56]) + torch.Size([1, 128, 28, 28]) + torch.Size([1, 320, 14, 14]) + torch.Size([1, 512, 7, 7]) + """ + arch_zoo = { + **dict.fromkeys(['s', 'small'], + {'embed_dims': [64, 128, 256, 512], + 'depths': [2, 2, 10, 4], + 'num_heads': [2, 4, 8, 16], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [4, 4, 4, 4], + 'sr_ratios': [8, 4, 2, 1], + 'window_sizes': [7, 7, 7, 7]}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': [96, 192, 384, 768], + 'depths': [2, 2, 18, 2], + 'num_heads': [3, 6, 12, 24], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [4, 4, 4, 4], + 'sr_ratios': [8, 4, 2, 1], + 'window_sizes': [7, 7, 7, 7]}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': [128, 256, 512, 1024], + 'depths': [2, 2, 18, 2], + 'num_heads': [4, 8, 16, 32], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [4, 4, 4, 4], + 'sr_ratios': [8, 4, 2, 1], + 'window_sizes': [7, 7, 7, 7]}), + } # yapf: disable + + essential_keys = { + 'embed_dims', 'depths', 'num_heads', 'patch_sizes', 'strides', + 'mlp_ratios', 'sr_ratios', 'window_sizes' + } + + def __init__(self, + arch, + in_channels=3, + out_indices=(3, ), + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.0, + norm_cfg=dict(type='LN'), + norm_after_stage=False, + init_cfg=None): + super(SVT, self).__init__(arch, in_channels, out_indices, qkv_bias, + drop_rate, attn_drop_rate, drop_path_rate, + norm_cfg, norm_after_stage, init_cfg) + + self.window_sizes = self.arch_settings['window_sizes'] + + for k in range(self.num_stage): + for i in range(self.depths[k]): + # in even-numbered layers of each stage, replace GSA with LSA + if i % 2 == 0: + ffn_channels = self.mlp_ratios[k] * self.embed_dims[k] + self.stages[k][i] = \ + LSAEncoderLayer( + embed_dims=self.embed_dims[k], + num_heads=self.num_heads[k], + feedforward_channels=ffn_channels, + drop_rate=drop_rate, + norm_cfg=norm_cfg, + attn_drop_rate=attn_drop_rate, + drop_path_rate=self.dpr[sum(self.depths[:k])+i], + qkv_bias=qkv_bias, + window_size=self.window_sizes[k]) diff --git a/mmcls/models/backbones/van.py b/mmcls/models/backbones/van.py new file mode 100644 index 0000000..925240e --- /dev/null +++ b/mmcls/models/backbones/van.py @@ -0,0 +1,445 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmcv.cnn.bricks.transformer import PatchEmbed +from mmcv.runner import BaseModule, ModuleList +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class MixFFN(BaseModule): + """An implementation of MixFFN of VAN. Refer to + mmdetection/mmdet/models/backbones/pvt.py. + + The differences between MixFFN & FFN: + 1. Use 1X1 Conv to replace Linear layer. + 2. Introduce 3X3 Depth-wise Conv to encode positional information. 
+
+    Args:
+        embed_dims (int): The feature dimension. Same as
+            `MultiheadAttention`.
+        feedforward_channels (int): The hidden dimension of FFNs.
+        act_cfg (dict, optional): The activation config for FFNs.
+            Default: dict(type='GELU').
+        ffn_drop (float, optional): Probability of an element to be
+            zeroed in FFN. Default 0.0.
+        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+            Default: None.
+    """
+
+    def __init__(self,
+                 embed_dims,
+                 feedforward_channels,
+                 act_cfg=dict(type='GELU'),
+                 ffn_drop=0.,
+                 init_cfg=None):
+        super(MixFFN, self).__init__(init_cfg=init_cfg)
+
+        self.embed_dims = embed_dims
+        self.feedforward_channels = feedforward_channels
+        self.act_cfg = act_cfg
+
+        self.fc1 = Conv2d(
+            in_channels=embed_dims,
+            out_channels=feedforward_channels,
+            kernel_size=1)
+        self.dwconv = Conv2d(
+            in_channels=feedforward_channels,
+            out_channels=feedforward_channels,
+            kernel_size=3,
+            stride=1,
+            padding=1,
+            bias=True,
+            groups=feedforward_channels)
+        self.act = build_activation_layer(act_cfg)
+        self.fc2 = Conv2d(
+            in_channels=feedforward_channels,
+            out_channels=embed_dims,
+            kernel_size=1)
+        self.drop = nn.Dropout(ffn_drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.dwconv(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class LKA(BaseModule):
+    """Large Kernel Attention (LKA) of VAN.
+
+    .. code:: text
+
+        DW_conv (depth-wise convolution)
+                        |
+                        |
+        DW_D_conv (depth-wise dilation convolution)
+                        |
+                        |
+        Transition Convolution (1×1 convolution)
+
+    Args:
+        embed_dims (int): Number of input channels.
+        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+            Default: None.
+    """
+
+    def __init__(self, embed_dims, init_cfg=None):
+        super(LKA, self).__init__(init_cfg=init_cfg)
+
+        # a spatial local convolution (depth-wise convolution)
+        self.DW_conv = Conv2d(
+            in_channels=embed_dims,
+            out_channels=embed_dims,
+            kernel_size=5,
+            padding=2,
+            groups=embed_dims)
+
+        # a spatial long-range convolution (depth-wise dilation convolution)
+        self.DW_D_conv = Conv2d(
+            in_channels=embed_dims,
+            out_channels=embed_dims,
+            kernel_size=7,
+            stride=1,
+            padding=9,
+            groups=embed_dims,
+            dilation=3)
+
+        self.conv1 = Conv2d(
+            in_channels=embed_dims, out_channels=embed_dims, kernel_size=1)
+
+    def forward(self, x):
+        u = x.clone()
+        attn = self.DW_conv(x)
+        attn = self.DW_D_conv(attn)
+        attn = self.conv1(attn)
+
+        return u * attn
+
+
+class SpatialAttention(BaseModule):
+    """Basic attention module in VANBlock.
+
+    Args:
+        embed_dims (int): Number of input channels.
+        act_cfg (dict, optional): The activation config for FFNs.
+            Default: dict(type='GELU').
+        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+            Default: None.
+    """
+
+    def __init__(self, embed_dims, act_cfg=dict(type='GELU'), init_cfg=None):
+        super(SpatialAttention, self).__init__(init_cfg=init_cfg)
+
+        self.proj_1 = Conv2d(
+            in_channels=embed_dims, out_channels=embed_dims, kernel_size=1)
+        self.activation = build_activation_layer(act_cfg)
+        self.spatial_gating_unit = LKA(embed_dims)
+        self.proj_2 = Conv2d(
+            in_channels=embed_dims, out_channels=embed_dims, kernel_size=1)
+
+    def forward(self, x):
+        shortcut = x.clone()
+        x = self.proj_1(x)
+        x = self.activation(x)
+        x = self.spatial_gating_unit(x)
+        x = self.proj_2(x)
+        x = x + shortcut
+        return x
+
+
+class VANBlock(BaseModule):
+    """A block of VAN.
+
+    Args:
+        embed_dims (int): Number of input channels.
+        ffn_ratio (float): The expansion ratio of feedforward network hidden
+            layer channels.
Defaults to 4. + drop_rate (float): Dropout rate after embedding. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='GELU'). + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-2. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + ffn_ratio=4., + drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN', eps=1e-5), + layer_scale_init_value=1e-2, + init_cfg=None): + super(VANBlock, self).__init__(init_cfg=init_cfg) + self.out_channels = embed_dims + + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = SpatialAttention(embed_dims, act_cfg=act_cfg) + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + mlp_hidden_dim = int(embed_dims * ffn_ratio) + self.mlp = MixFFN( + embed_dims=embed_dims, + feedforward_channels=mlp_hidden_dim, + act_cfg=act_cfg, + ffn_drop=drop_rate) + self.layer_scale_1 = nn.Parameter( + layer_scale_init_value * torch.ones((embed_dims)), + requires_grad=True) if layer_scale_init_value > 0 else None + self.layer_scale_2 = nn.Parameter( + layer_scale_init_value * torch.ones((embed_dims)), + requires_grad=True) if layer_scale_init_value > 0 else None + + def forward(self, x): + identity = x + x = self.norm1(x) + x = self.attn(x) + if self.layer_scale_1 is not None: + x = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * x + x = identity + self.drop_path(x) + + identity = x + x = self.norm2(x) + x = self.mlp(x) + if self.layer_scale_2 is not None: + x = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * x + x = identity + self.drop_path(x) + + return x + + +class VANPatchEmbed(PatchEmbed): + """Image to Patch Embedding of VAN. + + The differences between VANPatchEmbed & PatchEmbed: + 1. Use BN. + 2. Do not use 'flatten' and 'transpose'. + """ + + def __init__(self, *args, norm_cfg=dict(type='BN'), **kwargs): + super(VANPatchEmbed, self).__init__(*args, norm_cfg=norm_cfg, **kwargs) + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + Returns: + tuple: Contains merged results and its spatial shape. + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adaptive_padding: + x = self.adaptive_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +@BACKBONES.register_module() +class VAN(BaseBackbone): + """Visual Attention Network. + + A PyTorch implement of : `Visual Attention Network + `_ + + Inspiration from + https://github.com/Visual-Attention-Network/VAN-Classification + + Args: + arch (str | dict): Visual Attention Network architecture. + If use string, choose from 'b0', 'b1', b2', b3' and etc., + if use dict, it should have below keys: + + - **embed_dims** (List[int]): The dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **ffn_ratios** (List[int]): The number of expansion ratio of + feedforward network hidden layer channels. + + Defaults to 'tiny'. + patch_sizes (List[int | tuple]): The patch size in patch embeddings. + Defaults to [7, 3, 3, 3]. + in_channels (int): The num of input channels. Defaults to 3. 
+ drop_rate (float): Dropout rate after embedding. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN')`` + block_cfgs (Sequence[dict] | dict): The extra config of each block. + Defaults to empty dicts. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + + Examples: + >>> from mmcls.models import VAN + >>> import torch + >>> model = VAN(arch='b0') + >>> inputs = torch.rand(1, 3, 224, 224) + >>> outputs = model(inputs) + >>> for out in outputs: + >>> print(out.size()) + (1, 256, 7, 7) + """ + arch_zoo = { + **dict.fromkeys(['b0', 't', 'tiny'], + {'embed_dims': [32, 64, 160, 256], + 'depths': [3, 3, 5, 2], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['b1', 's', 'small'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [2, 2, 4, 2], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['b2', 'b', 'base'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 3, 12, 3], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['b3', 'l', 'large'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 5, 27, 3], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['b4'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 6, 40, 3], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['b5'], + {'embed_dims': [96, 192, 480, 768], + 'depths': [3, 3, 24, 3], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['b6'], + {'embed_dims': [96, 192, 384, 768], + 'depths': [6, 6, 90, 6], + 'ffn_ratios': [8, 8, 4, 4]}), + } # yapf: disable + + def __init__(self, + arch='tiny', + patch_sizes=[7, 3, 3, 3], + in_channels=3, + drop_rate=0., + drop_path_rate=0., + out_indices=(3, ), + frozen_stages=-1, + norm_eval=False, + norm_cfg=dict(type='LN'), + block_cfgs=dict(), + init_cfg=None): + super(VAN, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'embed_dims', 'depths', 'ffn_ratios'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.ffn_ratios = self.arch_settings['ffn_ratios'] + self.num_stages = len(self.depths) + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + total_depth = sum(self.depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + cur_block_idx = 0 + for i, depth in enumerate(self.depths): + patch_embed = VANPatchEmbed( + in_channels=in_channels if i == 0 else self.embed_dims[i - 1], + input_size=None, + embed_dims=self.embed_dims[i], + kernel_size=patch_sizes[i], + stride=patch_sizes[i] // 2 + 1, + padding=(patch_sizes[i] // 2, patch_sizes[i] // 2), + norm_cfg=dict(type='BN')) + + blocks = ModuleList([ + VANBlock( + 
embed_dims=self.embed_dims[i], + ffn_ratio=self.ffn_ratios[i], + drop_rate=drop_rate, + drop_path_rate=dpr[cur_block_idx + j], + **block_cfgs) for j in range(depth) + ]) + cur_block_idx += depth + norm = build_norm_layer(norm_cfg, self.embed_dims[i])[1] + + self.add_module(f'patch_embed{i + 1}', patch_embed) + self.add_module(f'blocks{i + 1}', blocks) + self.add_module(f'norm{i + 1}', norm) + + def train(self, mode=True): + super(VAN, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _freeze_stages(self): + for i in range(0, self.frozen_stages + 1): + # freeze patch embed + m = getattr(self, f'patch_embed{i + 1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # freeze blocks + m = getattr(self, f'blocks{i + 1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # freeze norm + m = getattr(self, f'norm{i + 1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + outs = [] + for i in range(self.num_stages): + patch_embed = getattr(self, f'patch_embed{i + 1}') + blocks = getattr(self, f'blocks{i + 1}') + norm = getattr(self, f'norm{i + 1}') + x, hw_shape = patch_embed(x) + for block in blocks: + x = block(x) + x = x.flatten(2).transpose(1, 2) + x = norm(x) + x = x.reshape(-1, *hw_shape, + block.out_channels).permute(0, 3, 1, 2).contiguous() + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/mmcls/models/backbones/vgg.py b/mmcls/models/backbones/vgg.py new file mode 100644 index 0000000..b21151c --- /dev/null +++ b/mmcls/models/backbones/vgg.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +def make_vgg_layer(in_channels, + out_channels, + num_blocks, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + dilation=1, + with_norm=False, + ceil_mode=False): + layers = [] + for _ in range(num_blocks): + layer = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + dilation=dilation, + padding=dilation, + bias=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + layers.append(layer) + in_channels = out_channels + layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) + + return layers + + +@BACKBONES.register_module() +class VGG(BaseBackbone): + """VGG backbone. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_norm (bool): Use BatchNorm or not. + num_classes (int): number of classes for classification. + num_stages (int): VGG stages, normally 5. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int], optional): Output from which stages. + When it is None, the default behavior depends on whether + num_classes is specified. If num_classes <= 0, the default value is + (4, ), output the last feature map before classifier. If + num_classes > 0, the default value is (5, ), output the + classification score. Default: None. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. 
+ ceil_mode (bool): Whether to use ceil_mode of MaxPool. Default: False. + with_last_pool (bool): Whether to keep the last pooling before + classifier. Default: True. + """ + + # Parameters to build layers. Each element specifies the number of conv in + # each stage. For example, VGG11 contains 11 layers with learnable + # parameters. 11 is computed as 11 = (1 + 1 + 2 + 2 + 2) + 3, + # where 3 indicates the last three fully-connected layers. + arch_settings = { + 11: (1, 1, 2, 2, 2), + 13: (2, 2, 2, 2, 2), + 16: (2, 2, 3, 3, 3), + 19: (2, 2, 4, 4, 4) + } + + def __init__(self, + depth, + num_classes=-1, + num_stages=5, + dilations=(1, 1, 1, 1, 1), + out_indices=None, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + norm_eval=False, + ceil_mode=False, + with_last_pool=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict(type='Constant', val=1., layer=['_BatchNorm']), + dict(type='Normal', std=0.01, layer=['Linear']) + ]): + super(VGG, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for vgg') + assert num_stages >= 1 and num_stages <= 5 + stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + assert len(dilations) == num_stages + + self.num_classes = num_classes + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + with_norm = norm_cfg is not None + + if out_indices is None: + out_indices = (5, ) if num_classes > 0 else (4, ) + assert max(out_indices) <= num_stages + self.out_indices = out_indices + + self.in_channels = 3 + start_idx = 0 + vgg_layers = [] + self.range_sub_modules = [] + for i, num_blocks in enumerate(self.stage_blocks): + num_modules = num_blocks + 1 + end_idx = start_idx + num_modules + dilation = dilations[i] + out_channels = 64 * 2**i if i < 4 else 512 + vgg_layer = make_vgg_layer( + self.in_channels, + out_channels, + num_blocks, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dilation=dilation, + with_norm=with_norm, + ceil_mode=ceil_mode) + vgg_layers.extend(vgg_layer) + self.in_channels = out_channels + self.range_sub_modules.append([start_idx, end_idx]) + start_idx = end_idx + if not with_last_pool: + vgg_layers.pop(-1) + self.range_sub_modules[-1][1] -= 1 + self.module_name = 'features' + self.add_module(self.module_name, nn.Sequential(*vgg_layers)) + + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + outs = [] + vgg_layers = getattr(self, self.module_name) + for i in range(len(self.stage_blocks)): + for j in range(*self.range_sub_modules[i]): + vgg_layer = vgg_layers[j] + x = vgg_layer(x) + if i in self.out_indices: + outs.append(x) + if self.num_classes > 0: + x = x.view(x.size(0), -1) + x = self.classifier(x) + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + vgg_layers = getattr(self, self.module_name) + for i in range(self.frozen_stages): + for j in range(*self.range_sub_modules[i]): + m = vgg_layers[j] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(VGG, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmcls/models/backbones/vision_transformer.py 
b/mmcls/models/backbones/vision_transformer.py new file mode 100644 index 0000000..87a7064 --- /dev/null +++ b/mmcls/models/backbones/vision_transformer.py @@ -0,0 +1,383 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList + +from mmcls.utils import get_root_logger +from ..builder import BACKBONES +from ..utils import MultiheadAttention, resize_pos_embed, to_2tuple +from .base_backbone import BaseBackbone + + +class TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in Vision Transformer. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + feedforward_channels (int): The hidden dimension for FFNs + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + act_cfg (dict): The activation config for FFNs. + Defaluts to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(TransformerEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias) + + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def init_weights(self): + super(TransformerEncoderLayer, self).init_weights() + for m in self.ffn.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + + def forward(self, x): + x = x + self.attn(self.norm1(x)) + x = self.ffn(self.norm2(x), identity=x) + return x + + +@BACKBONES.register_module() +class VisionTransformer(BaseBackbone): + """Vision Transformer. + + A PyTorch implement of : `An Image is Worth 16x16 Words: Transformers + for Image Recognition at Scale `_ + + Args: + arch (str | dict): Vision Transformer architecture. If use string, + choose from 'small', 'base', 'large', 'deit-tiny', 'deit-small' + and 'deit-base'. 
If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + + Defaults to 'base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + output_cls_token (bool): Whether output the cls_token. If set True, + ``with_cls_token`` must be True. Defaults to True. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 768, + 'num_layers': 8, + 'num_heads': 8, + 'feedforward_channels': 768 * 3, + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 3072 + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + }), + **dict.fromkeys( + ['deit-t', 'deit-tiny'], { + 'embed_dims': 192, + 'num_layers': 12, + 'num_heads': 3, + 'feedforward_channels': 192 * 4 + }), + **dict.fromkeys( + ['deit-s', 'deit-small'], { + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 6, + 'feedforward_channels': 384 * 4 + }), + **dict.fromkeys( + ['deit-b', 'deit-base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 768 * 4 + }), + } + # Some structures have multiple extra tokens, like DeiT. 
+ num_extra_tokens = 1 # cls_token + + def __init__(self, + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + with_cls_token=True, + output_cls_token=True, + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None): + super(VisionTransformer, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.img_size = to_2tuple(img_size) + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + # Set cls token + if output_cls_token: + assert with_cls_token is True, f'with_cls_token must be True if' \ + f'set output_cls_token to True, but got {with_cls_token}' + self.with_cls_token = with_cls_token + self.output_cls_token = output_cls_token + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + # Set position embedding + self.interpolate_mode = interpolate_mode + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + self.num_extra_tokens, + self.embed_dims)) + self._register_load_state_dict_pre_hook(self._prepare_pos_embed) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert 0 <= out_indices[i] <= self.num_layers, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=self.arch_settings['num_heads'], + feedforward_channels=self. 
+ arch_settings['feedforward_channels'], + drop_rate=drop_rate, + drop_path_rate=dpr[i], + qkv_bias=qkv_bias, + norm_cfg=norm_cfg) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(TransformerEncoderLayer(**_layer_cfg)) + + self.final_norm = final_norm + if final_norm: + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def init_weights(self): + super(VisionTransformer, self).init_weights() + + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + trunc_normal_(self.pos_embed, std=0.02) + + def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.pos_embed.shape != ckpt_pos_embed_shape: + from mmcv.utils import print_log + logger = get_root_logger() + print_log( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.', + logger=logger) + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + @staticmethod + def resize_pos_embed(*args, **kwargs): + """Interface for backward-compatibility.""" + return resize_pos_embed(*args, **kwargs) + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + if not self.with_cls_token: + # Remove class token for transformer encoder input + x = x[:, 1:] + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.norm1(x) + + if i in self.out_indices: + B, _, C = x.shape + if self.with_cls_token: + patch_token = x[:, 1:].reshape(B, *patch_resolution, C) + patch_token = patch_token.permute(0, 3, 1, 2) + cls_token = x[:, 0] + else: + patch_token = x.reshape(B, *patch_resolution, C) + patch_token = patch_token.permute(0, 3, 1, 2) + cls_token = None + if self.output_cls_token: + out = [patch_token, cls_token] + else: + out = patch_token + outs.append(out) + + return tuple(outs) diff --git a/mmcls/models/builder.py b/mmcls/models/builder.py new file mode 100644 index 0000000..9b43913 --- /dev/null +++ b/mmcls/models/builder.py @@ -0,0 +1,38 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
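The ``@BACKBONES.register_module()`` decorator above ties the backbone into the registry machinery provided by the builder module that starts here. A hedged sketch of the round trip from config dict to feature maps, assuming ``mmcls.models`` re-exports ``build_backbone`` as usual; the shapes follow from ``patch_size=16`` and the ``deit-tiny`` entry in ``arch_zoo``:

```python
import torch
from mmcls.models import build_backbone

backbone_cfg = dict(
    type='VisionTransformer',
    arch='deit-tiny',      # embed_dims=192, 12 layers, 3 heads
    img_size=224,
    patch_size=16)
backbone = build_backbone(backbone_cfg)
backbone.init_weights()
backbone.eval()

with torch.no_grad():
    outs = backbone(torch.rand(1, 3, 224, 224))

# The default out_indices=-1 keeps only the last encoder layer; with the
# default output_cls_token=True each stage is a [patch_token, cls_token] pair.
patch_token, cls_token = outs[-1]
print(patch_token.shape)  # torch.Size([1, 192, 14, 14])
print(cls_token.shape)    # torch.Size([1, 192])
```
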
+from mmcv.cnn import MODELS as MMCV_MODELS +from mmcv.cnn.bricks.registry import ATTENTION as MMCV_ATTENTION +from mmcv.utils import Registry + +MODELS = Registry('models', parent=MMCV_MODELS) + +BACKBONES = MODELS +NECKS = MODELS +HEADS = MODELS +LOSSES = MODELS +CLASSIFIERS = MODELS + +ATTENTION = Registry('attention', parent=MMCV_ATTENTION) + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_classifier(cfg): + return CLASSIFIERS.build(cfg) diff --git a/mmcls/models/classifiers/__init__.py b/mmcls/models/classifiers/__init__.py new file mode 100644 index 0000000..5fdfb91 --- /dev/null +++ b/mmcls/models/classifiers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import BaseClassifier +from .image import ImageClassifier + +__all__ = ['BaseClassifier', 'ImageClassifier'] diff --git a/mmcls/models/classifiers/base.py b/mmcls/models/classifiers/base.py new file mode 100644 index 0000000..acb5ef3 --- /dev/null +++ b/mmcls/models/classifiers/base.py @@ -0,0 +1,224 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from collections import OrderedDict +from typing import Sequence + +import mmcv +import torch +import torch.distributed as dist +from mmcv.runner import BaseModule, auto_fp16 + +from mmcls.core.visualization import imshow_infos + + +class BaseClassifier(BaseModule, metaclass=ABCMeta): + """Base class for classifiers.""" + + def __init__(self, init_cfg=None): + super(BaseClassifier, self).__init__(init_cfg) + self.fp16_enabled = False + + @property + def with_neck(self): + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_head(self): + return hasattr(self, 'head') and self.head is not None + + @abstractmethod + def extract_feat(self, imgs, stage=None): + pass + + def extract_feats(self, imgs, stage=None): + assert isinstance(imgs, Sequence) + kwargs = {} if stage is None else {'stage': stage} + for img in imgs: + yield self.extract_feat(img, **kwargs) + + @abstractmethod + def forward_train(self, imgs, **kwargs): + """ + Args: + img (list[Tensor]): List of tensors of shape (1, C, H, W). + Typically these should be mean centered and std scaled. + kwargs (keyword arguments): Specific to concrete implementation. + """ + pass + + @abstractmethod + def simple_test(self, img, **kwargs): + pass + + def forward_test(self, imgs, **kwargs): + """ + Args: + imgs (List[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + """ + if isinstance(imgs, torch.Tensor): + imgs = [imgs] + for var, name in [(imgs, 'imgs')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got {type(var)}') + + if len(imgs) == 1: + return self.simple_test(imgs[0], **kwargs) + else: + raise NotImplementedError('aug_test has not been implemented') + + @auto_fp16(apply_to=('img', )) + def forward(self, img, return_loss=True, **kwargs): + """Calls either forward_train or forward_test depending on whether + return_loss=True. + + Note this setting will change the expected inputs. When + `return_loss=True`, img and img_meta are single-nested (i.e. 
Tensor and + List[dict]), and when `resturn_loss=False`, img and img_meta should be + double nested (i.e. List[Tensor], List[List[dict]]), with the outer + list indicating test time augmentations. + """ + if return_loss: + return self.forward_train(img, **kwargs) + else: + return self.forward_test(img, **kwargs) + + def _parse_losses(self, losses): + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + elif isinstance(loss_value, dict): + for name, value in loss_value.items(): + log_vars[name] = value + else: + raise TypeError( + f'{loss_name} is not a tensor or list of tensors') + + loss = sum(_value for _key, _value in log_vars.items() + if 'loss' in _key) + + log_vars['loss'] = loss + for loss_name, loss_value in log_vars.items(): + # reduce loss when distributed training + if dist.is_available() and dist.is_initialized(): + loss_value = loss_value.data.clone() + dist.all_reduce(loss_value.div_(dist.get_world_size())) + log_vars[loss_name] = loss_value.item() + + return loss, log_vars + + def train_step(self, data, optimizer=None, **kwargs): + """The iteration step during training. + + This method defines an iteration step during training, except for the + back propagation and optimizer updating, which are done in an optimizer + hook. Note that in some complicated cases or models, the whole process + including back propagation and optimizer updating are also defined in + this method, such as GAN. + + Args: + data (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict, optional): The + optimizer of runner is passed to ``train_step()``. This + argument is unused and reserved. + + Returns: + dict: Dict of outputs. The following fields are contained. + - loss (torch.Tensor): A tensor for back propagation, which \ + can be a weighted sum of multiple losses. + - log_vars (dict): Dict contains all the variables to be sent \ + to the logger. + - num_samples (int): Indicates the batch size (when the model \ + is DDP, it means the batch size on each GPU), which is \ + used for averaging the logs. + """ + losses = self(**data) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(data['img'].data)) + + return outputs + + def val_step(self, data, optimizer=None, **kwargs): + """The iteration step during validation. + + This method shares the same signature as :func:`train_step`, but used + during val epochs. Note that the evaluation after training epochs is + not implemented with this method, but an evaluation hook. + + Args: + data (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict, optional): The + optimizer of runner is passed to ``train_step()``. This + argument is unused and reserved. + + Returns: + dict: Dict of outputs. The following fields are contained. + - loss (torch.Tensor): A tensor for back propagation, which \ + can be a weighted sum of multiple losses. + - log_vars (dict): Dict contains all the variables to be sent \ + to the logger. + - num_samples (int): Indicates the batch size (when the model \ + is DDP, it means the batch size on each GPU), which is \ + used for averaging the logs. 
+ """ + losses = self(**data) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(data['img'].data)) + + return outputs + + def show_result(self, + img, + result, + text_color='white', + font_scale=0.5, + row_width=20, + show=False, + fig_size=(15, 10), + win_name='', + wait_time=0, + out_file=None): + """Draw `result` over `img`. + + Args: + img (str or ndarray): The image to be displayed. + result (dict): The classification results to draw over `img`. + text_color (str or tuple or :obj:`Color`): Color of texts. + font_scale (float): Font scales of texts. + row_width (int): width between each row of results on the image. + show (bool): Whether to show the image. + Default: False. + fig_size (tuple): Image show figure size. Defaults to (15, 10). + win_name (str): The window name. + wait_time (int): How many seconds to display the image. + Defaults to 0. + out_file (str or None): The filename to write the image. + Default: None. + + Returns: + img (ndarray): Image with overlaid results. + """ + img = mmcv.imread(img) + img = img.copy() + + img = imshow_infos( + img, + result, + text_color=text_color, + font_size=int(font_scale * 50), + row_width=row_width, + win_name=win_name, + show=show, + fig_size=fig_size, + wait_time=wait_time, + out_file=out_file) + + return img diff --git a/mmcls/models/classifiers/image.py b/mmcls/models/classifiers/image.py new file mode 100644 index 0000000..95ffa46 --- /dev/null +++ b/mmcls/models/classifiers/image.py @@ -0,0 +1,160 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..builder import CLASSIFIERS, build_backbone, build_head, build_neck +from ..heads import MultiLabelClsHead +from ..utils.augment import Augments +from .base import BaseClassifier + + +@CLASSIFIERS.register_module() +class ImageClassifier(BaseClassifier): + + def __init__(self, + backbone, + neck=None, + head=None, + pretrained=None, + train_cfg=None, + init_cfg=None): + super(ImageClassifier, self).__init__(init_cfg) + + if pretrained is not None: + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + self.backbone = build_backbone(backbone) + + if neck is not None: + self.neck = build_neck(neck) + + if head is not None: + self.head = build_head(head) + + self.augments = None + if train_cfg is not None: + augments_cfg = train_cfg.get('augments', None) + if augments_cfg is not None: + self.augments = Augments(augments_cfg) + + def forward_dummy(self, img): + """Used for computing network flops. + + See `mmclassificaiton/tools/analysis_tools/get_flops.py` + """ + return self.extract_feat(img, stage='pre_logits') + + def extract_feat(self, img, stage='neck'): + """Directly extract features from the specified stage. + + Args: + img (Tensor): The input images. The shape of it should be + ``(num_samples, num_channels, *img_shape)``. + stage (str): Which stage to output the feature. Choose from + "backbone", "neck" and "pre_logits". Defaults to "neck". + + Returns: + tuple | Tensor: The output of specified stage. + The output depends on detailed implementation. In general, the + output of backbone and neck is a tuple and the output of + pre_logits is a tensor. + + Examples: + 1. 
Backbone output + + >>> import torch + >>> from mmcv import Config + >>> from mmcls.models import build_classifier + >>> + >>> cfg = Config.fromfile('configs/resnet/resnet18_8xb32_in1k.py').model + >>> cfg.backbone.out_indices = (0, 1, 2, 3) # Output multi-scale feature maps + >>> model = build_classifier(cfg) + >>> outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='backbone') + >>> for out in outs: + ... print(out.shape) + torch.Size([1, 64, 56, 56]) + torch.Size([1, 128, 28, 28]) + torch.Size([1, 256, 14, 14]) + torch.Size([1, 512, 7, 7]) + + 2. Neck output + + >>> import torch + >>> from mmcv import Config + >>> from mmcls.models import build_classifier + >>> + >>> cfg = Config.fromfile('configs/resnet/resnet18_8xb32_in1k.py').model + >>> cfg.backbone.out_indices = (0, 1, 2, 3) # Output multi-scale feature maps + >>> model = build_classifier(cfg) + >>> + >>> outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='neck') + >>> for out in outs: + ... print(out.shape) + torch.Size([1, 64]) + torch.Size([1, 128]) + torch.Size([1, 256]) + torch.Size([1, 512]) + + 3. Pre-logits output (without the final linear classifier head) + + >>> import torch + >>> from mmcv import Config + >>> from mmcls.models import build_classifier + >>> + >>> cfg = Config.fromfile('configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py').model + >>> model = build_classifier(cfg) + >>> + >>> out = model.extract_feat(torch.rand(1, 3, 224, 224), stage='pre_logits') + >>> print(out.shape) # The hidden dims in head is 3072 + torch.Size([1, 3072]) + """ # noqa: E501 + assert stage in ['backbone', 'neck', 'pre_logits'], \ + (f'Invalid output stage "{stage}", please choose from "backbone", ' + '"neck" and "pre_logits"') + + x = self.backbone(img) + + if stage == 'backbone': + return x + + if self.with_neck: + x = self.neck(x) + if stage == 'neck': + return x + + if self.with_head and hasattr(self.head, 'pre_logits'): + x = self.head.pre_logits(x) + return x + + def forward_train(self, img, gt_label, **kwargs): + """Forward computation during training. + + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. + Typically these should be mean centered and std scaled. + gt_label (Tensor): It should be of shape (N, 1) encoding the + ground-truth label of input images for single label task. It + should be of shape (N, C) encoding the ground-truth label + of input images for multi-labels task. + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + if self.augments is not None: + img, gt_label = self.augments(img, gt_label) + + x = self.extract_feat(img) + + losses = dict() + loss = self.head.forward_train(x, gt_label) + + losses.update(loss) + + return losses + + def simple_test(self, img, img_metas=None, **kwargs): + """Test without augmentation.""" + x = self.extract_feat(img) + + if isinstance(self.head, MultiLabelClsHead): + assert 'softmax' not in kwargs, ( + 'Please use `sigmoid` instead of `softmax` ' + 'in multi-label tasks.') + res = self.head.simple_test(x, **kwargs) + + return res diff --git a/mmcls/models/heads/__init__.py b/mmcls/models/heads/__init__.py new file mode 100644 index 0000000..d730161 --- /dev/null +++ b/mmcls/models/heads/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
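The ``ImageClassifier`` above composes a backbone, an optional neck, and one of the heads whose package ``__init__`` begins here. A hedged end-to-end sketch; the ``ResNet`` backbone and ``GlobalAveragePooling`` neck are assumed to be registered elsewhere in MMClassification and are not part of this diff:

```python
import torch
from mmcls.models import build_classifier

cfg = dict(
    type='ImageClassifier',
    backbone=dict(type='ResNet', depth=18, num_stages=4, out_indices=(3, )),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=512,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1, 5)))

model = build_classifier(cfg)

img = torch.rand(2, 3, 224, 224)
gt_label = torch.randint(0, 1000, (2, ))

losses = model.forward_train(img, gt_label)  # dict containing a 'loss' tensor
preds = model.simple_test(img)               # list of 2 per-sample score arrays
```
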
+from .cls_head import ClsHead +from .conformer_head import ConformerHead +from .deit_head import DeiTClsHead +from .efficientformer_head import EfficientFormerClsHead +from .linear_head import LinearClsHead +from .multi_label_csra_head import CSRAClsHead +from .multi_label_head import MultiLabelClsHead +from .multi_label_linear_head import MultiLabelLinearClsHead +from .stacked_head import StackedLinearClsHead +from .vision_transformer_head import VisionTransformerClsHead + +__all__ = [ + 'ClsHead', 'LinearClsHead', 'StackedLinearClsHead', 'MultiLabelClsHead', + 'MultiLabelLinearClsHead', 'VisionTransformerClsHead', 'DeiTClsHead', + 'ConformerHead', 'EfficientFormerClsHead', 'CSRAClsHead' +] diff --git a/mmcls/models/heads/base_head.py b/mmcls/models/heads/base_head.py new file mode 100644 index 0000000..e8936f2 --- /dev/null +++ b/mmcls/models/heads/base_head.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from mmcv.runner import BaseModule + + +class BaseHead(BaseModule, metaclass=ABCMeta): + """Base head.""" + + def __init__(self, init_cfg=None): + super(BaseHead, self).__init__(init_cfg) + + @abstractmethod + def forward_train(self, x, gt_label, **kwargs): + pass diff --git a/mmcls/models/heads/cls_head.py b/mmcls/models/heads/cls_head.py new file mode 100644 index 0000000..2e430c5 --- /dev/null +++ b/mmcls/models/heads/cls_head.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn.functional as F + +from mmcls.models.losses import Accuracy +from ..builder import HEADS, build_loss +from ..utils import is_tracing +from .base_head import BaseHead + + +@HEADS.register_module() +class ClsHead(BaseHead): + """classification head. + + Args: + loss (dict): Config of classification loss. + topk (int | tuple): Top-k accuracy. + cal_acc (bool): Whether to calculate accuracy during training. + If you use Mixup/CutMix or something like that during training, + it is not reasonable to calculate accuracy. Defaults to False. + """ + + def __init__(self, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, ), + cal_acc=False, + init_cfg=None): + super(ClsHead, self).__init__(init_cfg=init_cfg) + + assert isinstance(loss, dict) + assert isinstance(topk, (int, tuple)) + if isinstance(topk, int): + topk = (topk, ) + for _topk in topk: + assert _topk > 0, 'Top-k should be larger than 0' + self.topk = topk + + self.compute_loss = build_loss(loss) + self.compute_accuracy = Accuracy(topk=self.topk) + self.cal_acc = cal_acc + + def loss(self, cls_score, gt_label, **kwargs): + num_samples = len(cls_score) + losses = dict() + # compute loss + loss = self.compute_loss( + cls_score, gt_label, avg_factor=num_samples, **kwargs) + if self.cal_acc: + # compute accuracy + acc = self.compute_accuracy(cls_score, gt_label) + assert len(acc) == len(self.topk) + losses['accuracy'] = { + f'top-{k}': a + for k, a in zip(self.topk, acc) + } + losses['loss'] = loss + return losses + + def forward_train(self, cls_score, gt_label, **kwargs): + if isinstance(cls_score, tuple): + cls_score = cls_score[-1] + losses = self.loss(cls_score, gt_label, **kwargs) + return losses + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + + warnings.warn( + 'The input of ClsHead should be already logits. ' + 'Please modify the backbone if you want to get pre-logits feature.' 
+ ) + return x + + def simple_test(self, cls_score, softmax=True, post_process=True): + """Inference without augmentation. + + Args: + cls_score (tuple[Tensor]): The input classification score logits. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + softmax (bool): Whether to softmax the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. + """ + if isinstance(cls_score, tuple): + cls_score = cls_score[-1] + + if softmax: + pred = ( + F.softmax(cls_score, dim=1) if cls_score is not None else None) + else: + pred = cls_score + + if post_process: + return self.post_process(pred) + else: + return pred + + def post_process(self, pred): + on_trace = is_tracing() + if torch.onnx.is_in_onnx_export() or on_trace: + return pred + pred = list(pred.detach().cpu().numpy()) + return pred diff --git a/mmcls/models/heads/conformer_head.py b/mmcls/models/heads/conformer_head.py new file mode 100644 index 0000000..c655796 --- /dev/null +++ b/mmcls/models/heads/conformer_head.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.utils.weight_init import trunc_normal_ + +from ..builder import HEADS +from .cls_head import ClsHead + + +@HEADS.register_module() +class ConformerHead(ClsHead): + """Linear classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use ``dict(type='Normal', layer='Linear', std=0.01)``. + """ + + def __init__( + self, + num_classes, + in_channels, # [conv_dim, trans_dim] + init_cfg=dict(type='Normal', layer='Linear', std=0.01), + *args, + **kwargs): + super(ConformerHead, self).__init__(init_cfg=None, *args, **kwargs) + + self.in_channels = in_channels + self.num_classes = num_classes + self.init_cfg = init_cfg + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.conv_cls_head = nn.Linear(self.in_channels[0], num_classes) + self.trans_cls_head = nn.Linear(self.in_channels[1], num_classes) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + def init_weights(self): + super(ConformerHead, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + else: + self.apply(self._init_weights) + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + return x + + def simple_test(self, x, softmax=True, post_process=True): + """Inference without augmentation. + + Args: + x (tuple[tuple[tensor, tensor]]): The input features. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. Every item should be a tuple which + includes convluation features and transformer features. 
The + shape of them should be ``(num_samples, in_channels[0])`` and + ``(num_samples, in_channels[1])``. + softmax (bool): Whether to softmax the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. + """ + x = self.pre_logits(x) + # There are two outputs in the Conformer model + assert len(x) == 2 + + conv_cls_score = self.conv_cls_head(x[0]) + tran_cls_score = self.trans_cls_head(x[1]) + + if softmax: + cls_score = conv_cls_score + tran_cls_score + pred = ( + F.softmax(cls_score, dim=1) if cls_score is not None else None) + if post_process: + pred = self.post_process(pred) + else: + pred = [conv_cls_score, tran_cls_score] + if post_process: + pred = list(map(self.post_process, pred)) + return pred + + def forward_train(self, x, gt_label): + x = self.pre_logits(x) + assert isinstance(x, list) and len(x) == 2, \ + 'There should be two outputs in the Conformer model' + + conv_cls_score = self.conv_cls_head(x[0]) + tran_cls_score = self.trans_cls_head(x[1]) + + losses = self.loss([conv_cls_score, tran_cls_score], gt_label) + return losses + + def loss(self, cls_score, gt_label): + num_samples = len(cls_score[0]) + losses = dict() + # compute loss + loss = sum([ + self.compute_loss(score, gt_label, avg_factor=num_samples) / + len(cls_score) for score in cls_score + ]) + if self.cal_acc: + # compute accuracy + acc = self.compute_accuracy(cls_score[0] + cls_score[1], gt_label) + assert len(acc) == len(self.topk) + losses['accuracy'] = { + f'top-{k}': a + for k, a in zip(self.topk, acc) + } + losses['loss'] = loss + return losses diff --git a/mmcls/models/heads/deit_head.py b/mmcls/models/heads/deit_head.py new file mode 100644 index 0000000..1e9f22a --- /dev/null +++ b/mmcls/models/heads/deit_head.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from mmcls.utils import get_root_logger +from ..builder import HEADS +from .vision_transformer_head import VisionTransformerClsHead + + +@HEADS.register_module() +class DeiTClsHead(VisionTransformerClsHead): + """Distilled Vision Transformer classifier head. + + Comparing with the :class:`VisionTransformerClsHead`, this head adds an + extra linear layer to handle the dist token. The final classification score + is the average of both linear transformation results of ``cls_token`` and + ``dist_token``. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + hidden_dim (int): Number of the dimensions for hidden layer. + Defaults to None, which means no extra hidden layer. + act_cfg (dict): The activation config. Only available during + pre-training. Defaults to ``dict(type='Tanh')``. + init_cfg (dict): The extra initialization configs. Defaults to + ``dict(type='Constant', layer='Linear', val=0)``. 
+ """ + + def __init__(self, *args, **kwargs): + super(DeiTClsHead, self).__init__(*args, **kwargs) + if self.hidden_dim is None: + head_dist = nn.Linear(self.in_channels, self.num_classes) + else: + head_dist = nn.Linear(self.hidden_dim, self.num_classes) + self.layers.add_module('head_dist', head_dist) + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + _, cls_token, dist_token = x + + if self.hidden_dim is None: + return cls_token, dist_token + else: + cls_token = self.layers.act(self.layers.pre_logits(cls_token)) + dist_token = self.layers.act(self.layers.pre_logits(dist_token)) + return cls_token, dist_token + + def simple_test(self, x, softmax=True, post_process=True): + """Inference without augmentation. + + Args: + x (tuple[tuple[tensor, tensor, tensor]]): The input features. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. Every item should be a tuple which + includes patch token, cls token and dist token. The cls token + and dist token will be used to classify and the shape of them + should be ``(num_samples, in_channels)``. + softmax (bool): Whether to softmax the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. + """ + cls_token, dist_token = self.pre_logits(x) + cls_score = (self.layers.head(cls_token) + + self.layers.head_dist(dist_token)) / 2 + + if softmax: + pred = F.softmax( + cls_score, dim=1) if cls_score is not None else None + else: + pred = cls_score + + if post_process: + return self.post_process(pred) + else: + return pred + + def forward_train(self, x, gt_label): + logger = get_root_logger() + logger.warning("MMClassification doesn't support to train the " + 'distilled version DeiT.') + cls_token, dist_token = self.pre_logits(x) + cls_score = (self.layers.head(cls_token) + + self.layers.head_dist(dist_token)) / 2 + losses = self.loss(cls_score, gt_label) + return losses diff --git a/mmcls/models/heads/efficientformer_head.py b/mmcls/models/heads/efficientformer_head.py new file mode 100644 index 0000000..3127f12 --- /dev/null +++ b/mmcls/models/heads/efficientformer_head.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import HEADS +from .cls_head import ClsHead + + +@HEADS.register_module() +class EfficientFormerClsHead(ClsHead): + """EfficientFormer classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + distillation (bool): Whether use a additional distilled head. + Defaults to True. + init_cfg (dict): The extra initialization configs. Defaults to + ``dict(type='Normal', layer='Linear', std=0.01)``. 
+ """ + + def __init__(self, + num_classes, + in_channels, + distillation=True, + init_cfg=dict(type='Normal', layer='Linear', std=0.01), + *args, + **kwargs): + super(EfficientFormerClsHead, self).__init__( + init_cfg=init_cfg, *args, **kwargs) + self.in_channels = in_channels + self.num_classes = num_classes + self.dist = distillation + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.head = nn.Linear(self.in_channels, self.num_classes) + if self.dist: + self.dist_head = nn.Linear(self.in_channels, self.num_classes) + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + return x + + def simple_test(self, x, softmax=True, post_process=True): + """Inference without augmentation. + + Args: + x (tuple[tuple[tensor, tensor]]): The input features. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. Every item should be a tuple which + includes patch token and cls token. The cls token will be used + to classify and the shape of it should be + ``(num_samples, in_channels)``. + softmax (bool): Whether to softmax the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. + """ + x = self.pre_logits(x) + cls_score = self.head(x) + if self.dist: + cls_score = (cls_score + self.dist_head(x)) / 2 + + if softmax: + pred = ( + F.softmax(cls_score, dim=1) if cls_score is not None else None) + else: + pred = cls_score + + if post_process: + return self.post_process(pred) + else: + return pred + + def forward_train(self, x, gt_label, **kwargs): + if self.dist: + raise NotImplementedError( + "MMClassification doesn't support to train" + ' the distilled version EfficientFormer.') + else: + x = self.pre_logits(x) + cls_score = self.head(x) + losses = self.loss(cls_score, gt_label, **kwargs) + return losses diff --git a/mmcls/models/heads/linear_head.py b/mmcls/models/heads/linear_head.py new file mode 100644 index 0000000..113b41b --- /dev/null +++ b/mmcls/models/heads/linear_head.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import HEADS +from .cls_head import ClsHead + + +@HEADS.register_module() +class LinearClsHead(ClsHead): + """Linear classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use dict(type='Normal', layer='Linear', std=0.01). 
+ """ + + def __init__(self, + num_classes, + in_channels, + init_cfg=dict(type='Normal', layer='Linear', std=0.01), + *args, + **kwargs): + super(LinearClsHead, self).__init__(init_cfg=init_cfg, *args, **kwargs) + + self.in_channels = in_channels + self.num_classes = num_classes + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + return x + + def simple_test(self, x, softmax=True, post_process=True): + """Inference without augmentation. + + Args: + x (tuple[Tensor]): The input features. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. The shape of every item should be + ``(num_samples, in_channels)``. + softmax (bool): Whether to softmax the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. + """ + x = self.pre_logits(x) + cls_score = self.fc(x) + + if softmax: + pred = ( + F.softmax(cls_score, dim=1) if cls_score is not None else None) + else: + pred = cls_score + + if post_process: + return self.post_process(pred) + else: + return pred + + def forward_train(self, x, gt_label, **kwargs): + x = self.pre_logits(x) + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label, **kwargs) + return losses diff --git a/mmcls/models/heads/multi_label_csra_head.py b/mmcls/models/heads/multi_label_csra_head.py new file mode 100755 index 0000000..f28ba42 --- /dev/null +++ b/mmcls/models/heads/multi_label_csra_head.py @@ -0,0 +1,121 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/Kevinz-code/CSRA +import torch +import torch.nn as nn +from mmcv.runner import BaseModule, ModuleList + +from ..builder import HEADS +from .multi_label_head import MultiLabelClsHead + + +@HEADS.register_module() +class CSRAClsHead(MultiLabelClsHead): + """Class-specific residual attention classifier head. + + Residual Attention: A Simple but Effective Method for Multi-Label + Recognition (ICCV 2021) + Please refer to the `paper `__ for + details. + + Args: + num_classes (int): Number of categories. + in_channels (int): Number of channels in the input feature map. + num_heads (int): Number of residual at tensor heads. + loss (dict): Config of classification loss. + lam (float): Lambda that combines global average and max pooling + scores. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use dict(type='Normal', layer='Linear', std=0.01). + """ + temperature_settings = { # softmax temperature settings + 1: [1], + 2: [1, 99], + 4: [1, 2, 4, 99], + 6: [1, 2, 3, 4, 5, 99], + 8: [1, 2, 3, 4, 5, 6, 7, 99] + } + + def __init__(self, + num_classes, + in_channels, + num_heads, + lam, + loss=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0), + init_cfg=dict(type='Normal', layer='Linear', std=0.01), + *args, + **kwargs): + assert num_heads in self.temperature_settings.keys( + ), 'The num of heads is not in temperature setting.' + assert lam > 0, 'Lambda should be between 0 and 1.' 
+ super(CSRAClsHead, self).__init__( + init_cfg=init_cfg, loss=loss, *args, **kwargs) + self.temp_list = self.temperature_settings[num_heads] + self.csra_heads = ModuleList([ + CSRAModule(num_classes, in_channels, self.temp_list[i], lam) + for i in range(num_heads) + ]) + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + return x + + def simple_test(self, x, post_process=True, **kwargs): + logit = 0. + x = self.pre_logits(x) + for head in self.csra_heads: + logit += head(x) + if post_process: + return self.post_process(logit) + else: + return logit + + def forward_train(self, x, gt_label, **kwargs): + logit = 0. + x = self.pre_logits(x) + for head in self.csra_heads: + logit += head(x) + gt_label = gt_label.type_as(logit) + _gt_label = torch.abs(gt_label) + losses = self.loss(logit, _gt_label, **kwargs) + return losses + + +class CSRAModule(BaseModule): + """Basic module of CSRA with different temperature. + + Args: + num_classes (int): Number of categories. + in_channels (int): Number of channels in the input feature map. + T (int): Temperature setting. + lam (float): Lambda that combines global average and max pooling + scores. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use dict(type='Normal', layer='Linear', std=0.01). + """ + + def __init__(self, num_classes, in_channels, T, lam, init_cfg=None): + + super(CSRAModule, self).__init__(init_cfg=init_cfg) + self.T = T # temperature + self.lam = lam # Lambda + self.head = nn.Conv2d(in_channels, num_classes, 1, bias=False) + self.softmax = nn.Softmax(dim=2) + + def forward(self, x): + score = self.head(x) / torch.norm( + self.head.weight, dim=1, keepdim=True).transpose(0, 1) + score = score.flatten(2) + base_logit = torch.mean(score, dim=2) + + if self.T == 99: # max-pooling + att_logit = torch.max(score, dim=2)[0] + else: + score_soft = self.softmax(score * self.T) + att_logit = torch.sum(score * score_soft, dim=2) + + return base_logit + self.lam * att_logit diff --git a/mmcls/models/heads/multi_label_head.py b/mmcls/models/heads/multi_label_head.py new file mode 100644 index 0000000..e11a773 --- /dev/null +++ b/mmcls/models/heads/multi_label_head.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..builder import HEADS, build_loss +from ..utils import is_tracing +from .base_head import BaseHead + + +@HEADS.register_module() +class MultiLabelClsHead(BaseHead): + """Classification head for multilabel task. + + Args: + loss (dict): Config of classification loss. 
+ """ + + def __init__(self, + loss=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0), + init_cfg=None): + super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg) + + assert isinstance(loss, dict) + + self.compute_loss = build_loss(loss) + + def loss(self, cls_score, gt_label): + gt_label = gt_label.type_as(cls_score) + num_samples = len(cls_score) + losses = dict() + + # map difficult examples to positive ones + _gt_label = torch.abs(gt_label) + # compute loss + loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples) + losses['loss'] = loss + return losses + + def forward_train(self, cls_score, gt_label, **kwargs): + if isinstance(cls_score, tuple): + cls_score = cls_score[-1] + gt_label = gt_label.type_as(cls_score) + losses = self.loss(cls_score, gt_label, **kwargs) + return losses + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + + from mmcls.utils import get_root_logger + logger = get_root_logger() + logger.warning( + 'The input of MultiLabelClsHead should be already logits. ' + 'Please modify the backbone if you want to get pre-logits feature.' + ) + return x + + def simple_test(self, x, sigmoid=True, post_process=True): + """Inference without augmentation. + + Args: + cls_score (tuple[Tensor]): The input classification score logits. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + sigmoid (bool): Whether to sigmoid the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. + """ + if isinstance(x, tuple): + x = x[-1] + + if sigmoid: + pred = torch.sigmoid(x) if x is not None else None + else: + pred = x + + if post_process: + return self.post_process(pred) + else: + return pred + + def post_process(self, pred): + on_trace = is_tracing() + if torch.onnx.is_in_onnx_export() or on_trace: + return pred + pred = list(pred.detach().cpu().numpy()) + return pred diff --git a/mmcls/models/heads/multi_label_linear_head.py b/mmcls/models/heads/multi_label_linear_head.py new file mode 100644 index 0000000..0e9d068 --- /dev/null +++ b/mmcls/models/heads/multi_label_linear_head.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from ..builder import HEADS +from .multi_label_head import MultiLabelClsHead + + +@HEADS.register_module() +class MultiLabelLinearClsHead(MultiLabelClsHead): + """Linear classification head for multilabel task. + + Args: + num_classes (int): Number of categories. + in_channels (int): Number of channels in the input feature map. + loss (dict): Config of classification loss. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use dict(type='Normal', layer='Linear', std=0.01). 
+ """ + + def __init__(self, + num_classes, + in_channels, + loss=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0), + init_cfg=dict(type='Normal', layer='Linear', std=0.01)): + super(MultiLabelLinearClsHead, self).__init__( + loss=loss, init_cfg=init_cfg) + + if num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.in_channels = in_channels + self.num_classes = num_classes + + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + return x + + def forward_train(self, x, gt_label, **kwargs): + x = self.pre_logits(x) + gt_label = gt_label.type_as(x) + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label, **kwargs) + return losses + + def simple_test(self, x, sigmoid=True, post_process=True): + """Inference without augmentation. + + Args: + x (tuple[Tensor]): The input features. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. The shape of every item should be + ``(num_samples, in_channels)``. + sigmoid (bool): Whether to sigmoid the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. + """ + x = self.pre_logits(x) + cls_score = self.fc(x) + + if sigmoid: + pred = torch.sigmoid(cls_score) if cls_score is not None else None + else: + pred = cls_score + + if post_process: + return self.post_process(pred) + else: + return pred diff --git a/mmcls/models/heads/stacked_head.py b/mmcls/models/heads/stacked_head.py new file mode 100644 index 0000000..bbb0dc2 --- /dev/null +++ b/mmcls/models/heads/stacked_head.py @@ -0,0 +1,163 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Sequence + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmcv.runner import BaseModule, ModuleList + +from ..builder import HEADS +from .cls_head import ClsHead + + +class LinearBlock(BaseModule): + + def __init__(self, + in_channels, + out_channels, + dropout_rate=0., + norm_cfg=None, + act_cfg=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.fc = nn.Linear(in_channels, out_channels) + + self.norm = None + self.act = None + self.dropout = None + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + if act_cfg is not None: + self.act = build_activation_layer(act_cfg) + if dropout_rate > 0: + self.dropout = nn.Dropout(p=dropout_rate) + + def forward(self, x): + x = self.fc(x) + if self.norm is not None: + x = self.norm(x) + if self.act is not None: + x = self.act(x) + if self.dropout is not None: + x = self.dropout(x) + return x + + +@HEADS.register_module() +class StackedLinearClsHead(ClsHead): + """Classifier head with several hidden fc layer and a output fc layer. + + Args: + num_classes (int): Number of categories. + in_channels (int): Number of channels in the input feature map. + mid_channels (Sequence): Number of channels in the hidden fc layers. + dropout_rate (float): Dropout rate after each hidden fc layer, + except the last layer. Defaults to 0. 
+ norm_cfg (dict, optional): Config dict of normalization layer after + each hidden fc layer, except the last layer. Defaults to None. + act_cfg (dict, optional): Config dict of activation function after each + hidden layer, except the last layer. Defaults to use "ReLU". + """ + + def __init__(self, + num_classes: int, + in_channels: int, + mid_channels: Sequence, + dropout_rate: float = 0., + norm_cfg: Dict = None, + act_cfg: Dict = dict(type='ReLU'), + **kwargs): + super(StackedLinearClsHead, self).__init__(**kwargs) + assert num_classes > 0, \ + f'`num_classes` of StackedLinearClsHead must be a positive ' \ + f'integer, got {num_classes} instead.' + self.num_classes = num_classes + + self.in_channels = in_channels + + assert isinstance(mid_channels, Sequence), \ + f'`mid_channels` of StackedLinearClsHead should be a sequence, ' \ + f'instead of {type(mid_channels)}' + self.mid_channels = mid_channels + + self.dropout_rate = dropout_rate + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self._init_layers() + + def _init_layers(self): + self.layers = ModuleList() + in_channels = self.in_channels + for hidden_channels in self.mid_channels: + self.layers.append( + LinearBlock( + in_channels, + hidden_channels, + dropout_rate=self.dropout_rate, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + in_channels = hidden_channels + + self.layers.append( + LinearBlock( + self.mid_channels[-1], + self.num_classes, + dropout_rate=0., + norm_cfg=None, + act_cfg=None)) + + def init_weights(self): + self.layers.init_weights() + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + for layer in self.layers[:-1]: + x = layer(x) + return x + + @property + def fc(self): + return self.layers[-1] + + def simple_test(self, x, softmax=True, post_process=True): + """Inference without augmentation. + + Args: + x (tuple[Tensor]): The input features. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. The shape of every item should be + ``(num_samples, in_channels)``. + softmax (bool): Whether to softmax the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. + """ + x = self.pre_logits(x) + cls_score = self.fc(x) + + if softmax: + pred = ( + F.softmax(cls_score, dim=1) if cls_score is not None else None) + else: + pred = cls_score + + if post_process: + return self.post_process(pred) + else: + return pred + + def forward_train(self, x, gt_label, **kwargs): + x = self.pre_logits(x) + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label, **kwargs) + return losses diff --git a/mmcls/models/heads/vision_transformer_head.py b/mmcls/models/heads/vision_transformer_head.py new file mode 100644 index 0000000..d0586cb --- /dev/null +++ b/mmcls/models/heads/vision_transformer_head.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
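The ``StackedLinearClsHead`` defined above can also be exercised on its own. A small sketch with assumed toy dimensions, importing the head from ``mmcls.models.heads`` as exported by the heads ``__init__`` earlier in this diff:

```python
import torch
from mmcls.models.heads import StackedLinearClsHead

# Two hidden blocks (1024 -> 512 -> 256), each Linear + BN1d + ReLU + Dropout,
# followed by a plain Linear producing the 10 class scores.
head = StackedLinearClsHead(
    num_classes=10,
    in_channels=1024,
    mid_channels=[512, 256],
    dropout_rate=0.2,
    norm_cfg=dict(type='BN1d'),
    act_cfg=dict(type='ReLU'))

feats = (torch.rand(4, 1024), )  # neck output: a tuple, only the last item is used
scores = head.simple_test(feats, softmax=True, post_process=False)
print(scores.shape)              # torch.Size([4, 10])

loss_dict = head.forward_train(feats, torch.randint(0, 10, (4, )))
print(loss_dict['loss'])         # scalar cross-entropy loss
```
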
+import math +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner import Sequential + +from ..builder import HEADS +from .cls_head import ClsHead + + +@HEADS.register_module() +class VisionTransformerClsHead(ClsHead): + """Vision Transformer classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + hidden_dim (int): Number of the dimensions for hidden layer. + Defaults to None, which means no extra hidden layer. + act_cfg (dict): The activation config. Only available during + pre-training. Defaults to ``dict(type='Tanh')``. + init_cfg (dict): The extra initialization configs. Defaults to + ``dict(type='Constant', layer='Linear', val=0)``. + """ + + def __init__(self, + num_classes, + in_channels, + hidden_dim=None, + act_cfg=dict(type='Tanh'), + init_cfg=dict(type='Constant', layer='Linear', val=0), + *args, + **kwargs): + super(VisionTransformerClsHead, self).__init__( + init_cfg=init_cfg, *args, **kwargs) + self.in_channels = in_channels + self.num_classes = num_classes + self.hidden_dim = hidden_dim + self.act_cfg = act_cfg + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self._init_layers() + + def _init_layers(self): + if self.hidden_dim is None: + layers = [('head', nn.Linear(self.in_channels, self.num_classes))] + else: + layers = [ + ('pre_logits', nn.Linear(self.in_channels, self.hidden_dim)), + ('act', build_activation_layer(self.act_cfg)), + ('head', nn.Linear(self.hidden_dim, self.num_classes)), + ] + self.layers = Sequential(OrderedDict(layers)) + + def init_weights(self): + super(VisionTransformerClsHead, self).init_weights() + # Modified from ClassyVision + if hasattr(self.layers, 'pre_logits'): + # Lecun norm + trunc_normal_( + self.layers.pre_logits.weight, + std=math.sqrt(1 / self.layers.pre_logits.in_features)) + nn.init.zeros_(self.layers.pre_logits.bias) + + def pre_logits(self, x): + if isinstance(x, tuple): + x = x[-1] + _, cls_token = x + if self.hidden_dim is None: + return cls_token + else: + x = self.layers.pre_logits(cls_token) + return self.layers.act(x) + + def simple_test(self, x, softmax=True, post_process=True): + """Inference without augmentation. + + Args: + x (tuple[tuple[tensor, tensor]]): The input features. + Multi-stage inputs are acceptable but only the last stage will + be used to classify. Every item should be a tuple which + includes patch token and cls token. The cls token will be used + to classify and the shape of it should be + ``(num_samples, in_channels)``. + softmax (bool): Whether to softmax the classification score. + post_process (bool): Whether to do post processing the + inference results. It will convert the output to a list. + + Returns: + Tensor | list: The inference results. + + - If no post processing, the output is a tensor with shape + ``(num_samples, num_classes)``. + - If post processing, the output is a multi-dimentional list of + float and the dimensions are ``(num_samples, num_classes)``. 
+ """ + x = self.pre_logits(x) + cls_score = self.layers.head(x) + + if softmax: + pred = ( + F.softmax(cls_score, dim=1) if cls_score is not None else None) + else: + pred = cls_score + + if post_process: + return self.post_process(pred) + else: + return pred + + def forward_train(self, x, gt_label, **kwargs): + x = self.pre_logits(x) + cls_score = self.layers.head(x) + losses = self.loss(cls_score, gt_label, **kwargs) + return losses diff --git a/mmcls/models/losses/__init__.py b/mmcls/models/losses/__init__.py new file mode 100644 index 0000000..9c90086 --- /dev/null +++ b/mmcls/models/losses/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .accuracy import Accuracy, accuracy +from .asymmetric_loss import AsymmetricLoss, asymmetric_loss +from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, + cross_entropy) +from .focal_loss import FocalLoss, sigmoid_focal_loss +from .label_smooth_loss import LabelSmoothLoss +from .seesaw_loss import SeesawLoss +from .utils import (convert_to_one_hot, reduce_loss, weight_reduce_loss, + weighted_loss) + +__all__ = [ + 'accuracy', 'Accuracy', 'asymmetric_loss', 'AsymmetricLoss', + 'cross_entropy', 'binary_cross_entropy', 'CrossEntropyLoss', 'reduce_loss', + 'weight_reduce_loss', 'LabelSmoothLoss', 'weighted_loss', 'FocalLoss', + 'sigmoid_focal_loss', 'convert_to_one_hot', 'SeesawLoss' +] diff --git a/mmcls/models/losses/accuracy.py b/mmcls/models/losses/accuracy.py new file mode 100644 index 0000000..1b142bc --- /dev/null +++ b/mmcls/models/losses/accuracy.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from numbers import Number + +import numpy as np +import torch +import torch.nn as nn + + +def accuracy_numpy(pred, target, topk=(1, ), thrs=0.): + if isinstance(thrs, Number): + thrs = (thrs, ) + res_single = True + elif isinstance(thrs, tuple): + res_single = False + else: + raise TypeError( + f'thrs should be a number or tuple, but got {type(thrs)}.') + + res = [] + maxk = max(topk) + num = pred.shape[0] + + static_inds = np.indices((num, maxk))[0] + pred_label = pred.argpartition(-maxk, axis=1)[:, -maxk:] + pred_score = pred[static_inds, pred_label] + + sort_inds = np.argsort(pred_score, axis=1)[:, ::-1] + pred_label = pred_label[static_inds, sort_inds] + pred_score = pred_score[static_inds, sort_inds] + + for k in topk: + correct_k = pred_label[:, :k] == target.reshape(-1, 1) + res_thr = [] + for thr in thrs: + # Only prediction values larger than thr are counted as correct + _correct_k = correct_k & (pred_score[:, :k] > thr) + _correct_k = np.logical_or.reduce(_correct_k, axis=1) + res_thr.append((_correct_k.sum() * 100. / num)) + if res_single: + res.append(res_thr[0]) + else: + res.append(res_thr) + return res + + +def accuracy_torch(pred, target, topk=(1, ), thrs=0.): + if isinstance(thrs, Number): + thrs = (thrs, ) + res_single = True + elif isinstance(thrs, tuple): + res_single = False + else: + raise TypeError( + f'thrs should be a number or tuple, but got {type(thrs)}.') + + res = [] + maxk = max(topk) + num = pred.size(0) + pred = pred.float() + pred_score, pred_label = pred.topk(maxk, dim=1) + pred_label = pred_label.t() + correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) + for k in topk: + res_thr = [] + for thr in thrs: + # Only prediction values larger than thr are counted as correct + _correct = correct & (pred_score.t() > thr) + correct_k = _correct[:k].reshape(-1).float().sum(0, keepdim=True) + res_thr.append((correct_k.mul_(100. 
/ num))) + if res_single: + res.append(res_thr[0]) + else: + res.append(res_thr) + return res + + +def accuracy(pred, target, topk=1, thrs=0.): + """Calculate accuracy according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction. + target (torch.Tensor | np.array): The target of each prediction + topk (int | tuple[int]): If the predictions in ``topk`` + matches the target, the predictions will be regarded as + correct ones. Defaults to 1. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + torch.Tensor | list[torch.Tensor] | list[list[torch.Tensor]]: Accuracy + - torch.Tensor: If both ``topk`` and ``thrs`` is a single value. + - list[torch.Tensor]: If one of ``topk`` or ``thrs`` is a tuple. + - list[list[torch.Tensor]]: If both ``topk`` and ``thrs`` is a \ + tuple. And the first dim is ``topk``, the second dim is ``thrs``. + """ + assert isinstance(topk, (int, tuple)) + if isinstance(topk, int): + topk = (topk, ) + return_single = True + else: + return_single = False + + assert isinstance(pred, (torch.Tensor, np.ndarray)), \ + f'The pred should be torch.Tensor or np.ndarray ' \ + f'instead of {type(pred)}.' + assert isinstance(target, (torch.Tensor, np.ndarray)), \ + f'The target should be torch.Tensor or np.ndarray ' \ + f'instead of {type(target)}.' + + # torch version is faster in most situations. + to_tensor = (lambda x: torch.from_numpy(x) + if isinstance(x, np.ndarray) else x) + pred = to_tensor(pred) + target = to_tensor(target) + + res = accuracy_torch(pred, target, topk, thrs) + + return res[0] if return_single else res + + +class Accuracy(nn.Module): + + def __init__(self, topk=(1, )): + """Module to calculate the accuracy. + + Args: + topk (tuple): The criterion used to calculate the + accuracy. Defaults to (1,). + """ + super().__init__() + self.topk = topk + + def forward(self, pred, target): + """Forward function to calculate accuracy. + + Args: + pred (torch.Tensor): Prediction of models. + target (torch.Tensor): Target for each prediction. + + Returns: + list[torch.Tensor]: The accuracies under different topk criterions. + """ + return accuracy(pred, target, self.topk) diff --git a/mmcls/models/losses/asymmetric_loss.py b/mmcls/models/losses/asymmetric_loss.py new file mode 100644 index 0000000..1c3b574 --- /dev/null +++ b/mmcls/models/losses/asymmetric_loss.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import convert_to_one_hot, weight_reduce_loss + + +def asymmetric_loss(pred, + target, + weight=None, + gamma_pos=1.0, + gamma_neg=4.0, + clip=0.05, + reduction='mean', + avg_factor=None, + use_sigmoid=True, + eps=1e-8): + r"""asymmetric loss. + + Please refer to the `paper `__ for + details. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction with + shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, ). Defaults to None. + gamma_pos (float): positive focusing parameter. Defaults to 0.0. + gamma_neg (float): Negative focusing parameter. We usually set + gamma_neg > gamma_pos. Defaults to 4.0. + clip (float, optional): Probability margin. Defaults to 0.05. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". If reduction is 'none' , loss + is same shape as pred and label. 
Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + use_sigmoid (bool): Whether the prediction uses sigmoid instead + of softmax. Defaults to True. + eps (float): The minimum value of the argument of logarithm. Defaults + to 1e-8. + + Returns: + torch.Tensor: Loss. + """ + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' + + if use_sigmoid: + pred_sigmoid = pred.sigmoid() + else: + pred_sigmoid = nn.functional.softmax(pred, dim=-1) + + target = target.type_as(pred) + + if clip and clip > 0: + pt = (1 - pred_sigmoid + + clip).clamp(max=1) * (1 - target) + pred_sigmoid * target + else: + pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target + asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * + (1 - target)) + loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class AsymmetricLoss(nn.Module): + """asymmetric loss. + + Args: + gamma_pos (float): positive focusing parameter. + Defaults to 0.0. + gamma_neg (float): Negative focusing parameter. We + usually set gamma_neg > gamma_pos. Defaults to 4.0. + clip (float, optional): Probability margin. Defaults to 0.05. + reduction (str): The method used to reduce the loss into + a scalar. + loss_weight (float): Weight of loss. Defaults to 1.0. + use_sigmoid (bool): Whether the prediction uses sigmoid instead + of softmax. Defaults to True. + eps (float): The minimum value of the argument of logarithm. Defaults + to 1e-8. + """ + + def __init__(self, + gamma_pos=0.0, + gamma_neg=4.0, + clip=0.05, + reduction='mean', + loss_weight=1.0, + use_sigmoid=True, + eps=1e-8): + super(AsymmetricLoss, self).__init__() + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.clip = clip + self.reduction = reduction + self.loss_weight = loss_weight + self.use_sigmoid = use_sigmoid + self.eps = eps + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + r"""asymmetric loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction + with shape (N, \*), N or (N,1). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, \*). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss into a scalar. Options are "none", "mean" and "sum". + Defaults to None. + + Returns: + torch.Tensor: Loss. 
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1): + target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1]) + loss_cls = self.loss_weight * asymmetric_loss( + pred, + target, + weight, + gamma_pos=self.gamma_pos, + gamma_neg=self.gamma_neg, + clip=self.clip, + reduction=reduction, + avg_factor=avg_factor, + use_sigmoid=self.use_sigmoid, + eps=self.eps) + return loss_cls diff --git a/mmcls/models/losses/cross_entropy_loss.py b/mmcls/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000..0b92212 --- /dev/null +++ b/mmcls/models/losses/cross_entropy_loss.py @@ -0,0 +1,209 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None): + """Calculate the CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The gt label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. + + Returns: + torch.Tensor: The calculated loss + """ + # element-wise losses + loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none') + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def soft_cross_entropy(pred, + label, + weight=None, + reduction='mean', + class_weight=None, + avg_factor=None): + """Calculate the Soft CrossEntropy loss. The label can be float. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The gt label of the prediction with shape (N, C). + When using "mixup", the label can be float. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. + + Returns: + torch.Tensor: The calculated loss + """ + # element-wise losses + loss = -label * F.log_softmax(pred, dim=-1) + if class_weight is not None: + loss *= class_weight + loss = loss.sum(dim=-1) + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + pos_weight=None): + r"""Calculate the binary CrossEntropy loss with logits. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + label (torch.Tensor): The gt label with shape (N, \*). + weight (torch.Tensor, optional): Element-wise weight of loss with shape + (N, ). Defaults to None. 
+ reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". If reduction is 'none' , loss + is same shape as pred and label. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. + pos_weight (torch.Tensor, optional): The positive weight for each + class with shape (C), C is the number of classes. Default None. + + Returns: + torch.Tensor: The calculated loss + """ + # Ensure that the size of class_weight is consistent with pred and label to + # avoid automatic boracast, + assert pred.dim() == label.dim() + + if class_weight is not None: + N = pred.size()[0] + class_weight = class_weight.repeat(N, 1) + loss = F.binary_cross_entropy_with_logits( + pred, + label, + weight=class_weight, + pos_weight=pos_weight, + reduction='none') + + # apply weights and do the reduction + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + return loss + + +@LOSSES.register_module() +class CrossEntropyLoss(nn.Module): + """Cross entropy loss. + + Args: + use_sigmoid (bool): Whether the prediction uses sigmoid + of softmax. Defaults to False. + use_soft (bool): Whether to use the soft version of CrossEntropyLoss. + Defaults to False. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". Defaults to 'mean'. + loss_weight (float): Weight of the loss. Defaults to 1.0. + class_weight (List[float], optional): The weight for each class with + shape (C), C is the number of classes. Default None. + pos_weight (List[float], optional): The positive weight for each + class with shape (C), C is the number of classes. Only enabled in + BCE loss when ``use_sigmoid`` is True. Default None. 
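+
+    Example (an illustrative sketch; the scores and labels below are random
+    placeholders)::
+
+        >>> import torch
+        >>> criterion = CrossEntropyLoss(loss_weight=1.0)
+        >>> cls_score = torch.rand(4, 10)
+        >>> label = torch.randint(0, 10, (4, ))
+        >>> loss = criterion(cls_score, label)  # scalar tensor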
+ """ + + def __init__(self, + use_sigmoid=False, + use_soft=False, + reduction='mean', + loss_weight=1.0, + class_weight=None, + pos_weight=None): + super(CrossEntropyLoss, self).__init__() + self.use_sigmoid = use_sigmoid + self.use_soft = use_soft + assert not ( + self.use_soft and self.use_sigmoid + ), 'use_sigmoid and use_soft could not be set simultaneously' + + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = class_weight + self.pos_weight = pos_weight + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_soft: + self.cls_criterion = soft_cross_entropy + else: + self.cls_criterion = cross_entropy + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + + # only BCE loss has pos_weight + if self.pos_weight is not None and self.use_sigmoid: + pos_weight = cls_score.new_tensor(self.pos_weight) + kwargs.update({'pos_weight': pos_weight}) + else: + pos_weight = None + + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls diff --git a/mmcls/models/losses/focal_loss.py b/mmcls/models/losses/focal_loss.py new file mode 100644 index 0000000..8bd0c45 --- /dev/null +++ b/mmcls/models/losses/focal_loss.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import convert_to_one_hot, weight_reduce_loss + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + r"""Sigmoid focal loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction with + shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, ). Defaults to None. + gamma (float): The gamma for calculating the modulating factor. + Defaults to 2.0. + alpha (float): A balanced form for Focal Loss. Defaults to 0.25. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". If reduction is 'none' , + loss is same shape as pred and label. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class FocalLoss(nn.Module): + """Focal loss. + + Args: + gamma (float): Focusing parameter in focal loss. + Defaults to 2.0. 
+ alpha (float): The parameter in balanced form of focal + loss. Defaults to 0.25. + reduction (str): The method used to reduce the loss into + a scalar. Options are "none" and "mean". Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. + """ + + def __init__(self, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0): + + super(FocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + r"""Sigmoid focal loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction + with shape (N, \*), N or (N,1). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, \*). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss into a scalar. Options are "none", "mean" and "sum". + Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1): + target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1]) + loss_cls = self.loss_weight * sigmoid_focal_loss( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + return loss_cls diff --git a/mmcls/models/losses/label_smooth_loss.py b/mmcls/models/losses/label_smooth_loss.py new file mode 100644 index 0000000..daa7344 --- /dev/null +++ b/mmcls/models/losses/label_smooth_loss.py @@ -0,0 +1,157 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .cross_entropy_loss import CrossEntropyLoss +from .utils import convert_to_one_hot + + +@LOSSES.register_module() +class LabelSmoothLoss(nn.Module): + r"""Initializer for the label smoothed cross entropy loss. + + Refers to `Rethinking the Inception Architecture for Computer Vision + `_ + + This decreases gap between output scores and encourages generalization. + Labels provided to forward can be one-hot like vectors (NxC) or class + indices (Nx1). + And this accepts linear combination of one-hot like labels from mixup or + cutmix except multi-label task. + + Args: + label_smooth_val (float): The degree of label smoothing. + num_classes (int, optional): Number of classes. Defaults to None. + mode (str): Refers to notes, Options are 'original', 'classy_vision', + 'multi_label'. Defaults to 'original' + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". Defaults to 'mean'. + loss_weight (float): Weight of the loss. Defaults to 1.0. + + Notes: + if the mode is "original", this will use the same label smooth method + as the original paper as: + + .. math:: + (1-\epsilon)\delta_{k, y} + \frac{\epsilon}{K} + + where epsilon is the `label_smooth_val`, K is the num_classes and + delta(k,y) is Dirac delta, which equals 1 for k=y and 0 otherwise. + + if the mode is "classy_vision", this will use the same label smooth + method as the facebookresearch/ClassyVision repo as: + + .. 
math:: + \frac{\delta_{k, y} + \epsilon/K}{1+\epsilon} + + if the mode is "multi_label", this will accept labels from multi-label + task and smoothing them as: + + .. math:: + (1-2\epsilon)\delta_{k, y} + \epsilon + """ + + def __init__(self, + label_smooth_val, + num_classes=None, + mode='original', + reduction='mean', + loss_weight=1.0): + super().__init__() + self.num_classes = num_classes + self.loss_weight = loss_weight + + assert (isinstance(label_smooth_val, float) + and 0 <= label_smooth_val < 1), \ + f'LabelSmoothLoss accepts a float label_smooth_val ' \ + f'over [0, 1), but gets {label_smooth_val}' + self.label_smooth_val = label_smooth_val + + accept_reduction = {'none', 'mean', 'sum'} + assert reduction in accept_reduction, \ + f'LabelSmoothLoss supports reduction {accept_reduction}, ' \ + f'but gets {mode}.' + self.reduction = reduction + + accept_mode = {'original', 'classy_vision', 'multi_label'} + assert mode in accept_mode, \ + f'LabelSmoothLoss supports mode {accept_mode}, but gets {mode}.' + self.mode = mode + + self._eps = label_smooth_val + if mode == 'classy_vision': + self._eps = label_smooth_val / (1 + label_smooth_val) + if mode == 'multi_label': + self.ce = CrossEntropyLoss(use_sigmoid=True) + self.smooth_label = self.multilabel_smooth_label + else: + self.ce = CrossEntropyLoss(use_soft=True) + self.smooth_label = self.original_smooth_label + + def generate_one_hot_like_label(self, label): + """This function takes one-hot or index label vectors and computes one- + hot like label vectors (float)""" + # check if targets are inputted as class integers + if label.dim() == 1 or (label.dim() == 2 and label.shape[1] == 1): + label = convert_to_one_hot(label.view(-1, 1), self.num_classes) + return label.float() + + def original_smooth_label(self, one_hot_like_label): + assert self.num_classes > 0 + smooth_label = one_hot_like_label * (1 - self._eps) + smooth_label += self._eps / self.num_classes + return smooth_label + + def multilabel_smooth_label(self, one_hot_like_label): + assert self.num_classes > 0 + smooth_label = torch.full_like(one_hot_like_label, self._eps) + smooth_label.masked_fill_(one_hot_like_label > 0, 1 - self._eps) + return smooth_label + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + r"""Label smooth loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + label (torch.Tensor): The ground truth label of the prediction + with shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, \*). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss into a scalar. Options are "none", "mean" and "sum". + Defaults to None. + + Returns: + torch.Tensor: Loss. 
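+
+        Example (an illustrative sketch; the 5-class setting and random
+        inputs below are arbitrary assumptions)::
+
+            >>> import torch
+            >>> criterion = LabelSmoothLoss(label_smooth_val=0.1,
+            ...                             num_classes=5)
+            >>> cls_score = torch.rand(4, 5)
+            >>> label = torch.randint(0, 5, (4, ))
+            >>> loss = criterion(cls_score, label)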
+ """ + if self.num_classes is not None: + assert self.num_classes == cls_score.shape[1], \ + f'num_classes should equal to cls_score.shape[1], ' \ + f'but got num_classes: {self.num_classes} and ' \ + f'cls_score.shape[1]: {cls_score.shape[1]}' + else: + self.num_classes = cls_score.shape[1] + + one_hot_like_label = self.generate_one_hot_like_label(label=label) + assert one_hot_like_label.shape == cls_score.shape, \ + f'LabelSmoothLoss requires output and target ' \ + f'to be same shape, but got output.shape: {cls_score.shape} ' \ + f'and target.shape: {one_hot_like_label.shape}' + + smoothed_label = self.smooth_label(one_hot_like_label) + return self.ce.forward( + cls_score, + smoothed_label, + weight=weight, + avg_factor=avg_factor, + reduction_override=reduction_override, + **kwargs) diff --git a/mmcls/models/losses/seesaw_loss.py b/mmcls/models/losses/seesaw_loss.py new file mode 100644 index 0000000..14176de --- /dev/null +++ b/mmcls/models/losses/seesaw_loss.py @@ -0,0 +1,173 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# migrate from mmdetection with modifications +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def seesaw_ce_loss(cls_score, + labels, + weight, + cum_samples, + num_classes, + p, + q, + eps, + reduction='mean', + avg_factor=None): + """Calculate the Seesaw CrossEntropy loss. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C), + C is the number of classes. + labels (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor): Sample-wise loss weight. + cum_samples (torch.Tensor): Cumulative samples for each category. + num_classes (int): The number of classes. + p (float): The ``p`` in the mitigation factor. + q (float): The ``q`` in the compenstation factor. + eps (float): The minimal value of divisor to smooth + the computation of compensation factor + reduction (str, optional): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + + Returns: + torch.Tensor: The calculated loss + """ + assert cls_score.size(-1) == num_classes + assert len(cum_samples) == num_classes + + onehot_labels = F.one_hot(labels, num_classes) + seesaw_weights = cls_score.new_ones(onehot_labels.size()) + + # mitigation factor + if p > 0: + sample_ratio_matrix = cum_samples[None, :].clamp( + min=1) / cum_samples[:, None].clamp(min=1) + index = (sample_ratio_matrix < 1.0).float() + sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index + ) # M_{ij} + mitigation_factor = sample_weights[labels.long(), :] + seesaw_weights = seesaw_weights * mitigation_factor + + # compensation factor + if q > 0: + scores = F.softmax(cls_score.detach(), dim=1) + self_scores = scores[ + torch.arange(0, len(scores)).to(scores.device).long(), + labels.long()] + score_matrix = scores / self_scores[:, None].clamp(min=eps) + index = (score_matrix > 1.0).float() + compensation_factor = score_matrix.pow(q) * index + (1 - index) + seesaw_weights = seesaw_weights * compensation_factor + + cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels)) + + loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none') + + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + return loss + + +@LOSSES.register_module() +class SeesawLoss(nn.Module): + """Implementation of seesaw loss. 
+ + Refers to `Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021) + `_ + + Args: + use_sigmoid (bool): Whether the prediction uses sigmoid of softmax. + Only False is supported. Defaults to False. + p (float): The ``p`` in the mitigation factor. + Defaults to 0.8. + q (float): The ``q`` in the compenstation factor. + Defaults to 2.0. + num_classes (int): The number of classes. + Default to 1000 for the ImageNet dataset. + eps (float): The minimal value of divisor to smooth + the computation of compensation factor, default to 1e-2. + reduction (str): The method that reduces the loss to a scalar. + Options are "none", "mean" and "sum". Default to "mean". + loss_weight (float): The weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + use_sigmoid=False, + p=0.8, + q=2.0, + num_classes=1000, + eps=1e-2, + reduction='mean', + loss_weight=1.0): + super(SeesawLoss, self).__init__() + assert not use_sigmoid, '`use_sigmoid` is not supported' + self.use_sigmoid = False + self.p = p + self.q = q + self.num_classes = num_classes + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + self.cls_criterion = seesaw_ce_loss + + # cumulative samples for each category + self.register_buffer('cum_samples', + torch.zeros(self.num_classes, dtype=torch.float)) + + def forward(self, + cls_score, + labels, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C). + labels (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum'), \ + f'The `reduction_override` should be one of (None, "none", ' \ + f'"mean", "sum"), but get "{reduction_override}".' + assert cls_score.size(0) == labels.view(-1).size(0), \ + f'Expected `labels` shape [{cls_score.size(0)}], ' \ + f'but got {list(labels.size())}' + reduction = ( + reduction_override if reduction_override else self.reduction) + assert cls_score.size(-1) == self.num_classes, \ + f'The channel number of output ({cls_score.size(-1)}) does ' \ + f'not match the `num_classes` of seesaw loss ({self.num_classes}).' + + # accumulate the samples for each category + unique_labels = labels.unique() + for u_l in unique_labels: + inds_ = labels == u_l.item() + self.cum_samples[u_l] += inds_.sum() + + if weight is not None: + weight = weight.float() + else: + weight = labels.new_ones(labels.size(), dtype=torch.float) + + # calculate loss_cls_classes + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, labels, weight, self.cum_samples, self.num_classes, + self.p, self.q, self.eps, reduction, avg_factor) + + return loss_cls diff --git a/mmcls/models/losses/utils.py b/mmcls/models/losses/utils.py new file mode 100644 index 0000000..a65b68a --- /dev/null +++ b/mmcls/models/losses/utils.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools + +import torch +import torch.nn.functional as F + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. 
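+
+    Example (a small self-contained illustration)::
+
+        >>> import torch
+        >>> loss = torch.tensor([1., 2., 3.])
+        >>> reduce_loss(loss, 'mean')
+        tensor(2.)
+        >>> reduce_loss(loss, 'sum')
+        tensor(6.)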
+ """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. + avg_factor (float): Average factor when computing the mean of losses. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + loss = loss.sum() / avg_factor + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + ``loss_func(pred, target, **kwargs)``. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like ``loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)``. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred, + target, + weight=None, + reduction='mean', + avg_factor=None, + **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper + + +def convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor: + """This function converts target class indices to one-hot vectors, given + the number of classes. + + Args: + targets (Tensor): The ground truth label of the prediction + with shape (N, 1) + classes (int): the number of classes. + + Returns: + Tensor: Processed loss values. + """ + assert (torch.max(targets).item() < + classes), 'Class Index must be less than number of classes' + one_hot_targets = F.one_hot( + targets.long().squeeze(-1), num_classes=classes) + return one_hot_targets diff --git a/mmcls/models/necks/__init__.py b/mmcls/models/necks/__init__.py new file mode 100644 index 0000000..5826d3a --- /dev/null +++ b/mmcls/models/necks/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .gap import GlobalAveragePooling +from .gem import GeneralizedMeanPooling +from .hr_fuse import HRFuseScales + +from ...gpvit_dev.models.necks.group_neck import GroupNeck + +__all__ = ['GlobalAveragePooling', 'GeneralizedMeanPooling', 'HRFuseScales','GroupNeck'] diff --git a/mmcls/models/necks/gap.py b/mmcls/models/necks/gap.py new file mode 100644 index 0000000..f64cce0 --- /dev/null +++ b/mmcls/models/necks/gap.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from ..builder import NECKS + + +@NECKS.register_module() +class GlobalAveragePooling(nn.Module): + """Global Average Pooling neck. + + Note that we use `view` to remove extra channel after pooling. We do not + use `squeeze` as it will also remove the batch dimension when the tensor + has a batch dimension of size 1, which can lead to unexpected errors. + + Args: + dim (int): Dimensions of each sample channel, can be one of {1, 2, 3}. + Default: 2 + """ + + def __init__(self, dim=2): + super(GlobalAveragePooling, self).__init__() + assert dim in [1, 2, 3], 'GlobalAveragePooling dim only support ' \ + f'{1, 2, 3}, get {dim} instead.' + if dim == 1: + self.gap = nn.AdaptiveAvgPool1d(1) + elif dim == 2: + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + else: + self.gap = nn.AdaptiveAvgPool3d((1, 1, 1)) + + def init_weights(self): + pass + + def forward(self, inputs): + if isinstance(inputs, tuple): + outs = tuple([self.gap(x) for x in inputs]) + outs = tuple( + [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) + elif isinstance(inputs, torch.Tensor): + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + else: + raise TypeError('neck inputs should be tuple or torch.tensor') + return outs diff --git a/mmcls/models/necks/gem.py b/mmcls/models/necks/gem.py new file mode 100644 index 0000000..f499357 --- /dev/null +++ b/mmcls/models/necks/gem.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import Tensor, nn +from torch.nn import functional as F +from torch.nn.parameter import Parameter + +from ..builder import NECKS + + +def gem(x: Tensor, p: Parameter, eps: float = 1e-6, clamp=True) -> Tensor: + if clamp: + x = x.clamp(min=eps) + return F.avg_pool2d(x.pow(p), (x.size(-2), x.size(-1))).pow(1. / p) + + +@NECKS.register_module() +class GeneralizedMeanPooling(nn.Module): + """Generalized Mean Pooling neck. + + Note that we use `view` to remove extra channel after pooling. We do not + use `squeeze` as it will also remove the batch dimension when the tensor + has a batch dimension of size 1, which can lead to unexpected errors. + + Args: + p (float): Parameter value. + Default: 3. + eps (float): epsilon. + Default: 1e-6 + clamp (bool): Use clamp before pooling. 
+ Default: True + """ + + def __init__(self, p=3., eps=1e-6, clamp=True): + assert p >= 1, "'p' must be a value greater then 1" + super(GeneralizedMeanPooling, self).__init__() + self.p = Parameter(torch.ones(1) * p) + self.eps = eps + self.clamp = clamp + + def forward(self, inputs): + if isinstance(inputs, tuple): + outs = tuple([ + gem(x, p=self.p, eps=self.eps, clamp=self.clamp) + for x in inputs + ]) + outs = tuple( + [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) + elif isinstance(inputs, torch.Tensor): + outs = gem(inputs, p=self.p, eps=self.eps, clamp=self.clamp) + outs = outs.view(inputs.size(0), -1) + else: + raise TypeError('neck inputs should be tuple or torch.tensor') + return outs diff --git a/mmcls/models/necks/hr_fuse.py b/mmcls/models/necks/hr_fuse.py new file mode 100644 index 0000000..1acc382 --- /dev/null +++ b/mmcls/models/necks/hr_fuse.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn.bricks import ConvModule +from mmcv.runner import BaseModule + +from ..backbones.resnet import Bottleneck, ResLayer +from ..builder import NECKS + + +@NECKS.register_module() +class HRFuseScales(BaseModule): + """Fuse feature map of multiple scales in HRNet. + + Args: + in_channels (list[int]): The input channels of all scales. + out_channels (int): The channels of fused feature map. + Defaults to 2048. + norm_cfg (dict): dictionary to construct norm layers. + Defaults to ``dict(type='BN', momentum=0.1)``. + init_cfg (dict | list[dict], optional): Initialization config dict. + Defaults to ``dict(type='Normal', layer='Linear', std=0.01))``. + """ + + def __init__(self, + in_channels, + out_channels=2048, + norm_cfg=dict(type='BN', momentum=0.1), + init_cfg=dict(type='Normal', layer='Linear', std=0.01)): + super(HRFuseScales, self).__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.norm_cfg = norm_cfg + + block_type = Bottleneck + out_channels = [128, 256, 512, 1024] + + # Increase the channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + increase_layers = [] + for i in range(len(in_channels)): + increase_layers.append( + ResLayer( + block_type, + in_channels=in_channels[i], + out_channels=out_channels[i], + num_blocks=1, + stride=1, + )) + self.increase_layers = nn.ModuleList(increase_layers) + + # Downsample feature maps in each scale. + downsample_layers = [] + for i in range(len(in_channels) - 1): + downsample_layers.append( + ConvModule( + in_channels=out_channels[i], + out_channels=out_channels[i + 1], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + bias=False, + )) + self.downsample_layers = nn.ModuleList(downsample_layers) + + # The final conv block before final classifier linear layer. + self.final_layer = ConvModule( + in_channels=out_channels[3], + out_channels=self.out_channels, + kernel_size=1, + norm_cfg=self.norm_cfg, + bias=False, + ) + + def forward(self, x): + assert isinstance(x, tuple) and len(x) == len(self.in_channels) + + feat = self.increase_layers[0](x[0]) + for i in range(len(self.downsample_layers)): + feat = self.downsample_layers[i](feat) + \ + self.increase_layers[i + 1](x[i + 1]) + + return (self.final_layer(feat), ) diff --git a/mmcls/models/utils/__init__.py b/mmcls/models/utils/__init__.py new file mode 100644 index 0000000..05af4db --- /dev/null +++ b/mmcls/models/utils/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .attention import MultiheadAttention, ShiftWindowMSA, WindowMSAV2 +from .augment.augments import Augments +from .channel_shuffle import channel_shuffle +from .embed import (HybridEmbed, PatchEmbed, PatchMerging, resize_pos_embed, + resize_relative_position_bias_table) +from .helpers import is_tracing, to_2tuple, to_3tuple, to_4tuple, to_ntuple +from .inverted_residual import InvertedResidual +from .layer_scale import LayerScale +from .make_divisible import make_divisible +from .position_encoding import ConditionalPositionEncoding +from .se_layer import SELayer + +__all__ = [ + 'channel_shuffle', 'make_divisible', 'InvertedResidual', 'SELayer', + 'to_ntuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'PatchEmbed', + 'PatchMerging', 'HybridEmbed', 'Augments', 'ShiftWindowMSA', 'is_tracing', + 'MultiheadAttention', 'ConditionalPositionEncoding', 'resize_pos_embed', + 'resize_relative_position_bias_table', 'WindowMSAV2', 'LayerScale' +] diff --git a/mmcls/models/utils/attention.py b/mmcls/models/utils/attention.py new file mode 100644 index 0000000..1aae72a --- /dev/null +++ b/mmcls/models/utils/attention.py @@ -0,0 +1,564 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks.registry import DROPOUT_LAYERS +from mmcv.cnn.bricks.transformer import build_dropout +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule + +from ..builder import ATTENTION +from .helpers import to_2tuple + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + attn_drop (float, optional): Dropout ratio of attention weight. + Defaults to 0. + proj_drop (float, optional): Dropout ratio of output. Defaults to 0. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
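+
+    Example (an illustrative sketch; the 7x7 window, 96 channels and the
+    batch of 4 windows below are arbitrary assumptions)::
+
+        >>> import torch
+        >>> attn = WindowMSA(embed_dims=96, window_size=(7, 7), num_heads=3)
+        >>> x = torch.rand(4, 49, 96)  # (num_windows*B, Wh*Ww, C)
+        >>> attn(x).shape
+        torch.Size([4, 49, 96])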
+ """ + + def __init__(self, + embed_dims, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0., + proj_drop=0., + init_cfg=None): + + super().__init__(init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + super(WindowMSA, self).init_weights() + + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww, + Wh*Ww), value should be between (-inf, 0]. + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class WindowMSAV2(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Based on implementation on Swin Transformer V2 original repo. Refers to + https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer_v2.py + for more details. + + Args: + embed_dims (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Defaults to True. + attn_drop (float, optional): Dropout ratio of attention weight. + Defaults to 0. + proj_drop (float, optional): Dropout ratio of output. Defaults to 0. 
+ pretrained_window_size (tuple(int)): The height and width of the window + in pre-training. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + window_size, + num_heads, + qkv_bias=True, + attn_drop=0., + proj_drop=0., + cpb_mlp_hidden_dims=512, + pretrained_window_size=(0, 0), + init_cfg=None, + **kwargs): # accept extra arguments + + super().__init__(init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + + # Use small network for continuous relative position bias + self.cpb_mlp = nn.Sequential( + nn.Linear( + in_features=2, out_features=cpb_mlp_hidden_dims, bias=True), + nn.ReLU(inplace=True), + nn.Linear( + in_features=cpb_mlp_hidden_dims, + out_features=num_heads, + bias=False)) + + # Add learnable scalar for cosine attention + self.logit_scale = nn.Parameter( + torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True) + + # get relative_coords_table + relative_coords_h = torch.arange( + -(self.window_size[0] - 1), + self.window_size[0], + dtype=torch.float32) + relative_coords_w = torch.arange( + -(self.window_size[1] - 1), + self.window_size[1], + dtype=torch.float32) + relative_coords_table = torch.stack( + torch.meshgrid([relative_coords_h, relative_coords_w])).permute( + 1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + if pretrained_window_size[0] > 0: + relative_coords_table[:, :, :, 0] /= ( + pretrained_window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= ( + pretrained_window_size[1] - 1) + else: + relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + torch.abs(relative_coords_table) + 1.0) / np.log2(8) + self.register_buffer('relative_coords_table', relative_coords_table) + + # get pair-wise relative position index + # for each token inside the window + indexes_h = torch.arange(self.window_size[0]) + indexes_w = torch.arange(self.window_size[1]) + coordinates = torch.stack( + torch.meshgrid([indexes_h, indexes_w]), dim=0) # 2, Wh, Ww + coordinates = torch.flatten(coordinates, start_dim=1) # 2, Wh*Ww + # 2, Wh*Ww, Wh*Ww + relative_coordinates = coordinates[:, :, None] - coordinates[:, + None, :] + relative_coordinates = relative_coordinates.permute( + 1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + + relative_coordinates[:, :, 0] += self.window_size[ + 0] - 1 # shift to start from 0 + relative_coordinates[:, :, 1] += self.window_size[1] - 1 + relative_coordinates[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coordinates.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer('relative_position_index', + relative_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(embed_dims)) + self.v_bias = nn.Parameter(torch.zeros(embed_dims)) + else: + self.q_bias = None + self.v_bias = None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww, + Wh*Ww), value should be between (-inf, 0]. 
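+
+        Example (an illustrative sketch; the window size, channel number
+        and batch of windows below are arbitrary assumptions)::
+
+            >>> import torch
+            >>> attn = WindowMSAV2(embed_dims=96, window_size=(7, 7),
+            ...                    num_heads=3)
+            >>> x = torch.rand(4, 49, 96)  # (num_windows*B, Wh*Ww, C)
+            >>> attn(x).shape
+            torch.Size([4, 49, 96])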
+ """ + B_, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat( + (self.q_bias, + torch.zeros_like(self.v_bias, + requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B_, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + # cosine attention + attn = ( + F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp( + self.logit_scale, max=np.log(1. / 0.01)).exp() + attn = attn * logit_scale + + relative_position_bias_table = self.cpb_mlp( + self.relative_coords_table).view(-1, self.num_heads) + relative_position_bias = relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + relative_position_bias = 16 * torch.sigmoid(relative_position_bias) + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +@ATTENTION.register_module() +class ShiftWindowMSA(BaseModule): + """Shift Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Defaults to True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Defaults to None. + attn_drop (float, optional): Dropout ratio of attention weight. + Defaults to 0.0. + proj_drop (float, optional): Dropout ratio of output. Defaults to 0. + dropout_layer (dict, optional): The dropout_layer used before output. + Defaults to dict(type='DropPath', drop_prob=0.). + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + version (str, optional): Version of implementation of Swin + Transformers. Defaults to `v1`. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + shift_size=0, + qkv_bias=True, + qk_scale=None, + attn_drop=0, + proj_drop=0, + dropout_layer=dict(type='DropPath', drop_prob=0.), + pad_small_map=False, + input_resolution=None, + auto_pad=None, + window_msa=WindowMSA, + msa_cfg=dict(), + init_cfg=None): + super().__init__(init_cfg) + + if input_resolution is not None or auto_pad is not None: + warnings.warn( + 'The ShiftWindowMSA in new version has supported auto padding ' + 'and dynamic input shape in all condition. 
And the argument ' + '`auto_pad` and `input_resolution` have been deprecated.', + DeprecationWarning) + + self.shift_size = shift_size + self.window_size = window_size + assert 0 <= self.shift_size < self.window_size + + assert issubclass(window_msa, BaseModule), \ + 'Expect Window based multi-head self-attention Module is type of' \ + f'{type(BaseModule)}, but got {type(window_msa)}.' + self.w_msa = window_msa( + embed_dims=embed_dims, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + **msa_cfg, + ) + + self.drop = build_dropout(dropout_layer) + self.pad_small_map = pad_small_map + + def forward(self, query, hw_shape): + B, L, C = query.shape + H, W = hw_shape + assert L == H * W, f"The query length {L} doesn't match the input "\ + f'shape ({H}, {W}).' + query = query.view(B, H, W, C) + + window_size = self.window_size + shift_size = self.shift_size + + if min(H, W) == window_size: + # If not pad small feature map, avoid shifting when the window size + # is equal to the size of feature map. It's to align with the + # behavior of the original implementation. + shift_size = shift_size if self.pad_small_map else 0 + elif min(H, W) < window_size: + # In the original implementation, the window size will be shrunk + # to the size of feature map. The behavior is different with + # swin-transformer for downstream tasks. To support dynamic input + # shape, we don't allow this feature. + assert self.pad_small_map, \ + f'The input shape ({H}, {W}) is smaller than the window ' \ + f'size ({window_size}). Please set `pad_small_map=True`, or ' \ + 'decrease the `window_size`.' + + pad_r = (window_size - W % window_size) % window_size + pad_b = (window_size - H % window_size) % window_size + query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) + + H_pad, W_pad = query.shape[1], query.shape[2] + + # cyclic shift + if shift_size > 0: + query = torch.roll( + query, shifts=(-shift_size, -shift_size), dims=(1, 2)) + + attn_mask = self.get_attn_mask((H_pad, W_pad), + window_size=window_size, + shift_size=shift_size, + device=query.device) + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(query, window_size) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, window_size, window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, H_pad, W_pad, + window_size) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, shifts=(shift_size, shift_size), dims=(1, 2)) + else: + x = shifted_x + + if H != H_pad or W != W_pad: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + + return x + + @staticmethod + def window_reverse(windows, H, W, window_size): + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + @staticmethod + def window_partition(x, window_size): + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + @staticmethod + def 
get_attn_mask(hw_shape, window_size, shift_size, device=None): + if shift_size > 0: + img_mask = torch.zeros(1, *hw_shape, 1, device=device) + h_slices = (slice(0, -window_size), slice(-window_size, + -shift_size), + slice(-shift_size, None)) + w_slices = (slice(0, -window_size), slice(-window_size, + -shift_size), + slice(-shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = ShiftWindowMSA.window_partition( + img_mask, window_size) + mask_windows = mask_windows.view(-1, window_size * window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0) + attn_mask = attn_mask.masked_fill(attn_mask == 0, 0.0) + else: + attn_mask = None + return attn_mask + + +class MultiheadAttention(BaseModule): + """Multi-head Attention Module. + + This module implements multi-head attention that supports different input + dims and embed dims. And it also supports a shortcut from ``value``, which + is useful if input dims is not the same with embed dims. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + input_dims (int, optional): The input dimension, and if None, + use ``embed_dims``. Defaults to None. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + dropout_layer (dict): The dropout config before adding the shortcut. + Defaults to ``dict(type='Dropout', drop_prob=0.)``. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + v_shortcut (bool): Add a shortcut from value to output. It's usually + used if ``input_dims`` is different from ``embed_dims``. + Defaults to False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
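+
+    Example (an illustrative sketch; the 256-dim tokens and sequence length
+    below are arbitrary assumptions)::
+
+        >>> import torch
+        >>> attn = MultiheadAttention(embed_dims=256, num_heads=8)
+        >>> x = torch.rand(2, 50, 256)  # (B, N, embed_dims)
+        >>> attn(x).shape
+        torch.Size([2, 50, 256])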
+ """ + + def __init__(self, + embed_dims, + num_heads, + input_dims=None, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + qkv_bias=True, + qk_scale=None, + proj_bias=True, + v_shortcut=False, + init_cfg=None): + super(MultiheadAttention, self).__init__(init_cfg=init_cfg) + + self.input_dims = input_dims or embed_dims + self.embed_dims = embed_dims + self.num_heads = num_heads + self.v_shortcut = v_shortcut + + self.head_dims = embed_dims // num_heads + self.scale = qk_scale or self.head_dims**-0.5 + + self.qkv = nn.Linear(self.input_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + + self.out_drop = DROPOUT_LAYERS.build(dropout_layer) + + def forward(self, x): + B, N, _ = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + self.head_dims).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, self.embed_dims) + x = self.proj(x) + x = self.out_drop(self.proj_drop(x)) + + if self.v_shortcut: + x = v.squeeze(1) + x + return x diff --git a/mmcls/models/utils/augment/__init__.py b/mmcls/models/utils/augment/__init__.py new file mode 100644 index 0000000..9f92cd5 --- /dev/null +++ b/mmcls/models/utils/augment/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .augments import Augments +from .cutmix import BatchCutMixLayer +from .identity import Identity +from .mixup import BatchMixupLayer +from .resizemix import BatchResizeMixLayer + +__all__ = ('Augments', 'BatchCutMixLayer', 'Identity', 'BatchMixupLayer', + 'BatchResizeMixLayer') diff --git a/mmcls/models/utils/augment/augments.py b/mmcls/models/utils/augment/augments.py new file mode 100644 index 0000000..8455e93 --- /dev/null +++ b/mmcls/models/utils/augment/augments.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random + +import numpy as np + +from .builder import build_augment + + +class Augments(object): + """Data augments. + + We implement some data augmentation methods, such as mixup, cutmix. + + Args: + augments_cfg (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict`): + Config dict of augments + + Example: + >>> augments_cfg = [ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3) + ] + >>> augments = Augments(augments_cfg) + >>> imgs = torch.randn(16, 3, 32, 32) + >>> label = torch.randint(0, 10, (16, )) + >>> imgs, label = augments(imgs, label) + + To decide which augmentation within Augments block is used + the following rule is applied. + We pick augmentation based on the probabilities. In the example above, + we decide if we should use BatchCutMix with probability 0.5, + BatchMixup 0.3. As Identity is not in augments_cfg, we use Identity with + probability 1 - 0.5 - 0.3 = 0.2. + """ + + def __init__(self, augments_cfg): + super(Augments, self).__init__() + + if isinstance(augments_cfg, dict): + augments_cfg = [augments_cfg] + + assert len(augments_cfg) > 0, \ + 'The length of augments_cfg should be positive.' 
+ self.augments = [build_augment(cfg) for cfg in augments_cfg] + self.augment_probs = [aug.prob for aug in self.augments] + + has_identity = any([cfg['type'] == 'Identity' for cfg in augments_cfg]) + if has_identity: + assert sum(self.augment_probs) == 1.0,\ + 'The sum of augmentation probabilities should equal to 1,' \ + ' but got {:.2f}'.format(sum(self.augment_probs)) + else: + assert sum(self.augment_probs) <= 1.0,\ + 'The sum of augmentation probabilities should less than or ' \ + 'equal to 1, but got {:.2f}'.format(sum(self.augment_probs)) + identity_prob = 1 - sum(self.augment_probs) + if identity_prob > 0: + num_classes = self.augments[0].num_classes + self.augments += [ + build_augment( + dict( + type='Identity', + num_classes=num_classes, + prob=identity_prob)) + ] + self.augment_probs += [identity_prob] + + def __call__(self, img, gt_label): + if self.augments: + random_state = np.random.RandomState(random.randint(0, 2**32 - 1)) + aug = random_state.choice(self.augments, p=self.augment_probs) + return aug(img, gt_label) + return img, gt_label diff --git a/mmcls/models/utils/augment/builder.py b/mmcls/models/utils/augment/builder.py new file mode 100644 index 0000000..5d1205e --- /dev/null +++ b/mmcls/models/utils/augment/builder.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import Registry, build_from_cfg + +AUGMENT = Registry('augment') + + +def build_augment(cfg, default_args=None): + return build_from_cfg(cfg, AUGMENT, default_args) diff --git a/mmcls/models/utils/augment/cutmix.py b/mmcls/models/utils/augment/cutmix.py new file mode 100644 index 0000000..0d8ba9d --- /dev/null +++ b/mmcls/models/utils/augment/cutmix.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import numpy as np +import torch + +from .builder import AUGMENT +from .utils import one_hot_encoding + + +class BaseCutMixLayer(object, metaclass=ABCMeta): + """Base class for CutMixLayer. + + Args: + alpha (float): Parameters for Beta distribution. Positive(>0) + num_classes (int): The number of classes + prob (float): MixUp probability. It should be in range [0, 1]. + Default to 1.0 + cutmix_minmax (List[float], optional): cutmix min/max image ratio. + (as percent of image size). When cutmix_minmax is not None, we + generate cutmix bounding-box using cutmix_minmax instead of alpha + correct_lam (bool): Whether to apply lambda correction when cutmix bbox + clipped by image borders. Default to True + """ + + def __init__(self, + alpha, + num_classes, + prob=1.0, + cutmix_minmax=None, + correct_lam=True): + super(BaseCutMixLayer, self).__init__() + + assert isinstance(alpha, float) and alpha > 0 + assert isinstance(num_classes, int) + assert isinstance(prob, float) and 0.0 <= prob <= 1.0 + + self.alpha = alpha + self.num_classes = num_classes + self.prob = prob + self.cutmix_minmax = cutmix_minmax + self.correct_lam = correct_lam + + def rand_bbox_minmax(self, img_shape, count=None): + """Min-Max CutMix bounding-box Inspired by Darknet cutmix + implementation. It generates a random rectangular bbox based on min/max + percent values applied to each dimension of the input image. + + Typical defaults for minmax are usually in the .2-.3 for min and + .8-.9 range for max. + + Args: + img_shape (tuple): Image shape as tuple + count (int, optional): Number of bbox to generate. 
Default to None + """ + assert len(self.cutmix_minmax) == 2 + img_h, img_w = img_shape[-2:] + cut_h = np.random.randint( + int(img_h * self.cutmix_minmax[0]), + int(img_h * self.cutmix_minmax[1]), + size=count) + cut_w = np.random.randint( + int(img_w * self.cutmix_minmax[0]), + int(img_w * self.cutmix_minmax[1]), + size=count) + yl = np.random.randint(0, img_h - cut_h, size=count) + xl = np.random.randint(0, img_w - cut_w, size=count) + yu = yl + cut_h + xu = xl + cut_w + return yl, yu, xl, xu + + def rand_bbox(self, img_shape, lam, margin=0., count=None): + """Standard CutMix bounding-box that generates a random square bbox + based on lambda value. This implementation includes support for + enforcing a border margin as percent of bbox dimensions. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + margin (float): Percentage of bbox dimension to enforce as margin + (reduce amount of box outside image). Default to 0. + count (int, optional): Number of bbox to generate. Default to None + """ + ratio = np.sqrt(1 - lam) + img_h, img_w = img_shape[-2:] + cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) + margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) + cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) + cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) + yl = np.clip(cy - cut_h // 2, 0, img_h) + yh = np.clip(cy + cut_h // 2, 0, img_h) + xl = np.clip(cx - cut_w // 2, 0, img_w) + xh = np.clip(cx + cut_w // 2, 0, img_w) + return yl, yh, xl, xh + + def cutmix_bbox_and_lam(self, img_shape, lam, count=None): + """Generate bbox and apply lambda correction. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + count (int, optional): Number of bbox to generate. Default to None + """ + if self.cutmix_minmax is not None: + yl, yu, xl, xu = self.rand_bbox_minmax(img_shape, count=count) + else: + yl, yu, xl, xu = self.rand_bbox(img_shape, lam, count=count) + if self.correct_lam or self.cutmix_minmax is not None: + bbox_area = (yu - yl) * (xu - xl) + lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1]) + return (yl, yu, xl, xu), lam + + @abstractmethod + def cutmix(self, imgs, gt_label): + pass + + +@AUGMENT.register_module(name='BatchCutMix') +class BatchCutMixLayer(BaseCutMixLayer): + r"""CutMix layer for a batch of data. + + CutMix is a method to improve the network's generalization capability. It's + proposed in `CutMix: Regularization Strategy to Train Strong Classifiers + with Localizable Features ` + + With this method, patches are cut and pasted among training images where + the ground truth labels are also mixed proportionally to the area of the + patches. + + Args: + alpha (float): Parameters for Beta distribution to generate the + mixing ratio. It should be a positive number. More details + can be found in :class:`BatchMixupLayer`. + num_classes (int): The number of classes + prob (float): The probability to execute cutmix. It should be in + range [0, 1]. Defaults to 1.0. + cutmix_minmax (List[float], optional): The min/max area ratio of the + patches. If not None, the bounding-box of patches is uniform + sampled within this ratio range, and the ``alpha`` will be ignored. + Otherwise, the bounding-box is generated according to the + ``alpha``. Defaults to None. + correct_lam (bool): Whether to apply lambda correction when cutmix bbox + clipped by image borders. Defaults to True. 
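The geometry behind `rand_bbox` and `cutmix_bbox_and_lam` above can be summarised in a few lines of standalone NumPy (illustration only): the cut size follows `sqrt(1 - lam)`, and the lambda correction recomputes the mixing weight from the area that actually survives clipping.

```python
import numpy as np

# Illustration of the CutMix bbox/lambda relationship (not the mmcls API).
img_h, img_w = 224, 224
lam = float(np.random.beta(1.0, 1.0))
ratio = np.sqrt(1.0 - lam)
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)

cy, cx = np.random.randint(img_h), np.random.randint(img_w)
yl, yh = np.clip(cy - cut_h // 2, 0, img_h), np.clip(cy + cut_h // 2, 0, img_h)
xl, xh = np.clip(cx - cut_w // 2, 0, img_w), np.clip(cx + cut_w // 2, 0, img_w)

# Lambda correction: the label-mixing weight matches the surviving source area.
lam_corrected = 1.0 - (yh - yl) * (xh - xl) / float(img_h * img_w)
print(round(lam, 3), round(lam_corrected, 3))
```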
+ + Note: + If the ``cutmix_minmax`` is None, how to generate the bounding-box of + patches according to the ``alpha``? + + First, generate a :math:`\lambda`, details can be found in + :class:`BatchMixupLayer`. And then, the area ratio of the bounding-box + is calculated by: + + .. math:: + \text{ratio} = \sqrt{1-\lambda} + """ + + def __init__(self, *args, **kwargs): + super(BatchCutMixLayer, self).__init__(*args, **kwargs) + + def cutmix(self, img, gt_label): + one_hot_gt_label = one_hot_encoding(gt_label, self.num_classes) + lam = np.random.beta(self.alpha, self.alpha) + batch_size = img.size(0) + index = torch.randperm(batch_size) + + (bby1, bby2, bbx1, + bbx2), lam = self.cutmix_bbox_and_lam(img.shape, lam) + img[:, :, bby1:bby2, bbx1:bbx2] = \ + img[index, :, bby1:bby2, bbx1:bbx2] + mixed_gt_label = lam * one_hot_gt_label + ( + 1 - lam) * one_hot_gt_label[index, :] + return img, mixed_gt_label + + def __call__(self, img, gt_label): + return self.cutmix(img, gt_label) diff --git a/mmcls/models/utils/augment/identity.py b/mmcls/models/utils/augment/identity.py new file mode 100644 index 0000000..ae3a3df --- /dev/null +++ b/mmcls/models/utils/augment/identity.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import AUGMENT +from .utils import one_hot_encoding + + +@AUGMENT.register_module(name='Identity') +class Identity(object): + """Change gt_label to one_hot encoding and keep img as the same. + + Args: + num_classes (int): The number of classes. + prob (float): MixUp probability. It should be in range [0, 1]. + Default to 1.0 + """ + + def __init__(self, num_classes, prob=1.0): + super(Identity, self).__init__() + + assert isinstance(num_classes, int) + assert isinstance(prob, float) and 0.0 <= prob <= 1.0 + + self.num_classes = num_classes + self.prob = prob + + def one_hot(self, gt_label): + return one_hot_encoding(gt_label, self.num_classes) + + def __call__(self, img, gt_label): + return img, self.one_hot(gt_label) diff --git a/mmcls/models/utils/augment/mixup.py b/mmcls/models/utils/augment/mixup.py new file mode 100644 index 0000000..e8899dd --- /dev/null +++ b/mmcls/models/utils/augment/mixup.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import numpy as np +import torch + +from .builder import AUGMENT +from .utils import one_hot_encoding + + +class BaseMixupLayer(object, metaclass=ABCMeta): + """Base class for MixupLayer. + + Args: + alpha (float): Parameters for Beta distribution to generate the + mixing ratio. It should be a positive number. + num_classes (int): The number of classes. + prob (float): MixUp probability. It should be in range [0, 1]. + Default to 1.0 + """ + + def __init__(self, alpha, num_classes, prob=1.0): + super(BaseMixupLayer, self).__init__() + + assert isinstance(alpha, float) and alpha > 0 + assert isinstance(num_classes, int) + assert isinstance(prob, float) and 0.0 <= prob <= 1.0 + + self.alpha = alpha + self.num_classes = num_classes + self.prob = prob + + @abstractmethod + def mixup(self, imgs, gt_label): + pass + + +@AUGMENT.register_module(name='BatchMixup') +class BatchMixupLayer(BaseMixupLayer): + r"""Mixup layer for a batch of data. + + Mixup is a method to reduces the memorization of corrupt labels and + increases the robustness to adversarial examples. It's + proposed in `mixup: Beyond Empirical Risk Minimization + ` + + This method simply linearly mix pairs of data and their labels. 
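The linear mixing itself is easy to verify with a standalone sketch (illustration only, assuming one-hot labels with 10 classes): image pairs and their labels are combined with the same weight `lam`.

```python
import torch

# Standalone sketch of batch-level mixup (not the mmcls API): each sample is mixed
# with a random partner from the same batch; labels are mixed with the same weight.
lam = 0.7
imgs = torch.randn(4, 3, 32, 32)
labels = torch.nn.functional.one_hot(torch.randint(0, 10, (4,)), num_classes=10).float()

index = torch.randperm(imgs.size(0))
mixed_imgs = lam * imgs + (1 - lam) * imgs[index]
mixed_labels = lam * labels + (1 - lam) * labels[index]
print(mixed_imgs.shape, mixed_labels.sum(dim=1))  # every mixed label still sums to 1
```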
+ + Args: + alpha (float): Parameters for Beta distribution to generate the + mixing ratio. It should be a positive number. More details + are in the note. + num_classes (int): The number of classes. + prob (float): The probability to execute mixup. It should be in + range [0, 1]. Default sto 1.0. + + Note: + The :math:`\alpha` (``alpha``) determines a random distribution + :math:`Beta(\alpha, \alpha)`. For each batch of data, we sample + a mixing ratio (marked as :math:`\lambda`, ``lam``) from the random + distribution. + """ + + def __init__(self, *args, **kwargs): + super(BatchMixupLayer, self).__init__(*args, **kwargs) + + def mixup(self, img, gt_label): + one_hot_gt_label = one_hot_encoding(gt_label, self.num_classes) + lam = np.random.beta(self.alpha, self.alpha) + batch_size = img.size(0) + index = torch.randperm(batch_size) + + mixed_img = lam * img + (1 - lam) * img[index, :] + mixed_gt_label = lam * one_hot_gt_label + ( + 1 - lam) * one_hot_gt_label[index, :] + + return mixed_img, mixed_gt_label + + def __call__(self, img, gt_label): + return self.mixup(img, gt_label) diff --git a/mmcls/models/utils/augment/resizemix.py b/mmcls/models/utils/augment/resizemix.py new file mode 100644 index 0000000..1506cc3 --- /dev/null +++ b/mmcls/models/utils/augment/resizemix.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import torch.nn.functional as F + +from mmcls.models.utils.augment.builder import AUGMENT +from .cutmix import BatchCutMixLayer +from .utils import one_hot_encoding + + +@AUGMENT.register_module(name='BatchResizeMix') +class BatchResizeMixLayer(BatchCutMixLayer): + r"""ResizeMix Random Paste layer for a batch of data. + + The ResizeMix will resize an image to a small patch and paste it on another + image. It's proposed in `ResizeMix: Mixing Data with Preserved Object + Information and True Labels `_ + + Args: + alpha (float): Parameters for Beta distribution to generate the + mixing ratio. It should be a positive number. More details + can be found in :class:`BatchMixupLayer`. + num_classes (int): The number of classes. + lam_min(float): The minimum value of lam. Defaults to 0.1. + lam_max(float): The maximum value of lam. Defaults to 0.8. + interpolation (str): algorithm used for upsampling: + 'nearest' | 'linear' | 'bilinear' | 'bicubic' | 'trilinear' | + 'area'. Default to 'bilinear'. + prob (float): The probability to execute resizemix. It should be in + range [0, 1]. Defaults to 1.0. + cutmix_minmax (List[float], optional): The min/max area ratio of the + patches. If not None, the bounding-box of patches is uniform + sampled within this ratio range, and the ``alpha`` will be ignored. + Otherwise, the bounding-box is generated according to the + ``alpha``. Defaults to None. + correct_lam (bool): Whether to apply lambda correction when cutmix bbox + clipped by image borders. Defaults to True + **kwargs: Any other parameters accpeted by :class:`BatchCutMixLayer`. + + Note: + The :math:`\lambda` (``lam``) is the mixing ratio. It's a random + variable which follows :math:`Beta(\alpha, \alpha)` and is mapped + to the range [``lam_min``, ``lam_max``]. + + .. math:: + \lambda = \frac{Beta(\alpha, \alpha)} + {\lambda_{max} - \lambda_{min}} + \lambda_{min} + + And the resize ratio of source images is calculated by :math:`\lambda`: + + .. 
math:: + \text{ratio} = \sqrt{1-\lambda} + """ + + def __init__(self, + alpha, + num_classes, + lam_min: float = 0.1, + lam_max: float = 0.8, + interpolation='bilinear', + prob=1.0, + cutmix_minmax=None, + correct_lam=True, + **kwargs): + super(BatchResizeMixLayer, self).__init__( + alpha=alpha, + num_classes=num_classes, + prob=prob, + cutmix_minmax=cutmix_minmax, + correct_lam=correct_lam, + **kwargs) + self.lam_min = lam_min + self.lam_max = lam_max + self.interpolation = interpolation + + def cutmix(self, img, gt_label): + one_hot_gt_label = one_hot_encoding(gt_label, self.num_classes) + + lam = np.random.beta(self.alpha, self.alpha) + lam = lam * (self.lam_max - self.lam_min) + self.lam_min + batch_size = img.size(0) + index = torch.randperm(batch_size) + + (bby1, bby2, bbx1, + bbx2), lam = self.cutmix_bbox_and_lam(img.shape, lam) + + img[:, :, bby1:bby2, bbx1:bbx2] = F.interpolate( + img[index], + size=(bby2 - bby1, bbx2 - bbx1), + mode=self.interpolation) + mixed_gt_label = lam * one_hot_gt_label + ( + 1 - lam) * one_hot_gt_label[index, :] + return img, mixed_gt_label diff --git a/mmcls/models/utils/augment/utils.py b/mmcls/models/utils/augment/utils.py new file mode 100644 index 0000000..e972d54 --- /dev/null +++ b/mmcls/models/utils/augment/utils.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn.functional as F + + +def one_hot_encoding(gt, num_classes): + """Change gt_label to one_hot encoding. + + If the shape has 2 or more + dimensions, return it without encoding. + Args: + gt (Tensor): The gt label with shape (N,) or shape (N, */). + num_classes (int): The number of classes. + Return: + Tensor: One hot gt label. + """ + if gt.ndim == 1: + # multi-class classification + return F.one_hot(gt, num_classes=num_classes) + else: + # binary classification + # example. [[0], [1], [1]] + # multi-label classification + # example. [[0, 1, 1], [1, 0, 0], [1, 1, 1]] + return gt diff --git a/mmcls/models/utils/channel_shuffle.py b/mmcls/models/utils/channel_shuffle.py new file mode 100644 index 0000000..27006a8 --- /dev/null +++ b/mmcls/models/utils/channel_shuffle.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def channel_shuffle(x, groups): + """Channel Shuffle operation. + + This function enables cross-group information flow for multiple groups + convolution layers. + + Args: + x (Tensor): The input tensor. + groups (int): The number of groups to divide the input tensor + in the channel dimension. + + Returns: + Tensor: The output tensor after channel shuffle operation. + """ + + batch_size, num_channels, height, width = x.size() + assert (num_channels % groups == 0), ('num_channels should be ' + 'divisible by groups') + channels_per_group = num_channels // groups + + x = x.view(batch_size, groups, channels_per_group, height, width) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(batch_size, -1, height, width) + + return x diff --git a/mmcls/models/utils/embed.py b/mmcls/models/utils/embed.py new file mode 100644 index 0000000..ff65fc4 --- /dev/null +++ b/mmcls/models/utils/embed.py @@ -0,0 +1,420 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
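A quick usage sketch of the `channel_shuffle` helper added above; the `mmcls.models.utils` import path is assumed from this patch.

```python
import torch

# Assumed import path for the channel_shuffle helper added in this patch.
from mmcls.models.utils import channel_shuffle

x = torch.arange(2 * 6, dtype=torch.float32).view(2, 6, 1, 1)
y = channel_shuffle(x, groups=2)
# With 2 groups, channels [0 1 2 | 3 4 5] are interleaved to [0 3 1 4 2 5].
print(y[0].flatten().tolist())  # [0.0, 3.0, 1.0, 4.0, 2.0, 5.0]
```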
+import warnings +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.cnn.bricks.transformer import AdaptivePadding +from mmcv.runner.base_module import BaseModule + +from .helpers import to_2tuple + + +def resize_pos_embed(pos_embed, + src_shape, + dst_shape, + mode='bicubic', + num_extra_tokens=1): + """Resize pos_embed weights. + + Args: + pos_embed (torch.Tensor): Position embedding weights with shape + [1, L, C]. + src_shape (tuple): The resolution of downsampled origin training + image, in format (H, W). + dst_shape (tuple): The resolution of downsampled new training + image, in format (H, W). + mode (str): Algorithm used for upsampling. Choose one from 'nearest', + 'linear', 'bilinear', 'bicubic' and 'trilinear'. + Defaults to 'bicubic'. + num_extra_tokens (int): The number of extra tokens, such as cls_token. + Defaults to 1. + + Returns: + torch.Tensor: The resized pos_embed of shape [1, L_new, C] + """ + if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]: + return pos_embed + assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]' + _, L, C = pos_embed.shape + src_h, src_w = src_shape + assert L == src_h * src_w + num_extra_tokens, \ + f"The length of `pos_embed` ({L}) doesn't match the expected " \ + f'shape ({src_h}*{src_w}+{num_extra_tokens}). Please check the' \ + '`img_size` argument.' + extra_tokens = pos_embed[:, :num_extra_tokens] + + src_weight = pos_embed[:, num_extra_tokens:] + src_weight = src_weight.reshape(1, src_h, src_w, C).permute(0, 3, 1, 2) + + dst_weight = F.interpolate( + src_weight, size=dst_shape, align_corners=False, mode=mode) + dst_weight = torch.flatten(dst_weight, 2).transpose(1, 2) + + return torch.cat((extra_tokens, dst_weight), dim=1) + + +def resize_relative_position_bias_table(src_shape, dst_shape, table, num_head): + """Resize relative position bias table. + + Args: + src_shape (int): The resolution of downsampled origin training + image, in format (H, W). + dst_shape (int): The resolution of downsampled new training + image, in format (H, W). + table (tensor): The relative position bias of the pretrained model. + num_head (int): Number of attention heads. + + Returns: + torch.Tensor: The resized relative position bias table. + """ + from scipy import interpolate + + def geometric_progression(a, r, n): + return a * (1.0 - r**n) / (1.0 - r) + + left, right = 1.01, 1.5 + while right - left > 1e-6: + q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_shape // 2) + if gp > dst_shape // 2: + right = q + else: + left = q + + dis = [] + cur = 1 + for i in range(src_shape // 2): + dis.append(cur) + cur += q**(i + 1) + + r_ids = [-_ for _ in reversed(dis)] + + x = r_ids + [0] + dis + y = r_ids + [0] + dis + + t = dst_shape // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + + all_rel_pos_bias = [] + + for i in range(num_head): + z = table[:, i].view(src_shape, src_shape).float().numpy() + f_cubic = interpolate.interp2d(x, y, z, kind='cubic') + all_rel_pos_bias.append( + torch.Tensor(f_cubic(dx, + dy)).contiguous().view(-1, + 1).to(table.device)) + new_rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) + return new_rel_pos_bias + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + img_size (int | tuple): The size of input image. Default: 224 + in_channels (int): The num of input channels. 
Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None + conv_cfg (dict, optional): The config dict for conv layers. + Default: None + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None + """ + + def __init__(self, + img_size=224, + in_channels=3, + embed_dims=768, + norm_cfg=None, + conv_cfg=None, + init_cfg=None): + super(PatchEmbed, self).__init__(init_cfg) + warnings.warn('The `PatchEmbed` in mmcls will be deprecated. ' + 'Please use `mmcv.cnn.bricks.transformer.PatchEmbed`. ' + "It's more general and supports dynamic input shape") + + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + self.img_size = img_size + self.embed_dims = embed_dims + + # Use conv layer to embed + conv_cfg = conv_cfg or dict() + _conv_cfg = dict( + type='Conv2d', kernel_size=16, stride=16, padding=0, dilation=1) + _conv_cfg.update(conv_cfg) + self.projection = build_conv_layer(_conv_cfg, in_channels, embed_dims) + + # Calculate how many patches a input image is splited to. + h_out, w_out = [(self.img_size[i] + 2 * self.projection.padding[i] - + self.projection.dilation[i] * + (self.projection.kernel_size[i] - 1) - 1) // + self.projection.stride[i] + 1 for i in range(2)] + + self.patches_resolution = (h_out, w_out) + self.num_patches = h_out * w_out + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't " \ + f'match model ({self.img_size[0]}*{self.img_size[1]}).' + # The output size is (B, N, D), where N=H*W/P/P, D is embid_dim + x = self.projection(x).flatten(2).transpose(1, 2) + + if self.norm is not None: + x = self.norm(x) + + return x + + +# Modified from pytorch-image-models +class HybridEmbed(BaseModule): + """CNN Feature Map Embedding. + + Extract feature map from CNN, flatten, + project to embedding dim. + + Args: + backbone (nn.Module): CNN backbone + img_size (int | tuple): The size of input image. Default: 224 + feature_size (int | tuple, optional): Size of feature map extracted by + CNN backbone. Default: None + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_cfg (dict, optional): The config dict for conv layers. + Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. 
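The patch arithmetic used by `PatchEmbed` above reduces to a strided convolution; a standalone sketch with illustrative sizes (plain PyTorch, not the mmcls class):

```python
import torch
import torch.nn as nn

# Standalone sketch of the PatchEmbed arithmetic: a 16x16 conv with stride 16
# turns a 224x224 image into 14 * 14 = 196 tokens of dimension embed_dims.
proj = nn.Conv2d(3, 768, kernel_size=16, stride=16)
x = torch.randn(1, 3, 224, 224)
tokens = proj(x).flatten(2).transpose(1, 2)  # (B, N, embed_dims)
print(tokens.shape)                          # torch.Size([1, 196, 768])
```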
+ """ + + def __init__(self, + backbone, + img_size=224, + feature_size=None, + in_channels=3, + embed_dims=768, + conv_cfg=None, + init_cfg=None): + super(HybridEmbed, self).__init__(init_cfg) + assert isinstance(backbone, nn.Module) + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + self.img_size = img_size + self.backbone = backbone + if feature_size is None: + with torch.no_grad(): + # FIXME this is hacky, but most reliable way of + # determining the exact dim of the output feature + # map for all networks, the feature metadata has + # reliable channel and stride info, but using + # stride to calc feature dim requires info about padding of + # each stage that isn't captured. + training = backbone.training + if training: + backbone.eval() + o = self.backbone( + torch.zeros(1, in_channels, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + # last feature if backbone outputs list/tuple of features + o = o[-1] + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + self.num_patches = feature_size[0] * feature_size[1] + + # Use conv layer to embed + conv_cfg = conv_cfg or dict() + _conv_cfg = dict( + type='Conv2d', kernel_size=1, stride=1, padding=0, dilation=1) + _conv_cfg.update(conv_cfg) + self.projection = build_conv_layer(_conv_cfg, feature_dim, embed_dims) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + # last feature if backbone outputs list/tuple of features + x = x[-1] + x = self.projection(x).flatten(2).transpose(1, 2) + return x + + +class PatchMerging(BaseModule): + """Merge patch feature map. Modified from mmcv, which uses pre-norm layer + whereas Swin V2 uses post-norm here. Therefore, add extra parameter to + decide whether use post-norm or not. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map ((used in Swin Transformer)). + Our implementation uses `nn.Unfold` to + merge patches, which is about 25% faster than the original + implementation. However, we need to modify pretrained + models for compatibility. + + Args: + in_channels (int): The num of input channels. + to gets fully covered by filter and stride you specified. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Defaults to None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Defaults to "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults to False. + norm_cfg (dict, optional): Config dict for normalization layer. + Defaults to dict(type='LN'). + is_post_norm (bool): Whether to use post normalization here. + Defaults to False. + init_cfg (dict, optional): The extra config for initialization. 
+ Defaults to None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + is_post_norm=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.is_post_norm = is_post_norm + + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adaptive_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adaptive_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + if norm_cfg is not None: + # build pre or post norm layer based on different channels + if self.is_post_norm: + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + else: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + + if self.adaptive_padding: + x = self.adaptive_padding(x) + H, W = x.shape[-2:] + + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + x = self.sampler(x) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + + if self.is_post_norm: + # use post-norm here + x = self.reduction(x) + x = self.norm(x) if self.norm else x + else: + x = self.norm(x) if self.norm else x + x = self.reduction(x) + + return x, output_size diff --git a/mmcls/models/utils/helpers.py b/mmcls/models/utils/helpers.py new file mode 100644 index 0000000..bf55424 --- /dev/null +++ b/mmcls/models/utils/helpers.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import collections.abc +import warnings +from itertools import repeat + +import torch +from mmcv.utils import digit_version + + +def is_tracing() -> bool: + """Determine whether the model is called during the tracing of code with + ``torch.jit.trace``.""" + if digit_version(torch.__version__) >= digit_version('1.6.0'): + on_trace = torch.jit.is_tracing() + # In PyTorch 1.6, torch.jit.is_tracing has a bug. 
+ # Refers to https://github.com/pytorch/pytorch/issues/42448 + if isinstance(on_trace, bool): + return on_trace + else: + return torch._C._is_tracing() + else: + warnings.warn( + 'torch.jit.is_tracing is only supported after v1.6.0. ' + 'Therefore is_tracing returns False automatically. Please ' + 'set on_trace manually if you are using trace.', UserWarning) + return False + + +# From PyTorch internals +def _ntuple(n): + """A `to_tuple` function generator. + + It returns a function, this function will repeat the input to a tuple of + length ``n`` if the input is not an Iterable object, otherwise, return the + input directly. + + Args: + n (int): The number of the target length. + """ + + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple diff --git a/mmcls/models/utils/inverted_residual.py b/mmcls/models/utils/inverted_residual.py new file mode 100644 index 0000000..7c43294 --- /dev/null +++ b/mmcls/models/utils/inverted_residual.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks import DropPath +from mmcv.runner import BaseModule + +from .se_layer import SELayer + + +class InvertedResidual(BaseModule): + """Inverted Residual Block. + + Args: + in_channels (int): The input channels of this module. + out_channels (int): The output channels of this module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Defaults to 3. + stride (int): The stride of the depthwise convolution. Defaults to 1. + se_cfg (dict, optional): Config dict for se layer. Defaults to None, + which means no se layer. + conv_cfg (dict): Config dict for convolution layer. Defaults to None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='ReLU')``. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict | list[dict], optional): Initialization config dict. 
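A usage sketch for the `InvertedResidual` block documented above; the `mmcls.models.utils` import path is assumed from this patch, and an installed `mmcv-full` is required for `ConvModule` and `DropPath`.

```python
import torch

# Assumed import path; requires mmcv-full for ConvModule and DropPath.
from mmcls.models.utils import InvertedResidual

block = InvertedResidual(in_channels=32, out_channels=32, mid_channels=96)
x = torch.randn(1, 32, 56, 56)
# stride=1 with equal in/out channels enables the residual shortcut.
print(block(x).shape)  # torch.Size([1, 32, 56, 56])
```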
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_path_rate=0., + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2] + self.with_cp = with_cp + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.with_se = se_cfg is not None + self.with_expand_conv = (mid_channels != in_channels) + + if self.with_se: + assert isinstance(se_cfg, dict) + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=mid_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if self.with_se: + self.se = SELayer(**se_cfg) + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): The input tensor. + + Returns: + torch.Tensor: The output tensor. + """ + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + self.drop_path(out) + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/mmcls/models/utils/layer_scale.py b/mmcls/models/utils/layer_scale.py new file mode 100644 index 0000000..fbd89bc --- /dev/null +++ b/mmcls/models/utils/layer_scale.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn + + +class LayerScale(nn.Module): + """LayerScale layer. + + Args: + dim (int): Dimension of input features. + inplace (bool): inplace: can optionally do the + operation in-place. Default: ``False`` + data_format (str): The input data format, can be 'channels_last' + and 'channels_first', representing (B, C, H, W) and + (B, N, C) format data respectively. + """ + + def __init__(self, + dim: int, + inplace: bool = False, + data_format: str = 'channels_last'): + super().__init__() + assert data_format in ('channels_last', 'channels_first'), \ + "'data_format' could only be channels_last or channels_first." + self.inplace = inplace + self.data_format = data_format + self.weight = nn.Parameter(torch.ones(dim) * 1e-5) + + def forward(self, x): + if self.data_format == 'channels_first': + if self.inplace: + return x.mul_(self.weight.view(-1, 1, 1)) + else: + return x * self.weight.view(-1, 1, 1) + return x.mul_(self.weight) if self.inplace else x * self.weight diff --git a/mmcls/models/utils/make_divisible.py b/mmcls/models/utils/make_divisible.py new file mode 100644 index 0000000..1ec7468 --- /dev/null +++ b/mmcls/models/utils/make_divisible.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. 
+ + This function rounds the channel number down to the nearest value that can + be divisible by the divisor. + + Args: + value (int): The original channel number. + divisor (int): The divisor to fully divide the channel number. + min_value (int, optional): The minimum value of the output channel. + Default: None, means that the minimum value equal to the divisor. + min_ratio (float): The minimum ratio of the rounded channel + number to the original channel number. Default: 0.9. + Returns: + int: The modified output channel number + """ + + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than (1-min_ratio). + if new_value < min_ratio * value: + new_value += divisor + return new_value diff --git a/mmcls/models/utils/position_encoding.py b/mmcls/models/utils/position_encoding.py new file mode 100644 index 0000000..99f32de --- /dev/null +++ b/mmcls/models/utils/position_encoding.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.runner.base_module import BaseModule + + +class ConditionalPositionEncoding(BaseModule): + """The Conditional Position Encoding (CPE) module. + + The CPE is the implementation of 'Conditional Positional Encodings + for Vision Transformers '_. + + Args: + in_channels (int): Number of input channels. + embed_dims (int): The feature dimension. Default: 768. + stride (int): Stride of conv layer. Default: 1. + """ + + def __init__(self, in_channels, embed_dims=768, stride=1, init_cfg=None): + super(ConditionalPositionEncoding, self).__init__(init_cfg=init_cfg) + self.proj = nn.Conv2d( + in_channels, + embed_dims, + kernel_size=3, + stride=stride, + padding=1, + bias=True, + groups=embed_dims) + self.stride = stride + + def forward(self, x, hw_shape): + B, N, C = x.shape + H, W = hw_shape + feat_token = x + # convert (B, N, C) to (B, C, H, W) + cnn_feat = feat_token.transpose(1, 2).view(B, C, H, W).contiguous() + if self.stride == 1: + x = self.proj(cnn_feat) + cnn_feat + else: + x = self.proj(cnn_feat) + x = x.flatten(2).transpose(1, 2) + return x diff --git a/mmcls/models/utils/se_layer.py b/mmcls/models/utils/se_layer.py new file mode 100644 index 0000000..47a830a --- /dev/null +++ b/mmcls/models/utils/se_layer.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from .make_divisible import make_divisible + + +class SELayer(BaseModule): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + squeeze_channels (None or int): The intermediate channel number of + SElayer. Default: None, means the value of ``squeeze_channels`` + is ``make_divisible(channels // ratio, divisor)``. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will + be ``make_divisible(channels // ratio, divisor)``. Only used when + ``squeeze_channels`` is None. Default: 16. + divisor(int): The divisor to true divide the channel number. Only + used when ``squeeze_channels`` is None. Default: 8. + conv_cfg (None or dict): Config dict for convolution layer. Default: + None, which means using conv2d. + return_weight(bool): Whether to return the weight. Default: False. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configurated + by this dict. 
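A few worked values for `make_divisible` above (import path assumed from this patch): rounding goes to the nearest multiple of `divisor`, and a result below `min_ratio` of the input is bumped up by one divisor.

```python
# Assumed import path for the make_divisible helper added in this patch.
from mmcls.models.utils import make_divisible

print(make_divisible(32 * 1.25, 8))  # 40, already a multiple of 8 after rounding
print(make_divisible(35, 8))         # 32, allowed because 32 >= 0.9 * 35
print(make_divisible(18, 8))         # 24, since 16 < 0.9 * 18 the value is bumped up
```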
If act_cfg is a sequence of dicts, the first + activation layer will be configurated by the first dict and the + second activation layer will be configurated by the second dict. + Default: (dict(type='ReLU'), dict(type='Sigmoid')) + """ + + def __init__(self, + channels, + squeeze_channels=None, + ratio=16, + divisor=8, + bias='auto', + conv_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + return_weight=False, + init_cfg=None): + super(SELayer, self).__init__(init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmcv.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + if squeeze_channels is None: + squeeze_channels = make_divisible(channels // ratio, divisor) + assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \ + '"squeeze_channels" should be a positive integer, but get ' + \ + f'{squeeze_channels} instead.' + self.return_weight = return_weight + self.conv1 = ConvModule( + in_channels=channels, + out_channels=squeeze_channels, + kernel_size=1, + stride=1, + bias=bias, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=squeeze_channels, + out_channels=channels, + kernel_size=1, + stride=1, + bias=bias, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + if self.return_weight: + return out + else: + return x * out diff --git a/mmcls/utils/__init__.py b/mmcls/utils/__init__.py new file mode 100644 index 0000000..abfea81 --- /dev/null +++ b/mmcls/utils/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .collect_env import collect_env +from .device import auto_select_device +from .distribution import wrap_distributed_model, wrap_non_distributed_model +from .logger import get_root_logger, load_json_log +from .setup_env import setup_multi_processes + +__all__ = [ + 'collect_env', 'get_root_logger', 'load_json_log', 'setup_multi_processes', + 'wrap_non_distributed_model', 'wrap_distributed_model', + 'auto_select_device' +] diff --git a/mmcls/utils/collect_env.py b/mmcls/utils/collect_env.py new file mode 100644 index 0000000..adb5030 --- /dev/null +++ b/mmcls/utils/collect_env.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import collect_env as collect_base_env +from mmcv.utils import get_git_hash + +import mmcls + + +def collect_env(): + """Collect the information of the running environments.""" + env_info = collect_base_env() + env_info['MMClassification'] = mmcls.__version__ + '+' + get_git_hash()[:7] + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/mmcls/utils/device.py b/mmcls/utils/device.py new file mode 100644 index 0000000..ee4848a --- /dev/null +++ b/mmcls/utils/device.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch +from mmcv.utils import digit_version + + +def auto_select_device() -> str: + mmcv_version = digit_version(mmcv.__version__) + if mmcv_version >= digit_version('1.6.0'): + from mmcv.device import get_device + return get_device() + elif torch.cuda.is_available(): + return 'cuda' + else: + return 'cpu' diff --git a/mmcls/utils/distribution.py b/mmcls/utils/distribution.py new file mode 100644 index 0000000..c6e4c72 --- /dev/null +++ b/mmcls/utils/distribution.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
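A usage sketch of the `SELayer` defined above; the `mmcls.models.utils` import path is assumed from this patch, and `mmcv-full` must be installed for `ConvModule`.

```python
import torch

# Assumed import path; requires mmcv-full for ConvModule.
from mmcls.models.utils import SELayer

se = SELayer(channels=64, ratio=16)  # squeeze channels: make_divisible(64 // 16, 8) == 8
x = torch.randn(2, 64, 32, 32)
print(se(x).shape)                   # torch.Size([2, 64, 32, 32]); channels re-weighted, shape unchanged
```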
+ + +def wrap_non_distributed_model(model, device='cuda', dim=0, *args, **kwargs): + """Wrap module in non-distributed environment by device type. + + - For CUDA, wrap as :obj:`mmcv.parallel.MMDataParallel`. + - For MPS, wrap as :obj:`mmcv.device.mps.MPSDataParallel`. + - For CPU & IPU, not wrap the model. + + Args: + model(:class:`nn.Module`): model to be parallelized. + device(str): device type, cuda, cpu or mlu. Defaults to cuda. + dim(int): Dimension used to scatter the data. Defaults to 0. + + Returns: + model(nn.Module): the model to be parallelized. + """ + if device == 'npu': + from mmcv.device.npu import NPUDataParallel + model = NPUDataParallel(model.npu(), dim=dim, *args, **kwargs) + elif device == 'mlu': + from mmcv.device.mlu import MLUDataParallel + model = MLUDataParallel(model.mlu(), dim=dim, *args, **kwargs) + elif device == 'cuda': + from mmcv.parallel import MMDataParallel + model = MMDataParallel(model.cuda(), dim=dim, *args, **kwargs) + elif device == 'cpu': + model = model.cpu() + elif device == 'ipu': + model = model.cpu() + elif device == 'mps': + from mmcv.device import mps + model = mps.MPSDataParallel(model.to('mps'), dim=dim, *args, **kwargs) + else: + raise RuntimeError(f'Unavailable device "{device}"') + + return model + + +def wrap_distributed_model(model, device='cuda', *args, **kwargs): + """Build DistributedDataParallel module by device type. + + - For CUDA, wrap as :obj:`mmcv.parallel.MMDistributedDataParallel`. + - Other device types are not supported by now. + + Args: + model(:class:`nn.Module`): module to be parallelized. + device(str): device type, mlu or cuda. + + Returns: + model(:class:`nn.Module`): the module to be parallelized + + References: + .. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel. + DistributedDataParallel.html + """ + if device == 'npu': + from mmcv.device.npu import NPUDistributedDataParallel + from torch.npu import current_device + model = NPUDistributedDataParallel( + model.npu(), *args, device_ids=[current_device()], **kwargs) + elif device == 'mlu': + import os + + from mmcv.device.mlu import MLUDistributedDataParallel + model = MLUDistributedDataParallel( + model.mlu(), + *args, + device_ids=[int(os.environ['LOCAL_RANK'])], + **kwargs) + elif device == 'cuda': + from mmcv.parallel import MMDistributedDataParallel + from torch.cuda import current_device + model = MMDistributedDataParallel( + model.cuda(), *args, device_ids=[current_device()], **kwargs) + else: + raise RuntimeError(f'Unavailable device "{device}"') + + return model diff --git a/mmcls/utils/logger.py b/mmcls/utils/logger.py new file mode 100644 index 0000000..2d77fcb --- /dev/null +++ b/mmcls/utils/logger.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import logging +from collections import defaultdict + +from mmcv.utils import get_logger + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Get root logger. + + Args: + log_file (str, optional): File path of log. Defaults to None. + log_level (int, optional): The level of logger. + Defaults to :obj:`logging.INFO`. + + Returns: + :obj:`logging.Logger`: The obtained logger + """ + return get_logger('mmcls', log_file, log_level) + + +def load_json_log(json_log): + """load and convert json_logs to log_dicts. + + Args: + json_log (str): The path of the json log file. + + Returns: + dict[int, dict[str, list]]: + Key is the epoch, value is a sub dict. The keys in each sub dict + are different metrics, e.g. 
memory, bbox_mAP, and the value is a + list of corresponding values in all iterations in this epoch. + + .. code-block:: python + + # An example output + { + 1: {'iter': [100, 200, 300], 'loss': [6.94, 6.73, 6.53]}, + 2: {'iter': [100, 200, 300], 'loss': [6.33, 6.20, 6.07]}, + ... + } + """ + log_dict = dict() + with open(json_log, 'r') as log_file: + for line in log_file: + log = json.loads(line.strip()) + # skip lines without `epoch` field + if 'epoch' not in log: + continue + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dict diff --git a/mmcls/utils/setup_env.py b/mmcls/utils/setup_env.py new file mode 100644 index 0000000..21def2f --- /dev/null +++ b/mmcls/utils/setup_env.py @@ -0,0 +1,47 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import platform +import warnings + +import cv2 +import torch.multiprocessing as mp + + +def setup_multi_processes(cfg): + """Setup multi-processing environment variables.""" + # set multi-process start method as `fork` to speed up the training + if platform.system() != 'Windows': + mp_start_method = cfg.get('mp_start_method', 'fork') + current_method = mp.get_start_method(allow_none=True) + if current_method is not None and current_method != mp_start_method: + warnings.warn( + f'Multi-processing start method `{mp_start_method}` is ' + f'different from the previous setting `{current_method}`.' + f'It will be force set to `{mp_start_method}`. You can change ' + f'this behavior by changing `mp_start_method` in your config.') + mp.set_start_method(mp_start_method, force=True) + + # disable opencv multithreading to avoid system being overloaded + opencv_num_threads = cfg.get('opencv_num_threads', 0) + cv2.setNumThreads(opencv_num_threads) + + # setup OMP threads + # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa + if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1: + omp_num_threads = 1 + warnings.warn( + f'Setting OMP_NUM_THREADS environment variable for each process ' + f'to be {omp_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) + + # setup MKL threads + if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1: + mkl_num_threads = 1 + warnings.warn( + f'Setting MKL_NUM_THREADS environment variable for each process ' + f'to be {mkl_num_threads} in default, to avoid your system being ' + f'overloaded, please further tune the variable for optimal ' + f'performance in your application as needed.') + os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) diff --git a/mmcls/version.py b/mmcls/version.py new file mode 100644 index 0000000..91d1cf5 --- /dev/null +++ b/mmcls/version.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved + +__version__ = '0.25.0' + + +def parse_version_info(version_str): + """Parse a version string into a tuple. + + Args: + version_str (str): The version string. + Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 
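A small usage sketch of `load_json_log` above, writing a made-up two-iteration log to a temporary file; `load_json_log` is exported from `mmcls.utils` in this patch, and the log content is fabricated only for illustration.

```python
import json
import os
import tempfile

from mmcls.utils import load_json_log

# Made-up log lines; entries without an `epoch` key are skipped.
lines = [
    dict(env_info='...'),
    dict(mode='train', epoch=1, iter=100, loss=6.94),
    dict(mode='train', epoch=1, iter=200, loss=6.73),
]
with tempfile.NamedTemporaryFile('w', suffix='.log.json', delete=False) as f:
    f.write('\n'.join(json.dumps(line) for line in lines))

log_dict = load_json_log(f.name)
print(log_dict[1]['iter'], log_dict[1]['loss'])  # [100, 200] [6.94, 6.73]
os.remove(f.name)
```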
+ """ + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) + +__all__ = ['__version__', 'version_info', 'parse_version_info'] diff --git a/model-index.yml b/model-index.yml new file mode 100644 index 0000000..56c7dc9 --- /dev/null +++ b/model-index.yml @@ -0,0 +1,34 @@ +Import: + - configs/mobilenet_v2/metafile.yml + - configs/resnet/metafile.yml + - configs/res2net/metafile.yml + - configs/resnext/metafile.yml + - configs/seresnet/metafile.yml + - configs/shufflenet_v1/metafile.yml + - configs/shufflenet_v2/metafile.yml + - configs/swin_transformer/metafile.yml + - configs/swin_transformer_v2/metafile.yml + - configs/vgg/metafile.yml + - configs/repvgg/metafile.yml + - configs/tnt/metafile.yml + - configs/vision_transformer/metafile.yml + - configs/t2t_vit/metafile.yml + - configs/mlp_mixer/metafile.yml + - configs/conformer/metafile.yml + - configs/regnet/metafile.yml + - configs/deit/metafile.yml + - configs/twins/metafile.yml + - configs/efficientnet/metafile.yml + - configs/convnext/metafile.yml + - configs/hrnet/metafile.yml + - configs/repmlp/metafile.yml + - configs/wrn/metafile.yml + - configs/van/metafile.yml + - configs/cspnet/metafile.yml + - configs/convmixer/metafile.yml + - configs/densenet/metafile.yml + - configs/poolformer/metafile.yml + - configs/csra/metafile.yml + - configs/mvit/metafile.yml + - configs/efficientformer/metafile.yml + - configs/hornet/metafile.yml diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..6da5ade --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +-r requirements/optional.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000..a2d2c53 --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,6 @@ +docutils==0.17.1 +myst-parser +git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==4.5.0 +sphinx-copybutton +sphinx_markdown_tables diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt new file mode 100644 index 0000000..52d725c --- /dev/null +++ b/requirements/mminstall.txt @@ -0,0 +1,2 @@ +mmcv-full>=1.4.2,<1.9.0 +einops>=0.6.0 \ No newline at end of file diff --git a/requirements/optional.txt b/requirements/optional.txt new file mode 100644 index 0000000..cc02280 --- /dev/null +++ b/requirements/optional.txt @@ -0,0 +1,5 @@ +albumentations>=0.3.2 --no-binary qudida,albumentations +colorama +requests +rich +scipy diff --git a/requirements/readthedocs.txt b/requirements/readthedocs.txt new file mode 100644 index 0000000..3b34625 --- /dev/null +++ b/requirements/readthedocs.txt @@ -0,0 +1,3 @@ +mmcv>=1.4.2 +torch +torchvision diff --git a/requirements/runtime.txt b/requirements/runtime.txt new file mode 100644 index 0000000..7810a20 --- /dev/null +++ b/requirements/runtime.txt @@ -0,0 +1,4 @@ +matplotlib>=3.1.0 +numpy +packaging +einops>=0.6.0 \ No newline at end of file diff --git a/requirements/tests.txt b/requirements/tests.txt new file mode 100644 index 0000000..29d351b --- /dev/null +++ b/requirements/tests.txt @@ -0,0 +1,8 @@ +codecov +flake8 +interrogate +isort==4.3.21 +mmdet +pytest +xdoctest >= 0.10.0 +yapf diff --git a/resources/gpvit_release_intro.png 
b/resources/gpvit_release_intro.png new file mode 100644 index 0000000..3e348f0 Binary files /dev/null and b/resources/gpvit_release_intro.png differ diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..8c24f5f --- /dev/null +++ b/setup.cfg @@ -0,0 +1,23 @@ +[bdist_wheel] +universal=1 + +[aliases] +test=pytest + +[yapf] +based_on_style = pep8 +blank_line_before_nested_class_or_def = true +split_before_expression_after_opening_paren = true + +[isort] +line_length = 79 +multi_line_output = 0 +extra_standard_library = pkg_resources,setuptools +known_first_party = mmcls +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[codespell] +skip = *.ipynb +quiet-level = 3 +ignore-words-list = patten,confectionary,nd,ty,formating,dows diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..e7a9c63 --- /dev/null +++ b/setup.py @@ -0,0 +1,194 @@ +import os +import os.path as osp +import shutil +import sys +import warnings +from setuptools import find_packages, setup + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +def get_version(): + version_file = 'mmcls/version.py' + with open(version_file, 'r', encoding='utf-8') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. + + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + if '--' in version: + # the `extras_require` doesn't accept options. 
+ version = version.split('--')[0].strip() + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. + """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + mode = 'copy' + else: + return + + filenames = ['tools', 'configs', 'model-index.yml'] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmcls', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + try: + os.symlink(src_relpath, tar_path) + except OSError: + # Creating a symbolic link on windows may raise an + # `OSError: [WinError 1314]` due to privilege. 
If + # the error happens, the src file will be copied + mode = 'copy' + warnings.warn( + f'Failed to create a symbolic link for {src_relpath}, ' + f'and it will be copied to {tar_path}') + else: + continue + + if mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extension() + setup( + name='mmcls', + version=get_version(), + description='OpenMMLab Image Classification Toolbox and Benchmark', + long_description=readme(), + long_description_content_type='text/markdown', + keywords='computer vision, image classification', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + include_package_data=True, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + ], + url='https://github.com/open-mmlab/mmclassification', + author='MMClassification Contributors', + author_email='openmmlab@gmail.com', + license='Apache License 2.0', + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + 'mim': parse_requirements('requirements/mminstall.txt'), + }, + zip_safe=False) diff --git a/tests/data/color.jpg b/tests/data/color.jpg new file mode 100644 index 0000000..2f19ebc Binary files /dev/null and b/tests/data/color.jpg differ diff --git a/tests/data/dataset/a/1.JPG b/tests/data/dataset/a/1.JPG new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/dataset/ann.txt b/tests/data/dataset/ann.txt new file mode 100644 index 0000000..a21a9c4 --- /dev/null +++ b/tests/data/dataset/ann.txt @@ -0,0 +1,3 @@ +a/1.JPG 0 +b/2.jpeg 1 +b/subb/2.jpeg 1 diff --git a/tests/data/dataset/b/2.jpeg b/tests/data/dataset/b/2.jpeg new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/dataset/b/subb/3.jpg b/tests/data/dataset/b/subb/3.jpg new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/dataset/classes.txt b/tests/data/dataset/classes.txt new file mode 100644 index 0000000..c012a51 --- /dev/null +++ b/tests/data/dataset/classes.txt @@ -0,0 +1,2 @@ +bus +car diff --git a/tests/data/gray.jpg b/tests/data/gray.jpg new file mode 100644 index 0000000..94edd73 Binary files /dev/null and b/tests/data/gray.jpg differ diff --git a/tests/data/retinanet.py b/tests/data/retinanet.py new file mode 100644 index 0000000..e7e6ea0 --- /dev/null +++ b/tests/data/retinanet.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
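
The `parse_requirements` helper in `setup.py` above keeps or strips version specifiers depending on `with_version`. A minimal sketch of its expected behaviour for the `requirements/runtime.txt` listed earlier in this diff, assuming it is run from the repository root (mirroring the `CommandLine` hint in the helper's docstring); this is illustrative and not part of the diff:

```python
# Illustrative sketch, assuming the working directory is the repository root
# where setup.py and requirements/runtime.txt exist. Importing setup.py does
# not run setup() because that call sits behind `if __name__ == '__main__':`.
from setup import parse_requirements

print(parse_requirements('requirements/runtime.txt'))
# Expected, with version specifiers kept (the default):
# ['matplotlib>=3.1.0', 'numpy', 'packaging', 'einops>=0.6.0']

print(parse_requirements('requirements/runtime.txt', with_version=False))
# Expected: ['matplotlib', 'numpy', 'packaging', 'einops']
```
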
+# small RetinaNet +num_classes = 3 + +# model settings +model = dict( + type='RetinaNet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=num_classes, + in_channels=256, + stacked_convs=1, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict(test=dict(pipeline=test_pipeline)) diff --git a/tests/data/test.logjson b/tests/data/test.logjson new file mode 100644 index 0000000..dd9a160 --- /dev/null +++ b/tests/data/test.logjson @@ -0,0 +1,10 @@ +{"a": "b"} +{"mode": "train", "epoch": 1, "iter": 10, "lr": 0.01309, "memory": 0, "data_time": 0.0072, "time": 0.00727} +{"mode": "train", "epoch": 1, "iter": 20, "lr": 0.02764, "memory": 0, "data_time": 0.00044, "time": 0.00046} +{"mode": "train", "epoch": 1, "iter": 30, "lr": 0.04218, "memory": 0, "data_time": 0.00028, "time": 0.0003} +{"mode": "train", "epoch": 1, "iter": 40, "lr": 0.05673, "memory": 0, "data_time": 0.00027, "time": 0.00029} +{"mode": "train", "epoch": 2, "iter": 10, "lr": 0.17309, "memory": 0, "data_time": 0.00048, "time": 0.0005} +{"mode": "train", "epoch": 2, "iter": 20, "lr": 0.18763, "memory": 0, "data_time": 0.00038, "time": 0.0004} +{"mode": "train", "epoch": 2, "iter": 30, "lr": 0.20218, "memory": 0, "data_time": 0.00037, "time": 0.00039} +{"mode": "train", "epoch": 3, "iter": 10, "lr": 0.33305, "memory": 0, "data_time": 0.00045, "time": 0.00046} +{"mode": "train", "epoch": 3, "iter": 20, "lr": 0.34759, "memory": 0, "data_time": 0.0003, "time": 0.00032} \ No newline at end of file diff --git a/tests/test_data/test_builder.py b/tests/test_data/test_builder.py new file mode 100644 index 0000000..c911b98 --- /dev/null +++ b/tests/test_data/test_builder.py @@ -0,0 +1,272 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
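
The `tests/data/test.logjson` fixture above pairs with the `load_json_log` helper shown at the start of this section, which groups log lines by epoch and skips lines without an `epoch` field. A rough sketch of the result it should produce for this fixture; the `mmcls.utils` import path is an assumption inferred from the diff ordering:

```python
# Illustrative only; assumes load_json_log is exposed via mmcls.utils.
from mmcls.utils import load_json_log

log_dict = load_json_log('tests/data/test.logjson')

# The first fixture line, {"a": "b"}, carries no "epoch" key and is skipped.
print(sorted(log_dict))      # [1, 2, 3]
print(log_dict[1]['iter'])   # [10, 20, 30, 40]
print(log_dict[3]['lr'])     # [0.33305, 0.34759]
```
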
+import os.path as osp +from copy import deepcopy +from unittest.mock import patch + +import torch +from mmcv.utils import digit_version + +from mmcls.datasets import ImageNet, build_dataloader, build_dataset +from mmcls.datasets.dataset_wrappers import (ClassBalancedDataset, + ConcatDataset, KFoldDataset, + RepeatDataset) + + +class TestDataloaderBuilder(): + + @classmethod + def setup_class(cls): + cls.data = list(range(20)) + cls.samples_per_gpu = 5 + cls.workers_per_gpu = 1 + + @patch('mmcls.datasets.builder.get_dist_info', return_value=(0, 1)) + def test_single_gpu(self, _): + common_cfg = dict( + dataset=self.data, + samples_per_gpu=self.samples_per_gpu, + workers_per_gpu=self.workers_per_gpu, + dist=False) + + # Test default config + dataloader = build_dataloader(**common_cfg) + + if digit_version(torch.__version__) >= digit_version('1.8.0'): + assert dataloader.persistent_workers + elif hasattr(dataloader, 'persistent_workers'): + assert not dataloader.persistent_workers + + assert dataloader.batch_size == self.samples_per_gpu + assert dataloader.num_workers == self.workers_per_gpu + assert not all( + torch.cat(list(iter(dataloader))) == torch.tensor(self.data)) + + # Test without shuffle + dataloader = build_dataloader(**common_cfg, shuffle=False) + assert all( + torch.cat(list(iter(dataloader))) == torch.tensor(self.data)) + + # Test with custom sampler_cfg + dataloader = build_dataloader( + **common_cfg, + sampler_cfg=dict(type='RepeatAugSampler', selected_round=0), + shuffle=False) + expect = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6] + assert all(torch.cat(list(iter(dataloader))) == torch.tensor(expect)) + + @patch('mmcls.datasets.builder.get_dist_info', return_value=(0, 1)) + def test_multi_gpu(self, _): + common_cfg = dict( + dataset=self.data, + samples_per_gpu=self.samples_per_gpu, + workers_per_gpu=self.workers_per_gpu, + num_gpus=2, + dist=False) + + # Test default config + dataloader = build_dataloader(**common_cfg) + + if digit_version(torch.__version__) >= digit_version('1.8.0'): + assert dataloader.persistent_workers + elif hasattr(dataloader, 'persistent_workers'): + assert not dataloader.persistent_workers + + assert dataloader.batch_size == self.samples_per_gpu * 2 + assert dataloader.num_workers == self.workers_per_gpu * 2 + assert not all( + torch.cat(list(iter(dataloader))) == torch.tensor(self.data)) + + # Test without shuffle + dataloader = build_dataloader(**common_cfg, shuffle=False) + assert all( + torch.cat(list(iter(dataloader))) == torch.tensor(self.data)) + + # Test with custom sampler_cfg + dataloader = build_dataloader( + **common_cfg, + sampler_cfg=dict(type='RepeatAugSampler', selected_round=0), + shuffle=False) + expect = torch.tensor( + [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6]) + assert all(torch.cat(list(iter(dataloader))) == expect) + + @patch('mmcls.datasets.builder.get_dist_info', return_value=(1, 2)) + def test_distributed(self, _): + common_cfg = dict( + dataset=self.data, + samples_per_gpu=self.samples_per_gpu, + workers_per_gpu=self.workers_per_gpu, + num_gpus=2, # num_gpus will be ignored in distributed environment. 
+ dist=True) + + # Test default config + dataloader = build_dataloader(**common_cfg) + + if digit_version(torch.__version__) >= digit_version('1.8.0'): + assert dataloader.persistent_workers + elif hasattr(dataloader, 'persistent_workers'): + assert not dataloader.persistent_workers + + assert dataloader.batch_size == self.samples_per_gpu + assert dataloader.num_workers == self.workers_per_gpu + non_expect = torch.tensor(self.data[1::2]) + assert not all(torch.cat(list(iter(dataloader))) == non_expect) + + # Test without shuffle + dataloader = build_dataloader(**common_cfg, shuffle=False) + expect = torch.tensor(self.data[1::2]) + assert all(torch.cat(list(iter(dataloader))) == expect) + + # Test with custom sampler_cfg + dataloader = build_dataloader( + **common_cfg, + sampler_cfg=dict(type='RepeatAugSampler', selected_round=0), + shuffle=False) + expect = torch.tensor( + [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6][1::2]) + assert all(torch.cat(list(iter(dataloader))) == expect) + + +class TestDatasetBuilder(): + + @classmethod + def setup_class(cls): + data_prefix = osp.join(osp.dirname(__file__), '../data/dataset') + cls.dataset_cfg = dict( + type='ImageNet', + data_prefix=data_prefix, + ann_file=osp.join(data_prefix, 'ann.txt'), + pipeline=[], + test_mode=False, + ) + + def test_normal_dataset(self): + # Test build + dataset = build_dataset(self.dataset_cfg) + assert isinstance(dataset, ImageNet) + assert dataset.test_mode == self.dataset_cfg['test_mode'] + + # Test default_args + dataset = build_dataset(self.dataset_cfg, {'test_mode': True}) + assert dataset.test_mode == self.dataset_cfg['test_mode'] + + cp_cfg = deepcopy(self.dataset_cfg) + cp_cfg.pop('test_mode') + dataset = build_dataset(cp_cfg, {'test_mode': True}) + assert dataset.test_mode + + def test_concat_dataset(self): + # Test build + dataset = build_dataset([self.dataset_cfg, self.dataset_cfg]) + assert isinstance(dataset, ConcatDataset) + assert dataset.datasets[0].test_mode == self.dataset_cfg['test_mode'] + + # Test default_args + dataset = build_dataset([self.dataset_cfg, self.dataset_cfg], + {'test_mode': True}) + assert dataset.datasets[0].test_mode == self.dataset_cfg['test_mode'] + + cp_cfg = deepcopy(self.dataset_cfg) + cp_cfg.pop('test_mode') + dataset = build_dataset([cp_cfg, cp_cfg], {'test_mode': True}) + assert dataset.datasets[0].test_mode + + def test_repeat_dataset(self): + # Test build + dataset = build_dataset( + dict(type='RepeatDataset', dataset=self.dataset_cfg, times=3)) + assert isinstance(dataset, RepeatDataset) + assert dataset.dataset.test_mode == self.dataset_cfg['test_mode'] + + # Test default_args + dataset = build_dataset( + dict(type='RepeatDataset', dataset=self.dataset_cfg, times=3), + {'test_mode': True}) + assert dataset.dataset.test_mode == self.dataset_cfg['test_mode'] + + cp_cfg = deepcopy(self.dataset_cfg) + cp_cfg.pop('test_mode') + dataset = build_dataset( + dict(type='RepeatDataset', dataset=cp_cfg, times=3), + {'test_mode': True}) + assert dataset.dataset.test_mode + + def test_class_balance_dataset(self): + # Test build + dataset = build_dataset( + dict( + type='ClassBalancedDataset', + dataset=self.dataset_cfg, + oversample_thr=1., + )) + assert isinstance(dataset, ClassBalancedDataset) + assert dataset.dataset.test_mode == self.dataset_cfg['test_mode'] + + # Test default_args + dataset = build_dataset( + dict( + type='ClassBalancedDataset', + dataset=self.dataset_cfg, + oversample_thr=1., + ), {'test_mode': True}) + assert dataset.dataset.test_mode == 
self.dataset_cfg['test_mode'] + + cp_cfg = deepcopy(self.dataset_cfg) + cp_cfg.pop('test_mode') + dataset = build_dataset( + dict( + type='ClassBalancedDataset', + dataset=cp_cfg, + oversample_thr=1., + ), {'test_mode': True}) + assert dataset.dataset.test_mode + + def test_kfold_dataset(self): + # Test build + dataset = build_dataset( + dict( + type='KFoldDataset', + dataset=self.dataset_cfg, + fold=0, + num_splits=5, + test_mode=False, + )) + assert isinstance(dataset, KFoldDataset) + assert not dataset.test_mode + assert dataset.dataset.test_mode == self.dataset_cfg['test_mode'] + + # Test default_args + dataset = build_dataset( + dict( + type='KFoldDataset', + dataset=self.dataset_cfg, + fold=0, + num_splits=5, + test_mode=False, + ), + default_args={ + 'test_mode': True, + 'classes': [1, 2, 3] + }) + assert not dataset.test_mode + assert dataset.dataset.test_mode == self.dataset_cfg['test_mode'] + assert dataset.dataset.CLASSES == [1, 2, 3] + + cp_cfg = deepcopy(self.dataset_cfg) + cp_cfg.pop('test_mode') + dataset = build_dataset( + dict( + type='KFoldDataset', + dataset=self.dataset_cfg, + fold=0, + num_splits=5, + ), + default_args={ + 'test_mode': True, + 'classes': [1, 2, 3] + }) + # The test_mode in default_args will be passed to KFoldDataset + assert dataset.test_mode + assert not dataset.dataset.test_mode + # Other default_args will be passed to child dataset. + assert dataset.dataset.CLASSES == [1, 2, 3] diff --git a/tests/test_data/test_datasets/test_common.py b/tests/test_data/test_datasets/test_common.py new file mode 100644 index 0000000..5ec3818 --- /dev/null +++ b/tests/test_data/test_datasets/test_common.py @@ -0,0 +1,911 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import pickle +import tempfile +from unittest import TestCase +from unittest.mock import patch + +import numpy as np +import torch + +from mmcls.datasets import DATASETS +from mmcls.datasets import BaseDataset as _BaseDataset +from mmcls.datasets import MultiLabelDataset as _MultiLabelDataset + +ASSETS_ROOT = osp.abspath( + osp.join(osp.dirname(__file__), '../../data/dataset')) + + +class BaseDataset(_BaseDataset): + + def load_annotations(self): + pass + + +class MultiLabelDataset(_MultiLabelDataset): + + def load_annotations(self): + pass + + +DATASETS.module_dict['BaseDataset'] = BaseDataset +DATASETS.module_dict['MultiLabelDataset'] = MultiLabelDataset + + +class TestBaseDataset(TestCase): + DATASET_TYPE = 'BaseDataset' + + DEFAULT_ARGS = dict(data_prefix='', pipeline=[]) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + with patch.object(dataset_class, 'load_annotations'): + # Test default behavior + cfg = {**self.DEFAULT_ARGS, 'classes': None, 'ann_file': None} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, dataset_class.CLASSES) + self.assertFalse(dataset.test_mode) + self.assertIsNone(dataset.ann_file) + + # Test setting classes as a tuple + cfg = {**self.DEFAULT_ARGS, 'classes': ('bus', 'car')} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + + # Test setting classes as a tuple + cfg = {**self.DEFAULT_ARGS, 'classes': ['bus', 'car']} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ['bus', 'car']) + + # Test setting classes through a file + classes_file = osp.join(ASSETS_ROOT, 'classes.txt') + cfg = {**self.DEFAULT_ARGS, 'classes': classes_file} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ['bus', 'car']) + 
self.assertEqual(dataset.class_to_idx, {'bus': 0, 'car': 1}) + + # Test invalid classes + cfg = {**self.DEFAULT_ARGS, 'classes': dict(classes=1)} + with self.assertRaisesRegex(ValueError, "type "): + dataset_class(**cfg) + + def test_get_cat_ids(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + fake_ann = [ + dict( + img_prefix='', + img_info=dict(), + gt_label=np.array(0, dtype=np.int64)) + ] + + with patch.object(dataset_class, 'load_annotations') as mock_load: + mock_load.return_value = fake_ann + dataset = dataset_class(**self.DEFAULT_ARGS) + + cat_ids = dataset.get_cat_ids(0) + self.assertIsInstance(cat_ids, list) + self.assertEqual(len(cat_ids), 1) + self.assertIsInstance(cat_ids[0], int) + + def test_evaluate(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + fake_ann = [ + dict(gt_label=np.array(0, dtype=np.int64)), + dict(gt_label=np.array(0, dtype=np.int64)), + dict(gt_label=np.array(1, dtype=np.int64)), + dict(gt_label=np.array(2, dtype=np.int64)), + dict(gt_label=np.array(1, dtype=np.int64)), + dict(gt_label=np.array(0, dtype=np.int64)), + ] + + with patch.object(dataset_class, 'load_annotations') as mock_load: + mock_load.return_value = fake_ann + dataset = dataset_class(**self.DEFAULT_ARGS) + + fake_results = np.array([ + [0.7, 0.0, 0.3], + [0.5, 0.2, 0.3], + [0.4, 0.5, 0.1], + [0.0, 0.0, 1.0], + [0.0, 0.0, 1.0], + [0.0, 0.0, 1.0], + ]) + + eval_results = dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'support', 'accuracy'], + metric_options={'topk': 1}) + + # Test results + self.assertAlmostEqual( + eval_results['precision'], (1 + 1 + 1 / 3) / 3 * 100.0, places=4) + self.assertAlmostEqual( + eval_results['recall'], (2 / 3 + 1 / 2 + 1) / 3 * 100.0, places=4) + self.assertAlmostEqual( + eval_results['f1_score'], (4 / 5 + 2 / 3 + 1 / 2) / 3 * 100.0, + places=4) + self.assertEqual(eval_results['support'], 6) + self.assertAlmostEqual(eval_results['accuracy'], 4 / 6 * 100, places=4) + + # test indices + eval_results_ = dataset.evaluate( + fake_results[:5], + metric=['precision', 'recall', 'f1_score', 'support', 'accuracy'], + metric_options={'topk': 1}, + indices=range(5)) + self.assertAlmostEqual( + eval_results_['precision'], (1 + 1 + 1 / 2) / 3 * 100.0, places=4) + self.assertAlmostEqual( + eval_results_['recall'], (1 + 1 / 2 + 1) / 3 * 100.0, places=4) + self.assertAlmostEqual( + eval_results_['f1_score'], (1 + 2 / 3 + 2 / 3) / 3 * 100.0, + places=4) + self.assertEqual(eval_results_['support'], 5) + self.assertAlmostEqual( + eval_results_['accuracy'], 4 / 5 * 100, places=4) + + # test input as tensor + fake_results_tensor = torch.from_numpy(fake_results) + eval_results_ = dataset.evaluate( + fake_results_tensor, + metric=['precision', 'recall', 'f1_score', 'support', 'accuracy'], + metric_options={'topk': 1}) + assert eval_results_ == eval_results + + # test thr + eval_results = dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'accuracy'], + metric_options={ + 'thrs': 0.6, + 'topk': 1 + }) + + self.assertAlmostEqual( + eval_results['precision'], (1 + 0 + 1 / 3) / 3 * 100.0, places=4) + self.assertAlmostEqual( + eval_results['recall'], (1 / 3 + 0 + 1) / 3 * 100.0, places=4) + self.assertAlmostEqual( + eval_results['f1_score'], (1 / 2 + 0 + 1 / 2) / 3 * 100.0, + places=4) + self.assertAlmostEqual(eval_results['accuracy'], 2 / 6 * 100, places=4) + + # thrs must be a number or tuple + with self.assertRaises(TypeError): + dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 
'accuracy'], + metric_options={ + 'thrs': 'thr', + 'topk': 1 + }) + + # test topk and thr as tuple + eval_results = dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'accuracy'], + metric_options={ + 'thrs': (0.5, 0.6), + 'topk': (1, 2) + }) + self.assertEqual( + { + 'precision_thr_0.50', 'precision_thr_0.60', 'recall_thr_0.50', + 'recall_thr_0.60', 'f1_score_thr_0.50', 'f1_score_thr_0.60', + 'accuracy_top-1_thr_0.50', 'accuracy_top-1_thr_0.60', + 'accuracy_top-2_thr_0.50', 'accuracy_top-2_thr_0.60' + }, eval_results.keys()) + + self.assertIsInstance(eval_results['precision_thr_0.50'], float) + self.assertIsInstance(eval_results['recall_thr_0.50'], float) + self.assertIsInstance(eval_results['f1_score_thr_0.50'], float) + self.assertIsInstance(eval_results['accuracy_top-1_thr_0.50'], float) + + # test topk is tuple while thrs is number + eval_results = dataset.evaluate( + fake_results, + metric='accuracy', + metric_options={ + 'thrs': 0.5, + 'topk': (1, 2) + }) + self.assertEqual({'accuracy_top-1', 'accuracy_top-2'}, + eval_results.keys()) + self.assertIsInstance(eval_results['accuracy_top-1'], float) + + # test topk is number while thrs is tuple + eval_results = dataset.evaluate( + fake_results, + metric='accuracy', + metric_options={ + 'thrs': (0.5, 0.6), + 'topk': 1 + }) + self.assertEqual({'accuracy_thr_0.50', 'accuracy_thr_0.60'}, + eval_results.keys()) + self.assertIsInstance(eval_results['accuracy_thr_0.50'], float) + + # test evaluation results for classes + eval_results = dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'support'], + metric_options={'average_mode': 'none'}) + self.assertEqual(eval_results['precision'].shape, (3, )) + self.assertEqual(eval_results['recall'].shape, (3, )) + self.assertEqual(eval_results['f1_score'].shape, (3, )) + self.assertEqual(eval_results['support'].shape, (3, )) + + # the average_mode method must be valid + with self.assertRaises(ValueError): + dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'support'], + metric_options={'average_mode': 'micro'}) + + # the metric must be valid for the dataset + with self.assertRaisesRegex(ValueError, + "{'unknown'} is not supported"): + dataset.evaluate(fake_results, metric='unknown') + + +class TestMultiLabelDataset(TestBaseDataset): + DATASET_TYPE = 'MultiLabelDataset' + + def test_get_cat_ids(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + fake_ann = [ + dict( + img_prefix='', + img_info=dict(), + gt_label=np.array([0, 1, 1, 0], dtype=np.uint8)) + ] + + with patch.object(dataset_class, 'load_annotations') as mock_load: + mock_load.return_value = fake_ann + dataset = dataset_class(**self.DEFAULT_ARGS) + + cat_ids = dataset.get_cat_ids(0) + self.assertIsInstance(cat_ids, list) + self.assertEqual(len(cat_ids), 2) + self.assertIsInstance(cat_ids[0], int) + self.assertEqual(cat_ids, [1, 2]) + + def test_evaluate(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + fake_ann = [ + dict(gt_label=np.array([1, 1, 0, -1], dtype=np.int8)), + dict(gt_label=np.array([1, 1, 0, -1], dtype=np.int8)), + dict(gt_label=np.array([0, -1, 1, -1], dtype=np.int8)), + dict(gt_label=np.array([0, 1, 0, -1], dtype=np.int8)), + dict(gt_label=np.array([0, 1, 0, -1], dtype=np.int8)), + ] + + with patch.object(dataset_class, 'load_annotations') as mock_load: + mock_load.return_value = fake_ann + dataset = dataset_class(**self.DEFAULT_ARGS) + + fake_results = np.array([ + [0.9, 0.8, 0.3, 0.2], + [0.1, 0.2, 0.2, 0.1], + [0.7, 0.5, 
0.9, 0.3], + [0.8, 0.1, 0.1, 0.2], + [0.8, 0.1, 0.1, 0.2], + ]) + + # the metric must be valid for the dataset + with self.assertRaisesRegex(ValueError, + "{'unknown'} is not supported"): + dataset.evaluate(fake_results, metric='unknown') + + # only one metric + eval_results = dataset.evaluate(fake_results, metric='mAP') + self.assertEqual(eval_results.keys(), {'mAP'}) + self.assertAlmostEqual(eval_results['mAP'], 67.5, places=4) + + # multiple metrics + eval_results = dataset.evaluate( + fake_results, metric=['mAP', 'CR', 'OF1']) + self.assertEqual(eval_results.keys(), {'mAP', 'CR', 'OF1'}) + self.assertAlmostEqual(eval_results['mAP'], 67.50, places=2) + self.assertAlmostEqual(eval_results['CR'], 43.75, places=2) + self.assertAlmostEqual(eval_results['OF1'], 42.86, places=2) + + +class TestCustomDataset(TestBaseDataset): + DATASET_TYPE = 'CustomDataset' + + def test_load_annotations(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # test load without ann_file + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'ann_file': None, + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + self.assertEqual(dataset.CLASSES, ['a', 'b']) # auto infer classes + self.assertEqual( + dataset.data_infos[0], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'a/1.JPG' + }, + 'gt_label': np.array(0) + }) + self.assertEqual( + dataset.data_infos[2], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'b/subb/3.jpg' + }, + 'gt_label': np.array(1) + }) + + # test ann_file assertion + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'ann_file': ['ann_file.txt'], + } + with self.assertRaisesRegex(TypeError, 'must be a str'): + dataset_class(**cfg) + + # test load with ann_file + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'ann_file': osp.join(ASSETS_ROOT, 'ann.txt'), + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + # custom dataset won't infer CLASSES from ann_file + self.assertEqual(dataset.CLASSES, dataset_class.CLASSES) + self.assertEqual( + dataset.data_infos[0], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'a/1.JPG' + }, + 'gt_label': np.array(0) + }) + self.assertEqual( + dataset.data_infos[2], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'b/subb/2.jpeg' + }, + 'gt_label': np.array(1) + }) + + # test extensions filter + cfg = { + **self.DEFAULT_ARGS, 'data_prefix': ASSETS_ROOT, + 'ann_file': None, + 'extensions': ('.txt', ) + } + with self.assertRaisesRegex(RuntimeError, + 'Supported extensions are: .txt'): + dataset_class(**cfg) + + cfg = { + **self.DEFAULT_ARGS, 'data_prefix': ASSETS_ROOT, + 'ann_file': None, + 'extensions': ('.jpeg', ) + } + with self.assertWarnsRegex(UserWarning, + 'Supported extensions are: .jpeg'): + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 1) + self.assertEqual( + dataset.data_infos[0], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'b/2.jpeg' + }, + 'gt_label': np.array(1) + }) + + # test classes check + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'classes': ['apple', 'banana'], + 'ann_file': None, + } + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ['apple', 'banana']) + + cfg['classes'] = ['apple', 'banana', 'dog'] + with self.assertRaisesRegex(AssertionError, + r"\(2\) doesn't match .* classes \(3\)"): + dataset_class(**cfg) + + +class TestImageNet(TestBaseDataset): + DATASET_TYPE = 'ImageNet' + + def test_load_annotations(self): + dataset_class = 
DATASETS.get(self.DATASET_TYPE) + + # test classes number + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'ann_file': None, + } + with self.assertRaisesRegex( + AssertionError, r"\(2\) doesn't match .* classes \(1000\)"): + dataset_class(**cfg) + + # test override classes + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'classes': ['cat', 'dog'], + 'ann_file': None, + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + self.assertEqual(dataset.CLASSES, ['cat', 'dog']) + + +class TestImageNet21k(TestBaseDataset): + DATASET_TYPE = 'ImageNet21k' + + DEFAULT_ARGS = dict( + data_prefix=ASSETS_ROOT, + pipeline=[], + classes=['cat', 'dog'], + ann_file=osp.join(ASSETS_ROOT, 'ann.txt'), + serialize_data=False) + + def test_initialize(self): + super().test_initialize() + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # The multi_label option is not implemented not. + cfg = {**self.DEFAULT_ARGS, 'multi_label': True} + with self.assertRaisesRegex(NotImplementedError, 'not supported'): + dataset_class(**cfg) + + # Warn about ann_file + cfg = {**self.DEFAULT_ARGS, 'ann_file': None} + with self.assertWarnsRegex(UserWarning, 'specify the `ann_file`'): + dataset_class(**cfg) + + # Warn about classes + cfg = {**self.DEFAULT_ARGS, 'classes': None} + with self.assertWarnsRegex(UserWarning, 'specify the `classes`'): + dataset_class(**cfg) + + def test_load_annotations(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with serialize_data=False + cfg = {**self.DEFAULT_ARGS, 'serialize_data': False} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset.data_infos), 3) + self.assertEqual(len(dataset), 3) + self.assertEqual( + dataset[0], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'a/1.JPG' + }, + 'gt_label': np.array(0) + }) + self.assertEqual( + dataset[2], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'b/subb/2.jpeg' + }, + 'gt_label': np.array(1) + }) + + # Test with serialize_data=True + cfg = {**self.DEFAULT_ARGS, 'serialize_data': True} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset.data_infos), 0) # data_infos is clear. 
+ self.assertEqual(len(dataset), 3) + self.assertEqual( + dataset[0], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'a/1.JPG' + }, + 'gt_label': np.array(0) + }) + self.assertEqual( + dataset[2], { + 'img_prefix': ASSETS_ROOT, + 'img_info': { + 'filename': 'b/subb/2.jpeg' + }, + 'gt_label': np.array(1) + }) + + +class TestMNIST(TestBaseDataset): + DATASET_TYPE = 'MNIST' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + data_prefix = tmpdir.name + cls.DEFAULT_ARGS = dict(data_prefix=data_prefix, pipeline=[]) + + dataset_class = DATASETS.get(cls.DATASET_TYPE) + + def rm_suffix(s): + return s[:s.rfind('.')] + + train_image_file = osp.join( + data_prefix, + rm_suffix(dataset_class.resources['train_image_file'][0])) + train_label_file = osp.join( + data_prefix, + rm_suffix(dataset_class.resources['train_label_file'][0])) + test_image_file = osp.join( + data_prefix, + rm_suffix(dataset_class.resources['test_image_file'][0])) + test_label_file = osp.join( + data_prefix, + rm_suffix(dataset_class.resources['test_label_file'][0])) + cls.fake_img = np.random.randint(0, 255, size=(28, 28), dtype=np.uint8) + cls.fake_label = np.random.randint(0, 10, size=(1, ), dtype=np.uint8) + + for file in [train_image_file, test_image_file]: + magic = b'\x00\x00\x08\x03' # num_dims = 3, type = uint8 + head = b'\x00\x00\x00\x01' + b'\x00\x00\x00\x1c' * 2 # (1, 28, 28) + data = magic + head + cls.fake_img.flatten().tobytes() + with open(file, 'wb') as f: + f.write(data) + + for file in [train_label_file, test_label_file]: + magic = b'\x00\x00\x08\x01' # num_dims = 3, type = uint8 + head = b'\x00\x00\x00\x01' # (1, ) + data = magic + head + cls.fake_label.tobytes() + with open(file, 'wb') as f: + f.write(data) + + def test_load_annotations(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + with patch.object(dataset_class, 'download'): + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 1) + + data_info = dataset[0] + np.testing.assert_equal(data_info['img'], self.fake_img) + np.testing.assert_equal(data_info['gt_label'], self.fake_label) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestCIFAR10(TestBaseDataset): + DATASET_TYPE = 'CIFAR10' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + data_prefix = tmpdir.name + cls.DEFAULT_ARGS = dict(data_prefix=data_prefix, pipeline=[]) + + dataset_class = DATASETS.get(cls.DATASET_TYPE) + base_folder = osp.join(data_prefix, dataset_class.base_folder) + os.mkdir(base_folder) + + cls.fake_imgs = np.random.randint( + 0, 255, size=(6, 3 * 32 * 32), dtype=np.uint8) + cls.fake_labels = np.random.randint(0, 10, size=(6, )) + cls.fake_classes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + batch1 = dict( + data=cls.fake_imgs[:2], labels=cls.fake_labels[:2].tolist()) + with open(osp.join(base_folder, 'data_batch_1'), 'wb') as f: + f.write(pickle.dumps(batch1)) + + batch2 = dict( + data=cls.fake_imgs[2:4], labels=cls.fake_labels[2:4].tolist()) + with open(osp.join(base_folder, 'data_batch_2'), 'wb') as f: + f.write(pickle.dumps(batch2)) + + test_batch = dict( + data=cls.fake_imgs[4:], labels=cls.fake_labels[4:].tolist()) + with open(osp.join(base_folder, 'test_batch'), 'wb') as f: + f.write(pickle.dumps(test_batch)) + + meta = {dataset_class.meta['key']: cls.fake_classes} + meta_filename = 
dataset_class.meta['filename'] + with open(osp.join(base_folder, meta_filename), 'wb') as f: + f.write(pickle.dumps(meta)) + + dataset_class.train_list = [['data_batch_1', None], + ['data_batch_2', None]] + dataset_class.test_list = [['test_batch', None]] + dataset_class.meta['md5'] = None + + def test_load_annotations(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 4) + self.assertEqual(dataset.CLASSES, self.fake_classes) + + data_info = dataset[0] + fake_img = self.fake_imgs[0].reshape(3, 32, 32).transpose(1, 2, 0) + np.testing.assert_equal(data_info['img'], fake_img) + np.testing.assert_equal(data_info['gt_label'], self.fake_labels[0]) + + # Test with test_mode=True + cfg = {**self.DEFAULT_ARGS, 'test_mode': True} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 2) + + data_info = dataset[0] + fake_img = self.fake_imgs[4].reshape(3, 32, 32).transpose(1, 2, 0) + np.testing.assert_equal(data_info['img'], fake_img) + np.testing.assert_equal(data_info['gt_label'], self.fake_labels[4]) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestCIFAR100(TestCIFAR10): + DATASET_TYPE = 'CIFAR100' + + +class TestVOC(TestMultiLabelDataset): + DATASET_TYPE = 'VOC' + + DEFAULT_ARGS = dict(data_prefix='VOC2007', pipeline=[]) + + +class TestCUB(TestBaseDataset): + DATASET_TYPE = 'CUB' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.data_prefix = tmpdir.name + cls.ann_file = osp.join(cls.data_prefix, 'ann_file.txt') + cls.image_class_labels_file = osp.join(cls.data_prefix, 'classes.txt') + cls.train_test_split_file = osp.join(cls.data_prefix, 'split.txt') + cls.train_test_split_file2 = osp.join(cls.data_prefix, 'split2.txt') + cls.DEFAULT_ARGS = dict( + data_prefix=cls.data_prefix, + pipeline=[], + ann_file=cls.ann_file, + image_class_labels_file=cls.image_class_labels_file, + train_test_split_file=cls.train_test_split_file) + + with open(cls.ann_file, 'w') as f: + f.write('\n'.join([ + '1 1.txt', + '2 2.txt', + '3 3.txt', + ])) + + with open(cls.image_class_labels_file, 'w') as f: + f.write('\n'.join([ + '1 2', + '2 3', + '3 1', + ])) + + with open(cls.train_test_split_file, 'w') as f: + f.write('\n'.join([ + '1 0', + '2 1', + '3 1', + ])) + + with open(cls.train_test_split_file2, 'w') as f: + f.write('\n'.join([ + '1 0', + '2 1', + ])) + + def test_load_annotations(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 2) + self.assertEqual(dataset.CLASSES, dataset_class.CLASSES) + + data_info = dataset[0] + np.testing.assert_equal(data_info['img_prefix'], self.data_prefix) + np.testing.assert_equal(data_info['img_info'], {'filename': '2.txt'}) + np.testing.assert_equal(data_info['gt_label'], 3 - 1) + + # Test with test_mode=True + cfg = {**self.DEFAULT_ARGS, 'test_mode': True} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 1) + + data_info = dataset[0] + np.testing.assert_equal(data_info['img_prefix'], self.data_prefix) + np.testing.assert_equal(data_info['img_info'], {'filename': '1.txt'}) + np.testing.assert_equal(data_info['gt_label'], 2 - 1) + + # Test if the numbers of line are not match + cfg = { + **self.DEFAULT_ARGS, 'train_test_split_file': + self.train_test_split_file2 + } + with 
self.assertRaisesRegex(AssertionError, 'should have same length'): + dataset_class(**cfg) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestStanfordCars(TestBaseDataset): + DATASET_TYPE = 'StanfordCars' + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + with patch.object(dataset_class, 'load_annotations'): + # Test with test_mode=False, ann_file is None + cfg = {**self.DEFAULT_ARGS, 'test_mode': False, 'ann_file': None} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, dataset_class.CLASSES) + self.assertFalse(dataset.test_mode) + self.assertIsNone(dataset.ann_file) + self.assertIsNotNone(dataset.train_ann_file) + + # Test with test_mode=False, ann_file is not None + cfg = { + **self.DEFAULT_ARGS, 'test_mode': False, + 'ann_file': 'train_ann_file.mat' + } + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, dataset_class.CLASSES) + self.assertFalse(dataset.test_mode) + self.assertIsNotNone(dataset.ann_file) + self.assertEqual(dataset.ann_file, 'train_ann_file.mat') + self.assertIsNotNone(dataset.train_ann_file) + + # Test with test_mode=True, ann_file is None + cfg = {**self.DEFAULT_ARGS, 'test_mode': True, 'ann_file': None} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, dataset_class.CLASSES) + self.assertTrue(dataset.test_mode) + self.assertIsNone(dataset.ann_file) + self.assertIsNotNone(dataset.test_ann_file) + + # Test with test_mode=True, ann_file is not None + cfg = { + **self.DEFAULT_ARGS, 'test_mode': True, + 'ann_file': 'test_ann_file.mat' + } + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, dataset_class.CLASSES) + self.assertTrue(dataset.test_mode) + self.assertIsNotNone(dataset.ann_file) + self.assertEqual(dataset.ann_file, 'test_ann_file.mat') + self.assertIsNotNone(dataset.test_ann_file) + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.data_prefix = tmpdir.name + cls.ann_file = None + devkit = osp.join(cls.data_prefix, 'devkit') + if not osp.exists(devkit): + os.mkdir(devkit) + cls.train_ann_file = osp.join(devkit, 'cars_train_annos.mat') + cls.test_ann_file = osp.join(devkit, 'cars_test_annos_withlabels.mat') + cls.DEFAULT_ARGS = dict( + data_prefix=cls.data_prefix, pipeline=[], test_mode=False) + + try: + import scipy.io as sio + except ImportError: + raise ImportError( + 'please run `pip install scipy` to install package `scipy`.') + + sio.savemat( + cls.train_ann_file, { + 'annotations': [( + (np.array([1]), np.array([10]), np.array( + [20]), np.array([50]), 15, np.array(['001.jpg'])), + (np.array([2]), np.array([15]), np.array( + [240]), np.array([250]), 15, np.array(['002.jpg'])), + (np.array([89]), np.array([150]), np.array( + [278]), np.array([388]), 150, np.array(['012.jpg'])), + )] + }) + + sio.savemat( + cls.test_ann_file, { + 'annotations': + [((np.array([89]), np.array([150]), np.array( + [278]), np.array([388]), 150, np.array(['025.jpg'])), + (np.array([155]), np.array([10]), np.array( + [200]), np.array([233]), 0, np.array(['111.jpg'])), + (np.array([25]), np.array([115]), np.array( + [240]), np.array([360]), 15, np.array(['265.jpg'])))] + }) + + def test_load_annotations(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with test_mode=False and ann_file=None + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 3) + self.assertEqual(dataset.CLASSES, dataset_class.CLASSES) + + data_info 
= dataset[0] + np.testing.assert_equal(data_info['img_prefix'], + osp.join(self.data_prefix, 'cars_train')) + np.testing.assert_equal(data_info['img_info'], {'filename': '001.jpg'}) + np.testing.assert_equal(data_info['gt_label'], 15 - 1) + + # Test with test_mode=True and ann_file=None + cfg = {**self.DEFAULT_ARGS, 'test_mode': True} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + np.testing.assert_equal(data_info['img_prefix'], + osp.join(self.data_prefix, 'cars_test')) + np.testing.assert_equal(data_info['img_info'], {'filename': '025.jpg'}) + np.testing.assert_equal(data_info['gt_label'], 150 - 1) + + # Test with test_mode=False, ann_file is not None + cfg = { + **self.DEFAULT_ARGS, 'test_mode': False, + 'ann_file': self.train_ann_file + } + dataset = dataset_class(**cfg) + data_info = dataset[0] + np.testing.assert_equal(data_info['img_prefix'], + osp.join(self.data_prefix, 'cars_train')) + np.testing.assert_equal(data_info['img_info'], {'filename': '001.jpg'}) + np.testing.assert_equal(data_info['gt_label'], 15 - 1) + + # Test with test_mode=True, ann_file is not None + cfg = { + **self.DEFAULT_ARGS, 'test_mode': True, + 'ann_file': self.test_ann_file + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + np.testing.assert_equal(data_info['img_prefix'], + osp.join(self.data_prefix, 'cars_test')) + np.testing.assert_equal(data_info['img_info'], {'filename': '025.jpg'}) + np.testing.assert_equal(data_info['gt_label'], 150 - 1) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() diff --git a/tests/test_data/test_datasets/test_dataset_utils.py b/tests/test_data/test_datasets/test_dataset_utils.py new file mode 100644 index 0000000..d29b203 --- /dev/null +++ b/tests/test_data/test_datasets/test_dataset_utils.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import random +import string + +from mmcls.datasets.utils import check_integrity, rm_suffix + + +def test_dataset_utils(): + # test rm_suffix + assert rm_suffix('a.jpg') == 'a' + assert rm_suffix('a.bak.jpg') == 'a.bak' + assert rm_suffix('a.bak.jpg', suffix='.jpg') == 'a.bak' + assert rm_suffix('a.bak.jpg', suffix='.bak.jpg') == 'a' + + # test check_integrity + rand_file = ''.join(random.sample(string.ascii_letters, 10)) + assert not check_integrity(rand_file, md5=None) + assert not check_integrity(rand_file, md5=2333) + test_file = osp.join(osp.dirname(__file__), '../../data/color.jpg') + assert check_integrity(test_file, md5='08252e5100cb321fe74e0e12a724ce14') + assert not check_integrity(test_file, md5=2333) diff --git a/tests/test_data/test_datasets/test_dataset_wrapper.py b/tests/test_data/test_datasets/test_dataset_wrapper.py new file mode 100644 index 0000000..fc4e266 --- /dev/null +++ b/tests/test_data/test_datasets/test_dataset_wrapper.py @@ -0,0 +1,192 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
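
The `check_integrity` assertions above compare a file on disk against a known MD5 digest. A generic sketch of what such a check boils down to, shown here for reference rather than as the toolbox's actual implementation:

```python
import hashlib


def md5_of(path, chunk_size=1 << 20):
    """Return the MD5 hex digest of a file, read in chunks."""
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()


# A check such as check_integrity(path, md5=...) passes when the digests
# match; the test above expects md5_of('tests/data/color.jpg') to equal
# '08252e5100cb321fe74e0e12a724ce14'.
```
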
+import bisect +import math +from collections import defaultdict +from unittest.mock import MagicMock, patch + +import numpy as np +import pytest + +from mmcls.datasets import (BaseDataset, ClassBalancedDataset, ConcatDataset, + KFoldDataset, RepeatDataset) + + +def mock_evaluate(results, + metric='accuracy', + metric_options=None, + indices=None, + logger=None): + return dict( + results=results, + metric=metric, + metric_options=metric_options, + indices=indices, + logger=logger) + + +@patch.multiple(BaseDataset, __abstractmethods__=set()) +def construct_toy_multi_label_dataset(length): + BaseDataset.CLASSES = ('foo', 'bar') + BaseDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx) + dataset = BaseDataset(data_prefix='', pipeline=[], test_mode=True) + cat_ids_list = [ + np.random.randint(0, 80, num).tolist() + for num in np.random.randint(1, 20, length) + ] + dataset.data_infos = MagicMock() + dataset.data_infos.__len__.return_value = length + dataset.get_cat_ids = MagicMock(side_effect=lambda idx: cat_ids_list[idx]) + dataset.get_gt_labels = \ + MagicMock(side_effect=lambda: np.array(cat_ids_list)) + dataset.evaluate = MagicMock(side_effect=mock_evaluate) + return dataset, cat_ids_list + + +@patch.multiple(BaseDataset, __abstractmethods__=set()) +def construct_toy_single_label_dataset(length): + BaseDataset.CLASSES = ('foo', 'bar') + BaseDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx) + dataset = BaseDataset(data_prefix='', pipeline=[], test_mode=True) + cat_ids_list = [[np.random.randint(0, 80)] for _ in range(length)] + dataset.data_infos = MagicMock() + dataset.data_infos.__len__.return_value = length + dataset.get_cat_ids = MagicMock(side_effect=lambda idx: cat_ids_list[idx]) + dataset.get_gt_labels = \ + MagicMock(side_effect=lambda: cat_ids_list) + dataset.evaluate = MagicMock(side_effect=mock_evaluate) + return dataset, cat_ids_list + + +@pytest.mark.parametrize('construct_dataset', [ + 'construct_toy_multi_label_dataset', 'construct_toy_single_label_dataset' +]) +def test_concat_dataset(construct_dataset): + construct_toy_dataset = eval(construct_dataset) + dataset_a, cat_ids_list_a = construct_toy_dataset(10) + dataset_b, cat_ids_list_b = construct_toy_dataset(20) + + concat_dataset = ConcatDataset([dataset_a, dataset_b]) + assert concat_dataset[5] == 5 + assert concat_dataset[25] == 15 + assert concat_dataset.get_cat_ids(5) == cat_ids_list_a[5] + assert concat_dataset.get_cat_ids(25) == cat_ids_list_b[15] + assert len(concat_dataset) == len(dataset_a) + len(dataset_b) + assert concat_dataset.CLASSES == BaseDataset.CLASSES + + +@pytest.mark.parametrize('construct_dataset', [ + 'construct_toy_multi_label_dataset', 'construct_toy_single_label_dataset' +]) +def test_repeat_dataset(construct_dataset): + construct_toy_dataset = eval(construct_dataset) + dataset, cat_ids_list = construct_toy_dataset(10) + repeat_dataset = RepeatDataset(dataset, 10) + assert repeat_dataset[5] == 5 + assert repeat_dataset[15] == 5 + assert repeat_dataset[27] == 7 + assert repeat_dataset.get_cat_ids(5) == cat_ids_list[5] + assert repeat_dataset.get_cat_ids(15) == cat_ids_list[5] + assert repeat_dataset.get_cat_ids(27) == cat_ids_list[7] + assert len(repeat_dataset) == 10 * len(dataset) + assert repeat_dataset.CLASSES == BaseDataset.CLASSES + + +@pytest.mark.parametrize('construct_dataset', [ + 'construct_toy_multi_label_dataset', 'construct_toy_single_label_dataset' +]) +def test_class_balanced_dataset(construct_dataset): + construct_toy_dataset = eval(construct_dataset) + dataset, 
cat_ids_list = construct_toy_dataset(10) + + category_freq = defaultdict(int) + for cat_ids in cat_ids_list: + cat_ids = set(cat_ids) + for cat_id in cat_ids: + category_freq[cat_id] += 1 + for k, v in category_freq.items(): + category_freq[k] = v / len(cat_ids_list) + + mean_freq = np.mean(list(category_freq.values())) + repeat_thr = mean_freq + + category_repeat = { + cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq)) + for cat_id, cat_freq in category_freq.items() + } + + repeat_factors = [] + for cat_ids in cat_ids_list: + cat_ids = set(cat_ids) + repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids}) + repeat_factors.append(math.ceil(repeat_factor)) + repeat_factors_cumsum = np.cumsum(repeat_factors) + repeat_factor_dataset = ClassBalancedDataset(dataset, repeat_thr) + assert repeat_factor_dataset.CLASSES == BaseDataset.CLASSES + assert len(repeat_factor_dataset) == repeat_factors_cumsum[-1] + for idx in np.random.randint(0, len(repeat_factor_dataset), 3): + assert repeat_factor_dataset[idx] == bisect.bisect_right( + repeat_factors_cumsum, idx) + + +@pytest.mark.parametrize('construct_dataset', [ + 'construct_toy_multi_label_dataset', 'construct_toy_single_label_dataset' +]) +def test_kfold_dataset(construct_dataset): + construct_toy_dataset = eval(construct_dataset) + dataset, cat_ids_list = construct_toy_dataset(10) + + # test without random seed + train_datasets = [ + KFoldDataset(dataset, fold=i, num_splits=3, test_mode=False) + for i in range(5) + ] + test_datasets = [ + KFoldDataset(dataset, fold=i, num_splits=3, test_mode=True) + for i in range(5) + ] + + assert sum([i.indices for i in test_datasets], []) == list(range(10)) + for train_set, test_set in zip(train_datasets, test_datasets): + train_samples = [train_set[i] for i in range(len(train_set))] + test_samples = [test_set[i] for i in range(len(test_set))] + assert set(train_samples + test_samples) == set(range(10)) + + # test with random seed + train_datasets = [ + KFoldDataset(dataset, fold=i, num_splits=3, test_mode=False, seed=1) + for i in range(5) + ] + test_datasets = [ + KFoldDataset(dataset, fold=i, num_splits=3, test_mode=True, seed=1) + for i in range(5) + ] + + assert sum([i.indices for i in test_datasets], []) != list(range(10)) + assert set(sum([i.indices for i in test_datasets], [])) == set(range(10)) + for train_set, test_set in zip(train_datasets, test_datasets): + train_samples = [train_set[i] for i in range(len(train_set))] + test_samples = [test_set[i] for i in range(len(test_set))] + assert set(train_samples + test_samples) == set(range(10)) + + # test behavior of get_cat_ids method + for train_set, test_set in zip(train_datasets, test_datasets): + for i in range(len(train_set)): + cat_ids = train_set.get_cat_ids(i) + assert cat_ids == cat_ids_list[train_set.indices[i]] + for i in range(len(test_set)): + cat_ids = test_set.get_cat_ids(i) + assert cat_ids == cat_ids_list[test_set.indices[i]] + + # test behavior of get_gt_labels method + for train_set, test_set in zip(train_datasets, test_datasets): + for i in range(len(train_set)): + gt_label = train_set.get_gt_labels()[i] + assert gt_label == cat_ids_list[train_set.indices[i]] + for i in range(len(test_set)): + gt_label = test_set.get_gt_labels()[i] + assert gt_label == cat_ids_list[test_set.indices[i]] + + # test evaluate + for test_set in test_datasets: + eval_inputs = test_set.evaluate(None) + assert eval_inputs['indices'] == test_set.indices diff --git a/tests/test_data/test_datasets/test_sampler.py 
b/tests/test_data/test_datasets/test_sampler.py new file mode 100644 index 0000000..683b953 --- /dev/null +++ b/tests/test_data/test_datasets/test_sampler.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from unittest.mock import MagicMock, patch + +import numpy as np + +from mmcls.datasets import BaseDataset, RepeatAugSampler, build_sampler + + +@patch.multiple(BaseDataset, __abstractmethods__=set()) +def construct_toy_single_label_dataset(length): + BaseDataset.CLASSES = ('foo', 'bar') + BaseDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx) + dataset = BaseDataset(data_prefix='', pipeline=[], test_mode=True) + cat_ids_list = [[np.random.randint(0, 80)] for _ in range(length)] + dataset.data_infos = MagicMock() + dataset.data_infos.__len__.return_value = length + dataset.get_cat_ids = MagicMock(side_effect=lambda idx: cat_ids_list[idx]) + return dataset, cat_ids_list + + +@patch('mmcls.datasets.samplers.repeat_aug.get_dist_info', return_value=(0, 1)) +def test_sampler_builder(_): + assert build_sampler(None) is None + dataset = construct_toy_single_label_dataset(1000)[0] + build_sampler(dict(type='RepeatAugSampler', dataset=dataset)) + + +@patch('mmcls.datasets.samplers.repeat_aug.get_dist_info', return_value=(0, 1)) +def test_rep_aug(_): + dataset = construct_toy_single_label_dataset(1000)[0] + ra = RepeatAugSampler(dataset, selected_round=0, shuffle=False) + ra.set_epoch(0) + assert len(ra) == 1000 + ra = RepeatAugSampler(dataset) + assert len(ra) == 768 + val = None + for idx, content in enumerate(ra): + if idx % 3 == 0: + val = content + else: + assert val is not None + assert content == val + + +@patch('mmcls.datasets.samplers.repeat_aug.get_dist_info', return_value=(0, 2)) +def test_rep_aug_dist(_): + dataset = construct_toy_single_label_dataset(1000)[0] + ra = RepeatAugSampler(dataset, selected_round=0, shuffle=False) + ra.set_epoch(0) + assert len(ra) == 1000 // 2 + ra = RepeatAugSampler(dataset) + assert len(ra) == 768 // 2 diff --git a/tests/test_data/test_pipelines/test_auto_augment.py b/tests/test_data/test_pipelines/test_auto_augment.py new file mode 100644 index 0000000..388ff46 --- /dev/null +++ b/tests/test_data/test_pipelines/test_auto_augment.py @@ -0,0 +1,1242 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
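
The sampler lengths asserted above (768 out of 1000 samples on one rank, halved on two ranks, and the full length when `selected_round=0`) are consistent with rounding the dataset length down to a multiple of a selection round of 256 and then sharding across ranks. A sketch of that arithmetic, under the assumption that this is how `RepeatAugSampler` sizes itself; the value 256 is inferred from the 1000 to 768 expectation rather than stated in the diff:

```python
import math


def expected_len(dataset_len, world_size, selected_round=256):
    """Sketch of the per-rank sample count matching the assertions above."""
    if selected_round:
        # Round down to a multiple of `selected_round`, then shard.
        return int(
            math.floor(dataset_len // selected_round * selected_round /
                       world_size))
    # selected_round=0 disables the rounding, as in the tests above.
    return dataset_len // world_size


assert expected_len(1000, world_size=1) == 768
assert expected_len(1000, world_size=2) == 768 // 2
assert expected_len(1000, world_size=1, selected_round=0) == 1000
assert expected_len(1000, world_size=2, selected_round=0) == 1000 // 2
```
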
+import copy +import random + +import mmcv +import numpy as np +import pytest +from mmcv.utils import build_from_cfg + +from mmcls.datasets.builder import PIPELINES + + +def construct_toy_data(): + img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img = np.stack([img, img, img], axis=-1) + results = dict() + # image + results['ori_img'] = img + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + return results + + +def construct_toy_data_photometric(): + img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]], + dtype=np.uint8) + img = np.stack([img, img, img], axis=-1) + results = dict() + # image + results['ori_img'] = img + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + return results + + +def test_auto_augment(): + policies = [[ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ]] + + # test assertion for policies + with pytest.raises(AssertionError): + # policies shouldn't be empty + transform = dict(type='AutoAugment', policies=[]) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + # policy should have type + invalid_policies = copy.deepcopy(policies) + invalid_policies[0][0].pop('type') + transform = dict(type='AutoAugment', policies=invalid_policies) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + # sub policy should be a non-empty list + invalid_policies = copy.deepcopy(policies) + invalid_policies[0] = [] + transform = dict(type='AutoAugment', policies=invalid_policies) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + # policy should be valid in PIPELINES registry. + invalid_policies = copy.deepcopy(policies) + invalid_policies.append([dict(type='Wrong_policy')]) + transform = dict(type='AutoAugment', policies=invalid_policies) + build_from_cfg(transform, PIPELINES) + + # test hparams + transform = dict( + type='AutoAugment', + policies=policies, + hparams=dict(pad_val=15, interpolation='nearest')) + pipeline = build_from_cfg(transform, PIPELINES) + # use hparams if not set in policies config + assert pipeline.policies[0][1]['pad_val'] == 15 + assert pipeline.policies[0][1]['interpolation'] == 'nearest' + + +def test_rand_augment(): + policies = [ + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 1), + pad_val=128, + prob=1., + direction='horizontal', + interpolation='nearest'), + dict(type='Invert', prob=1.), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 90), + prob=0.) 
+ ] + # test assertion for num_policies + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1.5, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=-1, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + # test assertion for magnitude_level + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=None) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=-1) + build_from_cfg(transform, PIPELINES) + # test assertion for magnitude_std + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12, + magnitude_std=None) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12, + magnitude_std='unknown') + build_from_cfg(transform, PIPELINES) + # test assertion for total_level + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12, + total_level=None) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12, + total_level=-30) + build_from_cfg(transform, PIPELINES) + # test assertion for policies + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=[], + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + invalid_policies = copy.deepcopy(policies) + invalid_policies.append(('Wrong_policy')) + transform = dict( + type='RandAugment', + policies=invalid_policies, + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + invalid_policies = copy.deepcopy(policies) + invalid_policies.append(dict(type='Wrong_policy')) + transform = dict( + type='RandAugment', + policies=invalid_policies, + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + invalid_policies = copy.deepcopy(policies) + invalid_policies[2].pop('type') + transform = dict( + type='RandAugment', + policies=invalid_policies, + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + invalid_policies = copy.deepcopy(policies) + invalid_policies[2].pop('magnitude_range') + transform = dict( + type='RandAugment', + policies=invalid_policies, + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + + # test case where num_policies = 1 + random.seed(1) + np.random.seed(0) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply translate + img_augmented = np.array( + [[128, 128, 1, 2], [128, 128, 5, 6], [128, 128, 9, 10]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + assert (results['img'] == img_augmented).all() + + 
results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply rotation with prob=0. + assert (results['img'] == results['ori_img']).all() + + # test case where magnitude_range is reversed + random.seed(1) + np.random.seed(0) + results = construct_toy_data() + reversed_policies = [ + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(1, 0), + pad_val=128, + prob=1., + direction='horizontal'), + dict(type='Invert', prob=1.), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(30, 0), + prob=0.) + ] + transform = dict( + type='RandAugment', + policies=reversed_policies, + num_policies=1, + magnitude_level=30) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case where num_policies = 2 + random.seed(0) + np.random.seed(0) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply rotate and rotate with prob=0 + assert (results['img'] == results['ori_img']).all() + + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply invert and translate + img_augmented = np.array( + [[252, 251, 128, 128], [248, 247, 128, 128], [244, 243, 128, 128]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + assert (results['img'] == img_augmented).all() + + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=0) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply invert and invert + assert (results['img'] == results['ori_img']).all() + + # test case where magnitude_level = 0 + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=0) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply rotate and translate + assert (results['img'] == results['ori_img']).all() + + # test case where magnitude_std = "inf" + random.seed(3) + np.random.seed(3) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12, + magnitude_std='inf') + pipeline = build_from_cfg(transform, PIPELINES) + # apply invert and translate (magnitude=0.148) + results = pipeline(results) + img_augmented = np.array( + [[127, 254, 253, 252], [127, 250, 249, 248], [127, 246, 245, 244]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + np.testing.assert_array_equal(results['img'], img_augmented) + + # test case where magnitude_std = 0.5 + random.seed(3) + np.random.seed(3) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12, + magnitude_std=0.5) + pipeline = build_from_cfg(transform, PIPELINES) + # apply invert and translate (magnitude=0.384) + results = pipeline(results) + img_augmented = np.array( + [[127, 127, 254, 253], [127, 127, 250, 249], [127, 
127, 246, 245]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + np.testing.assert_array_equal(results['img'], img_augmented) + + # test case where magnitude_std is negative + random.seed(3) + np.random.seed(0) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12, + magnitude_std=-1) + pipeline = build_from_cfg(transform, PIPELINES) + # apply translate (magnitude=0.4) and invert + results = pipeline(results) + img_augmented = np.array( + [[127, 127, 254, 253], [127, 127, 250, 249], [127, 127, 246, 245]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + np.testing.assert_array_equal(results['img'], img_augmented) + + # test hparams + random.seed(8) + np.random.seed(0) + results = construct_toy_data() + policies[2]['prob'] = 1.0 + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12, + magnitude_std=-1, + hparams=dict(pad_val=15, interpolation='nearest')) + pipeline = build_from_cfg(transform, PIPELINES) + # apply translate (magnitude=0.4) and rotate (angle=36) + results = pipeline(results) + img_augmented = np.array( + [[128, 128, 128, 15], [128, 128, 5, 2], [15, 9, 9, 6]], dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + np.testing.assert_array_equal(results['img'], img_augmented) + # hparams won't override setting in policies config + assert pipeline.policies[0]['pad_val'] == 128 + # use hparams if not set in policies config + assert pipeline.policies[2]['pad_val'] == 15 + assert pipeline.policies[2]['interpolation'] == 'nearest' + + +def test_shear(): + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid pad_val + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=0.5, pad_val=(0, 0)) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid direction + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=0.5, direction='diagonal') + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude = 0, therefore no shear + results = construct_toy_data() + transform = dict(type='Shear', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob = 0, therefore no shear + results = construct_toy_data() + transform = dict(type='Shear', magnitude=0.5, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test shear horizontally, magnitude=1 + results = construct_toy_data() + transform = dict( + type='Shear', magnitude=1, pad_val=0, prob=1., random_negative_prob=0.) 
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + sheared_img = np.array([[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 9, 10]], + dtype=np.uint8) + sheared_img = np.stack([sheared_img, sheared_img, sheared_img], axis=-1) + assert (results['img'] == sheared_img).all() + assert (results['img'] == results['img2']).all() + + # test shear vertically, magnitude=-1 + results = construct_toy_data() + transform = dict( + type='Shear', + magnitude=-1, + pad_val=0, + prob=1., + direction='vertical', + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + sheared_img = np.array([[1, 6, 11, 0], [5, 10, 0, 0], [9, 0, 0, 0]], + dtype=np.uint8) + sheared_img = np.stack([sheared_img, sheared_img, sheared_img], axis=-1) + assert (results['img'] == sheared_img).all() + + # test shear vertically, magnitude=1, random_negative_prob=1 + results = construct_toy_data() + transform = dict( + type='Shear', + magnitude=1, + pad_val=0, + prob=1., + direction='vertical', + random_negative_prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + sheared_img = np.array([[1, 6, 11, 0], [5, 10, 0, 0], [9, 0, 0, 0]], + dtype=np.uint8) + sheared_img = np.stack([sheared_img, sheared_img, sheared_img], axis=-1) + assert (results['img'] == sheared_img).all() + + # test auto aug with shear + results = construct_toy_data() + policies = [[transform]] + autoaug = dict(type='AutoAugment', policies=policies) + pipeline = build_from_cfg(autoaug, PIPELINES) + results = pipeline(results) + assert (results['img'] == sheared_img).all() + + +def test_translate(): + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Translate', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid pad_val + with pytest.raises(AssertionError): + transform = dict(type='Translate', magnitude=0.5, pad_val=(0, 0)) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Translate', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid direction + with pytest.raises(AssertionError): + transform = dict(type='Translate', magnitude=0.5, direction='diagonal') + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='Translate', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no translate + results = construct_toy_data() + transform = dict(type='Translate', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no translate + results = construct_toy_data() + transform = dict(type='Translate', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test translate horizontally, magnitude=0.5 + results = construct_toy_data() + transform = dict( + type='Translate', + magnitude=0.5, + pad_val=0, + prob=1., + random_negative_prob=0.) 
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + translated_img = np.array([[0, 0, 1, 2], [0, 0, 5, 6], [0, 0, 9, 10]], + dtype=np.uint8) + translated_img = np.stack([translated_img, translated_img, translated_img], + axis=-1) + assert (results['img'] == translated_img).all() + assert (results['img'] == results['img2']).all() + + # test translate vertically, magnitude=-0.5 + results = construct_toy_data() + transform = dict( + type='Translate', + magnitude=-0.5, + pad_val=0, + prob=1., + direction='vertical', + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + translated_img = np.array([[9, 10, 11, 12], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + translated_img = np.stack([translated_img, translated_img, translated_img], + axis=-1) + assert (results['img'] == translated_img).all() + + # test translate vertically, magnitude=0.5, random_negative_prob=1 + results = construct_toy_data() + transform = dict( + type='Translate', + magnitude=0.5, + pad_val=0, + prob=1., + direction='vertical', + random_negative_prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + translated_img = np.array([[9, 10, 11, 12], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + translated_img = np.stack([translated_img, translated_img, translated_img], + axis=-1) + assert (results['img'] == translated_img).all() + + +def test_rotate(): + # test assertion for invalid type of angle + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid type of center + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., center=0) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid length of center + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., center=(0, )) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid scale + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., scale=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid pad_val + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., pad_val=(0, 0)) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when angle=0, therefore no rotation + results = construct_toy_data() + transform = dict(type='Rotate', angle=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when angle=360, therefore no rotation + results = construct_toy_data() + transform = dict(type='Rotate', angle=360., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no rotation + results = construct_toy_data() + transform = dict(type='Rotate', angle=90., prob=0.) 
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test rotate clockwise, angle=30. + results = construct_toy_data() + transform = dict( + type='Rotate', angle=30., pad_val=0, prob=1., random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + rotated_img = np.array([[5, 2, 2, 0], [9, 6, 7, 4], [0, 11, 11, 8]], + dtype=np.uint8) + rotated_img = np.stack([rotated_img, rotated_img, rotated_img], axis=-1) + assert (results['img'] == rotated_img).all() + assert (results['img'] == results['img2']).all() + + # test rotate clockwise, angle=90, center=(1,1) + results = construct_toy_data() + transform = dict( + type='Rotate', + angle=90., + center=(1, 1), + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + rotated_img = np.array([[9, 5, 1, 128], [10, 6, 2, 128], [11, 7, 3, 128]], + dtype=np.uint8) + rotated_img = np.stack([rotated_img, rotated_img, rotated_img], axis=-1) + assert (results['img'] == rotated_img).all() + assert (results['img'] == results['img2']).all() + + # test rotate counter-clockwise, angle=90. + results = construct_toy_data() + transform = dict( + type='Rotate', angle=-90., pad_val=0, prob=1., random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + rotated_img = np.array([[4, 8, 12, 0], [3, 7, 11, 0], [2, 6, 10, 0]], + dtype=np.uint8) + rotated_img = np.stack([rotated_img, rotated_img, rotated_img], axis=-1) + assert (results['img'] == rotated_img).all() + assert (results['img'] == results['img2']).all() + + # test rotate counter-clockwise, angle=90, random_negative_prob=1 + results = construct_toy_data() + transform = dict( + type='Rotate', angle=-90., pad_val=0, prob=1., random_negative_prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + rotated_img = np.array([[0, 10, 6, 2], [0, 11, 7, 3], [0, 12, 8, 4]], + dtype=np.uint8) + rotated_img = np.stack([rotated_img, rotated_img, rotated_img], axis=-1) + assert (results['img'] == rotated_img).all() + assert (results['img'] == results['img2']).all() + + +def test_auto_contrast(): + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='AutoContrast', prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no auto_contrast + results = construct_toy_data() + transform = dict(type='AutoContrast', prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 + results = construct_toy_data() + transform = dict(type='AutoContrast', prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + auto_contrasted_img = np.array( + [[0, 23, 46, 69], [92, 115, 139, 162], [185, 208, 231, 255]], + dtype=np.uint8) + auto_contrasted_img = np.stack( + [auto_contrasted_img, auto_contrasted_img, auto_contrasted_img], + axis=-1) + assert (results['img'] == auto_contrasted_img).all() + assert (results['img'] == results['img2']).all() + + +def test_invert(): + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Invert', prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no invert + results = construct_toy_data() + transform = dict(type='Invert', prob=0.) 
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 + results = construct_toy_data() + transform = dict(type='Invert', prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + inverted_img = np.array( + [[254, 253, 252, 251], [250, 249, 248, 247], [246, 245, 244, 243]], + dtype=np.uint8) + inverted_img = np.stack([inverted_img, inverted_img, inverted_img], + axis=-1) + assert (results['img'] == inverted_img).all() + assert (results['img'] == results['img2']).all() + + +def test_equalize(nb_rand_test=100): + + def _imequalize(img): + # equalize the image using PIL.ImageOps.equalize + from PIL import Image, ImageOps + img = Image.fromarray(img) + equalized_img = np.asarray(ImageOps.equalize(img)) + return equalized_img + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Equalize', prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no equalize + results = construct_toy_data() + transform = dict(type='Equalize', prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 with randomly sampled image. + results = construct_toy_data() + transform = dict(type='Equalize', prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + for _ in range(nb_rand_test): + img = np.clip(np.random.normal(0, 1, (256, 256, 3)) * 260, 0, + 255).astype(np.uint8) + results['img'] = img + results = pipeline(copy.deepcopy(results)) + assert (results['img'] == _imequalize(img)).all() + + +def test_solarize(): + # test assertion for invalid type of thr + with pytest.raises(AssertionError): + transform = dict(type='Solarize', thr=(1, 2)) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='Solarize', thr=128, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when thr=256, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='Solarize', thr=256, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when thr=128 + results = construct_toy_data_photometric() + transform = dict(type='Solarize', thr=128, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_solarized = np.array([[0, 127, 0], [1, 127, 1], [2, 126, 2]], + dtype=np.uint8) + img_solarized = np.stack([img_solarized, img_solarized, img_solarized], + axis=-1) + assert (results['img'] == img_solarized).all() + assert (results['img'] == results['img2']).all() + + # test case when thr=100 + results = construct_toy_data_photometric() + transform = dict(type='Solarize', thr=100, prob=1.) 
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_solarized = np.array([[0, 127, 0], [1, 128, 1], [2, 126, 2]], + dtype=np.uint8) + img_solarized = np.stack([img_solarized, img_solarized, img_solarized], + axis=-1) + assert (results['img'] == img_solarized).all() + assert (results['img'] == results['img2']).all() + + +def test_solarize_add(): + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='SolarizeAdd', magnitude=(1, 2)) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid type of thr + with pytest.raises(AssertionError): + transform = dict(type='SolarizeAdd', magnitude=100, thr=(1, 2)) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='SolarizeAdd', magnitude=100, thr=128, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when thr=0, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='SolarizeAdd', magnitude=100, thr=0, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when thr=128, magnitude=100 + results = construct_toy_data_photometric() + transform = dict(type='SolarizeAdd', magnitude=100, thr=128, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_solarized = np.array( + [[100, 128, 255], [101, 227, 254], [102, 129, 253]], dtype=np.uint8) + img_solarized = np.stack([img_solarized, img_solarized, img_solarized], + axis=-1) + assert (results['img'] == img_solarized).all() + assert (results['img'] == results['img2']).all() + + # test case when thr=100, magnitude=50 + results = construct_toy_data_photometric() + transform = dict(type='SolarizeAdd', magnitude=50, thr=100, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_solarized = np.array([[50, 128, 255], [51, 127, 254], [52, 129, 253]], + dtype=np.uint8) + img_solarized = np.stack([img_solarized, img_solarized, img_solarized], + axis=-1) + assert (results['img'] == img_solarized).all() + assert (results['img'] == results['img2']).all() + + +def test_posterize(): + # test assertion for invalid value of bits + with pytest.raises(AssertionError): + transform = dict(type='Posterize', bits=10) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no posterize + results = construct_toy_data_photometric() + transform = dict(type='Posterize', bits=4, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when bits=8, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='Posterize', bits=8, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when bits=1 + results = construct_toy_data_photometric() + transform = dict(type='Posterize', bits=1, prob=1.) 
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_posterized = np.array([[0, 128, 128], [0, 0, 128], [0, 128, 128]], + dtype=np.uint8) + img_posterized = np.stack([img_posterized, img_posterized, img_posterized], + axis=-1) + assert (results['img'] == img_posterized).all() + assert (results['img'] == results['img2']).all() + + # test case when bits=3 + results = construct_toy_data_photometric() + transform = dict(type='Posterize', bits=3, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_posterized = np.array([[0, 128, 224], [0, 96, 224], [0, 128, 224]], + dtype=np.uint8) + img_posterized = np.stack([img_posterized, img_posterized, img_posterized], + axis=-1) + assert (results['img'] == img_posterized).all() + assert (results['img'] == results['img2']).all() + + +def test_contrast(nb_rand_test=100): + + def _adjust_contrast(img, factor): + from PIL import Image + from PIL.ImageEnhance import Contrast + + # Image.fromarray defaultly supports RGB, not BGR. + # convert from BGR to RGB + img = Image.fromarray(img[..., ::-1], mode='RGB') + contrasted_img = Contrast(img).enhance(factor) + # convert from RGB to BGR + return np.asarray(contrasted_img)[..., ::-1] + + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Contrast', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Contrast', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='Contrast', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no adjusting contrast + results = construct_toy_data_photometric() + transform = dict(type='Contrast', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no adjusting contrast + results = construct_toy_data_photometric() + transform = dict(type='Contrast', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 with randomly sampled image. + results = construct_toy_data() + for _ in range(nb_rand_test): + magnitude = np.random.uniform() * np.random.choice([-1, 1]) + transform = dict( + type='Contrast', + magnitude=magnitude, + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + img = np.clip(np.random.uniform(0, 1, (256, 256, 3)) * 260, 0, + 255).astype(np.uint8) + results['img'] = img + results = pipeline(copy.deepcopy(results)) + # Note the gap (less_equal 1) between PIL.ImageEnhance.Contrast + # and mmcv.adjust_contrast comes from the gap that converts from + # a color image to gray image using mmcv or PIL. 
+ np.testing.assert_allclose( + results['img'], + _adjust_contrast(img, 1 + magnitude), + rtol=0, + atol=1) + + +def test_color_transform(): + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='ColorTransform', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='ColorTransform', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='ColorTransform', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no color transform + results = construct_toy_data_photometric() + transform = dict(type='ColorTransform', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no color transform + results = construct_toy_data_photometric() + transform = dict(type='ColorTransform', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when magnitude=-1, therefore got gray img + results = construct_toy_data_photometric() + transform = dict( + type='ColorTransform', magnitude=-1., prob=1., random_negative_prob=0) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_gray = mmcv.bgr2gray(results['ori_img']) + img_gray = np.stack([img_gray, img_gray, img_gray], axis=-1) + assert (results['img'] == img_gray).all() + + # test case when magnitude=0.5 + results = construct_toy_data_photometric() + transform = dict( + type='ColorTransform', magnitude=.5, prob=1., random_negative_prob=0) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_r = np.round( + np.clip((results['ori_img'] * 0.5 + img_gray * 0.5), 0, + 255)).astype(results['ori_img'].dtype) + assert (results['img'] == img_r).all() + assert (results['img'] == results['img2']).all() + + # test case when magnitude=0.3, random_negative_prob=1 + results = construct_toy_data_photometric() + transform = dict( + type='ColorTransform', magnitude=.3, prob=1., random_negative_prob=1.) 
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_r = np.round( + np.clip((results['ori_img'] * 0.7 + img_gray * 0.3), 0, + 255)).astype(results['ori_img'].dtype) + assert (results['img'] == img_r).all() + assert (results['img'] == results['img2']).all() + + +def test_brightness(nb_rand_test=100): + + def _adjust_brightness(img, factor): + # adjust the brightness of image using + # PIL.ImageEnhance.Brightness + from PIL import Image + from PIL.ImageEnhance import Brightness + img = Image.fromarray(img) + brightened_img = Brightness(img).enhance(factor) + return np.asarray(brightened_img) + + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Brightness', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Brightness', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='Brightness', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no adjusting brightness + results = construct_toy_data_photometric() + transform = dict(type='Brightness', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no adjusting brightness + results = construct_toy_data_photometric() + transform = dict(type='Brightness', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 with randomly sampled image. + results = construct_toy_data() + for _ in range(nb_rand_test): + magnitude = np.random.uniform() * np.random.choice([-1, 1]) + transform = dict( + type='Brightness', + magnitude=magnitude, + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + img = np.clip(np.random.uniform(0, 1, (256, 256, 3)) * 260, 0, + 255).astype(np.uint8) + results['img'] = img + results = pipeline(copy.deepcopy(results)) + np.testing.assert_allclose( + results['img'], + _adjust_brightness(img, 1 + magnitude), + rtol=0, + atol=1) + + +def test_sharpness(nb_rand_test=100): + + def _adjust_sharpness(img, factor): + # adjust the sharpness of image using + # PIL.ImageEnhance.Sharpness + from PIL import Image + from PIL.ImageEnhance import Sharpness + img = Image.fromarray(img) + sharpened_img = Sharpness(img).enhance(factor) + return np.asarray(sharpened_img) + + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Sharpness', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Sharpness', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='Sharpness', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no adjusting sharpness + results = construct_toy_data_photometric() + transform = dict(type='Sharpness', magnitude=0., prob=1.) 
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no adjusting sharpness + results = construct_toy_data_photometric() + transform = dict(type='Sharpness', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 with randomly sampled image. + results = construct_toy_data() + for _ in range(nb_rand_test): + magnitude = np.random.uniform() * np.random.choice([-1, 1]) + transform = dict( + type='Sharpness', + magnitude=magnitude, + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + img = np.clip(np.random.uniform(0, 1, (256, 256, 3)) * 260, 0, + 255).astype(np.uint8) + results['img'] = img + results = pipeline(copy.deepcopy(results)) + np.testing.assert_allclose( + results['img'][1:-1, 1:-1], + _adjust_sharpness(img, 1 + magnitude)[1:-1, 1:-1], + rtol=0, + atol=1) + + +def test_cutout(): + + # test assertion for invalid type of shape + with pytest.raises(TypeError): + transform = dict(type='Cutout', shape=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Cutout', shape=1, prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no cutout + results = construct_toy_data() + transform = dict(type='Cutout', shape=2, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when shape=0, therefore no cutout + results = construct_toy_data() + transform = dict(type='Cutout', shape=0, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when shape=6, therefore the whole img has been cut + results = construct_toy_data() + transform = dict(type='Cutout', shape=6, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == np.ones_like(results['ori_img']) * 128).all() + + # test case when shape is int + np.random.seed(0) + results = construct_toy_data() + transform = dict(type='Cutout', shape=1, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_cutout = np.array([[1, 2, 3, 4], [5, 128, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img_cutout = np.stack([img_cutout, img_cutout, img_cutout], axis=-1) + assert (results['img'] == img_cutout).all() + + # test case when shape is tuple + np.random.seed(0) + results = construct_toy_data() + transform = dict(type='Cutout', shape=(1, 2), pad_val=0, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_cutout = np.array([[1, 2, 3, 4], [5, 0, 0, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img_cutout = np.stack([img_cutout, img_cutout, img_cutout], axis=-1) + assert (results['img'] == img_cutout).all() diff --git a/tests/test_data/test_pipelines/test_loading.py b/tests/test_data/test_pipelines/test_loading.py new file mode 100644 index 0000000..928fbc8 --- /dev/null +++ b/tests/test_data/test_pipelines/test_loading.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
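The loading tests below hand LoadImageFromFile a results dict that only describes where the image lives; the transform is expected to add the decoded array and its metadata. A minimal sketch of that contract, assuming the test image path is resolved relative to the working directory:

import copy

from mmcls.datasets.pipelines import LoadImageFromFile

# `img_prefix` + `img_info['filename']` locate the file on disk; the loader
# fills in 'img', 'img_shape', 'ori_shape' and 'img_norm_cfg'.
results = dict(img_prefix='tests/data', img_info=dict(filename='color.jpg'))
loader = LoadImageFromFile(to_float32=True, color_type='color')
loaded = loader(copy.deepcopy(results))
print(loaded['img'].shape, loaded['img'].dtype)  # e.g. (300, 400, 3) float32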
+import copy +import os.path as osp + +import numpy as np + +from mmcls.datasets.pipelines import LoadImageFromFile + + +class TestLoading(object): + + @classmethod + def setup_class(cls): + cls.data_prefix = osp.join(osp.dirname(__file__), '../../data') + + def test_load_img(self): + results = dict( + img_prefix=self.data_prefix, img_info=dict(filename='color.jpg')) + transform = LoadImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['filename'] == osp.join(self.data_prefix, 'color.jpg') + assert results['ori_filename'] == 'color.jpg' + assert results['img'].shape == (300, 400, 3) + assert results['img'].dtype == np.uint8 + assert results['img_shape'] == (300, 400, 3) + assert results['ori_shape'] == (300, 400, 3) + np.testing.assert_equal(results['img_norm_cfg']['mean'], + np.zeros(3, dtype=np.float32)) + assert repr(transform) == transform.__class__.__name__ + \ + "(to_float32=False, color_type='color', " + \ + "file_client_args={'backend': 'disk'})" + + # no img_prefix + results = dict( + img_prefix=None, img_info=dict(filename='tests/data/color.jpg')) + transform = LoadImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['filename'] == 'tests/data/color.jpg' + assert results['img'].shape == (300, 400, 3) + + # to_float32 + transform = LoadImageFromFile(to_float32=True) + results = transform(copy.deepcopy(results)) + assert results['img'].dtype == np.float32 + + # gray image + results = dict( + img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg')) + transform = LoadImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['img'].shape == (288, 512, 3) + assert results['img'].dtype == np.uint8 + + transform = LoadImageFromFile(color_type='unchanged') + results = transform(copy.deepcopy(results)) + assert results['img'].shape == (288, 512) + assert results['img'].dtype == np.uint8 + np.testing.assert_equal(results['img_norm_cfg']['mean'], + np.zeros(1, dtype=np.float32)) diff --git a/tests/test_data/test_pipelines/test_transform.py b/tests/test_data/test_pipelines/test_transform.py new file mode 100644 index 0000000..b23e84b --- /dev/null +++ b/tests/test_data/test_pipelines/test_transform.py @@ -0,0 +1,1301 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
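test_transform.py exercises the geometric and photometric transforms one at a time; in a real config they are chained with Compose. A minimal sketch of such a chain, assuming Compose builds config dicts through the PIPELINES registry (sizes and normalization values borrowed from the tests below):

import copy

import numpy as np

from mmcls.datasets.pipelines import Compose

# Resize the short edge to 256, center-crop to 224x224, then normalize.
pipeline = Compose([
    dict(type='Resize', size=(256, -1), interpolation='bilinear'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize',
         mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375],
         to_rgb=True),
])

img = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
results = dict(img=img, img_shape=img.shape, ori_shape=img.shape,
               img_fields=['img'])
out = pipeline(copy.deepcopy(results))
print(out['img'].shape)  # (224, 224, 3), float image after Normalize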
+import copy +import os.path as osp +import random + +import mmcv +import numpy as np +import pytest +import torch +import torchvision +from mmcv.utils import build_from_cfg +from numpy.testing import assert_array_almost_equal, assert_array_equal +from PIL import Image +from torchvision import transforms + +import mmcls.datasets.pipelines.transforms as mmcls_transforms +from mmcls.datasets.builder import PIPELINES +from mmcls.datasets.pipelines import Compose + + +def construct_toy_data(): + img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img = np.stack([img, img, img], axis=-1) + results = dict() + # image + results['ori_img'] = img + results['img'] = copy.deepcopy(img) + results['ori_shape'] = img.shape + results['img_shape'] = img.shape + return results + + +def test_resize(): + # test assertion if size is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=-1) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple but the second value is smaller than 0 + # and the second value is not equal to -1 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=(224, -2)) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple but the first value is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=(-1, 224)) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple and len(size) < 2 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=(224, )) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple len(size) > 2 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=(224, 224, 3)) + build_from_cfg(transform, PIPELINES) + + # test assertion when interpolation is invalid + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=224, interpolation='2333') + build_from_cfg(transform, PIPELINES) + + # test assertion when resize_short is invalid + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=224, adaptive_side='False') + build_from_cfg(transform, PIPELINES) + + # test repr + transform = dict(type='Resize', size=224) + resize_module = build_from_cfg(transform, PIPELINES) + assert isinstance(repr(resize_module), str) + + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + results['img_fields'] = ['img', 'img2'] + return results + + # test resize when size is int + transform = dict(type='Resize', size=224, interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + + # test resize when size is tuple and the second value is -1 + transform = dict(type='Resize', size=(224, -1), interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = 
resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 298, 3) + + # test resize when size is tuple + transform = dict(type='Resize', size=(224, 224), interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + + # test resize when resize_height != resize_width + transform = dict(type='Resize', size=(224, 256), interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 256, 3) + + # test resize when size is larger than img.shape + img_height, img_width, _ = original_img.shape + transform = dict( + type='Resize', + size=(img_height * 2, img_width * 2), + interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height * 2, img_width * 2, 3) + + # test resize with different backends + transform_cv2 = dict( + type='Resize', + size=(224, 256), + interpolation='bilinear', + backend='cv2') + transform_pil = dict( + type='Resize', + size=(224, 256), + interpolation='bilinear', + backend='pillow') + resize_module_cv2 = build_from_cfg(transform_cv2, PIPELINES) + resize_module_pil = build_from_cfg(transform_pil, PIPELINES) + results = reset_results(results, original_img) + results['img_fields'] = ['img'] + results_cv2 = resize_module_cv2(results) + results['img_fields'] = ['img2'] + results_pil = resize_module_pil(results) + assert np.allclose(results_cv2['img'], results_pil['img2'], atol=45) + + # compare results with torchvision + transform = dict(type='Resize', size=(224, 224), interpolation='area') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + resize_module = transforms.Resize( + size=(224, 224), interpolation=Image.BILINEAR) + pil_img = Image.fromarray(original_img) + resized_img = resize_module(pil_img) + resized_img = np.array(resized_img) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + assert np.allclose(results['img'], resized_img, atol=30) + + # test resize when size is tuple, the second value is -1 + # and adaptive_side='long' + transform = dict( + type='Resize', + size=(224, -1), + adaptive_side='long', + interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (168, 224, 3) + + # test resize when size is tuple, the second value is -1 + # and adaptive_side='long', h > w + transform1 = dict(type='Resize', size=(300, 200), interpolation='bilinear') + resize_module1 = build_from_cfg(transform1, PIPELINES) + transform2 = dict( + type='Resize', + size=(224, -1), + adaptive_side='long', + interpolation='bilinear') + resize_module2 = build_from_cfg(transform2, PIPELINES) + results = reset_results(results, original_img) + results = resize_module1(results) + results = 
resize_module2(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 149, 3) + + # test resize when size is tuple, the second value is -1 + # and adaptive_side='short', h > w + transform1 = dict(type='Resize', size=(300, 200), interpolation='bilinear') + resize_module1 = build_from_cfg(transform1, PIPELINES) + transform2 = dict( + type='Resize', + size=(224, -1), + adaptive_side='short', + interpolation='bilinear') + resize_module2 = build_from_cfg(transform2, PIPELINES) + results = reset_results(results, original_img) + results = resize_module1(results) + results = resize_module2(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (336, 224, 3) + + # test interpolation method checking + with pytest.raises(AssertionError): + transform = dict( + type='Resize', size=(300, 200), backend='cv2', interpolation='box') + resize_module = build_from_cfg(transform, PIPELINES) + + with pytest.raises(AssertionError): + transform = dict( + type='Resize', + size=(300, 200), + backend='pillow', + interpolation='area') + resize_module = build_from_cfg(transform, PIPELINES) + + +def test_pad(): + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + # test assertion if shape is None + with pytest.raises(AssertionError): + transform = dict(type='Pad', size=None) + pad_module = build_from_cfg(transform, PIPELINES) + pad_result = pad_module(copy.deepcopy(results)) + assert np.equal(pad_result['img'], pad_result['img2']).all() + assert pad_result['img_shape'] == (400, 400, 3) + + # test if pad is valid + transform = dict(type='Pad', size=(400, 400)) + pad_module = build_from_cfg(transform, PIPELINES) + pad_result = pad_module(copy.deepcopy(results)) + assert isinstance(repr(pad_module), str) + assert np.equal(pad_result['img'], pad_result['img2']).all() + assert pad_result['img_shape'] == (400, 400, 3) + assert np.allclose(pad_result['img'][-100:, :, :], 0) + + # test if pad_to_square is valid + transform = dict(type='Pad', pad_to_square=True) + pad_module = build_from_cfg(transform, PIPELINES) + pad_result = pad_module(copy.deepcopy(results)) + assert isinstance(repr(pad_module), str) + assert np.equal(pad_result['img'], pad_result['img2']).all() + assert pad_result['img_shape'] == (400, 400, 3) + assert np.allclose(pad_result['img'][-100:, :, :], 0) + + +def test_center_crop(): + # test assertion if size is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='CenterCrop', crop_size=-1) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple but one value is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='CenterCrop', crop_size=(224, -1)) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple and len(size) < 2 + with pytest.raises(AssertionError): + transform = dict(type='CenterCrop', crop_size=(224, )) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple len(size) > 2 + with pytest.raises(AssertionError): + transform = dict(type='CenterCrop', crop_size=(224, 224, 3)) + build_from_cfg(transform, PIPELINES) + + # test assertion if efficientnet is True and crop_size is tuple + with pytest.raises(AssertionError): + transform = dict( + type='CenterCrop', + crop_size=(224, 
224), + efficientnet_style=True, + ) + build_from_cfg(transform, PIPELINES) + + # test assertion if efficientnet is True and interpolation is invalid + with pytest.raises(AssertionError): + transform = dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + interpolation='2333') + build_from_cfg(transform, PIPELINES) + + # test assertion if efficientnet is True and crop_padding is negative + with pytest.raises(AssertionError): + transform = dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + crop_padding=-1) + build_from_cfg(transform, PIPELINES) + + # test repr + transform = dict(type='CenterCrop', crop_size=224) + center_crop_module = build_from_cfg(transform, PIPELINES) + assert isinstance(repr(center_crop_module), str) + + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + return results + + # test CenterCrop when size is int + transform = dict(type='CenterCrop', crop_size=224) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + + # test CenterCrop when size is int and efficientnet_style is True + # and crop_padding=0 + transform = dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + crop_padding=0) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + results_img = copy.deepcopy(results['img']) + + short_edge = min(*results['ori_shape'][:2]) + transform = dict(type='CenterCrop', crop_size=short_edge) + baseline_center_crop_module = build_from_cfg(transform, PIPELINES) + transform = dict(type='Resize', size=224) + baseline_resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = baseline_center_crop_module(results) + results = baseline_resize_module(results) + assert np.equal(results['img'], results_img).all() + + # test CenterCrop when size is tuple + transform = dict(type='CenterCrop', crop_size=(224, 224)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + + # test CenterCrop when crop_height != crop_width + transform = dict(type='CenterCrop', crop_size=(256, 224)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (256, 224, 3) + + # test CenterCrop when crop_size is equal to img.shape + img_height, img_width, _ = original_img.shape + transform = dict(type='CenterCrop', crop_size=(img_height, img_width)) + center_crop_module = 
build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height, img_width, 3) + + # test CenterCrop when crop_size is larger than img.shape + transform = dict( + type='CenterCrop', crop_size=(img_height * 2, img_width * 2)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height, img_width, 3) + + # test CenterCrop when crop_width is smaller than img_width + transform = dict(type='CenterCrop', crop_size=(img_height, img_width / 2)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height, img_width / 2, 3) + + # test CenterCrop when crop_height is smaller than img_height + transform = dict(type='CenterCrop', crop_size=(img_height / 2, img_width)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height / 2, img_width, 3) + + # compare results with torchvision + transform = dict(type='CenterCrop', crop_size=224) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + center_crop_module = transforms.CenterCrop(size=224) + pil_img = Image.fromarray(original_img) + cropped_img = center_crop_module(pil_img) + cropped_img = np.array(cropped_img) + assert np.equal(results['img'], results['img2']).all() + assert np.equal(results['img'], cropped_img).all() + + +def test_normalize(): + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + + # test repr + transform = dict(type='Normalize', **img_norm_cfg) + normalize_module = build_from_cfg(transform, PIPELINES) + assert isinstance(repr(normalize_module), str) + + # read data + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + norm_results = normalize_module(results) + assert np.equal(norm_results['img'], norm_results['img2']).all() + + # compare results with manual computation + mean = np.array(img_norm_cfg['mean']) + std = np.array(img_norm_cfg['std']) + normalized_img = (original_img[..., ::-1] - mean) / std + assert np.allclose(norm_results['img'], normalized_img) + + # compare results with torchvision + normalize_module = transforms.Normalize(mean=mean, std=std) + tensor_img = original_img[..., ::-1].copy() + tensor_img = torch.Tensor(tensor_img.transpose(2, 0, 1)) + normalized_img = normalize_module(tensor_img) + normalized_img = np.array(normalized_img).transpose(1, 2, 0) + assert np.equal(norm_results['img'], normalized_img).all() + + +def test_randomcrop(): + ori_img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + ori_img_pil = Image.open( + 
osp.join(osp.dirname(__file__), '../../data/color.jpg')) + seed = random.randint(0, 100) + + # test crop size is int + kwargs = dict(size=200, padding=0, pad_if_needed=True, fill=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict(size=200, padding=0, pad_if_needed=True, pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + + # test __repr__() + print(composed_transform) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (200, 200, 3) + assert np.array(baseline).shape == (200, 200, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size < image size + kwargs = dict(size=(200, 300), padding=0, pad_if_needed=True, fill=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict(size=(200, 300), padding=0, pad_if_needed=True, pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (200, 300, 3) + assert np.array(baseline).shape == (200, 300, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size > image size + kwargs = dict(size=(600, 700), padding=0, pad_if_needed=True, fill=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict(size=(600, 700), padding=0, pad_if_needed=True, pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (600, 700, 3) + assert np.array(baseline).shape == (600, 700, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size == image size + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + padding=0, + pad_if_needed=True, + fill=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + padding=0, + pad_if_needed=True, + pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + + assert np.array(img).shape == (img.shape[0], img.shape[1], 3) + 
assert np.array(baseline).shape == (img.shape[0], img.shape[1], 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + assert_array_equal(ori_img, img) + assert_array_equal(np.array(baseline), np.array(ori_img_pil)) + + # test different padding mode + for mode in ['constant', 'edge', 'reflect', 'symmetric']: + kwargs = dict(size=(500, 600), padding=0, pad_if_needed=True, fill=0) + kwargs['padding_mode'] = mode + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict( + size=(500, 600), padding=0, pad_if_needed=True, pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (500, 600, 3) + assert np.array(baseline).shape == (500, 600, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len( + (img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + +def test_randomresizedcrop(): + ori_img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + ori_img_pil = Image.open( + osp.join(osp.dirname(__file__), '../../data/color.jpg')) + + seed = random.randint(0, 100) + + # test when scale is not of kind (min, max) + with pytest.raises(ValueError): + kwargs = dict( + size=(200, 300), scale=(1.0, 0.08), ratio=(3. / 4., 4. / 3.)) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + composed_transform(results)['img'] + + # test when ratio is not of kind (min, max) + with pytest.raises(ValueError): + kwargs = dict( + size=(200, 300), scale=(0.08, 1.0), ratio=(4. / 3., 3. / 4.)) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + composed_transform(results)['img'] + + # test when efficientnet_style is True and crop_padding < 0 + with pytest.raises(AssertionError): + kwargs = dict(size=200, efficientnet_style=True, crop_padding=-1) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + composed_transform(results)['img'] + + # test crop size is int + kwargs = dict(size=200, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.))
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+    baseline = composed_transform(ori_img_pil)
+
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+
+    # test __repr__()
+    print(composed_transform)
+    results = dict()
+    results['img'] = ori_img
+    img = composed_transform(results)['img']
+    assert np.array(img).shape == (200, 200, 3)
+    assert np.array(baseline).shape == (200, 200, 3)
+    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
+    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
+    assert nonzero == nonzero_transform
+
+    # test crop size < image size
+    kwargs = dict(size=(200, 300), scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+    baseline = composed_transform(ori_img_pil)
+
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+    results = dict()
+    results['img'] = ori_img
+    img = composed_transform(results)['img']
+    assert np.array(img).shape == (200, 300, 3)
+    assert np.array(baseline).shape == (200, 300, 3)
+    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
+    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
+    assert nonzero == nonzero_transform
+
+    # test crop size < image size when efficientnet_style = True
+    kwargs = dict(
+        size=200,
+        scale=(0.08, 1.0),
+        ratio=(3. / 4., 4. / 3.),
+        efficientnet_style=True)
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+    results = dict()
+    results['img'] = ori_img
+    img = composed_transform(results)['img']
+    assert img.shape == (200, 200, 3)
+
+    # test crop size > image size
+    kwargs = dict(size=(600, 700), scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+    baseline = composed_transform(ori_img_pil)
+
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+    results = dict()
+    results['img'] = ori_img
+    img = composed_transform(results)['img']
+    assert np.array(img).shape == (600, 700, 3)
+    assert np.array(baseline).shape == (600, 700, 3)
+    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
+    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
+    assert nonzero == nonzero_transform
+
+    # test crop size > image size when efficientnet_style = True
+    kwargs = dict(
+        size=600,
+        scale=(0.08, 1.0),
+        ratio=(3. / 4., 4.
/ 3.), + efficientnet_style=True) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert img.shape == (600, 600, 3) + + # test cropping the whole image + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + scale=(1.0, 2.0), + ratio=(1.0, 2.0)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3) + assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + # assert_array_equal(ori_img, img) + # assert_array_equal(np.array(ori_img_pil), np.array(baseline)) + + # test central crop when in_ratio < min(ratio) + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + scale=(1.0, 2.0), + ratio=(2., 3.)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3) + assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test central crop when in_ratio > max(ratio) + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + scale=(1.0, 2.0), + ratio=(3. / 4., 1)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3) + assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test central crop when max_attempts = 0 and efficientnet_style = True + kwargs = dict( + size=200, + scale=(0.08, 1.0), + ratio=(3. / 4., 4. 
/ 3.),
+        efficientnet_style=True,
+        max_attempts=0,
+        crop_padding=32)
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+    results = dict()
+    results['img'] = ori_img
+    img = composed_transform(results)['img']
+
+    kwargs = dict(crop_size=200, efficientnet_style=True, crop_padding=32)
+    resize_kwargs = dict(size=200)
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([mmcls_transforms.CenterCrop(**kwargs)])
+    aug.extend([mmcls_transforms.Resize(**resize_kwargs)])
+    composed_transform = Compose(aug)
+    results = dict()
+    results['img'] = ori_img
+    baseline = composed_transform(results)['img']
+
+    assert img.shape == baseline.shape
+    assert np.equal(img, baseline).all()
+
+    # test central crop when min_covered = 1 and efficientnet_style = True
+    kwargs = dict(
+        size=200,
+        scale=(0.08, 1.0),
+        ratio=(3. / 4., 4. / 3.),
+        efficientnet_style=True,
+        max_attempts=100,
+        min_covered=1)
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
+    composed_transform = Compose(aug)
+    results = dict()
+    results['img'] = ori_img
+    img = composed_transform(results)['img']
+
+    kwargs = dict(crop_size=200, efficientnet_style=True, crop_padding=32)
+    resize_kwargs = dict(size=200)
+    random.seed(seed)
+    np.random.seed(seed)
+    aug = []
+    aug.extend([mmcls_transforms.CenterCrop(**kwargs)])
+    aug.extend([mmcls_transforms.Resize(**resize_kwargs)])
+    composed_transform = Compose(aug)
+    results = dict()
+    results['img'] = ori_img
+    baseline = composed_transform(results)['img']
+
+    assert img.shape == baseline.shape
+    assert np.equal(img, baseline).all()
+
+    # test different interpolation types
+    for mode in ['nearest', 'bilinear', 'bicubic', 'area', 'lanczos']:
+        kwargs = dict(
+            size=(600, 700),
+            scale=(0.08, 1.0),
+            ratio=(3. / 4., 4.
/ 3.), + interpolation=mode) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert img.shape == (600, 700, 3) + + +def test_randomgrayscale(): + + # test rgb2gray, return the grayscale image with p>1 + in_img = np.random.rand(10, 10, 3).astype(np.float32) + kwargs = dict(gray_prob=2) + + aug = [] + aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)]) + composed_transform = Compose(aug) + print(composed_transform) + results = dict() + results['img'] = in_img + img = composed_transform(results)['img'] + computed_gray = ( + in_img[:, :, 0] * 0.299 + in_img[:, :, 1] * 0.587 + + in_img[:, :, 2] * 0.114) + for i in range(img.shape[2]): + assert_array_almost_equal(img[:, :, i], computed_gray, decimal=4) + assert img.shape == (10, 10, 3) + + # test rgb2gray, return the original image with p=-1 + in_img = np.random.rand(10, 10, 3).astype(np.float32) + kwargs = dict(gray_prob=-1) + + aug = [] + aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = in_img + img = composed_transform(results)['img'] + assert_array_equal(img, in_img) + assert img.shape == (10, 10, 3) + + # test image with one channel with our method + # and the function from torchvision + in_img = np.random.rand(10, 10, 1).astype(np.float32) + kwargs = dict(gray_prob=2) + + aug = [] + aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = in_img + img = composed_transform(results)['img'] + assert_array_equal(img, in_img) + assert img.shape == (10, 10, 1) + + in_img_pil = Image.fromarray(in_img[:, :, 0], mode='L') + kwargs = dict(p=2) + aug = [] + aug.extend([torchvision.transforms.RandomGrayscale(**kwargs)]) + composed_transform = Compose(aug) + img_pil = composed_transform(in_img_pil) + assert_array_equal(np.array(img_pil), np.array(in_img_pil)) + assert np.array(img_pil).shape == (10, 10) + + +def test_randomflip(): + # test assertion if flip probability is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', flip_prob=-1) + build_from_cfg(transform, PIPELINES) + + # test assertion if flip probability is larger than 1 + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', flip_prob=2) + build_from_cfg(transform, PIPELINES) + + # test assertion if direction is not horizontal and vertical + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', direction='random') + build_from_cfg(transform, PIPELINES) + + # test assertion if direction is not lowercase + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', direction='Horizontal') + build_from_cfg(transform, PIPELINES) + + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + return results + + # test RandomFlip when flip_prob is 0 + transform = dict(type='RandomFlip', flip_prob=0) + 
flip_module = build_from_cfg(transform, PIPELINES) + results = flip_module(results) + assert np.equal(results['img'], original_img).all() + assert np.equal(results['img'], results['img2']).all() + + # test RandomFlip when flip_prob is 1 + transform = dict(type='RandomFlip', flip_prob=1) + flip_module = build_from_cfg(transform, PIPELINES) + results = flip_module(results) + assert np.equal(results['img'], results['img2']).all() + + # compare horizontal flip with torchvision + transform = dict(type='RandomFlip', flip_prob=1, direction='horizontal') + flip_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = flip_module(results) + flip_module = transforms.RandomHorizontalFlip(p=1) + pil_img = Image.fromarray(original_img) + flipped_img = flip_module(pil_img) + flipped_img = np.array(flipped_img) + assert np.equal(results['img'], results['img2']).all() + assert np.equal(results['img'], flipped_img).all() + + # compare vertical flip with torchvision + transform = dict(type='RandomFlip', flip_prob=1, direction='vertical') + flip_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = flip_module(results) + flip_module = transforms.RandomVerticalFlip(p=1) + pil_img = Image.fromarray(original_img) + flipped_img = flip_module(pil_img) + flipped_img = np.array(flipped_img) + assert np.equal(results['img'], results['img2']).all() + assert np.equal(results['img'], flipped_img).all() + + +def test_random_erasing(): + # test erase_prob assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', erase_prob=-1.) + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', erase_prob=1) + build_from_cfg(cfg, PIPELINES) + + # test area_ratio assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', min_area_ratio=-1.) + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', max_area_ratio=1) + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + # min_area_ratio should be smaller than max_area_ratio + cfg = dict( + type='RandomErasing', min_area_ratio=0.6, max_area_ratio=0.4) + build_from_cfg(cfg, PIPELINES) + + # test aspect_range assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', aspect_range='str') + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', aspect_range=-1) + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + # In aspect_range (min, max), min should be smaller than max. + cfg = dict(type='RandomErasing', aspect_range=[1.6, 0.6]) + build_from_cfg(cfg, PIPELINES) + + # test mode assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', mode='unknown') + build_from_cfg(cfg, PIPELINES) + + # test fill_std assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', fill_std='unknown') + build_from_cfg(cfg, PIPELINES) + + # test implicit conversion of aspect_range + cfg = dict(type='RandomErasing', aspect_range=0.5) + random_erasing = build_from_cfg(cfg, PIPELINES) + assert random_erasing.aspect_range == (0.5, 2.) + + cfg = dict(type='RandomErasing', aspect_range=2.) + random_erasing = build_from_cfg(cfg, PIPELINES) + assert random_erasing.aspect_range == (0.5, 2.) 
+
+    # test implicit conversion of fill_color
+    cfg = dict(type='RandomErasing', fill_color=15)
+    random_erasing = build_from_cfg(cfg, PIPELINES)
+    assert random_erasing.fill_color == [15, 15, 15]
+
+    # test implicit conversion of fill_std
+    cfg = dict(type='RandomErasing', fill_std=0.5)
+    random_erasing = build_from_cfg(cfg, PIPELINES)
+    assert random_erasing.fill_std == [0.5, 0.5, 0.5]
+
+    # test when erase_prob=0.
+    results = construct_toy_data()
+    cfg = dict(
+        type='RandomErasing',
+        erase_prob=0.,
+        mode='const',
+        fill_color=(255, 255, 255))
+    random_erasing = build_from_cfg(cfg, PIPELINES)
+    results = random_erasing(results)
+    np.testing.assert_array_equal(results['img'], results['ori_img'])
+
+    # test mode 'const'
+    random.seed(0)
+    np.random.seed(0)
+    results = construct_toy_data()
+    cfg = dict(
+        type='RandomErasing',
+        erase_prob=1.,
+        mode='const',
+        fill_color=(255, 255, 255))
+    random_erasing = build_from_cfg(cfg, PIPELINES)
+    results = random_erasing(results)
+
+    expect_out = np.array([[1, 255, 3, 4], [5, 255, 7, 8], [9, 10, 11, 12]],
+                          dtype=np.uint8)
+    expect_out = np.stack([expect_out] * 3, axis=-1)
+    np.testing.assert_array_equal(results['img'], expect_out)
+
+    # test mode 'rand' with uniform distribution (no fill_std given)
+    random.seed(0)
+    np.random.seed(0)
+    results = construct_toy_data()
+    cfg = dict(type='RandomErasing', erase_prob=1., mode='rand')
+    random_erasing = build_from_cfg(cfg, PIPELINES)
+    results = random_erasing(results)
+
+    expect_out = results['ori_img']
+    expect_out[:2, 1] = [[159, 98, 76], [14, 69, 122]]
+    np.testing.assert_array_equal(results['img'], expect_out)
+
+    # test mode 'rand' with normal distribution (fill_std given)
+    random.seed(0)
+    np.random.seed(0)
+    results = construct_toy_data()
+    cfg = dict(
+        type='RandomErasing',
+        erase_prob=1.,
+        mode='rand',
+        fill_std=(10, 255, 0))
+    random_erasing = build_from_cfg(cfg, PIPELINES)
+    results = random_erasing(results)
+
+    expect_out = results['ori_img']
+    expect_out[:2, 1] = [[113, 255, 128], [126, 83, 128]]
+    np.testing.assert_array_equal(results['img'], expect_out)
+
+
+def test_color_jitter():
+    # read test image
+    results = dict()
+    img = mmcv.imread(
+        osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
+    original_img = copy.deepcopy(img)
+    results['img'] = img
+    results['img2'] = copy.deepcopy(img)
+    results['img_shape'] = img.shape
+    results['ori_shape'] = img.shape
+    results['img_fields'] = ['img', 'img2']
+
+    def reset_results(results, original_img):
+        results['img'] = copy.deepcopy(original_img)
+        results['img2'] = copy.deepcopy(original_img)
+        results['img_shape'] = original_img.shape
+        results['ori_shape'] = original_img.shape
+        return results
+
+    transform = dict(
+        type='ColorJitter', brightness=0., contrast=0., saturation=0.)
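+    # with brightness, contrast and saturation all set to 0., ColorJitter is
+    # expected to be a no-op, so the output is compared against original_img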
+ colorjitter_module = build_from_cfg(transform, PIPELINES) + results = colorjitter_module(results) + assert np.equal(results['img'], original_img).all() + assert np.equal(results['img'], results['img2']).all() + + results = reset_results(results, original_img) + transform = dict( + type='ColorJitter', brightness=0.3, contrast=0.3, saturation=0.3) + colorjitter_module = build_from_cfg(transform, PIPELINES) + results = colorjitter_module(results) + assert not np.equal(results['img'], original_img).all() + + +def test_lighting(): + # test assertion if eigval or eigvec is wrong type or length + with pytest.raises(AssertionError): + transform = dict(type='Lighting', eigval=1, eigvec=[[1, 0, 0]]) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict(type='Lighting', eigval=[1], eigvec=[1, 0, 0]) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='Lighting', eigval=[1, 2], eigvec=[[1, 0, 0], [0, 1]]) + build_from_cfg(transform, PIPELINES) + + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + return results + + eigval = [0.2175, 0.0188, 0.0045] + eigvec = [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]] + transform = dict(type='Lighting', eigval=eigval, eigvec=eigvec) + lightening_module = build_from_cfg(transform, PIPELINES) + results = lightening_module(results) + assert not np.equal(results['img'], results['img2']).all() + assert results['img'].dtype == float + assert results['img2'].dtype == float + + results = reset_results(results, original_img) + transform = dict( + type='Lighting', + eigval=eigval, + eigvec=eigvec, + alphastd=0., + to_rgb=False) + lightening_module = build_from_cfg(transform, PIPELINES) + results = lightening_module(results) + assert np.equal(results['img'], original_img).all() + assert np.equal(results['img'], results['img2']).all() + assert results['img'].dtype == float + assert results['img2'].dtype == float + + +def test_albu_transform(): + results = dict( + img_prefix=osp.join(osp.dirname(__file__), '../../data'), + img_info=dict(filename='color.jpg'), + gt_label=np.array(1)) + + # Define simple pipeline + load = dict(type='LoadImageFromFile') + load = build_from_cfg(load, PIPELINES) + + albu_transform = dict( + type='Albu', + transforms=[ + dict(type='ChannelShuffle', p=1), + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=1) + ]) + albu_transform = build_from_cfg(albu_transform, PIPELINES) + + normalize = dict(type='Normalize', mean=[0] * 3, std=[0] * 3, to_rgb=True) + normalize = build_from_cfg(normalize, PIPELINES) + + # Execute transforms + results = load(results) + results = albu_transform(results) + results = normalize(results) + + assert results['img'].dtype == np.float32 + assert results['gt_label'].shape == np.array(1).shape diff --git a/tests/test_downstream/test_mmdet_inference.py b/tests/test_downstream/test_mmdet_inference.py new file mode 100644 
index 0000000..096c5db --- /dev/null +++ b/tests/test_downstream/test_mmdet_inference.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmcv import Config +from mmdet.apis import inference_detector +from mmdet.models import build_detector + +from mmcls.models import (MobileNetV2, MobileNetV3, RegNet, ResNeSt, ResNet, + ResNeXt, SEResNet, SEResNeXt, SwinTransformer, + TIMMBackbone) +from mmcls.models.backbones.timm_backbone import timm + +backbone_configs = dict( + mobilenetv2=dict( + backbone=dict( + type='mmcls.MobileNetV2', + widen_factor=1.0, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=(4, 7)), + out_channels=[96, 1280]), + mobilenetv3=dict( + backbone=dict( + type='mmcls.MobileNetV3', + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=range(7, 12)), + out_channels=[48, 48, 96, 96, 96]), + regnet=dict( + backbone=dict(type='mmcls.RegNet', arch='regnetx_400mf'), + out_channels=384), + resnext=dict( + backbone=dict( + type='mmcls.ResNeXt', depth=50, groups=32, width_per_group=4), + out_channels=2048), + resnet=dict( + backbone=dict(type='mmcls.ResNet', depth=50), out_channels=2048), + seresnet=dict( + backbone=dict(type='mmcls.SEResNet', depth=50), out_channels=2048), + seresnext=dict( + backbone=dict( + type='mmcls.SEResNeXt', depth=50, groups=32, width_per_group=4), + out_channels=2048), + resnest=dict( + backbone=dict( + type='mmcls.ResNeSt', + depth=50, + radix=2, + reduction_factor=4, + out_indices=(0, 1, 2, 3)), + out_channels=[256, 512, 1024, 2048]), + swin=dict( + backbone=dict( + type='mmcls.SwinTransformer', + arch='small', + drop_path_rate=0.2, + img_size=800, + out_indices=(2, 3)), + out_channels=[384, 768]), + timm_efficientnet=dict( + backbone=dict( + type='mmcls.TIMMBackbone', + model_name='efficientnet_b1', + features_only=True, + pretrained=False, + out_indices=(1, 2, 3, 4)), + out_channels=[24, 40, 112, 320]), + timm_resnet=dict( + backbone=dict( + type='mmcls.TIMMBackbone', + model_name='resnet50', + features_only=True, + pretrained=False, + out_indices=(1, 2, 3, 4)), + out_channels=[256, 512, 1024, 2048])) + +module_mapping = { + 'mobilenetv2': MobileNetV2, + 'mobilenetv3': MobileNetV3, + 'regnet': RegNet, + 'resnext': ResNeXt, + 'resnet': ResNet, + 'seresnext': SEResNeXt, + 'seresnet': SEResNet, + 'resnest': ResNeSt, + 'swin': SwinTransformer, + 'timm_efficientnet': TIMMBackbone, + 'timm_resnet': TIMMBackbone +} + + +def test_mmdet_inference(): + config_path = './tests/data/retinanet.py' + rng = np.random.RandomState(0) + img1 = rng.rand(100, 100, 3) + + for module_name, backbone_config in backbone_configs.items(): + module = module_mapping[module_name] + if module is TIMMBackbone and timm is None: + print(f'skip {module_name} because timm is not available') + continue + print(f'test {module_name}') + config = Config.fromfile(config_path) + config.model.backbone = backbone_config['backbone'] + out_channels = backbone_config['out_channels'] + if isinstance(out_channels, int): + config.model.neck = None + config.model.bbox_head.in_channels = out_channels + anchor_generator = config.model.bbox_head.anchor_generator + anchor_generator.strides = anchor_generator.strides[:1] + else: + config.model.neck.in_channels = out_channels + + model = build_detector(config.model) + assert isinstance(model.backbone, module) + + model.cfg = config + + model.eval() + result = inference_detector(model, img1) + assert len(result) == config.num_classes diff --git 
a/tests/test_metrics/test_losses.py b/tests/test_metrics/test_losses.py new file mode 100644 index 0000000..74eec62 --- /dev/null +++ b/tests/test_metrics/test_losses.py @@ -0,0 +1,362 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmcls.models import build_loss + + +def test_asymmetric_loss(): + # test asymmetric_loss + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([[1, 0, 1], [0, 1, 0]]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=1.0, + gamma_neg=4.0, + clip=0.05, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(3.80845 / 3)) + + # test asymmetric_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(3.80845 / 6)) + + # test asymmetric_loss without clip + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=1.0, + gamma_neg=4.0, + clip=None, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(5.1186 / 3)) + + # test asymmetric_loss with softmax for single label task + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([0, 1]) + weight = torch.tensor([0.5, 0.5]) + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=0.0, + gamma_neg=0.0, + clip=None, + reduction='mean', + loss_weight=1.0, + use_sigmoid=False, + eps=1e-8) + loss = build_loss(loss_cfg) + # test asymmetric_loss for single label task without weight + assert torch.allclose(loss(cls_score, label), torch.tensor(2.5045)) + # test asymmetric_loss for single label task with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(2.5045 * 0.5)) + + # test soft asymmetric_loss with softmax + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([[1, 0, 0], [0, 1, 0]]) + weight = torch.tensor([0.5, 0.5]) + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=0.0, + gamma_neg=0.0, + clip=None, + reduction='mean', + loss_weight=1.0, + use_sigmoid=False, + eps=1e-8) + loss = build_loss(loss_cfg) + # test soft asymmetric_loss with softmax without weight + assert torch.allclose(loss(cls_score, label), torch.tensor(2.5045)) + # test soft asymmetric_loss with softmax with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(2.5045 * 0.5)) + + +def test_cross_entropy_loss(): + with pytest.raises(AssertionError): + # use_sigmoid and use_soft could not be set simultaneously + loss_cfg = dict( + type='CrossEntropyLoss', use_sigmoid=True, use_soft=True) + loss = build_loss(loss_cfg) + + # test ce_loss + cls_score = torch.Tensor([[-1000, 1000], [100, -100]]) + label = torch.Tensor([0, 1]).long() + class_weight = [0.3, 0.7] # class 0 : 0.3, class 1 : 0.7 + weight = torch.tensor([0.6, 0.4]) + + # test ce_loss without class weight + loss_cfg = dict(type='CrossEntropyLoss', reduction='mean', loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(1100.)) + # test ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(640.)) + + # test ce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(370.)) + # test ce_loss with weight + assert torch.allclose( + 
loss(cls_score, label, weight=weight), torch.tensor(208.)) + + # test bce_loss + cls_score = torch.Tensor([[-200, 100], [500, -1000], [300, -300]]) + label = torch.Tensor([[1, 0], [0, 1], [1, 0]]) + weight = torch.Tensor([0.6, 0.4, 0.5]) + class_weight = [0.1, 0.9] # class 0: 0.1, class 1: 0.9 + pos_weight = [0.1, 0.2] + + # test bce_loss without class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(300.)) + # test ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(130.)) + + # test bce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(176.667)) + # test bce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(74.333)) + + # test bce loss with pos_weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0, + pos_weight=pos_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(136.6667)) + + # test soft_ce_loss + cls_score = torch.Tensor([[-1000, 1000], [100, -100]]) + label = torch.Tensor([[1.0, 0.0], [0.0, 1.0]]) + class_weight = [0.3, 0.7] # class 0 : 0.3, class 1 : 0.7 + weight = torch.tensor([0.6, 0.4]) + + # test soft_ce_loss without class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_soft=True, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(1100.)) + # test soft_ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(640.)) + + # test soft_ce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_soft=True, + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(370.)) + # test soft_ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(208.)) + + +def test_focal_loss(): + # test focal_loss + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([[1, 0, 1], [0, 1, 0]]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='FocalLoss', + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(0.8522)) + # test focal_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(0.8522 / 2)) + # test focal loss for single label task + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([0, 1]) + weight = torch.tensor([0.5, 0.5]) + assert torch.allclose(loss(cls_score, label), torch.tensor(0.86664125)) + # test focal_loss single label with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(0.86664125 / 2)) + + +def test_label_smooth_loss(): + # test label_smooth_val assertion + with pytest.raises(AssertionError): + loss_cfg = dict(type='LabelSmoothLoss', label_smooth_val=1.0) + build_loss(loss_cfg) + + with pytest.raises(AssertionError): + loss_cfg = dict(type='LabelSmoothLoss', label_smooth_val='str') + build_loss(loss_cfg) + + # test 
reduction assertion + with pytest.raises(AssertionError): + loss_cfg = dict( + type='LabelSmoothLoss', label_smooth_val=0.1, reduction='unknown') + build_loss(loss_cfg) + + # test mode assertion + with pytest.raises(AssertionError): + loss_cfg = dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='unknown') + build_loss(loss_cfg) + + # test original mode label smooth loss + cls_score = torch.tensor([[1., -1.]]) + label = torch.tensor([0]) + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + correct = 0.2269 # from timm + assert loss(cls_score, label) - correct <= 0.0001 + + # test classy_vision mode label smooth loss + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='classy_vision', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + correct = 0.2178 # from ClassyVision + assert loss(cls_score, label) - correct <= 0.0001 + + # test multi_label mode label smooth loss + cls_score = torch.tensor([[1., -1., 1]]) + label = torch.tensor([[1, 0, 1]]) + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='multi_label', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + smooth_label = torch.tensor([[0.9, 0.1, 0.9]]) + correct = torch.binary_cross_entropy_with_logits(cls_score, + smooth_label).mean() + assert torch.allclose(loss(cls_score, label), correct) + + # test label linear combination smooth loss + cls_score = torch.tensor([[1., -1., 0.]]) + label1 = torch.tensor([[1., 0., 0.]]) + label2 = torch.tensor([[0., 0., 1.]]) + label_mix = label1 * 0.6 + label2 * 0.4 + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + reduction='mean', + num_classes=3, + loss_weight=1.0) + loss = build_loss(loss_cfg) + smooth_label1 = loss.original_smooth_label(label1) + smooth_label2 = loss.original_smooth_label(label2) + label_smooth_mix = smooth_label1 * 0.6 + smooth_label2 * 0.4 + correct = (-torch.log_softmax(cls_score, -1) * label_smooth_mix).sum() + + assert loss(cls_score, label_mix) - correct <= 0.0001 + + # test label smooth loss with weight + cls_score = torch.tensor([[1., -1.], [1., -1.]]) + label = torch.tensor([0, 1]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='LabelSmoothLoss', + reduction='mean', + label_smooth_val=0.1, + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose( + loss(cls_score, label, weight=weight), + loss(cls_score, label) / 2) + + +# migrate from mmdetection with modifications +def test_seesaw_loss(): + # only softmax version of Seesaw Loss is implemented + with pytest.raises(AssertionError): + loss_cfg = dict(type='SeesawLoss', use_sigmoid=True, loss_weight=1.0) + build_loss(loss_cfg) + + # test that cls_score.size(-1) == num_classes + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + # the length of fake_pred should be num_classe = 4 + with pytest.raises(AssertionError): + fake_pred = torch.Tensor([[-100, 100, -100]]) + fake_label = torch.Tensor([1]).long() + loss_cls(fake_pred, fake_label) + # the length of fake_pred should be num_classes + 2 = 4 + with pytest.raises(AssertionError): + fake_pred = torch.Tensor([[-100, 100, -100, 100]]) + fake_label = torch.Tensor([1]).long() + loss_cls(fake_pred, fake_label) + + # test the calculation without p and q + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, 
num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([1]).long() + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(0.)) + + # test the calculation with p and without q + loss_cls_cfg = dict( + type='SeesawLoss', p=1.0, q=0.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([0]).long() + loss_cls.cum_samples[0] = torch.exp(torch.Tensor([20])) + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(180.)) + + # test the calculation with q and without p + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=1.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([0]).long() + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(200.) + torch.tensor(100.).log()) diff --git a/tests/test_metrics/test_metrics.py b/tests/test_metrics/test_metrics.py new file mode 100644 index 0000000..67acb09 --- /dev/null +++ b/tests/test_metrics/test_metrics.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial + +import pytest +import torch + +from mmcls.core import average_performance, mAP +from mmcls.models.losses.accuracy import Accuracy, accuracy_numpy + + +def test_mAP(): + target = torch.Tensor([[1, 1, 0, -1], [1, 1, 0, -1], [0, -1, 1, -1], + [0, 1, 0, -1]]) + pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2], [0.1, 0.2, 0.2, 0.1], + [0.7, 0.5, 0.9, 0.3], [0.8, 0.1, 0.1, 0.2]]) + + # target and pred should both be np.ndarray or torch.Tensor + with pytest.raises(TypeError): + target_list = target.tolist() + _ = mAP(pred, target_list) + + # target and pred should be in the same shape + with pytest.raises(AssertionError): + target_shorter = target[:-1] + _ = mAP(pred, target_shorter) + + assert mAP(pred, target) == pytest.approx(68.75, rel=1e-2) + + target_no_difficult = torch.Tensor([[1, 1, 0, 0], [0, 1, 0, 0], + [0, 0, 1, 0], [1, 0, 0, 0]]) + assert mAP(pred, target_no_difficult) == pytest.approx(70.83, rel=1e-2) + + +def test_average_performance(): + target = torch.Tensor([[1, 1, 0, -1], [1, 1, 0, -1], [0, -1, 1, -1], + [0, 1, 0, -1], [0, 1, 0, -1]]) + pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2], [0.1, 0.2, 0.2, 0.1], + [0.7, 0.5, 0.9, 0.3], [0.8, 0.1, 0.1, 0.2], + [0.8, 0.1, 0.1, 0.2]]) + + # target and pred should both be np.ndarray or torch.Tensor + with pytest.raises(TypeError): + target_list = target.tolist() + _ = average_performance(pred, target_list) + + # target and pred should be in the same shape + with pytest.raises(AssertionError): + target_shorter = target[:-1] + _ = average_performance(pred, target_shorter) + + assert average_performance(pred, target) == average_performance( + pred, target, thr=0.5) + assert average_performance(pred, target, thr=0.5, k=2) \ + == average_performance(pred, target, thr=0.5) + assert average_performance( + pred, target, thr=0.3) == pytest.approx( + (31.25, 43.75, 36.46, 33.33, 42.86, 37.50), rel=1e-2) + assert average_performance( + pred, target, k=2) == pytest.approx( + (43.75, 50.00, 46.67, 40.00, 57.14, 47.06), rel=1e-2) + + +def test_accuracy(): + pred_tensor = torch.tensor([[0.1, 0.2, 0.4], [0.2, 0.5, 0.3], + [0.4, 0.3, 0.1], [0.8, 0.9, 0.0]]) + target_tensor = torch.tensor([2, 0, 0, 0]) + pred_array = pred_tensor.numpy() + target_array = target_tensor.numpy() + + acc_top1 = 50. 
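+    # pred argmax is [2, 1, 0, 1] against targets [2, 0, 0, 0]: 2 of the 4
+    # samples are correct at top-1, and 3 of 4 fall within the top-2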
+ acc_top2 = 75. + + compute_acc = Accuracy(topk=1) + assert compute_acc(pred_tensor, target_tensor) == acc_top1 + assert compute_acc(pred_array, target_array) == acc_top1 + + compute_acc = Accuracy(topk=(1, )) + assert compute_acc(pred_tensor, target_tensor)[0] == acc_top1 + assert compute_acc(pred_array, target_array)[0] == acc_top1 + + compute_acc = Accuracy(topk=(1, 2)) + assert compute_acc(pred_tensor, target_array)[0] == acc_top1 + assert compute_acc(pred_tensor, target_tensor)[1] == acc_top2 + assert compute_acc(pred_array, target_array)[0] == acc_top1 + assert compute_acc(pred_array, target_array)[1] == acc_top2 + + with pytest.raises(AssertionError): + compute_acc(pred_tensor, 'other_type') + + # test accuracy_numpy + compute_acc = partial(accuracy_numpy, topk=(1, 2)) + assert compute_acc(pred_array, target_array)[0] == acc_top1 + assert compute_acc(pred_array, target_array)[1] == acc_top2 diff --git a/tests/test_metrics/test_utils.py b/tests/test_metrics/test_utils.py new file mode 100644 index 0000000..962a1f8 --- /dev/null +++ b/tests/test_metrics/test_utils.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmcls.models.losses.utils import convert_to_one_hot + + +def ori_convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor: + assert (torch.max(targets).item() < + classes), 'Class Index must be less than number of classes' + one_hot_targets = torch.zeros((targets.shape[0], classes), + dtype=torch.long, + device=targets.device) + one_hot_targets.scatter_(1, targets.long(), 1) + return one_hot_targets + + +def test_convert_to_one_hot(): + # label should smaller than classes + targets = torch.tensor([1, 2, 3, 8, 5]) + classes = 5 + with pytest.raises(AssertionError): + _ = convert_to_one_hot(targets, classes) + + # test with original impl + classes = 10 + targets = torch.randint(high=classes, size=(10, 1)) + ori_one_hot_targets = torch.zeros((targets.shape[0], classes), + dtype=torch.long, + device=targets.device) + ori_one_hot_targets.scatter_(1, targets.long(), 1) + one_hot_targets = convert_to_one_hot(targets, classes) + assert torch.equal(ori_one_hot_targets, one_hot_targets) + + +# test cuda version +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='requires CUDA support') +def test_convert_to_one_hot_cuda(): + # test with original impl + classes = 10 + targets = torch.randint(high=classes, size=(10, 1)).cuda() + ori_one_hot_targets = torch.zeros((targets.shape[0], classes), + dtype=torch.long, + device=targets.device) + ori_one_hot_targets.scatter_(1, targets.long(), 1) + one_hot_targets = convert_to_one_hot(targets, classes) + assert torch.equal(ori_one_hot_targets, one_hot_targets) + assert ori_one_hot_targets.device == one_hot_targets.device diff --git a/tests/test_models/test_backbones/__init__.py b/tests/test_models/test_backbones/__init__.py new file mode 100644 index 0000000..ef101fe --- /dev/null +++ b/tests/test_models/test_backbones/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/tests/test_models/test_backbones/test_conformer.py b/tests/test_models/test_backbones/test_conformer.py new file mode 100644 index 0000000..317079a --- /dev/null +++ b/tests/test_models/test_backbones/test_conformer.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
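+# Tests for the Conformer backbone. Each selected stage returns a
+# (conv_feature, transformer_feature) pair: the conv channels start at
+# base_channels * channel_ratio and double at every later stage, while the
+# transformer feature width equals the arch's embed_dims.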
+from copy import deepcopy + +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import Conformer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_conformer_backbone(): + + cfg_ori = dict( + arch='T', + drop_path_rate=0.1, + ) + + with pytest.raises(AssertionError): + # test invalid arch + cfg = deepcopy(cfg_ori) + cfg['arch'] = 'unknown' + Conformer(**cfg) + + with pytest.raises(AssertionError): + # test arch without essential keys + cfg = deepcopy(cfg_ori) + cfg['arch'] = {'embed_dims': 24, 'channel_ratio': 6, 'num_heads': 9} + Conformer(**cfg) + + # Test Conformer small model with patch size of 16 + model = Conformer(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(3, 3, 224, 224) + conv_feature, transformer_feature = model(imgs)[-1] + assert conv_feature.shape == (3, 64 * 1 * 4 + ) # base_channels * channel_ratio * 4 + assert transformer_feature.shape == (3, 384) + + # Test Conformer with irregular input size. + model = Conformer(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(3, 3, 241, 241) + conv_feature, transformer_feature = model(imgs)[-1] + assert conv_feature.shape == (3, 64 * 1 * 4 + ) # base_channels * channel_ratio * 4 + assert transformer_feature.shape == (3, 384) + + imgs = torch.randn(3, 3, 321, 221) + conv_feature, transformer_feature = model(imgs)[-1] + assert conv_feature.shape == (3, 64 * 1 * 4 + ) # base_channels * channel_ratio * 4 + assert transformer_feature.shape == (3, 384) + + # Test custom arch Conformer without output cls token + cfg = deepcopy(cfg_ori) + cfg['arch'] = { + 'embed_dims': 128, + 'depths': 15, + 'num_heads': 16, + 'channel_ratio': 3, + } + cfg['with_cls_token'] = False + cfg['base_channels'] = 32 + model = Conformer(**cfg) + conv_feature, transformer_feature = model(imgs)[-1] + assert conv_feature.shape == (3, 32 * 3 * 4) + assert transformer_feature.shape == (3, 128) + + # Test Conformer with multi out indices + cfg = deepcopy(cfg_ori) + cfg['out_indices'] = [4, 8, 12] + model = Conformer(**cfg) + outs = model(imgs) + assert len(outs) == 3 + # stage 1 + conv_feature, transformer_feature = outs[0] + assert conv_feature.shape == (3, 64 * 1) + assert transformer_feature.shape == (3, 384) + # stage 2 + conv_feature, transformer_feature = outs[1] + assert conv_feature.shape == (3, 64 * 1 * 2) + assert transformer_feature.shape == (3, 384) + # stage 3 + conv_feature, transformer_feature = outs[2] + assert conv_feature.shape == (3, 64 * 1 * 4) + assert transformer_feature.shape == (3, 384) diff --git a/tests/test_models/test_backbones/test_convmixer.py b/tests/test_models/test_backbones/test_convmixer.py new file mode 100644 index 0000000..7d2219e --- /dev/null +++ b/tests/test_models/test_backbones/test_convmixer.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
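+# Tests for the ConvMixer backbone. An arch string encodes
+# 'embed_dims/depth', so '768/32' builds a 768-dim, 32-block model whose
+# patch embedding reduces 224x224 inputs to 32x32 feature maps.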
+import pytest +import torch + +from mmcls.models.backbones import ConvMixer + + +def test_assertion(): + with pytest.raises(AssertionError): + ConvMixer(arch='unknown') + + with pytest.raises(AssertionError): + # ConvMixer arch dict should include essential_keys, + ConvMixer(arch=dict(channels=[2, 3, 4, 5])) + + with pytest.raises(AssertionError): + # ConvMixer out_indices should be valid depth. + ConvMixer(out_indices=-100) + + +def test_convmixer(): + + # Test forward + model = ConvMixer(arch='768/32') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 768, 32, 32]) + + # Test forward with multiple outputs + model = ConvMixer(arch='768/32', out_indices=range(32)) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 32 + for f in feat: + assert f.shape == torch.Size([1, 768, 32, 32]) + + # Test with custom arch + model = ConvMixer( + arch={ + 'embed_dims': 99, + 'depth': 5, + 'patch_size': 5, + 'kernel_size': 9 + }, + out_indices=range(5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + for f in feat: + assert f.shape == torch.Size([1, 99, 44, 44]) + + # Test with even kernel size arch + model = ConvMixer(arch={ + 'embed_dims': 99, + 'depth': 5, + 'patch_size': 5, + 'kernel_size': 8 + }) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 99, 44, 44]) + + # Test frozen_stages + model = ConvMixer(arch='768/32', frozen_stages=10) + model.init_weights() + model.train() + + for i in range(10): + assert not model.stages[i].training + + for i in range(10, 32): + assert model.stages[i].training diff --git a/tests/test_models/test_backbones/test_convnext.py b/tests/test_models/test_backbones/test_convnext.py new file mode 100644 index 0000000..ccd002d --- /dev/null +++ b/tests/test_models/test_backbones/test_convnext.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
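+# Tests for the ConvNeXt backbone. With the default gap_before_final_norm
+# behaviour each selected stage is global-average-pooled to a (N, C) vector;
+# with gap_before_final_norm=False the raw (N, C, H, W) maps are returned.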
+import pytest +import torch + +from mmcls.models.backbones import ConvNeXt + + +def test_assertion(): + with pytest.raises(AssertionError): + ConvNeXt(arch='unknown') + + with pytest.raises(AssertionError): + # ConvNeXt arch dict should include 'embed_dims', + ConvNeXt(arch=dict(channels=[2, 3, 4, 5])) + + with pytest.raises(AssertionError): + # ConvNeXt arch dict should include 'embed_dims', + ConvNeXt(arch=dict(depths=[2, 3, 4], channels=[2, 3, 4, 5])) + + +def test_convnext(): + + # Test forward + model = ConvNeXt(arch='tiny', out_indices=-1) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 768]) + + # Test forward with multiple outputs + model = ConvNeXt(arch='small', out_indices=(0, 1, 2, 3)) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 96]) + assert feat[1].shape == torch.Size([1, 192]) + assert feat[2].shape == torch.Size([1, 384]) + assert feat[3].shape == torch.Size([1, 768]) + + # Test with custom arch + model = ConvNeXt( + arch={ + 'depths': [2, 3, 4, 5, 6], + 'channels': [16, 32, 64, 128, 256] + }, + out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size([1, 16]) + assert feat[1].shape == torch.Size([1, 32]) + assert feat[2].shape == torch.Size([1, 64]) + assert feat[3].shape == torch.Size([1, 128]) + assert feat[4].shape == torch.Size([1, 256]) + + # Test without gap before final norm + model = ConvNeXt( + arch='small', out_indices=(0, 1, 2, 3), gap_before_final_norm=False) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 96, 56, 56]) + assert feat[1].shape == torch.Size([1, 192, 28, 28]) + assert feat[2].shape == torch.Size([1, 384, 14, 14]) + assert feat[3].shape == torch.Size([1, 768, 7, 7]) + + # Test frozen_stages + model = ConvNeXt(arch='small', out_indices=(0, 1, 2, 3), frozen_stages=2) + model.init_weights() + model.train() + + for i in range(2): + assert not model.downsample_layers[i].training + assert not model.stages[i].training + + for i in range(2, 4): + assert model.downsample_layers[i].training + assert model.stages[i].training + + # Test Activation Checkpointing + model = ConvNeXt(arch='tiny', out_indices=-1, with_cp=True) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 768]) diff --git a/tests/test_models/test_backbones/test_cspnet.py b/tests/test_models/test_backbones/test_cspnet.py new file mode 100644 index 0000000..ef76264 --- /dev/null +++ b/tests/test_models/test_backbones/test_cspnet.py @@ -0,0 +1,147 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
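+"""Unit tests for the CSPNet family of backbones.
+
+``TestCSPNet`` builds the generic ``CSPNet`` from an explicit ``arch`` dict
+(mixing Darknet, ResNet and ResNeXt bottlenecks) and a custom ``stem_fn``.
+``TestCSPDarkNet`` checks arch validation, forward shapes and frozen stages
+for the depth-53 setting, and ``TestCSPResNet``/``TestCSPResNeXt`` reuse the
+same cases with their own expected channels (``TestCSPResNet`` also adds a
+deep-stem check).
+"""
+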
+from copy import deepcopy +from functools import partial +from unittest import TestCase + +import torch +from mmcv.cnn import ConvModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import CSPDarkNet, CSPResNet, CSPResNeXt +from mmcls.models.backbones.cspnet import (CSPNet, DarknetBottleneck, + ResNetBottleneck, ResNeXtBottleneck) + + +class TestCSPNet(TestCase): + + def setUp(self): + self.arch = dict( + block_fn=(DarknetBottleneck, ResNetBottleneck, ResNeXtBottleneck), + in_channels=(32, 64, 128), + out_channels=(64, 128, 256), + num_blocks=(1, 2, 8), + expand_ratio=(2, 1, 1), + bottle_ratio=(3, 1, 1), + has_downsampler=True, + down_growth=True, + block_args=({}, {}, dict(base_channels=32))) + self.stem_fn = partial(torch.nn.Conv2d, out_channels=32, kernel_size=3) + + def test_structure(self): + # Test with attribute arch_setting. + model = CSPNet(arch=self.arch, stem_fn=self.stem_fn, out_indices=[-1]) + self.assertEqual(len(model.stages), 3) + self.assertEqual(type(model.stages[0].blocks[0]), DarknetBottleneck) + self.assertEqual(type(model.stages[1].blocks[0]), ResNetBottleneck) + self.assertEqual(type(model.stages[2].blocks[0]), ResNeXtBottleneck) + + +class TestCSPDarkNet(TestCase): + + def setUp(self): + self.class_name = CSPDarkNet + self.cfg = dict(depth=53) + self.out_channels = [64, 128, 256, 512, 1024] + self.all_out_indices = [0, 1, 2, 3, 4] + self.frozen_stages = 2 + self.stem_down = (1, 1) + self.num_stages = 5 + + def test_structure(self): + # Test invalid default depths + with self.assertRaisesRegex(AssertionError, 'depth must be one of'): + cfg = deepcopy(self.cfg) + cfg['depth'] = 'unknown' + self.class_name(**cfg) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + self.class_name(**cfg) + cfg['out_indices'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 13'): + self.class_name(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = self.class_name(**cfg) + self.assertEqual(len(model.stages), self.num_stages) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + # test without output_cls_token + cfg = deepcopy(self.cfg) + model = self.class_name(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + self.assertEqual(outs[-1].size(), (3, self.out_channels[-1], 7, 7)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = self.all_out_indices + model = self.class_name(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), len(self.all_out_indices)) + w, h = 224 / self.stem_down[0], 224 / self.stem_down[1] + for i, out in enumerate(outs): + self.assertEqual( + out.size(), + (3, self.out_channels[i], w // 2**(i + 1), h // 2**(i + 1))) + + # Test frozen stages + cfg = deepcopy(self.cfg) + cfg['frozen_stages'] = self.frozen_stages + model = self.class_name(**cfg) + model.init_weights() + model.train() + assert model.stem.training is False + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(self.frozen_stages + 1): + stage = model.stages[i] + for mod in stage.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False, i + for param in stage.parameters(): + assert param.requires_grad is False + + +class TestCSPResNet(TestCSPDarkNet): + + def setUp(self): + self.class_name = CSPResNet + self.cfg = dict(depth=50) + 
self.out_channels = [128, 256, 512, 1024] + self.all_out_indices = [0, 1, 2, 3] + self.frozen_stages = 2 + self.stem_down = (2, 2) + self.num_stages = 4 + + def test_deep_stem(self, ): + cfg = deepcopy(self.cfg) + cfg['deep_stem'] = True + model = self.class_name(**cfg) + self.assertEqual(len(model.stem), 3) + for i in range(3): + self.assertEqual(type(model.stem[i]), ConvModule) + + +class TestCSPResNeXt(TestCSPDarkNet): + + def setUp(self): + self.class_name = CSPResNeXt + self.cfg = dict(depth=50) + self.out_channels = [256, 512, 1024, 2048] + self.all_out_indices = [0, 1, 2, 3] + self.frozen_stages = 2 + self.stem_down = (2, 2) + self.num_stages = 4 + + +if __name__ == '__main__': + import unittest + unittest.main() diff --git a/tests/test_models/test_backbones/test_deit.py b/tests/test_models/test_backbones/test_deit.py new file mode 100644 index 0000000..5f11a3a --- /dev/null +++ b/tests/test_models/test_backbones/test_deit.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmcv.runner import load_checkpoint, save_checkpoint + +from mmcls.models.backbones import DistilledVisionTransformer +from .utils import timm_resize_pos_embed + + +class TestDeiT(TestCase): + + def setUp(self): + self.cfg = dict( + arch='deit-base', img_size=224, patch_size=16, drop_rate=0.1) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = DistilledVisionTransformer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.dist_token, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.dist_token, torch.tensor(0.))) + + # test load checkpoint + pretrain_pos_embed = model.pos_embed.clone().detach() + tmpdir = tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + save_checkpoint(model, checkpoint) + cfg = deepcopy(self.cfg) + model = DistilledVisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + self.assertTrue(torch.allclose(model.pos_embed, pretrain_pos_embed)) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + model = DistilledVisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + resized_pos_embed = timm_resize_pos_embed( + pretrain_pos_embed, model.pos_embed, num_tokens=2) + self.assertTrue(torch.allclose(model.pos_embed, resized_pos_embed)) + + os.remove(checkpoint) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + # test with_cls_token=False + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['output_cls_token'] = True + with self.assertRaisesRegex(AssertionError, 'but got False'): + DistilledVisionTransformer(**cfg) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['output_cls_token'] = False + model = DistilledVisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (3, 768, 14, 14)) + + # test with output_cls_token + cfg = deepcopy(self.cfg) + model = 
DistilledVisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token, cls_token, dist_token = outs[-1] + self.assertEqual(patch_token.shape, (3, 768, 14, 14)) + self.assertEqual(cls_token.shape, (3, 768)) + self.assertEqual(dist_token.shape, (3, 768)) + + # test without output_cls_token + cfg = deepcopy(self.cfg) + cfg['output_cls_token'] = False + model = DistilledVisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (3, 768, 14, 14)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = DistilledVisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + patch_token, cls_token, dist_token = out + self.assertEqual(patch_token.shape, (3, 768, 14, 14)) + self.assertEqual(cls_token.shape, (3, 768)) + self.assertEqual(dist_token.shape, (3, 768)) + + # Test forward with dynamic input size + imgs1 = torch.randn(3, 3, 224, 224) + imgs2 = torch.randn(3, 3, 256, 256) + imgs3 = torch.randn(3, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = DistilledVisionTransformer(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token, cls_token, dist_token = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 16), + math.ceil(imgs.shape[3] / 16)) + self.assertEqual(patch_token.shape, (3, 768, *expect_feat_shape)) + self.assertEqual(cls_token.shape, (3, 768)) + self.assertEqual(dist_token.shape, (3, 768)) diff --git a/tests/test_models/test_backbones/test_densenet.py b/tests/test_models/test_backbones/test_densenet.py new file mode 100644 index 0000000..5e4c73b --- /dev/null +++ b/tests/test_models/test_backbones/test_densenet.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmcls.models.backbones import DenseNet + + +def test_assertion(): + with pytest.raises(AssertionError): + DenseNet(arch='unknown') + + with pytest.raises(AssertionError): + # DenseNet arch dict should include essential_keys, + DenseNet(arch=dict(channels=[2, 3, 4, 5])) + + with pytest.raises(AssertionError): + # DenseNet out_indices should be valid depth. 
+ DenseNet(out_indices=-100) + + +def test_DenseNet(): + + # Test forward + model = DenseNet(arch='121') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 1024, 7, 7]) + + # Test memory efficient option + model = DenseNet(arch='121', memory_efficient=True) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 1024, 7, 7]) + + # Test drop rate + model = DenseNet(arch='121', drop_rate=0.05) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 1024, 7, 7]) + + # Test forward with multiple outputs + model = DenseNet(arch='121', out_indices=(0, 1, 2, 3)) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 128, 28, 28]) + assert feat[1].shape == torch.Size([1, 256, 14, 14]) + assert feat[2].shape == torch.Size([1, 512, 7, 7]) + assert feat[3].shape == torch.Size([1, 1024, 7, 7]) + + # Test with custom arch + model = DenseNet( + arch={ + 'growth_rate': 20, + 'depths': [4, 8, 12, 16, 20], + 'init_channels': 40, + }, + out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size([1, 60, 28, 28]) + assert feat[1].shape == torch.Size([1, 110, 14, 14]) + assert feat[2].shape == torch.Size([1, 175, 7, 7]) + assert feat[3].shape == torch.Size([1, 247, 3, 3]) + assert feat[4].shape == torch.Size([1, 647, 3, 3]) + + # Test frozen_stages + model = DenseNet(arch='121', out_indices=(0, 1, 2, 3), frozen_stages=2) + model.init_weights() + model.train() + + for i in range(2): + assert not model.stages[i].training + assert not model.transitions[i].training + + for i in range(2, 4): + assert model.stages[i].training + assert model.transitions[i].training diff --git a/tests/test_models/test_backbones/test_efficientformer.py b/tests/test_models/test_backbones/test_efficientformer.py new file mode 100644 index 0000000..88aad52 --- /dev/null +++ b/tests/test_models/test_backbones/test_efficientformer.py @@ -0,0 +1,199 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
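+"""Unit tests for the EfficientFormer backbone.
+
+Covers arch validation (including the ``vit_num`` constraints), weight
+initialization of conv and LayerScale layers, forward shapes with and without
+``reshape_last_feat``, the per-stage norm layers, stochastic depth decay and
+frozen stages.
+"""
+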
+from copy import deepcopy +from unittest import TestCase + +import torch +from mmcv.cnn import ConvModule +from torch import nn + +from mmcls.models.backbones import EfficientFormer +from mmcls.models.backbones.efficientformer import (AttentionWithBias, Flat, + Meta3D, Meta4D) +from mmcls.models.backbones.poolformer import Pooling + + +class TestEfficientFormer(TestCase): + + def setUp(self): + self.cfg = dict(arch='l1', drop_path_rate=0.1) + self.arch = EfficientFormer.arch_settings['l1'] + self.custom_arch = { + 'layers': [1, 1, 1, 4], + 'embed_dims': [48, 96, 224, 448], + 'downsamples': [False, True, True, True], + 'vit_num': 2, + } + self.custom_cfg = dict(arch=self.custom_arch) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'Unavailable arch'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + EfficientFormer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'must have'): + cfg = deepcopy(self.custom_cfg) + cfg['arch'].pop('layers') + EfficientFormer(**cfg) + + # Test vit_num < 0 + with self.assertRaisesRegex(AssertionError, "'vit_num' must"): + cfg = deepcopy(self.custom_cfg) + cfg['arch']['vit_num'] = -1 + EfficientFormer(**cfg) + + # Test vit_num > last stage layers + with self.assertRaisesRegex(AssertionError, "'vit_num' must"): + cfg = deepcopy(self.custom_cfg) + cfg['arch']['vit_num'] = 10 + EfficientFormer(**cfg) + + # Test out_ind + with self.assertRaisesRegex(AssertionError, '"out_indices" must'): + cfg = deepcopy(self.custom_cfg) + cfg['out_indices'] = dict + EfficientFormer(**cfg) + + # Test custom arch + cfg = deepcopy(self.custom_cfg) + model = EfficientFormer(**cfg) + self.assertEqual(len(model.patch_embed), 2) + layers = self.custom_arch['layers'] + downsamples = self.custom_arch['downsamples'] + vit_num = self.custom_arch['vit_num'] + + for i, stage in enumerate(model.network): + if downsamples[i]: + self.assertIsInstance(stage[0], ConvModule) + self.assertEqual(stage[0].conv.stride, (2, 2)) + self.assertTrue(hasattr(stage[0].conv, 'bias')) + self.assertTrue(isinstance(stage[0].bn, nn.BatchNorm2d)) + + if i < len(model.network) - 1: + self.assertIsInstance(stage[-1], Meta4D) + self.assertIsInstance(stage[-1].token_mixer, Pooling) + self.assertEqual(len(stage) - downsamples[i], layers[i]) + elif vit_num > 0: + self.assertIsInstance(stage[-1], Meta3D) + self.assertIsInstance(stage[-1].token_mixer, AttentionWithBias) + self.assertEqual(len(stage) - downsamples[i] - 1, layers[i]) + flat_layer_idx = len(stage) - vit_num - downsamples[i] + self.assertIsInstance(stage[flat_layer_idx], Flat) + count = 0 + for layer in stage: + if isinstance(layer, Meta3D): + count += 1 + self.assertEqual(count, vit_num) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear'), + dict(type='Constant', layer=['LayerScale'], val=1e-4) + ] + model = EfficientFormer(**cfg) + ori_weight = model.patch_embed[0].conv.weight.clone().detach() + ori_ls_weight = model.network[0][-1].ls1.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed[0].conv.weight + initialized_ls_weight = model.network[0][-1].ls1.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(ori_ls_weight, initialized_ls_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + # test last stage output + 
cfg = deepcopy(self.cfg) + model = EfficientFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 448, 49)) + assert hasattr(model, 'norm3') + assert isinstance(getattr(model, 'norm3'), nn.LayerNorm) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + cfg['reshape_last_feat'] = True + model = EfficientFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + # Test out features shape + for dim, stride, out in zip(self.arch['embed_dims'], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, (1, dim, 56 // stride, 56 // stride)) + + # Test norm layer + for i in range(4): + assert hasattr(model, f'norm{i}') + stage_norm = getattr(model, f'norm{i}') + assert isinstance(stage_norm, nn.GroupNorm) + assert stage_norm.num_groups == 1 + + # Test vit_num == 0 + cfg = deepcopy(self.custom_cfg) + cfg['arch']['vit_num'] = 0 + cfg['out_indices'] = (0, 1, 2, 3) + model = EfficientFormer(**cfg) + for i in range(4): + assert hasattr(model, f'norm{i}') + stage_norm = getattr(model, f'norm{i}') + assert isinstance(stage_norm, nn.GroupNorm) + assert stage_norm.num_groups == 1 + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = EfficientFormer(**cfg) + layers = self.arch['layers'] + for i, block in enumerate(model.network): + expect_prob = 0.2 / (sum(layers) - 1) * i + if hasattr(block, 'drop_path'): + if expect_prob == 0: + self.assertIsInstance(block.drop_path, torch.nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, + expect_prob) + + # test with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 1 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = EfficientFormer(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages): + module = model.network[i] + for param in module.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + module = model.network[i] + for param in module.parameters(): + self.assertTrue(param.requires_grad) + if hasattr(model, f'norm{i}'): + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_efficientnet.py b/tests/test_models/test_backbones/test_efficientnet.py new file mode 100644 index 0000000..d424b23 --- /dev/null +++ b/tests/test_models/test_backbones/test_efficientnet.py @@ -0,0 +1,144 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
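+"""Unit tests for the EfficientNet backbone.
+
+Covers invalid ``pretrained``/``arch``/``frozen_stages`` arguments, frozen
+stages, ``norm_eval``, and the per-stage feature shapes of the 'b0' and 'es'
+presets with both the default BatchNorm and a GroupNorm ``norm_cfg``.
+"""
+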
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import EfficientNet + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_efficientnet_backbone(): + archs = ['b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b7', 'b8', 'es', 'em', 'el'] + with pytest.raises(TypeError): + # pretrained must be a string path + model = EfficientNet() + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # arch must in arc_settings + EfficientNet(arch='others') + + for arch in archs: + with pytest.raises(ValueError): + # frozen_stages must less than 7 + EfficientNet(arch=arch, frozen_stages=12) + + # Test EfficientNet + model = EfficientNet() + model.init_weights() + model.train() + + # Test EfficientNet with first stage frozen + frozen_stages = 7 + model = EfficientNet(arch='b0', frozen_stages=frozen_stages) + model.init_weights() + model.train() + for i in range(frozen_stages): + layer = model.layers[i] + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test EfficientNet with norm eval + model = EfficientNet(norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test EfficientNet forward with 'b0' arch + out_channels = [32, 16, 24, 40, 112, 320, 1280] + model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) + + # Test EfficientNet forward with 'b0' arch and GroupNorm + out_channels = [32, 16, 24, 40, 112, 320, 1280] + model = EfficientNet( + arch='b0', + out_indices=(0, 1, 2, 3, 4, 5, 6), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) + + # Test EfficientNet forward with 'es' arch + out_channels = [32, 24, 32, 48, 144, 192, 1280] + model = EfficientNet(arch='es', out_indices=(0, 1, 2, 3, 4, 5, 6)) + model.init_weights() + model.train() 
+ + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) + + # Test EfficientNet forward with 'es' arch and GroupNorm + out_channels = [32, 24, 32, 48, 144, 192, 1280] + model = EfficientNet( + arch='es', + out_indices=(0, 1, 2, 3, 4, 5, 6), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) diff --git a/tests/test_models/test_backbones/test_hornet.py b/tests/test_models/test_backbones/test_hornet.py new file mode 100644 index 0000000..5fdd84b --- /dev/null +++ b/tests/test_models/test_backbones/test_hornet.py @@ -0,0 +1,174 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +import pytest +import torch +from mmcv.utils import digit_version +from mmcv.utils.parrots_wrapper import _BatchNorm +from torch import nn + +from mmcls.models.backbones import HorNet + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +@pytest.mark.skipif( + digit_version(torch.__version__) < digit_version('1.7.0'), + reason='torch.fft is not available before 1.7.0') +class TestHorNet(TestCase): + + def setUp(self): + self.cfg = dict( + arch='t', drop_path_rate=0.1, gap_before_final_norm=False) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + HorNet(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'depths': [1, 1, 1, 1], + 'orders': [1, 1, 1, 1], + } + HorNet(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + base_dim = 64 + depths = [2, 3, 18, 2] + embed_dims = [base_dim, base_dim * 2, base_dim * 4, base_dim * 8] + cfg['arch'] = { + 'base_dim': + base_dim, + 'depths': + depths, + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4) + ], + } + model = HorNet(**cfg) + + for i in range(len(depths)): + stage = model.stages[i] + self.assertEqual(stage[-1].out_channels, embed_dims[i]) + self.assertEqual(len(stage), depths[i]) + + def 
test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = HorNet(**cfg) + ori_weight = model.downsample_layers[0][0].weight.clone().detach() + + model.init_weights() + initialized_weight = model.downsample_layers[0][0].weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = HorNet(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (3, 512, 7, 7)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + model = HorNet(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for emb_size, stride, out in zip([64, 128, 256, 512], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, + (3, emb_size, 56 // stride, 56 // stride)) + + # test with dynamic input shape + imgs1 = torch.randn(3, 3, 224, 224) + imgs2 = torch.randn(3, 3, 256, 256) + imgs3 = torch.randn(3, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = HorNet(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + expect_feat_shape = (math.floor(imgs.shape[2] / 32), + math.floor(imgs.shape[3] / 32)) + self.assertEqual(feat.shape, (3, 512, *expect_feat_shape)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = HorNet(**cfg) + depths = model.arch_settings['depths'] + stages = model.stages + blocks = chain(*[stage for stage in stages]) + total_depth = sum(depths) + dpr = [ + x.item() + for x in torch.linspace(0, cfg['drop_path_rate'], total_depth) + ] + for i, (block, expect_prob) in enumerate(zip(blocks, dpr)): + if expect_prob == 0: + assert isinstance(block.drop_path, nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, expect_prob) + + # test VAN with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 0 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = HorNet(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + for i in range(frozen_stages + 1): + down = model.downsample_layers[i] + for param in down.parameters(): + self.assertFalse(param.requires_grad) + blocks = model.stages[i] + for param in blocks.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + down = model.downsample_layers[i] + for param in down.parameters(): + self.assertTrue(param.requires_grad) + blocks = model.stages[i] + for param in blocks.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_hrnet.py b/tests/test_models/test_backbones/test_hrnet.py new file mode 100644 index 0000000..cb9909a --- /dev/null +++ b/tests/test_models/test_backbones/test_hrnet.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
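+"""Unit tests for the HRNet backbone.
+
+One parametrized case walks the predefined 'w18'..'w64' arch zoo and checks
+that each output branch doubles the channels while halving the resolution;
+a second case builds HRNet from a hand-written ``extra`` stage config.
+"""
+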
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import HRNet + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +@pytest.mark.parametrize('base_channels', [18, 30, 32, 40, 44, 48, 64]) +def test_hrnet_arch_zoo(base_channels): + + cfg_ori = dict(arch=f'w{base_channels}') + + # Test HRNet model with input size of 224 + model = HRNet(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(3, 3, 224, 224) + outs = model(imgs) + out_channels = base_channels + out_size = 56 + assert isinstance(outs, tuple) + for out in outs: + assert out.shape == (3, out_channels, out_size, out_size) + out_channels = out_channels * 2 + out_size = out_size // 2 + + +def test_hrnet_custom_arch(): + + cfg_ori = dict( + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BOTTLENECK', + num_blocks=(4, 4, 2), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 3, 4, 4), + num_channels=(32, 64, 152, 256)), + ), ) + + # Test HRNet model with input size of 224 + model = HRNet(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(3, 3, 224, 224) + outs = model(imgs) + out_channels = (32, 64, 152, 256) + out_size = 56 + assert isinstance(outs, tuple) + for out, out_channel in zip(outs, out_channels): + assert out.shape == (3, out_channel, out_size, out_size) + out_size = out_size // 2 diff --git a/tests/test_models/test_backbones/test_mlp_mixer.py b/tests/test_models/test_backbones/test_mlp_mixer.py new file mode 100644 index 0000000..d065a68 --- /dev/null +++ b/tests/test_models/test_backbones/test_mlp_mixer.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
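+"""Unit tests for the MLP-Mixer backbone.
+
+Covers arch validation, a custom arch dict, weight initialization, single and
+multiple ``out_indices`` (each returning ``(N, 768, 196)`` token features for
+224x224 inputs with the 'b' arch), and the assertion raised when the input
+size differs from ``img_size``.
+"""
+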
+from copy import deepcopy +from unittest import TestCase + +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import MlpMixer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +class TestMLPMixer(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + MlpMixer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 24, + 'num_layers': 16, + 'tokens_mlp_dims': 4096 + } + MlpMixer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 6, + 'tokens_mlp_dims': 256, + 'channels_mlp_dims': 1024 + } + model = MlpMixer(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(model.num_layers, 6) + for layer in model.layers: + self.assertEqual(layer.token_mix.feedforward_channels, 256) + self.assertEqual(layer.channel_mix.feedforward_channels, 1024) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = MlpMixer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + # test forward with single out indices + cfg = deepcopy(self.cfg) + model = MlpMixer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (3, 768, 196)) + + # test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = MlpMixer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for feat in outs: + self.assertEqual(feat.shape, (3, 768, 196)) + + # test with invalid input shape + imgs2 = torch.randn(3, 3, 256, 256) + cfg = deepcopy(self.cfg) + model = MlpMixer(**cfg) + with self.assertRaisesRegex(AssertionError, 'dynamic input shape.'): + model(imgs2) diff --git a/tests/test_models/test_backbones/test_mobilenet_v2.py b/tests/test_models/test_backbones/test_mobilenet_v2.py new file mode 100644 index 0000000..9ea7557 --- /dev/null +++ b/tests/test_models/test_backbones/test_mobilenet_v2.py @@ -0,0 +1,259 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
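+"""Unit tests for MobileNetV2 and its InvertedResidual block.
+
+The block tests exercise stride/expand-ratio combinations, the residual
+connection and checkpointing; the backbone tests cover frozen stages,
+``norm_eval``, several ``widen_factor`` values, a ReLU ``act_cfg``, BatchNorm
+vs GroupNorm, selected ``out_indices`` and checkpointed forward passes.
+"""
+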
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import MobileNetV2 +from mmcls.models.backbones.mobilenet_v2 import InvertedResidual + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (InvertedResidual, )): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_mobilenetv2_invertedresidual(): + + with pytest.raises(AssertionError): + # stride must be in [1, 2] + InvertedResidual(16, 24, stride=3, expand_ratio=6) + + # Test InvertedResidual with checkpoint forward, stride=1 + block = InvertedResidual(16, 24, stride=1, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + # Test InvertedResidual with expand_ratio=1 + block = InvertedResidual(16, 16, stride=1, expand_ratio=1) + assert len(block.conv) == 2 + + # Test InvertedResidual with use_res_connect + block = InvertedResidual(16, 16, stride=1, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert block.use_res_connect is True + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual with checkpoint forward, stride=2 + block = InvertedResidual(16, 24, stride=2, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 28, 28)) + + # Test InvertedResidual with checkpoint forward + block = InvertedResidual(16, 24, stride=1, expand_ratio=6, with_cp=True) + assert block.with_cp + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + # Test InvertedResidual with act_cfg=dict(type='ReLU') + block = InvertedResidual( + 16, 24, stride=1, expand_ratio=6, act_cfg=dict(type='ReLU')) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + +def test_mobilenetv2_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = MobileNetV2() + model.init_weights(pretrained=0) + + with pytest.raises(ValueError): + # frozen_stages must in range(-1, 8) + MobileNetV2(frozen_stages=8) + + with pytest.raises(ValueError): + # out_indices in range(0, 8) + MobileNetV2(out_indices=[8]) + + # Test MobileNetV2 with first stage frozen + frozen_stages = 1 + model = MobileNetV2(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.conv1.modules(): + for param in mod.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test MobileNetV2 with norm_eval=True + model = MobileNetV2(norm_eval=True) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), False) + + # Test MobileNetV2 forward with widen_factor=1.0 + model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8)) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = 
torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 8 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + assert feat[7].shape == torch.Size((1, 1280, 7, 7)) + + # Test MobileNetV2 forward with widen_factor=0.5 + model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 8, 112, 112)) + assert feat[1].shape == torch.Size((1, 16, 56, 56)) + assert feat[2].shape == torch.Size((1, 16, 28, 28)) + assert feat[3].shape == torch.Size((1, 32, 14, 14)) + assert feat[4].shape == torch.Size((1, 48, 14, 14)) + assert feat[5].shape == torch.Size((1, 80, 7, 7)) + assert feat[6].shape == torch.Size((1, 160, 7, 7)) + + # Test MobileNetV2 forward with widen_factor=2.0 + model = MobileNetV2(widen_factor=2.0) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 2560, 7, 7)) + + # Test MobileNetV2 forward with out_indices=None + model = MobileNetV2(widen_factor=1.0) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 1280, 7, 7)) + + # Test MobileNetV2 forward with dict(type='ReLU') + model = MobileNetV2( + widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with BatchNorm forward + model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with GroupNorm forward + model = MobileNetV2( + widen_factor=1.0, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=range(0, 7)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + 
assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with layers 1, 3, 5 out forward + model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 32, 28, 28)) + assert feat[2].shape == torch.Size((1, 96, 14, 14)) + + # Test MobileNetV2 with checkpoint forward + model = MobileNetV2( + widen_factor=1.0, with_cp=True, out_indices=range(0, 7)) + for m in model.modules(): + if is_block(m): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) diff --git a/tests/test_models/test_backbones/test_mobilenet_v3.py b/tests/test_models/test_backbones/test_mobilenet_v3.py new file mode 100644 index 0000000..b122dbd --- /dev/null +++ b/tests/test_models/test_backbones/test_mobilenet_v3.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import MobileNetV3 +from mmcls.models.utils import InvertedResidual + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_mobilenetv3_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = MobileNetV3() + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # arch must in [small, large] + MobileNetV3(arch='others') + + with pytest.raises(ValueError): + # frozen_stages must less than 13 when arch is small + MobileNetV3(arch='small', frozen_stages=13) + + with pytest.raises(ValueError): + # frozen_stages must less than 17 when arch is large + MobileNetV3(arch='large', frozen_stages=17) + + with pytest.raises(ValueError): + # max out_indices must less than 13 when arch is small + MobileNetV3(arch='small', out_indices=(13, )) + + with pytest.raises(ValueError): + # max out_indices must less than 17 when arch is large + MobileNetV3(arch='large', out_indices=(17, )) + + # Test MobileNetV3 + model = MobileNetV3() + model.init_weights() + model.train() + + # Test MobileNetV3 with first stage frozen + frozen_stages = 1 + model = MobileNetV3(frozen_stages=frozen_stages) + model.init_weights() + model.train() + for i in range(0, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test MobileNetV3 with norm 
eval + model = MobileNetV3(norm_eval=True, out_indices=range(0, 12)) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test MobileNetV3 forward with small arch + model = MobileNetV3(out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 13 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 16, 56, 56]) + assert feat[2].shape == torch.Size([1, 24, 28, 28]) + assert feat[3].shape == torch.Size([1, 24, 28, 28]) + assert feat[4].shape == torch.Size([1, 40, 14, 14]) + assert feat[5].shape == torch.Size([1, 40, 14, 14]) + assert feat[6].shape == torch.Size([1, 40, 14, 14]) + assert feat[7].shape == torch.Size([1, 48, 14, 14]) + assert feat[8].shape == torch.Size([1, 48, 14, 14]) + assert feat[9].shape == torch.Size([1, 96, 7, 7]) + assert feat[10].shape == torch.Size([1, 96, 7, 7]) + assert feat[11].shape == torch.Size([1, 96, 7, 7]) + assert feat[12].shape == torch.Size([1, 576, 7, 7]) + + # Test MobileNetV3 forward with small arch and GroupNorm + model = MobileNetV3( + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 13 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 16, 56, 56]) + assert feat[2].shape == torch.Size([1, 24, 28, 28]) + assert feat[3].shape == torch.Size([1, 24, 28, 28]) + assert feat[4].shape == torch.Size([1, 40, 14, 14]) + assert feat[5].shape == torch.Size([1, 40, 14, 14]) + assert feat[6].shape == torch.Size([1, 40, 14, 14]) + assert feat[7].shape == torch.Size([1, 48, 14, 14]) + assert feat[8].shape == torch.Size([1, 48, 14, 14]) + assert feat[9].shape == torch.Size([1, 96, 7, 7]) + assert feat[10].shape == torch.Size([1, 96, 7, 7]) + assert feat[11].shape == torch.Size([1, 96, 7, 7]) + assert feat[12].shape == torch.Size([1, 576, 7, 7]) + + # Test MobileNetV3 forward with large arch + model = MobileNetV3( + arch='large', + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 17 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 16, 112, 112]) + assert feat[2].shape == torch.Size([1, 24, 56, 56]) + assert feat[3].shape == torch.Size([1, 24, 56, 56]) + assert feat[4].shape == torch.Size([1, 40, 28, 28]) + assert feat[5].shape == torch.Size([1, 40, 28, 28]) + assert feat[6].shape == torch.Size([1, 40, 28, 28]) + assert feat[7].shape == torch.Size([1, 80, 14, 14]) + assert feat[8].shape == torch.Size([1, 80, 14, 14]) + assert feat[9].shape == torch.Size([1, 80, 14, 14]) + assert feat[10].shape == torch.Size([1, 80, 14, 14]) + assert feat[11].shape == torch.Size([1, 112, 14, 14]) + assert feat[12].shape == torch.Size([1, 112, 14, 14]) + assert feat[13].shape == torch.Size([1, 160, 7, 7]) + assert feat[14].shape == torch.Size([1, 160, 7, 7]) + assert feat[15].shape == torch.Size([1, 160, 7, 7]) + assert feat[16].shape == torch.Size([1, 960, 7, 7]) + + # Test MobileNetV3 forward with large arch + model = MobileNetV3(arch='large', out_indices=(0, )) + model.init_weights() + 
model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + + # Test MobileNetV3 with checkpoint forward + model = MobileNetV3(with_cp=True) + for m in model.modules(): + if isinstance(m, InvertedResidual): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 576, 7, 7]) diff --git a/tests/test_models/test_backbones/test_mvit.py b/tests/test_models/test_backbones/test_mvit.py new file mode 100644 index 0000000..a37e93f --- /dev/null +++ b/tests/test_models/test_backbones/test_mvit.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from unittest import TestCase + +import torch + +from mmcls.models.backbones import MViT + + +class TestMViT(TestCase): + + def setUp(self): + self.cfg = dict(arch='tiny', img_size=224, drop_path_rate=0.1) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + MViT(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_layers': 10, + } + MViT(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + embed_dims = 96 + num_layers = 10 + num_heads = 1 + downscale_indices = (2, 5, 7) + cfg['arch'] = { + 'embed_dims': embed_dims, + 'num_layers': num_layers, + 'num_heads': num_heads, + 'downscale_indices': downscale_indices + } + model = MViT(**cfg) + self.assertEqual(len(model.blocks), num_layers) + for i, block in enumerate(model.blocks): + if i in downscale_indices: + num_heads *= 2 + embed_dims *= 2 + self.assertEqual(block.out_dims, embed_dims) + self.assertEqual(block.attn.num_heads, num_heads) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = MViT(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.pos_embed, torch.tensor(0.))) + self.assertFalse( + torch.allclose(model.blocks[0].attn.rel_pos_h, torch.tensor(0.))) + self.assertFalse( + torch.allclose(model.blocks[0].attn.rel_pos_w, torch.tensor(0.))) + + # test rel_pos_zero_init + cfg = deepcopy(self.cfg) + cfg['rel_pos_zero_init'] = True + model = MViT(**cfg) + model.init_weights() + self.assertTrue( + torch.allclose(model.blocks[0].attn.rel_pos_h, torch.tensor(0.))) + self.assertTrue( + torch.allclose(model.blocks[0].attn.rel_pos_w, torch.tensor(0.))) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = MViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 768, 7, 7)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_scales'] = (0, 1, 2, 3) + model = MViT(**cfg) + model.init_weights() + outs = model(imgs) + 
self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stride, out in zip([1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 96 * stride, 56 // stride, 56 // stride)) + + # test dim_mul_in_attention = False + cfg = deepcopy(self.cfg) + cfg['out_scales'] = (0, 1, 2, 3) + cfg['dim_mul_in_attention'] = False + model = MViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for dim_mul, stride, out in zip([2, 4, 8, 8], [1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 96 * dim_mul, 56 // stride, 56 // stride)) + + # test rel_pos_spatial = False + cfg = deepcopy(self.cfg) + cfg['out_scales'] = (0, 1, 2, 3) + cfg['rel_pos_spatial'] = False + cfg['img_size'] = None + model = MViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stride, out in zip([1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 96 * stride, 56 // stride, 56 // stride)) + + # test residual_pooling = False + cfg = deepcopy(self.cfg) + cfg['out_scales'] = (0, 1, 2, 3) + cfg['residual_pooling'] = False + model = MViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stride, out in zip([1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 96 * stride, 56 // stride, 56 // stride)) + + # test use_abs_pos_embed = True + cfg = deepcopy(self.cfg) + cfg['out_scales'] = (0, 1, 2, 3) + cfg['use_abs_pos_embed'] = True + model = MViT(**cfg) + model.init_weights() + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stride, out in zip([1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 96 * stride, 56 // stride, 56 // stride)) + + # test dynamic inputs shape + cfg = deepcopy(self.cfg) + cfg['out_scales'] = (0, 1, 2, 3) + model = MViT(**cfg) + imgs = torch.randn(1, 3, 352, 260) + h_resolution = (352 + 2 * 3 - 7) // 4 + 1 + w_resolution = (260 + 2 * 3 - 7) // 4 + 1 + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + expect_h = h_resolution + expect_w = w_resolution + for i, out in enumerate(outs): + self.assertEqual(out.shape, (1, 96 * 2**i, expect_h, expect_w)) + expect_h = (expect_h + 2 * 1 - 3) // 2 + 1 + expect_w = (expect_w + 2 * 1 - 3) // 2 + 1 + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = MViT(**cfg) + for i, block in enumerate(model.blocks): + expect_prob = 0.2 / (model.num_layers - 1) * i + if expect_prob > 0: + self.assertAlmostEqual(block.drop_path.drop_prob, expect_prob) diff --git a/tests/test_models/test_backbones/test_poolformer.py b/tests/test_models/test_backbones/test_poolformer.py new file mode 100644 index 0000000..8e60b81 --- /dev/null +++ b/tests/test_models/test_backbones/test_poolformer.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
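+"""Unit tests for the PoolFormer backbone.
+
+Covers arch validation, a custom arch dict (layers, embed_dims, mlp_ratios and
+the LayerScale init value), weight initialization, single and multi-stage
+outputs, stochastic depth decay and frozen stages.
+"""
+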
+from copy import deepcopy +from unittest import TestCase + +import torch + +from mmcls.models.backbones import PoolFormer +from mmcls.models.backbones.poolformer import PoolFormerBlock + + +class TestPoolFormer(TestCase): + + def setUp(self): + arch = 's12' + self.cfg = dict(arch=arch, drop_path_rate=0.1) + self.arch = PoolFormer.arch_settings[arch] + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'Unavailable arch'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + PoolFormer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'must have "layers"'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_heads': [3, 6, 12, 16], + } + PoolFormer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + layers = [2, 2, 4, 2] + embed_dims = [6, 12, 6, 12] + mlp_ratios = [2, 3, 4, 4] + layer_scale_init_value = 1e-4 + cfg['arch'] = dict( + layers=layers, + embed_dims=embed_dims, + mlp_ratios=mlp_ratios, + layer_scale_init_value=layer_scale_init_value, + ) + model = PoolFormer(**cfg) + for i, stage in enumerate(model.network): + if not isinstance(stage, PoolFormerBlock): + continue + self.assertEqual(len(stage), layers[i]) + self.assertEqual(stage[0].mlp.fc1.in_channels, embed_dims[i]) + self.assertEqual(stage[0].mlp.fc1.out_channels, + embed_dims[i] * mlp_ratios[i]) + self.assertTrue( + torch.allclose(stage[0].layer_scale_1, + torch.tensor(layer_scale_init_value))) + self.assertTrue( + torch.allclose(stage[0].layer_scale_2, + torch.tensor(layer_scale_init_value))) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = PoolFormer(**cfg) + ori_weight = model.patch_embed.proj.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed.proj.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = PoolFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 512, 7, 7)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 2, 4, 6) + model = PoolFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for dim, stride, out in zip(self.arch['embed_dims'], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, (1, dim, 56 // stride, 56 // stride)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = PoolFormer(**cfg) + layers = self.arch['layers'] + for i, block in enumerate(model.network): + expect_prob = 0.2 / (sum(layers) - 1) * i + if hasattr(block, 'drop_path'): + if expect_prob == 0: + self.assertIsInstance(block.drop_path, torch.nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, + expect_prob) + + # test with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 1 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 2, 4, 6) + model = PoolFormer(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. 
+ self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages): + module = model.network[i] + for param in module.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 7): + module = model.network[i] + for param in module.parameters(): + self.assertTrue(param.requires_grad) + if hasattr(model, f'norm{i}'): + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_regnet.py b/tests/test_models/test_backbones/test_regnet.py new file mode 100644 index 0000000..67de1c8 --- /dev/null +++ b/tests/test_models/test_backbones/test_regnet.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmcls.models.backbones import RegNet + +regnet_test_data = [ + ('regnetx_400mf', + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, + bot_mul=1.0), [32, 64, 160, 384]), + ('regnetx_800mf', + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, + bot_mul=1.0), [64, 128, 288, 672]), + ('regnetx_1.6gf', + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, + bot_mul=1.0), [72, 168, 408, 912]), + ('regnetx_3.2gf', + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, + bot_mul=1.0), [96, 192, 432, 1008]), + ('regnetx_4.0gf', + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, + bot_mul=1.0), [80, 240, 560, 1360]), + ('regnetx_6.4gf', + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, + bot_mul=1.0), [168, 392, 784, 1624]), + ('regnetx_8.0gf', + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, + bot_mul=1.0), [80, 240, 720, 1920]), + ('regnetx_12gf', + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, + bot_mul=1.0), [224, 448, 896, 2240]), +] + + +@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data) +def test_regnet_backbone(arch_name, arch, out_channels): + with pytest.raises(AssertionError): + # ResNeXt depth should be in [50, 101, 152] + RegNet(arch_name + '233') + + # output the last feature map + model = RegNet(arch_name) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == (1, out_channels[-1], 7, 7) + + # output feature map of all stages + model = RegNet(arch_name, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, out_channels[0], 56, 56) + assert feat[1].shape == (1, out_channels[1], 28, 28) + assert feat[2].shape == (1, out_channels[2], 14, 14) + assert feat[3].shape == (1, out_channels[3], 7, 7) + + +@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data) +def test_custom_arch(arch_name, arch, out_channels): + # output the last feature map + model = RegNet(arch) + model.init_weights() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == (1, out_channels[-1], 7, 7) + + # output feature map of all stages + model = RegNet(arch, out_indices=(0, 1, 2, 3)) + model.init_weights() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert 
feat[0].shape == (1, out_channels[0], 56, 56) + assert feat[1].shape == (1, out_channels[1], 28, 28) + assert feat[2].shape == (1, out_channels[2], 14, 14) + assert feat[3].shape == (1, out_channels[3], 7, 7) + + +def test_exception(): + # arch must be a str or dict + with pytest.raises(TypeError): + _ = RegNet(50) diff --git a/tests/test_models/test_backbones/test_repmlp.py b/tests/test_models/test_backbones/test_repmlp.py new file mode 100644 index 0000000..dcab2cf --- /dev/null +++ b/tests/test_models/test_backbones/test_repmlp.py @@ -0,0 +1,172 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmcv.runner import load_checkpoint, save_checkpoint + +from mmcls.models.backbones import RepMLPNet + + +class TestRepMLP(TestCase): + + def setUp(self): + # default model setting + self.cfg = dict( + arch='b', + img_size=224, + out_indices=(3, ), + reparam_conv_kernels=(1, 3), + final_norm=True) + + # default model setting and output stage channels + self.model_forward_settings = [ + dict(model_name='B', out_sizes=(96, 192, 384, 768)), + ] + + # temp ckpt path + self.ckpt_path = os.path.join(tempfile.gettempdir(), 'ckpt.pth') + + def test_arch(self): + # Test invalid arch data type + with self.assertRaisesRegex(AssertionError, 'arch needs a dict'): + cfg = deepcopy(self.cfg) + cfg['arch'] = [96, 192, 384, 768] + RepMLPNet(**cfg) + + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'A' + RepMLPNet(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'channels': [96, 192, 384, 768], + 'depths': [2, 2, 12, 2] + } + RepMLPNet(**cfg) + + # test len(arch['depths']) equals to len(arch['channels']) + # equals to len(arch['sharesets_nums']) + with self.assertRaisesRegex(AssertionError, 'Length of setting'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'channels': [96, 192, 384, 768], + 'depths': [2, 2, 12, 2], + 'sharesets_nums': [1, 4, 32] + } + RepMLPNet(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + channels = [96, 192, 384, 768] + depths = [2, 2, 12, 2] + sharesets_nums = [1, 4, 32, 128] + cfg['arch'] = { + 'channels': channels, + 'depths': depths, + 'sharesets_nums': sharesets_nums + } + cfg['out_indices'] = (0, 1, 2, 3) + model = RepMLPNet(**cfg) + for i, stage in enumerate(model.stages): + self.assertEqual(len(stage), depths[i]) + self.assertEqual(stage[0].repmlp_block.channels, channels[i]) + self.assertEqual(stage[0].repmlp_block.deploy, False) + self.assertEqual(stage[0].repmlp_block.num_sharesets, + sharesets_nums[i]) + + def test_init(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = RepMLPNet(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + cfg = deepcopy(self.cfg) + model = RepMLPNet(**cfg) + feat = model(imgs) + self.assertTrue(isinstance(feat, tuple)) + self.assertEqual(len(feat), 1) + self.assertTrue(isinstance(feat[0], torch.Tensor)) + self.assertEqual(feat[0].shape, torch.Size((1, 768, 7, 7))) + + imgs = 
torch.randn(1, 3, 256, 256) + with self.assertRaisesRegex(AssertionError, "doesn't support dynamic"): + model(imgs) + + # Test RepMLPNet model forward + for model_test_setting in self.model_forward_settings: + model = RepMLPNet( + model_test_setting['model_name'], + out_indices=(0, 1, 2, 3), + final_norm=False) + model.init_weights() + + model.train() + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual( + feat[0].shape, + torch.Size((1, model_test_setting['out_sizes'][1], 28, 28))) + self.assertEqual( + feat[1].shape, + torch.Size((1, model_test_setting['out_sizes'][2], 14, 14))) + self.assertEqual( + feat[2].shape, + torch.Size((1, model_test_setting['out_sizes'][3], 7, 7))) + self.assertEqual( + feat[3].shape, + torch.Size((1, model_test_setting['out_sizes'][3], 7, 7))) + + def test_deploy_(self): + # Test output before and load from deploy checkpoint + imgs = torch.randn((1, 3, 224, 224)) + cfg = dict( + arch='b', out_indices=( + 1, + 3, + ), reparam_conv_kernels=(1, 3, 5)) + model = RepMLPNet(**cfg) + + model.eval() + feats = model(imgs) + model.switch_to_deploy() + for m in model.modules(): + if hasattr(m, 'deploy'): + self.assertTrue(m.deploy) + model.eval() + feats_ = model(imgs) + assert len(feats) == len(feats_) + for i in range(len(feats)): + self.assertTrue( + torch.allclose( + feats[i].sum(), feats_[i].sum(), rtol=0.1, atol=0.1)) + + cfg['deploy'] = True + model_deploy = RepMLPNet(**cfg) + model_deploy.eval() + save_checkpoint(model, self.ckpt_path) + load_checkpoint(model_deploy, self.ckpt_path, strict=True) + feats__ = model_deploy(imgs) + + assert len(feats_) == len(feats__) + for i in range(len(feats)): + self.assertTrue(torch.allclose(feats__[i], feats_[i])) diff --git a/tests/test_models/test_backbones/test_repvgg.py b/tests/test_models/test_backbones/test_repvgg.py new file mode 100644 index 0000000..beecdff --- /dev/null +++ b/tests/test_models/test_backbones/test_repvgg.py @@ -0,0 +1,350 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
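+# The tests below cover RepVGGBlock branch construction and the RepVGG
+# backbone, including structural re-parameterization via switch_to_deploy()
+# and the numerical equivalence of train-mode and deploy-mode outputs.
+#
+# Illustrative sketch of the conversion path exercised here (not part of the
+# test suite; it only re-uses the public API called by the tests below):
+#
+#   x = torch.randn(1, 3, 32, 32)
+#   model = RepVGG('A0', out_indices=(3, ))
+#   model.eval()
+#   feats = model(x)
+#   model.switch_to_deploy()   # fuse the 3x3, 1x1 and identity (norm) branches
+#   feats_deploy = model(x)    # should match `feats` up to numerical tolerance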
+import os +import tempfile + +import pytest +import torch +from mmcv.runner import load_checkpoint, save_checkpoint +from torch import nn +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import RepVGG +from mmcls.models.backbones.repvgg import RepVGGBlock +from mmcls.models.utils import SELayer + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def is_repvgg_block(modules): + if isinstance(modules, RepVGGBlock): + return True + return False + + +def test_repvgg_repvggblock(): + # Test RepVGGBlock with in_channels != out_channels, stride = 1 + block = RepVGGBlock(5, 10, stride=1) + block.eval() + x = torch.randn(1, 5, 16, 16) + x_out_not_deploy = block(x) + assert block.branch_norm is None + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_1x1') + assert hasattr(block, 'branch_3x3') + assert hasattr(block, 'branch_norm') + assert block.se_cfg is None + assert x_out_not_deploy.shape == torch.Size((1, 10, 16, 16)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 10, 16, 16)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with in_channels == out_channels, stride = 1 + block = RepVGGBlock(12, 12, stride=1) + block.eval() + x = torch.randn(1, 12, 8, 8) + x_out_not_deploy = block(x) + assert isinstance(block.branch_norm, nn.BatchNorm2d) + assert not hasattr(block, 'branch_reparam') + assert x_out_not_deploy.shape == torch.Size((1, 12, 8, 8)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 12, 8, 8)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with in_channels == out_channels, stride = 2 + block = RepVGGBlock(16, 16, stride=2) + block.eval() + x = torch.randn(1, 16, 8, 8) + x_out_not_deploy = block(x) + assert block.branch_norm is None + assert x_out_not_deploy.shape == torch.Size((1, 16, 4, 4)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 16, 4, 4)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with padding == dilation == 2 + block = RepVGGBlock(14, 14, stride=1, padding=2, dilation=2) + block.eval() + x = torch.randn(1, 14, 16, 16) + x_out_not_deploy = block(x) + assert isinstance(block.branch_norm, nn.BatchNorm2d) + assert x_out_not_deploy.shape == torch.Size((1, 14, 16, 16)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 14, 16, 16)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with groups = 2 + block = RepVGGBlock(4, 4, stride=1, groups=2) + block.eval() + x = torch.randn(1, 4, 5, 6) + x_out_not_deploy = block(x) + assert x_out_not_deploy.shape == torch.Size((1, 4, 5, 6)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 4, 5, 6)) + assert 
torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with se + se_cfg = dict(ratio=4, divisor=1) + block = RepVGGBlock(18, 18, stride=1, se_cfg=se_cfg) + block.train() + x = torch.randn(1, 18, 5, 5) + x_out_not_deploy = block(x) + assert isinstance(block.se_layer, SELayer) + assert x_out_not_deploy.shape == torch.Size((1, 18, 5, 5)) + + # Test RepVGGBlock with checkpoint forward + block = RepVGGBlock(24, 24, stride=1, with_cp=True) + assert block.with_cp + x = torch.randn(1, 24, 7, 7) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 7, 7)) + + # Test RepVGGBlock with deploy == True + block = RepVGGBlock(8, 8, stride=1, deploy=True) + assert isinstance(block.branch_reparam, nn.Conv2d) + assert not hasattr(block, 'branch_3x3') + assert not hasattr(block, 'branch_1x1') + assert not hasattr(block, 'branch_norm') + x = torch.randn(1, 8, 16, 16) + x_out = block(x) + assert x_out.shape == torch.Size((1, 8, 16, 16)) + + +def test_repvgg_backbone(): + with pytest.raises(TypeError): + # arch must be str or dict + RepVGG(arch=[4, 6, 16, 1]) + + with pytest.raises(AssertionError): + # arch must in arch_settings + RepVGG(arch='A3') + + with pytest.raises(KeyError): + # arch must have num_blocks and width_factor + arch = dict(num_blocks=[2, 4, 14, 1]) + RepVGG(arch=arch) + + # len(arch['num_blocks']) == len(arch['width_factor']) + # == len(strides) == len(dilations) + with pytest.raises(AssertionError): + arch = dict(num_blocks=[2, 4, 14, 1], width_factor=[0.75, 0.75, 0.75]) + RepVGG(arch=arch) + + # len(strides) must equal to 4 + with pytest.raises(AssertionError): + RepVGG('A0', strides=(1, 1, 1)) + + # len(dilations) must equal to 4 + with pytest.raises(AssertionError): + RepVGG('A0', strides=(1, 1, 1, 1), dilations=(1, 1, 2)) + + # max(out_indices) < len(arch['num_blocks']) + with pytest.raises(AssertionError): + RepVGG('A0', out_indices=(5, )) + + # max(arch['group_idx'].keys()) <= sum(arch['num_blocks']) + with pytest.raises(AssertionError): + arch = dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75], + group_idx={22: 2}) + RepVGG(arch=arch) + + # Test RepVGG norm state + model = RepVGG('A0') + model.train() + assert check_norm_state(model.modules(), True) + + # Test RepVGG with first stage frozen + frozen_stages = 1 + model = RepVGG('A0', frozen_stages=frozen_stages) + model.train() + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(0, frozen_stages): + stage_name = model.stages[i] + stage = model.__getattr__(stage_name) + for mod in stage: + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in stage.parameters(): + assert param.requires_grad is False + + # Test RepVGG with norm_eval + model = RepVGG('A0', norm_eval=True) + model.train() + assert check_norm_state(model.modules(), False) + + # Test RepVGG forward with layer 3 forward + model = RepVGG('A0', out_indices=(3, )) + model.init_weights() + model.eval() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1280, 1, 1)) + + # Test with custom arch + cfg = dict( + num_blocks=[3, 5, 7, 3], + width_factor=[1, 1, 1, 1], + group_layer_map=None, + se_cfg=None, + stem_channels=16) + model = RepVGG(arch=cfg, out_indices=(3, )) + model.eval() + assert model.stem.out_channels == min(16, 64 * 
1) + + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 512, 1, 1)) + + # Test RepVGG forward + model_test_settings = [ + dict(model_name='A0', out_sizes=(48, 96, 192, 1280)), + dict(model_name='A1', out_sizes=(64, 128, 256, 1280)), + dict(model_name='A2', out_sizes=(96, 192, 384, 1408)), + dict(model_name='B0', out_sizes=(64, 128, 256, 1280)), + dict(model_name='B1', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B1g2', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B1g4', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B2', out_sizes=(160, 320, 640, 2560)), + dict(model_name='B2g2', out_sizes=(160, 320, 640, 2560)), + dict(model_name='B2g4', out_sizes=(160, 320, 640, 2560)), + dict(model_name='B3', out_sizes=(192, 384, 768, 2560)), + dict(model_name='B3g2', out_sizes=(192, 384, 768, 2560)), + dict(model_name='B3g4', out_sizes=(192, 384, 768, 2560)), + dict(model_name='D2se', out_sizes=(160, 320, 640, 2560)) + ] + + choose_models = ['A0', 'B1', 'B1g2'] + # Test RepVGG model forward + for model_test_setting in model_test_settings: + if model_test_setting['model_name'] not in choose_models: + continue + model = RepVGG( + model_test_setting['model_name'], out_indices=(0, 1, 2, 3)) + model.init_weights() + model.eval() + + # Test Norm + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert feat[0].shape == torch.Size( + (1, model_test_setting['out_sizes'][0], 8, 8)) + assert feat[1].shape == torch.Size( + (1, model_test_setting['out_sizes'][1], 4, 4)) + assert feat[2].shape == torch.Size( + (1, model_test_setting['out_sizes'][2], 2, 2)) + assert feat[3].shape == torch.Size( + (1, model_test_setting['out_sizes'][3], 1, 1)) + + # Test eval of "train" mode and "deploy" mode + gap = nn.AdaptiveAvgPool2d(output_size=(1)) + fc = nn.Linear(model_test_setting['out_sizes'][3], 10) + model.eval() + feat = model(imgs) + pred = fc(gap(feat[3]).flatten(1)) + model.switch_to_deploy() + for m in model.modules(): + if isinstance(m, RepVGGBlock): + assert m.deploy is True + feat_deploy = model(imgs) + pred_deploy = fc(gap(feat_deploy[3]).flatten(1)) + for i in range(4): + torch.allclose(feat[i], feat_deploy[i]) + torch.allclose(pred, pred_deploy) + + # Test RepVGG forward with add_ppf + model = RepVGG('A0', out_indices=(3, ), add_ppf=True) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1280, 2, 2)) + + # Test RepVGG forward with 'stem_channels' not in arch + arch = dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75, 2.5], + group_layer_map=None, + se_cfg=None) + model = RepVGG(arch, add_ppf=True) + model.stem.in_channels = min(64, 64 * 0.75) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1280, 2, 2)) + + +def test_repvgg_load(): + # Test output before and load from deploy checkpoint + model = RepVGG('A1', 
out_indices=(0, 1, 2, 3)) + inputs = torch.randn((1, 3, 32, 32)) + ckpt_path = os.path.join(tempfile.gettempdir(), 'ckpt.pth') + model.switch_to_deploy() + model.eval() + outputs = model(inputs) + + model_deploy = RepVGG('A1', out_indices=(0, 1, 2, 3), deploy=True) + save_checkpoint(model, ckpt_path) + load_checkpoint(model_deploy, ckpt_path, strict=True) + + outputs_load = model_deploy(inputs) + for feat, feat_load in zip(outputs, outputs_load): + assert torch.allclose(feat, feat_load) diff --git a/tests/test_models/test_backbones/test_res2net.py b/tests/test_models/test_backbones/test_res2net.py new file mode 100644 index 0000000..173d3e6 --- /dev/null +++ b/tests/test_models/test_backbones/test_res2net.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import Res2Net + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_resnet_cifar(): + # Only support depth 50, 101 and 152 + with pytest.raises(KeyError): + Res2Net(depth=18) + + # test the feature map size when depth is 50 + # and deep_stem=True, avg_down=True + model = Res2Net( + depth=50, out_indices=(0, 1, 2, 3), deep_stem=True, avg_down=True) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # test the feature map size when depth is 101 + # and deep_stem=False, avg_down=False + model = Res2Net( + depth=101, out_indices=(0, 1, 2, 3), deep_stem=False, avg_down=False) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test Res2Net with first stage frozen + frozen_stages = 1 + model = Res2Net(depth=50, frozen_stages=frozen_stages, deep_stem=False) + model.init_weights() + model.train() + assert check_norm_state([model.norm1], False) + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False diff --git a/tests/test_models/test_backbones/test_resnest.py b/tests/test_models/test_backbones/test_resnest.py new file mode 100644 index 0000000..7a0b250 --- /dev/null +++ b/tests/test_models/test_backbones/test_resnest.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
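+# The tests below check the ResNeSt split-attention bottleneck (radix and
+# reduction_factor arguments, avd_layer stride) and the multi-stage output
+# shapes of the ResNeSt backbone.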
+import pytest +import torch + +from mmcls.models.backbones import ResNeSt +from mmcls.models.backbones.resnest import Bottleneck as BottleneckS + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow') + + # Test ResNeSt Bottleneck structure + block = BottleneckS( + 64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch') + assert block.avd_layer.stride == 2 + assert block.conv2.channels == 64 + + # Test ResNeSt Bottleneck forward + block = BottleneckS(64, 64, radix=2, reduction_factor=4) + x = torch.randn(2, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([2, 64, 56, 56]) + + +def test_resnest(): + with pytest.raises(KeyError): + # ResNeSt depth should be in [50, 101, 152, 200] + ResNeSt(depth=18) + + # Test ResNeSt with radix 2, reduction_factor 4 + model = ResNeSt( + depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([2, 256, 56, 56]) + assert feat[1].shape == torch.Size([2, 512, 28, 28]) + assert feat[2].shape == torch.Size([2, 1024, 14, 14]) + assert feat[3].shape == torch.Size([2, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_resnet.py b/tests/test_models/test_backbones/test_resnet.py new file mode 100644 index 0000000..8ff8bc8 --- /dev/null +++ b/tests/test_models/test_backbones/test_resnet.py @@ -0,0 +1,618 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import ResNet, ResNetV1c, ResNetV1d +from mmcls.models.backbones.resnet import (BasicBlock, Bottleneck, ResLayer, + get_expansion) + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (BasicBlock, Bottleneck)): + return True + return False + + +def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_get_expansion(): + assert get_expansion(Bottleneck, 2) == 2 + assert get_expansion(BasicBlock) == 1 + assert get_expansion(Bottleneck) == 4 + + class MyResBlock(nn.Module): + + expansion = 8 + + assert get_expansion(MyResBlock) == 8 + + # expansion must be an integer or None + with pytest.raises(TypeError): + get_expansion(Bottleneck, '0') + + # expansion is not specified and cannot be inferred + with pytest.raises(TypeError): + + class SomeModule(nn.Module): + pass + + get_expansion(SomeModule) + + +def test_basic_block(): + # expansion must be 1 + with pytest.raises(AssertionError): + BasicBlock(64, 64, expansion=2) + + # BasicBlock with stride 1, out_channels == in_channels + block = BasicBlock(64, 64) + assert block.in_channels == 64 + assert block.mid_channels == 64 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 
64 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (1, 1) + assert block.conv2.in_channels == 64 + assert block.conv2.out_channels == 64 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # BasicBlock with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128)) + block = BasicBlock(64, 128, downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 128 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 128 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (1, 1) + assert block.conv2.in_channels == 128 + assert block.conv2.out_channels == 128 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 128, 56, 56]) + + # BasicBlock with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False), + nn.BatchNorm2d(128)) + block = BasicBlock(64, 128, stride=2, downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 128 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 128 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (2, 2) + assert block.conv2.in_channels == 128 + assert block.conv2.out_channels == 128 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 128, 28, 28]) + + # forward with checkpointing + block = BasicBlock(64, 64, with_cp=True) + assert block.with_cp + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_bottleneck(): + # style must be in ['pytorch', 'caffe'] + with pytest.raises(AssertionError): + Bottleneck(64, 64, style='tensorflow') + + # expansion must be divisible by out_channels + with pytest.raises(AssertionError): + Bottleneck(64, 64, expansion=3) + + # Test Bottleneck style + block = Bottleneck(64, 64, stride=2, style='pytorch') + assert block.conv1.stride == (1, 1) + assert block.conv2.stride == (2, 2) + block = Bottleneck(64, 64, stride=2, style='caffe') + assert block.conv1.stride == (2, 2) + assert block.conv2.stride == (1, 1) + + # Bottleneck with stride 1 + block = Bottleneck(64, 64, style='pytorch') + assert block.in_channels == 64 + assert block.mid_channels == 16 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 16 + assert block.conv1.kernel_size == (1, 1) + assert block.conv2.in_channels == 16 + assert block.conv2.out_channels == 16 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 16 + assert block.conv3.out_channels == 64 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 64, 56, 56) + + # Bottleneck with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128)) + block = Bottleneck(64, 128, style='pytorch', downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 32 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 32 + assert block.conv1.kernel_size 
== (1, 1) + assert block.conv2.in_channels == 32 + assert block.conv2.out_channels == 32 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 32 + assert block.conv3.out_channels == 128 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 128, 56, 56) + + # Bottleneck with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128)) + block = Bottleneck( + 64, 128, stride=2, style='pytorch', downsample=downsample) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 128, 28, 28) + + # Bottleneck with expansion 2 + block = Bottleneck(64, 64, style='pytorch', expansion=2) + assert block.in_channels == 64 + assert block.mid_channels == 32 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 32 + assert block.conv1.kernel_size == (1, 1) + assert block.conv2.in_channels == 32 + assert block.conv2.out_channels == 32 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 32 + assert block.conv3.out_channels == 64 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 64, 56, 56) + + # Test Bottleneck with checkpointing + block = Bottleneck(64, 64, with_cp=True) + block.train() + assert block.with_cp + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_basicblock_reslayer(): + # 3 BasicBlock w/o downsample + layer = ResLayer(BasicBlock, 3, 32, 32) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + # 3 BasicBlock w/ stride 1 and downsample + layer = ResLayer(BasicBlock, 3, 32, 64) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (1, 1) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 56, 56) + + # 3 BasicBlock w/ stride 2 and downsample + layer = ResLayer(BasicBlock, 3, 32, 64, stride=2) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 BasicBlock w/ stride 2 and downsample with avg pool + layer = ResLayer(BasicBlock, 3, 32, 64, stride=2, avg_down=True) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].downsample is not None and len(layer[0].downsample) == 3 + assert isinstance(layer[0].downsample[0], 
nn.AvgPool2d) + assert layer[0].downsample[0].stride == 2 + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + +def test_bottleneck_reslayer(): + # 3 Bottleneck w/o downsample + layer = ResLayer(Bottleneck, 3, 32, 32) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + # 3 Bottleneck w/ stride 1 and downsample + layer = ResLayer(Bottleneck, 3, 32, 64) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 1 + assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (1, 1) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 56, 56) + + # 3 Bottleneck w/ stride 2 and downsample + layer = ResLayer(Bottleneck, 3, 32, 64, stride=2) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 Bottleneck w/ stride 2 and downsample with avg pool + layer = ResLayer(Bottleneck, 3, 32, 64, stride=2, avg_down=True) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 3 + assert isinstance(layer[0].downsample[0], nn.AvgPool2d) + assert layer[0].downsample[0].stride == 2 + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 Bottleneck with custom expansion + layer = ResLayer(Bottleneck, 3, 32, 32, expansion=2) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].stride == 1 + assert layer[i].conv1.out_channels == 16 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + +def test_resnet(): + """Test resnet backbone.""" + with pytest.raises(KeyError): + # ResNet depth should be in [18, 34, 50, 101, 152] + ResNet(20) + + with pytest.raises(AssertionError): + # In ResNet: 
1 <= num_stages <= 4 + ResNet(50, num_stages=0) + + with pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=5) + + with pytest.raises(AssertionError): + # len(strides) == len(dilations) == num_stages + ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = ResNet(50) + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + ResNet(50, style='tensorflow') + + # Test ResNet50 norm_eval=True + model = ResNet(50, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test ResNet50 with torchvision pretrained weight + model = ResNet( + depth=50, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test ResNet50 with first stage frozen + frozen_stages = 1 + model = ResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + assert model.norm1.training is False + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test ResNet18 forward + model = ResNet(18, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 64, 56, 56) + assert feat[1].shape == (1, 128, 28, 28) + assert feat[2].shape == (1, 256, 14, 14) + assert feat[3].shape == (1, 512, 7, 7) + + # Test ResNet50 with BatchNorm forward + model = ResNet(50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50 with DropPath forward + model = ResNet(50, out_indices=(0, 1, 2, 3), drop_path_rate=0.5) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50 with layers 1, 2, 3 out forward + model = ResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + + # Test ResNet50 with layers 3 (top feature maps) out forward + model = ResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 2048, 7, 7) + + # Test ResNet50 with checkpoint forward + model = ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = 
model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # zero initialization of residual blocks + model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert all_zeros(m.norm2) + + # non-zero initialization of residual blocks + model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=False) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert not all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert not all_zeros(m.norm2) + + +def test_resnet_v1c(): + model = ResNetV1c(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50V1d with first stage frozen + frozen_stages = 1 + model = ResNetV1d(depth=50, frozen_stages=frozen_stages) + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + model.init_weights() + model.train() + check_norm_state(model.stem, False) + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + +def test_resnet_v1d(): + model = ResNetV1d(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50V1d with first stage frozen + frozen_stages = 1 + model = ResNetV1d(depth=50, frozen_stages=frozen_stages) + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + model.init_weights() + model.train() + check_norm_state(model.stem, False) + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + +def test_resnet_half_channel(): + model = ResNet(50, base_channels=32, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 128, 56, 56) + assert feat[1].shape == (1, 256, 28, 28) + assert feat[2].shape == (1, 512, 14, 14) + assert feat[3].shape == (1, 1024, 7, 7) diff --git 
a/tests/test_models/test_backbones/test_resnet_cifar.py b/tests/test_models/test_backbones/test_resnet_cifar.py new file mode 100644 index 0000000..af7bba6 --- /dev/null +++ b/tests/test_models/test_backbones/test_resnet_cifar.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import ResNet_CIFAR + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_resnet_cifar(): + # deep_stem must be False + with pytest.raises(AssertionError): + ResNet_CIFAR(depth=18, deep_stem=True) + + # test the feature map size when depth is 18 + model = ResNet_CIFAR(depth=18, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 32, 32) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 32, 32) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 64, 32, 32) + assert feat[1].shape == (1, 128, 16, 16) + assert feat[2].shape == (1, 256, 8, 8) + assert feat[3].shape == (1, 512, 4, 4) + + # test the feature map size when depth is 50 + model = ResNet_CIFAR(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 32, 32) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 32, 32) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 32, 32) + assert feat[1].shape == (1, 512, 16, 16) + assert feat[2].shape == (1, 1024, 8, 8) + assert feat[3].shape == (1, 2048, 4, 4) + + # Test ResNet_CIFAR with first stage frozen + frozen_stages = 1 + model = ResNet_CIFAR(depth=50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + check_norm_state([model.norm1], False) + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False diff --git a/tests/test_models/test_backbones/test_resnext.py b/tests/test_models/test_backbones/test_resnext.py new file mode 100644 index 0000000..4ee15f9 --- /dev/null +++ b/tests/test_models/test_backbones/test_resnext.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
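+# The tests below check the grouped 3x3 convolution of the ResNeXt bottleneck
+# (groups / width_per_group) and the backbone's multi-stage feature map shapes.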
+import pytest +import torch + +from mmcls.models.backbones import ResNeXt +from mmcls.models.backbones.resnext import Bottleneck as BottleneckX + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckX(64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test ResNeXt Bottleneck structure + block = BottleneckX( + 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 32 + assert block.conv2.out_channels == 128 + + # Test ResNeXt Bottleneck forward + block = BottleneckX(64, 64, base_channels=16, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_resnext(): + with pytest.raises(KeyError): + # ResNeXt depth should be in [50, 101, 152] + ResNeXt(depth=18) + + # Test ResNeXt with group 32, width_per_group 4 + model = ResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if isinstance(m, BottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test ResNeXt with group 32, width_per_group 4 and layers 3 out forward + model = ResNeXt(depth=50, groups=32, width_per_group=4, out_indices=(3, )) + for m in model.modules(): + if isinstance(m, BottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_seresnet.py b/tests/test_models/test_backbones/test_seresnet.py new file mode 100644 index 0000000..3267020 --- /dev/null +++ b/tests/test_models/test_backbones/test_seresnet.py @@ -0,0 +1,247 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
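+# The tests below cover SELayer (channel ratio), SEBottleneck (style, se_ratio,
+# checkpointing) and the SEResNet backbone, including frozen stages, norm_eval
+# and zero-initialized residual branches.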
+import pytest +import torch +from torch.nn.modules import AvgPool2d +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import SEResNet +from mmcls.models.backbones.resnet import ResLayer +from mmcls.models.backbones.seresnet import SEBottleneck, SELayer + + +def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_selayer(): + # Test selayer forward + layer = SELayer(64) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test selayer forward with different ratio + layer = SELayer(64, ratio=8) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_bottleneck(): + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEBottleneck(64, 64, style='tensorflow') + + # Test SEBottleneck with checkpoint forward + block = SEBottleneck(64, 64, with_cp=True) + assert block.with_cp + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test Bottleneck style + block = SEBottleneck(64, 256, stride=2, style='pytorch') + assert block.conv1.stride == (1, 1) + assert block.conv2.stride == (2, 2) + block = SEBottleneck(64, 256, stride=2, style='caffe') + assert block.conv1.stride == (2, 2) + assert block.conv2.stride == (1, 1) + + # Test Bottleneck forward + block = SEBottleneck(64, 64) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_res_layer(): + # Test ResLayer of 3 Bottleneck w\o downsample + layer = ResLayer(SEBottleneck, 3, 64, 64, se_ratio=16) + assert len(layer) == 3 + assert layer[0].conv1.in_channels == 64 + assert layer[0].conv1.out_channels == 16 + for i in range(1, len(layer)): + assert layer[i].conv1.in_channels == 64 + assert layer[i].conv1.out_channels == 16 + for i in range(len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test ResLayer of 3 SEBottleneck with downsample + layer = ResLayer(SEBottleneck, 3, 64, 256, se_ratio=16) + assert layer[0].downsample[0].out_channels == 256 + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 56, 56]) + + # Test ResLayer of 3 SEBottleneck with stride=2 + layer = ResLayer(SEBottleneck, 3, 64, 256, stride=2, se_ratio=8) + assert layer[0].downsample[0].out_channels == 256 + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 28, 28]) + + # Test ResLayer of 3 SEBottleneck with stride=2 and average downsample + layer = ResLayer( + SEBottleneck, 3, 64, 256, stride=2, avg_down=True, se_ratio=8) + assert isinstance(layer[0].downsample[0], AvgPool2d) + assert 
layer[0].downsample[1].out_channels == 256 + assert layer[0].downsample[1].stride == (1, 1) + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 28, 28]) + + +def test_seresnet(): + """Test resnet backbone.""" + with pytest.raises(KeyError): + # SEResNet depth should be in [50, 101, 152] + SEResNet(20) + + with pytest.raises(AssertionError): + # In SEResNet: 1 <= num_stages <= 4 + SEResNet(50, num_stages=0) + + with pytest.raises(AssertionError): + # In SEResNet: 1 <= num_stages <= 4 + SEResNet(50, num_stages=5) + + with pytest.raises(AssertionError): + # len(strides) == len(dilations) == num_stages + SEResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = SEResNet(50) + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEResNet(50, style='tensorflow') + + # Test SEResNet50 norm_eval=True + model = SEResNet(50, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test SEResNet50 with torchvision pretrained weight + model = SEResNet( + depth=50, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test SEResNet50 with first stage frozen + frozen_stages = 1 + model = SEResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + assert model.norm1.training is False + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test SEResNet50 with BatchNorm forward + model = SEResNet(50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 with layers 1, 2, 3 out forward + model = SEResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + + # Test SEResNet50 with layers 3 (top feature maps) out forward + model = SEResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 with checkpoint forward + model = SEResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if isinstance(m, SEBottleneck): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 
28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 zero initialization of residual + model = SEResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, SEBottleneck): + assert all_zeros(m.norm3) + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_seresnext.py b/tests/test_models/test_backbones/test_seresnext.py new file mode 100644 index 0000000..2431c07 --- /dev/null +++ b/tests/test_models/test_backbones/test_seresnext.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmcls.models.backbones import SEResNeXt +from mmcls.models.backbones.seresnext import SEBottleneck as SEBottleneckX + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEBottleneckX(64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test SEResNeXt Bottleneck structure + block = SEBottleneckX( + 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') + assert block.width_per_group == 4 + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 32 + assert block.conv2.out_channels == 128 + assert block.conv2.out_channels == block.mid_channels + + # Test SEResNeXt Bottleneck structure (groups=1) + block = SEBottleneckX( + 64, 256, groups=1, width_per_group=4, stride=2, style='pytorch') + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 1 + assert block.conv2.out_channels == 64 + assert block.mid_channels == 64 + assert block.conv2.out_channels == block.mid_channels + + # Test SEResNeXt Bottleneck forward + block = SEBottleneckX( + 64, 64, base_channels=16, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_seresnext(): + with pytest.raises(KeyError): + # SEResNeXt depth should be in [50, 101, 152] + SEResNeXt(depth=18) + + # Test SEResNeXt with group 32, width_per_group 4 + model = SEResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if isinstance(m, SEBottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNeXt with group 32, width_per_group 4 and layers 3 out forward + model = SEResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(3, )) + for m in model.modules(): + if isinstance(m, SEBottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_shufflenet_v1.py b/tests/test_models/test_backbones/test_shufflenet_v1.py new file mode 100644 index 0000000..97beee7 --- /dev/null +++ 
b/tests/test_models/test_backbones/test_shufflenet_v1.py @@ -0,0 +1,246 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import ShuffleNetV1 +from mmcls.models.backbones.shufflenet_v1 import ShuffleUnit + + +def is_block(modules): + """Check if is ShuffleNetV1 building block.""" + if isinstance(modules, (ShuffleUnit, )): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_shufflenetv1_shuffleunit(): + + with pytest.raises(ValueError): + # combine must be in ['add', 'concat'] + ShuffleUnit(24, 16, groups=3, first_block=True, combine='test') + + with pytest.raises(AssertionError): + # in_channels must be equal to out_channels when combine='add' + ShuffleUnit(64, 24, groups=4, first_block=True, combine='add') + + # Test ShuffleUnit with combine='add' + block = ShuffleUnit(24, 24, groups=3, first_block=True, combine='add') + x = torch.randn(1, 24, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + # Test ShuffleUnit with combine='concat' + block = ShuffleUnit(24, 240, groups=3, first_block=True, combine='concat') + x = torch.randn(1, 24, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 240, 28, 28)) + + # Test ShuffleUnit with checkpoint forward + block = ShuffleUnit( + 24, 24, groups=3, first_block=True, combine='add', with_cp=True) + assert block.with_cp + x = torch.randn(1, 24, 56, 56) + x.requires_grad = True + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + +def test_shufflenetv1_backbone(): + + with pytest.raises(ValueError): + # frozen_stages must be in range(-1, 4) + ShuffleNetV1(frozen_stages=10) + + with pytest.raises(ValueError): + # the item in out_indices must be in range(0, 4) + ShuffleNetV1(out_indices=[5]) + + with pytest.raises(ValueError): + # groups must be in [1, 2, 3, 4, 8] + ShuffleNetV1(groups=10) + + with pytest.raises(TypeError): + # pretrained must be str or None + model = ShuffleNetV1() + model.init_weights(pretrained=1) + + # Test ShuffleNetV1 norm state + model = ShuffleNetV1() + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + # Test ShuffleNetV1 with first stage frozen + frozen_stages = 1 + model = ShuffleNetV1(frozen_stages=frozen_stages, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(frozen_stages): + layer = model.layers[i] + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test ShuffleNetV1 forward with groups=1 + model = ShuffleNetV1(groups=1, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 144, 28, 28)) + assert feat[1].shape == torch.Size((1, 288, 14, 14)) + assert feat[2].shape == torch.Size((1, 576,
7, 7)) + + # Test ShuffleNetV1 forward with groups=2 + model = ShuffleNetV1(groups=2, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 200, 28, 28)) + assert feat[1].shape == torch.Size((1, 400, 14, 14)) + assert feat[2].shape == torch.Size((1, 800, 7, 7)) + + # Test ShuffleNetV1 forward with groups=3 + model = ShuffleNetV1(groups=3, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 240, 28, 28)) + assert feat[1].shape == torch.Size((1, 480, 14, 14)) + assert feat[2].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with groups=4 + model = ShuffleNetV1(groups=4, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 272, 28, 28)) + assert feat[1].shape == torch.Size((1, 544, 14, 14)) + assert feat[2].shape == torch.Size((1, 1088, 7, 7)) + + # Test ShuffleNetV1 forward with groups=8 + model = ShuffleNetV1(groups=8, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 384, 28, 28)) + assert feat[1].shape == torch.Size((1, 768, 14, 14)) + assert feat[2].shape == torch.Size((1, 1536, 7, 7)) + + # Test ShuffleNetV1 forward with GroupNorm forward + model = ShuffleNetV1( + groups=3, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 240, 28, 28)) + assert feat[1].shape == torch.Size((1, 480, 14, 14)) + assert feat[2].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with layers 1, 2 forward + model = ShuffleNetV1(groups=3, out_indices=(1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 2 + assert feat[0].shape == torch.Size((1, 480, 14, 14)) + assert feat[1].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with layers 2 forward + model = ShuffleNetV1(groups=3, out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with checkpoint forward + model = ShuffleNetV1(groups=3, with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp + + # Test ShuffleNetV1 with norm_eval + model = ShuffleNetV1(norm_eval=True) + model.init_weights() + model.train() + + 
assert check_norm_state(model.modules(), False) diff --git a/tests/test_models/test_backbones/test_shufflenet_v2.py b/tests/test_models/test_backbones/test_shufflenet_v2.py new file mode 100644 index 0000000..b7ab495 --- /dev/null +++ b/tests/test_models/test_backbones/test_shufflenet_v2.py @@ -0,0 +1,205 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import ShuffleNetV2 +from mmcls.models.backbones.shufflenet_v2 import InvertedResidual + + +def is_block(modules): + """Check if is ShuffleNetV2 building block.""" + if isinstance(modules, (InvertedResidual, )): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_shufflenetv2_invertedresidual(): + + with pytest.raises(AssertionError): + # when stride==1, in_channels should be equal to out_channels // 2 * 2 + InvertedResidual(24, 32, stride=1) + + with pytest.raises(AssertionError): + # when in_channels != out_channels // 2 * 2, stride should not be + # equal to 1. + InvertedResidual(24, 32, stride=1) + + # Test InvertedResidual forward + block = InvertedResidual(24, 48, stride=2) + x = torch.randn(1, 24, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 48, 28, 28)) + + # Test InvertedResidual with checkpoint forward + block = InvertedResidual(48, 48, stride=1, with_cp=True) + assert block.with_cp + x = torch.randn(1, 48, 56, 56) + x.requires_grad = True + x_out = block(x) + assert x_out.shape == torch.Size((1, 48, 56, 56)) + + +def test_shufflenetv2_backbone(): + + with pytest.raises(ValueError): + # widen_factor must be in [0.5, 1.0, 1.5, 2.0] + ShuffleNetV2(widen_factor=3.0) + + with pytest.raises(ValueError): + # frozen_stages must be in [0, 1, 2, 3] + ShuffleNetV2(widen_factor=1.0, frozen_stages=4) + + with pytest.raises(ValueError): + # out_indices must be in [0, 1, 2, 3] + ShuffleNetV2(widen_factor=1.0, out_indices=(4, )) + + with pytest.raises(TypeError): + # pretrained must be str or None + model = ShuffleNetV2() + model.init_weights(pretrained=1) + + # Test ShuffleNetV2 norm state + model = ShuffleNetV2() + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + # Test ShuffleNetV2 with first stage frozen + frozen_stages = 1 + model = ShuffleNetV2(frozen_stages=frozen_stages) + model.init_weights() + model.train() + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(0, frozen_stages): + layer = model.layers[i] + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test ShuffleNetV2 with norm_eval + model = ShuffleNetV2(norm_eval=True) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), False) + + # Test ShuffleNetV2 forward with widen_factor=0.5 + model = ShuffleNetV2(widen_factor=0.5, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert
len(feat) == 4 + assert feat[0].shape == torch.Size((1, 48, 28, 28)) + assert feat[1].shape == torch.Size((1, 96, 14, 14)) + assert feat[2].shape == torch.Size((1, 192, 7, 7)) + + # Test ShuffleNetV2 forward with widen_factor=1.0 + model = ShuffleNetV2(widen_factor=1.0, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 116, 28, 28)) + assert feat[1].shape == torch.Size((1, 232, 14, 14)) + assert feat[2].shape == torch.Size((1, 464, 7, 7)) + + # Test ShuffleNetV2 forward with widen_factor=1.5 + model = ShuffleNetV2(widen_factor=1.5, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 176, 28, 28)) + assert feat[1].shape == torch.Size((1, 352, 14, 14)) + assert feat[2].shape == torch.Size((1, 704, 7, 7)) + + # Test ShuffleNetV2 forward with widen_factor=2.0 + model = ShuffleNetV2(widen_factor=2.0, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 244, 28, 28)) + assert feat[1].shape == torch.Size((1, 488, 14, 14)) + assert feat[2].shape == torch.Size((1, 976, 7, 7)) + + # Test ShuffleNetV2 forward with layers 3 forward + model = ShuffleNetV2(widen_factor=1.0, out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 464, 7, 7)) + + # Test ShuffleNetV2 forward with layers 1 2 forward + model = ShuffleNetV2(widen_factor=1.0, out_indices=(1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 2 + assert feat[0].shape == torch.Size((1, 232, 14, 14)) + assert feat[1].shape == torch.Size((1, 464, 7, 7)) + + # Test ShuffleNetV2 forward with checkpoint forward + model = ShuffleNetV2(widen_factor=1.0, with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp diff --git a/tests/test_models/test_backbones/test_swin_transformer.py b/tests/test_models/test_backbones/test_swin_transformer.py new file mode 100644 index 0000000..3394730 --- /dev/null +++ b/tests/test_models/test_backbones/test_swin_transformer.py @@ -0,0 +1,255 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
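As an illustrative aside (not part of the patch itself), the ShuffleNetV1/V2 forward tests above pin the per-stage channel widths for each configuration. A minimal standalone sketch of the same check, assuming `mmcls` is importable and reusing only constructor arguments that already appear in the tests:

```python
# Illustrative sketch only: drive the ShuffleNetV2 backbone the same way
# the tests above do and inspect the multi-scale feature maps.
import torch

from mmcls.models.backbones import ShuffleNetV2

model = ShuffleNetV2(widen_factor=1.0, out_indices=(0, 1, 2))
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))

# Per the assertions above, widen_factor=1.0 is expected to yield
# (1, 116, 28, 28), (1, 232, 14, 14) and (1, 464, 7, 7).
for i, feat in enumerate(feats):
    print(f'stage {i}: {tuple(feat.shape)}')
```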
+import math +import os +import tempfile +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +import torch +from mmcv.runner import load_checkpoint, save_checkpoint +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import SwinTransformer +from mmcls.models.backbones.swin_transformer import SwinBlock +from .utils import timm_resize_pos_embed + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +class TestSwinTransformer(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', img_size=224, patch_size=4, drop_path_rate=0.1) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + SwinTransformer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_heads': [3, 6, 12, 16], + } + SwinTransformer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + depths = [2, 2, 4, 2] + num_heads = [6, 12, 6, 12] + cfg['arch'] = { + 'embed_dims': 256, + 'depths': depths, + 'num_heads': num_heads + } + model = SwinTransformer(**cfg) + for i, stage in enumerate(model.stages): + self.assertEqual(stage.embed_dims, 256 * (2**i)) + self.assertEqual(len(stage.blocks), depths[i]) + self.assertEqual(stage.blocks[0].attn.w_msa.num_heads, + num_heads[i]) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = SwinTransformer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue( + torch.allclose(model.absolute_pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse( + torch.allclose(model.absolute_pos_embed, torch.tensor(0.))) + + pretrain_pos_embed = model.absolute_pos_embed.clone().detach() + + tmpdir = tempfile.gettempdir() + # Save v3 checkpoints + checkpoint_v2 = os.path.join(tmpdir, 'v3.pth') + save_checkpoint(model, checkpoint_v2) + # Save v1 checkpoints + setattr(model, 'norm', model.norm3) + setattr(model.stages[0].blocks[1].attn, 'attn_mask', + torch.zeros(64, 49, 49)) + model._version = 1 + del model.norm3 + checkpoint_v1 = os.path.join(tmpdir, 'v1.pth') + save_checkpoint(model, checkpoint_v1) + + # test load v1 checkpoint + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + model = SwinTransformer(**cfg) + load_checkpoint(model, checkpoint_v1, strict=True) + + # test load v3 checkpoint + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + model = SwinTransformer(**cfg) + load_checkpoint(model, checkpoint_v2, strict=True) + + # test load v3 checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + cfg['use_abs_pos_embed'] = True + model = SwinTransformer(**cfg) + load_checkpoint(model, checkpoint_v2, strict=True) + resized_pos_embed = timm_resize_pos_embed( + pretrain_pos_embed, model.absolute_pos_embed, num_tokens=0) + self.assertTrue( + 
torch.allclose(model.absolute_pos_embed, resized_pos_embed)) + + os.remove(checkpoint_v1) + os.remove(checkpoint_v2) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = SwinTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 7, 7)) + + # test with window_size=12 + cfg = deepcopy(self.cfg) + cfg['window_size'] = 12 + model = SwinTransformer(**cfg) + outs = model(torch.randn(1, 3, 384, 384)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 12, 12)) + with self.assertRaisesRegex(AssertionError, r'the window size \(12\)'): + model(torch.randn(1, 3, 224, 224)) + + # test with pad_small_map=True + cfg = deepcopy(self.cfg) + cfg['window_size'] = 12 + cfg['pad_small_map'] = True + model = SwinTransformer(**cfg) + outs = model(torch.randn(1, 3, 224, 224)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 7, 7)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + model = SwinTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stride, out in zip([1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 128 * stride, 56 // stride, 56 // stride)) + + # test with checkpoint forward + cfg = deepcopy(self.cfg) + cfg['with_cp'] = True + model = SwinTransformer(**cfg) + for m in model.modules(): + if isinstance(m, SwinBlock): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 7, 7)) + + # test with dynamic input shape + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = SwinTransformer(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 32), + math.ceil(imgs.shape[3] / 32)) + self.assertEqual(feat.shape, (1, 1024, *expect_feat_shape)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = SwinTransformer(**cfg) + depths = model.arch_settings['depths'] + blocks = chain(*[stage.blocks for stage in model.stages]) + for i, block in enumerate(blocks): + expect_prob = 0.2 / (sum(depths) - 1) * i + self.assertAlmostEqual(block.ffn.dropout_layer.drop_prob, + expect_prob) + self.assertAlmostEqual(block.attn.drop.drop_prob, expect_prob) + + # test Swin-Transformer with norm_eval=True + cfg = deepcopy(self.cfg) + cfg['norm_eval'] = True + cfg['norm_cfg'] = dict(type='BN') + cfg['stage_cfgs'] = dict(block_cfgs=dict(norm_cfg=dict(type='BN'))) + model = SwinTransformer(**cfg) + model.init_weights() + model.train() + self.assertTrue(check_norm_state(model.modules(), False)) + + # test Swin-Transformer with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 0 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = SwinTransformer(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. 
+ self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages + 1): + stage = model.stages[i] + for param in stage.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + stage = model.stages[i] + for param in stage.parameters(): + self.assertTrue(param.requires_grad) + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_swin_transformer_v2.py b/tests/test_models/test_backbones/test_swin_transformer_v2.py new file mode 100644 index 0000000..1fd4314 --- /dev/null +++ b/tests/test_models/test_backbones/test_swin_transformer_v2.py @@ -0,0 +1,243 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +import torch +from mmcv.runner import load_checkpoint, save_checkpoint +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import SwinTransformerV2 +from mmcls.models.backbones.swin_transformer import SwinBlock +from .utils import timm_resize_pos_embed + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +class TestSwinTransformerV2(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', img_size=256, patch_size=4, drop_path_rate=0.1) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + SwinTransformerV2(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_heads': [3, 6, 12, 16], + } + SwinTransformerV2(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + depths = [2, 2, 6, 2] + num_heads = [6, 12, 6, 12] + cfg['arch'] = { + 'embed_dims': 256, + 'depths': depths, + 'num_heads': num_heads, + 'extra_norm_every_n_blocks': 2 + } + model = SwinTransformerV2(**cfg) + for i, stage in enumerate(model.stages): + self.assertEqual(stage.out_channels, 256 * (2**i)) + self.assertEqual(len(stage.blocks), depths[i]) + self.assertEqual(stage.blocks[0].attn.w_msa.num_heads, + num_heads[i]) + self.assertIsInstance(model.stages[2].blocks[5], torch.nn.Module) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = SwinTransformerV2(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue( + torch.allclose(model.absolute_pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse( + torch.allclose(model.absolute_pos_embed, torch.tensor(0.))) + + pretrain_pos_embed = model.absolute_pos_embed.clone().detach() + + tmpdir = tempfile.TemporaryDirectory() + # Save 
checkpoints + checkpoint = os.path.join(tmpdir.name, 'checkpoint.pth') + save_checkpoint(model, checkpoint) + + # test load checkpoint + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + model = SwinTransformerV2(**cfg) + load_checkpoint(model, checkpoint, strict=False) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + cfg['use_abs_pos_embed'] = True + model = SwinTransformerV2(**cfg) + load_checkpoint(model, checkpoint, strict=False) + resized_pos_embed = timm_resize_pos_embed( + pretrain_pos_embed, model.absolute_pos_embed, num_tokens=0) + self.assertTrue( + torch.allclose(model.absolute_pos_embed, resized_pos_embed)) + + tmpdir.cleanup() + + def test_forward(self): + imgs = torch.randn(1, 3, 256, 256) + + cfg = deepcopy(self.cfg) + model = SwinTransformerV2(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 8, 8)) + + # test with window_size=12 + cfg = deepcopy(self.cfg) + cfg['window_size'] = 12 + model = SwinTransformerV2(**cfg) + outs = model(torch.randn(1, 3, 384, 384)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 12, 12)) + with self.assertRaisesRegex(AssertionError, r'the window size \(12\)'): + model(torch.randn(1, 3, 256, 256)) + + # test with pad_small_map=True + cfg = deepcopy(self.cfg) + cfg['window_size'] = 12 + cfg['pad_small_map'] = True + model = SwinTransformerV2(**cfg) + outs = model(torch.randn(1, 3, 256, 256)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 8, 8)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + model = SwinTransformerV2(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stride, out in zip([1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 128 * stride, 64 // stride, 64 // stride)) + + # test with checkpoint forward + cfg = deepcopy(self.cfg) + cfg['with_cp'] = True + model = SwinTransformerV2(**cfg) + for m in model.modules(): + if isinstance(m, SwinBlock): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 8, 8)) + + # test with dynamic input shape + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + cfg['pad_small_map'] = True + model = SwinTransformerV2(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 32), + math.ceil(imgs.shape[3] / 32)) + self.assertEqual(feat.shape, (1, 1024, *expect_feat_shape)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = SwinTransformerV2(**cfg) + depths = model.arch_settings['depths'] + blocks = chain(*[stage.blocks for stage in model.stages]) + for i, block in enumerate(blocks): + expect_prob = 0.2 / (sum(depths) - 1) * i + self.assertAlmostEqual(block.ffn.dropout_layer.drop_prob, + expect_prob) + self.assertAlmostEqual(block.attn.drop.drop_prob, expect_prob) + + # test Swin-Transformer V2 with 
norm_eval=True + cfg = deepcopy(self.cfg) + cfg['norm_eval'] = True + cfg['norm_cfg'] = dict(type='BN') + cfg['stage_cfgs'] = dict(block_cfgs=dict(norm_cfg=dict(type='BN'))) + model = SwinTransformerV2(**cfg) + model.init_weights() + model.train() + self.assertTrue(check_norm_state(model.modules(), False)) + + # test Swin-Transformer V2 with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 0 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = SwinTransformerV2(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages + 1): + stage = model.stages[i] + for param in stage.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + stage = model.stages[i] + for param in stage.parameters(): + self.assertTrue(param.requires_grad) + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_t2t_vit.py b/tests/test_models/test_backbones/test_t2t_vit.py new file mode 100644 index 0000000..f3103c6 --- /dev/null +++ b/tests/test_models/test_backbones/test_t2t_vit.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import numpy as np +import torch +from mmcv.runner import load_checkpoint, save_checkpoint + +from mmcls.models.backbones import T2T_ViT +from mmcls.models.backbones.t2t_vit import get_sinusoid_encoding +from .utils import timm_resize_pos_embed + + +class TestT2TViT(TestCase): + + def setUp(self): + self.cfg = dict( + img_size=224, + in_channels=3, + embed_dims=384, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=14, + drop_path_rate=0.1) + + def test_structure(self): + # The performer hasn't been implemented + cfg = deepcopy(self.cfg) + cfg['t2t_cfg']['use_performer'] = True + with self.assertRaises(NotImplementedError): + T2T_ViT(**cfg) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + T2T_ViT(**cfg) + cfg['out_indices'] = [0, 15] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 15'): + T2T_ViT(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = T2T_ViT(**cfg) + self.assertEqual(len(model.encoder), 14) + dpr_inc = 0.1 / (14 - 1) + dpr = 0 + for layer in model.encoder: + self.assertEqual(layer.attn.embed_dims, 384) + # The default mlp_ratio is 3 + self.assertEqual(layer.ffn.feedforward_channels, 384 * 3) + self.assertAlmostEqual(layer.attn.out_drop.drop_prob, dpr) + self.assertAlmostEqual(layer.ffn.dropout_layer.drop_prob, dpr) + dpr += dpr_inc + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [dict(type='TruncNormal', layer='Linear', std=.02)] + model = T2T_ViT(**cfg) + ori_weight = model.tokens_to_token.project.weight.clone().detach() + + model.init_weights() + initialized_weight = model.tokens_to_token.project.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + # test load checkpoint + pretrain_pos_embed = 
model.pos_embed.clone().detach() + tmpdir = tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + save_checkpoint(model, checkpoint) + cfg = deepcopy(self.cfg) + model = T2T_ViT(**cfg) + load_checkpoint(model, checkpoint, strict=True) + self.assertTrue(torch.allclose(model.pos_embed, pretrain_pos_embed)) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + model = T2T_ViT(**cfg) + load_checkpoint(model, checkpoint, strict=True) + resized_pos_embed = timm_resize_pos_embed(pretrain_pos_embed, + model.pos_embed) + self.assertTrue(torch.allclose(model.pos_embed, resized_pos_embed)) + + os.remove(checkpoint) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + # test with_cls_token=False + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['output_cls_token'] = True + with self.assertRaisesRegex(AssertionError, 'but got False'): + T2T_ViT(**cfg) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['output_cls_token'] = False + model = T2T_ViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 384, 14, 14)) + + # test with output_cls_token + cfg = deepcopy(self.cfg) + model = T2T_ViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token, cls_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 384, 14, 14)) + self.assertEqual(cls_token.shape, (1, 384)) + + # test without output_cls_token + cfg = deepcopy(self.cfg) + cfg['output_cls_token'] = False + model = T2T_ViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 384, 14, 14)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = T2T_ViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + patch_token, cls_token = out + self.assertEqual(patch_token.shape, (1, 384, 14, 14)) + self.assertEqual(cls_token.shape, (1, 384)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = T2T_ViT(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token, cls_token = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 16), + math.ceil(imgs.shape[3] / 16)) + self.assertEqual(patch_token.shape, (1, 384, *expect_feat_shape)) + self.assertEqual(cls_token.shape, (1, 384)) + + +def test_get_sinusoid_encoding(): + # original numpy based third-party implementation copied from mmcls + # https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 + def get_sinusoid_encoding_numpy(n_position, d_hid): + + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + n_positions = [128, 256, 512, 1024] + 
embed_dims = [128, 256, 512, 1024] + for n_position in n_positions: + for embed_dim in embed_dims: + out_mmcls = get_sinusoid_encoding(n_position, embed_dim) + out_numpy = get_sinusoid_encoding_numpy(n_position, embed_dim) + error = (out_mmcls - out_numpy).abs().max() + assert error < 1e-9, 'Test case n_position=%d, embed_dim=%d failed' + return diff --git a/tests/test_models/test_backbones/test_timm_backbone.py b/tests/test_models/test_backbones/test_timm_backbone.py new file mode 100644 index 0000000..4628309 --- /dev/null +++ b/tests/test_models/test_backbones/test_timm_backbone.py @@ -0,0 +1,204 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch import nn +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import TIMMBackbone + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_timm_backbone(): + """Test timm backbones, features_only=False (default).""" + with pytest.raises(TypeError): + # TIMMBackbone has 1 required positional argument: 'model_name' + model = TIMMBackbone(pretrained=True) + + with pytest.raises(TypeError): + # pretrained must be bool + model = TIMMBackbone(model_name='resnet18', pretrained='model.pth') + + # Test resnet18 from timm + model = TIMMBackbone(model_name='resnet18') + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + assert isinstance(model.timm_model.global_pool.pool, nn.Identity) + assert isinstance(model.timm_model.fc, nn.Identity) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 512, 7, 7)) + + # Test efficientnet_b1 with pretrained weights + model = TIMMBackbone(model_name='efficientnet_b1', pretrained=True) + model.init_weights() + model.train() + assert isinstance(model.timm_model.global_pool.pool, nn.Identity) + assert isinstance(model.timm_model.classifier, nn.Identity) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 1280, 7, 7)) + + # Test vit_tiny_patch16_224 with pretrained weights + model = TIMMBackbone(model_name='vit_tiny_patch16_224', pretrained=True) + model.init_weights() + model.train() + assert isinstance(model.timm_model.head, nn.Identity) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + # Disable the test since TIMM's behavior changes between 0.5.4 and 0.5.5 + # assert feat[0].shape == torch.Size((1, 197, 192)) + + +def test_timm_backbone_features_only(): + """Test timm backbones, features_only=True.""" + # Test different norm_layer, can be: 'SyncBN', 'BN2d', 'GN', 'LN', 'IN' + # Test resnet18 from timm, norm_layer='BN2d' + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32, + norm_layer='BN2d') + + # Test resnet18 from timm, norm_layer='SyncBN' + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32, + norm_layer='SyncBN') + + # Test resnet18 from timm, output_stride=32 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + assert len(feats) == 5 + 
assert feats[0].shape == torch.Size((1, 64, 112, 112)) + assert feats[1].shape == torch.Size((1, 64, 56, 56)) + assert feats[2].shape == torch.Size((1, 128, 28, 28)) + assert feats[3].shape == torch.Size((1, 256, 14, 14)) + assert feats[4].shape == torch.Size((1, 512, 7, 7)) + + # Test resnet18 from timm, output_stride=32, out_indices=(1, 2, 3) + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32, + out_indices=(1, 2, 3)) + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + assert len(feats) == 3 + assert feats[0].shape == torch.Size((1, 64, 56, 56)) + assert feats[1].shape == torch.Size((1, 128, 28, 28)) + assert feats[2].shape == torch.Size((1, 256, 14, 14)) + + # Test resnet18 from timm, output_stride=16 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=16) + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 112, 112)) + assert feats[1].shape == torch.Size((1, 64, 56, 56)) + assert feats[2].shape == torch.Size((1, 128, 28, 28)) + assert feats[3].shape == torch.Size((1, 256, 14, 14)) + assert feats[4].shape == torch.Size((1, 512, 14, 14)) + + # Test resnet18 from timm, output_stride=8 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 112, 112)) + assert feats[1].shape == torch.Size((1, 64, 56, 56)) + assert feats[2].shape == torch.Size((1, 128, 28, 28)) + assert feats[3].shape == torch.Size((1, 256, 28, 28)) + assert feats[4].shape == torch.Size((1, 512, 28, 28)) + + # Test efficientnet_b1 with pretrained weights + model = TIMMBackbone( + model_name='efficientnet_b1', features_only=True, pretrained=True) + imgs = torch.randn(1, 3, 64, 64) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 16, 32, 32)) + assert feats[1].shape == torch.Size((1, 24, 16, 16)) + assert feats[2].shape == torch.Size((1, 40, 8, 8)) + assert feats[3].shape == torch.Size((1, 112, 4, 4)) + assert feats[4].shape == torch.Size((1, 320, 2, 2)) + + # Test resnetv2_50x1_bitm from timm, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_50x1_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 4, 4)) + assert feats[1].shape == torch.Size((1, 256, 2, 2)) + assert feats[2].shape == torch.Size((1, 512, 1, 1)) + assert feats[3].shape == torch.Size((1, 1024, 1, 1)) + assert feats[4].shape == torch.Size((1, 2048, 1, 1)) + + # Test resnetv2_50x3_bitm from timm, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_50x3_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 192, 4, 4)) + assert feats[1].shape == torch.Size((1, 768, 2, 2)) + assert feats[2].shape == torch.Size((1, 1536, 1, 1)) + assert feats[3].shape == torch.Size((1, 3072, 1, 1)) + assert feats[4].shape == torch.Size((1, 6144, 1, 1)) + + # Test resnetv2_101x1_bitm from timm, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_101x1_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = 
model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 4, 4)) + assert feats[1].shape == torch.Size((1, 256, 2, 2)) + assert feats[2].shape == torch.Size((1, 512, 1, 1)) + assert feats[3].shape == torch.Size((1, 1024, 1, 1)) + assert feats[4].shape == torch.Size((1, 2048, 1, 1)) diff --git a/tests/test_models/test_backbones/test_tnt.py b/tests/test_models/test_backbones/test_tnt.py new file mode 100644 index 0000000..2feffd6 --- /dev/null +++ b/tests/test_models/test_backbones/test_tnt.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import TNT + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_tnt_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = TNT() + model.init_weights(pretrained=0) + + # Test tnt_base_patch16_224 + model = TNT() + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 640)) + + # Test tnt with embed_dims=768 + arch = { + 'embed_dims_outer': 768, + 'embed_dims_inner': 48, + 'num_layers': 12, + 'num_heads_outer': 6, + 'num_heads_inner': 4 + } + model = TNT(arch=arch) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 768)) diff --git a/tests/test_models/test_backbones/test_twins.py b/tests/test_models/test_backbones/test_twins.py new file mode 100644 index 0000000..b692584 --- /dev/null +++ b/tests/test_models/test_backbones/test_twins.py @@ -0,0 +1,243 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
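As a brief aside (illustrative only, and assuming the optional `timm` dependency is installed), the `TIMMBackbone` wrapper exercised above can be driven the same way outside the test suite; the arguments below are copied from `test_timm_backbone_features_only`:

```python
# Illustrative sketch only: wrap a timm model as an mmcls backbone with
# features_only=True and a subset of out_indices, as in the tests above.
import torch

from mmcls.models.backbones import TIMMBackbone

backbone = TIMMBackbone(
    model_name='resnet18',
    features_only=True,
    pretrained=False,
    output_stride=32,
    out_indices=(1, 2, 3))
backbone.eval()

with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))

# Per the assertions above: (1, 64, 56, 56), (1, 128, 28, 28), (1, 256, 14, 14).
for feat in feats:
    print(tuple(feat.shape))
```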
+import copy + +import pytest +import torch +import torch.nn as nn + +from mmcls.models.backbones.twins import (PCPVT, SVT, + GlobalSubsampledAttention, + LocallyGroupedSelfAttention) + + +def test_LSA_module(): + lsa = LocallyGroupedSelfAttention(embed_dims=32, window_size=3) + outs = lsa(torch.randn(1, 3136, 32), (56, 56)) + assert outs.shape == torch.Size([1, 3136, 32]) + + +def test_GSA_module(): + gsa = GlobalSubsampledAttention(embed_dims=32, num_heads=8) + outs = gsa(torch.randn(1, 3136, 32), (56, 56)) + assert outs.shape == torch.Size([1, 3136, 32]) + + +def test_pcpvt(): + # test init + path = 'PATH_THAT_DO_NOT_EXIST' + + # init_cfg loads pretrain from an non-existent file + model = PCPVT('s', init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # init_cfg=123, whose type is unsupported + model = PCPVT('s', init_cfg=123) + with pytest.raises(TypeError): + model.init_weights() + + H, W = (64, 64) + temp = torch.randn((1, 3, H, W)) + + # test output last feat + model = PCPVT('small') + model.init_weights() + outs = model(temp) + assert len(outs) == 1 + assert outs[-1].shape == (1, 512, H // 32, W // 32) + + # test with mutil outputs + model = PCPVT('small', out_indices=(0, 1, 2, 3)) + model.init_weights() + outs = model(temp) + assert len(outs) == 4 + assert outs[0].shape == (1, 64, H // 4, W // 4) + assert outs[1].shape == (1, 128, H // 8, W // 8) + assert outs[2].shape == (1, 320, H // 16, W // 16) + assert outs[3].shape == (1, 512, H // 32, W // 32) + + # test with arch of dict + arch = { + 'embed_dims': [64, 128, 320, 512], + 'depths': [3, 4, 18, 3], + 'num_heads': [1, 2, 5, 8], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [8, 8, 4, 4], + 'sr_ratios': [8, 4, 2, 1] + } + + pcpvt_arch = copy.deepcopy(arch) + model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3)) + model.init_weights() + outs = model(temp) + assert len(outs) == 4 + assert outs[0].shape == (1, 64, H // 4, W // 4) + assert outs[1].shape == (1, 128, H // 8, W // 8) + assert outs[2].shape == (1, 320, H // 16, W // 16) + assert outs[3].shape == (1, 512, H // 32, W // 32) + + # assert length of arch value not equal + pcpvt_arch = copy.deepcopy(arch) + pcpvt_arch['sr_ratios'] = [8, 4, 2] + with pytest.raises(AssertionError): + model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3)) + + # assert lack arch essential_keys + pcpvt_arch = copy.deepcopy(arch) + del pcpvt_arch['sr_ratios'] + with pytest.raises(AssertionError): + model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3)) + + # assert arch value not list + pcpvt_arch = copy.deepcopy(arch) + pcpvt_arch['sr_ratios'] = 1 + with pytest.raises(AssertionError): + model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3)) + + pcpvt_arch = copy.deepcopy(arch) + pcpvt_arch['sr_ratios'] = '1, 2, 3, 4' + with pytest.raises(AssertionError): + model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3)) + + # test norm_after_stage is bool True + model = PCPVT('small', norm_after_stage=True, norm_cfg=dict(type='LN')) + for i in range(model.num_stage): + assert hasattr(model, f'norm_after_stage{i}') + assert isinstance(getattr(model, f'norm_after_stage{i}'), nn.LayerNorm) + + # test norm_after_stage is bool Flase + model = PCPVT('small', norm_after_stage=False) + for i in range(model.num_stage): + assert hasattr(model, f'norm_after_stage{i}') + assert isinstance(getattr(model, f'norm_after_stage{i}'), 
nn.Identity) + + # test norm_after_stage is bool list + norm_after_stage = [False, True, False, True] + model = PCPVT('small', norm_after_stage=norm_after_stage) + assert len(norm_after_stage) == model.num_stage + for i in range(model.num_stage): + assert hasattr(model, f'norm_after_stage{i}') + norm_layer = getattr(model, f'norm_after_stage{i}') + if norm_after_stage[i]: + assert isinstance(norm_layer, nn.LayerNorm) + else: + assert isinstance(norm_layer, nn.Identity) + + # test norm_after_stage is not bool list + norm_after_stage = [False, 'True', False, True] + with pytest.raises(AssertionError): + model = PCPVT('small', norm_after_stage=norm_after_stage) + + +def test_svt(): + # test init + path = 'PATH_THAT_DO_NOT_EXIST' + + # init_cfg loads pretrain from an non-existent file + model = SVT('s', init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # init_cfg=123, whose type is unsupported + model = SVT('s', init_cfg=123) + with pytest.raises(TypeError): + model.init_weights() + + # Test feature map output + H, W = (64, 64) + temp = torch.randn((1, 3, H, W)) + + model = SVT('s') + model.init_weights() + outs = model(temp) + assert len(outs) == 1 + assert outs[-1].shape == (1, 512, H // 32, W // 32) + + # test with mutil outputs + model = SVT('small', out_indices=(0, 1, 2, 3)) + model.init_weights() + outs = model(temp) + assert len(outs) == 4 + assert outs[0].shape == (1, 64, H // 4, W // 4) + assert outs[1].shape == (1, 128, H // 8, W // 8) + assert outs[2].shape == (1, 256, H // 16, W // 16) + assert outs[3].shape == (1, 512, H // 32, W // 32) + + # test with arch of dict + arch = { + 'embed_dims': [96, 192, 384, 768], + 'depths': [2, 2, 18, 2], + 'num_heads': [3, 6, 12, 24], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [4, 4, 4, 4], + 'sr_ratios': [8, 4, 2, 1], + 'window_sizes': [7, 7, 7, 7] + } + model = SVT(arch, out_indices=(0, 1, 2, 3)) + model.init_weights() + outs = model(temp) + assert len(outs) == 4 + assert outs[0].shape == (1, 96, H // 4, W // 4) + assert outs[1].shape == (1, 192, H // 8, W // 8) + assert outs[2].shape == (1, 384, H // 16, W // 16) + assert outs[3].shape == (1, 768, H // 32, W // 32) + + # assert length of arch value not equal + svt_arch = copy.deepcopy(arch) + svt_arch['sr_ratios'] = [8, 4, 2] + with pytest.raises(AssertionError): + model = SVT(svt_arch, out_indices=(0, 1, 2, 3)) + + # assert lack arch essential_keys + svt_arch = copy.deepcopy(arch) + del svt_arch['window_sizes'] + with pytest.raises(AssertionError): + model = SVT(svt_arch, out_indices=(0, 1, 2, 3)) + + # assert arch value not list + svt_arch = copy.deepcopy(arch) + svt_arch['sr_ratios'] = 1 + with pytest.raises(AssertionError): + model = SVT(svt_arch, out_indices=(0, 1, 2, 3)) + + svt_arch = copy.deepcopy(arch) + svt_arch['sr_ratios'] = '1, 2, 3, 4' + with pytest.raises(AssertionError): + model = SVT(svt_arch, out_indices=(0, 1, 2, 3)) + + # test norm_after_stage is bool True + model = SVT('small', norm_after_stage=True, norm_cfg=dict(type='LN')) + for i in range(model.num_stage): + assert hasattr(model, f'norm_after_stage{i}') + assert isinstance(getattr(model, f'norm_after_stage{i}'), nn.LayerNorm) + + # test norm_after_stage is bool Flase + model = SVT('small', norm_after_stage=False) + for i in range(model.num_stage): + assert hasattr(model, f'norm_after_stage{i}') + assert 
isinstance(getattr(model, f'norm_after_stage{i}'), nn.Identity) + + # test norm_after_stage is bool list + norm_after_stage = [False, True, False, True] + model = SVT('small', norm_after_stage=norm_after_stage) + assert len(norm_after_stage) == model.num_stage + for i in range(model.num_stage): + assert hasattr(model, f'norm_after_stage{i}') + norm_layer = getattr(model, f'norm_after_stage{i}') + if norm_after_stage[i]: + assert isinstance(norm_layer, nn.LayerNorm) + else: + assert isinstance(norm_layer, nn.Identity) + + # test norm_after_stage is not bool list + norm_after_stage = [False, 'True', False, True] + with pytest.raises(AssertionError): + model = SVT('small', norm_after_stage=norm_after_stage) diff --git a/tests/test_models/test_backbones/test_van.py b/tests/test_models/test_backbones/test_van.py new file mode 100644 index 0000000..136ce97 --- /dev/null +++ b/tests/test_models/test_backbones/test_van.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +import torch +from mmcv.utils.parrots_wrapper import _BatchNorm +from torch import nn + +from mmcls.models.backbones import VAN + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +class TestVAN(TestCase): + + def setUp(self): + self.cfg = dict(arch='t', drop_path_rate=0.1) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + VAN(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': [32, 64, 160, 256], + 'ffn_ratios': [8, 8, 4, 4], + } + VAN(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + embed_dims = [32, 64, 160, 256] + depths = [3, 3, 5, 2] + ffn_ratios = [8, 8, 4, 4] + cfg['arch'] = { + 'embed_dims': embed_dims, + 'depths': depths, + 'ffn_ratios': ffn_ratios + } + model = VAN(**cfg) + + for i in range(len(depths)): + stage = getattr(model, f'blocks{i + 1}') + self.assertEqual(stage[-1].out_channels, embed_dims[i]) + self.assertEqual(len(stage), depths[i]) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = VAN(**cfg) + ori_weight = model.patch_embed1.projection.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed1.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = VAN(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (3, 256, 7, 7)) + + # test with patch_sizes + cfg = deepcopy(self.cfg) + cfg['patch_sizes'] = [7, 5, 5, 5] + model = VAN(**cfg) + outs = model(torch.randn(3, 3, 224, 224)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (3, 256, 3, 3)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + model = VAN(**cfg) + outs = model(imgs) + 
self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for emb_size, stride, out in zip([32, 64, 160, 256], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, + (3, emb_size, 56 // stride, 56 // stride)) + + # test with dynamic input shape + imgs1 = torch.randn(3, 3, 224, 224) + imgs2 = torch.randn(3, 3, 256, 256) + imgs3 = torch.randn(3, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = VAN(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 32), + math.ceil(imgs.shape[3] / 32)) + self.assertEqual(feat.shape, (3, 256, *expect_feat_shape)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = VAN(**cfg) + depths = model.arch_settings['depths'] + stages = [model.blocks1, model.blocks2, model.blocks3, model.blocks4] + blocks = chain(*[stage for stage in stages]) + total_depth = sum(depths) + dpr = [ + x.item() + for x in torch.linspace(0, cfg['drop_path_rate'], total_depth) + ] + for i, (block, expect_prob) in enumerate(zip(blocks, dpr)): + if expect_prob == 0: + assert isinstance(block.drop_path, nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, expect_prob) + + # test VAN with norm_eval=True + cfg = deepcopy(self.cfg) + cfg['norm_eval'] = True + cfg['norm_cfg'] = dict(type='BN') + model = VAN(**cfg) + model.init_weights() + model.train() + self.assertTrue(check_norm_state(model.modules(), False)) + + # test VAN with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 0 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = VAN(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + self.assertFalse(model.patch_embed1.training) + for param in model.patch_embed1.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages + 1): + patch = getattr(model, f'patch_embed{i+1}') + for param in patch.parameters(): + self.assertFalse(param.requires_grad) + blocks = getattr(model, f'blocks{i + 1}') + for param in blocks.parameters(): + self.assertFalse(param.requires_grad) + norm = getattr(model, f'norm{i + 1}') + for param in norm.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + patch = getattr(model, f'patch_embed{i + 1}') + for param in patch.parameters(): + self.assertTrue(param.requires_grad) + blocks = getattr(model, f'blocks{i+1}') + for param in blocks.parameters(): + self.assertTrue(param.requires_grad) + norm = getattr(model, f'norm{i + 1}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_vgg.py b/tests/test_models/test_backbones/test_vgg.py new file mode 100644 index 0000000..4e81779 --- /dev/null +++ b/tests/test_models/test_backbones/test_vgg.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import VGG + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_vgg(): + """Test VGG backbone.""" + with pytest.raises(KeyError): + # VGG depth should be in [11, 13, 16, 19] + VGG(18) + + with pytest.raises(AssertionError): + # In VGG: 1 <= num_stages <= 5 + VGG(11, num_stages=0) + + with pytest.raises(AssertionError): + # In VGG: 1 <= num_stages <= 5 + VGG(11, num_stages=6) + + with pytest.raises(AssertionError): + # len(dilations) == num_stages + VGG(11, dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = VGG(11) + model.init_weights(pretrained=0) + + # Test VGG11 norm_eval=True + model = VGG(11, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test VGG11 forward without classifiers + model = VGG(11, out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + + # Test VGG11 forward with classifiers + model = VGG(11, num_classes=10, out_indices=(0, 1, 2, 3, 4, 5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 6 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + assert feat[5].shape == (1, 10) + + # Test VGG11BN forward + model = VGG(11, norm_cfg=dict(type='BN'), out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + + # Test VGG11BN forward with classifiers + model = VGG( + 11, + num_classes=10, + norm_cfg=dict(type='BN'), + out_indices=(0, 1, 2, 3, 4, 5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 6 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + assert feat[5].shape == (1, 10) + + # Test VGG13 with layers 1, 2, 3 out forward + model = VGG(13, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + + # Test VGG16 with top feature maps out forward + model = VGG(16) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 512, 7, 7) + + # Test VGG19 with classification score out 
forward + model = VGG(19, num_classes=10) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 10) diff --git a/tests/test_models/test_backbones/test_vision_transformer.py b/tests/test_models/test_backbones/test_vision_transformer.py new file mode 100644 index 0000000..26cc737 --- /dev/null +++ b/tests/test_models/test_backbones/test_vision_transformer.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmcv.runner import load_checkpoint, save_checkpoint + +from mmcls.models.backbones import VisionTransformer +from .utils import timm_resize_pos_embed + + +class TestVisionTransformer(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', img_size=224, patch_size=16, drop_path_rate=0.1) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + VisionTransformer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + VisionTransformer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 1024 + } + model = VisionTransformer(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(model.num_layers, 24) + for layer in model.layers: + self.assertEqual(layer.attn.num_heads, 16) + self.assertEqual(layer.ffn.feedforward_channels, 1024) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + VisionTransformer(**cfg) + cfg['out_indices'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 13'): + VisionTransformer(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = VisionTransformer(**cfg) + self.assertEqual(len(model.layers), 12) + dpr_inc = 0.1 / (12 - 1) + dpr = 0 + for layer in model.layers: + self.assertEqual(layer.attn.embed_dims, 768) + self.assertEqual(layer.attn.num_heads, 12) + self.assertEqual(layer.ffn.feedforward_channels, 3072) + self.assertAlmostEqual(layer.attn.out_drop.drop_prob, dpr) + self.assertAlmostEqual(layer.ffn.dropout_layer.drop_prob, dpr) + dpr += dpr_inc + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = VisionTransformer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.pos_embed, torch.tensor(0.))) + + # test load checkpoint + pretrain_pos_embed = model.pos_embed.clone().detach() + tmpdir = tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + save_checkpoint(model, checkpoint) + cfg = deepcopy(self.cfg) + model = VisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + 
self.assertTrue(torch.allclose(model.pos_embed, pretrain_pos_embed)) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + model = VisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + resized_pos_embed = timm_resize_pos_embed(pretrain_pos_embed, + model.pos_embed) + self.assertTrue(torch.allclose(model.pos_embed, resized_pos_embed)) + + os.remove(checkpoint) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + # test with_cls_token=False + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['output_cls_token'] = True + with self.assertRaisesRegex(AssertionError, 'but got False'): + VisionTransformer(**cfg) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['output_cls_token'] = False + model = VisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (3, 768, 14, 14)) + + # test with output_cls_token + cfg = deepcopy(self.cfg) + model = VisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token, cls_token = outs[-1] + self.assertEqual(patch_token.shape, (3, 768, 14, 14)) + self.assertEqual(cls_token.shape, (3, 768)) + + # test without output_cls_token + cfg = deepcopy(self.cfg) + cfg['output_cls_token'] = False + model = VisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (3, 768, 14, 14)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = VisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + patch_token, cls_token = out + self.assertEqual(patch_token.shape, (3, 768, 14, 14)) + self.assertEqual(cls_token.shape, (3, 768)) + + # Test forward with dynamic input size + imgs1 = torch.randn(3, 3, 224, 224) + imgs2 = torch.randn(3, 3, 256, 256) + imgs3 = torch.randn(3, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = VisionTransformer(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token, cls_token = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 16), + math.ceil(imgs.shape[3] / 16)) + self.assertEqual(patch_token.shape, (3, 768, *expect_feat_shape)) + self.assertEqual(cls_token.shape, (3, 768)) diff --git a/tests/test_models/test_backbones/utils.py b/tests/test_models/test_backbones/utils.py new file mode 100644 index 0000000..aba9caf --- /dev/null +++ b/tests/test_models/test_backbones/utils.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn.functional as F + + +def timm_resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + """Timm version pos embed resize function. 
+ + copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + """ # noqa:E501 + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, + num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, + -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate( + posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, + 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb diff --git a/tests/test_models/test_classifiers.py b/tests/test_models/test_classifiers.py new file mode 100644 index 0000000..d021b2f --- /dev/null +++ b/tests/test_models/test_classifiers.py @@ -0,0 +1,326 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import tempfile +from copy import deepcopy + +import numpy as np +import torch +from mmcv import ConfigDict + +from mmcls.models import CLASSIFIERS +from mmcls.models.classifiers import ImageClassifier + + +def test_image_classifier(): + model_cfg = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss'))) + + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + model_cfg_ = deepcopy(model_cfg) + model = CLASSIFIERS.build(model_cfg_) + + # test property + assert model.with_neck + assert model.with_head + + # test train_step + outputs = model.train_step({'img': imgs, 'gt_label': label}, None) + assert outputs['loss'].item() > 0 + assert outputs['num_samples'] == 16 + + # test train_step without optimizer + outputs = model.train_step({'img': imgs, 'gt_label': label}) + assert outputs['loss'].item() > 0 + assert outputs['num_samples'] == 16 + + # test val_step + outputs = model.val_step({'img': imgs, 'gt_label': label}, None) + assert outputs['loss'].item() > 0 + assert outputs['num_samples'] == 16 + + # test val_step without optimizer + outputs = model.val_step({'img': imgs, 'gt_label': label}) + assert outputs['loss'].item() > 0 + assert outputs['num_samples'] == 16 + + # test forward + losses = model(imgs, return_loss=True, gt_label=label) + assert losses['loss'].item() > 0 + + # test forward_test + model_cfg_ = deepcopy(model_cfg) + model = CLASSIFIERS.build(model_cfg_) + pred = model(imgs, return_loss=False, img_metas=None) + assert isinstance(pred, list) and len(pred) == 16 + + single_img = torch.randn(1, 3, 32, 32) + pred = model(single_img, return_loss=False, img_metas=None) + assert isinstance(pred, list) and len(pred) == 1 + + pred = model.simple_test(imgs, softmax=False) + assert isinstance(pred, list) and len(pred) == 16 + assert len(pred[0]) == 10 + + pred = model.simple_test(imgs, softmax=False, post_process=False) + assert isinstance(pred, torch.Tensor) + assert pred.shape == (16, 10) + + soft_pred = model.simple_test(imgs, softmax=True, post_process=False) + assert isinstance(soft_pred, torch.Tensor) + assert soft_pred.shape == (16, 10) + torch.testing.assert_allclose(soft_pred, torch.softmax(pred, dim=1)) + + # test
pretrained + model_cfg_ = deepcopy(model_cfg) + model_cfg_['pretrained'] = 'checkpoint' + model = CLASSIFIERS.build(model_cfg_) + assert model.init_cfg == dict(type='Pretrained', checkpoint='checkpoint') + + # test show_result + img = np.random.randint(0, 256, (224, 224, 3)).astype(np.uint8) + result = dict(pred_class='cat', pred_label=0, pred_score=0.9) + + with tempfile.TemporaryDirectory() as tmpdir: + out_file = osp.join(tmpdir, 'out.png') + model.show_result(img, result, out_file=out_file) + assert osp.exists(out_file) + + with tempfile.TemporaryDirectory() as tmpdir: + out_file = osp.join(tmpdir, 'out.png') + model.show_result(img, result, out_file=out_file) + assert osp.exists(out_file) + + +def test_image_classifier_with_mixup(): + # Test mixup in ImageClassifier + model_cfg = dict( + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, + use_soft=True)), + train_cfg=dict( + augments=dict( + type='BatchMixup', alpha=1., num_classes=10, prob=1.))) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + +def test_image_classifier_with_cutmix(): + + # Test cutmix in ImageClassifier + model_cfg = dict( + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, + use_soft=True)), + train_cfg=dict( + augments=dict( + type='BatchCutMix', alpha=1., num_classes=10, prob=1.))) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + +def test_image_classifier_with_augments(): + + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + # Test cutmix and mixup in ImageClassifier + model_cfg = dict( + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, + use_soft=True)), + train_cfg=dict(augments=[ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3), + dict(type='Identity', num_classes=10, prob=0.2) + ])) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + # Test cutmix with cutmix_minmax in ImageClassifier + model_cfg['train_cfg'] = dict( + augments=dict( + type='BatchCutMix', + alpha=1., + num_classes=10, + prob=1., + cutmix_minmax=[0.2, 0.8])) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + # Test not using train_cfg + model_cfg = dict( + backbone=dict( + type='ResNet_CIFAR', + depth=50, + 
num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0))) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + # Test not using cutmix and mixup in ImageClassifier + model_cfg['train_cfg'] = dict(augments=None) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + +def test_classifier_extract_feat(): + model_cfg = ConfigDict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=18, + num_stages=4, + out_indices=(0, 1, 2, 3), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss'), + topk=(1, 5), + )) + + model = CLASSIFIERS.build(model_cfg) + + # test backbone output + outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='backbone') + assert outs[0].shape == (1, 64, 56, 56) + assert outs[1].shape == (1, 128, 28, 28) + assert outs[2].shape == (1, 256, 14, 14) + assert outs[3].shape == (1, 512, 7, 7) + + # test neck output + outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='neck') + assert outs[0].shape == (1, 64) + assert outs[1].shape == (1, 128) + assert outs[2].shape == (1, 256) + assert outs[3].shape == (1, 512) + + # test pre_logits output + out = model.extract_feat(torch.rand(1, 3, 224, 224), stage='pre_logits') + assert out.shape == (1, 512) + + # test transformer style feature extraction + model_cfg = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', arch='b', out_indices=[-3, -2, -1]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + hidden_dim=1024, + loss=dict(type='CrossEntropyLoss'), + )) + model = CLASSIFIERS.build(model_cfg) + + # test backbone output + outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='backbone') + for out in outs: + patch_token, cls_token = out + assert patch_token.shape == (1, 768, 14, 14) + assert cls_token.shape == (1, 768) + + # test neck output (the same with backbone) + outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='neck') + for out in outs: + patch_token, cls_token = out + assert patch_token.shape == (1, 768, 14, 14) + assert cls_token.shape == (1, 768) + + # test pre_logits output + out = model.extract_feat(torch.rand(1, 3, 224, 224), stage='pre_logits') + assert out.shape == (1, 1024) + + # test extract_feats + multi_imgs = [torch.rand(1, 3, 224, 224) for _ in range(3)] + outs = model.extract_feats(multi_imgs) + for outs_per_img in outs: + for out in outs_per_img: + patch_token, cls_token = out + assert patch_token.shape == (1, 768, 14, 14) + assert cls_token.shape == (1, 768) + + outs = model.extract_feats(multi_imgs, stage='pre_logits') + for out_per_img in outs: + assert out_per_img.shape == (1, 1024) + + out = model.forward_dummy(torch.rand(1, 3, 224, 224)) + assert out.shape == (1, 1024) diff --git a/tests/test_models/test_heads.py b/tests/test_models/test_heads.py new file mode 100644 index 0000000..e0ecdb6 --- /dev/null +++ b/tests/test_models/test_heads.py @@ -0,0 +1,400 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +from unittest.mock import patch + +import pytest +import torch + +from mmcls.models.heads import (ClsHead, ConformerHead, CSRAClsHead, + DeiTClsHead, EfficientFormerClsHead, + LinearClsHead, MultiLabelClsHead, + MultiLabelLinearClsHead, StackedLinearClsHead, + VisionTransformerClsHead) + + +@pytest.mark.parametrize('feat', [torch.rand(4, 10), (torch.rand(4, 10), )]) +def test_cls_head(feat): + fake_gt_label = torch.randint(0, 10, (4, )) + + # test forward_train with cal_acc=True + head = ClsHead(cal_acc=True) + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + assert 'accuracy' in losses + + # test forward_train with cal_acc=False + head = ClsHead() + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test forward_train with weight + weight = torch.tensor([0.5, 0.5, 0.5, 0.5]) + losses_ = head.forward_train(feat, fake_gt_label) + losses = head.forward_train(feat, fake_gt_label, weight=weight) + assert losses['loss'].item() == losses_['loss'].item() * 0.5 + + # test simple_test with post_process + pred = head.simple_test(feat) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(feat) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(feat, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(feat, softmax=False, post_process=False) + torch.testing.assert_allclose(pred, torch.softmax(logits, dim=1)) + + # test pre_logits + features = head.pre_logits(feat) + if isinstance(feat, tuple): + torch.testing.assert_allclose(features, feat[0]) + else: + torch.testing.assert_allclose(features, feat) + + +@pytest.mark.parametrize('feat', [torch.rand(4, 3), (torch.rand(4, 3), )]) +def test_linear_head(feat): + + fake_gt_label = torch.randint(0, 10, (4, )) + + # test LinearClsHead forward + head = LinearClsHead(10, 3) + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test init weights + head = LinearClsHead(10, 3) + head.init_weights() + assert abs(head.fc.weight).sum() > 0 + + # test simple_test with post_process + pred = head.simple_test(feat) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(feat) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(feat, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(feat, softmax=False, post_process=False) + torch.testing.assert_allclose(pred, torch.softmax(logits, dim=1)) + + # test pre_logits + features = head.pre_logits(feat) + if isinstance(feat, tuple): + torch.testing.assert_allclose(features, feat[0]) + else: + torch.testing.assert_allclose(features, feat) + + +@pytest.mark.parametrize('feat', [torch.rand(4, 10), (torch.rand(4, 10), )]) +def test_multilabel_head(feat): + head = MultiLabelClsHead() + fake_gt_label = torch.randint(0, 2, (4, 10)) + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test simple_test with post_process + pred = head.simple_test(feat) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(feat) + assert pred.shape == (4, 10) + + # test simple_test without post_process + 
pred = head.simple_test(feat, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(feat, sigmoid=False, post_process=False) + torch.testing.assert_allclose(pred, torch.sigmoid(logits)) + + # test pre_logits + features = head.pre_logits(feat) + if isinstance(feat, tuple): + torch.testing.assert_allclose(features, feat[0]) + else: + torch.testing.assert_allclose(features, feat) + + +@pytest.mark.parametrize('feat', [torch.rand(4, 5), (torch.rand(4, 5), )]) +def test_multilabel_linear_head(feat): + head = MultiLabelLinearClsHead(10, 5) + fake_gt_label = torch.randint(0, 2, (4, 10)) + + head.init_weights() + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test simple_test with post_process + pred = head.simple_test(feat) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(feat) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(feat, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(feat, sigmoid=False, post_process=False) + torch.testing.assert_allclose(pred, torch.sigmoid(logits)) + + # test pre_logits + features = head.pre_logits(feat) + if isinstance(feat, tuple): + torch.testing.assert_allclose(features, feat[0]) + else: + torch.testing.assert_allclose(features, feat) + + +@pytest.mark.parametrize('feat', [torch.rand(4, 5), (torch.rand(4, 5), )]) +def test_stacked_linear_cls_head(feat): + # test assertion + with pytest.raises(AssertionError): + StackedLinearClsHead(num_classes=3, in_channels=5, mid_channels=10) + + with pytest.raises(AssertionError): + StackedLinearClsHead(num_classes=-1, in_channels=5, mid_channels=[10]) + + fake_gt_label = torch.randint(0, 2, (4, )) # B, num_classes + + # test forward with default setting + head = StackedLinearClsHead( + num_classes=10, in_channels=5, mid_channels=[20]) + head.init_weights() + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test simple_test with post_process + pred = head.simple_test(feat) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(feat) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(feat, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(feat, softmax=False, post_process=False) + torch.testing.assert_allclose(pred, torch.softmax(logits, dim=1)) + + # test pre_logits + features = head.pre_logits(feat) + assert features.shape == (4, 20) + + # test forward with full function + head = StackedLinearClsHead( + num_classes=3, + in_channels=5, + mid_channels=[8, 10], + dropout_rate=0.2, + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='HSwish')) + head.init_weights() + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + +def test_vit_head(): + fake_features = ([torch.rand(4, 7, 7, 16), torch.rand(4, 100)], ) + fake_gt_label = torch.randint(0, 10, (4, )) + + # test vit head forward + head = VisionTransformerClsHead(10, 100) + losses = head.forward_train(fake_features, fake_gt_label) + assert not hasattr(head.layers, 'pre_logits') + assert not hasattr(head.layers, 'act') + assert losses['loss'].item() > 0 + + # test vit head forward with hidden layer 
+ head = VisionTransformerClsHead(10, 100, hidden_dim=20) + losses = head.forward_train(fake_features, fake_gt_label) + assert hasattr(head.layers, 'pre_logits') and hasattr(head.layers, 'act') + assert losses['loss'].item() > 0 + + # test vit head init_weights + head = VisionTransformerClsHead(10, 100, hidden_dim=20) + head.init_weights() + assert abs(head.layers.pre_logits.weight).sum() > 0 + + head = VisionTransformerClsHead(10, 100, hidden_dim=20) + # test simple_test with post_process + pred = head.simple_test(fake_features) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(fake_features) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(fake_features, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(fake_features, softmax=False, post_process=False) + torch.testing.assert_allclose(pred, torch.softmax(logits, dim=1)) + + # test pre_logits + features = head.pre_logits(fake_features) + assert features.shape == (4, 20) + + # test assertion + with pytest.raises(ValueError): + VisionTransformerClsHead(-1, 100) + + +def test_conformer_head(): + fake_features = ([torch.rand(4, 64), torch.rand(4, 96)], ) + fake_gt_label = torch.randint(0, 10, (4, )) + + # test conformer head forward + head = ConformerHead(num_classes=10, in_channels=[64, 96]) + losses = head.forward_train(fake_features, fake_gt_label) + assert losses['loss'].item() > 0 + + # test simple_test with post_process + pred = head.simple_test(fake_features) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(fake_features) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(fake_features, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(fake_features, softmax=False, post_process=False) + torch.testing.assert_allclose(pred, torch.softmax(sum(logits), dim=1)) + + # test pre_logits + features = head.pre_logits(fake_features) + assert features is fake_features[0] + + +def test_deit_head(): + fake_features = ([ + torch.rand(4, 7, 7, 16), + torch.rand(4, 100), + torch.rand(4, 100) + ], ) + fake_gt_label = torch.randint(0, 10, (4, )) + + # test deit head forward + head = DeiTClsHead(num_classes=10, in_channels=100) + losses = head.forward_train(fake_features, fake_gt_label) + assert not hasattr(head.layers, 'pre_logits') + assert not hasattr(head.layers, 'act') + assert losses['loss'].item() > 0 + + # test deit head forward with hidden layer + head = DeiTClsHead(num_classes=10, in_channels=100, hidden_dim=20) + losses = head.forward_train(fake_features, fake_gt_label) + assert hasattr(head.layers, 'pre_logits') and hasattr(head.layers, 'act') + assert losses['loss'].item() > 0 + + # test deit head init_weights + head = DeiTClsHead(10, 100, hidden_dim=20) + head.init_weights() + assert abs(head.layers.pre_logits.weight).sum() > 0 + + head = DeiTClsHead(10, 100, hidden_dim=20) + # test simple_test with post_process + pred = head.simple_test(fake_features) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(fake_features) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(fake_features, 
post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(fake_features, softmax=False, post_process=False) + torch.testing.assert_allclose(pred, torch.softmax(logits, dim=1)) + + # test pre_logits + cls_token, dist_token = head.pre_logits(fake_features) + assert cls_token.shape == (4, 20) + assert dist_token.shape == (4, 20) + + # test assertion + with pytest.raises(ValueError): + DeiTClsHead(-1, 100) + + +def test_efficientformer_head(): + fake_features = (torch.rand(4, 64), ) + fake_gt_label = torch.randint(0, 10, (4, )) + + # Test without distillation head + head = EfficientFormerClsHead( + num_classes=10, in_channels=64, distillation=False) + + # test EfficientFormer head forward + losses = head.forward_train(fake_features, fake_gt_label) + assert losses['loss'].item() > 0 + + # test simple_test with post_process + pred = head.simple_test(fake_features) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(fake_features) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(fake_features, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(fake_features, softmax=False, post_process=False) + torch.testing.assert_allclose(pred, torch.softmax(logits, dim=1)) + + # test pre_logits + features = head.pre_logits(fake_features) + assert features is fake_features[0] + + # Test without distillation head + head = EfficientFormerClsHead(num_classes=10, in_channels=64) + assert hasattr(head, 'head') + assert hasattr(head, 'dist_head') + + # Test loss + with pytest.raises(NotImplementedError): + losses = head.forward_train(fake_features, fake_gt_label) + + # test simple_test with post_process + pred = head.simple_test(fake_features) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(fake_features) + assert pred.shape == (4, 10) + + # test simple_test without post_process + pred = head.simple_test(fake_features, post_process=False) + assert isinstance(pred, torch.Tensor) and pred.shape == (4, 10) + logits = head.simple_test(fake_features, softmax=False, post_process=False) + torch.testing.assert_allclose(pred, torch.softmax(logits, dim=1)) + + # test pre_logits + features = head.pre_logits(fake_features) + assert features is fake_features[0] + + +@pytest.mark.parametrize( + 'feat', [torch.rand(4, 20, 20, 30), (torch.rand(4, 20, 20, 30), )]) +def test_csra_head(feat): + head = CSRAClsHead(num_classes=10, in_channels=20, num_heads=1, lam=0.1) + fake_gt_label = torch.randint(0, 2, (4, 10)) + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test simple_test with post_process + pred = head.simple_test(feat) + assert isinstance(pred, list) and len(pred) == 4 + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(feat) + assert pred.shape == (4, 10) + + # test pre_logits + features = head.pre_logits(feat) + if isinstance(feat, tuple): + torch.testing.assert_allclose(features, feat[0]) + else: + torch.testing.assert_allclose(features, feat) diff --git a/tests/test_models/test_neck.py b/tests/test_models/test_neck.py new file mode 100644 index 0000000..b554e3d --- /dev/null +++ b/tests/test_models/test_neck.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmcls.models.necks import (GeneralizedMeanPooling, GlobalAveragePooling, + HRFuseScales) + + +def test_gap_neck(): + + # test 1d gap_neck + neck = GlobalAveragePooling(dim=1) + # batch_size, num_features, feature_size + fake_input = torch.rand(1, 16, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + # test 2d gap_neck + neck = GlobalAveragePooling(dim=2) + # batch_size, num_features, feature_size(2) + fake_input = torch.rand(1, 16, 24, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + # test 3d gap_neck + neck = GlobalAveragePooling(dim=3) + # batch_size, num_features, feature_size(3) + fake_input = torch.rand(1, 16, 24, 24, 5) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + with pytest.raises(AssertionError): + # dim must be in [1, 2, 3] + GlobalAveragePooling(dim='other') + + +def test_gem_neck(): + + # test gem_neck + neck = GeneralizedMeanPooling() + # batch_size, num_features, feature_size(2) + fake_input = torch.rand(1, 16, 24, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + # test tuple input gem_neck + neck = GeneralizedMeanPooling() + # batch_size, num_features, feature_size(2) + fake_input = (torch.rand(1, 8, 24, 24), torch.rand(1, 16, 24, 24)) + + output = neck(fake_input) + # batch_size, num_features + assert output[0].shape == (1, 8) + assert output[1].shape == (1, 16) + + with pytest.raises(AssertionError): + # p must be a value greater than 1 + GeneralizedMeanPooling(p=0.5) + + +def test_hr_fuse_scales(): + + in_channels = (18, 32, 64, 128) + neck = HRFuseScales(in_channels=in_channels, out_channels=1024) + + feat_size = 56 + inputs = [] + for in_channel in in_channels: + input_tensor = torch.rand(3, in_channel, feat_size, feat_size) + inputs.append(input_tensor) + feat_size = feat_size // 2 + + # the inputs must be a tuple, a list raises an error + with pytest.raises(AssertionError): + neck(inputs) + + outs = neck(tuple(inputs)) + assert isinstance(outs, tuple) + assert len(outs) == 1 + assert outs[0].shape == (3, 1024, 7, 7) diff --git a/tests/test_models/test_utils/test_attention.py b/tests/test_models/test_utils/test_attention.py new file mode 100644 index 0000000..cc37d13 --- /dev/null +++ b/tests/test_models/test_utils/test_attention.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved.
+from functools import partial +from unittest import TestCase +from unittest.mock import ANY, MagicMock + +import pytest +import torch +from mmcv.utils import TORCH_VERSION, digit_version + +from mmcls.models.utils.attention import ShiftWindowMSA, WindowMSA + +if digit_version(TORCH_VERSION) >= digit_version('1.10.0a0'): + torch_meshgrid_ij = partial(torch.meshgrid, indexing='ij') +else: + torch_meshgrid_ij = torch.meshgrid # Uses indexing='ij' by default + + +def get_relative_position_index(window_size): + """Method from original code of Swin-Transformer.""" + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch_meshgrid_ij([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + # 2, Wh*Ww, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + # Wh*Ww, Wh*Ww, 2 + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + return relative_position_index + + +class TestWindowMSA(TestCase): + + def test_forward(self): + attn = WindowMSA(embed_dims=96, window_size=(7, 7), num_heads=4) + inputs = torch.rand((16, 7 * 7, 96)) + output = attn(inputs) + self.assertEqual(output.shape, inputs.shape) + + # test non-square window_size + attn = WindowMSA(embed_dims=96, window_size=(6, 7), num_heads=4) + inputs = torch.rand((16, 6 * 7, 96)) + output = attn(inputs) + self.assertEqual(output.shape, inputs.shape) + + def test_relative_pos_embed(self): + attn = WindowMSA(embed_dims=96, window_size=(7, 8), num_heads=4) + self.assertEqual(attn.relative_position_bias_table.shape, + ((2 * 7 - 1) * (2 * 8 - 1), 4)) + # test relative_position_index + expected_rel_pos_index = get_relative_position_index((7, 8)) + self.assertTrue( + torch.allclose(attn.relative_position_index, + expected_rel_pos_index)) + + # test default init + self.assertTrue( + torch.allclose(attn.relative_position_bias_table, + torch.tensor(0.))) + attn.init_weights() + self.assertFalse( + torch.allclose(attn.relative_position_bias_table, + torch.tensor(0.))) + + def test_qkv_bias(self): + # test qkv_bias=True + attn = WindowMSA( + embed_dims=96, window_size=(7, 7), num_heads=4, qkv_bias=True) + self.assertEqual(attn.qkv.bias.shape, (96 * 3, )) + + # test qkv_bias=False + attn = WindowMSA( + embed_dims=96, window_size=(7, 7), num_heads=4, qkv_bias=False) + self.assertIsNone(attn.qkv.bias) + + def test_qk_scale(self): + # test default qk_scale + attn = WindowMSA( + embed_dims=96, window_size=(7, 7), num_heads=4, qk_scale=None) + head_dims = 96 // 4 + self.assertAlmostEqual(attn.scale, head_dims**-0.5) + + # test specified qk_scale + attn = WindowMSA( + embed_dims=96, window_size=(7, 7), num_heads=4, qk_scale=0.3) + self.assertEqual(attn.scale, 0.3) + + def test_attn_drop(self): + inputs = torch.rand(16, 7 * 7, 96) + attn = WindowMSA( + embed_dims=96, window_size=(7, 7), num_heads=4, attn_drop=1.0) + # drop all attn output, output should be equal to proj.bias + self.assertTrue(torch.allclose(attn(inputs), attn.proj.bias)) + + def test_proj_drop(self): + inputs = torch.rand(16, 7 * 7, 96) + attn = WindowMSA( + embed_dims=96, window_size=(7, 7), num_heads=4, proj_drop=1.0) + self.assertTrue(torch.allclose(attn(inputs), torch.tensor(0.))) + + def test_mask(self): + inputs =
torch.rand(16, 7 * 7, 96) + attn = WindowMSA(embed_dims=96, window_size=(7, 7), num_heads=4) + mask = torch.zeros((4, 49, 49)) + # Mask the first column + mask[:, 0, :] = -100 + mask[:, :, 0] = -100 + outs = attn(inputs, mask=mask) + inputs[:, 0, :].normal_() + outs_with_mask = attn(inputs, mask=mask) + torch.testing.assert_allclose(outs[:, 1:, :], outs_with_mask[:, 1:, :]) + + +class TestShiftWindowMSA(TestCase): + + def test_forward(self): + inputs = torch.rand((1, 14 * 14, 96)) + attn = ShiftWindowMSA(embed_dims=96, window_size=7, num_heads=4) + output = attn(inputs, (14, 14)) + self.assertEqual(output.shape, inputs.shape) + self.assertEqual(attn.w_msa.relative_position_bias_table.shape, + ((2 * 7 - 1)**2, 4)) + + # test forward with shift_size + attn = ShiftWindowMSA( + embed_dims=96, window_size=7, num_heads=4, shift_size=3) + output = attn(inputs, (14, 14)) + assert output.shape == (inputs.shape) + + # test irregular input shape + input_resolution = (19, 18) + attn = ShiftWindowMSA(embed_dims=96, num_heads=4, window_size=7) + inputs = torch.rand((1, 19 * 18, 96)) + output = attn(inputs, input_resolution) + assert output.shape == (inputs.shape) + + # test wrong input_resolution + input_resolution = (14, 14) + attn = ShiftWindowMSA(embed_dims=96, num_heads=4, window_size=7) + inputs = torch.rand((1, 14 * 14, 96)) + with pytest.raises(AssertionError): + attn(inputs, (14, 15)) + + def test_pad_small_map(self): + # test pad_small_map=True + inputs = torch.rand((1, 6 * 7, 96)) + attn = ShiftWindowMSA( + embed_dims=96, + window_size=7, + num_heads=4, + shift_size=3, + pad_small_map=True) + attn.get_attn_mask = MagicMock(wraps=attn.get_attn_mask) + output = attn(inputs, (6, 7)) + self.assertEqual(output.shape, inputs.shape) + attn.get_attn_mask.assert_called_once_with((7, 7), + window_size=7, + shift_size=3, + device=ANY) + + # test pad_small_map=False + inputs = torch.rand((1, 6 * 7, 96)) + attn = ShiftWindowMSA( + embed_dims=96, + window_size=7, + num_heads=4, + shift_size=3, + pad_small_map=False) + with self.assertRaisesRegex(AssertionError, r'the window size \(7\)'): + attn(inputs, (6, 7)) + + # test pad_small_map=False, and the input size equals to window size + inputs = torch.rand((1, 7 * 7, 96)) + attn.get_attn_mask = MagicMock(wraps=attn.get_attn_mask) + output = attn(inputs, (7, 7)) + self.assertEqual(output.shape, inputs.shape) + attn.get_attn_mask.assert_called_once_with((7, 7), + window_size=7, + shift_size=0, + device=ANY) + + def test_drop_layer(self): + inputs = torch.rand((1, 14 * 14, 96)) + attn = ShiftWindowMSA( + embed_dims=96, + window_size=7, + num_heads=4, + dropout_layer=dict(type='Dropout', drop_prob=1.0)) + attn.init_weights() + # drop all attn output, output shuold be equal to proj.bias + self.assertTrue( + torch.allclose(attn(inputs, (14, 14)), torch.tensor(0.))) + + def test_deprecation(self): + # test deprecated arguments + with pytest.warns(DeprecationWarning): + ShiftWindowMSA( + embed_dims=96, + num_heads=4, + window_size=7, + input_resolution=(14, 14)) + + with pytest.warns(DeprecationWarning): + ShiftWindowMSA( + embed_dims=96, num_heads=4, window_size=7, auto_pad=True) diff --git a/tests/test_models/test_utils/test_augment.py b/tests/test_models/test_utils/test_augment.py new file mode 100644 index 0000000..d1987fa --- /dev/null +++ b/tests/test_models/test_utils/test_augment.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmcls.models.utils import Augments + +augment_cfgs = [ + dict(type='BatchCutMix', alpha=1., prob=1.), + dict(type='BatchMixup', alpha=1., prob=1.), + dict(type='Identity', prob=1.), + dict(type='BatchResizeMix', alpha=1., prob=1.) +] + + +def test_augments(): + imgs = torch.randn(4, 3, 32, 32) + labels = torch.randint(0, 10, (4, )) + + # Test cutmix + augments_cfg = dict(type='BatchCutMix', alpha=1., num_classes=10, prob=1.) + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + # Test mixup + augments_cfg = dict(type='BatchMixup', alpha=1., num_classes=10, prob=1.) + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + # Test resizemix + augments_cfg = dict( + type='BatchResizeMix', alpha=1., num_classes=10, prob=1.) + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + # Test cutmixup + augments_cfg = [ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3) + ] + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + augments_cfg = [ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.5) + ] + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + augments_cfg = [ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3), + dict(type='Identity', num_classes=10, prob=0.2) + ] + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + +@pytest.mark.parametrize('cfg', augment_cfgs) +def test_binary_augment(cfg): + + cfg_ = dict(num_classes=1, **cfg) + augs = Augments(cfg_) + + imgs = torch.randn(4, 3, 32, 32) + labels = torch.randint(0, 2, (4, 1)).float() + + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 1)) + + +@pytest.mark.parametrize('cfg', augment_cfgs) +def test_multilabel_augment(cfg): + + cfg_ = dict(num_classes=10, **cfg) + augs = Augments(cfg_) + + imgs = torch.randn(4, 3, 32, 32) + labels = torch.randint(0, 2, (4, 10)).float() + + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) diff --git a/tests/test_models/test_utils/test_embed.py b/tests/test_models/test_utils/test_embed.py new file mode 100644 index 0000000..eb7356b --- /dev/null +++ b/tests/test_models/test_utils/test_embed.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmcls.models.backbones import VGG +from mmcls.models.utils import HybridEmbed, PatchEmbed, PatchMerging + + +def cal_unfold_dim(dim, kernel_size, stride, padding=0, dilation=1): + return (dim + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1 + + +def test_patch_embed(): + # Test PatchEmbed + patch_embed = PatchEmbed() + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 196, 768)) + + # Test PatchEmbed with stride = 8 + conv_cfg = dict(kernel_size=16, stride=8) + patch_embed = PatchEmbed(conv_cfg=conv_cfg) + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 729, 768)) + + +def test_hybrid_embed(): + # Test VGG11 HybridEmbed + backbone = VGG(11, norm_eval=True) + backbone.init_weights() + patch_embed = HybridEmbed(backbone) + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 49, 768)) + + +def test_patch_merging(): + settings = dict(in_channels=16, out_channels=32, padding=0) + downsample = PatchMerging(**settings) + + # test forward with wrong dims + with pytest.raises(AssertionError): + inputs = torch.rand((1, 16, 56 * 56)) + downsample(inputs, input_size=(56, 56)) + + # test patch merging forward + inputs = torch.rand((1, 56 * 56, 16)) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert output_size == (28, 28) + assert out.shape == (1, 28 * 28, 32) + + # test different kernel_size in each direction + downsample = PatchMerging(kernel_size=(2, 3), **settings) + out, output_size = downsample(inputs, input_size=(56, 56)) + expected_dim = cal_unfold_dim(56, 2, 2) * cal_unfold_dim(56, 3, 3) + assert downsample.sampler.kernel_size == (2, 3) + assert output_size == (cal_unfold_dim(56, 2, 2), cal_unfold_dim(56, 3, 3)) + assert out.shape == (1, expected_dim, 32) + + # test default stride + downsample = PatchMerging(kernel_size=6, **settings) + assert downsample.sampler.stride == (6, 6) + + # test stride=3 + downsample = PatchMerging(kernel_size=6, stride=3, **settings) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert downsample.sampler.stride == (3, 3) + assert out.shape == (1, cal_unfold_dim(56, 6, stride=3)**2, 32) + + # test padding + downsample = PatchMerging( + in_channels=16, out_channels=32, kernel_size=6, padding=2) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert downsample.sampler.padding == (2, 2) + assert out.shape == (1, cal_unfold_dim(56, 6, 6, padding=2)**2, 32) + + # test str padding + downsample = PatchMerging(in_channels=16, out_channels=32, kernel_size=6) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert downsample.sampler.padding == (0, 0) + assert out.shape == (1, cal_unfold_dim(56, 6, 6, padding=2)**2, 32) + + # test dilation + downsample = PatchMerging(kernel_size=6, dilation=2, **settings) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert downsample.sampler.dilation == (2, 2) + assert out.shape == (1, cal_unfold_dim(56, 6, 6, dilation=2)**2, 32) diff --git a/tests/test_models/test_utils/test_inverted_residual.py b/tests/test_models/test_utils/test_inverted_residual.py new file mode 100644 index 0000000..8c36327 --- /dev/null +++ b/tests/test_models/test_utils/test_inverted_residual.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import InvertedResidual, SELayer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def test_inverted_residual(): + + with pytest.raises(AssertionError): + # stride must be in [1, 2] + InvertedResidual(16, 16, 32, stride=3) + + with pytest.raises(AssertionError): + # se_cfg must be None or dict + InvertedResidual(16, 16, 32, se_cfg=list()) + + # Add expand conv if in_channels and mid_channels is not the same + assert InvertedResidual(32, 16, 32).with_expand_conv is False + assert InvertedResidual(16, 16, 32).with_expand_conv is True + + # Test InvertedResidual forward, stride=1 + block = InvertedResidual(16, 16, 32, stride=1) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert getattr(block, 'se', None) is None + assert block.with_res_shortcut + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward, stride=2 + block = InvertedResidual(16, 16, 32, stride=2) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert not block.with_res_shortcut + assert x_out.shape == torch.Size((1, 16, 28, 28)) + + # Test InvertedResidual forward with se layer + se_cfg = dict(channels=32) + block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert isinstance(block.se, SELayer) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward without expand conv + block = InvertedResidual(32, 16, 32) + x = torch.randn(1, 32, 56, 56) + x_out = block(x) + assert getattr(block, 'expand_conv', None) is None + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with GroupNorm + block = InvertedResidual( + 16, 16, 32, norm_cfg=dict(type='GN', num_groups=2)) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + for m in block.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with HSigmoid + block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid')) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with checkpoint + block = InvertedResidual(16, 16, 32, with_cp=True) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert block.with_cp + assert x_out.shape == torch.Size((1, 16, 56, 56)) diff --git a/tests/test_models/test_utils/test_layer_scale.py b/tests/test_models/test_utils/test_layer_scale.py new file mode 100644 index 0000000..824be99 --- /dev/null +++ b/tests/test_models/test_utils/test_layer_scale.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmcls.models.utils import LayerScale + + +class TestLayerScale(TestCase): + + def test_init(self): + with self.assertRaisesRegex(AssertionError, "'data_format' could"): + cfg = dict( + dim=10, + inplace=False, + data_format='BNC', + ) + LayerScale(**cfg) + + cfg = dict(dim=10) + ls = LayerScale(**cfg) + assert torch.equal(ls.weight, + torch.ones(10, requires_grad=True) * 1e-5) + + def forward(self): + # Test channels_last + cfg = dict(dim=256, inplace=False, data_format='channels_last') + ls_channels_last = LayerScale(**cfg) + x = torch.randn((4, 49, 256)) + out = ls_channels_last(x) + self.assertEqual(tuple(out.size()), (4, 49, 256)) + assert torch.equal(x * 1e-5, out) + + # Test channels_first + cfg = dict(dim=256, inplace=False, data_format='channels_first') + ls_channels_first = LayerScale(**cfg) + x = torch.randn((4, 256, 7, 7)) + out = ls_channels_first(x) + self.assertEqual(tuple(out.size()), (4, 256, 7, 7)) + assert torch.equal(x * 1e-5, out) + + # Test inplace True + cfg = dict(dim=256, inplace=True, data_format='channels_first') + ls_channels_first = LayerScale(**cfg) + x = torch.randn((4, 256, 7, 7)) + out = ls_channels_first(x) + self.assertEqual(tuple(out.size()), (4, 256, 7, 7)) + self.assertIs(x, out) diff --git a/tests/test_models/test_utils/test_misc.py b/tests/test_models/test_utils/test_misc.py new file mode 100644 index 0000000..86df85f --- /dev/null +++ b/tests/test_models/test_utils/test_misc.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmcv.utils import digit_version + +from mmcls.models.utils import channel_shuffle, is_tracing, make_divisible + + +def test_make_divisible(): + # test min_value is None + result = make_divisible(34, 8, None) + assert result == 32 + + # test when new_value > min_ratio * value + result = make_divisible(10, 8, min_ratio=0.9) + assert result == 16 + + # test min_value = 0.8 + result = make_divisible(33, 8, min_ratio=0.8) + assert result == 32 + + +def test_channel_shuffle(): + x = torch.randn(1, 24, 56, 56) + with pytest.raises(AssertionError): + # num_channels should be divisible by groups + channel_shuffle(x, 7) + + groups = 3 + batch_size, num_channels, height, width = x.size() + channels_per_group = num_channels // groups + out = channel_shuffle(x, groups) + # test the output value when groups = 3 + for b in range(batch_size): + for c in range(num_channels): + c_out = c % channels_per_group * groups + c // channels_per_group + for i in range(height): + for j in range(width): + assert x[b, c, i, j] == out[b, c_out, i, j] + + +@pytest.mark.skipif( + digit_version(torch.__version__) < digit_version('1.6.0'), + reason='torch.jit.is_tracing is not available before 1.6.0') +def test_is_tracing(): + + def foo(x): + if is_tracing(): + return x + else: + return x.tolist() + + x = torch.rand(3) + # test without trace + assert isinstance(foo(x), list) + + # test with trace + traced_foo = torch.jit.trace(foo, (torch.rand(1), )) + assert isinstance(traced_foo(x), torch.Tensor) diff --git a/tests/test_models/test_utils/test_position_encoding.py b/tests/test_models/test_utils/test_position_encoding.py new file mode 100644 index 0000000..feb171c --- /dev/null +++ b/tests/test_models/test_utils/test_position_encoding.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
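+# Shape arithmetic for the single check below (a note, assuming nothing beyond
+# the test itself): the input is 1 x 3136 x 32, i.e. 56 * 56 = 3136 tokens of
+# width 32; with stride=2 the conditional position encoding halves each
+# spatial side, giving 28 * 28 = 784 tokens and hence the expected shape
+# (1, 784, 32).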
+import torch + +from mmcls.models.utils import ConditionalPositionEncoding + + +def test_conditional_position_encoding_module(): + CPE = ConditionalPositionEncoding(in_channels=32, embed_dims=32, stride=2) + outs = CPE(torch.randn(1, 3136, 32), (56, 56)) + assert outs.shape == torch.Size([1, 784, 32]) diff --git a/tests/test_models/test_utils/test_se.py b/tests/test_models/test_utils/test_se.py new file mode 100644 index 0000000..8cb8c50 --- /dev/null +++ b/tests/test_models/test_utils/test_se.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import SELayer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def test_se(): + with pytest.raises(AssertionError): + # base_channels must be a number + SELayer(16, squeeze_channels='32') + + with pytest.raises(AssertionError): + # base_channels must be None or a number larger than 0 + SELayer(16, squeeze_channels=-1) + + with pytest.raises(AssertionError): + # act_cfg must be two dict tuple + SELayer( + 16, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'), + dict(type='ReLU'))) + + # Test SELayer forward, channels=64 + input = torch.randn((4, 64, 112, 112)) + se = SELayer(64) + output = se(input) + assert se.conv1.out_channels == 8 + assert se.conv2.in_channels == 8 + assert output.shape == torch.Size((4, 64, 112, 112)) + + # Test SELayer forward, ratio=4 + input = torch.randn((4, 128, 112, 112)) + se = SELayer(128, ratio=4) + output = se(input) + assert se.conv1.out_channels == 32 + assert se.conv2.in_channels == 32 + assert output.shape == torch.Size((4, 128, 112, 112)) + + # Test SELayer forward, channels=54, ratio=4 + # channels cannot be divisible by ratio + input = torch.randn((1, 54, 76, 103)) + se = SELayer(54, ratio=4) + output = se(input) + assert se.conv1.out_channels == 16 + assert se.conv2.in_channels == 16 + assert output.shape == torch.Size((1, 54, 76, 103)) + + # Test SELayer forward, divisor=2 + se = SELayer(54, ratio=4, divisor=2) + output = se(input) + assert se.conv1.out_channels == 14 + assert se.conv2.in_channels == 14 + assert output.shape == torch.Size((1, 54, 76, 103)) + + # Test SELayer forward, squeeze_channels=25 + input = torch.randn((1, 128, 56, 56)) + se = SELayer(128, squeeze_channels=25) + output = se(input) + assert se.conv1.out_channels == 25 + assert se.conv2.in_channels == 25 + assert output.shape == torch.Size((1, 128, 56, 56)) + + # Test SELayer forward, not used ratio and divisor + input = torch.randn((1, 128, 56, 56)) + se = SELayer( + 128, + squeeze_channels=13, + ratio=4, + divisor=8, + ) + output = se(input) + assert se.conv1.out_channels == 13 + assert se.conv2.in_channels == 13 + assert output.shape == torch.Size((1, 128, 56, 56)) + + # Test SELayer with HSigmoid activate layer + input = torch.randn((4, 128, 56, 56)) + se = SELayer( + 128, + squeeze_channels=25, + act_cfg=(dict(type='ReLU'), dict(type='HSigmoid'))) + output = se(input) + assert se.conv1.out_channels == 25 + assert se.conv2.in_channels == 25 + assert output.shape == torch.Size((4, 128, 56, 56)) diff --git a/tests/test_runtime/test_eval_hook.py b/tests/test_runtime/test_eval_hook.py new file mode 100644 index 0000000..b925bde --- /dev/null +++ b/tests/test_runtime/test_eval_hook.py @@ -0,0 +1,204 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
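+# Common structure of the tests in this file (descriptive note only): the
+# dataset's `evaluate` method is replaced with a MagicMock, a runner is built
+# with the hook under test registered, and after `runner.run(...)` the mock is
+# asserted to have been called with the results gathered by the eval hook.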
+import logging +import tempfile +from unittest.mock import MagicMock, patch + +import mmcv.runner +import pytest +import torch +import torch.nn as nn +from mmcv.runner import obj_from_dict +from mmcv.runner.hooks import DistEvalHook, EvalHook +from torch.utils.data import DataLoader, Dataset + +from mmcls.apis import single_gpu_test + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.conv = nn.Conv2d(3, 3, 3) + + def forward(self, img, img_metas, test_mode=False, **kwargs): + return img + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +def test_iter_eval_hook(): + with pytest.raises(TypeError): + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + EvalHook(data_loader, by_epoch=False) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + # test EvalHook + with tempfile.TemporaryDirectory() as tmpdir: + eval_hook = EvalHook(data_loader, by_epoch=False) + runner = mmcv.runner.IterBasedRunner( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + max_iters=1) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)], 1) + test_dataset.evaluate.assert_called_with([torch.tensor([1])], + logger=runner.logger) + + +def test_epoch_eval_hook(): + with pytest.raises(TypeError): + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + EvalHook(data_loader, by_epoch=True) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + # test EvalHook with interval + with tempfile.TemporaryDirectory() as tmpdir: + eval_hook = EvalHook(data_loader, by_epoch=True, interval=2) + runner = mmcv.runner.EpochBasedRunner( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + max_epochs=2) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)]) + test_dataset.evaluate.assert_called_once_with([torch.tensor([1])], + logger=runner.logger) + + +def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + results = single_gpu_test(model, data_loader) + return results + + +@patch('mmcls.apis.multi_gpu_test', multi_gpu_test) +def test_dist_eval_hook(): + with pytest.raises(TypeError): + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + 
DistEvalHook(data_loader, by_epoch=False) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + # test DistEvalHook + with tempfile.TemporaryDirectory() as tmpdir: + p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test) + p.start() + eval_hook = DistEvalHook(data_loader, by_epoch=False) + runner = mmcv.runner.IterBasedRunner( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + max_iters=1) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)]) + test_dataset.evaluate.assert_called_with([torch.tensor([1])], + logger=runner.logger) + p.stop() + + +@patch('mmcls.apis.multi_gpu_test', multi_gpu_test) +def test_dist_eval_hook_epoch(): + with pytest.raises(TypeError): + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + DistEvalHook(data_loader) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + # test DistEvalHook + with tempfile.TemporaryDirectory() as tmpdir: + p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test) + p.start() + eval_hook = DistEvalHook(data_loader, by_epoch=True, interval=2) + runner = mmcv.runner.EpochBasedRunner( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + max_epochs=2) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)]) + test_dataset.evaluate.assert_called_with([torch.tensor([1])], + logger=runner.logger) + p.stop() diff --git a/tests/test_runtime/test_hooks.py b/tests/test_runtime/test_hooks.py new file mode 100644 index 0000000..70140d9 --- /dev/null +++ b/tests/test_runtime/test_hooks.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
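+# The LR schedule exercised below is registered directly from a hook dict with
+# type='CosineAnnealingCooldownLrUpdaterHook'. In a config file the same
+# schedule would normally be expressed via `lr_config` (a sketch, assuming
+# mmcv's usual '<Policy>LrUpdaterHook' naming convention):
+#     lr_config = dict(
+#         policy='CosineAnnealingCooldown',
+#         min_lr_ratio=0.1,
+#         cool_down_time=2,
+#         cool_down_ratio=0.1,
+#         warmup_iters=2,
+#         warmup_ratio=0.9,
+#         by_epoch=False)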
+import logging +import shutil +import tempfile + +import numpy as np +import pytest +import torch +import torch.nn as nn +from mmcv.runner import build_runner +from mmcv.runner.hooks import Hook, IterTimerHook +from torch.utils.data import DataLoader + +import mmcls.core # noqa: F401 + + +def _build_demo_runner_without_hook(runner_type='EpochBasedRunner', + max_epochs=1, + max_iters=None, + multi_optimziers=False): + + class Model(nn.Module): + + def __init__(self): + super().__init__() + self.linear = nn.Linear(2, 1) + self.conv = nn.Conv2d(3, 3, 3) + + def forward(self, x): + return self.linear(x) + + def train_step(self, x, optimizer, **kwargs): + return dict(loss=self(x)) + + def val_step(self, x, optimizer, **kwargs): + return dict(loss=self(x)) + + model = Model() + + if multi_optimziers: + optimizer = { + 'model1': + torch.optim.SGD(model.linear.parameters(), lr=0.02, momentum=0.95), + 'model2': + torch.optim.SGD(model.conv.parameters(), lr=0.01, momentum=0.9), + } + else: + optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95) + + tmp_dir = tempfile.mkdtemp() + runner = build_runner( + dict(type=runner_type), + default_args=dict( + model=model, + work_dir=tmp_dir, + optimizer=optimizer, + logger=logging.getLogger(), + max_epochs=max_epochs, + max_iters=max_iters)) + return runner + + +def _build_demo_runner(runner_type='EpochBasedRunner', + max_epochs=1, + max_iters=None, + multi_optimziers=False): + + log_config = dict( + interval=1, hooks=[ + dict(type='TextLoggerHook'), + ]) + + runner = _build_demo_runner_without_hook(runner_type, max_epochs, + max_iters, multi_optimziers) + + runner.register_checkpoint_hook(dict(interval=1)) + runner.register_logger_hooks(log_config) + return runner + + +class ValueCheckHook(Hook): + + def __init__(self, check_dict, by_epoch=False): + super().__init__() + self.check_dict = check_dict + self.by_epoch = by_epoch + + def after_iter(self, runner): + if self.by_epoch: + return + if runner.iter in self.check_dict: + for attr, target in self.check_dict[runner.iter].items(): + value = eval(f'runner.{attr}') + assert np.isclose(value, target), \ + (f'The value of `runner.{attr}` is {value}, ' + f'not equals to {target}') + + def after_epoch(self, runner): + if not self.by_epoch: + return + if runner.epoch in self.check_dict: + for attr, target in self.check_dict[runner.epoch]: + value = eval(f'runner.{attr}') + assert np.isclose(value, target), \ + (f'The value of `runner.{attr}` is {value}, ' + f'not equals to {target}') + + +@pytest.mark.parametrize('multi_optimziers', (True, False)) +def test_cosine_cooldown_hook(multi_optimziers): + """xdoctest -m tests/test_hooks.py test_cosine_runner_hook.""" + loader = DataLoader(torch.ones((10, 2))) + runner = _build_demo_runner(multi_optimziers=multi_optimziers) + + # add momentum LR scheduler + hook_cfg = dict( + type='CosineAnnealingCooldownLrUpdaterHook', + by_epoch=False, + cool_down_time=2, + cool_down_ratio=0.1, + min_lr_ratio=0.1, + warmup_iters=2, + warmup_ratio=0.9) + runner.register_hook_from_cfg(hook_cfg) + runner.register_hook_from_cfg(dict(type='IterTimerHook')) + runner.register_hook(IterTimerHook()) + + if multi_optimziers: + check_hook = ValueCheckHook({ + 0: { + 'current_lr()["model1"][0]': 0.02, + 'current_lr()["model2"][0]': 0.01, + }, + 5: { + 'current_lr()["model1"][0]': 0.0075558491, + 'current_lr()["model2"][0]': 0.0037779246, + }, + 9: { + 'current_lr()["model1"][0]': 0.0002, + 'current_lr()["model2"][0]': 0.0001, + } + }) + else: + check_hook = ValueCheckHook({ + 0: 
{ + 'current_lr()[0]': 0.02, + }, + 5: { + 'current_lr()[0]': 0.0075558491, + }, + 9: { + 'current_lr()[0]': 0.0002, + } + }) + runner.register_hook(check_hook, priority='LOWEST') + + runner.run([loader], [('train', 1)]) + shutil.rmtree(runner.work_dir) diff --git a/tests/test_runtime/test_num_class_hook.py b/tests/test_runtime/test_num_class_hook.py new file mode 100644 index 0000000..fe8fb05 --- /dev/null +++ b/tests/test_runtime/test_num_class_hook.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import tempfile +from unittest.mock import MagicMock + +import mmcv.runner as mmcv_runner +import pytest +import torch +from mmcv.runner import obj_from_dict +from torch.utils.data import DataLoader, Dataset + +from mmcls.core.hook import ClassNumCheckHook +from mmcls.models.heads.base_head import BaseHead + + +class ExampleDataset(Dataset): + + def __init__(self, CLASSES): + self.CLASSES = CLASSES + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleHead(BaseHead): + + def __init__(self, init_cfg=None): + super(BaseHead, self).__init__(init_cfg) + self.num_classes = 4 + + def forward_train(self, x, gt_label=None, **kwargs): + pass + + +class ExampleModel(torch.nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.conv = torch.nn.Conv2d(3, 3, 3) + self.head = ExampleHead() + + def forward(self, img, img_metas, test_mode=False, **kwargs): + return img + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +@pytest.mark.parametrize('runner_type', + ['EpochBasedRunner', 'IterBasedRunner']) +@pytest.mark.parametrize( + 'CLASSES', [None, ('A', 'B', 'C', 'D', 'E'), ('A', 'B', 'C', 'D')]) +def test_num_class_hook(runner_type, CLASSES): + test_dataset = ExampleDataset(CLASSES) + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + with tempfile.TemporaryDirectory() as tmpdir: + num_class_hook = ClassNumCheckHook() + logger_mock = MagicMock(spec=logging.Logger) + runner = getattr(mmcv_runner, runner_type)( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logger_mock, + max_epochs=1) + runner.register_hook(num_class_hook) + if CLASSES is None: + runner.run([loader], [('train', 1)], 1) + logger_mock.warning.assert_called() + elif len(CLASSES) != 4: + with pytest.raises(AssertionError): + runner.run([loader], [('train', 1)], 1) + else: + runner.run([loader], [('train', 1)], 1) diff --git a/tests/test_runtime/test_optimizer.py b/tests/test_runtime/test_optimizer.py new file mode 100644 index 0000000..2fdaeb0 --- /dev/null +++ b/tests/test_runtime/test_optimizer.py @@ -0,0 +1,309 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
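+# What `check_lamb_optimizer` below verifies (descriptive note): when a Lamb
+# optimizer is built with `paramwise_cfg`, every parameter ends up in its own
+# param group, and bias_lr_mult / bias_decay_mult / norm_decay_mult /
+# dwconv_decay_mult scale the base lr and weight_decay for conv & fc biases,
+# norm layers and depthwise convs respectively. A config-style sketch of the
+# same setup (assumed equivalent to the dict built in test_lamb_optimizer):
+#     optimizer = dict(
+#         type='Lamb', lr=0.01, weight_decay=0.0001,
+#         paramwise_cfg=dict(bias_lr_mult=2, bias_decay_mult=0.5,
+#                            norm_decay_mult=0, dwconv_decay_mult=0.1))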
+import functools +from collections import OrderedDict +from copy import deepcopy +from typing import Iterable + +import torch +import torch.nn as nn +from mmcv.runner import build_optimizer +from mmcv.runner.optimizer.builder import OPTIMIZERS +from mmcv.utils.registry import build_from_cfg +from torch.autograd import Variable +from torch.optim.optimizer import Optimizer + +import mmcls.core # noqa: F401 + +base_lr = 0.01 +base_wd = 0.0001 + + +def assert_equal(x, y): + if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor): + torch.testing.assert_allclose(x, y.to(x.device)) + elif isinstance(x, OrderedDict) and isinstance(y, OrderedDict): + for x_value, y_value in zip(x.values(), y.values()): + assert_equal(x_value, y_value) + elif isinstance(x, dict) and isinstance(y, dict): + assert x.keys() == y.keys() + for key in x.keys(): + assert_equal(x[key], y[key]) + elif isinstance(x, str) and isinstance(y, str): + assert x == y + elif isinstance(x, Iterable) and isinstance(y, Iterable): + assert len(x) == len(y) + for x_item, y_item in zip(x, y): + assert_equal(x_item, y_item) + else: + assert x == y + + +class SubModel(nn.Module): + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2) + self.gn = nn.GroupNorm(2, 2) + self.fc = nn.Linear(2, 2) + self.param1 = nn.Parameter(torch.ones(1)) + + def forward(self, x): + return x + + +class ExampleModel(nn.Module): + + def __init__(self): + super().__init__() + self.param1 = nn.Parameter(torch.ones(1)) + self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False) + self.conv2 = nn.Conv2d(4, 2, kernel_size=1) + self.bn = nn.BatchNorm2d(2) + self.sub = SubModel() + self.fc = nn.Linear(2, 1) + + def forward(self, x): + return x + + +def check_lamb_optimizer(optimizer, + model, + bias_lr_mult=1, + bias_decay_mult=1, + norm_decay_mult=1, + dwconv_decay_mult=1): + param_groups = optimizer.param_groups + assert isinstance(optimizer, Optimizer) + assert optimizer.defaults['lr'] == base_lr + assert optimizer.defaults['weight_decay'] == base_wd + model_parameters = list(model.parameters()) + assert len(param_groups) == len(model_parameters) + for i, param in enumerate(model_parameters): + param_group = param_groups[i] + assert torch.equal(param_group['params'][0], param) + # param1 + param1 = param_groups[0] + assert param1['lr'] == base_lr + assert param1['weight_decay'] == base_wd + # conv1.weight + conv1_weight = param_groups[1] + assert conv1_weight['lr'] == base_lr + assert conv1_weight['weight_decay'] == base_wd + # conv2.weight + conv2_weight = param_groups[2] + assert conv2_weight['lr'] == base_lr + assert conv2_weight['weight_decay'] == base_wd + # conv2.bias + conv2_bias = param_groups[3] + assert conv2_bias['lr'] == base_lr * bias_lr_mult + assert conv2_bias['weight_decay'] == base_wd * bias_decay_mult + # bn.weight + bn_weight = param_groups[4] + assert bn_weight['lr'] == base_lr + assert bn_weight['weight_decay'] == base_wd * norm_decay_mult + # bn.bias + bn_bias = param_groups[5] + assert bn_bias['lr'] == base_lr + assert bn_bias['weight_decay'] == base_wd * norm_decay_mult + # sub.param1 + sub_param1 = param_groups[6] + assert sub_param1['lr'] == base_lr + assert sub_param1['weight_decay'] == base_wd + # sub.conv1.weight + sub_conv1_weight = param_groups[7] + assert sub_conv1_weight['lr'] == base_lr + assert sub_conv1_weight['weight_decay'] == base_wd * dwconv_decay_mult + # sub.conv1.bias + sub_conv1_bias = param_groups[8] + assert sub_conv1_bias['lr'] == base_lr * bias_lr_mult + assert 
sub_conv1_bias['weight_decay'] == base_wd * dwconv_decay_mult + # sub.gn.weight + sub_gn_weight = param_groups[9] + assert sub_gn_weight['lr'] == base_lr + assert sub_gn_weight['weight_decay'] == base_wd * norm_decay_mult + # sub.gn.bias + sub_gn_bias = param_groups[10] + assert sub_gn_bias['lr'] == base_lr + assert sub_gn_bias['weight_decay'] == base_wd * norm_decay_mult + # sub.fc1.weight + sub_fc_weight = param_groups[11] + assert sub_fc_weight['lr'] == base_lr + assert sub_fc_weight['weight_decay'] == base_wd + # sub.fc1.bias + sub_fc_bias = param_groups[12] + assert sub_fc_bias['lr'] == base_lr * bias_lr_mult + assert sub_fc_bias['weight_decay'] == base_wd * bias_decay_mult + # fc1.weight + fc_weight = param_groups[13] + assert fc_weight['lr'] == base_lr + assert fc_weight['weight_decay'] == base_wd + # fc1.bias + fc_bias = param_groups[14] + assert fc_bias['lr'] == base_lr * bias_lr_mult + assert fc_bias['weight_decay'] == base_wd * bias_decay_mult + + +def _test_state_dict(weight, bias, input, constructor): + weight = Variable(weight, requires_grad=True) + bias = Variable(bias, requires_grad=True) + inputs = Variable(input) + + def fn_base(optimizer, weight, bias): + optimizer.zero_grad() + i = input_cuda if weight.is_cuda else inputs + loss = (weight.mv(i) + bias).pow(2).sum() + loss.backward() + return loss + + optimizer = constructor(weight, bias) + fn = functools.partial(fn_base, optimizer, weight, bias) + + # Prime the optimizer + for _ in range(20): + optimizer.step(fn) + # Clone the weights and construct new optimizer for them + weight_c = Variable(weight.data.clone(), requires_grad=True) + bias_c = Variable(bias.data.clone(), requires_grad=True) + optimizer_c = constructor(weight_c, bias_c) + fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c) + # Load state dict + state_dict = deepcopy(optimizer.state_dict()) + state_dict_c = deepcopy(optimizer.state_dict()) + optimizer_c.load_state_dict(state_dict_c) + # Run both optimizations in parallel + for _ in range(20): + optimizer.step(fn) + optimizer_c.step(fn_c) + assert_equal(weight, weight_c) + assert_equal(bias, bias_c) + # Make sure state dict wasn't modified + assert_equal(state_dict, state_dict_c) + # Make sure state dict is deterministic with equal + # but not identical parameters + # NOTE: The state_dict of optimizers in PyTorch 1.5 have random keys, + state_dict = deepcopy(optimizer.state_dict()) + state_dict_c = deepcopy(optimizer_c.state_dict()) + keys = state_dict['param_groups'][-1]['params'] + keys_c = state_dict_c['param_groups'][-1]['params'] + for key, key_c in zip(keys, keys_c): + assert_equal(optimizer.state_dict()['state'][key], + optimizer_c.state_dict()['state'][key_c]) + # Make sure repeated parameters have identical representation in state dict + optimizer_c.param_groups.extend(optimizer_c.param_groups) + assert_equal(optimizer_c.state_dict()['param_groups'][0], + optimizer_c.state_dict()['param_groups'][1]) + + # Check that state dict can be loaded even when we cast parameters + # to a different type and move to a different device. 
+ if not torch.cuda.is_available(): + return + + input_cuda = Variable(inputs.data.float().cuda()) + weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True) + bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True) + optimizer_cuda = constructor(weight_cuda, bias_cuda) + fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, + bias_cuda) + + state_dict = deepcopy(optimizer.state_dict()) + state_dict_c = deepcopy(optimizer.state_dict()) + optimizer_cuda.load_state_dict(state_dict_c) + + # Make sure state dict wasn't modified + assert_equal(state_dict, state_dict_c) + + for _ in range(20): + optimizer.step(fn) + optimizer_cuda.step(fn_cuda) + assert_equal(weight, weight_cuda) + assert_equal(bias, bias_cuda) + + # validate deepcopy() copies all public attributes + def getPublicAttr(obj): + return set(k for k in obj.__dict__ if not k.startswith('_')) + + assert_equal(getPublicAttr(optimizer), getPublicAttr(deepcopy(optimizer))) + + +def _test_basic_cases_template(weight, bias, inputs, constructor, + scheduler_constructors): + """Copied from PyTorch.""" + weight = Variable(weight, requires_grad=True) + bias = Variable(bias, requires_grad=True) + inputs = Variable(inputs) + optimizer = constructor(weight, bias) + schedulers = [] + for scheduler_constructor in scheduler_constructors: + schedulers.append(scheduler_constructor(optimizer)) + + # to check if the optimizer can be printed as a string + optimizer.__repr__() + + def fn(): + optimizer.zero_grad() + y = weight.mv(inputs) + if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device(): + y = y.cuda(bias.get_device()) + loss = (y + bias).pow(2).sum() + loss.backward() + return loss + + initial_value = fn().item() + for _ in range(200): + for scheduler in schedulers: + scheduler.step() + optimizer.step(fn) + + assert fn().item() < initial_value + + +def _test_basic_cases(constructor, + scheduler_constructors=None, + ignore_multidevice=False): + """Copied from PyTorch.""" + if scheduler_constructors is None: + scheduler_constructors = [] + _test_state_dict( + torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor) + _test_basic_cases_template( + torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor, + scheduler_constructors) + # non-contiguous parameters + _test_basic_cases_template( + torch.randn(10, 5, 2)[..., 0], + torch.randn(10, 2)[..., 0], torch.randn(5), constructor, + scheduler_constructors) + # CUDA + if not torch.cuda.is_available(): + return + _test_basic_cases_template( + torch.randn(10, 5).cuda(), + torch.randn(10).cuda(), + torch.randn(5).cuda(), constructor, scheduler_constructors) + # Multi-GPU + if not torch.cuda.device_count() > 1 or ignore_multidevice: + return + _test_basic_cases_template( + torch.randn(10, 5).cuda(0), + torch.randn(10).cuda(1), + torch.randn(5).cuda(0), constructor, scheduler_constructors) + + +def test_lamb_optimizer(): + model = ExampleModel() + optimizer_cfg = dict( + type='Lamb', + lr=base_lr, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=base_wd, + paramwise_cfg=dict( + bias_lr_mult=2, + bias_decay_mult=0.5, + norm_decay_mult=0, + dwconv_decay_mult=0.1)) + optimizer = build_optimizer(model, optimizer_cfg) + check_lamb_optimizer(optimizer, model, **optimizer_cfg['paramwise_cfg']) + + _test_basic_cases(lambda weight, bias: build_from_cfg( + dict(type='Lamb', params=[weight, bias], lr=base_lr), OPTIMIZERS)) diff --git a/tests/test_runtime/test_preciseBN_hook.py b/tests/test_runtime/test_preciseBN_hook.py new file mode 100644 index 
0000000..f9375f9 --- /dev/null +++ b/tests/test_runtime/test_preciseBN_hook.py @@ -0,0 +1,274 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch +import torch.nn as nn +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import EpochBasedRunner, IterBasedRunner, build_optimizer +from mmcv.utils import get_logger +from mmcv.utils.logging import print_log +from torch.utils.data import DataLoader, Dataset + +from mmcls.core.hook import PreciseBNHook +from mmcls.models.classifiers import BaseClassifier + + +class ExampleDataset(Dataset): + + def __init__(self): + self.index = 0 + + def __getitem__(self, idx): + results = dict(imgs=torch.tensor([1.0], dtype=torch.float32)) + return results + + def __len__(self): + return 1 + + +class BiggerDataset(ExampleDataset): + + def __init__(self, fixed_values=range(0, 12)): + assert len(self) == len(fixed_values) + self.fixed_values = fixed_values + + def __getitem__(self, idx): + results = dict( + imgs=torch.tensor([self.fixed_values[idx]], dtype=torch.float32)) + return results + + def __len__(self): + # a bigger dataset + return 12 + + +class ExampleModel(BaseClassifier): + + def __init__(self): + super().__init__() + self.conv = nn.Linear(1, 1) + self.bn = nn.BatchNorm1d(1) + self.test_cfg = None + + def forward(self, imgs, return_loss=False): + return self.bn(self.conv(imgs)) + + def simple_test(self, img, img_metas=None, **kwargs): + return {} + + def extract_feat(self, img, stage='neck'): + return () + + def forward_train(self, img, gt_label, **kwargs): + return {'loss': 0.5} + + def train_step(self, data_batch, optimizer=None, **kwargs): + self.forward(**data_batch) + outputs = { + 'loss': 0.5, + 'log_vars': { + 'accuracy': 0.98 + }, + 'num_samples': 1 + } + return outputs + + +class SingleBNModel(ExampleModel): + + def __init__(self): + super().__init__() + self.bn = nn.BatchNorm1d(1) + self.test_cfg = None + + def forward(self, imgs, return_loss=False): + return self.bn(imgs) + + +class GNExampleModel(ExampleModel): + + def __init__(self): + super().__init__() + self.conv = nn.Linear(1, 1) + self.bn = nn.GroupNorm(1, 1) + self.test_cfg = None + + +class NoBNExampleModel(ExampleModel): + + def __init__(self): + super().__init__() + self.conv = nn.Linear(1, 1) + self.test_cfg = None + + def forward(self, imgs, return_loss=False): + return self.conv(imgs) + + +def test_precise_bn(): + optimizer_cfg = dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) + + test_dataset = ExampleDataset() + loader = DataLoader(test_dataset, batch_size=2) + model = ExampleModel() + optimizer = build_optimizer(model, optimizer_cfg) + logger = get_logger('precise_bn') + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + + with pytest.raises(AssertionError): + # num_samples must be larger than 0 + precise_bn_hook = PreciseBNHook(num_samples=-1) + runner.register_hook(precise_bn_hook) + runner.run([loader], [('train', 1)]) + + with pytest.raises(AssertionError): + # interval must be larger than 0 + precise_bn_hook = PreciseBNHook(interval=0) + runner.register_hook(precise_bn_hook) + runner.run([loader], [('train', 1)]) + + with pytest.raises(AssertionError): + # interval must be larger than 0 + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + precise_bn_hook = PreciseBNHook(interval=0) + runner.register_hook(precise_bn_hook) + 
runner.run([loader], [('train', 1)]) + + with pytest.raises(AssertionError): + # only support EpochBaseRunner + runner = IterBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + precise_bn_hook = PreciseBNHook(interval=2) + runner.register_hook(precise_bn_hook) + print_log(runner) + runner.run([loader], [('train', 1)]) + + # test non-DDP model + test_bigger_dataset = BiggerDataset() + loader = DataLoader(test_bigger_dataset, batch_size=2) + loaders = [loader] + precise_bn_hook = PreciseBNHook(num_samples=4) + assert precise_bn_hook.num_samples == 4 + assert precise_bn_hook.interval == 1 + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + runner.register_hook(precise_bn_hook) + runner.run(loaders, [('train', 1)]) + + # test DP model + test_bigger_dataset = BiggerDataset() + loader = DataLoader(test_bigger_dataset, batch_size=2) + loaders = [loader] + precise_bn_hook = PreciseBNHook(num_samples=4) + assert precise_bn_hook.num_samples == 4 + assert precise_bn_hook.interval == 1 + model = MMDataParallel(model) + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + runner.register_hook(precise_bn_hook) + runner.run(loaders, [('train', 1)]) + + # test model w/ gn layer + loader = DataLoader(test_bigger_dataset, batch_size=2) + loaders = [loader] + precise_bn_hook = PreciseBNHook(num_samples=4) + assert precise_bn_hook.num_samples == 4 + assert precise_bn_hook.interval == 1 + model = GNExampleModel() + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + runner.register_hook(precise_bn_hook) + runner.run(loaders, [('train', 1)]) + + # test model without bn layer + loader = DataLoader(test_bigger_dataset, batch_size=2) + loaders = [loader] + precise_bn_hook = PreciseBNHook(num_samples=4) + assert precise_bn_hook.num_samples == 4 + assert precise_bn_hook.interval == 1 + model = NoBNExampleModel() + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + runner.register_hook(precise_bn_hook) + runner.run(loaders, [('train', 1)]) + + # test how precise it is + loader = DataLoader(test_bigger_dataset, batch_size=2) + loaders = [loader] + precise_bn_hook = PreciseBNHook(num_samples=12) + assert precise_bn_hook.num_samples == 12 + assert precise_bn_hook.interval == 1 + model = SingleBNModel() + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + runner.register_hook(precise_bn_hook) + runner.run(loaders, [('train', 1)]) + imgs_list = list() + for loader in loaders: + for i, data in enumerate(loader): + imgs_list.append(np.array(data['imgs'])) + mean = np.mean([np.mean(batch) for batch in imgs_list]) + # bassel correction used in Pytorch, therefore ddof=1 + var = np.mean([np.var(batch, ddof=1) for batch in imgs_list]) + assert np.equal(mean, model.bn.running_mean) + assert np.equal(var, model.bn.running_var) + + @pytest.mark.skipif( + not torch.cuda.is_available(), reason='requires CUDA support') + def test_ddp_model_precise_bn(): + # test DDP model + test_bigger_dataset = BiggerDataset() + loader = DataLoader(test_bigger_dataset, batch_size=2) + loaders = [loader] + precise_bn_hook = PreciseBNHook(num_samples=5) + assert precise_bn_hook.num_samples == 5 + assert precise_bn_hook.interval == 1 + 
model = ExampleModel() + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=True) + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + logger=logger, + max_epochs=1) + runner.register_hook(precise_bn_hook) + runner.run(loaders, [('train', 1)]) diff --git a/tests/test_utils/test_device.py b/tests/test_utils/test_device.py new file mode 100644 index 0000000..eb10bb2 --- /dev/null +++ b/tests/test_utils/test_device.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase +from unittest.mock import patch + +import mmcv + +from mmcls.utils import auto_select_device + + +class TestAutoSelectDevice(TestCase): + + @patch.object(mmcv, '__version__', '1.6.0') + @patch('mmcv.device.get_device', create=True) + def test_mmcv(self, mock): + auto_select_device() + mock.assert_called_once() + + @patch.object(mmcv, '__version__', '1.5.0') + @patch('torch.cuda.is_available', return_value=True) + def test_cuda(self, mock): + device = auto_select_device() + self.assertEqual(device, 'cuda') + + @patch.object(mmcv, '__version__', '1.5.0') + @patch('torch.cuda.is_available', return_value=False) + def test_cpu(self, mock): + device = auto_select_device() + self.assertEqual(device, 'cpu') diff --git a/tests/test_utils/test_logger.py b/tests/test_utils/test_logger.py new file mode 100644 index 0000000..97a6fb0 --- /dev/null +++ b/tests/test_utils/test_logger.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import tempfile + +import mmcv.utils.logging + +from mmcls.utils import get_root_logger, load_json_log + + +def test_get_root_logger(): + # Reset the initialized log + mmcv.utils.logging.logger_initialized = {} + with tempfile.TemporaryDirectory() as tmpdirname: + log_path = osp.join(tmpdirname, 'test.log') + + logger = get_root_logger(log_file=log_path) + message1 = 'adhsuadghj' + logger.info(message1) + + logger2 = get_root_logger() + message2 = 'm,tkrgmkr' + logger2.info(message2) + + with open(log_path, 'r') as f: + lines = f.readlines() + assert message1 in lines[0] + assert message2 in lines[1] + + assert logger is logger2 + + handlers = list(logger.handlers) + for handler in handlers: + handler.close() + logger.removeHandler(handler) + os.remove(log_path) + + +def test_load_json_log(): + log_path = 'tests/data/test.logjson' + log_dict = load_json_log(log_path) + + # test log_dict + assert set(log_dict.keys()) == set([1, 2, 3]) + + # test epoch dict in log_dict + assert set(log_dict[1].keys()) == set( + ['iter', 'lr', 'memory', 'data_time', 'time', 'mode']) + assert isinstance(log_dict[1]['lr'], list) + assert len(log_dict[1]['iter']) == 4 + assert len(log_dict[1]['lr']) == 4 + assert len(log_dict[2]['iter']) == 3 + assert len(log_dict[2]['lr']) == 3 + assert log_dict[3]['iter'] == [10, 20] + assert log_dict[3]['lr'] == [0.33305, 0.34759] diff --git a/tests/test_utils/test_setup_env.py b/tests/test_utils/test_setup_env.py new file mode 100644 index 0000000..2679dbb --- /dev/null +++ b/tests/test_utils/test_setup_env.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
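+# Behaviour covered below (summary of the assertions, nothing more): with
+# workers_per_gpu > 1 and no explicit env vars, setup_multi_processes sets
+# OMP_NUM_THREADS and MKL_NUM_THREADS to '1', caps OpenCV at one thread and
+# picks the 'fork' start method on non-Windows platforms; with
+# workers_per_gpu <= 1 the environment is left untouched; user-provided values
+# (env vars, `opencv_num_threads`, `mp_start_method`) always take precedence.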
+import multiprocessing as mp +import os +import platform + +import cv2 +from mmcv import Config + +from mmcls.utils import setup_multi_processes + + +def test_setup_multi_processes(): + # temp save system setting + sys_start_mehod = mp.get_start_method(allow_none=True) + sys_cv_threads = cv2.getNumThreads() + # pop and temp save system env vars + sys_omp_threads = os.environ.pop('OMP_NUM_THREADS', default=None) + sys_mkl_threads = os.environ.pop('MKL_NUM_THREADS', default=None) + + # test config without setting env + config = dict(data=dict(workers_per_gpu=2)) + cfg = Config(config) + setup_multi_processes(cfg) + assert os.getenv('OMP_NUM_THREADS') == '1' + assert os.getenv('MKL_NUM_THREADS') == '1' + # when set to 0, the num threads will be 1 + assert cv2.getNumThreads() == 1 + if platform.system() != 'Windows': + assert mp.get_start_method() == 'fork' + + # test num workers <= 1 + os.environ.pop('OMP_NUM_THREADS') + os.environ.pop('MKL_NUM_THREADS') + config = dict(data=dict(workers_per_gpu=0)) + cfg = Config(config) + setup_multi_processes(cfg) + assert 'OMP_NUM_THREADS' not in os.environ + assert 'MKL_NUM_THREADS' not in os.environ + + # test manually set env var + os.environ['OMP_NUM_THREADS'] = '4' + config = dict(data=dict(workers_per_gpu=2)) + cfg = Config(config) + setup_multi_processes(cfg) + assert os.getenv('OMP_NUM_THREADS') == '4' + + # test manually set opencv threads and mp start method + config = dict( + data=dict(workers_per_gpu=2), + opencv_num_threads=4, + mp_start_method='spawn') + cfg = Config(config) + setup_multi_processes(cfg) + assert cv2.getNumThreads() == 4 + assert mp.get_start_method() == 'spawn' + + # revert setting to avoid affecting other programs + if sys_start_mehod: + mp.set_start_method(sys_start_mehod, force=True) + cv2.setNumThreads(sys_cv_threads) + if sys_omp_threads: + os.environ['OMP_NUM_THREADS'] = sys_omp_threads + else: + os.environ.pop('OMP_NUM_THREADS') + if sys_mkl_threads: + os.environ['MKL_NUM_THREADS'] = sys_mkl_threads + else: + os.environ.pop('MKL_NUM_THREADS') diff --git a/tests/test_utils/test_version_utils.py b/tests/test_utils/test_version_utils.py new file mode 100644 index 0000000..f4bb389 --- /dev/null +++ b/tests/test_utils/test_version_utils.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmcls import digit_version + + +def test_digit_version(): + assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0) + assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0) + assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0) + assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1) + assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0) + assert digit_version('1.0') == digit_version('1.0.0') + assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5') + assert digit_version('1.0.0dev') < digit_version('1.0.0a') + assert digit_version('1.0.0a') < digit_version('1.0.0a1') + assert digit_version('1.0.0a') < digit_version('1.0.0b') + assert digit_version('1.0.0b') < digit_version('1.0.0rc') + assert digit_version('1.0.0rc1') < digit_version('1.0.0') + assert digit_version('1.0.0') < digit_version('1.0.0post') + assert digit_version('1.0.0post') < digit_version('1.0.0post1') + assert digit_version('v1') == (1, 0, 0, 0, 0, 0) + assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0) diff --git a/tests/test_utils/test_visualization.py b/tests/test_utils/test_visualization.py new file mode 100644 index 0000000..1bc4c2b --- /dev/null +++ b/tests/test_utils/test_visualization.py @@ -0,0 +1,100 @@ +# Copyright (c) Open-MMLab. All rights reserved. +import os +import os.path as osp +import tempfile +from unittest.mock import MagicMock + +import matplotlib.pyplot as plt +import mmcv +import numpy as np +import pytest + +from mmcls.core import visualization as vis + + +def test_color(): + assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.) + assert vis.color_val_matplotlib('green') == (0., 1., 0.) + assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255) + assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255) + assert vis.color_val_matplotlib(np.zeros(3, dtype=int)) == (0., 0., 0.) + # forbid white color + with pytest.raises(TypeError): + vis.color_val_matplotlib([255, 255, 255]) + # forbid float + with pytest.raises(TypeError): + vis.color_val_matplotlib(1.0) + # overflowed + with pytest.raises(AssertionError): + vis.color_val_matplotlib((0, 0, 500)) + + +def test_imshow_infos(): + tmp_dir = osp.join(tempfile.gettempdir(), 'image_infos') + tmp_filename = osp.join(tmp_dir, 'image.jpg') + + image = np.ones((10, 10, 3), np.uint8) + result = {'pred_label': 1, 'pred_class': 'bird', 'pred_score': 0.98} + out_image = vis.imshow_infos( + image, result, out_file=tmp_filename, show=False) + assert osp.isfile(tmp_filename) + assert image.shape == out_image.shape + assert not np.allclose(image, out_image) + os.remove(tmp_filename) + + # test grayscale images + image = np.ones((10, 10), np.uint8) + result = {'pred_label': 1, 'pred_class': 'bird', 'pred_score': 0.98} + out_image = vis.imshow_infos( + image, result, out_file=tmp_filename, show=False) + assert osp.isfile(tmp_filename) + assert image.shape == out_image.shape[:2] + os.remove(tmp_filename) + + +def test_figure_context_manager(): + # test show multiple images with the same figure. 
+ images = [ + np.random.randint(0, 255, (100, 100, 3), np.uint8) for _ in range(5) + ] + result = {'pred_label': 1, 'pred_class': 'bird', 'pred_score': 0.98} + + with vis.ImshowInfosContextManager() as manager: + fig_show = manager.fig_show + fig_save = manager.fig_save + + # Test time out + fig_show.canvas.start_event_loop = MagicMock() + fig_show.canvas.end_event_loop = MagicMock() + for image in images: + ret, out_image = manager.put_img_infos(image, result, show=True) + assert ret == 0 + assert image.shape == out_image.shape + assert not np.allclose(image, out_image) + assert fig_show is manager.fig_show + assert fig_save is manager.fig_save + + # Test continue key + fig_show.canvas.start_event_loop = ( + lambda _: fig_show.canvas.key_press_event(' ')) + for image in images: + ret, out_image = manager.put_img_infos(image, result, show=True) + assert ret == 0 + assert image.shape == out_image.shape + assert not np.allclose(image, out_image) + assert fig_show is manager.fig_show + assert fig_save is manager.fig_save + + # Test close figure manually + fig_show = manager.fig_show + + def destroy(*_, **__): + fig_show.canvas.close_event() + plt.close(fig_show) + + fig_show.canvas.start_event_loop = destroy + ret, out_image = manager.put_img_infos(images[0], result, show=True) + assert ret == 1 + assert image.shape == out_image.shape + assert not np.allclose(image, out_image) + assert fig_save is manager.fig_save diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py new file mode 100644 index 0000000..b8623ae --- /dev/null +++ b/tools/analysis_tools/analyze_logs.py @@ -0,0 +1,215 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import re + +import matplotlib.pyplot as plt +import numpy as np + +from mmcls.utils import load_json_log + +TEST_METRICS = ('precision', 'recall', 'f1_score', 'support', 'mAP', 'CP', + 'CR', 'CF1', 'OP', 'OR', 'OF1', 'accuracy') + + +def cal_train_time(log_dicts, args): + """Compute the average time per training iteration.""" + for i, log_dict in enumerate(log_dicts): + print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') + all_times = [] + for epoch in log_dict.keys(): + if args.include_outliers: + all_times.append(log_dict[epoch]['time']) + else: + all_times.append(log_dict[epoch]['time'][1:]) + all_times = np.array(all_times) + epoch_ave_time = all_times.mean(-1) + slowest_epoch = epoch_ave_time.argmax() + fastest_epoch = epoch_ave_time.argmin() + std_over_epoch = epoch_ave_time.std() + print(f'slowest epoch {slowest_epoch + 1}, ' + f'average time is {epoch_ave_time[slowest_epoch]:.4f}') + print(f'fastest epoch {fastest_epoch + 1}, ' + f'average time is {epoch_ave_time[fastest_epoch]:.4f}') + print(f'time std over epochs is {std_over_epoch:.4f}') + print(f'average iter time: {np.mean(all_times):.4f} s/iter') + print() + + +def get_legends(args): + """if legend is None, use {filename}_{key} as legend.""" + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + # remove '.json' in the end of log names + basename = os.path.basename(json_log)[:-5] + if basename.endswith('.log'): + basename = basename[:-4] + legend.append(f'{basename}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + return legend + + +def plot_phase_train(metric, log_dict, epochs, curve_label, json_log): + """plot phase of train cruve.""" + if metric not in log_dict[epochs[0]]: + raise KeyError(f'{json_log} does not contain 
metric {metric}' + f' in train mode') + xs, ys = [], [] + for epoch in epochs: + iters = log_dict[epoch]['iter'] + if log_dict[epoch]['mode'][-1] == 'val': + iters = iters[:-1] + num_iters_per_epoch = iters[-1] + assert len(iters) > 0, ( + 'The training log is empty, please try to reduce the ' + 'interval of log in config file.') + xs.append(np.array(iters) / num_iters_per_epoch + (epoch - 1)) + ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('Epochs') + plt.plot(xs, ys, label=curve_label, linewidth=0.75) + + +def plot_phase_val(metric, log_dict, epochs, curve_label, json_log): + """plot phase of val cruves.""" + # some epoch may not have evaluation. as [(train, 5),(val, 1)] + xs = [e for e in epochs if metric in log_dict[e]] + ys = [log_dict[e][metric] for e in xs if metric in log_dict[e]] + assert len(xs) > 0, (f'{json_log} does not contain metric {metric}') + plt.xlabel('Epochs') + plt.plot(xs, ys, label=curve_label, linewidth=0.75) + + +def plot_curve_helper(log_dicts, metrics, args, legend): + """plot curves from log_dicts by metrics.""" + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + json_log = args.json_logs[i] + print(f'plot curve of {json_log}, metric is {metric}') + curve_label = legend[i * num_metrics + j] + if any(m in metric for m in TEST_METRICS): + plot_phase_val(metric, log_dict, epochs, curve_label, json_log) + else: + plot_phase_train(metric, log_dict, epochs, curve_label, + json_log) + plt.legend() + + +def plot_curve(log_dicts, args): + """Plot train metric-iter graph.""" + # set backend and style + if args.backend is not None: + plt.switch_backend(args.backend) + try: + import seaborn as sns + sns.set_style(args.style) + except ImportError: + print("Attention: The plot style won't be applied because 'seaborn' " + 'package is not installed, please install it if you want better ' + 'show style.') + + # set plot window size + wind_w, wind_h = args.window_size.split('*') + wind_w, wind_h = int(wind_w), int(wind_h) + plt.figure(figsize=(wind_w, wind_h)) + + # get legends and metrics + legends = get_legends(args) + metrics = args.keys + + # plot curves from log_dicts by metrics + plot_curve_helper(log_dicts, metrics, args, legends) + + # set title and show or save + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['loss'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='whitegrid', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + parser_plt.add_argument( + '--window-size', + default='12*7', + help='size of the window to display images, in format of "$W*$H".') + + +def add_time_parser(subparsers): + parser_time = 
subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + + if hasattr(args, 'window_size') and args.window_size != '': + assert re.match(r'\d+\*\d+', args.window_size), \ + "'window-size' must be in format 'W*H'." + return args + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = [load_json_log(json_log) for json_log in json_logs] + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/analyze_results.py b/tools/analysis_tools/analyze_results.py new file mode 100644 index 0000000..82555ad --- /dev/null +++ b/tools/analysis_tools/analyze_results.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp + +import mmcv +from mmcv import DictAction + +from mmcls.datasets import build_dataset +from mmcls.models import build_classifier + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMCls evaluate prediction success/fail') + parser.add_argument('config', help='test config file path') + parser.add_argument('result', help='test result json/pkl file') + parser.add_argument('--out-dir', help='dir to store output files') + parser.add_argument( + '--topk', + default=20, + type=int, + help='Number of images to select for success/fail') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + return args + + +def save_imgs(result_dir, folder_name, results, model): + full_dir = osp.join(result_dir, folder_name) + mmcv.mkdir_or_exist(full_dir) + mmcv.dump(results, osp.join(full_dir, folder_name + '.json')) + + # save imgs + show_keys = ['pred_score', 'pred_class', 'gt_class'] + for result in results: + result_show = dict((k, v) for k, v in result.items() if k in show_keys) + outfile = osp.join(full_dir, osp.basename(result['filename'])) + model.show_result(result['filename'], result_show, out_file=outfile) + + +def main(): + args = parse_args() + + # load test results + outputs = mmcv.load(args.result) + assert ('pred_score' in outputs and 'pred_class' in outputs + and 'pred_label' in outputs), \ + 'No "pred_label", "pred_score" or "pred_class" in result file, ' \ + 'please set "--out-items" in test.py' + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + model = build_classifier(cfg.model) + + # build the dataloader + dataset = build_dataset(cfg.data.test) + filenames = list() + for info in dataset.data_infos: + if info['img_prefix'] is not None: + filename = osp.join(info['img_prefix'], + info['img_info']['filename']) + else: + filename = info['img_info']['filename'] + filenames.append(filename) + gt_labels = list(dataset.get_gt_labels()) + gt_classes = [dataset.CLASSES[x] for x in gt_labels] + + outputs['filename'] = filenames + outputs['gt_label'] = gt_labels + outputs['gt_class'] = gt_classes + + need_keys = [ + 'filename', 'gt_label', 'gt_class', 'pred_score', 'pred_label', + 'pred_class' + ] + outputs = {k: v for k, v in outputs.items() if k in need_keys} + outputs_list = list() + for i in range(len(gt_labels)): + output = dict() + for k in outputs.keys(): + output[k] = outputs[k][i] + outputs_list.append(output) + + # sort result + outputs_list = sorted(outputs_list, key=lambda x: x['pred_score']) + + success = list() + fail = list() + for output in outputs_list: + if output['pred_label'] == output['gt_label']: + success.append(output) + else: + fail.append(output) + + success = success[:args.topk] + fail = fail[:args.topk] + + save_imgs(args.out_dir, 'success', success, model) + save_imgs(args.out_dir, 'fail', fail, model) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/eval_metric.py b/tools/analysis_tools/eval_metric.py new file mode 100644 index 0000000..1c95dbc --- /dev/null +++ b/tools/analysis_tools/eval_metric.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import mmcv +from mmcv import Config, DictAction + +from mmcls.datasets import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser(description='Evaluate metric of the ' + 'results saved in pkl format') + parser.add_argument('config', help='Config of the model') + parser.add_argument('pkl_results', help='Results in pickle format') + parser.add_argument( + '--metrics', + type=str, + nargs='+', + help='Evaluation metrics, which depends on the dataset, e.g., ' + '"accuracy", "precision", "recall" and "support".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
If the value to '
+        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
+        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
+        'Note that the quotation marks are necessary and that no white space '
+        'is allowed.')
+    parser.add_argument(
+        '--metric-options',
+        nargs='+',
+        action=DictAction,
+        help='custom options for evaluation, the key-value pair in xxx=yyy '
+        'format will be kwargs for dataset.evaluate() function')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+
+    outputs = mmcv.load(args.pkl_results)
+    assert 'class_scores' in outputs, \
+        'No "class_scores" in result file, please set "--out-items" in test.py'
+
+    cfg = Config.fromfile(args.config)
+    assert args.metrics, (
+        'Please specify at least one metric with the argument "--metrics".')
+
+    if args.cfg_options is not None:
+        cfg.merge_from_dict(args.cfg_options)
+    cfg.data.test.test_mode = True
+
+    dataset = build_dataset(cfg.data.test)
+    pred_score = outputs['class_scores']
+
+    eval_kwargs = cfg.get('evaluation', {}).copy()
+    # hard-code way to remove EvalHook args
+    for key in [
+            'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule'
+    ]:
+        eval_kwargs.pop(key, None)
+    eval_kwargs.update(
+        dict(metric=args.metrics, metric_options=args.metric_options))
+    print(dataset.evaluate(pred_score, **eval_kwargs))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/analysis_tools/get_flops.py b/tools/analysis_tools/get_flops.py
new file mode 100644
index 0000000..45a8785
--- /dev/null
+++ b/tools/analysis_tools/get_flops.py
@@ -0,0 +1,55 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+
+from mmcv import Config
+from mmcv.cnn.utils import get_model_complexity_info
+
+from mmcls.models import build_classifier
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Get model flops and params')
+    parser.add_argument('config', help='config file path')
+    parser.add_argument(
+        '--shape',
+        type=int,
+        nargs='+',
+        default=[224, 224],
+        help='input image size')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+
+    args = parse_args()
+
+    if len(args.shape) == 1:
+        input_shape = (3, args.shape[0], args.shape[0])
+    elif len(args.shape) == 2:
+        input_shape = (3, ) + tuple(args.shape)
+    else:
+        raise ValueError('invalid input shape')
+
+    cfg = Config.fromfile(args.config)
+    model = build_classifier(cfg.model)
+    model.eval()
+
+    if hasattr(model, 'forward_dummy'):
+        model.forward = model.forward_dummy
+    else:
+        raise NotImplementedError(
+            'FLOPs counter is currently not supported with {}'.format(
+                model.__class__.__name__))
+
+    flops, params = get_model_complexity_info(model, input_shape)
+    split_line = '=' * 30
+    print(f'{split_line}\nInput shape: {input_shape}\n'
+          f'Flops: {flops}\nParams: {params}\n{split_line}')
+    print('!!!Please be cautious if you use the results in papers. '
+          'You may need to check if all ops are supported and verify that the '
+          'flops computation is correct.')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/convert_models/efficientnet_to_mmcls.py b/tools/convert_models/efficientnet_to_mmcls.py
new file mode 100644
index 0000000..d1b097b
--- /dev/null
+++ b/tools/convert_models/efficientnet_to_mmcls.py
@@ -0,0 +1,215 @@
+# Copyright (c) OpenMMLab. All rights reserved.
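+# Example usage (the paths below are placeholders; `infile` should be a
+# TensorFlow EfficientNet checkpoint and `outfile` the PyTorch file to write):
+#   python tools/convert_models/efficientnet_to_mmcls.py \
+#       efficientnet-b0/model.ckpt efficientnet-b0_mmcls.pth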
+import argparse +import os + +import numpy as np +import torch +from mmcv.runner import Sequential +from tensorflow.python.training import py_checkpoint_reader + +from mmcls.models.backbones.efficientnet import EfficientNet + + +def tf2pth(v): + if v.ndim == 4: + return np.ascontiguousarray(v.transpose(3, 2, 0, 1)) + elif v.ndim == 2: + return np.ascontiguousarray(v.transpose()) + return v + + +def read_ckpt(ckpt): + reader = py_checkpoint_reader.NewCheckpointReader(ckpt) + weights = { + n: torch.as_tensor(tf2pth(reader.get_tensor(n))) + for (n, _) in reader.get_variable_to_shape_map().items() + } + return weights + + +def map_key(weight): + m = dict() + has_expand_conv = set() + is_MBConv = set() + max_idx = 0 + name = None + for k, v in weight.items(): + seg = k.split('/') + if len(seg) == 1: + continue + if 'edgetpu' in seg[0]: + name = 'e' + seg[0][21:].lower() + else: + name = seg[0][13:] + if seg[2] == 'tpu_batch_normalization_2': + has_expand_conv.add(seg[1]) + if seg[1].startswith('blocks_'): + idx = int(seg[1][7:]) + 1 + max_idx = max(max_idx, idx) + if 'depthwise' in k: + is_MBConv.add(seg[1]) + + model = EfficientNet(name) + idx2key = [] + for idx, module in enumerate(model.layers): + if isinstance(module, Sequential): + for j in range(len(module)): + idx2key.append('{}.{}'.format(idx, j)) + else: + idx2key.append('{}'.format(idx)) + + for k, v in weight.items(): + + if 'Exponential' in k or 'RMS' in k: + continue + + seg = k.split('/') + if len(seg) == 1: + continue + if seg[2] == 'depthwise_conv2d': + v = v.transpose(1, 0) + + if seg[1] == 'stem': + prefix = 'backbone.layers.{}'.format(idx2key[0]) + mapping = { + 'conv2d/kernel': 'conv.weight', + 'tpu_batch_normalization/beta': 'bn.bias', + 'tpu_batch_normalization/gamma': 'bn.weight', + 'tpu_batch_normalization/moving_mean': 'bn.running_mean', + 'tpu_batch_normalization/moving_variance': 'bn.running_var', + } + suffix = mapping['/'.join(seg[2:])] + m[prefix + '.' 
+ suffix] = v + + elif seg[1].startswith('blocks_'): + idx = int(seg[1][7:]) + 1 + prefix = '.'.join(['backbone', 'layers', idx2key[idx]]) + if seg[1] not in is_MBConv: + mapping = { + 'conv2d/kernel': + 'conv1.conv.weight', + 'tpu_batch_normalization/gamma': + 'conv1.bn.weight', + 'tpu_batch_normalization/beta': + 'conv1.bn.bias', + 'tpu_batch_normalization/moving_mean': + 'conv1.bn.running_mean', + 'tpu_batch_normalization/moving_variance': + 'conv1.bn.running_var', + 'conv2d_1/kernel': + 'conv2.conv.weight', + 'tpu_batch_normalization_1/gamma': + 'conv2.bn.weight', + 'tpu_batch_normalization_1/beta': + 'conv2.bn.bias', + 'tpu_batch_normalization_1/moving_mean': + 'conv2.bn.running_mean', + 'tpu_batch_normalization_1/moving_variance': + 'conv2.bn.running_var', + } + else: + + base_mapping = { + 'depthwise_conv2d/depthwise_kernel': + 'depthwise_conv.conv.weight', + 'se/conv2d/kernel': 'se.conv1.conv.weight', + 'se/conv2d/bias': 'se.conv1.conv.bias', + 'se/conv2d_1/kernel': 'se.conv2.conv.weight', + 'se/conv2d_1/bias': 'se.conv2.conv.bias' + } + + if seg[1] not in has_expand_conv: + mapping = { + 'conv2d/kernel': + 'linear_conv.conv.weight', + 'tpu_batch_normalization/beta': + 'depthwise_conv.bn.bias', + 'tpu_batch_normalization/gamma': + 'depthwise_conv.bn.weight', + 'tpu_batch_normalization/moving_mean': + 'depthwise_conv.bn.running_mean', + 'tpu_batch_normalization/moving_variance': + 'depthwise_conv.bn.running_var', + 'tpu_batch_normalization_1/beta': + 'linear_conv.bn.bias', + 'tpu_batch_normalization_1/gamma': + 'linear_conv.bn.weight', + 'tpu_batch_normalization_1/moving_mean': + 'linear_conv.bn.running_mean', + 'tpu_batch_normalization_1/moving_variance': + 'linear_conv.bn.running_var', + } + else: + mapping = { + 'depthwise_conv2d/depthwise_kernel': + 'depthwise_conv.conv.weight', + 'conv2d/kernel': + 'expand_conv.conv.weight', + 'conv2d_1/kernel': + 'linear_conv.conv.weight', + 'tpu_batch_normalization/beta': + 'expand_conv.bn.bias', + 'tpu_batch_normalization/gamma': + 'expand_conv.bn.weight', + 'tpu_batch_normalization/moving_mean': + 'expand_conv.bn.running_mean', + 'tpu_batch_normalization/moving_variance': + 'expand_conv.bn.running_var', + 'tpu_batch_normalization_1/beta': + 'depthwise_conv.bn.bias', + 'tpu_batch_normalization_1/gamma': + 'depthwise_conv.bn.weight', + 'tpu_batch_normalization_1/moving_mean': + 'depthwise_conv.bn.running_mean', + 'tpu_batch_normalization_1/moving_variance': + 'depthwise_conv.bn.running_var', + 'tpu_batch_normalization_2/beta': + 'linear_conv.bn.bias', + 'tpu_batch_normalization_2/gamma': + 'linear_conv.bn.weight', + 'tpu_batch_normalization_2/moving_mean': + 'linear_conv.bn.running_mean', + 'tpu_batch_normalization_2/moving_variance': + 'linear_conv.bn.running_var', + } + mapping.update(base_mapping) + suffix = mapping['/'.join(seg[2:])] + m[prefix + '.' 
+ suffix] = v + elif seg[1] == 'head': + seq_key = idx2key[max_idx + 1] + mapping = { + 'conv2d/kernel': + 'backbone.layers.{}.conv.weight'.format(seq_key), + 'tpu_batch_normalization/beta': + 'backbone.layers.{}.bn.bias'.format(seq_key), + 'tpu_batch_normalization/gamma': + 'backbone.layers.{}.bn.weight'.format(seq_key), + 'tpu_batch_normalization/moving_mean': + 'backbone.layers.{}.bn.running_mean'.format(seq_key), + 'tpu_batch_normalization/moving_variance': + 'backbone.layers.{}.bn.running_var'.format(seq_key), + 'dense/kernel': + 'head.fc.weight', + 'dense/bias': + 'head.fc.bias' + } + key = mapping['/'.join(seg[2:])] + if name.startswith('e') and 'fc' in key: + v = v[1:] + m[key] = v + return m + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('infile', type=str, help='Path to the ckpt.') + parser.add_argument('outfile', type=str, help='Output file.') + args = parser.parse_args() + assert args.outfile + + outdir = os.path.dirname(os.path.abspath(args.outfile)) + if not os.path.exists(outdir): + os.makedirs(outdir) + weights = read_ckpt(args.infile) + weights = map_key(weights) + torch.save(weights, args.outfile) diff --git a/tools/convert_models/hornet2mmcls.py b/tools/convert_models/hornet2mmcls.py new file mode 100644 index 0000000..6f39ffb --- /dev/null +++ b/tools/convert_models/hornet2mmcls.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_hornet(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('norm'): + new_k = k.replace('norm.', 'norm3.') + elif 'gnconv.pws' in k: + new_k = k.replace('gnconv.pws', 'gnconv.projs') + elif 'gamma1' in k: + new_k = k.replace('gamma1', 'gamma1.weight') + elif 'gamma2' in k: + new_k = k.replace('gamma2', 'gamma2.weight') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained van models to mmcls style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + weight = convert_hornet(state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/mlpmixer_to_mmcls.py b/tools/convert_models/mlpmixer_to_mmcls.py new file mode 100644 index 0000000..6096c13 --- /dev/null +++ b/tools/convert_models/mlpmixer_to_mmcls.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from pathlib import Path + +import torch + + +def convert_weights(weight): + """Weight Converter. 
+ + Converts the weights from timm to mmcls + + Args: + weight (dict): weight dict from timm + + Returns: converted weight dict for mmcls + """ + result = dict() + result['meta'] = dict() + temp = dict() + mapping = { + 'stem': 'patch_embed', + 'proj': 'projection', + 'mlp_tokens.fc1': 'token_mix.layers.0.0', + 'mlp_tokens.fc2': 'token_mix.layers.1', + 'mlp_channels.fc1': 'channel_mix.layers.0.0', + 'mlp_channels.fc2': 'channel_mix.layers.1', + 'norm1': 'ln1', + 'norm2': 'ln2', + 'norm.': 'ln1.', + 'blocks': 'layers' + } + for k, v in weight.items(): + for mk, mv in mapping.items(): + if mk in k: + k = k.replace(mk, mv) + if k.startswith('head.'): + temp['head.fc.' + k[5:]] = v + else: + temp['backbone.' + k] = v + result['state_dict'] = temp + return result + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + original_model = torch.load(args.src, map_location='cpu') + converted_model = convert_weights(original_model) + torch.save(converted_model, args.dst) diff --git a/tools/convert_models/mobilenetv2_to_mmcls.py b/tools/convert_models/mobilenetv2_to_mmcls.py new file mode 100644 index 0000000..7f6654e --- /dev/null +++ b/tools/convert_models/mobilenetv2_to_mmcls.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + + +def convert_conv1(model_key, model_weight, state_dict, converted_names): + if model_key.find('features.0.0') >= 0: + new_key = model_key.replace('features.0.0', 'backbone.conv1.conv') + else: + new_key = model_key.replace('features.0.1', 'backbone.conv1.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_conv5(model_key, model_weight, state_dict, converted_names): + if model_key.find('features.18.0') >= 0: + new_key = model_key.replace('features.18.0', 'backbone.conv2.conv') + else: + new_key = model_key.replace('features.18.1', 'backbone.conv2.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('classifier.1', 'head.fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_block(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer_id = int(split_keys[1]) + new_layer_id = 0 + sub_id = 0 + if layer_id == 1: + new_layer_id = 1 + sub_id = 0 + elif layer_id in range(2, 4): + new_layer_id = 2 + sub_id = layer_id - 2 + elif layer_id in range(4, 7): + new_layer_id = 3 + sub_id = layer_id - 4 + elif layer_id in range(7, 11): + new_layer_id = 4 + sub_id = layer_id - 7 + elif layer_id in range(11, 14): + new_layer_id = 5 + sub_id = layer_id - 11 + elif layer_id in range(14, 17): + new_layer_id = 6 + sub_id = layer_id - 14 + elif layer_id == 17: + new_layer_id = 7 + sub_id = 0 + + new_key = model_key.replace(f'features.{layer_id}', + f'backbone.layer{new_layer_id}.{sub_id}') + if new_layer_id == 1: + if new_key.find('conv.0.0') >= 0: + new_key = 
new_key.replace('conv.0.0', 'conv.0.conv') + elif new_key.find('conv.0.1') >= 0: + new_key = new_key.replace('conv.0.1', 'conv.0.bn') + elif new_key.find('conv.1') >= 0: + new_key = new_key.replace('conv.1', 'conv.1.conv') + elif new_key.find('conv.2') >= 0: + new_key = new_key.replace('conv.2', 'conv.1.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + else: + if new_key.find('conv.0.0') >= 0: + new_key = new_key.replace('conv.0.0', 'conv.0.conv') + elif new_key.find('conv.0.1') >= 0: + new_key = new_key.replace('conv.0.1', 'conv.0.bn') + elif new_key.find('conv.1.0') >= 0: + new_key = new_key.replace('conv.1.0', 'conv.1.conv') + elif new_key.find('conv.1.1') >= 0: + new_key = new_key.replace('conv.1.1', 'conv.1.bn') + elif new_key.find('conv.2') >= 0: + new_key = new_key.replace('conv.2', 'conv.2.conv') + elif new_key.find('conv.3') >= 0: + new_key = new_key.replace('conv.3', 'conv.2.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in torchvision pretrained MobileNetV2 models to mmcls + style.""" + + # load pytorch model + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + + for key, weight in blobs.items(): + if 'features.0' in key: + convert_conv1(key, weight, state_dict, converted_names) + elif 'classifier' in key: + convert_head(key, weight, state_dict, converted_names) + elif 'features.18' in key: + convert_conv5(key, weight, state_dict, converted_names) + else: + convert_block(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/publish_model.py b/tools/convert_models/publish_model.py new file mode 100644 index 0000000..a80f3e2 --- /dev/null +++ b/tools/convert_models/publish_model.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import datetime +import subprocess +from pathlib import Path + +import torch +from mmcv import digit_version + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. 
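+    # For example (illustrative only, the exact keys depend on how the
+    # checkpoint was saved):
+    #     checkpoint.get('meta', {}).pop('env_info', None)
+    #     checkpoint.get('meta', {}).pop('config', None)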
+ if digit_version(torch.__version__) >= digit_version('1.6'): + torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) + else: + torch.save(checkpoint, out_file) + + sha = subprocess.check_output(['sha256sum', out_file]).decode() + if out_file.endswith('.pth'): + out_file_name = out_file[:-4] + else: + out_file_name = out_file + + current_date = datetime.datetime.now().strftime('%Y%m%d') + final_file = out_file_name + f'_{current_date}-{sha[:8]}.pth' + subprocess.Popen(['mv', out_file, final_file]) + + print(f'Successfully generated the publish-ckpt as {final_file}.') + + +def main(): + args = parse_args() + out_dir = Path(args.out_file).parent + if not out_dir.exists(): + raise ValueError(f'Directory {out_dir} does not exist, ' + 'please generate it manually.') + process_checkpoint(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/reparameterize_model.py b/tools/convert_models/reparameterize_model.py new file mode 100644 index 0000000..5224c35 --- /dev/null +++ b/tools/convert_models/reparameterize_model.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from pathlib import Path + +import torch + +from mmcls.apis import init_model +from mmcls.models.classifiers import ImageClassifier + + +def convert_classifier_to_deploy(model, save_path): + print('Converting...') + assert hasattr(model, 'backbone') and \ + hasattr(model.backbone, 'switch_to_deploy'), \ + '`model.backbone` must has method of "switch_to_deploy".' \ + f' But {model.backbone.__class__} does not have.' + + model.backbone.switch_to_deploy() + torch.save(model.state_dict(), save_path) + + print('Done! Save at path "{}"'.format(save_path)) + + +def main(): + parser = argparse.ArgumentParser( + description='Convert the parameters of the repvgg block ' + 'from training mode to deployment mode.') + parser.add_argument( + 'config_path', + help='The path to the configuration file of the network ' + 'containing the repvgg block.') + parser.add_argument( + 'checkpoint_path', + help='The path to the checkpoint file corresponding to the model.') + parser.add_argument( + 'save_path', + help='The path where the converted checkpoint file is stored.') + args = parser.parse_args() + + save_path = Path(args.save_path) + if save_path.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit() + save_path.parent.mkdir(parents=True, exist_ok=True) + + model = init_model( + args.config_path, checkpoint=args.checkpoint_path, device='cpu') + assert isinstance(model, ImageClassifier), \ + '`model` must be a `mmcls.classifiers.ImageClassifier` instance.' + + convert_classifier_to_deploy(model, args.save_path) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/reparameterize_repvgg.py b/tools/convert_models/reparameterize_repvgg.py new file mode 100644 index 0000000..e075d83 --- /dev/null +++ b/tools/convert_models/reparameterize_repvgg.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import warnings +from pathlib import Path + +import torch + +from mmcls.apis import init_model + +bright_style, reset_style = '\x1b[1m', '\x1b[0m' +red_text, blue_text = '\x1b[31m', '\x1b[34m' +white_background = '\x1b[107m' + +msg = bright_style + red_text +msg += 'DeprecationWarning: This tool will be deprecated in future. 
' +msg += red_text + 'Welcome to use the ' +msg += white_background +msg += '"tools/convert_models/reparameterize_model.py"' +msg += reset_style +warnings.warn(msg) + + +def convert_repvggblock_param(config_path, checkpoint_path, save_path): + model = init_model(config_path, checkpoint=checkpoint_path) + print('Converting...') + + model.backbone.switch_to_deploy() + torch.save(model.state_dict(), save_path) + + print('Done! Save at path "{}"'.format(save_path)) + + +def main(): + parser = argparse.ArgumentParser( + description='Convert the parameters of the repvgg block ' + 'from training mode to deployment mode.') + parser.add_argument( + 'config_path', + help='The path to the configuration file of the network ' + 'containing the repvgg block.') + parser.add_argument( + 'checkpoint_path', + help='The path to the checkpoint file corresponding to the model.') + parser.add_argument( + 'save_path', + help='The path where the converted checkpoint file is stored.') + args = parser.parse_args() + + save_path = Path(args.save_path) + if save_path.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + save_path.parent.mkdir(parents=True, exist_ok=True) + + convert_repvggblock_param(args.config_path, args.checkpoint_path, + args.save_path) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/repvgg_to_mmcls.py b/tools/convert_models/repvgg_to_mmcls.py new file mode 100644 index 0000000..b7a1f05 --- /dev/null +++ b/tools/convert_models/repvgg_to_mmcls.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict +from pathlib import Path + +import torch + + +def convert(src, dst): + print('Converting...') + blobs = torch.load(src, map_location='cpu') + converted_state_dict = OrderedDict() + + for key in blobs: + splited_key = key.split('.') + splited_key = ['norm' if i == 'bn' else i for i in splited_key] + splited_key = [ + 'branch_norm' if i == 'rbr_identity' else i for i in splited_key + ] + splited_key = [ + 'branch_1x1' if i == 'rbr_1x1' else i for i in splited_key + ] + splited_key = [ + 'branch_3x3' if i == 'rbr_dense' else i for i in splited_key + ] + splited_key = [ + 'backbone.stem' if i[:6] == 'stage0' else i for i in splited_key + ] + splited_key = [ + 'backbone.stage_' + i[5] if i[:5] == 'stage' else i + for i in splited_key + ] + splited_key = ['se_layer' if i == 'se' else i for i in splited_key] + splited_key = ['conv1.conv' if i == 'down' else i for i in splited_key] + splited_key = ['conv2.conv' if i == 'up' else i for i in splited_key] + splited_key = ['head.fc' if i == 'linear' else i for i in splited_key] + new_key = '.'.join(splited_key) + converted_state_dict[new_key] = blobs[key] + + torch.save(converted_state_dict, dst) + print('Done!') + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/shufflenetv2_to_mmcls.py b/tools/convert_models/shufflenetv2_to_mmcls.py new file mode 100644 index 0000000..69046c3 --- /dev/null +++ b/tools/convert_models/shufflenetv2_to_mmcls.py @@ -0,0 +1,113 @@ +# Copyright (c) 
OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + + +def convert_conv1(model_key, model_weight, state_dict, converted_names): + if model_key.find('conv1.0') >= 0: + new_key = model_key.replace('conv1.0', 'backbone.conv1.conv') + else: + new_key = model_key.replace('conv1.1', 'backbone.conv1.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_conv5(model_key, model_weight, state_dict, converted_names): + if model_key.find('conv5.0') >= 0: + new_key = model_key.replace('conv5.0', 'backbone.layers.3.conv') + else: + new_key = model_key.replace('conv5.1', 'backbone.layers.3.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('fc', 'head.fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_block(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer, block, branch = split_keys[:3] + layer_id = int(layer[-1]) - 2 + new_key = model_key.replace(layer, f'backbone.layers.{layer_id}') + + if branch == 'branch1': + if new_key.find('branch1.0') >= 0: + new_key = new_key.replace('branch1.0', 'branch1.0.conv') + elif new_key.find('branch1.1') >= 0: + new_key = new_key.replace('branch1.1', 'branch1.0.bn') + elif new_key.find('branch1.2') >= 0: + new_key = new_key.replace('branch1.2', 'branch1.1.conv') + elif new_key.find('branch1.3') >= 0: + new_key = new_key.replace('branch1.3', 'branch1.1.bn') + elif branch == 'branch2': + + if new_key.find('branch2.0') >= 0: + new_key = new_key.replace('branch2.0', 'branch2.0.conv') + elif new_key.find('branch2.1') >= 0: + new_key = new_key.replace('branch2.1', 'branch2.0.bn') + elif new_key.find('branch2.3') >= 0: + new_key = new_key.replace('branch2.3', 'branch2.1.conv') + elif new_key.find('branch2.4') >= 0: + new_key = new_key.replace('branch2.4', 'branch2.1.bn') + elif new_key.find('branch2.5') >= 0: + new_key = new_key.replace('branch2.5', 'branch2.2.conv') + elif new_key.find('branch2.6') >= 0: + new_key = new_key.replace('branch2.6', 'branch2.2.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in torchvision pretrained ShuffleNetV2 models to mmcls + style.""" + + # load pytorch model + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + + for key, weight in blobs.items(): + if 'conv1' in key: + convert_conv1(key, weight, state_dict, converted_names) + elif 'fc' in key: + convert_head(key, weight, state_dict, converted_names) + elif key.startswith('s'): + convert_block(key, weight, state_dict, converted_names) + elif 'conv5' in key: + convert_conv5(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = 
argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/torchvision_to_mmcls.py b/tools/convert_models/torchvision_to_mmcls.py new file mode 100644 index 0000000..679b791 --- /dev/null +++ b/tools/convert_models/torchvision_to_mmcls.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict +from pathlib import Path + +import torch + + +def convert_resnet(src_dict, dst_dict): + """convert resnet checkpoints from torchvision.""" + for key, value in src_dict.items(): + if not key.startswith('fc'): + dst_dict['backbone.' + key] = value + else: + dst_dict['head.' + key] = value + + +# model name to convert function +CONVERT_F_DICT = { + 'resnet': convert_resnet, +} + + +def convert(src: str, dst: str, convert_f: callable): + print('Converting...') + blobs = torch.load(src, map_location='cpu') + converted_state_dict = OrderedDict() + + # convert key in weight + convert_f(blobs, converted_state_dict) + + torch.save(converted_state_dict, dst) + print('Done!') + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + parser.add_argument( + 'model', type=str, help='The algorithm needs to change the keys.') + args = parser.parse_args() + + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + # this tool only support model in CONVERT_F_DICT + support_models = list(CONVERT_F_DICT.keys()) + if args.model not in CONVERT_F_DICT: + print(f'The "{args.model}" has not been supported to convert now.') + print(f'This tool only supports {", ".join(support_models)}.') + print('If you have done the converting job, PR is welcome!') + exit(1) + + convert_f = CONVERT_F_DICT[args.model] + convert(args.src, args.dst, convert_f) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/twins2mmcls.py b/tools/convert_models/twins2mmcls.py new file mode 100644 index 0000000..e0ea04c --- /dev/null +++ b/tools/convert_models/twins2mmcls.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_twins(args, ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('patch_embeds'): + if 'proj.' in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('blocks'): + k = k.replace('blocks', 'stages') + # Union + if 'mlp.fc1' in k: + new_k = k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = k.replace('mlp.fc2', 'ffn.layers.1') + + else: + new_k = k + new_k = new_k.replace('blocks.', 'layers.') + elif k.startswith('pos_block'): + new_k = k.replace('pos_block', 'position_encodings') + if 'proj.0.' in new_k: + new_k = new_k.replace('proj.0.', 'proj.') + elif k.startswith('norm'): + new_k = k.replace('norm', 'norm_after_stage3') + else: + new_k = k + new_k = 'backbone.' 
+ new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in timm pretrained vit models to ' + 'MMClassification style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + # timm checkpoint + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_twins(args, state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/van2mmcls.py b/tools/convert_models/van2mmcls.py new file mode 100644 index 0000000..5ea7d9c --- /dev/null +++ b/tools/convert_models/van2mmcls.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_van(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('patch_embed'): + if 'proj.' in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('block'): + new_k = k.replace('block', 'blocks') + if 'attn.spatial_gating_unit' in new_k: + new_k = new_k.replace('conv0', 'DW_conv') + new_k = new_k.replace('conv_spatial', 'DW_D_conv') + if 'dwconv.dwconv' in new_k: + new_k = new_k.replace('dwconv.dwconv', 'dwconv') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained van models to mmcls style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_van(state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/convert_models/vgg_to_mmcls.py b/tools/convert_models/vgg_to_mmcls.py new file mode 100644 index 0000000..b5ab87f --- /dev/null +++ b/tools/convert_models/vgg_to_mmcls.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
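+# Example usage (placeholder filenames; `--layer-num` must match the
+# torchvision VGG variant, and `--bn` is required for the *_bn weights):
+#   python tools/convert_models/vgg_to_mmcls.py vgg16_bn.pth \
+#       vgg16_bn_mmcls.pth --layer-num 16 --bn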
+import argparse +import os +from collections import OrderedDict + +import torch + + +def get_layer_maps(layer_num, with_bn): + layer_maps = {'conv': {}, 'bn': {}} + if with_bn: + if layer_num == 11: + layer_idxs = [0, 4, 8, 11, 15, 18, 22, 25] + elif layer_num == 13: + layer_idxs = [0, 3, 7, 10, 14, 17, 21, 24, 28, 31] + elif layer_num == 16: + layer_idxs = [0, 3, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37, 40] + elif layer_num == 19: + layer_idxs = [ + 0, 3, 7, 10, 14, 17, 20, 23, 27, 30, 33, 36, 40, 43, 46, 49 + ] + else: + raise ValueError(f'Invalid number of layers: {layer_num}') + for i, layer_idx in enumerate(layer_idxs): + if i == 0: + new_layer_idx = layer_idx + else: + new_layer_idx += int((layer_idx - layer_idxs[i - 1]) / 2) + layer_maps['conv'][layer_idx] = new_layer_idx + layer_maps['bn'][layer_idx + 1] = new_layer_idx + else: + if layer_num == 11: + layer_idxs = [0, 3, 6, 8, 11, 13, 16, 18] + new_layer_idxs = [0, 2, 4, 5, 7, 8, 10, 11] + elif layer_num == 13: + layer_idxs = [0, 2, 5, 7, 10, 12, 15, 17, 20, 22] + new_layer_idxs = [0, 1, 3, 4, 6, 7, 9, 10, 12, 13] + elif layer_num == 16: + layer_idxs = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28] + new_layer_idxs = [0, 1, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16] + elif layer_num == 19: + layer_idxs = [ + 0, 2, 5, 7, 10, 12, 14, 16, 19, 21, 23, 25, 28, 30, 32, 34 + ] + new_layer_idxs = [ + 0, 1, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19 + ] + else: + raise ValueError(f'Invalid number of layers: {layer_num}') + + layer_maps['conv'] = { + layer_idx: new_layer_idx + for layer_idx, new_layer_idx in zip(layer_idxs, new_layer_idxs) + } + + return layer_maps + + +def convert(src, dst, layer_num, with_bn=False): + """Convert keys in torchvision pretrained VGG models to mmcls style.""" + + # load pytorch model + assert os.path.isfile(src), f'no checkpoint found at {src}' + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + + layer_maps = get_layer_maps(layer_num, with_bn) + + prefix = 'backbone' + delimiter = '.' 
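+    # Remap torchvision keys to mmcls ones, e.g. for the plain VGG-11
+    # numbering 'features.3.weight' becomes 'backbone.features.2.conv.weight',
+    # while classifier weights only gain the 'backbone.' prefix.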
+    for key, weight in blobs.items():
+        if 'features' in key:
+            module, layer_idx, weight_type = key.split(delimiter)
+            new_key = delimiter.join([prefix, key])
+            layer_idx = int(layer_idx)
+            for layer_key, maps in layer_maps.items():
+                if layer_idx in maps:
+                    new_layer_idx = maps[layer_idx]
+                    new_key = delimiter.join([
+                        prefix, 'features',
+                        str(new_layer_idx), layer_key, weight_type
+                    ])
+            state_dict[new_key] = weight
+            print(f'Convert {key} to {new_key}')
+        elif 'classifier' in key:
+            new_key = delimiter.join([prefix, key])
+            state_dict[new_key] = weight
+            print(f'Convert {key} to {new_key}')
+        else:
+            state_dict[key] = weight
+
+    # save checkpoint
+    checkpoint = dict()
+    checkpoint['state_dict'] = state_dict
+    torch.save(checkpoint, dst)
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Convert model keys')
+    parser.add_argument('src', help='src torchvision model path')
+    parser.add_argument('dst', help='save path')
+    parser.add_argument(
+        '--bn', action='store_true', help='whether original vgg has BN')
+    parser.add_argument(
+        '--layer-num',
+        type=int,
+        choices=[11, 13, 16, 19],
+        default=11,
+        help='number of VGG layers')
+    args = parser.parse_args()
+    convert(args.src, args.dst, layer_num=args.layer_num, with_bn=args.bn)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/dataset_tools/create_lmdb_dataset.py b/tools/dataset_tools/create_lmdb_dataset.py
new file mode 100644
index 0000000..31259f1
--- /dev/null
+++ b/tools/dataset_tools/create_lmdb_dataset.py
@@ -0,0 +1,148 @@
+"""
+Author: Chenhongyi Yang
+Reference: We are sorry that we cannot find this script's original authors, but we appreciate their work.
+"""
+
+import argparse
+import glob
+import os
+import re
+import time
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from typing import Tuple
+
+import cv2
+import lmdb
+
+parser = argparse.ArgumentParser('Convert LMDB dataset')
+parser.add_argument('train_img_dir', help='Path to ImageNet training images')
+parser.add_argument('train_out', help='Path to output training lmdb dataset')
+parser.add_argument('val_img_dir', help='Path to ImageNet validation images')
+parser.add_argument('val_out', help='Path to output validation lmdb dataset')
+args = parser.parse_args()
+
+_10TB = 10 * (1 << 40)
+
+class LmdbDataExporter(object):
+    """
+    making LMDB database
+    """
+    label_pattern = re.compile(r'/.*/.*?(\d+)$')
+
+    def __init__(self,
+                 img_dir=None,
+                 output_path=None,
+                 batch_size=100):
+        """
+        img_dir: imgs directory
+        output_path: LMDB output path
+        """
+        self.img_dir = img_dir
+        self.output_path = output_path
+        self.batch_size = batch_size
+        self.label_list = list()
+
+        if not os.path.exists(img_dir):
+            raise Exception(f'{img_dir} does not exist!')
+
+        if not os.path.exists(output_path):
+            os.makedirs(output_path)
+
+        self.lmdb_env = lmdb.open(output_path, map_size=_10TB, max_dbs=4)
+        self.label_dict = defaultdict(int)
+
+    def export(self):
+        idx = 0
+        results = []
+        st = time.time()
+        iter_img_lst = self.read_imgs()
+        length = self.get_length()
+        while True:
+            items = []
+            try:
+                while len(items) < self.batch_size:
+                    items.append(next(iter_img_lst))
+            except StopIteration:
+                break
+
+            with ThreadPoolExecutor() as executor:
+                results.extend(executor.map(self._extract_once, items))
+
+            if len(results) >= self.batch_size:
+                self.save_to_lmdb(results)
+                idx += self.batch_size
+                et = time.time()
+                print(f'time: {(et-st)}(s) count: {idx}')
+                st = time.time()
+                if length - idx <= self.batch_size:
+                    self.batch_size = 1
+                del
results[:] + + et = time.time() + print(f'time: {(et-st)}(s) count: {idx}') + self.save_to_lmdb(results) + self.save_total(idx) + print('Total length:', len(results)) + del results[:] + + def save_to_lmdb(self, results): + """ + persist to lmdb + """ + with self.lmdb_env.begin(write=True) as txn: + while results: + img_key, img_byte = results.pop() + if img_key is None or img_byte is None: + continue + txn.put(img_key, img_byte) + + def save_total(self, total: int): + """ + persist all numbers of imgs + """ + with self.lmdb_env.begin(write=True, buffers=True) as txn: + txn.put('total'.encode(), str(total).encode()) + + def _extract_once(self, item) -> Tuple[bytes, bytes]: + full_path = item[-1] + imageKey = item[1] + + img = cv2.imread(full_path) + if img is None: + print(f'{full_path} is a bad img file.') + return None, None + _, img_byte = cv2.imencode('.JPEG', img) + return (imageKey.encode('ascii'), img_byte.tobytes()) + + def get_length(self): + img_list = glob.glob(os.path.join(self.img_dir, '*/*.JPEG')) + return len(img_list) + + def read_imgs(self): + img_list = glob.glob(os.path.join(self.img_dir, '*/*.JPEG')) + + for idx, item_img in enumerate(img_list): + write_key = os.path.split(item_img)[-1] + item = (idx, write_key, item_img) + yield item + + +if __name__ == '__main__': + train_input_dir = args.train_img_dir + train_output_path = args.train_out + + val_input_dir = args.val_img_dir + val_output_path = args.val_out + + exporter_train = LmdbDataExporter( + train_input_dir, + train_output_path, + batch_size=10000) + exporter_train.export() + + exporter_val = LmdbDataExporter( + val_input_dir, + val_output_path, + batch_size=10000) + exporter_val.export() \ No newline at end of file diff --git a/tools/deployment/mmcls2torchserve.py b/tools/deployment/mmcls2torchserve.py new file mode 100644 index 0000000..b4ab14d --- /dev/null +++ b/tools/deployment/mmcls2torchserve.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser, Namespace +from pathlib import Path +from tempfile import TemporaryDirectory + +import mmcv + +try: + from model_archiver.model_packaging import package_model + from model_archiver.model_packaging_utils import ModelExportUtils +except ImportError: + package_model = None + + +def mmcls2torchserve( + config_file: str, + checkpoint_file: str, + output_folder: str, + model_name: str, + model_version: str = '1.0', + force: bool = False, +): + """Converts mmclassification model (config + checkpoint) to TorchServe + `.mar`. + + Args: + config_file: + In MMClassification config format. + The contents vary for each task repository. + checkpoint_file: + In MMClassification checkpoint format. + The contents vary for each task repository. + output_folder: + Folder where `{model_name}.mar` will be created. + The file created will be in TorchServe archive format. + model_name: + If not None, used for naming the `{model_name}.mar` file + that will be created under `output_folder`. + If None, `{Path(checkpoint_file).stem}` will be used. + model_version: + Model's version. + force: + If True, if there is an existing `{model_name}.mar` + file under `output_folder` it will be overwritten. 
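+
+    Example (the config, checkpoint and model name below are placeholders,
+    not files shipped with this repo):
+
+        mmcls2torchserve('configs/resnet/resnet50_b32x8_imagenet.py',
+                         'checkpoints/resnet50.pth',
+                         output_folder='deploy', model_name='resnet50')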
+ """ + mmcv.mkdir_or_exist(output_folder) + + config = mmcv.Config.fromfile(config_file) + + with TemporaryDirectory() as tmpdir: + config.dump(f'{tmpdir}/config.py') + + args = Namespace( + **{ + 'model_file': f'{tmpdir}/config.py', + 'serialized_file': checkpoint_file, + 'handler': f'{Path(__file__).parent}/mmcls_handler.py', + 'model_name': model_name or Path(checkpoint_file).stem, + 'version': model_version, + 'export_path': output_folder, + 'force': force, + 'requirements_file': None, + 'extra_files': None, + 'runtime': 'python', + 'archive_format': 'default' + }) + manifest = ModelExportUtils.generate_manifest_json(args) + package_model(args, manifest) + + +def parse_args(): + parser = ArgumentParser( + description='Convert mmcls models to TorchServe `.mar` format.') + parser.add_argument('config', type=str, help='config file path') + parser.add_argument('checkpoint', type=str, help='checkpoint file path') + parser.add_argument( + '--output-folder', + type=str, + required=True, + help='Folder where `{model_name}.mar` will be created.') + parser.add_argument( + '--model-name', + type=str, + default=None, + help='If not None, used for naming the `{model_name}.mar`' + 'file that will be created under `output_folder`.' + 'If None, `{Path(checkpoint_file).stem}` will be used.') + parser.add_argument( + '--model-version', + type=str, + default='1.0', + help='Number used for versioning.') + parser.add_argument( + '-f', + '--force', + action='store_true', + help='overwrite the existing `{model_name}.mar`') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if package_model is None: + raise ImportError('`torch-model-archiver` is required.' + 'Try: pip install torch-model-archiver') + + mmcls2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/tools/deployment/mmcls_handler.py b/tools/deployment/mmcls_handler.py new file mode 100644 index 0000000..68815e9 --- /dev/null +++ b/tools/deployment/mmcls_handler.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import base64 +import os + +import mmcv +import torch +from ts.torch_handler.base_handler import BaseHandler + +from mmcls.apis import inference_model, init_model + + +class MMclsHandler(BaseHandler): + + def initialize(self, context): + properties = context.system_properties + self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = torch.device(self.map_location + ':' + + str(properties.get('gpu_id')) if torch.cuda. 
+ is_available() else self.map_location) + self.manifest = context.manifest + + model_dir = properties.get('model_dir') + serialized_file = self.manifest['model']['serializedFile'] + checkpoint = os.path.join(model_dir, serialized_file) + self.config_file = os.path.join(model_dir, 'config.py') + + self.model = init_model(self.config_file, checkpoint, self.device) + self.initialized = True + + def preprocess(self, data): + images = [] + + for row in data: + image = row.get('data') or row.get('body') + if isinstance(image, str): + image = base64.b64decode(image) + image = mmcv.imfrombytes(image) + images.append(image) + + return images + + def inference(self, data, *args, **kwargs): + results = [] + for image in data: + results.append(inference_model(self.model, image)) + return results + + def postprocess(self, data): + for result in data: + result['pred_label'] = int(result['pred_label']) + return data diff --git a/tools/deployment/onnx2tensorrt.py b/tools/deployment/onnx2tensorrt.py new file mode 100644 index 0000000..8f71b61 --- /dev/null +++ b/tools/deployment/onnx2tensorrt.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import warnings + +import numpy as np + + +def get_GiB(x: int): + """return x GiB.""" + return x * (1 << 30) + + +def onnx2tensorrt(onnx_file, + trt_file, + input_shape, + max_batch_size, + fp16_mode=False, + verify=False, + workspace_size=1): + """Create tensorrt engine from onnx model. + + Args: + onnx_file (str): Filename of the input ONNX model file. + trt_file (str): Filename of the output TensorRT engine file. + input_shape (list[int]): Input shape of the model. + eg [1, 3, 224, 224]. + max_batch_size (int): Max batch size of the model. + verify (bool, optional): Whether to verify the converted model. + Defaults to False. + workspace_size (int, optional): Maximum workspace of GPU. + Defaults to 1. 
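+        fp16_mode (bool, optional): Whether to build the TensorRT engine
+            in fp16 precision. Defaults to False.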
+ """ + import onnx + from mmcv.tensorrt import TRTWraper, onnx2trt, save_trt_engine + + onnx_model = onnx.load(onnx_file) + # create trt engine and wrapper + assert max_batch_size >= 1 + max_shape = [max_batch_size] + list(input_shape[1:]) + opt_shape_dict = {'input': [input_shape, input_shape, max_shape]} + max_workspace_size = get_GiB(workspace_size) + trt_engine = onnx2trt( + onnx_model, + opt_shape_dict, + fp16_mode=fp16_mode, + max_workspace_size=max_workspace_size) + save_dir, _ = osp.split(trt_file) + if save_dir: + os.makedirs(save_dir, exist_ok=True) + save_trt_engine(trt_engine, trt_file) + print(f'Successfully created TensorRT engine: {trt_file}') + + if verify: + import onnxruntime as ort + import torch + + input_img = torch.randn(*input_shape) + input_img_cpu = input_img.detach().cpu().numpy() + input_img_cuda = input_img.cuda() + + # Get results from ONNXRuntime + session_options = ort.SessionOptions() + sess = ort.InferenceSession(onnx_file, session_options) + + # get input and output names + input_names = [_.name for _ in sess.get_inputs()] + output_names = [_.name for _ in sess.get_outputs()] + + onnx_outputs = sess.run(None, { + input_names[0]: input_img_cpu, + }) + + # Get results from TensorRT + trt_model = TRTWraper(trt_file, input_names, output_names) + with torch.no_grad(): + trt_outputs = trt_model({input_names[0]: input_img_cuda}) + trt_outputs = [ + trt_outputs[_].detach().cpu().numpy() for _ in output_names + ] + + # Compare results + np.testing.assert_allclose( + onnx_outputs[0], trt_outputs[0], rtol=1e-05, atol=1e-05) + print('The numerical values are the same ' + + 'between ONNXRuntime and TensorRT') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMClassification models from ONNX to TensorRT') + parser.add_argument('model', help='Filename of the input ONNX model') + parser.add_argument( + '--trt-file', + type=str, + default='tmp.trt', + help='Filename of the output TensorRT engine') + parser.add_argument( + '--verify', + action='store_true', + help='Verify the outputs of ONNXRuntime and TensorRT') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='Input size of the model') + parser.add_argument( + '--max-batch-size', + type=int, + default=1, + help='Maximum batch size of TensorRT model.') + parser.add_argument('--fp16', action='store_true', help='Enable fp16 mode') + parser.add_argument( + '--workspace-size', + type=int, + default=1, + help='Max workspace size of GPU in GiB') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = (1, 3) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + # Create TensorRT engine + onnx2tensorrt( + args.model, + args.trt_file, + input_shape, + args.max_batch_size, + fp16_mode=args.fp16, + verify=args.verify, + workspace_size=args.workspace_size) + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. 
' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/tools/deployment/pytorch2mlmodel.py b/tools/deployment/pytorch2mlmodel.py new file mode 100644 index 0000000..814cbe9 --- /dev/null +++ b/tools/deployment/pytorch2mlmodel.py @@ -0,0 +1,160 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import warnings +from functools import partial + +import mmcv +import numpy as np +import torch +from mmcv.runner import load_checkpoint +from torch import nn + +from mmcls.models import build_classifier + +torch.manual_seed(3) + +try: + import coremltools as ct +except ImportError: + raise ImportError('Please install coremltools to enable output file.') + + +def _demo_mm_inputs(input_shape: tuple, num_classes: int): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): + input batch dimensions + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + rng = np.random.RandomState(0) + imgs = rng.rand(*input_shape) + gt_labels = rng.randint( + low=0, high=num_classes, size=(N, 1)).astype(np.uint8) + mm_inputs = { + 'imgs': torch.FloatTensor(imgs).requires_grad_(False), + 'gt_labels': torch.LongTensor(gt_labels), + } + return mm_inputs + + +def pytorch2mlmodel(model: nn.Module, input_shape: tuple, output_file: str, + add_norm: bool, norm: dict): + """Export Pytorch model to mlmodel format that can be deployed in apple + devices through torch.jit.trace and the coremltools library. + + Optionally, embed the normalization step as a layer to the model. + + Args: + model (nn.Module): Pytorch model we want to export. + input_shape (tuple): Use this input shape to construct + the corresponding dummy input and execute the model. + show (bool): Whether print the computation graph. Default: False. + output_file (string): The path to where we store the output + TorchScript model. + add_norm (bool): Whether to embed the normalization layer to the + output model. + norm (dict): image normalization config for embedding it as a layer + to the output model. + """ + model.cpu().eval() + + num_classes = model.head.num_classes + mm_inputs = _demo_mm_inputs(input_shape, num_classes) + + imgs = mm_inputs.pop('imgs') + img_list = [img[None, :] for img in imgs] + model.forward = partial(model.forward, img_metas={}, return_loss=False) + + with torch.no_grad(): + trace_model = torch.jit.trace(model, img_list[0]) + save_dir, _ = osp.split(output_file) + if save_dir: + os.makedirs(save_dir, exist_ok=True) + + if add_norm: + means, stds = norm.mean, norm.std + if stds.count(stds[0]) != len(stds): + warnings.warn(f'Image std from config is {stds}. However, ' + 'current version of coremltools (5.1) uses a ' + 'global std rather than the channel-specific ' + 'values that torchvision uses. A mean will be ' + 'taken but this might tamper with the resulting ' + 'model\'s predictions. 
For more details refer ' + 'to the coreml docs on ImageType pre-processing') + scale = np.mean(stds) + else: + scale = stds[0] + + bias = [-mean / scale for mean in means] + image_input = ct.ImageType( + name='input_1', + shape=input_shape, + scale=1 / scale, + bias=bias, + color_layout='RGB', + channel_first=True) + + coreml_model = ct.convert(trace_model, inputs=[image_input]) + coreml_model.save(output_file) + else: + coreml_model = ct.convert( + trace_model, inputs=[ct.TensorType(shape=input_shape)]) + coreml_model.save(output_file) + + print(f'Successfully exported coreml model: {output_file}') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMCls to MlModel format for apple devices') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file', type=str) + parser.add_argument('--output-file', type=str, default='model.mlmodel') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='input image size') + parser.add_argument( + '--add-norm-layer', + action='store_true', + help='embed normalization layer to deployed model') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = mmcv.Config.fromfile(args.config) + cfg.model.pretrained = None + + # build the model and load checkpoint + classifier = build_classifier(cfg.model) + + if args.checkpoint: + load_checkpoint(classifier, args.checkpoint, map_location='cpu') + + # convert model to mlmodel file + pytorch2mlmodel( + classifier, + input_shape, + output_file=args.output_file, + add_norm=args.add_norm_layer, + norm=cfg.img_norm_cfg) diff --git a/tools/deployment/pytorch2onnx.py b/tools/deployment/pytorch2onnx.py new file mode 100644 index 0000000..85d795f --- /dev/null +++ b/tools/deployment/pytorch2onnx.py @@ -0,0 +1,232 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import warnings +from functools import partial + +import mmcv +import numpy as np +import onnxruntime as rt +import torch +from mmcv.onnx import register_extra_symbolics +from mmcv.runner import load_checkpoint + +from mmcls.models import build_classifier + +torch.manual_seed(3) + + +def _demo_mm_inputs(input_shape, num_classes): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): + input batch dimensions + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + rng = np.random.RandomState(0) + imgs = rng.rand(*input_shape) + gt_labels = rng.randint( + low=0, high=num_classes, size=(N, 1)).astype(np.uint8) + mm_inputs = { + 'imgs': torch.FloatTensor(imgs).requires_grad_(True), + 'gt_labels': torch.LongTensor(gt_labels), + } + return mm_inputs + + +def pytorch2onnx(model, + input_shape, + opset_version=11, + dynamic_export=False, + show=False, + output_file='tmp.onnx', + do_simplify=False, + verify=False): + """Export Pytorch model to ONNX model and verify the outputs are same + between Pytorch and ONNX. + + Args: + model (nn.Module): Pytorch model we want to export. + input_shape (tuple): Use this input shape to construct + the corresponding dummy input and execute the model. + opset_version (int): The onnx op version. Default: 11. + show (bool): Whether print the computation graph. 
Default: False. + output_file (string): The path to where we store the output ONNX model. + Default: `tmp.onnx`. + verify (bool): Whether compare the outputs between Pytorch and ONNX. + Default: False. + """ + model.cpu().eval() + + if hasattr(model.head, 'num_classes'): + num_classes = model.head.num_classes + # Some backbones use `num_classes=-1` to disable top classifier. + elif getattr(model.backbone, 'num_classes', -1) > 0: + num_classes = model.backbone.num_classes + else: + raise AttributeError('Cannot find "num_classes" in both head and ' + 'backbone, please check the config file.') + + mm_inputs = _demo_mm_inputs(input_shape, num_classes) + + imgs = mm_inputs.pop('imgs') + img_list = [img[None, :] for img in imgs] + + # replace original forward function + origin_forward = model.forward + model.forward = partial(model.forward, img_metas={}, return_loss=False) + register_extra_symbolics(opset_version) + + # support dynamic shape export + if dynamic_export: + dynamic_axes = { + 'input': { + 0: 'batch', + 2: 'width', + 3: 'height' + }, + 'probs': { + 0: 'batch' + } + } + else: + dynamic_axes = {} + + with torch.no_grad(): + torch.onnx.export( + model, (img_list, ), + output_file, + input_names=['input'], + output_names=['probs'], + export_params=True, + keep_initializers_as_inputs=True, + dynamic_axes=dynamic_axes, + verbose=show, + opset_version=opset_version) + print(f'Successfully exported ONNX model: {output_file}') + model.forward = origin_forward + + if do_simplify: + import onnx + import onnxsim + from mmcv import digit_version + + min_required_version = '0.4.0' + assert digit_version(onnxsim.__version__) >= digit_version( + min_required_version + ), f'Requires to install onnxsim>={min_required_version}' + + model_opt, check_ok = onnxsim.simplify(output_file) + if check_ok: + onnx.save(model_opt, output_file) + print(f'Successfully simplified ONNX model: {output_file}') + else: + print('Failed to simplify ONNX model.') + if verify: + # check by onnx + import onnx + onnx_model = onnx.load(output_file) + onnx.checker.check_model(onnx_model) + + # test the dynamic model + if dynamic_export: + dynamic_test_inputs = _demo_mm_inputs( + (input_shape[0], input_shape[1], input_shape[2] * 2, + input_shape[3] * 2), model.head.num_classes) + imgs = dynamic_test_inputs.pop('imgs') + img_list = [img[None, :] for img in imgs] + + # check the numerical value + # get pytorch output + pytorch_result = model(img_list, img_metas={}, return_loss=False)[0] + + # get onnx output + input_all = [node.name for node in onnx_model.graph.input] + input_initializer = [ + node.name for node in onnx_model.graph.initializer + ] + net_feed_input = list(set(input_all) - set(input_initializer)) + assert (len(net_feed_input) == 1) + sess = rt.InferenceSession(output_file) + onnx_result = sess.run( + None, {net_feed_input[0]: img_list[0].detach().numpy()})[0] + if not np.allclose(pytorch_result, onnx_result): + raise ValueError( + 'The outputs are different between Pytorch and ONNX') + print('The outputs are same between Pytorch and ONNX') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Convert MMCls to ONNX') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file', default=None) + parser.add_argument('--show', action='store_true', help='show onnx graph') + parser.add_argument( + '--verify', action='store_true', help='verify the onnx model') + parser.add_argument('--output-file', type=str, default='tmp.onnx') + 
parser.add_argument('--opset-version', type=int, default=11) + parser.add_argument( + '--simplify', + action='store_true', + help='Whether to simplify onnx model.') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='input image size') + parser.add_argument( + '--dynamic-export', + action='store_true', + help='Whether to export ONNX with dynamic input shape. \ + Defaults to False.') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = mmcv.Config.fromfile(args.config) + cfg.model.pretrained = None + + # build the model and load checkpoint + classifier = build_classifier(cfg.model) + + if args.checkpoint: + load_checkpoint(classifier, args.checkpoint, map_location='cpu') + + # convert model to onnx file + pytorch2onnx( + classifier, + input_shape, + opset_version=args.opset_version, + show=args.show, + dynamic_export=args.dynamic_export, + output_file=args.output_file, + do_simplify=args.simplify, + verify=args.verify) + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. ' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/tools/deployment/pytorch2torchscript.py b/tools/deployment/pytorch2torchscript.py new file mode 100644 index 0000000..f261b7c --- /dev/null +++ b/tools/deployment/pytorch2torchscript.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +from functools import partial + +import mmcv +import numpy as np +import torch +from mmcv.runner import load_checkpoint +from torch import nn + +from mmcls.models import build_classifier + +torch.manual_seed(3) + + +def _demo_mm_inputs(input_shape: tuple, num_classes: int): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): + input batch dimensions + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + rng = np.random.RandomState(0) + imgs = rng.rand(*input_shape) + gt_labels = rng.randint( + low=0, high=num_classes, size=(N, 1)).astype(np.uint8) + mm_inputs = { + 'imgs': torch.FloatTensor(imgs).requires_grad_(False), + 'gt_labels': torch.LongTensor(gt_labels), + } + return mm_inputs + + +def pytorch2torchscript(model: nn.Module, input_shape: tuple, output_file: str, + verify: bool): + """Export Pytorch model to TorchScript model through torch.jit.trace and + verify the outputs are same between Pytorch and TorchScript. + + Args: + model (nn.Module): Pytorch model we want to export. + input_shape (tuple): Use this input shape to construct + the corresponding dummy input and execute the model. + show (bool): Whether print the computation graph. Default: False. + output_file (string): The path to where we store the output + TorchScript model. + verify (bool): Whether compare the outputs between Pytorch + and TorchScript through loading generated output_file. 
+ """ + model.cpu().eval() + + num_classes = model.head.num_classes + mm_inputs = _demo_mm_inputs(input_shape, num_classes) + + imgs = mm_inputs.pop('imgs') + img_list = [img[None, :] for img in imgs] + + # replace original forward function + origin_forward = model.forward + model.forward = partial(model.forward, img_metas={}, return_loss=False) + + with torch.no_grad(): + trace_model = torch.jit.trace(model, img_list[0]) + save_dir, _ = osp.split(output_file) + if save_dir: + os.makedirs(save_dir, exist_ok=True) + trace_model.save(output_file) + print(f'Successfully exported TorchScript model: {output_file}') + model.forward = origin_forward + + if verify: + # load by torch.jit + jit_model = torch.jit.load(output_file) + + # check the numerical value + # get pytorch output + pytorch_result = model(img_list, img_metas={}, return_loss=False)[0] + + # get jit output + jit_result = jit_model(img_list[0])[0].detach().numpy() + if not np.allclose(pytorch_result, jit_result): + raise ValueError( + 'The outputs are different between Pytorch and TorchScript') + print('The outputs are same between Pytorch and TorchScript') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMCls to TorchScript') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file', type=str) + parser.add_argument( + '--verify', + action='store_true', + help='verify the TorchScript model', + default=False) + parser.add_argument('--output-file', type=str, default='tmp.pt') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='input image size') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = mmcv.Config.fromfile(args.config) + cfg.model.pretrained = None + + # build the model and load checkpoint + classifier = build_classifier(cfg.model) + + if args.checkpoint: + load_checkpoint(classifier, args.checkpoint, map_location='cpu') + + # convert model to TorchScript file + pytorch2torchscript( + classifier, + input_shape, + output_file=args.output_file, + verify=args.verify) diff --git a/tools/deployment/test.py b/tools/deployment/test.py new file mode 100644 index 0000000..5977f53 --- /dev/null +++ b/tools/deployment/test.py @@ -0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import warnings + +import mmcv +import numpy as np +from mmcv import DictAction +from mmcv.parallel import MMDataParallel + +from mmcls.apis import single_gpu_test +from mmcls.core.export import ONNXRuntimeClassifier, TensorRTClassifier +from mmcls.datasets import build_dataloader, build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Test (and eval) an ONNX model using ONNXRuntime.') + parser.add_argument('config', help='model config file') + parser.add_argument('model', help='filename of the input ONNX model') + parser.add_argument( + '--backend', + help='Backend of the model.', + choices=['onnxruntime', 'tensorrt']) + parser.add_argument( + '--out', type=str, help='output result file in pickle format') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file.') + parser.add_argument( + '--metrics', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., ' + '"accuracy", "precision", "recall", "f1_score", "support" for single ' + 'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for ' + 'multi-label dataset') + parser.add_argument( + '--metric-options', + nargs='+', + action=DictAction, + default={}, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be parsed as a dict metric_options for dataset.evaluate()' + ' function.') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where painted images will be saved') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # build dataset and dataloader + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + samples_per_gpu=cfg.data.samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + shuffle=False, + round_up=False) + + # build onnxruntime model and run inference. + if args.backend == 'onnxruntime': + model = ONNXRuntimeClassifier( + args.model, class_names=dataset.CLASSES, device_id=0) + elif args.backend == 'tensorrt': + model = TensorRTClassifier( + args.model, class_names=dataset.CLASSES, device_id=0) + else: + print('Unknown backend: {}.'.format(args.model)) + exit(1) + + model = MMDataParallel(model, device_ids=[0]) + model.CLASSES = dataset.CLASSES + outputs = single_gpu_test(model, data_loader, args.show, args.show_dir) + + if args.metrics: + results = dataset.evaluate(outputs, args.metrics, args.metric_options) + for k, v in results.items(): + print(f'\n{k} : {v:.2f}') + else: + warnings.warn('Evaluation metrics are not specified.') + scores = np.vstack(outputs) + pred_score = np.max(scores, axis=1) + pred_label = np.argmax(scores, axis=1) + pred_class = [dataset.CLASSES[lb] for lb in pred_label] + results = { + 'pred_score': pred_score, + 'pred_label': pred_label, + 'pred_class': pred_class + } + if not args.out: + print('\nthe predicted result for the first element is ' + f'pred_score = {pred_score[0]:.2f}, ' + f'pred_label = {pred_label[0]} ' + f'and pred_class = {pred_class[0]}. 
' + 'Specify --out to save all results to files.') + if args.out: + print(f'\nwriting results to {args.out}') + mmcv.dump(results, args.out) + + +if __name__ == '__main__': + main() + + # Following strings of text style are from colorama package + bright_style, reset_style = '\x1b[1m', '\x1b[0m' + red_text, blue_text = '\x1b[31m', '\x1b[34m' + white_background = '\x1b[107m' + + msg = white_background + bright_style + red_text + msg += 'DeprecationWarning: This tool will be deprecated in future. ' + msg += blue_text + 'Welcome to use the unified model deployment toolbox ' + msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' + msg += reset_style + warnings.warn(msg) diff --git a/tools/deployment/test_torchserver.py b/tools/deployment/test_torchserver.py new file mode 100644 index 0000000..1be611f --- /dev/null +++ b/tools/deployment/test_torchserver.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser + +import numpy as np +import requests + +from mmcls.apis import inference_model, init_model, show_result_pyplot + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('model_name', help='The model name in the server') + parser.add_argument( + '--inference-addr', + default='127.0.0.1:8080', + help='Address and port of the inference server') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + args = parser.parse_args() + return args + + +def main(args): + # Inference single image by native apis. + model = init_model(args.config, args.checkpoint, device=args.device) + model_result = inference_model(model, args.img) + show_result_pyplot(model, args.img, model_result, title='pytorch_result') + + # Inference single image by torchserve engine. 
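+    # TorchServe exposes the model at `/predictions/{model_name}`; the raw
+    # image bytes are POSTed and the JSON response is compared against the
+    # local PyTorch result below.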
+ url = 'http://' + args.inference_addr + '/predictions/' + args.model_name + with open(args.img, 'rb') as image: + response = requests.post(url, image) + server_result = response.json() + show_result_pyplot(model, args.img, server_result, title='server_result') + + assert np.allclose(model_result['pred_score'], server_result['pred_score']) + print('Test complete, the results of PyTorch and TorchServe are the same.') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/tools/dist_test.sh b/tools/dist_test.sh new file mode 100644 index 0000000..dea131b --- /dev/null +++ b/tools/dist_test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh new file mode 100644 index 0000000..3fca764 --- /dev/null +++ b/tools/dist_train.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/tools/dist_train_arm.sh b/tools/dist_train_arm.sh new file mode 100644 index 0000000..c73b0d2 --- /dev/null +++ b/tools/dist_train_arm.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +export PYTHONPATH="$(dirname $0)/..":$PYTHONPATH + +# launch core setting +KERNEL_NUM=$(($(nproc)/GPUS)) + +# dist env setting +export WORLD_SIZE=$((NNODES*GPUS)) +export MASTER_ADDR=$MASTER_ADDR +export MASTER_PORT=$PORT +LOCAL_RANK_START=$((NODE_RANK*GPUS)) +LOCAL_RANK_END=$((LOCAL_RANK_START+GPUS)) + +for((RANK_ID=LOCAL_RANK_START;RANK_ID segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + if args.summary: + summary(args, cfg) + return + + # resume from the previous experiment + if args.resume_from is not None: + cfg.resume_from = args.resume_from + resume_kfold = torch.load(cfg.resume_from).get('meta', + {}).get('kfold', None) + if resume_kfold is None: + raise RuntimeError( + 'No "meta" key in checkpoints or no "kfold" in the meta dict. ' + 'Please check if the resume checkpoint from a k-fold ' + 'cross-valid experiment.') + resume_fold = resume_kfold['fold'] + assert args.num_splits == resume_kfold['num_splits'] + else: + resume_fold = 0 + + if args.gpus is not None: + cfg.gpu_ids = range(1) + warnings.warn('`--gpus` is deprecated because we only support ' + 'single GPU mode in non-distributed training. 
' + 'Use `gpus=1` now.') + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids[0:1] + warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' + 'Because we only support single GPU mode in ' + 'non-distributed training. Use the first GPU ' + 'in `gpu_ids` now.') + if args.gpus is None and args.gpu_ids is None: + cfg.gpu_ids = [args.gpu_id] + + # init distributed env first, since logger depends on the dist info. + if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + _, world_size = get_dist_info() + cfg.gpu_ids = range(world_size) + + # init a unified random seed + seed = init_random_seed(args.seed) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + + if args.fold is not None: + folds = [args.fold] + else: + folds = range(resume_fold, args.num_splits) + + for fold in folds: + cfg_ = copy_config(cfg) + if fold != resume_fold: + cfg_.resume_from = None + train_single_fold(args, cfg_, fold, distributed, seed) + + if args.fold is None: + summary(args, cfg) + + +if __name__ == '__main__': + main() diff --git a/tools/misc/print_config.py b/tools/misc/print_config.py new file mode 100644 index 0000000..a2781a6 --- /dev/null +++ b/tools/misc/print_config.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +from mmcv import Config, DictAction + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + print(f'Config:\n{cfg.pretty_text}') + + +if __name__ == '__main__': + main() diff --git a/tools/misc/verify_dataset.py b/tools/misc/verify_dataset.py new file mode 100644 index 0000000..6114adb --- /dev/null +++ b/tools/misc/verify_dataset.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import fcntl +import os +from pathlib import Path + +from mmcv import Config, DictAction, track_parallel_progress, track_progress + +from mmcls.datasets import PIPELINES, build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser(description='Verify Dataset') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--out-path', + type=str, + default='brokenfiles.log', + help='output path of all the broken files. If the specified path ' + 'already exists, delete the previous file ') + parser.add_argument( + '--phase', + default='train', + type=str, + choices=['train', 'test', 'val'], + help='phase of dataset to visualize, accept "train" "test" and "val".') + parser.add_argument( + '--num-process', type=int, default=1, help='number of process to use') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + assert args.out_path is not None + assert args.num_process > 0 + return args + + +class DatasetValidator(): + """the dataset tool class to check if all file are broken.""" + + def __init__(self, dataset_cfg, log_file_path, phase): + super(DatasetValidator, self).__init__() + # keep only LoadImageFromFile pipeline + assert dataset_cfg.data[phase].pipeline[0][ + 'type'] == 'LoadImageFromFile', 'This tool is only for dataset ' \ + 'that needs to load image from files.' + self.pipeline = PIPELINES.build(dataset_cfg.data[phase].pipeline[0]) + dataset_cfg.data[phase].pipeline = [] + dataset = build_dataset(dataset_cfg.data[phase]) + + self.dataset = dataset + self.log_file_path = log_file_path + + def valid_idx(self, idx): + item = self.dataset[idx] + try: + item = self.pipeline(item) + except Exception: + with open(self.log_file_path, 'a') as f: + # add file lock to prevent multi-process writing errors + fcntl.flock(f.fileno(), fcntl.LOCK_EX) + filepath = os.path.join(item['img_prefix'], + item['img_info']['filename']) + f.write(filepath + '\n') + print(f'{filepath} cannot be read correctly, please check it.') + # Release files lock automatic using with + + def __len__(self): + return len(self.dataset) + + +def print_info(log_file_path): + """print some information and do extra action.""" + print() + with open(log_file_path, 'r') as f: + context = f.read().strip() + if context == '': + print('There is no broken file found.') + os.remove(log_file_path) + else: + num_file = len(context.split('\n')) + print(f'{num_file} broken files found, name list save in file:' + f'{log_file_path}') + print() + + +def main(): + # parse cfg and args + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # touch output file to save broken files list. 
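+    # Any existing log is removed first so the broken-file list only
+    # reflects the current run.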
+ output_path = Path(args.out_path) + if not output_path.parent.exists(): + raise Exception('log_file parent directory not found.') + if output_path.exists(): + os.remove(output_path) + output_path.touch() + + # do valid + validator = DatasetValidator(cfg, output_path, args.phase) + + if args.num_process > 1: + # The default chunksize calcuation method of Pool.map + chunksize, extra = divmod(len(validator), args.num_process * 8) + if extra: + chunksize += 1 + + track_parallel_progress( + validator.valid_idx, + list(range(len(validator))), + args.num_process, + chunksize=chunksize, + keep_order=False) + else: + track_progress(validator.valid_idx, list(range(len(validator)))) + + print_info(output_path) + + +if __name__ == '__main__': + main() diff --git a/tools/slurm_test.sh b/tools/slurm_test.sh new file mode 100644 index 0000000..6dd67e5 --- /dev/null +++ b/tools/slurm_test.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/tools/slurm_train.sh b/tools/slurm_train.sh new file mode 100644 index 0000000..b3feb3d --- /dev/null +++ b/tools/slurm_train.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/tools/test.py b/tools/test.py new file mode 100644 index 0000000..74f1515 --- /dev/null +++ b/tools/test.py @@ -0,0 +1,243 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import warnings +from numbers import Number + +import mmcv +import numpy as np +import torch +from mmcv import DictAction +from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, + wrap_fp16_model) + +from mmcls.apis import multi_gpu_test, single_gpu_test +from mmcls.datasets import build_dataloader, build_dataset +from mmcls.models import build_classifier +from mmcls.utils import (auto_select_device, get_root_logger, + setup_multi_processes, wrap_distributed_model, + wrap_non_distributed_model) + + +def parse_args(): + parser = argparse.ArgumentParser(description='mmcls test model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='output result file') + out_options = ['class_scores', 'pred_score', 'pred_label', 'pred_class'] + parser.add_argument( + '--out-items', + nargs='+', + default=['all'], + choices=out_options + ['none', 'all'], + help='Besides metrics, what items will be included in the output ' + f'result file. 
You can choose some of ({", ".join(out_options)}), ' + 'or use "all" to include all above, or use "none" to disable all of ' + 'above. Defaults to output all.', + metavar='') + parser.add_argument( + '--metrics', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., ' + '"accuracy", "precision", "recall", "f1_score", "support" for single ' + 'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for ' + 'multi-label dataset') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where painted images will be saved') + parser.add_argument( + '--gpu-collect', + action='store_true', + help='whether to use gpu to collect results') + parser.add_argument('--tmpdir', help='tmp dir for writing some results') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--metric-options', + nargs='+', + action=DictAction, + default={}, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be parsed as a dict metric_options for dataset.evaluate()' + ' function.') + parser.add_argument( + '--show-options', + nargs='+', + action=DictAction, + help='custom options for show_result. key-value pair in xxx=yyy.' + 'Check available options in `model.show_result`.') + parser.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='(Deprecated, please use --gpu-id) ids of gpus to use ' + '(only applicable to non-distributed testing)') + parser.add_argument( + '--gpu-id', + type=int, + default=0, + help='id of gpu to use ' + '(only applicable to non-distributed testing)') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument('--device', help='device used for testing') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + assert args.metrics or args.out, \ + 'Please specify at least one of output path and evaluation metrics.' + + return args + + +def main(): + args = parse_args() + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # set multi-process settings + setup_multi_processes(cfg) + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids[0:1] + warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' + 'Because we only support single GPU mode in ' + 'non-distributed testing. Use the first GPU ' + 'in `gpu_ids` now.') + else: + cfg.gpu_ids = [args.gpu_id] + cfg.device = args.device or auto_select_device() + + # init distributed env first, since logger depends on the dist info. 
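+    # `init_dist` sets up the process group for the chosen launcher
+    # (pytorch, slurm or mpi); with `--launcher none` the test runs on a
+    # single device.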
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + dataset = build_dataset(cfg.data.test, default_args=dict(test_mode=True)) + + # build the dataloader + # The default loader config + loader_cfg = dict( + # cfg.gpus will be ignored if distributed + num_gpus=1 if cfg.device == 'ipu' else len(cfg.gpu_ids), + dist=distributed, + round_up=True, + ) + # The overall dataloader settings + loader_cfg.update({ + k: v + for k, v in cfg.data.items() if k not in [ + 'train', 'val', 'test', 'train_dataloader', 'val_dataloader', + 'test_dataloader' + ] + }) + test_loader_cfg = { + **loader_cfg, + 'shuffle': False, # Not shuffle by default + 'sampler_cfg': None, # Not use sampler by default + **cfg.data.get('test_dataloader', {}), + } + # the extra round_up data will be removed during gpu/cpu collect + data_loader = build_dataloader(dataset, **test_loader_cfg) + + # build the model and load checkpoint + model = build_classifier(cfg.model) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') + + if 'CLASSES' in checkpoint.get('meta', {}): + CLASSES = checkpoint['meta']['CLASSES'] + else: + from mmcls.datasets import ImageNet + warnings.simplefilter('once') + warnings.warn('Class names are not saved in the checkpoint\'s ' + 'meta data, use imagenet by default.') + CLASSES = ImageNet.CLASSES + + if not distributed: + model = wrap_non_distributed_model( + model, device=cfg.device, device_ids=cfg.gpu_ids) + if cfg.device == 'ipu': + from mmcv.device.ipu import cfg2options, ipu_model_wrapper + opts = cfg2options(cfg.runner.get('options_cfg', {})) + if fp16_cfg is not None: + model.half() + model = ipu_model_wrapper(model, opts, fp16_cfg=fp16_cfg) + data_loader.init(opts['inference']) + model.CLASSES = CLASSES + show_kwargs = args.show_options or {} + outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, + **show_kwargs) + else: + model = wrap_distributed_model( + model, device=cfg.device, broadcast_buffers=False) + outputs = multi_gpu_test(model, data_loader, args.tmpdir, + args.gpu_collect) + + rank, _ = get_dist_info() + if rank == 0: + results = {} + logger = get_root_logger() + if args.metrics: + eval_results = dataset.evaluate( + results=outputs, + metric=args.metrics, + metric_options=args.metric_options, + logger=logger) + results.update(eval_results) + for k, v in eval_results.items(): + if isinstance(v, np.ndarray): + v = [round(out, 2) for out in v.tolist()] + elif isinstance(v, Number): + v = round(v, 2) + else: + raise ValueError(f'Unsupport metric type: {type(v)}') + print(f'\n{k} : {v}') + if args.out: + if 'none' not in args.out_items: + scores = np.vstack(outputs) + pred_score = np.max(scores, axis=1) + pred_label = np.argmax(scores, axis=1) + pred_class = [CLASSES[lb] for lb in pred_label] + res_items = { + 'class_scores': scores, + 'pred_score': pred_score, + 'pred_label': pred_label, + 'pred_class': pred_class + } + if 'all' in args.out_items: + results.update(res_items) + else: + for key in args.out_items: + results[key] = res_items[key] + print(f'\ndumping results to {args.out}') + mmcv.dump(results, args.out) + + +if __name__ == '__main__': + main() diff --git a/tools/train.py b/tools/train.py new file mode 100644 index 0000000..f05b46e --- /dev/null +++ b/tools/train.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
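+# Illustrative usage (config path below is a placeholder):
+#   python tools/train.py configs/resnet/resnet18_8xb32_in1k.py \
+#       --work-dir work_dirs/example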
+import argparse +import copy +import os +import os.path as osp +import time +import warnings + +import mmcv +import torch +import torch.distributed as dist +from mmcv import Config, DictAction +from mmcv.runner import get_dist_info, init_dist + +from mmcls import __version__ +from mmcls.apis import init_random_seed, set_random_seed, train_model +from mmcls.datasets import build_dataset +from mmcls.models import build_classifier +from mmcls.utils import (auto_select_device, collect_env, get_root_logger, + setup_multi_processes) + + +from mmcls.gpvit_dev.amp.runner import AmpEpochBasedRunner, AmpIterBasedRunner + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a model') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume-from', help='the checkpoint file to resume from') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument( + '--device', help='device used for training. (Deprecated)') + group_gpus.add_argument( + '--gpus', + type=int, + help='(Deprecated, please use --gpu-id) number of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='(Deprecated, please use --gpu-id) ids of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-id', + type=int, + default=0, + help='id of gpu to use ' + '(only applicable to non-distributed training)') + parser.add_argument( + '--ipu-replicas', + type=int, + default=None, + help='num of ipu replicas to use') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--diff-seed', + action='store_true', + help='Whether or not set different seeds for different ranks') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # set multi-process settings + setup_multi_processes(cfg) + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + if args.resume_from is not None: + cfg.resume_from = args.resume_from + if args.gpus is not None: + cfg.gpu_ids = range(1) + warnings.warn('`--gpus` is deprecated because we only support ' + 'single GPU mode in non-distributed training. ' + 'Use `gpus=1` now.') + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids[0:1] + warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' + 'Because we only support single GPU mode in ' + 'non-distributed training. Use the first GPU ' + 'in `gpu_ids` now.') + if args.gpus is None and args.gpu_ids is None: + cfg.gpu_ids = [args.gpu_id] + + if args.ipu_replicas is not None: + cfg.ipu_replicas = args.ipu_replicas + args.device = 'ipu' + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + _, world_size = get_dist_info() + cfg.gpu_ids = range(world_size) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init the logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + + # log some basic info + logger.info(f'Distributed training: {distributed}') + logger.info(f'Config:\n{cfg.pretty_text}') + + # set random seeds + cfg.device = args.device or auto_select_device() + seed = init_random_seed(args.seed, device=cfg.device) + seed = seed + dist.get_rank() if args.diff_seed else seed + logger.info(f'Set random seed to {seed}, ' + f'deterministic: {args.deterministic}') + set_random_seed(seed, deterministic=args.deterministic) + cfg.seed = seed + meta['seed'] = seed + + model = build_classifier(cfg.model) + model.init_weights() + + datasets = [build_dataset(cfg.data.train)] + if len(cfg.workflow) == 2: + val_dataset = copy.deepcopy(cfg.data.val) + val_dataset.pipeline = cfg.data.train.pipeline + datasets.append(build_dataset(val_dataset)) + + # save mmcls version, config file content and class names in + # runner as meta data + meta.update( + dict( + mmcls_version=__version__, + config=cfg.pretty_text, + CLASSES=datasets[0].CLASSES)) + + # add an attribute for visualization convenience + train_model( + model, + datasets, + cfg, + distributed=distributed, + validate=(not args.no_validate), + timestamp=timestamp, + device=cfg.device, + meta=meta) + + +if __name__ == '__main__': + main() diff --git a/tools/visualizations/vis_cam.py b/tools/visualizations/vis_cam.py new file mode 100644 index 0000000..a1fcada --- /dev/null +++ b/tools/visualizations/vis_cam.py @@ -0,0 +1,356 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
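+# Illustrative usage (image, config and checkpoint paths are placeholders):
+#   python tools/visualizations/vis_cam.py demo.jpg \
+#       configs/resnet/resnet50_8xb32_in1k.py checkpoint.pth \
+#       --method gradcam --save-path cam.jpg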
+import argparse +import copy +import math +import pkg_resources +import re +from pathlib import Path + +import mmcv +import numpy as np +from mmcv import Config, DictAction +from mmcv.utils import to_2tuple +from torch.nn import BatchNorm1d, BatchNorm2d, GroupNorm, LayerNorm + +from mmcls import digit_version +from mmcls.apis import init_model +from mmcls.datasets.pipelines import Compose + +try: + from pytorch_grad_cam import (EigenCAM, EigenGradCAM, GradCAM, + GradCAMPlusPlus, LayerCAM, XGradCAM) + from pytorch_grad_cam.activations_and_gradients import \ + ActivationsAndGradients + from pytorch_grad_cam.utils.image import show_cam_on_image +except ImportError: + raise ImportError('Please run `pip install "grad-cam>=1.3.6"` to install ' + '3rd party package pytorch_grad_cam.') + +# set of transforms, which just change data format, not change the pictures +FORMAT_TRANSFORMS_SET = {'ToTensor', 'Normalize', 'ImageToTensor', 'Collect'} + +# Supported grad-cam type map +METHOD_MAP = { + 'gradcam': GradCAM, + 'gradcam++': GradCAMPlusPlus, + 'xgradcam': XGradCAM, + 'eigencam': EigenCAM, + 'eigengradcam': EigenGradCAM, + 'layercam': LayerCAM, +} + + +def parse_args(): + parser = argparse.ArgumentParser(description='Visualize CAM') + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--target-layers', + default=[], + nargs='+', + type=str, + help='The target layers to get CAM, if not set, the tool will ' + 'specify the norm layer in the last block. Backbones ' + 'implemented by users are recommended to manually specify' + ' target layers in commmad statement.') + parser.add_argument( + '--preview-model', + default=False, + action='store_true', + help='To preview all the model layers') + parser.add_argument( + '--method', + default='GradCAM', + help='Type of method to use, supports ' + f'{", ".join(list(METHOD_MAP.keys()))}.') + parser.add_argument( + '--target-category', + default=[], + nargs='+', + type=int, + help='The target category to get CAM, default to use result ' + 'get from given model.') + parser.add_argument( + '--eigen-smooth', + default=False, + action='store_true', + help='Reduce noise by taking the first principle componenet of ' + '``cam_weights*activations``') + parser.add_argument( + '--aug-smooth', + default=False, + action='store_true', + help='Wether to use test time augmentation, default not to use') + parser.add_argument( + '--save-path', + type=Path, + help='The path to save visualize cam image, default not to save.') + parser.add_argument('--device', default='cpu', help='Device to use cpu') + parser.add_argument( + '--vit-like', + action='store_true', + help='Whether the network is a ViT-like network.') + parser.add_argument( + '--num-extra-tokens', + type=int, + help='The number of extra tokens in ViT-like backbones. Defaults to' + ' use num_extra_tokens of the backbone.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + if args.method.lower() not in METHOD_MAP.keys(): + raise ValueError(f'invalid CAM type {args.method},' + f' supports {", ".join(list(METHOD_MAP.keys()))}.') + + return args + + +def build_reshape_transform(model, args): + """Build reshape_transform for `cam.activations_and_grads`, which is + necessary for ViT-like networks.""" + # ViT_based_Transformers have an additional clstoken in features + if not args.vit_like: + + def check_shape(tensor): + assert len(tensor.size()) != 3, \ + (f"The input feature's shape is {tensor.size()}, and it seems " + 'to have been flattened or from a vit-like network. ' + "Please use `--vit-like` if it's from a vit-like network.") + return tensor + + return check_shape + + if args.num_extra_tokens is not None: + num_extra_tokens = args.num_extra_tokens + elif hasattr(model.backbone, 'num_extra_tokens'): + num_extra_tokens = model.backbone.num_extra_tokens + else: + num_extra_tokens = 1 + + def _reshape_transform(tensor): + """reshape_transform helper.""" + assert len(tensor.size()) == 3, \ + (f"The input feature's shape is {tensor.size()}, " + 'and the feature seems not from a vit-like network?') + tensor = tensor[:, num_extra_tokens:, :] + # get heat_map_height and heat_map_width, preset input is a square + heat_map_area = tensor.size()[1] + height, width = to_2tuple(int(math.sqrt(heat_map_area))) + assert height * height == heat_map_area, \ + (f"The input feature's length ({heat_map_area+num_extra_tokens}) " + f'minus num-extra-tokens ({num_extra_tokens}) is {heat_map_area},' + ' which is not a perfect square number. Please check if you used ' + 'a wrong num-extra-tokens.') + result = tensor.reshape(tensor.size(0), height, width, tensor.size(2)) + + # Bring the channels to the first dimension, like in CNNs. 
+ result = result.transpose(2, 3).transpose(1, 2) + return result + + return _reshape_transform + + +def apply_transforms(img_path, pipeline_cfg): + """Apply transforms pipeline and get both formatted data and the image + without formatting.""" + data = dict(img_info=dict(filename=img_path), img_prefix=None) + + def split_pipeline_cfg(pipeline_cfg): + """to split the transfoms into image_transforms and + format_transforms.""" + image_transforms_cfg, format_transforms_cfg = [], [] + if pipeline_cfg[0]['type'] != 'LoadImageFromFile': + pipeline_cfg.insert(0, dict(type='LoadImageFromFile')) + for transform in pipeline_cfg: + if transform['type'] in FORMAT_TRANSFORMS_SET: + format_transforms_cfg.append(transform) + else: + image_transforms_cfg.append(transform) + return image_transforms_cfg, format_transforms_cfg + + image_transforms, format_transforms = split_pipeline_cfg(pipeline_cfg) + image_transforms = Compose(image_transforms) + format_transforms = Compose(format_transforms) + + intermediate_data = image_transforms(data) + inference_img = copy.deepcopy(intermediate_data['img']) + format_data = format_transforms(intermediate_data) + + return format_data, inference_img + + +class MMActivationsAndGradients(ActivationsAndGradients): + """Activations and gradients manager for mmcls models.""" + + def __call__(self, x): + self.gradients = [] + self.activations = [] + return self.model( + x, return_loss=False, softmax=False, post_process=False) + + +def init_cam(method, model, target_layers, use_cuda, reshape_transform): + """Construct the CAM object once, In order to be compatible with mmcls, + here we modify the ActivationsAndGradients object.""" + + GradCAM_Class = METHOD_MAP[method.lower()] + cam = GradCAM_Class( + model=model, target_layers=target_layers, use_cuda=use_cuda) + # Release the original hooks in ActivationsAndGradients to use + # MMActivationsAndGradients. + cam.activations_and_grads.release() + cam.activations_and_grads = MMActivationsAndGradients( + cam.model, cam.target_layers, reshape_transform) + + return cam + + +def get_layer(layer_str, model): + """get model layer from given str.""" + cur_layer = model + layer_names = layer_str.strip().split('.') + + def get_children_by_name(model, name): + try: + return getattr(model, name) + except AttributeError as e: + raise AttributeError( + e.args[0] + + '. Please use `--preview-model` to check keys at first.') + + def get_children_by_eval(model, name): + try: + return eval(f'model{name}', {}, {'model': model}) + except (AttributeError, IndexError) as e: + raise AttributeError( + e.args[0] + + '. 
Please use `--preview-model` to check keys at first.') + + for layer_name in layer_names: + match_res = re.match('(?P.+?)(?P(\\[.+\\])+)', + layer_name) + if match_res: + layer_name = match_res.groupdict()['name'] + indices = match_res.groupdict()['indices'] + cur_layer = get_children_by_name(cur_layer, layer_name) + cur_layer = get_children_by_eval(cur_layer, indices) + else: + cur_layer = get_children_by_name(cur_layer, layer_name) + + return cur_layer + + +def show_cam_grad(grayscale_cam, src_img, title, out_path=None): + """fuse src_img and grayscale_cam and show or save.""" + grayscale_cam = grayscale_cam[0, :] + src_img = np.float32(src_img) / 255 + visualization_img = show_cam_on_image( + src_img, grayscale_cam, use_rgb=False) + + if out_path: + mmcv.imwrite(visualization_img, str(out_path)) + else: + mmcv.imshow(visualization_img, win_name=title) + + +def get_default_traget_layers(model, args): + """get default target layers from given model, here choose nrom type layer + as default target layer.""" + norm_layers = [] + for m in model.backbone.modules(): + if isinstance(m, (BatchNorm2d, LayerNorm, GroupNorm, BatchNorm1d)): + norm_layers.append(m) + if len(norm_layers) == 0: + raise ValueError( + '`--target-layers` is empty. Please use `--preview-model`' + ' to check keys at first and then specify `target-layers`.') + # if the model is CNN model or Swin model, just use the last norm + # layer as the target-layer, if the model is ViT model, the final + # classification is done on the class token computed in the last + # attention block, the output will not be affected by the 14x14 + # channels in the last layer. The gradient of the output with + # respect to them, will be 0! here use the last 3rd norm layer. + # means the first norm of the last decoder block. + if args.vit_like: + if args.num_extra_tokens: + num_extra_tokens = args.num_extra_tokens + elif hasattr(model.backbone, 'num_extra_tokens'): + num_extra_tokens = model.backbone.num_extra_tokens + else: + raise AttributeError('Please set num_extra_tokens in backbone' + " or using 'num-extra-tokens'") + + # if a vit-like backbone's num_extra_tokens bigger than 0, view it + # as a VisionTransformer backbone, eg. DeiT, T2T-ViT. + if num_extra_tokens >= 1: + print('Automatically choose the last norm layer before the ' + 'final attention block as target_layer..') + return [norm_layers[-3]] + print('Automatically choose the last norm layer as target_layer.') + target_layers = [norm_layers[-1]] + return target_layers + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # build the model from a config file and a checkpoint file + model = init_model(cfg, args.checkpoint, device=args.device) + if args.preview_model: + print(model) + print('\n Please remove `--preview-model` to get the CAM.') + return + + # apply transform and perpare data + data, src_img = apply_transforms(args.img, cfg.data.test.pipeline) + + # build target layers + if args.target_layers: + target_layers = [ + get_layer(layer, model) for layer in args.target_layers + ] + else: + target_layers = get_default_traget_layers(model, args) + + # init a cam grad calculator + use_cuda = ('cuda' in args.device) + reshape_transform = build_reshape_transform(model, args) + cam = init_cam(args.method, model, target_layers, use_cuda, + reshape_transform) + + # warp the target_category with ClassifierOutputTarget in grad_cam>=1.3.7, + # to fix the bug in #654. 
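+    # With older grad-cam releases, the raw category indices are passed
+    # through unchanged (the `else` branch below).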
+ targets = None + if args.target_category: + grad_cam_v = pkg_resources.get_distribution('grad_cam').version + if digit_version(grad_cam_v) >= digit_version('1.3.7'): + from pytorch_grad_cam.utils.model_targets import \ + ClassifierOutputTarget + targets = [ClassifierOutputTarget(c) for c in args.target_category] + else: + targets = args.target_category + + # calculate cam grads and show|save the visualization image + grayscale_cam = cam( + data['img'].unsqueeze(0), + targets, + eigen_smooth=args.eigen_smooth, + aug_smooth=args.aug_smooth) + show_cam_grad( + grayscale_cam, src_img, title=args.method, out_path=args.save_path) + + +if __name__ == '__main__': + main() diff --git a/tools/visualizations/vis_lr.py b/tools/visualizations/vis_lr.py new file mode 100644 index 0000000..bd34421 --- /dev/null +++ b/tools/visualizations/vis_lr.py @@ -0,0 +1,334 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import re +import time +from pathlib import Path +from pprint import pformat + +import matplotlib.pyplot as plt +import mmcv +import torch.nn as nn +from mmcv import Config, DictAction, ProgressBar +from mmcv.runner import (EpochBasedRunner, IterBasedRunner, IterLoader, + build_optimizer) +from torch.utils.data import DataLoader + +from mmcls.utils import get_root_logger + + +class DummyEpochBasedRunner(EpochBasedRunner): + """Fake Epoch-based Runner. + + This runner won't train model, and it will only call hooks and return all + learning rate in each iteration. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.progress_bar = ProgressBar(self._max_epochs, start=False) + + def train(self, data_loader, **kwargs): + lr_list = [] + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._max_iters = self._max_epochs * len(self.data_loader) + self.call_hook('before_train_epoch') + for i in range(len(self.data_loader)): + self._inner_iter = i + self.call_hook('before_train_iter') + lr_list.append(self.current_lr()) + self.call_hook('after_train_iter') + self._iter += 1 + + self.call_hook('after_train_epoch') + self._epoch += 1 + self.progress_bar.update(1) + return lr_list + + def run(self, data_loaders, workflow, **kwargs): + assert isinstance(data_loaders, list) + assert mmcv.is_list_of(workflow, tuple) + assert len(data_loaders) == len(workflow) + + assert self._max_epochs is not None, ( + 'max_epochs must be specified during instantiation') + + for i, flow in enumerate(workflow): + mode, epochs = flow + if mode == 'train': + self._max_iters = self._max_epochs * len(data_loaders[i]) + break + + self.logger.info('workflow: %s, max: %d epochs', workflow, + self._max_epochs) + self.call_hook('before_run') + + self.progress_bar.start() + lr_list = [] + while self.epoch < self._max_epochs: + for i, flow in enumerate(workflow): + mode, epochs = flow + if isinstance(mode, str): # self.train() + if not hasattr(self, mode): + raise ValueError( + f'runner has no method named "{mode}" to run an ' + 'epoch') + epoch_runner = getattr(self, mode) + else: + raise TypeError( + 'mode in workflow must be a str, but got {}'.format( + type(mode))) + + for _ in range(epochs): + if mode == 'train' and self.epoch >= self._max_epochs: + break + lr_list.extend(epoch_runner(data_loaders[i], **kwargs)) + + self.progress_bar.file.write('\n') + time.sleep(1) # wait for some hooks like loggers to finish + self.call_hook('after_run') + return lr_list + + +class DummyIterBasedRunner(IterBasedRunner): + """Fake Iter-based 
Runner. + + This runner won't train model, and it will only call hooks and return all + learning rate in each iteration. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.progress_bar = ProgressBar(self._max_iters, start=False) + + def train(self, data_loader, **kwargs): + lr_list = [] + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._epoch = data_loader.epoch + next(data_loader) + self.call_hook('before_train_iter') + lr_list.append(self.current_lr()) + self.call_hook('after_train_iter') + self._inner_iter += 1 + self._iter += 1 + self.progress_bar.update(1) + return lr_list + + def run(self, data_loaders, workflow, **kwargs): + assert isinstance(data_loaders, list) + assert mmcv.is_list_of(workflow, tuple) + assert len(data_loaders) == len(workflow) + assert self._max_iters is not None, ( + 'max_iters must be specified during instantiation') + + self.logger.info('workflow: %s, max: %d iters', workflow, + self._max_iters) + self.call_hook('before_run') + + iter_loaders = [IterLoader(x) for x in data_loaders] + + self.call_hook('before_epoch') + + self.progress_bar.start() + lr_list = [] + while self.iter < self._max_iters: + for i, flow in enumerate(workflow): + self._inner_iter = 0 + mode, iters = flow + if not isinstance(mode, str) or not hasattr(self, mode): + raise ValueError( + 'runner has no method named "{}" to run a workflow'. + format(mode)) + iter_runner = getattr(self, mode) + for _ in range(iters): + if mode == 'train' and self.iter >= self._max_iters: + break + lr_list.extend(iter_runner(iter_loaders[i], **kwargs)) + + self.progress_bar.file.write('\n') + time.sleep(1) # wait for some hooks like loggers to finish + self.call_hook('after_epoch') + self.call_hook('after_run') + return lr_list + + +class SimpleModel(nn.Module): + """simple model that do nothing in train_step.""" + + def __init__(self): + super(SimpleModel, self).__init__() + self.conv = nn.Conv2d(1, 1, 1) + + def train_step(self, *args, **kwargs): + pass + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Visualize a Dataset Pipeline') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--dataset-size', + type=int, + help='The size of the dataset. If specify, `build_dataset` will ' + 'be skipped and use this size as the dataset size.') + parser.add_argument( + '--ngpus', + type=int, + default=1, + help='The number of GPUs used in training.') + parser.add_argument('--title', type=str, help='title of figure') + parser.add_argument( + '--style', type=str, default='whitegrid', help='style of plt') + parser.add_argument( + '--save-path', + type=Path, + help='The learning rate curve plot save path') + parser.add_argument( + '--window-size', + default='12*7', + help='Size of the window to display images, in format of "$W*$H".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + if args.window_size != '': + assert re.match(r'\d+\*\d+', args.window_size), \ + "'window-size' must be in format 'W*H'." 
+
+    return args
+
+
+def plot_curve(lr_list, args, iters_per_epoch, by_epoch=True):
+    """Plot the learning rate versus iteration curve."""
+    try:
+        import seaborn as sns
+        sns.set_style(args.style)
+    except ImportError:
+        print("Attention: The plot style won't be applied because the "
+              "'seaborn' package is not installed. Please install it if you "
+              'want a nicer plot style.')
+    wind_w, wind_h = args.window_size.split('*')
+    wind_w, wind_h = int(wind_w), int(wind_h)
+    plt.figure(figsize=(wind_w, wind_h))
+    # draw the learning rate curve on a single axes
+
+    ax: plt.Axes = plt.subplot()
+
+    ax.plot(lr_list, linewidth=1)
+    if by_epoch:
+        ax.xaxis.tick_top()
+        ax.set_xlabel('Iters')
+        ax.xaxis.set_label_position('top')
+        sec_ax = ax.secondary_xaxis(
+            'bottom',
+            functions=(lambda x: x / iters_per_epoch,
+                       lambda y: y * iters_per_epoch))
+        sec_ax.set_xlabel('Epochs')
+        # ticks = range(0, len(lr_list), iters_per_epoch)
+        # plt.xticks(ticks=ticks, labels=range(len(ticks)))
+    else:
+        plt.xlabel('Iters')
+    plt.ylabel('Learning Rate')
+
+    if args.title is None:
+        plt.title(f'{osp.basename(args.config)} Learning Rate curve')
+    else:
+        plt.title(args.title)
+
+    if args.save_path:
+        plt.savefig(args.save_path)
+        print(f'The learning rate graph is saved at {args.save_path}')
+    plt.show()
+
+
+def simulate_train(data_loader, cfg, by_epoch=True):
+    # build logger, data_loader, model and optimizer
+    logger = get_root_logger()
+    data_loaders = [data_loader]
+    model = SimpleModel()
+    optimizer = build_optimizer(model, cfg.optimizer)
+
+    # build runner
+    if by_epoch:
+        runner = DummyEpochBasedRunner(
+            max_epochs=cfg.runner.max_epochs,
+            model=model,
+            optimizer=optimizer,
+            logger=logger)
+    else:
+        runner = DummyIterBasedRunner(
+            max_iters=cfg.runner.max_iters,
+            model=model,
+            optimizer=optimizer,
+            logger=logger)
+
+    # register hooks
+    runner.register_training_hooks(
+        lr_config=cfg.lr_config,
+        custom_hooks_config=cfg.get('custom_hooks', None),
+    )
+
+    # only use the first train workflow
+    workflow = cfg.workflow[:1]
+    assert workflow[0][0] == 'train'
+    return runner.run(data_loaders, workflow)
+
+
+def main():
+    args = parse_args()
+    cfg = Config.fromfile(args.config)
+    if args.cfg_options is not None:
+        cfg.merge_from_dict(args.cfg_options)
+
+    # make sure the save directory exists
+    if args.save_path and not args.save_path.parent.exists():
+        raise Exception(f'The save path is {args.save_path}, but directory '
+                        f"'{args.save_path.parent}' does not exist.")
+
+    # init logger
+    logger = get_root_logger(log_level=cfg.log_level)
+    logger.info('LR config:\n\n' + pformat(cfg.lr_config, sort_dicts=False) +
+                '\n')
+
+    by_epoch = cfg.runner.type == 'EpochBasedRunner'
+
+    # prepare data loader
+    batch_size = cfg.data.samples_per_gpu * args.ngpus
+
+    if args.dataset_size is None and by_epoch:
+        from mmcls.datasets.builder import build_dataset
+        dataset_size = len(build_dataset(cfg.data.train))
+    else:
+        dataset_size = args.dataset_size or batch_size
+
+    fake_dataset = list(range(dataset_size))
+    data_loader = DataLoader(fake_dataset, batch_size=batch_size)
+    dataset_info = (f'\nDataset infos:'
+                    f'\n - Dataset size: {dataset_size}'
+                    f'\n - Samples per GPU: {cfg.data.samples_per_gpu}'
+                    f'\n - Number of GPUs: {args.ngpus}'
+                    f'\n - Total batch size: {batch_size}')
+    if by_epoch:
+        dataset_info += f'\n - Iterations per epoch: {len(data_loader)}'
+    logger.info(dataset_info)
+
+    # simulate the training process
+    lr_list = simulate_train(data_loader, cfg, by_epoch)
+
+    plot_curve(lr_list, args, len(data_loader), by_epoch)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/visualizations/vis_pipeline.py b/tools/visualizations/vis_pipeline.py
new file mode 100644
index 0000000..ffb9b18
--- /dev/null
+++ b/tools/visualizations/vis_pipeline.py
@@ -0,0 +1,337 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import copy
+import itertools
+import os
+import re
+import sys
+import warnings
+from pathlib import Path
+from typing import List
+
+import cv2
+import mmcv
+import numpy as np
+from mmcv import Config, DictAction, ProgressBar
+
+from mmcls.core import visualization as vis
+from mmcls.datasets.builder import PIPELINES, build_dataset, build_from_cfg
+from mmcls.models.utils import to_2tuple
+
+# text style
+bright_style, reset_style = '\x1b[1m', '\x1b[0m'
+red_text, blue_text = '\x1b[31m', '\x1b[34m'
+white_background = '\x1b[107m'
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Visualize a Dataset Pipeline')
+    parser.add_argument('config', help='config file path')
+    parser.add_argument(
+        '--skip-type',
+        type=str,
+        nargs='*',
+        default=['ToTensor', 'Normalize', 'ImageToTensor', 'Collect'],
+        help='the pipelines to skip when visualizing')
+    parser.add_argument(
+        '--output-dir',
+        default='',
+        type=str,
+        help='folder to save output pictures; if not set, do not save.')
+    parser.add_argument(
+        '--phase',
+        default='train',
+        type=str,
+        choices=['train', 'test', 'val'],
+        help='phase of the dataset to visualize; accepts "train", "test" and '
+        '"val". Default train.')
+    parser.add_argument(
+        '--number',
+        type=int,
+        default=sys.maxsize,
+        help='number of images to visualize; must be bigger than 0. If the '
+        'number is bigger than the dataset length, all images in the dataset '
+        'are shown. Default "sys.maxsize", i.e. show the whole dataset.')
+    parser.add_argument(
+        '--mode',
+        default='concat',
+        type=str,
+        choices=['original', 'transformed', 'concat', 'pipeline'],
+        help='display mode; display original pictures or transformed '
+        'pictures or comparison pictures. "original" shows the images loaded '
+        'from disk; "transformed" shows the images after all transforms; '
+        '"concat" shows the original and transformed images side by side; '
+        '"pipeline" shows all the intermediate images. Default concat.')
+    parser.add_argument(
+        '--show',
+        default=False,
+        action='store_true',
+        help='whether to display images in pop-up window. Default False.')
+    parser.add_argument(
+        '--adaptive',
+        default=False,
+        action='store_true',
+        help='whether to automatically adjust the visualization image size')
+    parser.add_argument(
+        '--min-edge-length',
+        default=200,
+        type=int,
+        help='the min edge length when visualizing images, used when '
+        '"--adaptive" is true. Default 200.')
+    parser.add_argument(
+        '--max-edge-length',
+        default=800,
+        type=int,
+        help='the max edge length when visualizing images, used when '
+        '"--adaptive" is true. Default 800.')
+    parser.add_argument(
+        '--bgr2rgb',
+        default=False,
+        action='store_true',
+        help='flip the color channel order of images')
+    parser.add_argument(
+        '--window-size',
+        default='12*7',
+        help='size of the window to display images, in format of "$W*$H".')
+    parser.add_argument(
+        '--cfg-options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into config file. If the value to '
+        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
+        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--show-options', + nargs='+', + action=DictAction, + help='custom options for display. key-value pair in xxx=yyy. options ' + 'in `mmcls.core.visualization.ImshowInfosContextManager.put_img_infos`' + ) + args = parser.parse_args() + + assert args.number > 0, "'args.number' must be larger than zero." + if args.window_size != '': + assert re.match(r'\d+\*\d+', args.window_size), \ + "'window-size' must be in format 'W*H'." + if args.output_dir == '' and not args.show: + raise ValueError("if '--output-dir' and '--show' are not set, " + 'nothing will happen when the program running.') + + if args.show_options is None: + args.show_options = {} + return args + + +def retrieve_data_cfg(config_path, skip_type, cfg_options, phase): + cfg = Config.fromfile(config_path) + if cfg_options is not None: + cfg.merge_from_dict(cfg_options) + data_cfg = cfg.data[phase] + while 'dataset' in data_cfg: + data_cfg = data_cfg['dataset'] + data_cfg['pipeline'] = [ + x for x in data_cfg.pipeline if x['type'] not in skip_type + ] + + return cfg + + +def build_dataset_pipelines(cfg, phase): + """build dataset and pipeline from config. + + Separate the pipeline except 'LoadImageFromFile' step if + 'LoadImageFromFile' in the pipeline. + """ + data_cfg = cfg.data[phase] + loadimage_pipeline = [] + if len(data_cfg.pipeline + ) != 0 and data_cfg.pipeline[0]['type'] == 'LoadImageFromFile': + loadimage_pipeline.append(data_cfg.pipeline.pop(0)) + origin_pipeline = data_cfg.pipeline + data_cfg.pipeline = loadimage_pipeline + dataset = build_dataset(data_cfg) + pipelines = { + pipeline_cfg['type']: build_from_cfg(pipeline_cfg, PIPELINES) + for pipeline_cfg in origin_pipeline + } + + return dataset, pipelines + + +def prepare_imgs(args, imgs: List[np.ndarray], steps=None): + """prepare the showing picture.""" + ori_shapes = [img.shape for img in imgs] + # adaptive adjustment to rescale pictures + if args.adaptive: + for i, img in enumerate(imgs): + imgs[i] = adaptive_size(img, args.min_edge_length, + args.max_edge_length) + else: + # if src image is too large or too small, + # warning a "--adaptive" message. 
+        for ori_h, ori_w, _ in ori_shapes:
+            if (args.min_edge_length > ori_h or args.min_edge_length > ori_w
+                    or args.max_edge_length < ori_h
+                    or args.max_edge_length < ori_w):
+                msg = red_text
+                msg += 'The visualization picture is too small or too large to'
+                msg += ' put text information on it, please add '
+                msg += bright_style + red_text + white_background
+                msg += '"--adaptive"'
+                msg += reset_style + red_text
+                msg += ' to adaptively rescale the displayed pictures'
+                msg += reset_style
+                warnings.warn(msg)
+
+    if len(imgs) == 1:
+        return imgs[0]
+    else:
+        return concat_imgs(imgs, steps, ori_shapes)
+
+
+def concat_imgs(imgs, steps, ori_shapes):
+    """Concatenate a list of pictures into one big picture, aligning their
+    heights."""
+    show_shapes = [img.shape for img in imgs]
+    show_heights = [shape[0] for shape in show_shapes]
+    show_widths = [shape[1] for shape in show_shapes]
+
+    max_height = max(show_heights)
+    text_height = 20
+    font_size = 0.5
+    pic_horizontal_gap = min(show_widths) // 10
+    for i, img in enumerate(imgs):
+        cur_height = show_heights[i]
+        pad_height = max_height - cur_height
+        pad_top, pad_bottom = to_2tuple(pad_height // 2)
+        # handle the case where pad_height is an odd number
+        if pad_height % 2 == 1:
+            pad_top = pad_top + 1
+        pad_bottom += text_height * 3  # reserve pixels for the step text
+        pad_left, pad_right = to_2tuple(pic_horizontal_gap)
+        # make border
+        img = cv2.copyMakeBorder(
+            img,
+            pad_top,
+            pad_bottom,
+            pad_left,
+            pad_right,
+            cv2.BORDER_CONSTANT,
+            value=(255, 255, 255))
+        # put the transform step information at the bottom
+        imgs[i] = cv2.putText(
+            img=img,
+            text=steps[i],
+            org=(pic_horizontal_gap, max_height + text_height // 2),
+            fontFace=cv2.FONT_HERSHEY_TRIPLEX,
+            fontScale=font_size,
+            color=(255, 0, 0),
+            lineType=1)
+        # put the image size information at the bottom
+        imgs[i] = cv2.putText(
+            img=img,
+            text=str(ori_shapes[i]),
+            org=(pic_horizontal_gap, max_height + int(text_height * 1.5)),
+            fontFace=cv2.FONT_HERSHEY_TRIPLEX,
+            fontScale=font_size,
+            color=(255, 0, 0),
+            lineType=1)
+
+    # concatenate the height-aligned images horizontally
+    board = np.concatenate(imgs, axis=1)
+    return board
+
+
+def adaptive_size(image, min_edge_length, max_edge_length, src_shape=None):
+    """Rescale the image if it is too small (e.g. CIFAR) to put text on."""
+    assert min_edge_length >= 0 and max_edge_length >= 0
+    assert max_edge_length >= min_edge_length
+    src_shape = image.shape if src_shape is None else src_shape
+    image_h, image_w, _ = src_shape
+
+    if image_h < min_edge_length or image_w < min_edge_length:
+        image = mmcv.imrescale(
+            image, min(min_edge_length / image_h, min_edge_length / image_w))
+    if image_h > max_edge_length or image_w > max_edge_length:
+        image = mmcv.imrescale(
+            image, max(max_edge_length / image_h, max_edge_length / image_w))
+    return image
+
+
+def get_display_img(args, item, pipelines):
+    """Get the image to display."""
+    # the source picture may be in RGB or BGR order due to different backends.
+    if args.bgr2rgb:
+        item['img'] = mmcv.bgr2rgb(item['img'])
+    src_image = item['img'].copy()
+    pipeline_images = [src_image]
+
+    # get intermediate images through pipelines
+    if args.mode in ['transformed', 'concat', 'pipeline']:
+        for pipeline in pipelines.values():
+            item = pipeline(item)
+            trans_image = copy.deepcopy(item['img'])
+            trans_image = np.ascontiguousarray(trans_image, dtype=np.uint8)
+            pipeline_images.append(trans_image)
+
+    # concatenate the images to show according to the mode
+    if args.mode == 'original':
+        image = prepare_imgs(args, [src_image], ['src'])
+    elif args.mode == 'transformed':
+        image = prepare_imgs(args, [pipeline_images[-1]], ['transformed'])
+    elif args.mode == 'concat':
+        steps = ['src', 'transformed']
+        image = prepare_imgs(args, [pipeline_images[0], pipeline_images[-1]],
+                             steps)
+    elif args.mode == 'pipeline':
+        steps = ['src'] + list(pipelines.keys())
+        image = prepare_imgs(args, pipeline_images, steps)
+
+    return image
+
+
+def main():
+    args = parse_args()
+    wind_w, wind_h = args.window_size.split('*')
+    wind_w, wind_h = int(wind_w), int(wind_h)  # display window size
+    cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options,
+                            args.phase)
+
+    dataset, pipelines = build_dataset_pipelines(cfg, args.phase)
+    CLASSES = dataset.CLASSES
+    display_number = min(args.number, len(dataset))
+    progress_bar = ProgressBar(display_number)
+
+    with vis.ImshowInfosContextManager(fig_size=(wind_w, wind_h)) as manager:
+        for i, item in enumerate(itertools.islice(dataset, display_number)):
+            image = get_display_img(args, item, pipelines)
+
+            # dist_path defaults to None, which means pictures are not saved
+            dist_path = None
+            if args.output_dir:
+                # some datasets don't have filenames, such as cifar
+                src_path = item.get('filename', '{}.jpg'.format(i))
+                dist_path = os.path.join(args.output_dir, Path(src_path).name)
+
+            infos = dict(label=CLASSES[item['gt_label']])
+
+            ret, _ = manager.put_img_infos(
+                image,
+                infos,
+                font_size=20,
+                out_file=dist_path,
+                show=args.show,
+                **args.show_options)
+
+            progress_bar.update()
+
+            if ret == 1:
+                print('\nManually interrupted.')
+                break
+
+
+if __name__ == '__main__':
+    main()
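For reference, a minimal usage sketch of the two new tools based only on the arguments defined above; the config path is a placeholder (this diff does not add any config file), so substitute any config from your checkout.

```shell
# Plot the learning rate schedule of a config without training.
# --dataset-size skips build_dataset, so no dataset needs to be on disk.
python tools/visualizations/vis_lr.py configs/your_model/your_config.py \
    --dataset-size 50000 --ngpus 4 --save-path lr_curve.png

# Show original vs. transformed training images for the same config.
# Either --show or --output-dir must be given, otherwise the tool exits.
python tools/visualizations/vis_pipeline.py configs/your_model/your_config.py \
    --phase train --mode concat --number 10 --show --adaptive
```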